corpus_id: stringlengths 7-12
paper_id: stringlengths 9-16
title: stringlengths 1-261
abstract: stringlengths 70-4.02k
source: stringclasses, 1 value
bibtex: stringlengths 208-20.9k
citation_key: stringlengths 6-100
arxiv-4401
0807.3795
Relational Lattice Axioms
<|reference_start|>Relational Lattice Axioms: Relational lattice is a formal mathematical model for Relational algebra. It reduces the set of six classic relational algebra operators to two: natural join and inner union. We continue to investigate Relational lattice properties with emphasis on axiomatic definition. New results include additional axioms, an equational definition for set difference (more generally anti-join), and a case study demonstrating application of the relational lattice theory for query transformations.<|reference_end|>
arxiv
@article{spight2008relational, title={Relational Lattice Axioms}, author={Marshall Spight, Vadim Tropashko}, journal={arXiv preprint arXiv:0807.3795}, year={2008}, archivePrefix={arXiv}, eprint={0807.3795}, primaryClass={cs.DB} }
spight2008relational
arxiv-4402
0807.3803
Quantum Convolutional Coding with Shared Entanglement: General Structure
<|reference_start|>Quantum Convolutional Coding with Shared Entanglement: General Structure: We present a general theory of entanglement-assisted quantum convolutional coding. The codes have a convolutional or memory structure, they assume that the sender and receiver share noiseless entanglement prior to quantum communication, and they are not restricted to possess the Calderbank-Shor-Steane structure as in previous work. We provide two significant advances for quantum convolutional coding theory. We first show how to "expand" a given set of quantum convolutional generators. This expansion step acts as a preprocessor for a polynomial symplectic Gram-Schmidt orthogonalization procedure that simplifies the commutation relations of the expanded generators to be the same as those of entangled Bell states (ebits) and ancilla qubits. The above two steps produce a set of generators with equivalent error-correcting properties to those of the original generators. We then demonstrate how to perform online encoding and decoding for a stream of information qubits, halves of ebits, and ancilla qubits. The upshot of our theory is that the quantum code designer can engineer quantum convolutional codes with desirable error-correcting properties without having to worry about the commutation relations of these generators.<|reference_end|>
arxiv
@article{wilde2008quantum, title={Quantum Convolutional Coding with Shared Entanglement: General Structure}, author={Mark M. Wilde and Todd A. Brun}, journal={Quantum Information Processing, Volume 9, Number 5, pages 509-540, September 2010}, year={2008}, doi={10.1007/s11128-010-0179-9}, number={CSI-08-07-02}, archivePrefix={arXiv}, eprint={0807.3803}, primaryClass={quant-ph cs.IT math.IT} }
wilde2008quantum
arxiv-4403
0807.3806
On the Rate of Channel Polarization
<|reference_start|>On the Rate of Channel Polarization: It is shown that for any binary-input discrete memoryless channel $W$ with symmetric capacity $I(W)$ and any rate $R <I(W)$, the probability of block decoding error for polar coding under successive cancellation decoding satisfies $P_e \le 2^{-N^\beta}$ for any $\beta<\frac12$ when the block-length $N$ is large enough.<|reference_end|>
arxiv
@article{arikan2008on, title={On the Rate of Channel Polarization}, author={Erdal Arikan, Emre Telatar}, journal={arXiv preprint arXiv:0807.3806}, year={2008}, archivePrefix={arXiv}, eprint={0807.3806}, primaryClass={cs.IT math.IT} }
arikan2008on
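As a quick numerical illustration (not from the paper), the bound quoted in the abstract above, $P_e \le 2^{-N^\beta}$, can be evaluated for a few block-lengths and an exponent just below 1/2:

```python
# Evaluate the polarization error bound 2^(-N^beta) from the abstract above
# for a few block-lengths N and an exponent beta slightly below 1/2.
beta = 0.49
for N in (2**8, 2**12, 2**16, 2**20):
    bound = 2.0 ** (-(N ** beta))
    print(f"N = {N:>8}:  2^(-N^{beta}) = {bound:.3e}")
```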
arxiv-4404
0807.3845
Formal semantics of language and the Richard-Berry paradox
<|reference_start|>Formal semantics of language and the Richard-Berry paradox: The classical logical antinomy known as the Richard-Berry paradox is combined with plausible assumptions about the size, i.e., the descriptional complexity, of Turing machines formalizing certain sentences, to show that formalization of language leads to a contradiction.<|reference_end|>
arxiv
@article{reghizzi2008formal, title={Formal semantics of language and the Richard-Berry paradox}, author={Stefano Crespi Reghizzi}, journal={arXiv preprint arXiv:0807.3845}, year={2008}, archivePrefix={arXiv}, eprint={0807.3845}, primaryClass={cs.CL cs.CC cs.LO} }
reghizzi2008formal
arxiv-4405
0807.3865
Pseudo-random Sequences Generated by Cellular Automata
<|reference_start|>Pseudo-random Sequences Generated by Cellular Automata: Generation of pseudo random sequences by cellular automata, as well as by hybrid cellular automata is surveyed. An application to the fast evaluation and FPGA implementation of some classes of boolean functions is sketched out.<|reference_end|>
arxiv
@article{martin2008pseudo-random, title={Pseudo-random Sequences Generated by Cellular Automata}, author={Bruno Martin (I3S), Patrick Solé (I3S)}, journal={International Conference on Relations, Orders and Graphs: Interactions with Computer Science, Mahdia : Tunisie (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0807.3865}, primaryClass={cs.DM} }
martin2008pseudo-random
arxiv-4406
0807.3879
Quantifying Timing Leaks and Cost Optimisation
<|reference_start|>Quantifying Timing Leaks and Cost Optimisation: We develop a new notion of security against timing attacks where the attacker is able to simultaneously observe the execution time of a program and the probability of the values of low variables. We then show how to measure the security of a program with respect to this notion via a computable estimate of the timing leakage and use this estimate for cost optimisation.<|reference_end|>
arxiv
@article{di pierro2008quantifying, title={Quantifying Timing Leaks and Cost Optimisation}, author={Alessandra Di Pierro, Chris Hankin and Herbert Wiklicky}, journal={arXiv preprint arXiv:0807.3879}, year={2008}, archivePrefix={arXiv}, eprint={0807.3879}, primaryClass={cs.CR cs.PL} }
di pierro2008quantifying
arxiv-4407
0807.3908
A Distributed Process Infrastructure for a Distributed Data Structure
<|reference_start|>A Distributed Process Infrastructure for a Distributed Data Structure: The Resource Description Framework (RDF) is continuing to grow outside the bounds of its initial function as a metadata framework and into the domain of general-purpose data modeling. This expansion has been facilitated by the continued increase in the capacity and speed of RDF database repositories known as triple-stores. High-end RDF triple-stores can hold and process on the order of 10 billion triples. In an effort to provide a seamless integration of the data contained in RDF repositories, the Linked Data community is providing specifications for linking RDF data sets into a universal distributed graph that can be traversed by both man and machine. While the seamless integration of RDF data sets is important, at the scale these data sets currently have and will ultimately grow to, the "download and index" philosophy of the World Wide Web will not so easily map over to the Semantic Web. This essay discusses the importance of adding a distributed RDF process infrastructure to the current distributed RDF data structure.<|reference_end|>
arxiv
@article{rodriguez2008a, title={A Distributed Process Infrastructure for a Distributed Data Structure}, author={Marko A. Rodriguez}, journal={arXiv preprint arXiv:0807.3908}, year={2008}, number={LA-UR-08-04138}, archivePrefix={arXiv}, eprint={0807.3908}, primaryClass={cs.AI cs.DL} }
rodriguez2008a
arxiv-4408
0807.3913
DMT of weighted Parallel Channels: Application to Broadcast Channel
<|reference_start|>DMT of weighted Parallel Channels: Application to Broadcast Channel: In a broadcast channel with random packet arrival and transmission queues, the stability of the system is achieved by maximizing a weighted sum rate capacity with suitable weights that depend on the queue size. The weighted sum rate capacity using Dirty Paper Coding (DPC) and Zero Forcing (ZF) is asymptotically equivalent to the weighted sum capacity over parallel single-channels. In this paper, we study the Diversity Multiplexing Tradeoff (DMT) of the fading broadcast channel under a fixed weighted sum rate capacity constraint. The DMT of both identical and different parallel weighted MISO channels is first derived. Finally, we deduce the DMT of a broadcast channel using DPC and ZF precoders.<|reference_end|>
arxiv
@article{mroueh2008dmt, title={DMT of weighted Parallel Channels: Application to Broadcast Channel}, author={Lina Mroueh, Stéphanie Rouquette-Léveil, Ghaya Rekaya-Ben Othman and Jean-Claude Belfiore}, journal={arXiv preprint arXiv:0807.3913}, year={2008}, doi={10.1109/ISIT.2008.4595413}, archivePrefix={arXiv}, eprint={0807.3913}, primaryClass={cs.IT math.IT} }
mroueh2008dmt
arxiv-4409
0807.3917
Channel polarization: A method for constructing capacity-achieving codes for symmetric binary-input memoryless channels
<|reference_start|>Channel polarization: A method for constructing capacity-achieving codes for symmetric binary-input memoryless channels: A method is proposed, called channel polarization, to construct code sequences that achieve the symmetric capacity $I(W)$ of any given binary-input discrete memoryless channel (B-DMC) $W$. The symmetric capacity is the highest rate achievable subject to using the input letters of the channel with equal probability. Channel polarization refers to the fact that it is possible to synthesize, out of $N$ independent copies of a given B-DMC $W$, a second set of $N$ binary-input channels $\{W_N^{(i)}:1\le i\le N\}$ such that, as $N$ becomes large, the fraction of indices $i$ for which $I(W_N^{(i)})$ is near 1 approaches $I(W)$ and the fraction for which $I(W_N^{(i)})$ is near 0 approaches $1-I(W)$. The polarized channels $\{W_N^{(i)}\}$ are well-conditioned for channel coding: one need only send data at rate 1 through those with capacity near 1 and at rate 0 through the remaining. Codes constructed on the basis of this idea are called polar codes. The paper proves that, given any B-DMC $W$ with $I(W)>0$ and any target rate $R < I(W)$, there exists a sequence of polar codes $\{{\mathscr C}_n;n\ge 1\}$ such that ${\mathscr C}_n$ has block-length $N=2^n$, rate $\ge R$, and probability of block error under successive cancellation decoding bounded as $P_{e}(N,R) \le O(N^{-\frac14})$ independently of the code rate. This performance is achievable by encoders and decoders with complexity $O(N\log N)$ for each.<|reference_end|>
arxiv
@article{arikan2008channel, title={Channel polarization: A method for constructing capacity-achieving codes for symmetric binary-input memoryless channels}, author={Erdal Arikan}, journal={arXiv preprint arXiv:0807.3917}, year={2008}, doi={10.1109/TIT.2009.2021379}, archivePrefix={arXiv}, eprint={0807.3917}, primaryClass={cs.IT math.IT} }
arikan2008channel
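The polarization phenomenon described in the abstract above is easiest to see on the binary erasure channel, where the single-step channel transform has the closed form $Z^- = 2Z - Z^2$, $Z^+ = Z^2$ for the erasure probability. The following sketch (a standard textbook illustration, not code from the paper) tracks the synthesized channels:

```python
# Polarization of a binary erasure channel BEC(eps): after n levels of the
# recursion, track the erasure probabilities of the 2^n synthesized channels.
# For the BEC the single-step transform is exact: eps- = 2e - e^2, eps+ = e^2.
def polarize_bec(eps, n):
    channels = [eps]
    for _ in range(n):
        channels = [z for e in channels for z in (2*e - e*e, e*e)]
    return channels

chs = polarize_bec(eps=0.5, n=10)          # 1024 synthesized channels
good = sum(e < 1e-3 for e in chs)          # almost noiseless
bad = sum(e > 1 - 1e-3 for e in chs)       # almost useless
print(good / len(chs), bad / len(chs))     # fractions approach I(W)=0.5 and 1-I(W)=0.5
```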
arxiv-4410
0807.3933
Interface Matching and Combining Techniques for Services Integration
<|reference_start|>Interface Matching and Combining Techniques for Services Integration: The development of many highly dynamic environments, like pervasive environments, introduces the possibility of using geographically close services. Dynamically integrating and unintegrating these services in running applications is a key challenge for this use. In this article, we classify service integration issues according to the interfaces exported by services and the internal combining techniques. We also propose a contextual integration service, IntegServ, and an interface, Integrable, for developing services.<|reference_end|>
arxiv
@article{mouël2008interface, title={Interface Matching and Combining Techniques for Services Integration}, author={Frédéric Le Mouël (INRIA Rhône-Alpes / CITI), Noha Ibrahim (INRIA Rhône-Alpes / CITI), Stéphane Frénot (INRIA Rhône-Alpes / CITI)}, journal={In 3er Congreso Nacional de Ciencias de la Computacion (CNCC'2005) (2005)}, year={2008}, archivePrefix={arXiv}, eprint={0807.3933}, primaryClass={cs.OS cs.SE} }
mouël2008interface
arxiv-4411
0807.3979
Unfolding in CHR
<|reference_start|>Unfolding in CHR: Program transformation is an appealing technique which allows to improve run-time efficiency, space-consumption and more generally to optimize a given program. Essentially it consists of a sequence of syntactic program manipulations which preserves some kind of semantic equivalence. One of the basic operations which is used by most program transformation systems is unfolding which consists in the replacement of a procedure call by its definition. While there is a large body of literature on transformation and unfolding of sequential programs, very few papers have addressed this issue for concurrent languages and, to the best of our knowledge, no other has considered unfolding of CHR programs. This paper defines a correct unfolding system for CHR programs. We define an unfolding rule, show its correctness and discuss some conditions which can be used to delete an unfolded rule while preserving the program meaning. We prove that confluence and termination properties are preserved by the above transformations.<|reference_end|>
arxiv
@article{gabbrielli2008unfolding, title={Unfolding in CHR}, author={Maurizio Gabbrielli, Maria Chiara Meo, Paolo Tacchella}, journal={arXiv preprint arXiv:0807.3979}, year={2008}, archivePrefix={arXiv}, eprint={0807.3979}, primaryClass={cs.PL} }
gabbrielli2008unfolding
arxiv-4412
0807.3991
Codes Associated with Special Linear Groups and Power Moments of Multi-dimensional Kloosterman Sums
<|reference_start|>Codes Associated with Special Linear Groups and Power Moments of Multi-dimensional Kloosterman Sums: In this paper, we construct the binary linear codes $C(SL(n,q))$ associated with finite special linear groups $SL(n,q)$, with both \emph{n,q} powers of two. Then, via Pless power moment identity and utilizing our previous result on the explicit expression of the Gauss sum for $SL(n,q)$, we obtain a recursive formula for the power moments of multi-dimensional Kloosterman sums in terms of the frequencies of weights in $C(SL(n,q))$. In particular, when $n=2$, this gives a recursive formula for the power moments of Kloosterman sums. We illustrate our results with some examples.<|reference_end|>
arxiv
@article{kim2008codes, title={Codes Associated with Special Linear Groups and Power Moments of Multi-dimensional Kloosterman Sums}, author={Dae San Kim}, journal={arXiv preprint arXiv:0807.3991}, year={2008}, archivePrefix={arXiv}, eprint={0807.3991}, primaryClass={math.NT cs.IT math.IT} }
kim2008codes
arxiv-4413
0807.3996
Topology and Geometry of Online Social Networks
<|reference_start|>Topology and Geometry of Online Social Networks: In this paper, we study certain geometric and topological properties of online social networks using the concept of density and geometric vector spaces. "Moi Krug" ("My Circle"), a Russian social network that promotes the principle of the "six degrees of separation" and is positioning itself as a vehicle for professionals and recruiters seeking each others' services, is used as a test vehicle.<|reference_end|>
arxiv
@article{zinoviev2008topology, title={Topology and Geometry of Online Social Networks}, author={D. Zinoviev}, journal={Proc. 12th World Multi-Conference on Systemics, Cybernetics and Informatics VI (2008) 138-143}, year={2008}, archivePrefix={arXiv}, eprint={0807.3996}, primaryClass={cs.CY physics.soc-ph} }
zinoviev2008topology
arxiv-4414
0807.4009
Statistical mechanics of lossy compression for non-monotonic multilayer perceptrons
<|reference_start|>Statistical mechanics of lossy compression for non-monotonic multilayer perceptrons: A lossy data compression scheme for uniformly biased Boolean messages is investigated via statistical mechanics techniques. We utilize tree-like committee machine (committee tree) and tree-like parity machine (parity tree) whose transfer functions are non-monotonic. The scheme performance at the infinite code length limit is analyzed using the replica method. Both committee and parity treelike networks are shown to saturate the Shannon bound. The AT stability of the Replica Symmetric solution is analyzed, and the tuning of the non-monotonic transfer function is also discussed.<|reference_end|>
arxiv
@article{cousseau2008statistical, title={Statistical mechanics of lossy compression for non-monotonic multilayer perceptrons}, author={Florent Cousseau, Kazushi Mimura, Toshiaki Omori, Masato Okada}, journal={Phys. Rev. E, 78, 021124 (2008)}, year={2008}, doi={10.1103/PhysRevE.78.021124}, archivePrefix={arXiv}, eprint={0807.4009}, primaryClass={cond-mat.stat-mech cond-mat.dis-nn cs.IT math.IT} }
cousseau2008statistical
arxiv-4415
0807.4052
Modularity clustering is force-directed layout
<|reference_start|>Modularity clustering is force-directed layout: Two natural and widely used representations for the community structure of networks are clusterings, which partition the vertex set into disjoint subsets, and layouts, which assign the vertices to positions in a metric space. This paper unifies prominent characterizations of layout quality and clustering quality, by showing that energy models of pairwise attraction and repulsion subsume Newman and Girvan's modularity measure. Layouts with optimal energy are relaxations of, and are thus consistent with, clusterings with optimal modularity, which is of practical relevance because both representations are complementary and often used together.<|reference_end|>
arxiv
@article{noack2008modularity, title={Modularity clustering is force-directed layout}, author={Andreas Noack}, journal={Phys. Rev. E 79, 026102 (2009)}, year={2008}, doi={10.1103/PhysRevE.79.026102}, archivePrefix={arXiv}, eprint={0807.4052}, primaryClass={cs.DM cs.CG physics.soc-ph} }
noack2008modularity
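For reference, the Newman-Girvan modularity measure that the abstract above relates to energy-based layouts is $Q = \frac{1}{2m}\sum_{ij}(A_{ij} - k_i k_j/2m)\,\delta(c_i,c_j)$; a minimal sketch of this standard definition (not the paper's code):

```python
import numpy as np

def modularity(A, communities):
    """Newman-Girvan modularity Q of a partition.

    A: symmetric 0/1 adjacency matrix (numpy array);
    communities: list giving the community label of each vertex."""
    k = A.sum(axis=1)                   # vertex degrees
    two_m = A.sum()                     # 2 * number of edges
    Q = 0.0
    n = len(communities)
    for i in range(n):
        for j in range(n):
            if communities[i] == communities[j]:
                Q += A[i, j] - k[i] * k[j] / two_m
    return Q / two_m

# Two triangles joined by a single edge, split into their natural communities.
A = np.zeros((6, 6))
for u, v in [(0, 1), (1, 2), (0, 2), (3, 4), (4, 5), (3, 5), (2, 3)]:
    A[u, v] = A[v, u] = 1
print(modularity(A, [0, 0, 0, 1, 1, 1]))    # 5/14, about 0.357
```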
arxiv-4416
0807.4073
Rational streams coalgebraically
<|reference_start|>Rational streams coalgebraically: We study rational streams (over a field) from a coalgebraic perspective. Exploiting the finality of the set of streams, we present an elementary and uniform proof of the equivalence of four notions of representability of rational streams: by finite dimensional linear systems; by finite stream circuits; by finite weighted stream automata; and by finite dimensional subsystems of the set of streams.<|reference_end|>
arxiv
@article{rutten2008rational, title={Rational streams coalgebraically}, author={J.J.M.M. Rutten}, journal={Logical Methods in Computer Science, Volume 4, Issue 3 (September 19, 2008) lmcs:1164}, year={2008}, doi={10.2168/LMCS-4(3:9)2008}, archivePrefix={arXiv}, eprint={0807.4073}, primaryClass={cs.LO} }
rutten2008rational
arxiv-4417
0807.4074
Low-delay, Low-PAPR, High-rate Non-square Complex Orthogonal Designs
<|reference_start|>Low-delay, Low-PAPR, High-rate Non-square Complex Orthogonal Designs: The maximal rate for non-square Complex Orthogonal Designs (CODs) with $n$ transmit antennas is ${1/2}+\frac{1}{n}$ if $n$ is even and ${1/2}+\frac{1}{n+1}$ if $n$ is odd, which are close to 1/2 for large values of $n.$ A class of maximal rate non-square CODs has been constructed by Liang (IEEE Trans. Inform. Theory, 2003), and Lu et al. (IEEE Trans. Inform. Theory, 2005) have shown that the decoding delay of the codes given by Liang can be reduced by 50% when the number of transmit antennas is a multiple of 4. Adams et al. (IEEE Trans. Inform. Theory, 2007) have shown that the designs of Liang are of minimal delay for $n$ equal to 1 and 3 modulo 4 and that those of Lu et al. are of minimal delay when $n$ is a multiple of $4.$ However, these minimal delays are large compared to the delays of the rate-1/2 non-square CODs constructed by Tarokh et al. (IEEE Trans. Inform. Theory, 1999) from rate-1 real orthogonal designs (RODs). In this paper, we construct a class of rate-1/2 non-square CODs for any $n$ with decoding delay equal to 50% of the delay of the rate-1/2 codes given by Tarokh et al. This is achieved by first giving a general construction of rate-1 square Real Orthogonal Designs (RODs), which includes as special cases the well-known constructions of Adams, Lax and Phillips and of Geramita and Pullman, and then making use of it to obtain the desired rate-1/2 non-square COD. For the case of 9 transmit antennas, our rate-1/2 COD is shown to be of minimal delay. The proposed construction results in designs with zero entries, which may have high Peak-to-Average Power Ratio (PAPR); it is shown that by appropriate postmultiplication, a design with no zero entries can be obtained with no change in the code parameters.<|reference_end|>
arxiv
@article{das2008low-delay, title={Low-delay, Low-PAPR, High-rate Non-square Complex Orthogonal Designs}, author={Smarajit Das and B. Sundar Rajan}, journal={arXiv preprint arXiv:0807.4074}, year={2008}, archivePrefix={arXiv}, eprint={0807.4074}, primaryClass={cs.IT math.IT} }
das2008low-delay
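The maximal-rate expression quoted in the abstract above, $1/2 + 1/n$ for even $n$ and $1/2 + 1/(n+1)$ for odd $n$, is easy to tabulate; a small sketch:

```python
from fractions import Fraction

def max_cod_rate(n):
    """Maximal rate of a non-square COD with n transmit antennas (formula from the abstract)."""
    return Fraction(1, 2) + (Fraction(1, n) if n % 2 == 0 else Fraction(1, n + 1))

for n in range(2, 11):
    print(n, max_cod_rate(n))   # approaches 1/2 as n grows
```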
arxiv-4418
0807.4111
A High Speed, Post-Processing Free, Quantum Random Number Generator
<|reference_start|>A High Speed, Post-Processing Free, Quantum Random Number Generator: A quantum random number generator (QRNG) based on gated single photon detection of an InGaAs photodiode at GHz frequency is demonstrated. Owing to the extremely long coherence time of each photon, each photon's wavefunction extends over many gating cycles of the photodiode. The collapse of the photon wavefunction on random gating cycles, as well as random photon arrival-time detection events, is used to generate sequences of random bits at a rate of 4.01 megabits/s. Importantly, the random outputs are intrinsically bias-free and require no post-processing procedure to pass random number statistical tests, making this QRNG an extremely simple device.<|reference_end|>
arxiv
@article{dynes2008a, title={A High Speed, Post-Processing Free, Quantum Random Number Generator}, author={J. F. Dynes, Z. L. Yuan, A. W. Sharpe and A. J. Shields}, journal={Applied Physics Letters Vol. 93 031109 (2008)}, year={2008}, doi={10.1063/1.2961000}, archivePrefix={arXiv}, eprint={0807.4111}, primaryClass={quant-ph cs.CR} }
dynes2008a
arxiv-4419
0807.4128
Square Complex Orthogonal Designs with Low PAPR and Signaling Complexity
<|reference_start|>Square Complex Orthogonal Designs with Low PAPR and Signaling Complexity: Space-Time Block Codes from square complex orthogonal designs (SCODs) have been extensively studied, and most of the existing SCODs contain a large number of zeros. The zeros in the designs result in a high peak-to-average power ratio (PAPR) and also impose a severe constraint on hardware implementation of the code, since some of the transmitting antennas must be turned off whenever a zero is transmitted. Recently, rate-1/2 SCODs with no zero entry have been reported for 8 transmit antennas. In this paper, SCODs with no zero entry for $2^a$ transmit antennas, whenever $a+1$ is a power of 2, are constructed, which includes the 8-transmit-antenna case as a special case. More generally, for arbitrary values of $a$, an explicit construction of $2^a\times 2^a$ rate $\frac{a+1}{2^a}$ SCODs with the ratio of the number of zero entries to the total number of entries equal to $1-\frac{a+1}{2^a}2^{\lfloor \log_2(\frac{2^a}{a+1}) \rfloor}$ is reported, whereas for standard known constructions, the ratio is $1-\frac{a+1}{2^a}$. The codes presented do not result in increased signaling complexity. Simulation results show that the codes constructed in this paper outperform the codes using the standard construction under a peak power constraint while performing the same under an average power constraint.<|reference_end|>
arxiv
@article{das2008square, title={Square Complex Orthogonal Designs with Low PAPR and Signaling Complexity}, author={Smarajit Das and B. Sundar Rajan}, journal={arXiv preprint arXiv:0807.4128}, year={2008}, archivePrefix={arXiv}, eprint={0807.4128}, primaryClass={cs.IT math.IT} }
das2008square
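The two zero-entry ratios stated in the abstract above can be compared numerically; a small sketch evaluating $1-\frac{a+1}{2^a}2^{\lfloor \log_2(2^a/(a+1)) \rfloor}$ against the standard $1-\frac{a+1}{2^a}$:

```python
from math import floor, log2

def zero_ratio_proposed(a):
    """Fraction of zero entries in the proposed 2^a x 2^a SCOD (formula from the abstract)."""
    n = 2 ** a
    return 1 - (a + 1) / n * 2 ** floor(log2(n / (a + 1)))

def zero_ratio_standard(a):
    """Fraction of zero entries in the standard construction (formula from the abstract)."""
    return 1 - (a + 1) / 2 ** a

for a in range(2, 9):
    print(a, round(zero_ratio_proposed(a), 4), round(zero_ratio_standard(a), 4))
# e.g. a = 3 gives 0.0 for the proposed construction (no zero entries), 0.5 for the standard one.
```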
arxiv-4420
0807.4132
Modeling Time in Computing: A Taxonomy and a Comparative Survey
<|reference_start|>Modeling Time in Computing: A Taxonomy and a Comparative Survey: The increasing relevance of areas such as real-time and embedded systems, pervasive computing, hybrid systems control, and biological and social systems modeling is bringing a growing attention to the temporal aspects of computing, not only in the computer science domain, but also in more traditional fields of engineering. This article surveys various approaches to the formal modeling and analysis of the temporal features of computer-based systems, with a level of detail that is suitable also for non-specialists. In doing so, it provides a unifying framework, rather than just a comprehensive list of formalisms. The paper first lays out some key dimensions along which the various formalisms can be evaluated and compared. Then, a significant sample of formalisms for time modeling in computing are presented and discussed according to these dimensions. The adopted perspective is, to some extent, historical, going from "traditional" models and formalisms to more modern ones.<|reference_end|>
arxiv
@article{furia2008modeling, title={Modeling Time in Computing: A Taxonomy and a Comparative Survey}, author={Carlo A. Furia, Dino Mandrioli, Angelo Morzenti, Matteo Rossi}, journal={ACM Computing Surveys, 42(2):1--59, February 2010}, year={2008}, doi={10.1145/1667062.1667063}, archivePrefix={arXiv}, eprint={0807.4132}, primaryClass={cs.GL} }
furia2008modeling
arxiv-4421
0807.4198
Positive factor networks: A graphical framework for modeling non-negative sequential data
<|reference_start|>Positive factor networks: A graphical framework for modeling non-negative sequential data: We present a novel graphical framework for modeling non-negative sequential data with hierarchical structure. Our model corresponds to a network of coupled non-negative matrix factorization (NMF) modules, which we refer to as a positive factor network (PFN). The data model is linear, subject to non-negativity constraints, so that observation data consisting of an additive combination of individually representable observations is also representable by the network. This is a desirable property for modeling problems in computational auditory scene analysis, since distinct sound sources in the environment are often well-modeled as combining additively in the corresponding magnitude spectrogram. We propose inference and learning algorithms that leverage existing NMF algorithms and that are straightforward to implement. We present a target tracking example and provide results for synthetic observation data which serve to illustrate the interesting properties of PFNs and motivate their potential usefulness in applications such as music transcription, source separation, and speech recognition. We show how a target process characterized by a hierarchical state transition model can be represented as a PFN. Our results illustrate that a PFN which is defined in terms of a single target observation can then be used to effectively track the states of multiple simultaneous targets. Our results show that the quality of the inferred target states degrades gradually as the observation noise is increased. We also present results for an example in which meaningful hierarchical features are extracted from a spectrogram. Such a hierarchical representation could be useful for music transcription and source separation applications. We also propose a network for language modeling.<|reference_end|>
arxiv
@article{vogel2008positive, title={Positive factor networks: A graphical framework for modeling non-negative sequential data}, author={Brian K. Vogel}, journal={arXiv preprint arXiv:0807.4198}, year={2008}, archivePrefix={arXiv}, eprint={0807.4198}, primaryClass={cs.LG} }
vogel2008positive
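The abstract above builds on coupled non-negative matrix factorization modules; for context, here is a minimal sketch of the standard Lee-Seung multiplicative updates for a single NMF factorization $V \approx WH$ (standard NMF, not the paper's coupled-network inference):

```python
import numpy as np

def nmf(V, r, iters=200, eps=1e-9, seed=0):
    """Factor V (m x n, non-negative) as W @ H with W (m x r) >= 0 and H (r x n) >= 0
    using the standard multiplicative updates for squared Euclidean error."""
    rng = np.random.default_rng(seed)
    m, n = V.shape
    W = rng.random((m, r)) + eps
    H = rng.random((r, n)) + eps
    for _ in range(iters):
        H *= (W.T @ V) / (W.T @ W @ H + eps)   # update H with W fixed
        W *= (V @ H.T) / (W @ H @ H.T + eps)   # update W with H fixed
    return W, H

V = np.random.default_rng(1).random((8, 12))   # toy non-negative data
W, H = nmf(V, r=3)
print(np.linalg.norm(V - W @ H))               # reconstruction error after the updates
```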
arxiv-4422
0807.4224
Encapsulation theory fundamentals
<|reference_start|>Encapsulation theory fundamentals: This paper proposes a theory of encapsulation, establishing a relationship between encapsulation and information hiding through the concept of potential structural complexity (P.S.C.), the maximum possible number of source code dependencies that can exist between program units in a software system. The P.S.C. of various, simple systems is examined in an attempt to demonstrate how P.S.C. changes as program units are encapsulated among different configurations of subsystems.<|reference_end|>
arxiv
@article{kirwan2008encapsulation, title={Encapsulation theory fundamentals}, author={Edmund Kirwan}, journal={arXiv preprint arXiv:0807.4224}, year={2008}, archivePrefix={arXiv}, eprint={0807.4224}, primaryClass={cs.SE} }
kirwan2008encapsulation
arxiv-4423
0807.4229
Positive circuits and maximal number of fixed points in discrete dynamical systems
<|reference_start|>Positive circuits and maximal number of fixed points in discrete dynamical systems: We consider the Cartesian product X of n finite intervals of integers and a map F from X to itself. As the main result, we establish an upper bound on the number of fixed points of F which only depends on X and on the topology of the positive circuits of the interaction graph associated with F. The proof uses and strongly generalizes a theorem of Richard and Comet which corresponds to a discrete version of Thomas' conjecture: if the interaction graph associated with F has no positive circuit, then F has at most one fixed point. The obtained upper bound on the number of fixed points also strongly generalizes the one established by Aracena et al. for a particular class of Boolean networks.<|reference_end|>
arxiv
@article{richard2008positive, title={Positive circuits and maximal number of fixed points in discrete dynamical systems}, author={Adrien Richard}, journal={arXiv preprint arXiv:0807.4229}, year={2008}, archivePrefix={arXiv}, eprint={0807.4229}, primaryClass={cs.DM} }
richard2008positive
arxiv-4424
0807.4234
Linear Coloring and Linear Graphs
<|reference_start|>Linear Coloring and Linear Graphs: Motivated by the definition of linear coloring on simplicial complexes, recently introduced in the context of algebraic topology \cite{Civan}, and the framework through which it was studied, we introduce the linear coloring on graphs. We provide an upper bound for the chromatic number $\chi(G)$, for any graph $G$, and show that $G$ can be linearly colored in polynomial time by proposing a simple linear coloring algorithm. Based on these results, we define a new class of perfect graphs, which we call co-linear graphs, and study their complement graphs, namely linear graphs. The linear coloring of a graph $G$ is a vertex coloring such that two vertices can be assigned the same color, if their corresponding clique sets are associated by the set inclusion relation (a clique set of a vertex $u$ is the set of all maximal cliques containing $u$); the linear chromatic number $\mathcal{\lambda}(G)$ of $G$ is the least integer $k$ for which $G$ admits a linear coloring with $k$ colors. We show that linear graphs are those graphs $G$ for which the linear chromatic number achieves its theoretical lower bound in every induced subgraph of $G$. We prove inclusion relations between these two classes of graphs and other subclasses of chordal and co-chordal graphs, and also study the structure of the forbidden induced subgraphs of the class of linear graphs.<|reference_end|>
arxiv
@article{ioannidou2008linear, title={Linear Coloring and Linear Graphs}, author={Kyriaki Ioannidou and Stavros D. Nikolopoulos}, journal={arXiv preprint arXiv:0807.4234}, year={2008}, archivePrefix={arXiv}, eprint={0807.4234}, primaryClass={cs.DM cs.DS} }
ioannidou2008linear
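The defining condition in the abstract above (two vertices may share a color only if their clique sets are related by set inclusion) can be checked directly; a small sketch, assuming networkx is available for maximal-clique enumeration:

```python
import networkx as nx

def clique_sets(G):
    """Map each vertex to the set of maximal cliques (as frozensets) containing it."""
    sets = {v: set() for v in G}
    for clique in nx.find_cliques(G):          # enumerates maximal cliques
        fc = frozenset(clique)
        for v in clique:
            sets[v].add(fc)
    return sets

def is_linear_coloring(G, color):
    """True iff every two same-colored vertices have clique sets related by inclusion,
    the condition quoted in the abstract."""
    cs = clique_sets(G)
    verts = list(G)
    for i, u in enumerate(verts):
        for v in verts[i + 1:]:
            if color[u] == color[v] and not (cs[u] <= cs[v] or cs[v] <= cs[u]):
                return False
    return True

P4 = nx.path_graph(4)                                       # path 0-1-2-3
print(is_linear_coloring(P4, {0: 0, 1: 0, 2: 1, 3: 1}))     # True: nested clique sets
print(is_linear_coloring(P4, {0: 0, 1: 1, 2: 1, 3: 0}))     # False: 0 and 3 are incomparable
```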
arxiv-4425
0807.4247
Z2Z4-linear codes: rank and kernel
<|reference_start|>Z2Z4-linear codes: rank and kernel: A code C is Z2Z4-additive if the set of coordinates can be partitioned into two subsets X and Y such that the punctured code of C by deleting the coordinates outside X (respectively, Y) is a binary linear code (respectively, a quaternary linear code). In this paper, the rank and dimension of the kernel for Z2Z4-linear codes, which are the corresponding binary codes of Z2Z4-additive codes, are studied. The possible values of these two parameters for Z2Z4-linear codes, giving lower and upper bounds, are established. For each possible rank r between these bounds, the construction of a Z2Z4-linear code with rank r is given. Equivalently, for each possible dimension of the kernel k, the construction of a Z2Z4-linear code with dimension of the kernel k is given. Finally, the bounds on the rank, once the kernel dimension is fixed, are established and the construction of a Z2Z4-additive code for each possible pair (r,k) is given.<|reference_end|>
arxiv
@article{fernandez-cordoba2008z2z4-linear, title={Z2Z4-linear codes: rank and kernel}, author={Cristina Fernandez-Cordoba, Jaume Pujol, and Merce Villanueva}, journal={arXiv preprint arXiv:0807.4247}, year={2008}, archivePrefix={arXiv}, eprint={0807.4247}, primaryClass={cs.IT cs.DM math.IT} }
fernandez-cordoba2008z2z4-linear
arxiv-4426
0807.4268
Phase Diagrams of Network Traffic
<|reference_start|>Phase Diagrams of Network Traffic: This paper has been withdrawn due to errors in the analysis of data with Carrier Access Rate control and statistical methodologies.<|reference_end|>
arxiv
@article{smith2008phase, title={Phase Diagrams of Network Traffic}, author={Reginald D. Smith}, journal={arXiv preprint arXiv:0807.4268}, year={2008}, archivePrefix={arXiv}, eprint={0807.4268}, primaryClass={cs.NI} }
smith2008phase
arxiv-4427
0807.4309
Array Based Java Source Code Obfuscation Using Classes with Restructured Arrays
<|reference_start|>Array Based Java Source Code Obfuscation Using Classes with Restructured Arrays: Array restructuring operations obscure arrays. Our work aims at obfuscating Java source code that contains arrays. Our main proposal is classes with restructured array members and obscured member methods for setting and getting array elements and for obtaining the length of arrays. The class method definitions are obscured through index transformation and constant hiding. The instantiated objects of these classes are then used when writing source code. A tool named JDATATRANS has been developed for generating such classes and, to the best of our knowledge, this is the first tool available for array restructuring on Java source code.<|reference_end|>
arxiv
@article{sivadasan2008array, title={Array Based Java Source Code Obfuscation Using Classes with Restructured Arrays}, author={Praveen Sivadasan, P. Sojan Lal}, journal={arXiv preprint arXiv:0807.4309}, year={2008}, archivePrefix={arXiv}, eprint={0807.4309}, primaryClass={cs.CR} }
sivadasan2008array
arxiv-4428
0807.4322
Carnot in the Information Age: Discrete Symmetric Channels
<|reference_start|>Carnot in the Information Age: Discrete Symmetric Channels: Modeling communication channels as thermal systems results in Hamiltonians which are an explicit function of the temperature. The first two authors have recently generalized the second thermodynamic law to encompass systems with temperature-dependent energy levels, $dQ = T\,dS + \langle d\mathcal{E}/dT\rangle\, dT$, where $\langle\cdot\rangle$ denotes averaging over the Boltzmann distribution, recomputing the mutual information and other main properties of the popular Gaussian channel. Here the mutual information for the binary symmetric channel as well as for the discrete symmetric channel consisting of 4 input/output (I/O) symbols is explicitly calculated using the generalized second law of thermodynamics. For equiprobable I/O the mutual information of the examined channels has a very simple form, $-\gamma U(\gamma)|_0^\beta$, where $U$ denotes the internal energy of the channel. We prove that this simple form of the mutual information governs the class of discrete memoryless symmetric communication channels with equiprobable I/O symbols.<|reference_end|>
arxiv
@article{kanter2008carnot, title={Carnot in the Information Age: Discrete Symmetric Channels}, author={Ido Kanter, Ori Shental, Hadar Efraim and Nadav Yacov}, journal={arXiv preprint arXiv:0807.4322}, year={2008}, archivePrefix={arXiv}, eprint={0807.4322}, primaryClass={cond-mat.stat-mech cs.IT math.IT} }
kanter2008carnot
arxiv-4429
0807.4325
Schroedinger-like PageRank equation and localization in the WWW
<|reference_start|>Schroedinger-like PageRank equation and localization in the WWW: The World Wide Web is one of the most important communication systems we use in our everyday life. Despite its central role, the growth and development of the WWW is not controlled by any central authority. This situation has created a huge ensemble of connections whose complexity can be fruitfully described and quantified by network theory. One important application that allows one to sort out the information present in these connections is the PageRank algorithm. Computation of this quantity is usually performed iteratively, at a large cost in computational time. In this paper we show that the PageRank can be expressed in terms of a wave function obeying a Schroedinger-like equation. In particular, the topological disorder given by the imbalance of outgoing and incoming links between pages induces structuring of the wave function and of the potential. This allows one to directly localize the pages with the largest scores. Through this new representation we can now compute the PageRank without iterative techniques. For most cases of interest our method is faster than the original one. Our results also clarify the role of topology in the diffusion of information within complex networks. The whole approach opens the possibility of novel techniques inspired by quantum physics for the analysis of WWW properties.<|reference_end|>
arxiv
@article{perra2008schroedinger-like, title={Schroedinger-like PageRank equation and localization in the WWW}, author={Nicola Perra (1,2), Vinko Zlatic (3,4), Alessandro Chessa (1,2), Claudio Conti (5), Debora Donato (6), Guido Caldarelli (3,2) ((1) Dep of Physics, SLACS-CNR University of Cagliari Italy, (2) Linkalab, Complex Systems Computational Lab. Cagliari, Italy, (3) Centre SMC CNR-INFM, Dip. Fisica, Universita' Sapienza Rome, Italy, (4) Theor. Physics Div., Rudjer Boskovic Inst., Zagreb Croatia, (5) Centre SOFT CNR-INFM, Dip. Fisica, Universita' Sapienza Rome, Italy, (6) Yahoo! Research Barcelona Spain)}, journal={arXiv preprint arXiv:0807.4325}, year={2008}, doi={10.1209/0295-5075/88/48002}, archivePrefix={arXiv}, eprint={0807.4325}, primaryClass={physics.soc-ph cond-mat.stat-mech cs.IR physics.data-an} }
perra2008schroedinger-like
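For reference, the iterative computation that the abstract above contrasts with is the standard PageRank power iteration; a minimal sketch of that standard algorithm (not the Schroedinger-like reformulation proposed in the paper):

```python
import numpy as np

def pagerank(adj, d=0.85, tol=1e-10, max_iter=1000):
    """Standard power-iteration PageRank on a 0/1 adjacency matrix where
    adj[i, j] = 1 iff page i links to page j. Dangling pages are spread uniformly."""
    n = adj.shape[0]
    out_deg = adj.sum(axis=1)
    p = np.full(n, 1.0 / n)
    for _ in range(max_iter):
        new = np.full(n, (1.0 - d) / n)
        for i in range(n):
            if out_deg[i]:
                new += d * p[i] * adj[i] / out_deg[i]   # follow outgoing links of page i
            else:
                new += d * p[i] / n                     # dangling page: distribute uniformly
        if np.abs(new - p).sum() < tol:
            return new
        p = new
    return p

adj = np.array([[0, 1, 1],
                [0, 0, 1],
                [1, 0, 0]], dtype=float)
print(pagerank(adj))    # scores sum to 1
```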
arxiv-4430
0807.4326
On the random satisfiable process
<|reference_start|>On the random satisfiable process: In this work we suggest a new model for generating random satisfiable k-CNF formulas. To generate such formulas -- randomly permute all 2^k\binom{n}{k} possible clauses over the variables x_1, ..., x_n, and starting from the empty formula, go over the clauses one by one, including each new clause as you go along if after its addition the formula remains satisfiable. We study the evolution of this process, namely the distribution over formulas obtained after scanning through the first m clauses (in the random permutation's order). Random processes with conditioning on a certain property being respected are widely studied in the context of graph properties. This study was pioneered by Ruci\'nski and Wormald in 1992 for graphs with a fixed degree sequence, and also by Erd\H{o}s, Suen, and Winkler in 1995 for triangle-free and bipartite graphs. Since then many other graph properties were studied such as planarity and H-freeness. Thus our model is a natural extension of this approach to the satisfiability setting. Our main contribution is as follows. For m \geq cn, c=c(k) a sufficiently large constant, we are able to characterize the structure of the solution space of a typical formula in this distribution. Specifically, we show that typically all satisfying assignments are essentially clustered in one cluster, and all but e^{-\Omega(m/n)} n of the variables take the same value in all satisfying assignments. We also describe a polynomial time algorithm that finds with high probability a satisfying assignment for such formulas.<|reference_end|>
arxiv
@article{krivelevich2008on, title={On the random satisfiable process}, author={Michael Krivelevich, Benny Sudakov and Dan Vilenchik}, journal={arXiv preprint arXiv:0807.4326}, year={2008}, archivePrefix={arXiv}, eprint={0807.4326}, primaryClass={math.CO cs.CC math.PR} }
krivelevich2008on
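The generative process described in the abstract above is explicit enough to simulate directly for small n by tracking the surviving satisfying assignments; a minimal sketch for illustration only (brute force, so practical only for small n):

```python
import itertools, random

def random_satisfiable_process(n, k, m, seed=0):
    """Scan the first m clauses of a random permutation of all 2^k * C(n, k) k-clauses
    over x_1..x_n, keeping a clause only if the formula stays satisfiable
    (the process described in the abstract). Tracks all satisfying assignments
    explicitly, so it is only feasible for small n."""
    rng = random.Random(seed)
    clauses = [tuple(zip(vs, signs))
               for vs in itertools.combinations(range(n), k)
               for signs in itertools.product((False, True), repeat=k)]
    rng.shuffle(clauses)
    sat = set(itertools.product((False, True), repeat=n))   # all 2^n assignments
    formula = []
    for clause in clauses[:m]:
        still = {a for a in sat if any(a[v] == s for v, s in clause)}
        if still:                    # include the clause only if satisfiability survives
            sat = still
            formula.append(clause)
    return formula, sat

formula, sat = random_satisfiable_process(n=12, k=3, m=3 * 12)
print(len(formula), "clauses kept,", len(sat), "satisfying assignments left")
```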
arxiv-4431
0807.4345
Avoider robot design to dim the fire with dt basic mini system
<|reference_start|>Avoider robot design to dim the fire with dt basic mini system: An avoider robot is a robot designed to avoid the obstacles around it. In addition, this robot is given an extra application: dimming (extinguishing) a fire. The robot is built with PING ultrasonic sensors mounted on its front, right, and left sides. The robot uses these sensors to find a clear path so that it can keep moving. Once the robot has found a clear path, it next searches for a fire in its surroundings and then dims the fire with a fan. The robot is built around a BASIC Stamp 2 micro-controller, which is provided by the DT-BASIC Mini System module, and it uses servo motors on its right and left sides for movement.<|reference_end|>
arxiv
@article{prasetyo2008avoider, title={Avoider robot design to dim the fire with dt basic mini system}, author={Eri Prasetyo, Wahyu K.R. and Bumi Prabu Prabowo}, journal={9 th IES 2007, EEPIS ITS Surabaya}, year={2008}, archivePrefix={arXiv}, eprint={0807.4345}, primaryClass={cs.RO} }
prasetyo2008avoider
arxiv-4432
0807.4368
Improved Algorithms for Approximate String Matching (Extended Abstract)
<|reference_start|>Improved Algorithms for Approximate String Matching (Extended Abstract): The problem of approximate string matching is important in many different areas such as computational biology, text processing and pattern recognition. A great effort has been made to design efficient algorithms addressing several variants of the problem, including comparison of two strings, approximate pattern identification in a string or calculation of the longest common subsequence that two strings share. We designed an output sensitive algorithm solving the edit distance problem between two strings of lengths n and m respectively in time O((s-|n-m|)min(m,n,s)+m+n) and linear space, where s is the edit distance between the two strings. This worst-case time bound sets the quadratic factor of the algorithm independent of the longest string length and improves existing theoretical bounds for this problem. The implementation of our algorithm excels also in practice, especially in cases where the two strings compared differ significantly in length. Source code of our algorithm is available at http://www.cs.miami.edu/\~dimitris/edit_distance<|reference_end|>
arxiv
@article{papamichail2008improved, title={Improved Algorithms for Approximate String Matching (Extended Abstract)}, author={Dimitris Papamichail and Georgios Papamichail}, journal={arXiv preprint arXiv:0807.4368}, year={2008}, archivePrefix={arXiv}, eprint={0807.4368}, primaryClass={cs.DS} }
papamichail2008improved
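The paper's algorithm itself is not given in the abstract above, but its output-sensitive flavour can be illustrated with the classical band-doubling scheme (a banded DP whose width doubles until the distance fits), whose cost also shrinks when the edit distance s is small; this is a sketch of the standard technique, not the authors' algorithm:

```python
def _banded(a, b, t):
    """Edit distance of a and b if it is <= t, else None (DP restricted to a band of width t)."""
    n, m = len(a), len(b)
    if abs(n - m) > t:
        return None
    INF = t + 1
    prev = [j if j <= t else INF for j in range(m + 1)]
    for i in range(1, n + 1):
        cur = [INF] * (m + 1)
        if i <= t:
            cur[0] = i
        for j in range(max(1, i - t), min(m, i + t) + 1):
            cur[j] = min(prev[j] + 1,                            # deletion
                         cur[j - 1] + 1,                         # insertion
                         prev[j - 1] + (a[i - 1] != b[j - 1]))   # match / substitution
        prev = cur
    return prev[m] if prev[m] <= t else None

def edit_distance(a, b):
    """Output-sensitive edit distance via band doubling (classical Ukkonen-style trick)."""
    t = max(1, abs(len(a) - len(b)))
    while True:
        d = _banded(a, b, t)
        if d is not None:
            return d
        t *= 2

print(edit_distance("kitten", "sitting"))   # 3
```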
arxiv-4433
0807.4417
On Introspection, Metacognitive Control and Augmented Data Mining Live Cycles
<|reference_start|>On Introspection, Metacognitive Control and Augmented Data Mining Live Cycles: We discuss metacognitive modelling as an enhancement to cognitive modelling and computing. Metacognitive control mechanisms should enable AI systems to self-reflect, reason about their actions, and to adapt to new situations. In this respect, we propose implementation details of a knowledge taxonomy and an augmented data mining life cycle which supports a live integration of obtained models.<|reference_end|>
arxiv
@article{sonntag2008on, title={On Introspection, Metacognitive Control and Augmented Data Mining Live Cycles}, author={Daniel Sonntag}, journal={arXiv preprint arXiv:0807.4417}, year={2008}, archivePrefix={arXiv}, eprint={0807.4417}, primaryClass={cs.AI} }
sonntag2008on
arxiv-4434
0807.4450
Candy-passing Games on General Graphs, I
<|reference_start|>Candy-passing Games on General Graphs, I: We undertake the first study of the candy-passing game on arbitrary connected graphs. We obtain a general stabilization result which encompasses the first author's results (arXiv:0709.2156) for candy-passing games on n-cycles with at least 3n candies.<|reference_end|>
arxiv
@article{kominers2008candy-passing, title={Candy-passing Games on General Graphs, I}, author={Paul M. Kominers, Scott D. Kominers}, journal={arXiv preprint arXiv:0807.4450}, year={2008}, archivePrefix={arXiv}, eprint={0807.4450}, primaryClass={math.CO cs.DM} }
kominers2008candy-passing
arxiv-4435
0807.4478
An Image-Based Sensor System for Autonomous Rendez-Vous with Uncooperative Satellites
<|reference_start|>An Image-Based Sensor System for Autonomous Rendez-Vous with Uncooperative Satellites: This paper describes the image processing algorithms developed by SENER, Ingenieria y Sistemas, to address the problem of image-based, autonomous rendez-vous (RV) with an orbiting satellite. The methods developed have a direct application in the OLEV (Orbital Life Extension Vehicle) mission. OLEV is a commercial mission under development by a consortium formed by Swedish Space Corporation, Kayser-Threde and SENER, aimed at extending the operational life of geostationary telecommunication satellites by supplying them with control, navigation and guidance services. OLEV is planned to use a set of cameras to determine the angular position and distance to the client satellite during all phases of rendez-vous and docking, thus enabling operation with satellites not equipped with any specific navigational aid to support the approach. The ability to operate with un-equipped client satellites significantly expands the range of applicability of the system under development, compared to other competing video technologies already tested in previous space missions, such as the ones described below.<|reference_end|>
arxiv
@article{miravet2008an, title={An Image-Based Sensor System for Autonomous Rendez-Vous with Uncooperative Satellites}, author={Carlos Miravet, Luis Pascual, Eloise Krouch, Juan Manuel del Cura}, journal={arXiv preprint arXiv:0807.4478}, year={2008}, archivePrefix={arXiv}, eprint={0807.4478}, primaryClass={cs.CV cs.AI} }
miravet2008an
arxiv-4436
0807.4479
FiEstAS sampling -- a Monte Carlo algorithm for multidimensional numerical integration
<|reference_start|>FiEstAS sampling -- a Monte Carlo algorithm for multidimensional numerical integration: This paper describes a new algorithm for Monte Carlo integration, based on the Field Estimator for Arbitrary Spaces (FiEstAS). The algorithm is discussed in detail, and its performance is evaluated in the context of Bayesian analysis, with emphasis on multimodal distributions with strong parameter degeneracies. Source code is available upon request.<|reference_end|>
arxiv
@article{ascasibar2008fiestas, title={FiEstAS sampling -- a Monte Carlo algorithm for multidimensional numerical integration}, author={Yago Ascasibar}, journal={arXiv preprint arXiv:0807.4479}, year={2008}, doi={10.1016/j.cpc.2008.07.011}, archivePrefix={arXiv}, eprint={0807.4479}, primaryClass={astro-ph cs.DS} }
ascasibar2008fiestas
arxiv-4437
0807.4494
Fast, exact (but unstable) spin spherical harmonic transforms
<|reference_start|>Fast, exact (but unstable) spin spherical harmonic transforms: In many applications data are measured or defined on a spherical manifold; spherical harmonic transforms are then required to access the frequency content of the data. We derive algorithms to perform forward and inverse spin spherical harmonic transforms for functions of arbitrary spin number. These algorithms involve recasting the spin transform on the two-sphere S^2 as a Fourier transform on the two-torus T^2. Fast Fourier transforms are then used to compute Fourier coefficients, which are related to spherical harmonic coefficients through a linear transform. By recasting the problem as a Fourier transform on the torus we appeal to the usual Shannon sampling theorem to develop spherical harmonic transforms that are theoretically exact for band-limited functions, thereby providing an alternative sampling theorem on the sphere. The computational complexity of our forward and inverse spin spherical harmonic transforms scale as O(L^3) for any arbitrary spin number, where L is the harmonic band-limit of the spin function on the sphere. Numerical experiments are performed and unfortunately the forward transform is found to be unstable for band-limits above L~32. The instability is due to the poorly conditioned linear system relating Fourier and spherical harmonic coefficients. The inverse transform is expected to be stable, although it is not possible to verify this hypothesis.<|reference_end|>
arxiv
@article{mcewen2008fast, title={Fast, exact (but unstable) spin spherical harmonic transforms}, author={J. D. McEwen}, journal={All Res.J.Phys.1:4-18,2011}, year={2008}, archivePrefix={arXiv}, eprint={0807.4494}, primaryClass={astro-ph cs.IT math.IT} }
mcewen2008fast
arxiv-4438
0807.4548
Compound Multiple Access Channels with Partial Cooperation
<|reference_start|>Compound Multiple Access Channels with Partial Cooperation: A two-user discrete memoryless compound multiple access channel with a common message and conferencing decoders is considered. The capacity region is characterized in the special cases of physically degraded channels and unidirectional cooperation, and achievable rate regions are provided for the general case. The results are then extended to the corresponding Gaussian model. In the Gaussian setup, the provided achievable rates are shown to lie within some constant number of bits from the boundary of the capacity region in several special cases. An alternative model, in which the encoders are connected by conferencing links rather than having a common message, is studied as well, and the capacity region for this model is also determined for the cases of physically degraded channels and unidirectional cooperation. Numerical results are also provided to obtain insights about the potential gains of conferencing at the decoders and encoders.<|reference_end|>
arxiv
@article{simeone2008compound, title={Compound Multiple Access Channels with Partial Cooperation}, author={O. Simeone, D. Gunduz, H. V. Poor, A. Goldsmith, S. Shamai (Shitz)}, journal={arXiv preprint arXiv:0807.4548}, year={2008}, doi={10.1109/TIT.2009.2018343}, archivePrefix={arXiv}, eprint={0807.4548}, primaryClass={cs.IT math.IT} }
simeone2008compound
arxiv-4439
0807.4580
A Logical Model and Data Placement Strategies for MEMS Storage Devices
<|reference_start|>A Logical Model and Data Placement Strategies for MEMS Storage Devices: MEMS storage devices are new non-volatile secondary storages that have outstanding advantages over magnetic disks. MEMS storage devices, however, are much different from magnetic disks in the structure and access characteristics. They have thousands of heads called probe tips and provide the following two major access facilities: (1) flexibility: freely selecting a set of probe tips for accessing data, (2) parallelism: simultaneously reading and writing data with the set of probe tips selected. Due to these characteristics, it is nontrivial to find data placements that fully utilize the capability of MEMS storage devices. In this paper, we propose a simple logical model called the Region-Sector (RS) model that abstracts major characteristics affecting data retrieval performance, such as flexibility and parallelism, from the physical MEMS storage model. We also suggest heuristic data placement strategies based on the RS model and derive new data placements for relational data and two-dimensional spatial data by using those strategies. Experimental results show that the proposed data placements improve the data retrieval performance by up to 4.0 times for relational data and by up to 4.8 times for two-dimensional spatial data of approximately 320 Mbytes compared with those of existing data placements. Further, these improvements are expected to be more marked as the database size grows.<|reference_end|>
arxiv
@article{kim2008a, title={A Logical Model and Data Placement Strategies for MEMS Storage Devices}, author={Yi-Reun Kim, Kyu-Young Whang, Min-Soo Kim, Il-Yeol Song}, journal={arXiv preprint arXiv:0807.4580}, year={2008}, doi={10.1587/transinf.E92.D.2218}, archivePrefix={arXiv}, eprint={0807.4580}, primaryClass={cs.DB} }
kim2008a
arxiv-4440
0807.4581
Robust Recovery of Signals From a Structured Union of Subspaces
<|reference_start|>Robust Recovery of Signals From a Structured Union of Subspaces: Traditional sampling theories consider the problem of reconstructing an unknown signal $x$ from a series of samples. A prevalent assumption which often guarantees recovery from the given measurements is that $x$ lies in a known subspace. Recently, there has been growing interest in nonlinear but structured signal models, in which $x$ lies in a union of subspaces. In this paper we develop a general framework for robust and efficient recovery of such signals from a given set of samples. More specifically, we treat the case in which $x$ lies in a sum of $k$ subspaces, chosen from a larger set of $m$ possibilities. The samples are modelled as inner products with an arbitrary set of sampling functions. To derive an efficient and robust recovery algorithm, we show that our problem can be formulated as that of recovering a block-sparse vector whose non-zero elements appear in fixed blocks. We then propose a mixed $\ell_2/\ell_1$ program for block sparse recovery. Our main result is an equivalence condition under which the proposed convex algorithm is guaranteed to recover the original signal. This result relies on the notion of block restricted isometry property (RIP), which is a generalization of the standard RIP used extensively in the context of compressed sensing. Based on RIP we also prove stability of our approach in the presence of noise and modelling errors. A special case of our framework is that of recovering multiple measurement vectors (MMV) that share a joint sparsity pattern. Adapting our results to this context leads to new MMV recovery methods as well as equivalence conditions under which the entire set can be determined efficiently.<|reference_end|>
arxiv
@article{eldar2008robust, title={Robust Recovery of Signals From a Structured Union of Subspaces}, author={Yonina C. Eldar and Moshe Mishali}, journal={arXiv preprint arXiv:0807.4581}, year={2008}, archivePrefix={arXiv}, eprint={0807.4581}, primaryClass={nlin.CG cs.IT math.IT nlin.SI} }
eldar2008robust
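The mixed $\ell_2/\ell_1$ program mentioned in the abstract above (minimizing the sum of per-block $\ell_2$ norms under the measurement constraint) can be prototyped with a convex solver; a sketch assuming cvxpy is installed, on randomly generated data rather than anything from the paper:

```python
import numpy as np
import cvxpy as cp

rng = np.random.default_rng(0)
n, blk, n_meas = 60, 5, 30                    # 12 blocks of length 5, 30 measurements

# Ground truth: only 2 of the 12 blocks are active (a block-sparse signal).
x_true = np.zeros(n)
x_true[0:5] = rng.standard_normal(5)
x_true[20:25] = rng.standard_normal(5)

A = rng.standard_normal((n_meas, n))          # random sampling operator
y = A @ x_true                                # noiseless measurements

# Mixed l2/l1 objective: sum of l2 norms of the blocks, subject to A x = y.
x = cp.Variable(n)
block_norms = [cp.norm(x[i:i + blk], 2) for i in range(0, n, blk)]
prob = cp.Problem(cp.Minimize(sum(block_norms)), [A @ x == y])
prob.solve()
print("recovery error:", np.linalg.norm(x.value - x_true))
```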
arxiv-4441
0807.4582
Lower Bounds for Embedding into Distributions over Excluded Minor Graph Families
<|reference_start|>Lower Bounds for Embedding into Distributions over Excluded Minor Graph Families: It was shown recently by Fakcharoenphol et al. that arbitrary finite metrics can be embedded into distributions over tree metrics with distortion O(log n). It is also known that this bound is tight since there are expander graphs which cannot be embedded into distributions over trees with better than Omega(log n) distortion. We show that this same lower bound holds for embeddings into distributions over any minor-excluded family. Given a family of graphs F which excludes minor M where |M|=k, we explicitly construct a family of graphs with treewidth-(k+1) which cannot be embedded into a distribution over F with better than Omega(log n) distortion. Thus, while these minor-excluded families of graphs are more expressive than trees, they do not provide asymptotically better approximations in general. An important corollary of this is that graphs of treewidth-k cannot be embedded into distributions over graphs of treewidth-(k-3) with distortion less than Omega(log n). We also extend a result of Alon et al. by showing that for any k, planar graphs cannot be embedded into distributions over treewidth-k graphs with better than Omega(log n) distortion.<|reference_end|>
arxiv
@article{carroll2008lower, title={Lower Bounds for Embedding into Distributions over Excluded Minor Graph Families}, author={Douglas E. Carroll and Ashish Goel}, journal={arXiv preprint arXiv:0807.4582}, year={2008}, archivePrefix={arXiv}, eprint={0807.4582}, primaryClass={cs.DS cs.DM} }
carroll2008lower
arxiv-4442
0807.4609
Analisis Kinerja Sistem Cluster Terhadapa Aplikasi Simulasi Dinamika Molekular NAMD Memanfaatkan Pustaka CHARM++
<|reference_start|>Analisis Kinerja Sistem Cluster Terhadapa Aplikasi Simulasi Dinamika Molekular NAMD Memanfaatkan Pustaka CHARM++: The complexity of molecular dynamics simulation programs demands processing machines with very large capabilities. Parallel machines have proven to have the potential to answer this computational challenge. To exploit this potential fully, a parallel program with maximal efficiency, effectiveness, scalability, and extensibility is required. The NAMD program discussed in this paper is considered capable of meeting all of the desired criteria. The program is designed around the Charm++ library for distributing computational tasks in parallel. NAMD has an intelligent periodic automatic load-balancing system, so that it can maximize the use of the available machine capacity. The program is also designed in a modular way, so that it can be modified and extended very easily. NAMD uses many combinations of computational algorithms and other numerical techniques in performing its task. NAMD 2.5 implements all of the techniques and computational equations used in molecular dynamics simulation today. NAMD can run on a variety of parallel machines, including cluster architectures, with surprising speedup results. This paper explains and demonstrates the parallel capability of NAMD on five cluster machines. It also presents the performance of NAMD on several.<|reference_end|>
arxiv
@article{mutiara2008analisis, title={Analisis Kinerja Sistem Cluster Terhadapa Aplikasi Simulasi Dinamika Molekular NAMD Memanfaatkan Pustaka CHARM++}, author={A.B. Mutiara}, journal={arXiv preprint arXiv:0807.4609}, year={2008}, archivePrefix={arXiv}, eprint={0807.4609}, primaryClass={cs.DC} }
mutiara2008analisis
arxiv-4443
0807.4618
AceWiki: A Natural and Expressive Semantic Wiki
<|reference_start|>AceWiki: A Natural and Expressive Semantic Wiki: We present AceWiki, a prototype of a new kind of semantic wiki using the controlled natural language Attempto Controlled English (ACE) for representing its content. ACE is a subset of English with a restricted grammar and a formal semantics. The use of ACE has two important advantages over existing semantic wikis. First, we can improve the usability and achieve a shallow learning curve. Second, ACE is more expressive than the formal languages of existing semantic wikis. Our evaluation shows that people who are not familiar with the formal foundations of the Semantic Web are able to deal with AceWiki after a very short learning phase and without the help of an expert.<|reference_end|>
arxiv
@article{kuhn2008acewiki:, title={AceWiki: A Natural and Expressive Semantic Wiki}, author={Tobias Kuhn}, journal={In Proceedings of the Fifth International Workshop on Semantic Web User Interaction (SWUI 2008), CEUR Workshop Proceedings, Volume 543, 2009}, year={2008}, archivePrefix={arXiv}, eprint={0807.4618}, primaryClass={cs.HC cs.AI} }
kuhn2008acewiki:
arxiv-4444
0807.4619
Guaranteed Cost LQG Control of Uncertain Linear Quantum Stochastic Systems
<|reference_start|>Guaranteed Cost LQG Control of Uncertain Linear Quantum Stochastic Systems: In this paper, we formulate and solve a guaranteed cost control problem for a class of uncertain linear stochastic quantum systems. For these quantum systems, a connection with an associated classical (non-quantum) system is first established. Using this connection, the desired guaranteed cost results are established. The theory presented is illustrated using an example from quantum optics.<|reference_end|>
arxiv
@article{shaiju2008guaranteed, title={Guaranteed Cost LQG Control of Uncertain Linear Quantum Stochastic Systems}, author={A. J. Shaiju, I. R. Petersen, and M. R. James}, journal={arXiv preprint arXiv:0807.4619}, year={2008}, archivePrefix={arXiv}, eprint={0807.4619}, primaryClass={quant-ph cs.SY math.OC} }
shaiju2008guaranteed
arxiv-4445
0807.4620
A Compositional Query Algebra for Second-Order Logic and Uncertain Databases
<|reference_start|>A Compositional Query Algebra for Second-Order Logic and Uncertain Databases: World-set algebra is a variable-free query language for uncertain databases. It constitutes the core of the query language implemented in MayBMS, an uncertain database system. This paper shows that world-set algebra captures exactly second-order logic over finite structures, or equivalently, the polynomial hierarchy. The proofs also imply that world-set algebra is closed under composition, a previously open problem.<|reference_end|>
arxiv
@article{koch2008a, title={A Compositional Query Algebra for Second-Order Logic and Uncertain Databases}, author={Christoph Koch}, journal={arXiv preprint arXiv:0807.4620}, year={2008}, archivePrefix={arXiv}, eprint={0807.4620}, primaryClass={cs.DB cs.LO} }
koch2008a
arxiv-4446
0807.4623
AceWiki: Collaborative Ontology Management in Controlled Natural Language
<|reference_start|>AceWiki: Collaborative Ontology Management in Controlled Natural Language: AceWiki is a prototype that shows how a semantic wiki using controlled natural language - Attempto Controlled English (ACE) in our case - can make ontology management easy for everybody. Sentences in ACE can automatically be translated into first-order logic, OWL, or SWRL. AceWiki integrates the OWL reasoner Pellet and ensures that the ontology is always consistent. Previous results have shown that people with no background in logic are able to add formal knowledge to AceWiki without being instructed or trained in advance.<|reference_end|>
arxiv
@article{kuhn2008acewiki:, title={AceWiki: Collaborative Ontology Management in Controlled Natural Language}, author={Tobias Kuhn}, journal={In Proceedings of the 3rd Semantic Wiki Workshop, CEUR Workshop Proceedings, 2008}, year={2008}, archivePrefix={arXiv}, eprint={0807.4623}, primaryClass={cs.HC cs.AI} }
kuhn2008acewiki:
arxiv-4447
0807.4626
Approximate kernel clustering
<|reference_start|>Approximate kernel clustering: In the kernel clustering problem we are given a large $n\times n$ positive semi-definite matrix $A=(a_{ij})$ with $\sum_{i,j=1}^na_{ij}=0$ and a small $k\times k$ positive semi-definite matrix $B=(b_{ij})$. The goal is to find a partition $S_1,\dots,S_k$ of $\{1,\dots,n\}$ which maximizes the quantity $$ \sum_{i,j=1}^k \Big(\sum_{(p,q)\in S_i\times S_j}a_{pq}\Big)b_{ij}. $$ We study the computational complexity of this generic clustering problem which originates in the theory of machine learning. We design a constant factor polynomial time approximation algorithm for this problem, answering a question posed by Song, Smola, Gretton and Borgwardt. In some cases we manage to compute the sharp approximation threshold for this problem assuming the Unique Games Conjecture (UGC). In particular, when $B$ is the $3\times 3$ identity matrix the UGC hardness threshold of this problem is exactly $\frac{16\pi}{27}$. We present and study a geometric conjecture of independent interest which we show would imply that the UGC threshold when $B$ is the $k\times k$ identity matrix is $\frac{8\pi}{9}(1-\frac{1}{k})$ for every $k\ge 3$.<|reference_end|>
arxiv
@article{khot2008approximate, title={Approximate kernel clustering}, author={Subhash Khot and Assaf Naor}, journal={arXiv preprint arXiv:0807.4626}, year={2008}, archivePrefix={arXiv}, eprint={0807.4626}, primaryClass={cs.DS cs.CC math.FA} }
khot2008approximate
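As an illustrative aside to the abstract above: the clustering objective it defines is straightforward to evaluate for a candidate partition, which is useful when experimenting with heuristics. A minimal Python sketch, assuming a toy zero-sum positive semi-definite matrix A, B equal to the 3x3 identity, and a random partition; all parameter choices are hypothetical and not taken from the paper.

import numpy as np

def kernel_clustering_objective(A, B, labels):
    # sum over i, j of ( sum of A over the block S_i x S_j ) * B[i, j],
    # where labels[p] = i places element p in part S_i.
    k = B.shape[0]
    block_sums = np.zeros((k, k))
    for i in range(k):
        for j in range(k):
            block_sums[i, j] = A[np.ix_(labels == i, labels == j)].sum()
    return float((block_sums * B).sum())

rng = np.random.default_rng(0)
n, k = 12, 3
M = rng.standard_normal((n, n))
H = np.eye(n) - np.ones((n, n)) / n      # centering projection
A = H @ (M @ M.T) @ H                    # PSD, with all entries summing to zero
B = np.eye(k)
labels = rng.integers(0, k, size=n)
print(kernel_clustering_objective(A, B, labels))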
arxiv-4448
0807.4655
Candy-passing Games on General Graphs, II
<|reference_start|>Candy-passing Games on General Graphs, II: We give a new proof that any candy-passing game on a graph G with at least 4|E(G)|-|V(G)| candies stabilizes. (This result was first proven in arXiv:0807.4450.) Unlike the prior literature on candy-passing games, we use methods from the general theory of chip-firing games which allow us to obtain a polynomial bound on the number of rounds before stabilization.<|reference_end|>
arxiv
@article{kominers2008candy-passing, title={Candy-passing Games on General Graphs, II}, author={Paul M. Kominers, Scott D. Kominers}, journal={arXiv preprint arXiv:0807.4655}, year={2008}, archivePrefix={arXiv}, eprint={0807.4655}, primaryClass={math.CO cs.DM} }
kominers2008candy-passing
arxiv-4449
0807.4656
Energy-delay bounds analysis in wireless multi-hop networks with unreliable radio links
<|reference_start|>Energy-delay bounds analysis in wireless multi-hop networks with unreliable radio links: Energy efficiency and transmission delay are very important parameters for wireless multi-hop networks. Previous works that study energy efficiency and delay are based on the assumption of reliable links. However, the unreliability of the channel is inevitable in wireless multi-hop networks. This paper investigates the trade-off between the energy consumption and the end-to-end delay of multi-hop communications in a wireless network using an unreliable link model. It provides a closed form expression of the lower bound on the energy-delay trade-off for different channel models (AWGN, Rayleigh flat fading and Nakagami block-fading) in a linear network. These analytical results are also verified in 2-dimensional Poisson networks using simulations. The main contribution of this work is the use of a probabilistic link model to define the energy efficiency of the system and capture the energy-delay trade-offs. Hence, it provides a more realistic lower bound on both the energy efficiency and the energy-delay trade-off since it does not restrict the study to the set of perfect links as proposed in earlier works.<|reference_end|>
arxiv
@article{zhang2008energy-delay, title={Energy-delay bounds analysis in wireless multi-hop networks with unreliable radio links}, author={Ruifeng Zhang (CITI, INRIA Rhône-Alpes / CITI), Jean-Marie Gorce (CITI, INRIA Rhône-Alpes / CITI), Katia Jaffrès-Runser (CITI, WNET)}, journal={arXiv preprint arXiv:0807.4656}, year={2008}, number={RR-6598}, archivePrefix={arXiv}, eprint={0807.4656}, primaryClass={cs.NI} }
zhang2008energy-delay
arxiv-4450
0807.4671
Codes Associated with $O^+(2n,2^r)$ and Power Moments of Kloosterman Sums
<|reference_start|>Codes Associated with $O^+(2n,2^r)$ and Power Moments of Kloosterman Sums: In this paper, we construct three binary linear codes $C(SO^+(2,q))$, $C(O^+(2,q))$, $C(SO^+(4,q))$, respectively associated with the orthogonal groups $SO^+(2,q)$, $O^+(2,q)$, $SO^+(4,q)$, with $q$ powers of two. Then we obtain recursive formulas for the power moments of Kloosterman and 2-dimensional Kloosterman sums in terms of the frequencies of weights in the codes. This is done via Pless power moment identity and by utilizing the explicit expressions of Gauss sums for the orthogonal groups. We emphasize that, when the recursive formulas for the power moments of Kloosterman sums are compared, the present one is computationally more effective than the previous one constructed from the special linear group $SL(2,q)$. We illustrate our results with some examples.<|reference_end|>
arxiv
@article{kim2008codes, title={Codes Associated with $O^+(2n,2^r)$ and Power Moments of Kloosterman Sums}, author={Dae San Kim (Sogang University)}, journal={arXiv preprint arXiv:0807.4671}, year={2008}, archivePrefix={arXiv}, eprint={0807.4671}, primaryClass={math.NT cs.IT math.IT} }
kim2008codes
arxiv-4451
0807.4680
Hacia una teoria de unificacion para los comportamientos cognitivos
<|reference_start|>Hacia una teoria de unificacion para los comportamientos cognitivos: Each cognitive science tries to understand a set of cognitive behaviors. The structuring of knowledge about this aspect of nature is far from what one would expect of a science. Until now, no universal standard consistently describing the set of cognitive behaviors has been found, and there are many questions about cognitive behaviors for which only the opinions of members of the scientific community exist. This article makes three proposals. The first is to raise with the scientific community the need to unify the cognitive behaviors. The second is to call for the application of Newton's rules of reasoning about nature, from his book Philosophiae Naturalis Principia Mathematica, to the cognitive behaviors. The third is to propose a scientific theory, currently under development, that follows the rules established by Newton to make sense of nature and could be the theory that explains all cognitive behaviors.<|reference_end|>
arxiv
@article{miguel2008hacia, title={Hacia una teoria de unificacion para los comportamientos cognitivos}, author={Sergio Miguel}, journal={arXiv preprint arXiv:0807.4680}, year={2008}, archivePrefix={arXiv}, eprint={0807.4680}, primaryClass={cs.AI} }
miguel2008hacia
arxiv-4452
0807.4701
An image processing analysis of skin textures
<|reference_start|>An image processing analysis of skin textures: Colour and coarseness of skin are visually different. When image processing is involved in the skin analysis, it is important to quantitatively evaluate such differences using texture features. In this paper, we discuss a texture analysis and measurements based on a statistical approach to the pattern recognition. Grain size and anisotropy are evaluated with proper diagrams. The possibility to determine the presence of pattern defects is also discussed.<|reference_end|>
arxiv
@article{sparavigna2008an, title={An image processing analysis of skin textures}, author={A. Sparavigna, R. Marazzato}, journal={Skin Research and Technology, Volume 16 Issue 2, Pages 161 - 167, 2010}, year={2008}, doi={10.1111/j.1600-0846.2009.00413.x}, archivePrefix={arXiv}, eprint={0807.4701}, primaryClass={cs.CV} }
sparavigna2008an
arxiv-4453
0807.4753
Counterexamples to the maximal p-norm multiplicativity conjecture for all p > 1
<|reference_start|>Counterexamples to the maximal p-norm multiplicativity conjecture for all p > 1: For all p > 1, we demonstrate the existence of quantum channels with non-multiplicative maximal output p-norms. Equivalently, for all p >1, the minimum output Renyi entropy of order p of a quantum channel is not additive. The violations found are large; in all cases, the minimum output Renyi entropy of order p for a product channel need not be significantly greater than the minimum output entropy of its individual factors. Since p=1 corresponds to the von Neumann entropy, these counterexamples demonstrate that if the additivity conjecture of quantum information theory is true, it cannot be proved as a consequence of any channel-independent guarantee of maximal p-norm multiplicativity. We also show that a class of channels previously studied in the context of approximate encryption lead to counterexamples for all p > 2.<|reference_end|>
arxiv
@article{hayden2008counterexamples, title={Counterexamples to the maximal p-norm multiplicativity conjecture for all p > 1}, author={Patrick Hayden and Andreas Winter}, journal={Comm. Math. Phys. 284(1):263-280, 2008.}, year={2008}, doi={10.1007/s00220-008-0624-0}, archivePrefix={arXiv}, eprint={0807.4753}, primaryClass={quant-ph cs.IT math-ph math.IT math.MP} }
hayden2008counterexamples
arxiv-4454
0807.4770
Channel Coding and Decoding in a Relay System Operated with Physical layer Network Coding
<|reference_start|>Channel Coding and Decoding in a Relay System Operated with Physical layer Network Coding: Physical-layer Network Coding (PNC) can significantly improve the throughput of the wireless two-way relay channel (TWRC) by allowing the two end nodes to transmit messages to the relay simultaneously. To achieve reliable communication, channel coding could be applied on top of PNC. This paper investigates link-by-link channel-coded PNC, in which a critical process at the relay is to transform the superimposed channel-coded packets received from the two end nodes plus noise, Y3=X1+X2+W3, to the network-coded combination of the source packets, S1 XOR S2. This is in contrast to the traditional multiple-access problem, in which the goal is to obtain S1 and S2 separately. The transformation from Y3 to (S1 XOR S2) is referred to as the Channel-decoding-Network-Coding process (CNC) in that it involves both channel decoding and network coding operations. A contribution of this paper is the insight that in designing CNC, we should first (i) channel-decode Y3 to the superimposed source symbols S1+S2 before (ii) transforming S1+S2 to the network-coded packets (S1 XOR S2). Compared with previously proposed strategies for CNC, this strategy reduces the channel-coding network-coding mismatch. It is not obvious, however, that an efficient decoder for step (i) exists. A second contribution of this paper is to provide an explicit construction of such a decoder based on the use of the Repeat Accumulate (RA) code. Specifically, we redesign the belief propagation algorithm of the RA code for the traditional point-to-point channel to suit the need of the PNC multiple-access channel. Simulation results show that our new scheme outperforms the previously proposed schemes significantly in terms of BER without added complexity.<|reference_end|>
arxiv
@article{zhang2008channel, title={Channel Coding and Decoding in a Relay System Operated with Physical layer Network Coding}, author={Shengli Zhang, Soung-Chang Liew}, journal={IEEE Journal on Selected Areas in Communications, Jun. 2009}, year={2008}, doi={10.1109/JSAC.2009.090618}, archivePrefix={arXiv}, eprint={0807.4770}, primaryClass={cs.NI cs.IT math.IT} }
zhang2008channel
arxiv-4455
0807.4846
Error-Correcting Codes in Projective Spaces via Rank-Metric Codes and Ferrers Diagrams
<|reference_start|>Error-Correcting Codes in Projective Spaces via Rank-Metric Codes and Ferrers Diagrams: Coding in the projective space has received recently a lot of attention due to its application in network coding. Reduced row echelon form of the linear subspaces and Ferrers diagram can play a key role for solving coding problems in the projective space. In this paper we propose a method to design error-correcting codes in the projective space. We use a multilevel approach to design our codes. First, we select a constant weight code. Each codeword defines a skeleton of a basis for a subspace in reduced row echelon form. This skeleton contains a Ferrers diagram on which we design a rank-metric code. Each such rank-metric code is lifted to a constant dimension code. The union of these codes is our final constant dimension code. In particular the codes constructed recently by Koetter and Kschischang are a subset of our codes. The rank-metric codes used for this construction form a new class of rank-metric codes. We present a decoding algorithm to the constructed codes in the projective space. The efficiency of the decoding depends on the efficiency of the decoding for the constant weight codes and the rank-metric codes. Finally, we use puncturing on our final constant dimension codes to obtain large codes in the projective space which are not constant dimension.<|reference_end|>
arxiv
@article{etzion2008error-correcting, title={Error-Correcting Codes in Projective Spaces via Rank-Metric Codes and Ferrers Diagrams}, author={Tuvi Etzion and Natalia Silberstein}, journal={arXiv preprint arXiv:0807.4846}, year={2008}, archivePrefix={arXiv}, eprint={0807.4846}, primaryClass={cs.IT math.IT} }
etzion2008error-correcting
arxiv-4456
0807.4881
Capacity and Performance of Adaptive MIMO System Based on Beam-Nulling
<|reference_start|>Capacity and Performance of Adaptive MIMO System Based on Beam-Nulling: In this paper, we propose a scheme called "beam-nulling" for MIMO adaptation. In the beam-nulling scheme, the eigenvector of the weakest subchannel is fed back and then signals are sent over a generated subspace orthogonal to the weakest subchannel. Theoretical analysis and numerical results show that the capacity of beam-nulling is close to that of optimal water-filling at medium SNR. Additionally, the signal-to-interference-plus-noise ratio (SINR) of the MMSE receiver is derived for beam-nulling. Then the paper presents the associated average bit-error rate (BER) of beam-nulling numerically, which is verified by simulation. Simulation results are also provided to compare beam-nulling with beamforming. To improve performance further, beam-nulling is concatenated with linear dispersion code. Simulation results are also provided to compare the concatenated beam-nulling scheme with the beamforming scheme at the same data rate. Additionally, the existing beamforming and the newly proposed beam-nulling schemes can be extended if more than one eigenvector is available at the transmitter. The new extended schemes are called multi-dimensional (MD) beamforming and MD beam-nulling. Theoretical analysis and numerical results in terms of capacity are also provided to evaluate the new extended schemes. Simulation results show that the MD scheme with LDC can outperform the MD scheme with STBC significantly when the data rate is high.<|reference_end|>
arxiv
@article{gheryani2008capacity, title={Capacity and Performance of Adaptive MIMO System Based on Beam-Nulling}, author={Mabruk Gheryani, Zhiyuan Wu, and Yousef R. Shayan}, journal={arXiv preprint arXiv:0807.4881}, year={2008}, archivePrefix={arXiv}, eprint={0807.4881}, primaryClass={cs.IT math.IT} }
gheryani2008capacity
arxiv-4457
0807.4912
Research Challenges in Management and Compliance of Policies on the Web
<|reference_start|>Research Challenges in Management and Compliance of Policies on the Web: In this paper we argue that policies are an increasing concern for organizations that are operating a web site. Examples of policies that are relevant in the domain of the web address issues such as privacy of personal data, accessibility for the disabled, user conduct, e-commerce, and intellectual property. Web site policies--and the overarching concept of web site governance--are cross-cutting concerns that have to be addressed and implemented at different levels (e.g., policy documents, legal statements, business processes, contracts, auditing, and software systems). For web sites, policies are also reflected in the legal statements that the web site posts, and in the behavior and features that the web site offers to its users. Both policies and software tend to evolve independently, but at the same time they both have to be kept in sync. This is a practical challenge for operators of web sites that is poorly addressed right now and is, we believe, a promising avenue for future research. In this paper, we discuss various challenges that policy poses for web sites with an emphasis on privacy and data protection and identify open issues for future research.<|reference_end|>
arxiv
@article{kienle2008research, title={Research Challenges in Management and Compliance of Policies on the Web}, author={Holger M. Kienle and Hausi A. Müller}, journal={arXiv preprint arXiv:0807.4912}, year={2008}, archivePrefix={arXiv}, eprint={0807.4912}, primaryClass={cs.CY cs.SE} }
kienle2008research
arxiv-4458
0807.4995
Algebraic Soft-Decision Decoding of Hermitian Codes
<|reference_start|>Algebraic Soft-Decision Decoding of Hermitian Codes: An algebraic soft-decision decoder for Hermitian codes is presented. We apply Koetter and Vardy's soft-decision decoding framework, now well established for Reed-Solomon codes, to Hermitian codes. First we provide an algebraic foundation for soft-decision decoding. Then we present an interpolation algorithm finding the Q-polynomial that plays a key role in the decoding. With some simulation results, we compare the performance of the algebraic soft-decision decoders for Hermitian codes and Reed-Solomon codes, with results favorable to the former.<|reference_end|>
arxiv
@article{lee2008algebraic, title={Algebraic Soft-Decision Decoding of Hermitian Codes}, author={Kwankyu Lee and Michael E. O'Sullivan}, journal={arXiv preprint arXiv:0807.4995}, year={2008}, archivePrefix={arXiv}, eprint={0807.4995}, primaryClass={cs.IT math.IT} }
lee2008algebraic
arxiv-4459
0807.5091
Message-passing for Maximum Weight Independent Set
<|reference_start|>Message-passing for Maximum Weight Independent Set: We investigate the use of message-passing algorithms for the problem of finding the max-weight independent set (MWIS) in a graph. First, we study the performance of the classical loopy max-product belief propagation. We show that each fixed point estimate of max-product can be mapped in a natural way to an extreme point of the LP polytope associated with the MWIS problem. However, this extreme point may not be the one that maximizes the value of node weights; the particular extreme point at final convergence depends on the initialization of max-product. We then show that if max-product is started from the natural initialization of uninformative messages, it always solves the correct LP -- if it converges. This result is obtained via a direct analysis of the iterative algorithm, and cannot be obtained by looking only at fixed points. The tightness of the LP relaxation is thus necessary for max-product optimality, but it is not sufficient. Motivated by this observation, we show that a simple modification of max-product becomes gradient descent on (a convexified version of) the dual of the LP, and converges to the dual optimum. We also develop a message-passing algorithm that recovers the primal MWIS solution from the output of the descent algorithm. We show that the MWIS estimate obtained using these two algorithms in conjunction is correct when the graph is bipartite and the MWIS is unique. Finally, we show that any problem of MAP estimation for probability distributions over finite domains can be reduced to an MWIS problem. We believe this reduction will yield new insights and algorithms for MAP estimation.<|reference_end|>
arxiv
@article{sanghavi2008message-passing, title={Message-passing for Maximum Weight Independent Set}, author={Sujay Sanghavi, Devavrat Shah and Alan Willsky}, journal={arXiv preprint arXiv:0807.5091}, year={2008}, doi={10.1109/TIT.2009.2030448}, archivePrefix={arXiv}, eprint={0807.5091}, primaryClass={cs.AI cs.IT math.IT} }
sanghavi2008message-passing
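As an illustrative aside to the abstract above: a minimal Python sketch of one standard form of loopy max-product for maximum weight independent set, started from the natural all-zero (uninformative) messages. The exact update and estimation rules analyzed in the paper may differ, so this is a sketch under assumed conventions rather than the authors' algorithm.

def max_product_mwis(adj, w, iters=50):
    # adj: dict node -> set of neighbors; w: dict node -> positive weight.
    # m[(i, j)] is the message node i sends to neighbor j.
    m = {(i, j): 0.0 for i in adj for j in adj[i]}
    for _ in range(iters):
        new_m = {}
        for i in adj:
            for j in adj[i]:
                incoming = sum(m[(k, i)] for k in adj[i] if k != j)
                new_m[(i, j)] = max(0.0, w[i] - incoming)
        m = new_m
    # Estimate: include node i if its weight beats the total incoming evidence.
    return {i for i in adj if w[i] > sum(m[(k, i)] for k in adj[i])}

# Toy usage on a weighted 4-cycle; the unique optimum is {0, 2}.
adj = {0: {1, 3}, 1: {0, 2}, 2: {1, 3}, 3: {0, 2}}
w = {0: 3.0, 1: 1.0, 2: 3.0, 3: 1.0}
print(max_product_mwis(adj, w))

On this toy instance the messages settle after a few rounds and the estimate recovers {0, 2}; on graphs where the LP relaxation is not tight, convergence and correctness are not guaranteed, consistent with the abstract.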
arxiv-4460
0807.5111
Finding Dense Subgraphs in G(n,1/2)
<|reference_start|>Finding Dense Subgraphs in G(n,1/2): Finding the largest clique is a notoriously hard problem, even on random graphs. It is known that the clique number of a random graph G(n,1/2) is almost surely either k or k+1, where k = 2log n - 2log(log n) - 1. However, a simple greedy algorithm finds a clique of size only (1+o(1))log n, with high probability, and finding larger cliques -- even of size (1+epsilon)log n -- in randomized polynomial time has been a long-standing open problem. In this paper, we study the following generalization: given a random graph G(n,1/2), find the largest subgraph with edge density at least (1-delta). We show that a simple modification of the greedy algorithm finds a subset of 2log n vertices whose induced subgraph has edge density at least 0.951, with high probability. To complement this, we show that almost surely there is no subset of 2.784log n vertices whose induced subgraph has edge density 0.951 or more.<|reference_end|>
arxiv
@article{sarma2008finding, title={Finding Dense Subgraphs in G(n,1/2)}, author={Atish Das Sarma, Amit Deshpande, Ravi Kannan}, journal={arXiv preprint arXiv:0807.5111}, year={2008}, archivePrefix={arXiv}, eprint={0807.5111}, primaryClass={cs.DS} }
sarma2008finding
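As an illustrative aside to the abstract above: a small Python sketch of a greedy heuristic in its spirit, growing a set of roughly 2 log n vertices by repeatedly adding the vertex with the most neighbors inside the current set, and reporting the resulting edge density. This is not necessarily the exact modification of greedy analyzed in the paper; the graph size and seed are arbitrary.

import math
import random

def greedy_dense_subset(n, target_size, seed=0):
    # Sample G(n, 1/2) and grow a vertex subset greedily by internal degree.
    rng = random.Random(seed)
    adj = [[False] * n for _ in range(n)]
    for u in range(n):
        for v in range(u + 1, n):
            if rng.random() < 0.5:
                adj[u][v] = adj[v][u] = True
    chosen, in_set = [0], {0}
    while len(chosen) < target_size:
        best = max((v for v in range(n) if v not in in_set),
                   key=lambda v: sum(adj[v][u] for u in chosen))
        chosen.append(best)
        in_set.add(best)
    pairs = len(chosen) * (len(chosen) - 1) // 2
    edges = sum(adj[u][v] for i, u in enumerate(chosen) for v in chosen[i + 1:])
    return chosen, edges / pairs

n = 400
subset, density = greedy_dense_subset(n, int(2 * math.log2(n)))
print(len(subset), round(density, 3))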
arxiv-4461
0807.5120
Accelerated Option Pricing in Multiple Scenarios
<|reference_start|>Accelerated Option Pricing in Multiple Scenarios: This paper presents a massive acceleration of Monte Carlo based pricing methods for financial products and financial derivatives. The method is applicable in risk management settings, where a financial product has to be priced under a number of potential future scenarios. Instead of starting a separate nested Monte Carlo simulation for each scenario under consideration, the new method uses only a few representative nested simulations and estimates the product prices at each scenario by a smoothing method based on the state space. This smoothing technique can be, e.g., non-parametric regression or kernel smoothing.<|reference_end|>
arxiv
@article{dirnstorfer2008accelerated, title={Accelerated Option Pricing in Multiple Scenarios}, author={Stefan Dirnstorfer, Andreas J. Grau}, journal={arXiv preprint arXiv:0807.5120}, year={2008}, archivePrefix={arXiv}, eprint={0807.5120}, primaryClass={cs.CE} }
dirnstorfer2008accelerated
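As an illustrative aside to the abstract above: a minimal Python sketch of the general idea, running nested Monte Carlo pricing at only a few representative outer scenarios and smoothing the prices over the scenario state with a simple regression. The Black-Scholes dynamics, the European call payoff, the polynomial regression, and all parameters are assumptions made for the example; the paper's smoothing method may differ.

import numpy as np

rng = np.random.default_rng(1)
S0, K, r, sigma, T_outer, T_inner = 100.0, 100.0, 0.02, 0.25, 1.0, 1.0

def simulate_gbm(s, t, n):
    # Terminal values of geometric Brownian motion started at s over horizon t.
    z = rng.standard_normal(n)
    return s * np.exp((r - 0.5 * sigma ** 2) * t + sigma * np.sqrt(t) * z)

def inner_price(s, n_inner=20000):
    # Nested Monte Carlo estimate of the call price given scenario state s.
    ST = simulate_gbm(s, T_inner, n_inner)
    return np.exp(-r * T_inner) * np.maximum(ST - K, 0.0).mean()

outer = simulate_gbm(S0, T_outer, 10000)                  # many risk scenarios
reps = np.quantile(outer, np.linspace(0.01, 0.99, 15))    # few representative states
rep_prices = np.array([inner_price(s) for s in reps])

coeffs = np.polyfit(reps, rep_prices, deg=4)              # state-space smoothing
smoothed = np.polyval(coeffs, outer)                      # prices at all scenarios
print(smoothed[:5])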
arxiv-4462
0808.0012
Lectures on Probability, Entropy, and Statistical Physics
<|reference_start|>Lectures on Probability, Entropy, and Statistical Physics: These lectures deal with the problem of inductive inference, that is, the problem of reasoning under conditions of incomplete information. Is there a general method for handling uncertainty? Or, at least, are there rules that could in principle be followed by an ideally rational mind when discussing scientific matters? What makes one statement more plausible than another? How much more plausible? And then, when new information is acquired how do we change our minds? Or, to put it differently, are there rules for learning? Are there rules for processing information that are objective and consistent? Are they unique? And, come to think of it, what, after all, is information? It is clear that data contains or conveys information, but what does this precisely mean? Can information be conveyed in other ways? Is information physical? Can we measure amounts of information? Do we need to? Our goal is to develop the main tools for inductive inference--probability and entropy--from a thoroughly Bayesian point of view and to illustrate their use in physics with examples borrowed from the foundations of classical statistical physics.<|reference_end|>
arxiv
@article{caticha2008lectures, title={Lectures on Probability, Entropy, and Statistical Physics}, author={Ariel Caticha}, journal={arXiv preprint arXiv:0808.0012}, year={2008}, archivePrefix={arXiv}, eprint={0808.0012}, primaryClass={physics.data-an cond-mat.stat-mech cs.IT math.IT math.ST physics.gen-ph stat.TH} }
caticha2008lectures
arxiv-4463
0808.0023
Branching proofs of infeasibility in low density subset sum problems
<|reference_start|>Branching proofs of infeasibility in low density subset sum problems: We prove that the subset sum problem has a polynomial time computable certificate of infeasibility for all weight vectors $a$ with density at most $1/(2n)$ and for almost all integer right hand sides. The certificate is branching on a hyperplane, i.e., a methodology dual to the one explored by Lagarias and Odlyzko; Frieze; Furst and Kannan; and Coster et al. The proof has two ingredients. We first prove that a vector that is near parallel to $a$ is a suitable branching direction, regardless of the density. Then we show that for a low density $a$ such a near parallel vector can be computed using diophantine approximation, via a methodology introduced by Frank and Tardos. We also show that there is a small number of long intervals whose disjoint union covers the integer right hand sides, for which the infeasibility is proven by branching on the above hyperplane.<|reference_end|>
arxiv
@article{pataki2008branching, title={Branching proofs of infeasibility in low density subset sum problems}, author={Gabor Pataki and Mustafa Tural}, journal={arXiv preprint arXiv:0808.0023}, year={2008}, archivePrefix={arXiv}, eprint={0808.0023}, primaryClass={cs.CC cs.CR math.CO math.OC} }
pataki2008branching
arxiv-4464
0808.0036
Why Does a Kronecker Model Result in Misleading Capacity Estimates?
<|reference_start|>Why Does a Kronecker Model Result in Misleading Capacity Estimates?: Many recent works that study the performance of multi-input multi-output (MIMO) systems in practice assume a Kronecker model where the variances of the channel entries, upon decomposition on to the transmit and the receive eigen-bases, admit a separable form. Measurement campaigns, however, show that the Kronecker model results in poor estimates for capacity. Motivated by these observations, a channel model that does not impose a separable structure has been recently proposed and shown to fit the capacity of measured channels better. In this work, we show that this recently proposed modeling framework can be viewed as a natural consequence of channel decomposition on to its canonical coordinates, the transmit and/or the receive eigen-bases. Using tools from random matrix theory, we then establish the theoretical basis behind the Kronecker mismatch at the low- and the high-SNR extremes: 1) Sparsity of the dominant statistical degrees of freedom (DoF) in the true channel at the low-SNR extreme, and 2) Non-regularity of the sparsity structure (disparities in the distribution of the DoF across the rows and the columns) at the high-SNR extreme.<|reference_end|>
arxiv
@article{raghavan2008why, title={Why Does a Kronecker Model Result in Misleading Capacity Estimates?}, author={Vasanthan Raghavan, Jayesh H. Kotecha, Akbar M. Sayeed}, journal={arXiv preprint arXiv:0808.0036}, year={2008}, doi={10.1109/TIT.2010.2059811}, archivePrefix={arXiv}, eprint={0808.0036}, primaryClass={cs.IT math.IT} }
raghavan2008why
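As an illustrative aside to the abstract above: a small Python sketch of how ergodic capacity is typically estimated under the separable (Kronecker) correlation model that the abstract critiques, H = Rr^{1/2} Hw Rt^{1/2} with i.i.d. complex Gaussian Hw. The exponential correlation profile, the SNR, and the antenna counts are arbitrary illustrative assumptions, not values from the paper.

import numpy as np

def ergodic_capacity(Rr, Rt, snr=10.0, trials=2000, seed=0):
    # Monte Carlo ergodic capacity (bits/s/Hz) of a Kronecker-model MIMO channel.
    rng = np.random.default_rng(seed)
    nr, nt = Rr.shape[0], Rt.shape[0]
    Lr, Lt = np.linalg.cholesky(Rr), np.linalg.cholesky(Rt)
    total = 0.0
    for _ in range(trials):
        Hw = (rng.standard_normal((nr, nt)) + 1j * rng.standard_normal((nr, nt))) / np.sqrt(2)
        H = Lr @ Hw @ Lt.conj().T
        _, logdet = np.linalg.slogdet(np.eye(nr) + (snr / nt) * H @ H.conj().T)
        total += logdet / np.log(2)
    return total / trials

rho = 0.7  # assumed exponential correlation at both link ends
Rt = np.array([[rho ** abs(i - j) for j in range(4)] for i in range(4)], dtype=complex)
Rr = Rt.copy()
print(ergodic_capacity(Rr, Rt))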
arxiv-4465
0808.0037
An Energy-Based Comparison of Long-Hop and Short-Hop Routing in MIMO Networks
<|reference_start|>An Energy-Based Comparison of Long-Hop and Short-Hop Routing in MIMO Networks: This paper considers the problem of selecting either routes that consist of long hops or routes that consist of short hops in a network of multiple-antenna nodes, where each transmitting node employs spatial multiplexing. This distance-dependent route selection problem is approached from the viewpoint of energy efficiency, where a route is selected with the objective of minimizing the transmission energy consumed while satisfying a target outage criterion at the final destination. Deterministic line networks and two-dimensional random networks are considered. It is shown that when 1) the number of hops traversed between the source and destination grows large or 2) when the target success probability approaches one or 3) when the number of transmit and/or receive antennas grows large, short-hop routing requires less energy than long-hop routing. It is also shown that if both routing strategies are subject to the same delay constraint, long-hop routing requires less energy than short-hop routing as the target success probability approaches one. In addition, numerical analysis indicates that given loose outage constraints, only a small number of transmit antennas are needed for short-hop routing to have its maximum advantage over long-hop routing, while given stringent outage constraints, the advantage of short-hop over long-hop routing always increases with additional transmit antennas.<|reference_end|>
arxiv
@article{lo2008an, title={An Energy-Based Comparison of Long-Hop and Short-Hop Routing in MIMO Networks}, author={Caleb K. Lo, Sriram Vishwanath and Robert W. Heath Jr}, journal={arXiv preprint arXiv:0808.0037}, year={2008}, doi={10.1109/TVT.2009.2033075}, archivePrefix={arXiv}, eprint={0808.0037}, primaryClass={cs.IT math.IT} }
lo2008an
arxiv-4466
0808.0055
Integrating OPC Data into GSN Infrastructures
<|reference_start|>Integrating OPC Data into GSN Infrastructures: This paper presents the design and the implementation of an interface software component between OLE for Process Control (OPC) formatted data and the Global Sensor Network (GSN) framework for management of data from sensors. This interface, named wrapper in the GSN context, communicates in Data Access mode with an OPC server and converts the received data to the internal GSN format, according to several temporal modes. This work is realized in the context of a Ph.D. Thesis about the control of distributed information fusion systems. The developed component allows the injection of OPC data, such as measurements or industrial process state information, into a distributed information fusion system deployed in a GSN framework. The component behaves as a client of the OPC server. Developed in Java and based on the OpenSCADA Utgard library, it can be deployed on any computation node supporting a Java virtual machine. The experiments show the component's conformity with the Data Access 2.05a specification of the OPC standard and with the temporal modes.<|reference_end|>
arxiv
@article{passalacqua2008integrating, title={Integrating OPC Data into GSN Infrastructures}, author={Olivier Passalacqua (LISTIC), Eric Benoit (LISTIC, LAMII), Marc-Philippe Huget (LISTIC), Patrice Moreaux (LISTIC)}, journal={IADIS International Conference APPLIED COMPUTING 2008, Algarve : Portugal (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0808.0055}, primaryClass={cs.SE} }
passalacqua2008integrating
arxiv-4467
0808.0056
I'm sorry to say, but your understanding of image processing fundamentals is absolutely wrong
<|reference_start|>I'm sorry to say, but your understanding of image processing fundamentals is absolutely wrong: The ongoing discussion whether modern vision systems have to be viewed as visually-enabled cognitive systems or cognitively-enabled vision systems is groundless, because perceptual and cognitive faculties of vision are separate components of human (and consequently, artificial) information processing system modeling.<|reference_end|>
arxiv
@article{diamant2008i'm, title={I'm sorry to say, but your understanding of image processing fundamentals is absolutely wrong}, author={Emanuel Diamant}, journal={arXiv preprint arXiv:0808.0056}, year={2008}, archivePrefix={arXiv}, eprint={0808.0056}, primaryClass={cs.AI cs.CV cs.IR cs.RO q-bio.NC} }
diamant2008i'm
arxiv-4468
0808.0059
Quantum walk based search algorithms
<|reference_start|>Quantum walk based search algorithms: In this survey paper we give an intuitive treatment of the discrete time quantization of classical Markov chains. Grover search and the quantum walk based search algorithms of Ambainis, Szegedy and Magniez et al. will be stated as quantum analogues of classical search procedures. We present a rather detailed description of a somewhat simplified version of the MNRS algorithm. Finally, in the query complexity model, we show how quantum walks can be applied to the following search problems: Element Distinctness, Matrix Product Verification, Restricted Range Associativity, Triangle, and Group Commutativity.<|reference_end|>
arxiv
@article{santha2008quantum, title={Quantum walk based search algorithms}, author={Miklos Santha}, journal={5th Theory and Applications of Models of Computation (TAMC08), Xian, April 2008, LNCS 4978, 31-46}, year={2008}, archivePrefix={arXiv}, eprint={0808.0059}, primaryClass={quant-ph cs.CC} }
santha2008quantum
arxiv-4469
0808.0075
Optimal Beamforming for Two-Way Multi-Antenna Relay Channel with Analogue Network Coding
<|reference_start|>Optimal Beamforming for Two-Way Multi-Antenna Relay Channel with Analogue Network Coding: This paper studies the wireless two-way relay channel (TWRC), where two source nodes, S1 and S2, exchange information through an assisting relay node, R. It is assumed that R receives the sum signal from S1 and S2 in one time-slot, and then amplifies and forwards the received signal to both S1 and S2 in the next time-slot. By applying the principle of analogue network coding (ANC), each of S1 and S2 cancels the so-called "self-interference" in the received signal from R and then decodes the desired message. Assuming that S1 and S2 are each equipped with a single antenna and R with multiple antennas, this paper analyzes the capacity region of an ANC-based TWRC with linear processing (beamforming) at R. The capacity region contains all the achievable bidirectional rate-pairs of S1 and S2 under the given transmit power constraints at S1, S2, and R. We present the optimal relay beamforming structure as well as an efficient algorithm to compute the optimal beamforming matrix based on convex optimization techniques. Low-complexity suboptimal relay beamforming schemes are also presented, and their achievable rates are compared against the capacity with the optimal scheme.<|reference_end|>
arxiv
@article{zhang2008optimal, title={Optimal Beamforming for Two-Way Multi-Antenna Relay Channel with Analogue Network Coding}, author={Rui Zhang, Ying-Chang Liang, Chin Choy Chai, and Shuguang Cui}, journal={arXiv preprint arXiv:0808.0075}, year={2008}, archivePrefix={arXiv}, eprint={0808.0075}, primaryClass={cs.IT math.IT} }
zhang2008optimal
arxiv-4470
0808.0084
On the hitting times of quantum versus random walks
<|reference_start|>On the hitting times of quantum versus random walks: In this paper we define new Monte Carlo type classical and quantum hitting times, and we prove several relationships among these and the already existing Las Vegas type definitions. In particular, we show that for some marked state the two types of hitting time are of the same order in both the classical and the quantum case. Further, we prove that for any reversible ergodic Markov chain $P$, the quantum hitting time of the quantum analogue of $P$ has the same order as the square root of the classical hitting time of $P$. We also investigate the (im)possibility of achieving a gap greater than quadratic using an alternative quantum walk. Finally, we present new quantum algorithms for the detection and finding problems. The complexities of both algorithms are related to the new, potentially smaller, quantum hitting times. The detection algorithm is based on phase estimation and is particularly simple. The finding algorithm combines a similar phase estimation based procedure with ideas of Tulsi from his recent theorem for the 2D grid. Extending his result, we show that for any state-transitive Markov chain with unique marked state, the quantum hitting time is of the same order for both the detection and finding problems.<|reference_end|>
arxiv
@article{magniez2008on, title={On the hitting times of quantum versus random walks}, author={Frederic Magniez, Ashwin Nayak, Peter C. Richter, and Miklos Santha}, journal={arXiv preprint arXiv:0808.0084}, year={2008}, archivePrefix={arXiv}, eprint={0808.0084}, primaryClass={quant-ph cs.DS} }
magniez2008on
arxiv-4471
0808.0103
Use of Astronomical Literature - A Report on Usage Patterns
<|reference_start|>Use of Astronomical Literature - A Report on Usage Patterns: In this paper we present a number of metrics for usage of the SAO/NASA Astrophysics Data System (ADS). Since the ADS is used by the entire astronomical community, these are indicative of how the astronomical literature is used. We will show how the use of the ADS has changed both quantitatively and qualitatively. We will also show that different types of users access the system in different ways. Finally, we show how use of the ADS has evolved over the years in various regions of the world. The ADS is funded by NASA Grant NNG06GG68G.<|reference_end|>
arxiv
@article{henneken2008use, title={Use of Astronomical Literature - A Report on Usage Patterns}, author={Edwin A. Henneken, Michael J. Kurtz, Alberto Accomazzi, Carolyn S. Grant, Donna Thompson, Elizabeth Bohlen, Stephen S. Murray}, journal={arXiv preprint arXiv:0808.0103}, year={2008}, doi={10.1016/j.joi.2008.10.001}, archivePrefix={arXiv}, eprint={0808.0103}, primaryClass={cs.DL astro-ph} }
henneken2008use
arxiv-4472
0808.0111
Stay by thy neighbor? Social organization determines the efficiency of biodiversity markets with spatial incentives
<|reference_start|>Stay by thy neighbor? Social organization determines the efficiency of biodiversity markets with spatial incentives: Market-based conservation instruments, such as payments, auctions or tradable permits, are environmental policies that create financial incentives for landowners to engage in voluntary conservation on their land. But what if ecological processes operate across property boundaries and land use decisions on one property influence ecosystem functions on neighboring sites? This paper examines how to account for such spatial externalities when designing market-based conservation instruments. We use an agent-based model to analyze different spatial metrics and their implications on land use decisions in a dynamic cost environment. The model contains a number of alternative submodels which differ in incentive design and social interactions of agents, the latter including coordinating as well as cooperating behavior of agents. We find that incentive design and social interactions have a strong influence on the spatial allocation and the costs of the conservation market.<|reference_end|>
arxiv
@article{hartig2008stay, title={Stay by thy neighbor? Social organization determines the efficiency of biodiversity markets with spatial incentives}, author={Florian Hartig and Martin Drechsler}, journal={Ecological Complexity, 2010, 7, 91-99}, year={2008}, doi={10.1016/j.ecocom.2009.07.001}, archivePrefix={arXiv}, eprint={0808.0111}, primaryClass={physics.soc-ph cs.GT} }
hartig2008stay
arxiv-4473
0808.0112
Mathematical Structure of Quantum Decision Theory
<|reference_start|>Mathematical Structure of Quantum Decision Theory: One of the most complex systems is the human brain, whose formalized functioning is characterized by decision theory. We present a "Quantum Decision Theory" of decision making, based on the mathematical theory of separable Hilbert spaces. This mathematical structure captures the effect of superposition of composite prospects, including many incorporated intentions, which allows us to explain a variety of interesting fallacies and anomalies that have been reported to particularize the decision making of real human beings. The theory describes entangled decision making, non-commutativity of subsequent decisions, and intention interference of composite prospects. We demonstrate how the violation of Savage's sure-thing principle (disjunction effect) can be explained as a result of the interference of intentions, when making decisions under uncertainty. The conjunction fallacy is also explained by the presence of the interference terms. We demonstrate that all known anomalies and paradoxes, documented in the context of classical decision theory, are reducible to just a few mathematical archetypes, all of which find straightforward explanations in the framework of the developed quantum approach.<|reference_end|>
arxiv
@article{yukalov2008mathematical, title={Mathematical Structure of Quantum Decision Theory}, author={V.I. Yukalov and D. Sornette}, journal={Advances in Complex Systems 13, 659-698 (2010)}, year={2008}, archivePrefix={arXiv}, eprint={0808.0112}, primaryClass={cs.AI math-ph math.MP quant-ph} }
yukalov2008mathematical
arxiv-4474
0808.0148
Eigenvalue bounds, spectral partitioning, and metrical deformations via flows
<|reference_start|>Eigenvalue bounds, spectral partitioning, and metrical deformations via flows: We present a new method for upper bounding the second eigenvalue of the Laplacian of graphs. Our approach uses multi-commodity flows to deform the geometry of the graph; we embed the resulting metric into Euclidean space to recover a bound on the Rayleigh quotient. Using this, we show that every $n$-vertex graph of genus $g$ and maximum degree $d$ satisfies $\lambda_2(G) = O((g+1)^3 d/n)$. This recovers the $O(d/n)$ bound of Spielman and Teng for planar graphs, and compares to Kelner's bound of $O((g+1) poly(d)/n)$, but our proof does not make use of conformal mappings or circle packings. We are thus able to extend this to resolve positively a conjecture of Spielman and Teng, by proving that $\lambda_2(G) = O(d h^6 \log h/n)$ whenever $G$ is $K_h$-minor free. This shows, in particular, that spectral partitioning can be used to recover $O(\sqrt{n})$-sized separators in bounded degree graphs that exclude a fixed minor. We extend this further by obtaining nearly optimal bounds on $\lambda_2$ for graphs which exclude small-depth minors in the sense of Plotkin, Rao, and Smith. Consequently, we show that spectral algorithms find small separators in a general class of geometric graphs. Moreover, while the standard "sweep" algorithm applied to the second eigenvector may fail to find good quotient cuts in graphs of unbounded degree, our approach produces a vector that works for arbitrary graphs. This yields an alternate proof of the result of Alon, Seymour, and Thomas that every excluded-minor family of graphs has $O(\sqrt{n})$-node balanced separators.<|reference_end|>
arxiv
@article{biswal2008eigenvalue, title={Eigenvalue bounds, spectral partitioning, and metrical deformations via flows}, author={Punyashloka Biswal, James R. Lee, Satish Rao}, journal={arXiv preprint arXiv:0808.0148}, year={2008}, archivePrefix={arXiv}, eprint={0808.0148}, primaryClass={cs.DS cs.CG math.MG math.SP} }
biswal2008eigenvalue
arxiv-4475
0808.0156
Authenticated Adversarial Routing
<|reference_start|>Authenticated Adversarial Routing: The aim of this paper is to demonstrate the feasibility of authenticated throughput-efficient routing in an unreliable and dynamically changing synchronous network in which a majority of nodes are malicious insiders that try to destroy and alter messages or disrupt communication in any way. More specifically, in this paper we seek to answer the following question: Given a network in which the majority of nodes are controlled by a malicious adversary and whose topology is changing every round, is it possible to develop a protocol with polynomially-bounded memory per processor that guarantees throughput-efficient and correct end-to-end communication? We answer the question affirmatively for extremely general corruption patterns: we only request that the topology of the network and the corruption pattern of the adversary leave at least one path each round connecting the sender and receiver through honest nodes (though this path may change at every round). Our construction works in the public-key setting and enjoys bounded memory per processor (which does not depend on the amount of traffic and is polynomial in the network size). Our protocol achieves optimal transfer rate with negligible decoding error. We stress that our protocol assumes no knowledge of which nodes are corrupted nor which path is reliable at any round, and is also fully distributed with nodes making decisions locally, so that they need not know the topology of the network at any time.<|reference_end|>
arxiv
@article{amir2008authenticated, title={Authenticated Adversarial Routing}, author={Yair Amir, Paul Bunn, Rafail Ostrovsky}, journal={arXiv preprint arXiv:0808.0156}, year={2008}, archivePrefix={arXiv}, eprint={0808.0156}, primaryClass={cs.CR cs.NI} }
amir2008authenticated
arxiv-4476
0808.0159
Front Propagation with Rejuvenation in Flipping Processes
<|reference_start|>Front Propagation with Rejuvenation in Flipping Processes: We study a directed flipping process that underlies the performance of the random edge simplex algorithm. In this stochastic process, which takes place on a one-dimensional lattice whose sites may be either occupied or vacant, occupied sites become vacant at a constant rate and simultaneously cause all sites to the right to change their state. This random process exhibits rich phenomenology. First, there is a front, defined by the position of the left-most occupied site, that propagates at a nontrivial velocity. Second, the front involves a depletion zone with an excess of vacant sites. The total excess D_k increases logarithmically, D_k ~ ln k, with the distance k from the front. Third, the front exhibits rejuvenation -- young fronts are vigorous but old fronts are sluggish. We investigate these phenomena using a quasi-static approximation, direct solutions of small systems, and numerical simulations.<|reference_end|>
arxiv
@article{antal2008front, title={Front Propagation with Rejuvenation in Flipping Processes}, author={T. Antal, D. ben-Avraham, E. Ben-Naim, P.L. Krapivsky}, journal={J. Phys. A 41, 465002 (2008)}, year={2008}, doi={10.1088/1751-8113/41/46/465002}, archivePrefix={arXiv}, eprint={0808.0159}, primaryClass={cond-mat.stat-mech cs.DS math.PR} }
antal2008front
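As an illustrative aside to the abstract above: a minimal Python sketch of the dynamics it describes, in which an occupied site is picked at random (all occupied sites firing at equal rate), becomes vacant, and every site to its right flips state, while the front (the left-most occupied site) is tracked. The lattice size, step count, and fully occupied initial condition are illustrative assumptions.

import random

def simulate_flipping(L=200, steps=2000, seed=0):
    # 1 = occupied, 0 = vacant; returns the trajectory of the front position.
    rng = random.Random(seed)
    state = [1] * L
    front = []
    for _ in range(steps):
        occupied = [i for i, s in enumerate(state) if s == 1]
        if not occupied:
            break
        i = rng.choice(occupied)
        state[i] = 0                         # the chosen occupied site empties
        for j in range(i + 1, L):            # every site to its right flips
            state[j] ^= 1
        front.append(next((k for k, s in enumerate(state) if s == 1), L))
    return front

front = simulate_flipping()
print(front[-10:])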
arxiv-4477
0808.0163
Twice-Ramanujan Sparsifiers
<|reference_start|>Twice-Ramanujan Sparsifiers: We prove that every graph has a spectral sparsifier with a number of edges linear in its number of vertices. As linear-sized spectral sparsifiers of complete graphs are expanders, our sparsifiers of arbitrary graphs can be viewed as generalizations of expander graphs. In particular, we prove that for every $d>1$ and every undirected, weighted graph $G=(V,E,w)$ on $n$ vertices, there exists a weighted graph $H=(V,F,\tilde{w})$ with at most $\lceil d(n-1) \rceil$ edges such that for every $x \in \mathbb{R}^{V}$, \[ x^{T}L_{G}x \leq x^{T}L_{H}x \leq (\frac{d+1+2\sqrt{d}}{d+1-2\sqrt{d}})\cdot x^{T}L_{G}x \] where $L_{G}$ and $L_{H}$ are the Laplacian matrices of $G$ and $H$, respectively. Thus, $H$ approximates $G$ spectrally at least as well as a Ramanujan expander with $dn/2$ edges approximates the complete graph. We give an elementary deterministic polynomial time algorithm for constructing $H$.<|reference_end|>
arxiv
@article{batson2008twice-ramanujan, title={Twice-Ramanujan Sparsifiers}, author={Joshua Batson, Daniel A. Spielman, Nikhil Srivastava}, journal={arXiv preprint arXiv:0808.0163}, year={2008}, archivePrefix={arXiv}, eprint={0808.0163}, primaryClass={cs.DS cs.DM} }
batson2008twice-ramanujan
arxiv-4478
0808.0202
The Degree Distribution of Random k-Trees
<|reference_start|>The Degree Distribution of Random k-Trees: A power law degree distribution is established for a graph evolution model based on the graph class of k-trees. This k-tree-based graph process can be viewed as an idealized model that captures some characteristics of the preferential attachment and copying mechanisms that existing evolving graph processes fail to model due to technical obstacles. The result also serves as a further cautionary note reinforcing the point of view that a power law degree distribution should not be regarded as the only important characteristic of a complex network, as has been previously argued.<|reference_end|>
arxiv
@article{gao2008the, title={The Degree Distribution of Random k-Trees}, author={Yong Gao}, journal={arXiv preprint arXiv:0808.0202}, year={2008}, doi={10.1016/j.tcs.2008.10.015}, archivePrefix={arXiv}, eprint={0808.0202}, primaryClass={cs.DM cs.NI} }
gao2008the
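As an illustrative aside to the abstract above: a minimal Python sketch of one common random k-tree growth process, in which each new vertex is joined to the vertices of a uniformly chosen existing k-clique, followed by a dump of the empirical degree histogram. The paper's evolution model may differ in details, and the parameters are arbitrary.

import random
from collections import Counter
from itertools import combinations

def random_k_tree_degrees(n, k=2, seed=0):
    # Start from a (k+1)-clique; each new vertex attaches to a random k-clique,
    # which creates k new k-cliques. Returns the degree of every vertex.
    rng = random.Random(seed)
    degree = {v: k for v in range(k + 1)}
    k_cliques = [frozenset(c) for c in combinations(range(k + 1), k)]
    for v in range(k + 1, n):
        chosen = rng.choice(k_cliques)
        degree[v] = k
        for u in chosen:
            degree[u] += 1
        for sub in combinations(chosen, k - 1):
            k_cliques.append(frozenset(sub) | {v})
    return degree

deg = random_k_tree_degrees(20000, k=2)
hist = Counter(deg.values())
for d in sorted(hist)[:10]:
    print(d, hist[d])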
arxiv-4479
0808.0234
DMT of Multi-hop Cooperative Networks - Part I: Basic Results
<|reference_start|>DMT of Multi-hop Cooperative Networks - Part I: Basic Results: In this two-part paper, the DMT of cooperative multi-hop networks is examined. The focus is on single-source single-sink (ss-ss) multi-hop relay networks having slow-fading links and relays that potentially possess multiple antennas. The present paper examines the two end-points of the DMT of full-duplex networks. In particular, the maximum achievable diversity of arbitrary multi-terminal wireless networks is shown to be equal to the min-cut. The maximum multiplexing gain of arbitrary full-duplex ss-ss networks is shown to be equal to the min-cut rank, using a new connection to a deterministic network. We also prove some basic results including a proof that the colored noise encountered in AF protocols for cooperative networks can be treated as white noise for DMT computations. The DMT of a parallel channel with independent MIMO links is also computed here. As an application of these basic results, we prove that a linear tradeoff between maximum diversity and maximum multiplexing gain is achievable for full-duplex networks with single antenna nodes. All protocols in this paper are explicit and rely only upon amplify-and-forward (AF) relaying. Half duplex networks are studied, and explicit codes for all protocols proposed in both parts, are provided in the companion paper.<|reference_end|>
arxiv
@article{sreeram2008dmt, title={DMT of Multi-hop Cooperative Networks - Part I: Basic Results}, author={K. Sreeram, S. Birenjith, P. Vijay Kumar}, journal={arXiv preprint arXiv:0808.0234}, year={2008}, archivePrefix={arXiv}, eprint={0808.0234}, primaryClass={cs.IT math.IT} }
sreeram2008dmt
arxiv-4480
0808.0235
DMT of Multi-hop Cooperative Networks - Part II: Half-Duplex Networks with Full-Duplex Performance
<|reference_start|>DMT of Multi-hop Cooperative Networks - Part II: Half-Duplex Networks with Full-Duplex Performance: We consider single-source single-sink (ss-ss) multi-hop relay networks, with slow-fading links and single-antenna half-duplex relay nodes. In a companion paper, we established some basic results which laid the foundation for the results presented here. In the present paper, we consider two families of networks of half-duplex networks. KPP networks may be viewed as the union of K node-disjoint parallel relaying paths. Generalizations of these networks include KPP(I) networks, which permit interference between paths and KPP(D) networks, which possess a direct link between source and sink. We characterize the DMT of these families of networks completely and show that they can achieve the cut-set bound, thus proving that full-duplex performance can be obtained even in the presence of the half-duplex constraint. We then consider layered networks, and prove that a linear DMT between maximum diversity and maximum multiplexing gain is achievable. All protocols in this paper are explicit and use only amplify-and-forward relaying. We also construct codes that achieve the optimal DMT for all the proposed schemes. Two key implications of the results in the paper are that the half-duplex constraint does not entail any rate loss for a large class of cooperative networks and that AF protocols are often optimal.<|reference_end|>
arxiv
@article{sreeram2008dmt, title={DMT of Multi-hop Cooperative Networks - Part II: Half-Duplex Networks with Full-Duplex Performance}, author={K. Sreeram, S. Birenjith, P. Vijay Kumar}, journal={arXiv preprint arXiv:0808.0235}, year={2008}, archivePrefix={arXiv}, eprint={0808.0235}, primaryClass={cs.IT math.IT} }
sreeram2008dmt
arxiv-4481
0808.0247
A Public Key Block Cipher Based on Multivariate Quadratic Quasigroups
<|reference_start|>A Public Key Block Cipher Based on Multivariate Quadratic Quasigroups: We have designed a new class of public key algorithms based on quasigroup string transformations using a specific class of quasigroups called multivariate quadratic quasigroups (MQQ). Our public key algorithm is a bijective mapping; it does not perform message expansion and can be used both for encryption and signatures. The public key consists of n quadratic polynomials in n variables, where n=140, 160, ... . A particular characteristic of our public key algorithm is that it is very fast and highly parallelizable. More concretely, it has the speed of a typical modern symmetric block cipher - the reason for the phrase "A Public Key Block Cipher" in the title of this paper. Namely, the reference C code for the 160-bit variant of the algorithm performs decryption in less than 11,000 cycles (on Intel Core 2 Duo -- using only one processor core), and around 6,000 cycles using two CPU cores and the OpenMP 2.0 library. However, implemented on a Xilinx Virtex-5 FPGA running at 249.4 MHz, it achieves decryption throughput of 399 Mbps, and implemented on four Xilinx Virtex-5 chips running at 276.7 MHz it achieves encryption throughput of 44.27 Gbps. Compared to the fastest RSA implementations on similar FPGA platforms, the MQQ algorithm is more than 10,000 times faster.<|reference_end|>
arxiv
@article{gligoroski2008a, title={A Public Key Block Cipher Based on Multivariate Quadratic Quasigroups}, author={Danilo Gligoroski and Smile Markovski and Svein Johan Knapskog}, journal={arXiv preprint arXiv:0808.0247}, year={2008}, archivePrefix={arXiv}, eprint={0808.0247}, primaryClass={cs.CR} }
gligoroski2008a
arxiv-4482
0808.0272
Kovalenko's Full-Rank Limit and Overhead as Lower Bounds for Error-Performances of LDPC and LT Codes over Binary Erasure Channels
<|reference_start|>Kovalenko's Full-Rank Limit and Overhead as Lower Bounds for Error-Performances of LDPC and LT Codes over Binary Erasure Channels: We present Kovalenko's full-rank limit as a tight lower bound for decoding error probability of LDPC codes and LT codes over BEC. From the limit, we derive a full-rank overhead as a lower bound for stable overheads for successful maximum-likelihood decoding of the codes.<|reference_end|>
arxiv
@article{lee2008kovalenko's, title={Kovalenko's Full-Rank Limit and Overhead as Lower Bounds for Error-Performances of LDPC and LT Codes over Binary Erasure Channels}, author={Ki-Moon Lee, Hayder Radha, and Beom-Jin Kim}, journal={arXiv preprint arXiv:0808.0272}, year={2008}, doi={10.1109/ISITA.2008.4895488}, archivePrefix={arXiv}, eprint={0808.0272}, primaryClass={cs.IT cs.DM math.CO math.IT} }
lee2008kovalenko's
arxiv-4483
0808.0284
Uniqueness of certain polynomials constant on a line
<|reference_start|>Uniqueness of certain polynomials constant on a line: We study a question with connections to linear algebra, real algebraic geometry, combinatorics, and complex analysis. Let $p(x,y)$ be a polynomial of degree $d$ with $N$ positive coefficients and no negative coefficients, such that $p=1$ when $x+y=1$. A sharp estimate $d \leq 2N-3$ is known. In this paper we study the $p$ for which equality holds. We prove some new results about the form of these "sharp" polynomials. Using these new results and using two independent computational methods we give a complete classification of these polynomials up to $d=17$. The question is motivated by the problem of classification of CR maps between spheres in different dimensions.<|reference_end|>
arxiv
@article{lebl2008uniqueness, title={Uniqueness of certain polynomials constant on a line}, author={Jiri Lebl and Daniel Lichtblau}, journal={Linear Algebra and Its Applications 433 (2010) pp. 824-837}, year={2008}, doi={10.1016/j.laa.2010.04.020}, archivePrefix={arXiv}, eprint={0808.0284}, primaryClass={math.CV cs.CG math.NT} }
lebl2008uniqueness
arxiv-4484
0808.0291
One shot schemes for decentralized quickest change detection
<|reference_start|>One shot schemes for decentralized quickest change detection: This work considers the problem of quickest detection with N distributed sensors that receive continuous sequential observations from the environment. These sensors employ cumulative sum (CUSUM) strategies and communicate to a central fusion center by one shot schemes. One shot schemes are schemes in which the sensors communicate with the fusion center only once, after which they must signal a detection. The communication is clearly asynchronous and the case is considered in which the fusion center employs a minimal strategy, which means that it declares an alarm when the first communication takes place. It is assumed that the observations received at the sensors are independent and that the time points at which the appearance of a signal can take place are different. It is shown that there is no loss of performance of one shot schemes as compared to the centralized case in an extended Lorden min-max sense, since the minimum of N CUSUMs is asymptotically optimal as the mean time between false alarms increases without bound.<|reference_end|>
arxiv
@article{hadjiliadis2008one, title={One shot schemes for decentralized quickest change detection}, author={Olympia Hadjiliadis, Hongzhong Zhang and H.V. Poor}, journal={arXiv preprint arXiv:0808.0291}, year={2008}, archivePrefix={arXiv}, eprint={0808.0291}, primaryClass={cs.IT math.IT} }
hadjiliadis2008one
arxiv-4485
0808.0298
Computing the nucleolus of weighted voting games
<|reference_start|>Computing the nucleolus of weighted voting games: Weighted voting games (WVG) are coalitional games in which an agent's contribution to a coalition is given by his weight, and a coalition wins if its total weight meets or exceeds a given quota. These games model decision-making in political bodies as well as collaboration and surplus division in multiagent domains. The computational complexity of various solution concepts for weighted voting games has received a lot of attention in recent years. In particular, Elkind et al. (2007) studied the complexity of stability-related solution concepts in WVGs, namely, of the core, the least core, and the nucleolus. While they have completely characterized the algorithmic complexity of the core and the least core, for the nucleolus they have only provided an NP-hardness result. In this paper, we solve an open problem posed by Elkind et al. by showing that the nucleolus of WVGs, and, more generally, of k-vector weighted voting games with fixed k, can be computed in pseudopolynomial time, i.e., there exists an algorithm that correctly computes the nucleolus and runs in time polynomial in the number of players and the maximum weight. In doing so, we propose a general framework for computing the nucleolus, which may be applicable to a wider class of games.<|reference_end|>
arxiv
@article{elkind2008computing, title={Computing the nucleolus of weighted voting games}, author={Edith Elkind and Dmitrii V. Pasechnik}, journal={Proceedings of SODA 2009, pp. 327-335}, year={2008}, doi={10.1145/1496770.1496807}, archivePrefix={arXiv}, eprint={0808.0298}, primaryClass={cs.GT cs.DS} }
elkind2008computing
arxiv-4486
0808.0309
A Reliable SVD based Watermarking Scheme
<|reference_start|>A Reliable SVD based Watermarking Scheme: We propose a novel scheme for watermarking of digital images based on singular value decomposition (SVD), which makes use of the fact that the SVD subspace preserves a significant amount of information of an image, as compared to its singular value matrix, Zhang and Li (2005). The principal components of the watermark are embedded in the original image, leaving the detector with a complementary set of singular vectors for watermark extraction. The above step invariably ensures that watermark extraction from the embedded watermark image, using a modified matrix, is not possible, thereby removing a major drawback of an earlier proposed algorithm by Liu and Tan (2002).<|reference_end|>
arxiv
@article{jain2008a, title={A Reliable SVD based Watermarking Scheme}, author={Chirag Jain, Siddharth Arora, and Prasanta K. Panigrahi}, journal={arXiv preprint arXiv:0808.0309}, year={2008}, archivePrefix={arXiv}, eprint={0808.0309}, primaryClass={cs.MM} }
jain2008a
arxiv-4487
0808.0347
Towards a Process for Developing Maintenance Tools in Academia
<|reference_start|>Towards a Process for Developing Maintenance Tools in Academia: Building tools--from simple prototypes to industrial-strength applications--is a pervasive activity in academic research. When proposing a new technique for software maintenance, effective tool support is typically required to demonstrate the feasibility and effectiveness of the approach. However, even though tool building is pervasive and requires significant time and effort, it is still pursued in an ad hoc manner. In this paper, we address these issues by proposing a dedicated development process for tool building that takes the unique characteristics of an academic research environment into account. We first identify process requirements based on a review of the literature and our extensive tool building experience in the domain of maintenance tools. We then outline a process framework based on work products that accommodates the requirements while providing needed flexibility for tailoring the process to account for specific tool building approaches and project constraints. The work products are concrete milestones of the process, tracking progress, rationalizing (design) decisions, and documenting the current state of the tool building project. Thus, the work products provide important input for strategic project decisions and rapid initiation of new team members. Leveraging a dedicated tool building process promises tools that are designed, built, and maintained in a more disciplined, predictable and efficient manner.<|reference_end|>
arxiv
@article{kienle2008towards, title={Towards a Process for Developing Maintenance Tools in Academia}, author={Holger M. Kienle and Hausi A. M\"uller}, journal={arXiv preprint arXiv:0808.0347}, year={2008}, archivePrefix={arXiv}, eprint={0808.0347}, primaryClass={cs.SE} }
kienle2008towards
arxiv-4488
0808.0374
A 8 bits Pipeline Analog to Digital Converter Design for High Speed Camera Application
<|reference_start|>A 8 bits Pipeline Analog to Digital Converter Design for High Speed Camera Application: This paper describes a pipeline analog-to-digital converter implemented for a high speed camera. In a pipeline ADC design, the prime factor is designing an operational amplifier with high gain so that the ADC can operate at high speed. The other advantages of the pipeline architecture are that it is simple in concept, easy to implement in layout, and flexible enough to increase speed. We carried out the design and simulation using Mentor Graphics software with 0.6 \mu m CMOS technology, with a total power dissipation of 75.47 mW. Circuit techniques used include a precise comparator, an operational amplifier and clock management. A switched capacitor is used to sample and multiply at each stage. Simulation shows a worst-case DNL and INL of 0.75 LSB. The design operates at 5 V dc. The ADC achieves a SNDR of 44.86 dB. keywords: pipeline, switched capacitor, clock management<|reference_end|>
arxiv
@article{prasetyo2008a, title={A 8 bits Pipeline Analog to Digital Converter Design for High Speed Camera Application}, author={Eri Prasetyo, Hamzah Afandi, Nurul Huda Dominique Ginhac, Michel Paindavoine}, journal={EEPIS, 2007}, year={2008}, archivePrefix={arXiv}, eprint={0808.0374}, primaryClass={cs.RO cs.CV} }
prasetyo2008a
arxiv-4489
0808.0387
Design and Implementation a 8 bits Pipeline Analog to Digital Converter in the Technology 0.6 \mu m CMOS Process
<|reference_start|>Design and Implementation a 8 bits Pipeline Analog to Digital Converter in the Technology 0.6 \mu m CMOS Process: This paper describes an 8-bit, 20 Msamples/s pipeline analog-to-digital converter implemented in 0.6 \mu m CMOS technology with a total power dissipation of 75.47 mW. Circuit techniques used include a precise comparator, an operational amplifier and clock management. A switched capacitor is used to sample and multiply at each stage. Simulation shows a worst-case DNL and INL of 0.75 LSB. The design operates at 5 V dc. The ADC achieves a SNDR of 44.86 dB. keywords: pipeline, switched capacitor, clock management<|reference_end|>
arxiv
@article{prasetyo2008design, title={Design and Implementation a 8 bits Pipeline Analog to Digital Converter in the Technology 0.6 \mu m CMOS Process}, author={Eri Prasetyo, Dominique Ginhac, Michel Paindavoine}, journal={arXiv preprint arXiv:0808.0387}, year={2008}, archivePrefix={arXiv}, eprint={0808.0387}, primaryClass={cs.RO cs.CV} }
prasetyo2008design
arxiv-4490
0808.0441
Exhaustible sets in higher-type computation
<|reference_start|>Exhaustible sets in higher-type computation: We say that a set is exhaustible if it admits algorithmic universal quantification for continuous predicates in finite time, and searchable if there is an algorithm that, given any continuous predicate, either selects an element for which the predicate holds or else tells there is no example. The Cantor space of infinite sequences of binary digits is known to be searchable. Searchable sets are exhaustible, and we show that the converse also holds for sets of hereditarily total elements in the hierarchy of continuous functionals; moreover, a selection functional can be constructed uniformly from a quantification functional. We prove that searchable sets are closed under intersections with decidable sets, and under the formation of computable images and of finite and countably infinite products. This is related to the fact, established here, that exhaustible sets are topologically compact. We obtain a complete description of exhaustible total sets by developing a computational version of a topological Arzela--Ascoli type characterization of compact subsets of function spaces. We also show that, in the non-empty case, they are precisely the computable images of the Cantor space. The emphasis of this paper is on the theory of exhaustible and searchable sets, but we also briefly sketch applications.<|reference_end|>
arxiv
@article{escardo2008exhaustible, title={Exhaustible sets in higher-type computation}, author={Martin Escardo}, journal={Logical Methods in Computer Science, Volume 4, Issue 3 (August 27, 2008) lmcs:693}, year={2008}, doi={10.2168/LMCS-4(3:3)2008}, archivePrefix={arXiv}, eprint={0808.0441}, primaryClass={cs.LO} }
escardo2008exhaustible
arxiv-4491
0808.0469
Non-degeneracy of Pollard Rho Collisions
<|reference_start|>Non-degeneracy of Pollard Rho Collisions: The Pollard Rho algorithm is a widely used algorithm for solving discrete logarithms on general cyclic groups, including elliptic curves. Recently the first nontrivial runtime estimates were provided for it, culminating in a sharp O(sqrt(n)) bound for the collision time on a cyclic group of order n. In this paper we show that for n satisfying a mild arithmetic condition, the collisions guaranteed by these results are nondegenerate with high probability: that is, the Pollard Rho algorithm successfully finds the discrete logarithm.<|reference_end|>
arxiv
@article{miller2008non-degeneracy, title={Non-degeneracy of Pollard Rho Collisions}, author={Stephen D. Miller and Ramarathnam Venkatesan}, journal={arXiv preprint arXiv:0808.0469}, year={2008}, archivePrefix={arXiv}, eprint={0808.0469}, primaryClass={math.NT cs.CR cs.DM math.CO} }
miller2008non-degeneracy
arxiv-4492
0808.0509
Evolving Clustered Random Networks
<|reference_start|>Evolving Clustered Random Networks: We propose a Markov chain simulation method to generate simple connected random graphs with a specified degree sequence and level of clustering. The networks generated by our algorithm are random in all other respects and can thus serve as generic models for studying the impacts of degree distributions and clustering on dynamical processes as well as null models for detecting other structural properties in empirical networks.<|reference_end|>
arxiv
@article{bansal2008evolving, title={Evolving Clustered Random Networks}, author={Shweta Bansal, Shashank Khandelwal, Lauren Ancel Meyers}, journal={BMC Bioinformatics, Vol 10: 405, 2009}, year={2008}, doi={10.1186/1471-2105-10-405}, archivePrefix={arXiv}, eprint={0808.0509}, primaryClass={cs.DM physics.soc-ph} }
bansal2008evolving
arxiv-4493
0808.0518
Building a terminology network for search: the KoMoHe project
<|reference_start|>Building a terminology network for search: the KoMoHe project: The paper reports on results of the GESIS-IZ project "Competence Center Modeling and Treatment of Semantic Heterogeneity" (KoMoHe). KoMoHe supervised a terminology mapping effort, in which 'cross-concordances' between major controlled vocabularies were organized, created and managed. In this paper we describe the establishment and implementation of cross-concordances for search in a digital library (DL).<|reference_end|>
arxiv
@article{mayr2008building, title={Building a terminology network for search: the KoMoHe project}, author={Philipp Mayr, Vivien Petras}, journal={arXiv preprint arXiv:0808.0518}, year={2008}, doi={10.18452/1264}, archivePrefix={arXiv}, eprint={0808.0518}, primaryClass={cs.DL cs.DB} }
mayr2008building
arxiv-4494
0808.0521
Logics for the Relational Syllogistic
<|reference_start|>Logics for the Relational Syllogistic: The Aristotelian syllogistic cannot account for the validity of many inferences involving relational facts. In this paper, we investigate the prospects for providing a relational syllogistic. We identify several fragments based on (a) whether negation is permitted on all nouns, including those in the subject of a sentence; and (b) whether the subject noun phrase may contain a relative clause. The logics we present are extensions of the classical syllogistic, and we pay special attention to the question of whether reductio ad absurdum is needed. Thus our main goal is to derive results on the existence (or non-existence) of syllogistic proof systems for relational fragments. We also determine the computational complexity of all our fragments.<|reference_end|>
arxiv
@article{pratt-hartmann2008logics, title={Logics for the Relational Syllogistic}, author={Ian Pratt-Hartmann and Lawrence S. Moss}, journal={Review of Symbolic Logic, 2(4), 2009, pp. 647--683}, year={2008}, doi={10.1017/S1755020309990086}, archivePrefix={arXiv}, eprint={0808.0521}, primaryClass={cs.LO cs.CC cs.CL} }
pratt-hartmann2008logics
arxiv-4495
0808.0540
Executable Set Theory and Arithmetic Encodings in Prolog
<|reference_start|>Executable Set Theory and Arithmetic Encodings in Prolog: The paper is organized as a self-contained literate Prolog program that implements elements of an executable finite set theory with focus on combinatorial generation and arithmetic encodings. The complete Prolog code is available at http://logic.csci.unt.edu/tarau/research/2008/pHFS.zip . First, ranking and unranking functions for some "mathematically elegant" data types in the universe of Hereditarily Finite Sets with Urelements are provided, resulting in arithmetic encodings for powersets, hypergraphs, ordinals and choice functions. After implementing a digraph representation of Hereditarily Finite Sets we define {\em decoration functions} that can recover well-founded sets from encodings of their associated acyclic digraphs. We conclude with an encoding of arbitrary digraphs and discuss a concept of duality induced by the set membership relation. In the process, we uncover the surprising possibility of internally sharing isomorphic objects, independently of their language level types and meanings.<|reference_end|>
arxiv
@article{tarau2008executable, title={Executable Set Theory and Arithmetic Encodings in Prolog}, author={Paul Tarau}, journal={arXiv preprint arXiv:0808.0540}, year={2008}, archivePrefix={arXiv}, eprint={0808.0540}, primaryClass={cs.LO cs.DM cs.DS cs.MS cs.SC} }
tarau2008executable
arxiv-4496
0808.0544
Generalized Cross-correlation Properties of Chu Sequences
<|reference_start|>Generalized Cross-correlation Properties of Chu Sequences: In this paper, we analyze the cross-correlation properties for Chu sequences, which provide information on the distribution of the maximum magnitudes of the cross-correlation function. Furthermore, we can obtain the number of available sequences for a given maximum magnitude of the cross-correlation function and the sequence length.<|reference_end|>
arxiv
@article{kang2008generalized, title={Generalized Cross-correlation Properties of Chu Sequences}, author={Jae Won Kang, Younghoon Whang, Hyo Yol Park and Kwang Soon Kim}, journal={arXiv preprint arXiv:0808.0544}, year={2008}, archivePrefix={arXiv}, eprint={0808.0544}, primaryClass={cs.IT math.IT} }
kang2008generalized
arxiv-4497
0808.0548
How could the replica method improve accuracy of performance assessment of channel coding?
<|reference_start|>How could the replica method improve accuracy of performance assessment of channel coding?: We explore the relation between the techniques of statistical mechanics and information theory for assessing the performance of channel coding. We base our study on a framework developed by Gallager in {\em IEEE Trans. Inform. Theory} {\bf 11}, 3 (1965), where the minimum decoding error probability is upper-bounded by an average of a generalized Chernoff's bound over a code ensemble. We show that the resulting bound in the framework can be directly assessed by the replica method, which has been developed in statistical mechanics of disordered systems, whereas in Gallager's original methodology further replacement by another bound utilizing Jensen's inequality is necessary. Our approach associates a seemingly {\em ad hoc} restriction with respect to an adjustable parameter for optimizing the bound with a phase transition between two replica symmetric solutions, and can improve the accuracy of performance assessments of general code ensembles including low density parity check codes, although its mathematical justification is still open.<|reference_end|>
arxiv
@article{kabashima2008how, title={How could the replica method improve accuracy of performance assessment of channel coding?}, author={Yoshiyuki Kabashima}, journal={arXiv preprint arXiv:0808.0548}, year={2008}, doi={10.1088/1742-6596/143/1/012018}, archivePrefix={arXiv}, eprint={0808.0548}, primaryClass={cs.IT math.IT} }
kabashima2008how
arxiv-4498
0808.0549
Resource Allocation of MU-OFDM Based Cognitive Radio Systems Under Partial Channel State Information
<|reference_start|>Resource Allocation of MU-OFDM Based Cognitive Radio Systems Under Partial Channel State Information: This paper has been withdrawn by the author due to some errors.<|reference_end|>
arxiv
@article{huang2008resource, title={Resource Allocation of MU-OFDM Based Cognitive Radio Systems Under Partial Channel State Information}, author={Dong Huang, Chunyan Miao, Cyril Leung, Zhiqi Shen}, journal={arXiv preprint arXiv:0808.0549}, year={2008}, archivePrefix={arXiv}, eprint={0808.0549}, primaryClass={cs.IT cs.NE math.CO math.IT} }
huang2008resource
arxiv-4499
0808.0554
Ranking and Unranking of Hereditarily Finite Functions and Permutations
<|reference_start|>Ranking and Unranking of Hereditarily Finite Functions and Permutations: Prolog's ability to return multiple answers on backtracking provides an elegant mechanism to derive reversible encodings of combinatorial objects as Natural Numbers, i.e. {\em ranking} and {\em unranking} functions. Starting from a generalization of Ackermann's encoding of Hereditarily Finite Sets with Urelements and a novel tupling/untupling operation, we derive encodings for Finite Functions and use them as building blocks for an executable theory of {\em Hereditarily Finite Functions}. The more difficult problem of {\em ranking} and {\em unranking} {\em Hereditarily Finite Permutations} is then tackled using Lehmer codes and factoradics. The paper is organized as a self-contained literate Prolog program available at \url{http://logic.csci.unt.edu/tarau/research/2008/pHFF.zip}<|reference_end|>
arxiv
@article{tarau2008ranking, title={Ranking and Unranking of Hereditarily Finite Functions and Permutations}, author={Paul Tarau}, journal={arXiv preprint arXiv:0808.0554}, year={2008}, archivePrefix={arXiv}, eprint={0808.0554}, primaryClass={cs.LO cs.MS} }
tarau2008ranking
arxiv-4500
0808.0555
Pairing Functions, Boolean Evaluation and Binary Decision Diagrams in Prolog
<|reference_start|>Pairing Functions, Boolean Evaluation and Binary Decision Diagrams in Prolog: A "pairing function" J associates a unique natural number z to any two natural numbers x,y such that for two "unpairing functions" K and L, the equalities K(J(x,y))=x, L(J(x,y))=y and J(K(z),L(z))=z hold. Using pairing functions on natural number representations of truth tables, we derive an encoding for Binary Decision Diagrams with the unique property that its boolean evaluation faithfully mimics its structural conversion to a natural number through recursive application of a matching pairing function. We then use this result to derive {\em ranking} and {\em unranking} functions for BDDs and reduced BDDs. The paper is organized as a self-contained literate Prolog program, available at http://logic.csci.unt.edu/tarau/research/2008/pBDD.zip Keywords: logic programming and computational mathematics, pairing/unpairing functions, encodings of boolean functions, binary decision diagrams, natural number representations of truth tables<|reference_end|>
arxiv
@article{tarau2008pairing, title={Pairing Functions, Boolean Evaluation and Binary Decision Diagrams in Prolog}, author={Paul Tarau}, journal={arXiv preprint arXiv:0808.0555}, year={2008}, archivePrefix={arXiv}, eprint={0808.0555}, primaryClass={cs.LO cs.SC} }
tarau2008pairing