corpus_id
stringlengths 7
12
| paper_id
stringlengths 9
16
| title
stringlengths 1
261
| abstract
stringlengths 70
4.02k
| source
stringclasses 1
value | bibtex
stringlengths 208
20.9k
| citation_key
stringlengths 6
100
|
---|---|---|---|---|---|---|
arxiv-4601 | 0808.2666 | Impact of Vehicular Communications Security on Transportation Safety | <|reference_start|>Impact of Vehicular Communications Security on Transportation Safety: Transportation safety, one of the main driving forces of the development of vehicular communication (VC) systems, relies on high-rate safety messaging (beaconing). At the same time, there is consensus among authorities, industry, and academia on the need to secure VC systems. With specific proposals in the literature, a critical question must be answered: can secure VC systems be practical and satisfy the requirements of safety applications, in spite of the significant communication and processing overhead and other restrictions security and privacy-enhancing mechanisms impose? To answer this question, we investigate in this paper the following three dimensions for secure and privacy-enhancing VC schemes: the reliability of communication, the processing overhead at each node, and the impact on a safety application. The results indicate that with the appropriate system design, including sufficiently high processing power, applications enabled by secure VC can be in practice as effective as those enabled by unsecured VC.<|reference_end|> | arxiv | @article{papadimitratos2008impact,
title={Impact of Vehicular Communications Security on Transportation Safety},
author={Panos Papadimitratos and Giorgio Calandriello and Jean-Pierre Hubaux
and Antonio Lioy},
journal={IEEE Conference on Computer Communications (INFOCOM) Workshop on
Mobile Networking for Vehicular Environments (MOVE), pp. 1-6, Phoenix, AZ,
USA, April 2008},
year={2008},
doi={10.1109/INFOCOM.2008.4544663},
archivePrefix={arXiv},
eprint={0808.2666},
primaryClass={cs.CR cs.NI}
} | papadimitratos2008impact |
arxiv-4602 | 0808.2668 | Secure Neighbor Discovery in Wireless Networks: Formal Investigation of Possibility | <|reference_start|>Secure Neighbor Discovery in Wireless Networks: Formal Investigation of Possibility: Wireless communication enables a broad spectrum of applications, ranging from commodity to tactical systems. Neighbor discovery (ND), that is, determining which devices are within direct radio communication, is a building block of network protocols and applications, and its vulnerability can severely compromise their functionalities. A number of proposals to secure ND have been published, but none have analyzed the problem formally. In this paper, we contribute such an analysis: We build a formal model capturing salient characteristics of wireless systems, most notably obstacles and interference, and we provide a specification of a basic variant of the ND problem. Then, we derive an impossibility result for a general class of protocols we term "time-based protocols," to which many of the schemes in the literature belong. We also identify the conditions under which the impossibility result is lifted. Moreover, we explore a second class of protocols we term "time- and location-based protocols," and prove they can secure ND.<|reference_end|> | arxiv | @article{poturalski2008secure,
title={Secure Neighbor Discovery in Wireless Networks: Formal Investigation of
Possibility},
author={Marcin Poturalski, Panos Papadimitratos and Jean-Pierre Hubaux},
journal={ACM Symposium on Information, Computer and Communications Security
(ASIACCS), pages 189-200, Tokyo, Japan, March 2008},
year={2008},
archivePrefix={arXiv},
eprint={0808.2668},
primaryClass={cs.CR cs.NI}
} | poturalski2008secure |
arxiv-4603 | 0808.2669 | Closed Timelike Curves Make Quantum and Classical Computing Equivalent | <|reference_start|>Closed Timelike Curves Make Quantum and Classical Computing Equivalent: While closed timelike curves (CTCs) are not known to exist, studying their consequences has led to nontrivial insights in general relativity, quantum information, and other areas. In this paper we show that if CTCs existed, then quantum computers would be no more powerful than classical computers: both would have the (extremely large) power of the complexity class PSPACE, consisting of all problems solvable by a conventional computer using a polynomial amount of memory. This solves an open problem proposed by one of us in 2005, and gives an essentially complete understanding of computational complexity in the presence of CTCs. Following the work of Deutsch, we treat a CTC as simply a region of spacetime where a "causal consistency" condition is imposed, meaning that Nature has to produce a (probabilistic or quantum) fixed-point of some evolution operator. Our conclusion is then a consequence of the following theorem: given any quantum circuit (not necessarily unitary), a fixed-point of the circuit can be (implicitly) computed in polynomial space. This theorem might have independent applications in quantum information.<|reference_end|> | arxiv | @article{aaronson2008closed,
title={Closed Timelike Curves Make Quantum and Classical Computing Equivalent},
author={Scott Aaronson and John Watrous},
journal={arXiv preprint arXiv:0808.2669},
year={2008},
doi={10.1098/rspa.2008.0350},
archivePrefix={arXiv},
eprint={0808.2669},
primaryClass={quant-ph cs.CC}
} | aaronson2008closed |
arxiv-4604 | 0808.2670 | Solving the apparent diversity-accuracy dilemma of recommender systems | <|reference_start|>Solving the apparent diversity-accuracy dilemma of recommender systems: Recommender systems use data on past user preferences to predict possible future likes and interests. A key challenge is that while the most useful individual recommendations are to be found among diverse niche objects, the most reliably accurate results are obtained by methods that recommend objects based on user or object similarity. In this paper we introduce a new algorithm specifically to address the challenge of diversity and show how it can be used to resolve this apparent dilemma when combined in an elegant hybrid with an accuracy-focused algorithm. By tuning the hybrid appropriately we are able to obtain, without relying on any semantic or context-specific information, simultaneous gains in both accuracy and diversity of recommendations.<|reference_end|> | arxiv | @article{zhou2008solving,
title={Solving the apparent diversity-accuracy dilemma of recommender systems},
author={Tao Zhou and Zoltan Kuscsik and Jian-Guo Liu and Matus Medo and
Joseph R. Wakeling and Yi-Cheng Zhang},
journal={PNAS 107, 4511-4515, 2010},
year={2008},
doi={10.1073/pnas.1000488107},
archivePrefix={arXiv},
eprint={0808.2670},
primaryClass={cs.IR physics.soc-ph}
} | zhou2008solving |
arxiv-4605 | 0808.2676 | Efficient and Robust Secure Aggregation for Sensor Networks | <|reference_start|>Efficient and Robust Secure Aggregation for Sensor Networks: Wireless Sensor Networks (WSNs) rely on in-network aggregation for efficiency, however, this comes at a price: A single adversary can severely influence the outcome by contributing an arbitrary partial aggregate value. Secure in-network aggregation can detect such manipulation. But as long as such faults persist, no aggregation result can be obtained. In contrast, the collection of individual sensor node values is robust and solves the problem of availability, yet in an inefficient way. Our work seeks to bridge this gap in secure data collection: We propose a system that enhances availability with an efficiency close to that of in-network aggregation. To achieve this, our scheme relies on costly operations to localize and exclude nodes that manipulate the aggregation, but \emph{only} when a failure is detected. The detection of aggregation disruptions and the removal of faulty nodes provides robustness. At the same time, after removing faulty nodes, the WSN can enjoy low cost (secure) aggregation. Thus, the high exclusion cost is amortized, and efficiency increases.<|reference_end|> | arxiv | @article{haghani2008efficient,
title={Efficient and Robust Secure Aggregation for Sensor Networks},
author={P. Haghani and P. Papadimitratos and M. Poturalski and K. Aberer and J.-P. Hubaux},
journal={IEEE ICNP Workshop on Secure Network Protocols (NPSec), pages 1-6,
Beijing, China, October 2007},
year={2008},
doi={10.1109/NPSEC.2007.4371623},
archivePrefix={arXiv},
eprint={0808.2676},
primaryClass={cs.CR cs.NI}
} | haghani2008efficient |
arxiv-4606 | 0808.2703 | Low-Signal-Energy Asymptotics of Capacity and Mutual Information for the Discrete-Time Poisson Channel | <|reference_start|>Low-Signal-Energy Asymptotics of Capacity and Mutual Information for the Discrete-Time Poisson Channel: The first terms of the low-signal-energy asymptotics for the mutual information in the discrete-time Poisson channel are derived and compared to an asymptotic expression of the capacity. In the presence of non-zero additive noise (either Poisson or geometric), the mutual information is concave at zero signal-energy and the minimum energy per bit is not attained at zero capacity. Fixed signal constellations which scale with the signal energy do not attain the minimum energy per bit. The minimum energy per bit is zero when additive Poisson noise is present and $\ew\log 2$ when additive geometric noise of mean $\ew$ is present.<|reference_end|> | arxiv | @article{martinez2008low-signal-energy,
title={Low-Signal-Energy Asymptotics of Capacity and Mutual Information for the
Discrete-Time Poisson Channel},
author={Alfonso Martinez},
journal={arXiv preprint arXiv:0808.2703},
year={2008},
archivePrefix={arXiv},
eprint={0808.2703},
primaryClass={cs.IT math.IT}
} | martinez2008low-signal-energy |
arxiv-4607 | 0808.2794 | Accelerating Scientific Computations with Mixed Precision Algorithms | <|reference_start|>Accelerating Scientific Computations with Mixed Precision Algorithms: On modern architectures, the performance of 32-bit operations is often at least twice as fast as the performance of 64-bit operations. By using a combination of 32-bit and 64-bit floating point arithmetic, the performance of many dense and sparse linear algebra algorithms can be significantly enhanced while maintaining the 64-bit accuracy of the resulting solution. The approach presented here can apply not only to conventional processors but also to other technologies such as Field Programmable Gate Arrays (FPGA), Graphical Processing Units (GPU), and the STI Cell BE processor. Results on modern processor architectures and the STI Cell BE are presented.<|reference_end|> | arxiv | @article{baboulin2008accelerating,
title={Accelerating Scientific Computations with Mixed Precision Algorithms},
author={Marc Baboulin and Alfredo Buttari and Jack Dongarra and Jakub Kurzak
and Julie Langou and Julien Langou and Piotr Luszczek and Stanimire Tomov},
journal={arXiv preprint arXiv:0808.2794},
year={2008},
doi={10.1016/j.cpc.2008.11.005},
archivePrefix={arXiv},
eprint={0808.2794},
primaryClass={cs.MS}
} | baboulin2008accelerating |
arxiv-4608 | 0808.2801 | Discretized Multinomial Distributions and Nash Equilibria in Anonymous Games | <|reference_start|>Discretized Multinomial Distributions and Nash Equilibria in Anonymous Games: We show that there is a polynomial-time approximation scheme for computing Nash equilibria in anonymous games with any fixed number of strategies (a very broad and important class of games), extending the two-strategy result of Daskalakis and Papadimitriou 2007. The approximation guarantee follows from a probabilistic result of more general interest: The distribution of the sum of n independent unit vectors with values ranging over {e1, e2, ...,ek}, where ei is the unit vector along dimension i of the k-dimensional Euclidean space, can be approximated by the distribution of the sum of another set of independent unit vectors whose probabilities of obtaining each value are multiples of 1/z for some integer z, and so that the variational distance of the two distributions is at most eps, where eps is bounded by an inverse polynomial in z and a function of k, but with no dependence on n. Our probabilistic result specifies the construction of a surprisingly sparse eps-cover -- under the total variation distance -- of the set of distributions of sums of independent unit vectors, which is of interest on its own right.<|reference_end|> | arxiv | @article{daskalakis2008discretized,
title={Discretized Multinomial Distributions and Nash Equilibria in Anonymous
Games},
author={Constantinos Daskalakis and Christos H. Papadimitriou},
journal={arXiv preprint arXiv:0808.2801},
year={2008},
doi={10.1109/FOCS.2008.84},
archivePrefix={arXiv},
eprint={0808.2801},
primaryClass={cs.GT}
} | daskalakis2008discretized |
arxiv-4609 | 0808.2827 | Fast Intrinsic Mode Decomposition and Filtering of Time Series Data | <|reference_start|>Fast Intrinsic Mode Decomposition and Filtering of Time Series Data: The intrinsic mode function (IMF) provides adaptive function bases for nonlinear and non-stationary time series data. A fast convergent iterative method is introduced in this paper to find the IMF components of the data, the method is faster and more predictable than the Empirical Mode Decomposition method devised by the author of Hilbert Huang Transform. The approach is to iteratively adjust the control points on the data function corresponding to the extrema of the refining IMF, the control points of the residue function are calculated as the median of the straight line segments passing through the data control points, the residue function is then constructed as the cubic spline function of the median points. The initial residue function is simply constructed as the straight line segments passing through the extrema of the first derivative of the data function. The refining IMF is the difference between the data function and the improved residue function. The IMF found reveals all the riding waves in the whole data set. A new data filtering method on frequency and amplitude of IMF is also presented with the similar approach of finding the residue on the part to be filtered out. The program to demonstrate the method is distributed under BSD open source license.<|reference_end|> | arxiv | @article{lu2008fast,
title={Fast Intrinsic Mode Decomposition and Filtering of Time Series Data},
author={Louis Yu Lu},
journal={arXiv preprint arXiv:0808.2827},
year={2008},
archivePrefix={arXiv},
eprint={0808.2827},
primaryClass={cs.NA}
} | lu2008fast |
arxiv-4610 | 0808.2833 | Efficient tests for equivalence of hidden Markov processes and quantum random walks | <|reference_start|>Efficient tests for equivalence of hidden Markov processes and quantum random walks: While two hidden Markov process (HMP) resp. quantum random walk (QRW) parametrizations can differ from one another, the stochastic processes arising from them can be equivalent. Here a polynomial-time algorithm is presented which can determine equivalence of two HMP parametrizations $\cM_1,\cM_2$ resp. two QRW parametrizations $\cQ_1,\cQ_2$ in time $O(|\S|\max(N_1,N_2)^{4})$, where $N_1,N_2$ are the number of hidden states in $\cM_1,\cM_2$ resp. the dimension of the state spaces associated with $\cQ_1,\cQ_2$, and $\S$ is the set of output symbols. Previously available algorithms for testing equivalence of HMPs were exponential in the number of hidden states. In case of QRWs, algorithms for testing equivalence had not yet been presented. The core subroutines of this algorithm can also be used to efficiently test hidden Markov processes and quantum random walks for ergodicity.<|reference_end|> | arxiv | @article{faigle2008efficient,
title={Efficient tests for equivalence of hidden Markov processes and quantum
random walks},
author={Ulrich Faigle and Alexander Sch{\"o}nhuth},
journal={IEEE Transactions on Information Theory, 57(3), 1746-1753, 2011},
year={2008},
doi={10.1109/TIT.2011.2104511},
archivePrefix={arXiv},
eprint={0808.2833},
primaryClass={cs.IT math.IT}
} | faigle2008efficient |
arxiv-4611 | 0808.2837 | List Decoding of Burst Errors | <|reference_start|>List Decoding of Burst Errors: A generalization of the Reiger bound is presented for the list decoding of burst errors. It is then shown that Reed-Solomon codes attain this bound.<|reference_end|> | arxiv | @article{roth2008list,
title={List Decoding of Burst Errors},
author={Ron M. Roth and Pascal O. Vontobel},
journal={arXiv preprint arXiv:0808.2837},
year={2008},
archivePrefix={arXiv},
eprint={0808.2837},
primaryClass={cs.IT cs.DM math.IT}
} | roth2008list |
arxiv-4612 | 0808.2904 | Investigation of the Zipf-plot of the extinct Meroitic language | <|reference_start|>Investigation of the Zipf-plot of the extinct Meroitic language: The ancient and extinct language Meroitic is investigated using Zipf's Law. In particular, since Meroitic is still undeciphered, the Zipf law analysis allows us to assess the quality of current texts and possible avenues for future investigation using statistical techniques.<|reference_end|> | arxiv | @article{smith2008investigation,
title={Investigation of the Zipf-plot of the extinct Meroitic language},
author={Reginald D. Smith},
journal={Glottometrics 15, 2007, 53-61},
year={2008},
archivePrefix={arXiv},
eprint={0808.2904},
primaryClass={cs.CL}
} | smith2008investigation |
arxiv-4613 | 0808.2931 | Spatial planning with constraints on translational distances between geometric objects | <|reference_start|>Spatial planning with constraints on translational distances between geometric objects: The main constraint on relative position of geometric objects, used in spatial planning for computing the C-space maps (for example, in robotics, CAD, and packaging), is the relative non-overlapping of objects. This is the simplest constraint in which the minimum translational distance between objects is greater than zero, or more generally, than some positive value. We present a technique, based on the Minkowski operations, for generating the translational C-space maps for spatial planning with more general and more complex constraints on the relative position of geometric objects, such as constraints on various types (not only on the minimum) of the translational distances between objects. The developed technique can also be used, respectively, for spatial planning with constraints on translational distances in a given direction, and rotational distances between geometric objects, as well as for spatial planning with given dynamic geometric situation of moving objects.<|reference_end|> | arxiv | @article{pustylnik2008spatial,
title={Spatial planning with constraints on translational distances between
geometric objects},
author={Gennady Pustylnik},
journal={arXiv preprint arXiv:0808.2931},
year={2008},
archivePrefix={arXiv},
eprint={0808.2931},
primaryClass={cs.CG cs.RO}
} | pustylnik2008spatial |
arxiv-4614 | 0808.2953 | Declarative Combinatorics: Isomorphisms, Hylomorphisms and Hereditarily Finite Data Types in Haskell | <|reference_start|>Declarative Combinatorics: Isomorphisms, Hylomorphisms and Hereditarily Finite Data Types in Haskell: This paper is an exploration in a functional programming framework of {\em isomorphisms} between elementary data types (natural numbers, sets, multisets, finite functions, permutations binary decision diagrams, graphs, hypergraphs, parenthesis languages, dyadic rationals, primes, DNA sequences etc.) and their extension to hereditarily finite universes through {\em hylomorphisms} derived from {\em ranking/unranking} and {\em pairing/unpairing} operations. An embedded higher order {\em combinator language} provides any-to-any encodings automatically. Besides applications to experimental mathematics, a few examples of ``free algorithms'' obtained by transferring operations between data types are shown. Other applications range from stream iterators on combinatorial objects to self-delimiting codes, succinct data representations and generation of random instances. The paper covers 59 data types and, through the use of the embedded combinator language, provides 3540 distinct bijective transformations between them. The self-contained source code of the paper, as generated from a literate Haskell program, is available at \url{http://logic.csci.unt.edu/tarau/research/2008/fISO.zip}. {\bf Keywords}: Haskell data representations, data type isomorphisms, declarative combinatorics, computational mathematics, Ackermann encoding, G\"{o}del numberings, arithmetization, ranking/unranking, hereditarily finite sets, functions and permutations, encodings of binary decision diagrams, dyadic rationals, DNA encodings<|reference_end|> | arxiv | @article{tarau2008declarative,
title={Declarative Combinatorics: Isomorphisms, Hylomorphisms and Hereditarily
Finite Data Types in Haskell},
author={Paul Tarau},
journal={arXiv preprint arXiv:0808.2953},
year={2008},
archivePrefix={arXiv},
eprint={0808.2953},
primaryClass={cs.PL cs.DS}
} | tarau2008declarative |
arxiv-4615 | 0808.2964 | Estimating the Lengths of Memory Words | <|reference_start|>Estimating the Lengths of Memory Words: For a stationary stochastic process $\{X_n\}$ with values in some set $A$, a finite word $w \in A^K$ is called a memory word if the conditional probability of $X_0$ given the past is constant on the cylinder set defined by $X_{-K}^{-1}=w$. It is a called a minimal memory word if no proper suffix of $w$ is also a memory word. For example in a $K$-step Markov processes all words of length $K$ are memory words but not necessarily minimal. We consider the problem of determining the lengths of the longest minimal memory words and the shortest memory words of an unknown process $\{X_n\}$ based on sequentially observing the outputs of a single sample $\{\xi_1,\xi_2,...\xi_n\}$. We will give a universal estimator which converges almost surely to the length of the longest minimal memory word and show that no such universal estimator exists for the length of the shortest memory word. The alphabet $A$ may be finite or countable.<|reference_end|> | arxiv | @article{morvai2008estimating,
title={Estimating the Lengths of Memory Words},
author={Gusztav Morvai and Benjamin Weiss},
journal={IEEE Transactions on Information Theory, Vol. 54, No. 8. (2008),
pp. 3804-3807},
year={2008},
archivePrefix={arXiv},
eprint={0808.2964},
primaryClass={cs.IT math.IT}
} | morvai2008estimating |
arxiv-4616 | 0808.2984 | Building an interpretable fuzzy rule base from data using Orthogonal Least Squares Application to a depollution problem | <|reference_start|>Building an interpretable fuzzy rule base from data using Orthogonal Least Squares Application to a depollution problem: In many fields where human understanding plays a crucial role, such as bioprocesses, the capacity of extracting knowledge from data is of critical importance. Within this framework, fuzzy learning methods, if properly used, can greatly help human experts. Amongst these methods, the aim of orthogonal transformations, which have been proven to be mathematically robust, is to build rules from a set of training data and to select the most important ones by linear regression or rank revealing techniques. The OLS algorithm is a good representative of those methods. However, it was originally designed so that it only cared about numerical performance. Thus, we propose some modifications of the original method to take interpretability into account. After recalling the original algorithm, this paper presents the changes made to the original method, then discusses some results obtained from benchmark problems. Finally, the algorithm is applied to a real-world fault detection depollution problem.<|reference_end|> | arxiv | @article{destercke2008building,
title={Building an interpretable fuzzy rule base from data using Orthogonal
Least Squares Application to a depollution problem},
author={S{\'e}bastien Destercke (IRSN, IRIT) and Serge Guillaume (ITAP) and
Brigitte Charnomordic (ASB)},
journal={Fuzzy Sets and Systems 158, 18 (2007) 2078-2094},
year={2008},
doi={10.1016/j.fss.2007.04.026},
archivePrefix={arXiv},
eprint={0808.2984},
primaryClass={cs.LG cs.AI}
} | destercke2008building |
arxiv-4617 | 0808.3003 | Codes Associated with Orthogonal Groups and Power Moments of Kloosterman Sums | <|reference_start|>Codes Associated with Orthogonal Groups and Power Moments of Kloosterman Sums: In this paper, we construct three binary linear codes $C(SO^{-}(2,q))$, $C(O^{-}(2,q))$, $C(SO^{-}(4,q))$, respectively associated with the orthogonal groups $SO^{-}(2,q)$, $O^{-}(2,q)$, $SO^{-}(4,q)$, with $q$ powers of two. Then we obtain recursive formulas for the power moments of Kloosterman and 2-dimensional Kloosterman sums in terms of the frequencies of weights in the codes. This is done via Pless power moment identity and by utilizing the explicit expressions of Gauss sums for the orthogonal groups. We emphasize that, when the recursive formulas for the power moments of Kloosterman sums are compared, the present one is computationally more effective than the previous one constructed from the special linear group $SL(2,q)$. We illustrate our results with some examples.<|reference_end|> | arxiv | @article{kim2008codes,
title={Codes Associated with Orthogonal Groups and Power Moments of Kloosterman
Sums},
author={Dae San Kim (Sogang University)},
journal={arXiv preprint arXiv:0808.3003},
year={2008},
archivePrefix={arXiv},
eprint={0808.3003},
primaryClass={math.NT cs.IT math.IT}
} | kim2008codes |
arxiv-4618 | 0808.3019 | Data Mining Using High Performance Data Clouds: Experimental Studies Using Sector and Sphere | <|reference_start|>Data Mining Using High Performance Data Clouds: Experimental Studies Using Sector and Sphere: We describe the design and implementation of a high performance cloud that we have used to archive, analyze and mine large distributed data sets. By a cloud, we mean an infrastructure that provides resources and/or services over the Internet. A storage cloud provides storage services, while a compute cloud provides compute services. We describe the design of the Sector storage cloud and how it provides the storage services required by the Sphere compute cloud. We also describe the programming paradigm supported by the Sphere compute cloud. Sector and Sphere are designed for analyzing large data sets using computer clusters connected with wide area high performance networks (for example, 10+ Gb/s). We describe a distributed data mining application that we have developed using Sector and Sphere. Finally, we describe some experimental studies comparing Sector/Sphere to Hadoop.<|reference_end|> | arxiv | @article{grossman2008data,
title={Data Mining Using High Performance Data Clouds: Experimental Studies
Using Sector and Sphere},
author={Robert L Grossman and Yunhong Gu},
journal={arXiv preprint arXiv:0808.3019},
year={2008},
archivePrefix={arXiv},
eprint={0808.3019},
primaryClass={cs.DC}
} | grossman2008data |
arxiv-4619 | 0808.3038 | Tschirnhaus-Weierstrass curves | <|reference_start|>Tschirnhaus-Weierstrass curves: We define the concept of Tschirnhaus-Weierstrass curve, named after the Weierstrass form of an elliptic curve and Tschirnhaus transformations. Every pointed curve has a Tschirnhaus-Weierstrass form, and this representation is unique up to a scaling of variables. This is useful for computing isomorphisms between curves.<|reference_end|> | arxiv | @article{schicho2008tschirnhaus-weierstrass,
title={Tschirnhaus-Weierstrass curves},
author={Josef Schicho and David Sevilla},
journal={arXiv preprint arXiv:0808.3038},
year={2008},
archivePrefix={arXiv},
eprint={0808.3038},
primaryClass={math.AG cs.SC}
} | schicho2008tschirnhaus-weierstrass |
arxiv-4620 | 0808.3100 | Optimizing Compiler for Engineering Problems | <|reference_start|>Optimizing Compiler for Engineering Problems: New information technologies provide a lot of prospects for performance improvement. One of them is "Dynamic Source Code Generation and Compilation". This article shows how this way provides high performance for engineering problems.<|reference_end|> | arxiv | @article{ivankov2008optimizing,
title={Optimizing Compiler for Engineering Problems},
author={Petr R. Ivankov},
journal={arXiv preprint arXiv:0808.3100},
year={2008},
archivePrefix={arXiv},
eprint={0808.3100},
primaryClass={cs.PF}
} | ivankov2008optimizing |
arxiv-4621 | 0808.3109 | n-ary Fuzzy Logic and Neutrosophic Logic Operators | <|reference_start|>n-ary Fuzzy Logic and Neutrosophic Logic Operators: We extend Knuth's 16 Boolean binary logic operators to fuzzy logic and neutrosophic logic binary operators. Then we generalize them to n-ary fuzzy logic and neutrosophic logic operators using the smarandache codification of the Venn diagram and a defined vector neutrosophic law. In such way, new operators in neutrosophic logic/set/probability are built.<|reference_end|> | arxiv | @article{smarandache2008n-ary,
title={n-ary Fuzzy Logic and Neutrosophic Logic Operators},
author={Florentin Smarandache and V. Christianto},
journal={Studies in Logic, Grammar and Rethoric [Belarus], 17 (30), pp.
1-16, 2009.},
year={2008},
archivePrefix={arXiv},
eprint={0808.3109},
primaryClass={cs.AI}
} | smarandache2008n-ary |
arxiv-4622 | 0808.3112 | On the decidability of semigroup freeness | <|reference_start|>On the decidability of semigroup freeness: This paper deals with the decidability of semigroup freeness. More precisely, the freeness problem over a semigroup S is defined as: given a finite subset X of S, decide whether each element of S has at most one factorization over X. To date, the decidabilities of two freeness problems have been closely examined. In 1953, Sardinas and Patterson proposed a now famous algorithm for the freeness problem over the free monoid. In 1991, Klarner, Birget and Satterfield proved the undecidability of the freeness problem over three-by-three integer matrices. Both results led to the publication of many subsequent papers. The aim of the present paper is three-fold: (i) to present general results concerning freeness problems, (ii) to study the decidability of freeness problems over various particular semigroups (special attention is devoted to multiplicative matrix semigroups), and (iii) to propose precise, challenging open questions in order to promote the study of the topic.<|reference_end|> | arxiv | @article{cassaigne2008on,
title={On the decidability of semigroup freeness},
author={Julien Cassaigne and Francois Nicolas},
journal={arXiv preprint arXiv:0808.3112},
year={2008},
archivePrefix={arXiv},
eprint={0808.3112},
primaryClass={cs.DM}
} | cassaigne2008on |
arxiv-4623 | 0808.3145 | Approximate capacity of the two-way relay channel: A deterministic approach | <|reference_start|>Approximate capacity of the two-way relay channel: A deterministic approach: We study the capacity of the full-duplex bidirectional (or two-way) relay channel with two nodes and one relay. The channels in the forward direction are assumed to be different (in general) than the channels in the backward direction, i.e. channel reciprocity is not assumed. We use the recently proposed deterministic approach to capture the essence of the problem and to determine a good transmission and relay strategy for the Gaussian channel. Depending on the ratio of the individual channel gains, we propose to use either a simple amplify-and-forward or a particular superposition coding strategy at the relay. We analyze the achievable rate region and show that the scheme achieves to within 3 bits the cut-set bound for all values of channel gains.<|reference_end|> | arxiv | @article{avestimehr2008approximate,
title={Approximate capacity of the two-way relay channel: A deterministic
approach},
author={Amir Salman Avestimehr and Aydin Sezgin and David N.C. Tse},
journal={arXiv preprint arXiv:0808.3145},
year={2008},
archivePrefix={arXiv},
eprint={0808.3145},
primaryClass={cs.IT math.IT}
} | avestimehr2008approximate |
arxiv-4624 | 0808.3166 | Privacy Preserving Association Rule Mining Revisited | <|reference_start|>Privacy Preserving Association Rule Mining Revisited: The privacy preserving data mining (PPDM) has been one of the most interesting, yet challenging, research issues. In the PPDM, we seek to outsource our data for data mining tasks to a third party while maintaining its privacy. In this paper, we revise one of the recent PPDM schemes (i.e., FS) which is designed for privacy preserving association rule mining (PP-ARM). Our analysis shows some limitations of the FS scheme in term of its storage requirements guaranteeing a reasonable privacy standard and the high computation as well. On the other hand, we introduce a robust definition of privacy that considers the average case privacy and motivates the study of a weakness in the structure of FS (i.e., fake transactions filtering). In order to overcome this limit, we introduce a hybrid scheme that considers both privacy and resources guidelines. Experimental results show the efficiency of our proposed scheme over the previously introduced one and opens directions for further development.<|reference_end|> | arxiv | @article{mohaisen2008privacy,
title={Privacy Preserving Association Rule Mining Revisited},
author={Abedelaziz Mohaisen and Dowon Hong},
journal={arXiv preprint arXiv:0808.3166},
year={2008},
archivePrefix={arXiv},
eprint={0808.3166},
primaryClass={cs.CR}
} | mohaisen2008privacy |
arxiv-4625 | 0808.3196 | Queue-length Variations In A Two-Restaurant Problem | <|reference_start|>Queue-length Variations In A Two-Restaurant Problem: This paper attempts to find out numerically the distribution of the queue-length ratio in the context of a model of preferential attachment. Here we consider two restaurants only and a large number of customers (agents) who come to these restaurants. Each day the same number of agents sequentially arrives and decides which restaurant to enter. If all the agents literally follow the crowd then there is no difference between this model and the famous `P\'olya's Urn' model. But as agents alter their strategies different kind of dynamics of the model is seen. It is seen from numerical results that the existence of a distribution of the fixed points is quite robust and it is also seen that in some cases the variations in the ratio of the queue-lengths follow a power-law.<|reference_end|> | arxiv | @article{chakrabarti2008queue-length,
title={Queue-length Variations In A Two-Restaurant Problem},
author={Anindya S. Chakrabarti and Bikas K. Chakrabarti},
journal={arXiv preprint arXiv:0808.3196},
year={2008},
archivePrefix={arXiv},
eprint={0808.3196},
primaryClass={cs.GT q-fin.TR}
} | chakrabarti2008queue-length |
arxiv-4626 | 0808.3197 | On the Monotonicity of Work Function in k-Server Conjecture | <|reference_start|>On the Monotonicity of Work Function in k-Server Conjecture: This paper presents a mistake in work function algorithm of k-server conjecture. That is, the monotonicity of the work function is not always true.<|reference_end|> | arxiv | @article{chen2008on,
title={On the Monotonicity of Work Function in k-Server Conjecture},
author={Ming-Zhe Chen},
journal={arXiv preprint arXiv:0808.3197},
year={2008},
archivePrefix={arXiv},
eprint={0808.3197},
primaryClass={cs.DS}
} | chen2008on |
arxiv-4627 | 0808.3203 | Sex is always well worth its two-fold cost | <|reference_start|>Sex is always well worth its two-fold cost: Sex is considered as an evolutionary paradox, since its evolutionary advantage does not necessarily overcome the two fold cost of sharing half of one's offspring's genome with another member of the population. Here we demonstrate that sexual reproduction can be evolutionary stable even when its Darwinian fitness is twice as low when compared to the fitness of asexual mutants. We also show that more than two sexes are always evolutionary unstable. Our approach generalizes the evolutionary game theory to analyze species whose members are able to sense the sexual state of their conspecifics and to switch sexes consequently. The widespread emergence and maintenance of sex follows therefore from its co-evolution with even more widespread environmental sensing abilities.<|reference_end|> | arxiv | @article{feigel2008sex,
title={Sex is always well worth its two-fold cost},
author={Alexander Feigel and Avraham Englander and Assaf Engel},
journal={arXiv preprint arXiv:0808.3203},
year={2008},
doi={10.1371/journal.pone.0006012},
archivePrefix={arXiv},
eprint={0808.3203},
primaryClass={q-bio.PE cs.GT physics.bio-ph}
} | feigel2008sex |
arxiv-4628 | 0808.3214 | The discrete Fourier transform: A canonical basis of eigenfunctions | <|reference_start|>The discrete Fourier transform: A canonical basis of eigenfunctions: The discrete Fourier transform (DFT) is an important operator which acts on the Hilbert space of complex valued functions on the ring Z/NZ. In the case where N=p is an odd prime number, we exhibit a canonical basis of eigenvectors for the DFT. The transition matrix from the standard basis to the canonical basis defines a novel transform which we call the "discrete oscillator transform" (DOT for short). Finally, we describe a fast algorithm for computing the DOT in certain cases.<|reference_end|> | arxiv | @article{gurevich2008the,
title={The discrete Fourier transform: A canonical basis of eigenfunctions},
author={Shamgar Gurevich and Ronny Hadani and Nir Sochen},
journal={arXiv preprint arXiv:0808.3214},
year={2008},
archivePrefix={arXiv},
eprint={0808.3214},
primaryClass={cs.IT cs.DM math.IT math.RT}
} | gurevich2008the |
arxiv-4629 | 0808.3222 | Analysis of the postulates produced by Karp's Theorem | <|reference_start|>Analysis of the postulates produced by Karp's Theorem: This is the final article in a series of four articles. Richard Karp has proven that a deterministic polynomial time solution to K-SAT will result in a deterministic polynomial time solution to all NP-Complete problems. However, it is demonstrated that a deterministic polynomial time solution to any NP-Complete problem does not necessarily produce a deterministic polynomial time solution to all NP-Complete problems.<|reference_end|> | arxiv | @article{meek2008analysis,
title={Analysis of the postulates produced by Karp's Theorem},
author={Jerrald Meek},
journal={arXiv preprint arXiv:0808.3222},
year={2008},
archivePrefix={arXiv},
eprint={0808.3222},
primaryClass={cs.CC}
} | meek2008analysis |
arxiv-4630 | 0808.3230 | Phase Transitions on Fixed Connected Graphs and Random Graphs in the Presence of Noise | <|reference_start|>Phase Transitions on Fixed Connected Graphs and Random Graphs in the Presence of Noise: In this paper, we study the phase transition behavior emerging from the interactions among multiple agents in the presence of noise. We propose a simple discrete-time model in which a group of non-mobile agents form either a fixed connected graph or a random graph process, and each agent, taking bipolar value either +1 or -1, updates its value according to its previous value and the noisy measurements of the values of the agents connected to it. We present proofs for the occurrence of the following phase transition behavior: At a noise level higher than some threshold, the system generates symmetric behavior (vapor or melt of magnetization) or disagreement; whereas at a noise level lower than the threshold, the system exhibits spontaneous symmetry breaking (solid or magnetization) or consensus. The threshold is found analytically. The phase transition occurs for any dimension. Finally, we demonstrate the phase transition behavior and all analytic results using simulations. This result may be found useful in the study of the collective behavior of complex systems under communication constraints.<|reference_end|> | arxiv | @article{liu2008phase,
title={Phase Transitions on Fixed Connected Graphs and Random Graphs in the
Presence of Noise},
author={Jialing Liu and Vikas Yadav and Hullas Sehgal and Joshua M. Olson and
Haifeng Liu and Nicola Elia},
journal={IEEE TRANSACTIONS ON AUTOMATIC CONTROL, VOL. 53, NO. 8, 1817-1825,
SEPTEMBER 2008},
year={2008},
doi={10.1109/TAC.2008.929382},
archivePrefix={arXiv},
eprint={0808.3230},
primaryClass={math.OC cs.IT math.IT}
} | liu2008phase |
arxiv-4631 | 0808.3231 | Multi-Instance Multi-Label Learning | <|reference_start|>Multi-Instance Multi-Label Learning: In this paper, we propose the MIML (Multi-Instance Multi-Label learning) framework where an example is described by multiple instances and associated with multiple class labels. Compared to traditional learning frameworks, the MIML framework is more convenient and natural for representing complicated objects which have multiple semantic meanings. To learn from MIML examples, we propose the MimlBoost and MimlSvm algorithms based on a simple degeneration strategy, and experiments show that solving problems involving complicated objects with multiple semantic meanings in the MIML framework can lead to good performance. Considering that the degeneration process may lose information, we propose the D-MimlSvm algorithm which tackles MIML problems directly in a regularization framework. Moreover, we show that even when we do not have access to the real objects and thus cannot capture more information from real objects by using the MIML representation, MIML is still useful. We propose the InsDif and SubCod algorithms. InsDif works by transforming single-instances into the MIML representation for learning, while SubCod works by transforming single-label examples into the MIML representation for learning. Experiments show that in some tasks they are able to achieve better performance than learning the single-instances or single-label examples directly.<|reference_end|> | arxiv | @article{zhou2008multi-instance,
title={Multi-Instance Multi-Label Learning},
author={Zhi-Hua Zhou and Min-Ling Zhang and Sheng-Jun Huang and Yu-Feng Li},
journal={Artificial Intelligence, 2012, 176(1): 2291-2320},
year={2008},
doi={10.1016/j.artint.2011.10.002},
archivePrefix={arXiv},
eprint={0808.3231},
primaryClass={cs.LG cs.AI}
} | zhou2008multi-instance |
arxiv-4632 | 0808.3244 | Duality between quasi-concave functions and monotone linkage functions | <|reference_start|>Duality between quasi-concave functions and monotone linkage functions: A function $F$ defined on all subsets of a finite ground set $E$ is quasi-concave if $F(X\cup Y)\geq\min\{F(X),F(Y)\}$ for all $X,Y\subset E$. Quasi-concave functions arise in many fields of mathematics and computer science such as social choice, theory of graph, data mining, clustering and other fields. The maximization of quasi-concave function takes, in general, exponential time. However, if a quasi-concave function is defined by associated monotone linkage function then it can be optimized by the greedy type algorithm in a polynomial time. Quasi-concave functions defined as minimum values of monotone linkage functions were considered on antimatroids, where the correspondence between quasi-concave and bottleneck functions was shown (Kempner & Levit, 2003). The goal of this paper is to analyze quasi-concave functions on different families of sets and to investigate their relationships with monotone linkage functions.<|reference_end|> | arxiv | @article{kempner2008duality,
title={Duality between quasi-concave functions and monotone linkage functions},
author={Yulia Kempner and Vadim E. Levit},
journal={arXiv preprint arXiv:0808.3244},
year={2008},
archivePrefix={arXiv},
eprint={0808.3244},
primaryClass={math.CO cs.DM}
} | kempner2008duality |
arxiv-4633 | 0808.3281 | On the diagonalization of the discrete Fourier transform | <|reference_start|>On the diagonalization of the discrete Fourier transform: The discrete Fourier transform (DFT) is an important operator which acts on the Hilbert space of complex valued functions on the ring Z/NZ. In the case where N=p is an odd prime number, we exhibit a canonical basis of eigenvectors for the DFT. The transition matrix from the standard basis to the canonical basis defines a novel transform which we call the discrete oscillator transform (DOT for short). Finally, we describe a fast algorithm for computing the discrete oscillator transform in certain cases.<|reference_end|> | arxiv | @article{gurevich2008on,
title={On the diagonalization of the discrete Fourier transform},
author={Shamgar Gurevich (UC Berkeley) and Ronny Hadani (University of
Chicago)},
journal={arXiv preprint arXiv:0808.3281},
year={2008},
archivePrefix={arXiv},
eprint={0808.3281},
primaryClass={cs.IT cs.DM math.IT math.RT}
} | gurevich2008on |
arxiv-4634 | 0808.3292 | Network Motifs in Object-Oriented Software Systems | <|reference_start|>Network Motifs in Object-Oriented Software Systems: Nowadays, software has become a complex piece of work that may be beyond our control. Understanding how software evolves over time plays an important role in controlling software development processes. Recently, a few researchers found the quantitative evidence of structural duplication in software systems or web applications, which is similar to the evolutionary trend found in biological systems. To investigate the principles or rules of software evolution, we introduce the relevant theories and methods of complex networks into structural evolution and change of software systems. According to the results of our experiment on network motifs, we find that the stability of a motif shows positive correlation with its abundance and a motif with high Z score tends to have stable structure. These findings imply that the evolution of software systems is based on functional cloning as well as structural duplication and tends to be structurally stable. So, the work presented in this paper will be useful for the analysis of structural changes of software systems in reverse engineering.<|reference_end|> | arxiv | @article{ma2008network,
title={Network Motifs in Object-Oriented Software Systems},
author={Yutao Ma and Keqing He and Jing Liu},
journal={Dynamics of Continuous, Discrete and Impulsive Systems (Series B:
Applications \& Algorithms), 2007, 14(S6): 166-172},
year={2008},
archivePrefix={arXiv},
eprint={0808.3292},
primaryClass={cs.SE}
} | ma2008network |
arxiv-4635 | 0808.3296 | Confirmation Bias and the Open Access Advantage: Some Methodological Suggestions for the Davis Citation Study | <|reference_start|>Confirmation Bias and the Open Access Advantage: Some Methodological Suggestions for the Davis Citation Study: Davis (2008) analyzes citations from 2004-2007 in 11 biomedical journals. 15% of authors paid to make them Open Access (OA). The outcome is a significant OA citation Advantage, but a small one (21%). The author infers that the OA advantage has been shrinking yearly, but the data suggest the opposite. Further analyses are necessary: (1) Not just author-choice (paid) OA but Free OA self-archiving needs to be taken into account rather than being counted as non-OA. (2) proportion of OA articles per journal per year needs to be reported and taken into account. (3) The Journal Impact Factor and the relation between the size of the OA Advantage article 'citation-bracket' need to be taken into account. (4) The sample-size for the highest-impact, largest-sample journal analyzed, PNAS, is restricted and excluded from some of the analyses. The full PNAS dataset is needed. (5) The interaction between OA and time, 2004-2007, is based on retrospective data from a June 2008 total cumulative citation count. The dates of both the cited articles and the citing articles need to be taken into account. The author proposes that author self-selection bias for is the primary cause of the observed OA Advantage, but this study does not test this or of any of the other potential causal factors. The author suggests that paid OA is not worth the cost, per extra citation. But with OA self-archiving both the OA and the extra citations are free.<|reference_end|> | arxiv | @article{harnad2008confirmation,
title={Confirmation Bias and the Open Access Advantage: Some Methodological
Suggestions for the Davis Citation Study},
author={Stevan Harnad},
journal={arXiv preprint arXiv:0808.3296},
year={2008},
archivePrefix={arXiv},
eprint={0808.3296},
primaryClass={cs.DL cs.DB}
} | harnad2008confirmation |
arxiv-4636 | 0808.3307 | Proving Noninterference by a Fully Complete Translation to the Simply Typed lambda-calculus | <|reference_start|>Proving Noninterference by a Fully Complete Translation to the Simply Typed lambda-calculus: Tse and Zdancewic have formalized the notion of noninterference for Abadi et al.'s DCC in terms of logical relations and given a proof of noninterference by reduction to parametricity of System F. Unfortunately, their proof contains errors in a key lemma that their translation from DCC to System F preserves the logical relations defined for both calculi. In fact, we have found a counterexample for it. In this article, instead of DCC, we prove noninterference for sealing calculus, a new variant of DCC, by reduction to the basic lemma of a logical relation for the simply typed lambda-calculus, using a fully complete translation to the simply typed lambda-calculus. Full completeness plays an important role in showing preservation of the two logical relations through the translation. Also, we investigate relationship among sealing calculus, DCC, and an extension of DCC by Tse and Zdancewic and show that the first and the last of the three are equivalent.<|reference_end|> | arxiv | @article{shikuma2008proving,
title={Proving Noninterference by a Fully Complete Translation to the Simply
Typed lambda-calculus},
author={Naokata Shikuma and Atsushi Igarashi},
journal={Logical Methods in Computer Science, Volume 4, Issue 3 (September
20, 2008) lmcs:683},
year={2008},
doi={10.2168/LMCS-4(3:10)2008},
archivePrefix={arXiv},
eprint={0808.3307},
primaryClass={cs.PL cs.CR}
} | shikuma2008proving |
arxiv-4637 | 0808.3331 | Efficient algorithms for the basis of finite Abelian groups | <|reference_start|>Efficient algorithms for the basis of finite Abelian groups: Let $G$ be a finite abelian group $G$ with $N$ elements. In this paper we give a O(N) time algorithm for computing a basis of $G$. Furthermore, we obtain an algorithm for computing a basis from a generating system of $G$ with $M$ elements having time complexity $O(M\sum_{p|N} e(p)\lceil p^{1/2}\rceil^{\mu(p)})$, where $p$ runs over all the prime divisors of $N$, and $p^{e(p)}$, $\mu(p)$ are the exponent and the number of cyclic groups which are direct factors of the $p$-primary component of $G$, respectively. In case where $G$ is a cyclic group having a generating system with $M$ elements, a $O(MN^{\epsilon})$ time algorithm for the computation of a basis of $G$ is obtained.<|reference_end|> | arxiv | @article{karagiorgos2008efficient,
title={Efficient algorithms for the basis of finite Abelian groups},
author={Gregory Karagiorgos and Dimitrios Poulakis},
journal={arXiv preprint arXiv:0808.3331},
year={2008},
archivePrefix={arXiv},
eprint={0808.3331},
primaryClass={cs.DS cs.CC}
} | karagiorgos2008efficient |
arxiv-4638 | 0808.3386 | Linear Programming Formulation of the Boolean Satisfiability Problem | <|reference_start|>Linear Programming Formulation of the Boolean Satisfiability Problem: In this paper, we present a new, graph-based modeling approach and a polynomial-sized linear programming (LP) formulation of the Boolean satisfiability problem (SAT). The approach is illustrated with a numerical example.<|reference_end|> | arxiv | @article{diaby2008linear,
title={Linear Programming Formulation of the Boolean Satisfiability Problem},
author={Moustapha Diaby},
journal={arXiv preprint arXiv:0808.3386},
year={2008},
archivePrefix={arXiv},
eprint={0808.3386},
primaryClass={cs.DM cs.CC}
} | diaby2008linear |
arxiv-4639 | 0808.3418 | Jamming in Fixed-Rate Wireless Systems with Power Constraints - Part II: Parallel Slow Fading Channels | <|reference_start|>Jamming in Fixed-Rate Wireless Systems with Power Constraints - Part II: Parallel Slow Fading Channels: This is the second part of a two-part paper that studies the problem of jamming in a fixed-rate transmission system with fading. In the first part, we studied the scenario with a fast fading channel, and found Nash equilibria of mixed strategies for short term power constraints, and for average power constraints with and without channel state information (CSI) feedback. We also solved the equally important maximin and minimax problems with pure strategies. Whenever we dealt with average power constraints, we decomposed the problem into two levels of power control, which we solved individually. In this second part of the paper, we study the scenario with a parallel, slow fading channel, which usually models multi-carrier transmissions, such as OFDM. Although the framework is similar as the one in Part I \cite{myself3}, dealing with the slow fading requires more intricate techniques. Unlike in the fast fading scenario, where the frames supporting the transmission of the codewords were equivalent and completely characterized by the channel statistics, in our present scenario the frames are unique, and characterized by a specific set of channel realizations. This leads to more involved inter-frame power allocation strategies, and in some cases even to the need for a third level of power control. We also show that for parallel slow fading channels, the CSI feedback helps in the battle against jamming, as evidenced by the significant degradation to system performance when CSI is not sent back. 
We expect this degradation to decrease as the number of parallel channels $M$ increases, until it becomes marginal for $M\to \infty$ (which can be considered as the case in Part I).<|reference_end|> | arxiv | @article{amariucai2008jamming,
title={Jamming in Fixed-Rate Wireless Systems with Power Constraints - Part II:
Parallel Slow Fading Channels},
author={George T. Amariucai, Shuangqing Wei and Rajgopal Kannan},
journal={arXiv preprint arXiv:0808.3418},
year={2008},
archivePrefix={arXiv},
eprint={0808.3418},
primaryClass={cs.IT cs.CR math.IT}
} | amariucai2008jamming |
arxiv-4640 | 0808.3431 | Jamming in Fixed-Rate Wireless Systems with Power Constraints - Part I: Fast Fading Channels | <|reference_start|>Jamming in Fixed-Rate Wireless Systems with Power Constraints - Part I: Fast Fading Channels: This is the first part of a two-part paper that studies the problem of jamming in a fixed-rate transmission system with fading. Both transmitter and jammer are subject to power constraints which can be enforced over each codeword short-term / peak) or over all codewords (long-term / average), hence generating different scenarios. All our jamming problems are formulated as zero-sum games, having the probability of outage as pay-off function and power control functions as strategies. The paper aims at providing a comprehensive coverage of these problems, under fast and slow fading, peak and average power constraints, pure and mixed strategies, with and without channel state information (CSI) feedback. In this first part we study the fast fading scenario. We first assume full CSI to be available to all parties. For peak power constraints, a Nash equilibrium of pure strategies is found. For average power constraints, both pure and mixed strategies are investigated. With pure strategies, we derive the optimal power control functions for both intra-frame and inter-frame power allocation. Maximin and minimax solutions are found and shown to be different, which implies the non-existence of a saddle point. In addition we provide alternative perspectives in obtaining the optimal intra-frame power control functions under the long-term power constraints. With mixed strategies, the Nash equilibrium is found by solving the generalized form of an older problem dating back to Bell and Cover \cite{bell}. Finally, we derive a Nash equilibrium of the game in which no CSI is fed back from the receiver. 
We show that full channel state information brings only a very slight improvement in the system's performance.<|reference_end|> | arxiv | @article{amariucai2008jamming,
title={Jamming in Fixed-Rate Wireless Systems with Power Constraints - Part I:
Fast Fading Channels},
author={George T. Amariucai and Shuangqing Wei},
journal={arXiv preprint arXiv:0808.3431},
year={2008},
archivePrefix={arXiv},
eprint={0808.3431},
primaryClass={cs.IT cs.CR math.IT}
} | amariucai2008jamming |
arxiv-4641 | 0808.3453 | Codes on hypergraphs | <|reference_start|>Codes on hypergraphs: Codes on hypergraphs are an extension of the well-studied family of codes on bipartite graphs. Bilu and Hoory (2004) constructed an explicit family of codes on regular t-partite hypergraphs whose minimum distance improves earlier estimates of the distance of bipartite-graph codes. They also suggested a decoding algorithm for such codes and estimated its error-correcting capability. In this paper we study two aspects of hypergraph codes. First, we compute the weight enumerators of several ensembles of such codes, establishing conditions under which they attain the Gilbert-Varshamov bound and deriving estimates of their distance. In particular, we show that this bound is attained by codes constructed on a fixed bipartite graph with a large spectral gap. We also suggest a new decoding algorithm of hypergraph codes that corrects a constant fraction of errors, improving upon the algorithm of Bilu and Hoory.<|reference_end|> | arxiv | @article{barg2008codes,
title={Codes on hypergraphs},
author={Alexander Barg and Arya Mazumdar and Gilles Z{\'e}mor},
journal={Advances in Mathematics of Communications (AMC), Vol. 2, No 4,
(2008) pp. 433 - 450.},
year={2008},
doi={10.3934/amc.2008.2.433},
archivePrefix={arXiv},
eprint={0808.3453},
primaryClass={cs.IT math.IT}
} | barg2008codes |
arxiv-4642 | 0808.3502 | Cooperative Protocols for Random Access Networks | <|reference_start|>Cooperative Protocols for Random Access Networks: Cooperative communications have emerged as a significant concept to improve reliability and throughput in wireless systems. On the other hand, WLANs based on random access mechanism have become popular due to ease of deployment and low cost. Since cooperation introduces extra transmissions among the cooperating nodes and therefore increases the number of packet collisions, it is not clear whether there is any benefit from using physical layer cooperation under random access. In this paper, we develop new low complexity cooperative protocols for random access that outperform the conventional non cooperative scheme for a large range of signal-to-noise ratios.<|reference_end|> | arxiv | @article{böcherer2008cooperative,
title={Cooperative Protocols for Random Access Networks},
author={Georg B{\"o}cherer and Alexandre de Baynast},
journal={arXiv preprint arXiv:0808.3502},
year={2008},
archivePrefix={arXiv},
eprint={0808.3502},
primaryClass={cs.IT math.IT}
} | böcherer2008cooperative |
arxiv-4643 | 0808.3504 | On the Growth Rate of the Weight Distribution of Irregular Doubly-Generalized LDPC Codes | <|reference_start|>On the Growth Rate of the Weight Distribution of Irregular Doubly-Generalized LDPC Codes: In this paper, an expression for the asymptotic growth rate of the number of small linear-weight codewords of irregular doubly-generalized LDPC (D-GLDPC) codes is derived. The expression is compact and generalizes existing results for LDPC and generalized LDPC (GLDPC) codes. Assuming that there exist check and variable nodes with minimum distance 2, it is shown that the growth rate depends only on these nodes. An important connection between this new result and the stability condition of D-GLDPC codes over the BEC is highlighted. Such a connection, previously observed for LDPC and GLDPC codes, is now extended to the case of D-GLDPC codes.<|reference_end|> | arxiv | @article{flanagan2008on,
title={On the Growth Rate of the Weight Distribution of Irregular
Doubly-Generalized LDPC Codes},
author={Mark F. Flanagan and Enrico Paolini and Marco Chiani and Marc Fossorier},
journal={arXiv preprint arXiv:0808.3504},
year={2008},
archivePrefix={arXiv},
eprint={0808.3504},
primaryClass={cs.IT math.IT}
} | flanagan2008on |
arxiv-4644 | 0808.3511 | Conditional probability based significance tests for sequential patterns in multi-neuronal spike trains | <|reference_start|>Conditional probability based significance tests for sequential patterns in multi-neuronal spike trains: In this paper we consider the problem of detecting statistically significant sequential patterns in multi-neuronal spike trains. These patterns are characterized by ordered sequences of spikes from different neurons with specific delays between spikes. We have previously proposed a data mining scheme to efficiently discover such patterns which are frequent in the sense that the count of non-overlapping occurrences of the pattern in the data stream is above a threshold. Here we propose a method to determine the statistical significance of these repeating patterns and to set the thresholds automatically. The novelty of our approach is that we use a compound null hypothesis that includes not only models of independent neurons but also models where neurons have weak dependencies. The strength of interaction among the neurons is represented in terms of certain pair-wise conditional probabilities. We specify our null hypothesis by putting an upper bound on all such conditional probabilities. We construct a probabilistic model that captures the counting process and use this to calculate the mean and variance of the count for any pattern. Using this we derive a test of significance for rejecting such a null hypothesis. This also allows us to rank-order different significant patterns. We illustrate the effectiveness of our approach using spike trains generated from a non-homogeneous Poisson model with embedded dependencies.<|reference_end|> | arxiv | @article{sastry2008conditional,
title={Conditional probability based significance tests for sequential patterns
in multi-neuronal spike trains},
author={P.S. Sastry (Indian Institute of Science) and K.P. Unnikrishnan
(General Motors Research)},
journal={arXiv preprint arXiv:0808.3511},
year={2008},
archivePrefix={arXiv},
eprint={0808.3511},
primaryClass={q-bio.NC cond-mat.dis-nn cs.DB q-bio.QM stat.ME}
} | sastry2008conditional |
arxiv-4645 | 0808.3535 | Data Diffusion: Dynamic Resource Provision and Data-Aware Scheduling for Data Intensive Applications | <|reference_start|>Data Diffusion: Dynamic Resource Provision and Data-Aware Scheduling for Data Intensive Applications: Data intensive applications often involve the analysis of large datasets that require large amounts of compute and storage resources. While dedicated compute and/or storage farms offer good task/data throughput, they suffer low resource utilization problem under varying workloads conditions. If we instead move such data to distributed computing resources, then we incur expensive data transfer cost. In this paper, we propose a data diffusion approach that combines dynamic resource provisioning, on-demand data replication and caching, and data locality-aware scheduling to achieve improved resource efficiency under varying workloads. We define an abstract "data diffusion model" that takes into consideration the workload characteristics, data accessing cost, application throughput and resource utilization; we validate the model using a real-world large-scale astronomy application. Our results show that data diffusion can increase the performance index by as much as 34X, and improve application response time by over 506X, while achieving near-optimal throughputs and execution times.<|reference_end|> | arxiv | @article{raicu2008data,
title={Data Diffusion: Dynamic Resource Provision and Data-Aware Scheduling for
Data Intensive Applications},
author={Ioan Raicu and Yong Zhao and Ian Foster and Alex Szalay},
journal={arXiv preprint arXiv:0808.3535},
year={2008},
archivePrefix={arXiv},
eprint={0808.3535},
primaryClass={cs.DC}
} | raicu2008data |
arxiv-4646 | 0808.3536 | Enabling Loosely-Coupled Serial Job Execution on the IBM BlueGene/P Supercomputer and the SiCortex SC5832 | <|reference_start|>Enabling Loosely-Coupled Serial Job Execution on the IBM BlueGene/P Supercomputer and the SiCortex SC5832: Our work addresses the enabling of the execution of highly parallel computations composed of loosely coupled serial jobs with no modifications to the respective applications, on large-scale systems. This approach allows new-and potentially far larger-classes of application to leverage systems such as the IBM Blue Gene/P supercomputer and similar emerging petascale architectures. We present here the challenges of I/O performance encountered in making this model practical, and show results using both micro-benchmarks and real applications on two large-scale systems, the BG/P and the SiCortex SC5832. Our preliminary benchmarks show that we can scale to 4096 processors on the Blue Gene/P and 5832 processors on the SiCortex with high efficiency, and can achieve thousands of tasks/sec sustained execution rates for parallel workloads of ordinary serial applications. We measured applications from two domains, economic energy modeling and molecular dynamics.<|reference_end|> | arxiv | @article{raicu2008enabling,
title={Enabling Loosely-Coupled Serial Job Execution on the IBM BlueGene/P
Supercomputer and the SiCortex SC5832},
author={Ioan Raicu and Zhao Zhang and Mike Wilde and Ian Foster},
journal={arXiv preprint arXiv:0808.3536},
year={2008},
archivePrefix={arXiv},
eprint={0808.3536},
primaryClass={cs.DC}
} | raicu2008enabling |
arxiv-4647 | 0808.3540 | Towards Loosely-Coupled Programming on Petascale Systems | <|reference_start|>Towards Loosely-Coupled Programming on Petascale Systems: We have extended the Falkon lightweight task execution framework to make loosely coupled programming on petascale systems a practical and useful programming model. This work studies and measures the performance factors involved in applying this approach to enable the use of petascale systems by a broader user community, and with greater ease. Our work enables the execution of highly parallel computations composed of loosely coupled serial jobs with no modifications to the respective applications. This approach allows a new-and potentially far larger-class of applications to leverage petascale systems, such as the IBM Blue Gene/P supercomputer. We present the challenges of I/O performance encountered in making this model practical, and show results using both microbenchmarks and real applications from two domains: economic energy modeling and molecular dynamics. Our benchmarks show that we can scale up to 160K processor-cores with high efficiency, and can achieve sustained execution rates of thousands of tasks per second.<|reference_end|> | arxiv | @article{raicu2008towards,
title={Towards Loosely-Coupled Programming on Petascale Systems},
author={Ioan Raicu and Zhao Zhang and Mike Wilde and Ian Foster and Pete Beckman and Kamil Iskra and Ben Clifford},
journal={arXiv preprint arXiv:0808.3540},
year={2008},
doi={10.1109/SC.2008.5219768},
archivePrefix={arXiv},
eprint={0808.3540},
primaryClass={cs.DC}
} | raicu2008towards |
arxiv-4648 | 0808.3545 | Scientific Workflow Systems for 21st Century e-Science, New Bottle or New Wine? | <|reference_start|>Scientific Workflow Systems for 21st Century e-Science, New Bottle or New Wine?: With the advances in e-Sciences and the growing complexity of scientific analyses, more and more scientists and researchers are relying on workflow systems for process coordination, derivation automation, provenance tracking, and bookkeeping. While workflow systems have been in use for decades, it is unclear whether scientific workflows can or even should build on existing workflow technologies, or they require fundamentally new approaches. In this paper, we analyze the status and challenges of scientific workflows, investigate both existing technologies and emerging languages, platforms and systems, and identify the key challenges that must be addressed by workflow systems for e-science in the 21st century.<|reference_end|> | arxiv | @article{zhao2008scientific,
title={Scientific Workflow Systems for 21st Century e-Science, New Bottle or
New Wine?},
author={Yong Zhao and Ioan Raicu and Ian Foster},
journal={arXiv preprint arXiv:0808.3545},
year={2008},
archivePrefix={arXiv},
eprint={0808.3545},
primaryClass={cs.SE cs.DC}
} | zhao2008scientific |
arxiv-4649 | 0808.3546 | Accelerating Large-scale Data Exploration through Data Diffusion | <|reference_start|>Accelerating Large-scale Data Exploration through Data Diffusion: Data-intensive applications often require exploratory analysis of large datasets. If analysis is performed on distributed resources, data locality can be crucial to high throughput and performance. We propose a "data diffusion" approach that acquires compute and storage resources dynamically, replicates data in response to demand, and schedules computations close to data. As demand increases, more resources are acquired, thus allowing faster response to subsequent requests that refer to the same data; when demand drops, resources are released. This approach can provide the benefits of dedicated hardware without the associated high costs, depending on workload and resource characteristics. The approach is reminiscent of cooperative caching, web-caching, and peer-to-peer storage systems, but addresses different application demands. Other data-aware scheduling approaches assume dedicated resources, which can be expensive and/or inefficient if load varies significantly. To explore the feasibility of the data diffusion approach, we have extended the Falkon resource provisioning and task scheduling system to support data caching and data-aware scheduling. Performance results from both micro-benchmarks and a large scale astronomy application demonstrate that our approach improves performance relative to alternative approaches, as well as provides improved scalability as aggregated I/O bandwidth scales linearly with the number of data cache nodes.<|reference_end|> | arxiv | @article{raicu2008accelerating,
title={Accelerating Large-scale Data Exploration through Data Diffusion},
author={Ioan Raicu and Yong Zhao and Ian Foster and Alex Szalay},
journal={arXiv preprint arXiv:0808.3546},
year={2008},
doi={10.1145/1383519.1383521},
archivePrefix={arXiv},
eprint={0808.3546},
primaryClass={cs.DC}
} | raicu2008accelerating |
arxiv-4650 | 0808.3548 | Realizing Fast, Scalable and Reliable Scientific Computations in Grid Environments | <|reference_start|>Realizing Fast, Scalable and Reliable Scientific Computations in Grid Environments: The practical realization of managing and executing large scale scientific computations efficiently and reliably is quite challenging. Scientific computations often involve thousands or even millions of tasks operating on large quantities of data, such data are often diversely structured and stored in heterogeneous physical formats, and scientists must specify and run such computations over extended periods on collections of compute, storage and network resources that are heterogeneous, distributed and may change constantly. We present the integration of several advanced systems: Swift, Karajan, and Falkon, to address the challenges in running various large scale scientific applications in Grid environments. Swift is a parallel programming tool for rapid and reliable specification, execution, and management of large-scale science and engineering workflows. Swift consists of a simple scripting language called SwiftScript and a powerful runtime system that is based on the CoG Karajan workflow engine and integrates the Falkon light-weight task execution service that uses multi-level scheduling and a streamlined dispatcher. We showcase the scalability, performance and reliability of the integrated system using application examples drawn from astronomy, cognitive neuroscience and molecular dynamics, which all comprise large number of fine-grained jobs. 
We show that Swift is able to represent dynamic workflows whose structures can only be determined during runtime and reduce largely the code size of various workflow representations using SwiftScript; schedule the execution of hundreds of thousands of parallel computations via the Karajan engine; and achieve up to 90% reduction in execution time when compared to traditional batch schedulers.<|reference_end|> | arxiv | @article{zhao2008realizing,
title={Realizing Fast, Scalable and Reliable Scientific Computations in Grid
Environments},
author={Yong Zhao and Ioan Raicu and Ian Foster and Mihael Hategan and Veronika Nefedova and Mike Wilde},
journal={arXiv preprint arXiv:0808.3548},
year={2008},
archivePrefix={arXiv},
eprint={0808.3548},
primaryClass={cs.DC cs.PL}
} | zhao2008realizing |
arxiv-4651 | 0808.3558 | Market-Oriented Cloud Computing: Vision, Hype, and Reality for Delivering IT Services as Computing Utilities | <|reference_start|>Market-Oriented Cloud Computing: Vision, Hype, and Reality for Delivering IT Services as Computing Utilities: This keynote paper: presents a 21st century vision of computing; identifies various computing paradigms promising to deliver the vision of computing utilities; defines Cloud computing and provides the architecture for creating market-oriented Clouds by leveraging technologies such as VMs; provides thoughts on market-based resource management strategies that encompass both customer-driven service management and computational risk management to sustain SLA-oriented resource allocation; presents some representative Cloud platforms especially those developed in industries along with our current work towards realising market-oriented resource allocation of Clouds by leveraging the 3rd generation Aneka enterprise Grid technology; reveals our early thoughts on interconnecting Clouds for dynamically creating an atmospheric computing environment along with pointers to future community research; and concludes with the need for convergence of competing IT paradigms for delivering our 21st century vision.<|reference_end|> | arxiv | @article{buyya2008market-oriented,
title={Market-Oriented Cloud Computing: Vision, Hype, and Reality for
Delivering IT Services as Computing Utilities},
author={Rajkumar Buyya and Chee Shin Yeo and Srikumar Venugopal},
journal={Proceedings of the 10th IEEE International Conference on High
Performance Computing and Communications (HPCC-08, IEEE CS Press, Los
Alamitos, CA, USA), Sept. 25-27, 2008, Dalian, China},
year={2008},
doi={10.1109/HPCC.2008.172},
archivePrefix={arXiv},
eprint={0808.3558},
primaryClass={cs.DC}
} | buyya2008market-oriented |
arxiv-4652 | 0808.3563 | What It Feels Like To Hear Voices: Fond Memories of Julian Jaynes | <|reference_start|>What It Feels Like To Hear Voices: Fond Memories of Julian Jaynes: Julian Jaynes's profound humanitarian convictions not only prevented him from going to war, but would have prevented him from ever kicking a dog. Yet according to his theory, not only are language-less dogs unconscious, but so too were the speaking/hearing Greeks in the Bicameral Era, when they heard gods' voices telling them what to do rather than thinking for themselves. I argue that to be conscious is to be able to feel, and that all mammals (and probably lower vertebrates and invertebrates too) feel, hence are conscious. Julian Jaynes's brilliant analysis of our concepts of consciousness nevertheless keeps inspiring ever more inquiry and insights into the age-old mind/body problem and its relation to cognition and language.<|reference_end|> | arxiv | @article{harnad2008what,
title={What It Feels Like To Hear Voices: Fond Memories of Julian Jaynes},
author={Stevan Harnad},
journal={arXiv preprint arXiv:0808.3563},
year={2008},
archivePrefix={arXiv},
eprint={0808.3563},
primaryClass={cs.CL}
} | harnad2008what |
arxiv-4653 | 0808.3569 | Offloading Cognition onto Cognitive Technology | <|reference_start|>Offloading Cognition onto Cognitive Technology: "Cognizing" (e.g., thinking, understanding, and knowing) is a mental state. Systems without mental states, such as cognitive technology, can sometimes contribute to human cognition, but that does not make them cognizers. Cognizers can offload some of their cognitive functions onto cognitive technology, thereby extending their performance capacity beyond the limits of their own brain power. Language itself is a form of cognitive technology that allows cognizers to offload some of their cognitive functions onto the brains of other cognizers. Language also extends cognizers' individual and joint performance powers, distributing the load through interactive and collaborative cognition. Reading, writing, print, telecommunications and computing further extend cognizers' capacities. And now the web, with its network of cognizers, digital databases and software agents, all accessible anytime, anywhere, has become our 'Cognitive Commons,' in which distributed cognizers and cognitive technology can interoperate globally with a speed, scope and degree of interactivity inconceivable through local individual cognition alone. And as with language, the cognitive tool par excellence, such technological changes are not merely instrumental and quantitative: they can have profound effects on how we think and encode information, on how we communicate with one another, on our mental states, and on our very nature.<|reference_end|> | arxiv | @article{dror2008offloading,
title={Offloading Cognition onto Cognitive Technology},
author={Itiel Dror and Stevan Harnad},
journal={arXiv preprint arXiv:0808.3569},
year={2008},
archivePrefix={arXiv},
eprint={0808.3569},
primaryClass={cs.MA cs.CL}
} | dror2008offloading |
arxiv-4654 | 0808.3572 | Model-Based Compressive Sensing | <|reference_start|>Model-Based Compressive Sensing: Compressive sensing (CS) is an alternative to Shannon/Nyquist sampling for the acquisition of sparse or compressible signals that can be well approximated by just K << N elements from an N-dimensional basis. Instead of taking periodic samples, CS measures inner products with M < N random vectors and then recovers the signal via a sparsity-seeking optimization or greedy algorithm. Standard CS dictates that robust signal recovery is possible from M = O(K log(N/K)) measurements. It is possible to substantially decrease M without sacrificing robustness by leveraging more realistic signal models that go beyond simple sparsity and compressibility by including structural dependencies between the values and locations of the signal coefficients. This paper introduces a model-based CS theory that parallels the conventional theory and provides concrete guidelines on how to create model-based recovery algorithms with provable performance guarantees. A highlight is the introduction of a new class of structured compressible signals along with a new sufficient condition for robust structured compressible signal recovery that we dub the restricted amplification property, which is the natural counterpart to the restricted isometry property of conventional CS. Two examples integrate two relevant signal models - wavelet trees and block sparsity - into two state-of-the-art CS recovery algorithms and prove that they offer robust recovery from just M=O(K) measurements. Extensive numerical simulations demonstrate the validity and applicability of our new theory and algorithms.<|reference_end|> | arxiv | @article{baraniuk2008model-based,
title={Model-Based Compressive Sensing},
author={Richard G. Baraniuk and Volkan Cevher and Marco F. Duarte and Chinmay Hegde},
journal={arXiv preprint arXiv:0808.3572},
year={2008},
doi={10.1109/TIT.2010.2040894},
archivePrefix={arXiv},
eprint={0808.3572},
primaryClass={cs.IT math.IT}
} | baraniuk2008model-based |
arxiv-4655 | 0808.3574 | Classical Knowledge for Quantum Security | <|reference_start|>Classical Knowledge for Quantum Security: We propose a decision procedure for analysing security of quantum cryptographic protocols, combining a classical algebraic rewrite system for knowledge with an operational semantics for quantum distributed computing. As a test case, we use our procedure to reason about security properties of a recently developed quantum secret sharing protocol that uses graph states. We analyze three different scenarios based on the safety assumptions of the classical and quantum channels and discover the path of an attack in the presence of an adversary. The epistemic analysis that leads to this and similar types of attacks is purely based on our classical notion of knowledge.<|reference_end|> | arxiv | @article{d'hondt2008classical,
title={Classical Knowledge for Quantum Security},
author={Ellie D'Hondt and Mehrnoosh Sadrzadeh},
journal={arXiv preprint arXiv:0808.3574},
year={2008},
archivePrefix={arXiv},
eprint={0808.3574},
primaryClass={cs.CR cs.LO quant-ph}
} | d'hondt2008classical |
arxiv-4656 | 0808.3616 | Constructing word similarities in Meroitic as an aid to decipherment | <|reference_start|>Constructing word similarities in Meroitic as an aid to decipherment: Meroitic is the still undeciphered language of the ancient civilization of Kush. Over the years, various techniques for decipherment such as finding a bilingual text or cognates from modern or other ancient languages in the Sudan and surrounding areas has not been successful. Using techniques borrowed from information theory and natural language statistics, similar words are paired and attempts are made to use currently defined words to extract at least partial meaning from unknown words.<|reference_end|> | arxiv | @article{smith2008constructing,
title={Constructing word similarities in Meroitic as an aid to decipherment},
author={Reginald D. Smith},
journal={British Museum Studies in Ancient Egypt and Sudan, 12, 1-10 (2009)},
year={2008},
archivePrefix={arXiv},
eprint={0808.3616},
primaryClass={cs.CL}
} | smith2008constructing |
arxiv-4657 | 0808.3651 | Flow Faster: Efficient Decision Algorithms for Probabilistic Simulations | <|reference_start|>Flow Faster: Efficient Decision Algorithms for Probabilistic Simulations: Strong and weak simulation relations have been proposed for Markov chains, while strong simulation and strong probabilistic simulation relations have been proposed for probabilistic automata. However, decision algorithms for strong and weak simulation over Markov chains, and for strong simulation over probabilistic automata are not efficient, which makes it as yet unclear whether they can be used as effectively as their non-probabilistic counterparts. This paper presents drastically improved algorithms to decide whether some (discrete- or continuous-time) Markov chain strongly or weakly simulates another, or whether a probabilistic automaton strongly simulates another. The key innovation is the use of parametric maximum flow techniques to amortize computations. We also present a novel algorithm for deciding strong probabilistic simulation preorders on probabilistic automata, which has polynomial complexity via a reduction to an LP problem. When extending the algorithms for probabilistic automata to their continuous-time counterpart, we retain the same complexity for both strong and strong probabilistic simulations.<|reference_end|> | arxiv | @article{zhang2008flow,
title={Flow Faster: Efficient Decision Algorithms for Probabilistic Simulations},
author={Lijun Zhang and Holger Hermanns and Friedrich Eisenbrand and David N. Jansen},
journal={Logical Methods in Computer Science, Volume 4, Issue 4 (November
11, 2008) lmcs:989},
year={2008},
doi={10.2168/LMCS-4(4:6)2008},
archivePrefix={arXiv},
eprint={0808.3651},
primaryClass={cs.LO}
} | zhang2008flow |
arxiv-4658 | 0808.3689 | Optimal Power Allocation for Fading Channels in Cognitive Radio Networks: Ergodic Capacity and Outage Capacity | <|reference_start|>Optimal Power Allocation for Fading Channels in Cognitive Radio Networks: Ergodic Capacity and Outage Capacity: A cognitive radio network (CRN) is formed by either allowing the secondary users (SUs) in a secondary communication network (SCN) to opportunistically operate in the frequency bands originally allocated to a primary communication network (PCN) or by allowing SCN to coexist with the primary users (PUs) in PCN as long as the interference caused by SCN to each PU is properly regulated. In this paper, we consider the latter case, known as spectrum sharing, and study the optimal power allocation strategies to achieve the ergodic capacity and the outage capacity of the SU fading channel under different types of power constraints and fading channel models. In particular, besides the interference power constraint at PU, the transmit power constraint of SU is also considered. Since the transmit power and the interference power can be limited either by a peak or an average constraint, various combinations of power constraints are studied. It is shown that there is a capacity gain for SU under the average over the peak transmit/interference power constraint. It is also shown that fading for the channel between SU transmitter and PU receiver is usually a beneficial factor for enhancing the SU channel capacities.<|reference_end|> | arxiv | @article{kang2008optimal,
title={Optimal Power Allocation for Fading Channels in Cognitive Radio
Networks: Ergodic Capacity and Outage Capacity},
author={Xin Kang and Ying-Chang Liang and Arumugam Nallanathan and Hari Krishna Garg and Rui Zhang},
journal={arXiv preprint arXiv:0808.3689},
year={2008},
doi={10.1109/TWC.2009.071448},
archivePrefix={arXiv},
eprint={0808.3689},
primaryClass={cs.IT math.IT}
} | kang2008optimal |
arxiv-4659 | 0808.3693 | Providing Virtual Execution Environments: A Twofold Illustration | <|reference_start|>Providing Virtual Execution Environments: A Twofold Illustration: Platform virtualization helps solving major grid computing challenges: share resource with flexible, user-controlled and custom execution environments and in the meanwhile, isolate failures and malicious code. Grid resource management tools will evolve to embrace support for virtual resource. We present two open source projects that transparently supply virtual execution environments. Tycoon has been developed at HP Labs to optimise resource usage in creating an economy where users bid to access virtual machines and compete for CPU cycles. SmartDomains provides a peer-to-peer layer that automates virtual machines deployment using a description language and deployment engine from HP Labs. These projects demonstrate both client-server and peer-to-peer approaches to virtual resource management. The first case makes extensive use of virtual machines features for dynamic resource allocation. The second translates virtual machines capabilities into a sophisticated language where resource management components can be plugged in configurations and architectures defined at deployment time. We propose to share our experience at CERN openlab developing SmartDomains and deploying Tycoon to give an illustrative introduction to emerging research in virtual resource management.<|reference_end|> | arxiv | @article{grehant2008providing,
title={Providing Virtual Execution Environments: A Twofold Illustration},
author={Xavier Grehant and J. M. Dana},
journal={arXiv preprint arXiv:0808.3693},
year={2008},
archivePrefix={arXiv},
eprint={0808.3693},
primaryClass={cs.DC}
} | grehant2008providing |
arxiv-4660 | 0808.3694 | Studying Geometric Graph Properties of Road Networks Through an Algorithmic Lens | <|reference_start|>Studying Geometric Graph Properties of Road Networks Through an Algorithmic Lens: This paper studies real-world road networks from an algorithmic perspective, focusing on empirical studies that yield useful properties of road networks that can be exploited in the design of fast algorithms that deal with geographic data. Unlike previous approaches, our study is not based on the assumption that road networks are planar graphs. Indeed, based on the a number of experiments we have performed on the road networks of the 50 United States and District of Columbia, we provide strong empirical evidence that road networks are quite non-planar. Our approach therefore instead is directed at finding algorithmically-motivated properties of road networks as non-planar geometric graphs, focusing on alternative properties of road networks that can still lead to efficient algorithms for such problems as shortest paths and Voronoi diagrams. In particular, we study road networks as multiscale-dispersed graphs, which is a concept we formalize in terms of disk neighborhood systems. This approach allows us to develop fast algorithms for road networks without making any additional assumptions about the distribution of edge weights. In fact, our algorithms can allow for non-metric weights.<|reference_end|> | arxiv | @article{eppstein2008studying,
title={Studying Geometric Graph Properties of Road Networks Through an
Algorithmic Lens},
author={David Eppstein and Michael T. Goodrich},
journal={arXiv preprint arXiv:0808.3694},
year={2008},
archivePrefix={arXiv},
eprint={0808.3694},
primaryClass={cs.CG}
} | eppstein2008studying |
arxiv-4661 | 0808.3712 | Critique du rapport signal \`a bruit en communications num\'eriques -- Questioning the signal to noise ratio in digital communications | <|reference_start|>Critique du rapport signal \`a bruit en communications num\'eriques -- Questioning the signal to noise ratio in digital communications: The signal to noise ratio, which plays such an important r\^ole in information theory, is shown to become pointless for digital communications where the demodulation is achieved via new fast estimation techniques. Operational calculus, differential algebra, noncommutative algebra and nonstandard analysis are the main mathematical tools.<|reference_end|> | arxiv | @article{fliess2008critique,
title={Critique du rapport signal \`a bruit en communications num\'eriques --
Questioning the signal to noise ratio in digital communications},
author={Fliess, Michel},
note={LIX, INRIA Saclay - Ile de France},
journal={ARIMA (Revue africaine d'informatique et de Math\'ematiques
appliqu\'ees) 9 (2008) 419-429},
year={2008},
archivePrefix={arXiv},
eprint={0808.3712},
primaryClass={cs.IT math.IT math.PR math.RA}
} | fliess2008critique |
arxiv-4662 | 0808.3717 | Free and Open Source Software for Development | <|reference_start|>Free and Open Source Software for Development: Development organizations and International Non-Governmental Organizations have been emphasizing the high potential of Free and Open Source Software for the Less Developed Countries. Cost reduction, less vendor dependency and increased potential for local capacity development have been their main arguments. In spite of its advantages, Free and Open Source Software is not widely adopted at the African continent. In this book the authors will explore the grounds on with these expectations are based. Where do they come from and is there evidence to support these expectations? Over the past years several projects have been initiated and some good results have been achieved, but at the same time many challenges were encountered. What lessons can be drawn from these experiences and do these experiences contain enough evidence to support the high expectations? Several projects and their achievements will be considered. In the final part of the book the future of Free and Open Source Software for Development will be explored. Special attention is given to the African continent since here challenges are highest. What is the role of Free and open Source Software for Development and how do we need to position and explore the potential? What are the threats? The book aims at professionals that are engaged in the design and implementation of ICT for Development (ICT4D) projects and want to improve their understanding of the role Free and Open Source Software can play.<|reference_end|> | arxiv | @article{van reijswoud2008free,
title={Free and Open Source Software for Development},
author={Victor van Reijswoud and Arjan de Jager},
journal={"Publishing studies" book series, edited by Giandomenico Sica,
ISSN 1973-6061 (Printed edition), ISSN 1973-6053 (Electronic edition)},
year={2008},
archivePrefix={arXiv},
eprint={0808.3717},
primaryClass={cs.GL}
} | van reijswoud2008free |
arxiv-4663 | 0808.3726 | Highly accurate recommendation algorithm based on high-order similarities | <|reference_start|>Highly accurate recommendation algorithm based on high-order similarities: In this Letter, we introduce a modified collaborative filtering (MCF) algorithm, which has remarkably higher accuracy than the standard collaborative filtering. In the MCF, instead of the standard Pearson coefficient, the user-user similarities are obtained by a diffusion process. Furthermore, by considering the second order similarities, we design an effective algorithm that depresses the influence of mainstream preferences. The corresponding algorithmic accuracy, measured by the ranking score, is further improved by 24.9% in the optimal case. In addition, two significant criteria of algorithmic performance, diversity and popularity, are also taken into account. Numerical results show that the algorithm based on second order similarity can outperform the MCF simultaneously in all three criteria.<|reference_end|> | arxiv | @article{liu2008highly,
title={Highly accurate recommendation algorithm based on high-order
similarities},
author={Jian-Guo Liu and Tao Zhou and Bing-Hong Wang and Yi-Cheng Zhang},
journal={Physica A 389, 881-886 (2010)},
year={2008},
doi={10.1016/j.physa.2009.10.027},
archivePrefix={arXiv},
eprint={0808.3726},
primaryClass={physics.data-an cs.IR}
} | liu2008highly |
arxiv-4664 | 0808.3746 | A game-theoretic version of Oakes' example for randomized forecasting | <|reference_start|>A game-theoretic version of Oakes' example for randomized forecasting: Using the game-theoretic framework for probability, Vovk and Shafer have shown that it is always possible, using randomization, to make sequential probability forecasts that pass any countable set of well-behaved statistical tests. This result generalizes work by other authors, who consider only tests of calibration. We complement this result with a lower bound. We show that Vovk and Shafer's result is valid only when the forecasts are computed with unrestrictedly increasing degree of accuracy. When some level of discreteness is fixed, we present a game-theoretic generalization of Oakes' example for randomized forecasting that is a test failing any given method of deterministic forecasting; originally, this example was presented for deterministic calibration.<|reference_end|> | arxiv | @article{v'yugin2008a,
title={A game-theoretic version of Oakes' example for randomized forecasting},
author={Vladimir V. V'yugin},
journal={arXiv preprint arXiv:0808.3746},
year={2008},
archivePrefix={arXiv},
eprint={0808.3746},
primaryClass={cs.LG cs.GT}
} | v'yugin2008a |
arxiv-4665 | 0808.3747 | Forward Correction and Fountain codes in Delay Tolerant Networks | <|reference_start|>Forward Correction and Fountain codes in Delay Tolerant Networks: Delay tolerant Ad-hoc Networks make use of mobility of relay nodes to compensate for lack of permanent connectivity and thus enable communication between nodes that are out of range of each other. To decrease delivery delay, the information that needs to be delivered is replicated in the network. Our objective in this paper is to study replication mechanisms that include coding in order to improve the probability of successful delivery within a given time limit. We propose an analytical approach that allows to quantify tradeoffs between resources and performance measures (energy and delay). We study the effect of coding on the performance of the network while optimizing parameters that govern routing. Our results, based on fluid approximations, are compared to simulations which validate the model<|reference_end|> | arxiv | @article{altman2008forward,
title={Forward Correction and Fountain codes in Delay Tolerant Networks},
author={Eitan Altman and Francesco De Pellegrini},
journal={arXiv preprint arXiv:0808.3747},
year={2008},
archivePrefix={arXiv},
eprint={0808.3747},
primaryClass={cs.NI cs.PF}
} | altman2008forward |
arxiv-4666 | 0808.3756 | Approaching Blokh-Zyablov Error Exponent with Linear-Time Encodable/Decodable Codes | <|reference_start|>Approaching Blokh-Zyablov Error Exponent with Linear-Time Encodable/Decodable Codes: Guruswami and Indyk showed in [1] that Forney's error exponent can be achieved with linear coding complexity over binary symmetric channels. This paper extends this conclusion to general discrete-time memoryless channels and shows that Forney's and Blokh-Zyablov error exponents can be arbitrarily approached by one-level and multi-level concatenated codes with linear encoding/decoding complexity. The key result is a revision to Forney's general minimum distance decoding algorithm, which enables a low complexity integration of Guruswami-Indyk's outer codes into the concatenated coding schemes.<|reference_end|> | arxiv | @article{wang2008approaching,
title={Approaching Blokh-Zyablov Error Exponent with Linear-Time
Encodable/Decodable Codes},
author={Zheng Wang and Jie Luo},
journal={arXiv preprint arXiv:0808.3756},
year={2008},
doi={10.1109/LCOMM.2009.090047},
archivePrefix={arXiv},
eprint={0808.3756},
primaryClass={cs.IT cs.CC math.IT}
} | wang2008approaching |
arxiv-4667 | 0808.3881 | Counting Hexagonal Patches and Independent Sets in Circle Graphs | <|reference_start|>Counting Hexagonal Patches and Independent Sets in Circle Graphs: A hexagonal patch is a plane graph in which inner faces have length 6, inner vertices have degree 3, and boundary vertices have degree 2 or 3. We consider the following counting problem: given a sequence of twos and threes, how many hexagonal patches exist with this degree sequence along the outer face? This problem is motivated by the study of benzenoid hydrocarbons and fullerenes in computational chemistry. We give the first polynomial time algorithm for this problem. We show that it can be reduced to counting maximum independent sets in circle graphs, and give a simple and fast algorithm for this problem.<|reference_end|> | arxiv | @article{bonsma2008counting,
title={Counting Hexagonal Patches and Independent Sets in Circle Graphs},
  author={Paul Bonsma and Felix Breuer},
journal={arXiv preprint arXiv:0808.3881},
year={2008},
archivePrefix={arXiv},
eprint={0808.3881},
primaryClass={cs.DM cs.DS}
} | bonsma2008counting |
arxiv-4668 | 0808.3884 | The Complexity of Reasoning for Fragments of Default Logic | <|reference_start|>The Complexity of Reasoning for Fragments of Default Logic: Default logic was introduced by Reiter in 1980. In 1992, Gottlob classified the complexity of the extension existence problem for propositional default logic as $\SigmaPtwo$-complete, and the complexity of the credulous and skeptical reasoning problem as SigmaP2-complete, resp. PiP2-complete. Additionally, he investigated restrictions on the default rules, i.e., semi-normal default rules. Selman made in 1992 a similar approach with disjunction-free and unary default rules. In this paper we systematically restrict the set of allowed propositional connectives. We give a complete complexity classification for all sets of Boolean functions in the meaning of Post's lattice for all three common decision problems for propositional default logic. We show that the complexity is a hexachotomy (SigmaP2-, DeltaP2-, NP-, P-, NL-complete, trivial) for the extension existence problem, while for the credulous and skeptical reasoning problem we obtain similar classifications without trivial cases.<|reference_end|> | arxiv | @article{beyersdorff2008the,
title={The Complexity of Reasoning for Fragments of Default Logic},
  author={Olaf Beyersdorff and Arne Meier and Michael Thomas and Heribert Vollmer},
journal={arXiv preprint arXiv:0808.3884},
year={2008},
archivePrefix={arXiv},
eprint={0808.3884},
primaryClass={cs.CC cs.LO}
} | beyersdorff2008the |
arxiv-4669 | 0808.3889 | Open architecture for multilingual parallel texts | <|reference_start|>Open architecture for multilingual parallel texts: Multilingual parallel texts (abbreviated to parallel texts) are linguistic versions of the same content ("translations"); e.g., the Maastricht Treaty in English and Spanish are parallel texts. This document is about creating an open architecture for the whole Authoring, Translation and Publishing Chain (ATP-chain) for the processing of parallel texts.<|reference_end|> | arxiv | @article{benitez2008open,
title={Open architecture for multilingual parallel texts},
author={M.T. Carrasco Benitez},
journal={arXiv preprint arXiv:0808.3889},
year={2008},
archivePrefix={arXiv},
eprint={0808.3889},
primaryClass={cs.CL}
} | benitez2008open |
arxiv-4670 | 0808.3892 | Essential arity gap of Boolean functions | <|reference_start|>Essential arity gap of Boolean functions: We investigate the Boolean functions with essential arity gap 2. We use Full Conjunctive Normal Forms instead of Zhegalkin's polynomials, which allow us to simplify the proofs and to obtain several combinatorial results, concerning the Boolean functions with a given arity gap.<|reference_end|> | arxiv | @article{shtrakov2008essential,
title={Essential arity gap of Boolean functions},
author={Slavcho Shtrakov},
journal={arXiv preprint arXiv:0808.3892},
year={2008},
archivePrefix={arXiv},
eprint={0808.3892},
primaryClass={cs.DM}
} | shtrakov2008essential |
arxiv-4671 | 0808.3928 | On the strength of proof-irrelevant type theories | <|reference_start|>On the strength of proof-irrelevant type theories: We present a type theory with some proof-irrelevance built into the conversion rule. We argue that this feature is useful when type theory is used as the logical formalism underlying a theorem prover. We also show a close relation with the subset types of the theory of PVS. We show that in these theories, because of the additional extentionality, the axiom of choice implies the decidability of equality, that is, almost classical logic. Finally we describe a simple set-theoretic semantics.<|reference_end|> | arxiv | @article{werner2008on,
title={On the strength of proof-irrelevant type theories},
author={Benjamin Werner},
journal={Logical Methods in Computer Science, Volume 4, Issue 3 (September
26, 2008) lmcs:1142},
year={2008},
doi={10.2168/LMCS-4(3:13)2008},
archivePrefix={arXiv},
eprint={0808.3928},
primaryClass={cs.LO}
} | werner2008on |
arxiv-4672 | 0808.3937 | Understanding Fairness and its Impact on Quality of Service in IEEE 80211 | <|reference_start|>Understanding Fairness and its Impact on Quality of Service in IEEE 80211: The Distributed Coordination Function (DCF) aims at fair and efficient medium access in IEEE 802.11. In face of its success, it is remarkable that there is little consensus on the actual degree of fairness achieved, particularly bearing its impact on quality of service in mind. In this paper we provide an accurate model for the fairness of the DCF. Given M greedy stations we assume fairness if a tagged station contributes a share of 1/M to the overall number of packets transmitted. We derive the probability distribution of fairness deviations and support our analytical results by an extensive set of measurements. We find a closed-form expression for the improvement of long-term over short-term fairness. Regarding the random countdown values we quantify the significance of their distribution whereas we discover that fairness is largely insensitive to the distribution parameters. Based on our findings we view the DCF as emulating an ideal fair queuing system to quantify the deviations from a fair rate allocation. We deduce a stochastic service curve model for the DCF to predict packet delays in IEEE 802.11. We show how a station can estimate its fair bandwidth share from passive measurements of its traffic arrivals and departures.<|reference_end|> | arxiv | @article{bredel2008understanding,
title={Understanding Fairness and its Impact on Quality of Service in IEEE
802.11},
  author={Michael Bredel and Markus Fidler},
journal={IEEE INFOCOM 2009},
year={2008},
doi={10.1109/INFCOM.2009.5062022},
archivePrefix={arXiv},
eprint={0808.3937},
primaryClass={cs.NI cs.PF}
} | bredel2008understanding |
arxiv-4673 | 0808.3959 | A Simple Extension of the $\modulo$-$\Lambda$ Transformation | <|reference_start|>A Simple Extension of the $\modulo$-$\Lambda$ Transformation: A simple lemma is derived that allows to transform a general scalar (non-Gaussian, non-additive) continuous-alphabet channel as well as a general multiple-access channel into a modulo-additive noise channel. While in general the transformation is information lossy, it allows to leverage linear coding techniques and capacity results derived for networks comprised of additive Gaussian nodes to more general networks.<|reference_end|> | arxiv | @article{erez2008a,
title={A Simple Extension of the $\modulo$-$\Lambda$ Transformation},
author={Uri Erez and Ram Zamir},
journal={arXiv preprint arXiv:0808.3959},
year={2008},
archivePrefix={arXiv},
eprint={0808.3959},
primaryClass={cs.IT math.IT}
} | erez2008a |
arxiv-4674 | 0808.3971 | Networked MIMO with Clustered Linear Precoding | <|reference_start|>Networked MIMO with Clustered Linear Precoding: A clustered base transceiver station (BTS) coordination strategy is proposed for a large cellular MIMO network, which includes full intra-cluster coordination to enhance the sum rate and limited inter-cluster coordination to reduce interference for the cluster edge users. Multi-cell block diagonalization is used to coordinate the transmissions across multiple BTSs in the same cluster. To satisfy per-BTS power constraints, three combined precoder and power allocation algorithms are proposed with different performance and complexity tradeoffs. For inter-cluster coordination, the coordination area is chosen to balance fairness for edge users and the achievable sum rate. It is shown that a small cluster size (about 7 cells) is sufficient to obtain most of the sum rate benefits from clustered coordination while greatly relieving channel feedback requirement. Simulations show that the proposed coordination strategy efficiently reduces interference and provides a considerable sum rate gain for cellular MIMO networks.<|reference_end|> | arxiv | @article{zhang2008networked,
title={Networked MIMO with Clustered Linear Precoding},
  author={Jun Zhang and Runhua Chen and Jeffrey G. Andrews and Arunabha Ghosh and
  Heath, Jr., Robert W.},
journal={arXiv preprint arXiv:0808.3971},
year={2008},
archivePrefix={arXiv},
eprint={0808.3971},
primaryClass={cs.IT math.IT}
} | zhang2008networked |
arxiv-4675 | 0808.3990 | Adaptive Dynamic Congestion Avoidance with Master Equation | <|reference_start|>Adaptive Dynamic Congestion Avoidance with Master Equation: This paper proposes an adaptive variant of Random Early Detection (RED) gateway queue management for packet-switched networks via a discrete state analog of the non-stationary Master Equation i.e. Markov process. The computation of average queue size, which appeared in the original RED algorithm, is altered by introducing a probability $P(l,t)$, which defines the probability of having $l$ number of packets in the queue at the given time $t$, and depends upon the previous state of the queue. This brings the advantage of eliminating a free parameter: queue weight, completely. Computation of transition rates and probabilities are carried out on the fly, and determined by the algorithm automatically. Simulations with unstructured packets illustrate the method, the performance of the adaptive variant of RED algorithm, and the comparison with the standard RED.<|reference_end|> | arxiv | @article{süzen2008adaptive,
title={Adaptive Dynamic Congestion Avoidance with Master Equation},
  author={Mehmet S{\"u}zen and Ziya S{\"u}zen},
journal={arXiv preprint arXiv:0808.3990},
year={2008},
archivePrefix={arXiv},
eprint={0808.3990},
primaryClass={cs.NI}
} | süzen2008adaptive |
arxiv-4676 | 0808.4050 | Optimizing the double description method for normal surface enumeration | <|reference_start|>Optimizing the double description method for normal surface enumeration: Many key algorithms in 3-manifold topology involve the enumeration of normal surfaces, which is based upon the double description method for finding the vertices of a convex polytope. Typically we are only interested in a small subset of these vertices, thus opening the way for substantial optimization. Here we give an account of the vertex enumeration problem as it applies to normal surfaces, and present new optimizations that yield strong improvements in both running time and memory consumption. The resulting algorithms are tested using the freely available software package Regina.<|reference_end|> | arxiv | @article{burton2008optimizing,
title={Optimizing the double description method for normal surface enumeration},
author={Benjamin A. Burton},
journal={Mathematics of Computation 79 (2010), no. 269, 453-484},
year={2008},
doi={10.1090/S0025-5718-09-02282-0},
archivePrefix={arXiv},
eprint={0808.4050},
primaryClass={math.GT cs.CG math.CO}
} | burton2008optimizing |
arxiv-4677 | 0808.4060 | TrustMAS: Trusted Communication Platform for Multi-Agent Systems | <|reference_start|>TrustMAS: Trusted Communication Platform for Multi-Agent Systems: The paper presents TrustMAS - Trusted Communication Platform for Multi-Agent Systems, which provides trust and anonymity for mobile agents. The platform includes anonymous technique based on random-walk algorithm for providing general purpose anonymous communication for agents. All agents, which take part in the proposed platform, benefit from trust and anonymity that is provided for their interactions. Moreover, in TrustMAS there are StegAgents (SA) that are able to perform various steganographic communication. To achieve that goal, SAs may use methods in different layers of TCP/IP model or specialized middleware enabling steganography that allows hidden communication through all layers of mentioned model. In TrustMAS steganographic channels are used to exchange routing tables between StegAgents. Thus all StegAgents in TrustMAS with their ability to exchange information by using hidden channels form distributed steganographic router (Stegrouter).<|reference_end|> | arxiv | @article{szczypiorski2008trustmas:,
title={TrustMAS: Trusted Communication Platform for Multi-Agent Systems},
  author={Krzysztof Szczypiorski and Igor Margasinski and Wojciech Mazurczyk and
  Krzysztof Cabaj and Pawel Radziszewski},
journal={arXiv preprint arXiv:0808.4060},
year={2008},
archivePrefix={arXiv},
eprint={0808.4060},
primaryClass={cs.CR cs.MA}
} | szczypiorski2008trustmas: |
arxiv-4678 | 0808.4079 | From Altruism to Non-Cooperation in Routing Games | <|reference_start|>From Altruism to Non-Cooperation in Routing Games: The paper studies the routing in the network shared by several users. Each user seeks to optimize either its own performance or some combination between its own performance and that of other users, by controlling the routing of its given flow demand. We parameterize the degree of cooperation which allows to cover the fully non-cooperative behavior, the fully cooperative behavior, and even more, the fully altruistic behavior, all these as special cases of the parameter's choice. A large part of the work consists in exploring the impact of the degree of cooperation on the equilibrium. Our first finding is to identify multiple Nash equilibria with cooperative behavior that do not occur in the non-cooperative case under the same conditions (cost, demand and topology). We then identify Braess like paradox (in which adding capacity or adding a link to a network results in worse performance to all users) and study the impact of the degree of cooperation on it. We identify another type of paradox in cooperation scenario. We identify that when we increase the degree of cooperation of a user while other users keep unchanged their degree of cooperation, leads to an improvement in performance of that user. We then pursue the exploration and carry it on to the setting of Mixed equilibrium (i.e. some users are non atomic-they have infinitesimally small demand, and other have finite fixed demand). We finally obtain some theoretical results that show that for low degree of cooperation the equilibrium is unique, confirming the results of our numerical study.<|reference_end|> | arxiv | @article{azad2008from,
title={From Altruism to Non-Cooperation in Routing Games},
  author={Amar Prakash Azad and Eitan Altman and R. El-Azouzi},
journal={arXiv preprint arXiv:0808.4079},
year={2008},
archivePrefix={arXiv},
eprint={0808.4079},
primaryClass={cs.GT cs.NI}
} | azad2008from |
arxiv-4679 | 0808.4100 | Codes and Noncommutative Stochastic Matrices | <|reference_start|>Codes and Noncommutative Stochastic Matrices: Given a matrix over a skew field fixing the column (1,...,1)^t, we give formulas for a row vector fixed by this matrix. The same techniques are applied to give noncommutative extensions of probabilistic properties of codes.<|reference_end|> | arxiv | @article{lavallée2008codes,
title={Codes and Noncommutative Stochastic Matrices},
  author={Sylvain Lavall{\'e}e and Christophe Reutenauer and Vladimir Retakh and
  Dominique Perrin},
journal={arXiv preprint arXiv:0808.4100},
year={2008},
archivePrefix={arXiv},
eprint={0808.4100},
primaryClass={math.RA cs.IT math.IT}
} | lavallée2008codes |
arxiv-4680 | 0808.4104 | Flow-level Characteristics of Spam and Ham | <|reference_start|>Flow-level Characteristics of Spam and Ham: Despite a large amount of effort devoted in the past years trying to limit unsolicited mail, spam is still a major global concern. Content-analysis techniques and blacklists, the most popular methods used to identify and block spam, are beginning to lose their edge in the battle. We argue here that one not only needs to look into the network-related characteristics of spam traffic, as has been recently suggested, but also to look deeper into the network core, in order to counter the increasing sophistication of spam-ing methods. Yet, at the same time, local knowledge available at a given server can often be irreplaceable in identifying specific spammers. To this end, in this paper we show how the local intelligence of mail servers can be gathered and correlated pas- sively at the ISP-level providing valuable network-wide information. Specifically, we use first a large network flow trace from a medium size, national ISP, to demonstrate that the pre-filtering decisions of individual mail servers can be tracked and combined at the flow level. Then, we argue that such aggregated knowledge not only can allow ISPs to develop and evaluate powerful new methods for fighting spam, but also to monitor remotely what their own servers are doing.<|reference_end|> | arxiv | @article{schatzmann2008flow-level,
title={Flow-level Characteristics of Spam and Ham},
  author={Dominik Schatzmann and Martin Burkhart and Thrasyvoulos Spyropoulos},
journal={arXiv preprint arXiv:0808.4104},
year={2008},
number={TIK-Report No. 291},
archivePrefix={arXiv},
eprint={0808.4104},
primaryClass={cs.NI}
} | schatzmann2008flow-level |
arxiv-4681 | 0808.4111 | Relative Entropy and Statistics | <|reference_start|>Relative Entropy and Statistics: Formalising the confrontation of opinions (models) to observations (data) is the task of Inferential Statistics. Information Theory provides us with a basic functional, the relative entropy (or Kullback-Leibler divergence), an asymmetrical measure of dissimilarity between the empirical and the theoretical distributions. The formal properties of the relative entropy turn out to be able to capture every aspect of Inferential Statistics, as illustrated here, for simplicity, on dices (= i.i.d. process with finitely many outcomes): refutability (strict or probabilistic): the asymmetry data / models; small deviations: rejecting a single hypothesis; competition between hypotheses and model selection; maximum likelihood: model inference and its limits; maximum entropy: reconstructing partially observed data; EM-algorithm; flow data and gravity modelling; determining the order of a Markov chain.<|reference_end|> | arxiv | @article{bavaud2008relative,
title={Relative Entropy and Statistics},
  author={Fran{\c{c}}ois Bavaud},
journal={Bavaud F. (2009) Information Theory, Relative Entropy and
Statistics. In: Sommaruga G. (editor): Formal Theories of Information.
Lecture Notes in Computer Science 5363, Springer, pp. 54-78},
year={2008},
archivePrefix={arXiv},
eprint={0808.4111},
primaryClass={cs.IT math.IT math.ST stat.TH}
} | bavaud2008relative |
arxiv-4682 | 0808.4122 | Swapping Lemmas for Regular and Context-Free Languages | <|reference_start|>Swapping Lemmas for Regular and Context-Free Languages: In formal language theory, one of the most fundamental tools, known as pumping lemmas, is extremely useful for regular and context-free languages. However, there are natural properties for which the pumping lemmas are of little use. One of such examples concerns a notion of advice, which depends only on the size of an underlying input. A standard pumping lemma encounters difficulty in proving that a given language is not regular in the presence of advice. We develop its substitution, called a swapping lemma for regular languages, to demonstrate the non-regularity of a target language with advice. For context-free languages, we also present a similar form of swapping lemma, which serves as a technical tool to show that certain languages are not context-free with advice.<|reference_end|> | arxiv | @article{yamakami2008swapping,
title={Swapping Lemmas for Regular and Context-Free Languages},
author={Tomoyuki Yamakami},
journal={arXiv preprint arXiv:0808.4122},
year={2008},
archivePrefix={arXiv},
eprint={0808.4122},
primaryClass={cs.CC cs.CL cs.FL}
} | yamakami2008swapping |
arxiv-4683 | 0808.4133 | Tableau-based decision procedure for the multi-agent epistemic logic with operators of common and distributed knowledge | <|reference_start|>Tableau-based decision procedure for the multi-agent epistemic logic with operators of common and distributed knowledge: We develop an incremental-tableau-based decision procedure for the multi-agent epistemic logic MAEL(CD) (aka S5_n (CD)), whose language contains operators of individual knowledge for a finite set Ag of agents, as well as operators of distributed and common knowledge among all agents in Ag. Our tableau procedure works in (deterministic) exponential time, thus establishing an upper bound for MAEL(CD)-satisfiability that matches the (implicit) lower-bound known from earlier results, which implies ExpTime-completeness of MAEL(CD)-satisfiability. Therefore, our procedure provides a complexity-optimal algorithm for checking MAEL(CD)-satisfiability, which, however, in most cases is much more efficient. We prove soundness and completeness of the procedure, and illustrate it with an example.<|reference_end|> | arxiv | @article{goranko2008tableau-based,
title={Tableau-based decision procedure for the multi-agent epistemic logic
with operators of common and distributed knowledge},
  author={Valentin Goranko and Dmitry Shkatov},
journal={arXiv preprint arXiv:0808.4133},
year={2008},
doi={10.1109/SEFM.2008.27},
archivePrefix={arXiv},
eprint={0808.4133},
primaryClass={cs.LO cs.MA}
} | goranko2008tableau-based |
arxiv-4684 | 0808.4134 | Spectral Sparsification of Graphs | <|reference_start|>Spectral Sparsification of Graphs: We introduce a new notion of graph sparsificaiton based on spectral similarity of graph Laplacians: spectral sparsification requires that the Laplacian quadratic form of the sparsifier approximate that of the original. This is equivalent to saying that the Laplacian of the sparsifier is a good preconditioner for the Laplacian of the original. We prove that every graph has a spectral sparsifier of nearly linear size. Moreover, we present an algorithm that produces spectral sparsifiers in time $\softO{m}$, where $m$ is the number of edges in the original graph. This construction is a key component of a nearly-linear time algorithm for solving linear equations in diagonally-dominant matrcies. Our sparsification algorithm makes use of a nearly-linear time algorithm for graph partitioning that satisfies a strong guarantee: if the partition it outputs is very unbalanced, then the larger part is contained in a subgraph of high conductance.<|reference_end|> | arxiv | @article{spielman2008spectral,
title={Spectral Sparsification of Graphs},
author={Daniel A. Spielman and Shang-Hua Teng},
journal={arXiv preprint arXiv:0808.4134},
year={2008},
archivePrefix={arXiv},
eprint={0808.4134},
primaryClass={cs.DS cs.DM}
} | spielman2008spectral |
arxiv-4685 | 0808.4135 | Achieving the Empirical Capacity Using Feedback Part I: Memoryless Additive Models | <|reference_start|>Achieving the Empirical Capacity Using Feedback Part I: Memoryless Additive Models: We address the problem of universal communications over an unknown channel with an instantaneous noiseless feedback, and show how rates corresponding to the empirical behavior of the channel can be attained, although no rate can be guaranteed in advance. First, we consider a discrete modulo-additive channel with alphabet $\mathcal{X}$, where the noise sequence $Z^n$ is arbitrary and unknown and may causally depend on the transmitted and received sequences and on the encoder's message, possibly in an adversarial fashion. Although the classical capacity of this channel is zero, we show that rates approaching the empirical capacity $\log|\mathcal{X}|-H_{emp}(Z^n)$ can be universally attained, where $H_{emp}(Z^n)$ is the empirical entropy of $Z^n$. For the more general setting where the channel can map its input to an output in an arbitrary unknown fashion subject only to causality, we model the empirical channel actions as the modulo-addition of a realized noise sequence, and show that the same result applies if common randomness is available. The results are proved constructively, by providing a simple sequential transmission scheme approaching the empirical capacity. In part II of this work we demonstrate how even higher rates can be attained by using more elaborate models for channel actions, and by utilizing possible empirical dependencies in its behavior.<|reference_end|> | arxiv | @article{shayevitz2008achieving,
title={Achieving the Empirical Capacity Using Feedback Part I: Memoryless
Additive Models},
author={Ofer Shayevitz and Meir Feder},
journal={arXiv preprint arXiv:0808.4135},
year={2008},
archivePrefix={arXiv},
eprint={0808.4135},
primaryClass={cs.IT math.IT}
} | shayevitz2008achieving |
arxiv-4686 | 0808.4146 | Dynamic Connectivity in ALOHA Ad Hoc Networks | <|reference_start|>Dynamic Connectivity in ALOHA Ad Hoc Networks: In a wireless network the set of transmitting nodes changes frequently because of the MAC scheduler and the traffic load. Previously, connectivity in wireless networks was analyzed using static geometric graphs, and as we show leads to an overly constrained design criterion. The dynamic nature of the transmitting set introduces additional randomness in a wireless system that improves the connectivity, and this additional randomness is not captured by a static connectivity graph. In this paper, we consider an ad hoc network with half-duplex radios that uses multihop routing and slotted ALOHA for the MAC contention and introduce a random dynamic multi-digraph to model its connectivity. We first provide analytical results about the degree distribution of the graph. Next, defining the path formation time as the minimum time required for a causal path to form between the source and destination on the dynamic graph, we derive the distributional properties of the connection delay using techniques from first-passage percolation and epidemic processes. We consider the giant component of the network formed when communication is noise-limited (by neglecting interference). Then, in the presence of interference, we prove that the delay scales linearly with the source-destination distance on this giant component. We also provide simulation results to support the theoretical results.<|reference_end|> | arxiv | @article{ganti2008dynamic,
title={Dynamic Connectivity in ALOHA Ad Hoc Networks},
author={RadhaKrishna Ganti and Martin Haenggi},
journal={arXiv preprint arXiv:0808.4146},
year={2008},
archivePrefix={arXiv},
eprint={0808.4146},
primaryClass={cs.IT cs.NI math.IT math.PR}
} | ganti2008dynamic |
arxiv-4687 | 0808.4156 | Rate-Distortion via Markov Chain Monte Carlo | <|reference_start|>Rate-Distortion via Markov Chain Monte Carlo: We propose an approach to lossy source coding, utilizing ideas from Gibbs sampling, simulated annealing, and Markov Chain Monte Carlo (MCMC). The idea is to sample a reconstruction sequence from a Boltzmann distribution associated with an energy function that incorporates the distortion between the source and reconstruction, the compressibility of the reconstruction, and the point sought on the rate-distortion curve. To sample from this distribution, we use a `heat bath algorithm': Starting from an initial candidate reconstruction (say the original source sequence), at every iteration, an index i is chosen and the i-th sequence component is replaced by drawing from the conditional probability distribution for that component given all the rest. At the end of this process, the encoder conveys the reconstruction to the decoder using universal lossless compression. The complexity of each iteration is independent of the sequence length and only linearly dependent on a certain context parameter (which grows sub-logarithmically with the sequence length). We show that the proposed algorithms achieve optimum rate-distortion performance in the limits of large number of iterations, and sequence length, when employed on any stationary ergodic source. Experimentation shows promising initial results. Employing our lossy compressors on noisy data, with appropriately chosen distortion measure and level, followed by a simple de-randomization operation, results in a family of denoisers that compares favorably (both theoretically and in practice) with other MCMC-based schemes, and with the Discrete Universal Denoiser (DUDE).<|reference_end|> | arxiv | @article{jalali2008rate-distortion,
title={Rate-Distortion via Markov Chain Monte Carlo},
  author={Shirin Jalali and Tsachy Weissman},
journal={arXiv preprint arXiv:0808.4156},
year={2008},
doi={10.1109/ISIT.2008.4595107},
archivePrefix={arXiv},
eprint={0808.4156},
primaryClass={cs.IT math.IT}
} | jalali2008rate-distortion |
arxiv-4688 | 0808.4160 | Using Relative Entropy to Find Optimal Approximations: an Application to Simple Fluids | <|reference_start|>Using Relative Entropy to Find Optimal Approximations: an Application to Simple Fluids: We develop a maximum relative entropy formalism to generate optimal approximations to probability distributions. The central results consist in (a) justifying the use of relative entropy as the uniquely natural criterion to select a preferred approximation from within a family of trial parameterized distributions, and (b) to obtain the optimal approximation by marginalizing over parameters using the method of maximum entropy and information geometry. As an illustration we apply our method to simple fluids. The "exact" canonical distribution is approximated by that of a fluid of hard spheres. The proposed method first determines the preferred value of the hard-sphere diameter, and then obtains an optimal hard-sphere approximation by a suitably weighed average over different hard-sphere diameters. This leads to a considerable improvement in accounting for the soft-core nature of the interatomic potential. As a numerical demonstration, the radial distribution function and the equation of state for a Lennard-Jones fluid (argon) are compared with results from molecular dynamics simulations.<|reference_end|> | arxiv | @article{tseng2008using,
title={Using Relative Entropy to Find Optimal Approximations: an Application to
Simple Fluids},
author={Chih-Yuan Tseng and Ariel Caticha},
journal={Physica A387, 6759 (2008)},
year={2008},
doi={10.1016/j.physa.2008.08.035},
archivePrefix={arXiv},
eprint={0808.4160},
primaryClass={cond-mat.stat-mech cs.IT math.IT math.PR physics.data-an}
} | tseng2008using |
arxiv-4689 | 0809.0009 | Distributed Parameter Estimation in Sensor Networks: Nonlinear Observation Models and Imperfect Communication | <|reference_start|>Distributed Parameter Estimation in Sensor Networks: Nonlinear Observation Models and Imperfect Communication: The paper studies distributed static parameter (vector) estimation in sensor networks with nonlinear observation models and noisy inter-sensor communication. It introduces \emph{separably estimable} observation models that generalize the observability condition in linear centralized estimation to nonlinear distributed estimation. It studies two distributed estimation algorithms in separably estimable models, the $\mathcal{NU}$ (with its linear counterpart $\mathcal{LU}$) and the $\mathcal{NLU}$. Their update rule combines a \emph{consensus} step (where each sensor updates the state by weight averaging it with its neighbors' states) and an \emph{innovation} step (where each sensor processes its local current observation.) This makes the three algorithms of the \textit{consensus + innovations} type, very different from traditional consensus. The paper proves consistency (all sensors reach consensus almost surely and converge to the true parameter value,) efficiency, and asymptotic unbiasedness. For $\mathcal{LU}$ and $\mathcal{NU}$, it proves asymptotic normality and provides convergence rate guarantees. The three algorithms are characterized by appropriately chosen decaying weight sequences. Algorithms $\mathcal{LU}$ and $\mathcal{NU}$ are analyzed in the framework of stochastic approximation theory; algorithm $\mathcal{NLU}$ exhibits mixed time-scale behavior and biased perturbations, and its analysis requires a different approach that is developed in the paper.<|reference_end|> | arxiv | @article{kar2008distributed,
title={Distributed Parameter Estimation in Sensor Networks: Nonlinear
Observation Models and Imperfect Communication},
author={Kar, Soummya and Moura, Jose M. F. and Ramanan, Kavita},
journal={arXiv preprint arXiv:0809.0009},
year={2008},
doi={10.1109/TIT.2012.219450},
archivePrefix={arXiv},
eprint={0809.0009},
primaryClass={cs.MA cs.IT math.IT}
} | kar2008distributed |
arxiv-4690 | 0809.0016 | An overview of the transmission capacity of wireless networks | <|reference_start|>An overview of the transmission capacity of wireless networks: This paper surveys and unifies a number of recent contributions that have collectively developed a metric for decentralized wireless network analysis known as transmission capacity. Although it is notoriously difficult to derive general end-to-end capacity results for multi-terminal or \adhoc networks, the transmission capacity (TC) framework allows for quantification of achievable single-hop rates by focusing on a simplified physical/MAC-layer model. By using stochastic geometry to quantify the multi-user interference in the network, the relationship between the optimal spatial density and success probability of transmissions in the network can be determined, and expressed -- often fairly simply -- in terms of the key network parameters. The basic model and analytical tools are first discussed and applied to a simple network with path loss only and we present tight upper and lower bounds on transmission capacity (via lower and upper bounds on outage probability). We then introduce random channels (fading/shadowing) and give TC and outage approximations for an arbitrary channel distribution, as well as exact results for the special cases of Rayleigh and Nakagami fading. We then apply these results to show how TC can be used to better understand scheduling, power control, and the deployment of multiple antennas in a decentralized network. The paper closes by discussing shortcomings in the model as well as future research directions.<|reference_end|> | arxiv | @article{weber2008an,
title={An overview of the transmission capacity of wireless networks},
author={Weber, Steven and Andrews, Jeffrey G. and Jindal, Nihar},
journal={arXiv preprint arXiv:0809.0016},
year={2008},
doi={10.1109/TCOMM.2010.093010.090478},
archivePrefix={arXiv},
eprint={0809.0016},
primaryClass={cs.IT math.IT}
} | weber2008an |
arxiv-4691 | 0809.0024 | Game Theory with Costly Computation | <|reference_start|>Game Theory with Costly Computation: We develop a general game-theoretic framework for reasoning about strategic agents performing possibly costly computation. In this framework, many traditional game-theoretic results (such as the existence of a Nash equilibrium) no longer hold. Nevertheless, we can use the framework to provide psychologically appealing explanations to observed behavior in well-studied games (such as finitely repeated prisoner's dilemma and rock-paper-scissors). Furthermore, we provide natural conditions on games sufficient to guarantee that equilibria exist. As an application of this framework, we consider a notion of game-theoretic implementation of mediators in computational games. We show that a special case of this notion is equivalent to a variant of the traditional cryptographic definition of protocol security; this result shows that, when taking computation into account, the two approaches used for dealing with "deviating" players in two different communities -- Nash equilibrium in game theory and zero-knowledge "simulation" in cryptography -- are intimately related.<|reference_end|> | arxiv | @article{halpern2008game,
title={Game Theory with Costly Computation},
author={Halpern, Joseph Y. and Pass, Rafael},
journal={arXiv preprint arXiv:0809.0024},
year={2008},
archivePrefix={arXiv},
eprint={0809.0024},
primaryClass={cs.GT cs.CR}
} | halpern2008game |
arxiv-4692 | 0809.0032 | A Variational Inference Framework for Soft-In-Soft-Out Detection in Multiple Access Channels | <|reference_start|>A Variational Inference Framework for Soft-In-Soft-Out Detection in Multiple Access Channels: We propose a unified framework for deriving and studying soft-in-soft-out (SISO) detection in interference channels using the concept of variational inference. The proposed framework may be used in multiple-access interference (MAI), inter-symbol interference (ISI), and multiple-input multiple-outpu (MIMO) channels. Without loss of generality, we will focus our attention on turbo multiuser detection, to facilitate a more concrete discussion. It is shown that, with some loss of optimality, variational inference avoids the exponential complexity of a posteriori probability (APP) detection by optimizing a closely-related, but much more manageable, objective function called variational free energy. In addition to its systematic appeal, there are several other advantages to this viewpoint. First of all, it provides unified and rigorous justifications for numerous detectors that were proposed on radically different grounds, and facilitates convenient joint detection and decoding (utilizing the turbo principle) when error-control codes are incorporated. Secondly, efficient joint parameter estimation and data detection is possible via the variational expectation maximization (EM) algorithm, such that the detrimental effect of inaccurate channel knowledge at the receiver may be dealt with systematically. We are also able to extend BPSK-based SISO detection schemes to arbitrary square QAM constellations in a rigorous manner using a variational argument.<|reference_end|> | arxiv | @article{lin2008a,
title={A Variational Inference Framework for Soft-In-Soft-Out Detection in
Multiple Access Channels},
author={Lin, D. D. and Lim, T. J.},
journal={arXiv preprint arXiv:0809.0032},
year={2008},
archivePrefix={arXiv},
eprint={0809.0032},
primaryClass={cs.IT cs.LG math.IT}
} | lin2008a |
arxiv-4693 | 0809.0060 | Model Checking Probabilistic Timed Automata with One or Two Clocks | <|reference_start|>Model Checking Probabilistic Timed Automata with One or Two Clocks: Probabilistic timed automata are an extension of timed automata with discrete probability distributions. We consider model-checking algorithms for the subclasses of probabilistic timed automata which have one or two clocks. Firstly, we show that PCTL probabilistic model-checking problems (such as determining whether a set of target states can be reached with probability at least 0.99 regardless of how nondeterminism is resolved) are PTIME-complete for one-clock probabilistic timed automata, and are EXPTIME-complete for probabilistic timed automata with two clocks. Secondly, we show that, for one-clock probabilistic timed automata, the model-checking problem for the probabilistic timed temporal logic PCTL is EXPTIME-complete. However, the model-checking problem for the subclass of PCTL which does not permit both punctual timing bounds, which require the occurrence of an event at an exact time point, and comparisons with probability bounds other than 0 or 1, is PTIME-complete for one-clock probabilistic timed automata.<|reference_end|> | arxiv | @article{jurdzinski2008model,
title={Model Checking Probabilistic Timed Automata with One or Two Clocks},
author={Jurdzinski, Marcin and Laroussinie, Francois and Sproston, Jeremy},
journal={Logical Methods in Computer Science},
volume={4},
number={3},
year={2008},
doi={10.2168/LMCS-4(3:12)2008},
archivePrefix={arXiv},
eprint={0809.0060},
primaryClass={cs.LO}
} | jurdzinski2008model |
arxiv-4694 | 0809.0062 | The Stochastic Logarithmic Norm for Stability Analysis of Stochastic Differential Equations | <|reference_start|>The Stochastic Logarithmic Norm for Stability Analysis of Stochastic Differential Equations: To analyze the stability of It\^o stochastic differential equations with multiplicative noise, we introduce the stochastic logarithmic norm. The logarithmic norm was originally introduced by G. Dahlquist in 1958 as a tool to study the growth of solutions to ordinary differential equations and for estimating the error growth in discretization methods for their approximate solutions. We extend the concept to the stability analysis of It\^o stochastic differential equations with multiplicative noise. Stability estimates for linear It\^o SDEs using the one, two and $\infty$-norms in the $l$-th mean, where $1 \leq l < \infty $, are derived and the application of the stochastic logarithmic norm is illustrated with examples.<|reference_end|> | arxiv | @article{ahmad2008the,
title={The Stochastic Logarithmic Norm for Stability Analysis of Stochastic
Differential Equations},
author={Ahmad, Sk. Safique and Rajan, Nagalinga and Raha, Soumyendu},
journal={arXiv preprint arXiv:0809.0062},
year={2008},
archivePrefix={arXiv},
eprint={0809.0062},
primaryClass={cs.NA}
} | ahmad2008the |
arxiv-4695 | 0809.0063 | Simultaneous Modular Reduction and Kronecker Substitution for Small Finite Fields | <|reference_start|>Simultaneous Modular Reduction and Kronecker Substitution for Small Finite Fields: We present algorithms to perform modular polynomial multiplication or modular dot product efficiently in a single machine word. We pack polynomials into integers and perform several modular operations with machine integer or floating point arithmetic. The modular polynomials are converted into integers using Kronecker substitution (evaluation at a sufficiently large integer). With some control on the sizes and degrees, arithmetic operations on the polynomials can be performed directly with machine integers or floating point numbers and the number of conversions can be reduced. We also present efficient ways to recover the modular values of the coefficients. This leads to practical gains of quite large constant factors for polynomial multiplication, prime field linear algebra and small extension field arithmetic.<|reference_end|> | arxiv | @article{dumas2008simultaneous,
title={Simultaneous Modular Reduction and {Kronecker} Substitution for Small
Finite Fields},
author={Dumas, Jean-Guillaume and Fousse, Laurent and Salvy, Bruno},
journal={arXiv preprint arXiv:0809.0063},
year={2008},
doi={10.1016/j.jsc.2010.08.015},
archivePrefix={arXiv},
eprint={0809.0063},
primaryClass={cs.SC math.NT}
} | dumas2008simultaneous |
arxiv-4696 | 0809.0070 | Underwater Acoustic Networks: Channel Models and Network Coding based Lower Bound to Transmission Power for Multicast | <|reference_start|>Underwater Acoustic Networks: Channel Models and Network Coding based Lower Bound to Transmission Power for Multicast: The goal of this paper is two-fold. First, to establish a tractable model for the underwater acoustic channel useful for network optimization in terms of convexity. Second, to propose a network coding based lower bound for transmission power in underwater acoustic networks, and compare this bound to the performance of several network layer schemes. The underwater acoustic channel is characterized by a path loss that depends strongly on transmission distance and signal frequency. The exact relationship among power, transmission band, distance and capacity for the Gaussian noise scenario is a complicated one. We provide a closed-form approximate model for 1) transmission power and 2) optimal frequency band to use, as functions of distance and capacity. The model is obtained through numerical evaluation of analytical results that take into account physical models of acoustic propagation loss and ambient noise. Network coding is applied to determine a lower bound to transmission power for a multicast scenario, for a variety of multicast data rates and transmission distances of interest for practical systems, exploiting physical properties of the underwater acoustic channel. The results quantify the performance gap in transmission power between a variety of routing and network coding schemes and the network coding based lower bound. We illustrate results numerically for different network scenarios.<|reference_end|> | arxiv | @article{lucani2008underwater,
title={Underwater Acoustic Networks: Channel Models and Network Coding based
Lower Bound to Transmission Power for Multicast},
author={Lucani, Daniel E. and M{\'e}dard, Muriel and Stojanovic, Milica},
journal={arXiv preprint arXiv:0809.0070},
year={2008},
archivePrefix={arXiv},
eprint={0809.0070},
primaryClass={cs.IT math.IT}
} | lucani2008underwater |
arxiv-4697 | 0809.0073 | Languages recognized with unbounded error by quantum finite automata | <|reference_start|>Languages recognized with unbounded error by quantum finite automata: This paper has been superseded by arXiv:1007.3624<|reference_end|> | arxiv | @article{yakaryilmaz2008languages,
title={Languages recognized with unbounded error by quantum finite automata},
author={Yakaryilmaz, Abuzer and Say, A. C. Cem},
journal={arXiv preprint arXiv:0809.0073},
year={2008},
archivePrefix={arXiv},
eprint={0809.0073},
primaryClass={quant-ph cs.CC}
} | yakaryilmaz2008languages |
arxiv-4698 | 0809.0091 | A functional view of upper bounds on codes | <|reference_start|>A functional view of upper bounds on codes: Functional and linear-algebraic approaches to the Delsarte problem of upper bounds on codes are discussed. We show that Christoffel-Darboux kernels and Levenshtein polynomials related to them arise as stationary points of the moment functionals of some distributions. We also show that they can be derived as eigenfunctions of the Jacobi operator. This motivates the choice of polynomials used to derive linear programming upper bounds on codes in homogeneous spaces.<|reference_end|> | arxiv | @article{barg2008a,
title={A functional view of upper bounds on codes},
author={Barg, Alexander and Nogin, Dmitry},
journal={``Coding and Cryptography,'' Proceedings of the First International
Workshop, Wuyi Mountain, Fujian, China, 11--15 June 2007, edited by Yongqing
Li et al., World Scientific, 2008, pp. 15--24},
year={2008},
archivePrefix={arXiv},
eprint={0809.0091},
primaryClass={cs.IT math.IT}
} | barg2008a |
arxiv-4699 | 0809.0099 | Degrees of Freedom of the $K$ User $M \times N$ MIMO Interference Channel | <|reference_start|>Degrees of Freedom of the $K$ User $M \times N$ MIMO Interference Channel: We provide innerbound and outerbound for the total number of degrees of freedom of the $K$ user multiple input multiple output (MIMO) Gaussian interference channel with $M$ antennas at each transmitter and $N$ antennas at each receiver if the channel coefficients are time-varying and drawn from a continuous distribution. The bounds are tight when the ratio $\frac{\max(M,N)}{\min(M,N)}=R$ is equal to an integer. For this case, we show that the total number of degrees of freedom is equal to $\min(M,N)K$ if $K \leq R$ and $\min(M,N)\frac{R}{R+1}K$ if $K > R$. Achievability is based on interference alignment. We also provide examples where using interference alignment combined with zero forcing can achieve more degrees of freedom than merely zero forcing for some MIMO interference channels with constant channel coefficients.<|reference_end|> | arxiv | @article{gou2008degrees,
title={Degrees of Freedom of the {$K$} User {$M \times N$} {MIMO} Interference
Channel},
author={Gou, Tiangao and Jafar, Syed A.},
journal={arXiv preprint arXiv:0809.0099},
year={2008},
archivePrefix={arXiv},
eprint={0809.0099},
primaryClass={cs.IT math.IT}
} | gou2008degrees |
arxiv-4700 | 0809.0103 | On the nature of long-range letter correlations in texts | <|reference_start|>On the nature of long-range letter correlations in texts: The origin of long-range letter correlations in natural texts is studied using random walk analysis and Jensen-Shannon divergence. It is concluded that they result from slow variations in letter frequency distribution, which are a consequence of slow variations in lexical composition within the text. These correlations are preserved by random letter shuffling within a moving window. As such, they do reflect structural properties of the text, but in a very indirect manner.<|reference_end|> | arxiv | @article{manin2008on,
title={On the nature of long-range letter correlations in texts},
author={Manin, Dmitrii Y.},
journal={arXiv preprint arXiv:0809.0103},
year={2008},
archivePrefix={arXiv},
eprint={0809.0103},
primaryClass={cs.CL cs.IT math.IT}
} | manin2008on |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.