corpus_id
stringlengths 7
12
| paper_id
stringlengths 9
16
| title
stringlengths 1
261
| abstract
stringlengths 70
4.02k
| source
stringclasses 1
value | bibtex
stringlengths 208
20.9k
| citation_key
stringlengths 6
100
|
---|---|---|---|---|---|---|
arxiv-4101 | 0806.3246 | Broadcasting with side information | <|reference_start|>Broadcasting with side information: A sender holds a word x consisting of n blocks x_i, each of t bits, and wishes to broadcast a codeword to m receivers, R_1,...,R_m. Each receiver R_i is interested in one block, and has prior side information consisting of some subset of the other blocks. Let \beta_t be the minimum number of bits that has to be transmitted when each block is of length t, and let \beta be the limit \beta = \lim_{t \to \infty} \beta_t/t. In words, \beta is the average communication cost per bit in each block (for long blocks). Finding the coding rate \beta, for such an informed broadcast setting, generalizes several coding theoretic parameters related to Informed Source Coding on Demand, Index Coding and Network Coding. In this work we show that usage of large data blocks may strictly improve upon the trivial encoding which treats each bit in the block independently. To this end, we provide general bounds on \beta_t, and prove that for any constant C there is an explicit broadcast setting in which \beta = 2 but \beta_1 > C. One of these examples answers a question of Lubetzky and Stav. In addition, we provide examples with the following counterintuitive direct-sum phenomena. Consider a union of several mutually independent broadcast settings. The optimal code for the combined setting may yield a significant saving in communication over concatenating optimal encodings for the individual settings. This result also provides new non-linear coding schemes which improve upon the largest known gap between linear and non-linear Network Coding, thus improving the results of Dougherty, Freiling, and Zeger. The proofs use ideas related to Witsenhausen's rate, OR graph products, colorings of Cayley graphs and the chromatic numbers of Kneser graphs.<|reference_end|> | arxiv | @article{alon2008broadcasting,
title={Broadcasting with side information},
author={Noga Alon and Avinatan Hasidim and Eyal Lubetzky and Uri Stav and Amit Weinstein},
journal={arXiv preprint arXiv:0806.3246},
year={2008},
archivePrefix={arXiv},
eprint={0806.3246},
primaryClass={cs.IT math.IT}
} | alon2008broadcasting |
arxiv-4102 | 0806.3258 | Local Search Heuristics For The Multidimensional Assignment Problem | <|reference_start|>Local Search Heuristics For The Multidimensional Assignment Problem: The Multidimensional Assignment Problem (MAP) (abbreviated s-AP in the case of s dimensions) is an extension of the well-known assignment problem. The most studied case of MAP is 3-AP, though the problems with larger values of s also have a large number of applications. We consider several known neighborhoods, generalize them and propose some new ones. The heuristics are evaluated both theoretically and experimentally and dominating algorithms are selected. We also demonstrate a combination of two neighborhoods may yield a heuristics which is superior to both of its components.<|reference_end|> | arxiv | @article{gutin2008local,
title={Local Search Heuristics For The Multidimensional Assignment Problem},
author={Gregory Gutin and Daniel Karapetyan},
journal={Journal of Heuristics 17(3) (2011), 201--249},
year={2008},
doi={10.1007/s10732-010-9133-3},
archivePrefix={arXiv},
eprint={0806.3258},
primaryClass={cs.DS}
} | gutin2008local |
arxiv-4103 | 0806.3277 | On McMillan's theorem about uniquely decipherable codes | <|reference_start|>On McMillan's theorem about uniquely decipherable codes: Karush's proof of McMillan's theorem is recast as an argument involving polynomials with non-commuting indeterminates certain evaluations of which yield the Kraft sums of codes, proving a strengthened version of McMillan's theorem.<|reference_end|> | arxiv | @article{foldes2008on,
title={On McMillan's theorem about uniquely decipherable codes},
author={Stephan Foldes},
journal={arXiv preprint arXiv:0806.3277},
year={2008},
archivePrefix={arXiv},
eprint={0806.3277},
primaryClass={math.CO cs.IT math.IT}
} | foldes2008on |
arxiv-4104 | 0806.3284 | Optimal hash functions for approximate closest pairs on the n-cube | <|reference_start|>Optimal hash functions for approximate closest pairs on the n-cube: One way to find closest pairs in large datasets is to use hash functions. In recent years locality-sensitive hash functions for various metrics have been given: projecting an n-cube onto k bits is simple hash function that performs well. In this paper we investigate alternatives to projection. For various parameters hash functions given by complete decoding algorithms for codes work better, and asymptotically random codes perform better than projection.<|reference_end|> | arxiv | @article{gordon2008optimal,
title={Optimal hash functions for approximate closest pairs on the n-cube},
author={Daniel M. Gordon and Victor Miller and Peter Ostapenko},
journal={arXiv preprint arXiv:0806.3284},
year={2008},
archivePrefix={arXiv},
eprint={0806.3284},
primaryClass={cs.IT math.IT}
} | gordon2008optimal |
arxiv-4105 | 0806.3301 | Fast computation of the median by successive binning | <|reference_start|>Fast computation of the median by successive binning: This paper describes a new median algorithm and a median approximation algorithm. The former has O(n) average running time and the latter has O(n) worst-case running time. These algorithms are highly competitive with the standard algorithm when computing the median of a single data set, but are significantly faster in updating the median when more data is added.<|reference_end|> | arxiv | @article{tibshirani2008fast,
title={Fast computation of the median by successive binning},
author={Ryan J. Tibshirani},
journal={arXiv preprint arXiv:0806.3301},
year={2008},
archivePrefix={arXiv},
eprint={0806.3301},
primaryClass={stat.CO cs.DS stat.AP}
} | tibshirani2008fast |
arxiv-4106 | 0806.3317 | Differential Transmit Diversity Based on Quasi-Orthogonal Space-Time Block Code | <|reference_start|>Differential Transmit Diversity Based on Quasi-Orthogonal Space-Time Block Code: By using joint modulation and customized constellation set, we show that Quasi-Orthogonal Space-Time Block Code (QO-STBC) can be used to form a new differential space-time modulation (DSTM) scheme to provide full transmit diversity with non-coherent detection. Our new scheme can provide higher code rate than existing DSTM schemes based on Orthogonal STBC. It also has a lower decoding complexity than the other DSTM schemes, such as those based on Group Codes, because it only requires a joint detection of two complex symbols. We derive the design criteria for the customized constellation set and use them to construct a constellation set that provides a wide range of spectral efficiency with full diversity and maximum coding gain.<|reference_end|> | arxiv | @article{yuen2008differential,
title={Differential Transmit Diversity Based on Quasi-Orthogonal Space-Time
Block Code},
author={Chau Yuen and Yong Liang Guan and Tjeng Thiang Tjhung},
journal={Globecom 2004},
year={2008},
archivePrefix={arXiv},
eprint={0806.3317},
primaryClass={cs.IT math.IT}
} | yuen2008differential |
arxiv-4107 | 0806.3320 | Unitary Differential Space-Time Modulation with Joint Modulation | <|reference_start|>Unitary Differential Space-Time Modulation with Joint Modulation: We develop two new designs of unitary differential space-time modulation (DSTM) with low decoding complexity. Their decoder can be separated into a few parallel decoders, each of which has a decoding search space of less than sqrt(N) if the DSTM codebook contains N codewords. Both designs are based on the concept of joint modulation, which means that several information symbols are jointly modulated, unlike the conventional symbol-by-symbol modulation. The first design is based on Orthogonal Space-Time Block Code (O-STBC) with joint constellation constructed from spherical codes. The second design is based on Quasi-Orthogonal Space-Time Block Code (QO-STBC) with specially designed pair-wise constellation sets. Both the proposed unitary DSTM schemes have considerably lower decoding complexity than many prior DSTM schemes, including those based on Group Codes and Sp(2) which generally have a decoding search space of N for a codebook size of N codewords, and much better decoding performance than the existing O-STBC DSTM scheme. Between two designs, the proposed DSTM based on O-STBC generally has better decoding performance, while the proposed DSTM based on QO-STBC has lower decoding complexity when 8 transmit antennas.<|reference_end|> | arxiv | @article{yuen2008unitary,
title={Unitary Differential Space-Time Modulation with Joint Modulation},
author={Chau Yuen and Yong Liang Guan and T. T. Tjhung},
journal={arXiv preprint arXiv:0806.3320},
year={2008},
archivePrefix={arXiv},
eprint={0806.3320},
primaryClass={cs.IT math.IT}
} | yuen2008unitary |
arxiv-4108 | 0806.3321 | Achieving Near-Capacity at Low SNR on a Multiple-Antenna Multiple-User Channel | <|reference_start|>Achieving Near-Capacity at Low SNR on a Multiple-Antenna Multiple-User Channel: We analyze the sensitivity of the capacity of a multi-antenna multi-user system to the number of users being served. We show analytically that, for a given desired sum-rate, the extra power needed to serve a subset of the users at low SNR (signal-to-noise ratio) can be very small, and is generally much smaller than the extra power needed to serve the same subset at high SNR. The advantages of serving only subsets of the users are many: multi-user algorithms have lower complexity, reduced channel-state information requirements, and, often, better performance. We provide guidelines on how many users to serve to get near-capacity performance with low complexity. For example, we show how in an eight-antenna eight-user system we can serve only four users and still be approximately 2 dB from capacity at very low SNR.<|reference_end|> | arxiv | @article{yuen2008achieving,
title={Achieving Near-Capacity at Low SNR on a Multiple-Antenna Multiple-User
Channel},
author={Chau Yuen and Bertrand M. Hochwald},
journal={arXiv preprint arXiv:0806.3321},
year={2008},
archivePrefix={arXiv},
eprint={0806.3321},
primaryClass={cs.IT math.IT}
} | yuen2008achieving |
arxiv-4109 | 0806.3322 | Power-Balanced Orthogonal Space-Time Block Code | <|reference_start|>Power-Balanced Orthogonal Space-Time Block Code: In this paper, we propose two new systematic ways to construct amicable orthogonal designs (AOD), with an aim to facilitate the construction of power-balanced orthogonal spacetime block codes (O-STBC) with favorable practical attributes. We also show that an AOD can be constructed from an Amicable Family (AF), and such a construction is crucial for achieving a power-balanced O-STBC. In addition, we develop design guidelines on how to select the "type" parameter of an AOD so that the resultant O-STBC will have better power-distribution and code-coefficient attributes. Among the new O-STBCs obtained, one is shown to be optimal in terms of power distribution attributes. In addition, one of the proposed construction methods is shown to generalize some other construction methods proposed in the literature.<|reference_end|> | arxiv | @article{yuen2008power-balanced,
title={Power-Balanced Orthogonal Space-Time Block Code},
author={Chau Yuen and Yong Liang Guan and Tjeng Thiang Tjhung},
journal={arXiv preprint arXiv:0806.3322},
year={2008},
archivePrefix={arXiv},
eprint={0806.3322},
primaryClass={cs.IT math.IT}
} | yuen2008power-balanced |
arxiv-4110 | 0806.3324 | Optimizing Quasi-Orthogonal STBC Through Group-Constrained Linear Transformation | <|reference_start|>Optimizing Quasi-Orthogonal STBC Through Group-Constrained Linear Transformation: In this paper, we first derive the generic algebraic structure of a Quasi-Orthogonal STBC (QO-STBC). Next we propose Group-Constrained Linear Transformation (GCLT) as a means to optimize the diversity and coding gains of a QO-STBC with square or rectangular QAM constellations. Compared with QO-STBC with constellation rotation (CR), we show that QO-STBC with GCLT requires only half the number of symbols for joint detection, hence lower maximum-likelihood decoding complexity. We also derive analytically the optimum GCLT parameters for QO-STBC with square QAM constellation. The optimized QO-STBCs with GCLT are able to achieve full transmit diversity, and have negligible performance loss compared with QO-STBCs with CR at the same code rate.<|reference_end|> | arxiv | @article{yuen2008optimizing,
title={Optimizing Quasi-Orthogonal STBC Through Group-Constrained Linear
Transformation},
author={Chau Yuen and Yong Liang Guan and Tjeng Thiang Tjhung},
journal={arXiv preprint arXiv:0806.3324},
year={2008},
archivePrefix={arXiv},
eprint={0806.3324},
primaryClass={cs.IT math.IT}
} | yuen2008optimizing |
arxiv-4111 | 0806.3325 | On the Search for High-Rate Quasi-Orthogonal Space-Time Block Code | <|reference_start|>On the Search for High-Rate Quasi-Orthogonal Space-Time Block Code: A Quasi-Orthogonal Space-Time Block Code (QO-STBC) is attractive because it achieves higher code rate than Orthogonal STBC and lower decoding complexity than nonorthogonal STBC. In this paper, we first derive the algebraic structure of QO-STBC, then we apply it in a novel graph-based search algorithm to find high-rate QO-STBCs with code rates greater than 1. From the four-antenna codes found using this approach, it is found that the maximum code rate is limited to 5/4 with symbolwise diversity level of four, and 4 with symbolwise diversity level of two. The maximum likelihood decoding of these high-rate QO-STBCs can be performed on two separate sub-groups of symbols. The rate-5/4 codes are the first known QO-STBCs with code rate greater 1 that has full symbolwise diversity level.<|reference_end|> | arxiv | @article{yuen2008on,
title={On the Search for High-Rate Quasi-Orthogonal Space-Time Block Code},
author={Chau Yuen and Yong Liang Guan and Tjeng Thiang Tjhung},
journal={arXiv preprint arXiv:0806.3325},
year={2008},
archivePrefix={arXiv},
eprint={0806.3325},
primaryClass={cs.IT math.IT}
} | yuen2008on |
arxiv-4112 | 0806.3328 | Limited Feedback for Multi-Antenna Multi-user Communications with Generalized Multi-Unitary Decomposition | <|reference_start|>Limited Feedback for Multi-Antenna Multi-user Communications with Generalized Multi-Unitary Decomposition: In this paper, we propose a decomposition method called Generalized Multi-Unitary Decomposition (GMUD) which is useful in multi-user MIMO precoding. This decomposition transforms a complex matrix H into PRQ, where R is a special matrix whose first row contains only a non-zero user defined value at the left-most position, P and Q are a pair of unitary matrices. The major attraction of our proposed GMUD is we can obtain multiple solutions of P and Q >. With GMUD, we propose a precoding method for a MIMO multi-user system that does not require full channel state information (CSI) at the transmitter. The proposed precoding method uses the multiple unitary matrices property to compensate the inaccurate feedback information as the transmitter can steer the transmission beams of individual users such that the inter-user interference is kept minimum.<|reference_end|> | arxiv | @article{chua2008limited,
title={Limited Feedback for Multi-Antenna Multi-user Communications with
Generalized Multi-Unitary Decomposition},
author={Wee Seng Chua and Chau Yuen and Yong Liang Guan and Francois Chin},
journal={arXiv preprint arXiv:0806.3328},
year={2008},
archivePrefix={arXiv},
eprint={0806.3328},
primaryClass={cs.IT math.IT}
} | chua2008limited |
arxiv-4113 | 0806.3329 | Beamforming Matrix Quantization with Variable Feedback Rate | <|reference_start|>Beamforming Matrix Quantization with Variable Feedback Rate: We propose an improved beamforming matrix compression by Givens Rotation with the use of variable feedback rate. The variable feedback rate means that the number of bits used to represent the quantized beamforming matrix is based on the value of the matrix. Compared with the fixed feedback rate scheme, the proposed method has better performance without additional feedback bandwidth.<|reference_end|> | arxiv | @article{yuen2008beamforming,
title={Beamforming Matrix Quantization with Variable Feedback Rate},
author={Chau Yuen and Sumei Sun and Mel Meau Shin Ho},
journal={arXiv preprint arXiv:0806.3329},
year={2008},
archivePrefix={arXiv},
eprint={0806.3329},
primaryClass={cs.IT math.IT}
} | yuen2008beamforming |
arxiv-4114 | 0806.3332 | Compressed Sensing of Analog Signals in Shift-Invariant Spaces | <|reference_start|>Compressed Sensing of Analog Signals in Shift-Invariant Spaces: A traditional assumption underlying most data converters is that the signal should be sampled at a rate exceeding twice the highest frequency. This statement is based on a worst-case scenario in which the signal occupies the entire available bandwidth. In practice, many signals are sparse so that only part of the bandwidth is used. In this paper, we develop methods for low-rate sampling of continuous-time sparse signals in shift-invariant (SI) spaces, generated by m kernels with period T. We model sparsity by treating the case in which only k out of the m generators are active, however, we do not know which k are chosen. We show how to sample such signals at a rate much lower than m/T, which is the minimal sampling rate without exploiting sparsity. Our approach combines ideas from analog sampling in a subspace with a recently developed block diagram that converts an infinite set of sparse equations to a finite counterpart. Using these two components we formulate our problem within the framework of finite compressed sensing (CS) and then rely on algorithms developed in that context. The distinguishing feature of our results is that in contrast to standard CS, which treats finite-length vectors, we consider sampling of analog signals for which no underlying finite-dimensional model exists. The proposed framework allows to extend much of the recent literature on CS to the analog domain.<|reference_end|> | arxiv | @article{eldar2008compressed,
title={Compressed Sensing of Analog Signals in Shift-Invariant Spaces},
author={Yonina C. Eldar},
journal={arXiv preprint arXiv:0806.3332},
year={2008},
doi={10.1109/TSP.2009.2020750},
archivePrefix={arXiv},
eprint={0806.3332},
primaryClass={cs.IT math.IT}
} | eldar2008compressed |
arxiv-4115 | 0806.3437 | Quantum and Randomized Lower Bounds for Local Search on Vertex-Transitive Graphs | <|reference_start|>Quantum and Randomized Lower Bounds for Local Search on Vertex-Transitive Graphs: We study the problem of \emph{local search} on a graph. Given a real-valued black-box function f on the graph's vertices, this is the problem of determining a local minimum of f--a vertex v for which f(v) is no more than f evaluated at any of v's neighbors. In 1983, Aldous gave the first strong lower bounds for the problem, showing that any randomized algorithm requires $\Omega(2^{n/2 - o(1)})$ queries to determine a local minima on the n-dimensional hypercube. The next major step forward was not until 2004 when Aaronson, introducing a new method for query complexity bounds, both strengthened this lower bound to $\Omega(2^{n/2}/n^2)$ and gave an analogous lower bound on the quantum query complexity. While these bounds are very strong, they are known only for narrow families of graphs (hypercubes and grids). We show how to generalize Aaronson's techniques in order to give randomized (and quantum) lower bounds on the query complexity of local search for the family of vertex-transitive graphs. In particular, we show that for any vertex-transitive graph G of N vertices and diameter d, the randomized and quantum query complexities for local search on G are $\Omega(N^{1/2}/d\log N)$ and $\Omega(N^{1/4}/\sqrt{d\log N})$, respectively.<|reference_end|> | arxiv | @article{dinh2008quantum,
title={Quantum and Randomized Lower Bounds for Local Search on
Vertex-Transitive Graphs},
author={Hang Dinh and Alexander Russell},
journal={arXiv preprint arXiv:0806.3437},
year={2008},
archivePrefix={arXiv},
eprint={0806.3437},
primaryClass={quant-ph cs.DS}
} | dinh2008quantum |
arxiv-4116 | 0806.3456 | On Computing the Vertex Centroid of a Polyhedron | <|reference_start|>On Computing the Vertex Centroid of a Polyhedron: Let $\mathcal{P}$ be an $\mathcal{H}$-polytope in $\mathbb{R}^d$ with vertex set $V$. The vertex centroid is defined as the average of the vertices in $V$. We prove that computing the vertex centroid of an $\mathcal{H}$-polytope is #P-hard. Moreover, we show that even just checking whether the vertex centroid lies in a given halfspace is already #P-hard for $\mathcal{H}$-polytopes. We also consider the problem of approximating the vertex centroid by finding a point within an $\epsilon$ distance from it and prove this problem to be #P-easy by showing that given an oracle for counting the number of vertices of an $\mathcal{H}$-polytope, one can approximate the vertex centroid in polynomial time. We also show that any algorithm approximating the vertex centroid to \emph{any} ``sufficiently'' non-trivial (for example constant) distance, can be used to construct a fully polynomial approximation scheme for approximating the centroid and also an output-sensitive polynomial algorithm for the Vertex Enumeration problem. Finally, we show that for unbounded polyhedra the vertex centroid can not be approximated to a distance of $d^{{1/2}-\delta}$ for any fixed constant $\delta>0$.<|reference_end|> | arxiv | @article{elbassioni2008on,
title={On Computing the Vertex Centroid of a Polyhedron},
author={Khaled Elbassioni and Hans Raj Tiwary},
journal={arXiv preprint arXiv:0806.3456},
year={2008},
archivePrefix={arXiv},
eprint={0806.3456},
primaryClass={cs.CG}
} | elbassioni2008on |
arxiv-4117 | 0806.3471 | Stabilizing Tiny Interaction Protocols | <|reference_start|>Stabilizing Tiny Interaction Protocols: In this paper we present the self-stabilizing implementation of a class of token based algorithms. In the current work we only consider interactions between weak nodes. They are uniform, they do not have unique identifiers, are static and their interactions are restricted to a subset of nodes called neighbours. While interacting, a pair of neighbouring nodes may create mobile agents (that materialize in the current work the token abstraction) that perform traversals of the network and accelerate the system stabilization. In this work we only explore the power of oblivious stateless agents. Our work shows that the agent paradigm is an elegant distributed tool for achieving self-stabilization in Tiny Interaction Protocols (TIP). Nevertheless, in order to reach the full power of classical self-stabilizing algorithms more complex classes of agents have to be considered (e.g. agents with memory, identifiers or communication skills). Interestingly, our work proposes for the first time a model that unifies the recent studies in mobile robots(agents) that evolve in a discrete space and the already established population protocols paradigm.<|reference_end|> | arxiv | @article{canepa2008stabilizing,
title={Stabilizing Tiny Interaction Protocols},
author={Davide Canepa and Maria Gradinariu Potop-Butucaru},
journal={arXiv preprint arXiv:0806.3471},
year={2008},
archivePrefix={arXiv},
eprint={0806.3471},
primaryClass={cs.DC cs.DS}
} | canepa2008stabilizing |
arxiv-4118 | 0806.3474 | Information field theory for cosmological perturbation reconstruction and non-linear signal analysis | <|reference_start|>Information field theory for cosmological perturbation reconstruction and non-linear signal analysis: We develop information field theory (IFT) as a means of Bayesian inference on spatially distributed signals, the information fields. A didactical approach is attempted. Starting from general considerations on the nature of measurements, signals, noise, and their relation to a physical reality, we derive the information Hamiltonian, the source field, propagator, and interaction terms. Free IFT reproduces the well known Wiener-filter theory. Interacting IFT can be diagrammatically expanded, for which we provide the Feynman rules in position-, Fourier-, and spherical harmonics space, and the Boltzmann-Shannon information measure. The theory should be applicable in many fields. However, here, two cosmological signal recovery problems are discussed in their IFT-formulation. 1) Reconstruction of the cosmic large-scale structure matter distribution from discrete galaxy counts in incomplete galaxy surveys within a simple model of galaxy formation. We show that a Gaussian signal, which should resemble the initial density perturbations of the Universe, observed with a strongly non-linear, incomplete and Poissonian-noise affected response, as the processes of structure and galaxy formation and observations provide, can be reconstructed thanks to the virtue of a response-renormalization flow equation. 2) We design a filter to detect local non-linearities in the cosmic microwave background, which are predicted from some Early-Universe inflationary scenarios, and expected due to measurement imperfections. This filter is the optimal Bayes' estimator up to linear order in the non-linearity parameter and can be used even to construct sky maps of non-linearities in the data.<|reference_end|> | arxiv | @article{ensslin2008information,
title={Information field theory for cosmological perturbation reconstruction
and non-linear signal analysis},
author={Torsten A. Ensslin and Mona Frommert and Francisco S. Kitaura},
journal={arXiv preprint arXiv:0806.3474},
year={2008},
doi={10.1103/PhysRevD.80.105005},
number={J-MPA2270e},
archivePrefix={arXiv},
eprint={0806.3474},
primaryClass={astro-ph cs.IT hep-th math.IT physics.data-an stat.CO}
} | ensslin2008information |
arxiv-4119 | 0806.3480 | "Minesweeper" and spectrum of discrete Laplacians | <|reference_start|>"Minesweeper" and spectrum of discrete Laplacians: The paper is devoted to a problem inspired by the "Minesweeper" computer game. It is shown that certain configurations of open cells guarantee the existence and the uniqueness of solution. Mathematically the problem is reduced to some spectral properties of discrete differential operators. It is shown how the uniqueness can be used to create a new game which preserves the spirit of "Minesweeper" but does not require a computer.<|reference_end|> | arxiv | @article{german2008"minesweeper",
title={"Minesweeper" and spectrum of discrete Laplacians},
author={Oleg German and Evgeny Lakshtanov},
journal={Applicable Analysis, Vol. 89, No. 12, December 2010, 1907-1916},
year={2008},
doi={10.1080/00036811.2010.505189},
archivePrefix={arXiv},
eprint={0806.3480},
primaryClass={cs.DM}
} | german2008"minesweeper" |
arxiv-4120 | 0806.3514 | Consistent Newton-Raphson vs fixed-point for variational multiscale formulations for incompressible Navier-Stokes | <|reference_start|>Consistent Newton-Raphson vs fixed-point for variational multiscale formulations for incompressible Navier-Stokes: The following paper compares a consistent Newton-Raphson and fixed-point iteration based solution strategy for a variational multiscale finite element formulation for incompressible Navier-Stokes. The main contributions of this work include a consistent linearization of the Navier-Stokes equations, which provides an avenue for advanced algorithms that require origins in a consistent method. We also present a comparison between formulations that differ only in their linearization, but maintain all other equivalences. Using the variational multiscale concept, we construct a stabilized formulation (that may be considered an extension of the MINI element to nonlinear Navier-Stokes). We then linearize the problem using fixed-point iteration and by deriving a consistent tangent matrix for the update equation to obtain the solution via Newton-Raphson iterations. We show that the consistent formulation converges in fewer iterations, as expected, for several test problems. We also show that the consistent formulation converges for problems for which fixed-point iteration diverges. We present the results of both methods for problems of Reynold's number up to 5000.<|reference_end|> | arxiv | @article{turner2008consistent,
title={Consistent Newton-Raphson vs. fixed-point for variational multiscale
formulations for incompressible Navier-Stokes},
author={D. Z. Turner and K. B. Nakshatrala and K. D. Hjelmstad},
journal={arXiv preprint arXiv:0806.3514},
year={2008},
archivePrefix={arXiv},
eprint={0806.3514},
primaryClass={cs.NA}
} | turner2008consistent |
arxiv-4121 | 0806.3536 | In Pursuit of Spreadsheet Excellence | <|reference_start|>In Pursuit of Spreadsheet Excellence: The first fully-documented study into the quantitative impact of errors in operational spreadsheets identified an interesting anomaly. One of the five participating organisations involved in the study contributed a set of five spreadsheets of such quality that they set the organisation apart in a statistical sense. This virtuoso performance gave rise to a simple sampling test - The Clean Sheet Test - which can be used to objectively evaluate if an organisation is in control of the spreadsheets it is using in important processes such as financial reporting.<|reference_end|> | arxiv | @article{croll2008in,
title={In Pursuit of Spreadsheet Excellence},
author={Grenville J. Croll},
journal={arXiv preprint arXiv:0806.3536},
year={2008},
archivePrefix={arXiv},
eprint={0806.3536},
primaryClass={cs.SE cs.HC}
} | croll2008in |
arxiv-4122 | 0806.3537 | Statistical Learning of Arbitrary Computable Classifiers | <|reference_start|>Statistical Learning of Arbitrary Computable Classifiers: Statistical learning theory chiefly studies restricted hypothesis classes, particularly those with finite Vapnik-Chervonenkis (VC) dimension. The fundamental quantity of interest is the sample complexity: the number of samples required to learn to a specified level of accuracy. Here we consider learning over the set of all computable labeling functions. Since the VC-dimension is infinite and a priori (uniform) bounds on the number of samples are impossible, we let the learning algorithm decide when it has seen sufficient samples to have learned. We first show that learning in this setting is indeed possible, and develop a learning algorithm. We then show, however, that bounding sample complexity independently of the distribution is impossible. Notably, this impossibility is entirely due to the requirement that the learning algorithm be computable, and not due to the statistical nature of the problem.<|reference_end|> | arxiv | @article{soloveichik2008statistical,
title={Statistical Learning of Arbitrary Computable Classifiers},
author={David Soloveichik},
journal={arXiv preprint arXiv:0806.3537},
year={2008},
archivePrefix={arXiv},
eprint={0806.3537},
primaryClass={cs.LG}
} | soloveichik2008statistical |
arxiv-4123 | 0806.3542 | Design and Analysis of an Asynchronous Zero Collision MAC Protocol | <|reference_start|>Design and Analysis of an Asynchronous Zero Collision MAC Protocol: This paper proposes and analyzes a distributed MAC protocol that achieves zero collision with no control message exchange nor synchronization. ZC (ZeroCollision) is neither reservation-based nor dynamic TDMA; the protocol supports variable-length packets and does not lose efficiency when some of the stations do not transmit. At the same time, ZC is not a CSMA; in its steady state, it is completely collision-free. The stations transmit repeatedly in a round-robin order once the convergence state is reached. If some stations skip their turn, their transmissions are replaced by idle $20 \mu$-second mini-slots that enable the other stations to keep track of their order. Because of its short medium access delay and its efficiency, the protocol supports both real-time and elastic applications. The protocol allows for nodes leaving and joining the network; it can allocate more throughput to specific nodes (such as an access point). The protocol is robust against carrier sensing errors or clock drift. While collision avoidance is guaranteed in a single collision domain, it is not the case in a multiple collision one. However, experiments show ZC supports a comparable amount of goodput to CSMA in a multiple collision domain environment. The paper presents an analysis and extensive simulations of the protocol, confirming that ZC outperforms both CSMA and TDMA at high and low load.<|reference_end|> | arxiv | @article{lee2008design,
title={Design and Analysis of an Asynchronous Zero Collision MAC Protocol},
author={Jiwoong Lee and Jean C. Walrand},
journal={arXiv preprint arXiv:0806.3542},
year={2008},
archivePrefix={arXiv},
eprint={0806.3542},
primaryClass={cs.NI}
} | lee2008design |
arxiv-4124 | 0806.3626 | On multi F-nomial coefficients and Inversion formula for F-nomial coefficients | <|reference_start|>On multi F-nomial coefficients and Inversion formula for F-nomial coefficients: In response to [6], we discover the looked for inversion formula for F-nomial coefficients. Before supplying its proof, we generalize F-nomial coefficients to multi F-nomial coefficients and we give their combinatorial interpretation in cobweb posets language, as the number of maximal-disjoint blocks of the form sP_{k_1,k_2,...,k_s} of layer <Phi_1-->Phi_n>. Then we present inversion formula for F-nomial coefficients using multi F-nomial coefficients for all cobweb-admissible sequences. To this end we infer also some identities as conclusions of that inversion formula for the case of binomial, Gaussian and Fibonomial coefficients.<|reference_end|> | arxiv | @article{dziemianczuk2008on,
title={On multi F-nomial coefficients and Inversion formula for F-nomial
coefficients},
author={M. Dziemianczuk},
journal={arXiv preprint arXiv:0806.3626},
year={2008},
archivePrefix={arXiv},
eprint={0806.3626},
primaryClass={math.CO cs.DM}
} | dziemianczuk2008on |
arxiv-4125 | 0806.3628 | Four-node Relay Network with Bi-directional Traffic Employing Wireless Network Coding with Pre-cancellation | <|reference_start|>Four-node Relay Network with Bi-directional Traffic Employing Wireless Network Coding with Pre-cancellation: Network coding has the potential to improve the overall throughput of a network by combining different streams of data and forwarding them. In wireless networks, the wireless channel provide an excellent medium for physical layer network coding as signals from different transmitters are combined automatically by the wireless channel. In such scenarios, it would be interesting to investigate protocols and algorithms which can optimally relay information. In this paper, we look at a four-node two-way or bidirectional relay network, and propose a relay protocol which can relay information efficiently in this network.<|reference_end|> | arxiv | @article{kuek2008four-node,
title={Four-node Relay Network with Bi-directional Traffic Employing Wireless
Network Coding with Pre-cancellation},
author={Su Kiang Kuek and Chau Yuen and Woon Hau Chin},
journal={VTC Spring 2008},
year={2008},
archivePrefix={arXiv},
eprint={0806.3628},
primaryClass={cs.IT math.IT}
} | kuek2008four-node |
arxiv-4126 | 0806.3629 | Bi-Directional Multi-Antenna Relay Communications with Wireless Network Coding | <|reference_start|>Bi-Directional Multi-Antenna Relay Communications with Wireless Network Coding: In this paper, we consider a two-way or bidirectional communications system with a relay equipped with multiple antennas. We show that when the downlink channel state information is not known at the relay, the benefit of having additional antennas at the relay can only be obtained by using decode and forward (DF) but not amplify and forward (AF). The gain becomes significant when we employ transmit diversity together with wireless network coding. We also demonstrate how the performance of such system can be improved by performing antenna selection at the relay. Our results show that if downlink channel state information is known at the relay, network coding may not provide additional gain than simple antenna selection scheme.<|reference_end|> | arxiv | @article{yuen2008bi-directional,
title={Bi-Directional Multi-Antenna Relay Communications with Wireless Network
Coding},
author={Chau Yuen and Woon Hau Chin and Yong Liang Guan and Wenhua Chen and Taoyi Tee},
journal={VTC Spring 2008},
year={2008},
archivePrefix={arXiv},
eprint={0806.3629},
primaryClass={cs.IT math.IT}
} | yuen2008bi-directional |
arxiv-4127 | 0806.3630 | Comparative Study of SVD and QRS in Closed-Loop Beamforming Systems | <|reference_start|>Comparative Study of SVD and QRS in Closed-Loop Beamforming Systems: We compare two closed-loop beamforming algorithms, one based on singular value decomposition (SVD) and the other based on equal diagonal QR decomposition (QRS). SVD has the advantage of parallelizing the MIMO channel, but each of the sub-channels has different gain. QRS has the advantage of having equal diagonal value for the decomposed channel, but the subchannels are not fully parallelized, hence requiring successive interference cancellation or other techniques to perform decoding. We consider a closed-loop system where the feedback information is a unitary beamforming matrix. Due to the discrete and limited modulation set, SVD may have inferior performance to QRS when no modulation set selection is performed. However, if the selection of modulation set is performed optimally, we show that SVD can outperform QRS.<|reference_end|> | arxiv | @article{yuen2008comparative,
title={Comparative Study of SVD and QRS in Closed-Loop Beamforming Systems},
author={Chau Yuen and Sumei Sun and Jian-Kang Zhang},
journal={arXiv preprint arXiv:0806.3630},
year={2008},
archivePrefix={arXiv},
eprint={0806.3630},
primaryClass={cs.IT math.IT}
} | yuen2008comparative |
arxiv-4128 | 0806.3631 | Comparative Study of Open-loop Transmit Diversity Schemes for Four Transmit Antennas in Coded OFDM Systems | <|reference_start|>Comparative Study of Open-loop Transmit Diversity Schemes for Four Transmit Antennas in Coded OFDM Systems: We compare four open-loop transmit diversity schemes in a coded Orthogonal Frequency Division Multiplexing (OFDM) system with four transmit antennas, namely cyclic delay diversity (CDD), Space-Time Block Code (STBC, Alamouti code is used) with CDD, Quasi-Orthogonal STBC (QO-STBC) and Minimum-Decoding-Complexity QOSTBC (MDC-QOSTBC). We show that in a coded system with low code rate, a scheme with spatial transmit diversity of second order can achieve similar performance to that with spatial transmit diversity of fourth order due to the additional diversity provided by the phase shift diversity with channel coding. In addition, we also compare the decoding complexity and other features of the above four mentioned schemes, such as the requirement for the training signals, hybrid automatic retransmission request (HARQ), etc. The discussions in this paper can be readily applied to future wireless communication systems, such as mobile systems beyond 3G, IEEE 802.11 wireless LAN, or IEEE 802.16 WiMAX, that employ more than two transmit antennas and OFDM.<|reference_end|> | arxiv | @article{yuen2008comparative,
title={Comparative Study of Open-loop Transmit Diversity Schemes for Four
Transmit Antennas in Coded OFDM Systems},
author={Chau Yuen and Yan Wu and Sumei Sun},
journal={VTC Fall 2007},
year={2008},
archivePrefix={arXiv},
eprint={0806.3631},
primaryClass={cs.IT math.IT}
} | yuen2008comparative |
arxiv-4129 | 0806.3633 | A Continuous Vector-Perturbation for Multi-Antenna Multi-User Communication | <|reference_start|>A Continuous Vector-Perturbation for Multi-Antenna Multi-User Communication: The sum-rate of the broadcast channel in a multi-antenna multi-user communication system can be achieved by using precoding and adding a regular perturbation to the data vector. The perturbation can be removed by the modulus function, thus transparent to the receiver, but the information of the precoding matrix is needed to decode the symbols. This paper proposes a new technique to improve the multi-antenna multi-user system, by adding a continuous perturbation to the data vector without the need of information on the precoding matrix to be known at the receiver. The perturbation vector will be treated as interference at the receiver, thus it will be transparent to the receiver. The derivation of the continuous vector perturbation is provided by maximizing the signal-to-interference plus noise ratio or minimizing the minimum mean square error of the received signal.<|reference_end|> | arxiv | @article{chua2008a,
title={A Continuous Vector-Perturbation for Multi-Antenna Multi-User
Communication},
author={Wee Seng Chua and Chau Yuen and Francois Chin},
journal={VTC Spring 2007},
year={2008},
archivePrefix={arXiv},
eprint={0806.3633},
primaryClass={cs.IT math.IT}
} | chua2008a |
arxiv-4130 | 0806.3646 | Round Trip Time Prediction Using the Symbolic Function Network Approach | <|reference_start|>Round Trip Time Prediction Using the Symbolic Function Network Approach: In this paper, we develop a novel approach to model the Internet round trip time using a recently proposed symbolic type neural network model called symbolic function network. The developed predictor is shown to have good generalization performance and simple representation compared to the multilayer perceptron based predictors.<|reference_end|> | arxiv | @article{eskander2008round,
title={Round Trip Time Prediction Using the Symbolic Function Network Approach},
author={George S. Eskander and Amir Atiya and Kil To Chong and Hyongsuk Kim
and Sung Goo Yoo},
journal={ISITC, pp. 3-7, 2007 International Symposium on Information
Technology Convergence (ISITC 2007), 2007},
year={2008},
archivePrefix={arXiv},
eprint={0806.3646},
primaryClass={cs.NE cs.SC}
} | eskander2008round |
arxiv-4131 | 0806.3650 | Recursive Code Construction for Random Networks | <|reference_start|>Recursive Code Construction for Random Networks: A modification of Koetter-Kschischang codes for random networks is presented (these codes were also studied by Wang et al. in the context of authentication problems). The new codes have higher information rate, while maintaining the same error-correcting capabilities. An efficient error-correcting algorithm is proposed for these codes.<|reference_end|> | arxiv | @article{skachek2008recursive,
title={Recursive Code Construction for Random Networks},
author={Vitaly Skachek},
journal={arXiv preprint arXiv:0806.3650},
year={2008},
doi={10.1109/TIT.2009.2039163},
archivePrefix={arXiv},
eprint={0806.3650},
primaryClass={cs.IT math.IT}
} | skachek2008recursive |
arxiv-4132 | 0806.3653 | Opportunistic Interference Alignment in MIMO Interference Channels | <|reference_start|>Opportunistic Interference Alignment in MIMO Interference Channels: We present two interference alignment techniques such that an opportunistic point-to-point multiple input multiple output (MIMO) link can reuse, without generating any additional interference, the same frequency band of a similar pre-existing primary link. In this scenario, we exploit the fact that under power constraints, although each radio maximizes independently its rate by water-filling on their channel transfer matrix singular values, frequently, not all of them are used. Therefore, by aligning the interference of the opportunistic radio it is possible to transmit at a significant rate while insuring zero-interference on the pre-existing link. We propose a linear pre-coder for a perfect interference alignment and a power allocation scheme which maximizes the individual data rate of the secondary link. Our numerical results show that significant data rates are achieved even for a reduced number of antennas.<|reference_end|> | arxiv | @article{perlaza2008opportunistic,
title={Opportunistic Interference Alignment in MIMO Interference Channels},
author={Samir Medina Perlaza and Merouane Debbah and Samson Lasaulce and
Jean-Marie Chaufray},
journal={arXiv preprint arXiv:0806.3653},
year={2008},
doi={10.1109/PIMRC.2008.4699872},
archivePrefix={arXiv},
eprint={0806.3653},
primaryClass={cs.GT cs.IT math.IT}
} | perlaza2008opportunistic |
arxiv-4133 | 0806.3668 | Approximating Multi-Criteria Max-TSP | <|reference_start|>Approximating Multi-Criteria Max-TSP: We present randomized approximation algorithms for multi-criteria Max-TSP. For Max-STSP with k > 1 objective functions, we obtain an approximation ratio of $1/k - \eps$ for arbitrarily small $\eps > 0$. For Max-ATSP with k objective functions, we obtain an approximation ratio of $1/(k+1) - \eps$.<|reference_end|> | arxiv | @article{bläser2008approximating,
title={Approximating Multi-Criteria Max-TSP},
author={Markus Bl{\"a}ser and Bodo Manthey and Oliver Putz},
journal={arXiv preprint arXiv:0806.3668},
year={2008},
archivePrefix={arXiv},
eprint={0806.3668},
primaryClass={cs.DS}
} | bläser2008approximating |
arxiv-4134 | 0806.3681 | On the d-dimensional Quasi-Equally Spaced Sampling | <|reference_start|>On the d-dimensional Quasi-Equally Spaced Sampling: We study a class of random matrices that appear in several communication and signal processing applications, and whose asymptotic eigenvalue distribution is closely related to the reconstruction error of an irregularly sampled bandlimited signal. We focus on the case where the random variables characterizing these matrices are d-dimensional vectors, independent, and quasi-equally spaced, i.e., they have an arbitrary distribution and their averages are vertices of a d-dimensional grid. Although a closed form expression of the eigenvalue distribution is still unknown, under these conditions we are able (i) to derive the distribution moments as the matrix size grows to infinity, while its aspect ratio is kept constant, and (ii) to show that the eigenvalue distribution tends to the Marcenko-Pastur law as d->infinity. These results can find application in several fields, as an example we show how they can be used for the estimation of the mean square error provided by linear reconstruction techniques.<|reference_end|> | arxiv | @article{nordio2008on,
title={On the d-dimensional Quasi-Equally Spaced Sampling},
author={Alessandro Nordio and Carla-Fabiana Chiasserini and Emanuele Viterbo},
journal={arXiv preprint arXiv:0806.3681},
year={2008},
archivePrefix={arXiv},
eprint={0806.3681},
primaryClass={cs.IT math.IT}
} | nordio2008on |
arxiv-4135 | 0806.3708 | Atlas-Based Prostate Segmentation Using an Hybrid Registration | <|reference_start|>Atlas-Based Prostate Segmentation Using an Hybrid Registration: Purpose: This paper presents the preliminary results of a semi-automatic method for prostate segmentation of Magnetic Resonance Images (MRI) which aims to be incorporated in a navigation system for prostate brachytherapy. Methods: The method is based on the registration of an anatomical atlas computed from a population of 18 MRI exams onto a patient image. An hybrid registration framework which couples an intensity-based registration with a robust point-matching algorithm is used for both atlas building and atlas registration. Results: The method has been validated on the same dataset that the one used to construct the atlas using the "leave-one-out method". Results gives a mean error of 3.39 mm and a standard deviation of 1.95 mm with respect to expert segmentations. Conclusions: We think that this segmentation tool may be a very valuable help to the clinician for routine quantitative image exploitation.<|reference_end|> | arxiv | @article{martin2008atlas-based,
title={Atlas-Based Prostate Segmentation Using an Hybrid Registration},
author={S{\'e}bastien Martin (TIMC) and Vincent Daanen (TIMC) and Jocelyne
Troccaz (TIMC)},
journal={arXiv preprint arXiv:0806.3708},
year={2008},
archivePrefix={arXiv},
eprint={0806.3708},
primaryClass={cs.OH}
} | martin2008atlas-based |
arxiv-4136 | 0806.3710 | How Is Meaning Grounded in Dictionary Definitions? | <|reference_start|>How Is Meaning Grounded in Dictionary Definitions?: Meaning cannot be based on dictionary definitions all the way down: at some point the circularity of definitions must be broken in some way, by grounding the meanings of certain words in sensorimotor categories learned from experience or shaped by evolution. This is the "symbol grounding problem." We introduce the concept of a reachable set -- a larger vocabulary whose meanings can be learned from a smaller vocabulary through definition alone, as long as the meanings of the smaller vocabulary are themselves already grounded. We provide simple algorithms to compute reachable sets for any given dictionary.<|reference_end|> | arxiv | @article{masse2008how,
title={How Is Meaning Grounded in Dictionary Definitions?},
author={A. Blondin Masse and G. Chicoisne and Y. Gargouri and S. Harnad and
O. Picard and O. Marcotte},
journal={arXiv preprint arXiv:0806.3710},
year={2008},
archivePrefix={arXiv},
eprint={0806.3710},
primaryClass={cs.CL cs.DB}
} | masse2008how |
arxiv-4137 | 0806.3765 | Cross-concordances: terminology mapping and its effectiveness for information retrieval | <|reference_start|>Cross-concordances: terminology mapping and its effectiveness for information retrieval: The German Federal Ministry for Education and Research funded a major terminology mapping initiative, which found its conclusion in 2007. The task of this terminology mapping initiative was to organize, create and manage 'cross-concordances' between controlled vocabularies (thesauri, classification systems, subject heading lists) centred around the social sciences but quickly extending to other subject areas. 64 crosswalks with more than 500,000 relations were established. In the final phase of the project, a major evaluation effort to test and measure the effectiveness of the vocabulary mappings in an information system environment was conducted. The paper reports on the cross-concordance work and evaluation results.<|reference_end|> | arxiv | @article{mayr2008cross-concordances:,
title={Cross-concordances: terminology mapping and its effectiveness for
information retrieval},
author={Philipp Mayr and Vivien Petras},
journal={arXiv preprint arXiv:0806.3765},
year={2008},
archivePrefix={arXiv},
eprint={0806.3765},
primaryClass={cs.DL cs.IR}
} | mayr2008cross-concordances: |
arxiv-4138 | 0806.3787 | Computational Approaches to Measuring the Similarity of Short Contexts : A Review of Applications and Methods | <|reference_start|>Computational Approaches to Measuring the Similarity of Short Contexts : A Review of Applications and Methods: Measuring the similarity of short written contexts is a fundamental problem in Natural Language Processing. This article provides a unifying framework by which short context problems can be categorized both by their intended application and proposed solution. The goal is to show that various problems and methodologies that appear quite different on the surface are in fact very closely related. The axes by which these categorizations are made include the format of the contexts (headed versus headless), the way in which the contexts are to be measured (first-order versus second-order similarity), and the information used to represent the features in the contexts (micro versus macro views). The unifying thread that binds together many short context applications and methods is the fact that similarity decisions must be made between contexts that share few (if any) words in common.<|reference_end|> | arxiv | @article{pedersen2008computational,
title={Computational Approaches to Measuring the Similarity of Short Contexts :
A Review of Applications and Methods},
author={Ted Pedersen ({University of Minnesota, Duluth})},
journal={University of Minnesota Supercomputing Institute Research Report
UMSI 2010/118, October 2010},
year={2008},
archivePrefix={arXiv},
eprint={0806.3787},
primaryClass={cs.CL}
} | pedersen2008computational |
arxiv-4139 | 0806.3799 | A Sublinear Algorithm for Sparse Reconstruction with l2/l2 Recovery Guarantees | <|reference_start|>A Sublinear Algorithm for Sparse Reconstruction with l2/l2 Recovery Guarantees: Compressed Sensing aims to capture attributes of a sparse signal using very few measurements. Cand\`{e}s and Tao showed that sparse reconstruction is possible if the sensing matrix acts as a near isometry on all $\boldsymbol{k}$-sparse signals. This property holds with overwhelming probability if the entries of the matrix are generated by an iid Gaussian or Bernoulli process. There has been significant recent interest in an alternative signal processing framework; exploiting deterministic sensing matrices that with overwhelming probability act as a near isometry on $\boldsymbol{k}$-sparse vectors with uniformly random support, a geometric condition that is called the Statistical Restricted Isometry Property or StRIP. This paper considers a family of deterministic sensing matrices satisfying the StRIP that are based on \srm codes (binary chirps) and a $\boldsymbol{k}$-sparse reconstruction algorithm with sublinear complexity. In the presence of stochastic noise in the data domain, this paper derives bounds on the $\boldsymbol{\ell_2}$ accuracy of approximation in terms of the $\boldsymbol{\ell_2}$ norm of the measurement noise and the accuracy of the best $\boldsymbol{k}$-sparse approximation, also measured in the $\boldsymbol{\ell_2}$ norm. This type of $\boldsymbol{\ell_2 /\ell_2}$ bound is tighter than the standard $\boldsymbol{\ell_2 /\ell_1}$ or $\boldsymbol{\ell_1/ \ell_1}$ bounds.<|reference_end|> | arxiv | @article{calderbank2008a,
title={A Sublinear Algorithm for Sparse Reconstruction with l2/l2 Recovery
Guarantees},
author={Robert Calderbank and Stephen Howard and Sina Jafarpour},
journal={arXiv preprint arXiv:0806.3799},
year={2008},
archivePrefix={arXiv},
eprint={0806.3799},
primaryClass={cs.IT math.IT}
} | calderbank2008a |
arxiv-4140 | 0806.3802 | Efficient and Robust Compressed Sensing using High-Quality Expander Graphs | <|reference_start|>Efficient and Robust Compressed Sensing using High-Quality Expander Graphs: Expander graphs have been recently proposed to construct efficient compressed sensing algorithms. In particular, it has been shown that any $n$-dimensional vector that is $k$-sparse (with $k\ll n$) can be fully recovered using $O(k\log\frac{n}{k})$ measurements and only $O(k\log n)$ simple recovery iterations. In this paper we improve upon this result by considering expander graphs with expansion coefficient beyond 3/4 and show that, with the same number of measurements, only $O(k)$ recovery iterations are required, which is a significant improvement when $n$ is large. In fact, full recovery can be accomplished by at most $2k$ very simple iterations. The number of iterations can be made arbitrarily close to $k$, and the recovery algorithm can be implemented very efficiently using a simple binary search tree. We also show that by tolerating a small penalty on the number of measurements, and not on the number of recovery iterations, one can use the efficient construction of a family of expander graphs to come up with explicit measurement matrices for this method. We compare our result with other recently developed expander-graph-based methods and argue that it compares favorably both in terms of the number of required measurements and in terms of the recovery time complexity. Finally we will show how our analysis extends to give a robust algorithm that finds the position and sign of the $k$ significant elements of an almost $k$-sparse signal and then, using very simple optimization techniques, finds in sublinear time a $k$-sparse signal which approximates the original signal with very high precision.<|reference_end|> | arxiv | @article{jafarpour2008efficient,
title={Efficient and Robust Compressed Sensing using High-Quality Expander
Graphs},
author={Sina Jafarpour and Weiyu Xu and Babak Hassibi and Robert Calderbank},
journal={arXiv preprint arXiv:0806.3802},
year={2008},
archivePrefix={arXiv},
eprint={0806.3802},
primaryClass={cs.IT math.IT}
} | jafarpour2008efficient |
arxiv-4141 | 0806.3827 | Optimal Scheduling of File Transfers with Divisible Sizes on Multiple Disjoint Paths | <|reference_start|>Optimal Scheduling of File Transfers with Divisible Sizes on Multiple Disjoint Paths: In this paper I investigate several offline and online data transfer scheduling problems and propose efficient algorithms and techniques for addressing them. In the offline case, I present a novel, heuristic, algorithm for scheduling files with divisible sizes on multiple disjoint paths, in order to maximize the total profit (the problem is equivalent to the multiple knapsack problem with divisible item sizes). I then consider a cost optimization problem for transferring a sequence of identical files, subject to time constraints imposed by the data transfer providers. For the online case I propose an algorithmic framework based on the block partitioning method, which can speed up the process of resource allocation and reservation.<|reference_end|> | arxiv | @article{andreica2008optimal,
title={Optimal Scheduling of File Transfers with Divisible Sizes on Multiple
Disjoint Paths},
author={Mugurel Ionut Andreica},
journal={Proceedings of the IEEE Romania International Conference
"Communications", 2008. (ISBN: 978-606-521-008-0), Bucharest : Romania (2008)},
year={2008},
archivePrefix={arXiv},
eprint={0806.3827},
primaryClass={cs.DS cs.NI}
} | andreica2008optimal |
arxiv-4142 | 0806.3849 | Separability in the Ambient Logic | <|reference_start|>Separability in the Ambient Logic: The \it{Ambient Logic} (AL) has been proposed for expressing properties of process mobility in the calculus of Mobile Ambients (MA), and as a basis for query languages on semistructured data. We study some basic questions concerning the discriminating power of AL, focusing on the equivalence on processes induced by the logic $(=_L>)$. As underlying calculi besides MA we consider a subcalculus in which an image-finiteness condition holds and that we prove to be Turing complete. Synchronous variants of these calculi are studied as well. In these calculi, we provide two operational characterisations of $_=L$: a coinductive one (as a form of bisimilarity) and an inductive one (based on structual properties of processes). After showing $_=L$ to be stricly finer than barbed congruence, we establish axiomatisations of $_=L$ on the subcalculus of MA (both the asynchronous and the synchronous version), enabling us to relate $_=L$ to structural congruence. We also present some (un)decidability results that are related to the above separation properties for AL: the undecidability of $_=L$ on MA and its decidability on the subcalculus.<|reference_end|> | arxiv | @article{hirschkoff2008separability,
title={Separability in the Ambient Logic},
author={Daniel Hirschkoff and Etienne Lozes and Davide Sangiorgi},
journal={Logical Methods in Computer Science, Volume 4, Issue 3 (September
4, 2008) lmcs:682},
year={2008},
doi={10.2168/LMCS-4(3:4)2008},
archivePrefix={arXiv},
eprint={0806.3849},
primaryClass={cs.LO cs.MA cs.PL}
} | hirschkoff2008separability |
arxiv-4143 | 0806.3885 | Conceptualization of seeded region growing by pixels aggregation Part 1: the framework | <|reference_start|>Conceptualization of seeded region growing by pixels aggregation Part 1: the framework: Adams and Bishop have proposed in 1994 a novel region growing algorithm called seeded region growing by pixels aggregation (SRGPA). This paper introduces a framework to implement an algorithm using SRGPA. This framework is built around two concepts: localization and organization of applied action. This conceptualization gives a quick implementation of algorithms, a direct translation between the mathematical idea and the numerical implementation, and an improvement of algorithms efficiency.<|reference_end|> | arxiv | @article{tariel2008conceptualization,
title={Conceptualization of seeded region growing by pixels aggregation. Part
1: the framework},
author={Vincent Tariel},
journal={arXiv preprint arXiv:0806.3885},
year={2008},
archivePrefix={arXiv},
eprint={0806.3885},
primaryClass={cs.CV}
} | tariel2008conceptualization |
arxiv-4144 | 0806.3887 | Conceptualization of seeded region growing by pixels aggregation Part 2: how to localize a final partition invariant about the seeded region initialisation order | <|reference_start|>Conceptualization of seeded region growing by pixels aggregation Part 2: how to localize a final partition invariant about the seeded region initialisation order: In the previous paper, we have conceptualized the localization and the organization of seeded region growing by pixels aggregation (SRGPA) but we do not give the issue when there is a collision between two distinct regions during the growing process. In this paper, we propose two implementations to manage two classical growing processes: one without a boundary region region to divide the other regions and another with. Unfortunately, as noticed by Mehnert and Jakway (1997), this partition depends on the seeded region initialisation order (SRIO). We propose a growing process, invariant about SRIO such as the boundary region is the set of ambiguous pixels.<|reference_end|> | arxiv | @article{tariel2008conceptualization,
title={Conceptualization of seeded region growing by pixels aggregation. Part
2: how to localize a final partition invariant about the seeded region
initialisation order},
author={Vincent Tariel},
journal={arXiv preprint arXiv:0806.3887},
year={2008},
archivePrefix={arXiv},
eprint={0806.3887},
primaryClass={cs.CV}
} | tariel2008conceptualization |
arxiv-4145 | 0806.3906 | Power Indices and minimal winning Coalitions | <|reference_start|>Power Indices and minimal winning Coalitions: The Penrose-Banzhaf index and the Shapley-Shubik index are the best-known and the most used tools to measure political power of voters in simple voting games. Most methods to calculate these power indices are based on counting winning coalitions, in particular those coalitions a voter is decisive for. We present a new combinatorial formula how to calculate both indices solely using the set of minimal winning coalitions.<|reference_end|> | arxiv | @article{kirsch2008power,
title={Power Indices and minimal winning Coalitions},
author={Werner Kirsch and Jessica Langner},
journal={arXiv preprint arXiv:0806.3906},
year={2008},
archivePrefix={arXiv},
eprint={0806.3906},
primaryClass={math.CO cs.GT}
} | kirsch2008power |
arxiv-4146 | 0806.3928 | Conceptualization of seeded region growing by pixels aggregation Part 3: a wide range of algorithms | <|reference_start|>Conceptualization of seeded region growing by pixels aggregation Part 3: a wide range of algorithms: In the two previous papers of this serie, we have created a library, called Population, dedicated to seeded region growing by pixels aggregation and we have proposed different growing processes to get a partition with or without a boundary region to divide the other regions or to get a partition invariant about the seeded region initialisation order. Using this work, we implement some algorithms belonging to the field of SRGPA using this library and these growing processes.<|reference_end|> | arxiv | @article{tariel2008conceptualization,
title={Conceptualization of seeded region growing by pixels aggregation. Part
3: a wide range of algorithms},
author={Vincent Tariel},
journal={arXiv preprint arXiv:0806.3928},
year={2008},
archivePrefix={arXiv},
eprint={0806.3928},
primaryClass={cs.CV}
} | tariel2008conceptualization |
arxiv-4147 | 0806.3938 | Cooperation with Complement is Better | <|reference_start|>Cooperation with Complement is Better: In a setting where heterogeneous agents interact to accomplish a given set of goals, cooperation is of utmost importance, especially when agents cannot achieve their individual goals by exclusive use of their own efforts. Even when we consider friendly environments and benevolent agents, cooperation involves several issues: with whom to cooperate, reciprocation, how to address credit assignment and complex division of gains, etc. We propose a model where heterogeneous agents cooperate by forming groups and formation of larger groups is promoted. Benefit of agents is proportional to the performance and the size of the group. There is a time pressure to form a group. We investigate how preferring similar or complement agents in group formation affects an agent's success. Preferring complement in group formation is found to be better, yet there is no need to push the strategy to the extreme since the effect of complementing partners is saturated.<|reference_end|> | arxiv | @article{yildirim2008cooperation,
title={Cooperation with Complement is Better},
author={Ilker Yildirim and Haluk Bingol},
journal={arXiv preprint arXiv:0806.3938},
year={2008},
archivePrefix={arXiv},
eprint={0806.3938},
primaryClass={cs.MA physics.soc-ph}
} | yildirim2008cooperation |
arxiv-4148 | 0806.3939 | Conceptualization of seeded region growing by pixels aggregation Part 4: Simple, generic and robust extraction of grains in granular materials obtained by X-ray tomography | <|reference_start|>Conceptualization of seeded region growing by pixels aggregation Part 4: Simple, generic and robust extraction of grains in granular materials obtained by X-ray tomography: This paper proposes a simple, generic and robust method to extract the grains from experimental tridimensionnal images of granular materials obtained by X-ray tomography. This extraction has two steps: segmentation and splitting. For the segmentation step, if there is a sufficient contrast between the different components, a classical threshold procedure followed by a succession of morphological filters can be applied. If not, and if the boundary needs to be localized precisely, a watershed transformation controlled by labels is applied. The basement of this transformation is to localize a label included in the component and another label in the component complementary. A "soft" threshold following by an opening is applied on the initial image to localize a label in a component. For any segmentation procedure, the visualisation shows a problem: some groups of two grains, close one to each other, become connected. So if a classical cluster procedure is applied on the segmented binary image, these numerical connected grains are considered as a single grain. To overcome this problem, we applied a procedure introduced by L. Vincent in 1993. This grains extraction is tested for various complexes porous media and granular material, to predict various properties (diffusion, electrical conductivity, deformation field) in a good agreement with experiment data.<|reference_end|> | arxiv | @article{tariel2008conceptualization,
title={Conceptualization of seeded region growing by pixels aggregation. Part
4: Simple, generic and robust extraction of grains in granular materials
obtained by X-ray tomography},
author={Vincent Tariel},
journal={arXiv preprint arXiv:0806.3939},
year={2008},
archivePrefix={arXiv},
eprint={0806.3939},
primaryClass={cs.CV}
} | tariel2008conceptualization |
arxiv-4149 | 0806.3949 | Use of a Quantum Computer and the Quick Medical Reference To Give an Approximate Diagnosis | <|reference_start|>Use of a Quantum Computer and the Quick Medical Reference To Give an Approximate Diagnosis: The Quick Medical Reference (QMR) is a compendium of statistical knowledge connecting diseases to findings (symptoms). The information in QMR can be represented as a Bayesian network. The inference problem (or, in more medical language, giving a diagnosis) for the QMR is to, given some findings, find the probability of each disease. Rejection sampling and likelihood weighted sampling (a.k.a. likelihood weighting) are two simple algorithms for making approximate inferences from an arbitrary Bayesian net (and from the QMR Bayesian net in particular). Heretofore, the samples for these two algorithms have been obtained with a conventional "classical computer". In this paper, we will show that two analogous algorithms exist for the QMR Bayesian net, where the samples are obtained with a quantum computer. We expect that these two algorithms, implemented on a quantum computer, can also be used to make inferences (and predictions) with other Bayesian nets.<|reference_end|> | arxiv | @article{tucci2008use,
title={Use of a Quantum Computer and the Quick Medical Reference To Give an
Approximate Diagnosis},
author={Robert R. Tucci},
journal={arXiv preprint arXiv:0806.3949},
year={2008},
archivePrefix={arXiv},
eprint={0806.3949},
primaryClass={quant-ph cs.AI}
} | tucci2008use |
arxiv-4150 | 0806.3963 | A stabilized finite element formulation for advection-diffusion using the generalized finite element framework | <|reference_start|>A stabilized finite element formulation for advection-diffusion using the generalized finite element framework: The following work presents a generalized (extended) finite element formulation for the advection-diffusion equation. Using enrichment functions that represent the exponential nature of the exact solution, smooth numerical solutions are obtained for problems with steep gradients and high Peclet numbers (up to Pe = 25) in one and two-dimensions. As opposed to traditional stabilized methods that require the construction of stability parameters and stabilization terms, the present work avoids numerical instabilities by improving the classical Galerkin solution with an enrichment function. To contextualize this method among other stabilized methods, we show by decomposition of the solution (in a multiscale manner) an equivalence to both Galerkin/least-squares type methods and those that use bubble functions. This work also presents a strategy for constructing the enrichment function for problems with complex geometries by employing a global-local approach.<|reference_end|> | arxiv | @article{turner2008a,
title={A stabilized finite element formulation for advection-diffusion using
the generalized finite element framework},
author={D. Z. Turner and K. B. Nakshatrala and K. D. Hjelmstad},
journal={arXiv preprint arXiv:0806.3963},
year={2008},
archivePrefix={arXiv},
eprint={0806.3963},
primaryClass={cs.NA}
} | turner2008a |
arxiv-4151 | 0806.3978 | Information In The Non-Stationary Case | <|reference_start|>Information In The Non-Stationary Case: Information estimates such as the ``direct method'' of Strong et al. (1998) sidestep the difficult problem of estimating the joint distribution of response and stimulus by instead estimating the difference between the marginal and conditional entropies of the response. While this is an effective estimation strategy, it tempts the practitioner to ignore the role of the stimulus and the meaning of mutual information. We show here that, as the number of trials increases indefinitely, the direct (or ``plug-in'') estimate of marginal entropy converges (with probability 1) to the entropy of the time-averaged conditional distribution of the response, and the direct estimate of the conditional entropy converges to the time-averaged entropy of the conditional distribution of the response. Under joint stationarity and ergodicity of the response and stimulus, the difference of these quantities converges to the mutual information. When the stimulus is deterministic or non-stationary the direct estimate of information no longer estimates mutual information, which is no longer meaningful, but it remains a measure of variability of the response distribution across time.<|reference_end|> | arxiv | @article{vu2008information,
title={Information In The Non-Stationary Case},
author={Vincent Q. Vu and Bin Yu and Robert E. Kass},
journal={arXiv preprint arXiv:0806.3978},
year={2008},
archivePrefix={arXiv},
eprint={0806.3978},
primaryClass={q-bio.NC cs.IT math.IT q-bio.QM stat.ME}
} | vu2008information |
arxiv-4152 | 0806.4020 | Design, Development and Testing of Underwater Vehicles: ITB Experience | <|reference_start|>Design, Development and Testing of Underwater Vehicles: ITB Experience: The last decade has witnessed increasing worldwide interest in the research of underwater robotics with particular focus on the area of autonomous underwater vehicles (AUVs). The underwater robotics technology has enabled human to access the depth of the ocean to conduct environmental surveys, resources mapping as well as scientific and military missions. This capability is especially valuable for countries with major water or oceanic resources. As an archipelagic nation with more than 13,000 islands, Indonesia has one of the most abundant living and non-organic oceanic resources. The needs for the mapping, exploration, and environmental preservation of the vast marine resources are therefore imperative. The challenge of the deep water exploration has been the complex issues associated with hazardous and unstructured undersea and sea-bed environments. The paper reports the design, development and testing efforts of underwater vehicle that have been conducted at Institut Teknologi Bandung. Key technology areas have been identified and step-by-step development is presented in conjunction with the need to meet the challenge of underwater vehicle operation. A number of future research directions are also highlighted.<|reference_end|> | arxiv | @article{muljowidodo2008design,
title={Design, Development and Testing of Underwater Vehicles: {ITB} Experience},
author={{Muljowidodo} and Said D. Jenie and Agus Budiyono and Sapto A. Nugroho},
journal={arXiv preprint arXiv:0806.4020},
year={2008},
archivePrefix={arXiv},
eprint={0806.4020},
primaryClass={cs.RO}
} | muljowidodo2008design
arxiv-4153 | 0806.4021 | Linear Parameter Varying Model Identification for Control of Rotorcraft-based UAV | <|reference_start|>Linear Parameter Varying Model Identification for Control of Rotorcraft-based UAV: A rotorcraft-based unmanned aerial vehicle exhibits more complex properties compared to its full-size counterparts due to its increased sensitivity to control inputs and disturbances and higher bandwidth of its dynamics. As an aerial vehicle with vertical take-off and landing capability, the helicopter specifically poses a difficult problem of transition between forward flight and unstable hover and vice versa. The LPV control technique explicitly takes into account the change in performance due to the real-time parameter variations. The technique therefore theoretically guarantees the performance and robustness over the entire operating envelope. In this study, we investigate a new approach implementing model identification for use in the LPV control framework. The identification scheme employs recursive least square technique implemented on the LPV system represented by dynamics of helicopter during a transition. The airspeed as the scheduling of parameter trajectory is not assumed to vary slowly. The exclusion of slow parameter change requirement allows for the application of the algorithm for aggressive maneuvering capability without the need of expensive computation. The technique is tested numerically and will be validated in the autonomous flight of a small scale helicopter.<|reference_end|> | arxiv | @article{budiyono2008linear,
title={Linear Parameter Varying Model Identification for Control of
Rotorcraft-based UAV},
author={Agus Budiyono and H. Y. Sutarto},
journal={arXiv preprint arXiv:0806.4021},
year={2008},
archivePrefix={arXiv},
eprint={0806.4021},
primaryClass={cs.RO}
} | budiyono2008linear |
arxiv-4154 | 0806.4034 | Data linkage dynamics with shedding | <|reference_start|>Data linkage dynamics with shedding: We study shedding in the setting of data linkage dynamics, a simple model of computation that bears on the use of dynamic data structures in programming. Shedding is complementary to garbage collection. With shedding, each time a link to a data object is updated by a program, it is determined whether or not the link will possibly be used once again by the program, and if not the link is automatically removed. Thus, everything is made garbage as soon as it can be viewed as garbage. By that, the effectiveness of garbage collection becomes maximal.<|reference_end|> | arxiv | @article{bergstra2008data,
title={Data linkage dynamics with shedding},
author={J. A. Bergstra and C. A. Middelburg},
journal={Fundamenta Informaticae, 103(1--4):31--52, 2010},
year={2008},
doi={10.3233/FI-2010-317},
number={PRG0809},
archivePrefix={arXiv},
eprint={0806.4034},
primaryClass={cs.LO}
} | bergstra2008data |
arxiv-4155 | 0806.4073 | A comparison of two approaches for polynomial time algorithms computing basic graph parameters | <|reference_start|>A comparison of two approaches for polynomial time algorithms computing basic graph parameters: In this paper we compare and illustrate the algorithmic use of graphs of bounded tree-width and graphs of bounded clique-width. For this purpose we give polynomial time algorithms for computing the four basic graph parameters independence number, clique number, chromatic number, and clique covering number on a given tree structure of graphs of bounded tree-width and graphs of bounded clique-width in polynomial time. We also present linear time algorithms for computing the latter four basic graph parameters on trees, i.e. graphs of tree-width 1, and on co-graphs, i.e. graphs of clique-width at most 2.<|reference_end|> | arxiv | @article{gurski2008a,
title={A comparison of two approaches for polynomial time algorithms computing
basic graph parameters},
author={Frank Gurski},
journal={arXiv preprint arXiv:0806.4073},
year={2008},
archivePrefix={arXiv},
eprint={0806.4073},
primaryClass={cs.DS cs.DM}
} | gurski2008a |
arxiv-4156 | 0806.4112 | Statistical Physics of Hard Optimization Problems | <|reference_start|>Statistical Physics of Hard Optimization Problems: Optimization is fundamental in many areas of science, from computer science and information theory to engineering and statistical physics, as well as to biology or social sciences. It typically involves a large number of variables and a cost function depending on these variables. Optimization problems in the NP-complete class are particularly difficult, it is believed that the number of operations required to minimize the cost function is in the most difficult cases exponential in the system size. However, even in an NP-complete problem the practically arising instances might, in fact, be easy to solve. The principal question we address in this thesis is: How to recognize if an NP-complete constraint satisfaction problem is typically hard and what are the main reasons for this? We adopt approaches from the statistical physics of disordered systems, in particular the cavity method developed originally to describe glassy systems. We describe new properties of the space of solutions in two of the most studied constraint satisfaction problems - random satisfiability and random graph coloring. We suggest a relation between the existence of the so-called frozen variables and the algorithmic hardness of a problem. Based on these insights, we introduce a new class of problems which we named "locked" constraint satisfaction, where the statistical description is easily solvable, but from the algorithmic point of view they are even more challenging than the canonical satisfiability.<|reference_end|> | arxiv | @article{zdeborová2008statistical,
title={Statistical Physics of Hard Optimization Problems},
author={Lenka Zdeborov{\'a}},
journal={Acta Physica Slovaca 59, No.3, 169-303 (2009)},
year={2008},
archivePrefix={arXiv},
eprint={0806.4112},
primaryClass={cond-mat.stat-mech cond-mat.dis-nn cs.CC}
} | zdeborová2008statistical |
arxiv-4157 | 0806.4127 | The implicit equation of a canal surface | <|reference_start|>The implicit equation of a canal surface: A canal surface is an envelope of a one parameter family of spheres. In this paper we present an efficient algorithm for computing the implicit equation of a canal surface generated by a rational family of spheres. By using Laguerre and Lie geometries, we relate the equation of the canal surface to the equation of a dual variety of a certain curve in 5-dimensional projective space. We define the \mu-basis for arbitrary dimension and give a simple algorithm for its computation. This is then applied to the dual variety, which allows us to deduce the implicit equations of the the dual variety, the canal surface and any offset to the canal surface.<|reference_end|> | arxiv | @article{dohm2008the,
title={The implicit equation of a canal surface},
author={Marc Dohm ({JAD, INRIA Sophia Antipolis}) and Severinas Zube},
journal={arXiv preprint arXiv:0806.4127},
year={2008},
doi={10.1016/j.jsc.2008.06.001},
archivePrefix={arXiv},
eprint={0806.4127},
primaryClass={math.AG cs.SC math.AC}
} | dohm2008the |
arxiv-4158 | 0806.4130 | Complexity of Hybrid Logics over Transitive Frames | <|reference_start|>Complexity of Hybrid Logics over Transitive Frames: This paper examines the complexity of hybrid logics over transitive frames, transitive trees, and linear frames. We show that satisfiability over transitive frames for the hybrid language extended with the downarrow operator is NEXPTIME-complete. This is in contrast to undecidability of satisfiability over arbitrary frames for this language (Areces, Blackburn, Marx 1999). It is also shown that adding the @ operator or the past modality leads to undecidability over transitive frames. This is again in contrast to the case of transitive trees and linear frames, where we show these languages to be nonelementarily decidable. Moreover, we establish 2EXPTIME and EXPTIME upper bounds for satisfiability over transitive frames and transitive trees, respectively, for the hybrid Until/Since language. An EXPTIME lower bound is shown to hold for the modal Until language over both frame classes.<|reference_end|> | arxiv | @article{mundhenk2008complexity,
title={Complexity of Hybrid Logics over Transitive Frames},
author={Martin Mundhenk and Thomas Schneider and Thomas Schwentick and
Volker Weber},
note={Affiliations: Mundhenk -- University of Jena; Schneider -- University of
Manchester; Schwentick and Weber -- University of Dortmund},
journal={Workshop "Methods for Modalities" (M4M-4), Informatik-Berichte,
194, pp. 62-78, 2005. ISSN 0863-095X},
year={2008},
archivePrefix={arXiv},
eprint={0806.4130},
primaryClass={cs.LO}
} | mundhenk2008complexity |
arxiv-4159 | 0806.4168 | Established Clustering Procedures for Network Analysis | <|reference_start|>Established Clustering Procedures for Network Analysis: In light of the burgeoning interest in network analysis in the new millenium, we bring to the attention of contemporary network theorists, a two-stage double-standarization and hierarchical clustering (single-linkage-like) procedure devised in 1974. In its many applications over the next decade--primarily to the migration flows between geographic subdivisions within nations--the presence was often revealed of ``hubs''. These are, typically, ``cosmopolitan/non-provincial'' areas--such as the French capital, Paris--which send and receive people relatively broadly across their respective nations. Additionally, this two-stage procedure--which ``might very well be the most successful application of cluster analysis'' (R. C. Dubes)--has detected many (physically or socially) isolated groups (regions) of areas, such as those forming the southern islands, Shikoku and Kyushu, of Japan, the Italian islands of Sardinia and Sicily, and the New England region of the United States. Further, we discuss a (complementary) approach developed in 1976, involving the application of the max-flow/min-cut theorem to raw/non-standardized flows.<|reference_end|> | arxiv | @article{slater2008established,
title={Established Clustering Procedures for Network Analysis},
author={Paul B. Slater},
journal={arXiv preprint arXiv:0806.4168},
year={2008},
archivePrefix={arXiv},
eprint={0806.4168},
primaryClass={physics.soc-ph cs.SI physics.data-an stat.AP}
} | slater2008established |
arxiv-4160 | 0806.4200 | The Secrecy Rate Region of the Broadcast Channel | <|reference_start|>The Secrecy Rate Region of the Broadcast Channel: In this paper, we consider a scenario where a source node wishes to broadcast two confidential messages for two respective receivers, while a wire-tapper also receives the transmitted signal. This model is motivated by wireless communications, where individual secure messages are broadcast over open media and can be received by any illegitimate receiver. The secrecy level is measured by equivocation rate at the eavesdropper. We first study the general (non-degraded) broadcast channel with confidential messages. We present an inner bound on the secrecy capacity region for this model. The inner bound coding scheme is based on a combination of random binning and the Gelfand-Pinsker bining. This scheme matches the Marton's inner bound on the broadcast channel without confidentiality constraint. We further study the situation where the channels are degraded. For the degraded broadcast channel with confidential messages, we present the secrecy capacity region. Our achievable coding scheme is based on Cover's superposition scheme and random binning. We refer to this scheme as Secret Superposition Scheme. In this scheme, we show that randomization in the first layer increases the secrecy rate of the second layer. This capacity region matches the capacity region of the degraded broadcast channel without security constraint. It also matches the secrecy capacity for the conventional wire-tap channel. Our converse proof is based on a combination of the converse proof of the conventional degraded broadcast channel and Csiszar lemma. Finally, we assume that the channels are Additive White Gaussian Noise (AWGN) and show that secret superposition scheme with Gaussian codebook is optimal. The converse proof is based on the generalized entropy power inequality.<|reference_end|> | arxiv | @article{bagherikaram2008the,
title={The Secrecy Rate Region of the Broadcast Channel},
author={Ghadamali Bagherikaram and Abolfazl S. Motahari and Amir K. Khandani},
journal={arXiv preprint arXiv:0806.4200},
year={2008},
archivePrefix={arXiv},
eprint={0806.4200},
primaryClass={cs.IT math.IT}
} | bagherikaram2008the |
arxiv-4161 | 0806.4210 | Agnostically Learning Juntas from Random Walks | <|reference_start|>Agnostically Learning Juntas from Random Walks: We prove that the class of functions g:{-1,+1}^n -> {-1,+1} that only depend on an unknown subset of k<<n variables (so-called k-juntas) is agnostically learnable from a random walk in time polynomial in n, 2^{k^2}, epsilon^{-k}, and log(1/delta). In other words, there is an algorithm with the claimed running time that, given epsilon, delta > 0 and access to a random walk on {-1,+1}^n labeled by an arbitrary function f:{-1,+1}^n -> {-1,+1}, finds with probability at least 1-delta a k-junta that is (opt(f)+epsilon)-close to f, where opt(f) denotes the distance of a closest k-junta to f.<|reference_end|> | arxiv | @article{arpe2008agnostically,
title={Agnostically Learning Juntas from Random Walks},
author={Jan Arpe and Elchanan Mossel},
journal={arXiv preprint arXiv:0806.4210},
year={2008},
archivePrefix={arXiv},
eprint={0806.4210},
primaryClass={cs.LG}
} | arpe2008agnostically |
arxiv-4162 | 0806.4221 | Localized Spanners for Wireless Networks | <|reference_start|>Localized Spanners for Wireless Networks: We present a new efficient localized algorithm to construct, for any given quasi-unit disk graph G=(V,E) and any e > 0, a (1+e)-spanner for G of maximum degree O(1) and total weight O(w(MST)), where w(MST) denotes the weight of a minimum spanning tree for V. We further show that similar localized techniques can be used to construct, for a given unit disk graph G = (V, E), a planar Cdel(1+e)(1+pi/2)-spanner for G of maximum degree O(1) and total weight O(w(MST)). Here Cdel denotes the stretch factor of the unit Delaunay triangulation for V. Both constructions can be completed in O(1) communication rounds, and require each node to know its own coordinates.<|reference_end|> | arxiv | @article{damian2008localized,
title={Localized Spanners for Wireless Networks},
author={Mirela Damian and Sriram V. Pemmaraju},
journal={arXiv preprint arXiv:0806.4221},
year={2008},
archivePrefix={arXiv},
eprint={0806.4221},
primaryClass={cs.DC}
} | damian2008localized |
arxiv-4163 | 0806.4264 | Online network coding for optimal throughput and delay -- the three-receiver case | <|reference_start|>Online network coding for optimal throughput and delay -- the three-receiver case: For a packet erasure broadcast channel with three receivers, we propose a new coding algorithm that makes use of feedback to dynamically adapt the code. Our algorithm is throughput optimal, and we conjecture that it also achieves an asymptotically optimal average decoding delay at the receivers. We consider heavy traffic asymptotics, where the load factor \rho approaches 1 from below with either the arrival rate (\lambda) or the channel parameter (\mu) being fixed at a number less than 1. We verify through simulations that our algorithm achieves an asymptotically optimal decoding delay of O(1/(1-\rho)).<|reference_end|> | arxiv | @article{sundararajan2008online,
title={Online network coding for optimal throughput and delay -- the
three-receiver case},
author={Jay Kumar Sundararajan and Devavrat Shah and Muriel M{\'e}dard},
journal={arXiv preprint arXiv:0806.4264},
year={2008},
archivePrefix={arXiv},
eprint={0806.4264},
primaryClass={cs.IT math.IT}
} | sundararajan2008online |
arxiv-4164 | 0806.4286 | Implementation for blow up of tornado-type solutions for complex version of 3D Navier-Stokes system | <|reference_start|>Implementation for blow up of tornado-type solutions for complex version of 3D Navier-Stokes system: We consider Cauchy problem for Fourier transformation of 3-dimensional Navier-Stokes system with zero external force. Using initial data purposed by Dong Li and Ya.G.Sinai we implement self-similar regime producing fast growing behavior of the energy of solution while time tends to critical value.<|reference_end|> | arxiv | @article{arnold2008implementation,
title={Implementation for blow up of tornado-type solutions for complex version
of 3D Navier-Stokes system},
author={M. D. Arnold and A. V. Khokhlov},
journal={arXiv preprint arXiv:0806.4286},
year={2008},
archivePrefix={arXiv},
eprint={0806.4286},
primaryClass={cs.NA}
} | arnold2008implementation |
arxiv-4165 | 0806.4293 | Scalar Quantization for Audio Data Coding | <|reference_start|>Scalar Quantization for Audio Data Coding: This paper is concerned with scalar quantization of transform coefficients in an audio codec. The generalized Gaussian distribution (GGD) is used as an approximation of one-dimensional probability density function for transform coefficients obtained by modulated lapped transform (MLT) or modified cosine transform (MDCT) filterbank. The rationale of the model is provided in comparison with theoretically achievable rate-distortion function. The rate-distortion function computed for the random sequence obtained from a real sequence of samples from a large database is compared with that computed for random sequence obtained by a GGD random generator. A simple algorithm of constructing the Extended Zero Zone (EZZ) quantizer is proposed. Simulation results show that the EZZ quantizer yields a negligible loss in terms of coding efficiency compared to optimal scalar quantizers. Furthermore, we describe an adaptive version of the EZZ quantizer which works efficiently with low bitrate requirements for transmitting side information<|reference_end|> | arxiv | @article{kudryashov2008scalar,
title={Scalar Quantization for Audio Data Coding},
author={Boris D. Kudryashov and Anton V. Porov and Eunmi L. Oh},
journal={arXiv preprint arXiv:0806.4293},
year={2008},
archivePrefix={arXiv},
eprint={0806.4293},
primaryClass={cs.MM cs.IT math.IT}
} | kudryashov2008scalar |
arxiv-4166 | 0806.4326 | An Efficient Algorithm for 2D Euclidean 2-Center with Outliers | <|reference_start|>An Efficient Algorithm for 2D Euclidean 2-Center with Outliers: For a set P of n points in R^2, the Euclidean 2-center problem computes a pair of congruent disks of the minimal radius that cover P. We extend this to the (2,k)-center problem where we compute the minimal radius pair of congruent disks to cover n-k points of P. We present a randomized algorithm with O(n k^7 log^3 n) expected running time for the (2,k)-center problem. We also study the (p,k)-center problem in R}^2 under the \ell_\infty-metric. We give solutions for p=4 in O(k^{O(1)} n log n) time and for p=5 in O(k^{O(1)} n log^5 n) time.<|reference_end|> | arxiv | @article{agarwal2008an,
title={An Efficient Algorithm for 2D Euclidean 2-Center with Outliers},
author={Pankaj K. Agarwal and Jeff M. Phillips},
journal={arXiv preprint arXiv:0806.4326},
year={2008},
archivePrefix={arXiv},
eprint={0806.4326},
primaryClass={cs.CG}
} | agarwal2008an |
arxiv-4167 | 0806.4341 | On Sequences with Non-Learnable Subsequences | <|reference_start|>On Sequences with Non-Learnable Subsequences: The remarkable results of Foster and Vohra was a starting point for a series of papers which show that any sequence of outcomes can be learned (with no prior knowledge) using some universal randomized forecasting algorithm and forecast-dependent checking rules. We show that for the class of all computationally efficient outcome-forecast-based checking rules, this property is violated. Moreover, we present a probabilistic algorithm generating with probability close to one a sequence with a subsequence which simultaneously miscalibrates all partially weakly computable randomized forecasting algorithms. %subsequences non-learnable by each randomized algorithm. According to the Dawid's prequential framework we consider partial recursive randomized algorithms.<|reference_end|> | arxiv | @article{v'yugin2008on,
title={On Sequences with Non-Learnable Subsequences},
author={Vladimir V. V'yugin},
journal={LNCS 5010, pp. 302-313, 2008},
year={2008},
archivePrefix={arXiv},
eprint={0806.4341},
primaryClass={cs.AI cs.LG}
} | v'yugin2008on |
arxiv-4168 | 0806.4344 | Approximability and parameterized complexity of minmax values | <|reference_start|>Approximability and parameterized complexity of minmax values: We consider approximating the minmax value of a multi-player game in strategic form. Tightening recent bounds by Borgs et al., we observe that approximating the value with a precision of epsilon log n digits (for any constant epsilon>0 is NP-hard, where n is the size of the game. On the other hand, approximating the value with a precision of c log log n digits (for any constant c >= 1) can be done in quasi-polynomial time. We consider the parameterized complexity of the problem, with the parameter being the number of pure strategies k of the player for which the minmax value is computed. We show that if there are three players, k=2 and there are only two possible rational payoffs, the minmax value is a rational number and can be computed exactly in linear time. In the general case, we show that the value can be approximated with any polynomial number of digits of accuracy in time n^(O(k)). On the other hand, we show that minmax value approximation is W[1]-hard and hence not likely to be fixed parameter tractable. Concretely, we show that if k-CLIQUE requires time n^(Omega(k)) then so does minmax value computation.<|reference_end|> | arxiv | @article{hansen2008approximability,
title={Approximability and parameterized complexity of minmax values},
author={Kristoffer Arnsfelt Hansen and Thomas Dueholm Hansen and Peter Bro
Miltersen and Troels Bjerre S{\o}rensen},
journal={arXiv preprint arXiv:0806.4344},
year={2008},
archivePrefix={arXiv},
eprint={0806.4344},
primaryClass={cs.GT}
} | hansen2008approximability |
arxiv-4169 | 0806.4361 | Space Efficient Multi-Dimensional Range Reporting | <|reference_start|>Space Efficient Multi-Dimensional Range Reporting: We present a data structure that supports three-dimensional range reporting queries in $O(\log \log U + (\log \log n)^3+k)$ time and uses $O(n\log^{1+\eps} n)$ space, where $U$ is the size of the universe, $k$ is the number of points in the answer,and $\eps$ is an arbitrary constant. This result improves over the data structure of Alstrup, Brodal, and Rauhe (FOCS 2000) that uses $O(n\log^{1+\eps} n)$ space and supports queries in $O(\log n+k)$ time,the data structure of Nekrich (SoCG'07) that uses $O(n\log^{3} n)$ space and supports queries in $O(\log \log U + (\log \log n)^2 + k)$ time, and the data structure of Afshani (ESA'08) that uses $O(n\log^{3} n)$ space and also supports queries in $O(\log \log U + (\log \log n)^2 + k)$ time but relies on randomization during the preprocessing stage. Our result allows us to significantly reduce the space usage of the fastest previously known static and incremental $d$-dimensional data structures, $d\geq 3$, at a cost of increasing the query time by a negligible $O(\log \log n)$ factor.<|reference_end|> | arxiv | @article{karpinski2008space,
title={Space Efficient Multi-Dimensional Range Reporting},
author={Marek Karpinski and Yakov Nekrich},
journal={arXiv preprint arXiv:0806.4361},
year={2008},
archivePrefix={arXiv},
eprint={0806.4361},
primaryClass={cs.DS cs.CG}
} | karpinski2008space |
arxiv-4170 | 0806.4372 | The 1-fixed-endpoint Path Cover Problem is Polynomial on Interval Graph | <|reference_start|>The 1-fixed-endpoint Path Cover Problem is Polynomial on Interval Graph: We consider a variant of the path cover problem, namely, the $k$-fixed-endpoint path cover problem, or kPC for short, on interval graphs. Given a graph $G$ and a subset $\mathcal{T}$ of $k$ vertices of $V(G)$, a $k$-fixed-endpoint path cover of $G$ with respect to $\mathcal{T}$ is a set of vertex-disjoint paths $\mathcal{P}$ that covers the vertices of $G$ such that the $k$ vertices of $\mathcal{T}$ are all endpoints of the paths in $\mathcal{P}$. The kPC problem is to find a $k$-fixed-endpoint path cover of $G$ of minimum cardinality; note that, if $\mathcal{T}$ is empty the stated problem coincides with the classical path cover problem. In this paper, we study the 1-fixed-endpoint path cover problem on interval graphs, or 1PC for short, generalizing the 1HP problem which has been proved to be NP-complete even for small classes of graphs. Motivated by a work of Damaschke, where he left both 1HP and 2HP problems open for the class of interval graphs, we show that the 1PC problem can be solved in polynomial time on the class of interval graphs. The proposed algorithm is simple, runs in $O(n^2)$ time, requires linear space, and also enables us to solve the 1HP problem on interval graphs within the same time and space complexity.<|reference_end|> | arxiv | @article{asdre2008the,
title={The 1-fixed-endpoint Path Cover Problem is Polynomial on Interval Graph},
author={Katerina Asdre and Stavros D. Nikolopoulos},
journal={arXiv preprint arXiv:0806.4372},
year={2008},
archivePrefix={arXiv},
eprint={0806.4372},
primaryClass={cs.DS cs.DM}
} | asdre2008the |
arxiv-4171 | 0806.4391 | Prediction with Expert Advice in Games with Unbounded One-Step Gains | <|reference_start|>Prediction with Expert Advice in Games with Unbounded One-Step Gains: The games of prediction with expert advice are considered in this paper. We present some modification of Kalai and Vempala algorithm of following the perturbed leader for the case of unrestrictedly large one-step gains. We show that in general case the cumulative gain of any probabilistic prediction algorithm can be much worse than the gain of some expert of the pool. Nevertheless, we give the lower bound for this cumulative gain in general case and construct a universal algorithm which has the optimal performance; we also prove that in case when one-step gains of experts of the pool have ``limited deviations'' the performance of our algorithm is close to the performance of the best expert.<|reference_end|> | arxiv | @article{v'yugin2008prediction,
title={Prediction with Expert Advice in Games with Unbounded One-Step Gains},
author={Vladimir V. V'yugin},
journal={arXiv preprint arXiv:0806.4391},
year={2008},
archivePrefix={arXiv},
eprint={0806.4391},
primaryClass={cs.LG cs.AI}
} | v'yugin2008prediction |
arxiv-4172 | 0806.4415 | On the inner and outer bounds of 3-receiver broadcast channels with 2-degraded message sets | <|reference_start|>On the inner and outer bounds of 3-receiver broadcast channels with 2-degraded message sets: We consider a broadcast channel with 3 receivers and 2 messages (M0, M1) where two of the three receivers need to decode messages (M0, M1) while the remaining one just needs to decode the message M0. We study the best known inner and outer bounds under this setting, in an attempt to find the deficiencies with the current techniques of establishing the bounds. We produce a simple example where we are able to explicitly evaluate the inner bound and show that it differs from the general outer bound. For a class of channels where the general inner and outer bounds differ, we use a new argument to show that the inner bound is tight.<|reference_end|> | arxiv | @article{nair2008on,
title={On the inner and outer bounds of 3-receiver broadcast channels with
2-degraded message sets},
author={Chandra Nair and Vincent Wang Zizhou},
journal={arXiv preprint arXiv:0806.4415},
year={2008},
archivePrefix={arXiv},
eprint={0806.4415},
primaryClass={cs.IT math.IT}
} | nair2008on |
arxiv-4173 | 0806.4422 | Computationally Efficient Estimators for Dimension Reductions Using Stable Random Projections | <|reference_start|>Computationally Efficient Estimators for Dimension Reductions Using Stable Random Projections: The method of stable random projections is a tool for efficiently computing the $l_\alpha$ distances using low memory, where $0<\alpha \leq 2$ is a tuning parameter. The method boils down to a statistical estimation task and various estimators have been proposed, based on the geometric mean, the harmonic mean, and the fractional power etc. This study proposes the optimal quantile estimator, whose main operation is selecting, which is considerably less expensive than taking fractional power, the main operation in previous estimators. Our experiments report that the optimal quantile estimator is nearly one order of magnitude more computationally efficient than previous estimators. For large-scale learning tasks in which storing and computing pairwise distances is a serious bottleneck, this estimator should be desirable. In addition to its computational advantages, the optimal quantile estimator exhibits nice theoretical properties. It is more accurate than previous estimators when $\alpha>1$. We derive its theoretical error bounds and establish the explicit (i.e., no hidden constants) sample complexity bound.<|reference_end|> | arxiv | @article{li2008computationally,
title={Computationally Efficient Estimators for Dimension Reductions Using
Stable Random Projections},
author={Ping Li},
journal={arXiv preprint arXiv:0806.4422},
year={2008},
archivePrefix={arXiv},
eprint={0806.4422},
primaryClass={cs.LG}
} | li2008computationally |
arxiv-4174 | 0806.4423 | On Approximating the Lp Distances for p>2 | <|reference_start|>On Approximating the Lp Distances for p>2: Applications in machine learning and data mining require computing pairwise Lp distances in a data matrix A. For massive high-dimensional data, computing all pairwise distances of A can be infeasible. In fact, even storing A or all pairwise distances of A in the memory may be also infeasible. This paper proposes a simple method for p = 2, 4, 6, ... We first decompose the l_p (where p is even) distances into a sum of 2 marginal norms and p-1 ``inner products'' at different orders. Then we apply normal or sub-Gaussian random projections to approximate the resultant ``inner products,'' assuming that the marginal norms can be computed exactly by a linear scan. We propose two strategies for applying random projections. The basic projection strategy requires only one projection matrix but it is more difficult to analyze, while the alternative projection strategy requires p-1 projection matrices but its theoretical analysis is much easier. In terms of the accuracy, at least for p=4, the basic strategy is always more accurate than the alternative strategy if the data are non-negative, which is common in reality.<|reference_end|> | arxiv | @article{li2008on,
title={On Approximating the Lp Distances for p>2},
author={Ping Li},
journal={arXiv preprint arXiv:0806.4423},
year={2008},
archivePrefix={arXiv},
eprint={0806.4423},
primaryClass={cs.LG}
} | li2008on |
arxiv-4175 | 0806.4451 | Counteracting Byzantine Adversaries with Network Coding: An Overhead Analysis | <|reference_start|>Counteracting Byzantine Adversaries with Network Coding: An Overhead Analysis: Network coding increases throughput and is robust against failures and erasures. However, since it allows mixing of information within the network, a single corrupted packet generated by a Byzantine attacker can easily contaminate the information to multiple destinations. In this paper, we study the transmission overhead associated with three different schemes for detecting Byzantine adversaries at a node using network coding: end-to-end error correction, packet-based Byzantine detection scheme, and generation-based Byzantine detection scheme. In end-to-end error correction, it is known that we can correct up to the min-cut between the source and destinations. However, if we use Byzantine detection schemes, we can detect polluted data, drop them, and therefore, only transmit valid data. For the dropped data, the destinations perform erasure correction, which is computationally lighter than error correction. We show that, with enough attackers present in the network, Byzantine detection schemes may improve the throughput of the network since we choose to forward only reliable information. When the probability of attack is high, a packet-based detection scheme is the most bandwidth efficient; however, when the probability of attack is low, the overhead involved with signing each packet becomes costly, and the generation-based scheme may be preferred. Finally, we characterize the tradeoff between generation size and overhead of detection in bits as the probability of attack increases in the network.<|reference_end|> | arxiv | @article{kim2008counteracting,
title={Counteracting Byzantine Adversaries with Network Coding: An Overhead
Analysis},
author={MinJi Kim and Muriel Medard and Joao Barros},
journal={arXiv preprint arXiv:0806.4451},
year={2008},
archivePrefix={arXiv},
eprint={0806.4451},
primaryClass={cs.IT cs.CR math.IT}
} | kim2008counteracting |
arxiv-4176 | 0806.4468 | On Ergodic Sum Capacity of Fading Cognitive Multiple-Access and Broadcast Channels | <|reference_start|>On Ergodic Sum Capacity of Fading Cognitive Multiple-Access and Broadcast Channels: This paper studies the information-theoretic limits of a secondary or cognitive radio (CR) network under spectrum sharing with an existing primary radio network. In particular, the fading cognitive multiple-access channel (C-MAC) is first studied, where multiple secondary users transmit to the secondary base station (BS) under both individual transmit-power constraints and a set of interference-power constraints each applied at one of the primary receivers. This paper considers the long-term (LT) or the short-term (ST) transmit-power constraint over the fading states at each secondary transmitter, combined with the LT or ST interference-power constraint at each primary receiver. In each case, the optimal power allocation scheme is derived for the secondary users to achieve the ergodic sum capacity of the fading C-MAC, as well as the conditions for the optimality of the dynamic time-division-multiple-access (D-TDMA) scheme in the secondary network. The fading cognitive broadcast channel (C-BC) that models the downlink transmission in the secondary network is then studied under the LT/ST transmit-power constraint at the secondary BS jointly with the LT/ST interference-power constraint at each of the primary receivers. It is shown that D-TDMA is indeed optimal for achieving the ergodic sum capacity of the fading C-BC for all combinations of transmit-power and interference-power constraints.<|reference_end|> | arxiv | @article{zhang2008on,
title={On Ergodic Sum Capacity of Fading Cognitive Multiple-Access and
Broadcast Channels},
author={Rui Zhang and Shuguang Cui and Ying-Chang Liang},
journal={arXiv preprint arXiv:0806.4468},
year={2008},
doi={10.1109/TIT.2009.2030449},
archivePrefix={arXiv},
eprint={0806.4468},
primaryClass={cs.IT math.IT}
} | zhang2008on |
arxiv-4177 | 0806.4484 | On empirical meaning of randomness with respect to a real parameter | <|reference_start|>On empirical meaning of randomness with respect to a real parameter: We study the empirical meaning of randomness with respect to a family of probability distributions $P_\theta$, where $\theta$ is a real parameter, using algorithmic randomness theory. In the case when for a computable probability distribution $P_\theta$ an effectively strongly consistent estimate exists, we show that the Levin's a priory semicomputable semimeasure of the set of all $P_\theta$-random sequences is positive if and only if the parameter $\theta$ is a computable real number. The different methods for generating ``meaningful'' $P_\theta$-random sequences with noncomputable $\theta$ are discussed.<|reference_end|> | arxiv | @article{v'yugin2008on,
title={On empirical meaning of randomness with respect to a real parameter},
author={Vladimir V'yugin},
journal={LNCS 4649, pp. 387-396, 2007},
year={2008},
archivePrefix={arXiv},
eprint={0806.4484},
primaryClass={cs.LG cs.AI}
} | v'yugin2008on |
arxiv-4178 | 0806.4510 | On Field Size and Success Probability in Network Coding | <|reference_start|>On Field Size and Success Probability in Network Coding: Using tools from algebraic geometry and Groebner basis theory we solve two problems in network coding. First we present a method to determine the smallest field size for which linear network coding is feasible. Second we derive improved estimates on the success probability of random linear network coding. These estimates take into account which monomials occur in the support of the determinant of the product of Edmonds matrices. Therefore we finally investigate which monomials can occur in the determinant of the Edmonds matrix.<|reference_end|> | arxiv | @article{geil2008on,
title={On Field Size and Success Probability in Network Coding},
author={Olav Geil and Ryutaroh Matsumoto and Casper Thomsen},
journal={Proceedings of the 2nd International Workshop on the Arithmetic of
Finite Fields, WAIFI 2008, pp. 157-173},
year={2008},
doi={10.1007/978-3-540-69499-1_14},
archivePrefix={arXiv},
eprint={0806.4510},
primaryClass={cs.IT math.IT}
} | geil2008on |
arxiv-4179 | 0806.4511 | The model of quantum evolution | <|reference_start|>The model of quantum evolution: This paper has been withdrawn by the author due to extremely unscientific errors.<|reference_end|> | arxiv | @article{wishnevsky2008the,
title={The model of quantum evolution},
author={Konstantin P. Wishnevsky},
journal={arXiv preprint arXiv:0806.4511},
year={2008},
archivePrefix={arXiv},
eprint={0806.4511},
primaryClass={cs.AI}
} | wishnevsky2008the |
arxiv-4180 | 0806.4526 | WiPal: Efficient Offline Merging of IEEE 80211 Traces | <|reference_start|>WiPal: Efficient Offline Merging of IEEE 80211 Traces: Merging wireless traces is a fundamental step in measurement-based studies involving multiple packet sniffers. Existing merging tools either require a wired infrastructure or are limited in their usability. We propose WiPal, an offline merging tool for IEEE 802.11 traces that has been designed to be efficient and simple to use. WiPal is flexible in the sense that it does not require any specific services, neither from monitors (like synchronization, access to a wired network, or embedding specific software) nor from its software environment (e.g. an SQL server). We present WiPal's operation and show how its features - notably, its modular design - improve both ease of use and efficiency. Experiments on real traces show that WiPal is an order of magnitude faster than other tools providing the same features. To our knowledge, WiPal is the only offline trace merger that can be used by the research community in a straightforward fashion.<|reference_end|> | arxiv | @article{claveirole2008wipal:,
title={{WiPal}: Efficient Offline Merging of {IEEE} 802.11 Traces},
author={Thomas Claveirole and Marcelo Dias de Amorim},
journal={arXiv preprint arXiv:0806.4526},
year={2008},
archivePrefix={arXiv},
eprint={0806.4526},
primaryClass={cs.NI}
} | claveirole2008wipal: |
arxiv-4181 | 0806.4553 | Interpolation in local theory extensions | <|reference_start|>Interpolation in local theory extensions: In this paper we study interpolation in local extensions of a base theory. We identify situations in which it is possible to obtain interpolants in a hierarchical manner, by using a prover and a procedure for generating interpolants in the base theory as black-boxes. We present several examples of theory extensions in which interpolants can be computed this way, and discuss applications in verification, knowledge representation, and modular reasoning in combinations of local theories.<|reference_end|> | arxiv | @article{sofronie-stokkermans2008interpolation,
title={Interpolation in local theory extensions},
author={Viorica Sofronie-Stokkermans},
journal={Logical Methods in Computer Science, Volume 4, Issue 4 (October
17, 2008) lmcs:1143},
year={2008},
doi={10.2168/LMCS-4(4:1)2008},
archivePrefix={arXiv},
eprint={0806.4553},
primaryClass={cs.LO cs.SE}
} | sofronie-stokkermans2008interpolation |
arxiv-4182 | 0806.4572 | Problems of robustness for universal coding schemes | <|reference_start|>Problems of robustness for universal coding schemes: The Lempel-Ziv universal coding scheme is asymptotically optimal for the class of all stationary ergodic sources. A problem of robustness of this property under small violations of ergodicity is studied. A notion of deficiency of algorithmic randomness is used as a measure of disagreement between data sequence and probability measure. We prove that universal compressing schemes from a large class are non-robust in the following sense: if the randomness deficiency grows arbitrarily slowly on initial fragments of an infinite sequence then the property of asymptotic optimality of any universal compressing algorithm can be violated. Lempel-Ziv compressing algorithms are robust on infinite sequences generated by ergodic Markov chains when the randomness deficiency of its initial fragments of length $n$ grows as $o(n)$.<|reference_end|> | arxiv | @article{v'yugin2008problems,
title={Problems of robustness for universal coding schemes},
author={V. V. V'yugin},
journal={Problems of Information Transmission, 39 (2003), pp. 32-46},
year={2008},
archivePrefix={arXiv},
eprint={0806.4572},
primaryClass={cs.IT cs.OH math.IT}
} | v'yugin2008problems |
arxiv-4183 | 0806.4627 | SP2Bench: A SPARQL Performance Benchmark | <|reference_start|>SP2Bench: A SPARQL Performance Benchmark: Recently, the SPARQL query language for RDF has reached the W3C recommendation status. In response to this emerging standard, the database community is currently exploring efficient storage techniques for RDF data and evaluation strategies for SPARQL queries. A meaningful analysis and comparison of these approaches necessitates a comprehensive and universal benchmark platform. To this end, we have developed SP^2Bench, a publicly available, language-specific SPARQL performance benchmark. SP^2Bench is settled in the DBLP scenario and comprises both a data generator for creating arbitrarily large DBLP-like documents and a set of carefully designed benchmark queries. The generated documents mirror key characteristics and social-world distributions encountered in the original DBLP data set, while the queries implement meaningful requests on top of this data, covering a variety of SPARQL operator constellations and RDF access patterns. As a proof of concept, we apply SP^2Bench to existing engines and discuss their strengths and weaknesses that follow immediately from the benchmark results.<|reference_end|> | arxiv | @article{schmidt2008sp2bench:,
title={{SP2Bench}: A {SPARQL} Performance Benchmark},
author={Michael Schmidt and Thomas Hornung and Georg Lausen and Christoph Pinkel},
journal={arXiv preprint arXiv:0806.4627},
year={2008},
archivePrefix={arXiv},
eprint={0806.4627},
primaryClass={cs.DB cs.PF}
} | schmidt2008sp2bench: |
arxiv-4184 | 0806.4631 | The Heap Lambda Machine | <|reference_start|>The Heap Lambda Machine: This paper introduces a new machine architecture for evaluating lambda expressions using the normal-order reduction, which guarantees that every lambda expression will be evaluated if the expression has its normal form and the system has enough memory. The architecture considered here operates using heap memory only. Lambda expressions are represented as graphs, and all algorithms used in the processing unit of this machine are non-recursive.<|reference_end|> | arxiv | @article{salikhmetov2008the,
title={The Heap Lambda Machine},
author={Anton Salikhmetov},
journal={arXiv preprint arXiv:0806.4631},
year={2008},
archivePrefix={arXiv},
eprint={0806.4631},
primaryClass={cs.LO}
} | salikhmetov2008the |
arxiv-4185 | 0806.4648 | An Algebraic Approach for the MIMO Control of Small Scale Helicopter | <|reference_start|>An Algebraic Approach for the MIMO Control of Small Scale Helicopter: The control of small-scale helicopter is a MIMO problem. To use of classical control approach to formally solve a MIMO problem, one needs to come up with multidimensional Root Locus diagram to tune the control parameters. The problem with the required dimension of the RL diagram for MIMO design has forced the design procedure of classical approach to be conducted in cascaded multi-loop SISO system starting from the innermost loop outward. To implement this control approach for a helicopter, a pitch and roll attitude control system is often subordinated to a, respectively, longitudinal and lateral velocity control system in a nested architecture. The requirement for this technique to work is that the inner attitude control loop must have a higher bandwidth than the outer velocity control loop which is not the case for high performance mini helicopter. To address the above problems, an algebraic design approach is proposed in this work. The designed control using s-CDM approach is demonstrated for hovering control of small-scale helicopter simultaneously subjected to plant parameter uncertainties and wind disturbances.<|reference_end|> | arxiv | @article{budiyono2008an,
title={An Algebraic Approach for the {MIMO} Control of Small Scale Helicopter},
author={A. Budiyono and T. Sudiyanto},
journal={Proceedings of the International Conference on Intelligent
Unmanned System (ICIUS 2007), Bali, Indonesia, October 24-25, 2007, Paper No.
ICIUS2007-A014},
year={2008},
archivePrefix={arXiv},
eprint={0806.4648},
primaryClass={cs.RO}
} | budiyono2008an |
arxiv-4186 | 0806.4650 | Structural Damage Detection Using Randomized Trained Neural Networks | <|reference_start|>Structural Damage Detection Using Randomized Trained Neural Networks: A computationally method on damage detection problems in structures was conducted using neural networks. The problem that is considered in this works consists of estimating the existence, location and extent of stiffness reduction in structure which is indicated by the changes of the structural static parameters such as deflection and strain. The neural network was trained to recognize the behaviour of static parameter of the undamaged structure as well as of the structure with various possible damage extent and location which were modelled as random states. The proposed techniques were applied to detect damage in a simply supported beam. The structure was analyzed using finite-element-method (FEM) and the damage identification was conducted by a back-propagation neural network using the change of the structural strain and displacement. The results showed that using proposed method the strain is more efficient for identification of damage than the displacement.<|reference_end|> | arxiv | @article{haryanto2008structural,
title={Structural Damage Detection Using Randomized Trained Neural Networks},
author={Ismoyo Haryanto and Joga Dharma Setiawan and Agus Budiyono},
journal={Proceedings of the International Conference on Intelligent
Unmanned System (ICIUS 2007), Bali, Indonesia, October 24-25, 2007, Paper No.
ICIUS2007-C022},
year={2008},
archivePrefix={arXiv},
eprint={0806.4650},
primaryClass={cs.NE}
} | haryanto2008structural |
arxiv-4187 | 0806.4652 | A Fixed-Parameter Algorithm for Random Instances of Weighted d-CNF Satisfiability | <|reference_start|>A Fixed-Parameter Algorithm for Random Instances of Weighted d-CNF Satisfiability: We study random instances of the weighted $d$-CNF satisfiability problem (WEIGHTED $d$-SAT), a generic W[1]-complete problem. A random instance of the problem consists of a fixed parameter $k$ and a random $d$-CNF formula $\weicnf{n}{p}{k, d}$ generated as follows: for each subset of $d$ variables and with probability $p$, a clause over the $d$ variables is selected uniformly at random from among the $2^d - 1$ clauses that contain at least one negated literals. We show that random instances of WEIGHTED $d$-SAT can be solved in $O(k^2n + n^{O(1)})$-time with high probability, indicating that typical instances of WEIGHTED $d$-SAT under this instance distribution are fixed-parameter tractable. The result also hold for random instances from the model $\weicnf{n}{p}{k,d}(d')$ where clauses containing less than $d' (1 < d' < d)$ negated literals are forbidden, and for random instances of the renormalized (miniaturized) version of WEIGHTED $d$-SAT in certain range of the random model's parameter $p(n)$. This, together with our previous results on the threshold behavior and the resolution complexity of unsatisfiable instances of $\weicnf{n}{p}{k, d}$, provides an almost complete characterization of the typical-case behavior of random instances of WEIGHTED $d$-SAT.<|reference_end|> | arxiv | @article{gao2008a,
title={A Fixed-Parameter Algorithm for Random Instances of Weighted d-CNF
Satisfiability},
author={Yong Gao},
journal={arXiv preprint arXiv:0806.4652},
year={2008},
archivePrefix={arXiv},
eprint={0806.4652},
primaryClass={cs.DS cs.AI cs.CC}
} | gao2008a |
arxiv-4188 | 0806.4667 | Overlaid Cellular and Mobile Ad Hoc Networks | <|reference_start|>Overlaid Cellular and Mobile Ad Hoc Networks: In cellular systems using frequency division duplex, growing Internet services cause unbalance of uplink and downlink traffic, resulting in poor uplink spectrum utilization. Addressing this issue, this paper considers overlaying an ad hoc network onto a cellular uplink network for improving spectrum utilization and spatial reuse efficiency. Transmission capacities of the overlaid networks are analyzed, which are defined as the maximum densities of the ad hoc nodes and mobile users under an outage constraint. Using tools from stochastic geometry, the capacity tradeoff curves for the overlaid networks are shown to be linear. Deploying overlaid networks based on frequency separation is proved to achieve higher network capacities than that based on spatial separation. Furthermore, spatial diversity is shown to enhance network capacities.<|reference_end|> | arxiv | @article{huang2008overlaid,
title={Overlaid Cellular and Mobile Ad Hoc Networks},
author={Kaibin Huang and Yan Chen and Bin Chen and Xia Yang and Vincent K. N. Lau},
journal={arXiv preprint arXiv:0806.4667},
year={2008},
doi={10.1109/ICCS.2008.4737445},
archivePrefix={arXiv},
eprint={0806.4667},
primaryClass={cs.IT math.IT}
} | huang2008overlaid |
arxiv-4189 | 0806.4686 | Sparse Online Learning via Truncated Gradient | <|reference_start|>Sparse Online Learning via Truncated Gradient: We propose a general method called truncated gradient to induce sparsity in the weights of online learning algorithms with convex loss functions. This method has several essential properties: The degree of sparsity is continuous -- a parameter controls the rate of sparsification from no sparsification to total sparsification. The approach is theoretically motivated, and an instance of it can be regarded as an online counterpart of the popular $L_1$-regularization method in the batch setting. We prove that small rates of sparsification result in only small additional regret with respect to typical online learning guarantees. The approach works well empirically. We apply the approach to several datasets and find that for datasets with large numbers of features, substantial sparsity is discoverable.<|reference_end|> | arxiv | @article{langford2008sparse,
title={Sparse Online Learning via Truncated Gradient},
author={John Langford and Lihong Li and Tong Zhang},
journal={arXiv preprint arXiv:0806.4686},
year={2008},
archivePrefix={arXiv},
eprint={0806.4686},
primaryClass={cs.LG cs.AI}
} | langford2008sparse |
arxiv-4190 | 0806.4695 | On the Throughput Allocation for Proportional Fairness in Multirate IEEE 80211 DCF | <|reference_start|>On the Throughput Allocation for Proportional Fairness in Multirate IEEE 80211 DCF: This paper presents a modified proportional fairness (PF) criterion suitable for mitigating the \textit{rate anomaly} problem of multirate IEEE 802.11 Wireless LANs employing the mandatory Distributed Coordination Function (DCF) option. Compared to the widely adopted assumption of saturated network, the proposed criterion can be applied to general networks whereby the contending stations are characterized by specific packet arrival rates, $\lambda_s$, and transmission rates $R_d^{s}$. The throughput allocation resulting from the proposed algorithm is able to greatly increase the aggregate throughput of the DCF while ensuring fairness levels among the stations of the same order of the ones available with the classical PF criterion. Put simply, each station is allocated a throughput that depends on a suitable normalization of its packet rate, which, to some extent, measures the frequency by which the station tries to gain access to the channel. Simulation results are presented for some sample scenarios, confirming the effectiveness of the proposed criterion.<|reference_end|> | arxiv | @article{daneshgaran2008on,
title={On the Throughput Allocation for Proportional Fairness in Multirate IEEE
802.11 DCF},
author={F. Daneshgaran and M. Laddomada and F. Mesiti and M. Mondin},
journal={arXiv preprint arXiv:0806.4695},
year={2008},
doi={10.1109/CCNC.2009.4784741},
archivePrefix={arXiv},
eprint={0806.4695},
primaryClass={cs.NI}
} | daneshgaran2008on |
arxiv-4191 | 0806.4703 | Challenging More Updates: Towards Anonymous Re-publication of Fully Dynamic Datasets | <|reference_start|>Challenging More Updates: Towards Anonymous Re-publication of Fully Dynamic Datasets: Most existing anonymization work has been done on static datasets, which have no update and need only one-time publication. Recent studies consider anonymizing dynamic datasets with external updates: the datasets are updated with record insertions and/or deletions. This paper addresses a new problem: anonymous re-publication of datasets with internal updates, where the attribute values of each record are dynamically updated. This is an important and challenging problem for attribute values of records are updating frequently in practice and existing methods are unable to deal with such a situation. We initiate a formal study of anonymous re-publication of dynamic datasets with internal updates, and show the invalidation of existing methods. We introduce theoretical definition and analysis of dynamic datasets, and present a general privacy disclosure framework that is applicable to all anonymous re-publication problems. We propose a new counterfeited generalization principle alled m-Distinct to effectively anonymize datasets with both external updates and internal updates. We also develop an algorithm to generalize datasets to meet m-Distinct. The experiments conducted on real-world data demonstrate the effectiveness of the proposed solution.<|reference_end|> | arxiv | @article{li2008challenging,
title={Challenging More Updates: Towards Anonymous Re-publication of Fully
Dynamic Datasets},
author={Feng Li and Shuigeng Zhou},
journal={arXiv preprint arXiv:0806.4703},
year={2008},
archivePrefix={arXiv},
eprint={0806.4703},
primaryClass={cs.DB}
} | li2008challenging |
arxiv-4192 | 0806.4722 | Malleable Coding: Compressed Palimpsests | <|reference_start|>Malleable Coding: Compressed Palimpsests: A malleable coding scheme considers not only compression efficiency but also the ease of alteration, thus encouraging some form of recycling of an old compressed version in the formation of a new one. Malleability cost is the difficulty of synchronizing compressed versions, and malleable codes are of particular interest when representing information and modifying the representation are both expensive. We examine the trade-off between compression efficiency and malleability cost under a malleability metric defined with respect to a string edit distance. This problem introduces a metric topology to the compressed domain. We characterize the achievable rates and malleability as the solution of a subgraph isomorphism problem. This can be used to argue that allowing conditional entropy of the edited message given the original message to grow linearly with block length creates an exponential increase in code length.<|reference_end|> | arxiv | @article{varshney2008malleable,
title={Malleable Coding: Compressed Palimpsests},
author={Varshney, Lav R. and Kusuma, Julius and Goyal, Vivek K},
journal={arXiv preprint arXiv:0806.4722},
year={2008},
archivePrefix={arXiv},
eprint={0806.4722},
primaryClass={cs.IT math.IT}
} | varshney2008malleable |
arxiv-4193 | 0806.4735 | Linear Time Algorithms for Finding a Dominating Set of Fixed Size in Degenerated Graphs | <|reference_start|>Linear Time Algorithms for Finding a Dominating Set of Fixed Size in Degenerated Graphs: There is substantial literature dealing with fixed parameter algorithms for the dominating set problem on various families of graphs. In this paper, we give a $k^{O(dk)} n$ time algorithm for finding a dominating set of size at most $k$ in a $d$-degenerated graph with $n$ vertices. This proves that the dominating set problem is fixed-parameter tractable for degenerated graphs. For graphs that do not contain $K_h$ as a topological minor, we give an improved algorithm for the problem with running time $(O(h))^{hk} n$. For graphs which are $K_h$-minor-free, the running time is further reduced to $(O(\log h))^{hk/2} n$. Fixed-parameter tractable algorithms that are linear in the number of vertices of the graph were previously known only for planar graphs. For the families of graphs discussed above, the problem of finding an induced cycle of a given length is also addressed. For every fixed $H$ and $k$, we show that if an $H$-minor-free graph $G$ with $n$ vertices contains an induced cycle of size $k$, then such a cycle can be found in O(n) expected time as well as in $O(n \log n)$ worst-case time. Some results are stated concerning the (im)possibility of establishing linear time algorithms for the more general family of degenerated graphs.<|reference_end|> | arxiv | @article{alon2008linear,
title={Linear Time Algorithms for Finding a Dominating Set of Fixed Size in
Degenerated Graphs},
author={Noga Alon and Shai Gutner},
journal={Proc. of 13th COCOON (2007), 394--405},
year={2008},
archivePrefix={arXiv},
eprint={0806.4735},
primaryClass={cs.DS cs.DM}
} | alon2008linear |
arxiv-4194 | 0806.4737 | On the Multiplexing Gain of K-user Partially Connected Interference Channel | <|reference_start|>On the Multiplexing Gain of K-user Partially Connected Interference Channel: The multiplexing gain (MUXG) of $K$-user interference channel (IC) with partially connected interfering links is analyzed. The motivation for the partially connected IC comes from the fact that not all interferences are equally strong in practice. The MUXG is characterized as a function of the number ($K$) of users and the number ($N \geq 1$) of interfering links. Our analysis is mainly based on the interference alignment (IA) technique to mitigate interference. Our main results are as follows: One may expect that higher MUXG can be attained when some of interfering links do not exist. However, when $N$ is odd and $K=N+2$, the MUXG is not increased beyond the optimal MUXG of fully connected IC, which is $\frac{KM}{2}$. The number of interfering links has no influence on the achievable MUXG using IA, but affects the efficiency in terms of the number of required channel realizations: When N=1 or 2, the optimal MUXG of the fully connected IC is achievable with a finite number of channel realizations. In case of $N \geq 3$, however, the MUXG of $\frac{KM}{2}$ can be achieved asymptotically as the number of channel realizations tends to infinity.<|reference_end|> | arxiv | @article{choi2008on,
title={On the Multiplexing Gain of K-user Partially Connected Interference
Channel},
author={Sang Won Choi and Sae-Young Chung},
journal={arXiv preprint arXiv:0806.4737},
year={2008},
archivePrefix={arXiv},
eprint={0806.4737},
primaryClass={cs.IT math.IT}
} | choi2008on |
arxiv-4195 | 0806.4746 | Concept-Oriented Programming | <|reference_start|>Concept-Oriented Programming: Object-oriented programming (OOP) is aimed at describing the structure and behaviour of objects by hiding the mechanism of their representation and access in primitive references. In this article we describe an approach, called concept-oriented programming (COP), which focuses on modelling references assuming that they also possess application-specific structure and behaviour accounting for a great deal or even most of the overall program complexity. References in COP are completely legalized and get the same status as objects while the functions are distributed among both objects and references. In order to support this design we introduce a new programming construct, called concept, which generalizes conventional classes and concept inclusion relation generalizing class inheritance. The main advantage of COP is that it allows programmers to describe two sides of any program: explicitly used functions of objects and intermediate functionality of references having cross-cutting nature and executed implicitly behind the scenes during object access.<|reference_end|> | arxiv | @article{savinov2008concept-oriented,
title={Concept-Oriented Programming},
author={Alexandr Savinov},
journal={arXiv preprint arXiv:0806.4746},
year={2008},
archivePrefix={arXiv},
eprint={0806.4746},
primaryClass={cs.PL}
} | savinov2008concept-oriented |
arxiv-4196 | 0806.4749 | Nested Ordered Sets and their Use for Data Modelling | <|reference_start|>Nested Ordered Sets and their Use for Data Modelling: In this paper we present a new approach to data modelling, called the concept-oriented model (CoM), and describe its main features and characteristics including data semantics and operations. The distinguishing feature of this model is that it is based on the formalism of nested ordered sets where any element participates in two structures simultaneously: hierarchical (nested) and multi-dimensional (ordered). An element of the model is postulated to consist of two parts, called identity and entity, and the whole approach can be naturally broken into two branches: identity modelling and entity modelling. We also propose a new query language with the main construct, called concept, defined as a pair of two classes: identity class and entity class. We describe how its operations of projection, de-projection and product can be used to solve typical data modelling tasks.<|reference_end|> | arxiv | @article{savinov2008nested,
title={Nested Ordered Sets and their Use for Data Modelling},
author={Alexandr Savinov},
journal={arXiv preprint arXiv:0806.4749},
year={2008},
archivePrefix={arXiv},
eprint={0806.4749},
primaryClass={cs.DB}
} | savinov2008nested |
arxiv-4197 | 0806.4773 | Signal Codes | <|reference_start|>Signal Codes: Motivated by signal processing, we present a new class of channel codes, called signal codes, for continuous-alphabet channels. Signal codes are lattice codes whose encoding is done by convolving an integer information sequence with a fixed filter pattern. Decoding is based on the bidirectional sequential stack decoder, which can be implemented efficiently using the heap data structure. Error analysis and simulation results indicate that signal codes can achieve low error rate at approximately 1dB from channel capacity.<|reference_end|> | arxiv | @article{shalvi2008signal,
title={Signal Codes},
author={Shalvi, Ofir and Sommer, Naftali and Feder, Meir},
journal={arXiv preprint arXiv:0806.4773},
year={2008},
doi={10.1109/ITW.2003.1216761},
archivePrefix={arXiv},
eprint={0806.4773},
primaryClass={cs.IT math.IT}
} | shalvi2008signal |
arxiv-4198 | 0806.4787 | Locality and Bounding-Box Quality of Two-Dimensional Space-Filling Curves | <|reference_start|>Locality and Bounding-Box Quality of Two-Dimensional Space-Filling Curves: Space-filling curves can be used to organise points in the plane into bounding-box hierarchies (such as R-trees). We develop measures of the bounding-box quality of space-filling curves that express how effective different space-filling curves are for this purpose. We give general lower bounds on the bounding-box quality measures and on locality according to Gotsman and Lindenbaum for a large class of space-filling curves. We describe a generic algorithm to approximate these and similar quality measures for any given curve. Using our algorithm we find good approximations of the locality and the bounding-box quality of several known and new space-filling curves. Surprisingly, some curves with relatively bad locality by Gotsman and Lindenbaum's measure, have good bounding-box quality, while the curve with the best-known locality has relatively bad bounding-box quality.<|reference_end|> | arxiv | @article{haverkort2008locality,
title={Locality and Bounding-Box Quality of Two-Dimensional Space-Filling
Curves},
author={Herman Haverkort and Freek van Walderveen},
journal={arXiv preprint arXiv:0806.4787},
year={2008},
archivePrefix={arXiv},
eprint={0806.4787},
primaryClass={cs.CG cs.DB}
} | haverkort2008locality |
arxiv-4199 | 0806.4790 | AMS Without 4-Wise Independence on Product Domains | <|reference_start|>AMS Without 4-Wise Independence on Product Domains: In their seminal work, Alon, Matias, and Szegedy introduced several sketching techniques, including showing that 4-wise independence is sufficient to obtain good approximations of the second frequency moment. In this work, we show that their sketching technique can be extended to product domains $[n]^k$ by using the product of 4-wise independent functions on $[n]$. Our work extends that of Indyk and McGregor, who showed the result for $k = 2$. Their primary motivation was the problem of identifying correlations in data streams. In their model, a stream of pairs $(i,j) \in [n]^2$ arrive, giving a joint distribution $(X,Y)$, and they find approximation algorithms for how close the joint distribution is to the product of the marginal distributions under various metrics, which naturally corresponds to how close $X$ and $Y$ are to being independent. By using our technique, we obtain a new result for the problem of approximating the $\ell_2$ distance between the joint distribution and the product of the marginal distributions for $k$-ary vectors, instead of just pairs, in a single pass. Our analysis gives a randomized algorithm that is a $(1 \pm \epsilon)$ approximation (with probability $1-\delta$) that requires space logarithmic in $n$ and $m$ and proportional to $3^k$.<|reference_end|> | arxiv | @article{braverman2008ams,
title={{AMS} Without 4-Wise Independence on Product Domains},
author={Braverman, Vladimir and Chung, Kai-Min and Liu, Zhenming and
Mitzenmacher, Michael and Ostrovsky, Rafail},
journal={arXiv preprint arXiv:0806.4790},
year={2008},
archivePrefix={arXiv},
eprint={0806.4790},
primaryClass={cs.DS}
} | braverman2008ams |
arxiv-4200 | 0806.4802 | A new Hedging algorithm and its application to inferring latent random variables | <|reference_start|>A new Hedging algorithm and its application to inferring latent random variables: We present a new online learning algorithm for cumulative discounted gain. This learning algorithm does not use exponential weights on the experts. Instead, it uses a weighting scheme that depends on the regret of the master algorithm relative to the experts. In particular, experts whose discounted cumulative gain is smaller (worse) than that of the master algorithm receive zero weight. We also sketch how a regret-based algorithm can be used as an alternative to Bayesian averaging in the context of inferring latent random variables.<|reference_end|> | arxiv | @article{freund2008a,
title={A new Hedging algorithm and its application to inferring latent random
variables},
author={Yoav Freund and Daniel Hsu},
journal={arXiv preprint arXiv:0806.4802},
year={2008},
archivePrefix={arXiv},
eprint={0806.4802},
primaryClass={cs.GT cs.AI}
} | freund2008a |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.