corpus_id | paper_id | title | abstract | source | bibtex | citation_key
---|---|---|---|---|---|---|
arxiv-6101 | 0901.2864 | An extension of the order bound for AG codes | <|reference_start|>An extension of the order bound for AG codes: The most successful method to obtain lower bounds for the minimum distance of an algebraic geometric code is the order bound, which generalizes the Feng-Rao bound. We provide a significant extension of the bound that improves the order bounds by Beelen and by Duursma and Park. We include an exhaustive numerical comparison of the different bounds for 10168 two-point codes on the Suzuki curve of genus g=124 over the field of 32 elements. Keywords: algebraic geometric code, order bound, Suzuki curve.<|reference_end|> | arxiv | @article{duursma2009an,
title={An extension of the order bound for AG codes},
author={Iwan Duursma and Radoslav Kirov},
journal={arXiv preprint arXiv:0901.2864},
year={2009},
doi={10.1007/978-3-642-02181-7},
archivePrefix={arXiv},
eprint={0901.2864},
primaryClass={math.NT cs.IT math.AG math.IT}
} | duursma2009an |
arxiv-6102 | 0901.2897 | Online validation of the pi and pi' failure functions | <|reference_start|>Online validation of the pi and pi' failure functions: Let pi_w denote the failure function of the Morris-Pratt algorithm for a word w. In this paper we study the following problem: given an integer array A[1..n], is there a word w over an arbitrary alphabet such that A[i]=pi_w[i] for all i? Moreover, what is the minimum required cardinality of the alphabet? We give a real-time linear algorithm for this problem in the unit-cost RAM model with $\Theta(\log n)$-bit words. Our algorithm also returns a word w over a minimal alphabet such that pi_w = A, and uses just o(n) words of memory. We then consider the function pi' instead of pi and give an online O(n log n) algorithm for this case. This is the first polynomial algorithm for the online version of this problem.<|reference_end|> | arxiv | @article{gawrychowski2009online,
title={Online validation of the pi and pi' failure functions},
author={Pawel Gawrychowski, Artur Jez, Lukasz Jez},
journal={arXiv preprint arXiv:0901.2897},
year={2009},
archivePrefix={arXiv},
eprint={0901.2897},
primaryClass={cs.DS}
} | gawrychowski2009online |
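A minimal Python sketch (an editor's illustration, not the paper's algorithm) of the object being validated above: the Morris-Pratt failure function pi_w, together with a brute-force validity check for a candidate array A. The names `failure_function` and `is_valid_pi` are ours, and the checker is exponential, in contrast to the paper's real-time linear solution.

```python
from itertools import product

def failure_function(w: str) -> list[int]:
    """pi_w[i] = length of the longest proper border of w[:i+1]."""
    pi = [0] * len(w)
    k = 0
    for i in range(1, len(w)):
        while k > 0 and w[i] != w[k]:
            k = pi[k - 1]          # fall back along shorter borders
        if w[i] == w[k]:
            k += 1
        pi[i] = k
    return pi

def is_valid_pi(A: list[int], alphabet: str) -> bool:
    """Brute force: does some word over `alphabet` realize A?"""
    return any(failure_function("".join(w)) == A
               for w in product(alphabet, repeat=len(A)))

assert failure_function("abab") == [0, 0, 1, 2]
assert is_valid_pi([0, 0, 1, 2], "ab")
assert not is_valid_pi([0, 2, 0, 0], "ab")  # pi can grow by at most 1 per step
```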
arxiv-6103 | 0901.2900 | An O(log(n)) Fully Dynamic Algorithm for Maximum matching in a tree | <|reference_start|>An O(log(n)) Fully Dynamic Algorithm for Maximum matching in a tree: In this paper, we have developed a fully-dynamic algorithm for maintaining the cardinality of a maximum matching in a tree using the construction of top-trees. The time complexities are as follows: 1. Initialization Time: $O(n\log n)$ to build the top-tree. 2. Update Time: $O(\log n)$. 3. Query Time: $O(1)$ to query the cardinality of the maximum matching and $O(\log n)$ to find whether a particular edge is matched.<|reference_end|> | arxiv | @article{gupta2009an,
title={An O(log(n)) Fully Dynamic Algorithm for Maximum matching in a tree},
author={Manoj Gupta (1), Ankit Sharma (1) ((1) Indian Institute of Technology
Kanpur)},
journal={arXiv preprint arXiv:0901.2900},
year={2009},
archivePrefix={arXiv},
eprint={0901.2900},
primaryClass={cs.DS}
} | gupta2009an |
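For contrast with the dynamic algorithm above, a Python sketch (editor's addition) of the static problem: a leaf-up greedy pass computes the cardinality of a maximum matching in a tree in linear time. The paper's contribution is maintaining this quantity under updates in $O(\log n)$ via top-trees, which this baseline does not attempt.

```python
from collections import deque

def tree_max_matching(n: int, edges: list[tuple[int, int]]) -> int:
    """Cardinality of a maximum matching in a tree, by leaf-up greedy."""
    adj = [[] for _ in range(n)]
    for u, v in edges:
        adj[u].append(v)
        adj[v].append(u)
    parent, order, seen = [-1] * n, [], [False] * n
    dq = deque([0])
    seen[0] = True
    while dq:                       # BFS gives a root-to-leaf order
        u = dq.popleft()
        order.append(u)
        for v in adj[u]:
            if not seen[v]:
                seen[v], parent[v] = True, u
                dq.append(v)
    matched, count = [False] * n, 0
    for v in reversed(order):       # leaves first: match v with its parent
        p = parent[v]
        if p >= 0 and not matched[v] and not matched[p]:
            matched[v] = matched[p] = True
            count += 1
    return count

# A path on 5 vertices has a maximum matching of size 2.
assert tree_max_matching(5, [(0, 1), (1, 2), (2, 3), (3, 4)]) == 2
```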
arxiv-6104 | 0901.2903 | Entropy Measures vs Algorithmic Information | <|reference_start|>Entropy Measures vs Algorithmic Information: Algorithmic entropy and Shannon entropy are two conceptually different information measures, as the former is based on the size of programs and the latter on probability distributions. However, it is known that, for any recursive probability distribution, the expected value of algorithmic entropy equals its Shannon entropy, up to a constant that depends only on the distribution. We study whether a similar relationship holds for R\'{e}nyi and Tsallis entropies of order $\alpha$, showing that it only holds for R\'{e}nyi and Tsallis entropies of order 1 (i.e., for Shannon entropy). Regarding a time-bounded analogue relationship, we show that, for distributions such that the cumulative probability distribution is computable in time $t(n)$, the expected value of time-bounded algorithmic entropy (where the allotted time is $nt(n)\log (nt(n))$) is in the same range as the unbounded version. So, for these distributions, Shannon entropy captures the notion of computationally accessible information. We prove that, for the universal time-bounded distribution $\mathbf{m}^t(x)$, Tsallis and R\'{e}nyi entropies converge if and only if $\alpha$ is greater than 1.<|reference_end|> | arxiv | @article{teixeira2009entropy,
title={Entropy Measures vs. Algorithmic Information},
author={Andreia Teixeira, Andre Souto, Armando Matos, Luis Antunes},
journal={arXiv preprint arXiv:0901.2903},
year={2009},
archivePrefix={arXiv},
eprint={0901.2903},
primaryClass={cs.IT cs.CC math.IT}
} | teixeira2009entropy |
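A small numerical sketch (editor's addition) of the entropies compared in the abstract above: for a fixed distribution, the Rényi and Tsallis entropies of order alpha both converge to the Shannon entropy as alpha approaches 1, the single order for which the paper shows the expectation relationship survives.

```python
import math

def shannon_bits(p):
    return -sum(x * math.log2(x) for x in p if x > 0)

def renyi_bits(p, alpha):
    return math.log2(sum(x ** alpha for x in p)) / (1 - alpha)

def tsallis_nats(p, alpha):
    # Tsallis entropy tends to Shannon entropy in nats (-sum p ln p) as alpha -> 1.
    return (1 - sum(x ** alpha for x in p)) / (alpha - 1)

p = [0.5, 0.25, 0.125, 0.125]
print(shannon_bits(p))                      # 1.75 bits (about 1.2130 nats)
for a in (0.9, 0.99, 1.01, 1.1):
    print(a, renyi_bits(p, a), tsallis_nats(p, a))
```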
arxiv-6105 | 0901.2906 | Measuring communication complexity using instance complexity with oracles | <|reference_start|>Measuring communication complexity using instance complexity with oracles: We establish a connection between non-deterministic communication complexity and instance complexity, a measure of information based on algorithmic entropy. Let $\overline{x}$, $\overline{y}$ and $Y_1(\overline{x})$ be respectively the input known by Alice, the input known by Bob, and the set of all values of $y$ such that $f(\overline{x},y)=1$; a string is a witness of the non-deterministic communication protocol iff it is a program $p$ that "corresponds exactly" to the instance complexity $\mathrm{ic}^{f,t}(\overline{y}:Y_1(\overline{x}))$.<|reference_end|> | arxiv | @article{matos2009measuring,
title={Measuring communication complexity using instance complexity with
oracles},
author={Armando Matos, Andreia Teixeira, Andre Souto},
journal={arXiv preprint arXiv:0901.2906},
year={2009},
archivePrefix={arXiv},
eprint={0901.2906},
primaryClass={cs.CC}
} | matos2009measuring |
arxiv-6106 | 0901.2911 | Gibbs Free Energy Analysis of a Quantum Analog of the Classical Binary Symmetric Channel | <|reference_start|>Gibbs Free Energy Analysis of a Quantum Analog of the Classical Binary Symmetric Channel: The Gibbs free energy properties of a quantum {\it send, receive} communications system are studied. The communications model resembles the classical Ising model of spins on a lattice in that the joint state of the quantum system is the product of sender and receiver states. However, the system differs from the classical case in that the sender and receiver spin states are quantum superposition states coupled by a Hamiltonian operator. A basic understanding of these states is directly relevant to communications theory and indirectly relevant to computation since the product states form a basis for entangled states. Highlights of the study include an exact decimation method for quantum spins. The main result is that the minimum Gibbs free energy of the quantum system in the product state is higher (lower capacity) than that of a classical system with the same parameter values. The result is both surprising and not. The channel characteristics of the quantum system in the product state are markedly inferior to those of the classical Ising system. Intuitively, it would seem that capacity should suffer as a result. Yet, one would expect entangled states, built from product states, to have better correlation properties.<|reference_end|> | arxiv | @article{ford2009gibbs,
title={Gibbs Free Energy Analysis of a Quantum Analog of the Classical Binary
Symmetric Channel},
author={David K. Ford},
journal={arXiv preprint arXiv:0901.2911},
year={2009},
archivePrefix={arXiv},
eprint={0901.2911},
primaryClass={physics.gen-ph cond-mat.stat-mech cs.IT math.IT}
} | ford2009gibbs |
arxiv-6107 | 0901.2912 | Weighted $\ell_1$ Minimization for Sparse Recovery with Prior Information | <|reference_start|>Weighted $\ell_1$ Minimization for Sparse Recovery with Prior Information: In this paper we study the compressed sensing problem of recovering a sparse signal from a system of underdetermined linear equations when we have prior information about the probability of each entry of the unknown signal being nonzero. In particular, we focus on a model where the entries of the unknown vector fall into two sets, each with a different probability of being nonzero. We propose a weighted $\ell_1$ minimization recovery algorithm and analyze its performance using a Grassmann angle approach. We compute explicitly the relationship between the system parameters (the weights, the number of measurements, the size of the two sets, the probabilities of being non-zero) so that an i.i.d. random Gaussian measurement matrix along with weighted $\ell_1$ minimization recovers almost all such sparse signals with overwhelming probability as the problem dimension increases. This allows us to compute the optimal weights. We also provide simulations to demonstrate the advantages of the method over conventional $\ell_1$ optimization.<|reference_end|> | arxiv | @article{khajehnejad2009weighted,
title={Weighted $\ell_1$ Minimization for Sparse Recovery with Prior
Information},
author={M. Amin Khajehnejad, Weiyu Xu, Salman Avestimehr, and Babak Hassibi},
journal={arXiv preprint arXiv:0901.2912},
year={2009},
archivePrefix={arXiv},
eprint={0901.2912},
primaryClass={cs.IT math.IT}
} | khajehnejad2009weighted |
arxiv-6108 | 0901.2913 | An Algebraic Watchdog for Wireless Network Coding | <|reference_start|>An Algebraic Watchdog for Wireless Network Coding: In this paper, we propose a scheme, called the "algebraic watchdog" for wireless network coding, in which nodes can detect malicious behaviors probabilistically, police their downstream neighbors locally using overheard messages, and, thus, provide a secure global "self-checking network". Unlike traditional Byzantine detection protocols, which are receiver-based, this protocol gives the senders an active role in checking the node downstream. This work is inspired by Marti et al.'s watchdog-pathrater, which attempts to detect and mitigate the effects of routing misbehavior. As the first building block of such a system, we focus on a two-hop network. We present a graphical model to understand the inference process nodes execute to police their downstream neighbors, as well as to compute, analyze, and approximate the probabilities of misdetection and false detection. In addition, we present an algebraic analysis of the performance using a hypothesis-testing framework that provides exact formulae for the probabilities of false detection and misdetection.<|reference_end|> | arxiv | @article{kim2009an,
title={An Algebraic Watchdog for Wireless Network Coding},
author={MinJi Kim, Muriel Medard, Joao Barros, Ralf Koetter},
journal={arXiv preprint arXiv:0901.2913},
year={2009},
doi={10.1109/ISIT.2009.5206004},
archivePrefix={arXiv},
eprint={0901.2913},
primaryClass={cs.NI cs.CR}
} | kim2009an |
arxiv-6109 | 0901.2922 | Scheduling in Multi-hop Wireless Networks with Priorities | <|reference_start|>Scheduling in Multi-hop Wireless Networks with Priorities: In this paper we consider prioritized maximal scheduling in multi-hop wireless networks, where the scheduler chooses a maximal independent set greedily according to a sequence specified by certain priorities. We show that if the probability distributions of the priorities are properly chosen, we can achieve the optimal (maximum) stability region using an i.i.d. random priority assignment process, for any set of arrival processes that satisfy the Law of Large Numbers. The pre-computation of the priorities is, in general, NP-hard, but there exists a polynomial-time approximation scheme (PTAS) to achieve any fraction of the optimal stability region. We next focus on the simple case of static priority and specify a greedy priority assignment algorithm, which can achieve the same fraction of the optimal stability region as the state-of-the-art result for Longest Queue First (LQF) schedulers. We also show that this algorithm can be easily adapted to satisfy delay constraints in the large deviations regime, and therefore supports Quality of Service (QoS) for each link.<|reference_end|> | arxiv | @article{li2009scheduling,
title={Scheduling in Multi-hop Wireless Networks with Priorities},
author={Qiao Li, Rohit Negi},
journal={arXiv preprint arXiv:0901.2922},
year={2009},
archivePrefix={arXiv},
eprint={0901.2922},
primaryClass={cs.IT math.IT}
} | li2009scheduling |
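A toy Python sketch (editor's addition) of the prioritized maximal scheduling rule described above: links are examined in priority order and greedily added if no already-scheduled neighbor in the conflict graph blocks them, which always yields a maximal independent set. The conflict graph and priority values are invented inputs.

```python
def prioritized_schedule(conflicts: dict[int, set[int]],
                         priority: dict[int, float]) -> set[int]:
    """Greedy maximal independent set, scanned in decreasing priority."""
    schedule: set[int] = set()
    for link in sorted(conflicts, key=priority.get, reverse=True):
        if conflicts[link].isdisjoint(schedule):
            schedule.add(link)
    return schedule

# Four links; link 0 conflicts with 1 and 2, link 3 conflicts with 2.
conflicts = {0: {1, 2}, 1: {0}, 2: {0, 3}, 3: {2}}
priority = {0: 0.9, 1: 0.5, 2: 0.8, 3: 0.1}
print(prioritized_schedule(conflicts, priority))  # {0, 3}
```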
arxiv-6110 | 0901.2924 | Universal Complex Structures in Written Language | <|reference_start|>Universal Complex Structures in Written Language: Quantitative linguistics has provided us with a number of empirical laws that characterise the evolution of languages and competition amongst them. In terms of language usage, one of the most influential results is Zipf's law of word frequencies. Zipf's law appears to be universal, and may not even be unique to human language. However, there is ongoing controversy over whether Zipf's law is a good indicator of complexity. Here we present an alternative approach that puts Zipf's law in the context of critical phenomena (the cornerstone of complexity in physics) and establishes the presence of a large scale "attraction" between successive repetitions of words. Moreover, this phenomenon is scale-invariant and universal -- the pattern is independent of word frequency and is observed in texts by different authors and written in different languages. There is evidence, however, that the shape of the scaling relation changes for words that play a key role in the text, implying the existence of different "universality classes" in the repetition of words. These behaviours exhibit striking parallels with complex catastrophic phenomena.<|reference_end|> | arxiv | @article{corral2009universal,
title={Universal Complex Structures in Written Language},
author={Alvaro Corral (1), Ramon Ferrer-i-Cancho (2), Gemma Boleda (2), Albert
Diaz-Guilera (3) ((1) Centre de Recerca Matematica, (2) U Politecnica
Catalunya, (3) U Barcelona)},
journal={arXiv preprint arXiv:0901.2924},
year={2009},
archivePrefix={arXiv},
eprint={0901.2924},
primaryClass={physics.soc-ph cs.CL}
} | corral2009universal |
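The rank-frequency counting behind Zipf's law, which the abstract above takes as its starting point, reduces to a few lines; this sketch (editor's addition, with a toy text standing in for a real corpus) prints the table whose counts Zipf's law predicts decay roughly as a power of the rank.

```python
from collections import Counter

text = ("the quick brown fox jumps over the lazy dog "
        "the dog barks and the fox runs").split()
freq = Counter(text)
for rank, (word, count) in enumerate(freq.most_common(), start=1):
    # Zipf's law: count is roughly proportional to 1 / rank**s with s near 1.
    print(rank, word, count)
```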
arxiv-6111 | 0901.2934 | Noisy DPC and Application to a Cognitive Channel | <|reference_start|>Noisy DPC and Application to a Cognitive Channel: In this paper, we first consider a channel that is contaminated by two independent Gaussian noises $S \sim N(0,Q)$ and $Z_0 \sim N(0,N_0)$. The capacity of this channel is computed when independent noisy versions of $S$ are known to the transmitter and/or receiver. It is shown that the channel capacity is greater than the capacity when $S$ is completely unknown, but is less than the capacity when $S$ is perfectly known at the transmitter or receiver. For example, if there is one noisy version of $S$ known at the transmitter only, the capacity is $0.5\log(1+P/(Q(N_1/(Q+N_1))+N_0))$, where $P$ is the input power constraint and $N_1$ is the power of the noise corrupting $S$. We then consider a Gaussian cognitive interference channel (IC) and propose a causal noisy dirty paper coding (DPC) strategy. We compute the achievable region using this noisy DPC strategy and quantify the regions when it achieves the upper bound on the rate.<|reference_end|> | arxiv | @article{peng2009noisy,
title={Noisy DPC and Application to a Cognitive Channel},
author={Yong Peng and Dinesh Rajan},
journal={arXiv preprint arXiv:0901.2934},
year={2009},
archivePrefix={arXiv},
eprint={0901.2934},
primaryClass={cs.IT math.IT}
} | peng2009noisy |
arxiv-6112 | 0901.2954 | An Upper Limit of AC Huffman Code Length in JPEG Compression | <|reference_start|>An Upper Limit of AC Huffman Code Length in JPEG Compression: A strategy for computing upper code-length limits of AC Huffman codes for an 8x8 block in JPEG Baseline coding is developed. The method is based on a geometric interpretation of the DCT, and the calculated limits are as close as 14% to the maximum code-lengths. The proposed strategy can be adapted to other transform coding methods, e.g., MPEG-2 and MPEG-4 video compression, to calculate close upper code-length limits for the respective processing blocks.<|reference_end|> | arxiv | @article{horie2009an,
title={An Upper Limit of AC Huffman Code Length in JPEG Compression},
author={Kenichi Horie},
journal={arXiv preprint arXiv:0901.2954},
year={2009},
number={OIMC07P03556},
archivePrefix={arXiv},
eprint={0901.2954},
primaryClass={cs.IT cs.CC cs.CE cs.CV math.IT}
} | horie2009an |
arxiv-6113 | 0901.3003 | Timed tuplix calculus and the Wesseling and van den Bergh equation | <|reference_start|>Timed tuplix calculus and the Wesseling and van den Bergh equation: We develop an algebraic framework for the description and analysis of financial behaviours, that is, behaviours that consist of transferring certain amounts of money at planned times. To a large extent, analysis of financial products amounts to analysis of such behaviours. We formalize the cumulative interest compliant conservation requirement for financial products proposed by Wesseling and van den Bergh by an equation in the framework developed and define a notion of financial product behaviour using this formalization. We also present some properties of financial product behaviours. The development of the framework has been influenced by previous work on the process algebra ACP.<|reference_end|> | arxiv | @article{bergstra2009timed,
title={Timed tuplix calculus and the Wesseling and van den Bergh equation},
author={J. A. Bergstra, C. A. Middelburg},
journal={Scientific Annals of Computer Science 23(2):169--190, 2013},
year={2009},
doi={10.7561/SACS.2013.2.169},
number={PRG0901},
archivePrefix={arXiv},
eprint={0901.3003},
primaryClass={q-fin.GN cs.LO}
} | bergstra2009timed |
arxiv-6114 | 0901.3012 | Meadow enriched ACP process algebras | <|reference_start|>Meadow enriched ACP process algebras: We introduce the notion of an ACP process algebra. The models of the axiom system ACP are the origin of this notion. ACP process algebras have to do with processes in which no data are involved. We also introduce the notion of a meadow enriched ACP process algebra, which is a simple generalization of the notion of an ACP process algebra to processes in which data are involved. In meadow enriched ACP process algebras, the mathematical structure for data is a meadow.<|reference_end|> | arxiv | @article{bergstra2009meadow,
title={Meadow enriched ACP process algebras},
author={J. A. Bergstra, C. A. Middelburg},
journal={arXiv preprint arXiv:0901.3012},
year={2009},
number={PRG0902},
archivePrefix={arXiv},
eprint={0901.3012},
primaryClass={math.RA cs.LO}
} | bergstra2009meadow |
arxiv-6115 | 0901.3017 | Statistical analysis of the Indus script using $n$-grams | <|reference_start|>Statistical analysis of the Indus script using $n$-grams: The Indus script is one of the major undeciphered scripts of the ancient world. The small size of the corpus, the absence of bilingual texts, and the lack of definite knowledge of the underlying language has frustrated efforts at decipherment since the discovery of the remains of the Indus civilisation. Recently, some researchers have questioned the premise that the Indus script encodes spoken language. Building on previous statistical approaches, we apply the tools of statistical language processing, specifically $n$-gram Markov chains, to analyse the Indus script for syntax. Our main results are that the script has well-defined signs which begin and end texts, that there is directionality and strong correlations in the sign order, and that there are groups of signs which appear to have identical syntactic function. All these require no {\it a priori} suppositions regarding the syntactic or semantic content of the signs, but follow directly from the statistical analysis. Using information theoretic measures, we find the information in the script to be intermediate between that of a completely random and a completely fixed ordering of signs. Our study reveals that the Indus script is a structured sign system showing features of a formal language, but, at present, cannot conclusively establish that it encodes {\it natural} language. Our $n$-gram Markov model is useful for predicting signs which are missing or illegible in a corpus of Indus texts. This work forms the basis for the development of a stochastic grammar which can be used to explore the syntax of the Indus script in greater detail.<|reference_end|> | arxiv | @article{yadav2009statistical,
title={Statistical analysis of the Indus script using $n$-grams},
author={Nisha Yadav, Hrishikesh Joglekar, Rajesh P. N. Rao, M. N. Vahia,
Iravatham Mahadevan and R. Adhikari},
journal={arXiv preprint arXiv:0901.3017},
year={2009},
doi={10.1371/journal.pone.0009506},
archivePrefix={arXiv},
eprint={0901.3017},
primaryClass={cs.CL}
} | yadav2009statistical |
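A minimal bigram (n = 2) Markov chain of the kind the paper applies, sketched by the editor with invented stand-ins for Indus sign sequences: transition counts are estimated from the corpus and the most probable successor is used to restore a missing sign.

```python
from collections import Counter, defaultdict

texts = [["A", "B", "C"], ["A", "B", "D"], ["A", "B", "C"], ["B", "C"]]
bigrams = Counter((s, t) for seq in texts for s, t in zip(seq, seq[1:]))
following = defaultdict(Counter)
for (s, t), c in bigrams.items():
    following[s][t] = c

def predict_next(sign: str) -> str:
    """Most likely successor of `sign` under the bigram model."""
    return following[sign].most_common(1)[0][0]

print(predict_next("A"))  # 'B' (follows 'A' in all three occurrences)
print(predict_next("B"))  # 'C' (3 of the 4 occurrences of 'B')
```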
arxiv-6116 | 0901.3047 | Entropy Principle in Direct Derivation of Benford's Law | <|reference_start|>Entropy Principle in Direct Derivation of Benford's Law: The uneven distribution of digits in numerical data, known as Benford's law, was discovered in 1881. Since then, this law has been shown to hold in copious numerical data relating to economics, physics and even prime numbers. Although it attracts considerable attention, there is no a priori probabilistic criterion for when a data set should or should not obey the law. Here a general criterion is suggested, namely that any file of digits in the Shannon limit (i.e., having maximum entropy) has a Benford's law distribution of digits.<|reference_end|> | arxiv | @article{kafri2009entropy,
title={Entropy Principle in Direct Derivation of Benford's Law},
author={Oded Kafri},
journal={arXiv preprint arXiv:0901.3047},
year={2009},
archivePrefix={arXiv},
eprint={0901.3047},
primaryClass={cs.DM physics.data-an}
} | kafri2009entropy |
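A quick numerical illustration (editor's addition): Benford's law assigns leading digit d the probability log10(1 + 1/d), and the leading digits of powers of 2 are a classic sequence that follows it closely.

```python
import math
from collections import Counter

benford = {d: math.log10(1 + 1 / d) for d in range(1, 10)}
lead = Counter(int(str(2 ** k)[0]) for k in range(1, 1001))
for d in range(1, 10):
    # predicted vs. empirical frequency of leading digit d in 2^1 .. 2^1000
    print(d, round(benford[d], 4), round(lead[d] / 1000, 4))
```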
arxiv-6117 | 0901.3056 | Factorization of Joint Probability Mass Functions into Parity Check Interactions | <|reference_start|>Factorization of Joint Probability Mass Functions into Parity Check Interactions: We show that any joint probability mass function (PMF) can be expressed as a product of parity check factors and factors of degree one with the help of some auxiliary variables, if the alphabet size is appropriate for defining a parity check equation. In other words, marginalization of a joint PMF is equivalent to a soft decoding task as long as a finite field can be constructed over the alphabet of the PMF. In factor graph terminology this claim means that a factor graph representing such a joint PMF always has an equivalent Tanner graph. We provide a systematic method based on the Hilbert space of PMFs and orthogonal projections for obtaining this factorization.<|reference_end|> | arxiv | @article{bayramoglu2009factorization,
title={Factorization of Joint Probability Mass Functions into Parity Check
Interactions},
author={M. F. Bayramoglu and A. \"Ozg\"ur Y{\i}lmaz},
journal={arXiv preprint arXiv:0901.3056},
year={2009},
archivePrefix={arXiv},
eprint={0901.3056},
primaryClass={cs.IT cs.DM math.IT math.PR}
} | bayramoglu2009factorization |
arxiv-6118 | 0901.3119 | Average number of flips in pancake sorting | <|reference_start|>Average number of flips in pancake sorting: We are given a stack of pancakes of different sizes and the only allowed operation is to take several pancakes from the top and flip them. The unburnt version requires the pancakes to be sorted by their sizes at the end, while in the burnt version they additionally need to be oriented burnt-side down. We present an algorithm whose average number of flips needed to sort a stack of n burnt pancakes is 7n/4+O(1), and a randomized algorithm for the unburnt version with at most 17n/12+O(1) flips on average. In addition, we show that in the burnt version, the average number of flips of any algorithm is at least $n+\Omega(n/\log n)$ and conjecture that some algorithm can reach $n+\Theta(n/\log n)$. We also slightly increase the lower bound on g(n), the minimum number of flips needed to sort the worst stack of n burnt pancakes. This bound, together with the upper bound found by Heydari and Sudborough in 1997, gives the exact number of flips to sort the previously conjectured worst stack $-I_n$ for n=3 mod 4 and n>=15. Finally we present exact values of f(n) up to n=19 and of g(n) up to n=17 and disprove a conjecture of Cohen and Blum by showing that the burnt stack $-I_{15}$ is not the worst one for n=15.<|reference_end|> | arxiv | @article{cibulka2009average,
title={Average number of flips in pancake sorting},
author={Josef Cibulka},
journal={Theor. Comput. Sci. 412, pp. 822-834 (2011)},
year={2009},
doi={10.1016/j.tcs.2010.11.028},
archivePrefix={arXiv},
eprint={0901.3119},
primaryClass={cs.DM}
} | cibulka2009average |
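An editor's sketch of the (unburnt) model above: the only permitted move is a prefix flip. The classic "flip the largest unsorted pancake to the top, then flip it into place" strategy below uses at most 2n-3 flips; the paper's randomized algorithm improves the average to 17n/12+O(1).

```python
def pancake_sort(stack: list[int]) -> int:
    """Sort in place using only prefix flips; return the number of flips."""
    flips = 0
    for size in range(len(stack), 1, -1):
        i = stack.index(max(stack[:size]))  # largest unsorted pancake
        if i == size - 1:
            continue                        # already in place
        if i != 0:
            stack[:i + 1] = stack[i::-1]    # flip it to the top
            flips += 1
        stack[:size] = stack[size - 1::-1]  # flip it down into position
        flips += 1
    return flips

s = [3, 1, 4, 5, 2]
print(pancake_sort(s), s)  # 4 [1, 2, 3, 4, 5]
```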
arxiv-6119 | 0901.3130 | Secure Communication in the Low-SNR Regime: A Characterization of the Energy-Secrecy Tradeoff | <|reference_start|>Secure Communication in the Low-SNR Regime: A Characterization of the Energy-Secrecy Tradeoff: Secrecy capacity of a multiple-antenna wiretap channel is studied in the low signal-to-noise ratio (SNR) regime. Expressions for the first and second derivatives of the secrecy capacity with respect to SNR at SNR = 0 are derived. Transmission strategies required to achieve these derivatives are identified. In particular, it is shown that it is optimal in the low-SNR regime to transmit in the maximum-eigenvalue eigenspace of $H_m^* H_m - (N_m/N_e) H_e^* H_e$, where $H_m$ and $H_e$ denote the channel matrices associated with the legitimate receiver and eavesdropper, respectively, and $N_m$ and $N_e$ are the noise variances at the receiver and eavesdropper, respectively. Energy efficiency is analyzed by finding the minimum bit energy required for secure and reliable communications, and the wideband slope. Increased bit energy requirements under secrecy constraints are quantified. Finally, the impact of fading is investigated.<|reference_end|> | arxiv | @article{gursoy2009secure,
title={Secure Communication in the Low-SNR Regime: A Characterization of the
Energy-Secrecy Tradeoff},
author={Mustafa Cenk Gursoy},
journal={arXiv preprint arXiv:0901.3130},
year={2009},
archivePrefix={arXiv},
eprint={0901.3130},
primaryClass={cs.IT math.IT}
} | gursoy2009secure |
arxiv-6120 | 0901.3132 | Low-SNR Analysis of Interference Channels under Secrecy Constraints | <|reference_start|>Low-SNR Analysis of Interference Channels under Secrecy Constraints: In this paper, we study the secrecy rates over weak Gaussian interference channels for different transmission schemes. We focus on the low-SNR regime and obtain the minimum bit energy $(E_b/N_0)_{\min}$ values and the wideband slope regions for both TDMA and multiplexed transmission schemes. We show that secrecy constraints introduce a penalty in both the minimum bit energy and the slope regions. Additionally, we identify under what conditions TDMA or multiplexed transmission is optimal. Finally, we show that TDMA is more likely to be optimal in the presence of secrecy constraints.<|reference_end|> | arxiv | @article{zhang2009low-snr,
title={Low-SNR Analysis of Interference Channels under Secrecy Constraints},
author={Junwei Zhang and Mustafa Cenk Gursoy},
journal={arXiv preprint arXiv:0901.3132},
year={2009},
archivePrefix={arXiv},
eprint={0901.3132},
primaryClass={cs.IT math.IT}
} | zhang2009low-snr |
arxiv-6121 | 0901.3134 | Energy Efficiency of Fixed-Rate Wireless Transmissions under Queueing Constraints and Channel Uncertainty | <|reference_start|>Energy Efficiency of Fixed-Rate Wireless Transmissions under Queueing Constraints and Channel Uncertainty: Energy efficiency of fixed-rate transmissions is studied in the presence of queueing constraints and channel uncertainty. It is assumed that neither the transmitter nor the receiver has channel side information prior to transmission. The channel coefficients are estimated at the receiver via minimum mean-square-error (MMSE) estimation with the aid of training symbols. It is further assumed that the system operates under statistical queueing constraints in the form of limitations on buffer violation probabilities. The optimal fraction of power allocated to training is identified. The spectral efficiency--bit energy tradeoff is analyzed in the low-power and wideband regimes by employing the effective capacity formulation. In particular, it is shown that the bit energy increases without bound in the low-power regime as the average power vanishes. On the other hand, it is proven that the bit energy diminishes to its minimum value in the wideband regime as the available bandwidth increases. For this case, expressions for the minimum bit energy and wideband slope are derived. Overall, energy costs of channel uncertainty and queueing constraints are identified.<|reference_end|> | arxiv | @article{qiao2009energy,
title={Energy Efficiency of Fixed-Rate Wireless Transmissions under Queueing
Constraints and Channel Uncertainty},
author={Deli Qiao, Mustafa Cenk Gursoy, and Senem Velipasalar},
journal={arXiv preprint arXiv:0901.3134},
year={2009},
archivePrefix={arXiv},
eprint={0901.3134},
primaryClass={cs.IT math.IT}
} | qiao2009energy |
arxiv-6122 | 0901.3150 | Matrix Completion from a Few Entries | <|reference_start|>Matrix Completion from a Few Entries: Let M be a random (alpha n) x n matrix of rank r<<n, and assume that a uniformly random subset E of its entries is observed. We describe an efficient algorithm that reconstructs M from |E| = O(rn) observed entries with relative root mean square error RMSE <= C(rn/|E|)^0.5 . Further, if r=O(1), M can be reconstructed exactly from |E| = O(n log(n)) entries. These results apply beyond random matrices to general low-rank incoherent matrices. This settles (in the case of bounded rank) a question left open by Candes and Recht and improves over the guarantees for their reconstruction algorithm. The complexity of our algorithm is O(|E|r log(n)), which opens the way to its use for massive data sets. In the process of proving these statements, we obtain a generalization of a celebrated result by Friedman-Kahn-Szemeredi and Feige-Ofek on the spectrum of sparse random matrices.<|reference_end|> | arxiv | @article{keshavan2009matrix,
title={Matrix Completion from a Few Entries},
author={Raghunandan H. Keshavan, Andrea Montanari, and Sewoong Oh},
journal={arXiv preprint arXiv:0901.3150},
year={2009},
archivePrefix={arXiv},
eprint={0901.3150},
primaryClass={cs.LG stat.ML}
} | keshavan2009matrix |
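A plain spectral baseline in the spirit of the reconstruction above, sketched by the editor (this is not the authors' full algorithm, which additionally trims over-represented rows/columns and runs a local refinement): rescale the zero-filled observed matrix by nm/|E| and project onto rank r with a truncated SVD.

```python
import numpy as np

rng = np.random.default_rng(0)
n, m, r = 200, 200, 2
M = rng.standard_normal((n, r)) @ rng.standard_normal((r, m))  # rank-2 truth
mask = rng.random((n, m)) < 0.3                                # observed set E

ME = np.where(mask, M, 0.0) * (n * m / mask.sum())  # unbiased rescaling
U, s, Vt = np.linalg.svd(ME, full_matrices=False)
M_hat = U[:, :r] @ np.diag(s[:r]) @ Vt[:r]          # best rank-r approximation

rmse = np.sqrt(np.mean((M_hat - M) ** 2))
print(rmse)  # shrinks as the fraction of observed entries grows
```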
arxiv-6123 | 0901.3170 | On linear balancing sets | <|reference_start|>On linear balancing sets: Let $n$ be an even positive integer and $F$ be the field GF(2). A word in $F^n$ is called balanced if its Hamming weight is $n/2$. A subset $C \subseteq F^n$ is called a balancing set if for every word $y \in F^n$ there is a word $x \in C$ such that $y + x$ is balanced. It is shown that most linear subspaces of $F^n$ of dimension slightly larger than $\frac{3}{2}\log_2(n)$ are balancing sets. A generalization of this result to linear subspaces that are "almost balancing" is also presented. On the other hand, it is shown that the problem of deciding whether a given set of vectors in $F^n$ spans a balancing set, is NP-hard. An application of linear balancing sets is presented for designing efficient error-correcting coding schemes in which the codewords are balanced.<|reference_end|> | arxiv | @article{mazumdar2009on,
title={On linear balancing sets},
author={Arya Mazumdar, Ron M. Roth, Pascal O. Vontobel},
journal={Advances in Mathematics of Communications (AMC), Vol. 4, Issue 3,
pp. 345 - 361, August, 2010},
year={2009},
doi={10.3934/amc.2010.4.345},
archivePrefix={arXiv},
eprint={0901.3170},
primaryClass={cs.IT cs.DM math.IT}
} | mazumdar2009on |
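The definition above can be checked by brute force for tiny n; this editor's sketch verifies that a particular 3-dimensional subspace of F_2^4 (dimension equal to (3/2)log2(4), matching the theorem's scale) is a balancing set. The basis is a made-up example.

```python
def span(basis: list[int], n: int) -> set[int]:
    """All F_2 linear combinations of the basis vectors (as n-bit masks)."""
    vecs = {0}
    for b in basis:
        vecs |= {v ^ b for v in vecs}
    return vecs

def is_balancing(C: set[int], n: int) -> bool:
    """Every y in F_2^n must be shiftable by some x in C to weight n/2."""
    return all(any(bin(y ^ x).count("1") == n // 2 for x in C)
               for y in range(2 ** n))

n = 4
C = span([0b0001, 0b0011, 0b0101], n)   # 8 vectors, dimension 3
print(is_balancing(C, n))               # True for this subspace
```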
arxiv-6124 | 0901.3188 | Dejean's conjecture holds for n>=27 | <|reference_start|>Dejean's conjecture holds for n>=27: We show that Dejean's conjecture holds for n>=27. This brings the final resolution of the conjecture by the approach of Moulin Ollagnier within range of the computationally feasible.<|reference_end|> | arxiv | @article{currie2009dejean's,
title={Dejean's conjecture holds for n>=27},
author={James Currie and Narad Rampersad},
journal={arXiv preprint arXiv:0901.3188},
year={2009},
archivePrefix={arXiv},
eprint={0901.3188},
primaryClass={math.CO cs.FL}
} | currie2009dejean's |
arxiv-6125 | 0901.3189 | Self-assembly of the discrete Sierpinski carpet and related fractals | <|reference_start|>Self-assembly of the discrete Sierpinski carpet and related fractals: It is well known that the discrete Sierpinski triangle can be defined as the nonzero residues modulo 2 of Pascal's triangle, and that from this definition one can easily construct a tileset with which the discrete Sierpinski triangle self-assembles in Winfree's tile assembly model. In this paper we introduce an infinite class of discrete self-similar fractals that are defined by the residues modulo a prime p of the entries in a two-dimensional matrix obtained from a simple recursive equation. We prove that every fractal in this class self-assembles using a uniformly constructed tileset. As a special case we show that the discrete Sierpinski carpet self-assembles using a set of 30 tiles.<|reference_end|> | arxiv | @article{kautz2009self-assembly,
title={Self-assembly of the discrete Sierpinski carpet and related fractals},
author={Steven M. Kautz, James I. Lathrop},
journal={arXiv preprint arXiv:0901.3189},
year={2009},
archivePrefix={arXiv},
eprint={0901.3189},
primaryClass={cs.OH}
} | kautz2009self-assembly |
arxiv-6126 | 0901.3192 | End-to-End Outage Minimization in OFDM Based Linear Relay Networks | <|reference_start|>End-to-End Outage Minimization in OFDM Based Linear Relay Networks: Multi-hop relaying is an economically efficient architecture for coverage extension and throughput enhancement in future wireless networks. OFDM, on the other hand, is a spectrally efficient physical layer modulation technique for broadband transmission. As a natural consequence of combining OFDM with multi-hop relaying, the allocation of per-hop subcarrier power and per-hop transmission time is crucial in optimizing the network performance. This paper is concerned with the end-to-end information outage in an OFDM based linear relay network. Our goal is to find an optimal power and time adaptation policy to minimize the outage probability under a long-term total power constraint. We solve the problem in two steps. First, for any given channel realization, we derive the minimum short-term power required to meet a target transmission rate. We show that it can be obtained through two nested bisection loops. To reduce computational complexity and signalling overhead, we also propose a sub-optimal algorithm. In the second step, we determine a power threshold to control the transmission on-off so that the long-term total power constraint is satisfied. Numerical examples are provided to illustrate the performance of the proposed power and time adaptation schemes with respect to other resource adaptation schemes.<|reference_end|> | arxiv | @article{zhang2009end-to-end,
title={End-to-End Outage Minimization in OFDM Based Linear Relay Networks},
author={Xiaolu Zhang, Meixia Tao, Wenhua Jiao and Chun Sum Ng},
journal={arXiv preprint arXiv:0901.3192},
year={2009},
archivePrefix={arXiv},
eprint={0901.3192},
primaryClass={cs.IT math.IT}
} | zhang2009end-to-end |
arxiv-6127 | 0901.3196 | Statistical Performance Analysis of MDL Source Enumeration in Array Processing | <|reference_start|>Statistical Performance Analysis of MDL Source Enumeration in Array Processing: In this correspondence, we focus on the performance analysis of the widely-used minimum description length (MDL) source enumeration technique in array processing. Unfortunately, available theoretical analyses deviate from simulation results. We present an accurate and insightful performance analysis for the probability of missed detection. We also show that the statistical performance of the MDL is approximately the same under both deterministic and stochastic signal models. Simulation results show the superiority of the proposed analysis over available results.<|reference_end|> | arxiv | @article{haddadi2009statistical,
title={Statistical Performance Analysis of MDL Source Enumeration in Array
Processing},
author={Farzan Haddadi, Mohammadreza Malekmohammadi, Mohammad Mahdi Nayebi,
Mohammad Reza Aref},
journal={arXiv preprint arXiv:0901.3196},
year={2009},
doi={10.1109/TSP.2009.2028207},
archivePrefix={arXiv},
eprint={0901.3196},
primaryClass={cs.IT math.IT}
} | haddadi2009statistical |
arxiv-6128 | 0901.3197 | A Low Density Lattice Decoder via Non-Parametric Belief Propagation | <|reference_start|>A Low Density Lattice Decoder via Non-Parametric Belief Propagation: The recent work of Sommer, Feder and Shalvi presented a new family of codes called low density lattice codes (LDLC) that can be decoded efficiently and approach the capacity of the AWGN channel. A linear-time iterative decoding scheme, based on a message-passing formulation on a factor graph, is given. In the current work we report our theoretical findings regarding the relation between the LDLC decoder and belief propagation. We show that the LDLC decoder is an instance of non-parametric belief propagation and further connect it to the Gaussian belief propagation algorithm. Our new results enable borrowing knowledge from the non-parametric and Gaussian belief propagation domains into the LDLC domain. Specifically, we give more general conditions for convergence of the LDLC decoder (under the same assumptions as the original LDLC convergence analysis). We discuss how to extend the LDLC decoder from Latin square to full-rank, non-square matrices. We propose an efficient construction of a sparse generator matrix and its matching decoder. We report preliminary experimental results which show that our decoder has a symbol error rate comparable to that of the original LDLC decoder.<|reference_end|> | arxiv | @article{bickson2009a,
title={A Low Density Lattice Decoder via Non-Parametric Belief Propagation},
author={Danny Bickson, Alexander T. Ihler and Danny Dolev},
journal={arXiv preprint arXiv:0901.3197},
year={2009},
doi={10.1109/ALLERTON.2009.5394798},
archivePrefix={arXiv},
eprint={0901.3197},
primaryClass={cs.IT math.IT}
} | bickson2009a |
arxiv-6129 | 0901.3199 | A Distributed Trust Diffusion Protocol for Ad Hoc Networks | <|reference_start|>A Distributed Trust Diffusion Protocol for Ad Hoc Networks: In this paper, we propose and evaluate a distributed protocol to manage trust diffusion in ad hoc networks. In this protocol, each node i maintains a "trust value" about another node j which is computed both as a result of the exchanges with node j itself and as a function of the opinion that other nodes have about j. These two aspects are respectively weighted by a trust index that measures the trust the node places in its own experiences and by a trust index representing the trust the node has in the opinions of the other nodes. Simulations have been realized to validate the robustness of this protocol against three kinds of attacks: simple coalitions, Trojan attacks and detonator attacks.<|reference_end|> | arxiv | @article{morvan2009a,
title={A Distributed Trust Diffusion Protocol for Ad Hoc Networks},
author={Michel Morvan (LIP, IXXI), Sylvain Sen\'e (IXXI, TIMC)},
journal={Second International Conference on Wireless and Mobile
Communications, Bucharest, Romania (2006)},
year={2009},
archivePrefix={arXiv},
eprint={0901.3199},
primaryClass={cs.NI}
} | morvan2009a |
arxiv-6130 | 0901.3202 | Model-Consistent Sparse Estimation through the Bootstrap | <|reference_start|>Model-Consistent Sparse Estimation through the Bootstrap: We consider the least-square linear regression problem with regularization by the $\ell^1$-norm, a problem usually referred to as the Lasso. In this paper, we first present a detailed asymptotic analysis of model consistency of the Lasso in low-dimensional settings. For various decays of the regularization parameter, we compute asymptotic equivalents of the probability of correct model selection. For a specific rate decay, we show that the Lasso selects all the variables that should enter the model with probability tending to one exponentially fast, while it selects all other variables with strictly positive probability. We show that this property implies that if we run the Lasso for several bootstrapped replications of a given sample, then intersecting the supports of the Lasso bootstrap estimates leads to consistent model selection. This novel variable selection procedure, referred to as the Bolasso, is extended to high-dimensional settings by a provably consistent two-step procedure.<|reference_end|> | arxiv | @article{bach2009model-consistent,
title={Model-Consistent Sparse Estimation through the Bootstrap},
author={Francis Bach (INRIA Rocquencourt)},
journal={arXiv preprint arXiv:0901.3202},
year={2009},
archivePrefix={arXiv},
eprint={0901.3202},
primaryClass={cs.LG stat.ML}
} | bach2009model-consistent |
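An editor's sketch of the two-step Bolasso procedure described above: run the Lasso on bootstrap resamples, keep only the variables selected in every replication, then refit on the intersected support. The hyperparameters (32 replications, alpha=0.1) are arbitrary choices, not the paper's.

```python
import numpy as np
from sklearn.linear_model import Lasso, LinearRegression

rng = np.random.default_rng(0)
n, p = 200, 10
X = rng.standard_normal((n, p))
y = 2.0 * X[:, 0] - 1.5 * X[:, 3] + 0.5 * rng.standard_normal(n)

support = set(range(p))
for _ in range(32):                      # bootstrap replications
    idx = rng.integers(0, n, size=n)
    coef = Lasso(alpha=0.1).fit(X[idx], y[idx]).coef_
    support &= {j for j in range(p) if coef[j] != 0.0}

print(sorted(support))                   # expected: [0, 3]
final = LinearRegression().fit(X[:, sorted(support)], y)
print(final.coef_)                       # close to [2.0, -1.5]
```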
arxiv-6131 | 0901.3257 | On Leveraging Partial Paths in Partially-Connected Networks | <|reference_start|>On Leveraging Partial Paths in Partially-Connected Networks: Mobile wireless network research focuses on scenarios at the extremes of the network connectivity continuum where the probability of all nodes being connected is either close to unity, assuming connected paths between all nodes (mobile ad hoc networks), or it is close to zero, assuming no multi-hop paths exist at all (delay-tolerant networks). In this paper, we argue that a sizable fraction of networks lies between these extremes and is characterized by the existence of partial paths, i.e. multi-hop path segments that allow forwarding data closer to the destination even when no end-to-end path is available. A fundamental issue in such networks is dealing with disruptions of end-to-end paths. Under a stochastic model, we compare the performance of the established end-to-end retransmission (ignoring partial paths), against a forwarding mechanism that leverages partial paths to forward data closer to the destination even during disruption periods. Perhaps surprisingly, the alternative mechanism is not necessarily superior. However, under a stochastic monotonicity condition between current vs. future path length, which we demonstrate to hold in typical network models, we manage to prove superiority of the alternative mechanism in stochastic dominance terms. We believe that this study could serve as a foundation to design more efficient data transfer protocols for partially-connected networks, which could potentially help reduce the gap between applications that can be supported over disconnected networks and those requiring full connectivity.<|reference_end|> | arxiv | @article{heimlicher2009on,
title={On Leveraging Partial Paths in Partially-Connected Networks},
author={Simon Heimlicher, Merkouris Karaliopoulos, Hanoch Levy and
Thrasyvoulos Spyropoulos},
journal={arXiv preprint arXiv:0901.3257},
year={2009},
doi={10.1109/INFCOM.2009.5061906},
number={TR-303},
archivePrefix={arXiv},
eprint={0901.3257},
primaryClass={cs.NI}
} | heimlicher2009on |
arxiv-6132 | 0901.3291 | Approaching the linguistic complexity | <|reference_start|>Approaching the linguistic complexity: We analyze the rank-frequency distributions of words in selected English and Polish texts. We compare scaling properties of these distributions in both languages. We also study a few small corpora of Polish literary texts and find that for a corpus consisting of texts written by different authors the basic scaling regime is broken more strongly than in the case of comparable corpus consisting of texts written by the same author. Similarly, for a corpus consisting of texts translated into Polish from other languages the scaling regime is broken more strongly than for a comparable corpus of native Polish texts. Moreover, based on the British National Corpus, we consider the rank-frequency distributions of the grammatically basic forms of words (lemmas) tagged with their proper part of speech. We find that these distributions do not scale if each part of speech is analyzed separately. The only part of speech that independently develops a trace of scaling is verbs.<|reference_end|> | arxiv | @article{drozdz2009approaching,
title={Approaching the linguistic complexity},
author={Stanislaw Drozdz, Jaroslaw Kwapien, Adam Orczyk},
journal={Complex Sciences, Lect. Notes ICST vol.4, 1044-1050 (Springer,
2009)},
year={2009},
doi={10.1007/978-3-642-02466-5_104},
archivePrefix={arXiv},
eprint={0901.3291},
primaryClass={cs.CL physics.data-an}
} | drozdz2009approaching |
arxiv-6133 | 0901.3299 | Computing Rooted and Unrooted Maximum Consistent Supertrees | <|reference_start|>Computing Rooted and Unrooted Maximum Consistent Supertrees: A chief problem in phylogenetics and database theory is the computation of a maximum consistent tree from a set of rooted or unrooted trees. Standard inputs are triplets, rooted binary trees on three leaves, or quartets, unrooted binary trees on four leaves. We give exact algorithms constructing rooted and unrooted maximum consistent supertrees in time O(2^n n^5 m^2 log(m)) for a set of m triplets (quartets), each one distinctly leaf-labeled by some subset of n labels. The algorithms extend to weighted triplets (quartets). We further present fast exact algorithms for constructing rooted and unrooted maximum consistent trees in polynomial space. Finally, for a set T of m rooted or unrooted trees with maximum degree D and distinctly leaf-labeled by some subset of a set L of n labels, we compute, in O(2^{mD} n^m m^5 n^6 log(m)) time, a tree distinctly leaf-labeled by a maximum-size subset X of L such that all trees in T, when restricted to X, are consistent with it.<|reference_end|> | arxiv | @article{van iersel2009computing,
title={Computing Rooted and Unrooted Maximum Consistent Supertrees},
author={Leo van Iersel, Matthias Mnich},
journal={arXiv preprint arXiv:0901.3299},
year={2009},
archivePrefix={arXiv},
eprint={0901.3299},
primaryClass={cs.DM cs.DS}
} | van iersel2009computing |
arxiv-6134 | 0901.3314 | Sending a Bi-Variate Gaussian over a Gaussian MAC | <|reference_start|>Sending a Bi-Variate Gaussian over a Gaussian MAC: We study the power versus distortion trade-off for the distributed transmission of a memoryless bi-variate Gaussian source over a two-to-one average-power limited Gaussian multiple-access channel. In this problem, each of two separate transmitters observes a different component of a memoryless bi-variate Gaussian source. The two transmitters then describe their source component to a common receiver via an average-power constrained Gaussian multiple-access channel. From the output of the multiple-access channel, the receiver wishes to reconstruct each source component with the least possible expected squared-error distortion. Our interest is in characterizing the distortion pairs that are simultaneously achievable on the two source components. We present sufficient conditions and necessary conditions for the achievability of a distortion pair. These conditions are expressed as a function of the channel signal-to-noise ratio (SNR) and of the source correlation. In several cases the necessary conditions and sufficient conditions are shown to agree. In particular, we show that if the channel SNR is below a certain threshold, then an uncoded transmission scheme is optimal. We also derive the precise high-SNR asymptotics of an optimal scheme.<|reference_end|> | arxiv | @article{lapidoth2009sending,
title={Sending a Bi-Variate Gaussian over a Gaussian MAC},
author={Amos Lapidoth, Stephan Tinguely},
journal={arXiv preprint arXiv:0901.3314},
year={2009},
doi={10.1109/ISIT.2006.261926},
archivePrefix={arXiv},
eprint={0901.3314},
primaryClass={cs.IT math.IT}
} | lapidoth2009sending |
arxiv-6135 | 0901.3348 | Nuclear norm minimization for the planted clique and biclique problems | <|reference_start|>Nuclear norm minimization for the planted clique and biclique problems: We consider the problems of finding a maximum clique in a graph and finding a maximum-edge biclique in a bipartite graph. Both problems are NP-hard. We write both problems as matrix-rank minimization and then relax them using the nuclear norm. This technique, which may be regarded as a generalization of compressive sensing, has recently been shown to be an effective way to solve rank optimization problems. In the special cases that the input graph has a planted clique or biclique (i.e., a single large clique or biclique plus diversionary edges), our algorithm successfully provides an exact solution to the original instance. For each problem, we provide two analyses of when our algorithm succeeds. In the first analysis, the diversionary edges are placed by an adversary. In the second, they are placed at random. In the case of random edges for the planted clique problem, we obtain the same bound as Alon, Krivelevich and Sudakov as well as Feige and Krauthgamer, but we use different techniques.<|reference_end|> | arxiv | @article{ames2009nuclear,
title={Nuclear norm minimization for the planted clique and biclique problems},
author={Brendan Ames, Stephen Vavasis},
journal={arXiv preprint arXiv:0901.3348},
year={2009},
archivePrefix={arXiv},
eprint={0901.3348},
primaryClass={cs.DS cs.NA}
} | ames2009nuclear |
arxiv-6136 | 0901.3384 | A Boundary Approximation Algorithm for Distributed Sensor Networks | <|reference_start|>A Boundary Approximation Algorithm for Distributed Sensor Networks: We present an algorithm for boundary approximation in locally-linked sensor networks that communicate with a remote monitoring station. Delaunay triangulations and Voronoi diagrams are used to generate a sensor communication network and define boundary segments between sensors, respectively. The proposed algorithm reduces remote station communication by approximating boundaries via a decentralized computation executed within the sensor network. Moreover, the algorithm identifies boundaries based on differences between neighboring sensor readings, and not absolute sensor values. An analysis of the bandwidth consumption of the algorithm is presented and compared to two naive approaches. The proposed algorithm reduces the amount of remote communication (compared to the naive approaches) and becomes increasingly useful in networks with more nodes.<|reference_end|> | arxiv | @article{ham2009a,
title={A Boundary Approximation Algorithm for Distributed Sensor Networks},
author={Michael I. Ham and Marko A. Rodriguez},
journal={International Journal of Sensor Networks, 8(1), pp. 41-46,
ISSN:1748-1279, 2010},
year={2009},
number={LA-UR-09-00111},
archivePrefix={arXiv},
eprint={0901.3384},
primaryClass={cs.DC}
} | ham2009a |
arxiv-6137 | 0901.3403 | Distributed Compressive Sensing | <|reference_start|>Distributed Compressive Sensing: Compressive sensing is a signal acquisition framework based on the revelation that a small collection of linear projections of a sparse signal contains enough information for stable recovery. In this paper we introduce a new theory for distributed compressive sensing (DCS) that enables new distributed coding algorithms for multi-signal ensembles that exploit both intra- and inter-signal correlation structures. The DCS theory rests on a new concept that we term the joint sparsity of a signal ensemble. Our theoretical contribution is to characterize the fundamental performance limits of DCS recovery for jointly sparse signal ensembles in the noiseless measurement setting; our result connects single-signal, joint, and distributed (multi-encoder) compressive sensing. To demonstrate the efficacy of our framework and to show that additional challenges such as computational tractability can be addressed, we study in detail three example models for jointly sparse signals. For these models, we develop practical algorithms for joint recovery of multiple signals from incoherent projections. In two of our three models, the results are asymptotically best-possible, meaning that both the upper and lower bounds match the performance of our practical algorithms. Moreover, simulations indicate that the asymptotics take effect with just a moderate number of signals. DCS is immediately applicable to a range of problems in sensor arrays and networks.<|reference_end|> | arxiv | @article{baron2009distributed,
title={Distributed Compressive Sensing},
author={Dror Baron, Marco F. Duarte, Michael B. Wakin, Shriram Sarvotham,
Richard G. Baraniuk},
journal={arXiv preprint arXiv:0901.3403},
year={2009},
archivePrefix={arXiv},
eprint={0901.3403},
primaryClass={cs.IT math.IT}
} | baron2009distributed |
arxiv-6138 | 0901.3408 | Limits of Deterministic Compressed Sensing Considering Arbitrary Orthonormal Basis for Sparsity | <|reference_start|>Limits of Deterministic Compressed Sensing Considering Arbitrary Orthonormal Basis for Sparsity: It has previously been shown that proper random linear samples of a finite discrete signal (vector) which has a sparse representation in an orthonormal basis make it possible (with probability 1) to recover the original signal. Moreover, the choice of the linear samples does not depend on the sparsity domain. In this paper, we will show that the replacement of random linear samples with deterministic functions of the signal (not necessarily linear) will not result in unique reconstruction of k-sparse signals except for k=1. We will show that there exist deterministic nonlinear sampling functions for unique reconstruction of 1-sparse signals while deterministic linear samples fail to do so.<|reference_end|> | arxiv | @article{amini2009limits,
title={Limits of Deterministic Compressed Sensing Considering Arbitrary
Orthonormal Basis for Sparsity},
author={Arash Amini and Farokh Marvasti},
journal={arXiv preprint arXiv:0901.3408},
year={2009},
archivePrefix={arXiv},
eprint={0901.3408},
primaryClass={cs.IT math.IT}
} | amini2009limits |
arxiv-6139 | 0901.3467 | Erasure Codes with a Banded Structure for Hybrid Iterative-ML Decoding | <|reference_start|>Erasure Codes with a Banded Structure for Hybrid Iterative-ML Decoding: This paper presents new FEC codes for the erasure channel, LDPC-Band, that have been designed to optimize hybrid iterative-Maximum Likelihood (ML) decoding. Indeed, these codes simultaneously feature a sparse parity check matrix, which allows efficient use of iterative LDPC decoding, and a generator matrix with a band structure, which allows fast ML decoding on the erasure channel. The combination of these two decoding algorithms leads to erasure codes achieving a very good trade-off between complexity and erasure correction capability.<|reference_end|> | arxiv | @article{soro2009erasure,
title={Erasure Codes with a Banded Structure for Hybrid Iterative-ML Decoding},
author={Alexandre Soro, Mathieu Cunche, Jerome Lacan, Vincent Roca},
journal={arXiv preprint arXiv:0901.3467},
year={2009},
archivePrefix={arXiv},
eprint={0901.3467},
primaryClass={cs.IT math.IT}
} | soro2009erasure |
arxiv-6140 | 0901.3475 | Efficient decoding algorithm using triangularity of $\mathbf{R}$ matrix of QR-decomposition | <|reference_start|>Efficient decoding algorithm using triangularity of $\mathbf{R}$ matrix of QR-decomposition: An efficient decoding algorithm named `divided decoder' is proposed in this paper. Divided decoding can be combined with any decoder using QR-decomposition and offers different trade-offs between performance and complexity. Divided decoding provides various combinations of two or more different searching algorithms, and hence gives flexibility in error rate and complexity to the algorithms using it. We calculate diversity orders and upper bounds of error rates for typical models when these models are solved by divided decodings with a sphere decoder, and discuss the effects of divided decoding on complexity. Simulation results of divided decodings combined with a sphere decoder according to different splitting indices agree with the theoretical analysis.<|reference_end|> | arxiv | @article{park2009efficient,
title={Efficient decoding algorithm using triangularity of $\mathbf{R}$ matrix of
QR-decomposition},
author={In Sook Park},
journal={arXiv preprint arXiv:0901.3475},
year={2009},
archivePrefix={arXiv},
eprint={0901.3475},
primaryClass={cs.IT math.IT}
} | park2009efficient |
arxiv-6141 | 0901.3482 | Code injection attacks on harvard-architecture devices | <|reference_start|>Code injection attacks on harvard-architecture devices: Harvard architecture CPU design is common in the embedded world. Examples of Harvard-based architecture devices are the Mica family of wireless sensors. Mica motes have limited memory and can process only very small packets. Stack-based buffer overflow techniques that inject code into the stack and then execute it are therefore not applicable. It has been a common belief that code injection is impossible on Harvard architectures. This paper presents a remote code injection attack for Mica sensors. We show how to exploit program vulnerabilities to permanently inject any piece of code into the program memory of an Atmel AVR-based sensor. To our knowledge, this is the first result that presents a code injection technique for such devices. Previous work only succeeded in injecting data or performing transient attacks. Injecting permanent code is more powerful since the attacker can gain full control of the target sensor. We also show that this attack can be used to inject a worm that can propagate through the wireless sensor network and possibly create a sensor botnet. Our attack combines different techniques such as return oriented programming and fake stack injection. We present implementation details and suggest some counter-measures.<|reference_end|> | arxiv | @article{francillon2009code,
title={Code injection attacks on harvard-architecture devices},
author={Aur\'elien Francillon, Claude Castelluccia},
journal={CCS '08: Proceedings of the 15th ACM conference on Computer and
communications security (2008) 15--26},
year={2009},
archivePrefix={arXiv},
eprint={0901.3482},
primaryClass={cs.CR}
} | francillon2009code |
arxiv-6142 | 0901.3574 | Automating Access Control Logics in Simple Type Theory with LEO-II | <|reference_start|>Automating Access Control Logics in Simple Type Theory with LEO-II: Garg and Abadi recently proved that prominent access control logics can be translated in a sound and complete way into modal logic S4. We have previously outlined how normal multimodal logics, including monomodal logics K and S4, can be embedded in simple type theory (which is also known as higher-order logic) and we have demonstrated that the higher-order theorem prover LEO-II can automate reasoning in and about them. In this paper we combine these results and describe a sound and complete embedding of different access control logics in simple type theory. Employing this framework we show that the off the shelf theorem prover LEO-II can be applied to automate reasoning in prominent access control logics.<|reference_end|> | arxiv | @article{benzmueller2009automating,
title={Automating Access Control Logics in Simple Type Theory with LEO-II},
author={Christoph Benzmueller},
journal={SEKI Report SR-2008-01 (ISSN 1437-4447), Saarland University, 2008},
year={2009},
doi={10.1007/978-3-642-01244-0_34},
number={SEKI Report SR-2008-01},
archivePrefix={arXiv},
eprint={0901.3574},
primaryClass={cs.LO cs.AI}
} | benzmueller2009automating |
arxiv-6143 | 0901.3580 | Feedback Capacity of the Gaussian Interference Channel to Within 1.7075 Bits: the Symmetric Case | <|reference_start|>We characterize the symmetric capacity to within 1.7075 bits/s/Hz for the two-user Gaussian interference channel with feedback. The result makes use of a deterministic model to provide insights into the Gaussian channel. We derive a new outer bound to show that a proposed achievable scheme can achieve the symmetric capacity to within 1.7075 bits for all channel parameters. From this result, we show that feedback provides unbounded gain, i.e., the gain becomes arbitrarily large for certain channel parameters. It is a surprising result because feedback has so far been known to provide only power gain (bounded gain) in the context of multiple access channels and broadcast channels.<|reference_end|> | arxiv | @article{suh2009feedback,
title={Feedback Capacity of the Gaussian Interference Channel to Within 1.7075
Bits: the Symmetric Case},
author={Changho Suh, David Tse},
journal={arXiv preprint arXiv:0901.3580},
year={2009},
archivePrefix={arXiv},
eprint={0901.3580},
primaryClass={cs.IT math.IT}
} | suh2009feedback |
arxiv-6144 | 0901.3585 | Resource Adaptive Agents in Interactive Theorem Proving | <|reference_start|>Resource Adaptive Agents in Interactive Theorem Proving: We introduce a resource adaptive agent mechanism which supports the user in interactive theorem proving. The mechanism uses a two layered architecture of agent societies to suggest appropriate commands together with possible command argument instantiations. Experiments with this approach show that its effectiveness can be further improved by introducing a resource concept. In this paper we provide an abstract view on the overall mechanism, motivate the necessity of an appropriate resource concept and discuss its realization within the agent architecture.<|reference_end|> | arxiv | @article{benzmueller2009resource,
title={Resource Adaptive Agents in Interactive Theorem Proving},
author={Christoph Benzmueller, Volker Sorge},
journal={SEKI Report (ISSN 1437-4447), Saarland University, 1999},
year={2009},
number={SR-99-02},
archivePrefix={arXiv},
eprint={0901.3585},
primaryClass={cs.LO cs.AI}
} | benzmueller2009resource |
arxiv-6145 | 0901.3590 | On the Dual Formulation of Boosting Algorithms | <|reference_start|>We study boosting algorithms from a new perspective. We show that the Lagrange dual problems of AdaBoost, LogitBoost and soft-margin LPBoost with generalized hinge loss are all entropy maximization problems. By looking at the dual problems of these boosting algorithms, we show that the success of boosting algorithms can be understood in terms of maintaining a better margin distribution by maximizing margins and at the same time controlling the margin variance. We also theoretically prove that, approximately, AdaBoost maximizes the average margin, instead of the minimum margin. The duality formulation also enables us to develop column generation based optimization algorithms, which are totally corrective. We show that they exhibit almost identical classification results to those of standard stage-wise additive boosting algorithms but with much faster convergence rates. Therefore fewer weak classifiers are needed to build the ensemble using our proposed optimization technique.<|reference_end|> | arxiv | @article{shen2009on,
title={On the Dual Formulation of Boosting Algorithms},
author={Chunhua Shen and Hanxi Li},
journal={arXiv preprint arXiv:0901.3590},
year={2009},
doi={10.1109/TPAMI.2010.47},
archivePrefix={arXiv},
eprint={0901.3590},
primaryClass={cs.LG cs.CV}
} | shen2009on |
arxiv-6146 | 0901.3596 | Joint source-channel with side information coding error exponents | <|reference_start|>In this paper, we study the upper and the lower bounds on the joint source-channel coding error exponent with decoder side-information. The results in the paper are non-trivial extensions of Csiszar's classical paper [5]. Unlike the joint source-channel coding result in [5], it is not obvious whether the lower bound and the upper bound are equivalent even if the channel coding error exponent is known. For a class of channels, including the symmetric channels, we apply a game-theoretic result to establish the existence of a saddle point and hence prove that the lower and upper bounds are the same if the channel coding error exponent is known. More interestingly, we show that encoder side-information does not increase the error exponents in this case.<|reference_end|> | arxiv | @article{chang2009joint,
title={Joint source-channel with side information coding error exponents},
author={Cheng Chang},
journal={arXiv preprint arXiv:0901.3596},
year={2009},
archivePrefix={arXiv},
eprint={0901.3596},
primaryClass={cs.IT math.IT}
} | chang2009joint |
arxiv-6147 | 0901.3608 | A remark on higher order RUE-resolution with EXTRUE | <|reference_start|>A remark on higher order RUE-resolution with EXTRUE: We show that a prominent counterexample for the completeness of first order RUE-resolution does not apply to the higher order RUE-resolution approach EXTRUE.<|reference_end|> | arxiv | @article{benzmueller2009a,
title={A remark on higher order RUE-resolution with EXTRUE},
author={Christoph Benzmueller},
journal={SEKI Report (ISSN 1437-4447), Saarland University, 1999},
year={2009},
number={SR-02-05},
archivePrefix={arXiv},
eprint={0901.3608},
primaryClass={cs.AI cs.LO}
} | benzmueller2009a |
arxiv-6148 | 0901.3611 | Safe Carrier Sensing Range in CSMA Network under Physical Interference Model | <|reference_start|>In this paper, we study the setting of the carrier-sensing range in 802.11 networks under the (cumulative) physical interference model. Specifically, we identify a carrier-sensing range that will prevent collisions in 802.11 networks due to carrier-sensing failure under the physical interference model. We find that the carrier-sensing range required under the physical interference model must be larger than that required under the protocol (pairwise) interference model by a multiplicative factor. For example, if the SINR requirement is 10dB and the path-loss exponent is 4, the factor is 1.4. Furthermore, given a fixed path-loss exponent of 4, the factor increases as the SINR requirement increases. However, the limit of the factor is 1.84 as the SINR requirement goes to infinity.<|reference_end|> | arxiv | @article{fu2009safe,
title={Safe Carrier Sensing Range in CSMA Network under Physical Interference
Model},
author={Liqun Fu, Soung Chang Liew, Jianwei Huang},
journal={arXiv preprint arXiv:0901.3611},
year={2009},
archivePrefix={arXiv},
eprint={0901.3611},
primaryClass={cs.NI}
} | fu2009safe |
arxiv-6149 | 0901.3615 | A Constructive Generalization of Nash Equilibrium | <|reference_start|>In a society of multiple individuals, if everybody is only interested in maximizing his own payoff, will there exist any equilibrium for the society? John Nash proved more than 50 years ago that an equilibrium always exists such that nobody would benefit from unilaterally changing his strategy. Nash Equilibrium is a central concept in game theory, which offers the mathematical foundation for social science and economy. However, the original definition is declarative, without including a solution for finding such equilibria. It was later found that it is computationally difficult to find a Nash equilibrium. Furthermore, a Nash equilibrium may be unstable, sensitive to the smallest variation of payoff functions. Making the situation worse, a society with selfish individuals can have an enormous number of equilibria, making it extremely hard to find the globally optimal one. This paper offers a constructive generalization of Nash equilibrium to cover the case when the selfishness of individuals is reduced to lower levels in a controllable way. It shows that the society has one and only one equilibrium when the selfishness is reduced to a certain level. When every individual follows the iterative, soft-decision optimization process presented in this paper, the society converges to the unique equilibrium with an exponential rate under any initial conditions. When it is a consensus equilibrium at the same time, it must be the global optimum. The study of this paper suggests that, to build a good, stable society (including the financial market) for the benefit of everyone in it, the pursuit of maximal payoff by each individual should be controlled at some level, either by voluntary good citizenship or by proper regulations.<|reference_end|> | arxiv | @article{huang2009a,
title={A Constructive Generalization of Nash Equilibrium},
author={Xiaofei Huang},
journal={arXiv preprint arXiv:0901.3615},
year={2009},
archivePrefix={arXiv},
eprint={0901.3615},
primaryClass={cs.GT cs.CC}
} | huang2009a |
arxiv-6150 | 0901.3619 | Mechanized semantics for the Clight subset of the C language | <|reference_start|>Mechanized semantics for the Clight subset of the C language: This article presents the formal semantics of a large subset of the C language called Clight. Clight includes pointer arithmetic, "struct" and "union" types, C loops and structured "switch" statements. Clight is the source language of the CompCert verified compiler. The formal semantics of Clight is a big-step operational semantics that observes both terminating and diverging executions and produces traces of input/output events. The formal semantics of Clight is mechanized using the Coq proof assistant. In addition to the semantics of Clight, this article describes its integration in the CompCert verified compiler and several ways by which the semantics was validated.<|reference_end|> | arxiv | @article{blazy2009mechanized,
title={Mechanized semantics for the Clight subset of the C language},
author={Sandrine Blazy (CEDRIC, INRIA Rocquencourt), Xavier Leroy (INRIA
Rocquencourt)},
journal={Journal of Automated Reasoning 43, 3 (2009) 263-288},
year={2009},
doi={10.1007/s10817-009-9148-3},
archivePrefix={arXiv},
eprint={0901.3619},
primaryClass={cs.PL}
} | blazy2009mechanized |
arxiv-6151 | 0901.3620 | Enterprise model verification and validation: an approach | <|reference_start|>This article presents a Verification and Validation approach which is used here in order to complete the classical tool box the industrial user may utilize in the Enterprise Modeling and Integration domain. This approach, which has been defined independently of any application domain, is based on several formal concepts and tools presented in this paper. These concepts are property concepts, a property reference matrix, property graphs, an enterprise modeling domain ontology, conceptual graphs and formal reasoning mechanisms.<|reference_end|> | arxiv | @article{chapurlat2009enterprise,
title={Enterprise model verification and validation: an approach},
author={Vincent Chapurlat (LGI2P), Bernard Kamsu Foguem (LGI2P), Fran\c{c}ois
Prunet (LIRMM)},
journal={Annual Review in Control 27, 2 (2003) 185-197},
year={2009},
archivePrefix={arXiv},
eprint={0901.3620},
primaryClass={cs.SE}
} | chapurlat2009enterprise |
arxiv-6152 | 0901.3630 | Decay of Correlations in Low Density Parity Check Codes: Low Noise Regime | <|reference_start|>Consider transmission over a binary additive white Gaussian noise channel using a fixed low-density parity check code. We consider the posterior measure over the code bits and the corresponding correlation between two code bits, averaged over the noise realizations. We show that for low enough noise variance this average correlation decays exponentially fast with the graph distance between the code bits. One consequence of this result is that for low enough noise variance the GEXIT functions (further averaged over a standard code ensemble) of the belief propagation and optimal decoders are the same.<|reference_end|> | arxiv | @article{kudekar2009decay,
title={Decay of Correlations in Low Density Parity Check Codes: Low Noise
Regime},
author={Shrinivas Kudekar and Nicolas Macris},
journal={arXiv preprint arXiv:0901.3630},
year={2009},
archivePrefix={arXiv},
eprint={0901.3630},
primaryClass={cs.IT math.IT}
} | kudekar2009decay |
arxiv-6153 | 0901.3657 | Homotopy methods for multiplication modulo triangular sets | <|reference_start|>We study the cost of multiplication modulo triangular families of polynomials. Following previous work by Li, Moreno Maza and Schost, we propose an algorithm that relies on homotopy and fast evaluation-interpolation techniques. We obtain a quasi-linear time complexity for substantial families of examples, for which no such result was known before. Applications are given, notably to the addition of algebraic numbers in small characteristic.<|reference_end|> | arxiv | @article{bostan2009homotopy,
title={Homotopy methods for multiplication modulo triangular sets},
author={Alin Bostan, Muhammad Chowdhury, Joris van der Hoeven, Eric Schost},
journal={arXiv preprint arXiv:0901.3657},
year={2009},
archivePrefix={arXiv},
eprint={0901.3657},
primaryClass={cs.SC cs.DS}
} | bostan2009homotopy |
arxiv-6154 | 0901.3692 | The Complexity of Computing Minimal Unidirectional Covering Sets | <|reference_start|>The Complexity of Computing Minimal Unidirectional Covering Sets: Given a binary dominance relation on a set of alternatives, a common thread in the social sciences is to identify subsets of alternatives that satisfy certain notions of stability. Examples can be found in areas as diverse as voting theory, game theory, and argumentation theory. Brandt and Fischer [BF08] proved that it is NP-hard to decide whether an alternative is contained in some inclusion-minimal upward or downward covering set. For both problems, we raise this lower bound to the Theta_{2}^{p} level of the polynomial hierarchy and provide a Sigma_{2}^{p} upper bound. Relatedly, we show that a variety of other natural problems regarding minimal or minimum-size covering sets are hard or complete for either of NP, coNP, and Theta_{2}^{p}. An important consequence of our results is that neither minimal upward nor minimal downward covering sets (even when guaranteed to exist) can be computed in polynomial time unless P=NP. This sharply contrasts with Brandt and Fischer's result that minimal bidirectional covering sets (i.e., sets that are both minimal upward and minimal downward covering sets) are polynomial-time computable.<|reference_end|> | arxiv | @article{baumeister2009the,
title={The Complexity of Computing Minimal Unidirectional Covering Sets},
author={Dorothea Baumeister, Felix Brandt, Felix Fischer, Jan Hoffmann, Joerg
Rothe},
journal={Theory of Computing Systems 53(3), 2012},
year={2009},
doi={10.1007/s00224-012-9437-9},
archivePrefix={arXiv},
eprint={0901.3692},
primaryClass={cs.CC cs.GT}
} | baumeister2009the |
arxiv-6155 | 0901.3699 | Randomly colouring simple hypergraphs | <|reference_start|>We study the problem of constructing a (near) random proper $q$-colouring of a simple k-uniform hypergraph with n vertices and maximum degree \Delta. (Proper in that no edge is mono-coloured and simple in that two edges have maximum intersection of size one). We give conditions on q, \Delta so that if these conditions are satisfied, Glauber dynamics will converge in O(n\log n) time from a random (improper) start. The interesting thing here is that for k\geq 3 we can take q=o(\Delta).<|reference_end|> | arxiv | @article{frieze2009randomly,
title={Randomly colouring simple hypergraphs},
author={Alan Frieze, Pall Melsted},
journal={arXiv preprint arXiv:0901.3699},
year={2009},
archivePrefix={arXiv},
eprint={0901.3699},
primaryClass={cs.DM cs.DS}
} | frieze2009randomly |
arxiv-6156 | 0901.3706 | Symmetric tensor decomposition | <|reference_start|>We present an algorithm for decomposing a symmetric tensor, of dimension n and order d, as a sum of rank-1 symmetric tensors, extending the algorithm of Sylvester devised in 1886 for binary forms. We recall the correspondence between the decomposition of a homogeneous polynomial in n variables of total degree d as a sum of powers of linear forms (Waring's problem), incidence properties on secant varieties of the Veronese variety and the representation of linear forms as a linear combination of evaluations at distinct points. Then we reformulate Sylvester's approach from the dual point of view. Exploiting this duality, we propose necessary and sufficient conditions for the existence of such a decomposition of a given rank, using the properties of Hankel (and quasi-Hankel) matrices, derived from multivariate polynomials and normal form computations. This leads to the resolution of polynomial equations of small degree in non-generic cases. We propose a new algorithm for symmetric tensor decomposition, based on this characterization and on linear algebra computations with these Hankel matrices. The impact of this contribution is two-fold. First, it permits an efficient computation of the decomposition of any tensor of sub-generic rank, as opposed to widely used iterative algorithms with unproved global convergence (e.g. Alternate Least Squares or gradient descents). Second, it gives tools for understanding uniqueness conditions, and for detecting the rank.<|reference_end|> | arxiv | @article{brachat2009symmetric,
title={Symmetric tensor decomposition},
author={Jerome Brachat (INRIA Sophia Antipolis), Pierre Comon (I3S), Bernard
Mourrain (INRIA Sophia Antipolis), Elias Tsigaridas (INRIA Sophia Antipolis)},
journal={Linear Algebra and its Applications 433, 11-12 (2010) 1851-1872},
year={2009},
archivePrefix={arXiv},
eprint={0901.3706},
primaryClass={cs.SC math.AG}
} | brachat2009symmetric |
arxiv-6157 | 0901.3751 | Sorting improves word-aligned bitmap indexes | <|reference_start|>Sorting improves word-aligned bitmap indexes: Bitmap indexes must be compressed to reduce input/output costs and minimize CPU usage. To accelerate logical operations (AND, OR, XOR) over bitmaps, we use techniques based on run-length encoding (RLE), such as Word-Aligned Hybrid (WAH) compression. These techniques are sensitive to the order of the rows: a simple lexicographical sort can divide the index size by 9 and make indexes several times faster. We investigate row-reordering heuristics. Simply permuting the columns of the table can increase the sorting efficiency by 40%. Secondary contributions include efficient algorithms to construct and aggregate bitmaps. The effect of word length is also reviewed by constructing 16-bit, 32-bit and 64-bit indexes. Using 64-bit CPUs, we find that 64-bit indexes are slightly faster than 32-bit indexes despite being nearly twice as large.<|reference_end|> | arxiv | @article{lemire2009sorting,
title={Sorting improves word-aligned bitmap indexes},
author={Daniel Lemire, Owen Kaser, Kamel Aouiche},
journal={Data & Knowledge Engineering, Volume 69, Issue 1, 2010, Pages 3-28},
year={2009},
doi={10.1016/j.datak.2009.08.006},
archivePrefix={arXiv},
eprint={0901.3751},
primaryClass={cs.DB}
} | lemire2009sorting |
arxiv-6158 | 0901.3754 | Bid Optimization in Broad-Match Ad auctions | <|reference_start|>Ad auctions in sponsored search support ``broad match'' that allows an advertiser to target a large number of queries while bidding only on a limited number. While giving more expressiveness to advertisers, this feature makes it challenging to optimize bids to maximize their returns: choosing to bid on a query as a broad match because it provides high profit results in bidding for related queries which may yield low or even negative profits. We abstract and study the complexity of the {\em bid optimization problem} which is to determine an advertiser's bids on a subset of keywords (possibly using broad match) so that her profit is maximized. In the query language model, when the advertiser is allowed to bid on all queries as broad match, we present a linear programming (LP)-based polynomial-time algorithm that achieves the optimal profit. In the model in which an advertiser can only bid on keywords, i.e., a subset of keywords as an exact or broad match, we show that this problem is not approximable within any reasonable approximation factor unless P=NP. To deal with this hardness result, we present a constant-factor approximation when the optimal profit significantly exceeds the cost. This algorithm is based on rounding a natural LP formulation of the problem. Finally, we study a budgeted variant of the problem, and show that in the query language model, one can find two budget constrained ad campaigns in polynomial time that implement the optimal bidding strategy. Our results are the first to address bid optimization under the broad match feature which is common in ad auctions.<|reference_end|> | arxiv | @article{even-dar2009bid,
title={Bid Optimization in Broad-Match Ad auctions},
author={Eyal Even-dar, Yishay Mansour, Vahab Mirrokni, S. Muthukrishnan, Uri
Nadav},
journal={arXiv preprint arXiv:0901.3754},
year={2009},
archivePrefix={arXiv},
eprint={0901.3754},
primaryClass={cs.GT cs.DS}
} | even-dar2009bid |
arxiv-6159 | 0901.3761 | Closures in Formal Languages and Kuratowski's Theorem | <|reference_start|>Closures in Formal Languages and Kuratowski's Theorem: A famous theorem of Kuratowski states that in a topological space, at most 14 distinct sets can be produced by repeatedly applying the operations of closure and complement to a given set. We re-examine this theorem in the setting of formal languages, where closure is either Kleene closure or positive closure. We classify languages according to the structure of the algebra they generate under iterations of complement and closure. We show that there are precisely 9 such algebras in the case of positive closure, and 12 in the case of Kleene closure.<|reference_end|> | arxiv | @article{brzozowski2009closures,
title={Closures in Formal Languages and Kuratowski's Theorem},
author={J. Brzozowski, E. Grant, J. Shallit},
journal={arXiv preprint arXiv:0901.3761},
year={2009},
archivePrefix={arXiv},
eprint={0901.3761},
primaryClass={cs.CC cs.FL}
} | brzozowski2009closures |
arxiv-6160 | 0901.3762 | Enhancing the capabilities of LIGO time-frequency plane searches through clustering | <|reference_start|>Enhancing the capabilities of LIGO time-frequency plane searches through clustering: One class of gravitational wave signals LIGO is searching for consists of short duration bursts of unknown waveforms. Potential sources include core collapse supernovae, gamma ray burst progenitors, and mergers of binary black holes or neutron stars. We present a density-based clustering algorithm to improve the performance of time-frequency searches for such gravitational-wave bursts when they are extended in time and/or frequency, and not sufficiently well known to permit matched filtering. We have implemented this algorithm as an extension to the QPipeline, a gravitational-wave data analysis pipeline for the detection of bursts, which currently determines the statistical significance of events based solely on the peak significance observed in minimum uncertainty regions of the time-frequency plane. Density based clustering improves the performance of such a search by considering the aggregate significance of arbitrarily shaped regions in the time-frequency plane and rejecting the isolated minimum uncertainty features expected from the background detector noise. In this paper, we present test results for simulated signals and demonstrate that density based clustering improves the performance of the QPipeline for signals extended in time and/or frequency.<|reference_end|> | arxiv | @article{khan2009enhancing,
title={Enhancing the capabilities of LIGO time-frequency plane searches through
clustering},
author={Rubab Khan, Shourov Chatterji},
journal={Class.Quant.Grav.26:155009,2009},
year={2009},
doi={10.1088/0264-9381/26/15/155009},
archivePrefix={arXiv},
eprint={0901.3762},
primaryClass={gr-qc astro-ph.IM cs.CV physics.data-an}
} | khan2009enhancing |
arxiv-6161 | 0901.3763 | Closures in Formal Languages: Concatenation, Separation, and Algorithms | <|reference_start|>Closures in Formal Languages: Concatenation, Separation, and Algorithms: We continue our study of open and closed languages. We investigate how the properties of being open and closed are preserved under concatenation. We investigate analogues, in formal languages, of the separation axioms in topological spaces; one of our main results is that there is a clopen partition separating two words if and only if the words commute. We show that we can decide in quadratic time if the language specified by a DFA is closed, but if the language is specified by an NFA, the problem is PSPACE-complete.<|reference_end|> | arxiv | @article{brzozowski2009closures,
title={Closures in Formal Languages: Concatenation, Separation, and Algorithms},
author={J. Brzozowski, E. Grant, J. Shallit},
journal={arXiv preprint arXiv:0901.3763},
year={2009},
archivePrefix={arXiv},
eprint={0901.3763},
primaryClass={cs.CC cs.FL}
} | brzozowski2009closures |
arxiv-6162 | 0901.3769 | Deceptiveness and Neutrality - the ND family of fitness landscapes | <|reference_start|>When a considerable number of mutations have no effect on fitness values, the fitness landscape is said to be neutral. In order to study the interplay between neutrality, which exists in many real-world applications, and the performance of metaheuristics, it is useful to design landscapes which make it possible to tune the neutral degree distribution precisely. Even though many neutral landscape models have already been designed, none of them are general enough to create landscapes with specific neutral degree distributions. We propose three steps to design such landscapes: first, using an algorithm, we construct a landscape whose distribution roughly fits the target one; then we use a simulated annealing heuristic to bring the two distributions closer; and finally we assign fitness values to each neutral network. Then, using this new family of fitness landscapes, we are able to highlight the interplay between deceptiveness and neutrality.<|reference_end|> | arxiv | @article{beaudoin2009deceptiveness,
title={Deceptiveness and Neutrality - the ND family of fitness landscapes},
author={William Beaudoin (I3S), S\'ebastien Verel (I3S), Philippe Collard
(I3S), Cathy Escazut (I3S)},
journal={arXiv preprint arXiv:0901.3769},
year={2009},
doi={10.1145/1143997.1144091},
archivePrefix={arXiv},
eprint={0901.3769},
primaryClass={cs.AI}
} | beaudoin2009deceptiveness |
arxiv-6163 | 0901.3795 | On a random number of disorders | <|reference_start|>We observe a random sequence with the following properties: it consists of three segments, each of which is a homogeneous Markov process. Each segment has its own one-step transition probability law, and the length of each segment is unknown and random. This means that at two random successive moments (which may coincide and may also equal zero) the source of observations changes, and the first observation in the new segment is chosen according to the new transition probability, starting from the last state of the previous segment. In effect, the number of homogeneous segments is random. The transition probabilities of each process are known, and an a priori distribution of the disorder moments is given. Previous research on this problem has been devoted to various questions concerning the distribution changes. The random number of distributional segments creates new problems in the solution, compared with the analysis of the model with a deterministic number of segments. Two cases are presented in detail. In the first, the objective is to stop at or between the disorder moments, while in the second the objective is to find a strategy which immediately detects the distribution changes. Both problems are reformulated as optimal stopping of the observed sequences. A detailed analysis of the problem is presented to show the form of the optimal decision function.<|reference_end|> | arxiv | @article{szajowski2009on,
title={On a random number of disorders},
author={Krzysztof Szajowski},
journal={Probability and Mathematical Statistics, vol. 31, Fasc. 1 (2011),
pp. 17-45},
year={2009},
archivePrefix={arXiv},
eprint={0901.3795},
primaryClass={math.PR cs.IT math.IT math.ST stat.TH}
} | szajowski2009on |
arxiv-6164 | 0901.3809 | Interference channel capacity region for randomized fixed-composition codes | <|reference_start|>Randomized fixed-composition codes with optimal decoding error exponents have been studied \cite{Raul_ISIT,Raul_journal} for the finite alphabet interference channel (IFC) with two transmitter-receiver pairs. In this paper we investigate the capacity region of the randomized fixed-composition coding scheme. A complete characterization of the capacity region of the said coding scheme is given. The inner bound is derived by showing the existence of a positive error exponent within the capacity region. A simple universal decoding rule is given. The tight outer bound is derived by extending a technique first developed in \cite{Dueck_RC} for single-input single-output channels to interference channels. It is shown that even with a sophisticated time-sharing scheme among randomized fixed-composition codes, the capacity region of the randomized fixed-composition coding is not bigger than the known Han-Kobayashi \cite{Han_Kobayashi} capacity region. This suggests that the average behavior of random codes is not sufficient to get new capacity regions.<|reference_end|> | arxiv | @article{chang2009interference,
title={Interference channel capacity region for randomized fixed-composition
codes},
author={Cheng Chang},
journal={arXiv preprint arXiv:0901.3809},
year={2009},
doi={10.1109/ALLERTON.2009.5394808},
archivePrefix={arXiv},
eprint={0901.3809},
primaryClass={cs.IT math.IT}
} | chang2009interference |
arxiv-6165 | 0901.3820 | On the rate distortion function of Bernoulli Gaussian sequences | <|reference_start|>In this paper, we study the rate distortion function of the i.i.d. sequence of multiplications of a Bernoulli $p$ random variable and a Gaussian random variable $\sim N(0,1)$. We use a new technique in the derivation of the lower bound in which we establish the duality between channel coding and lossy source coding in the strong sense. We improve the lower bound on the rate distortion function over the best known lower bound by $p\log_2\frac{1}{p}$ if the distortion $D$ is small. This has some interesting implications for sparse signals, where $p$ is small, since the known gap between the lower and upper bound is $H(p)$. This improvement in the lower bound shows that the lower and upper bounds are almost identical for sparse signals with small distortion because $\lim\limits_{p\to 0}\frac{p\log_2\frac{1}{p}}{H(p)}=1$.<|reference_end|> | arxiv | @article{chang2009on,
title={On the rate distortion function of Bernoulli Gaussian sequences},
author={Cheng Chang},
journal={arXiv preprint arXiv:0901.3820},
year={2009},
doi={10.1109/ISIT.2010.5513289},
archivePrefix={arXiv},
eprint={0901.3820},
primaryClass={cs.IT math.IT}
} | chang2009on |
arxiv-6166 | 0901.3828 | On Recognizable Languages of Infinite Pictures | <|reference_start|>In a recent paper, Altenbernd, Thomas and W\"ohrle have considered acceptance of languages of infinite two-dimensional words (infinite pictures) by finite tiling systems, with the usual acceptance conditions, such as the B\"uchi and Muller ones, first used for infinite words. The authors asked for a comparison of the tiling system acceptance with an acceptance of pictures row by row using an automaton model over ordinal words of length $\omega^2$. We give in this paper a solution to this problem, showing that all languages of infinite pictures which are accepted row by row by B\"uchi or Choueka automata reading words of length $\omega^2$ are B\"uchi recognized by a finite tiling system, but the converse is not true. We also give the answer to two other questions which were raised by Altenbernd, Thomas and W\"ohrle, showing that it is undecidable whether a B\"uchi recognizable language of infinite pictures is E-recognizable (respectively, A-recognizable).<|reference_end|> | arxiv | @article{finkel2009on,
title={On Recognizable Languages of Infinite Pictures},
author={Olivier Finkel (ELM)},
journal={International Journal of Foundations of Computer Science 15, 6
(2004) 823-840},
year={2009},
archivePrefix={arXiv},
eprint={0901.3828},
primaryClass={cs.LO cs.CC math.LO}
} | finkel2009on |
arxiv-6167 | 0901.3839 | Remembering what we like: Toward an agent-based model of Web traffic | <|reference_start|>Remembering what we like: Toward an agent-based model of Web traffic: Analysis of aggregate Web traffic has shown that PageRank is a poor model of how people actually navigate the Web. Using the empirical traffic patterns generated by a thousand users over the course of two months, we characterize the properties of Web traffic that cannot be reproduced by Markovian models, in which destinations are independent of past decisions. In particular, we show that the diversity of sites visited by individual users is smaller and more broadly distributed than predicted by the PageRank model; that link traffic is more broadly distributed than predicted; and that the time between consecutive visits to the same site by a user is less broadly distributed than predicted. To account for these discrepancies, we introduce a more realistic navigation model in which agents maintain individual lists of bookmarks that are used as teleportation targets. The model can also account for branching, a traffic property caused by browser features such as tabs and the back button. The model reproduces aggregate traffic patterns such as site popularity, while also generating more accurate predictions of diversity, link traffic, and return time distributions. This model for the first time allows us to capture the extreme heterogeneity of aggregate traffic measurements while explaining the more narrowly focused browsing patterns of individual users.<|reference_end|> | arxiv | @article{goncalves2009remembering,
title={Remembering what we like: Toward an agent-based model of Web traffic},
author={Bruno Goncalves, Mark R. Meiss, Jose J. Ramasco, Alessandro Flammini,
Filippo Menczer},
journal={WSDM 2009 Late Breaking Results},
year={2009},
archivePrefix={arXiv},
eprint={0901.3839},
primaryClass={cs.HC cs.CY cs.IR cs.MA physics.soc-ph}
} | goncalves2009remembering |
arxiv-6168 | 0901.3843 | Fast algorithms for differential equations in positive characteristic | <|reference_start|>Fast algorithms for differential equations in positive characteristic: We address complexity issues for linear differential equations in characteristic $p>0$: resolution and computation of the $p$-curvature. For these tasks, our main focus is on algorithms whose complexity behaves well with respect to $p$. We prove bounds linear in $p$ on the degree of polynomial solutions and propose algorithms for testing the existence of polynomial solutions in sublinear time $\tilde{O}(p^{1/2})$, and for determining a whole basis of the solution space in quasi-linear time $\tilde{O}(p)$; the $\tilde{O}$ notation indicates that we hide logarithmic factors. We show that for equations of arbitrary order, the $p$-curvature can be computed in subquadratic time $\tilde{O}(p^{1.79})$, and that this can be improved to $O(\log(p))$ for first order equations and to $\tilde{O}(p)$ for classes of second order equations.<|reference_end|> | arxiv | @article{bostan2009fast,
title={Fast algorithms for differential equations in positive characteristic},
author={Alin Bostan (INRIA Rocquencourt), \'Eric Schost},
journal={arXiv preprint arXiv:0901.3843},
year={2009},
archivePrefix={arXiv},
eprint={0901.3843},
primaryClass={cs.SC}
} | bostan2009fast |
arxiv-6169 | 0901.3880 | Capacity Scaling of Single-source Wireless Networks: Effect of Multiple Antennas | <|reference_start|>We consider a wireless network in which a single source node located at the center of a unit area having $m$ antennas transmits messages to $n$ randomly located destination nodes in the same area having a single antenna each. To achieve a sum-rate proportional to $m$ by transmit beamforming, channel state information (CSI) at the transmitter (CSIT) is essentially required, which is hard to obtain in practice because of the time-varying nature of the channels and the feedback overhead. We show that, even without CSIT, the achievable sum-rate scales as $\Theta(m\log m)$ if cooperation between receivers is allowed. By deriving the cut-set upper bound, we also show that $\Theta(m\log m)$ scaling is optimal. Specifically, for $n=\omega(m^2)$, simple TDMA-based quantize-and-forward is enough to achieve the capacity scaling. For $n=\omega(m)$ and $n=\operatorname{O}(m^2)$, on the other hand, we apply hierarchical cooperation to achieve the capacity scaling.<|reference_end|> | arxiv | @article{jeon2009capacity,
title={Capacity Scaling of Single-source Wireless Networks: Effect of Multiple
Antennas},
author={Sang-Woon Jeon, Sae-Young Chung},
journal={IEEE Transactions on Information Theory, vol. 58, no. 11, pp.
6870-6878, Nov. 2012},
year={2009},
archivePrefix={arXiv},
eprint={0901.3880},
primaryClass={cs.IT math.IT}
} | jeon2009capacity |
arxiv-6170 | 0901.3882 | Graph-based local elimination algorithms in discrete optimization | <|reference_start|>The aim of this paper is to provide a review of structural decomposition methods in discrete optimization and to give a unified framework in the form of local elimination algorithms (LEA). This paper is organized as follows. Local elimination algorithms for discrete optimization (DO) problems (DOPs) with constraints are considered, and a classification of dynamic programming computational procedures is given. We introduce the Elimination Game and the elimination tree. The application of the bucket elimination algorithm from constraint satisfaction (CS) to solving DOPs is described. We consider different local elimination schemes and related notions. Clustering that merges several variables into a single meta-variable defines a promising approach to solve DOPs. This makes it possible to create a quotient (condensed) graph and to apply a local block elimination algorithm. In order to describe a block elimination process, we introduce the Block Elimination Game. We discuss the connection between the aforementioned local elimination algorithmic schemes and a way of transforming the directed acyclic graph (DAG) of the computational LEA procedure into a tree decomposition.<|reference_end|> | arxiv | @article{shcherbina2009graph-based,
title={Graph-based local elimination algorithms in discrete optimization},
author={Oleg Shcherbina},
journal={arXiv preprint arXiv:0901.3882},
year={2009},
archivePrefix={arXiv},
eprint={0901.3882},
primaryClass={cs.DM}
} | shcherbina2009graph-based |
arxiv-6171 | 0901.3902 | iKlax: a New Musical Audio Format for Active Listening | <|reference_start|>In this paper, we present a new model for interactive music. Unlike most interactive systems, our model is based on file organization and does not require digital audio processing. This model includes a definition of a constraint system and its solver. The products of this project are intended for the general public and inexperienced users, as well as professional musicians, and will be distributed commercially. We present here three products of this project. The difficulty of this project is to design a technology and software products for interactive music which must be easy to use both by the general public and by professional composers.<|reference_end|> | arxiv | @article{gallot2009iklax:,
title={iKlax: a New Musical Audio Format for Active Listening},
author={Fabien Gallot, Owen Lagadec, Myriam Desainte-Catherine (LaBRI),
Sylvain Marchand (LaBRI)},
journal={International Computer Music Conference (ICMC), Belfast, Ireland
(2008)},
year={2009},
archivePrefix={arXiv},
eprint={0901.3902},
primaryClass={cs.SD}
} | gallot2009iklax: |
arxiv-6172 | 0901.3906 | A Program Transformation for Continuation Call-Based Tabled Execution | <|reference_start|>The advantages of tabled evaluation regarding program termination and reduction of complexity are well known -- as are the significant implementation, portability, and maintenance efforts that some proposals (especially those based on suspension) require. This implementation effort is reduced by program transformation-based continuation call techniques, at some efficiency cost. However, the traditional formulation of this proposal by Ramesh and Cheng limits the interleaving of tabled and non-tabled predicates and thus cannot be used as-is for arbitrary programs. In this paper we present a complete translation for the continuation call technique which, using the runtime support needed for the traditional proposal, solves these problems and makes it possible to execute arbitrary tabled programs. We present performance results which show that CCall offers a useful tradeoff that can be competitive with state-of-the-art implementations.<|reference_end|> | arxiv | @article{de guzman2009a,
title={A Program Transformation for Continuation Call-Based Tabled Execution},
author={Pablo Chico de Guzman, Manuel Carro, Manuel V. Hermenegildo},
journal={arXiv preprint arXiv:0901.3906},
year={2009},
archivePrefix={arXiv},
eprint={0901.3906},
primaryClass={cs.PL}
} | de guzman2009a |
arxiv-6173 | 0901.3910 | Simulation of mitochondrial metabolism using multi-agents system | <|reference_start|>Metabolic pathways describe chains of enzymatic reactions. Their modelling is a key point to understand living systems. An enzymatic reaction is an interaction between one or several metabolites (substrates) and an enzyme (a simple protein or an enzymatic complex built of several subunits). In our Mitochondria in Silico Project, MitoScop, we study the metabolism of the mitochondria, an intra-cellular organelle. Many ordinary differential equation models are available in the literature. They fit experimental results on flux values inside the metabolic pathways well, but many parameters are difficult to transcribe with such models: localization of enzymes, rules about the reaction scheduler, etc. Moreover, a model of a significant part of mitochondrial metabolism could become very complex and contain more than 50 equations. In this context, multi-agent systems appear as an alternative to model the metabolic pathways. Firstly, we have considered membrane design. The mitochondrion is a particular case because the inner mitochondrial space, i.e. the matricial space, is delimited by two membranes: the inner and the outer one. In addition to matricial enzymes, other enzymes are located inside the membranes or in the inter-membrane space. Analysis of mitochondrial metabolism must take this kind of architecture into account.<|reference_end|> | arxiv | @article{lales2009simulation,
title={Simulation of mitochondrial metabolism using multi-agents system},
author={Charles Lales (LaBRI), N. Parisey, Jean-Pierre Mazat, Marie
Beurton-Aimar (LaBRI)},
journal={AAMAS'05 (MAS*BIOMED'05), Utrecht, The Netherlands (2005)},
year={2009},
archivePrefix={arXiv},
eprint={0901.3910},
primaryClass={q-bio.SC cs.MA q-bio.QM}
} | lales2009simulation |
arxiv-6174 | 0901.3923 | Model-Based Event Detection in Wireless Sensor Networks | <|reference_start|>Model-Based Event Detection in Wireless Sensor Networks: In this paper we present an application of techniques from statistical signal processing to the problem of event detection in wireless sensor networks used for environmental monitoring. The proposed approach uses the well-established Principal Component Analysis (PCA) technique to build a compact model of the observed phenomena that is able to capture daily and seasonal trends in the collected measurements. We then use the divergence between actual measurements and model predictions to detect the existence of discrete events within the collected data streams. Our preliminary results show that this event detection mechanism is sensitive enough to detect the onset of rain events using the temperature modality of a wireless sensor network.<|reference_end|> | arxiv | @article{gupchup2009model-based,
title={Model-Based Event Detection in Wireless Sensor Networks},
author={Jayant Gupchup, Andreas Terzis, Randal Burns and Alex Szalay},
journal={Workshop for Data Sharing and Interoperability on the World Wide
Web (DSI 2007). April 2007, In Proceedings},
year={2009},
archivePrefix={arXiv},
eprint={0901.3923},
primaryClass={cs.NI cs.CV}
} | gupchup2009model-based |
arxiv-6175 | 0901.3924 | Area-Universal Rectangular Layouts | <|reference_start|>Area-Universal Rectangular Layouts: A rectangular layout is a partition of a rectangle into a finite set of interior-disjoint rectangles. Rectangular layouts appear in various applications: as rectangular cartograms in cartography, as floorplans in building architecture and VLSI design, and as graph drawings. Often areas are associated with the rectangles of a rectangular layout and it might hence be desirable if one rectangular layout can represent several area assignments. A layout is area-universal if any assignment of areas to rectangles can be realized by a combinatorially equivalent rectangular layout. We identify a simple necessary and sufficient condition for a rectangular layout to be area-universal: a rectangular layout is area-universal if and only if it is one-sided. More generally, given any rectangular layout L and any assignment of areas to its regions, we show that there can be at most one layout (up to horizontal and vertical scaling) which is combinatorially equivalent to L and achieves a given area assignment. We also investigate similar questions for perimeter assignments. The adjacency requirements for the rectangles of a rectangular layout can be specified in various ways, most commonly via the dual graph of the layout. We show how to find an area-universal layout for a given set of adjacency requirements whenever such a layout exists.<|reference_end|> | arxiv | @article{eppstein2009area-universal,
title={Area-Universal Rectangular Layouts},
author={David Eppstein, Elena Mumford, Bettina Speckmann, and Kevin Verbeek},
journal={arXiv preprint arXiv:0901.3924},
year={2009},
archivePrefix={arXiv},
eprint={0901.3924},
primaryClass={cs.CG}
} | eppstein2009area-universal |
arxiv-6176 | 0901.3929 | Revisiting the Age of Enlightenment from a Collective Decision Making Systems Perspective | <|reference_start|>Revisiting the Age of Enlightenment from a Collective Decision Making Systems Perspective: The ideals of the eighteenth century's Age of Enlightenment are the foundation of modern democracies. The era was characterized by thinkers who promoted progressive social reforms that opposed the long-established aristocracies and monarchies of the time. Prominent examples of such reforms include the establishment of inalienable human rights, self-governing republics, and market capitalism. Twenty-first century democratic nations can benefit from revisiting the systems developed during the Enlightenment and reframing them within the techno-social context of the Information Age. This article explores the application of social algorithms that make use of Thomas Paine's (English: 1737--1809) representatives, Adam Smith's (Scottish: 1723--1790) self-interested actors, and Marquis de Condorcet's (French: 1743--1794) optimal decision making groups. It is posited that technology-enabled social algorithms can better realize the ideals articulated during the Enlightenment.<|reference_end|> | arxiv | @article{rodriguez2009revisiting,
title={Revisiting the Age of Enlightenment from a Collective Decision Making
Systems Perspective},
author={Marko A. Rodriguez and Jennifer H. Watkins},
journal={First Monday, volume 14, number 8, ISSN:1396-0466, LA-UR-09-00324,
University of Illinois at Chicago Library, August 2009},
year={2009},
number={LA-UR-09-00324},
archivePrefix={arXiv},
eprint={0901.3929},
primaryClass={cs.CY cs.DL}
} | rodriguez2009revisiting |
arxiv-6177 | 0901.3939 | Effectively Searching Maps in Web Documents | <|reference_start|>Maps are an important source of information in archaeology and other sciences. Users want to search for historical maps to determine the recorded history of the political geography of regions at different eras, to find out where exactly archaeological artifacts were discovered, etc. Currently, they have to use a generic search engine and add the term map along with other keywords to search for maps. This crude method will generate a significant number of false positives that the user will need to cull through to get the desired results. To reduce their manual effort, we propose an automatic map identification, indexing, and retrieval system that enables users to search and retrieve maps appearing in a large corpus of digital documents using simple keyword queries. We identify features that can help in distinguishing maps from other figures in digital documents and show how a Support-Vector-Machine-based classifier can be used to identify maps. We propose map-level metadata, e.g., captions, references to the maps in text, etc., and document-level metadata, e.g., title, abstract, citations, how recent the publication is, etc., and show how they can be automatically extracted and indexed. Our novel ranking algorithm weights different metadata fields differently and also uses the document-level metadata to help rank retrieved maps. Empirical evaluations show which features should be selected and which metadata fields should be weighted more. We also demonstrate improved retrieval results in comparison to adaptations of existing methods for map retrieval. Our map search engine has been deployed in an online map-search system that is part of the Blind-Review digital library system.<|reference_end|> | arxiv | @article{tan2009effectively,
title={Effectively Searching Maps in Web Documents},
author={Qingzhao Tan, Prasenjit Mitra, C. Lee Giles},
journal={ECIR2009},
year={2009},
archivePrefix={arXiv},
eprint={0901.3939},
primaryClass={cs.DL cs.IR}
} | tan2009effectively |
arxiv-6178 | 0901.3948 | OFDM Channel Estimation Based on Adaptive Thresholding for Sparse Signal Detection | <|reference_start|>Wireless OFDM channels can be approximated by a time-varying filter with sparse time-domain taps. Recent achievements in sparse signal processing, such as compressed sensing, have facilitated the use of sparsity in estimation, which improves the performance significantly. The problem with these sparsity-based methods is that they need a stable transformation matrix, a requirement which is not fulfilled in current transmission setups. To assist the analog filtering at the receiver, the transmitter leaves some of the subcarriers at both edges of the bandwidth unused, which results in an ill-conditioned DFT submatrix. To overcome this difficulty we propose Adaptive Thresholding for Sparse Signal Detection (ATSSD). Simulation results confirm that the proposed method works well in time-invariant and especially time-varying channels where other methods may not work as well.<|reference_end|> | arxiv | @article{soltanolkotabi2009ofdm,
title={OFDM Channel Estimation Based on Adaptive Thresholding for Sparse Signal
Detection},
author={Mahdi Soltanolkotabi, Arash Amini and Farokh Marvasti},
journal={arXiv preprint arXiv:0901.3948},
year={2009},
archivePrefix={arXiv},
eprint={0901.3948},
primaryClass={cs.IT math.IT}
} | soltanolkotabi2009ofdm |
arxiv-6179 | 0901.3950 | Efficient Sampling of Sparse Wideband Analog Signals | <|reference_start|>Efficient Sampling of Sparse Wideband Analog Signals: Periodic nonuniform sampling is a known method to sample spectrally sparse signals below the Nyquist rate. This strategy relies on the implicit assumption that the individual samplers are exposed to the entire frequency range. This assumption becomes impractical for wideband sparse signals. The current paper proposes an alternative sampling stage that does not require a full-band front end. Instead, signals are captured with an analog front end that consists of a bank of multipliers and lowpass filters whose cutoff is much lower than the Nyquist rate. The problem of recovering the original signal from the low-rate samples can be studied within the framework of compressive sampling. An appropriate parameter selection ensures that the samples uniquely determine the analog input. Moreover, the analog input can be stably reconstructed with digital algorithms. Numerical experiments support the theoretical analysis.<|reference_end|> | arxiv | @article{mishali2009efficient,
title={Efficient Sampling of Sparse Wideband Analog Signals},
author={Moshe Mishali, Yonina C. Eldar and Joel A. Tropp},
journal={Proc. of IEEEI, 25th convention, pp. 290-294, Dec. 2008},
year={2009},
number={CCIT Report #705, Oct. 2008, EE Dept., Technion Israel},
archivePrefix={arXiv},
eprint={0901.3950},
primaryClass={cs.IT math.IT}
} | mishali2009efficient |
arxiv-6180 | 0901.3984 | Stop the Chase | <|reference_start|>Stop the Chase: The chase procedure, an algorithm proposed 25+ years ago to fix constraint violations in database instances, has been successfully applied in a variety of contexts, such as query optimization, data exchange, and data integration. Its practicability, however, is limited by the fact that - for an arbitrary set of constraints - it might not terminate; even worse, chase termination is an undecidable problem in general. In response, the database community has proposed sufficient restrictions on top of the constraints that guarantee chase termination on any database instance. In this paper, we propose a novel sufficient termination condition, called inductive restriction, which strictly generalizes previous conditions, but can be checked as efficiently. Furthermore, we motivate and study the problem of data-dependent chase termination and, as a key result, present sufficient termination conditions w.r.t. fixed instances. They are strictly more general than inductive restriction and might guarantee termination although the chase does not terminate in the general case.<|reference_end|> | arxiv | @article{meier2009stop,
title={Stop the Chase},
author={Michael Meier and Michael Schmidt and Georg Lausen},
journal={arXiv preprint arXiv:0901.3984},
year={2009},
archivePrefix={arXiv},
eprint={0901.3984},
primaryClass={cs.DB}
} | meier2009stop |
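Since the record above concerns when the chase terminates, a toy implementation helps fix intuition. This is a didactic sketch far simpler than the procedure studied in the paper: only single-atom tuple-generating dependencies with distinct body variables, no equality-generating dependencies, and a step budget standing in for the undecidable termination check.

```python
import itertools

def chase(instance, tgds, max_steps=100):
    """Naive chase for TGDs of the form body(R, vars) -> head(S, vars).
    Head variables absent from the body are existential and receive
    fresh labeled nulls. Returns the chased instance, or None when the
    step budget is exhausted (the chase may not terminate)."""
    instance = set(instance)
    fresh = itertools.count()
    for _ in range(max_steps):
        fired = False
        for (brel, bvars), (hrel, hvars) in tgds:
            for rel, tup in sorted(instance):
                if rel != brel or len(tup) != len(bvars):
                    continue
                env = dict(zip(bvars, tup))   # homomorphism body -> tuple
                def witnessed(t):             # does a tuple satisfy the head?
                    return all(v not in env or env[v] == t[i]
                               for i, v in enumerate(hvars))
                if any(r == hrel and len(t) == len(hvars) and witnessed(t)
                       for r, t in instance):
                    continue
                ext = dict(env)               # bind existentials to fresh nulls
                for v in hvars:
                    if v not in ext:
                        ext[v] = "_n%d" % next(fresh)
                instance.add((hrel, tuple(ext[v] for v in hvars)))
                fired = True
        if not fired:
            return instance
    return None

# R(x,y) -> S(y,x): terminates after one step
print(sorted(chase({("R", ("a", "b"))},
                   [(("R", ("x", "y")), ("S", ("y", "x")))])))
# R(x,y) -> exists z S(y,z) together with S(x,y) -> R(y,y): cyclic,
# keeps inventing nulls, so the budget runs out and None is returned
print(chase({("R", ("a", "b"))},
            [(("R", ("x", "y")), ("S", ("y", "z"))),
             (("S", ("x", "y")), ("R", ("y", "y")))]))
```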
arxiv-6181 | 0901.3987 | Improved Delay Estimates for a Queueing Model for Random Linear Coding for Unicast | <|reference_start|>Improved Delay Estimates for a Queueing Model for Random Linear Coding for Unicast: Consider a lossy communication channel for unicast with zero-delay feedback. For this communication scenario, a simple retransmission scheme is optimum with respect to delay. An alternative approach is to use random linear coding in automatic repeat-request (ARQ) mode. We extend the work of Shrader and Ephremides by deriving an expression for the delay of random linear coding over a field of infinite size. Simulation results for various field sizes are also provided.<|reference_end|> | arxiv | @article{ravanbakhsh2009improved,
title={Improved Delay Estimates for a Queueing Model for Random Linear Coding
for Unicast},
author={Mohammad Ravanbakhsh, Angela I. Barbero Diez, and {\O}yvind Ytrehus},
journal={arXiv preprint arXiv:0901.3987},
year={2009},
doi={10.1109/ISIT.2009.5205892},
archivePrefix={arXiv},
eprint={0901.3987},
primaryClass={cs.IT math.IT}
} | ravanbakhsh2009improved |
arxiv-6182 | 0901.3990 | Du corpus au dictionnaire | <|reference_start|>Du corpus au dictionnaire: In this article, we propose an automatic process to build multi-lingual lexico-semantic resources. The goal of these resources is to semantically browse textual information contained in texts in different languages. This method uses a mathematical model called Atlas s\'emantiques in order to represent the different senses of each word. It uses the linguistic relations between words to create graphs that are projected into a semantic space. These projections constitute semantic maps that denote the sense trends of each given word. This model is fed with syntactic relations between words extracted from a corpus. Therefore, the lexico-semantic resource produced describes all the words and all their meanings observed in the corpus. The sense trends are expressed by syntactic contexts, typical for a given meaning. The link between each sense trend and the utterances used to build the sense trend is also stored in an index. Thus, all the instances of a word in a particular sense are linked and can be browsed easily. By using several corpora in different languages, resources are built that correspond with each other across languages. This makes it possible to browse information across languages thanks to translations of syntactic contexts (even if some of them are partial).<|reference_end|> | arxiv | @article{jacquemin2009du,
title={Du corpus au dictionnaire},
author={Bernard Jacquemin (LIMSI), Sabine Ploux (L2C2)},
journal={Cahiers de Linguistique. Revue de sociolinguistique et de
sociologie de la langue fran\c{c}aise 33, 1 (2008) 63-84},
year={2009},
archivePrefix={arXiv},
eprint={0901.3990},
primaryClass={cs.CL cs.IR}
} | jacquemin2009du |
arxiv-6183 | 0901.4002 | Max Edge Coloring of Trees | <|reference_start|>Max Edge Coloring of Trees: We study the weighted generalization of the edge coloring problem where the weight of each color class (matching) equals the weight of its heaviest edge and the goal is to minimize the sum of the colors' weights. We present a 3/2-approximation algorithm for trees.<|reference_end|> | arxiv | @article{lucarelli2009max,
title={Max Edge Coloring of Trees},
author={Giorgio Lucarelli, Ioannis Milis, Vangelis Th. Paschos},
journal={arXiv preprint arXiv:0901.4002},
year={2009},
archivePrefix={arXiv},
eprint={0901.4002},
primaryClass={cs.DS}
} | lucarelli2009max |
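The objective in the record above is easy to misread, so here is a brute-force reference implementation of it: edges are partitioned into matchings, each class costs its heaviest edge, and the sum of class costs is minimized. This exponential check is only for sanity-testing tiny instances; it is not the paper's 3/2-approximation algorithm.

```python
from itertools import product

def max_edge_coloring_opt(edges):
    """Exact optimum by exhaustive search; edges are (u, v, weight)."""
    m = len(edges)
    best = float("inf")
    for assign in product(range(m), repeat=m):   # color of each edge
        # matching constraint: same-colored edges must be vertex-disjoint
        valid = all(assign[i] != assign[j]
                    or not set(edges[i][:2]) & set(edges[j][:2])
                    for i in range(m) for j in range(i + 1, m))
        if not valid:
            continue
        cost = sum(max(w for (u, v, w), c in zip(edges, assign) if c == col)
                   for col in set(assign))
        best = min(best, cost)
    return best

# star with 3 leaves: all edges meet at the center, so three classes
# are forced and the optimum is the total weight
print(max_edge_coloring_opt([(0, 1, 5), (0, 2, 3), (0, 3, 1)]))  # 9
# path on 3 vertices: the two edges share a vertex, cost 5 + 2
print(max_edge_coloring_opt([(0, 1, 5), (1, 2, 2)]))             # 7
```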
arxiv-6184 | 0901.4004 | Mining for adverse drug events with formal concept analysis | <|reference_start|>Mining for adverse drug events with formal concept analysis: Pharmacovigilance databases consist of case reports involving drugs and adverse events (AEs). Some methods are applied consistently to highlight all signals, i.e. all statistically significant associations between a drug and an AE. These methods are appropriate for verification of more complex relationships involving one or several drug(s) and AE(s) (e.g. syndromes or interactions) but do not address their identification. We propose a method for the extraction of these relationships based on Formal Concept Analysis (FCA) associated with disproportionality measures. This method identifies all sets of drugs and AEs which are potential signals, syndromes or interactions. Compared to a previous experiment using disproportionality analysis without FCA, the addition of FCA was more efficient for identifying false positives related to concomitant drugs.<|reference_end|> | arxiv | @article{estacio-moreno2009mining,
title={Mining for adverse drug events with formal concept analysis},
author={Alexander Estacio-Moreno, Yannick Toussaint, C\'edric Bousquet},
journal={Studies in health technology and informatics 136 (2008) 803-8},
year={2009},
archivePrefix={arXiv},
eprint={0901.4004},
primaryClass={cs.AI}
} | estacio-moreno2009mining |
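The record above combines formal concept analysis with disproportionality measures. On a toy spontaneous-report context, both ingredients fit in a few lines: formal concepts are the maximal report-set/attribute-set rectangles, and the proportional reporting ratio (PRR) is one common disproportionality measure. The paper does not commit to PRR specifically, and the report data below are purely illustrative.

```python
from itertools import combinations

# toy context: each report lists drugs (D*) and adverse events (E*)
reports = [
    {"D1", "E1"}, {"D1", "E1"}, {"D1", "D2", "E1", "E2"},
    {"D2", "E2"}, {"D1", "E2"}, {"D2", "E1", "E2"},
]

def extent(attrs):
    return [i for i, r in enumerate(reports) if attrs <= r]

def intent(objs):
    rs = [reports[i] for i in objs]
    return set.intersection(*rs) if rs else set()

def concepts():
    """All formal concepts by brute-force closure of attribute subsets;
    fine for toy contexts, hopeless for real pharmacovigilance data."""
    attrs = sorted(set().union(*reports))
    seen = set()
    for k in range(len(attrs) + 1):
        for subset in combinations(attrs, k):
            e = extent(set(subset))
            closed = frozenset(intent(e))
            if closed not in seen:
                seen.add(closed)
                yield e, sorted(closed)

def prr(drug, event):
    """Proportional reporting ratio of an event given a drug."""
    with_d = [r for r in reports if drug in r]
    without_d = [r for r in reports if drug not in r]
    p1 = sum(event in r for r in with_d) / len(with_d)
    p0 = sum(event in r for r in without_d) / max(len(without_d), 1)
    return p1 / p0 if p0 else float("inf")

for e, i in concepts():
    print(i, "supported by reports", e)
print("PRR(D1, E1) =", prr("D1", "E1"))   # 1.5 on this toy data
```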
arxiv-6185 | 0901.4012 | Cross-situational and supervised learning in the emergence of communication | <|reference_start|>Cross-situational and supervised learning in the emergence of communication: Scenarios for the emergence or bootstrap of a lexicon involve the repeated interaction between at least two agents who must reach a consensus on how to name N objects using H words. Here we consider minimal models of two types of learning algorithms: cross-situational learning, in which the individuals determine the meaning of a word by looking for something in common across all observed uses of that word, and supervised operant conditioning learning, in which there is strong feedback between individuals about the intended meaning of the words. Despite the stark differences between these learning schemes, we show that they yield the same communication accuracy in the realistic limits of large N and H, which coincides with the result of the classical occupancy problem of randomly assigning N objects to H words.<|reference_end|> | arxiv | @article{fontanari2009cross-situational,
title={Cross-situational and supervised learning in the emergence of
communication},
author={Jos\'e F. Fontanari and Angelo Cangelosi},
journal={Interaction Studies: Social Behaviour and Communication in
Biological and Artificial Systems, 12, 119-133 (2011)},
year={2009},
doi={10.1075/is.12.1.05fon},
archivePrefix={arXiv},
eprint={0901.4012},
primaryClass={cs.LG}
} | fontanari2009cross-situational |
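The limiting accuracy in the record above is tied to the classical occupancy problem. Below is a quick numerical check, under the assumption (mine, stated explicitly) that "communication accuracy" means the probability that a randomly chosen object is the unique referent of its word after N objects are randomly assigned to H words.

```python
import numpy as np

def occupancy_accuracy_mc(n_objects, n_words, trials=20000, seed=0):
    """Monte Carlo estimate of the random-assignment baseline."""
    rng = np.random.default_rng(seed)
    hits = 0
    for _ in range(trials):
        words = rng.integers(0, n_words, size=n_objects)
        obj = rng.integers(0, n_objects)
        hits += np.sum(words == words[obj]) == 1   # no other object shares it
    return hits / trials

def occupancy_accuracy_exact(n, h):
    """Each of the other n-1 objects avoids the word w.p. 1 - 1/h."""
    return (1 - 1 / h) ** (n - 1)

print(occupancy_accuracy_mc(50, 200), occupancy_accuracy_exact(50, 200))
```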
arxiv-6186 | 0901.4016 | A Proposal for Proquints: Identifiers that are Readable, Spellable, and Pronounceable | <|reference_start|>A Proposal for Proquints: Identifiers that are Readable, Spellable, and Pronounceable: Identifiers (IDs) are pervasive throughout our modern life. We suggest that these IDs would be easier to manage and remember if they were easily readable, spellable, and pronounceable. As a solution to this problem we propose using PRO-nouncable QUINT-uplets of alternating unambiguous consonants and vowels: _proquints_.<|reference_end|> | arxiv | @article{wilkerson2009a,
title={A Proposal for Proquints: Identifiers that are Readable, Spellable, and
Pronounceable},
author={Daniel Shawcross Wilkerson},
journal={arXiv preprint arXiv:0901.4016},
year={2009},
archivePrefix={arXiv},
eprint={0901.4016},
primaryClass={cs.SE cs.CY cs.HC}
} | wilkerson2009a |
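The proquint scheme in the record above is concrete enough to implement directly: a 16-bit word becomes five letters in a consonant-vowel-consonant-vowel-consonant pattern, with 4 bits per consonant and 2 per vowel. The alphabets below are the ones given in the proposal, and the proposal's own example maps 127.0.0.1 to "lusab-babad".

```python
CONSONANTS = "bdfghjklmnprstvz"   # 16 consonants, 4 bits each
VOWELS = "aiou"                   # 4 vowels, 2 bits each

def uint16_to_proquint(x):
    """Encode a 16-bit value as con-vo-con-vo-con (4+2+4+2+4 bits)."""
    assert 0 <= x < 1 << 16
    return (CONSONANTS[(x >> 12) & 0xF] + VOWELS[(x >> 10) & 0x3]
            + CONSONANTS[(x >> 6) & 0xF] + VOWELS[(x >> 4) & 0x3]
            + CONSONANTS[x & 0xF])

def proquint_to_uint16(q):
    """Inverse mapping: shift in 4 bits per consonant, 2 per vowel."""
    x = 0
    for ch in q:
        if ch in CONSONANTS:
            x = (x << 4) | CONSONANTS.index(ch)
        else:
            x = (x << 2) | VOWELS.index(ch)
    return x

def ipv4_to_proquint(a, b, c, d):
    """32-bit values are written as two quints joined by a dash."""
    return (uint16_to_proquint((a << 8) | b) + "-"
            + uint16_to_proquint((c << 8) | d))

print(ipv4_to_proquint(127, 0, 0, 1))            # lusab-babad
print(hex(proquint_to_uint16("lusab")))          # 0x7f00
```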
arxiv-6187 | 0901.4023 | Using Kolmogorov Complexity for Understanding Some Limitations on Steganography | <|reference_start|>Using Kolmogorov Complexity for Understanding Some Limitations on Steganography: Recently, perfectly secure steganographic systems have been described for a wide class of sources of covertexts. The speed of transmission of secret information for these stegosystems is proportional to the length of the covertext. In this work we show that there are sources of covertexts for which such stegosystems do not exist. The key observation is that if the set of possible covertexts has maximal Kolmogorov complexity, then a high-speed perfect stegosystem has to have complexity of the same order.<|reference_end|> | arxiv | @article{ryabko2009using,
title={Using Kolmogorov Complexity for Understanding Some Limitations on
Steganography},
author={Boris Ryabko, Daniil Ryabko},
journal={arXiv preprint arXiv:0901.4023},
year={2009},
archivePrefix={arXiv},
eprint={0901.4023},
primaryClass={cs.CC cs.CR}
} | ryabko2009using |
arxiv-6188 | 0901.4032 | On the upstream mobility scheme for two-phase flow in porous media | <|reference_start|>On the upstream mobility scheme for two-phase flow in porous media: When neglecting capillarity, two-phase incompressible flow in porous media is modelled as a scalar nonlinear hyperbolic conservation law. A change in the rock type results in a change of the flux function. Discretizing in one dimension with a finite volume method, we investigate two numerical fluxes, an extension of the Godunov flux and the upstream mobility flux, the latter being widely used in hydrogeology and petroleum engineering. In the case of a changing rock type, we give examples where the upstream mobility flux does not give the right answer.<|reference_end|> | arxiv | @article{mishra2009on,
title={On the upstream mobility scheme for two-phase flow in porous media},
author={Siddhartha Mishra (University of Oslo), J\'er\^ome Jaffr\'e (INRIA
Rocquencourt)},
journal={Computational Geosciences 14 (2010) 105-124},
year={2009},
doi={10.1007/s10596-009-9135-0},
number={RR-6789},
archivePrefix={arXiv},
eprint={0901.4032},
primaryClass={cs.NA math.AP math.NA physics.class-ph}
} | mishra2009on |
arxiv-6189 | 0901.4068 | On the Sum Capacity of A Class of Cyclically Symmetric Deterministic Interference Channels | <|reference_start|>On the Sum Capacity of A Class of Cyclically Symmetric Deterministic Interference Channels: Certain deterministic interference channels have been shown to accurately model Gaussian interference channels in the asymptotic low-noise regime. Motivated by this correspondence, we investigate a K user-pair, cyclically symmetric, deterministic interference channel in which each receiver experiences interference only from its neighboring transmitters (Wyner model). We establish the sum capacity for a large set of channel parameters, thus generalizing previous results for the 2-pair case.<|reference_end|> | arxiv | @article{bandemer2009on,
title={On the Sum Capacity of A Class of Cyclically Symmetric Deterministic
Interference Channels},
author={Bernd Bandemer, Gonzalo Vazquez-Vilar, Abbas El Gamal},
journal={arXiv preprint arXiv:0901.4068},
year={2009},
doi={10.1109/ISIT.2009.5205951},
archivePrefix={arXiv},
eprint={0901.4068},
primaryClass={cs.IT math.IT}
} | bandemer2009on |
arxiv-6190 | 0901.4080 | A Framework to Handle Linear Temporal Properties in (\omega-)Regular Model Checking | <|reference_start|>A Framework to Handle Linear Temporal Properties in (\omega-)Regular Model Checking: Since the topic emerged several years ago, work on regular model checking has mostly been devoted to the verification of state reachability and safety properties. Though it was known that linear temporal properties could also be checked within this framework, little has been done about working out the corresponding details. This paper addresses this issue in the context of regular model checking based on the encoding of states by finite or infinite words. It works out the exact constructions to be used in both cases, and proposes a partial solution to the problem resulting from the fact that infinite computations of unbounded configurations might never contain the same configuration twice, thus making cycle detection problematic.<|reference_end|> | arxiv | @article{bouajjani2009a,
title={A Framework to Handle Linear Temporal Properties in (\omega-)Regular
Model Checking},
author={Ahmed Bouajjani, Axel Legay, Pierre Wolper},
journal={arXiv preprint arXiv:0901.4080},
year={2009},
archivePrefix={arXiv},
eprint={0901.4080},
primaryClass={cs.LO}
} | bouajjani2009a |
arxiv-6191 | 0901.4081 | Adaptive FPGA NoC-based Architecture for Multispectral Image Correlation | <|reference_start|>Adaptive FPGA NoC-based Architecture for Multispectral Image Correlation: An adaptive FPGA architecture based on the NoC (Network-on-Chip) approach is used for multispectral image correlation. This architecture must contain several distance algorithms, depending on the characteristics of the spectral images and the precision of the authentication. An analysis of the distance algorithms is required, based on algorithmic complexity, result precision, execution time and adaptability of the implementation. This paper presents a comparison of these distance computation algorithms on one spectral database. The result of an RGB algorithm implementation is discussed.<|reference_end|> | arxiv | @article{zhang2009adaptive,
title={Adaptive FPGA NoC-based Architecture for Multispectral Image Correlation},
author={Linlin Zhang (LAHC), Anne Claire Legrand (LAHC), Virginie Fresse
(LAHC), Viktor Fischer (LAHC)},
journal={arXiv preprint arXiv:0901.4081},
year={2009},
archivePrefix={arXiv},
eprint={0901.4081},
primaryClass={cs.AR}
} | zhang2009adaptive |
arxiv-6192 | 0901.4129 | Quasi-Cyclic LDPC Codes: Influence of Proto- and Tanner-Graph Structure on Minimum Hamming Distance Upper Bounds | <|reference_start|>Quasi-Cyclic LDPC Codes: Influence of Proto- and Tanner-Graph Structure on Minimum Hamming Distance Upper Bounds: Quasi-cyclic (QC) low-density parity-check (LDPC) codes are an important instance of proto-graph-based LDPC codes. In this paper we present upper bounds on the minimum Hamming distance of QC LDPC codes and study how these upper bounds depend on graph structure parameters (like variable degrees, check node degrees, girth) of the Tanner graph and of the underlying proto-graph. Moreover, for several classes of proto-graphs we present explicit QC LDPC code constructions that achieve (or come close to) the respective minimum Hamming distance upper bounds. Because of the tight algebraic connection between QC codes and convolutional codes, we can state similar results for the free Hamming distance of convolutional codes. In fact, some QC code statements are established by first proving the corresponding convolutional code statements and then using a result by Tanner that says that the minimum Hamming distance of a QC code is upper bounded by the free Hamming distance of the convolutional code that is obtained by "unwrapping" the QC code.<|reference_end|> | arxiv | @article{smarandache2009quasi-cyclic,
title={Quasi-Cyclic LDPC Codes: Influence of Proto- and Tanner-Graph Structure
on Minimum Hamming Distance Upper Bounds},
author={Roxana Smarandache, Pascal O. Vontobel},
journal={arXiv preprint arXiv:0901.4129},
year={2009},
doi={10.1109/TIT.2011.2173244},
archivePrefix={arXiv},
eprint={0901.4129},
primaryClass={cs.IT cs.DM math.IT}
} | smarandache2009quasi-cyclic |
arxiv-6193 | 0901.4134 | Distributed Lossy Averaging | <|reference_start|>Distributed Lossy Averaging: An information theoretic formulation of the distributed averaging problem previously studied in computer science and control is presented. We assume a network with m nodes, each observing a WGN source. The nodes communicate and perform local processing with the goal of computing the average of the sources to within a prescribed mean squared error distortion. The network rate distortion function R^*(D) for a 2-node network with correlated Gaussian sources is established. A general cutset lower bound on R^*(D) is established and shown to be achievable to within a factor of 2 via a centralized protocol over a star network. A lower bound on the network rate distortion function for distributed weighted-sum protocols, which is larger in order than the cutset bound by a factor of log m, is established. An upper bound on the network rate distortion function for gossip-based weighted-sum protocols, which is only log log m larger in order than the lower bound for a complete graph network, is established. The results suggest that using distributed protocols results in a factor of log m increase in order relative to centralized protocols.<|reference_end|> | arxiv | @article{su2009distributed,
title={Distributed Lossy Averaging},
author={Han-I Su, Abbas El Gamal},
journal={arXiv preprint arXiv:0901.4134},
year={2009},
archivePrefix={arXiv},
eprint={0901.4134},
primaryClass={cs.IT math.IT}
} | su2009distributed |
arxiv-6194 | 0901.4137 | Practical Robust Estimators for the Imprecise Dirichlet Model | <|reference_start|>Practical Robust Estimators for the Imprecise Dirichlet Model: Walley's Imprecise Dirichlet Model (IDM) for categorical i.i.d. data extends the classical Dirichlet model to a set of priors. It overcomes several fundamental problems which other approaches to uncertainty suffer from. Yet, to be useful in practice, one needs efficient ways for computing the imprecise=robust sets or intervals. The main objective of this work is to derive exact, conservative, and approximate, robust and credible interval estimates under the IDM for a large class of statistical estimators, including the entropy and mutual information.<|reference_end|> | arxiv | @article{hutter2009practical,
title={Practical Robust Estimators for the Imprecise Dirichlet Model},
author={Marcus Hutter},
journal={International Journal of Approximate Reasoning, 50:2 (2009) pages
231-242},
year={2009},
archivePrefix={arXiv},
eprint={0901.4137},
primaryClass={math.ST cs.LG stat.ML stat.TH}
} | hutter2009practical |
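For the multinomial parameters themselves, the IDM intervals referenced in the record above have a closed form: with hyperparameter s, the posterior means over the set of Dirichlet priors range over [n_i/(n+s), (n_i+s)/(n+s)]. The sketch below computes only these basic intervals; the paper's contribution, robust and credible intervals for derived quantities such as entropy and mutual information, is not reproduced here.

```python
def idm_intervals(counts, s=1.0):
    """Lower/upper posterior expectations of each category probability
    under the Imprecise Dirichlet Model with hyperparameter s."""
    n = sum(counts)
    return [(c / (n + s), (c + s) / (n + s)) for c in counts]

# three categories observed 7, 2 and 1 times
for count, (lo, hi) in zip([7, 2, 1], idm_intervals([7, 2, 1], s=1.0)):
    print(f"count {count}: [{lo:.3f}, {hi:.3f}]")
```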
arxiv-6195 | 0901.4147 | Determination of Minimal Sets of Control Places for Safe Petri Nets | <|reference_start|>Determination of Minimal Sets of Control Places for Safe Petri Nets: Our objective is to design controlled discrete event systems using a simple Petri-net-based method. It is possible to construct the Petri net model of a system and the specification separately. By synchronous composition of both models, the desired closed-loop model is deduced. Often, uncontrollable transitions lead to forbidden states. The problem of forbidden states is solved using linear constraints. A set of linear constraints makes it possible to forbid the reachability of these states. Generally, the number of these so-called forbidden states, and consequently the number of constraints, is large and leads to a great number of control places. A systematic method to reduce the size and the number of constraints for safe Petri nets is given. By using a method based on Petri net invariants, maximally permissive controllers are determined. The size of the controller is close to the size of the specified model, and it can be implemented on a PLC in a structural way.<|reference_end|> | arxiv | @article{dideban2009determination,
title={Determination of Minimal Sets of Control Places for Safe Petri Nets},
author={Abbas Dideban (GIPSA-lab), Hassane Alla (GIPSA-lab)},
journal={arXiv preprint arXiv:0901.4147},
year={2009},
archivePrefix={arXiv},
eprint={0901.4147},
primaryClass={cs.IT math.IT}
} | dideban2009determination |
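The invariant-based construction the record above builds on (and then reduces) is compact enough to state as code: for a constraint L m <= b on the marking, each row of L yields one monitor place with incidence -L D and initial marking b - L m0. This is a sketch under the usual assumption that the constrained transitions are controllable; the paper's reduction of the constraint set is not shown.

```python
import numpy as np

def control_places(D, m0, L, b):
    """Monitor (control) places enforcing L m <= b on a Petri net with
    incidence matrix D and initial marking m0."""
    L = np.atleast_2d(L)
    Dc = -L @ D                 # arc weights of the monitor places
    mc0 = b - L @ m0            # their initial token counts
    assert np.all(mc0 >= 0), "initial marking already violates a constraint"
    return Dc, mc0

# toy net, places p0..p2 and transitions t0, t1:
# t0 moves a token p0 -> p1, t1 moves it p1 -> p2
D = np.array([[-1, 0],
              [ 1, -1],
              [ 0,  1]])
m0 = np.array([1, 0, 0])
# constraint: p1 and p2 hold at most one token in total
Dc, mc0 = control_places(D, m0, L=np.array([[0, 1, 1]]), b=np.array([1]))
print(Dc, mc0)   # [[-1  0]] [1]: the monitor consumes a token when t0 fires
```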
arxiv-6196 | 0901.4180 | Google distance between words | <|reference_start|>Google distance between words: Cilibrasi and Vitanyi have demonstrated that it is possible to extract the meaning of words from the world-wide web. To achieve this, they rely on the number of webpages that are found through a Google search containing a given word, and they associate the page count with the probability that the word appears on a webpage. Thus, conditional probabilities allow them to correlate one word with another word's meaning. Furthermore, they have developed a similarity distance function that gauges how closely related a pair of words is. We present a specific counterexample to the triangle inequality for this similarity distance function.<|reference_end|> | arxiv | @article{kjos-hanssen2009google,
title={Google distance between words},
author={Bj{\o}rn Kjos-Hanssen and Alberto J. Evangelista},
journal={arXiv preprint arXiv:0901.4180},
year={2009},
archivePrefix={arXiv},
eprint={0901.4180},
primaryClass={cs.CL}
} | kjos-hanssen2009google |
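The distance in question is the normalized Google distance, NGD(x, y) = (max(log f(x), log f(y)) - log f(x, y)) / (log N - min(log f(x), log f(y))), where f counts pages and N is the total number of indexed pages. The record above announces a specific counterexample to the triangle inequality; the page counts below are hypothetical ones of my own choosing that exhibit the same failure, not the paper's numbers.

```python
import math

def ngd(fx, fy, fxy, n_pages):
    """Normalized Google distance from raw page counts."""
    lx, ly, lxy = math.log(fx), math.log(fy), math.log(fxy)
    return (max(lx, ly) - lxy) / (math.log(n_pages) - min(lx, ly))

N, f = 10 ** 12, 10 ** 6
d_xy = ngd(f, f, f, N)        # x and y co-occur on every one of their pages
d_yz = ngd(f, f, f, N)        # y and z likewise
d_xz = ngd(f, f, 10 ** 2, N)  # but x and z almost never co-occur
# d(x,z) = 2/3 while d(x,y) + d(y,z) = 0: the triangle inequality fails
print(d_xy + d_yz, d_xz)
```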
arxiv-6197 | 0901.4192 | Fixing Convergence of Gaussian Belief Propagation | <|reference_start|>Fixing Convergence of Gaussian Belief Propagation: Gaussian belief propagation (GaBP) is an iterative message-passing algorithm for inference in Gaussian graphical models. It is known that when GaBP converges it converges to the correct MAP estimate of the Gaussian random vector and simple sufficient conditions for its convergence have been established. In this paper we develop a double-loop algorithm for forcing convergence of GaBP. Our method computes the correct MAP estimate even in cases where standard GaBP would not have converged. We further extend this construction to compute least-squares solutions of over-constrained linear systems. We believe that our construction has numerous applications, since the GaBP algorithm is linked to solution of linear systems of equations, which is a fundamental problem in computer science and engineering. As a case study, we discuss the linear detection problem. We show that using our new construction, we are able to force convergence of Montanari's linear detection algorithm, in cases where it would originally fail. As a consequence, we are able to increase significantly the number of users that can transmit concurrently.<|reference_end|> | arxiv | @article{johnson2009fixing,
title={Fixing Convergence of Gaussian Belief Propagation},
author={Jason K. Johnson, Danny Bickson and Danny Dolev},
journal={arXiv preprint arXiv:0901.4192},
year={2009},
doi={10.1109/ISIT.2009.5205777},
archivePrefix={arXiv},
eprint={0901.4192},
primaryClass={cs.IT cs.LG math.IT stat.CO}
} | johnson2009fixing |
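Plain GaBP, the algorithm whose convergence the record above repairs, is itself short. The sketch below runs it on a diagonally dominant (hence convergent) system; this is exactly the regime where the paper's double-loop construction is not needed, which is the point of the fix.

```python
import numpy as np

def gabp(A, b, n_iter=100, tol=1e-9):
    """Gaussian belief propagation for A x = b, A symmetric. Messages
    are (precision, mean) pairs on the edges of the graph induced by
    the nonzeros of A."""
    n = len(b)
    edges = [(i, j) for i in range(n) for j in range(n)
             if i != j and A[i, j] != 0]
    P = {e: 0.0 for e in edges}    # precision messages i -> j
    mu = {e: 0.0 for e in edges}   # mean messages i -> j
    for _ in range(n_iter):
        diff = 0.0
        for i, j in edges:
            # everything node i believes, excluding what j told it
            p = A[i, i] + sum(P[k, i] for k, l in edges if l == i and k != j)
            m = (b[i] + sum(P[k, i] * mu[k, i]
                            for k, l in edges if l == i and k != j)) / p
            newP = -A[i, j] ** 2 / p          # marginalize x_i out
            newmu = -A[i, j] * m / newP
            diff = max(diff, abs(newP - P[i, j]), abs(newmu - mu[i, j]))
            P[i, j], mu[i, j] = newP, newmu
        if diff < tol:
            break
    x = np.empty(n)
    for i in range(n):                         # final beliefs
        p = A[i, i] + sum(P[k, i] for k, l in edges if l == i)
        x[i] = (b[i] + sum(P[k, i] * mu[k, i]
                           for k, l in edges if l == i)) / p
    return x

A = np.array([[4.0, 1.0, 0.0],
              [1.0, 4.0, 1.0],
              [0.0, 1.0, 4.0]])
b = np.array([1.0, 2.0, 3.0])
print(gabp(A, b))             # matches np.linalg.solve(A, b)
```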
arxiv-6198 | 0901.4201 | Peer to Peer Optimistic Collaborative Editing on XML-like trees | <|reference_start|>Peer to Peer Optimistic Collaborative Editing on XML-like trees: Collaborative editing consists in editing a common document shared by several independent sites. This may give rise to conflicts when two different users perform simultaneous incompatible operations. Centralized systems solve this problem by using locks that prevent some modifications from occurring and leave the resolution of conflicts to users. On the contrary, peer to peer (P2P) editing doesn't allow locks, and the optimistic approach uses an Integration Transformation IT that reconciles the conflicting operations and ensures convergence (all copies are identical on each site). Two properties TP1 and TP2, relating the set of allowed operations Op and the transformation IT, have been shown to ensure the correctness of the process. The choice of the set Op is crucial to define an integration operation that satisfies TP1 and TP2. Many existing algorithms don't satisfy these properties and are indeed incorrect, i.e., convergence is not guaranteed. No algorithm enjoying both properties is known for strings, and little work has been done for XML trees in a pure P2P framework (that doesn't use time-stamps for instance). We focus on editing unranked unordered labeled trees, so-called XML-like trees, which are considered for instance in the Harmony project. We show that no transformation satisfying TP1 and TP2 can exist for a first set of operations, but we show that TP1 and TP2 hold for a richer set of operations. We show how to combine our approach with any convergent editing process on strings (not necessarily based on integration transformation) to get a convergent process.<|reference_end|> | arxiv | @article{lugiez2009peer,
title={Peer to Peer Optimistic Collaborative Editing on XML-like trees},
author={Denis Lugiez (LIF), St\'ephane Martin (LIF)},
journal={arXiv preprint arXiv:0901.4201},
year={2009},
archivePrefix={arXiv},
eprint={0901.4201},
primaryClass={cs.DS}
} | lugiez2009peer |
arxiv-6199 | 0901.4205 | On the small weight codewords of the functional codes C_2(Q), Q a non-singular quadric | <|reference_start|>On the small weight codewords of the functional codes C_2(Q), Q a non-singular quadric: We study the small weight codewords of the functional code C_2(Q), with Q a non-singular quadric of PG(N,q). We prove that the small weight codewords correspond to the intersections of Q with the singular quadrics of PG(N,q) consisting of two hyperplanes. We also calculate the number of codewords having these small weights.<|reference_end|> | arxiv | @article{edoukou2009on,
title={On the small weight codewords of the functional codes C_2(Q), Q a
non-singular quadric},
author={Fr\'ed\'eric Edoukou (IML), Anja Hallez, Fran\c{c}ois Rodier (IML),
Leo Storme},
journal={arXiv preprint arXiv:0901.4205},
year={2009},
archivePrefix={arXiv},
eprint={0901.4205},
primaryClass={math.AG cs.IT math.IT}
} | edoukou2009on |
arxiv-6200 | 0901.4224 | Geospatial semantics: beyond ontologies, towards an enactive approach | <|reference_start|>Geospatial semantics: beyond ontologies, towards an enactive approach: Current approaches to semantics in the geospatial domain are mainly based on ontologies, but ontologies, since they are built entirely on the symbolic methodology, suffer from the classical problems affecting representational theories, e.g. the symbol grounding problem. We argue for an enactive approach to semantics, where meaning is considered to be an emergent feature arising context-dependently in action. Since representational theories are unable to deal with context, a new formalism is required toward a contextual theory of concepts. SCOP is considered a promising formalism in this sense and is briefly described.<|reference_end|> | arxiv | @article{di donato2009geospatial,
title={Geospatial semantics: beyond ontologies, towards an enactive approach},
author={Pasquale Di Donato},
journal={arXiv preprint arXiv:0901.4224},
year={2009},
archivePrefix={arXiv},
eprint={0901.4224},
primaryClass={cs.AI cs.DB}
} | di donato2009geospatial |