Columns: corpus_id, paper_id, title, abstract, source, bibtex, citation_key
arxiv-2901
0802.4282
Distributed Opportunistic Scheduling For Ad-Hoc Communications Under Noisy Channel Estimation
<|reference_start|>Distributed Opportunistic Scheduling For Ad-Hoc Communications Under Noisy Channel Estimation: Distributed opportunistic scheduling is studied for wireless ad-hoc networks, where many links contend for one channel using random access. In such networks, distributed opportunistic scheduling (DOS) involves a process of joint channel probing and distributed scheduling. It has been shown that under perfect channel estimation, the optimal DOS for maximizing the network throughput is a pure threshold policy. In this paper, this formalism is generalized to explore DOS under noisy channel estimation, where the transmission rate needs to be backed off from the estimated rate to reduce the outage. It is shown that the optimal scheduling policy remains threshold-based, and that the rate threshold turns out to be a function of the variance of the estimation error and a functional of the backoff rate function. Since the optimal backoff rate is intractable, a suboptimal linear backoff scheme that backs off the estimated signal-to-noise ratio (SNR) and hence the rate is proposed. The corresponding optimal backoff ratio and rate threshold can be obtained via an iterative algorithm. Finally, simulation results are provided to illustrate the tradeoff caused by increasing training time to improve channel estimation at the cost of probing efficiency.<|reference_end|>
arxiv
@article{zheng2008distributed, title={Distributed Opportunistic Scheduling For Ad-Hoc Communications Under Noisy Channel Estimation}, author={Dong Zheng, Man-On Pun, Weiyan Ge, Junshan Zhang and H. Vincent Poor}, journal={arXiv preprint arXiv:0802.4282}, year={2008}, doi={10.1109/ICC.2008.698}, archivePrefix={arXiv}, eprint={0802.4282}, primaryClass={cs.IT math.IT} }
zheng2008distributed
arxiv-2902
0802.4284
Distributed Opportunistic Scheduling for MIMO Ad-Hoc Networks
<|reference_start|>Distributed Opportunistic Scheduling for MIMO Ad-Hoc Networks: Distributed opportunistic scheduling (DOS) protocols are proposed for multiple-input multiple-output (MIMO) ad-hoc networks with contention-based medium access. The proposed scheduling protocols distinguish themselves from other existing works by their explicit design for system throughput improvement through exploiting spatial multiplexing and diversity in a {\em distributed} manner. As a result, multiple links can be scheduled to simultaneously transmit over the spatial channels formed by transmit/receive antennas. Taking into account the tradeoff between feedback requirements and system throughput, we propose and compare protocols with different levels of feedback information. Furthermore, in contrast to the conventional random access protocols that ignore the physical channel conditions of contending links, the proposed protocols implement a pure threshold policy derived from optimal stopping theory, i.e. only links with threshold-exceeding channel conditions are allowed for data transmission. Simulation results confirm that the proposed protocols can achieve impressive throughput performance by exploiting spatial multiplexing and diversity.<|reference_end|>
arxiv
@article{pun2008distributed, title={Distributed Opportunistic Scheduling for MIMO Ad-Hoc Networks}, author={Man-On Pun, Weiyan Ge, Dong Zheng, Junshan Zhang and H. Vincent Poor}, journal={arXiv preprint arXiv:0802.4284}, year={2008}, doi={10.1109/ICC.2008.694}, archivePrefix={arXiv}, eprint={0802.4284}, primaryClass={cs.IT math.IT} }
pun2008distributed
arxiv-2903
0802.4291
Opportunistic Scheduling and Beamforming for MIMO-OFDMA Downlink Systems with Reduced Feedback
<|reference_start|>Opportunistic Scheduling and Beamforming for MIMO-OFDMA Downlink Systems with Reduced Feedback: Opportunistic scheduling and beamforming schemes with reduced feedback are proposed for MIMO-OFDMA downlink systems. Unlike the conventional beamforming schemes in which beamforming is implemented solely by the base station (BS) in a per-subcarrier fashion, the proposed schemes take advantage of a novel channel decomposition technique to perform beamforming jointly by the BS and the mobile terminal (MT). The resulting beamforming schemes allow the BS to employ only {\em one} beamforming matrix (BFM) to form beams for {\em all} subcarriers while each MT completes the beamforming task for each subcarrier locally. Consequently, for a MIMO-OFDMA system with $Q$ subcarriers, the proposed opportunistic scheduling and beamforming schemes require only one BFM index and $Q$ supportable throughputs to be returned from each MT to the BS, in contrast to $Q$ BFM indices and $Q$ supportable throughputs required by the conventional schemes. The advantage of the proposed schemes becomes more evident when a further feedback reduction is achieved by grouping adjacent subcarriers into exclusive clusters and returning only cluster information from each MT. Theoretical analysis and computer simulation confirm the effectiveness of the proposed reduced-feedback schemes.<|reference_end|>
arxiv
@article{pun2008opportunistic, title={Opportunistic Scheduling and Beamforming for MIMO-OFDMA Downlink Systems with Reduced Feedback}, author={Man-On Pun, Kyeong Jin Kim and H. Vincent Poor}, journal={arXiv preprint arXiv:0802.4291}, year={2008}, doi={10.1109/ICC.2008.135}, archivePrefix={arXiv}, eprint={0802.4291}, primaryClass={cs.IT math.IT} }
pun2008opportunistic
arxiv-2904
0802.4293
Reduced Incidence algebras description of cobweb posets and KoDAGs
<|reference_start|>Reduced Incidence algebras description of cobweb posets and KoDAGs: After identifying the reduced incidence algebra of an arbitrary cobweb poset, the very first properties of these algebras are disclosed.<|reference_end|>
arxiv
@article{krot-sieniawska2008reduced, title={Reduced Incidence algebras description of cobweb posets and KoDAGs}, author={Ewa Krot-Sieniawska}, journal={arXiv preprint arXiv:0802.4293}, year={2008}, archivePrefix={arXiv}, eprint={0802.4293}, primaryClass={math.CO cs.DM} }
krot-sieniawska2008reduced
arxiv-2905
0802.4299
SINR Analysis of Opportunistic MIMO-SDMA Downlink Systems with Linear Combining
<|reference_start|>SINR Analysis of Opportunistic MIMO-SDMA Downlink Systems with Linear Combining: Opportunistic scheduling (OS) schemes have been proposed previously by the authors for multiuser MIMO-SDMA downlink systems with linear combining. In particular, it has been demonstrated that significant performance improvement can be achieved by incorporating low-complexity linear combining techniques into the design of OS schemes for MIMO-SDMA. However, this previous analysis was performed based on the effective signal-to-interference ratio (SIR), assuming an interference-limited scenario, which is typically a valid assumption in SDMA-based systems. It was shown that the limiting distribution of the effective SIR is of the Frechet type. Surprisingly, the corresponding scaling laws were found to follow $\epsilon\log K$ with $0<\epsilon<1$, rather than the conventional $\log\log K$ form. Inspired by this difference between the scaling law forms, in this paper a systematic approach is developed to derive asymptotic throughput and scaling laws based on the signal-to-interference-plus-noise ratio (SINR) by utilizing extreme value theory. The convergence of the limiting distribution of the effective SINR to the Gumbel type is established. The resulting scaling law is found to be governed by the conventional $\log\log K$ form. These novel results are validated by simulation results. The comparison of SIR- and SINR-based analyses suggests that the SIR-based analysis is more computationally efficient for SDMA-based systems and that it captures the asymptotic system performance with higher fidelity.<|reference_end|>
arxiv
@article{pun2008sinr, title={SINR Analysis of Opportunistic MIMO-SDMA Downlink Systems with Linear Combining}, author={Man-On Pun, Visa Koivunen and H. Vincent Poor}, journal={arXiv preprint arXiv:0802.4299}, year={2008}, doi={10.1109/ICC.2008.699}, archivePrefix={arXiv}, eprint={0802.4299}, primaryClass={cs.IT math.IT} }
pun2008sinr
arxiv-2906
0802.4307
A O(n^8) X O(n^7) Linear Programming Model of the Quadratic Assignment Problem
<|reference_start|>A O(n^8) X O(n^7) Linear Programming Model of the Quadratic Assignment Problem: This paper has been withdrawn because Theorem 21 and Corollary 22 are in error; the modeling idea is OK, but it needs 9-dimensional variables instead of the 8-dimensional variables defined in Notations 6.9. Examples of the correct model (with 9-index variables) are: (1) Diaby, M., "Linear Programming Formulation of the Set Partitioning Problem," International Journal of Operational Research 8:4 (August 2010) pp. 399-427; (2) Diaby, M., "Linear Programming Formulation of the Vertex Coloring Problem," International Journal of Mathematics in Operational Research 2:3 (May 2010) pp. 259-289; (3) Diaby, M., "The Traveling Salesman Problem: A Linear Programming Formulation," WSEAS Transactions on Mathematics, 6:6 (June 2007) pp. 745-754.<|reference_end|>
arxiv
@article{diaby2008a, title={A O(n^8) X O(n^7) Linear Programming Model of the Quadratic Assignment Problem}, author={Moustapha Diaby}, journal={arXiv preprint arXiv:0802.4307}, year={2008}, archivePrefix={arXiv}, eprint={0802.4307}, primaryClass={cs.DM cs.CC} }
diaby2008a
arxiv-2907
0802.4312
Curves That Must Be Retraced
<|reference_start|>Curves That Must Be Retraced: We exhibit a polynomial time computable plane curve $\Gamma$ that has finite length, does not intersect itself, and is smooth except at one endpoint, but has the following property. For every computable parametrization $f$ of $\Gamma$ and every positive integer $n$, there is some positive-length subcurve of $\Gamma$ that $f$ retraces at least $n$ times. In contrast, every computable curve of finite length that does not intersect itself has a constant-speed (hence non-retracing) parametrization that is computable relative to the halting problem.<|reference_end|>
arxiv
@article{gu2008curves, title={Curves That Must Be Retraced}, author={Xiaoyang Gu, Jack H. Lutz, Elvira Mayordomo}, journal={arXiv preprint arXiv:0802.4312}, year={2008}, archivePrefix={arXiv}, eprint={0802.4312}, primaryClass={cs.CC} }
gu2008curves
arxiv-2908
0802.4325
A Simple Yao-Yao-Based Spanner of Bounded Degree
<|reference_start|>A Simple Yao-Yao-Based Spanner of Bounded Degree: It is a standing open question to decide whether the Yao-Yao structure for unit disk graphs (UDGs) is a length spanner or not. This question is highly relevant to the topology control problem for wireless ad hoc networks. In this paper we make progress towards resolving this question by showing that the Yao-Yao structure is a length spanner for UDGs of bounded aspect ratio. We also propose a new local algorithm, called Yao-Sparse-Sink, based on the Yao-Sink method introduced by Li, Wan, Wang and Frieder, that computes a $(1+\epsilon)$-spanner of bounded degree for a given UDG and for a given $\epsilon > 0$. The Yao-Sparse-Sink method enables an efficient local computation of sparse sink trees. Finally, we show that all these structures for UDGs -- Yao, Yao-Yao, Yao-Sink and Yao-Sparse-Sink -- have arbitrarily large weight.<|reference_end|>
arxiv
@article{damian2008a, title={A Simple Yao-Yao-Based Spanner of Bounded Degree}, author={Mirela Damian}, journal={arXiv preprint arXiv:0802.4325}, year={2008}, archivePrefix={arXiv}, eprint={0802.4325}, primaryClass={cs.CG cs.DS} }
damian2008a
arxiv-2909
0802.4326
The Generation of Textual Entailment with NLML in an Intelligent Dialogue system for Language Learning CSIEC
<|reference_start|>The Generation of Textual Entailment with NLML in an Intelligent Dialogue system for Language Learning CSIEC: This research report introduces the generation of textual entailment within the project CSIEC (Computer Simulation in Educational Communication), an interactive web-based human-computer dialogue system with natural language for English instruction. The generation of textual entailment (GTE) is critical to the further improvement of the CSIEC project. Up to now we have found little literature related to GTE. Simulating the process by which a human being learns English as a foreign language, we explore our naive approach to tackling the GTE problem and its algorithm within the framework of CSIEC, i.e. rule annotation in NLML, pattern recognition (matching), and entailment transformation. The time and space complexity of our algorithm is tested with some entailment examples. Further work includes rule annotation based on English textbooks and a GUI interface for normal users to edit the entailment rules.<|reference_end|>
arxiv
@article{jia2008the, title={The Generation of Textual Entailment with NLML in an Intelligent Dialogue system for Language Learning CSIEC}, author={Jiyou Jia}, journal={arXiv preprint arXiv:0802.4326}, year={2008}, archivePrefix={arXiv}, eprint={0802.4326}, primaryClass={cs.CL cs.AI cs.CY} }
jia2008the
arxiv-2910
0802.4330
Eigenvalue Estimates and Mutual Information for the Linear Time-Varying Channel
<|reference_start|>Eigenvalue Estimates and Mutual Information for the Linear Time-Varying Channel: We consider linear time-varying channels with additive white Gaussian noise. For a large class of such channels we derive rigorous estimates of the eigenvalues of the correlation matrix of the effective channel in terms of the sampled time-varying transfer function and, thus, provide a theoretical justification for a relationship that has been frequently observed in the literature. We then use this eigenvalue estimate to derive an estimate of the mutual information of the channel. Our approach is constructive and is based on a careful balance of the trade-off between approximate operator diagonalization, signal dimension loss, and accuracy of eigenvalue estimates.<|reference_end|>
arxiv
@article{farrell2008eigenvalue, title={Eigenvalue Estimates and Mutual Information for the Linear Time-Varying Channel}, author={Brendan Farrell, Thomas Strohmer}, journal={arXiv preprint arXiv:0802.4330}, year={2008}, doi={10.1109/TIT.2011.2161919}, archivePrefix={arXiv}, eprint={0802.4330}, primaryClass={cs.IT math.IT} }
farrell2008eigenvalue
arxiv-2911
0802.4344
An Improved Scheme for Initial Ranging in OFDMA-based Networks
<|reference_start|>An Improved Scheme for Initial Ranging in OFDMA-based Networks: An efficient scheme for initial ranging has recently been proposed by X. Fu et al. in the context of orthogonal frequency-division multiple-access (OFDMA) networks based on the IEEE 802.16e-2005 standard. The proposed solution aims at estimating the power levels and timing offsets of the ranging subscriber stations (RSSs) without taking into account the effect of possible carrier frequency offsets (CFOs) between the received signals and the base station local reference. Motivated by the above problem, in the present work we design a novel ranging scheme for OFDMA in which the ranging signals are assumed to be misaligned both in time and frequency. Our goal is to estimate the timing errors and CFOs of each active RSS. Specifically, CFO estimation is accomplished by resorting to subspace-based methods while a least-squares approach is employed for timing recovery. Computer simulations are used to assess the effectiveness of the proposed solution and to make comparisons with existing alternatives.<|reference_end|>
arxiv
@article{sanguinetti2008an, title={An Improved Scheme for Initial Ranging in OFDMA-based Networks}, author={Luca Sanguinetti, Michele Morelli and H. Vincent Poor}, journal={arXiv preprint arXiv:0802.4344}, year={2008}, doi={10.1109/ICC.2008.652}, archivePrefix={arXiv}, eprint={0802.4344}, primaryClass={cs.IT cs.OH math.IT} }
sanguinetti2008an
arxiv-2912
0802.4350
Role of Symmetry and Geometry in a chaotic Pseudo-Random Bit Generator
<|reference_start|>Role of Symmetry and Geometry in a chaotic Pseudo-Random Bit Generator: In this work, Pseudo-Random Bit Generation (PRBG) based on 2D chaotic mappings of logistic type is considered. The sequences generated with two Pseudorandom Bit Generators (PRBGs) of this type are statistically tested and the computational effectiveness of the generators is estimated. The role played by the symmetry and the geometrical properties of the underlying chaotic attractors is also explored. Considering these PRBGs valid for cryptography, the sizes of the available key spaces are calculated. Additionally, a novel mechanism called 'symmetry-swap' is introduced in order to enhance the PRBG algorithm. It is shown that it can increase the degrees of freedom of the key space, while maintaining the speed and performance of the PRBG.<|reference_end|>
arxiv
@article{pellicer-lostao2008role, title={Role of Symmetry and Geometry in a chaotic Pseudo-Random Bit Generator}, author={Carmen Pellicer-Lostao and Ricardo Lopez-Ruiz}, journal={arXiv preprint arXiv:0802.4350}, year={2008}, archivePrefix={arXiv}, eprint={0802.4350}, primaryClass={nlin.CD cs.CR physics.comp-ph stat.AP} }
pellicer-lostao2008role
arxiv-2913
0802.4363
Estimating the entropy of binary time series: Methodology, some theory and a simulation study
<|reference_start|>Estimating the entropy of binary time series: Methodology, some theory and a simulation study: Partly motivated by entropy-estimation problems in neuroscience, we present a detailed and extensive comparison between some of the most popular and effective entropy estimation methods used in practice: the plug-in method, four different estimators based on the Lempel-Ziv (LZ) family of data compression algorithms, an estimator based on the Context-Tree Weighting (CTW) method, and the renewal entropy estimator. ** Methodology. Three new entropy estimators are introduced. For two of the four LZ-based estimators, a bootstrap procedure is described for evaluating their standard error, and a practical rule of thumb is heuristically derived for selecting the values of their parameters. ** Theory. We prove that, unlike their earlier versions, the two new LZ-based estimators are consistent for every finite-valued, stationary and ergodic process. An effective method is derived for the accurate approximation of the entropy rate of a finite-state HMM with known distribution. Heuristic calculations are presented and approximate formulas are derived for evaluating the bias and the standard error of each estimator. ** Simulation. All estimators are applied to a wide range of data generated by numerous different processes with varying degrees of dependence and memory. Some conclusions drawn from these experiments include: (i) For all estimators considered, the main source of error is the bias. (ii) The CTW method is repeatedly and consistently seen to provide the most accurate results. (iii) The performance of the LZ-based estimators is often comparable to that of the plug-in method. (iv) The main drawback of the plug-in method is its computational inefficiency.<|reference_end|>
arxiv
@article{gao2008estimating, title={Estimating the entropy of binary time series: Methodology, some theory and a simulation study}, author={Y. Gao, I. Kontoyiannis and E. Bienenstock}, journal={arXiv preprint arXiv:0802.4363}, year={2008}, doi={10.3390/entropy-e10020071}, archivePrefix={arXiv}, eprint={0802.4363}, primaryClass={cs.IT math.IT math.ST stat.TH} }
gao2008estimating
arxiv-2914
0802.4390
Low Complexity Sphere Decoding for Spatial Multiplexing MIMO
<|reference_start|>Low Complexity Sphere Decoding for Spatial Multiplexing MIMO: In this paper we present a novel method for decoding multiple-input multiple-output (MIMO) transmissions, which combines sphere decoding (SD) and zero forcing (ZF) techniques to provide a near-optimal, low-complexity, high-performance, constant-time modified sphere decoding algorithm. This algorithm was designed especially for large numbers of transmit antennas, and allows efficient implementation in hardware. We do this by limiting the number of overall SD iterations. Moreover, we make sure that matrices with a high condition number are more likely to undergo SD.<|reference_end|>
arxiv
@article{neder2008low, title={Low Complexity Sphere Decoding for Spatial Multiplexing MIMO}, author={Vadim Neder, Doron Ezri and Motti Haridim}, journal={arXiv preprint arXiv:0802.4390}, year={2008}, archivePrefix={arXiv}, eprint={0802.4390}, primaryClass={cs.IT math.IT} }
neder2008low
arxiv-2915
0802.4450
A Study On Distributed Model Predictive Consensus
<|reference_start|>A Study On Distributed Model Predictive Consensus: We investigate convergence properties of a proposed distributed model predictive control (DMPC) scheme, where agents negotiate to compute an optimal consensus point using an incremental subgradient method based on primal decomposition as described in Johansson et al. [2006, 2007]. The objective of the distributed control strategy is to agree upon and achieve an optimal common output value for a group of agents in the presence of constraints on the agent dynamics using local predictive controllers. Stability analysis using a receding horizon implementation of the distributed optimal consensus scheme is performed. Conditions are given under which convergence can be obtained even if the negotiations do not reach full consensus.<|reference_end|>
arxiv
@article{keviczky2008a, title={A Study On Distributed Model Predictive Consensus}, author={Tamas Keviczky, Karl Henrik Johansson}, journal={arXiv preprint arXiv:0802.4450}, year={2008}, archivePrefix={arXiv}, eprint={0802.4450}, primaryClass={cs.MA} }
keviczky2008a
arxiv-2916
0803.0011
Qtier-Rapor: Managing Spreadsheet Systems & Improving Corporate Performance, Compliance and Governance
<|reference_start|>Qtier-Rapor: Managing Spreadsheet Systems & Improving Corporate Performance, Compliance and Governance: Much of what EuSpRIG discusses is concerned with the integrity of individual spreadsheets. In businesses, interlocking spreadsheets are regularly used to fill functional gaps in core administrative systems. The growth and deployment of such integrated spreadsheet SYSTEMS raises the scale of issues to a whole new level. The correct management of spreadsheet systems is necessary to ensure that the business achieves its goals of improved performance and good corporate governance, within the constraints of legislative compliance - poor management will deliver the opposite. This paper is an anatomy of the real-life issues of the commercial use of spreadsheets in business, and demonstrates how Qtier-Rapor has been used to instil best practice in the use of integrated commercial spreadsheet systems.<|reference_end|>
arxiv
@article{bishop2008qtier-rapor:, title={Qtier-Rapor: Managing Spreadsheet Systems & Improving Corporate Performance, Compliance and Governance}, author={Keith Bishop}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 33-44 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0011}, primaryClass={cs.OH} }
bishop2008qtier-rapor:
arxiv-2917
0803.0014
Automated Termination Proofs for Logic Programs by Term Rewriting
<|reference_start|>Automated Termination Proofs for Logic Programs by Term Rewriting: There are two kinds of approaches for termination analysis of logic programs: "transformational" and "direct" ones. Direct approaches prove termination directly on the basis of the logic program. Transformational approaches transform a logic program into a term rewrite system (TRS) and then analyze termination of the resulting TRS instead. Thus, transformational approaches make all methods previously developed for TRSs available for logic programs as well. However, the applicability of most existing transformations is quite restricted, as they can only be used for certain subclasses of logic programs. (Most of them are restricted to well-moded programs.) In this paper we improve these transformations such that they become applicable for any definite logic program. To simulate the behavior of logic programs by TRSs, we slightly modify the notion of rewriting by permitting infinite terms. We show that our transformation results in TRSs which are indeed suitable for automated termination analysis. In contrast to most other methods for termination of logic programs, our technique is also sound for logic programming without occur check, which is typically used in practice. We implemented our approach in the termination prover AProVE and successfully evaluated it on a large collection of examples.<|reference_end|>
arxiv
@article{schneider-kamp2008automated, title={Automated Termination Proofs for Logic Programs by Term Rewriting}, author={P. Schneider-Kamp, J. Giesl, A. Serebrenik, R. Thiemann}, journal={arXiv preprint arXiv:0803.0014}, year={2008}, archivePrefix={arXiv}, eprint={0803.0014}, primaryClass={cs.LO cs.AI cs.PL} }
schneider-kamp2008automated
arxiv-2918
0803.0015
EuSpRIG 2006 Commercial Spreadsheet Review
<|reference_start|>EuSpRIG 2006 Commercial Spreadsheet Review: This management summary provides an outline of a commercial spreadsheet review process. The aim of this process is to ensure remedial or enhancement work can safely be undertaken on a spreadsheet with a commercially acceptable level of risk of introducing new errors.<|reference_end|>
arxiv
@article{murphy2008eusprig, title={EuSpRIG 2006 Commercial Spreadsheet Review}, author={Simon Murphy}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 45-52 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0015}, primaryClass={cs.SE} }
murphy2008eusprig
arxiv-2919
0803.0018
Another approach to decide on real root existence for univariate Polynomials, and a multivariate extension for 3-SAT
<|reference_start|>Another approach to decide on real root existence for univariate Polynomials, and a multivariate extension for 3-SAT: We present six Theorems on the univariate real Polynomial, using which we develop a new algorithm for deciding the existence of at least one real root for univariate integer Polynomials. Our algorithm outputs that no positive real root exists if and only if the given Polynomial is a factor of a real Polynomial with positive coefficients. Next, we define a transformation that transforms any instance of 3-SAT into a multivariate real Polynomial that has positive coefficients if and only if the instance is not satisfiable.<|reference_end|>
arxiv
@article{chermakani2008another, title={Another approach to decide on real root existence for univariate Polynomials, and a multivariate extension for 3-SAT}, author={Deepak Ponvel Chermakani}, journal={arXiv preprint arXiv:0803.0018}, year={2008}, archivePrefix={arXiv}, eprint={0803.0018}, primaryClass={cs.NA cs.DM} }
chermakani2008another
arxiv-2920
0803.0032
Composition Attacks and Auxiliary Information in Data Privacy
<|reference_start|>Composition Attacks and Auxiliary Information in Data Privacy: Privacy is an increasingly important aspect of data publishing. Reasoning about privacy, however, is fraught with pitfalls. One of the most significant is the auxiliary information (also called external knowledge, background knowledge, or side information) that an adversary gleans from other channels such as the web, public records, or domain knowledge. This paper explores how one can reason about privacy in the face of rich, realistic sources of auxiliary information. Specifically, we investigate the effectiveness of current anonymization schemes in preserving privacy when multiple organizations independently release anonymized data about overlapping populations. 1. We investigate composition attacks, in which an adversary uses independent anonymized releases to breach privacy. We explain why recently proposed models of limited auxiliary information fail to capture composition attacks. Our experiments demonstrate that even a simple instance of a composition attack can breach privacy in practice, for a large class of currently proposed techniques. The class includes k-anonymity and several recent variants. 2. On a more positive note, certain randomization-based notions of privacy (such as differential privacy) provably resist composition attacks and, in fact, the use of arbitrary side information. This resistance enables stand-alone design of anonymization schemes, without the need for explicitly keeping track of other releases. We provide a precise formulation of this property, and prove that an important class of relaxations of differential privacy also satisfy the property. This significantly enlarges the class of protocols known to enable modular design.<|reference_end|>
arxiv
@article{ganta2008composition, title={Composition Attacks and Auxiliary Information in Data Privacy}, author={Srivatsava Ranjit Ganta, Shiva Prasad Kasiviswanathan, Adam Smith}, journal={arXiv preprint arXiv:0803.0032}, year={2008}, archivePrefix={arXiv}, eprint={0803.0032}, primaryClass={cs.DB cs.CR} }
ganta2008composition
arxiv-2921
0803.0034
From a set of parts to an indivisible whole. Part I: Operations in a closed mode
<|reference_start|>From a set of parts to an indivisible whole. Part I: Operations in a closed mode: This paper provides a description of a new method for information processing based on a holistic approach wherein analysis is a direct product of synthesis. The core of the method is iterative averaging of all the elements of a system according to all the parameters describing the elements. Contrary to common logic, the iterative averaging of a system's elements does not result in homogenization of the system; instead, it causes an obligatory subdivision of the system into two alternative subgroups, leaving no outliers. Within each of the formed subgroups, similarity coefficients between the elements reach the value of 1, whereas similarity coefficients between the elements of different subgroups equal a certain constant value greater than 0 but lower than 1. When subjected to iterative averaging, any system consisting of three or more elements, of which at least two are not completely identical, undergoes such a process of bifurcation, which occurs non-linearly. Successive iterative averaging of each of the forming subgroups eventually provides a hierarchical system that reflects relationships between the elements of an input system under analysis. We propose a definition of a natural hierarchy that can exist only in conditions of closeness of a system and can be discovered by applying to the system an effect that allows its elements to interact with each other based on the principle of self-organization. Self-organization can be achieved through an overall and total cross-averaging of a system's elements. We demonstrate the application potentials of the proposed technology on a number of examples, including a system of scattered points, randomized datasets, as well as meteorological and demographic datasets.<|reference_end|>
arxiv
@article{andreev2008from, title={From a set of parts to an indivisible whole. Part I: Operations in a closed mode}, author={Leonid Andreev}, journal={arXiv preprint arXiv:0803.0034}, year={2008}, archivePrefix={arXiv}, eprint={0803.0034}, primaryClass={cs.OH} }
andreev2008from
arxiv-2922
0803.0037
A Survey on Deep Packet Inspection for Intrusion Detection Systems
<|reference_start|>A Survey on Deep Packet Inspection for Intrusion Detection Systems: Deep packet inspection is widely recognized as a powerful technique used in intrusion detection systems for inspecting, deterring and deflecting malicious attacks over the network. Fundamentally, almost all intrusion detection systems have the ability to search through packets and identify contents that match known attacks. In this paper, we survey deep packet inspection implementation techniques, research challenges and algorithms. Finally, we provide a comparison between the different applied systems.<|reference_end|>
arxiv
@article{abuhmed2008a, title={A Survey on Deep Packet Inspection for Intrusion Detection Systems}, author={Tamer AbuHmed, Abedelaziz Mohaisen, DaeHun Nyang}, journal={Magazine of Korea Telecommunication Society, vol. 24, No. 11, pp. 25-36, November 2007}, year={2008}, archivePrefix={arXiv}, eprint={0803.0037}, primaryClass={cs.CR} }
abuhmed2008a
arxiv-2923
0803.0046
One-Time Pad, Arithmetic Coding and Logic Gates: An unifying theme using Dynamical Systems
<|reference_start|>One-Time Pad, Arithmetic Coding and Logic Gates: An unifying theme using Dynamical Systems: In this letter, we prove that the perfectly secure One-Time Pad (OTP) encryption can be seen as finding the initial condition on the binary map under a random switch based on the perfectly random pad. This turns out to be a special case of Grangetto's randomized arithmetic coding performed on the Binary Map. Furthermore, we derive the set of possible perfect secrecy systems using such an approach. Since OTP encryption is an XOR operation, we thus have a dynamical systems implementation of the XOR gate. We show similar implementations for other gates such as NOR, NAND, OR, XNOR, AND and NOT. The dynamical systems framework unifies the three areas to which Shannon made foundational contributions: lossless compression (Source Coding), perfect encryption (Cryptography), and design of logic gates (Computation).<|reference_end|>
arxiv
@article{nagaraj2008one-time, title={One-Time Pad, Arithmetic Coding and Logic Gates: An unifying theme using Dynamical Systems}, author={Nithin Nagaraj and Prabhakar G. Vaidya}, journal={arXiv preprint arXiv:0803.0046}, year={2008}, archivePrefix={arXiv}, eprint={0803.0046}, primaryClass={nlin.CD cs.CR} }
nagaraj2008one-time
arxiv-2924
0803.0048
A Bit-Compatible Shared Memory Parallelization for ILU(k) Preconditioning and a Bit-Compatible Generalization to Distributed Memory
<|reference_start|>A Bit-Compatible Shared Memory Parallelization for ILU(k) Preconditioning and a Bit-Compatible Generalization to Distributed Memory: ILU(k) is a commonly used preconditioner for iterative linear solvers for sparse, non-symmetric systems. It is often preferred for the sake of its stability. We present TPILU(k), the first efficiently parallelized ILU(k) preconditioner that maintains this important stability property. Even better, TPILU(k) preconditioning produces an answer that is bit-compatible with the sequential ILU(k) preconditioning. In terms of performance, the TPILU(k) preconditioning is shown to run faster whenever more cores are made available to it --- while continuing to be as stable as sequential ILU(k). This is in contrast to some competing methods that may become unstable if the degree of thread parallelism is raised too far. Where Block Jacobi ILU(k) fails in an application, it can be replaced by TPILU(k) in order to maintain good performance, while also achieving full stability. As a further optimization, TPILU(k) offers an optional level-based incomplete inverse method as a fast approximation for the original ILU(k) preconditioned matrix. Although this enhancement is not bit-compatible with classical ILU(k), it is bit-compatible with the output from the single-threaded version of the same algorithm. In experiments on a 16-core computer, the enhanced TPILU(k)-based iterative linear solver performed up to 9 times faster. As we approach an era of many-core computing, the ability to efficiently take advantage of many cores will become ever more important. TPILU(k) also demonstrates good performance on clusters and grids. For example, the new algorithm achieves 50 times speedup with 80 nodes for general sparse matrices of dimension 160,000 that are diagonally dominant.<|reference_end|>
arxiv
@article{dong2008a, title={A Bit-Compatible Shared Memory Parallelization for ILU(k) Preconditioning and a Bit-Compatible Generalization to Distributed Memory}, author={Xin Dong, Gene Cooperman}, journal={arXiv preprint arXiv:0803.0048}, year={2008}, archivePrefix={arXiv}, eprint={0803.0048}, primaryClass={cs.DC} }
dong2008a
arxiv-2925
0803.0053
Mobile Agents for Content-Based WWW Distributed Image Retrieval
<|reference_start|>Mobile Agents for Content-Based WWW Distributed Image Retrieval: At present, the de-facto standard for providing contents in the Internet is the World Wide Web. A technology, which is now emerging on the Web, is Content-Based Image Retrieval (CBIR). CBIR applies methods and algorithms from computer science to analyse and index images based on their visual content. Mobile agents push the flexibility of distributed systems to its limits, since not only the computations are dynamically distributed but also the code that performs them. The current commercial applet-based methodologies for accessing image database systems offer limited flexibility, scalability and robustness. In this paper the author proposes a new framework for content-based WWW distributed image retrieval based on Java-based mobile agents. The implementation of the framework shows that its performance is comparable to, and in some cases outperforms, the current approach.<|reference_end|>
arxiv
@article{thampi2008mobile, title={Mobile Agents for Content-Based WWW Distributed Image Retrieval}, author={Sabu M. Thampi, K. Chandra Sekaran}, journal={arXiv preprint arXiv:0803.0053}, year={2008}, archivePrefix={arXiv}, eprint={0803.0053}, primaryClass={cs.DC cs.IR} }
thampi2008mobile
arxiv-2926
0803.0055
A compact topology for sand automata
<|reference_start|>A compact topology for sand automata: In this paper, we exhibit a strong relation between the sand automata configuration space and the cellular automata configuration space. This relation induces a compact topology for sand automata, and a new context in which sand automata are homeomorphic to cellular automata acting on a specific subshift. We show that the existing topological results for sand automata, including the Hedlund-like representation theorem, still hold. In this context, we give a characterization of the cellular automata which are sand automata, and study some dynamical behaviors such as equicontinuity. Furthermore, we deal with the nilpotency. We show that the classical definition is not meaningful for sand automata. Then, we introduce a suitable new notion of nilpotency for sand automata. Finally, we prove that this simple dynamical behavior is undecidable.<|reference_end|>
arxiv
@article{dennunzio2008a, title={A compact topology for sand automata}, author={Alberto Dennunzio (DISCo), Pierre Guillon (IGM), Benoît Masson (LIF)}, journal={arXiv preprint arXiv:0803.0055}, year={2008}, archivePrefix={arXiv}, eprint={0803.0055}, primaryClass={cs.CC} }
dennunzio2008a
arxiv-2927
0803.0134
On disjoint matchings in cubic graphs
<|reference_start|>On disjoint matchings in cubic graphs: For $i=2,3$ and a cubic graph $G$ let $\nu_{i}(G)$ denote the maximum number of edges that can be covered by $i$ matchings. We show that $\nu_{2}(G)\geq {4/5}| V(G)| $ and $\nu_{3}(G)\geq {7/6}| V(G)| $. Moreover, it turns out that $\nu_{2}(G)\leq \frac{|V(G)|+2\nu_{3}(G)}{4}$.<|reference_end|>
arxiv
@article{mkrtchyan2008on, title={On disjoint matchings in cubic graphs}, author={Vahan V. Mkrtchyan, Samvel S. Petrosyan, Gagik N. Vardanyan}, journal={Discrete Mathematics, 310/10-11 (2010), pp. 1588-1613}, year={2008}, doi={10.1016/j.disc.2010.02.007}, archivePrefix={arXiv}, eprint={0803.0134}, primaryClass={cs.DM} }
mkrtchyan2008on
arxiv-2928
0803.0146
Polynomial time algorithms for bi-criteria, multi-objective and ratio problems in clustering and imaging Part I: Normalized cut and ratio regions
<|reference_start|>Polynomial time algorithms for bi-criteria, multi-objective and ratio problems in clustering and imaging Part I: Normalized cut and ratio regions: Partitioning and grouping of similar objects plays a fundamental role in image segmentation and in clustering problems. In such problems a typical goal is to group together similar objects, or pixels in the case of image processing. At the same time another goal is to have each group distinctly dissimilar from the rest and possibly to have the group size fairly large. These goals are often combined as a ratio optimization problem. One example of such a problem is the normalized cut problem, another is the ratio regions problem. We devise here the first polynomial time algorithms solving these problems optimally. The algorithms are efficient and combinatorial. This contrasts with the heuristic approaches used in the image segmentation literature that formulate those problems as nonlinear optimization problems, which are then relaxed and solved with spectral techniques in real numbers. These approaches not only fail to deliver an optimal solution, but they are also computationally expensive. The algorithms presented here use as a subroutine a minimum $s,t$-cut procedure on a related graph which is of polynomial size. The output consists of the optimal solution to the respective ratio problem, as well as a sequence of nested solutions with respect to any relative weighting of the objectives of the numerator and denominator. An extension of the results here to bi-criteria and multi-criteria objective functions is presented in part II.<|reference_end|>
arxiv
@article{hochbaum2008polynomial, title={Polynomial time algorithms for bi-criteria, multi-objective and ratio problems in clustering and imaging. Part I: Normalized cut and ratio regions}, author={Dorit S. Hochbaum}, journal={IEEE Transactions on Pattern Analysis and Machine Intelligence, May 2010 32:5 889-898}, year={2008}, archivePrefix={arXiv}, eprint={0803.0146}, primaryClass={cs.CV cs.DM} }
hochbaum2008polynomial
arxiv-2929
0803.0159
Towards a Spreadsheet Engineering
<|reference_start|>Towards a Spreadsheet Engineering: In this paper, we report some on-going focused research, but are further keen to set it in the context of a proposed bigger picture, as follows. There is a certain depressing pattern about the attitude of industry to spreadsheet error research and a certain pattern about conferences highlighting these issues. Is it not high time to move on from measuring spreadsheet errors to developing an armoury of disciplines and controls? In short, we propose the need to rigorously lay the foundations of a spreadsheet engineering discipline. Clearly, multiple research teams would be required to tackle such a big task. This suggests the need for both national and international collaborative research, since any given group can only address a small segment of the whole. There are already a small number of examples of such on-going international collaborative research. Having established the need for a directed research effort, the rest of the paper then attempts to act as an exemplar in demonstrating and applying this focus. With regard to one such area of research, in a recent paper, Panko (2005) stated that: "...group development and testing appear to be promising areas to pursue". Of particular interest to us are some gaps in the published research record on techniques to reduce errors. We further report on the topics of techniques for cross-checking, time-constraint effects, and some aspects of developer perception.<|reference_end|>
arxiv
@article{vemula2008towards, title={Towards a Spreadsheet Engineering}, author={V.R. Vemula, David Ball, Simon Thorne}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 53-64 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0159}, primaryClass={cs.CY} }
vemula2008towards
arxiv-2930
0803.0162
A Software Development Methodology for Research and Prototyping in Financial Markets
<|reference_start|>A Software Development Methodology for Research and Prototyping in Financial Markets: The objective of this paper is to develop a standardized methodology for software development in the unique industry and culture of financial markets. The prototyping process we present allows the development team to deliver for review and comment intermediate-level models based upon clearly defined customer requirements. This spreadsheet development methodology is presented within a larger business context, that of trading system development, the subject of an upcoming book by the authors of this paper.<|reference_end|>
arxiv
@article{kumiega2008a, title={A Software Development Methodology for Research and Prototyping in Financial Markets}, author={Andrew Kumiega, Ben Van Vliet}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 107-127 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0162}, primaryClass={cs.SE} }
kumiega2008a
arxiv-2931
0803.0163
Rapid Spreadsheet Reshaping with Excelsior: multiple drastic changes to content and layout are easy when you represent enough structure
<|reference_start|>Rapid Spreadsheet Reshaping with Excelsior: multiple drastic changes to content and layout are easy when you represent enough structure: Spreadsheets often need changing in ways made tedious and risky by Excel. For example: simultaneously altering many tables' size, orientation, and position; inserting cross-tabulations; moving data between sheets; splitting and merging sheets. A safer, faster restructuring tool is, we claim, Excelsior. The result of a research project into reducing spreadsheet risk, Excelsior is the first ever tool for modularising spreadsheets; i.e. for building them from components which can be independently created, tested, debugged, and updated. It represents spreadsheets in a way that makes these components explicit, separates them from layout, and allows both components and layout to be changed without breaking dependent formulae. Here, we report experiments to test that this does indeed make such changes easier. In one, we automatically generated a cross-tabulation and added it to a spreadsheet. In the other, we generated new versions of a 10,000-cell housing-finance spreadsheet containing many interconnected 20*40 tables. We varied table sizes from 5*10 to 200*2,000; moved tables between sheets; and flipped table orientations. Each change generated a spreadsheet with different structure but identical outputs; each change took just a few minutes.<|reference_end|>
arxiv
@article{paine2008rapid, title={Rapid Spreadsheet Reshaping with Excelsior: multiple drastic changes to content and layout are easy when you represent enough structure}, author={Jocelyn Paine, Emre Tek, Duncan Williamson}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 129-146 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0163}, primaryClass={cs.SE} }
paine2008rapid
arxiv-2932
0803.0164
Considering Functional Spreadsheet Operator Usage Suggests the Value of Example Driven Modelling for Decision Support Systems
<|reference_start|>Considering Functional Spreadsheet Operator Usage Suggests the Value of Example Driven Modelling for Decision Support Systems: Most spreadsheet surveys, whether reporting on use or on error, focus on the practical application of the spreadsheet in a particular industry. Typically these studies will illustrate that a particular percentage of spreadsheets are used for optimisation and a further percentage are used for 'What if' analysis. Much less common is examining the classes of function, as defined by the vendor, used by modellers to build their spreadsheet models. This alternative analysis allows further insight into the programming nature of spreadsheets and may assist researchers in targeting particular structures in spreadsheet software for further investigation. Further, understanding the functional make-up of spreadsheets allows effective evaluation of novel approaches from a programming point of view. It allows greater insight into studies that report what spreadsheets are used for, since it is explicit which functional structures are in use. We conclude that a deeper understanding of the use of operators, and of the operators' relationship to error, would provide fresh insight into the spreadsheet error problem.<|reference_end|>
arxiv
@article{thorne2008considering, title={Considering Functional Spreadsheet Operator Usage Suggests the Value of Example Driven Modelling for Decision Support Systems}, author={Simon Thorne, David Ball}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 147-158 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0164}, primaryClass={cs.HC cs.SE} }
thorne2008considering
arxiv-2933
0803.0165
Documenting Spreadsheets
<|reference_start|>Documenting Spreadsheets: This paper discusses spreadsheet documentation and new means to achieve this end by using Excel's built-in "Comment" function. By structuring comments, they can be used as an essential tool to fully explain a spreadsheet. This will greatly facilitate spreadsheet change control, risk management and auditing. It will fill a crucial gap in corporate governance by adding essential information that can be managed in order to satisfy internal controls and accountability standards.<|reference_end|>
arxiv
@article{payette2008documenting, title={Documenting Spreadsheets}, author={Raymond Payette}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 163-173 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0165}, primaryClass={cs.HC} }
payette2008documenting
arxiv-2934
0803.0166
Spreadsheet Validation and Analysis through Content Visualization
<|reference_start|>Spreadsheet Validation and Analysis through Content Visualization: Visualizing spreadsheet content provides analytic insight and visual validation of large amounts of spreadsheet data. Oculus Excel Visualizer is a point and click data visualization experiment which directly visualizes Excel data and re-uses the layout and formatting already present in the spreadsheet.<|reference_end|>
arxiv
@article{brath2008spreadsheet, title={Spreadsheet Validation and Analysis through Content Visualization}, author={Richard Brath, Michael Peters}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 175-183 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0166}, primaryClass={cs.HC} }
brath2008spreadsheet
arxiv-2935
0803.0167
Does an awareness of differing types of spreadsheet errors aid end-users in identifying spreadsheets errors?
<|reference_start|>Does an awareness of differing types of spreadsheet errors aid end-users in identifying spreadsheets errors?: The research presented in this paper establishes a valid, and simplified, revision of previous spreadsheet error classifications. This investigation is concerned with the results of a web survey and two web-based, gender- and domain-knowledge-free spreadsheet error identification exercises. The participants of the survey and exercises were a test group of professionals (all of whom regularly use spreadsheets) and a control group of students from the University of Greenwich (UK). The findings show that over 85% of users are also the spreadsheet's developer, supporting the revised spreadsheet error classification. The findings also show that spreadsheet error identification ability is directly affected both by spreadsheet experience and by error-type awareness. In particular, spreadsheet error-type awareness significantly improves the user's ability to identify the more surreptitious, qualitative errors.<|reference_end|>
arxiv
@article{purser2008does, title={Does an awareness of differing types of spreadsheet errors aid end-users in identifying spreadsheets errors?}, author={Michael Purser, David Chadwick}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 185-204 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0167}, primaryClass={cs.HC} }
purser2008does
arxiv-2936
0803.0168
Comparison of Characteristics and Practices amongst Spreadsheet Users with Different Levels of Experience
<|reference_start|>Comparison of Characteristics and Practices amongst Spreadsheet Users with Different Levels of Experience: We developed an internet-based questionnaire on spreadsheet use that we administered to a large number of users in several companies and organizations to document how spreadsheets are currently being developed and used in business. In this paper, we discuss the results drawn from a comparison of responses from individuals with the most experience and expertise with those from individuals with the least. These results describe two views of spreadsheet design and use in organizations, and reflect gaps between these two groups and between these groups and the entire population of nearly 1600 respondents. Moreover, our results indicate that these gaps have multiple dimensions: they reflect not only the context, skill, and practices of individual users but also the policies of large organizations.<|reference_end|>
arxiv
@article{baker2008comparison, title={Comparison of Characteristics and Practices amongst Spreadsheet Users with Different Levels of Experience}, author={Kenneth R. Baker, Stephen G. Powell, Barry Lawson, and Lynn Foster-Johnson}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 205-219 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0168}, primaryClass={cs.HC} }
baker2008comparison
arxiv-2937
0803.0169
An Investigation of the Incidence and Effect of Spreadsheet Errors Caused by the Hard Coding of Input Data Values into Formulas
<|reference_start|>An Investigation of the Incidence and Effect of Spreadsheet Errors Caused by the Hard Coding of Input Data Values into Formulas: The hard coding of input data or constants into spreadsheet formulas is widely recognised as poor spreadsheet model design. However, the importance of avoiding such practice appears to be underestimated, perhaps in light of the lack of quantitative error at the time of occurrence and the recognition that this design defect may never result in a bottom-line error. The paper examines both the academic and practitioner view of such hard coding design flaws. The practitioner or industry viewpoint is gained indirectly through a review of commercial spreadsheet auditing software. The development of an automated (electronic) means for detecting such hard coding is described, together with a discussion of some results obtained through analysis of a number of student and practitioner spreadsheet models.<|reference_end|>
arxiv
@article{blayney2008an, title={An Investigation of the Incidence and Effect of Spreadsheet Errors Caused by the Hard Coding of Input Data Values into Formulas}, author={Paul J. Blayney}, journal={Proc. European Spreadsheet Risks Int. Grp. (EuSpRIG) 2006 22-230 ISBN:1-905617-08-9}, year={2008}, archivePrefix={arXiv}, eprint={0803.0169}, primaryClass={cs.HC} }
blayney2008an
arxiv-2938
0803.0189
Quiescence of Self-stabilizing Gossiping among Mobile Agents in Graphs
<|reference_start|>Quiescence of Self-stabilizing Gossiping among Mobile Agents in Graphs: This paper considers gossiping among mobile agents in graphs: agents move on the graph and have to disseminate their initial information to every other agent. We focus on self-stabilizing solutions for the gossip problem, where agents may start from arbitrary locations in arbitrary states. Self-stabilization requires (some of the) participating agents to keep moving forever, hinting at maximizing the number of agents that could be allowed to stop moving eventually. This paper formalizes the self-stabilizing agent gossip problem, introduces the quiescence number (i.e., the maximum number of eventually stopping agents) of self-stabilizing solutions and investigates the quiescence number with respect to several assumptions related to agent anonymity, synchrony, link duplex capacity, and whiteboard capacity.<|reference_end|>
arxiv
@article{masuzawa2008quiescence, title={Quiescence of Self-stabilizing Gossiping among Mobile Agents in Graphs}, author={Toshimitsu Masuzawa, Sébastien Tixeuil (LIP6, INRIA Futurs)}, journal={arXiv preprint arXiv:0803.0189}, year={2008}, archivePrefix={arXiv}, eprint={0803.0189}, primaryClass={cs.DC cs.PF cs.RO} }
masuzawa2008quiescence
arxiv-2939
0803.0194
Acquisition Accuracy Evaluation in Visual Inspection Systems - a Practical Approach
<|reference_start|>Acquisition Accuracy Evaluation in Visual Inspection Systems - a Practical Approach: This paper proposes a set of parameters and methods for the accuracy evaluation of visual inspection systems. The case of a monochrome board is treated, but practically all conclusions and methods may be extended to colour acquisition. Basically, the proposed parameters are grouped in five sets as follows: internal noise; video ADC quantisation parameters; analogue processing section parameters; dominant frequencies; synchronisation (lock-in) accuracy. On the basis of this set of parameters, a software environment was developed, in conjunction with a test signal generator that produces the "test" images. The paper also presents conclusions from the evaluation of two types of video acquisition boards.<|reference_end|>
arxiv
@article{arsinte2008acquisition, title={Acquisition Accuracy Evaluation in Visual Inspection Systems - a Practical Approach}, author={Radu Arsinte, Costin Miron}, journal={Proceedings of ETc '96 Conference, 1996, Timisoara, Romania}, year={2008}, archivePrefix={arXiv}, eprint={0803.0194}, primaryClass={cs.CV cs.MM} }
arsinte2008acquisition
arxiv-2940
0803.0197
DSP Based System for Real time Voice Synthesis Applications Development
<|reference_start|>DSP Based System for Real time Voice Synthesis Applications Development: This paper describes an experimental system designed for the development of real-time voice synthesis applications. The system is composed of a DSP coprocessor card equipped with a TMS320C25 or TMS320C50 chip, a voice acquisition module (ADDA2), a host computer (IBM-PC compatible), and specific software tools.<|reference_end|>
arxiv
@article{arsinte2008dsp, title={DSP Based System for Real time Voice Synthesis Applications Development}, author={Radu Arsinte, Attila Ferencz, Costin Miron}, journal={Proceedings of SPECOM' 96 Conference, 1996, St. Petersburg, Russia}, year={2008}, archivePrefix={arXiv}, eprint={0803.0197}, primaryClass={cs.SD} }
arsinte2008dsp
arxiv-2941
0803.0225
Random hypergraphs and algorithmics
<|reference_start|>Random hypergraphs and algorithmics: Hypergraphs are structures that can be decomposed or described; in other words, they are recursively countable. Here, we obtain exact and asymptotic enumeration results on hypergraphs by means of exponential generating functions. The number of hypergraph components is bounded, as a generalisation of the Wright inequalities for graphs; the proof rests on a combinatorial understanding of the structure via inclusion-exclusion. Asymptotic results are then obtained through complex analysis by the saddle point method; thanks to generating functions, the proofs are in the end very easy to read. In this way, we characterise: - the components with a given number of vertices and hyperedges, by the expected size of a random hypermatching in these structures; - the random hypergraphs (evolving hyperedge by hyperedge), by the expected number of hyperedges when the first cycle appears in the evolving structure. This work opens the road to further work on random hypergraphs, such as threshold phenomena; the tools used here seem at first sight to be sufficient.<|reference_end|>
arxiv
@article{andriamampianina2008random, title={Random hypergraphs and algorithmics}, author={Tsiriniaina Andriamampianina}, journal={arXiv preprint arXiv:0803.0225}, year={2008}, archivePrefix={arXiv}, eprint={0803.0225}, primaryClass={cs.DM} }
andriamampianina2008random
arxiv-2942
0803.0241
Self-Stabilizing Pulse Synchronization Inspired by Biological Pacemaker Networks
<|reference_start|>Self-Stabilizing Pulse Synchronization Inspired by Biological Pacemaker Networks: We define the ``Pulse Synchronization'' problem that requires nodes to achieve tight synchronization of regular pulse events, in the setting of distributed computing systems. Pulse-coupled synchronization is a phenomenon displayed by a large variety of biological systems, typically overcoming a high level of noise. Inspired by such biological models, a robust and self-stabilizing Byzantine pulse synchronization algorithm for distributed computer systems is presented. The algorithm attains near optimal synchronization tightness while tolerating up to a third of the nodes exhibiting Byzantine behavior concurrently. Pulse synchronization has been previously shown to be a powerful building block for designing algorithms in this severe fault model. We have previously shown how to stabilize general Byzantine algorithms using pulse synchronization. To the best of our knowledge there is no other scheme to do this without the use of synchronized pulses.<|reference_end|>
arxiv
@article{daliot2008self-stabilizing, title={Self-Stabilizing Pulse Synchronization Inspired by Biological Pacemaker Networks}, author={Ariel Daliot, Danny Dolev, and Hanna Parnas}, journal={In Proceedings of the Sixth Symposium on Self-Stabilizing Systems (SSS'03), San Francisco, June 2003. See also LNCS 2704}, year={2008}, archivePrefix={arXiv}, eprint={0803.0241}, primaryClass={cs.DC} }
daliot2008self-stabilizing
arxiv-2943
0803.0248
Networks become navigable as nodes move and forget
<|reference_start|>Networks become navigable as nodes move and forget: We propose a dynamical process for network evolution, aiming at explaining the emergence of the small world phenomenon, i.e., the statistical observation that any pair of individuals are linked by a short chain of acquaintances computable by a simple decentralized routing algorithm, known as greedy routing. Previously proposed dynamical processes made it possible to demonstrate experimentally (by simulations) that the small world phenomenon can emerge from local dynamics. However, the analysis of greedy routing using the probability distributions arising from these dynamics is quite complex because of mutual dependencies. In contrast, our process enables complete formal analysis. It is based on the combination of two simple processes: a random walk process and a harmonic forgetting process. Both processes reflect natural behaviors of the individuals, viewed as nodes in the network of inter-individual acquaintances. We prove that, in k-dimensional lattices, the combination of these two processes generates long-range links mutually independently distributed as a k-harmonic distribution. We analyze the performance of greedy routing in the stationary regime of our process, and prove that the expected number of steps for routing from any source to any target in any multidimensional lattice is a polylogarithmic function of the distance between the two nodes in the lattice. To the best of our knowledge, these results are the first formal proof that navigability in small worlds can emerge from a dynamical process for network evolution. Our dynamical process can find practical applications to the design of spatial gossip and resource location protocols.<|reference_end|>
arxiv
@article{chaintreau2008networks, title={Networks become navigable as nodes move and forget}, author={Augustin Chaintreau, Pierre Fraigniaud, Emmanuelle Lebhar}, journal={arXiv preprint arXiv:0803.0248}, year={2008}, archivePrefix={arXiv}, eprint={0803.0248}, primaryClass={cs.DS} }
chaintreau2008networks
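The record above states that the stationary regime yields k-harmonic long-range links and polylogarithmic greedy routing. As a toy illustration of why harmonic links make greedy routing fast, here is a sketch on a one-dimensional ring; it reproduces the Kleinberg-style routing experiment, not the move-and-forget process itself, and for simplicity resamples the long-range contact at each step (an annealed rather than quenched model).

```python
import bisect
import random

n = 100_000  # ring of n nodes; lattice distance is hop distance on the cycle

def dist(a, b):
    d = abs(a - b)
    return min(d, n - d)

# Precompute the CDF of the 1-harmonic distance distribution P(d) ~ 1/d.
cdf, total = [], 0.0
for d in range(1, n // 2 + 1):
    total += 1.0 / d
    cdf.append(total)

def harmonic_long_link(u):
    """Sample a long-range contact of u with P(v) proportional to 1/dist(u, v),
    the distribution the move-and-forget process converges to for k = 1."""
    d = bisect.bisect_left(cdf, random.random() * total) + 1
    return (u + random.choice((-d, d))) % n

def greedy_route(s, t):
    """Forward to whichever neighbour (ring or long-range) is closest to t."""
    hops = 0
    while s != t:
        candidates = [(s - 1) % n, (s + 1) % n, harmonic_long_link(s)]
        s = min(candidates, key=lambda v: dist(v, t))
        hops += 1
    return hops

routes = [greedy_route(random.randrange(n), random.randrange(n)) for _ in range(20)]
print("mean hops:", sum(routes) / len(routes))  # grows polylogarithmically in n
```

A ring neighbour always strictly decreases the distance to the target, so the routing loop terminates; the harmonic links are what bring the hop count down from linear to polylogarithmic in n.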
arxiv-2944
0803.0265
Blind Fingerprinting
<|reference_start|>Blind Fingerprinting: We study blind fingerprinting, where the host sequence into which fingerprints are embedded is partially or completely unknown to the decoder. This problem relates to a multiuser version of the Gel'fand-Pinsker problem. The number of colluders and the collusion channel are unknown, and the colluders and the fingerprint embedder are subject to distortion constraints. We propose a conditionally constant-composition random binning scheme and a universal decoding rule and derive the corresponding false-positive and false-negative error exponents. The encoder is a stacked binning scheme and makes use of an auxiliary random sequence. The decoder is a {\em maximum doubly-penalized mutual information decoder}, where the significance of each candidate coalition is assessed relative to a threshold that trades off false-positive and false-negative error exponents. The penalty is proportional to coalition size and is a function of the conditional type of host sequence. Positive exponents are obtained at all rates below a certain value, which is therefore a lower bound on public fingerprinting capacity. We conjecture that this value is the public fingerprinting capacity. A simpler threshold decoder is also given, which has similar universality properties but also lower achievable rates. An upper bound on public fingerprinting capacity is also derived.<|reference_end|>
arxiv
@article{wang2008blind, title={Blind Fingerprinting}, author={Ying Wang and Pierre Moulin}, journal={arXiv preprint arXiv:0803.0265}, year={2008}, archivePrefix={arXiv}, eprint={0803.0265}, primaryClass={cs.IT math.IT} }
wang2008blind
arxiv-2945
0803.0316
Staged Self-Assembly: Nanomanufacture of Arbitrary Shapes with O(1) Glues
<|reference_start|>Staged Self-Assembly: Nanomanufacture of Arbitrary Shapes with O(1) Glues: We introduce staged self-assembly of Wang tiles, where tiles can be added dynamically in sequence and where intermediate constructions can be stored for later mixing. This model and its various constraints and performance measures are motivated by a practical nanofabrication scenario through protein-based bioengineering. Staging allows us to break through the traditional lower bounds in tile self-assembly by encoding the shape in the staging algorithm instead of the tiles. All of our results are based on the practical assumption that only a constant number of glues, and thus only a constant number of tiles, can be engineered, as each new glue type requires significant biochemical research and experiments. Under this assumption, traditional tile self-assembly cannot even manufacture an n*n square; in contrast, we show how staged assembly enables manufacture of arbitrary orthogonal shapes in a variety of precise formulations of the model.<|reference_end|>
arxiv
@article{demaine2008staged, title={Staged Self-Assembly: Nanomanufacture of Arbitrary Shapes with O(1) Glues}, author={Erik D. Demaine, Martin L. Demaine, Sandor P. Fekete, Mashhood Ishaque, Eynat Rafalin, Robert T. Schweller and Diane Souvaine}, journal={arXiv preprint arXiv:0803.0316}, year={2008}, archivePrefix={arXiv}, eprint={0803.0316}, primaryClass={cs.CG} }
demaine2008staged
arxiv-2946
0803.0378
Thread algebra for poly-threading
<|reference_start|>Thread algebra for poly-threading: Threads as considered in basic thread algebra are primarily looked upon as behaviours exhibited by sequential programs on execution. It is a fact of life that sequential programs are often fragmented. Consequently, fragmented program behaviours are frequently found. In this paper, we consider this phenomenon. We extend basic thread algebra with the barest mechanism for sequencing of threads that are taken for fragments. This mechanism, called poly-threading, supports both autonomous and non-autonomous thread selection in sequencing. We relate the resulting theory to the algebraic theory of processes known as ACP and use it to describe analytic execution architectures suited for fragmented programs. We also consider the case where the steps of fragmented program behaviours are interleaved in the ways of non-distributed and distributed multi-threading.<|reference_end|>
arxiv
@article{bergstra2008thread, title={Thread algebra for poly-threading}, author={J. A. Bergstra, C. A. Middelburg}, journal={Formal Aspects of Computing, 23(4):567--583, 2011}, year={2008}, doi={10.1007/s00165-011-0178-3}, number={PRG0810}, archivePrefix={arXiv}, eprint={0803.0378}, primaryClass={cs.LO} }
bergstra2008thread
arxiv-2947
0803.0398
In-depth analysis of the Naming Game dynamics: the homogeneous mixing case
<|reference_start|>In-depth analysis of the Naming Game dynamics: the homogeneous mixing case: Language emergence and evolution has recently gained growing attention through multi-agent models and mathematical frameworks to study their behavior. Here we investigate further the Naming Game, a model able to account for the emergence of a shared vocabulary of form-meaning associations through social/cultural learning. Due to the simplicity of both the structure of the agents and their interaction rules, the dynamics of this model can be analyzed in great detail using numerical simulations and analytical arguments. This paper first reviews some existing results and then presents a new overall understanding.<|reference_end|>
arxiv
@article{baronchelli2008in-depth, title={In-depth analysis of the Naming Game dynamics: the homogeneous mixing case}, author={Andrea Baronchelli, Vittorio Loreto, Luc Steels}, journal={Int. J. Mod. Phys. C 19, 785 (2008)}, year={2008}, doi={10.1142/S0129183108012522}, archivePrefix={arXiv}, eprint={0803.0398}, primaryClass={physics.soc-ph cond-mat.stat-mech cs.GT cs.MA} }
baronchelli2008in-depth
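The minimal Naming Game rules analyzed in the record above fit in a few lines. Below is a sketch of the homogeneous-mixing dynamics with the usual invent/success/failure rules; the population size and number of games are illustrative.

```python
import random

N = 200                      # number of agents (homogeneous mixing)
inventory = [set() for _ in range(N)]
next_word = 0

for game in range(200_000):
    s, h = random.sample(range(N), 2)    # speaker and hearer
    if not inventory[s]:                 # empty inventory: invent a new word
        next_word += 1
        inventory[s].add(next_word)
    word = random.choice(tuple(inventory[s]))
    if word in inventory[h]:             # success: both collapse to the word
        inventory[s] = {word}
        inventory[h] = {word}
    else:                                # failure: hearer learns the word
        inventory[h].add(word)

distinct = set().union(*inventory)
print("distinct words remaining:", len(distinct))  # typically 1 at convergence
```

With these rules the population first builds up many competing words and then collapses onto a single shared one, the convergence whose scaling the paper analyzes in detail.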
arxiv-2948
0803.0404
The Complexity of Testing Properties of Simple Games
<|reference_start|>The Complexity of Testing Properties of Simple Games: Simple games cover voting systems in which a single alternative, such as a bill or an amendment, is pitted against the status quo. A simple game or a yes-no voting system is a set of rules that specifies exactly which collections of ``yea'' votes yield passage of the issue at hand. A collection of ``yea'' voters forms a winning coalition. We are interested in performing a complexity analysis of problems on such games depending on the game representation. We consider four natural explicit representations: winning, losing, minimal winning, and maximal losing. We first analyze the computational complexity of obtaining a particular representation of a simple game from a different one. We show that in some cases this transformation can be done in polynomial time, while the other cases require exponential time. The second question is classifying the complexity of testing whether a game is simple or weighted. We show that for the four types of representation both problems can be solved in polynomial time. Finally, we provide results on the complexity of testing whether a simple game or a weighted game is of a special type. In this way, we analyze strongness, properness, decisiveness and homogeneity, which are desirable properties to be fulfilled by a simple game.<|reference_end|>
arxiv
@article{freixas2008the, title={The Complexity of Testing Properties of Simple Games}, author={Josep Freixas, Xavier Molinero, Martin Olsen, Maria Serna}, journal={arXiv preprint arXiv:0803.0404}, year={2008}, archivePrefix={arXiv}, eprint={0803.0404}, primaryClass={cs.GT cs.CC} }
freixas2008the
arxiv-2949
0803.0405
Multi-dimensional sparse time series: feature extraction
<|reference_start|>Multi-dimensional sparse time series: feature extraction: We present an analysis of multi-dimensional time series via entropy and statistical linguistic techniques. We define three markers encoding the behavior of the series, after it has been translated into a multi-dimensional symbolic sequence. The leading component and the trend of the series with respect to a moving window analysis result from the entropy analysis and label the dynamical evolution of the series. The diversification formalizes the differentiation in the use of recurrent patterns, from a Zipf law point of view. These markers are the starting point of further analysis such as classification or clustering of large databases of multi-dimensional time series, prediction of future behavior and attribution of new data. We also present an application to economic data. We deal with measurements of money invested by some business companies in the advertising market for different media sources.<|reference_end|>
arxiv
@article{franciosi2008multi-dimensional, title={Multi-dimensional sparse time series: feature extraction}, author={Marco Franciosi, Giulia Menconi}, journal={arXiv preprint arXiv:0803.0405}, year={2008}, archivePrefix={arXiv}, eprint={0803.0405}, primaryClass={cs.MM cs.IR} }
franciosi2008multi-dimensional
arxiv-2950
0803.0412
Essential conditions for evolution of communication within a species
<|reference_start|>Essential conditions for evolution of communication within a species: A major obstacle in analyzing the evolution of information exchange and processing is our insufficient understanding of the underlying signaling and decision-making biological mechanisms. For instance, it is unclear why humans are unique in developing such extensive communication abilities. To address this problem, a method based on the mutual information approach is developed that evaluates the information content of communication between interacting individuals through correlations of their behavior patterns (rather than calculating the information load of exchanged discrete signals, e.g. Shannon entropy). It predicts that correlated interactions of the indirect reciprocity type, together with affective behavior and selection rules changing with time, are necessary conditions for the emergence of significant information exchange. Population size variations accelerate this development. These results are supported by evidence of demographic bottlenecks distinguishing the human evolutionary line from that of other species (e.g. apes). They also indicate new pathways for the evolution of information-based phenomena, such as intelligence and complexity.<|reference_end|>
arxiv
@article{feigel2008essential, title={Essential conditions for evolution of communication within a species}, author={A. Feigel}, journal={arXiv preprint arXiv:0803.0412}, year={2008}, doi={10.1016/j.jtbi.2008.07.005}, archivePrefix={arXiv}, eprint={0803.0412}, primaryClass={q-bio.PE cs.GT physics.soc-ph} }
feigel2008essential
arxiv-2951
0803.0428
Zero-Forcing Precoding for Frequency Selective MIMO Channels with $H^\infty$ Criterion and Causality Constraint
<|reference_start|>Zero-Forcing Precoding for Frequency Selective MIMO Channels with $H^\infty$ Criterion and Causality Constraint: We consider zero-forcing equalization of frequency selective MIMO channels by causal, linear time-invariant precoders in the presence of intersymbol interference. Our motivation is twofold. First, we are concerned with the optimal performance of causal precoders from a worst-case point of view. We therefore construct an optimal causal precoder; in contrast to other works, our construction is not limited to finite or rational impulse responses. Second, we derive a novel numerical approach to computing the optimal performance index achievable by causal precoders for given channels. This quantity is important in the numerical determination of optimal precoders.<|reference_end|>
arxiv
@article{wahls2008zero-forcing, title={Zero-Forcing Precoding for Frequency Selective MIMO Channels with $H^\infty$ Criterion and Causality Constraint}, author={Sander Wahls, Holger Boche, Volker Pohl}, journal={Signal Processing, Vol. 89, No. 9, pp. 1754-1761, Sep. 2009}, year={2008}, doi={10.1016/j.sigpro.2009.03.010}, archivePrefix={arXiv}, eprint={0803.0428}, primaryClass={cs.IT math.IT} }
wahls2008zero-forcing
arxiv-2952
0803.0439
Optimizing polynomials for floating-point implementation
<|reference_start|>Optimizing polynomials for floating-point implementation: The floating-point implementation of a function on an interval often reduces to polynomial approximation, the polynomial being typically provided by the Remez algorithm. However, the floating-point evaluation of a Remez polynomial sometimes leads to catastrophic cancellations. This happens when some of the polynomial coefficients are very small in magnitude with respect to the others. In this case, it is better to force these coefficients to zero, which also reduces the operation count. This technique, classically used for odd or even functions, may be generalized to a much larger class of functions. An algorithm is presented that forces the smaller coefficients of the initial polynomial to zero, thanks to a modified Remez algorithm targeting an incomplete monomial basis. One advantage of this technique is that it is purely numerical, the function being used as a numerical black box. This algorithm is implemented within a larger polynomial implementation tool that is demonstrated on a range of examples, resulting in polynomials with fewer coefficients than those obtained the usual way.<|reference_end|>
arxiv
@article{de dinechin2008optimizing, title={Optimizing polynomials for floating-point implementation}, author={Florent De Dinechin (LIP), Christoph Quirin Lauter (LIP)}, journal={arXiv preprint arXiv:0803.0439}, year={2008}, archivePrefix={arXiv}, eprint={0803.0439}, primaryClass={cs.NA cs.MS} }
de dinechin2008optimizing
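The effect of targeting an incomplete monomial basis, as described in the record above, can be imitated with a plain least-squares fit restricted to chosen exponents. This is only a stand-in for the modified Remez step (least squares rather than minimax), but it shows the payoff: for an odd function like sin, dropping the even exponents halves the coefficient count at essentially no cost in accuracy.

```python
import numpy as np

def fit_incomplete_basis(f, exponents, a=-1.0, b=1.0, pts=200):
    """Least-squares fit of f on [a, b] using only the given monomial
    exponents (a sketch of the incomplete-basis idea, not minimax Remez)."""
    x = np.linspace(a, b, pts)
    V = np.column_stack([x**e for e in exponents])   # selected Vandermonde columns
    coeffs, *_ = np.linalg.lstsq(V, f(x), rcond=None)
    err = np.max(np.abs(V @ coeffs - f(x)))          # max error on the grid
    return dict(zip(exponents, coeffs)), err

full = list(range(8))        # exponents 0..7
odd = [1, 3, 5, 7]           # incomplete basis: odd exponents only
_, err_full = fit_incomplete_basis(np.sin, full)
_, err_odd = fit_incomplete_basis(np.sin, odd)
print(f"full basis max err {err_full:.2e}; odd-only max err {err_odd:.2e}")
```

The two errors come out essentially equal, while the odd-only polynomial needs half as many coefficients and thus half as many floating-point operations to evaluate.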
arxiv-2953
0803.0450
Inferring Neuronal Network Connectivity from Spike Data: A Temporal Datamining Approach
<|reference_start|>Inferring Neuronal Network Connectivity from Spike Data: A Temporal Datamining Approach: Understanding the functioning of a neural system in terms of its underlying circuitry is an important problem in neuroscience. Recent developments in electrophysiology and imaging allow one to simultaneously record activities of hundreds of neurons. Inferring the underlying neuronal connectivity patterns from such multi-neuronal spike train data streams is a challenging statistical and computational problem. This task involves finding significant temporal patterns from vast amounts of symbolic time series data. In this paper we show that the frequent episode mining methods from the field of temporal data mining can be very useful in this context. In the frequent episode discovery framework, the data is viewed as a sequence of events, each of which is characterized by an event type and its time of occurrence and episodes are certain types of temporal patterns in such data. Here we show that, using the set of discovered frequent episodes from multi-neuronal data, one can infer different types of connectivity patterns in the neural system that generated it. For this purpose, we introduce the notion of mining for frequent episodes under certain temporal constraints; the structure of these temporal constraints is motivated by the application. We present algorithms for discovering serial and parallel episodes under these temporal constraints. Through extensive simulation studies we demonstrate that these methods are useful for unearthing patterns of neuronal network connectivity.<|reference_end|>
arxiv
@article{patnaik2008inferring, title={Inferring Neuronal Network Connectivity from Spike Data: A Temporal Datamining Approach}, author={Debprakash Patnaik (Electrical Engg. Dept., Indian Institute of Science, Bangalore), and P. S. Sastry (Electrical Engg. Dept., Indian Institute of Science, Bangalore), and K. P. Unnikrishnan (General Motors R&D, Warren)}, journal={arXiv preprint arXiv:0803.0450}, year={2008}, archivePrefix={arXiv}, eprint={0803.0450}, primaryClass={cs.DB q-bio.NC} }
patnaik2008inferring
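As a toy illustration of episode mining under a temporal constraint, as used in the record above for spike data, the sketch below counts non-overlapped occurrences of a two-event serial episode A -> B whose inter-event gap is at most T; the event stream and the constraint value are invented.

```python
# Event stream: (event_type, time) pairs, sorted by time.
events = [("A", 1), ("C", 2), ("B", 3), ("A", 5), ("B", 9),
          ("A", 10), ("B", 11), ("B", 12)]

def count_serial_AB(events, T):
    """Count non-overlapped occurrences of the serial episode A -> B
    with inter-event gap at most T (a simple temporal constraint)."""
    count, pending_a = 0, None
    for etype, t in events:
        if etype == "A":
            pending_a = t                 # most recent unmatched A
        elif etype == "B" and pending_a is not None:
            if t - pending_a <= T:
                count += 1
                pending_a = None          # non-overlapped: consume the A
    return count

print(count_serial_AB(events, T=3))  # -> 2 (A@1->B@3, A@10->B@11; A@5->B@9 violates the gap)
```

In the neuroscience setting, a high count for episode A -> B within a physiologically plausible delay window is the evidence used to posit a connection from neuron A to neuron B.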
arxiv-2954
0803.0473
Stream sampling for variance-optimal estimation of subset sums
<|reference_start|>Stream sampling for variance-optimal estimation of subset sums: From a high volume stream of weighted items, we want to maintain a generic sample of a certain limited size $k$ that we can later use to estimate the total weight of arbitrary subsets. This is the classic context of on-line reservoir sampling, thinking of the generic sample as a reservoir. We present an efficient reservoir sampling scheme, $\mathrm{VarOpt}_k$, that dominates all previous schemes in terms of estimation quality. $\mathrm{VarOpt}_k$ provides {\em variance optimal unbiased estimation of subset sums}. More precisely, if we have seen $n$ items of the stream, then for {\em any} subset size $m$, our scheme based on $k$ samples minimizes the average variance over all subsets of size $m$. In fact, the optimality is against any off-line scheme with $k$ samples tailored for the concrete set of items seen. In addition to optimal average variance, our scheme provides tighter worst-case bounds on the variance of {\em particular} subsets than previously possible. It is efficient, handling each new item of the stream in $O(\log k)$ time. Finally, it is particularly well suited for combination of samples from different streams in a distributed setting.<|reference_end|>
arxiv
@article{cohen2008stream, title={Stream sampling for variance-optimal estimation of subset sums}, author={Edith Cohen, Nick Duffield, Haim Kaplan, Carsten Lund, and Mikkel Thorup}, journal={arXiv preprint arXiv:0803.0473}, year={2008}, archivePrefix={arXiv}, eprint={0803.0473}, primaryClass={cs.DS} }
cohen2008stream
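The estimator structure behind such reservoir schemes can be illustrated generically: each sampled item is kept with an adjusted weight equal to its weight divided by its inclusion probability, and a subset-sum estimate sums the adjusted weights of sampled subset members (the Horvitz-Thompson principle). The sketch below uses simple Poisson sampling with a fixed threshold tau, which is *not* the VarOpt_k scheme itself (whose inclusion probabilities and sample size are chosen to minimize average variance), but it exposes the same unbiased-estimation mechanics.

```python
import random

items = {f"i{k}": random.expovariate(1.0) for k in range(1000)}  # id -> weight

# Poisson sampling with inclusion probability p_i = min(1, w_i / tau).
# Adjusted weight w_i / p_i simplifies to max(w_i, tau).
tau = 2.0
sample = {i: max(w, tau)
          for i, w in items.items()
          if random.random() < min(1.0, w / tau)}

subset = {i for i in items if i.endswith("7")}   # arbitrary query subset
estimate = sum(aw for i, aw in sample.items() if i in subset)
truth = sum(items[i] for i in subset)
print(f"estimate={estimate:.1f}  truth={truth:.1f}")  # unbiased across runs
```

Poisson sampling has a random sample size; the contribution of the paper is to achieve the fixed reservoir size k while making the resulting subset-sum estimators variance optimal.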
arxiv-2955
0803.0476
Fast unfolding of communities in large networks
<|reference_start|>Fast unfolding of communities in large networks: We propose a simple method to extract the community structure of large networks. Our method is a heuristic method based on modularity optimization. It is shown to outperform all other known community detection methods in terms of computation time. Moreover, the quality of the communities detected is very good, as measured by the so-called modularity. This is shown first by identifying language communities in a Belgian mobile phone network of 2.6 million customers and by analyzing a web graph of 118 million nodes and more than one billion links. The accuracy of our algorithm is also verified on ad-hoc modular networks.<|reference_end|>
arxiv
@article{blondel2008fast, title={Fast unfolding of communities in large networks}, author={Vincent D. Blondel, Jean-Loup Guillaume, Renaud Lambiotte and Etienne Lefebvre}, journal={J. Stat. Mech. (2008) P10008}, year={2008}, doi={10.1088/1742-5468/2008/10/P10008}, archivePrefix={arXiv}, eprint={0803.0476}, primaryClass={physics.soc-ph cond-mat.stat-mech cs.CY cs.DS} }
blondel2008fast
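The quality function optimized by the method in the record above is the Newman-Girvan modularity. Below is a minimal sketch of computing Q for an undirected, unweighted graph and a given partition; the edge list and community assignment are toy data, and the method itself would greedily move nodes between communities to increase this quantity.

```python
from collections import Counter

edges = [(0, 1), (0, 2), (1, 2), (2, 3), (3, 4), (3, 5), (4, 5)]
community = {0: "a", 1: "a", 2: "a", 3: "b", 4: "b", 5: "b"}

m = len(edges)
degree = Counter()
for u, v in edges:
    degree[u] += 1
    degree[v] += 1

# Q = sum over communities c of [ e_c/m - (d_c / 2m)^2 ], where e_c is the
# number of intra-community edges and d_c the total degree of community c.
intra, dtot = Counter(), Counter()
for u, v in edges:
    if community[u] == community[v]:
        intra[community[u]] += 1
for node, d in degree.items():
    dtot[community[node]] += d

Q = sum(intra[c] / m - (dtot[c] / (2 * m)) ** 2 for c in dtot)
print(f"modularity Q = {Q:.3f}")  # 0.357 for this two-community toy graph
```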
arxiv-2956
0803.0515
Intuitive Source Code Visualization Tools for Improving Student Comprehension: BRICS
<|reference_start|>Intuitive Source Code Visualization Tools for Improving Student Comprehension: BRICS: Even relatively simple code analysis can be a daunting task for many first year students. Perceived complexity, coupled with foreign and harsh syntax, often outstrips students' ability to take in what they are seeing in terms of their verbal memory. That is, first year students often lack the experience to encode critical building blocks in source code, and their interrelationships, into their own words. We believe this argues for the need for IDEs to provide additional support for representations that would appeal directly to visual memory. In this paper, we examine this need for intuitive source code visualization tools that are easily accessible to novice programmers, discuss the requirements for such a tool, and suggest a novel idea that takes advantage of human peripheral vision to achieve stronger overall code structure awareness.<|reference_end|>
arxiv
@article{pearson2008intuitive, title={Intuitive Source Code Visualization Tools for Improving Student Comprehension: BRICS}, author={Christopher Pearson, Celina Gibbs, Yvonne Coady}, journal={arXiv preprint arXiv:0803.0515}, year={2008}, archivePrefix={arXiv}, eprint={0803.0515}, primaryClass={cs.HC} }
pearson2008intuitive
arxiv-2957
0803.0528
Une approche modulaire probabiliste pour le routage \`a Qualit\'e de Service int\'egr\'ee
<|reference_start|>Une approche modulaire probabiliste pour le routage \`a Qualit\'e de Service int\'egr\'ee: Due to emerging real-time and multimedia applications, efficient routing of information packets in dynamically changing communication networks requires that, as the load levels, traffic patterns and topology of the network change, the routing policy also adapts. In this paper we focus on QoS-based routing, developing a neuro-dynamic programming approach to construct dynamic state-dependent routing policies. We propose an adaptive packet routing algorithm based on reinforcement learning which optimizes two criteria: cumulative path cost and end-to-end delay. Numerical results obtained with the OPNET simulator, for different statistical distributions of packet interarrival times and different levels of traffic load, show that the proposed approach gives better results than standard optimal-path routing algorithms.<|reference_end|>
arxiv
@article{hoceini2008une, title={Une approche modulaire probabiliste pour le routage \`a Qualit\'e de Service int\'egr\'ee}, author={Said Hoceini (LISSI - Ea 3956), Abdelhamid Mellouk (LISSI - Ea 3956), Hayet Hafi (LISSI - Ea 3956)}, journal={Colloque Francophone sur l'Ing\'enierie des Protocoles (CFIP), Les Arcs : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0803.0528}, primaryClass={cs.NI} }
hoceini2008une
arxiv-2958
0803.0529
Evaluation and exploitation of knowledge robustness in knowledge-based systems
<|reference_start|>Evaluation and exploitation of knowledge robustness in knowledge-based systems: Industrial knowledge is complex, difficult to formalize and very dynamic, owing to the continuous development of techniques and technologies. Verifying the validity of the knowledge base at the time of its elaboration is not sufficient. To be exploitable, this knowledge must remain usable under conditions (slightly) different from those in which it was formalized. It therefore becomes vital for the company to continuously evaluate the quality of the industrial knowledge implemented in the system. This evaluation is founded on the concept of robustness of the knowledge, formalized by conceptual graphs. The evaluation method is supported by a computerized tool.<|reference_end|>
arxiv
@article{barcikowski2008evaluation, title={Evaluation and exploitation of knowledge robustness in knowledge-based systems}, author={M. Barcikowski (INRIA Sophia Antipolis), P. Pernelle, A. Lefebvre (AIB_ERN), M. Martinez, J. Renaud}, journal={Dans Proceedings - 9th IFAC (IEEE Control Systems Society) Symp. Automated Syst. Based on Human Skill and Knowledge (ASBoHS'06), Nancy : France (2006)}, year={2008}, archivePrefix={arXiv}, eprint={0803.0529}, primaryClass={cs.OH} }
barcikowski2008evaluation
arxiv-2959
0803.0597
Cooperative Spectrum Sensing Using Random Matrix Theory
<|reference_start|>Cooperative Spectrum Sensing Using Random Matrix Theory: In this paper, using tools from asymptotic random matrix theory, a new cooperative scheme for frequency band sensing is introduced for both AWGN and fading channels. Unlike previous works in the field, the new scheme does not require the knowledge of the noise statistics or its variance and is related to the behavior of the largest and smallest eigenvalue of random matrices. Remarkably, simulations show that the asymptotic claims hold even for a small number of observations (which makes it convenient for time-varying topologies), outperforming classical energy detection techniques.<|reference_end|>
arxiv
@article{cardoso2008cooperative, title={Cooperative Spectrum Sensing Using Random Matrix Theory}, author={L. S. Cardoso, M. Debbah, P. Bianchi, J. Najim}, journal={arXiv preprint arXiv:0803.0597}, year={2008}, archivePrefix={arXiv}, eprint={0803.0597}, primaryClass={cs.IT math.IT} }
cardoso2008cooperative
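A sketch of the eigenvalue-ratio test suggested by the abstract above: K cooperating sensors collect N samples each, and the ratio of the largest to the smallest eigenvalue of the sample covariance is compared with a threshold. No knowledge of the noise variance enters the statistic (it cancels in the ratio); the threshold value used here is illustrative, not the random-matrix-theory asymptotic one derived in the paper.

```python
import numpy as np

rng = np.random.default_rng(0)
K, N = 8, 500            # sensors x samples per sensor

def ratio_statistic(Y):
    """Largest/smallest eigenvalue of the sample covariance Y Y^H / N."""
    R = (Y @ Y.conj().T) / Y.shape[1]
    eig = np.linalg.eigvalsh(R)      # ascending, real for Hermitian R
    return eig[-1] / eig[0]

# H0: noise only (the unknown noise variance cancels in the ratio).
noise = rng.normal(size=(K, N)) + 1j * rng.normal(size=(K, N))
# H1: a common primary signal seen through per-sensor channel gains, plus noise.
h = rng.normal(size=(K, 1)) + 1j * rng.normal(size=(K, 1))
s = rng.normal(size=(1, N)) + 1j * rng.normal(size=(1, N))
signal = h @ s + noise

threshold = 3.0          # illustrative; the paper derives it asymptotically
for name, Y in (("H0", noise), ("H1", signal)):
    t = ratio_statistic(Y)
    print(f"{name}: ratio={t:.2f} -> {'occupied' if t > threshold else 'free'}")
```

Under H0 all eigenvalues cluster (ratio near 1 for large N), while a present primary signal inflates the largest eigenvalue, which is what the detector exploits.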
arxiv-2960
0803.0610
On the Approximate Eigenstructure of Time-Varying Channels
<|reference_start|>On the Approximate Eigenstructure of Time-Varying Channels: In this article we consider the approximate description of a doubly-dispersive channel by its symbol. We focus on channel operators with compactly supported spreading, which are widely used to represent fast fading multipath communication channels. The concept of approximate eigenstructure is introduced, which measures the accuracy E_p of the approximation of the channel operation as a pure multiplication in a given L_p-norm. Two variants of such an approximate Weyl symbol calculus are studied, which have important applications in several models for time-varying mobile channels. Typically, such channels have random spreading functions (inverse Weyl transform) defined on a common support U of finite non-zero size, such that the approximate eigenstructure has to be measured with respect to certain norms of the spreading process. We derive several explicit relations to the size |U| of the support. We show that the characterization of the ratio of E_p to some L_q-norm of the spreading function is related to weighted norms of ambiguity and Wigner functions. We present the connection to localization operators and give new bounds on the ability of localization of ambiguity functions and Wigner functions in U. Our analysis generalizes and improves recent results for the case p=2 and q=1.<|reference_end|>
arxiv
@article{jung2008on, title={On the Approximate Eigenstructure of Time-Varying Channels}, author={Peter Jung}, journal={arXiv preprint arXiv:0803.0610}, year={2008}, archivePrefix={arXiv}, eprint={0803.0610}, primaryClass={cs.IT math.IT} }
jung2008on
arxiv-2961
0803.0632
Network Coding for Distributed Storage Systems
<|reference_start|>Network Coding for Distributed Storage Systems: Distributed storage systems provide reliable access to data through redundancy spread over individually unreliable nodes. Application scenarios include data centers, peer-to-peer storage systems, and storage in wireless networks. Storing data using an erasure code, in fragments spread across nodes, requires less redundancy than simple replication for the same level of reliability. However, since fragments must be periodically replaced as nodes fail, a key question is how to generate encoded fragments in a distributed way while transferring as little data as possible across the network. For an erasure coded system, a common practice to repair from a node failure is for a new node to download subsets of data stored at a number of surviving nodes, reconstruct a lost coded block using the downloaded data, and store it at the new node. We show that this procedure is sub-optimal. We introduce the notion of regenerating codes, which allow a new node to download \emph{functions} of the stored data from the surviving nodes. We show that regenerating codes can significantly reduce the repair bandwidth. Further, we show that there is a fundamental tradeoff between storage and repair bandwidth which we theoretically characterize using flow arguments on an appropriately constructed graph. By invoking constructive results in network coding, we introduce regenerating codes that can achieve any point in this optimal tradeoff.<|reference_end|>
arxiv
@article{dimakis2008network, title={Network Coding for Distributed Storage Systems}, author={Alexandros G. Dimakis, P. Brighten Godfrey, Yunnan Wu, Martin J. Wainwright, Kannan Ramchandran}, journal={arXiv preprint arXiv:0803.0632}, year={2008}, number={EECS 14546}, archivePrefix={arXiv}, eprint={0803.0632}, primaryClass={cs.NI cs.IT math.IT} }
dimakis2008network
arxiv-2962
0803.0653
Aggregating and Deploying Network Access Control Policies
<|reference_start|>Aggregating and Deploying Network Access Control Policies: The existence of errors or inconsistencies in the configuration of security components, such as filtering routers and/or firewalls, may lead to weak access control policies -- potentially easy to be evaded by unauthorized parties. We present in this paper a proposal to create, manage, and deploy consistent policies in those components in an efficient way. To do so, we combine two main approaches. The first approach is the use of an aggregation mechanism that yields consistent configurations or signals inconsistencies. Through this mechanism we can fold existing policies of a given system and create a consistent and global set of access control rules -- easy to maintain and manage by using a single syntax. The second approach is the use of a refinement mechanism that guarantees the proper deployment of such a global set of rules into the system, yet free of inconsistencies.<|reference_end|>
arxiv
@article{garcia-alfaro2008aggregating, title={Aggregating and Deploying Network Access Control Policies}, author={Joaquin Garcia-Alfaro, Frederic Cuppens, Nora Cuppens-Boulahia}, journal={Proc. 2007 International Symposium on Frontiers in Availability, Reliability and Security (FARES), Vienna (Austria), 10-13 April 2007 (10/04/2007), 532-539}, year={2008}, archivePrefix={arXiv}, eprint={0803.0653}, primaryClass={cs.CR cs.NI} }
garcia-alfaro2008aggregating
arxiv-2963
0803.0661
Towards an Optimal Separation of Space and Length in Resolution
<|reference_start|>Towards an Optimal Separation of Space and Length in Resolution: Most state-of-the-art satisfiability algorithms today are variants of the DPLL procedure augmented with clause learning. The main bottleneck for such algorithms, other than the obvious one of time, is the amount of memory used. In the field of proof complexity, the resources of time and memory correspond to the length and space of resolution proofs. There has been a long line of research trying to understand these proof complexity measures, but while strong results have been proven on length our understanding of space is still quite poor. For instance, it remains open whether the fact that a formula is provable in short length implies that it is also provable in small space or whether on the contrary these measures are unrelated in the sense that short proofs can be arbitrarily complex with respect to space. In this paper, we present some evidence that the true answer should be that the latter case holds. We do this by proving a tight bound of Theta(sqrt(n)) on the space needed for so-called pebbling contradictions over pyramid graphs of size n. This yields the first polynomial lower bound on space that is not a consequence of a corresponding lower bound on width, another well-studied measure in resolution, as well as an improvement of the weak separation in (Nordstrom 2006) of space and width from logarithmic to polynomial. Also, continuing the line of research initiated by (Ben-Sasson 2002) into trade-offs between different proof complexity measures, we present a simplified proof of the recent length-space trade-off result in (Hertel and Pitassi 2007), and show how our ideas can be used to prove a couple of other exponential trade-offs in resolution.<|reference_end|>
arxiv
@article{nordström2008towards, title={Towards an Optimal Separation of Space and Length in Resolution}, author={Jakob Nordstr\"om, Johan H{\aa}stad}, journal={arXiv preprint arXiv:0803.0661}, year={2008}, archivePrefix={arXiv}, eprint={0803.0661}, primaryClass={cs.CC cs.LO} }
nordström2008towards
arxiv-2964
0803.0666
An approach to control collaborative processes in PLM systems
<|reference_start|>An approach to control collaborative processes in PLM systems: Companies that collaborate within product development processes need to implement effective management of their collaborative activities. Despite the implementation of a PLM system, collaborative activities are often not as efficient as might be expected. This paper presents an analysis of the problems related to collaborative work using a PLM system. From this analysis, we propose an approach for improving collaborative processes within a PLM system, based on monitoring indicators. This approach makes it possible to identify, and therefore to mitigate, the obstacles to collaborative work.<|reference_end|>
arxiv
@article{kadiri2008an, title={An approach to control collaborative processes in PLM systems}, author={Soumaya El Kadiri (LIESP), Philippe Pernelle (LIESP), Miguel Delattre (LIESP), Abdelaziz Bouras (LIESP)}, journal={Dans Workshop on Extended Product and Process Analysis aNd Design - Extended Product and Process Analysis aNd Design, Bordeaux : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0803.0666}, primaryClass={cs.SE} }
kadiri2008an
arxiv-2965
0803.0701
Spanning directed trees with many leaves
<|reference_start|>Spanning directed trees with many leaves: The {\sc Directed Maximum Leaf Out-Branching} problem is to find an out-branching (i.e. a rooted oriented spanning tree) in a given digraph with the maximum number of leaves. In this paper, we obtain two combinatorial results on the number of leaves in out-branchings. We show that - every strongly connected $n$-vertex digraph $D$ with minimum in-degree at least 3 has an out-branching with at least $(n/4)^{1/3}-1$ leaves; - if a strongly connected digraph $D$ does not contain an out-branching with $k$ leaves, then the pathwidth of its underlying graph UG($D$) is $O(k\log k)$. Moreover, if the digraph is acyclic, the pathwidth is at most $4k$. The last result implies that it can be decided in time $2^{O(k\log^2 k)}\cdot n^{O(1)}$ whether a strongly connected digraph on $n$ vertices has an out-branching with at least $k$ leaves. On acyclic digraphs the running time of our algorithm is $2^{O(k\log k)}\cdot n^{O(1)}$.<|reference_end|>
arxiv
@article{alon2008spanning, title={Spanning directed trees with many leaves}, author={N Alon, F.V. Fomin, G. Gutin, M. Krivelevich and S. Saurabh}, journal={arXiv preprint arXiv:0803.0701}, year={2008}, archivePrefix={arXiv}, eprint={0803.0701}, primaryClass={cs.DS cs.DM} }
alon2008spanning
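To fix terminology from the record above: an out-branching is a rooted spanning out-tree, and its leaves are tree vertices with no tree children. The sketch below grows some out-branching by BFS and counts its leaves; it makes no attempt to maximize the leaf count, which is the (hard) optimization problem the paper studies. The toy digraph is invented and strongly connected.

```python
from collections import deque

# Toy strongly connected digraph as adjacency lists.
adj = {0: [1, 2, 3], 1: [4], 2: [4, 5], 3: [0], 4: [0], 5: [1]}
root = 0

def bfs_out_branching(adj, root):
    """Grow an out-branching (rooted spanning out-tree) by BFS from root.
    Every vertex reachable from root gets exactly one tree parent."""
    parent, children = {root: None}, {u: [] for u in adj}
    q = deque([root])
    while q:
        u = q.popleft()
        for v in adj[u]:
            if v not in parent:
                parent[v] = u
                children[u].append(v)
                q.append(v)
    return children

children = bfs_out_branching(adj, root)
leaves = sorted(u for u, ch in children.items() if not ch and u != root)
print("leaves of the out-branching:", leaves)  # here: [3, 4, 5]
```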
arxiv-2966
0803.0726
A quadratic algorithm for road coloring
<|reference_start|>A quadratic algorithm for road coloring: The Road Coloring Theorem states that every aperiodic directed graph with constant out-degree has a synchronized coloring. This theorem had been conjectured during many years as the Road Coloring Problem before being settled by A. Trahtman. Trahtman's proof leads to an algorithm that finds a synchronized labeling with a cubic worst-case time complexity. We show a variant of his construction with a worst-case complexity which is quadratic in time and linear in space. We also extend the Road Coloring Theorem to the periodic case.<|reference_end|>
arxiv
@article{béal2008a, title={A quadratic algorithm for road coloring}, author={Marie-Pierre B\'eal, Dominique Perrin}, journal={arXiv preprint arXiv:0803.0726}, year={2008}, archivePrefix={arXiv}, eprint={0803.0726}, primaryClass={cs.DS cs.DM} }
béal2008a
arxiv-2967
0803.0731
Complexity Analysis of Reed-Solomon Decoding over GF(2^m) Without Using Syndromes
<|reference_start|>Complexity Analysis of Reed-Solomon Decoding over GF(2^m) Without Using Syndromes: For the majority of the applications of Reed-Solomon (RS) codes, hard decision decoding is based on syndromes. Recently, there has been renewed interest in decoding RS codes without using syndromes. In this paper, we investigate the complexity of syndromeless decoding for RS codes, and compare it to that of syndrome-based decoding. Aiming to provide guidelines to practical applications, our complexity analysis differs in several aspects from existing asymptotic complexity analysis, which is typically based on multiplicative fast Fourier transform (FFT) techniques and is usually in big O notation. First, we focus on RS codes over characteristic-2 fields, over which some multiplicative FFT techniques are not applicable. Secondly, due to moderate block lengths of RS codes in practice, our analysis is complete since all terms in the complexities are accounted for. Finally, in addition to fast implementation using additive FFT techniques, we also consider direct implementation, which is still relevant for RS codes with moderate lengths. Comparing the complexities of both syndromeless and syndrome-based decoding algorithms based on direct and fast implementations, we show that syndromeless decoding algorithms have higher complexities than syndrome-based ones for high rate RS codes regardless of the implementation. Both errors-only and errors-and-erasures decoding are considered in this paper. We also derive tighter bounds on the complexities of fast polynomial multiplications based on Cantor's approach and the fast extended Euclidean algorithm.<|reference_end|>
arxiv
@article{chen2008complexity, title={Complexity Analysis of Reed-Solomon Decoding over GF(2^m) Without Using Syndromes}, author={Ning Chen and Zhiyuan Yan}, journal={arXiv preprint arXiv:0803.0731}, year={2008}, archivePrefix={arXiv}, eprint={0803.0731}, primaryClass={cs.IT cs.CC cs.DS math.IT} }
chen2008complexity
arxiv-2968
0803.0755
Toeplitz Block Matrices in Compressed Sensing
<|reference_start|>Toeplitz Block Matrices in Compressed Sensing: Recent work in compressed sensing theory shows that $n\times N$ independent and identically distributed (IID) sensing matrices whose entries are drawn independently from certain probability distributions guarantee exact recovery of a sparse signal with high probability even if $n\ll N$. Motivated by signal processing applications, random filtering with Toeplitz sensing matrices whose elements are drawn from the same distributions were considered and shown to also be sufficient to recover a sparse signal from reduced samples exactly with high probability. This paper considers Toeplitz block matrices as sensing matrices. They naturally arise in multichannel and multidimensional filtering applications and include Toeplitz matrices as special cases. It is shown that the probability of exact reconstruction is also high. Their performance is validated using simulations.<|reference_end|>
arxiv
@article{sebert2008toeplitz, title={Toeplitz Block Matrices in Compressed Sensing}, author={Florian Sebert, Leslie Ying, and Yi Ming Zou}, journal={arXiv preprint arXiv:0803.0755}, year={2008}, archivePrefix={arXiv}, eprint={0803.0755}, primaryClass={cs.IT math.IT math.PR} }
sebert2008toeplitz
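Toeplitz sensing matrices correspond to random filtering: y = Phi x is a truncated convolution of x with a fixed random sequence, so the whole matrix is determined by one generating sequence rather than n*N independent entries. A sketch of building an n x N Toeplitz matrix from a single i.i.d. +/-1 sequence and taking n << N measurements of a sparse signal; the dimensions are illustrative, and recovery from y would use any standard sparse solver.

```python
import numpy as np
from scipy.linalg import toeplitz

rng = np.random.default_rng(1)
N, n, s = 256, 64, 5                 # signal length, measurements, sparsity

# One +/-1 generating sequence of length N + n - 1 determines the matrix:
# Phi[i, j] = seq[N - 1 + i - j], i.e. constant along each diagonal.
seq = rng.choice([-1.0, 1.0], size=N + n - 1)
Phi = toeplitz(seq[N - 1:], seq[N - 1::-1])   # n x N partial Toeplitz matrix

x = np.zeros(N)
x[rng.choice(N, size=s, replace=False)] = rng.normal(size=s)  # s-sparse signal
y = Phi @ x                                   # n compressive measurements
print(Phi.shape, y.shape)                     # (64, 256) (64,)
```

Block versions stack such matrices, which is how the multichannel filtering setting of the paper arises.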
arxiv-2969
0803.0764
Asymmetric and Symmetric Subsystem BCH Codes and Beyond
<|reference_start|>Asymmetric and Symmetric Subsystem BCH Codes and Beyond: Recently, the theory of quantum error control codes has been extended to subsystem codes over symmetric and asymmetric quantum channels -- qubit-flip and phase-shift errors may have equal or different probabilities. Previous work in constructing quantum error control codes has focused on code constructions for symmetric quantum channels. In this paper, we develop a theory and establish the connection between asymmetric quantum codes and subsystem codes. We present families of subsystem and asymmetric quantum codes derived, once again, from classical BCH and RS codes over finite fields. In particular, we derive interesting asymmetric and symmetric subsystem codes based on classical BCH codes with parameters $[[n,k,r,d]]_q$, $[[n,k,r,d_z/d_x]]_q$ and $[[n,k',0,d_z/d_x]]_q$ for arbitrary values of code lengths and dimensions. We establish asymmetric Singleton and Hamming bounds on asymmetric quantum and subsystem code parameters, and derive optimal asymmetric MDS subsystem codes. Finally, our constructions are well explained by an illustrative example. This paper is written on the occasion of the 50th anniversary of the discovery of classical BCH codes; their quantum counterparts were derived nearly 10 years ago.<|reference_end|>
arxiv
@article{aly2008asymmetric, title={Asymmetric and Symmetric Subsystem BCH Codes and Beyond}, author={Salah A. Aly}, journal={arXiv preprint arXiv:0803.0764}, year={2008}, archivePrefix={arXiv}, eprint={0803.0764}, primaryClass={quant-ph cs.IT math.IT} }
aly2008asymmetric
arxiv-2970
0803.0778
Constant-Rank Codes
<|reference_start|>Constant-Rank Codes: Constant-dimension codes have recently received attention due to their significance to error control in noncoherent random network coding. In this paper, we show that constant-rank codes are closely related to constant-dimension codes and we study the properties of constant-rank codes. We first introduce a relation between vectors in $\mathrm{GF}(q^m)^n$ and subspaces of $\mathrm{GF}(q)^m$ or $\mathrm{GF}(q)^n$, and use it to establish a relation between constant-rank codes and constant-dimension codes. We then derive bounds on the maximum cardinality of constant-rank codes with given rank weight and minimum rank distance. Finally, we investigate the asymptotic behavior of the maximal cardinality of constant-rank codes with given rank weight and minimum rank distance.<|reference_end|>
arxiv
@article{gadouleau2008constant-rank, title={Constant-Rank Codes}, author={Maximilien Gadouleau and Zhiyuan Yan}, journal={arXiv preprint arXiv:0803.0778}, year={2008}, archivePrefix={arXiv}, eprint={0803.0778}, primaryClass={cs.IT math.IT} }
gadouleau2008constant-rank
arxiv-2971
0803.0792
Incremental Topological Ordering and Strong Component Maintenance
<|reference_start|>Incremental Topological Ordering and Strong Component Maintenance: We present an on-line algorithm for maintaining a topological order of a directed acyclic graph as arcs are added, and detecting a cycle when one is created. Our algorithm takes O(m^{1/2}) amortized time per arc, where m is the total number of arcs. For sparse graphs, this bound improves the best previous bound by a logarithmic factor and is tight to within a constant factor for a natural class of algorithms that includes all the existing ones. Our main insight is that the bidirectional search method of previous algorithms does not require an ordered search, but can be more general. This allows us to avoid the use of heaps (priority queues) entirely. Instead, the deterministic version of our algorithm uses (approximate) median-finding. The randomized version of our algorithm avoids this complication, making it very simple. We extend our topological ordering algorithm to give the first detailed algorithm for maintaining the strong components of a directed graph, and a topological order of these components, as arcs are added. This extension also has an amortized time bound of O(m^{1/2}) per arc.<|reference_end|>
arxiv
@article{haeupler2008incremental, title={Incremental Topological Ordering and Strong Component Maintenance}, author={Bernhard Haeupler, Siddhartha Sen, and Robert E. Tarjan}, journal={arXiv preprint arXiv:0803.0792}, year={2008}, archivePrefix={arXiv}, eprint={0803.0792}, primaryClass={cs.DS} }
haeupler2008incremental
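For context on the problem solved above, here is a naive incremental topological-ordering scheme in the spirit of the earlier one-sided algorithms (essentially Pearce-Kelly local repair, *not* the paper's O(m^{1/2}) bidirectional method): when an inserted arc (u, v) violates the current order, the vertices reachable from v inside the affected window are shifted past u, and a cycle is reported if the search reaches u.

```python
class IncrementalTopo:
    """Naive online topological ordering with local repair on arc insertion."""

    def __init__(self, n):
        self.adj = [[] for _ in range(n)]
        self.ord = list(range(n))   # ord[x]: position of vertex x
        self.pos = list(range(n))   # pos[p]: vertex at position p

    def add_arc(self, u, v):
        self.adj[u].append(v)
        lo, hi = self.ord[v], self.ord[u]
        if hi < lo:
            return                  # order already consistent with u -> v
        # Vertices reachable from v within the affected window [lo, hi].
        seen, stack = {v}, [v]
        while stack:
            x = stack.pop()
            if x == u:
                raise ValueError("cycle created")
            for y in self.adj[x]:
                if y not in seen and self.ord[y] <= hi:
                    seen.add(y)
                    stack.append(y)
        # Shift the reached vertices after u, preserving relative orders.
        window = [self.pos[p] for p in range(lo, hi + 1)]
        kept = [x for x in window if x not in seen]
        moved = [x for x in window if x in seen]
        for p, x in zip(range(lo, hi + 1), kept + moved):
            self.pos[p], self.ord[x] = x, p

t = IncrementalTopo(4)
for arc in [(3, 2), (2, 1), (1, 0)]:
    t.add_arc(*arc)
print(t.pos)  # a valid topological order, here [3, 2, 1, 0]
```

The paper's insight is that this kind of one-directional repair does amortized work that can be beaten by an unordered bidirectional search, giving the O(m^{1/2}) amortized bound.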
arxiv-2972
0803.0803
Un Algorithme de Gestion des Adjacences bas\'e sur la Puissance du Signal
<|reference_start|>Un Algorithme de Gestion des Adjacences bas\'e sur la Puissance du Signal: In this paper, we present a link management technique for proactive routing protocols in ad-hoc networks. This new mechanism is based on signal strength; hence, a cross-layer approach is used. The hysteresis mechanism provided by OLSR is improved upon by using signal strength in combination with the hello-loss-based hysteresis. The signal power is used to determine whether the link quality is improving or deteriorating, while packet losses are handled through the hysteresis mechanism specified in the OLSR RFC. This not only makes the link management more robust but also helps in anticipating link breakages, thereby greatly improving the performance.<|reference_end|>
arxiv
@article{ali2008un, title={Un Algorithme de Gestion des Adjacences bas\'e sur la Puissance du Signal}, author={Husnain Mansoor Ali (IEF), Anthony Busson (IEF), Amina Meraihi Naimi (IEF), Veronique Veque (IEF)}, journal={Colloque Francophone sur l'Ing\'enierie des Protocoles (CFIP), Les Arcs : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0803.0803}, primaryClass={cs.NI} }
ali2008un
arxiv-2973
0803.0811
Subspace Pursuit for Compressive Sensing Signal Reconstruction
<|reference_start|>Subspace Pursuit for Compressive Sensing Signal Reconstruction: We propose a new method for reconstruction of sparse signals with and without noisy perturbations, termed the subspace pursuit algorithm. The algorithm has two important characteristics: low computational complexity, comparable to that of orthogonal matching pursuit techniques when applied to very sparse signals, and reconstruction accuracy of the same order as that of LP optimization methods. The presented analysis shows that in the noiseless setting, the proposed algorithm can exactly reconstruct arbitrary sparse signals provided that the sensing matrix satisfies the restricted isometry property with a constant parameter. In the noisy setting and in the case that the signal is not exactly sparse, it can be shown that the mean squared error of the reconstruction is upper bounded by constant multiples of the measurement and signal perturbation energies.<|reference_end|>
arxiv
@article{dai2008subspace, title={Subspace Pursuit for Compressive Sensing Signal Reconstruction}, author={Wei Dai and Olgica Milenkovic}, journal={arXiv preprint arXiv:0803.0811}, year={2008}, archivePrefix={arXiv}, eprint={0803.0811}, primaryClass={cs.NA cs.IT math.IT} }
dai2008subspace
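A compact sketch of the subspace pursuit iteration described in the record above: expand the current size-K support with the K columns most correlated with the residual, solve least squares on the union, prune back to the K largest coefficients, and stop when the residual norm no longer decreases. Details such as the exact stopping rule follow common practice and may differ from the paper.

```python
import numpy as np

def subspace_pursuit(Phi, y, K, max_iter=20):
    """Sketch of the Subspace Pursuit iteration for K-sparse recovery."""
    n, N = Phi.shape
    T = np.argsort(np.abs(Phi.T @ y))[-K:]               # initial support
    x_T, *_ = np.linalg.lstsq(Phi[:, T], y, rcond=None)
    r = y - Phi[:, T] @ x_T
    for _ in range(max_iter):
        # Expand: add the K columns most correlated with the residual.
        cand = np.union1d(T, np.argsort(np.abs(Phi.T @ r))[-K:])
        x_c, *_ = np.linalg.lstsq(Phi[:, cand], y, rcond=None)
        # Prune back to the K largest least-squares coefficients.
        T_new = cand[np.argsort(np.abs(x_c))[-K:]]
        x_new, *_ = np.linalg.lstsq(Phi[:, T_new], y, rcond=None)
        r_new = y - Phi[:, T_new] @ x_new
        if np.linalg.norm(r_new) >= np.linalg.norm(r):   # stopping rule
            break
        T, x_T, r = T_new, x_new, r_new
    x = np.zeros(N)
    x[T] = x_T
    return x

rng = np.random.default_rng(2)
N, n, K = 256, 80, 8
Phi = rng.normal(size=(n, N)) / np.sqrt(n)               # Gaussian sensing matrix
x0 = np.zeros(N)
x0[rng.choice(N, K, replace=False)] = rng.normal(size=K)
x_hat = subspace_pursuit(Phi, Phi @ x0, K)
print("recovery error:", np.linalg.norm(x_hat - x0))     # ~0 in the noiseless case
```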
arxiv-2974
0803.0822
Website Optimization through Mining User Navigational Pattern
<|reference_start|>Website Optimization through Mining User Navigational Pattern: As the World Wide Web becomes ever more ubiquitous and various online businesses develop rapidly, the complexity of web sites grows. The analysis of web users' navigational patterns within a web site can provide useful information for server performance enhancement, website restructuring, direct marketing in e-commerce, etc. In this paper, an algorithm is proposed for mining such navigation patterns. The key insight is that users access information of interest and follow a certain path while navigating a web site. If they do not find it, they backtrack and choose among the alternate paths until they reach the destination. The point where they backtrack is the Intermediate Reference Location. Identifying such intermediate locations and destinations from the pattern is the main endeavor in the rest of this report.<|reference_end|>
arxiv
@article{biswal2008website, title={Website Optimization through Mining User Navigational Pattern}, author={Biswajit Biswal}, journal={arXiv preprint arXiv:0803.0822}, year={2008}, archivePrefix={arXiv}, eprint={0803.0822}, primaryClass={cs.IR} }
biswal2008website
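The backtracking notion above is straightforward to operationalize on a single session: a page revisited later in the click path is a candidate Intermediate Reference Location. A minimal sketch on an invented session follows; the actual algorithm would work over many sessions and aggregate such points.

```python
# One user's click path through a site (page ids; toy session data).
path = ["home", "products", "laptops", "products", "desktops",
        "monitors", "desktops", "checkout"]

def backtrack_points(path):
    """Pages the user returned to after an unfruitful branch: candidate
    Intermediate Reference Locations on the way to the destination."""
    visited, marks = set(), []
    for page in path:
        if page in visited:          # revisit => the user backtracked here
            marks.append(page)
        visited.add(page)
    return marks

print(backtrack_points(path))        # ['products', 'desktops']
print("destination:", path[-1])      # 'checkout', the last page of the session
```

Pages that show up as backtrack points across many sessions are natural candidates for restructuring, e.g. adding a direct link to the frequent destinations reached after them.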
arxiv-2975
0803.0845
Knapsack cryptosystems built on NP-hard instance
<|reference_start|>Knapsack cryptosystems built on NP-hard instance: We construct three public key knapsack cryptosystems. Standard knapsack cryptosystems hide easy instances of the knapsack problem and have been broken. The systems considered in this article address this problem: they hide a random (possibly hard) instance of the knapsack problem. We provide both complexity results (size of the key, time needed to encipher/decipher...) and experimental results. Security results are given for the second cryptosystem (the fastest one and the one with the shortest key). Probabilistic polynomial reductions show that finding the private key is as difficult as factoring a product of two primes. We also consider heuristic attacks. First, the density of the cryptosystem can be chosen arbitrarily close to one, discarding low-density attacks. Finally, we consider explicit heuristic attacks based on the LLL algorithm and we prove that, with respect to these attacks, the public key is as secure as a random key.<|reference_end|>
arxiv
@article{evain2008knapsack, title={Knapsack cryptosystems built on NP-hard instance}, author={Laurent Evain}, journal={arXiv preprint arXiv:0803.0845}, year={2008}, archivePrefix={arXiv}, eprint={0803.0845}, primaryClass={cs.CR cs.CC cs.DM cs.DS} }
evain2008knapsack
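All knapsack cryptosystems share the encryption step sketched below: the ciphertext is the subset sum of the public-key elements selected by the plaintext bits. This is the generic mechanism only; the key generation that plants a trapdoor (and, in the paper, hides a random possibly hard instance) is the part that differs between systems and is not shown.

```python
import random

def encrypt(bits, public_key):
    """Knapsack encryption: the ciphertext is the subset sum of the
    public-key elements selected by the plaintext bits."""
    return sum(a for a, b in zip(public_key, bits) if b)

# Toy public key; a real system derives it from a trapdoor instance.
n = 16
public_key = [random.randrange(1, 1 << 32) for _ in range(n)]
message = [random.randrange(2) for _ in range(n)]
c = encrypt(message, public_key)
print("ciphertext:", c)
# Decryption uses the private trapdoor; recovering `message` from
# (public_key, c) alone is an instance of the subset-sum problem, and
# the density n / log2(max element) governs lattice (LLL) attacks.
```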
arxiv-2976
0803.0858
Untangling planar graphs from a specified vertex position - Hard cases
<|reference_start|>Untangling planar graphs from a specified vertex position - Hard cases: Given a planar graph $G$, we consider drawings of $G$ in the plane where edges are represented by straight line segments (which possibly intersect). Such a drawing is specified by an injective embedding $\pi$ of the vertex set of $G$ into the plane. We prove that a wheel graph $W_n$ admits a drawing $\pi$ such that, if one wants to eliminate edge crossings by shifting vertices to new positions in the plane, then at most $(2+o(1))\sqrt n$ of all $n$ vertices can stay fixed. Moreover, such a drawing $\pi$ exists even if it is presupposed that the vertices occupy any prescribed set of points in the plane. Similar questions are discussed for other families of planar graphs.<|reference_end|>
arxiv
@article{kang2008untangling, title={Untangling planar graphs from a specified vertex position - Hard cases}, author={Mihyun Kang, Oleg Pikhurko, Alexander Ravsky, Mathias Schacht, Oleg Verbitsky}, journal={Discrete Applied Mathematics 159:8 (2011) 789-799}, year={2008}, doi={10.1016/j.dam.2011.01.011}, archivePrefix={arXiv}, eprint={0803.0858}, primaryClass={cs.DM cs.CG} }
kang2008untangling
arxiv-2977
0803.0862
xPerm: fast index canonicalization for tensor computer algebra
<|reference_start|>xPerm: fast index canonicalization for tensor computer algebra: We present a very fast implementation of the Butler-Portugal algorithm for index canonicalization with respect to permutation symmetries. It is called xPerm, and has been written as a combination of a Mathematica package and a C subroutine. The latter performs the most demanding parts of the computations and can be linked from any other program or computer algebra system. We demonstrate with tests and timings the effectively polynomial performance of the Butler-Portugal algorithm with respect to the number of indices, though we also show a case in which it is exponential. Our implementation handles generic tensorial expressions with several dozen indices in hundredths of a second, or one hundred indices in a few seconds, clearly outperforming all other current canonicalizers. The code has been already under intensive testing for several years and has been essential in recent investigations in large-scale tensor computer algebra.<|reference_end|>
arxiv
@article{martin-garcia2008xperm:, title={xPerm: fast index canonicalization for tensor computer algebra}, author={Jose M. Martin-Garcia}, journal={Comp. Phys. Commun. 179 (2008) 597-603}, year={2008}, doi={10.1016/j.cpc.2008.05.009}, archivePrefix={arXiv}, eprint={0803.0862}, primaryClass={cs.SC gr-qc hep-th} }
martin-garcia2008xperm:
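As background for what a canonicalizer computes, here is a brute-force sketch (an assumed example, not the Butler-Portugal algorithm): it closes the slot-symmetry group from its generators and returns the lexicographically least equivalent index arrangement. xPerm obtains the same canonical form without enumerating the group.

```python
def compose(p, q):
    # Permutation composition: apply q, then p (slot i receives p[q[i]]).
    return tuple(p[q[i]] for i in range(len(p)))

def group_closure(generators):
    group = {tuple(range(len(generators[0])))}
    while True:
        new = {compose(g, h) for g in generators for h in group} - group
        if not new:
            return group
        group |= new

def canonicalize(indices, generators):
    # Lexicographically least arrangement reachable by the slot-symmetry
    # group (exponential in general -- illustration only).
    group = group_closure(generators)
    return min(tuple(indices[g[i]] for i in range(len(indices))) for g in group)

# T[a,b,c,d] symmetric under swapping slots 0,1 and under exchanging
# the slot pairs (0,1) <-> (2,3):
gens = [(1, 0, 2, 3), (2, 3, 0, 1)]
print(canonicalize(("d", "a", "c", "b"), gens))  # ('a', 'd', 'b', 'c')
```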
arxiv-2978
0803.0874
A Method for Solving Cyclic Block Penta-diagonal Systems of Linear Equations
<|reference_start|>A Method for Solving Cyclic Block Penta-diagonal Systems of Linear Equations: A method for solving cyclic block tri-diagonal systems of equations is generalized to solve cyclic block penta-diagonal systems. By introducing a special form of two new variables, the original system is split into three block penta-diagonal systems, which can be solved by known methods. As such, the method belongs to the class of direct methods without pivoting. Implementation of the algorithm is discussed in some detail and numerical examples are presented.<|reference_end|>
arxiv
@article{batista2008a, title={A Method for Solving Cyclic Block Penta-diagonal Systems of Linear Equations}, author={Milan Batista}, journal={arXiv preprint arXiv:0803.0874}, year={2008}, archivePrefix={arXiv}, eprint={0803.0874}, primaryClass={cs.MS cs.NA} }
batista2008a
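The paper reduces the cyclic block case to ordinary penta-diagonal solves. As a baseline, the scalar non-cyclic penta-diagonal solve is a standard banded computation; a minimal SciPy sketch of that building block (illustrative only, not the paper's algorithm):

```python
import numpy as np
from scipy.linalg import solve_banded

rng = np.random.default_rng(1)
n = 10
# Five diagonals in SciPy's banded storage: row 0 holds the 2nd super-
# diagonal, row 2 the main diagonal, row 4 the 2nd sub-diagonal.
ab = rng.uniform(0.1, 1.0, size=(5, n))
ab[2] += 5.0                       # strengthen the main diagonal
b = rng.standard_normal(n)

x = solve_banded((2, 2), ab, b)    # direct penta-diagonal solve

# Cross-check against the equivalent dense matrix.
A = np.zeros((n, n))
for k in range(-2, 3):
    A += np.diag(ab[2 - k, max(k, 0): n + min(k, 0)], k)
assert np.allclose(A @ x, b)
```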
arxiv-2979
0803.0875
Vandermonde Frequency Division Multiplexing for Cognitive Radio
<|reference_start|>Vandermonde Frequency Division Multiplexing for Cognitive Radio: We consider a cognitive radio scenario where a primary and a secondary user wish to communicate with their corresponding receivers simultaneously over frequency selective channels. Under realistic assumptions that the secondary transmitter has no side information about the primary's message and each transmitter knows only its local channels, we propose a Vandermonde precoder that cancels the interference from the secondary user by exploiting the redundancy of a cyclic prefix. Our numerical examples show that VFDM, with an appropriate design of the input covariance, enables the secondary user to achieve a considerable rate while generating zero interference to the primary user.<|reference_end|>
arxiv
@article{cardoso2008vandermonde, title={Vandermonde Frequency Division Multiplexing for Cognitive Radio}, author={L. S. Cardoso, M. Kobayashi, M. Debbah, O. Ryan}, journal={arXiv preprint arXiv:0803.0875}, year={2008}, archivePrefix={arXiv}, eprint={0803.0875}, primaryClass={cs.IT math.IT} }
cardoso2008vandermonde
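The central mechanism can be checked numerically: Vandermonde precoder columns built on the roots of the primary channel polynomial lie in the null space of the primary link's post-cyclic-prefix convolution matrix, hence zero interference. A sketch under assumed toy dimensions and a random channel (the paper's input covariance design is not included):

```python
import numpy as np

rng = np.random.default_rng(0)
L, N = 4, 12                       # channel memory, block size (assumed)
h = rng.standard_normal(L + 1) + 1j * rng.standard_normal(L + 1)

# Linear convolution seen after cyclic-prefix removal:
# y[i] = sum_k h[k] * x[i + L - k], with x of length N + L, y of length N.
T = np.zeros((N, N + L), dtype=complex)
for i in range(N):
    T[i, i: i + L + 1] = h[::-1]

roots = np.roots(h)                # L roots of h[0] z^L + ... + h[L]
V = np.vander(roots, N + L, increasing=True).T   # (N+L) x L Vandermonde precoder

print(np.max(np.abs(T @ V)))       # numerically ~ 0: no interference at the primary
```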
arxiv-2980
0803.0924
What Can We Learn Privately?
<|reference_start|>What Can We Learn Privately?: Learning problems form an important category of computational tasks that generalizes many of the computations researchers apply to large real-life data sets. We ask: what concept classes can be learned privately, namely, by an algorithm whose output does not depend too heavily on any one input or specific training example? More precisely, we investigate learning algorithms that satisfy differential privacy, a notion that provides strong confidentiality guarantees in contexts where aggregate information is released about a database containing sensitive information about individuals. We demonstrate that, ignoring computational constraints, it is possible to privately agnostically learn any concept class using a sample size approximately logarithmic in the cardinality of the concept class. Therefore, almost anything learnable is learnable privately: specifically, if a concept class is learnable by a (non-private) algorithm with polynomial sample complexity and output size, then it can be learned privately using a polynomial number of samples. We also present a computationally efficient private PAC learner for the class of parity functions. Local (or randomized response) algorithms are a practical class of private algorithms that have received extensive investigation. We provide a precise characterization of local private learning algorithms. We show that a concept class is learnable by a local algorithm if and only if it is learnable in the statistical query (SQ) model. Finally, we present a separation between the power of interactive and noninteractive local learning algorithms.<|reference_end|>
arxiv
@article{kasiviswanathan2008what, title={What Can We Learn Privately?}, author={Shiva Prasad Kasiviswanathan, Homin K. Lee, Kobbi Nissim, Sofya Raskhodnikova, and Adam Smith}, journal={SIAM Journal of Computing 40(3) (2011) 793-826}, year={2008}, archivePrefix={arXiv}, eprint={0803.0924}, primaryClass={cs.LG cs.CC cs.CR cs.DB} }
kasiviswanathan2008what
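Randomized response, the canonical local private algorithm that the paper characterizes, can be sketched in a few lines; the epsilon value and the data below are illustrative:

```python
import numpy as np

def randomized_response(bits, epsilon, rng):
    # Each individual reports the truth with probability e^eps / (1 + e^eps),
    # which satisfies epsilon-local differential privacy for a single bit.
    p = np.exp(epsilon) / (1 + np.exp(epsilon))
    keep = rng.random(len(bits)) < p
    return np.where(keep, bits, 1 - bits)

def debiased_mean(reports, epsilon):
    # E[report] = (2p-1) * mu + (1-p), so invert that affine map.
    p = np.exp(epsilon) / (1 + np.exp(epsilon))
    return (reports.mean() - (1 - p)) / (2 * p - 1)

rng = np.random.default_rng(0)
true_bits = (rng.random(100_000) < 0.3).astype(int)
reports = randomized_response(true_bits, epsilon=1.0, rng=rng)
print(debiased_mean(reports, 1.0))   # close to the true mean 0.3
```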
arxiv-2981
0803.0929
Graph Sparsification by Effective Resistances
<|reference_start|>Graph Sparsification by Effective Resistances: We present a nearly-linear time algorithm that produces high-quality sparsifiers of weighted graphs. Given as input a weighted graph $G=(V,E,w)$ and a parameter $\epsilon>0$, we produce a weighted subgraph $H=(V,\tilde{E},\tilde{w})$ of $G$ such that $|\tilde{E}|=O(n\log n/\epsilon^2)$ and for all vectors $x\in\mathbb{R}^V$: $(1-\epsilon)\sum_{uv\in E}(x(u)-x(v))^2w_{uv}\le \sum_{uv\in\tilde{E}}(x(u)-x(v))^2\tilde{w}_{uv} \le (1+\epsilon)\sum_{uv\in E}(x(u)-x(v))^2w_{uv}$. (*) This improves upon the sparsifiers constructed by Spielman and Teng, which had $O(n\log^c n)$ edges for some large constant $c$, and upon those of Bencz\'ur and Karger, which only satisfied (*) for $x\in\{0,1\}^V$. A key ingredient in our algorithm is a subroutine of independent interest: a nearly-linear time algorithm that builds a data structure from which we can query the approximate effective resistance between any two vertices in a graph in $O(\log n)$ time.<|reference_end|>
arxiv
@article{spielman2008graph, title={Graph Sparsification by Effective Resistances}, author={Daniel A. Spielman, Nikhil Srivastava}, journal={arXiv preprint arXiv:0803.0929}, year={2008}, archivePrefix={arXiv}, eprint={0803.0929}, primaryClass={cs.DS} }
spielman2008graph
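A small-scale sketch of the sampling scheme: edges are drawn with probability proportional to weight times effective resistance and reweighted so the expectation is unbiased. The resistances here are computed via the Laplacian pseudoinverse, which is only feasible for tiny graphs; the paper's contribution is the nearly-linear-time data structure that replaces this step.

```python
import numpy as np

rng = np.random.default_rng(0)
n = 30
W = np.triu(rng.random((n, n)), 1)
W *= W > 0.7                                  # sparse random weighted graph
edges = np.argwhere(W > 0)
weights = W[W > 0]

L = np.diag(W.sum(0) + W.sum(1)) - W - W.T    # graph Laplacian
Lpinv = np.linalg.pinv(L)

def reff(u, v):                               # effective resistance between u and v
    return Lpinv[u, u] + Lpinv[v, v] - 2 * Lpinv[u, v]

p = np.array([w * reff(u, v) for (u, v), w in zip(edges, weights)])
p /= p.sum()
q = 4 * len(edges)                            # toy number of samples
counts = rng.multinomial(q, p)
new_w = counts * weights / (q * p)            # unbiased reweighting
print(f"kept {(new_w > 0).sum()} of {len(edges)} edges")
```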
arxiv-2982
0803.0952
Femtocell Networks: A Survey
<|reference_start|>Femtocell Networks: A Survey: The surest way to increase the system capacity of a wireless link is by getting the transmitter and receiver closer to each other, which creates the dual benefits of higher quality links and more spatial reuse. In a network with nomadic users, this inevitably involves deploying more infrastructure, typically in the form of microcells, hotspots, distributed antennas, or relays. A less expensive alternative is the recent concept of femtocells, also called home base-stations, which are data access points installed by home users to get better indoor voice and data coverage. In this article, we overview the technical and business arguments for femtocells, and describe the state-of-the-art on each front. We also describe the technical challenges facing femtocell networks, and give some preliminary ideas for how to overcome them.<|reference_end|>
arxiv
@article{chandrasekhar2008femtocell, title={Femtocell Networks: A Survey}, author={Vikram Chandrasekhar, Jeffrey Andrews and Alan Gatherer}, journal={arXiv preprint arXiv:0803.0952}, year={2008}, doi={10.1109/MCOM.2008.4623708}, archivePrefix={arXiv}, eprint={0803.0952}, primaryClass={cs.NI} }
chandrasekhar2008femtocell
arxiv-2983
0803.0954
Selective association rule generation
<|reference_start|>Selective association rule generation: Mining association rules is a popular and well researched method for discovering interesting relations between variables in large databases. A practical problem is that, at medium to low support values, often a large number of frequent itemsets and an even larger number of association rules are found in a database. A widely used approach is to gradually increase minimum support and minimum confidence, or to filter the found rules using increasingly strict constraints on additional measures of interestingness, until the set of rules found is reduced to a manageable size. In this paper we describe a different approach which is based on the idea of first defining a set of "interesting" itemsets (e.g., by a mixture of mining and expert knowledge) and then, in a second step, selectively generating rules for only these itemsets. The main advantage of this approach over increasing thresholds or filtering rules is that the number of rules found is significantly reduced, while at the same time it is not necessary to increase the support and confidence thresholds, which might lead to missing important information in the database.<|reference_end|>
arxiv
@article{hahsler2008selective, title={Selective association rule generation}, author={Michael Hahsler, Christian Buchta, and Kurt Hornik}, journal={Computational Statistics, 2007. Online First, Published: 25 July 2007}, year={2008}, doi={10.1007/s00180-007-0062-z}, archivePrefix={arXiv}, eprint={0803.0954}, primaryClass={cs.DB cs.DS} }
hahsler2008selective
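A minimal sketch of the two-step idea: fix a set of "interesting" itemsets, then enumerate only their antecedent/consequent splits. Data, thresholds, and helper names are illustrative, not from the paper's implementation:

```python
from itertools import combinations

transactions = [{"milk", "bread"}, {"milk", "bread", "butter"},
                {"bread", "butter"}, {"milk", "butter"},
                {"milk", "bread", "butter"}]

def support(itemset):
    # Fraction of transactions containing the itemset.
    return sum(itemset <= t for t in transactions) / len(transactions)

def rules_for(itemset, min_conf=0.6):
    # Generate rules only for this one itemset (no global frequent-set mining).
    items = frozenset(itemset)
    for r in range(1, len(items)):
        for lhs in map(frozenset, combinations(items, r)):
            conf = support(items) / support(lhs)
            if conf >= min_conf:
                yield (set(lhs), set(items - lhs), conf)

interesting = [{"milk", "bread"}, {"bread", "butter"}]   # e.g. expert-chosen
for itemset in interesting:
    for lhs, rhs, conf in rules_for(itemset):
        print(lhs, "->", rhs, f"conf={conf:.2f}")
```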
arxiv-2984
0803.0956
Characterizing path graphs by forbidden induced subgraphs
<|reference_start|>Characterizing path graphs by forbidden induced subgraphs: A graph is a path graph if it is the intersection graph of a family of subpaths of a tree. In 1970, Renz asked for a characterization of path graphs by forbidden induced subgraphs. Here we answer this question by listing all graphs that are not path graphs and are minimal with this property.<|reference_end|>
arxiv
@article{lévêque2008characterizing, title={Characterizing path graphs by forbidden induced subgraphs}, author={Benjamin Lévêque (LGS), Frédéric Maffray (LGS), Myriam Preissmann (LGS)}, journal={arXiv preprint arXiv:0803.0956}, year={2008}, archivePrefix={arXiv}, eprint={0803.0956}, primaryClass={cs.DM} }
lévêque2008characterizing
arxiv-2985
0803.0966
New probabilistic interest measures for association rules
<|reference_start|>New probabilistic interest measures for association rules: Mining association rules is an important technique for discovering meaningful patterns in transaction databases. Many different measures of interestingness have been proposed for association rules. However, these measures fail to take the probabilistic properties of the mined data into account. In this paper, we start by presenting a simple probabilistic framework for transaction data which can be used to simulate transaction data when no associations are present. We use such data and a real-world database from a grocery outlet to explore the behavior of confidence and lift, two popular interest measures used for rule mining. The results show that confidence is systematically influenced by the frequency of the items in the left-hand side of rules and that lift performs poorly at filtering random noise in transaction data. Based on the probabilistic framework we develop two new interest measures, hyper-lift and hyper-confidence, which can be used to filter or order mined association rules. The new measures show significantly better performance than lift for applications where spurious rules are problematic.<|reference_end|>
arxiv
@article{hahsler2008new, title={New probabilistic interest measures for association rules}, author={Michael Hahsler and Kurt Hornik}, journal={Intelligent Data Analysis, 11(5):437-455, 2007}, year={2008}, doi={10.3233/IDA-2007-11502}, archivePrefix={arXiv}, eprint={0803.0966}, primaryClass={cs.DB stat.ML} }
hahsler2008new
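The computation pattern behind a hypergeometric-based measure in the spirit of hyper-confidence: under independence with fixed margins, the co-occurrence count of two items is hypergeometric, so the observed count can be scored against that null. The counts below are made up and the exact definitions are those in the paper:

```python
from scipy.stats import hypergeom

n = 10_000        # transactions
c_x, c_y = 800, 600
c_xy = 90         # observed co-occurrences (expected ~48 under independence)

lift = (c_xy / n) / ((c_x / n) * (c_y / n))
# P(count < c_xy) when drawing c_y transactions out of n, with c_x "successes".
hyper_conf = hypergeom.cdf(c_xy - 1, n, c_x, c_y)

print(f"lift = {lift:.2f}, hypergeometric score = {hyper_conf:.4f}")
```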
arxiv-2986
0803.0988
Faster Approximate Lossy Generalized Flow via Interior Point Algorithms
<|reference_start|>Faster Approximate Lossy Generalized Flow via Interior Point Algorithms: We present faster approximation algorithms for generalized network flow problems. A generalized flow is one in which the flow out of an edge differs from the flow into the edge by a constant factor. We limit ourselves to the lossy case, when these factors are at most 1. Our algorithm uses a standard interior-point algorithm to solve a linear program formulation of the network flow problem. The system of linear equations that arises at each step of the interior-point algorithm takes the form of a symmetric M-matrix. We present an algorithm for solving such systems in nearly linear time. The algorithm relies on the Spielman-Teng nearly linear time algorithm for solving linear systems in diagonally-dominant matrices. For a graph with $m$ edges, our algorithm obtains an additive $\epsilon$-approximation of the maximum generalized flow and minimum cost generalized flow in time $\tilde{O}(m^{3/2}\log(1/\epsilon))$. In many parameter ranges, this improves over previous algorithms by a factor of approximately $m^{1/2}$. We also obtain a similar improvement for exactly solving the standard min-cost flow problem.<|reference_end|>
arxiv
@article{daitch2008faster, title={Faster Approximate Lossy Generalized Flow via Interior Point Algorithms}, author={Samuel I. Daitch, Daniel A. Spielman}, journal={arXiv preprint arXiv:0803.0988}, year={2008}, archivePrefix={arXiv}, eprint={0803.0988}, primaryClass={cs.DS cs.NA} }
daitch2008faster
arxiv-2987
0803.1025
Asymptotic Concentration Behaviors of Linear Combinations of Weight Distributions on Random Linear Code Ensemble
<|reference_start|>Asymptotic Concentration Behaviors of Linear Combinations of Weight Distributions on Random Linear Code Ensemble: Asymptotic concentration behaviors of linear combinations of weight distributions on the random linear code ensemble are presented. Many important properties of a binary linear code can be expressed in the form of a linear combination of weight distributions, such as the number of codewords, the undetected error probability and an upper bound on the maximum likelihood error probability. The key to this analysis is the covariance formula of weight distributions of the random linear code ensemble, which reveals the second-order statistics of a linear function of the weight distributions. Based on the covariance formula, several expressions for the asymptotic concentration rate, which indicates the speed of convergence to the average, are derived.<|reference_end|>
arxiv
@article{wadayama2008asymptotic, title={Asymptotic Concentration Behaviors of Linear Combinations of Weight Distributions on Random Linear Code Ensemble}, author={Tadashi Wadayama}, journal={arXiv preprint arXiv:0803.1025}, year={2008}, archivePrefix={arXiv}, eprint={0803.1025}, primaryClass={cs.IT math.IT} }
wadayama2008asymptotic
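A Monte-Carlo check of the first-moment formula underlying such analyses, for one common version of the ensemble (uniformly random parity-check matrices, an assumption of this sketch): each nonzero word is a codeword with probability 2^{-(n-k)}, so E[A_w] = C(n,w) 2^{-(n-k)}. Parameters are toy-sized:

```python
import itertools, math, random

random.seed(1)
n, k, trials = 8, 4, 1000
avg = [0.0] * (n + 1)
for _ in range(trials):
    # Uniformly random (n-k) x n parity-check matrix over GF(2).
    H = [[random.randint(0, 1) for _ in range(n)] for _ in range(n - k)]
    for v in itertools.product((0, 1), repeat=n):
        w = sum(v)
        if w and all(sum(h * x for h, x in zip(row, v)) % 2 == 0 for row in H):
            avg[w] += 1 / trials           # accumulate empirical E[A_w]

for w in range(1, n + 1):
    print(w, f"empirical={avg[w]:.3f}", f"theory={math.comb(n, w) / 2**(n - k):.3f}")
```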
arxiv-2988
0803.1030
Robust Stochastic Chemical Reaction Networks and Bounded Tau-Leaping
<|reference_start|>Robust Stochastic Chemical Reaction Networks and Bounded Tau-Leaping: The behavior of some stochastic chemical reaction networks is largely unaffected by slight inaccuracies in reaction rates. We formalize the robustness of state probabilities to reaction rate deviations, and describe a formal connection between robustness and efficiency of simulation. Without robustness guarantees, stochastic simulation seems to require computational time proportional to the total number of reaction events. Even if the concentration (molecular count per volume) stays bounded, the number of reaction events can be linear in the duration of simulated time and total molecular count. We show that the behavior of robust systems can be predicted such that the computational work scales linearly with the duration of simulated time and concentration, and only polylogarithmically in the total molecular count. Thus our asymptotic analysis captures the dramatic speedup when molecular counts are large, and shows that for bounded concentrations the computation time is essentially invariant with molecular count. Finally, by noticing that even robust stochastic chemical reaction networks are capable of embedding complex computational problems, we argue that the linear dependence on simulated time and concentration is likely optimal.<|reference_end|>
arxiv
@article{soloveichik2008robust, title={Robust Stochastic Chemical Reaction Networks and Bounded Tau-Leaping}, author={David Soloveichik}, journal={arXiv preprint arXiv:0803.1030}, year={2008}, archivePrefix={arXiv}, eprint={0803.1030}, primaryClass={cs.CC} }
soloveichik2008robust
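For contrast with the paper's bounded tau-leaping, a plain Gillespie simulation of the toy reaction X + Y -> Z shows the baseline cost model: work grows with the number of reaction events. Rates and counts are illustrative:

```python
import numpy as np

rng = np.random.default_rng(0)
x, y, z = 500, 300, 0          # molecular counts
k = 0.001                      # stochastic rate constant
t, t_end, events = 0.0, 10.0, 0

while True:
    a = k * x * y              # propensity of X + Y -> Z
    if a == 0:
        break
    dt = rng.exponential(1 / a)
    if t + dt > t_end:
        break
    t += dt
    x, y, z = x - 1, y - 1, z + 1
    events += 1                # one unit of work per reaction event

print(f"t={t:.4f}, state=({x}, {y}, {z}), events={events}")
```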
arxiv-2989
0803.1087
The Future of Scientific Simulations: from Artificial Life to Artificial Cosmogenesis
<|reference_start|>The Future of Scientific Simulations: from Artificial Life to Artificial Cosmogenesis: This philosophical paper explores the relation between modern scientific simulations and the future of the universe. We argue that a simulation of an entire universe will result from future scientific activity. This requires us to tackle the challenge of simulating open-ended evolution at all levels in a single simulation. The simulation should encompass not only biological evolution, but also physical evolution (a level below) and cultural evolution (a level above). The simulation would allow us to probe what would happen if we would "replay the tape of the universe" with the same or different laws and initial conditions. We also distinguish between real-world and artificial-world modelling. Assuming that intelligent life could indeed simulate an entire universe, this leads to two tentative hypotheses. Some authors have argued that we may already be in a simulation run by an intelligent entity. Or, if such a simulation could be made real, this would lead to the production of a new universe. This last direction is argued with a careful speculative philosophical approach, emphasizing the imperative to find a solution to the heat death problem in cosmology. The reader is invited to consult Annex 1 for an overview of the logical structure of this paper. -- Keywords: far future, future of science, ALife, simulation, realization, cosmology, heat death, fine-tuning, physical eschatology, cosmological natural selection, cosmological artificial selection, artificial cosmogenesis, selfish biocosm hypothesis, meduso-anthropic principle, developmental singularity hypothesis, role of intelligent life.<|reference_end|>
arxiv
@article{vidal2008the, title={The Future of Scientific Simulations: from Artificial Life to Artificial Cosmogenesis}, author={Clement Vidal}, journal={arXiv preprint arXiv:0803.1087}, year={2008}, archivePrefix={arXiv}, eprint={0803.1087}, primaryClass={cs.AI} }
vidal2008the
arxiv-2990
0803.1090
Self-Corrected Min-Sum decoding of LDPC codes
<|reference_start|>Self-Corrected Min-Sum decoding of LDPC codes: In this paper we propose a very simple but powerful self-correction method for the Min-Sum decoding of LDPC codes. Unlike other correction methods known in the literature, our method does not try to correct the check node processing approximation, but it modifies the variable node processing by erasing unreliable messages. However, this positively affects check node messages, which become symmetric Gaussian distributed, and we show that this is sufficient to ensure a quasi-optimal decoding performance. Monte-Carlo simulations show that the proposed Self-Corrected Min-Sum decoding performs very close to the Sum-Product decoding, while preserving the main features of the Min-Sum decoding, that is, low complexity and independence with respect to noise variance estimation errors.<|reference_end|>
arxiv
@article{savin2008self-corrected, title={Self-Corrected Min-Sum decoding of LDPC codes}, author={Valentin Savin}, journal={arXiv preprint arXiv:0803.1090}, year={2008}, archivePrefix={arXiv}, eprint={0803.1090}, primaryClass={cs.IT math.IT} }
savin2008self-corrected
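The correction rule itself is tiny; a fragment (not a full decoder) with assumed message values:

```python
def check_update(incoming):
    # Plain min-sum check node: sign product and minimum magnitude
    # over the incoming messages, excluding the target edge's own message.
    out = []
    for i in range(len(incoming)):
        others = incoming[:i] + incoming[i + 1:]
        sign = 1.0
        for m in others:
            sign *= 1.0 if m >= 0 else -1.0
        out.append(sign * min(abs(m) for m in others))
    return out

def variable_update(channel_llr, incoming, previous_out):
    # Self-correction: erase (zero out) any extrinsic message whose sign
    # flipped relative to the previous iteration.
    total = channel_llr + sum(incoming)
    out = []
    for i, prev in enumerate(previous_out):
        m = total - incoming[i]
        if prev != 0 and m * prev < 0:
            m = 0.0
        out.append(m)
    return out

print(check_update([1.2, -0.4, 2.0]))                   # [-0.4, 1.2, -0.4]
print(variable_update(0.3, [-0.4, 0.9], [-0.8, -0.5]))  # sign flip -> first message erased
```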
arxiv-2991
0803.1094
Min-Max decoding for non binary LDPC codes
<|reference_start|>Min-Max decoding for non binary LDPC codes: Iterative decoding of non-binary LDPC codes is currently performed using either the Sum-Product or the Min-Sum algorithms or slightly different versions of them. In this paper, several low-complexity quasi-optimal iterative algorithms are proposed for decoding non-binary codes. The Min-Max algorithm is one of them and it has the benefit of two possible LLR domain implementations: a standard implementation, whose complexity scales as the square of the Galois field's cardinality and a reduced complexity implementation called selective implementation, which makes the Min-Max decoding very attractive for practical purposes.<|reference_end|>
arxiv
@article{savin2008min-max, title={Min-Max decoding for non binary LDPC codes}, author={Valentin Savin}, journal={arXiv preprint arXiv:0803.1094}, year={2008}, archivePrefix={arXiv}, eprint={0803.1094}, primaryClass={cs.IT math.IT} }
savin2008min-max
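The standard-implementation check-node rule, whose cost per check scales with the square of the field cardinality, can be written by brute force over GF(4), with field addition realized as XOR of the 2-bit symbol labels; the cost vectors below are illustrative:

```python
Q = 4  # field size

def min_max_check(m1, m2):
    # Min-Max check node: the outgoing cost of symbol s is the minimum,
    # over incoming pairs (a, b) with a + b = s, of the maximum incoming cost.
    out = [float("inf")] * Q
    for a in range(Q):
        for b in range(Q):
            s = a ^ b                      # GF(4) addition as XOR of labels
            out[s] = min(out[s], max(m1[a], m2[b]))
    return out

print(min_max_check([0, 2, 3, 5], [0, 1, 4, 4]))   # [0, 1, 3, 3]
```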
arxiv-2992
0803.1096
Algebraic-geometric codes from vector bundles and their decoding
<|reference_start|>Algebraic-geometric codes from vector bundles and their decoding: Algebraic-geometric codes can be constructed by evaluating a certain set of functions on a set of distinct rational points of an algebraic curve. The set of functions that are evaluated is the linear space of a given divisor or, equivalently, the set of sections of a given line bundle. Using arbitrary rank vector bundles on algebraic curves, we propose a natural generalization of the above construction. Our codes can also be seen as interleaved versions of classical algebraic-geometric codes. We show that the algorithm of Brown, Minder and Shokrollahi can be extended to this new class of codes and that it corrects any number of errors up to $t^{*} - g/2$, where $t^{*}$ is the designed correction capacity of the code and $g$ is the curve genus.<|reference_end|>
arxiv
@article{savin2008algebraic-geometric, title={Algebraic-geometric codes from vector bundles and their decoding}, author={Valentin Savin}, journal={arXiv preprint arXiv:0803.1096}, year={2008}, archivePrefix={arXiv}, eprint={0803.1096}, primaryClass={cs.IT math.IT} }
savin2008algebraic-geometric
arxiv-2993
0803.1104
Optimizing Web Sites for Customer Retention
<|reference_start|>Optimizing Web Sites for Customer Retention: With customer relationship management (CRM), companies move away from a mainly product-centered view to a customer-centered view. Resulting from this change, the effective management of how to keep contact with customers throughout different channels is one of the key success factors in today's business world. Company Web sites have evolved in many industries into an extremely important channel through which customers can be attracted and retained. To analyze and optimize this channel, accurate models of how customers browse through the Web site and what information within the site they repeatedly view are crucial. Typically, data mining techniques are used for this purpose. However, there already exist numerous models developed in marketing research for traditional channels which could also prove valuable to understanding this new channel. In this paper we propose the application of an extension of the Logarithmic Series Distribution (LSD) model to repeat-usage of Web-based information, and thus to analyze and optimize a Web site's capability to support one goal of CRM: to retain customers. As an example, we use the university's blended learning web portal with over a thousand learning resources to demonstrate how the model can be used to evaluate and improve the Web site's effectiveness.<|reference_end|>
arxiv
@article{hahsler2008optimizing, title={Optimizing Web Sites for Customer Retention}, author={Michael Hahsler}, journal={arXiv preprint arXiv:0803.1104}, year={2008}, archivePrefix={arXiv}, eprint={0803.1104}, primaryClass={cs.HC} }
hahsler2008optimizing
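A sketch of fitting a logarithmic series distribution to repeat-usage counts by moment matching; the data are synthetic and the estimator is a generic choice, not necessarily the paper's:

```python
import numpy as np
from scipy.stats import logser
from scipy.optimize import brentq

rng = np.random.default_rng(0)
counts = logser.rvs(0.8, size=5000, random_state=rng)   # synthetic usage counts

# Method of moments: the mean of LSD(p) is -p / ((1-p) ln(1-p)).
mean = counts.mean()
p_hat = brentq(lambda p: -p / ((1 - p) * np.log(1 - p)) - mean, 1e-6, 1 - 1e-9)

for k in range(1, 6):
    obs = (counts == k).mean()
    print(k, f"observed={obs:.3f}", f"model={logser.pmf(k, p_hat):.3f}")
```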
arxiv-2994
0803.1110
On the Computation of the Topology of a Non-Reduced Implicit Space Curve
<|reference_start|>On the Computation of the Topology of a Non-Reduced Implicit Space Curve: An algorithm is presented for the computation of the topology of a non-reduced space curve defined as the intersection of two implicit algebraic surfaces. It computes a Piecewise Linear Structure (PLS) isotopic to the original space curve. The algorithm is designed to provide the exact result for all inputs. It is a symbolic-numeric algorithm based on subresultant computation. Simple algebraic criteria are given to certify the output of the algorithm. The algorithm uses only one projection of the non-reduced space curve, augmented with adjacency information around some "particular points" of the space curve. The algorithm is implemented with the Mathemagix Computer Algebra System (CAS) using the SYNAPS library as a backend.<|reference_end|>
arxiv
@article{diatta2008on, title={On the Computation of the Topology of a Non-Reduced Implicit Space Curve}, author={Daouda Niang Diatta (XLIM), Bernard Mourrain (INRIA Sophia Antipolis), Olivier Ruatta (XLIM)}, journal={arXiv preprint arXiv:0803.1110}, year={2008}, archivePrefix={arXiv}, eprint={0803.1110}, primaryClass={math.AC cs.CG cs.SC} }
diatta2008on
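The projection that such algorithms start from can be computed with a resultant: eliminating z from the two surface equations yields a plane curve containing the projection of their intersection. A sketch with assumed example surfaces (the paper's certification via subresultant sequences is not reproduced here):

```python
from sympy import symbols, resultant

x, y, z = symbols("x y z")
f = x**2 + y**2 + z**2 - 1      # a sphere
g = z - x * y                   # a second (assumed) surface

# Eliminate z: the resultant vanishes on the projection of the curve f = g = 0.
proj = resultant(f, g, z)
print(proj)                     # x**2*y**2 + x**2 + y**2 - 1
```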
arxiv-2995
0803.1111
Hierarchical Grid-Based Pairwise Key Pre-distribution in Wireless Sensor Networks
<|reference_start|>Hierarchical Grid-Based Pairwise Key Pre-distribution in Wireless Sensor Networks: The security of wireless sensor networks is an active topic of research where both symmetric and asymmetric key cryptography issues have been studied. Due to their computational feasibility on typical sensor nodes, symmetric key algorithms that use the same key to encrypt and decrypt messages have been intensively studied and widely deployed in such environments. Because of the limited infrastructure of wireless sensors, the bottleneck challenge for deploying these algorithms is key distribution. For the same reason of resource restrictions, key distribution mechanisms which are used in traditional wireless networks are not efficient for sensor networks. To overcome the key distribution problem, several key pre-distribution algorithms and techniques that assign keys or keying material to the network's nodes in an offline phase have been introduced recently. In this paper, we introduce a supplemental distribution technique based on communication pattern and deployment knowledge modeling. Our technique is based on a hierarchical grid deployment. To grant a security level proportional to the number of dependent sensors, we use different polynomials of different orders with different weights. To demonstrate the value of the proposed work, we provide a detailed analysis of the resources used, the resulting security, resiliency, and connectivity, compared with other related works.<|reference_end|>
arxiv
@article{mohaisen2008hierarchical, title={Hierarchical Grid-Based Pairwise Key Pre-distribution in Wireless Sensor Networks}, author={Abedelaziz Mohaisen, DaeHun Nyang, KyungHee Lee}, journal={arXiv preprint arXiv:0803.1111}, year={2008}, archivePrefix={arXiv}, eprint={0803.1111}, primaryClass={cs.CR} }
mohaisen2008hierarchical
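Polynomial-based pairwise key pre-distribution schemes of this kind build on symmetric bivariate polynomial shares; a generic sketch of that building block (parameters and field are illustrative, and the paper's hierarchical grid assignment is not modeled):

```python
import random

P = 2**31 - 1                      # prime field (assumed)
T = 3                              # polynomial degree / collusion threshold

random.seed(7)
# Symmetric coefficients c[a][b] = c[b][a] define f(x, y) = sum c[a][b] x^a y^b.
c = [[0] * (T + 1) for _ in range(T + 1)]
for a in range(T + 1):
    for b in range(a, T + 1):
        c[a][b] = c[b][a] = random.randrange(P)

def share(i):
    # Node i stores the coefficients of the univariate g_i(y) = f(i, y).
    return [sum(c[a][b] * pow(i, a, P) for a in range(T + 1)) % P
            for b in range(T + 1)]

def pairwise_key(my_share, peer_id):
    # Evaluate the stored univariate polynomial at the peer's id.
    return sum(g * pow(peer_id, b, P) for b, g in enumerate(my_share)) % P

s1, s2 = share(17), share(42)
assert pairwise_key(s1, 42) == pairwise_key(s2, 17)   # both derive f(17, 42)
```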
arxiv-2996
0803.1120
The Rate Loss of Single-Letter Characterization: The "Dirty" Multiple Access Channel
<|reference_start|>The Rate Loss of Single-Letter Characterization: The "Dirty" Multiple Access Channel: For general memoryless systems, the typical information-theoretic solution - when it exists - has a "single-letter" form. This reflects the fact that optimum performance can be approached by a random code (or a random binning scheme), generated using independent and identically distributed copies of some single-letter distribution. Is that the form of the solution of any (information-theoretic) problem? In fact, some counter examples are known. The most famous is the "two help one" problem: Korner and Marton showed that if we want to decode the modulo-two sum of two binary sources from their independent encodings, then linear coding is better than random coding. In this paper we provide another counter example, the "doubly-dirty" multiple access channel (MAC). Like the Korner-Marton problem, this is a multi-terminal scenario where side information is distributed among several terminals; each transmitter knows part of the channel interference but the receiver is not aware of any part of it. We give an explicit solution for the capacity region of a binary version of the doubly-dirty MAC, demonstrate how the capacity region can be approached using a linear coding scheme, and prove that the "best known single-letter region" is strictly contained in it. We also state a conjecture regarding a similar rate loss of single-letter characterization in the Gaussian case.<|reference_end|>
arxiv
@article{philosof2008the, title={The Rate Loss of Single-Letter Characterization: The "Dirty" Multiple Access Channel}, author={Tal Philosof and Ram Zamir}, journal={arXiv preprint arXiv:0803.1120}, year={2008}, archivePrefix={arXiv}, eprint={0803.1120}, primaryClass={cs.IT math.IT} }
philosof2008the
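The Korner-Marton "two help one" phenomenon cited above can be seen in miniature: two encoders send syndromes of the same linear code, and the receiver recovers the modulo-two sum by syndrome decoding, here with the [7,4] Hamming code and a weight-1 sum. This illustrates only the cited background, not the paper's doubly-dirty MAC scheme:

```python
import numpy as np

# Parity-check matrix of the [7,4] Hamming code.
H = np.array([[1, 0, 1, 0, 1, 0, 1],
              [0, 1, 1, 0, 0, 1, 1],
              [0, 0, 0, 1, 1, 1, 1]])

def syndrome(v):
    return tuple(H @ v % 2)

# Syndrome table for all weight-1 patterns (the assumed sparse sums).
table = {syndrome(np.eye(7, dtype=int)[i]): i for i in range(7)}

rng = np.random.default_rng(3)
x = rng.integers(0, 2, 7)
z = np.eye(7, dtype=int)[rng.integers(7)]   # modulo-two sum x ^ y, weight 1
y = x ^ z

sx, sy = H @ x % 2, H @ y % 2               # what the two encoders transmit
s = tuple((sx + sy) % 2)                    # = H (x ^ y) mod 2
z_hat = np.zeros(7, dtype=int)
if s in table:
    z_hat[table[s]] = 1
assert (z_hat == z).all()                   # sum recovered without x or y
```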
arxiv-2997
0803.1144
Asymptotic Capacity and Optimal Precoding Strategy of Multi-Level Precode & Forward in Correlated Channels
<|reference_start|>Asymptotic Capacity and Optimal Precoding Strategy of Multi-Level Precode & Forward in Correlated Channels: We analyze a multi-level MIMO relaying system where a multiple-antenna transmitter sends data to a multiple-antenna receiver through several relay levels, also equipped with multiple antennas. Assuming correlated fading in each hop, each relay receives a faded version of the signal transmitted by the previous level, performs precoding on the received signal and retransmits it to the next level. Using free probability theory and assuming that the noise power at the relay levels - but not at the receiver - is negligible, a closed-form expression of the end-to-end asymptotic instantaneous mutual information is derived as the number of antennas in all levels grows large at the same rate. This asymptotic expression is shown to be independent of the channel realizations, to depend only on the channel statistics, and to also serve as the asymptotic value of the end-to-end average mutual information. We also provide the optimal singular vectors of the precoding matrices that maximize the asymptotic mutual information: the optimal transmit directions represented by the singular vectors of the precoding matrices are aligned with the eigenvectors of the channel correlation matrices, hence they can be determined using only the known statistics of the channel matrices and do not depend on a particular channel realization.<|reference_end|>
arxiv
@article{fawaz2008asymptotic, title={Asymptotic Capacity and Optimal Precoding Strategy of Multi-Level Precode & Forward in Correlated Channels}, author={Nadia Fawaz, Keyvan Zarifi, Merouane Debbah, David Gesbert}, journal={arXiv preprint arXiv:0803.1144}, year={2008}, doi={10.1109/ITW.2008.4578651}, archivePrefix={arXiv}, eprint={0803.1144}, primaryClass={cs.IT math.IT} }
fawaz2008asymptotic
arxiv-2998
0803.1189
Infinite words containing squares at every position
<|reference_start|>Infinite words containing squares at every position: Richomme asked the following question: what is the infimum of the real numbers $\alpha > 2$ such that there exists an infinite word that avoids $\alpha$-powers but contains arbitrarily large squares beginning at every position? We resolve this question in the case of a binary alphabet by showing that the answer is $\alpha = 7/3$.<|reference_end|>
arxiv
@article{currie2008infinite, title={Infinite words containing squares at every position}, author={James D. Currie, Narad Rampersad}, journal={arXiv preprint arXiv:0803.1189}, year={2008}, archivePrefix={arXiv}, eprint={0803.1189}, primaryClass={math.CO cs.FL} }
currie2008infinite
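The two properties in question are easy to test on finite prefixes. Below are naive checkers (one common convention for alpha-power containment is assumed), applied to the Thue-Morse word, which avoids 7/3-powers but does not have squares starting at every position — hence the need for the paper's construction:

```python
def has_square_at(w, i):
    # Is there a square (a factor of the form uu) beginning at position i?
    return any(w[i:i + L] == w[i + L:i + 2 * L]
               for L in range(1, (len(w) - i) // 2 + 1))

def avoids_powers(w, alpha):
    # No factor has exponent (length / smallest period) >= alpha.
    n = len(w)
    for i in range(n):
        for j in range(i + 2, n + 1):
            f = w[i:j]
            for p in range(1, len(f)):
                if all(f[k] == f[k - p] for k in range(p, len(f))):
                    if len(f) / p >= alpha:
                        return False
                    break
    return True

tm = "0"
while len(tm) < 64:              # prefix of the Thue-Morse word
    tm += tm.translate(str.maketrans("01", "10"))

print(avoids_powers(tm, 7 / 3))                      # True: Thue-Morse is overlap-free
print(all(has_square_at(tm, i) for i in range(16)))  # False: no square starts at 0
```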
arxiv-2999
0803.1195
Secure Lossless Compression with Side Information
<|reference_start|>Secure Lossless Compression with Side Information: Secure data compression in the presence of side information at both a legitimate receiver and an eavesdropper is explored. A noise-free, limited rate link between the source and the receiver, whose output can be perfectly observed by the eavesdropper, is assumed. As opposed to the wiretap channel model, in which secure communication can be established by exploiting the noise in the channel, here the existence of side information at the receiver is used. Both coded and uncoded side information are considered. In the coded side information scenario, inner and outer bounds on the compression-equivocation rate region are given. In the uncoded side information scenario, the availability of the legitimate receiver's and the eavesdropper's side information at the encoder is considered, and the compression-equivocation rate region is characterized for these cases. It is shown that the side information at the encoder can increase the equivocation rate at the eavesdropper. Hence, the side information at the encoder is shown to be useful in terms of security; this is in contrast with the pure lossless data compression case where side information at the encoder would not help.<|reference_end|>
arxiv
@article{gunduz2008secure, title={Secure Lossless Compression with Side Information}, author={Deniz Gunduz, Elza Erkip, H. Vincent Poor}, journal={arXiv preprint arXiv:0803.1195}, year={2008}, doi={10.1109/ITW.2008.4578644}, archivePrefix={arXiv}, eprint={0803.1195}, primaryClass={cs.IT math.IT} }
gunduz2008secure
arxiv-3000
0803.1207
Serious Flaws in Korf et al.'s Analysis on Time Complexity of A*
<|reference_start|>Serious Flaws in Korf et al's Analysis on Time Complexity of A*: This paper has been withdrawn.<|reference_end|>
arxiv
@article{dinh2008serious, title={Serious Flaws in Korf et al.'s Analysis on Time Complexity of A*}, author={Hang Dinh}, journal={arXiv preprint arXiv:0803.1207}, year={2008}, archivePrefix={arXiv}, eprint={0803.1207}, primaryClass={cs.AI} }
dinh2008serious