corpus_id | paper_id | title | abstract | source | bibtex | citation_key
---|---|---|---|---|---|---|
arxiv-4701 | 0809.0116 | Toward Expressive and Scalable Sponsored Search Auctions | <|reference_start|>Toward Expressive and Scalable Sponsored Search Auctions: Internet search results are a growing and highly profitable advertising platform. Search providers auction advertising slots to advertisers on their search result pages. Due to the high volume of searches and the users' low tolerance for search result latency, it is imperative to resolve these auctions fast. Current approaches restrict the expressiveness of bids in order to achieve fast winner determination, which is the problem of allocating slots to advertisers so as to maximize the expected revenue given the advertisers' bids. The goal of our work is to permit more expressive bidding, thus allowing advertisers to achieve complex advertising goals, while still providing fast and scalable techniques for winner determination.<|reference_end|> | arxiv | @article{martin2008toward,
title={Toward Expressive and Scalable Sponsored Search Auctions},
author={David J. Martin, Johannes Gehrke, Joseph Y. Halpern},
journal={David J. Martin, Johannes Gehrke, and Joseph Y. Halpern. Toward
Expressive and Scalable Sponsored Search Auctions. In Proceedings of the 24th
IEEE International Conference on Data Engineering, pages 237--246. April 2008},
year={2008},
doi={10.1109/ICDE.2008.4497432},
archivePrefix={arXiv},
eprint={0809.0116},
primaryClass={cs.DB}
} | martin2008toward |
arxiv-4702 | 0809.0124 | A Uniform Approach to Analogies, Synonyms, Antonyms, and Associations | <|reference_start|>A Uniform Approach to Analogies, Synonyms, Antonyms, and Associations: Recognizing analogies, synonyms, antonyms, and associations appear to be four distinct tasks, requiring distinct NLP algorithms. In the past, the four tasks have been treated independently, using a wide variety of algorithms. These four semantic classes, however, are a tiny sample of the full range of semantic phenomena, and we cannot afford to create ad hoc algorithms for each semantic phenomenon; we need to seek a unified approach. We propose to subsume a broad range of phenomena under analogies. To limit the scope of this paper, we restrict our attention to the subsumption of synonyms, antonyms, and associations. We introduce a supervised corpus-based machine learning algorithm for classifying analogous word pairs, and we show that it can solve multiple-choice SAT analogy questions, TOEFL synonym questions, ESL synonym-antonym questions, and similar-associated-both questions from cognitive psychology.<|reference_end|> | arxiv | @article{turney2008a,
title={A Uniform Approach to Analogies, Synonyms, Antonyms, and Associations},
author={Peter D. Turney (National Research Council of Canada)},
journal={Proceedings of the 22nd International Conference on Computational
Linguistics (Coling 2008), August 2008, Manchester, UK, Pages 905-912},
year={2008},
number={NRC 50398},
archivePrefix={arXiv},
eprint={0809.0124},
primaryClass={cs.CL cs.IR cs.LG}
} | turney2008a |
arxiv-4703 | 0809.0158 | Network Tomography Based on Additive Metrics | <|reference_start|>Network Tomography Based on Additive Metrics: Inference of the network structure (e.g., routing topology) and dynamics (e.g., link performance) is an essential component in many network design and management tasks. In this paper we propose a new, general framework for analyzing and designing routing topology and link performance inference algorithms using ideas and tools from phylogenetic inference in evolutionary biology. The framework is applicable to a variety of measurement techniques. Based on the framework we introduce and develop several polynomial-time distance-based inference algorithms with provable performance. We provide sufficient conditions for the correctness of the algorithms. We show that the algorithms are consistent (return correct topology and link performance with an increasing sample size) and robust (can tolerate a certain level of measurement errors). In addition, we establish certain optimality properties of the algorithms (i.e., they achieve the optimal $l_\infty$-radius) and demonstrate their effectiveness via model simulation.<|reference_end|> | arxiv | @article{ni2008network,
title={Network Tomography Based on Additive Metrics},
author={Jian Ni, Sekhar Tatikonda},
journal={IEEE Transactions on Information Theory, 57(12), December 2011},
year={2008},
doi={10.1109/TIT.2011.2168901},
archivePrefix={arXiv},
eprint={0809.0158},
primaryClass={cs.NI cs.IT math.IT}
} | ni2008network |
arxiv-4704 | 0809.0159 | Improved Approximations for Guarding 1.5-Dimensional Terrains | <|reference_start|>Improved Approximations for Guarding 1.5-Dimensional Terrains: We present a 4-approximation algorithm for the problem of placing the fewest guards on a 1.5D terrain so that every point of the terrain is seen by at least one guard. This improves on the currently best approximation factor of 5. Our method is based on rounding the linear programming relaxation of the corresponding covering problem. Besides the simplicity of the analysis, which mainly relies on decomposing the constraint matrix of the LP into totally balanced matrices, our algorithm, unlike previous work, generalizes to the weighted and partial versions of the basic problem.<|reference_end|> | arxiv | @article{elbassioni2008improved,
title={Improved Approximations for Guarding 1.5-Dimensional Terrains},
author={K. Elbassioni, D. Matijevic, J. Mestre, D. Severdija},
journal={arXiv preprint arXiv:0809.0159},
year={2008},
archivePrefix={arXiv},
eprint={0809.0159},
primaryClass={cs.CG}
} | elbassioni2008improved |
arxiv-4705 | 0809.0188 | Approximating Transitivity in Directed Networks | <|reference_start|>Approximating Transitivity in Directed Networks: We study the problem of computing a minimum equivalent digraph (also known as the problem of computing a strong transitive reduction) and its maximum objective function variant, with two types of extensions. First, we allow to declare a set $D\subset E$ and require that a valid solution $A$ satisfies $D\subset A$ (it is sometimes called transitive reduction problem). In the second extension (called $p$-ary transitive reduction), we have integer edge labeling and we view two paths as equivalent if they have the same beginning, ending and the sum of labels modulo $p$. A solution $A\subseteq E$ is valid if it gives an equivalent path for every original path. For all problems we establish the following: polynomial time minimization of $|A|$ within ratio 1.5, maximization of $|E-A|$ within ratio 2, MAX-SNP hardness even if the length of simple cycles is limited to 5. Furthermore, we believe that the combinatorial technique behind the approximation algorithm for the minimization version might be of interest to other graph connectivity problems as well.<|reference_end|> | arxiv | @article{berman2008approximating,
title={Approximating Transitivity in Directed Networks},
author={Piotr Berman, Bhaskar DasGupta and Marek Karpinski},
journal={arXiv preprint arXiv:0809.0188},
year={2008},
archivePrefix={arXiv},
eprint={0809.0188},
primaryClass={cs.CC cs.DM cs.DS}
} | berman2008approximating |
arxiv-4706 | 0809.0195 | Light Logics and the Call-by-Value Lambda Calculus | <|reference_start|>Light Logics and the Call-by-Value Lambda Calculus: The so-called light logics have been introduced as logical systems enjoying quite remarkable normalization properties. Designing a type assignment system for pure lambda calculus from these logics, however, is problematic. In this paper we show that shifting from usual call-by-name to call-by-value lambda calculus allows regaining strong connections with the underlying logic. This will be done in the context of Elementary Affine Logic (EAL), designing a type system in natural deduction style assigning EAL formulae to lambda terms.<|reference_end|> | arxiv | @article{coppola2008light,
title={Light Logics and the Call-by-Value Lambda Calculus},
author={Paolo Coppola, Ugo Dal Lago, Simona Ronchi Della Rocca},
journal={Logical Methods in Computer Science, Volume 4, Issue 4 (November
7, 2008) lmcs:820},
year={2008},
doi={10.2168/LMCS-4(4:5)2008},
archivePrefix={arXiv},
eprint={0809.0195},
primaryClass={cs.LO}
} | coppola2008light |
arxiv-4707 | 0809.0199 | Dense Error Correction via L1-Minimization | <|reference_start|>Dense Error Correction via L1-Minimization: This paper studies the problem of recovering a non-negative sparse signal $\mathbf{x} \in \mathbb{R}^n$ from highly corrupted linear measurements $\mathbf{y} = A\mathbf{x} + \mathbf{e} \in \mathbb{R}^m$, where $\mathbf{e}$ is an unknown error vector whose nonzero entries may be unbounded. Motivated by an observation from face recognition in computer vision, this paper proves that for highly correlated (and possibly overcomplete) dictionaries $A$, any non-negative, sufficiently sparse signal $\mathbf{x}$ can be recovered by solving an $\ell^1$-minimization problem: $\min \|\mathbf{x}\|_1 + \|\mathbf{e}\|_1 \quad \text{subject to} \quad \mathbf{y} = A\mathbf{x} + \mathbf{e}.$ More precisely, if the fraction $\rho$ of errors is bounded away from one and the support of $\mathbf{x}$ grows sublinearly in the dimension $m$ of the observation, then as $m$ goes to infinity, the above $\ell^1$-minimization succeeds for all signals $\mathbf{x}$ and almost all sign-and-support patterns of $\mathbf{e}$. This result suggests that accurate recovery of sparse signals is possible and computationally feasible even with nearly 100% of the observations corrupted. The proof relies on a careful characterization of the faces of a convex polytope spanned together by the standard crosspolytope and a set of iid Gaussian vectors with nonzero mean and small variance, which we call the ``cross-and-bouquet'' model. Simulations and experimental results corroborate the findings, and suggest extensions to the result.<|reference_end|> | arxiv | @article{wright2008dense,
title={Dense Error Correction via L1-Minimization},
author={John Wright and Yi Ma},
journal={arXiv preprint arXiv:0809.0199},
year={2008},
number={UILU-ENG-08-2210, DC 237},
archivePrefix={arXiv},
eprint={0809.0199},
primaryClass={cs.IT math.IT}
} | wright2008dense |
arxiv-4708 | 0809.0216 | Colliding Message Pairs for 23 and 24-step SHA-512 | <|reference_start|>Colliding Message Pairs for 23 and 24-step SHA-512: Recently, Indesteege et al. [1] had described attacks against 23 and 24-step SHA-512 at SAC '08. Their attacks are based on the differential path by Nikolic and Biryukov [2]. The reported complexities are $2^{44.9}$ and $2^{53}$ calls to the respective step reduced SHA-512 hash function. They provided colliding message pairs for 23-step SHA-512 but did not provide a colliding message pair for 24-step SHA-512. In this note we provide a colliding message pair for 23-step SHA-512 and the first colliding message pair for 24-step SHA-512. Our attacks use the differential path first described by Sanadhya and Sarkar at ACISP '08 [3]. The complexities of our attacks are $2^{16.5}$ and $2^{34.5}$ calls to the respective step reduced SHA-512 hash function. Complete details of the attacks will be provided in an extended version of this note.<|reference_end|> | arxiv | @article{sanadhya2008colliding,
title={Colliding Message Pairs for 23 and 24-step SHA-512},
author={Somitra Kumar Sanadhya and Palash Sarkar},
journal={arXiv preprint arXiv:0809.0216},
year={2008},
archivePrefix={arXiv},
eprint={0809.0216},
primaryClass={cs.CR}
} | sanadhya2008colliding |
arxiv-4709 | 0809.0257 | Linear Kernelizations for Restricted 3-Hitting Set Problems | <|reference_start|>Linear Kernelizations for Restricted 3-Hitting Set Problems: The 3-\textsc{Hitting Set} problem is also called the \textsc{Vertex Cover} problem on 3-uniform hypergraphs. In this paper, we address kernelizations of the \textsc{Vertex Cover} problem on 3-uniform hypergraphs. We show that this problem admits a linear kernel in three classes of 3-uniform hypergraphs. We also obtain lower and upper bounds on the kernel size for them by the parametric duality.<|reference_end|> | arxiv | @article{cai2008linear,
title={Linear Kernelizations for Restricted 3-Hitting Set Problems},
author={Xuan Cai},
journal={arXiv preprint arXiv:0809.0257},
year={2008},
archivePrefix={arXiv},
eprint={0809.0257},
primaryClass={cs.CC}
} | cai2008linear |
arxiv-4710 | 0809.0259 | On Duality between Local Maximum Stable Sets of a Graph and its Line-Graph | <|reference_start|>On Duality between Local Maximum Stable Sets of a Graph and its Line-Graph: G is a Koenig-Egervary graph provided alpha(G)+ mu(G)=|V(G)|, where mu(G) is the size of a maximum matching and alpha(G) is the cardinality of a maximum stable set. S is a local maximum stable set of G if S is a maximum stable set of the closed neighborhood of S. Nemhauser and Trotter Jr. proved that any local maximum stable set is a subset of a maximum stable set of G. In this paper we demonstrate that if S is a local maximum stable set, the subgraph H induced by the closed neighborhood of S is a Koenig-Egervary graph, and M is a maximum matching in H, then M is a local maximum stable set in the line graph of G.<|reference_end|> | arxiv | @article{levit2008on,
title={On Duality between Local Maximum Stable Sets of a Graph and its
Line-Graph},
author={Vadim E. Levit and Eugen Mandrescu},
journal={arXiv preprint arXiv:0809.0259},
year={2008},
archivePrefix={arXiv},
eprint={0809.0259},
primaryClass={math.CO cs.DM}
} | levit2008on |
arxiv-4711 | 0809.0271 | Randomised Variable Neighbourhood Search for Multi Objective Optimisation | <|reference_start|>Randomised Variable Neighbourhood Search for Multi Objective Optimisation: Various local search approaches have recently been applied to machine scheduling problems under multiple objectives. Their foremost consideration is the identification of the set of Pareto optimal alternatives. An important aspect of successfully solving these problems lies in the definition of an appropriate neighbourhood structure. It remains unclear in this context how interdependencies within the fitness landscape affect the resolution of the problem. The paper presents a study of neighbourhood search operators for multiple objective flow shop scheduling. Experiments have been carried out with twelve different combinations of criteria. To derive exact conclusions, small problem instances, for which the optimal solutions are known, have been chosen. Statistical tests show that no single neighbourhood operator is able to equally identify all Pareto optimal alternatives. Significant improvements however have been obtained by hybridising the solution algorithm using a randomised variable neighbourhood search technique.<|reference_end|> | arxiv | @article{geiger2008randomised,
title={Randomised Variable Neighbourhood Search for Multi Objective
Optimisation},
author={Martin Josef Geiger},
journal={Proceedings of the 4th EU/ME Workshop: Design and Evaluation of
Advanced Hybrid Meta-Heuristics, November 4--5, Nottingham, United Kingdom,
pp. 34-42},
year={2008},
archivePrefix={arXiv},
eprint={0809.0271},
primaryClass={cs.AI}
} | geiger2008randomised |
arxiv-4712 | 0809.0352 | Instruction sequences and non-uniform complexity theory | <|reference_start|>Instruction sequences and non-uniform complexity theory: We develop theory concerning non-uniform complexity in a setting in which the notion of single-pass instruction sequence considered in program algebra is the central notion. We define counterparts of the complexity classes P/poly and NP/poly and formulate a counterpart of the complexity theoretic conjecture that NP is not included in P/poly. In addition, we define a notion of completeness for the counterpart of NP/poly using a non-uniform reducibility relation and formulate complexity hypotheses which concern restrictions on the instruction sequences used for computation. We think that the theory developed opens up an additional way of investigating issues concerning non-uniform complexity.<|reference_end|> | arxiv | @article{bergstra2008instruction,
title={Instruction sequences and non-uniform complexity theory},
author={J. A. Bergstra, C. A. Middelburg},
journal={arXiv preprint arXiv:0809.0352},
year={2008},
number={PRG0812},
archivePrefix={arXiv},
eprint={0809.0352},
primaryClass={cs.CC}
} | bergstra2008instruction |
arxiv-4713 | 0809.0355 | Simulations between triangular and hexagonal number-conserving cellular automata | <|reference_start|>Simulations between triangular and hexagonal number-conserving cellular automata: A number-conserving cellular automaton is a cellular automaton whose states are integers and whose transition function keeps the sum of all cells constant throughout its evolution. It can be seen as a kind of modelization of the physical conservation laws of mass or energy. In this paper, we first propose a necessary condition for triangular and hexagonal cellular automata to be number-conserving. The local transition function is expressed by the sum of arity two functions which can be regarded as 'flows' of numbers. The sufficiency is obtained through general results on number-conserving cellular automata. Then, using the previous flow functions, we can construct effective number-conserving simulations between hexagonal cellular automata and triangular cellular automata.<|reference_end|> | arxiv | @article{imai2008simulations,
title={Simulations between triangular and hexagonal number-conserving cellular
automata},
author={Katsunobu Imai (I3S, IEC), Bruno Martin (I3S)},
journal={arXiv preprint arXiv:0809.0355},
year={2008},
archivePrefix={arXiv},
eprint={0809.0355},
primaryClass={cs.DM}
} | imai2008simulations |
arxiv-4714 | 0809.0360 | The Complexity of Enriched Mu-Calculi | <|reference_start|>The Complexity of Enriched Mu-Calculi: The fully enriched μ-calculus is the extension of the propositional μ-calculus with inverse programs, graded modalities, and nominals. While satisfiability in several expressive fragments of the fully enriched μ-calculus is known to be decidable and ExpTime-complete, it has recently been proved that the full calculus is undecidable. In this paper, we study the fragments of the fully enriched μ-calculus that are obtained by dropping at least one of the additional constructs. We show that, in all fragments obtained in this way, satisfiability is decidable and ExpTime-complete. Thus, we identify a family of decidable logics that are maximal (and incomparable) in expressive power. Our results are obtained by introducing two new automata models, showing that their emptiness problems are ExpTime-complete, and then reducing satisfiability in the relevant logics to these problems. The automata models we introduce are two-way graded alternating parity automata over infinite trees (2GAPTs) and fully enriched automata (FEAs) over infinite forests. The former are a common generalization of two incomparable automata models from the literature. The latter extend alternating automata in a similar way as the fully enriched μ-calculus extends the standard μ-calculus.<|reference_end|> | arxiv | @article{bonatti2008the,
title={The Complexity of Enriched Mu-Calculi},
author={Piero A. Bonatti, Carsten Lutz, Aniello Murano, Moshe Y. Vardi},
journal={Logical Methods in Computer Science, Volume 4, Issue 3 (September
22, 2008) lmcs:993},
year={2008},
doi={10.2168/LMCS-4(3:11)2008},
archivePrefix={arXiv},
eprint={0809.0360},
primaryClass={cs.LO cs.CL}
} | bonatti2008the |
arxiv-4715 | 0809.0400 | Canonical Coin Systems for Change-Making Problems | <|reference_start|>Canonical Coin Systems for Change-Making Problems: The Change-Making Problem is to represent a given value with the fewest coins under a given coin system. As a variation of the knapsack problem, it is known to be NP-hard. Nevertheless, in most real money systems, the greedy algorithm yields optimal solutions. In this paper, we study what types of coin systems guarantee the optimality of the greedy algorithm. We provide new proofs for a sufficient and necessary condition for the so-called \emph{canonical} coin systems with four or five types of coins, and a sufficient condition for non-canonical coin systems, respectively. Moreover, we present an $O(m^2)$ algorithm that decides whether a tight coin system is canonical.<|reference_end|> | arxiv | @article{cai2008canonical,
title={Canonical Coin Systems for Change-Making Problems},
author={Xuan Cai},
journal={arXiv preprint arXiv:0809.0400},
year={2008},
archivePrefix={arXiv},
eprint={0809.0400},
primaryClass={cs.DS cs.DM}
} | cai2008canonical |
arxiv-4716 | 0809.0406 | Foundations of the Pareto Iterated Local Search Metaheuristic | <|reference_start|>Foundations of the Pareto Iterated Local Search Metaheuristic: The paper describes the proposition and application of a local search metaheuristic for multi-objective optimization problems. It is based on two main principles of heuristic search, intensification through variable neighborhoods, and diversification through perturbations and successive iterations in favorable regions of the search space. The concept is successfully tested on permutation flow shop scheduling problems under multiple objectives. While the obtained results are encouraging in terms of their quality, another positive attribute of the approach is its simplicity as it requires the setting of only very few parameters. The implementation of the Pareto Iterated Local Search metaheuristic is based on the MOOPPS computer system of local search heuristics for multi-objective scheduling which has been awarded the European Academic Software Award 2002 in Ronneby, Sweden (http://www.easa-award.net/, http://www.bth.se/llab/easa_2002.nsf)<|reference_end|> | arxiv | @article{geiger2008foundations,
title={Foundations of the Pareto Iterated Local Search Metaheuristic},
author={Martin Josef Geiger},
journal={arXiv preprint arXiv:0809.0406},
year={2008},
archivePrefix={arXiv},
eprint={0809.0406},
primaryClass={cs.AI}
} | geiger2008foundations |
arxiv-4717 | 0809.0410 | A Computational Study of Genetic Crossover Operators for Multi-Objective Vehicle Routing Problem with Soft Time Windows | <|reference_start|>A Computational Study of Genetic Crossover Operators for Multi-Objective Vehicle Routing Problem with Soft Time Windows: The article describes an investigation of the effectiveness of genetic algorithms for multi-objective combinatorial optimization (MOCO) by presenting an application for the vehicle routing problem with soft time windows. The work is motivated by the question, if and how the problem structure influences the effectiveness of different configurations of the genetic algorithm. Computational results are presented for different classes of vehicle routing problems, varying in their coverage with time windows, time window size, distribution and number of customers. The results are compared with a simple, but effective local search approach for multi-objective combinatorial optimization problems.<|reference_end|> | arxiv | @article{geiger2008a,
title={A Computational Study of Genetic Crossover Operators for Multi-Objective
Vehicle Routing Problem with Soft Time Windows},
author={Martin Josef Geiger},
journal={Habenicht, W. et al. (eds.): Multi-Criteria- und Fuzzy Systeme in
Theorie und Praxis-Loesungsansaetze fuer Entscheidungsprobleme mit komplexen
Zielsystemen, 2003, ISBN 3-8244-7864-1, pp. 191-207},
year={2008},
archivePrefix={arXiv},
eprint={0809.0410},
primaryClass={cs.AI}
} | geiger2008a |
arxiv-4718 | 0809.0416 | Genetic Algorithms for multiple objective vehicle routing | <|reference_start|>Genetic Algorithms for multiple objective vehicle routing: The talk describes a general approach of a genetic algorithm for multiple objective optimization problems. A particular dominance relation between the individuals of the population is used to define a fitness operator, enabling the genetic algorithm to address even problems with efficient, but convex-dominated alternatives. The algorithm is implemented in a multilingual computer program, solving vehicle routing problems with time windows under multiple objectives. The graphical user interface of the program shows the progress of the genetic algorithm and the main parameters of the approach can be easily modified. In addition to that, the program provides powerful decision support to the decision maker. The software has proved its excellence at the finals of the European Academic Software Award EASA, held at Keble College/ University of Oxford/ Great Britain.<|reference_end|> | arxiv | @article{geiger2008genetic,
title={Genetic Algorithms for multiple objective vehicle routing},
author={Martin Josef Geiger},
journal={Proceedings of the Metaheuristics International Conference
MIC'2001, Porto, Portugal, pp. 349-353},
year={2008},
archivePrefix={arXiv},
eprint={0809.0416},
primaryClass={cs.AI}
} | geiger2008genetic |
arxiv-4719 | 0809.0417 | Queue-length synchronization in communication networks | <|reference_start|>Queue-length synchronization in communication networks: We study synchronization in the context of network traffic on a $2-d$ communication network with local clustering and geographic separations. The network consists of nodes and randomly distributed hubs where the top five hubs ranked according to their coefficient of betweenness centrality (CBC) are connected by random assortative and gradient mechanisms. For multiple message traffic, messages can trap at the high CBC hubs, and congestion can build up on the network with long queues at the congested hubs. The queue lengths are seen to synchronize in the congested phase. Both complete and phase synchronization is seen between pairs of hubs. In the decongested phase, the pairs start clearing, and synchronization is lost. A cascading master-slave relation is seen between the hubs, with the slower hubs (which are slow to decongest) driving the faster ones. These are usually the hubs of high CBC. Similar results are seen for traffic of constant density. Total synchronization between the hubs of high CBC is also seen in the congested regime. Similar behavior is seen for traffic on a network constructed using the Waxman random topology generator. We also demonstrate the existence of phase synchronization in real Internet traffic data.<|reference_end|> | arxiv | @article{mukherjee2008queue-length,
title={Queue-length synchronization in communication networks},
author={Satyam Mukherjee, Neelima Gupte},
journal={Phys. Rev. E Vol. 79, 056105 (2009)},
year={2008},
doi={10.1103/PhysRevE.79.056105},
archivePrefix={arXiv},
eprint={0809.0417},
primaryClass={physics.soc-ph cond-mat.stat-mech cs.NI}
} | mukherjee2008queue-length |
arxiv-4720 | 0809.0444 | Quantum classification | <|reference_start|>Quantum classification: Quantum classification is defined as the task of predicting the associated class of an unknown quantum state drawn from an ensemble of pure states given a finite number of copies of this state. By recasting the state discrimination problem within the framework of Machine Learning (ML), we can use the notion of learning reduction coming from classical ML to solve different variants of the classification task, such as the weighted binary and the multiclass versions.<|reference_end|> | arxiv | @article{gambs2008quantum,
title={Quantum classification},
author={Sébastien Gambs},
journal={arXiv preprint arXiv:0809.0444},
year={2008},
archivePrefix={arXiv},
eprint={0809.0444},
primaryClass={quant-ph cs.LG}
} | gambs2008quantum |
arxiv-4721 | 0809.0448 | The Stock Market as a Game: An Agent Based Approach to Trading in Stocks | <|reference_start|>The Stock Market as a Game: An Agent Based Approach to Trading in Stocks: Just as war is sometimes fallaciously represented as a zero sum game -- when in fact war is a negative sum game -- stock market trading, a positive sum game over time, is often erroneously represented as a zero sum game. This is called the "zero sum fallacy" -- the erroneous belief that one trader in a stock market exchange can only improve their position provided some other trader's position deteriorates. However, a positive sum game in absolute terms can be recast as a zero sum game in relative terms. Similarly it appears that negative sum games in absolute terms have been recast as zero sum games in relative terms: otherwise, why would zero sum games be used to represent situations of war? Such recasting may have heuristic or pedagogic interest but recasting must be made explicit or it risks generating confusion. Keywords: Game theory, stock trading and agent based AI.<|reference_end|> | arxiv | @article{engle2008the,
title={The Stock Market as a Game: An Agent Based Approach to Trading in Stocks},
author={Eric Engle},
journal={arXiv preprint arXiv:0809.0448},
year={2008},
archivePrefix={arXiv},
eprint={0809.0448},
primaryClass={q-fin.TR cs.AI cs.GT}
} | engle2008the |
arxiv-4722 | 0809.0458 | Agent Models of Political Interactions | <|reference_start|>Agent Models of Political Interactions: Looks at state interactions from an agent based AI perspective to see state interactions as an example of emergent intelligent behavior. Exposes basic principles of game theory.<|reference_end|> | arxiv | @article{engle2008agent,
title={Agent Models of Political Interactions},
author={Eric Engle},
journal={arXiv preprint arXiv:0809.0458},
year={2008},
archivePrefix={arXiv},
eprint={0809.0458},
primaryClass={cs.AI cs.GT}
} | engle2008agent |
arxiv-4723 | 0809.0460 | Stochastic Combinatorial Optimization under Probabilistic Constraints | <|reference_start|>Stochastic Combinatorial Optimization under Probabilistic Constraints: In this paper, we present approximation algorithms for combinatorial optimization problems under probabilistic constraints. Specifically, we focus on stochastic variants of two important combinatorial optimization problems: the k-center problem and the set cover problem, with uncertainty characterized by a probability distribution over set of points or elements to be covered. We consider these problems under adaptive and non-adaptive settings, and present efficient approximation algorithms for the case when underlying distribution is a product distribution. In contrast to the expected cost model prevalent in stochastic optimization literature, our problem definitions support restrictions on the probability distributions of the total costs, via incorporating constraints that bound the probability with which the incurred costs may exceed a given threshold.<|reference_end|> | arxiv | @article{agrawal2008stochastic,
title={Stochastic Combinatorial Optimization under Probabilistic Constraints},
author={Shipra Agrawal, Amin Saberi, Yinyu Ye},
journal={arXiv preprint arXiv:0809.0460},
year={2008},
archivePrefix={arXiv},
eprint={0809.0460},
primaryClass={cs.DS}
} | agrawal2008stochastic |
arxiv-4724 | 0809.0461 | The Semiotic Machine | <|reference_start|>The Semiotic Machine: A semiotic model of the user interface in human-computer interaction. Algorithmic sign, semiotics, algorithmic art.<|reference_end|> | arxiv | @article{engle2008the,
title={The Semiotic Machine},
author={Eric Engle},
journal={arXiv preprint arXiv:0809.0461},
year={2008},
archivePrefix={arXiv},
eprint={0809.0461},
primaryClass={cs.HC}
} | engle2008the |
arxiv-4725 | 0809.0490 | Principal Graphs and Manifolds | <|reference_start|>Principal Graphs and Manifolds: In many physical, statistical, biological and other investigations it is desirable to approximate a system of points by objects of lower dimension and/or complexity. For this purpose, Karl Pearson invented principal component analysis in 1901 and found 'lines and planes of closest fit to system of points'. The famous k-means algorithm solves the approximation problem too, but by finite sets instead of lines and planes. This chapter gives a brief practical introduction into the methods of construction of general principal objects, i.e. objects embedded in the 'middle' of the multidimensional data set. As a basis, the unifying framework of mean squared distance approximation of finite datasets is selected. Principal graphs and manifolds are constructed as generalisations of principal components and k-means principal points. For this purpose, the family of expectation/maximisation algorithms with nearest generalisations is presented. Construction of principal graphs with controlled complexity is based on the graph grammar approach.<|reference_end|> | arxiv | @article{gorban2008principal,
title={Principal Graphs and Manifolds},
author={A. N. Gorban, A. Y. Zinovyev},
journal={Handbook of Research on Machine Learning Applications and Trends:
Algorithms, Methods and Techniques, Ch. 2, Information Science Reference,
2009. 28-59},
year={2008},
doi={10.4018/978-1-60566-766-9},
archivePrefix={arXiv},
eprint={0809.0490},
primaryClass={cs.LG cs.NE stat.ML}
} | gorban2008principal |
arxiv-4726 | 0809.0494 | Interaction Grammars | <|reference_start|>Interaction Grammars: Interaction Grammar (IG) is a grammatical formalism based on the notion of polarity. Polarities express the resource sensitivity of natural languages by modelling the distinction between saturated and unsaturated syntactic structures. Syntactic composition is represented as a chemical reaction guided by the saturation of polarities. It is expressed in a model-theoretic framework where grammars are constraint systems using the notion of tree description and parsing appears as a process of building tree description models satisfying criteria of saturation and minimality.<|reference_end|> | arxiv | @article{guillaume2008interaction,
title={Interaction Grammars},
author={Bruno Guillaume (INRIA Lorraine - LORIA), Guy Perrier (INRIA Lorraine
- LORIA)},
journal={arXiv preprint arXiv:0809.0494},
year={2008},
number={RR-6621},
archivePrefix={arXiv},
eprint={0809.0494},
primaryClass={cs.LO}
} | guillaume2008interaction |
arxiv-4727 | 0809.0519 | Complexity of comparison of influence of players in simple games | <|reference_start|>Complexity of comparison of influence of players in simple games: Coalitional voting games appear in different forms in multi-agent systems, social choice and threshold logic. In this paper, the complexity of comparison of influence between players in coalitional voting games is characterized. The possible representations of simple games considered are simple games represented by winning coalitions, minimal winning coalitions, weighted voting game or a multiple weighted voting game. The influence of players is gauged from the viewpoint of basic player types, desirability relations and classical power indices such as Shapley-Shubik index, Banzhaf index, Holler index, Deegan-Packel index and Chow parameters. Among other results, it is shown that for a simple game represented by minimal winning coalitions, although it is easy to verify whether a player has zero or one voting power, computing the Banzhaf value of the player is #P-complete. Moreover, it is proved that multiple weighted voting games are the only representations for which it is NP-hard to verify whether the game is linear or not. For a simple game with a set W^m of minimal winning coalitions and n players, a O(n.|W^m|+(n^2)log(n)) algorithm is presented which returns `no' if the game is non-linear and returns the strict desirability ordering otherwise. The complexity of transforming simple games into compact representations is also examined.<|reference_end|> | arxiv | @article{aziz2008complexity,
title={Complexity of comparison of influence of players in simple games},
author={Haris Aziz},
journal={arXiv preprint arXiv:0809.0519},
year={2008},
archivePrefix={arXiv},
eprint={0809.0519},
primaryClass={cs.GT}
} | aziz2008complexity |
arxiv-4728 | 0809.0522 | The first-mover advantage in scientific publication | <|reference_start|>The first-mover advantage in scientific publication: Mathematical models of the scientific citation process predict a strong "first-mover" effect under which the first papers in a field will, essentially regardless of content, receive citations at a rate enormously higher than papers published later. Moreover papers are expected to retain this advantage in perpetuity -- they should receive more citations indefinitely, no matter how many other papers are published after them. We test this conjecture against data from a selection of fields and in several cases find a first-mover effect of a magnitude similar to that predicted by the theory. Were we wearing our cynical hat today, we might say that the scientist who wants to become famous is better off -- by a wide margin -- writing a modest paper in next year's hottest field than an outstanding paper in this year's. On the other hand, there are some papers, albeit only a small fraction, that buck the trend and attract significantly more citations than theory predicts despite having relatively late publication dates. We suggest that papers of this kind, though they often receive comparatively few citations overall, are probably worthy of our attention.<|reference_end|> | arxiv | @article{newman2008the,
title={The first-mover advantage in scientific publication},
author={M. E. J. Newman},
journal={Europhys. Lett. 86, 68001 (2009)},
year={2008},
doi={10.1209/0295-5075/86/68001},
archivePrefix={arXiv},
eprint={0809.0522},
primaryClass={physics.soc-ph cs.DL cs.SI}
} | newman2008the |
arxiv-4729 | 0809.0524 | Computer Art in the Former Soviet Bloc | <|reference_start|>Computer Art in the Former Soviet Bloc: Documents early computer art in the Soviet bloc and describes Marxist art theory.<|reference_end|> | arxiv | @article{engle2008computer,
title={Computer Art in the Former Soviet Bloc},
author={Eric Engle},
journal={arXiv preprint arXiv:0809.0524},
year={2008},
archivePrefix={arXiv},
eprint={0809.0524},
primaryClass={cs.MM cs.CY}
} | engle2008computer |
arxiv-4730 | 0809.0533 | Power Control in Cognitive Radio Networks: How to Cross a Multi-Lane Highway | <|reference_start|>Power Control in Cognitive Radio Networks: How to Cross a Multi-Lane Highway: We consider power control in cognitive radio networks where secondary users identify and exploit instantaneous and local spectrum opportunities without causing unacceptable interference to primary users. We qualitatively characterize the impact of the transmission power of secondary users on the occurrence of spectrum opportunities and the reliability of opportunity detection. Based on a Poisson model of the primary network, we quantify these impacts by showing that (i) the probability of spectrum opportunity decreases exponentially with respect to the transmission power of secondary users, where the exponential decay constant is given by the traffic load of primary users; (ii) reliable opportunity detection is achieved in the two extreme regimes in terms of the ratio between the transmission power of secondary users and that of primary users. Such analytical characterizations allow us to study power control for optimal transport throughput under constraints on the interference to primary users. Furthermore, we reveal the difference between detecting primary signals and detecting spectrum opportunities, and demonstrate the complex relationship between physical layer spectrum sensing and MAC layer throughput. The dependency of this PHY-MAC interaction on the application type and the use of handshake signaling such as RTS/CTS is illustrated.<|reference_end|> | arxiv | @article{ren2008power,
title={Power Control in Cognitive Radio Networks: How to Cross a Multi-Lane
Highway},
author={Wei Ren, Qing Zhao, Ananthram Swami},
journal={arXiv preprint arXiv:0809.0533},
year={2008},
archivePrefix={arXiv},
eprint={0809.0533},
primaryClass={cs.NI}
} | ren2008power |
arxiv-4731 | 0809.0536 | How to Fully Exploit the Degrees of Freedom in the Downlink of MISO Systems With Opportunistic Beamforming | <|reference_start|>How to Fully Exploit the Degrees of Freedom in the Downlink of MISO Systems With Opportunistic Beamforming: The opportunistic beamforming in the downlink of multiple-input single-output (MISO) systems forms $N$ transmit beams, usually, no more than the number of transmit antennas $N_t$. However, the degrees of freedom in this downlink is as large as $N_t^2$. That is, at most $N_t^2$ rather than only $N_t$ users can be simultaneously transmitted and thus the scheduling latency can be significantly reduced. In this paper, we focus on the opportunistic beamforming schemes with $N_t<N\le N_t^2$ transmit beams in the downlink of MISO systems over Rayleigh fading channels. We first show how to design the beamforming matrices with maximum number of transmit beams as well as least correlation between any pair of them as possible, through Fourier, Grassmannian, and mutually unbiased bases (MUB) based constructions in practice. Then, we analyze their system throughput by exploiting the asymptotic theory of extreme order statistics. Finally, our simulation results show the Grassmannian-based beamforming achieves the maximum throughput in all cases with $N_t=2$, 3, 4. However, if we want to exploit overall $N_t^2$ degrees of freedom, we shall resort to the Fourier and MUB-based constructions in the cases with $N_t=3$, 4, respectively.<|reference_end|> | arxiv | @article{xia2008how,
title={How to Fully Exploit the Degrees of Freedom in the Downlink of MISO
Systems With Opportunistic Beamforming},
author={Minghua Xia, Wenkun Wen, and Soo-Chang Kim},
journal={arXiv preprint arXiv:0809.0536},
year={2008},
archivePrefix={arXiv},
eprint={0809.0536},
primaryClass={cs.IT math.IT}
} | xia2008how |
arxiv-4732 | 0809.0539 | Signature Quantization in Fading CDMA With Limited Feedback | <|reference_start|>Signature Quantization in Fading CDMA With Limited Feedback: In this work, we analyze the performance of a signature quantization scheme for reverse-link Direct Sequence (DS)- Code Division Multiple Access (CDMA). Assuming perfect estimates of the channel and interference covariance, the receiver selects the signature that minimizes interference power or maximizes signal-to-interference plus noise ratio (SINR) for a desired user from a signature codebook. The codebook index corresponding to the optimal signature is then relayed to the user with a finite number of bits via a feedback channel. Here we are interested in the performance of a Random Vector Quantization (RVQ) codebook, which contains independent isotropically distributed vectors. Assuming arbitrary transmit power allocation, we consider additive white Gaussian noise (AWGN) channel first with no fading and subsequently, with multipath fading. We derive the corresponding SINR in a large system limit at the output of matched filter and linear minimum mean squared error (MMSE) receiver. Numerical examples show that the derived large system results give a good approximation to the performance of finite-size system and that the MMSE receiver achieves close to a single-user performance with only one feedback bit per signature element.<|reference_end|> | arxiv | @article{santipach2008signature,
title={Signature Quantization in Fading CDMA With Limited Feedback},
author={Wiroonsak Santipach},
journal={IEEE Transactions on Communications, vol. 59, no. 2, pp. 569-577,
February 2011},
year={2008},
doi={10.1109/TCOMM.2011.122110.090476},
archivePrefix={arXiv},
eprint={0809.0539},
primaryClass={cs.IT math.IT}
} | santipach2008signature |
arxiv-4733 | 0809.0545 | Frequency Locking of an Optical Cavity using LQG Integral Control | <|reference_start|>Frequency Locking of an Optical Cavity using LQG Integral Control: This paper considers the application of integral Linear Quadratic Gaussian (LQG) optimal control theory to a problem of cavity locking in quantum optics. The cavity locking problem involves controlling the error between the laser frequency and the resonant frequency of the cavity. A model for the cavity system, which comprises a piezo-electric actuator and an optical cavity is experimentally determined using a subspace identification method. An LQG controller which includes integral action is synthesized to stabilize the frequency of the cavity to the laser frequency and to reject low frequency noise. The controller is successfully implemented in the laboratory using a dSpace DSP board.<|reference_end|> | arxiv | @article{hassen2008frequency,
title={Frequency Locking of an Optical Cavity using LQG Integral Control},
author={S. Z. Sayed Hassen, M. Heurs, E. H. Huntington, I. R. Petersen},
journal={Journal of Physics B: Atomic, Molecular and Optical Physics, vol.
42, 175501, 2009},
year={2008},
doi={10.1088/0953-4075/42/17/175501},
archivePrefix={arXiv},
eprint={0809.0545},
primaryClass={quant-ph cs.SY}
} | hassen2008frequency |
arxiv-4734 | 0809.0600 | No-signaling, intractability and entanglement | <|reference_start|>No-signaling, intractability and entanglement: We consider the problem of deriving the no-signaling condition from the assumption that, as seen from a complexity theoretic perspective, the universe is not an exponential place. A fact that disallows such a derivation is the existence of {\em polynomial superluminal} gates, hypothetical primitive operations that enable superluminal signaling but not the efficient solution of intractable problems. It therefore follows, if this assumption is a basic principle of physics, either that it must be supplemented with additional assumptions to prohibit such gates, or, improbably, that no-signaling is not a universal condition. Yet, a gate of this kind is possibly implicit, though not recognized as such, in a decade-old quantum optical experiment involving position-momentum entangled photons. Here we describe a feasible modified version of the experiment that appears to explicitly demonstrate the action of this gate. Some obvious counter-claims are shown to be invalid. We believe that the unexpected possibility of polynomial superluminal operations arises because some practically measured quantum optical quantities are not describable as standard quantum mechanical observables.<|reference_end|> | arxiv | @article{srikanth2008no-signaling,
title={No-signaling, intractability and entanglement},
author={R. Srikanth},
journal={arXiv preprint arXiv:0809.0600},
year={2008},
archivePrefix={arXiv},
eprint={0809.0600},
primaryClass={quant-ph cs.CC}
} | srikanth2008no-signaling |
arxiv-4735 | 0809.0610 | A framework for the interactive resolution of multi-objective vehicle routing problems | <|reference_start|>A framework for the interactive resolution of multi-objective vehicle routing problems: The article presents a framework for the resolution of rich vehicle routing problems which are difficult to address with standard optimization techniques. We use local search on the basis of variable neighborhood search for the construction of the solutions, but embed the techniques in a flexible framework that allows the consideration of complex side constraints of the problem such as time windows, multiple depots, heterogeneous fleets, and, in particular, multiple optimization criteria. In order to identify a compromise alternative that meets the requirements of the decision maker, an interactive procedure is integrated in the resolution of the problem, allowing the modification of the preference information articulated by the decision maker. The framework is prototypically implemented in a computer system. First results of test runs on multiple depot vehicle routing problems with time windows are reported.<|reference_end|> | arxiv | @article{geiger2008a,
title={A framework for the interactive resolution of multi-objective vehicle
routing problems},
author={Martin Josef Geiger, Wolf Wenger},
journal={arXiv preprint arXiv:0809.0610},
year={2008},
archivePrefix={arXiv},
eprint={0809.0610},
primaryClass={cs.AI}
} | geiger2008a |
arxiv-4736 | 0809.0635 | Low ML-Decoding Complexity, Large Coding Gain, Full-Rate, Full-Diversity STBCs for 2 X 2 and 4 X 2 MIMO Systems | <|reference_start|>Low ML-Decoding Complexity, Large Coding Gain, Full-Rate, Full-Diversity STBCs for 2 X 2 and 4 X 2 MIMO Systems: This paper (Part of the content of this manuscript has been accepted for presentation in IEEE Globecom 2008, to be held in New Orleans) deals with low maximum likelihood (ML) decoding complexity, full-rate and full-diversity space-time block codes (STBCs), which also offer large coding gain, for the 2 transmit antenna, 2 receive antenna ($2\times 2$) and the 4 transmit antenna, 2 receive antenna ($4\times 2$) MIMO systems. Presently, the best known STBC for the $2\times2$ system is the Golden code and that for the $4\times2$ system is the DjABBA code. Following the approach by Biglieri, Hong and Viterbo, a new STBC is presented in this paper for the $2\times 2$ system. This code matches the Golden code in performance and ML-decoding complexity for square QAM constellations while it has lower ML-decoding complexity with the same performance for non-rectangular QAM constellations. This code is also shown to be \emph{information-lossless} and \emph{diversity-multiplexing gain} (DMG) tradeoff optimal. This design procedure is then extended to the $4\times 2$ system and a code, which outperforms the DjABBA code for QAM constellations with lower ML-decoding complexity, is presented. So far, the Golden code has been reported to have an ML-decoding complexity of the order of $M^4$ for square QAM of size $M$. In this paper, a scheme that reduces its ML-decoding complexity to $M^2\sqrt{M}$ is presented.<|reference_end|> | arxiv | @article{srinath2008low,
title={Low ML-Decoding Complexity, Large Coding Gain, Full-Rate, Full-Diversity
STBCs for 2 X 2 and 4 X 2 MIMO Systems},
author={K. Pavan Srinath, B. Sundar Rajan},
journal={arXiv preprint arXiv:0809.0635},
year={2008},
archivePrefix={arXiv},
eprint={0809.0635},
primaryClass={cs.IT math.IT}
} | srinath2008low |
arxiv-4737 | 0809.0662 | Improving Local Search for Fuzzy Scheduling Problems | <|reference_start|>Improving Local Search for Fuzzy Scheduling Problems: The integration of fuzzy set theory and fuzzy logic into scheduling is a rather new aspect with growing importance for manufacturing applications, resulting in various unsolved aspects. In the current paper, we investigate an improved local search technique for fuzzy scheduling problems with fitness plateaus, using a multi criteria formulation of the problem. We especially address the problem of changing job priorities over time as studied at the Sherwood Press Ltd, a Nottingham based printing company, who is a collaborator on the project.<|reference_end|> | arxiv | @article{geiger2008improving,
title={Improving Local Search for Fuzzy Scheduling Problems},
author={Martin Josef Geiger, Sanja Petrovic},
journal={Proceedings of the Post Graduate Research Conference in
Electronics, Photonics, Communications & Networks and Computing Science PREP
2004, University of Hertfordshire, Great Britain, pp. 146-147},
year={2008},
archivePrefix={arXiv},
eprint={0809.0662},
primaryClass={cs.AI}
} | geiger2008improving |
arxiv-4738 | 0809.0676 | Binary Random Sequences Obtained From Decimal Sequences | <|reference_start|>Binary Random Sequences Obtained From Decimal Sequences: This paper presents a twist to the generation of binary random sequences by starting with decimal sequences. Rather than representing the prime reciprocal sequence directly in base 2, we first write the prime reciprocal in base 10 and then convert it into the binary form. The autocorrelation and cross-correlation properties of these binary random (BRD) sequences are discussed.<|reference_end|> | arxiv | @article{thippireddy2008binary,
title={Binary Random Sequences Obtained From Decimal Sequences},
author={Suresh B. Thippireddy},
journal={arXiv preprint arXiv:0809.0676},
year={2008},
archivePrefix={arXiv},
eprint={0809.0676},
primaryClass={cs.CR}
} | thippireddy2008binary |
arxiv-4739 | 0809.0680 | The Prolog Interface to the Unstructured Information Management Architecture | <|reference_start|>The Prolog Interface to the Unstructured Information Management Architecture: In this paper we describe the design and implementation of the Prolog interface to the Unstructured Information Management Architecture (UIMA) and some of its applications in natural language processing. The UIMA Prolog interface translates unstructured data and the UIMA Common Analysis Structure (CAS) into a Prolog knowledge base, over which the developers write rules and use resolution theorem proving to search and generate new annotations over the unstructured data. These rules can explore all the previous UIMA annotations (such as, the syntactic structure, parsing statistics) and external Prolog knowledge bases (such as, Prolog WordNet and Extended WordNet) to implement a variety of tasks for the natural language analysis. We also describe applications of this logic programming interface in question analysis (such as, focus detection, answer-type and other constraints detection), shallow parsing (such as, relations in the syntactic structure), and answer selection.<|reference_end|> | arxiv | @article{fodor2008the,
title={The Prolog Interface to the Unstructured Information Management
Architecture},
author={Paul Fodor, Adam Lally, David Ferrucci},
journal={arXiv preprint arXiv:0809.0680},
year={2008},
archivePrefix={arXiv},
eprint={0809.0680},
primaryClass={cs.SE cs.IR}
} | fodor2008the |
arxiv-4740 | 0809.0686 | Energy Scaling Laws for Distributed Inference in Random Fusion Networks | <|reference_start|>Energy Scaling Laws for Distributed Inference in Random Fusion Networks: The energy scaling laws of multihop data fusion networks for distributed inference are considered. The fusion network consists of randomly located sensors distributed i.i.d. according to a general spatial distribution in an expanding region. Among the class of data fusion schemes that enable optimal inference at the fusion center for Markov random field (MRF) hypotheses, the scheme with minimum average energy consumption is bounded below by average energy of fusion along the minimum spanning tree, and above by a suboptimal scheme, referred to as Data Fusion for Markov Random Fields (DFMRF). Scaling laws are derived for the optimal and suboptimal fusion policies. It is shown that the average asymptotic energy of the DFMRF scheme is finite for a class of MRF models.<|reference_end|> | arxiv | @article{anandkumar2008energy,
title={Energy Scaling Laws for Distributed Inference in Random Fusion Networks},
author={Animashree Anandkumar, Joseph E. Yukich, Lang Tong, and Ananthram
Swami},
journal={IEEE Journal on Selected Areas in Communications, vol. 27, no. 7, pp. 1203-1217, Sept. 2009},
year={2008},
doi={10.1109/JSAC.2009.090916},
archivePrefix={arXiv},
eprint={0809.0686},
primaryClass={cs.IT cs.NI math.IT math.ST stat.TH}
} | anandkumar2008energy |
arxiv-4741 | 0809.0689 | Rational Generating Functions and Integer Programming Games | <|reference_start|>Rational Generating Functions and Integer Programming Games: We explore the computational complexity of computing pure Nash equilibria for a new class of strategic games called integer programming games with difference of piecewise linear convex payoffs. Integer programming games are games where players' action sets are integer points inside of polytopes. Using recent results from the study of short rational generating functions for encoding sets of integer points pioneered by Alexander Barvinok, we present efficient algorithms for enumerating all pure Nash equilibria, and other computations of interest, such as the pure price of anarchy, and pure threat point, when the dimension and number of "convex" linear pieces in the payoff functions are fixed. Sequential games where a leader is followed by competing followers (a Stackelberg--Nash setting) are also considered.<|reference_end|> | arxiv | @article{köppe2008rational,
title={Rational Generating Functions and Integer Programming Games},
author={Matthias Köppe, Christopher Thomas Ryan, Maurice Queyranne},
journal={Operations Research 59 (2011), no. 6, 1445-1460},
year={2008},
doi={10.1287/opre.1110.0964},
archivePrefix={arXiv},
eprint={0809.0689},
primaryClass={cs.GT math.CO}
} | köppe2008rational |
arxiv-4742 | 0809.0692 | How long should an astronomical paper be to increase its Impact? | <|reference_start|>How long should an astronomical paper be to increase its Impact?: Naively, one would expect longer papers to have larger impact (i.e., to be cited more). I tested this expectation by selecting all (~30,000) refereed papers from A&A, AJ, ApJ and MNRAS published between 2000 and 2004. These particular years were chosen so papers analyzed would not be too "fresh", but at the same time length of each article could be obtained via ADS. I find that indeed longer papers published in these four major astronomy journals are on average cited more, with a median number of citations increasing from 6 for articles 2-3 pages long to about 50 for articles ~50 pages long. I do however observe a significant "Letters effect", i.e. ApJ and A&A articles 4 pages long are cited more than articles 5-10 pages long. Also, the very few longest (>80 pages) papers are actually cited less than somewhat shorter papers. For individual journals, median citations per paper increase from 11 for ~9,300 A&A papers to 14 for ~5,300 MNRAS papers, 16 for ~2,550 AJ papers, and 20 for ~12,850 ApJ papers (including ApJ Letters and Supplement). I conclude with some semi-humorous career advice, directed especially at first-year graduate students.<|reference_end|> | arxiv | @article{stanek2008how,
title={How long should an astronomical paper be to increase its Impact?},
author={Krzysztof Zbigniew Stanek},
journal={arXiv preprint arXiv:0809.0692},
year={2008},
archivePrefix={arXiv},
eprint={0809.0692},
primaryClass={astro-ph cs.DL physics.soc-ph}
} | stanek2008how |
arxiv-4743 | 0809.0723 | A Simple Mechanism for Focused Web-harvesting | <|reference_start|>A Simple Mechanism for Focused Web-harvesting: Focused web-harvesting is deployed to realize automated and comprehensive index databases as an alternative approach to virtual topical data integration. The web-harvesting has been implemented and extended by not only specifying the targeted URLs, but also predefining human-edited harvesting parameters to improve speed and accuracy. The harvesting parameter set comprises three main components: first, the depth scale of the final harvested pages containing the desired information, counted from the first page at the targeted URLs; secondly, the focus-point number determining the exact box containing the relevant information; lastly, the combination of keywords for recognizing encountered hyperlinks to relevant images or full-texts embedded in those final pages. All parameters are accessible and fully customizable for each target by the administrators of participating institutions over an integrated web interface. A real implementation for the Indonesian Scientific Index, which covers all scientific information across Indonesia, is also briefly introduced.<|reference_end|> | arxiv | @article{akbar2008a,
title={A Simple Mechanism for Focused Web-harvesting},
author={Z. Akbar and L.T. Handoko},
journal={arXiv preprint arXiv:0809.0723},
year={2008},
number={FISIKALIPI-08079},
archivePrefix={arXiv},
eprint={0809.0723},
primaryClass={cs.IR cs.CY}
} | akbar2008a |
arxiv-4744 | 0809.0727 | Microcontroller-based System for Modular Networked Robot | <|reference_start|>Microcontroller-based System for Modular Networked Robot: A prototype of a modular networked robot for autonomous monitoring tasks, with full control over the web through a wireless connection, has been developed. The robot is equipped with a particular set of built-in analyzing tools and appropriate sensors, depending on its main purposes, to enable self-contained, real-time data acquisition and processing. The paper focuses on the microcontroller-based system that realizes the modularity. The whole system is divided into three modules: main unit, data acquisition and data processing, while the analyzed results and all aspects of the control and monitoring systems are fully accessible over an integrated web interface. This concept leads to some unique features: enhanced flexibility, since modules can be partially replaced according to user needs; easy access over the web for remote users; and low development and maintenance costs due to software-dominated components.<|reference_end|> | arxiv | @article{firmansyah2008microcontroller-based,
title={Microcontroller-based System for Modular Networked Robot},
author={I. Firmansyah, Z. Akbar, B. Hermanto and L.T. Handoko},
journal={arXiv preprint arXiv:0809.0727},
year={2008},
number={FISIKALIPI-08077},
archivePrefix={arXiv},
eprint={0809.0727},
primaryClass={cs.RO cs.CY}
} | firmansyah2008microcontroller-based |
arxiv-4745 | 0809.0728 | A Spectrum-Shaping Perspective on Cognitive Radio | <|reference_start|>A Spectrum-Shaping Perspective on Cognitive Radio: A new perspective on cognitive radio is presented, where the pre-existing legacy service is either uncoded or coded and a pair of cognitive transceivers needs to be appropriately deployed to coexist with the legacy service. The basic idea underlying the new perspective is to exploit the fact that, typically, the legacy channel is not fully loaded by the legacy service, thus leaving a non-negligible margin to accommodate the cognitive transmission. The exploitation of such a load margin is optimized by shaping the spectrum of the transmitted cognitive signal. It is shown that non-trivial coexistence of legacy and cognitive systems is possible even without sharing the legacy message with the cognitive transmitter. Surprisingly, the optimized cognitive transmitter is no longer limited by its interference power at the legacy receiver, and can always transmit at its full available device power. Analytical development and numerical illustration are presented, in particular focusing on the logarithmic growth rate, {\it i.e.}, the prelog coefficient, of cognitive transmission in the high-power regime.<|reference_end|> | arxiv | @article{zhang2008a,
title={A Spectrum-Shaping Perspective on Cognitive Radio},
author={Wenyi Zhang, Urbashi Mitra},
journal={arXiv preprint arXiv:0809.0728},
year={2008},
archivePrefix={arXiv},
eprint={0809.0728},
primaryClass={cs.IT math.IT}
} | zhang2008a |
arxiv-4746 | 0809.0733 | There exists no self-dual [24,12,10] code over F5 | <|reference_start|>There exists no self-dual [24,12,10] code over F5: Self-dual codes over F5 exist for all even lengths. The smallest length for which the largest minimum weight among self-dual codes has not been determined is 24, and the largest minimum weight is either 9 or 10. In this note, we show that there exists no self-dual [24,12,10] code over F5, using the classification of 24-dimensional odd unimodular lattices due to Borcherds.<|reference_end|> | arxiv | @article{harada2008there,
title={There exists no self-dual [24,12,10] code over F5},
author={Masaaki Harada and Akihiro Munemasa},
journal={Designs, Codes and Cryptogr. 52 (2009), 125-127},
year={2008},
archivePrefix={arXiv},
eprint={0809.0733},
primaryClass={math.CO cs.IT math.IT}
} | harada2008there |
arxiv-4747 | 0809.0737 | Malleable Coding with Fixed Reuse | <|reference_start|>Malleable Coding with Fixed Reuse: In cloud computing, storage area networks, remote backup storage, and similar settings, stored data is modified with updates from new versions. Representing information and modifying the representation are both expensive. Therefore it is desirable for the data to not only be compressed but to also be easily modified during updates. A malleable coding scheme considers both compression efficiency and ease of alteration, promoting codeword reuse. We examine the trade-off between compression efficiency and malleability cost, i.e., the difficulty of synchronizing compressed versions, measured as the length of a reused prefix portion. Through a coding theorem, the region of achievable rates and malleability is expressed as a single-letter optimization. Relationships to common information problems are also described.<|reference_end|> | arxiv | @article{varshney2008malleable,
title={Malleable Coding with Fixed Reuse},
author={Lav R. Varshney, Julius Kusuma, and Vivek K Goyal},
journal={arXiv preprint arXiv:0809.0737},
year={2008},
archivePrefix={arXiv},
eprint={0809.0737},
primaryClass={cs.IT math.IT}
} | varshney2008malleable |
arxiv-4748 | 0809.0745 | Sparse Recovery by Non-convex Optimization -- Instance Optimality | <|reference_start|>Sparse Recovery by Non-convex Optimization -- Instance Optimality: In this note, we address the theoretical properties of $\Delta_p$, a class of compressed sensing decoders that rely on $\ell^p$ minimization with 0<p<1 to recover estimates of sparse and compressible signals from incomplete and inaccurate measurements. In particular, we extend the results of Candes, Romberg and Tao, and Wojtaszczyk regarding the decoder $\Delta_1$, based on $\ell^1$ minimization, to $\Delta_p$ with 0<p<1. Our results are two-fold. First, we show that under certain sufficient conditions that are weaker than the analogous sufficient conditions for $\Delta_1$ the decoders $\Delta_p$ are robust to noise and stable in the sense that they are (2,p) instance optimal for a large class of encoders. Second, we extend the results of Wojtaszczyk to show that, like $\Delta_1$, the decoders $\Delta_p$ are (2,2) instance optimal in probability provided the measurement matrix is drawn from an appropriate distribution.<|reference_end|> | arxiv | @article{saab2008sparse,
title={Sparse Recovery by Non-convex Optimization -- Instance Optimality},
author={Rayan Saab, Ozgur Yilmaz},
journal={arXiv preprint arXiv:0809.0745},
year={2008},
archivePrefix={arXiv},
eprint={0809.0745},
primaryClass={cs.IT math.IT}
} | saab2008sparse |
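The $\Delta_p$ decoders above are defined through nonconvex $\ell^p$ minimization, which the entry treats theoretically rather than algorithmically. As a purely illustrative companion, here is a sketch of the standard iteratively reweighted least squares (IRLS) heuristic commonly used in practice to approximate such decoders in the noiseless setting (minimize ||x||_p^p subject to Ax = b); the smoothing parameter and its annealing schedule are arbitrary choices, not taken from the paper.

```python
import numpy as np

def irls_lp(A, b, p=0.5, iters=100, eps=1.0):
    """Iteratively reweighted least squares for min ||x||_p^p s.t. Ax = b,
    a common heuristic for the nonconvex l^p decoder with 0 < p < 1."""
    x = np.linalg.lstsq(A, b, rcond=None)[0]     # least-squares initialization
    for _ in range(iters):
        # weights (x_i^2 + eps)^(p/2 - 1): small entries get large weights,
        # pushing the iterate toward sparsity
        w = (x ** 2 + eps) ** (p / 2.0 - 1.0)
        D = np.diag(1.0 / w)
        # minimizer of sum_i w_i x_i^2 subject to Ax = b, in closed form
        x = D @ A.T @ np.linalg.solve(A @ D @ A.T, b)
        eps = max(eps * 0.5, 1e-12)              # anneal the smoothing term
    return x
```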
arxiv-4749 | 0809.0753 | Proposition of the Interactive Pareto Iterated Local Search Procedure - Elements and Initial Experiments | <|reference_start|>Proposition of the Interactive Pareto Iterated Local Search Procedure - Elements and Initial Experiments: The article presents an approach to interactively solve multi-objective optimization problems. While the identification of efficient solutions is supported by computational intelligence techniques on the basis of local search, the search is directed by partial preference information obtained from the decision maker. An application of the approach to biobjective portfolio optimization, modeled as the well-known knapsack problem, is reported, with experimental results for benchmark instances taken from the literature. In brief, we obtain encouraging results that show the applicability of the approach to the described problem.<|reference_end|> | arxiv | @article{geiger2008proposition,
title={Proposition of the Interactive Pareto Iterated Local Search Procedure -
Elements and Initial Experiments},
author={Martin Josef Geiger},
journal={The Fourth International Conference on Evolutionary
Multi-Criterion Optimization: Late Breaking Papers, Matsushima, Japan, March
2007, pp. 19-23},
year={2008},
archivePrefix={arXiv},
eprint={0809.0753},
primaryClass={cs.AI cs.HC}
} | geiger2008proposition |
arxiv-4750 | 0809.0755 | Bin Packing Under Multiple Objectives - a Heuristic Approximation Approach | <|reference_start|>Bin Packing Under Multiple Objectives - a Heuristic Approximation Approach: The article proposes a heuristic approximation approach to the bin packing problem under multiple objectives. In addition to the traditional objective of minimizing the number of bins, the heterogeneousness of the elements in each bin is minimized, leading to a biobjective formulation of the problem with a tradeoff between the number of bins and their heterogeneousness. An extension of the Best-Fit approximation algorithm is presented to solve the problem. Experimental investigations have been carried out on benchmark instances of different size, ranging from 100 to 1000 items. Encouraging results have been obtained, showing the applicability of the heuristic approach to the described problem.<|reference_end|> | arxiv | @article{geiger2008bin,
title={Bin Packing Under Multiple Objectives - a Heuristic Approximation
Approach},
author={Martin Josef Geiger},
journal={The Fourth International Conference on Evolutionary
Multi-Criterion Optimization: Late Breaking Papers, Matsushima, Japan, March
2007, pp. 53-56},
year={2008},
archivePrefix={arXiv},
eprint={0809.0755},
primaryClass={cs.AI}
} | geiger2008bin |
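To make the greedy idea above concrete, here is a Best-Fit-style sketch for the biobjective setting. The weighted-sum scoring and the heterogeneousness proxy (whether a bin gains a new item type) are illustrative assumptions, not the paper's exact extension.

```python
def best_fit_biobjective(items, capacity, alpha=0.5):
    """items: (size, type) pairs. Greedy Best-Fit variant trading off
    residual capacity against bin heterogeneousness via weight alpha."""
    bins = []   # each bin: {'free': remaining capacity, 'types': set of types}
    for size, typ in items:
        best, best_score = None, None
        for b in bins:
            if b['free'] >= size:
                residual = (b['free'] - size) / capacity  # tighter fit is better
                new_type = 0.0 if typ in b['types'] else 1.0
                score = alpha * residual + (1 - alpha) * new_type
                if best_score is None or score < best_score:
                    best, best_score = b, score
        if best is None:                # nothing fits: open a new bin
            best = {'free': capacity, 'types': set()}
            bins.append(best)
        best['free'] -= size
        best['types'].add(typ)
    return bins
```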
arxiv-4751 | 0809.0757 | An application of the Threshold Accepting metaheuristic for curriculum based course timetabling | <|reference_start|>An application of the Threshold Accepting metaheuristic for curriculum based course timetabling: The article presents a local search approach for the solution of timetabling problems in general, with a particular implementation for competition track 3 of the International Timetabling Competition 2007 (ITC 2007). The heuristic search procedure is based on Threshold Accepting to overcome local optima. A stochastic neighborhood is proposed and implemented, randomly removing and reassigning events from the current solution. The overall concept has been incrementally obtained from a series of experiments, which we describe in each (sub)section of the paper. As a result, we successfully derived a potential candidate solution approach for the finals of track 3 of the ITC 2007.<|reference_end|> | arxiv | @article{geiger2008an,
title={An application of the Threshold Accepting metaheuristic for curriculum
based course timetabling},
author={Martin Josef Geiger},
journal={Proceedings of the 7th International Conference on the Practice
and Theory of Automated Timetabling PATAT 2008, August 19-22, Montreal,
Canada},
year={2008},
archivePrefix={arXiv},
eprint={0809.0757},
primaryClass={cs.AI}
} | geiger2008an |
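The Threshold Accepting procedure used above follows a simple generic skeleton, sketched here; the neighborhood operator (for the timetabling case, removing and reassigning a few events) and the threshold schedule are problem-specific placeholders.

```python
def threshold_accepting(init, neighbor, cost, thresholds):
    """Generic Threshold Accepting: a candidate is accepted whenever its
    cost increase does not exceed the current threshold, so equal-or-better
    moves always pass and mildly worse ones pass early in the search.
    `thresholds` is a decreasing per-iteration schedule ending at 0."""
    current = best = init
    for T in thresholds:
        candidate = neighbor(current)     # e.g. remove and reassign events
        if cost(candidate) - cost(current) <= T:
            current = candidate
            if cost(current) < cost(best):
                best = current
    return best
```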
arxiv-4752 | 0809.0788 | Peek Arc Consistency | <|reference_start|>Peek Arc Consistency: This paper studies peek arc consistency, a reasoning technique that extends the well-known arc consistency technique for constraint satisfaction. In contrast to other more costly extensions of arc consistency that have been studied in the literature, peek arc consistency requires only linear space and quadratic time and can be parallelized in a straightforward way such that it runs in linear time with a linear number of processors. We demonstrate that for various constraint languages, peek arc consistency gives a polynomial-time decision procedure for the constraint satisfaction problem. We also present an algebraic characterization of those constraint languages that can be solved by peek arc consistency, and study the robustness of the algorithm.<|reference_end|> | arxiv | @article{bodirsky2008peek,
title={Peek Arc Consistency},
author={Manuel Bodirsky and Hubie Chen},
journal={arXiv preprint arXiv:0809.0788},
year={2008},
archivePrefix={arXiv},
eprint={0809.0788},
primaryClass={cs.AI cs.CC cs.LO}
} | bodirsky2008peek |
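One simplified reading of the technique above, for illustration only: each variable-value pair is "peeked at" by running ordinary arc consistency (AC-3) on a copy of the instance with that variable fixed; a value survives only if no domain wipeout occurs.

```python
from collections import deque

def ac3(domains, constraints):
    """AC-3. domains: {var: set of values}; constraints: {(x, y): set of
    allowed (vx, vy) pairs}, listed in both directions. False on wipeout."""
    queue = deque(constraints)
    while queue:
        x, y = queue.popleft()
        rel = constraints[(x, y)]
        pruned = {vx for vx in domains[x]
                  if not any((vx, vy) in rel for vy in domains[y])}
        if pruned:
            domains[x] -= pruned
            if not domains[x]:
                return False
            queue.extend(arc for arc in constraints if arc[1] == x)
    return True

def peek_arc_consistency(domains, constraints):
    """Keep a value only if fixing it and running AC causes no wipeout;
    reject the instance if some variable loses all of its values."""
    for x in domains:
        ok = set()
        for v in domains[x]:
            trial = {y: set(d) for y, d in domains.items()}
            trial[x] = {v}
            if ac3(trial, constraints):
                ok.add(v)
        if not ok:
            return False
        domains[x] = ok
    return True
```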
arxiv-4753 | 0809.0833 | The stable configuration in acyclic preference-based systems | <|reference_start|>The stable configuration in acyclic preference-based systems: Acyclic preferences recently appeared as an elegant way to model many distributed systems. An acyclic instance admits a unique stable configuration, which can reveal the performance of the system. In this paper, we give the statistical properties of the stable configuration for three classes of acyclic preferences: node-based preferences, distance-based preferences, and random acyclic systems. Using random overlay graphs, we prove using mean-field and fluid-limit techniques that these systems have an asymptotically continuous independent rank distribution for a proper scaling, and the analytical solution is compared to simulations. These results provide a theoretical ground for validating the performance of bandwidth-based or proximity-based unstructured systems.<|reference_end|> | arxiv | @article{mathieu2008the,
title={The stable configuration in acyclic preference-based systems},
author={Fabien Mathieu (INRIA Rocquencourt, FT R&D, INRIA Rocquencourt),
Gheorghe Postelnicu, Julien Reynier (INRIA Rocquencourt)},
journal={arXiv preprint arXiv:0809.0833},
year={2008},
number={RR-6628},
archivePrefix={arXiv},
eprint={0809.0833},
primaryClass={cs.NI}
} | mathieu2008the |
arxiv-4754 | 0809.0835 | Approximating the volume of unions and intersections of high-dimensional geometric objects | <|reference_start|>Approximating the volume of unions and intersections of high-dimensional geometric objects: We consider the computation of the volume of the union of high-dimensional geometric objects. While showing that this problem is #P-hard already for very simple bodies (i.e., axis-parallel boxes), we give a fast FPRAS for all objects where one can: (1) test whether a given point lies inside the object, (2) sample a point uniformly, (3) calculate the volume of the object in polynomial time. All three oracles can be weak, that is, just approximate. This implies that Klee's measure problem and the hypervolume indicator can be approximated efficiently even though they are #P-hard and hence cannot be solved exactly in time polynomial in the number of dimensions unless P=NP. Our algorithm also allows to approximate efficiently the volume of the union of convex bodies given by weak membership oracles. For the analogous problem of the intersection of high-dimensional geometric objects we prove #P-hardness for boxes and show that there is no multiplicative polynomial-time $2^{d^{1-\epsilon}}$-approximation for certain boxes unless NP=BPP, but give a simple additive polynomial-time $\epsilon$-approximation.<|reference_end|> | arxiv | @article{bringmann2008approximating,
title={Approximating the volume of unions and intersections of high-dimensional
geometric objects},
author={Karl Bringmann and Tobias Friedrich},
journal={Computational Geometry: Theory and Applications, Vol. 43, No. 6-7,
pages 601-610, 2010},
year={2008},
doi={10.1016/j.comgeo.2010.03.004},
archivePrefix={arXiv},
eprint={0809.0835},
primaryClass={cs.CG cs.NE}
} | bringmann2008approximating |
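The three oracles listed above (membership test, uniform sampling, exact volume) are exactly what the classical Karp-Luby estimator for unions needs; here is a minimal sketch for the #P-hard special case of axis-parallel boxes.

```python
import random

def union_volume_estimate(boxes, samples=100_000):
    """Karp-Luby-style estimate of the volume of a union of axis-parallel
    boxes; each box is a list of (lo, hi) intervals, one per dimension."""
    def vol(box):
        v = 1.0
        for lo, hi in box:
            v *= hi - lo
        return v

    def contains(box, x):
        return all(lo <= xi <= hi for (lo, hi), xi in zip(box, x))

    vols = [vol(b) for b in boxes]
    total = sum(vols)
    acc = 0.0
    for _ in range(samples):
        i = random.choices(range(len(boxes)), weights=vols)[0]  # box ~ volume
        x = [random.uniform(lo, hi) for lo, hi in boxes[i]]     # uniform point
        cover = sum(contains(b, x) for b in boxes)  # multiplicity of coverage
        acc += 1.0 / cover       # corrects for points counted multiple times
    return total * acc / samples
```

The estimator is unbiased: the expectation of 1/cover equals vol(union)/total, so the returned value converges to the union volume.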
arxiv-4755 | 0809.0840 | HEP data analysis using jHepWork and Java | <|reference_start|>HEP data analysis using jHepWork and Java: The role of Java in high-energy physics and recent progress in the development of jHepWork, a platform-independent data-analysis framework, are discussed. The framework produces professional graphics and has many libraries for data manipulation.<|reference_end|> | arxiv | @article{chekanov2008hep,
title={HEP data analysis using jHepWork and Java},
author={S. Chekanov},
journal={arXiv preprint arXiv:0809.0840},
year={2008},
number={ANL-HEP-CP-08-53},
archivePrefix={arXiv},
eprint={0809.0840},
primaryClass={cs.CE hep-ex hep-ph}
} | chekanov2008hep |
arxiv-4756 | 0809.0853 | Estimating divergence functionals and the likelihood ratio by convex risk minimization | <|reference_start|>Estimating divergence functionals and the likelihood ratio by convex risk minimization: We develop and analyze $M$-estimation methods for divergence functionals and the likelihood ratios of two probability distributions. Our method is based on a non-asymptotic variational characterization of $f$-divergences, which allows the problem of estimating divergences to be tackled via convex empirical risk optimization. The resulting estimators are simple to implement, requiring only the solution of standard convex programs. We present an analysis of consistency and convergence for these estimators. Given conditions only on the ratios of densities, we show that our estimators can achieve optimal minimax rates for the likelihood ratio and the divergence functionals in certain regimes. We derive an efficient optimization algorithm for computing our estimates, and illustrate their convergence behavior and practical viability by simulations.<|reference_end|> | arxiv | @article{nguyen2008estimating,
title={Estimating divergence functionals and the likelihood ratio by convex
risk minimization},
author={XuanLong Nguyen, Martin J. Wainwright, Michael I. Jordan},
journal={IEEE Transactions on Information Theory, 56(11), 5847--5861, 2010},
year={2008},
doi={10.1109/TIT.2010.2068870},
archivePrefix={arXiv},
eprint={0809.0853},
primaryClass={math.ST cs.IT math.IT stat.TH}
} | nguyen2008estimating |
arxiv-4757 | 0809.0874 | Between the Information Economy and Student Recruitment: Present Conjuncture and Future Prospects | <|reference_start|>Between the Information Economy and Student Recruitment: Present Conjuncture and Future Prospects: In university programs and curricula, we generally react to market needs. We respond to market stimulus, or at least try to do so. Consider now an inverted view. Consider our data and perspectives in university programs as reflecting and indeed presaging economic trends. In this article I pursue this line of thinking. I show how various past events fit very well into this new view. I explain why some technology trends happened as they did, and why some current developments are important now.<|reference_end|> | arxiv | @article{murtagh2008between,
title={Between the Information Economy and Student Recruitment: Present
Conjuncture and Future Prospects},
author={Fionn Murtagh},
journal={CEPIS UPGRADE, vol. IX, no. 5, pp. 56-64, Oct. 2008},
year={2008},
archivePrefix={arXiv},
eprint={0809.0874},
primaryClass={cs.CY cs.GL}
} | murtagh2008between |
arxiv-4758 | 0809.0884 | On the role of metaphor in information visualization | <|reference_start|>On the role of metaphor in information visualization: The concept of metaphor, in particular graphical (or visual) metaphor, is central to the field of information visualization. Information graphics and interactive information visualization systems employ a variety of metaphorical devices to make abstract, complex, voluminous, or otherwise difficult-to-comprehend information understandable in graphical terms. This paper explores the use of metaphor in information visualization, advancing the theory previously argued by Johnson, Lakoff, Tversky et al. that many information graphics are metaphorically understood in terms of cognitively entrenched spatial patterns known as image schemas. These patterns serve to structure and constrain abstract reasoning processes via metaphorical projection operations that are grounded in everyday perceptual experiences with phenomena such as containment, movement, and force dynamics. Building on previous research, I argue that information graphics promote comprehension of their target information through the use of graphical patterns that invoke these preexisting schematic structures. I further theorize that the degree of structural alignment of a particular graphic with one or more corresponding image schemas accounts for its perceived degree of intuitiveness. Accordingly, image schema theory can provide a powerful explanatory and predictive framework for visualization research. I review relevant theories of analogy and metaphor, and discuss the image schematic properties of several common types of information graphic. I conclude with the proposal that the inventory of image schemas culled from linguistic studies can serve as the basis for an inventory of design elements suitable for developing intuitive and effective new information visualization techniques.<|reference_end|> | arxiv | @article{risch2008on,
title={On the role of metaphor in information visualization},
author={John S. Risch},
journal={arXiv preprint arXiv:0809.0884},
year={2008},
archivePrefix={arXiv},
eprint={0809.0884},
primaryClass={cs.HC cs.GR}
} | risch2008on |
arxiv-4759 | 0809.0908 | Reduced Complexity Demodulation and Equalization Scheme for Differential Impulse Radio UWB Systems with ISI | <|reference_start|>Reduced Complexity Demodulation and Equalization Scheme for Differential Impulse Radio UWB Systems with ISI: In this paper, we consider the demodulation and equalization problem of differential Impulse Radio (IR) Ultra-WideBand (UWB) systems with Inter-Symbol Interference (ISI). Differential IR UWB systems have been extensively discussed recently; their advantages include a simple receiver front-end structure. One challenge in the demodulation and equalization of such systems with ISI is that the systems have a rather complex model: the input and output signals of the systems follow a second-order Volterra model, and furthermore, the noise at the output is data dependent. In this paper, we propose a reduced-complexity joint demodulation and equalization algorithm. The algorithm is based on reformulating the nearest-neighbor decoding problem as a mixed quadratic program and utilizing a semi-definite relaxation. The numerical results show that the proposed demodulation and equalization algorithm has low computational complexity, and at the same time, has almost the same error probability performance as the maximum-likelihood decoding algorithm.<|reference_end|> | arxiv | @article{ma2008reduced,
title={Reduced Complexity Demodulation and Equalization Scheme for Differential
Impulse Radio UWB Systems with ISI},
author={Xudong Ma},
journal={Proceeding of the IEEE Sarnoff Symposium, Princeton NJ, March 30 -
April 1, 2009},
year={2008},
archivePrefix={arXiv},
eprint={0809.0908},
primaryClass={cs.IT math.IT}
} | ma2008reduced |
arxiv-4760 | 0809.0916 | Irreversible Monte Carlo Algorithms for Efficient Sampling | <|reference_start|>Irreversible Monte Carlo Algorithms for Efficient Sampling: Equilibrium systems evolve according to Detailed Balance (DB). This principle has guided the development of Monte Carlo sampling techniques, of which the Metropolis-Hastings (MH) algorithm is the most famous representative. It is also known that DB is sufficient but not necessary. We construct an irreversible deformation of a given reversible algorithm that is capable of dramatically improving sampling from a known distribution. Our transformation modifies transition rates while keeping the structure of transitions intact. To illustrate the general scheme we design an Irreversible version of Metropolis-Hastings (IMH) and test it on the example of a spin cluster. Standard MH for this model suffers from critical slowdown, while IMH is free of it.<|reference_end|> | arxiv | @article{turitsyn2008irreversible,
title={Irreversible Monte Carlo Algorithms for Efficient Sampling},
author={Konstantin S. Turitsyn, Michael Chertkov, Marija Vucelja},
journal={arXiv preprint arXiv:0809.0916},
year={2008},
doi={10.1016/j.physd.2010.10.003},
archivePrefix={arXiv},
eprint={0809.0916},
primaryClass={cond-mat.stat-mech cs.IT math.IT math.PR stat.AP}
} | turitsyn2008irreversible |
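For intuition, a minimal irreversible sampler in the spirit of the entry above is the classic lifted random walk on a ring (not the paper's IMH construction for spin clusters): a persistent direction variable flips only on rejection, so detailed balance is broken while the target distribution remains stationary.

```python
import random

def lifted_mcmc(pi, steps):
    """Irreversible (lifted) sampler for a positive target pi on Z_N.
    State (i, s): propose i -> i+s; on rejection flip the direction s.
    One checks that pi(i)/2 on each (i, s) is stationary, yet the chain
    violates detailed balance and sweeps through high-probability
    regions ballistically instead of diffusing."""
    N = len(pi)
    i, s = 0, 1
    visits = [0] * N
    for _ in range(steps):
        j = (i + s) % N
        if random.random() < min(1.0, pi[j] / pi[i]):
            i = j          # accepted: keep moving in the same direction
        else:
            s = -s         # rejected: reverse direction instead of staying
        visits[i] += 1
    return visits          # empirical frequencies approach pi
```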
arxiv-4761 | 0809.0918 | Intersecting random graphs and networks with multiple adjacency constraints: A simple example | <|reference_start|>Intersecting random graphs and networks with multiple adjacency constraints: A simple example: When studying networks using random graph models, one is sometimes faced with situations where the notion of adjacency between nodes reflects multiple constraints. Traditional random graph models are insufficient to handle such situations. A simple idea to account for multiple constraints consists in taking the intersection of random graphs. In this paper we initiate the study of random graphs so obtained through a simple example. We examine the intersection of an Erdos-Renyi graph with one-dimensional geometric random graphs. We investigate the zero-one laws for the property that there are no isolated nodes. When the geometric component is defined on the unit circle, a full zero-one law is established and we determine its critical scaling. When the geometric component lies in the unit interval, there is a gap, in that the obtained zero and one laws are found to express deviations from different critical scalings. In particular, the first moment method requires a larger critical scaling than in the unit circle case in order to obtain the one law. This discrepancy is somewhat surprising given that the zero-one laws for the absence of isolated nodes are identical in the geometric random graphs on both the unit interval and unit circle.<|reference_end|> | arxiv | @article{anthapadmanabhan2008intersecting,
title={Intersecting random graphs and networks with multiple adjacency
constraints: A simple example},
author={N. Prasanth Anthapadmanabhan and Armand M. Makowski},
journal={arXiv preprint arXiv:0809.0918},
year={2008},
archivePrefix={arXiv},
eprint={0809.0918},
primaryClass={cs.IT math.IT math.PR}
} | anthapadmanabhan2008intersecting |
arxiv-4762 | 0809.0922 | Superposition for Fixed Domains | <|reference_start|>Superposition for Fixed Domains: Superposition is an established decision procedure for a variety of first-order logic theories represented by sets of clauses. A satisfiable theory, saturated by superposition, implicitly defines a minimal term-generated model for the theory. Proving universal properties with respect to a saturated theory directly leads to a modification of the minimal model's term-generated domain, as new Skolem functions are introduced. For many applications, this is not desired. Therefore, we propose the first superposition calculus that can explicitly represent existentially quantified variables and can thus compute with respect to a given domain. This calculus is sound and refutationally complete in the limit for a first-order fixed domain semantics. For saturated Horn theories and classes of positive formulas, we can even employ the calculus to prove properties of the minimal model itself, going beyond the scope of known superposition-based approaches.<|reference_end|> | arxiv | @article{horbach2008superposition,
title={Superposition for Fixed Domains},
author={Matthias Horbach, Christoph Weidenbach},
journal={arXiv preprint arXiv:0809.0922},
year={2008},
archivePrefix={arXiv},
eprint={0809.0922},
primaryClass={cs.AI cs.LO}
} | horbach2008superposition |
arxiv-4763 | 0809.0949 | Efficient Implementation of the Generalized Tunstall Code Generation Algorithm | <|reference_start|>Efficient Implementation of the Generalized Tunstall Code Generation Algorithm: A method is presented for constructing a Tunstall code in time linear in the number of output items. This is an improvement on the state of the art for non-Bernoulli sources, including Markov sources, which require a (suboptimal) generalization of Tunstall's algorithm proposed by Savari and analytically examined by Tabus and Rissanen. In general, if n is the total number of output leaves across all Tunstall trees, s is the number of trees (states), and D is the number of leaves of each internal node, then this method takes O((1+(log s)/D) n) time and O(n) space.<|reference_end|> | arxiv | @article{baer2008efficient,
title={Efficient Implementation of the Generalized Tunstall Code Generation
Algorithm},
author={Michael B. Baer},
journal={arXiv preprint arXiv:0809.0949},
year={2008},
archivePrefix={arXiv},
eprint={0809.0949},
primaryClass={cs.IT cs.DS math.IT}
} | baer2008efficient |
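For reference, the basic Tunstall construction for a memoryless source is sketched below with a heap, which already gives O(n log n) time; the entry above is precisely about removing the logarithmic factor and handling sources with states.

```python
import heapq

def tunstall(p, num_words):
    """Basic Tunstall dictionary for a memoryless source with symbol
    probabilities p: repeatedly expand the most probable parse word.
    Heap version, O(n log n); the paper's point is that O(n) suffices."""
    heap = [(-pi, (i,)) for i, pi in enumerate(p)]   # the initial D leaves
    heapq.heapify(heap)
    while len(heap) + len(p) - 1 <= num_words:       # each expansion adds D-1
        prob, word = heapq.heappop(heap)             # most probable leaf
        for i, pi in enumerate(p):
            heapq.heappush(heap, (prob * pi, word + (i,)))
    return sorted(word for _, word in heap)
```

For example, tunstall([0.7, 0.3], 4) returns [(0,0,0), (0,0,1), (0,1), (1,)], a complete dictionary of four variable-length source words mapped to fixed-length indices.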
arxiv-4764 | 0809.0961 | MOOPPS: An Optimization System for Multi Objective Scheduling | <|reference_start|>MOOPPS: An Optimization System for Multi Objective Scheduling: In the current paper, we present an optimization system solving multi objective production scheduling problems (MOOPPS). The identification of Pareto optimal alternatives, or at least a close approximation of them, is possible with a set of implemented metaheuristics. Necessary control parameters can easily be adjusted by the decision maker as the whole software is fully menu-driven. This allows the comparison of different metaheuristic algorithms for the considered problem instances. Results are visualized by a graphical user interface showing the distribution of solutions in outcome space as well as their corresponding Gantt chart representation. The identification of a most preferred solution from the set of efficient solutions is supported by a module based on the aspiration interactive method (AIM): the decision maker successively defines aspiration levels until a single solution is chosen. After successfully competing in the finals in Ronneby, Sweden, the MOOPPS software was awarded the European Academic Software Award 2002 (http://www.bth.se/llab/easa_2002.nsf)<|reference_end|> | arxiv | @article{geiger2008moopps:,
title={MOOPPS: An Optimization System for Multi Objective Scheduling},
author={Martin Josef Geiger},
journal={Proceedings of the Metaheuristics International Conference MIC
2005, Vienna, Austria, pp. 403-408},
year={2008},
archivePrefix={arXiv},
eprint={0809.0961},
primaryClass={cs.AI cs.HC}
} | geiger2008moopps: |
arxiv-4765 | 0809.1017 | Entropy Concentration and the Empirical Coding Game | <|reference_start|>Entropy Concentration and the Empirical Coding Game: We give a characterization of Maximum Entropy/Minimum Relative Entropy inference by providing two `strong entropy concentration' theorems. These theorems unify and generalize Jaynes' `concentration phenomenon' and Van Campenhout and Cover's `conditional limit theorem'. The theorems characterize exactly in what sense a prior distribution Q conditioned on a given constraint, and the distribution P, minimizing the relative entropy D(P ||Q) over all distributions satisfying the constraint, are `close' to each other. We then apply our theorems to establish the relationship between entropy concentration and a game-theoretic characterization of Maximum Entropy Inference due to Topsoe and others.<|reference_end|> | arxiv | @article{grunwald2008entropy,
title={Entropy Concentration and the Empirical Coding Game},
author={Peter Grunwald},
journal={arXiv preprint arXiv:0809.1017},
year={2008},
archivePrefix={arXiv},
eprint={0809.1017},
primaryClass={cs.IT cs.LG math.IT math.ST stat.ME stat.TH}
} | grunwald2008entropy |
arxiv-4766 | 0809.1019 | Moving and resizing of the screen objects | <|reference_start|>Moving and resizing of the screen objects: The shape and size of the objects we see on the screen when an application is running are defined at design time. By using some sort of adaptive interface, developers give users a chance to resize these objects or, on rare occasions, even change them, but all such changes are predetermined by the developer; the user cannot go beyond the designer's scenario. Making each and every element moveable/resizable and giving users full control of these processes changes the whole idea of applications: programs become user-driven and significantly increase the effectiveness of users' work. This article is about an instrument that turns any screen object into a moveable/resizable one.<|reference_end|> | arxiv | @article{andreyev2008moving,
title={Moving and resizing of the screen objects},
author={Sergey Andreyev},
journal={arXiv preprint arXiv:0809.1019},
year={2008},
archivePrefix={arXiv},
eprint={0809.1019},
primaryClass={cs.HC}
} | andreyev2008moving |
arxiv-4767 | 0809.1039 | High-SNR Analysis of Outage-Limited Communications with Bursty and Delay-Limited Information | <|reference_start|>High-SNR Analysis of Outage-Limited Communications with Bursty and Delay-Limited Information: This work analyzes the high-SNR asymptotic error performance of outage-limited communications with fading, where the number of bits that arrive at the transmitter during any time slot is random but the delivery of bits at the receiver must adhere to a strict delay limitation. Specifically, bit errors are caused by erroneous decoding at the receiver or violation of the strict delay constraint. Under certain scaling of the statistics of the bit-arrival process with SNR, this paper shows that the optimal decay behavior of the asymptotic total probability of bit error depends on how fast the burstiness of the source scales down with SNR. If the source burstiness scales down too slowly, the total probability of error is asymptotically dominated by delay-violation events. On the other hand, if the source burstiness scales down too quickly, the total probability of error is asymptotically dominated by channel-error events. However, at the proper scaling, where the burstiness scales linearly with 1/sqrt(log SNR) and at the optimal coding duration and transmission rate, the occurrences of channel errors and delay-violation errors are asymptotically balanced. In this latter case, the optimal exponent of the total probability of error reveals a tradeoff that addresses the question of how much of the allowable time and rate should be used for gaining reliability over the channel and how much for accommodating the burstiness with delay constraints.<|reference_end|> | arxiv | @article{kittipiyakul2008high-snr,
title={High-SNR Analysis of Outage-Limited Communications with Bursty and
Delay-Limited Information},
author={Somsak Kittipiyakul, Petros Elia, and Tara Javidi},
journal={arXiv preprint arXiv:0809.1039},
year={2008},
doi={10.1109/TIT.2008.2010005},
archivePrefix={arXiv},
eprint={0809.1039},
primaryClass={cs.IT math.IT}
} | kittipiyakul2008high-snr |
arxiv-4768 | 0809.1043 | On Unique Decodability | <|reference_start|>On Unique Decodability: In this paper we propose a revisitation of the topic of unique decodability and of some fundamental theorems of lossless coding. It is widely believed that, for any discrete source X, every "uniquely decodable" block code satisfies E[l(X_1 X_2 ... X_n)]>= H(X_1,X_2,...,X_n), where X_1, X_2,...,X_n are the first n symbols of the source, E[l(X_1 X_2 ... X_n)] is the expected length of the code for those symbols and H(X_1,X_2,...,X_n) is their joint entropy. We show that, for certain sources with memory, the above inequality only holds when a limiting definition of "uniquely decodable code" is considered. In particular, the above inequality is usually assumed to hold for any "practical code" due to a debatable application of McMillan's theorem to sources with memory. We thus propose a clarification of the topic, also providing an extended version of McMillan's theorem to be used for Markovian sources.<|reference_end|> | arxiv | @article{dalai2008on,
title={On Unique Decodability},
author={Marco Dalai, Riccardo Leonardi},
journal={arXiv preprint arXiv:0809.1043},
year={2008},
doi={10.1109/TIT.2008.929941},
archivePrefix={arXiv},
eprint={0809.1043},
primaryClass={cs.IT math.IT}
} | dalai2008on |
arxiv-4769 | 0809.1053 | An impossibility result for process discrimination | <|reference_start|>An impossibility result for process discrimination: Two series of binary observations $x_1,x_2,...$ and $y_1,y_2,...$ are presented: at each time $n\in\mathbb{N}$ we are given $x_n$ and $y_n$. It is assumed that the sequences are generated independently of each other by two B-processes. We are interested in the question of whether the sequences represent a typical realization of two different processes or of the same one. We demonstrate that this is impossible to decide, in the sense that every discrimination procedure is bound to err with non-negligible frequency when presented with sequences from some B-processes. This contrasts with earlier positive results on B-processes, in particular those showing that there are consistent $\bar d$-distance estimates for this class of processes.<|reference_end|> | arxiv | @article{ryabko2008an,
title={An impossibility result for process discrimination},
author={Daniil Ryabko (INRIA Lille - Nord Europe)},
journal={arXiv preprint arXiv:0809.1053},
year={2008},
archivePrefix={arXiv},
eprint={0809.1053},
primaryClass={math.PR cs.IT math.IT math.ST stat.TH}
} | ryabko2008an |
arxiv-4770 | 0809.1061 | A Novel Proportional Fairness Criterion for Throughput Allocation in Multirate IEEE 802.11 | <|reference_start|>A Novel Proportional Fairness Criterion for Throughput Allocation in Multirate IEEE 802.11: This paper focuses on multirate IEEE 802.11 Wireless LANs employing the mandatory Distributed Coordination Function (DCF) option. Its aim is threefold. First, starting from the multi-dimensional Markovian state transition model proposed by Malone et al. for characterizing the behavior of the IEEE 802.11 protocol at the Medium Access Control layer, it presents an extension accounting for packet transmission failures due to channel errors. Second, it establishes the conditions under which a network constituted by $N$ stations, each station transmitting with its own bit rate, $R^{(s)}_d$, and packet rate, $\lambda_s$, can be assumed loaded. Finally, it proposes a modified Proportional Fairness (PF) criterion, suitable for mitigating the \textit{rate anomaly} problem of multirate loaded IEEE 802.11 Wireless LANs employing the mandatory DCF option. Compared to the widely adopted assumption of a saturated network, the proposed fairness criterion can be applied to general loaded networks. The throughput allocation resulting from the proposed algorithm is able to greatly increase the aggregate throughput of the DCF, while ensuring fairness levels among the stations of the same order as the ones guaranteed by the classical PF criterion. Simulation results are presented for some sample scenarios, confirming the effectiveness of the proposed criterion for optimized throughput allocation.<|reference_end|> | arxiv | @article{laddomada2008a,
title={A Novel Proportional Fairness Criterion for Throughput Allocation in
Multirate IEEE 802.11},
author={M. Laddomada, F. Mesiti, M. Mondin, and F. Daneshgaran},
journal={arXiv preprint arXiv:0809.1061},
year={2008},
archivePrefix={arXiv},
eprint={0809.1061},
primaryClass={cs.NI}
} | laddomada2008a |
arxiv-4771 | 0809.1077 | Variable Neighborhood Search for the University Lecturer-Student Assignment Problem | <|reference_start|>Variable Neighborhood Search for the University Lecturer-Student Assignment Problem: The paper presents a study of local search heuristics in general, and variable neighborhood search in particular, for the resolution of an assignment problem arising in the practical work of universities. Here, students have to be assigned to scientific topics which are proposed and supported by members of staff. The problem involves optimization under given preferences of students, which may be expressed when applying for certain topics. We observe that variable neighborhood search leads to superior results for the tested problem instances. One instance is taken from an actual case, while others have been generated based on the real-world data to support a deeper analysis. An extension of the problem has been formulated by integrating a second objective function that balances the workload of the members of staff while maximizing the utility of the students. The algorithmic approach has been prototypically implemented in a computer system. One important aspect in this context is the application of the research work to problems of other scientific institutions, and therefore the provision of decision support functionalities.<|reference_end|> | arxiv | @article{geiger2008variable,
title={Variable Neighborhood Search for the University Lecturer-Student
Assignment Problem},
author={Martin Josef Geiger, Wolf Wenger},
journal={arXiv preprint arXiv:0809.1077},
year={2008},
archivePrefix={arXiv},
eprint={0809.1077},
primaryClass={cs.AI}
} | geiger2008variable |
arxiv-4772 | 0809.1132 | Managing Varying Worst Case Execution Times on DVS Platforms | <|reference_start|>Managing Varying Worst Case Execution Times on DVS Platforms: Energy-efficient real-time task scheduling has attracted a lot of attention in the past decade. Most of the time, deterministic execution lengths for tasks were considered, but this model fits reality less and less, especially with the increasing number of multimedia applications. This is why a lot of research is starting to consider stochastic models, where execution times are only known stochastically. However, authors assume fairly precise knowledge about the properties of the system, especially regarding the worst-case execution time (or worst-case execution cycles, WCEC). In this work, we relax this hypothesis and assume that the WCEC can vary. We propose several methods to react to such a situation, and give many simulation results attesting that, with a small effort, we can provide very good results, keeping a low deadline miss rate as well as an energy consumption similar to that of clairvoyant algorithms.<|reference_end|> | arxiv | @article{berten2008managing,
title={Managing Varying Worst Case Execution Times on DVS Platforms},
author={Vandy Berten, Chi-Ju Chang, Tei-Wei Kuo},
journal={arXiv preprint arXiv:0809.1132},
year={2008},
archivePrefix={arXiv},
eprint={0809.1132},
primaryClass={cs.OS}
} | berten2008managing |
arxiv-4773 | 0809.1138 | Derivation of evolutionary payoffs from observable behavior | <|reference_start|>Derivation of evolutionary payoffs from observable behavior: Interpretation of animal behavior, especially as cooperative or selfish, is a challenge for evolutionary theory. The strategy of a competition should follow from the corresponding Darwinian payoffs for the available behavioral options. The payoffs and decision making processes, however, are difficult to observe and quantify. Here we present a general method for the derivation of evolutionary payoffs from observable statistics of interactions. The method is applied to combat of male bowl and doily spiders, to predator inspection by sticklebacks and to territorial defense by lions, demonstrating animal behavior as a new type of game theoretical equilibrium. Games animals play may be derived unequivocally from their observable behavior; the reconstruction, however, can be subject to fundamental limitations due to our inability to observe all information exchange mechanisms (communication).<|reference_end|> | arxiv | @article{feigel2008derivation,
title={Derivation of evolutionary payoffs from observable behavior},
author={Alexander Feigel, Avraham Englander and Assaf Engel},
journal={arXiv preprint arXiv:0809.1138},
year={2008},
archivePrefix={arXiv},
eprint={0809.1138},
primaryClass={q-bio.PE cs.GT physics.soc-ph}
} | feigel2008derivation |
arxiv-4774 | 0809.1171 | Minkowski Sum Selection and Finding | <|reference_start|>Minkowski Sum Selection and Finding: For the \textsc{Minkowski Sum Selection} problem with linear objective functions, we obtain the following results: (1) optimal $O(n\log n)$ time algorithms for $\lambda=1$; (2) $O(n\log^2 n)$ time deterministic algorithms and expected $O(n\log n)$ time randomized algorithms for any fixed $\lambda>1$. For the \textsc{Minkowski Sum Finding} problem with linear objective functions or objective functions of the form $f(x,y)=\frac{by}{ax}$, we construct optimal $O(n\log n)$ time algorithms for any fixed $\lambda\geq 1$.<|reference_end|> | arxiv | @article{luo2008minkowski,
title={Minkowski Sum Selection and Finding},
author={Cheng-Wei Luo, Hsiao-Fei Liu, Peng-An Chen, and Kun-Mao Chao},
journal={arXiv preprint arXiv:0809.1171},
year={2008},
archivePrefix={arXiv},
eprint={0809.1171},
primaryClass={cs.DS cs.CG}
} | luo2008minkowski |
arxiv-4775 | 0809.1177 | Amdahl's and Gustafson-Barsis laws revisited | <|reference_start|>Amdahl's and Gustafson-Barsis laws revisited: The paper presents a simple derivation of the Gustafson-Barsis law from Amdahl's law. In the computer literature these two laws describing the speedup limits of parallel applications are derived separately. It is shown that, treating the time of the execution of the sequential part of the application as a constant, the Gustafson-Barsis law can be obtained from Amdahl's law in a few lines, and that the popular claim that the Gustafson-Barsis law overthrows Amdahl's law is a mistake.<|reference_end|> | arxiv | @article{karbowski2008amdahl's,
title={Amdahl's and Gustafson-Barsis laws revisited},
author={Andrzej Karbowski},
journal={arXiv preprint arXiv:0809.1177},
year={2008},
archivePrefix={arXiv},
eprint={0809.1177},
primaryClass={cs.DC cs.GT cs.PF}
} | karbowski2008amdahl's |
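The few-line derivation the abstract refers to can be reconstructed as follows (our notation; the paper's own presentation may differ).

```latex
% Amdahl, fixed total work, serial fraction f of the one-processor time:
\[
  S_A(n) \;=\; \frac{T_1}{T_n} \;=\; \frac{1}{f + (1-f)/n}.
\]
% Now hold the serial *time* s constant and give each of the n processors
% parallel work of duration c. The parallel run takes T_n = s + c, while a
% single processor would need T_1 = s + nc, hence
\[
  S(n) \;=\; \frac{s + n c}{s + c} \;=\; a + (1-a)\,n,
  \qquad a \;=\; \frac{s}{s+c},
\]
% which is the Gustafson--Barsis law, with a the serial fraction measured
% on the parallel system.
```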
arxiv-4776 | 0809.1181 | Sector and Sphere: Towards Simplified Storage and Processing of Large Scale Distributed Data | <|reference_start|>Sector and Sphere: Towards Simplified Storage and Processing of Large Scale Distributed Data: Cloud computing has demonstrated that processing very large datasets over commodity clusters can be done simply given the right programming model and infrastructure. In this paper, we describe the design and implementation of the Sector storage cloud and the Sphere compute cloud. In contrast to existing storage and compute clouds, Sector can manage data not only within a data center, but also across geographically distributed data centers. Similarly, the Sphere compute cloud supports User Defined Functions (UDF) over data both within a data center and across data centers. As a special case, MapReduce style programming can be implemented in Sphere by using a Map UDF followed by a Reduce UDF. We describe some experimental studies comparing Sector/Sphere and Hadoop using the Terasort Benchmark. In these studies, Sector is about twice as fast as Hadoop. Sector/Sphere is open source.<|reference_end|> | arxiv | @article{gu2008sector,
title={Sector and Sphere: Towards Simplified Storage and Processing of Large
Scale Distributed Data},
author={Yunhong Gu and Robert L Grossman},
journal={arXiv preprint arXiv:0809.1181},
year={2008},
archivePrefix={arXiv},
eprint={0809.1181},
primaryClass={cs.DC}
} | gu2008sector |
arxiv-4777 | 0809.1205 | On Information-Theoretic Scaling Laws for Wireless Networks | <|reference_start|>On Information-Theoretic Scaling Laws for Wireless Networks: With the analysis of the hierarchical scheme, the potential influence of the pre-constant in deriving scaling laws is exposed. It is found that a modified hierarchical scheme can achieve a throughput arbitrarily many times higher than that of the original one, although it is still vanishingly small compared to linear scaling. The study demonstrates the essential importance of the throughput formula itself, rather than the scaling laws consequently derived.<|reference_end|> | arxiv | @article{xie2008on,
title={On Information-Theoretic Scaling Laws for Wireless Networks},
author={Liang-Liang Xie},
journal={arXiv preprint arXiv:0809.1205},
year={2008},
archivePrefix={arXiv},
eprint={0809.1205},
primaryClass={cs.IT math.IT}
} | xie2008on |
arxiv-4778 | 0809.1208 | Bounds on the Capacity of the Relay Channel with States at the Source | <|reference_start|>Bounds on the Capacity of the Relay Channel with States at the Source: This paper has been withdrawn by the authors<|reference_end|> | arxiv | @article{zaidi2008bounds,
title={Bounds on the Capacity of the Relay Channel with States at the Source},
author={Abdellatif Zaidi and Luc Vandendorpe},
journal={arXiv preprint arXiv:0809.1208},
year={2008},
archivePrefix={arXiv},
eprint={0809.1208},
primaryClass={cs.IT math.IT}
} | zaidi2008bounds |
arxiv-4779 | 0809.1226 | Applications of Universal Source Coding to Statistical Analysis of Time Series | <|reference_start|>Applications of Universal Source Coding to Statistical Analysis of Time Series: We show how universal codes can be used for solving some of the most important statistical problems for time series. By definition, a universal code (or a universal lossless data compressor) can compress any sequence generated by a stationary and ergodic source asymptotically to the Shannon entropy, which, in turn, is the best achievable ratio for lossless data compressors. We consider finite-alphabet and real-valued time series and the following problems: estimation of the limiting probabilities for finite-alphabet time series and estimation of the density for real-valued time series; on-line prediction, regression, and classification (or problems with side information) for both types of time series; and the following problems of hypothesis testing: goodness-of-fit testing (or identity testing) and testing of serial independence. It is important to note that all problems are considered in the framework of classical mathematical statistics and, on the other hand, everyday methods of data compression (or archivers) can be used as a tool for the estimation and testing. It turns out that, quite often, the suggested methods and tests are more powerful than known ones when applied in practice.<|reference_end|> | arxiv | @article{ryabko2008applications,
title={Applications of Universal Source Coding to Statistical Analysis of Time
Series},
author={Boris Ryabko},
journal={arXiv preprint arXiv:0809.1226},
year={2008},
archivePrefix={arXiv},
eprint={0809.1226},
primaryClass={cs.IT cs.AI math.IT math.ST stat.TH}
} | ryabko2008applications |
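A toy illustration of the theme above, using an off-the-shelf archiver (zlib) as a stand-in for a universal code: sequences from the same source should compress jointly almost as well as separately. This mirrors the spirit of compression-based testing, not any of the paper's actual procedures.

```python
import zlib

def clen(data):
    return len(zlib.compress(data, 9))

def joint_ratio(x, y):
    """Near or below the value for a same-source pair suggests shared
    statistics; markedly higher suggests different sources."""
    return clen(x + y) / (clen(x) + clen(y))

a = b"the quick brown fox jumps over the lazy dog " * 50
b = b"the quick brown fox jumps over the lazy dog " * 50
c = bytes((i * 97) % 251 for i in range(2048))

print(joint_ratio(a, b))   # low: the concatenation compresses almost for free
print(joint_ratio(a, c))   # higher: the two parts share little structure
```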
arxiv-4780 | 0809.1236 | Bounded Underapproximations | <|reference_start|>Bounded Underapproximations: We show a new and constructive proof of the following language-theoretic result: for every context-free language L, there is a bounded context-free language L' included in L which has the same Parikh (commutative) image as L. Bounded languages, introduced by Ginsburg and Spanier, are subsets of regular languages of the form w1*w2*...wk* for some finite words w1,...,wk. In particular bounded subsets of context-free languages have nice structural and decidability properties. Our proof proceeds in two parts. First, using Newton's iterations on the language semiring, we construct a context-free subset Ls of L that can be represented as a sequence of substitutions on a linear language and has the same Parikh image as L. Second, we inductively construct a Parikh-equivalent bounded context-free subset of Ls. We show two applications of this result in model checking: to underapproximate the reachable state space of multithreaded procedural programs and to underapproximate the reachable state space of recursive counter programs. The bounded language constructed above provides a decidable underapproximation for the original problems. By iterating the construction, we get a semi-algorithm for the original problems that constructs a sequence of underapproximations such that no two underapproximations of the sequence can be compared. This provides a progress guarantee: every word w in L is in some underapproximation of the sequence. In addition, we show that our approach subsumes context-bounded reachability for multithreaded programs.<|reference_end|> | arxiv | @article{ganty2008bounded,
title={Bounded Underapproximations},
author={Pierre Ganty, Rupak Majumdar, Benjamin Monmege},
journal={Formal Methods in System Design 40(2) (2012) 206-231},
year={2008},
doi={10.1007/s10703-011-0136-y},
archivePrefix={arXiv},
eprint={0809.1236},
primaryClass={cs.LO}
} | ganty2008bounded |
arxiv-4781 | 0809.1241 | A New Framework of Multistage Estimation | <|reference_start|>A New Framework of Multistage Estimation: In this paper, we have established a unified framework of multistage parameter estimation. We demonstrate that a wide variety of statistical problems such as fixed-sample-size interval estimation, point estimation with error control, bounded-width confidence intervals, interval estimation following hypothesis testing, construction of confidence sequences, can be cast into the general framework of constructing sequential random intervals with prescribed coverage probabilities. We have developed exact methods for the construction of such sequential random intervals in the context of multistage sampling. In particular, we have established inclusion principle and coverage tuning techniques to control and adjust the coverage probabilities of sequential random intervals. We have obtained concrete sampling schemes which are unprecedentedly efficient in terms of sampling effort as compared to existing procedures.<|reference_end|> | arxiv | @article{chen2008a,
title={A New Framework of Multistage Estimation},
author={Xinjia Chen},
journal={arXiv preprint arXiv:0809.1241},
year={2008},
doi={10.1103/PhysRevE.79.026307},
archivePrefix={arXiv},
eprint={0809.1241},
primaryClass={math.ST cs.LG math.PR stat.ME stat.TH}
} | chen2008a |
arxiv-4782 | 0809.1252 | Maximum Entropy Rate of Markov Sources for Systems With Non-regular Constraints | <|reference_start|>Maximum Entropy Rate of Markov Sources for Systems With Non-regular Constraints: Using the concept of discrete noiseless channels, it was shown by Shannon in A Mathematical Theory of Communication that the ultimate performance of an encoder for a constrained system is limited by the combinatorial capacity of the system if the constraints define a regular language. In the present work, it is shown that this is not an inherent property of regularity but holds in general. To show this, constrained systems are described by generating functions and random walks on trees.<|reference_end|> | arxiv | @article{böcherer2008maximum,
title={Maximum Entropy Rate of Markov Sources for Systems With Non-regular
Constraints},
author={Georg Böcherer, Valdemar Cardoso da Rocha Jr. and Cecilio Pimentel},
journal={arXiv preprint arXiv:0809.1252},
year={2008},
archivePrefix={arXiv},
eprint={0809.1252},
primaryClass={cs.IT math.IT}
} | böcherer2008maximum |
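For the regular case that the entry above generalizes beyond, Shannon's result is directly computable: the combinatorial capacity equals the log of the spectral radius of the constraint graph's adjacency matrix. A classic example is the constraint forbidding two consecutive ones.

```python
import numpy as np

# Combinatorial capacity of a regular constrained system: log2 of the
# spectral radius of the constraint graph's adjacency matrix.
# States track the last emitted bit; the substring '11' is forbidden.
A = np.array([[1, 1],    # after a 0: may emit 0 or 1
              [1, 0]])   # after a 1: may only emit 0
capacity = np.log2(np.abs(np.linalg.eigvals(A)).max())
print(capacity)          # log2((1+sqrt(5))/2) ~ 0.6942 bits per symbol
```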
arxiv-4783 | 0809.1257 | The Golden Ratio Encoder | <|reference_start|>The Golden Ratio Encoder: This paper proposes a novel Nyquist-rate analog-to-digital (A/D) conversion algorithm which achieves exponential accuracy in the bit-rate despite using imperfect components. The proposed algorithm is based on a robust implementation of a beta-encoder where the value of the base beta is equal to the golden mean. It was previously shown that beta-encoders can be implemented in such a way that their exponential accuracy is robust against threshold offsets in the quantizer element. This paper extends this result by allowing for imperfect analog multipliers with imprecise gain values as well. A formal computational model for algorithmic encoders and a general test bed for evaluating their robustness are also proposed.<|reference_end|> | arxiv | @article{daubechies2008the,
title={The Golden Ratio Encoder},
author={I. Daubechies, C.S. Güntürk, Y. Wang, Ö. Yilmaz},
journal={arXiv preprint arXiv:0809.1257},
year={2008},
doi={10.1109/TIT.2010.2059750},
archivePrefix={arXiv},
eprint={0809.1257},
primaryClass={cs.IT math.IT}
} | daubechies2008the |
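A toy software model of the beta-encoder underlying the entry above, with base equal to the golden mean; the greedy expansion below illustrates the exponential accuracy, while the hardware robustness analysis (imprecise thresholds and multipliers) is beyond this sketch:

import math

BETA = (1 + math.sqrt(5)) / 2  # the golden mean

def encode(x, n_bits, threshold=1.0):
    # greedy beta-expansion of x in [0, 1)
    bits, u = [], x
    for _ in range(n_bits):
        u *= BETA
        b = 1 if u >= threshold else 0
        bits.append(b)
        u -= b
    return bits

def decode(bits):
    return sum(b * BETA ** -(i + 1) for i, b in enumerate(bits))

x = 0.625
print(abs(x - decode(encode(x, 40))))  # error decays like BETA**(-n)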
arxiv-4784 | 0809.1258 | Network Protection Codes Against Link Failures Using Network Coding | <|reference_start|>Network Protection Codes Against Link Failures Using Network Coding: Protecting against link failures in communication networks is essential to increase robustness, accessibility, and reliability of data transmission. Recently, network coding has been proposed as a solution to provide agile and cost efficient network protection against link failures, which does not require data rerouting, or packet retransmission. To achieve this, separate paths have to be provisioned to carry encoded packets, hence requiring either the addition of extra links, or reserving some of the resources for this purpose. In this paper, we propose network protection codes against a single link failure using network coding, where a separate path using reserved links is not needed. In this case portions of the link capacities are used to carry the encoded packets. The scheme is extended to protect against multiple link failures and can be implemented at an overlay layer. Although this leads to reducing the network capacity, the network capacity reduction is asymptotically small in most cases of practical interest. We demonstrate that such network protection codes are equivalent to error correcting codes for erasure channels. Finally, we study the encoding and decoding operations of such codes over the binary field.<|reference_end|> | arxiv | @article{aly2008network,
title={Network Protection Codes Against Link Failures Using Network Coding},
author={Salah A. Aly, Ahmed E. Kamal},
journal={Proc. of IEEE Globecom 08, New Orleans, LA, 2008},
year={2008},
doi={10.1109/GLOCOM.2008.ECP.516},
archivePrefix={arXiv},
eprint={0809.1258},
primaryClass={cs.IT cs.NI math.IT}
} | aly2008network |
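For the entry above, single-failure protection over the binary field reduces to a parity across the working connections; a minimal sketch, with packet framing and the overlay signaling assumed away:

from functools import reduce

def xor_bytes(a, b):
    return bytes(x ^ y for x, y in zip(a, b))

def protect(packets):
    # parity packet carried on the reserved portion of link capacity
    return reduce(xor_bytes, packets)

def recover(surviving, parity):
    # XOR of the parity with all surviving packets restores the lost one
    return reduce(xor_bytes, surviving, parity)

data = [b"\x01\x02", b"\xff\x00", b"\x10\x20"]
p = protect(data)
assert recover([data[0], data[2]], p) == data[1]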
arxiv-4785 | 0809.1264 | Tight Bounds on Minimum Maximum Pointwise Redundancy | <|reference_start|>Tight Bounds on Minimum Maximum Pointwise Redundancy: This paper presents new lower and upper bounds for the optimal compression of binary prefix codes in terms of the most probable input symbol, where compression efficiency is determined by the nonlinear codeword length objective of minimizing maximum pointwise redundancy. This objective relates to both universal modeling and Shannon coding, and these bounds are tight throughout the interval. The upper bounds also apply to a related objective, that of dth exponential redundancy.<|reference_end|> | arxiv | @article{baer2008tight,
title={Tight Bounds on Minimum Maximum Pointwise Redundancy},
author={Michael Baer},
journal={arXiv preprint arXiv:0809.1264},
year={2008},
archivePrefix={arXiv},
eprint={0809.1264},
primaryClass={cs.IT math.IT}
} | baer2008tight |
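The objective bounded in the entry above can be evaluated directly for any feasible code; a short sketch computing the maximum pointwise redundancy of Shannon-code lengths (one simple feasible code, not the optimum the paper bounds):

import math

def shannon_lengths(probs):
    # Shannon code: l_i = ceil(-log2 p_i), always satisfies Kraft
    return [math.ceil(-math.log2(p)) for p in probs]

def max_pointwise_redundancy(probs, lengths):
    # pointwise redundancy of symbol i is l_i + log2(p_i)
    return max(l + math.log2(p) for p, l in zip(probs, lengths))

probs = [0.5, 0.25, 0.15, 0.1]
print(max_pointwise_redundancy(probs, shannon_lengths(probs)))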
arxiv-4786 | 0809.1270 | Predictive Hypothesis Identification | <|reference_start|>Predictive Hypothesis Identification: While statistics focusses on hypothesis testing and on estimating (properties of) the true sampling distribution, in machine learning the performance of learning algorithms on future data is the primary issue. In this paper we bridge the gap with a general principle (PHI) that identifies hypotheses with best predictive performance. This includes predictive point and interval estimation, simple and composite hypothesis testing, (mixture) model selection, and others as special cases. For concrete instantiations we will recover well-known methods, variations thereof, and new ones. PHI nicely justifies, reconciles, and blends (a reparametrization invariant variation of) MAP, ML, MDL, and moment estimation. One particular feature of PHI is that it can genuinely deal with nested hypotheses.<|reference_end|> | arxiv | @article{hutter2008predictive,
title={Predictive Hypothesis Identification},
author={Marcus Hutter},
journal={arXiv preprint arXiv:0809.1270},
year={2008},
archivePrefix={arXiv},
eprint={0809.1270},
primaryClass={cs.LG math.ST stat.ML stat.TH}
} | hutter2008predictive |
arxiv-4787 | 0809.1300 | What makes a good role model | <|reference_start|>What makes a good role model: The role model strategy is introduced as a method for designing an estimator by approaching the output of a superior estimator that has better input observations. This strategy is shown to yield the optimal Bayesian estimator when a Markov condition is fulfilled. Two examples involving simple channels are given to illustrate its use. The strategy is combined with time averaging to construct a statistical model by numerically solving a convex program. The role model strategy was developed in the context of low complexity decoder design for iterative decoding. Potential applications outside the field of communications are discussed.<|reference_end|> | arxiv | @article{sayir2008what,
title={What makes a good role model},
author={Jossy Sayir},
journal={arXiv preprint arXiv:0809.1300},
year={2008},
archivePrefix={arXiv},
eprint={0809.1300},
primaryClass={cs.IT math.IT}
} | sayir2008what |
arxiv-4788 | 0809.1318 | A Fuzzy Commitment Scheme | <|reference_start|>In this paper, an attempt has been made to explain a fuzzy commitment scheme. In conventional commitment schemes, both the committed string m and a valid opening key are required to enable the sender to prove the commitment. However, there could be many instances where the transmission involves noise or minor errors arising purely from factors over which neither the sender nor the receiver has any control. The fuzzy commitment scheme presented in this paper accepts an opening key that is close to the original one in a suitable distance metric, but not necessarily identical. The concept itself is illustrated with the help of a simple situation.<|reference_end|> | arxiv | @article{al-saggaf2008a,
title={A Fuzzy Commitment Scheme},
author={Alawi A. Al-saggaf, H. S. Acharya},
journal={arXiv preprint arXiv:0809.1318},
year={2008},
archivePrefix={arXiv},
eprint={0809.1318},
primaryClass={cs.CR}
} | al-saggaf2008a |
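A minimal sketch of a fuzzy commitment in the style of Juels and Wattenberg for the entry above; the concrete choices below (SHA-256 as the binding hash, a repetition code with factor 3, bit-list encoding) are ours for illustration:

import hashlib
import secrets

R = 3  # repetition factor; corrects up to 1 flipped bit per block

def rep_encode(msg):
    return [b for b in msg for _ in range(R)]

def rep_decode(code):
    # majority vote per block of R bits
    return [1 if sum(code[i:i + R]) * 2 > R else 0
            for i in range(0, len(code), R)]

def xor(a, b):
    return [x ^ y for x, y in zip(a, b)]

def digest(bits):
    return hashlib.sha256(bytes(bits)).hexdigest()

def commit(witness):
    # pick a random codeword c and publish (hash(c), witness XOR c)
    msg = [secrets.randbelow(2) for _ in range(len(witness) // R)]
    c = rep_encode(msg)
    return digest(c), xor(witness, c)

def open_with(commitment, witness_close):
    h, offset = commitment
    c_noisy = xor(witness_close, offset)           # close to c if witness is close
    return digest(rep_encode(rep_decode(c_noisy))) == h

w = [1, 0, 1, 1, 0, 1]
com = commit(w)
assert open_with(com, [1, 0, 1, 1, 0, 0])  # one flipped bit still opens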
arxiv-4789 | 0809.1330 | Low-Complexity Coding and Source-Optimized Clustering for Large-Scale Sensor Networks | <|reference_start|>We consider the distributed source coding problem in which correlated data picked up by scattered sensors has to be encoded separately and transmitted to a common receiver, subject to a rate-distortion constraint. Although near-to-optimal solutions based on Turbo and LDPC codes exist for this problem, in most cases the proposed techniques do not scale to networks of hundreds of sensors. We present a scalable solution based on the following key elements: (a) distortion-optimized index assignments for low-complexity distributed quantization, (b) source-optimized hierarchical clustering based on the Kullback-Leibler distance and (c) sum-product decoding on specific factor graphs exploiting the correlation of the data.<|reference_end|> | arxiv | @article{maierbacher2008low-complexity,
title={Low-Complexity Coding and Source-Optimized Clustering for Large-Scale
Sensor Networks},
author={G. Maierbacher, J. Barros},
journal={arXiv preprint arXiv:0809.1330},
year={2008},
archivePrefix={arXiv},
eprint={0809.1330},
primaryClass={cs.IT math.IT}
} | maierbacher2008low-complexity |
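Element (b) of the entry above can be sketched with standard tools: hierarchical clustering of sensors from a Kullback-Leibler-based distance matrix. The symmetrized form below is our assumption, since the linkage routine expects a symmetric distance:

import numpy as np
from scipy.cluster.hierarchy import linkage
from scipy.spatial.distance import squareform

def sym_kl(p, q):
    # symmetrized Kullback-Leibler distance between two pmfs (entries > 0)
    return float(np.sum(p * np.log(p / q) + q * np.log(q / p)))

def cluster_sensors(pmfs):
    # pmfs: one empirical distribution per sensor, all strictly positive
    n = len(pmfs)
    D = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            D[i, j] = D[j, i] = sym_kl(pmfs[i], pmfs[j])
    return linkage(squareform(D), method="average")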
arxiv-4790 | 0809.1344 | The Balanced Unicast and Multicast Capacity Regions of Large Wireless Networks | <|reference_start|>The Balanced Unicast and Multicast Capacity Regions of Large Wireless Networks: We consider the question of determining the scaling of the $n^2$-dimensional balanced unicast and the $n 2^n$-dimensional balanced multicast capacity regions of a wireless network with $n$ nodes placed uniformly at random in a square region of area $n$ and communicating over Gaussian fading channels. We identify this scaling of both the balanced unicast and multicast capacity regions in terms of $\Theta(n)$, out of $2^n$ total possible, cuts. These cuts only depend on the geometry of the locations of the source nodes and their destination nodes and the traffic demands between them, and thus can be readily evaluated. Our results are constructive and provide optimal (in the scaling sense) communication schemes.<|reference_end|> | arxiv | @article{niesen2008the,
title={The Balanced Unicast and Multicast Capacity Regions of Large Wireless
Networks},
author={Urs Niesen, Piyush Gupta, Devavrat Shah},
journal={IEEE Transactions on Information Theory, vol. 56, pp. 2249-2271,
May 2010},
year={2008},
doi={10.1109/TIT.2010.2043979},
archivePrefix={arXiv},
eprint={0809.1344},
primaryClass={cs.IT math.IT}
} | niesen2008the |
arxiv-4791 | 0809.1348 | MBBP for improved iterative channel decoding in 802.16e WiMAX systems | <|reference_start|>We propose the application of multiple-bases belief-propagation, an optimized iterative decoding method, to a set of rate-1/2 LDPC codes from the IEEE 802.16e WiMAX standard. The presented approach allows for improved decoding performance when signaling over the AWGN channel. As all required operations for this method can be run in parallel, the decoding delay of this method and standard belief-propagation decoding are equal. The obtained results are compared to the performance of LDPC codes optimized with the progressive edge-growth algorithm and to bounds from information theory. It will be shown that the discussed method mitigates the gap to the well-known random coding bound by about 20 percent.<|reference_end|> | arxiv | @article{hehn2008mbbp,
title={MBBP for improved iterative channel decoding in 802.16e WiMAX systems},
author={Thorsten Hehn, Johannes B. Huber, Stefan Laendner},
journal={arXiv preprint arXiv:0809.1348},
year={2008},
archivePrefix={arXiv},
eprint={0809.1348},
primaryClass={cs.IT math.IT}
} | hehn2008mbbp |
arxiv-4792 | 0809.1366 | Network Coding Security: Attacks and Countermeasures | <|reference_start|>By allowing intermediate nodes to perform non-trivial operations on packets, such as mixing data from multiple streams, network coding breaks with the ruling store-and-forward networking paradigm and opens a myriad of challenging security questions. Following a brief overview of emerging network coding protocols, we provide a taxonomy of their security vulnerabilities, which highlights the differences between attack scenarios in which network coding is particularly vulnerable and other relevant cases in which the intrinsic properties of network coding allow for stronger and more efficient security solutions than classical routing. Furthermore, we give practical examples where network coding can be combined with classical cryptography both for secure communication and secret key distribution. Throughout the paper we identify a number of research challenges deemed relevant towards the applicability of secure network coding in practical networks.<|reference_end|> | arxiv | @article{lima2008network,
title={Network Coding Security: Attacks and Countermeasures},
author={Luísa Lima, João P. Vilela, Paulo F. Oliveira and João Barros},
journal={arXiv preprint arXiv:0809.1366},
year={2008},
archivePrefix={arXiv},
eprint={0809.1366},
primaryClass={cs.CR cs.IT cs.NI math.IT}
} | lima2008network |
arxiv-4793 | 0809.1379 | A Max-Flow Min-Cut Theorem with Applications in Small Worlds and Dual Radio Networks | <|reference_start|>A Max-Flow Min-Cut Theorem with Applications in Small Worlds and Dual Radio Networks: Intrigued by the capacity of random networks, we start by proving a max-flow min-cut theorem that is applicable to any random graph obeying a suitably defined independence-in-cut property. We then show that this property is satisfied by relevant classes, including small world topologies, which are pervasive in both man-made and natural networks, and wireless networks of dual devices, which exploit multiple radio interfaces to enhance the connectivity of the network. In both cases, we are able to apply our theorem and derive max-flow min-cut bounds for network information flow.<|reference_end|> | arxiv | @article{costa2008a,
title={A Max-Flow Min-Cut Theorem with Applications in Small Worlds and Dual
Radio Networks},
author={Rui A. Costa, Joao Barros},
journal={arXiv preprint arXiv:0809.1379},
year={2008},
archivePrefix={arXiv},
eprint={0809.1379},
primaryClass={cs.IT cs.DM math.IT}
} | costa2008a |
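The max-flow min-cut relation exploited in the entry above can be checked empirically on a small-world topology; a sketch using networkx with unit capacities (graph parameters are arbitrary):

import networkx as nx

# Watts-Strogatz small-world graph, made directed, with unit capacities
G = nx.watts_strogatz_graph(50, k=4, p=0.1, seed=1).to_directed()
nx.set_edge_attributes(G, 1, "capacity")
s, t = 0, 25
flow = nx.maximum_flow_value(G, s, t)
cut = nx.minimum_cut_value(G, s, t)
assert flow == cut  # max-flow equals min-cut on every instance
print(flow)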
arxiv-4794 | 0809.1398 | Stability of Maximum likelihood based clustering methods: exploring the backbone of classifications (Who is keeping you in that community?) | <|reference_start|>Components of complex systems are often classified according to the way they interact with each other. In graph theory such groups are known as clusters or communities. Many different techniques have been recently proposed to detect them, some of which involve inference methods using either Bayesian or Maximum Likelihood approaches. In this article, we study a statistical model designed for detecting clusters based on connection similarity. The basic assumption of the model is that the graph was generated by a certain grouping of the nodes and an Expectation Maximization algorithm is employed to infer that grouping. We show that the method admits further development to yield a stability analysis of the groupings that quantifies the extent to which each node influences its neighbors' group membership. Our approach naturally allows for the identification of the key elements responsible for the grouping and their resilience to changes in the network. Given the generality of the assumptions underlying the statistical model, such nodes are likely to play special roles in the original system. We illustrate this point by analyzing several empirical networks for which further information about the properties of the nodes is available. The search and identification of stabilizing nodes thus constitutes a novel technique to characterize the relevance of nodes in complex networks.<|reference_end|> | arxiv | @article{mungan2008stability,
title={Stability of Maximum likelihood based clustering methods: exploring the
backbone of classifications (Who is keeping you in that community?)},
author={Muhittin Mungan, Jose J. Ramasco},
journal={J. Stat. Mech. (2010) P04028},
year={2008},
archivePrefix={arXiv},
eprint={0809.1398},
primaryClass={physics.soc-ph cond-mat.stat-mech cs.IT math.IT physics.comp-ph physics.data-an}
} | mungan2008stability |
arxiv-4795 | 0809.1409 | Domain Specific Software Architecture for Design Center Automation | <|reference_start|>Domain specific software architecture aims at software reuse through construction of a domain architecture reference model. The constructed reference model presents a set of individual components and their interaction points. When starting on a new large software project, the design engineer starts with the pre-constructed model, which can be easily browsed, and picks up opportunities for reuse in the new solution design. This report discusses the application of domain reference design methods by deriving a domain specific reference architecture for a product ordering system in a design center. The product in this case is in-stock and special-order blinds from different manufacturers in a large supply store. The development of a mature domain specific reference software architecture for this domain is not the objective of this report. However, this report would like to capture the method used in one such process, and that is its primary concern. This report lists subjective details of such a process applied to the domain of ordering custom and in-stock blinds from a large home construction and goods supply store. This report also describes the detailed process of derivation of knowledge models, unified knowledge models and the reference architecture for this domain. However, this domain model is only partially complete and may not be used for any real applications. This report is a result of a course project undertaken while studying this methodology.<|reference_end|> | arxiv | @article{sinha2008domain,
title={Domain Specific Software Architecture for Design Center Automation},
author={Anshuman Sinha, Haritha Nandela, Vijaya Balakrishna},
journal={arXiv preprint arXiv:0809.1409},
year={2008},
archivePrefix={arXiv},
eprint={0809.1409},
primaryClass={cs.SE}
} | sinha2008domain |
arxiv-4796 | 0809.1437 | How applicable is Python as first computer language for teaching programming in a pre-university educational environment, from a teacher's point of view? | <|reference_start|>This project report attempts to evaluate the educational properties of the Python computer language, in practice. This is done by examining computer language evolution history, related scientific background work, the existing educational research on computer languages, and Python's experimental application in higher secondary education in Greece during the first half of 2002. This Thesis Report was delivered in advance of a thesis defense for a Masters/Doctorandus (MSc/Drs) title with the Amstel Institute/Universiteit van Amsterdam, during the same year.<|reference_end|> | arxiv | @article{georgatos2008how,
title={How applicable is Python as first computer language for teaching
programming in a pre-university educational environment, from a teacher's
point of view?},
author={Fotis Georgatos},
journal={arXiv preprint arXiv:0809.1437},
year={2008},
archivePrefix={arXiv},
eprint={0809.1437},
primaryClass={cs.PL cs.CY}
} | georgatos2008how |
arxiv-4797 | 0809.1465 | Probabilistic Systems with LimSup and LimInf Objectives | <|reference_start|>Probabilistic Systems with LimSup and LimInf Objectives: We give polynomial-time algorithms for computing the values of Markov decision processes (MDPs) with limsup and liminf objectives. A real-valued reward is assigned to each state, and the value of an infinite path in the MDP is the limsup (resp. liminf) of all rewards along the path. The value of an MDP is the maximal expected value of an infinite path that can be achieved by resolving the decisions of the MDP. Using our result on MDPs, we show that turn-based stochastic games with limsup and liminf objectives can be solved in NP \cap coNP.<|reference_end|> | arxiv | @article{chatterjee2008probabilistic,
title={Probabilistic Systems with LimSup and LimInf Objectives},
author={Krishnendu Chatterjee and Thomas A. Henzinger},
journal={arXiv preprint arXiv:0809.1465},
year={2008},
archivePrefix={arXiv},
eprint={0809.1465},
primaryClass={cs.GT cs.LO}
} | chatterjee2008probabilistic |
arxiv-4798 | 0809.1476 | Obtaining Exact Interpolation Multivariate Polynomial by Approximation | <|reference_start|>In some fields, such as mathematics mechanization, automated reasoning, and trustworthy computing, exact results are needed. Symbolic computations are used to obtain such exact results, but they are of high complexity. To improve the situation, exact interpolating methods are often proposed for exact results and approximate interpolating methods for approximate ones. In this paper, we study how to obtain an exact interpolation polynomial with rational coefficients by approximate interpolating methods.<|reference_end|> | arxiv | @article{feng2008obtaining,
title={Obtaining Exact Interpolation Multivariate Polynomial by Approximation},
author={Yong Feng, Jingzhong Zhang, Xiaolin Qin, Xun Yuan},
journal={arXiv preprint arXiv:0809.1476},
year={2008},
archivePrefix={arXiv},
eprint={0809.1476},
primaryClass={cs.SC cs.CG}
} | feng2008obtaining |
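One way to realize the idea of the entry above, sketched with standard numerics: interpolate approximately in floating point, then snap each coefficient to a nearby rational. This recovers the exact polynomial only when the true coefficients are rationals with small denominators and the numerical error stays below the reconstruction gap; the univariate case and the bound 10**6 are our simplifications:

from fractions import Fraction
import numpy as np

def exact_interpolation(xs, ys, max_den=10**6):
    coeffs = np.polyfit(xs, ys, len(xs) - 1)        # approximate step
    return [Fraction(c).limit_denominator(max_den)  # exact recovery step
            for c in coeffs]

# true polynomial: (1/3)x^2 - (5/7)x + 2, sampled at three points
xs = [0.0, 1.0, 2.0]
ys = [1/3 * x**2 - 5/7 * x + 2 for x in xs]
print(exact_interpolation(xs, ys))  # [1/3, -5/7, 2]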
arxiv-4799 | 0809.1489 | An optimal local approximation algorithm for max-min linear programs | <|reference_start|>An optimal local approximation algorithm for max-min linear programs: We present a local algorithm (constant-time distributed algorithm) for approximating max-min LPs. The objective is to maximise $\omega$ subject to $Ax \le 1$, $Cx \ge \omega 1$, and $x \ge 0$ for nonnegative matrices $A$ and $C$. The approximation ratio of our algorithm is the best possible for any local algorithm; there is a matching unconditional lower bound.<|reference_end|> | arxiv | @article{floréen2008an,
title={An optimal local approximation algorithm for max-min linear programs},
author={Patrik Floréen, Joel Kaasinen, Petteri Kaski, Jukka Suomela},
journal={arXiv preprint arXiv:0809.1489},
year={2008},
doi={10.1145/1583991.1584058},
archivePrefix={arXiv},
eprint={0809.1489},
primaryClass={cs.DC}
} | floréen2008an |
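For reference alongside the entry above, the centralized version of the program is a plain LP; a scipy sketch (the paper's contribution is the constant-time local algorithm, which this baseline does not attempt):

import numpy as np
from scipy.optimize import linprog

def maxmin_lp(A, C):
    # maximize omega subject to Ax <= 1, Cx >= omega*1, x >= 0
    m, n = A.shape
    k = C.shape[0]
    c = np.zeros(n + 1)
    c[-1] = -1.0  # linprog minimizes, so minimize -omega
    A_ub = np.block([[A, np.zeros((m, 1))],
                     [-C, np.ones((k, 1))]])
    b_ub = np.concatenate([np.ones(m), np.zeros(k)])
    res = linprog(c, A_ub=A_ub, b_ub=b_ub,
                  bounds=[(0, None)] * (n + 1), method="highs")
    return res.x[:n], -res.fun

x, omega = maxmin_lp(np.array([[1.0, 1.0]]),
                     np.array([[1.0, 0.0], [0.0, 1.0]]))
print(omega)  # 0.5 for this tiny instance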
arxiv-4800 | 0809.1493 | Exploring Large Feature Spaces with Hierarchical Multiple Kernel Learning | <|reference_start|>For supervised and unsupervised learning, positive definite kernels allow the use of large and potentially infinite-dimensional feature spaces with a computational cost that only depends on the number of observations. This is usually done through the penalization of predictor functions by Euclidean or Hilbertian norms. In this paper, we explore penalizing by sparsity-inducing norms such as the l1-norm or the block l1-norm. We assume that the kernel decomposes into a large sum of individual basis kernels which can be embedded in a directed acyclic graph; we show that it is then possible to perform kernel selection through a hierarchical multiple kernel learning framework, in polynomial time in the number of selected kernels. This framework is naturally applied to nonlinear variable selection; our extensive simulations on synthetic datasets and datasets from the UCI repository show that efficiently exploring the large feature space through sparsity-inducing norms leads to state-of-the-art predictive performance.<|reference_end|> | arxiv | @article{bach2008exploring,
title={Exploring Large Feature Spaces with Hierarchical Multiple Kernel
Learning},
author={Francis Bach (INRIA Rocquencourt)},
journal={arXiv preprint arXiv:0809.1493},
year={2008},
archivePrefix={arXiv},
eprint={0809.1493},
primaryClass={cs.LG stat.ML}
} | bach2008exploring |
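The kernel decomposition assumed in the entry above — one base kernel per input variable, combined with nonnegative weights — can be written down directly. A sketch of the sum kernel only; learning the weights with sparsity-inducing norms is left out, and uniform weights are a placeholder:

import numpy as np

def rbf_1d(x, y, gamma=1.0):
    # base kernel on a single input variable
    return np.exp(-gamma * np.subtract.outer(x, y) ** 2)

def sum_kernel(X, Y, eta):
    # K = sum_v eta_v * K_v; sparsity-inducing norms drive many
    # eta_v to zero, which is what performs kernel/variable selection
    return sum(e * rbf_1d(X[:, v], Y[:, v])
               for v, e in enumerate(eta) if e > 0)

X = np.random.rand(5, 3)
K = sum_kernel(X, X, eta=[1 / 3] * 3)
print(K.shape)  # (5, 5)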