corpus_id: stringlengths 7-12
paper_id: stringlengths 9-16
title: stringlengths 1-261
abstract: stringlengths 70-4.02k
source: stringclasses (1 value)
bibtex: stringlengths 208-20.9k
citation_key: stringlengths 6-100
arxiv-673001
cs/0506040
A Fixed-Length Coding Algorithm for DNA Sequence Compression
<|reference_start|>A Fixed-Length Coding Algorithm for DNA Sequence Compression: While achieving a compression ratio of 2.0 bits/base, the new algorithm codes non-N bases in fixed length. It dramatically reduces coding and decoding time compared with previous DNA compression algorithms and some universal compression programs.<|reference_end|>
arxiv
@article{liu2005a, title={A Fixed-Length Coding Algorithm for DNA Sequence Compression}, author={Jie Liu, Sheng Bao, Zhiqiang Jing, Shi Chen}, journal={arXiv preprint arXiv:cs/0506040}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506040}, primaryClass={cs.IT math.IT} }
liu2005a
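As a minimal sketch of the fixed-length idea in the record above: each non-N base maps to exactly 2 bits. This is not the authors' full algorithm (the paper also handles N symbols and other details); it only illustrates fixed-length 2.0 bits/base coding and decoding in Python.

CODE = {"A": 0b00, "C": 0b01, "G": 0b10, "T": 0b11}
BASE = {v: k for k, v in CODE.items()}

def encode(seq):
    # pack a non-N DNA string into bytes at exactly 2 bits per base
    bits = 0
    for b in seq:
        bits = (bits << 2) | CODE[b]
    return len(seq), bits.to_bytes((2 * len(seq) + 7) // 8, "big")

def decode(n, data):
    # unpack n bases from the big-endian bit string
    bits = int.from_bytes(data, "big")
    return "".join(BASE[(bits >> (2 * (n - 1 - i))) & 0b11] for i in range(n))

n, payload = encode("ACGTGGA")
assert decode(n, payload) == "ACGTGGA"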
arxiv-673002
cs/0506041
Competitive on-line learning with a convex loss function
<|reference_start|>Competitive on-line learning with a convex loss function: We consider the problem of sequential decision making under uncertainty in which the loss caused by a decision depends on the following binary observation. In competitive on-line learning, the goal is to design decision algorithms that are almost as good as the best decision rules in a wide benchmark class, without making any assumptions about the way the observations are generated. However, standard algorithms in this area can only deal with finite-dimensional (often countable) benchmark classes. In this paper we give similar results for decision rules ranging over an arbitrary reproducing kernel Hilbert space. For example, it is shown that for a wide class of loss functions (including the standard square, absolute, and log loss functions) the average loss of the master algorithm, over the first $N$ observations, does not exceed the average loss of the best decision rule with a bounded norm plus $O(N^{-1/2})$. Our proof technique is very different from the standard ones and is based on recent results about defensive forecasting. Given the probabilities produced by a defensive forecasting algorithm, which are known to be well calibrated and to have good resolution in the long run, we use the expected loss minimization principle to find a suitable decision.<|reference_end|>
arxiv
@article{vovk2005competitive, title={Competitive on-line learning with a convex loss function}, author={Vladimir Vovk}, journal={arXiv preprint arXiv:cs/0506041}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506041}, primaryClass={cs.LG cs.AI} }
vovk2005competitive
arxiv-673003
cs/0506042
Tree-Based Construction of LDPC Codes
<|reference_start|>Tree-Based Construction of LDPC Codes: We present a construction of LDPC codes that have minimum pseudocodeword weight equal to the minimum distance, and perform well with iterative decoding. The construction involves enumerating a d-regular tree for a fixed number of layers and employing a connection algorithm based on mutually orthogonal Latin squares to close the tree. Methods are presented for degrees d=p^s and d=p^s+1, for p a prime; one of these includes the well-known finite-geometry-based LDPC codes.<|reference_end|>
arxiv
@article{sridhara2005tree-based, title={Tree-Based Construction of LDPC Codes}, author={Deepak Sridhara, Christine Kelley, and Joachim Rosenthal}, journal={arXiv preprint arXiv:cs/0506042}, year={2005}, doi={10.1109/ISIT.2005.1523456}, archivePrefix={arXiv}, eprint={cs/0506042}, primaryClass={cs.IT math.IT} }
sridhara2005tree-based
arxiv-673004
cs/0506043
A Decision Feedback Based Scheme for Slepian-Wolf Coding of sources with Hidden Markov Correlation
<|reference_start|>A Decision Feedback Based Scheme for Slepian-Wolf Coding of sources with Hidden Markov Correlation: We consider the problem of compression of two memoryless binary sources, the correlation between which is defined by a Hidden Markov Model (HMM). We propose a Decision Feedback (DF) based scheme which when used with low density parity check codes results in compression close to the Slepian Wolf limits.<|reference_end|>
arxiv
@article{narayanan2005a, title={A Decision Feedback Based Scheme for Slepian-Wolf Coding of sources with Hidden Markov Correlation}, author={Krishna R. Narayanan and Kapil Bhattad}, journal={arXiv preprint arXiv:cs/0506043}, year={2005}, doi={10.1109/LCOMM.2006.1633329}, archivePrefix={arXiv}, eprint={cs/0506043}, primaryClass={cs.IT math.IT} }
narayanan2005a
arxiv-673005
cs/0506044
Minimal Network Coding for Multicast
<|reference_start|>Minimal Network Coding for Multicast: We give an information flow interpretation for multicasting using network coding. This generalizes the fluid model used to represent flows to a single receiver. Using the generalized model, we present a decentralized algorithm to minimize the number of packets that undergo network coding. We also propose a decentralized algorithm to construct capacity achieving multicast codes when the processing at some nodes is restricted to routing. The proposed algorithms can be coupled with existing decentralized schemes to achieve minimum cost multicast.<|reference_end|>
arxiv
@article{bhattad2005minimal, title={Minimal Network Coding for Multicast}, author={Kapil Bhattad, Niranjan Ratnakar, Ralf Koetter, and Krishna R. Narayanan}, journal={arXiv preprint arXiv:cs/0506044}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506044}, primaryClass={cs.IT math.IT} }
bhattad2005minimal
arxiv-673006
cs/0506045
Decision Feedback Based Scheme for Slepian-Wolf Coding of sources with Hidden Markov Correlation
<|reference_start|>Decision Feedback Based Scheme for Slepian-Wolf Coding of sources with Hidden Markov Correlation: We consider the problem of compression of two memoryless binary sources, the correlation between which is defined by a Hidden Markov Model (HMM). We propose a Decision Feedback (DF) based scheme which when used with low density parity check codes results in compression close to the Slepian Wolf limits.<|reference_end|>
arxiv
@article{narayanan2005decision, title={Decision Feedback Based Scheme for Slepian-Wolf Coding of sources with Hidden Markov Correlation}, author={Krishna R. Narayanan and Kapil Bhattad}, journal={arXiv preprint arXiv:cs/0506045}, year={2005}, doi={10.1109/LCOMM.2006.1633329}, archivePrefix={arXiv}, eprint={cs/0506045}, primaryClass={cs.IT math.IT} }
narayanan2005decision
arxiv-673007
cs/0506046
Dictionaries merger for text expansion in question answering
<|reference_start|>Dictionaries merger for text expansion in question answering: This paper presents an original way to add new data to a reference dictionary from several other lexical resources, without losing any consistency. This operation is carried out in order to obtain lexical information classified by the sense of the entry. This classification makes it possible to enrich utterances (in QA: the queries) according to meaning, and to reduce noise. An analysis of the problems encountered shows the interest of this method, and highlights the points that still have to be tackled.<|reference_end|>
arxiv
@article{jacquemin2005dictionaries, title={Dictionaries merger for text expansion in question answering}, author={Bernard Jacquemin (ISC)}, journal={Proceedings of COLING 2004 (2004) 1398}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506046}, primaryClass={cs.DL} }
jacquemin2005dictionaries
arxiv-673008
cs/0506047
Analyse et expansion des textes en question-r\'eponse
<|reference_start|>Analyse et expansion des textes en question-r\'eponse: This paper presents an original methodology for question answering. We noticed that query expansion is often incorrect because of a poor understanding of the question; however, correctly understanding an utterance automatically depends on the length of its context, and questions are often short. This methodology proposes to analyse the documents and to construct an informative structure from the results of the analysis and from a semantic text expansion. The linguistic analysis identifies words (tokenization and morphological analysis), links between words (syntactic analysis) and word senses (semantic disambiguation). The text expansion adds to each word the synonyms matching its sense and replaces the words in the utterances by derivatives, modifying the syntactic schema if necessary. In this way, whatever the enrichment may be, the text keeps the same meaning, but each piece of information matches many realisations. The questioning method consists in constructing a local informative structure without enrichment and matching it with the documentary structure. If a sentence in the informative structure matches the question structure, this sentence is the answer to the question.<|reference_end|>
arxiv
@article{jacquemin2005analyse, title={Analyse et expansion des textes en question-r\'{e}ponse}, author={Bernard Jacquemin (ISC)}, journal={Le poids des mots. Actes des 7es journ\'{e}es internationales d'Analyse statistique des Donn\'{e}es Textuelles (2004) 1219}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506047}, primaryClass={cs.IR} }
jacquemin2005analyse
arxiv-673009
cs/0506048
Enriching a Text by Semantic Disambiguation for Information Extraction
<|reference_start|>Enriching a Text by Semantic Disambiguation for Information Extraction: External linguistic resources have been used for a very long time in information extraction. These methods enrich a document with data that are semantically equivalent, in order to improve recall. For instance, some of these methods use synonym dictionaries. These dictionaries enrich a sentence with words that have a similar meaning. However, these methods present some serious drawbacks, since words are usually synonyms only in restricted contexts. The method we propose here consists of using word sense disambiguation (WSD) rules to restrict the selection of synonyms to only those that match a specific syntactico-semantic context. We show how WSD rules are built and how information extraction techniques can benefit from the application of these rules.<|reference_end|>
arxiv
@article{jacquemin2005enriching, title={Enriching a Text by Semantic Disambiguation for Information Extraction}, author={Bernard Jacquemin (ISC), Caroline Brun, Claude Roux}, journal={LREC 2002 Workshop Proceedings "Using semantics for information retrieval and filtering" (2002) 45-51}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506048}, primaryClass={cs.IR} }
jacquemin2005enriching
arxiv-673010
cs/0506049
Exploitation de dictionnaires \'electroniques pour la d\'esambigu\"isation s\'emantique lexicale
<|reference_start|>Exploitation de dictionnaires \'electroniques pour la d\'esambigu\"isation s\'emantique lexicale: This paper presents a lexical disambiguation system, initially developed for English and now adapted to French. This system associates a word with its meaning in a given context, using electronic dictionaries as semantically annotated corpora from which semantic disambiguation rules are extracted. We describe the rule extraction and application process as well as the evaluation of the system. The results for French give us insight into possible improvements of the nature and content of lexical resources adapted for disambiguation in this framework.<|reference_end|>
arxiv
@article{brun2005exploitation, title={Exploitation de dictionnaires \'{e}lectroniques pour la d\'{e}sambigu\"{i}sation s\'{e}mantique lexicale}, author={Caroline Brun (XRCE), Bernard Jacquemin (ISC, XRCE), Fr\'{e}d\'{e}rique Segond (XRCE)}, journal={Traitement Automatique des Langues (TAL) 42, no. 3 (2001) pp. 667-690}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506049}, primaryClass={cs.DL} }
brun2005exploitation
arxiv-673011
cs/0506050
The Workshop - Implementing Well Structured Enterprise Applications
<|reference_start|>The Workshop - Implementing Well Structured Enterprise Applications: We specify an abstraction layer to be used between an enterprise application and the utilized enterprise framework (like J2EE or .NET). This specification is called the Workshop. It provides an intuitive metaphor supporting the programmer in designing easily understandable code. We present an implementation of this specification, based upon the J2EE framework and called the JWorkshop. As a proof of concept we implement a special certification authority, called the Key Authority, based upon the JWorkshop. This certification authority runs very successfully in a variety of different real-world projects.<|reference_end|>
arxiv
@article{wiesmaier2005the, title={The Workshop - Implementing Well Structured Enterprise Applications}, author={A. Wiesmaier, V. Karatsiolis, M. Lippert, J. Buchmann}, journal={Proceedings of "The 2005 International Conference on Software Engineering Research and Practice"; June 2005}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506050}, primaryClass={cs.SE} }
wiesmaier2005the
arxiv-673012
cs/0506051
Comparison of two different implementations of a finite-difference-method for first-order pde in mathematica and matlab
<|reference_start|>Comparison of two different implementations of a finite-difference-method for first-order pde in mathematica and matlab: In this article two implementations of a symmetric finite difference algorithm for a first-order partial differential equation are discussed. The considered partial differential equation describes the time evolution of the crack length distribution of microcracks in brittle material.<|reference_end|>
arxiv
@article{herrmann2005comparison, title={Comparison of two different implementations of a finite-difference-method for first-order pde in mathematica and matlab}, author={Heiko Herrmann, Gunnar Rueckner}, journal={arXiv preprint arXiv:cs/0506051}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506051}, primaryClass={cs.CE cs.DM} }
herrmann2005comparison
arxiv-673013
cs/0506052
Comments on `Bit Interleaved Coded Modulation'
<|reference_start|>Comments on `Bit Interleaved Coded Modulation': Caire, Taricco and Biglieri presented a detailed analysis of bit interleaved coded modulation, a simple and popular technique used to improve system performance, especially in the context of fading channels. They derived an upper bound to the probability of error, called the expurgated bound. In this correspondence, the proof of the expurgated bound is shown to be flawed. A new upper bound is also derived. It is not known whether the original expurgated bound is valid for the important special case of square QAM with Gray labeling, but the new bound is very close to, and slightly tighter than, the original bound for a numerical example.<|reference_end|>
arxiv
@article{sethuraman2005comments, title={Comments on `Bit Interleaved Coded Modulation'}, author={Vignesh Sethuraman, Bruce Hajek}, journal={arXiv preprint arXiv:cs/0506052}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506052}, primaryClass={cs.IT math.IT} }
sethuraman2005comments
arxiv-673014
cs/0506053
Analysis on Transmit Antenna Selection for Spatial Multiplexing Systems: A Geometrical Approach
<|reference_start|>Analysis on Transmit Antenna Selection for Spatial Multiplexing Systems: A Geometrical Approach: Recently, the remarkable potential of multiple-input multiple-output (MIMO) wireless communication systems was unveiled, in their ability to provide spatial diversity or multiplexing gains. For MIMO diversity schemes, it is already known that, by optimal antenna selection maximizing the post-processing signal-to-noise ratio, the diversity order of the full system can be maintained. On the other hand, the diversity order achieved by antenna selection in spatial multiplexing systems, especially those exploiting practical coding and decoding schemes, has not been rigorously analyzed thus far. In this paper, from a geometric standpoint, we propose a new framework for theoretically analyzing the diversity order achieved by transmit antenna selection for separately encoded spatial multiplexing systems with linear and decision-feedback receivers. We rigorously show that a diversity order of (Nt-1)(Nr-1) can be achieved for an Nr by Nt SM system when L=2 antennas are selected from the transmit side; for L>2 scenarios, we give bounds on the achievable diversity order and show that the optimal diversity order is at least (Nt-L+1)(Nr-L+1). Furthermore, the same geometrical approach can be used to evaluate the diversity-multiplexing tradeoff curves for the considered spatial multiplexing systems with transmit antenna selection.<|reference_end|>
arxiv
@article{zhang2005analysis, title={Analysis on Transmit Antenna Selection for Spatial Multiplexing Systems: A Geometrical Approach}, author={Hongyuan Zhang, Huaiyu Dai, Quan Zhou, Brian L. Hughes}, journal={arXiv preprint arXiv:cs/0506053}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506053}, primaryClass={cs.IT math.IT} }
zhang2005analysis
arxiv-673015
cs/0506054
Efficiency Loss in a Network Resource Allocation Game: The Case of Elastic Supply
<|reference_start|>Efficiency Loss in a Network Resource Allocation Game: The Case of Elastic Supply: We consider a resource allocation problem where individual users wish to send data across a network to maximize their utility, and a cost is incurred at each link that depends on the total rate sent through the link. It is known that as long as users do not anticipate the effect of their actions on prices, a simple proportional pricing mechanism can maximize the sum of users' utilities minus the cost (called aggregate surplus). Continuing previous efforts to quantify the effects of selfish behavior in network pricing mechanisms, we consider the possibility that users anticipate the effect of their actions on link prices. Under the assumption that the links' marginal cost functions are convex, we establish existence of a Nash equilibrium. We show that the aggregate surplus at a Nash equilibrium is no worse than a factor of 4*sqrt{2} - 5 times the optimal aggregate surplus; thus, the efficiency loss when users are selfish is no more than approximately 34%.<|reference_end|>
arxiv
@article{johari2005efficiency, title={Efficiency Loss in a Network Resource Allocation Game: The Case of Elastic Supply}, author={Ramesh Johari (Stanford University), Shie Mannor (McGill University), John N. Tsitsiklis (MIT)}, journal={arXiv preprint arXiv:cs/0506054}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506054}, primaryClass={cs.GT} }
johari2005efficiency
arxiv-673016
cs/0506055
The Complexity of Kings
<|reference_start|>The Complexity of Kings: A king in a directed graph is a node from which each node in the graph can be reached via paths of length at most two. There is a broad literature on tournaments (completely oriented digraphs), and it has been known for more than half a century that all tournaments have at least one king [Lan53]. Recently, kings have proven useful in theoretical computer science, in particular in the study of the complexity of the semifeasible sets [HNP98,HT05] and in the study of the complexity of reachability problems [Tan01,NT02]. In this paper, we study the complexity of recognizing kings. For each succinctly specified family of tournaments, the king problem is known to belong to $\Pi_2^p$ [HOZZ]. We prove that this bound is optimal: We construct a succinctly specified tournament family whose king problem is $\Pi_2^p$-complete. It follows easily from our proof approach that the problem of testing kingship in succinctly specified graphs (which need not be tournaments) is $\Pi_2^p$-complete. We also obtain $\Pi_2^p$-completeness results for k-kings in succinctly specified j-partite tournaments, $k,j \geq 2$, and we generalize our main construction to show that $\Pi_2^p$-completeness holds for testing k-kingship in succinctly specified families of tournaments for all $k \geq 2$.<|reference_end|>
arxiv
@article{hemaspaandra2005the, title={The Complexity of Kings}, author={Edith Hemaspaandra, Lane A. Hemaspaandra, Osamu Watanabe}, journal={arXiv preprint arXiv:cs/0506055}, year={2005}, number={URCS-TR-2005-870}, archivePrefix={arXiv}, eprint={cs/0506055}, primaryClass={cs.CC cs.DM} }
hemaspaandra2005the
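The definition in the record above (a king reaches every node via paths of length at most two) is easy to check on an explicitly given digraph; the paper's hardness results concern succinctly specified tournaments, where no such direct check is feasible. A small sketch of the explicit check:

def is_king(adj, v):
    # adj[u] = set of out-neighbours of u; a king reaches every node
    # by a path of length at most two
    reach = {v} | adj[v]
    for u in adj[v]:
        reach |= adj[u]
    return len(reach) == len(adj)

# a 3-cycle tournament on {0,1,2}: every node is a king
tournament = {0: {1}, 1: {2}, 2: {0}}
assert all(is_king(tournament, v) for v in tournament)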
arxiv-673017
cs/0506056
Large Alphabets and Incompressibility
<|reference_start|>Large Alphabets and Incompressibility: We briefly survey some concepts related to empirical entropy -- normal numbers, de Bruijn sequences and Markov processes -- and investigate how well it approximates Kolmogorov complexity. Our results suggest $\ell$th-order empirical entropy stops being a reasonable complexity metric for almost all strings of length $m$ over alphabets of size $n$ about when $n^\ell$ surpasses $m$.<|reference_end|>
arxiv
@article{gagie2005large, title={Large Alphabets and Incompressibility}, author={Travis Gagie}, journal={arXiv preprint arXiv:cs/0506056}, year={2005}, doi={10.1016/j.ipl.2006.04.008}, archivePrefix={arXiv}, eprint={cs/0506056}, primaryClass={cs.IT math.IT} }
gagie2005large
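For reference, the $\ell$th-order empirical entropy discussed in the record above can be computed directly: condition each symbol on its preceding $\ell$ symbols and take the weighted average of the zeroth-order entropies of the conditional distributions. A sketch under the usual definition (normalization conventions vary between papers):

from collections import Counter
from math import log2

def h0(symbols):
    # zeroth-order empirical entropy, in bits per symbol
    n = len(symbols)
    return sum((c / n) * log2(n / c) for c in Counter(symbols).values())

def empirical_entropy(s, order=0):
    # order-l empirical entropy: average H_0 over length-l contexts,
    # weighted by how often each context occurs
    if order == 0:
        return h0(s)
    ctx = {}
    for i in range(order, len(s)):
        ctx.setdefault(s[i - order:i], []).append(s[i])
    return sum(len(f) * h0(f) for f in ctx.values()) / len(s)

print(empirical_entropy("mississippi", order=1))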
arxiv-673018
cs/0506057
About one 3-parameter Model of Testing
<|reference_start|>About one 3-parameter Model of Testing: This article offers a 3-parameter model of testing, with 1) the difference between the ability level of the examinee and item difficulty; 2) the examinee discrimination and 3) the item discrimination as model parameters.<|reference_end|>
arxiv
@article{victor2005about, title={About one 3-parameter Model of Testing}, author={Kromer Victor}, journal={arXiv preprint arXiv:cs/0506057}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506057}, primaryClass={cs.LG} }
victor2005about
arxiv-673019
cs/0506058
An MSE Based Transfer Chart to Analyze Iterative Decoding Schemes
<|reference_start|>An MSE Based Transfer Chart to Analyze Iterative Decoding Schemes: An alternative to extrinsic information transfer (EXIT) charts, called mean squared error (MSE) charts, that uses a measure related to the MSE instead of mutual information is proposed. Using the relationship between mutual information and minimum mean squared error (MMSE), a relationship between the rate of any code and the area under a plot of MSE versus signal to noise ratio (SNR) is obtained, when the log likelihood ratios (LLR) can be assumed to be from a Gaussian channel. Using this result, a theoretical justification is provided for designing concatenated codes by matching the EXIT charts of the inner and outer decoders, when the LLRs are Gaussian, which is typically assumed for code design using EXIT charts. Finally, for the special case of the AWGN channel it is shown that any capacity achieving code has an EXIT curve that is flat. This extends Ashikhmin et al.'s results for erasure channels to the Gaussian channel.<|reference_end|>
arxiv
@article{bhattad2005an, title={An MSE Based Transfer Chart to Analyze Iterative Decoding Schemes}, author={Kapil Bhattad and Krishna Narayanan}, journal={arXiv preprint arXiv:cs/0506058}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506058}, primaryClass={cs.IT math.IT} }
bhattad2005an
arxiv-673020
cs/0506059
Existentially Restricted Quantified Constraint Satisfaction
<|reference_start|>Existentially Restricted Quantified Constraint Satisfaction: The quantified constraint satisfaction problem (QCSP) is a powerful framework for modelling computational problems. The general intractability of the QCSP has motivated the pursuit of restricted cases that avoid its maximal complexity. In this paper, we introduce and study a new model for investigating QCSP complexity in which the types of constraints that may be imposed on the existentially quantified variables are restricted. Our primary technical contribution is the development and application of a general technology for proving positive results on parameterizations of the model, namely inclusion in the complexity class coNP.<|reference_end|>
arxiv
@article{chen2005existentially, title={Existentially Restricted Quantified Constraint Satisfaction}, author={Hubie Chen}, journal={arXiv preprint arXiv:cs/0506059}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506059}, primaryClass={cs.CC cs.LO} }
chen2005existentially
arxiv-673021
cs/0506060
Yet another normalisation proof for Martin-Lof's logical framework--Terms with correct arities are strongly normalising
<|reference_start|>Yet another normalisation proof for Martin-Lof's logical framework--Terms with correct arities are strongly normalising: In this paper, we prove the strong normalisation for Martin-L\"{o}f's Logical Framework, and suggest that {}``correct arity'', a condition weaker than well-typedness, will also guarantee the strong normalisation.<|reference_end|>
arxiv
@article{luo2005yet, title={Yet another normalisation proof for Martin-Lof's logical framework--Terms with correct arities are strongly normalising}, author={Yong Luo}, journal={arXiv preprint arXiv:cs/0506060}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506060}, primaryClass={cs.LO} }
luo2005yet
arxiv-673022
cs/0506061
Security Policies as Membranes in Systems for Global Computing
<|reference_start|>Security Policies as Membranes in Systems for Global Computing: We propose a simple global computing framework, whose main concern is code migration. Systems are structured in sites, and each site is divided into two parts: a computing body, and a membrane, which regulates the interactions between the computing body and the external environment. More precisely, membranes are filters which control access to the associated site, and they also rely on the well-established notion of trust between sites. We develop a basic theory to express and enforce security policies via membranes. Initially, these only control the actions incoming agents intend to perform locally. We then adapt the basic theory to encompass more sophisticated policies, where the number of actions an agent wants to perform, and also their order, are considered.<|reference_end|>
arxiv
@article{gorla2005security, title={Security Policies as Membranes in Systems for Global Computing}, author={Daniele Gorla, Matthew Hennessy and Vladimiro Sassone}, journal={Logical Methods in Computer Science, Volume 1, Issue 3 (December 20, 2005) lmcs:2262}, year={2005}, doi={10.2168/LMCS-1(3:2)2005}, archivePrefix={arXiv}, eprint={cs/0506061}, primaryClass={cs.PL cs.LO} }
gorla2005security
arxiv-673023
cs/0506062
A CDMA multiuser detection algorithm based on survey propagation
<|reference_start|>A CDMA multiuser detection algorithm based on survey propagation: A computationally tractable CDMA multiuser detection algorithm is developed based on survey propagation.<|reference_end|>
arxiv
@article{kabashima2005a, title={A CDMA multiuser detection algorithm based on survey propagation}, author={Yoshiyuki Kabashima}, journal={arXiv preprint arXiv:cs/0506062}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506062}, primaryClass={cs.IT math.IT} }
kabashima2005a
arxiv-673024
cs/0506063
Priority-Based Conflict Resolution in Inconsistent Relational Databases
<|reference_start|>Priority-Based Conflict Resolution in Inconsistent Relational Databases: We study here the impact of priorities on conflict resolution in inconsistent relational databases. We extend the framework of repairs and consistent query answers. We propose a set of postulates that an extended framework should satisfy and consider two instantiations of the framework: (locally preferred) l-repairs and (globally preferred) g-repairs. We study the relationships between them and the impact each notion of repair has on the computational complexity of repair checking and consistent query answers.<|reference_end|>
arxiv
@article{staworko2005priority-based, title={Priority-Based Conflict Resolution in Inconsistent Relational Databases}, author={Slawomir Staworko and Jan Chomicki}, journal={arXiv preprint arXiv:cs/0506063}, year={2005}, number={UB CSE Technical Report 2005-11}, archivePrefix={arXiv}, eprint={cs/0506063}, primaryClass={cs.DB} }
staworko2005priority-based
arxiv-673025
cs/0506064
Optimal multiple assignments based on integer programming in secret sharing schemes with general access structures
<|reference_start|>Optimal multiple assignments based on integer programming in secret sharing schemes with general access structures: It is known that for any general access structure, a secret sharing scheme (SSS) can be constructed from an (m,m)-threshold scheme by using the so-called cumulative map, or from a (t,m)-threshold SSS by a modified cumulative map. However, such constructed SSSs are generally not efficient. In this paper, we propose a new method to construct an SSS from a (t,m)-threshold scheme for any given general access structure. In the proposed method, integer programming is used to distribute optimally the shares of the (t,m)-threshold scheme to each participant of the general access structure. Owing to this optimality, it always attains a lower coding rate than the cumulative maps, except in the cases where they already give the optimal distribution. The same method is also applied to construct SSSs for incomplete access structures and/or ramp access structures.<|reference_end|>
arxiv
@article{iwamoto2005optimal, title={Optimal multiple assignments based on integer programming in secret sharing schemes with general access structures}, author={Mitsugu Iwamoto, Hirosuke Yamamoto, Hirohisa Ogawa}, journal={arXiv preprint arXiv:cs/0506064}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506064}, primaryClass={cs.CR cs.IT math.IT} }
iwamoto2005optimal
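The cumulative map mentioned in the record above is simple to state: build an (m,m)-threshold scheme with one primitive share per maximal non-qualified set, and give a participant share i exactly when it lies outside the i-th such set. A toy sketch using an XOR-based (m,m)-threshold scheme; the paper's actual contribution, choosing an optimal assignment from a (t,m)-threshold scheme via integer programming, is not reproduced here.

import os
from functools import reduce

def xor_bytes(x, y):
    return bytes(a ^ b for a, b in zip(x, y))

def mm_threshold_shares(secret, m):
    # (m,m)-threshold scheme: any m-1 shares reveal nothing about the secret;
    # the XOR of all m shares recovers it
    shares = [os.urandom(len(secret)) for _ in range(m - 1)]
    shares.append(reduce(xor_bytes, shares, secret))
    return shares

def cumulative_assignment(participants, maximal_nonqualified):
    # participant p receives primitive share i iff p is NOT in the i-th
    # maximal non-qualified set, so every qualified set holds all shares
    return {p: [i for i, s in enumerate(maximal_nonqualified) if p not in s]
            for p in participants}

# access structure on {A,B,C} whose minimal qualified sets are {A,B} and {B,C}
print(cumulative_assignment("ABC", [{"A", "C"}, {"B"}]))
# {'A': [1], 'B': [0], 'C': [1]}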
arxiv-673026
cs/0506065
Strongly secure ramp secret sharing schemes for general access structures
<|reference_start|>Strongly secure ramp secret sharing schemes for general access structures: Ramp secret sharing (SS) schemes can be classified into strong ramp SS schemes and weak ramp SS schemes. The strong ramp SS schemes do not leak out any part of a secret explicitly even in the case where some information about the secret leaks from a non-qualified set of shares, and hence, they are more desirable than weak ramp SS schemes. However, it is not known how to construct the strong ramp SS schemes in the case of general access structures. In this paper, it is shown that a strong ramp SS scheme can always be constructed from a SS scheme with plural secrets for any feasible general access structure. As a byproduct, it is pointed out that threshold ramp SS schemes based on Shamir's polynomial interpolation method are {\em not} always strong.<|reference_end|>
arxiv
@article{iwamoto2005strongly, title={Strongly secure ramp secret sharing schemes for general access structures}, author={Mitsugu Iwamoto, Hirosuke Yamamoto}, journal={arXiv preprint arXiv:cs/0506065}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506065}, primaryClass={cs.CR cs.IT math.IT} }
iwamoto2005strongly
arxiv-673027
cs/0506066
Impersonation with the Echo Protocol
<|reference_start|>Impersonation with the Echo Protocol: The Echo protocol tries to do secure location verification using physical limits imposed by the speeds of light and sound. While the protocol is able to guarantee that a certain object is within a certain region, it cannot ensure the authenticity of further messages from the object without using cryptography. This paper describes an impersonation attack against the protocol based on this weakness. It also describes a couple of approaches which can be used to defend against the attack.<|reference_end|>
arxiv
@article{chung2005impersonation, title={Impersonation with the Echo Protocol}, author={Yoo Chung and Dongman Lee}, journal={arXiv preprint arXiv:cs/0506066}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506066}, primaryClass={cs.CR cs.NI} }
chung2005impersonation
arxiv-673028
cs/0506067
Measuring Woody: The Size of Debian 3.0
<|reference_start|>Measuring Woody: The Size of Debian 3.0: Debian is possibly the largest free software distribution, with well over 4,500 source packages in the latest stable release (Debian 3.0) and more than 8,000 source packages in the release currently in preparation. However, we wish to know what these numbers mean. In this paper, we use David A. Wheeler's SLOCCount system to determine the number of physical source lines of code (SLOC) of Debian 3.0 (aka woody). We show that Debian 3.0 includes more than 105,000,000 physical SLOC (almost twice as many as Red Hat 9, released about 8 months later), showing that the Debian development model (based on the work of a large group of voluntary developers spread around the world) is at least as capable as other development methods (like the more centralized one, based on the work of employees, used by Red Hat or Microsoft) of managing distributions of this size. It is also shown that, had Debian been developed using traditional proprietary methods, the COCOMO model estimates its development cost at close to 6.1 billion USD. In addition, we offer both an analysis of the programming languages used in the distribution (C accounts for about 65%, C++ for about 12%, Shell for about 8% and LISP is around 4%, with many others to follow) and the largest packages (the Linux kernel, Mozilla, XFree86, PM3, etc.).<|reference_end|>
arxiv
@article{amor2005measuring, title={Measuring Woody: The Size of Debian 3.0}, author={Juan Jose Amor, Gregorio Robles, Jesus Gonzalez-Barahona}, journal={arXiv preprint arXiv:cs/0506067}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506067}, primaryClass={cs.SE} }
amor2005measuring
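The measurement pipeline in the record above (count physical SLOC, feed the total into COCOMO) can be approximated in a few lines. This is only a rough stand-in for Wheeler's SLOCCount, which also strips comments and detects languages, and the COCOMO constants below are the standard "organic mode" values, not necessarily the calibration used in the paper.

import os

def physical_sloc(path, exts=(".c", ".h", ".py", ".sh")):
    # count non-blank physical source lines under a directory tree
    total = 0
    for root, _, files in os.walk(path):
        for name in files:
            if name.endswith(exts):
                with open(os.path.join(root, name), errors="ignore") as fh:
                    total += sum(1 for line in fh if line.strip())
    return total

def cocomo_effort(sloc, a=2.4, b=1.05):
    # basic COCOMO, organic mode: person-months = a * KLOC^b
    return a * (sloc / 1000.0) ** b

For 105 million SLOC, cocomo_effort(105_000_000) gives roughly 450,000 person-months; multiplying by an average salary plus overhead is how one arrives at multi-billion-dollar figures of the kind the paper reports.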
arxiv-673029
cs/0506068
Quantum Arthur-Merlin Games
<|reference_start|>Quantum Arthur-Merlin Games: This paper studies quantum Arthur-Merlin games, which are Arthur-Merlin games in which Arthur and Merlin can perform quantum computations and Merlin can send Arthur quantum information. As in the classical case, messages from Arthur to Merlin are restricted to be strings of uniformly generated random bits. It is proved that for one-message quantum Arthur-Merlin games, which correspond to the complexity class QMA, completeness and soundness errors can be reduced exponentially without increasing the length of Merlin's message. Previous constructions for reducing error required a polynomial increase in the length of Merlin's message. Applications of this fact include a proof that logarithmic length quantum certificates yield no increase in power over BQP and a simple proof that QMA is contained in PP. Other facts that are proved include the equivalence of three (or more) message quantum Arthur-Merlin games with ordinary quantum interactive proof systems and some basic properties concerning two-message quantum Arthur-Merlin games.<|reference_end|>
arxiv
@article{marriott2005quantum, title={Quantum Arthur-Merlin Games}, author={Chris Marriott and John Watrous}, journal={Computational Complexity, 14(2): 122 - 152, 2005}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506068}, primaryClass={cs.CC quant-ph} }
marriott2005quantum
arxiv-673030
cs/0506069
A generating function method for the average-case analysis of DPLL
<|reference_start|>A generating function method for the average-case analysis of DPLL: A method to calculate the average size of Davis-Putnam-Logemann-Loveland (DPLL) search trees for random computational problems is introduced, and applied to the satisfiability of random CNF formulas (SAT) and the coloring of random graphs (COL). We establish recursion relations for the generating functions of the average numbers of (variable or color) assignments at a given height in the search tree, which allow us to derive the asymptotics of the expected DPLL tree size, 2^{N w + o(N)}, where N is the instance size. w is calculated as a function of the input distribution parameters (ratio of clauses per variable for SAT, average vertex degree for COL) and the branching heuristics.<|reference_end|>
arxiv
@article{monasson2005a, title={A generating function method for the average-case analysis of DPLL}, author={Remi Monasson (LPTENS)}, journal={RANDOM 2005, Berkeley, CA, \'Etats-Unis d'Am\'erique, p.402-413}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506069}, primaryClass={cs.CC cond-mat.dis-nn} }
monasson2005a
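The quantity analyzed in the record above, the size of a DPLL search tree, can be measured empirically on small instances and compared against the 2^{N w + o(N)} prediction. A bare-bones sketch (no unit propagation or clever branching heuristics, so the counts differ from any particular DPLL variant):

def simplify(clauses, lit):
    # assign literal lit: drop satisfied clauses, shrink the rest
    out = []
    for c in clauses:
        if lit in c:
            continue
        reduced = [x for x in c if x != -lit]
        if not reduced:
            return None  # empty clause: this branch is contradictory
        out.append(reduced)
    return out

def tree_size(clauses, nvars, depth=1):
    # number of nodes in the full DPLL search tree, branching on
    # variables in fixed order; clauses are lists of signed integers
    if not clauses or depth > nvars:
        return 1
    total = 1
    for lit in (depth, -depth):
        s = simplify(clauses, lit)
        if s is not None:
            total += tree_size(s, nvars, depth + 1)
    return total

# (x1 or x2) and (not x1 or x3) and (not x2 or not x3), variables 1..3
print(tree_size([[1, 2], [-1, 3], [-2, -3]], 3))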
arxiv-673031
cs/0506070
Data Visualization on Shared Usage Multi-Screen Environment
<|reference_start|>Data Visualization on Shared Usage Multi-Screen Environment: Modern multimedia technologies, based on the whole palette of hardware and software facilities for real-time high-speed information processing, in combination with effective facilities for remote access to information resources, allow us to visualize diverse types of information. Data visualization facilities are the face of an Automated Control System and are often the basis on which its efficiency is judged. They take a special place, providing visualization of the diverse information necessary for decision-making by the final control link: the person vested with the relevant authority.<|reference_end|>
arxiv
@article{chashkov2005data, title={Data Visualization on Shared Usage Multi-Screen Environment}, author={Ph.D. Yuriy A. Chashkov}, journal={arXiv preprint arXiv:cs/0506070}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506070}, primaryClass={cs.MM} }
chashkov2005data
arxiv-673032
cs/0506071
Signal transmission on lossy lines as a dissipative quantum state propagation
<|reference_start|>Signal transmission on lossy lines as a dissipative quantum state propagation: The transmission of electric signals on a coupled line with distributed RLC-parameters is considered as the propagation of a dissipative quasi-particle. A calculation technique is developed, alternative to the one accepted for lumped lines. The relativistic wave equation for the transient response is deduced following common Ohm-law-type considerations. Exact expressions for the Green function, for the information transfer velocity and for the time delay are obtained on this basis. Fundamental restrictions on the measurement accuracy of the time delay are pointed out. The obtained results are naturally generalized to multilevel networks of arbitrary dimension.<|reference_end|>
arxiv
@article{reznykov2005signal, title={Signal transmission on lossy lines as a dissipative quantum state propagation}, author={Yu. Reznykov}, journal={arXiv preprint arXiv:cs/0506071}, year={2005}, number={BTU LTP-05-06}, archivePrefix={arXiv}, eprint={cs/0506071}, primaryClass={cs.NI} }
reznykov2005signal
arxiv-673033
cs/0506072
Performance Analysis of Algebraic Soft Decoding of Reed-Solomon Codes over Binary Symmetric and Erasure Channels
<|reference_start|>Performance Analysis of Algebraic Soft Decoding of Reed-Solomon Codes over Binary Symmetric and Erasure Channels: In this paper, we characterize the decoding region of algebraic soft decoding (ASD) of Reed-Solomon (RS) codes over erasure channels and the binary symmetric channel (BSC). Optimal multiplicity assignment strategies (MAS) are investigated and tight bounds are derived to show that ASD can significantly outperform conventional Berlekamp-Massey (BM) decoding over these channels for a wide code rate range. The analysis technique can also be extended to other channel models, e.g., RS coded modulation over erasure channels.<|reference_end|>
arxiv
@article{jiang2005performance, title={Performance Analysis of Algebraic Soft Decoding of Reed-Solomon Codes over Binary Symmetric and Erasure Channels}, author={Jing Jiang and Krishna R. Narayanan}, journal={arXiv preprint arXiv:cs/0506072}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506072}, primaryClass={cs.IT math.IT} }
jiang2005performance
arxiv-673034
cs/0506073
Iterative Soft Input Soft Output Decoding of Reed-Solomon Codes by Adapting the Parity Check Matrix
<|reference_start|>Iterative Soft Input Soft Output Decoding of Reed-Solomon Codes by Adapting the Parity Check Matrix: An iterative algorithm is presented for soft-input-soft-output (SISO) decoding of Reed-Solomon (RS) codes. The proposed iterative algorithm uses the sum product algorithm (SPA) in conjunction with a binary parity check matrix of the RS code. The novelty is in reducing the submatrix of the binary parity check matrix that corresponds to the less reliable bits to a sparse form before the SPA is applied at each iteration. The proposed algorithm can be geometrically interpreted as a two-stage gradient descent with an adaptive potential function. This adaptive procedure is crucial to the convergence behavior of the gradient descent algorithm and, therefore, significantly improves the performance. Simulation results show that the proposed decoding algorithm and its variations provide significant gain over hard decision decoding (HDD) and compare favorably with other popular soft decision decoding methods.<|reference_end|>
arxiv
@article{jiang2005iterative, title={Iterative Soft Input Soft Output Decoding of Reed-Solomon Codes by Adapting the Parity Check Matrix}, author={Jing Jiang and Krishna R. Narayanan}, journal={arXiv preprint arXiv:cs/0506073}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506073}, primaryClass={cs.IT math.IT} }
jiang2005iterative
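The key step described in the record above, reducing the columns of the binary parity check matrix that correspond to the least reliable bits to sparse (unit-weight) form, amounts to GF(2) Gaussian elimination driven by the reliability order. A minimal sketch of that step only; the full decoder wraps it inside SPA iterations with damping, which is omitted here.

import numpy as np

def adapt_parity_check(H, reliabilities):
    # make the columns for the least reliable bits unit-weight where possible
    H = H.copy() % 2
    m, _ = H.shape
    used = set()
    for col in np.argsort(reliabilities):  # least reliable bits first
        pivot = next((r for r in range(m) if r not in used and H[r, col]), None)
        if pivot is None:
            continue  # column is dependent on already-chosen pivots
        used.add(pivot)
        for r in range(m):
            if r != pivot and H[r, col]:
                H[r] ^= H[pivot]  # GF(2) row elimination
        if len(used) == m:
            break
    return H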
arxiv-673035
cs/0506074
Redundancy in Logic II: 2CNF and Horn Propositional Formulae
<|reference_start|>Redundancy in Logic II: 2CNF and Horn Propositional Formulae: We report complexity results about redundancy of formulae in 2CNF form. We first consider the problem of checking redundancy and show some algorithms that are slightly better than the trivial one. We then analyze problems related to finding irredundant equivalent subsets (I.E.S.) of a given set. The concept of cyclicity proved to be relevant to the complexity of these problems. Some results about Horn formulae are also shown.<|reference_end|>
arxiv
@article{liberatore2005redundancy, title={Redundancy in Logic II: 2CNF and Horn Propositional Formulae}, author={Paolo Liberatore}, journal={arXiv preprint arXiv:cs/0506074}, year={2005}, doi={10.1016/j.artint.2007.06.003}, archivePrefix={arXiv}, eprint={cs/0506074}, primaryClass={cs.AI cs.LO} }
liberatore2005redundancy
arxiv-673036
cs/0506075
Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales
<|reference_start|>Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales: We address the rating-inference problem, wherein rather than simply decide whether a review is "thumbs up" or "thumbs down", as in previous sentiment analysis work, one must determine an author's evaluation with respect to a multi-point scale (e.g., one to five "stars"). This task represents an interesting twist on standard multi-class text categorization because there are several different degrees of similarity between class labels; for example, "three stars" is intuitively closer to "four stars" than to "one star". We first evaluate human performance at the task. Then, we apply a meta-algorithm, based on a metric labeling formulation of the problem, that alters a given n-ary classifier's output in an explicit attempt to ensure that similar items receive similar labels. We show that the meta-algorithm can provide significant improvements over both multi-class and regression versions of SVMs when we employ a novel similarity measure appropriate to the problem.<|reference_end|>
arxiv
@article{pang2005seeing, title={Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales}, author={Bo Pang and Lillian Lee}, journal={arXiv preprint arXiv:cs/0506075}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506075}, primaryClass={cs.CL cs.LG} }
pang2005seeing
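The meta-algorithm in the record above can be caricatured in a few lines: start from a base classifier's labels, then re-label each item to balance its own scores against agreement with similar items, with |l - l'| as the metric on the star scale. The paper solves the metric labeling problem properly and with a carefully chosen similarity measure; this greedy sweep is only meant to show the shape of the idea, and all names and parameters here are illustrative.

def relabel(scores, sim, labels=(1, 2, 3, 4, 5), alpha=0.5, sweeps=3):
    # scores[i][l]: base classifier's score for item i and label l
    # sim[i][j]: similarity between items i and j (0 = unrelated)
    pred = [max(labels, key=lambda l: s[l]) for s in scores]
    for _ in range(sweeps):
        for i, s in enumerate(scores):
            def cost(l):
                # own preference plus metric disagreement with similar items
                penalty = sum(sim[i][j] * abs(l - pred[j])
                              for j in range(len(pred)) if j != i)
                return -s[l] + alpha * penalty
            pred[i] = min(labels, key=cost)
    return pred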
arxiv-673037
cs/0506076
Alternative security architecture for IP Telephony based on digital watermarking
<|reference_start|>Alternative security architecture for IP Telephony based on digital watermarking: Problems with securing IP Telephony systems, insufficient standardization, and a lack of security mechanisms have created the need for new approaches and solutions. In this paper a new, alternative security architecture for voice systems is presented. It is based on digital watermarking: a new, flexible and powerful technology that is gaining increasing attention. Besides known applications, e.g. solving copyright protection problems, we propose to use digital watermarking to secure not only the transmitted audio but also the signaling protocol that IP Telephony is based on.<|reference_end|>
arxiv
@article{mazurczyk2005alternative, title={Alternative security architecture for IP Telephony based on digital watermarking}, author={Wojciech Mazurczyk, Zbigniew Kotulski}, journal={Lecture Notes in Computer Science 4166, pp. 170 - 181, Springer, Heidelberg 2006. ISBN 978-3-540-45762-6}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506076}, primaryClass={cs.CR cs.MM} }
mazurczyk2005alternative
arxiv-673038
cs/0506077
Stability of Scheduled Multi-access Communication over Quasi-static Flat Fading Channels with Random Coding and Independent Decoding
<|reference_start|>Stability of Scheduled Multi-access Communication over Quasi-static Flat Fading Channels with Random Coding and Independent Decoding: The stability of scheduled multiaccess communication with random coding and independent decoding of messages is investigated. The number of messages that may be scheduled for simultaneous transmission is limited to a given maximum value, and the channels from transmitters to receiver are quasi-static, flat, and have independent fades. Requests for message transmissions are assumed to arrive according to an i.i.d. arrival process. Then, we show the following: (1) in the limit of large message alphabet size, the stability region has an interference limited information-theoretic capacity interpretation, (2) state-independent scheduling policies achieve this asymptotic stability region, and (3) in the asymptotic limit corresponding to immediate access, the stability region for non-idling scheduling policies is shown to be identical irrespective of received signal powers.<|reference_end|>
arxiv
@article{sayee2005stability, title={Stability of Scheduled Multi-access Communication over Quasi-static Flat Fading Channels with Random Coding and Independent Decoding}, author={KCV Kalyanarama Sesha Sayee and Utpal Mukherji}, journal={arXiv preprint arXiv:cs/0506077}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506077}, primaryClass={cs.IT math.IT} }
sayee2005stability
arxiv-673039
cs/0506078
Dynamical Neural Network: Information and Topology
<|reference_start|>Dynamical Neural Network: Information and Topology: A neural network works as an associative memory device if it has large storage capacity and the quality of the retrieval is good enough. The learning and attractor abilities of the network can both be measured by the mutual information (MI) between patterns and retrieval states. This paper deals with the search for an optimal topology of a Hebb network, in the sense of maximal MI, using small-world topology. The connectivity $\gamma$ ranges from an extremely diluted to the fully connected network; the randomness $\omega$ ranges from purely local to completely random neighbors. It is found that, while stability implies an optimal $MI(\gamma,\omega)$ at $\gamma_{opt}(\omega)\to 0$, for the dynamics the optimal topology holds at certain $\gamma_{opt}>0$ whenever $0\leq\omega<0.3$.<|reference_end|>
arxiv
@article{dominguez2005dynamical, title={Dynamical Neural Network: Information and Topology}, author={David Dominguez, Kostadin Koroutchev, Eduardo Serrano and Francisco B. Rodriguez}, journal={arXiv preprint arXiv:cs/0506078}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506078}, primaryClass={cs.IR cs.NE} }
dominguez2005dynamical
arxiv-673040
cs/0506079
Quantitative Models and Implicit Complexity
<|reference_start|>Quantitative Models and Implicit Complexity: We give new proofs of soundness (all representable functions on base types lie in certain complexity classes) for Elementary Affine Logic, LFPL (a language for polytime computation close to realistic functional programming introduced by one of us), Light Affine Logic and Soft Affine Logic. The proofs are based on a common semantical framework which is merely instantiated in four different ways. The framework consists of an innovative modification of realizability which allows us to use resource-bounded computations as realisers, as opposed to including all Turing computable functions as is usually the case in realizability constructions. For example, all realisers in the model for LFPL are polynomially bounded computations, whence soundness holds by construction of the model. The work then lies in being able to interpret all the required constructs in the model. While being the first entirely semantical proof of polytime soundness for light logics, our proof also provides a notable simplification of the original, already semantical, proof of polytime soundness for LFPL. A new result made possible by the semantic framework is the addition of polymorphism and a modality to LFPL, thus allowing for an internal definition of inductive datatypes.<|reference_end|>
arxiv
@article{lago2005quantitative, title={Quantitative Models and Implicit Complexity}, author={U. Dal Lago and M. Hofmann}, journal={arXiv preprint arXiv:cs/0506079}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506079}, primaryClass={cs.LO cs.CC} }
lago2005quantitative
arxiv-673041
cs/0506080
The Geometry of Linear Higher-Order Recursion
<|reference_start|>The Geometry of Linear Higher-Order Recursion: Linearity and ramification constraints have been widely used to weaken higher-order (primitive) recursion in such a way that the class of representable functions equals the class of polytime functions. We show that fine-tuning these two constraints leads to different expressive strengths, some of them lying well beyond polynomial time. This is done by introducing a new semantics, called algebraic context semantics. The framework stems from Gonthier's original work and turns out to be a versatile and powerful tool for the quantitative analysis of normalization in presence of constants and higher-order recursion.<|reference_end|>
arxiv
@article{lago2005the, title={The Geometry of Linear Higher-Order Recursion}, author={U. Dal Lago}, journal={arXiv preprint arXiv:cs/0506080}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506080}, primaryClass={cs.LO cs.CC} }
lago2005the
arxiv-673042
cs/0506081
Three lines proof of the lower bound for the matrix rigidity
<|reference_start|>Three lines proof of the lower bound for the matrix rigidity: The rigidity of a matrix describes the minimal number of entries one has to change to reduce the matrix's rank to r. We give a very simple combinatorial proof of the lower bound for the rigidity of the Sylvester matrix (a special case of the Hadamard matrix) that matches the best known result, proved by de Wolf (2005) for Hadamard matrices via quantum information theoretical arguments.<|reference_end|>
arxiv
@article{midrijanis2005three, title={Three lines proof of the lower bound for the matrix rigidity}, author={Gatis Midrijanis}, journal={arXiv preprint arXiv:cs/0506081}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506081}, primaryClass={cs.CC} }
midrijanis2005three
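For concreteness, the Sylvester matrix in the record above is built by the doubling recursion H_{2n} = [[H, H], [H, -H]]; its rows are mutually orthogonal, which is the structural fact rigidity lower-bound arguments exploit. A quick sketch:

import numpy as np

def sylvester(k):
    # 2^k x 2^k Sylvester-Hadamard matrix via the doubling recursion
    H = np.array([[1]])
    for _ in range(k):
        H = np.block([[H, H], [H, -H]])
    return H

H = sylvester(3)
assert (H @ H.T == 8 * np.eye(8)).all()  # orthogonal rows: H H^T = n I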
arxiv-673043
cs/0506082
Open Questions in the Theory of Semifeasible Computation
<|reference_start|>Open Questions in the Theory of Semifeasible Computation: The study of semifeasible algorithms was initiated by Selman's work a quarter of century ago [Sel79,Sel81,Sel82]. Informally put, this research stream studies the power of those sets L for which there is a deterministic (or in some cases, the function may belong to one of various nondeterministic function classes) polynomial-time function f such that when at least one of x and y belongs to L, then f(x,y) \in L \cap \{x,y\}. The intuition here is that it is saying: ``Regarding membership in L, if you put a gun to my head and forced me to bet on one of x or y as belonging to L, my money would be on f(x,y).'' In this article, we present a number of open problems from the theory of semifeasible algorithms. For each we present its background and review what partial results, if any, are known.<|reference_end|>
arxiv
@article{faliszewski2005open, title={Open Questions in the Theory of Semifeasible Computation}, author={Piotr Faliszewski and Lane A. Hemaspaandra}, journal={arXiv preprint arXiv:cs/0506082}, year={2005}, number={URCS-TR-2005-872}, archivePrefix={arXiv}, eprint={cs/0506082}, primaryClass={cs.CC} }
faliszewski2005open
arxiv-673044
cs/0506083
Maxwell Construction: The Hidden Bridge between Iterative and Maximum a Posteriori Decoding
<|reference_start|>Maxwell Construction: The Hidden Bridge between Iterative and Maximum a Posteriori Decoding: There is a fundamental relationship between belief propagation and maximum a posteriori decoding. A decoding algorithm, which we call the Maxwell decoder, is introduced and provides a constructive description of this relationship. Both, the algorithm itself and the analysis of the new decoder are reminiscent of the Maxwell construction in thermodynamics. This paper investigates in detail the case of transmission over the binary erasure channel, while the extension to general binary memoryless channels is discussed in a companion paper.<|reference_end|>
arxiv
@article{measson2005maxwell, title={Maxwell Construction: The Hidden Bridge between Iterative and Maximum a Posteriori Decoding}, author={Cyril Measson, Andrea Montanari, Ruediger Urbanke}, journal={arXiv preprint arXiv:cs/0506083}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506083}, primaryClass={cs.IT cond-mat.dis-nn math.IT} }
measson2005maxwell
arxiv-673045
cs/0506084
The One Page Model Checker
<|reference_start|>The One Page Model Checker: We show how standard IPC mechanisms can be used with the fork() system call to perform explicit state model checking on all interleavings of a multithreaded application. We specifically show how to check for deadlock and race conditions in programs with two threads. Our techniques are easy to apply to other languages, and require only the most rudimentary parsing of the target language. Our fundamental system fits in one page of C code.<|reference_end|>
arxiv
@article{holt2005the, title={The One Page Model Checker}, author={Jason E. Holt (BYU)}, journal={arXiv preprint arXiv:cs/0506084}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506084}, primaryClass={cs.LO} }
holt2005the
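The core task in the record above, exploring all interleavings of two threads and checking each final state, can be shown without fork(): enumerate the interleavings directly and replay them. The paper's one-page checker does this with OS processes and real IPC in C; the sketch below is only a toy in-memory version exhibiting the classic lost-update race it would detect.

def interleavings(a, b):
    # yield every merge of sequences a and b that preserves their orders
    if not a or not b:
        yield list(a) + list(b)
        return
    for rest in interleavings(a[1:], b):
        yield [a[0]] + rest
    for rest in interleavings(a, b[1:]):
        yield [b[0]] + rest

# two threads each perform a non-atomic increment: load, then store
t1 = [("load", 1), ("store", 1)]
t2 = [("load", 2), ("store", 2)]

def run(schedule):
    shared, regs = 0, {}
    for op, tid in schedule:
        if op == "load":
            regs[tid] = shared
        else:  # store
            shared = regs[tid] + 1
    return shared

outcomes = {run(s) for s in interleavings(t1, t2)}
print(outcomes)  # {1, 2}: a final value of 1 is the lost-update race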
arxiv-673046
cs/0506085
On the Job Training
<|reference_start|>On the Job Training: We propose a new framework for building and evaluating machine learning algorithms. We argue that many real-world problems require an agent which must quickly learn to respond to demands, yet can continue to perform and respond to new training throughout its useful life. We give a framework for how such agents can be built, describe several metrics for evaluating them, and show that subtle changes in system construction can significantly affect agent performance.<|reference_end|>
arxiv
@article{holt2005on, title={On the Job Training}, author={Jason E. Holt (BYU)}, journal={arXiv preprint arXiv:cs/0506085}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506085}, primaryClass={cs.LG} }
holt2005on
arxiv-673047
cs/0506086
Large System Decentralized Detection Performance Under Communication Constraints
<|reference_start|>Large System Decentralized Detection Performance Under Communication Constraints: The problem of decentralized detection in a sensor network subjected to a total average power constraint, with all nodes sharing a common bandwidth, is investigated. The bandwidth constraint is taken into account by assuming non-orthogonal communication between sensors and the data fusion center via direct-sequence code-division multiple-access (DS-CDMA). In the case of large sensor systems and random spreading, the asymptotic decentralized detection performance is derived, assuming independent and identically distributed (iid) sensor observations, via random matrix theory. The results show that, even under both power and bandwidth constraints, it is better to combine many not-so-good local decisions than to rely on one (or a few) very good local decisions.<|reference_end|>
arxiv
@article{jayaweera2005large, title={Large System Decentralized Detection Performance Under Communication Constraints}, author={Sudharman K. Jayaweera}, journal={arXiv preprint arXiv:cs/0506086}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506086}, primaryClass={cs.IT math.IT} }
jayaweera2005large
arxiv-673048
cs/0506087
Primal-dual distance bounds of linear codes with application to cryptography
<|reference_start|>Primal-dual distance bounds of linear codes with application to cryptography: Let $N(d,d^\perp)$ denote the minimum length $n$ of a linear code $C$ with $d$ and $d^\perp$, where $d$ is the minimum Hamming distance of $C$ and $d^\perp$ is the minimum Hamming distance of $C^\perp$. In this paper, we show a lower bound and an upper bound on $N(d,d^\perp)$. Further, for small values of $d$ and $d^\perp$, we determine $N(d,d^\perp)$ and give a generator matrix of the optimum linear code. This problem is directly related to the design method of cryptographic Boolean functions suggested by Kurosawa et al.<|reference_end|>
arxiv
@article{matsumoto2005primal-dual, title={Primal-dual distance bounds of linear codes with application to cryptography}, author={Ryutaroh Matsumoto, Kaoru Kurosawa, Toshiya Itoh, Toshimitsu Konno, Tomohiko Uyematsu}, journal={IEEE Trans. Inform. Theory, vol. 52, no. 9, pp. 4251-4256, Sept. 2006}, year={2005}, doi={10.1109/TIT.2006.880050}, archivePrefix={arXiv}, eprint={cs/0506087}, primaryClass={cs.IT cs.CR math.IT} }
matsumoto2005primal-dual
arxiv-673049
cs/0506088
An Alternative to Huffman's Algorithm for Constructing Variable-Length Codes
<|reference_start|>An Alternative to Huffman's Algorithm for Constructing Variable-Length Codes: This paper has been withdrawn by the author.<|reference_end|>
arxiv
@article{rioul2005an, title={An Alternative to Huffman's Algorithm for Constructing Variable-Length Codes}, author={Olivier Rioul}, journal={arXiv preprint arXiv:cs/0506088}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506088}, primaryClass={cs.IT math.IT} }
rioul2005an
arxiv-673050
cs/0506089
Field geology with a wearable computer: 1st results of the Cyborg Astrobiologist System
<|reference_start|>Field geology with a wearable computer: 1st results of the Cyborg Astrobiologist System: We present results from the first geological field tests of the `Cyborg Astrobiologist', which is a wearable computer and video camcorder system that we are using to test and train a computer-vision system towards having some of the autonomous decision-making capabilities of a field-geologist. The Cyborg Astrobiologist platform has thus far been used for testing and development of these algorithms and systems: robotic acquisition of quasi-mosaics of images, real-time image segmentation, and real-time determination of interesting points in the image mosaics. This work is more of a test of the whole system, rather than of any one part of the system. However, beyond the concept of the system itself, the uncommon map (despite its simplicity) is the main innovative part of the system. The uncommon map helps to determine interest-points in a context-free manner. Overall, the hardware and software systems function reliably, and the computer-vision algorithms are adequate for the first field tests. In addition to the proof-of-concept aspect of these field tests, the main result of these field tests is the enumeration of those issues that we can improve in the future, including: dealing with structural shadow and microtexture, and also, controlling the camera's zoom lens in an intelligent manner. Nonetheless, despite these and other technical inadequacies, this Cyborg Astrobiologist system, consisting of a camera-equipped wearable-computer and its computer-vision algorithms, has demonstrated its ability of finding genuinely interesting points in real-time in the geological scenery, and then gathering more information about these interest points in an automated manner. We use these capabilities for autonomous guidance towards geological points-of-interest.<|reference_end|>
arxiv
@article{mcguire2005field, title={Field geology with a wearable computer: 1st results of the Cyborg Astrobiologist System}, author={Patrick C. McGuire, Javier Gomez-Elvira, Jose Antonio Rodriguez-Manfredi, Eduardo Sebastian-Martinez, Jens Ormo, Enrique Diaz-Martinez, Markus Oesker, Robert Haschke, Joerg Ontrup, Helge Ritter}, journal={Proceedings of the ICINCO'2005 (International Conference on Informatics in Control, Automation and Robotics), September 14-17, Barcelona, Spain, vol. 3, pp. 283-291 (2005)}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506089}, primaryClass={cs.CV astro-ph cs.AI cs.CE cs.HC cs.RO} }
mcguire2005field
arxiv-673051
cs/0506090
An Exact 2.9416^n Algorithm for the Three Domatic Number Problem
<|reference_start|>An Exact 29416^n Algorithm for the Three Domatic Number Problem: The three domatic number problem asks whether a given undirected graph can be partitioned into at least three dominating sets, i.e., sets whose closed neighborhood equals the vertex set of the graph. Since this problem is NP-complete, no polynomial-time algorithm is known for it. The naive deterministic algorithm for this problem runs in time 3^n, up to polynomial factors. In this paper, we design an exact deterministic algorithm for this problem running in time 2.9416^n. Thus, our algorithm can handle problem instances of larger size than the naive algorithm in the same amount of time. We also present another deterministic and a randomized algorithm for this problem that both have an even better performance for graphs with small maximum degree.<|reference_end|>
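For contrast with the improved bounds, the naive 3^n-time algorithm mentioned in the abstract is easy to state: enumerate all assignments of vertices to three parts and test each part for domination. A minimal Python sketch (the helper names and the K4 example are ours, not the paper's):

```python
from itertools import product

def is_dominating(graph, part):
    # part is dominating iff every vertex lies in part or has a neighbor there
    return all(v in part or graph[v] & part for v in graph)

def three_domatic(graph):
    # Naive 3^n search: try every assignment of vertices to three classes
    # and test whether all three classes are dominating sets.
    vertices = list(graph)
    for colors in product(range(3), repeat=len(vertices)):
        parts = [set(), set(), set()]
        for v, c in zip(vertices, colors):
            parts[c].add(v)
        if all(is_dominating(graph, p) for p in parts):
            return parts
    return None

# K4 as adjacency sets; its domatic number is 4, so a 3-partition exists.
k4 = {v: {u for u in range(4) if u != v} for v in range(4)}
print(three_domatic(k4))
```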
arxiv
@article{riege2005an, title={An Exact 2.9416^n Algorithm for the Three Domatic Number Problem}, author={Tobias Riege, Jörg Rothe}, journal={arXiv preprint arXiv:cs/0506090}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506090}, primaryClass={cs.CC} }
riege2005an
arxiv-673052
cs/0506091
A New Construction for LDPC Codes using Permutation Polynomials over Integer Rings
<|reference_start|>A New Construction for LDPC Codes using Permutation Polynomials over Integer Rings: A new construction is proposed for low density parity check (LDPC) codes using quadratic permutation polynomials over finite integer rings. The associated graphs for the new codes have both algebraic and pseudo-random nature, and the new codes are quasi-cyclic. Graph isomorphisms and automorphisms are identified and used in an efficient search for good codes. Graphs with girth as large as 12 were found. Upper bounds on the minimum Hamming distance are found both analytically and algorithmically. The bounds indicate that the minimum distance grows with block length. Near-codewords are one of the causes for error floors in LDPC codes; the new construction provides a good framework for studying near-codewords in LDPC codes. Nine example codes are given, and computer simulation results show the excellent error performance of these codes. Finally, connections are made between this new LDPC construction and turbo codes using interleavers generated by quadratic permutation polynomials.<|reference_end|>
arxiv
@article{takeshita2005a, title={A New Construction for LDPC Codes using Permutation Polynomials over Integer Rings}, author={Oscar Y. Takeshita}, journal={arXiv preprint arXiv:cs/0506091}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506091}, primaryClass={cs.IT math.IT} }
takeshita2005a
arxiv-673053
cs/0506092
Emergent Statistical Wealth Distributions in Simple Monetary Exchange Models: A Critical Review
<|reference_start|>Emergent Statistical Wealth Distributions in Simple Monetary Exchange Models: A Critical Review: This paper reviews recent attempts at modelling inequality of wealth as an emergent phenomenon of interacting-agent processes. We point out that recent models of wealth condensation which draw their inspiration from molecular dynamics have, in fact, reinvented a process introduced quite some time ago by Angle (1986) in the sociological literature. We emphasize some problematic aspects of simple wealth exchange models and contrast them with a monetary model based on economic principles of market mediated exchange. The paper also reports new results on the influence of market power on the wealth distribution in statistical equilibrium. As it turns out, inequality increases but market power alone is not sufficient for changing the exponential tails of simple exchange models into Pareto tails.<|reference_end|>
arxiv
@article{lux2005emergent, title={Emergent Statistical Wealth Distributions in Simple Monetary Exchange Models: A Critical Review}, author={Thomas Lux}, journal={arXiv preprint arXiv:cs/0506092}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506092}, primaryClass={cs.MA} }
lux2005emergent
arxiv-673054
cs/0506093
On Maximum Contention-Free Interleavers and Permutation Polynomials over Integer Rings
<|reference_start|>On Maximum Contention-Free Interleavers and Permutation Polynomials over Integer Rings: An interleaver is a critical component for the channel coding performance of turbo codes. Algebraic constructions are of particular interest because they admit analytical designs and simple, practical hardware implementation. Contention-free interleavers have been recently shown to be suitable for parallel decoding of turbo codes. In this correspondence, it is shown that permutation polynomials generate maximum contention-free interleavers, i.e., every factor of the interleaver length becomes a possible degree of parallel processing of the decoder. Further, it is shown by computer simulations that turbo codes using these interleavers perform very well for the 3rd Generation Partnership Project (3GPP) standard.<|reference_end|>
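The contention-free property claimed for permutation polynomial interleavers can be checked numerically. The sketch below uses the QPP f(x) = 3x + 10x^2 mod 40, a small QPP that also appears in the 3GPP LTE interleaver table, and verifies that for window size W = 8 dividing N = 40 the parallel processors never access the same memory bank in the same step; the bank convention floor(pi/W) is one standard formalization and is assumed here.

```python
def qpp(f1, f2, N):
    # Candidate quadratic permutation polynomial f(x) = f1*x + f2*x^2 mod N
    return [(f1 * x + f2 * x * x) % N for x in range(N)]

N, W = 40, 8
pi = qpp(3, 10, N)
assert sorted(pi) == list(range(N)), "not a permutation"

# Contention-freedom for W | N: split positions into N//W windows; at each
# step j, processors p = 0..N//W-1 read pi[j + p*W], and these addresses
# must land in pairwise distinct banks floor(./W).
for j in range(W):
    banks = {pi[j + p * W] // W for p in range(N // W)}
    assert len(banks) == N // W, f"memory contention at step {j}"
print("f(x) = 3x + 10x^2 mod 40 is contention-free for W =", W)
```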
arxiv
@article{takeshita2005on, title={On Maximum Contention-Free Interleavers and Permutation Polynomials over Integer Rings}, author={Oscar Y. Takeshita}, journal={arXiv preprint arXiv:cs/0506093}, year={2005}, doi={10.1109/TIT.2005.864450}, archivePrefix={arXiv}, eprint={cs/0506093}, primaryClass={cs.IT math.IT} }
takeshita2005on
arxiv-673055
cs/0506094
Universal Codes as a Basis for Nonparametric Testing of Serial Independence for Time Series
<|reference_start|>Universal Codes as a Basis for Nonparametric Testing of Serial Independence for Time Series: We consider a stationary and ergodic source $p$ generating symbols $x_1 ... x_t$ from some finite set $A$, and a null hypothesis $H_0$ that $p$ is a Markov source with memory (or connectivity) not larger than $m$ $(m \ge 0)$. The alternative hypothesis $H_1$ is that the sequence is generated by a stationary and ergodic source which differs from the source under $H_0$. In particular, if $m = 0$ we have the null hypothesis $H_0$ that the sequence is generated by a Bernoulli source (i.e., the hypothesis that $x_1 ... x_t$ are independent). Some new tests, based on universal codes and universal predictors, are suggested.<|reference_end|>
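A toy version of such a test can be built from any off-the-shelf compressor standing in for a universal code: reject independence when the universal code compresses the sample markedly below the best memoryless (m = 0) codelength. The sketch below uses zlib and an arbitrary 64-bit slack; both choices are illustrative assumptions, not the calibrated tests of the paper.

```python
import math, random, zlib
from collections import Counter

def zero_order_codelength(seq):
    # Bits needed by the best memoryless (m = 0) code: n * empirical entropy
    n, counts = len(seq), Counter(seq)
    return -sum(c * math.log2(c / n) for c in counts.values())

def universal_codelength(seq):
    # Bits used by a practical universal code (zlib as a stand-in)
    return 8 * len(zlib.compress(bytes(seq), 9))

def looks_dependent(seq, slack_bits=64):
    # Reject H0 (independence) when the universal code clearly beats
    # every memoryless code on the observed sample.
    return universal_codelength(seq) + slack_bits < zero_order_codelength(seq)

iid = [random.randrange(4) for _ in range(20000)]
markov = [0]
for _ in range(19999):   # sticky chain: strong serial dependence
    markov.append(markov[-1] if random.random() < 0.9 else random.randrange(4))
print(looks_dependent(iid), looks_dependent(markov))   # expected: False True
```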
arxiv
@article{ryabko2005universal, title={Universal Codes as a Basis for Nonparametric Testing of Serial Independence for Time Series}, author={Boris Ryabko and Jaakko Astola}, journal={arXiv preprint arXiv:cs/0506094}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506094}, primaryClass={cs.IT math.IT} }
ryabko2005universal
arxiv-673056
cs/0506095
Deriving a Stationary Dynamic Bayesian Network from a Logic Program with Recursive Loops
<|reference_start|>Deriving a Stationary Dynamic Bayesian Network from a Logic Program with Recursive Loops: Recursive loops in a logic program present a challenging problem to the PLP framework. On the one hand, they loop forever so that the PLP backward-chaining inferences would never stop. On the other hand, they generate cyclic influences, which are disallowed in Bayesian networks. Therefore, in existing PLP approaches logic programs with recursive loops are considered to be problematic and thus are excluded. In this paper, we propose an approach that makes use of recursive loops to build a stationary dynamic Bayesian network. Our work stems from an observation that recursive loops in a logic program imply a time sequence and thus can be used to model a stationary dynamic Bayesian network without using explicit time parameters. We introduce a Bayesian knowledge base with logic clauses of the form $A \leftarrow A_1,...,A_l, true, Context, Types$, which naturally represents the knowledge that the $A_i$s have direct influences on $A$ in the context $Context$ under the type constraints $Types$. We then use the well-founded model of a logic program to define the direct influence relation and apply SLG-resolution to compute the space of random variables together with their parental connections. We introduce a novel notion of influence clauses, based on which a declarative semantics for a Bayesian knowledge base is established and algorithms for building a two-slice dynamic Bayesian network from a logic program are developed.<|reference_end|>
arxiv
@article{shen2005deriving, title={Deriving a Stationary Dynamic Bayesian Network from a Logic Program with Recursive Loops}, author={Y. D. Shen, Q. Yang, J. H. You and L. Y. Yuan}, journal={arXiv preprint arXiv:cs/0506095}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506095}, primaryClass={cs.AI cs.LG cs.LO} }
shen2005deriving
arxiv-673057
cs/0506096
Polynomial Synthesis of Asynchronous Automata
<|reference_start|>Polynomial Synthesis of Asynchronous Automata: Zielonka's theorem shows that each regular set of Mazurkiewicz traces can be implemented as a system of synchronized processes with a distributed control structure called asynchronous automaton. This paper gives a polynomial algorithm for the synthesis of a non-deterministic asynchronous automaton from a regular Mazurkiewicz trace language. This new construction is based on an unfolding approach that improves the complexity of Zielonka's and Pighizzini's techniques in terms of the number of states.<|reference_end|>
arxiv
@article{baudru2005polynomial, title={Polynomial Synthesis of Asynchronous Automata}, author={Nicolas Baudru (LIF), Rémi Morin (LIF)}, journal={arXiv preprint arXiv:cs/0506096}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506096}, primaryClass={cs.CC cs.LO} }
baudru2005polynomial
arxiv-673058
cs/0506097
A Flexible Thread Scheduler for Hierarchical Multiprocessor Machines
<|reference_start|>A Flexible Thread Scheduler for Hierarchical Multiprocessor Machines: With the current trend of multiprocessor machines towards more and more hierarchical architectures, exploiting the full computational power requires careful distribution of execution threads and data so as to limit expensive remote memory accesses. Existing multi-threaded libraries provide only limited facilities to let applications express distribution indications, so that programmers end up with explicitly distributing tasks according to the underlying architecture, which is difficult and not portable. In this article, we present: (1) a model for dynamically expressing the structure of the computation; (2) a scheduler interpreting this model so as to make judicious hierarchical distribution decisions; (3) an implementation within the Marcel user-level thread library. We evaluated our proposal on a scientific application running on a ccNUMA Bull NovaScale with 16 Intel Itanium II processors; results show a 30% gain compared to a classical scheduler, and are similar to what a handmade scheduler achieves in a non-portable way.<|reference_end|>
arxiv
@article{thibault2005a, title={A Flexible Thread Scheduler for Hierarchical Multiprocessor Machines}, author={Samuel Thibault (INRIA Futurs)}, journal={arXiv preprint arXiv:cs/0506097}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506097}, primaryClass={cs.DC} }
thibault2005a
arxiv-673059
cs/0506098
Distributed Selfish Load Balancing
<|reference_start|>Distributed Selfish Load Balancing: Suppose that a set of $m$ tasks are to be shared as equally as possible amongst a set of $n$ resources. A game-theoretic mechanism to find a suitable allocation is to associate each task with a ``selfish agent'', and require each agent to select a resource, with the cost of a resource being the number of agents to select it. Agents would then be expected to migrate from overloaded to underloaded resources, until the allocation becomes balanced. Recent work has studied the question of how this can take place within a distributed setting in which agents migrate selfishly without any centralized control. In this paper we discuss a natural protocol for the agents which combines the following desirable features: It can be implemented in a strongly distributed setting, uses no central control, and has good convergence properties. For $m\gg n$, the system becomes approximately balanced (an $\epsilon$-Nash equilibrium) in expected time $O(\log\log m)$. We show using a martingale technique that the process converges to a perfectly balanced allocation in expected time $O(\log\log m+n^4)$. We also give a lower bound of $\Omega(\max\{\log\log m,n\})$ for the convergence time.<|reference_end|>
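One natural randomized protocol of this kind is easy to simulate: in each parallel round every agent samples a random resource and migrates there with a probability that shrinks as the destination fills up. The migration rule below (move from load X to load Y < X with probability 1 - Y/X) is a common choice in this literature and is assumed here for illustration; the protocol analyzed in the paper may differ in details.

```python
import random

def round_step(load, assignment, n):
    # Decisions are taken in parallel against the loads at the start of the
    # round, then applied; the agent on resource i samples j uniformly and
    # moves with probability 1 - load[j]/load[i] when j is less loaded.
    snapshot = load[:]
    moves = []
    for a, i in enumerate(assignment):
        j = random.randrange(n)
        if snapshot[j] < snapshot[i] and \
           random.random() < 1 - snapshot[j] / snapshot[i]:
            moves.append((a, i, j))
    for a, i, j in moves:
        assignment[a] = j
        load[i] -= 1
        load[j] += 1

m, n = 100_000, 10
assignment = [0] * m              # adversarial start: everyone on resource 0
load = [0] * n
load[0] = m
for t in range(10):
    round_step(load, assignment, n)
    print("round", t, "imbalance", max(load) - min(load))
```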
arxiv
@article{berenbrink2005distributed, title={Distributed Selfish Load Balancing}, author={Petra Berenbrink, Tom Friedetzky, Leslie Ann Goldberg, Paul Goldberg, Zengjian Hu, Russell Martin}, journal={arXiv preprint arXiv:cs/0506098}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506098}, primaryClass={cs.GT math.OC} }
berenbrink2005distributed
arxiv-673060
cs/0506099
DIMES: Let the Internet Measure Itself
<|reference_start|>DIMES: Let the Internet Measure Itself: Today's Internet maps, which are all collected from a small number of vantage points, are falling short of being accurate. We suggest here a paradigm shift for this task. DIMES is a distributed measurement infrastructure for the Internet that is based on the deployment of thousands of lightweight measurement agents around the globe. We describe the rationale behind the DIMES deployment, discuss its design trade-offs and algorithmic challenges, and analyze the structure of the Internet as it is seen with DIMES.<|reference_end|>
arxiv
@article{shavitt2005dimes:, title={DIMES: Let the Internet Measure Itself}, author={Yuval Shavitt and Eran Shir}, journal={arXiv preprint arXiv:cs/0506099}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506099}, primaryClass={cs.NI} }
shavitt2005dimes:
arxiv-673061
cs/0506100
On the NP-Completeness of Some Graph Cluster Measures
<|reference_start|>On the NP-Completeness of Some Graph Cluster Measures: Graph clustering is the problem of identifying sparsely connected dense subgraphs (clusters) in a given graph. Proposed clustering algorithms usually optimize various fitness functions that measure the quality of a cluster within the graph. Examples of such cluster measures include the conductance, the local and relative densities, and single cluster editing. We prove that the decision problems associated with the optimization tasks of finding the clusters that are optimal with respect to these fitness measures are NP-complete.<|reference_end|>
arxiv
@article{sima2005on, title={On the NP-Completeness of Some Graph Cluster Measures}, author={Jiri Sima and Satu Elisa Schaeffer}, journal={arXiv preprint arXiv:cs/0506100}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506100}, primaryClass={cs.CC} }
sima2005on
arxiv-673062
cs/0506101
Efficient Multiclass Implementations of L1-Regularized Maximum Entropy
<|reference_start|>Efficient Multiclass Implementations of L1-Regularized Maximum Entropy: This paper discusses the application of L1-regularized maximum entropy modeling or SL1-Max [9] to multiclass categorization problems. A new modification to the SL1-Max fast sequential learning algorithm is proposed to handle conditional distributions. Furthermore, unlike most previous studies, the present research goes beyond a single type of conditional distribution. It describes and compares a variety of modeling assumptions about the class distribution (independent or exclusive) and various types of joint or conditional distributions. It results in a new methodology for combining binary regularized classifiers to achieve multiclass categorization. In this context, Maximum Entropy can be considered as a generic and efficient regularized classification tool that matches or outperforms the state-of-the art represented by AdaBoost and SVMs.<|reference_end|>
arxiv
@article{haffner2005efficient, title={Efficient Multiclass Implementations of L1-Regularized Maximum Entropy}, author={Patrick Haffner, Steven Phillips, Rob Schapire}, journal={arXiv preprint arXiv:cs/0506101}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506101}, primaryClass={cs.LG cs.CL} }
haffner2005efficient
arxiv-673063
cs/0506102
On $m$-dimensional toric codes
<|reference_start|>On $m$-dimensional toric codes: Toric codes are a class of $m$-dimensional cyclic codes introduced recently by J. Hansen. They may be defined as evaluation codes obtained from monomials corresponding to integer lattice points in an integral convex polytope $P \subseteq \mathbb{R}^m$. As such, they are in a sense a natural extension of Reed-Solomon codes. Several authors have used intersection theory on toric surfaces to derive bounds on the minimum distance of some toric codes with $m = 2$. In this paper, we will provide a more elementary approach that applies equally well to many toric codes for all $m \ge 2$. Our methods are based on a sort of multivariate generalization of Vandermonde determinants that has also been used in the study of multivariate polynomial interpolation. We use these Vandermonde determinants to determine the minimum distance of toric codes from rectangular polytopes and simplices. We also prove a general result showing that if there is a unimodular integer affine transformation taking one polytope $P_1$ to a second polytope $P_2$, then the corresponding toric codes are monomially equivalent (hence have the same parameters). We use this to begin a classification of two-dimensional toric codes with small dimension.<|reference_end|>
arxiv
@article{little2005on, title={On $m$-dimensional toric codes}, author={John Little, Ryan Schwarz}, journal={arXiv preprint arXiv:cs/0506102}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506102}, primaryClass={cs.IT math.AC math.AG math.IT} }
little2005on
arxiv-673064
cs/0506103
Security of mobile agents: a new concept of the integrity protection
<|reference_start|>Security of mobile agents: a new concept of the integrity protection: The recent developments in mobile technology (mobile phones, middleware) created a need for new methods of protecting the code transmitted through the network. The proposed mechanisms not only secure the compiled program, but also the data that can be gathered during its "journey". The oldest and simplest methods concentrate on the integrity of the code itself and on the detection of unauthorized manipulation. Other, more advanced proposals protect not only the code but also the execution state and the collected data. The paper is divided into two parts. The first one is mostly devoted to different methods of securing the code and protecting its integrity, starting from watermarking and fingerprinting, up to methods designed specially for mobile agent systems: encrypted functions, cryptographic traces, time-limited black-box security, the chained-MAC protocol, and publicly-verifiable chained digital signatures. The second part presents a new concept for providing mobile agents with integrity protection, based on a zero-knowledge proof system.<|reference_end|>
arxiv
@article{zwierko2005security, title={Security of mobile agents: a new concept of the integrity protection}, author={Aneta Zwierko, Zbigniew Kotulski}, journal={arXiv preprint arXiv:cs/0506103}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506103}, primaryClass={cs.CR} }
zwierko2005security
arxiv-673065
cs/0506104
Computing minimal models, stable models and answer sets
<|reference_start|>Computing minimal models, stable models and answer sets: We propose and study algorithms to compute minimal models, stable models and answer sets of t-CNF theories, and normal and disjunctive t-programs. We are especially interested in algorithms with non-trivial worst-case performance bounds. The bulk of the paper is concerned with the classes of 2- and 3-CNF theories, and normal and disjunctive 2- and 3-programs, for which we obtain significantly stronger results than those implied by our general considerations. We show that one can find all minimal models of 2-CNF theories and all answer sets of disjunctive 2-programs in time O(m 1.4422..^n). Our main results concern computing stable models of normal 3-programs, minimal models of 3-CNF theories and answer sets of disjunctive 3-programs. We design algorithms that run in time O(m 1.6701..^n), in the case of the first problem, and in time O(mn^2 2.2782..^n), in the case of the latter two. All these bounds improve by exponential factors the best algorithms known previously. We also obtain closely related upper bounds on the number of minimal models, stable models and answer sets a t-CNF theory, a normal t-program or a disjunctive t-program may have. To appear in Theory and Practice of Logic Programming (TPLP).<|reference_end|>
arxiv
@article{lonc2005computing, title={Computing minimal models, stable models and answer sets}, author={Z. Lonc, M. Truszczynski}, journal={arXiv preprint arXiv:cs/0506104}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506104}, primaryClass={cs.LO cs.DS} }
lonc2005computing
arxiv-673066
cs/0506105
A protected password change protocol
<|reference_start|>A protected password change protocol: Several protected password change protocols have been proposed. However, the previous protocols were easily vulnerable to several attacks, such as denial-of-service, password-guessing, stolen-verifier and impersonation attacks. Recently, Chang et al. proposed a simple authenticated key agreement and protected password change protocol to enhance security and efficiency. In this paper, we show that password-guessing, denial-of-service and known-key attacks still work against their password change protocol. We also propose a new password change protocol that withstands all of these security threats.<|reference_end|>
arxiv
@article{wang2005a, title={A protected password change protocol}, author={Ren-Chiun Wang, Chou-Chen Yang, Kun-Ru Mo}, journal={arXiv preprint arXiv:cs/0506105}, year={2005}, archivePrefix={arXiv}, eprint={cs/0506105}, primaryClass={cs.CR} }
wang2005a
arxiv-673067
cs/0507001
Asymptotically Optimal Tree-based Group Key Management Schemes
<|reference_start|>Asymptotically Optimal Tree-based Group Key Management Schemes: In key management schemes that realize secure multicast communications encrypted by group keys on a public network, tree structures are often used to update the group keys efficiently. Selcuk and Sidhu have proposed an efficient scheme which dynamically updates the tree structures based on the withdrawal probabilities of members. In this paper, it is shown that the Selcuk-Sidhu scheme is asymptotically optimal for the cost of withdrawal. Furthermore, a new key management scheme, which takes into account the key update costs of joining in addition to withdrawal, is proposed. It is proved that the proposed scheme is also asymptotically optimal, and it is shown by simulation that it can attain good performance for nonasymptotic cases.<|reference_end|>
arxiv
@article{sakai2005asymptotically, title={Asymptotically Optimal Tree-based Group Key Management Schemes}, author={Hideyuki Sakai and Hirosuke Yamamoto}, journal={arXiv preprint arXiv:cs/0507001}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507001}, primaryClass={cs.IT cs.CR math.IT} }
sakai2005asymptotically
arxiv-673068
cs/0507002
The Three Node Wireless Network: Achievable Rates and Cooperation Strategies
<|reference_start|>The Three Node Wireless Network: Achievable Rates and Cooperation Strategies: We consider a wireless network composed of three nodes and limited by the half-duplex and total power constraints. This formulation encompasses many of the special cases studied in the literature and allows for capturing the common features shared by them. Here, we focus on three special cases, namely 1) Relay Channel, 2) Multicast Channel, and 3) Conference Channel. These special cases are judiciously chosen to reflect varying degrees of complexity while highlighting the common ground shared by the different variants of the three node wireless network. For the relay channel, we propose a new cooperation scheme that exploits the wireless feedback gain. This scheme combines the benefits of decode-and-forward and compress-and-forward strategies and avoids the idealistic feedback assumption adopted in earlier works. Our analysis of the achievable rate of this scheme reveals the diminishing feedback gain at both the low and high signal-to-noise ratio regimes. Inspired by the proposed feedback strategy, we identify a greedy cooperation framework applicable to both the multicast and conference channels. Our performance analysis reveals several nice properties of the proposed greedy approach and the central role of cooperative source-channel coding in exploiting the receiver side information in the wireless network setting. Our proofs for the cooperative multicast with side-information rely on novel nested and independent binning encoders along with a list decoder.<|reference_end|>
arxiv
@article{lai2005the, title={The Three Node Wireless Network: Achievable Rates and Cooperation Strategies}, author={Lifeng Lai, Ke Liu, and Hesham El Gamal}, journal={arXiv preprint arXiv:cs/0507002}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507002}, primaryClass={cs.IT math.IT} }
lai2005the
arxiv-673069
cs/0507003
The role of Quantum Interference in Quantum Computing
<|reference_start|>The role of Quantum Interference in Quantum Computing: Quantum interference is proposed as a tool to augment Quantum Computation.<|reference_end|>
arxiv
@article{shiekh2005the, title={The role of Quantum Interference in Quantum Computing}, author={A.Y. Shiekh}, journal={Int. Jour. of Theo. Phys., 45, 1653, 2006}, year={2005}, doi={10.1007/s10773-005-9025-8}, archivePrefix={arXiv}, eprint={cs/0507003}, primaryClass={cs.CC quant-ph} }
shiekh2005the
arxiv-673070
cs/0507004
An End-to-End Probabilistic Network Calculus with Moment Generating Functions
<|reference_start|>An End-to-End Probabilistic Network Calculus with Moment Generating Functions: Network calculus is a min-plus system theory for performance evaluation of queuing networks. Its elegance stems from intuitive convolution formulas for concatenation of deterministic servers. Recent research dispenses with the worst-case assumptions of network calculus to develop a probabilistic equivalent that benefits from statistical multiplexing. Significant achievements have been made, owing for example to the theory of effective bandwidths; however, the outstanding scalability established by the concatenation of deterministic servers has not been shown. This paper establishes a concise, probabilistic network calculus with moment generating functions. The presented work features closed-form, end-to-end, probabilistic performance bounds that achieve the objective of scaling linearly in the number of servers in series. The consistent application of moment generating functions put forth in this paper utilizes independence beyond the scope of current statistical multiplexing of flows. A relevant additional gain is demonstrated for tandem servers with independent cross-traffic.<|reference_end|>
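The Chernoff step at the heart of such MGF bounds can be written down in a few lines. The following LaTeX sketch states the standard backlog bound obtained from a union bound plus Markov's inequality on the exponentiated process, assuming arrivals and service are independent; the paper's end-to-end results come from composing such bounds along the path, where the service MGFs of servers in series combine multiplicatively.

```latex
% Backlog bound via union bound + Chernoff (arrivals A independent of
% service S); B(t) = sup_{0<=s<=t} [A(s,t) - S(s,t)]:
\begin{align*}
\Pr[B(t) > x]
  &\le \sum_{s=0}^{t} \Pr\big[A(s,t) - S(s,t) > x\big] \\
  &\le e^{-\theta x} \sum_{s=0}^{t}
       \mathsf{E}\big[e^{\theta A(s,t)}\big]\,
       \mathsf{E}\big[e^{-\theta S(s,t)}\big],
  \qquad \theta > 0 .
\end{align*}
```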
arxiv
@article{fidler2005an, title={An End-to-End Probabilistic Network Calculus with Moment Generating Functions}, author={Markus Fidler}, journal={IWQoS 2006}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507004}, primaryClass={cs.IT cs.PF math.IT} }
fidler2005an
arxiv-673071
cs/0507005
A Genetic Algorithm Based Finger Selection Scheme for UWB MMSE Rake Receivers
<|reference_start|>A Genetic Algorithm Based Finger Selection Scheme for UWB MMSE Rake Receivers: Due to a large number of multipath components in a typical ultra wideband (UWB) system, selective Rake (SRake) receivers, which combine energy from a subset of multipath components, are commonly employed. In order to optimize system performance, an optimal selection of the multipath components to be employed at the fingers of an SRake receiver needs to be considered. In this paper, this finger selection problem is investigated for a minimum mean square error (MMSE) UWB SRake receiver. Since finding the optimal solution is NP-hard, a genetic algorithm (GA) based iterative scheme is proposed, which can achieve near-optimal performance after a reasonable number of iterations. Simulation results are presented to compare the performance of the proposed finger selection algorithm with those of the conventional and optimal schemes.<|reference_end|>
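A generic GA of this shape, specialized to k-subset selection, is short to write. In the sketch below the fitness is a deliberately simplified stand-in (total captured energy), since the paper's true objective, the output SINR of the MMSE combiner, depends on the full correlation structure; the population size, mutation rate and crossover rule are likewise illustrative choices.

```python
import random

def ga_select(energies, k, pop_size=30, gens=50, pmut=0.2):
    L = len(energies)
    def fitness(sel):
        # Toy objective: energy captured by the selected fingers.
        return sum(energies[i] for i in sel)
    def crossover(a, b):
        # Child draws its k fingers from the union of the two parents.
        return set(random.sample(list(a | b), k))
    def mutate(sel):
        # With probability pmut, swap one selected finger for an unselected one.
        if random.random() < pmut:
            out = random.choice(list(sel))
            inn = random.choice([i for i in range(L) if i not in sel])
            sel = (sel - {out}) | {inn}
        return sel
    pop = [set(random.sample(range(L), k)) for _ in range(pop_size)]
    for _ in range(gens):
        pop.sort(key=fitness, reverse=True)
        elite = pop[: pop_size // 2]
        pop = elite + [mutate(crossover(*random.sample(elite, 2)))
                       for _ in range(pop_size - len(elite))]
    return max(pop, key=fitness)

energies = [random.random() for _ in range(40)]   # toy path energies
print(sorted(ga_select(energies, k=5)))
```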
arxiv
@article{gezici2005a, title={A Genetic Algorithm Based Finger Selection Scheme for UWB MMSE Rake Receivers}, author={Sinan Gezici, Mung Chiang, H. Vincent Poor and Hisashi Kobayashi}, journal={arXiv preprint arXiv:cs/0507005}, year={2005}, doi={10.1109/ICU.2005.1569977}, archivePrefix={arXiv}, eprint={cs/0507005}, primaryClass={cs.IT math.IT} }
gezici2005a
arxiv-673072
cs/0507006
A Two-Step Time of Arrival Estimation Algorithm for Impulse Radio Ultra Wideband Systems
<|reference_start|>A Two-Step Time of Arrival Estimation Algorithm for Impulse Radio Ultra Wideband Systems: High time resolution of ultra wideband (UWB) signals facilitates very precise positioning capabilities based on time-of-arrival (TOA) measurements. Although the theoretical lower bound for TOA estimation can be achieved by the maximum likelihood principle, it is impractical due to the need for extremely high-rate sampling and the presence of large number of multipath components. On the other hand, the conventional correlation-based algorithm, which serially searches possible signal delays, takes a very long time to estimate the TOA of a received UWB signal. Moreover, the first signal path does not always have the strongest correlation output. Therefore, first path detection algorithms need to be considered. In this paper, a data-aided two-step TOA estimation algorithm is proposed. In order to speed up the estimation process, the first step estimates the rough TOA of the received signal based on received signal energy. Then, in the second step, the arrival time of the first signal path is estimated by considering a hypothesis testing approach. The proposed scheme uses low-rate correlation outputs, and is able to perform accurate TOA estimation in reasonable time intervals. The simulation results are presented to analyze the performance of the estimator.<|reference_end|>
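The two-step structure can be mimicked with a few lines of NumPy: a coarse search over block energies, followed by a fine search for the first sample whose energy clears a noise-based threshold. The threshold rule below is a simple stand-in for the paper's hypothesis test, and the sampling rate, path delays and amplitudes are made-up example values.

```python
import numpy as np

def two_step_toa(r, block, fs, thresh_factor=16.0):
    e = r ** 2
    # Step 1: coarse timing -- pick the length-`block` chunk with most energy.
    chunks = e[: len(e) // block * block].reshape(-1, block).sum(axis=1)
    b = int(np.argmax(chunks))
    # Step 2: fine timing -- earliest sample (searching one block early, since
    # the first path may be weaker than the strongest one) whose energy clears
    # a threshold set from a crude median-based noise estimate.
    noise = np.median(chunks) / block
    for i in range(max(0, (b - 1) * block), (b + 1) * block):
        if e[i] > thresh_factor * noise:
            return i / fs
    return b * block / fs

rng = np.random.default_rng(1)
fs = 1e9                                   # assumed 1 GHz sampling
r = 0.02 * rng.standard_normal(4000)       # noise floor
for k, a in enumerate([0.4, 1.0, 0.7]):    # weak first path, stronger echoes
    r[1234 + 40 * k] += a
print(two_step_toa(r, block=100, fs=fs) * fs)   # expected: a sample near 1234
```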
arxiv
@article{gezici2005a, title={A Two-Step Time of Arrival Estimation Algorithm for Impulse Radio Ultra Wideband Systems}, author={Sinan Gezici, Zafer Sahinoglu, Andreas F. Molisch, Hisashi Kobayashi, and H. Vincent Poor}, journal={arXiv preprint arXiv:cs/0507006}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507006}, primaryClass={cs.IT math.IT} }
gezici2005a
arxiv-673073
cs/0507007
Strong normalisation for applied lambda calculi
<|reference_start|>Strong normalisation for applied lambda calculi: We consider the untyped lambda calculus with constructors and recursively defined constants. We construct a domain-theoretic model such that any term not denoting bottom is strongly normalising provided all its `stratified approximations' are. From this we derive a general normalisation theorem for applied typed lambda-calculi: If all constants have a total value, then all typeable terms are strongly normalising. We apply this result to extensions of G\"odel's system T and system F extended by various forms of bar recursion for which strong normalisation was hitherto unknown.<|reference_end|>
arxiv
@article{berger2005strong, title={Strong normalisation for applied lambda calculi}, author={Ulrich Berger}, journal={Logical Methods in Computer Science, Volume 1, Issue 2 (October 5, 2005) lmcs:2267}, year={2005}, doi={10.2168/LMCS-1(2:3)2005}, archivePrefix={arXiv}, eprint={cs/0507007}, primaryClass={cs.GT} }
berger2005strong
arxiv-673074
cs/0507008
Complexity Science for Simpletons
<|reference_start|>Complexity Science for Simpletons: In this article, we shall describe some of the most interesting topics in the subject of Complexity Science for a general audience. Anyone with a solid foundation in high school mathematics (with some calculus) and an elementary understanding of computer programming will be able to follow this article. First, we shall explain the significance of the P versus NP problem and solve it. Next, we shall describe two other famous mathematics problems, the Collatz 3n+1 Conjecture and the Riemann Hypothesis, and show how both Chaitin's incompleteness theorem and Wolfram's notion of "computational irreducibility" are important for understanding why no one has, as of yet, solved these two problems.<|reference_end|>
arxiv
@article{feinstein2005complexity, title={Complexity Science for Simpletons}, author={Craig Alan Feinstein}, journal={Progress in Physics, 2006, v. 3, 35-42}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507008}, primaryClass={cs.CC cs.GL} }
feinstein2005complexity
arxiv-673075
cs/0507009
MAEC : A Movement-Assisted Energy Conserving Method in Event Driven Wireless Sensor Networks
<|reference_start|>MAEC : A Movement-Assisted Energy Conserving Method in Event Driven Wireless Sensor Networks: Energy is one of the most important resources in wireless sensor networks. Recently, the mobility of the base station has been exploited to preserve energy. But in event-driven networks, the mobility issue is quite different from the continuous-monitoring one, because only a small portion of the sensor nodes have data to send at any one time. The number of sensor nodes that forward traffic should be minimized to prolong the network lifetime. In this paper, we propose a movement-assisted energy-conserving method that tries to reduce the number of forwarding sensor nodes by directing the base station to move close to the hotspots. This method achieves good performance, especially when applied to a network with a set of cooperative mobile base stations. Extensive simulation has been done to verify the effectiveness of the proposed scheme.<|reference_end|>
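As a minimal illustration of the move-close-to-hotspots idea, one can steer the base station a bounded step per round toward the centroid of the currently reporting sensors. This control rule is our simplification for illustration; the paper's actual movement strategy may differ.

```python
import math

def next_base_position(active, base, max_step=1.0):
    # Head a bounded step toward the centroid of the currently active sensors.
    if not active:
        return base
    cx = sum(x for x, _ in active) / len(active)
    cy = sum(y for _, y in active) / len(active)
    dx, dy = cx - base[0], cy - base[1]
    d = math.hypot(dx, dy)
    if d <= max_step:
        return (cx, cy)
    return (base[0] + max_step * dx / d, base[1] + max_step * dy / d)

print(next_base_position([(5.0, 5.0), (6.0, 7.0)], (0.0, 0.0)))
```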
arxiv
@article{zhao2005maec, title={MAEC : A Movement-Assisted Energy Conserving Method in Event Driven Wireless Sensor Networks}, author={Ming Zhao, Zhigang Chen, Xiaoheng Deng, Lianming Zhang, Anfeng Liu and Guosheng Huang}, journal={arXiv preprint arXiv:cs/0507009}, year={2005}, doi={10.1109/TENCON.2005.301075}, archivePrefix={arXiv}, eprint={cs/0507009}, primaryClass={cs.NI} }
zhao2005maec
arxiv-673076
cs/0507010
A Study for the Feature Core of Dynamic Reduct
<|reference_start|>A Study for the Feature Core of Dynamic Reduct: For the reduct problems of decision systems, this paper proposes the notion of a dynamic core based on the dynamic reduct model. It gives various formal definitions of the dynamic core and discusses some of its properties. All of these show that the dynamic core possesses the essential characteristics of the feature core.<|reference_end|>
arxiv
@article{wang2005a, title={A Study for the Feature Core of Dynamic Reduct}, author={Jiayang Wang}, journal={arXiv preprint arXiv:cs/0507010}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507010}, primaryClass={cs.AI} }
wang2005a
arxiv-673077
cs/0507011
A Utility-Based Approach to Power Control and Receiver Design in Wireless Data Networks
<|reference_start|>A Utility-Based Approach to Power Control and Receiver Design in Wireless Data Networks: In this work, the cross-layer design problem of joint multiuser detection and power control is studied using a game-theoretic approach. The uplink of a direct-sequence code division multiple access (DS-CDMA) data network is considered and a non-cooperative game is proposed in which users in the network are allowed to choose their uplink receivers as well as their transmit powers to maximize their own utilities. The utility function measures the number of reliable bits transmitted by the user per joule of energy consumed. Focusing on linear receivers, the Nash equilibrium for the proposed game is derived. It is shown that the equilibrium is one where the powers are SIR-balanced with the minimum mean square error (MMSE) detector as the receiver. In addition, this framework is used to study power control games for the matched filter, the decorrelator, and the MMSE detector; and the receivers' performance is compared in terms of the utilities achieved at equilibrium (in bits/Joule). The optimal cooperative solution is also discussed and compared with the non-cooperative approach. Extensions of the results to the case of multiple receive antennas are also presented. In addition, an admission control scheme based on maximizing the total utility in the network is proposed.<|reference_end|>
arxiv
@article{meshkati2005a, title={A Utility-Based Approach to Power Control and Receiver Design in Wireless Data Networks}, author={Farhad Meshkati, H. Vincent Poor, Stuart C. Schwartz and Narayan B. Mandayam}, journal={arXiv preprint arXiv:cs/0507011}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507011}, primaryClass={cs.IT math.IT} }
meshkati2005a
arxiv-673078
cs/0507012
Lattice Gas Cellular Automata for Computational Fluid Animation
<|reference_start|>Lattice Gas Cellular Automata for Computational Fluid Animation: The past two decades have shown rapid growth of physically-based modeling of fluids for computer graphics applications. In this area, a common top-down approach is to model the fluid dynamics by the Navier-Stokes equations and apply numerical techniques such as Finite Differences or Finite Elements for the simulation. In this paper we focus on fluid modeling through Lattice Gas Cellular Automata (LGCA) for computer graphics applications. LGCA are discrete models based on point particles that move on a lattice according to suitable and simple rules, in order to mimic a full molecular dynamics. By the Chapman-Enskog expansion, a known multiscale technique in this area, it can be demonstrated that the Navier-Stokes model can be reproduced by the LGCA technique. Thus, with LGCA we get a fluid model that does not require the solution of complicated equations. We therefore combine the advantage of the low computational cost of LGCA with its ability to mimic realistic fluid dynamics to develop a new animation framework for computer graphics applications. In this work, we discuss the theoretical elements of our proposal and show experimental results.<|reference_end|>
arxiv
@article{giraldi2005lattice, title={Lattice Gas Cellular Automata for Computational Fluid Animation}, author={Gilson A. Giraldi, Adilson V. Xavier, Antonio L. Apolinario Jr, Paulo S. Rodrigues}, journal={arXiv preprint arXiv:cs/0507012}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507012}, primaryClass={cs.GR} }
giraldi2005lattice
arxiv-673079
cs/0507013
An O(n log n)-Time Algorithm for the Restricted Scaffold Assignment
<|reference_start|>An O(n log n)-Time Algorithm for the Restricted Scaffold Assignment: The assignment problem takes as input two finite point sets S and T and establishes a correspondence between points in S and points in T, such that each point in S maps to exactly one point in T, and each point in T maps to at least one point in S. In this paper we show that this problem has an O(n log n)-time solution, provided that the points in S and T are restricted to lie on a line (linear time, if S and T are presorted).<|reference_end|>
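Because an optimal assignment can be taken non-crossing once both point sets are sorted, the cost is computable by a simple dynamic program. The quadratic-time sketch below captures this structure; the paper's contribution is refining such a computation to O(n log n) (and to linear time on presorted input), which the sketch does not attempt.

```python
def assignment_cost(S, T):
    # Minimum-cost restricted assignment on a line: every s in S is matched
    # to exactly one t in T, every t receives at least one s, and the cost
    # is the sum of distances.
    S, T = sorted(S), sorted(T)
    n, m = len(S), len(T)
    if n < m:
        return None                    # infeasible: some t would be uncovered
    INF = float("inf")
    # dp[i][j]: first i points of S assigned, first j points of T covered,
    # with S[i-1] assigned to T[j-1]; optimality of some non-crossing
    # assignment justifies the monotone transitions.
    dp = [[INF] * (m + 1) for _ in range(n + 1)]
    dp[1][1] = abs(S[0] - T[0])
    for i in range(2, n + 1):
        for j in range(1, min(i, m) + 1):
            best = min(dp[i - 1][j], dp[i - 1][j - 1] if j > 1 else INF)
            dp[i][j] = abs(S[i - 1] - T[j - 1]) + best
    return dp[n][m]

print(assignment_cost([1, 2, 10], [0, 11]))   # {1,2}->0 and 10->11: cost 4
```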
arxiv
@article{colannino2005an, title={An O(n log n)-Time Algorithm for the Restricted Scaffold Assignment}, author={Justin Colannino, Mirela Damian, Ferran Hurtado, John Iacono, Henk Meijer, Suneeta Ramaswami and Godfried Toussaint}, journal={arXiv preprint arXiv:cs/0507013}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507013}, primaryClass={cs.CG cs.DM} }
colannino2005an
arxiv-673080
cs/0507014
Isomorphism of graphs-a polynomial test
<|reference_start|>Isomorphism of graphs-a polynomial test: An explicit algorithm is presented for testing whether two non-directed graphs are isomorphic or not. It is shown that for a graph of n vertices, the number of independent operations needed for the test is polynomial in n. A proof that the algorithm actually performs the test is presented.<|reference_end|>
arxiv
@article{schwartz2005isomorphism, title={Isomorphism of graphs-a polynomial test}, author={Moshe Schwartz}, journal={arXiv preprint arXiv:cs/0507014}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507014}, primaryClass={cs.DS} }
schwartz2005isomorphism
arxiv-673081
cs/0507015
Duality between Packings and Coverings of the Hamming Space
<|reference_start|>Duality between Packings and Coverings of the Hamming Space: We investigate the packing and covering densities of linear and nonlinear binary codes, and establish a number of duality relationships between the packing and covering problems. Specifically, we prove that if almost all codes (in the class of linear or nonlinear codes) are good packings, then only a vanishing fraction of codes are good coverings, and vice versa: if almost all codes are good coverings, then at most a vanishing fraction of codes are good packings. We also show that any specific maximal binary code is either a good packing or a good covering, in a certain well-defined sense.<|reference_end|>
arxiv
@article{cohen2005duality, title={Duality between Packings and Coverings of the Hamming Space}, author={Gérard Cohen and Alexander Vardy}, journal={arXiv preprint arXiv:cs/0507015}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507015}, primaryClass={cs.IT cs.DM math.IT} }
cohen2005duality
arxiv-673082
cs/0507016
Minimizing makespan in flowshop with time lags
<|reference_start|>Minimizing makespan in flowshop with time lags: We consider the problem of minimizing the makespan in a flowshop involving maximal and minimal time lags. Time lag constraints generalize the classical precedence constraints between operations. We assume that such constraints are only defined between operations of the same job. We propose a solution method and present several extensions.<|reference_end|>
arxiv
@article{fondrevelle2005minimizing, title={Minimizing makespan in flowshop with time lags}, author={Julien Fondrevelle (INRIA Lorraine - LORIA), Ammar Oulamara (INRIA Lorraine - LORIA), Marie-Claude Portmann (INRIA Lorraine - LORIA)}, journal={In MAPSP'2005 [OAI: oai:hal.inria.fr:inria-00000149_v1]}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507016}, primaryClass={cs.DM} }
fondrevelle2005minimizing
arxiv-673083
cs/0507017
Minimum Cost and List Homomorphisms to Semicomplete Digraphs
<|reference_start|>Minimum Cost and List Homomorphisms to Semicomplete Digraphs: The following optimization problem was introduced in \cite{gutinDAM}, where it was motivated by a real-world problem in defence logistics. Suppose we are given a pair of digraphs $D,H$ and a positive cost $c_i(u)$ for each $u\in V(D)$ and $i\in V(H)$. The cost of a homomorphism $f$ of $D$ to $H$ is $\sum_{u\in V(D)}c_{f(u)}(u)$. For a fixed digraph $H$, the minimum cost homomorphism problem for $H$, MinHOMP($H$), is stated as follows: For an input digraph $D$ and costs $c_i(u)$ for each $u\in V(D)$ and $i\in V(H)$, verify whether there is a homomorphism of $D$ to $H$ and, if it exists, find such a homomorphism of minimum cost. We obtain dichotomy classifications of the computational complexity of the list homomorphism problem and MinHOMP($H$), when $H$ is a semicomplete digraph (a digraph in which every two vertices have at least one arc between them). Our dichotomy for the list homomorphism problem coincides with the one obtained by Bang-Jensen, Hell and MacGillivray in 1988 for the homomorphism problem when $H$ is a semicomplete digraph: both problems are polynomial-time solvable if $H$ has at most one cycle; otherwise, both problems are NP-complete. The dichotomy for MinHOMP($H$) is different: the problem is polynomial-time solvable if $H$ is acyclic or $H$ is a cycle of length 2 or 3; otherwise, the problem is NP-hard.<|reference_end|>
arxiv
@article{gutin2005minimum, title={Minimum Cost and List Homomorphisms to Semicomplete Digraphs}, author={G. Gutin, A. Rafiey, A. Yeo}, journal={arXiv preprint arXiv:cs/0507017}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507017}, primaryClass={cs.DM} }
gutin2005minimum
arxiv-673084
cs/0507018
Optimal and Suboptimal Detection of Gaussian Signals in Noise: Asymptotic Relative Efficiency
<|reference_start|>Optimal and Suboptimal Detection of Gaussian Signals in Noise: Asymptotic Relative Efficiency: The performance of Bayesian detection of Gaussian signals using noisy observations is investigated via the error exponent for the average error probability. Under unknown signal correlation structure or limited processing capability it is reasonable to use the simple quadratic detector that is optimal in the case of an independent and identically distributed (i.i.d.) signal. Using the large deviations principle, the performance of this detector (which is suboptimal for non-i.i.d. signals) is compared with that of the optimal detector for correlated signals via the asymptotic relative efficiency defined as the ratio between sample sizes of two detectors required for the same performance in the large-sample-size regime. The effects of SNR on the ARE are investigated. It is shown that the asymptotic efficiency of the simple quadratic detector relative to the optimal detector converges to one as the SNR increases without bound for any bounded spectrum, and that the simple quadratic detector performs as well as the optimal detector for a wide range of the correlation values at high SNR.<|reference_end|>
arxiv
@article{sung2005optimal, title={Optimal and Suboptimal Detection of Gaussian Signals in Noise: Asymptotic Relative Efficiency}, author={Youngchul Sung, Lang Tong and H. Vincent Poor}, journal={arXiv preprint arXiv:cs/0507018}, year={2005}, doi={10.1117/12.620177}, archivePrefix={arXiv}, eprint={cs/0507018}, primaryClass={cs.IT math.IT} }
sung2005optimal
arxiv-673085
cs/0507019
Making Space for Stories: Ambiguity in the Design of Personal Communication Systems
<|reference_start|>Making Space for Stories: Ambiguity in the Design of Personal Communication Systems: Pervasive personal communication technologies offer the potential for important social benefits for individual users, but also the potential for significant social difficulties and costs. In research on face-to-face social interaction, ambiguity is often identified as an important resource for resolving social difficulties. In this paper, we discuss two design cases of personal communication systems, one based on fieldwork of a commercial system and another based on an unrealized design concept. The cases illustrate how user behavior concerning a particular social difficulty, unexplained unresponsiveness, can be influenced by technological issues that result in interactional ambiguity. The cases also highlight the need to balance the utility of ambiguity against the utility of usability and communicative clarity.<|reference_end|>
arxiv
@article{aoki2005making, title={Making Space for Stories: Ambiguity in the Design of Personal Communication Systems}, author={Paul M. Aoki and Allison Woodruff}, journal={Proc. ACM SIGCHI Conf. on Human Factors in Computing Systems, Portland, OR, Apr. 2005, 181-190. ACM Press.}, year={2005}, doi={10.1145/1054972.1054998}, archivePrefix={arXiv}, eprint={cs/0507019}, primaryClass={cs.HC} }
aoki2005making
arxiv-673086
cs/0507020
First-order queries on structures of bounded degree are computable with constant delay
<|reference_start|>First-order queries on structures of bounded degree are computable with constant delay: A bounded degree structure is either a relational structure all of whose relations are of bounded degree or a functional structure involving bijective functions only. In this paper, we revisit the complexity of the evaluation problem of not necessarily Boolean first-order queries over structures of bounded degree. Query evaluation is considered here as a dynamical process. We prove that any query on bounded degree structures is in the class Constant-Delay(lin), i.e., can be computed by an algorithm that has two separate parts: it has a precomputation step of time linear in the size of the structure, and then it outputs all tuples one by one with a constant (i.e., depending on the size of the formula only) delay between each. Seen as a global process, this implies that queries on bounded degree structures can be evaluated in total time $O(f(|\phi|)\cdot(|\mathcal{S}|+|\phi(\mathcal{S})|))$ and space $O(f(|\phi|)\cdot|\mathcal{S}|)$, where $\mathcal{S}$ is the structure, $\phi$ is the formula, $\phi(\mathcal{S})$ is the result of the query and $f$ is some function. Among other things, our results generalize a result of \cite{Seese-96} on the data complexity of the model-checking problem for bounded degree structures. Besides, the originality of our approach compared to that of \cite{Seese-96} and comparable results is that it does not rely on Hanf's model-theoretic technique (see \cite{Hanf-65}) and is completely effective.<|reference_end|>
arxiv
@article{durand2005first-order, title={First-order queries on structures of bounded degree are computable with constant delay}, author={Arnaud Durand, Etienne Grandjean}, journal={arXiv preprint arXiv:cs/0507020}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507020}, primaryClass={cs.LO cs.CC} }
durand2005first-order
arxiv-673087
cs/0507021
Finding routes in anonymous sensor networks
<|reference_start|>Finding routes in anonymous sensor networks: We consider networks of anonymous sensors and address the problem of constructing routes for the delivery of information from a group of sensors in response to a query by a sink. In order to circumvent the restrictions imposed by anonymity, we rely on using the power level perceived by the sensors in the query from the sink. We introduce a simple distributed algorithm to achieve the building of routes to the sink and evaluate its performance by means of simulations.<|reference_end|>
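The role of perceived power can be illustrated with a greedy gradient sketch: each anonymous node forwards toward any neighbor that heard the sink's query more strongly. The path-loss model, parameters and greedy tie-breaking below are our assumptions for illustration, not the distributed algorithm of the paper.

```python
import random, math

def perceived_power(d, p0=1.0, alpha=2.0):
    # Toy path-loss model: received power decays as d^-alpha. This is an
    # assumption; the scheme only needs nodes to compare power levels.
    return p0 / (d ** alpha)

def build_route(nodes, sink, radio_range, src):
    # Greedy power-gradient routing without node IDs: each hop forwards to a
    # neighbor whose perceived query power is higher, climbing toward the sink.
    power = {p: perceived_power(math.dist(p, sink)) for p in nodes}
    route, cur = [src], src
    while math.dist(cur, sink) > radio_range:
        nbrs = [q for q in nodes
                if q != cur and math.dist(cur, q) <= radio_range
                and power[q] > power[cur]]
        if not nbrs:
            return None               # local minimum: no progress possible
        cur = max(nbrs, key=power.get)
        route.append(cur)
    return route + [sink]

random.seed(3)
nodes = [(random.random() * 10, random.random() * 10) for _ in range(300)]
print(build_route(nodes, sink=(9.0, 9.0), radio_range=1.2, src=nodes[0]))
```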
arxiv
@article{dutra2005finding, title={Finding routes in anonymous sensor networks}, author={Renato C. Dutra, Valmir C. Barbosa}, journal={Information Processing Letters 98 (2006), 139-144}, year={2005}, doi={10.1016/j.ipl.2006.01.001}, archivePrefix={arXiv}, eprint={cs/0507021}, primaryClass={cs.NI} }
dutra2005finding
arxiv-673088
cs/0507022
On Hilberg's Law and Its Links with Guiraud's Law
<|reference_start|>On Hilberg's Law and Its Links with Guiraud's Law: Hilberg (1990) supposed that finite-order excess entropy of a random human text is proportional to the square root of the text length. Assuming that Hilberg's hypothesis is true, we derive Guiraud's law, which states that the number of word types in a text is greater than proportional to the square root of the text length. Our derivation is based on some mathematical conjecture in coding theory and on several experiments suggesting that words can be defined approximately as the nonterminals of the shortest context-free grammar for the text. Such operational definition of words can be applied even to texts deprived of spaces, which do not allow for Mandelbrot's ``intermittent silence'' explanation of Zipf's and Guiraud's laws. In contrast to Mandelbrot's, our model assumes some probabilistic long-memory effects in human narration and might be capable of explaining Menzerath's law.<|reference_end|>
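In symbols, the implication derived in the paper connects the two laws as follows (E is the finite-order excess entropy, V the number of word types, n the text length); the asymptotic notation is ours, compressing "proportional to" and "greater than proportional to".

```latex
% n: text length, E(n): finite-order excess entropy, V(n): word types.
% Hilberg's hypothesis and the form of Guiraud's law derived from it:
\[
  E(n) = \Theta\!\left(\sqrt{n}\,\right)
  \quad\Longrightarrow\quad
  V(n) = \Omega\!\left(\sqrt{n}\,\right).
\]
```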
arxiv
@article{dȩbowski2005on, title={On Hilberg's Law and Its Links with Guiraud's Law}, author={Łukasz Dębowski}, journal={Journal of Quantitative Linguistics 13(1):81-109, 2006}, year={2005}, doi={10.1080/09296170500500637}, archivePrefix={arXiv}, eprint={cs/0507022}, primaryClass={cs.CL cs.IT math.IT} }
dȩbowski2005on
arxiv-673089
cs/0507023
Two-dimensional cellular automata and the analysis of correlated time series
<|reference_start|>Two-dimensional cellular automata and the analysis of correlated time series: Correlated time series are time series that, by virtue of the underlying process to which they refer, are expected to influence each other strongly. We introduce a novel approach to handle such time series, one that models their interaction as a two-dimensional cellular automaton and therefore allows them to be treated as a single entity. We apply our approach to the problems of filling gaps and predicting values in rainfall time series. Computational results show that the new approach compares favorably to Kalman smoothing and filtering.<|reference_end|>
arxiv
@article{rigo2005two-dimensional, title={Two-dimensional cellular automata and the analysis of correlated time series}, author={Luis O. Rigo Jr., Valmir C. Barbosa}, journal={Pattern Recognition Letters 27 (2006), 1353-1360}, year={2005}, doi={10.1016/j.patrec.2006.01.005}, archivePrefix={arXiv}, eprint={cs/0507023}, primaryClass={cs.AI} }
rigo2005two-dimensional
arxiv-673090
cs/0507024
Experiments in Clustering Homogeneous XML Documents to Validate an Existing Typology
<|reference_start|>Experiments in Clustering Homogeneous XML Documents to Validate an Existing Typology: This paper presents some experiments in clustering homogeneous XML documents to validate an existing classification or, more generally, an organisational structure. Our approach integrates techniques for extracting knowledge from documents with unsupervised classification (clustering) of documents. We focus on the feature selection used for representing documents and its impact on the emerging classification. We mix the selection of structured features with fine textual selection based on syntactic characteristics. We illustrate and evaluate this approach with a collection of Inria activity reports for the year 2003. The objective is to cluster projects into larger groups (Themes), based on the keywords or different chapters of these activity reports. We then compare the results of clustering using different feature selections, with the official theme structure used by Inria.<|reference_end|>
arxiv
@article{despeyroux2005experiments, title={Experiments in Clustering Homogeneous XML Documents to Validate an Existing Typology}, author={Thierry Despeyroux (INRIA Rocquencourt / INRIA Sophia Antipolis), Yves Lechevallier (INRIA Rocquencourt / INRIA Sophia Antipolis), Brigitte Trousse (INRIA Rocquencourt / INRIA Sophia Antipolis), Anne-Marie Vercoustre (INRIA Rocquencourt / INRIA Sophia Antipolis)}, journal={arXiv preprint arXiv:cs/0507024}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507024}, primaryClass={cs.IR} }
despeyroux2005experiments
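A generic sketch of the clustering step described above, not the paper's pipeline: restrict each report to one chosen XML field, represent it by TF-IDF features, and cluster with k-means. The field choice, the sample texts, and k are illustrative assumptions.

```python
# Generic sketch of the clustering step: represent each report by TF-IDF
# features extracted from one chosen XML field and cluster with k-means.
# The texts and k=2 below are illustrative assumptions, not the paper's data.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans

docs = [
    "machine learning statistics data",      # stand-ins for text extracted
    "networks protocols routing latency",    # from one XML element (e.g. a
    "learning kernels classification data",  # keywords chapter) per report
    "routing peer-to-peer networks overlay",
]
X = TfidfVectorizer().fit_transform(docs)
labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(X)
print(labels)  # compare these clusters against the official theme structure
```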
arxiv-673091
cs/0507025
Comparison of Resampling Schemes for Particle Filtering
<|reference_start|>Comparison of Resampling Schemes for Particle Filtering: This contribution is devoted to the comparison of various resampling approaches that have been proposed in the literature on particle filtering. It is first shown using simple arguments that the so-called residual and stratified methods do yield an improvement over the basic multinomial resampling approach. A simple counter-example showing that this property does not hold true for systematic resampling is given. Finally, some results on the large-sample behavior of the simple bootstrap filter algorithm are given. In particular, a central limit theorem is established for the case where resampling is performed using the residual approach.<|reference_end|>
arxiv
@article{douc2005comparison, title={Comparison of Resampling Schemes for Particle Filtering}, author={Randal Douc (CMAP), Olivier Capp\'e (LTCI), Eric Moulines (LTCI)}, journal={Proceedings of the 4th International Symposium on Image and Signal Processing and Analysis (ISPA 2005), 64-69}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507025}, primaryClass={cs.CE} }
douc2005comparison
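The four schemes compared in the paper above have standard textbook definitions; the sketches below implement them in NumPy so the differences are concrete. Each function maps normalized weights to resampled particle indices.

```python
# Standard implementations of the four resampling schemes compared in the
# paper: multinomial, stratified, systematic, and residual.
import numpy as np

def multinomial_resample(w, rng):
    return rng.choice(len(w), size=len(w), p=w)

def stratified_resample(w, rng):
    n = len(w)
    u = (np.arange(n) + rng.random(n)) / n   # one uniform draw per stratum
    return np.searchsorted(np.cumsum(w), u)

def systematic_resample(w, rng):
    n = len(w)
    u = (np.arange(n) + rng.random()) / n    # a single shared uniform draw
    return np.searchsorted(np.cumsum(w), u)

def residual_resample(w, rng):
    n = len(w)
    counts = np.floor(n * w).astype(int)      # deterministic integer parts
    idx = np.repeat(np.arange(n), counts)
    r = n - counts.sum()                      # leftover mass, resampled
    if r > 0:
        wr = (n * w - counts) / r             # residual weights sum to 1
        idx = np.concatenate([idx, rng.choice(n, size=r, p=wr)])
    return idx

rng = np.random.default_rng(0)
w = np.array([0.1, 0.2, 0.3, 0.4])
print(systematic_resample(w, rng))
```

Stratified and residual resampling reduce the variance of the particle counts relative to multinomial resampling, which is exactly the improvement the paper establishes; the paper's counter-example concerns systematic resampling, whose single shared draw couples all strata.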
arxiv-673092
cs/0507026
Hard Problems of Algebraic Geometry Codes
<|reference_start|>Hard Problems of Algebraic Geometry Codes: The minimum distance is one of the most important combinatorial characterizations of a code. The maximum likelihood decoding problem is one of the most important algorithmic problems of a code. While these problems are known to be hard for general linear codes, the techniques used to prove their hardness often rely on the construction of artificial codes. In general, much less is known about the hardness of these problems for specific classes of natural linear codes. In this paper, we show that both problems are NP-hard for algebraic geometry codes. We achieve this by reducing a well-known NP-complete problem to these problems using a randomized algorithm. The codes used in the reductions are based on elliptic curves. They have positive rates, but the alphabet sizes are exponential in the block lengths.<|reference_end|>
arxiv
@article{cheng2005hard, title={Hard Problems of Algebraic Geometry Codes}, author={Qi Cheng}, journal={arXiv preprint arXiv:cs/0507026}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507026}, primaryClass={cs.IT math.IT} }
cheng2005hard
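To make the maximum likelihood decoding problem concrete, here is a brute-force decoder over a toy binary linear code: enumerate all codewords and return the nearest one in Hamming distance. The enumeration is exponential in the code dimension, which is why the NP-hardness shown above matters; note the paper's codes live over large alphabets, and this binary example is only an illustration.

```python
# Brute-force maximum likelihood decoding of a tiny binary linear code:
# enumerate all 2^k codewords and return the one nearest to the received
# word.  The [5,2] code below is a toy example, not from the paper.
import itertools
import numpy as np

G = np.array([[1, 0, 0, 1, 1],      # generator matrix of a [5,2] code
              [0, 1, 1, 0, 1]])

def ml_decode(received):
    best, best_dist = None, None
    for msg in itertools.product([0, 1], repeat=G.shape[0]):
        cw = np.mod(np.array(msg) @ G, 2)
        d = int(np.sum(cw != received))       # Hamming distance
        if best_dist is None or d < best_dist:
            best, best_dist = cw, d
    return best, best_dist

print(ml_decode(np.array([1, 1, 1, 1, 1])))   # exponential in k = dim(code)
```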
arxiv-673093
cs/0507027
Anyone but Him: The Complexity of Precluding an Alternative
<|reference_start|>Anyone but Him: The Complexity of Precluding an Alternative: Preference aggregation in a multiagent setting is a central issue in both human and computer contexts. In this paper, we study in terms of complexity the vulnerability of preference aggregation to destructive control. That is, we study the ability of an election's chair to, through such mechanisms as voter/candidate addition/suppression/partition, ensure that a particular candidate (equivalently, alternative) does not win. And we study the extent to which election systems can make it impossible, or computationally costly (NP-complete), for the chair to execute such control. Among the systems we study--plurality, Condorcet, and approval voting--we find cases where systems immune or computationally resistant to a chair choosing the winner nonetheless are vulnerable to the chair blocking a victory. Beyond that, we see that among our studied systems no one system offers the best protection against destructive control. Rather, the choice of a preference aggregation system will depend closely on which types of control one wishes to be protected against. We also find concrete cases where the complexity of or susceptibility to control varies dramatically based on the choice among natural tie-handling rules.<|reference_end|>
arxiv
@article{hemaspaandra2005anyone, title={Anyone but Him: The Complexity of Precluding an Alternative}, author={Edith Hemaspaandra, Lane A. Hemaspaandra, Joerg Rothe}, journal={arXiv preprint arXiv:cs/0507027}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507027}, primaryClass={cs.GT cs.CC cs.MA} }
hemaspaandra2005anyone
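The winner notions underlying the paper above are easy to state in code. The sketch below computes plurality winners and the Condorcet winner (if one exists) from a profile of strict preference orders; destructive control then asks whether some allowed change to the election makes a distinguished candidate lose. The profile is an illustrative assumption.

```python
# Two of the winner notions studied in the paper, computed from a profile of
# strict preference orders (most- to least-preferred).  The votes below are
# illustrative; all votes are assumed to rank the same candidate set.
from collections import Counter

profile = [
    ["a", "b", "c"], ["a", "c", "b"], ["b", "c", "a"],
    ["c", "b", "a"], ["b", "a", "c"],
]

def plurality_winners(profile):
    tally = Counter(vote[0] for vote in profile)
    top = max(tally.values())
    return {c for c, v in tally.items() if v == top}

def condorcet_winner(profile):
    candidates = set(profile[0])
    for c in candidates:
        # c must beat every rival in strictly more than half the votes
        if all(sum(v.index(c) < v.index(r) for v in profile) > len(profile) / 2
               for r in candidates - {c}):
            return c
    return None  # no Condorcet winner exists

print(plurality_winners(profile), condorcet_winner(profile))
```

On this profile the plurality winners are {a, b} (a tie, where the tie-handling rules mentioned in the abstract come into play), while b is the unique Condorcet winner.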
arxiv-673094
cs/0507028
Adapting CBPP platforms for instructional use
<|reference_start|>Adapting CBPP platforms for instructional use: Commons-based peer production (CBPP) is the decentralized, net-based approach to the creation and dissemination of information resources. Underlying every CBPP system is a virtual community brought together by an internet tool (such as a web site) and structured by a specific collaboration protocol. In this talk we will argue that the value of such platforms can be leveraged by adapting them for pedagogical purposes. We report on one such recent adaptation. The Noosphere system is a web-based collaboration environment that underlies the popular PlanetMath website, a collaboratively written encyclopedia of mathematics licensed under the GNU Free Documentation License (FDL). Recently, the system was used to host a graduate-level mathematics course at Dalhousie University in Halifax, Canada. The course consisted of regular lectures and assignment problems. The students in the course collaborated on a set of course notes, encapsulating the lecture content and giving solutions to assigned problems. The successful outcome of this experiment demonstrated that a dedicated Noosphere system is well suited for classroom applications. We argue that this ``proof of concept'' experience also strongly suggests that every successful CBPP platform possesses latent pedagogical value.<|reference_end|>
arxiv
@article{milson2005adapting, title={Adapting CBPP platforms for instructional use}, author={Robert Milson, Aaron Krowne}, journal={arXiv preprint arXiv:cs/0507028}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507028}, primaryClass={cs.DL cs.HC} }
milson2005adapting
arxiv-673095
cs/0507029
ATNoSFERES revisited
<|reference_start|>ATNoSFERES revisited: ATNoSFERES is a Pittsburgh-style Learning Classifier System (LCS) in which the rules are represented as edges of an Augmented Transition Network. Genotypes are strings of tokens of a stack-based language, whose execution builds the labeled graph. The original ATNoSFERES, using a bitstring to represent the language tokens, has been favorably compared in previous work to several Michigan-style LCS architectures in the context of non-Markov problems. Several modifications of ATNoSFERES are proposed here, the most important one conceptually being a representational change: each token is now represented by an integer, hence the genotype is a string of integers; several other modifications of the underlying grammar language are also proposed. The resulting ATNoSFERES-II is validated on several standard animat non-Markov problems, on which it outperforms all previously published results in the LCS literature. The reasons for this improvement are carefully analyzed, and some assumptions about the underlying mechanisms are proposed in order to explain these good results.<|reference_end|>
arxiv
@article{landau2005atnosferes, title={ATNoSFERES revisited}, author={Samuel Landau (INRIA Futurs), Olivier Sigaud (LIP6), Marc Schoenauer (INRIA Futurs)}, journal={Proceedings of the Genetic and Evolutionary Computation Conference, GECCO-2005 - http://hal.inria.fr/inria-00000158}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507029}, primaryClass={cs.AI} }
landau2005atnosferes
arxiv-673096
cs/0507030
Strictly convex drawings of planar graphs
<|reference_start|>Strictly convex drawings of planar graphs: Every three-connected planar graph with n vertices has a drawing on an O(n^2) x O(n^2) grid in which all faces are strictly convex polygons. These drawings are obtained by perturbing (not strictly) convex drawings on O(n) x O(n) grids. More generally, a strictly convex drawing exists on a grid of size O(W) x O(n^4/W), for any choice of a parameter W in the range n<W<n^2. Tighter bounds are obtained when the faces have fewer sides. In the proof, we derive an explicit lower bound on the number of primitive vectors in a triangle.<|reference_end|>
arxiv
@article{barany2005strictly, title={Strictly convex drawings of planar graphs}, author={Imre Barany and Guenter Rote}, journal={DOCUMENTA MATHEMATICA, Vol. 11 (2006), 369-391}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507030}, primaryClass={cs.CG cs.DM} }
barany2005strictly
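The lower bound in the proof above concerns primitive vectors: integer points (x, y) with gcd(x, y) = 1 lying inside a triangle. A brute-force count for the axis-aligned right triangle with legs a and b (an illustrative special case, not the paper's general triangle) looks like this:

```python
# Count primitive vectors (integer points with coprime coordinates) inside
# the right triangle with legs a and b, i.e. x/a + y/b <= 1 with x, y >= 1.
# This simple special case illustrates the quantity bounded in the paper.
from math import gcd

def primitive_vectors_in_triangle(a, b):
    count = 0
    for x in range(1, a + 1):
        for y in range(1, b + 1):
            if x * b + y * a <= a * b and gcd(x, y) == 1:
                count += 1
    return count

# A fraction 6/pi^2 of all lattice points is primitive, so the count should
# grow like (6/pi^2) * (a*b/2) = 3ab/pi^2 for large a and b.
print(primitive_vectors_in_triangle(50, 50))
```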
arxiv-673097
cs/0507031
The error-floor of LDPC codes in the Laplacian channel
<|reference_start|>The error-floor of LDPC codes in the Laplacian channel: We analyze the performance of Low-Density Parity-Check codes in the error-floor domain where the Signal-to-Noise Ratio, s, is large, s >> 1. We describe how the instanton method of theoretical physics, recently adapted to coding theory, solves the problem of characterizing the error-floor domain in the Laplacian channel. An example of the (155,64,20) LDPC code with four iterations (each iteration consisting of two semi-steps: from bits-to-checks and from checks-to-bits) of the min-sum decoding is discussed. A generalized computational tree analysis is devised to explain the rational structure of the leading instantons. The asymptotic for the symbol Bit-Error-Rate in the error-floor domain comprises individual instanton contributions, each estimated as ~ \exp(-l_{inst;L} s), where the effective distances, l_{inst;L}, of the leading instantons are 7.6, 8.0 and 8.0 respectively. (The Hamming distance of the code is 20.) The analysis shows that the instantons are distinctly different from the ones found for the same coding/decoding scheme performing over the Gaussian channel. We validate instanton results against direct simulations and offer an explanation for the remarkable performance of the instanton approximation not only in the extremal, s -> \infty, limit but also at the moderate s values of practical interest.<|reference_end|>
arxiv
@article{stepanov2005the, title={The error-floor of LDPC codes in the Laplacian channel}, author={M. G. Stepanov, M. Chertkov}, journal={arXiv preprint arXiv:cs/0507031}, year={2005}, number={LA-UR-05-5131}, archivePrefix={arXiv}, eprint={cs/0507031}, primaryClass={cs.IT cond-mat.dis-nn math.IT} }
stepanov2005the
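The abstract's error-floor estimate is a sum of instanton terms, BER(s) ~ sum_i exp(-l_i s), up to prefactors it does not quote. A few lines evaluating this asymptotic with the effective distances given above (prefactors set to 1, an assumption):

```python
# Evaluate the error-floor asymptotic quoted in the abstract: a sum of
# instanton contributions exp(-l * s) with effective distances 7.6, 8.0, 8.0.
# The prefactors are not given in the abstract and are set to 1 here.
import math

def instanton_ber(s, distances=(7.6, 8.0, 8.0)):
    return sum(math.exp(-l * s) for l in distances)

for s in (1.0, 2.0, 3.0):
    print(s, instanton_ber(s))
```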
arxiv-673098
cs/0507032
Introduction to Quantum Message Space
<|reference_start|>Introduction to Quantum Message Space: This paper develops the quantum analog of the message ensemble of classical information theory as developed by Shannon and Khinchin. The principal mathematical tool is harmonic analysis on the free group with two generators.<|reference_end|>
arxiv
@article{ogden2005introduction, title={Introduction to Quantum Message Space}, author={R. D. Ogden}, journal={arXiv preprint arXiv:cs/0507032}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507032}, primaryClass={cs.IT math.IT math.OA quant-ph} }
ogden2005introduction
arxiv-673099
cs/0507033
Multiresolution Kernels
<|reference_start|>Multiresolution Kernels: We present in this work a new methodology to design kernels on data that are structured from smaller components, such as text, images or sequences. This methodology is a template procedure which can be applied to most kernels on measures and takes advantage of a more detailed "bag of components" representation of the objects. To obtain such a detailed description, we consider possible decompositions of the original bag into a collection of nested bags, following prior knowledge on the objects' structure. We then consider these smaller bags to compare two objects both in a detailed perspective, stressing local matches between the smaller bags, and in a global or coarse perspective, by considering the entire bag. This multiresolution approach is likely to be best suited for tasks where the coarse approach is not precise enough and where a more subtle mixture of both local and global similarities is necessary to compare objects. The approach presented here would not be computationally tractable without a factorization trick that we introduce before presenting promising results on an image retrieval task.<|reference_end|>
arxiv
@article{cuturi2005multiresolution, title={Multiresolution Kernels}, author={Marco Cuturi, Kenji Fukumizu}, journal={arXiv preprint arXiv:cs/0507033}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507033}, primaryClass={cs.LG} }
cuturi2005multiresolution
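A hedged sketch of the "bag of components" idea above: a base kernel averaged over component pairs gives a valid kernel on whole bags, and the same kernel on nested sub-bags supplies the fine-grained term. The RBF base kernel, the equal-weight combination, and the assumption that the two objects' sub-bags are aligned are all illustrative choices; the paper's factorization trick is not reproduced.

```python
# Hedged sketch of a multiresolution "bag of components" kernel: an RBF base
# kernel averaged over component pairs (coarse term on the full bags) mixed
# with the same kernel on aligned sub-bags (fine term).  Weights and the
# sub-bag alignment are illustrative assumptions.
import numpy as np

def base_kernel(x, y, gamma=1.0):
    return np.exp(-gamma * np.sum((x - y) ** 2))

def bag_kernel(bag_x, bag_y):
    """Mean of the base kernel over all component pairs (a valid p.d. kernel)."""
    return np.mean([base_kernel(x, y) for x in bag_x for y in bag_y])

def multiresolution_kernel(x_bags, y_bags, alpha=0.5):
    """Coarse term on the whole bags plus a fine term on aligned sub-bags."""
    coarse = bag_kernel(np.concatenate(x_bags), np.concatenate(y_bags))
    fine = np.mean([bag_kernel(bx, by) for bx, by in zip(x_bags, y_bags)])
    return alpha * coarse + (1 - alpha) * fine

rng = np.random.default_rng(0)
img_a = [rng.normal(size=(4, 3)) for _ in range(2)]  # e.g. two image regions,
img_b = [rng.normal(size=(4, 3)) for _ in range(2)]  # each a bag of 4 features
print(multiresolution_kernel(img_a, img_b))
```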
arxiv-673100
cs/0507034
Papillon: Greedy Routing in Rings
<|reference_start|>Papillon: Greedy Routing in Rings: We study {\sc greedy} routing over $n$ nodes placed in a ring, with the \emph{distance} between two nodes defined to be the clockwise or the absolute distance between them along the ring. Such graphs arise in the context of modeling social networks and in routing networks for peer-to-peer systems. We construct the first network over $n$ nodes in which {\sc greedy} routing takes $O(\log n / \log d)$ hops in the worst case, with $d$ outgoing links per node. This is the first construction with asymptotically optimal greedy routing complexity; previous constructions required $O(\frac{\log^2 n}{d})$ hops.<|reference_end|>
arxiv
@article{abraham2005papillon:, title={Papillon: Greedy Routing in Rings}, author={Ittai Abraham and Dahlia Malkhi and Gurmeet Singh Manku}, journal={arXiv preprint arXiv:cs/0507034}, year={2005}, archivePrefix={arXiv}, eprint={cs/0507034}, primaryClass={cs.DC cs.NI} }
abraham2005papillon:
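A hedged sketch of greedy routing on a ring under the clockwise metric: at every node, forward to the out-neighbor that minimizes the remaining clockwise distance to the target. The powers-of-d out-links used below are a classic illustrative choice; Papillon's actual link structure, which achieves the optimal O(log n / log d) hop count, is not reproduced here.

```python
# Greedy routing on an n-node ring with the clockwise distance metric: each
# hop goes to the out-neighbour minimising the remaining distance.  The
# out-links +1, +d, +d^2, ... are illustrative; with them the hop count is
# the sum of base-d digits of the initial distance, not Papillon's optimum.
def greedy_route(src, dst, n, d):
    m = 1
    while d ** m < n:
        m += 1
    offsets = [d ** i for i in range(m)]      # out-links: +1, +d, +d^2, ...
    path, u = [src], src
    while u != dst:
        dist = (dst - u) % n                  # clockwise distance to target
        # greedy choice: overshooting wraps around the ring, so the best
        # neighbour is the largest offset that does not exceed dist
        step = max(o for o in offsets if o <= dist)
        u = (u + step) % n
        path.append(u)
    return path

print(greedy_route(3, 2, n=100, d=4))
```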