Dataset schema (one record per paper): corpus_id (string, lengths 7-12), paper_id (string, lengths 9-16), title (string, lengths 1-261), abstract (string, lengths 70-4.02k), source (string, 1 distinct value), bibtex (string, lengths 208-20.9k), citation_key (string, lengths 6-100).
arxiv-674701
cs/0608121
Cross Entropy Approximation of Structured Covariance Matrices
<|reference_start|>Cross Entropy Approximation of Structured Covariance Matrices: We apply two variations of the principle of Minimum Cross Entropy (the Kullback information measure) to fit parameterized probability density models to observed data densities. For an array beamforming problem with P incident narrowband point sources, N > P sensors, and colored noise, both approaches yield eigenvector fitting methods similar to that of the MUSIC algorithm [1]. Furthermore, the corresponding cross-entropies are related to the MDL model order selection criterion [2].<|reference_end|>
arxiv
@article{liou2006cross, title={Cross Entropy Approximation of Structured Covariance Matrices}, author={Cheng-Yuan Liou and Bruce R. Musicus}, journal={IEEE Transactions on Signal Processing, vol. 56, issue 7, Part 2, pages 3362-3367, 2008}, year={2006}, archivePrefix={arXiv}, eprint={cs/0608121}, primaryClass={cs.IT math.IT} }
liou2006cross
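
Note: as background for the eigenvector-fitting connection in the abstract above, here is a minimal numpy sketch of a MUSIC-style noise-subspace scan. The uniform-linear-array steering model, the function names, and all parameter values are illustrative assumptions, not the paper's construction.

import numpy as np

def music_spectrum(X, P, angles_deg, d=0.5):
    """X: N x T array of snapshots; P: number of sources; d: sensor spacing in wavelengths."""
    N, T = X.shape
    R = X @ X.conj().T / T                      # sample covariance (N x N)
    eigvals, eigvecs = np.linalg.eigh(R)        # eigenvalues in ascending order
    En = eigvecs[:, :N - P]                     # noise subspace: N-P smallest eigenvectors
    n = np.arange(N)
    spectrum = []
    for theta in np.deg2rad(angles_deg):
        a = np.exp(2j * np.pi * d * n * np.sin(theta))   # ULA steering vector
        proj = En.conj().T @ a                  # component of a in the noise subspace
        spectrum.append(1.0 / np.real(proj.conj() @ proj))  # peaks near source angles
    return np.array(spectrum)
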
arxiv-674702
cs/0608122
FOSS-Based Grid Computing
<|reference_start|>FOSS-Based Grid Computing: In this expository paper we will be primarily concerned with core aspects of Grids and Grid computing using free and open-source software with some emphasis on utility computing. It is based on a technical report entitled 'Grid-Computing Using GNU/Linux' by the present author. This article was written in 2006 and should be of historical interest.<|reference_end|>
arxiv
@article{mani2006foss-based, title={FOSS-Based Grid Computing}, author={A. Mani}, journal={arXiv preprint arXiv:cs/0608122}, year={2006}, number={EG -15 (RMCE)}, archivePrefix={arXiv}, eprint={cs/0608122}, primaryClass={cs.DC cs.PL} }
mani2006foss-based
arxiv-674703
cs/0608123
Proof of a Conjecture of Helleseth Regarding Pairs of Binary m-Sequences
<|reference_start|>Proof of a Conjecture of Helleseth Regarding Pairs of Binary m-Sequences: This paper has been withdrawn by the author(s), due to a crucial sign error in Thm. 11.<|reference_end|>
arxiv
@article{zha2006proof, title={Proof of a Conjecture of Helleseth Regarding Pairs of Binary m-Sequences}, author={Zhengbang Zha and XueLi Wang}, journal={arXiv preprint arXiv:cs/0608123}, year={2006}, archivePrefix={arXiv}, eprint={cs/0608123}, primaryClass={cs.IT math.IT} }
zha2006proof
arxiv-674704
cs/0608124
The Tree Inclusion Problem: In Linear Space and Faster
<|reference_start|>The Tree Inclusion Problem: In Linear Space and Faster: Given two rooted, ordered, and labeled trees $P$ and $T$ the tree inclusion problem is to determine if $P$ can be obtained from $T$ by deleting nodes in $T$. This problem has recently been recognized as an important query primitive in XML databases. Kilpeläinen and Mannila [\emph{SIAM J. Comput. 1995}] presented the first polynomial time algorithm using quadratic time and space. Since then several improved results have been obtained for special cases when $P$ and $T$ have a small number of leaves or small depth. However, in the worst case these algorithms still use quadratic time and space. Let $n_S$, $l_S$, and $d_S$ denote the number of nodes, the number of leaves, and the maximum depth of a tree $S \in \{P, T\}$. In this paper we show that the tree inclusion problem can be solved in space $O(n_T)$ and time $O(\min(l_P n_T,\; l_P l_T \log\log n_T + n_T,\; \frac{n_P n_T}{\log n_T} + n_T \log n_T))$. This improves or matches the best known time complexities while using only linear space instead of quadratic. This is particularly important in practical applications, such as XML databases, where the space is likely to be a bottleneck.<|reference_end|>
arxiv
@article{bille2006the, title={The Tree Inclusion Problem: In Linear Space and Faster}, author={Philip Bille and Inge Li Goertz}, journal={arXiv preprint arXiv:cs/0608124}, year={2006}, archivePrefix={arXiv}, eprint={cs/0608124}, primaryClass={cs.DS} }
bille2006the
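
Note: as a reference point for the problem statement above, here is a correctness-first sketch of the tree inclusion relation itself, following the deletion definition directly (a deleted node is replaced by its children in sibling order). It is exponential in the worst case, unlike the paper's algorithms; the (label, children) tuple encoding is an assumption made for illustration.

def forest_included(F, G):
    """Can the ordered forest F be obtained from forest G by deleting nodes of G?"""
    if not F:
        return True                     # nothing left to match
    if not G:
        return False                    # pattern remains but target is exhausted
    (b, G1), rest_G = G[0], G[1:]
    # Option 1: delete G's first root, splicing its children into the forest.
    if forest_included(F, list(G1) + list(rest_G)):
        return True
    # Option 2: match F's first root against G's first root (labels must agree);
    # then F's children must embed in G1 and F's remaining siblings in rest_G.
    (a, F1) = F[0]
    return a == b and forest_included(F1, list(G1)) and forest_included(F[1:], list(rest_G))

def tree_included(P, T):
    return forest_included([P], [T])

# Example: a(c) is included in a(b(c)) by deleting node b.
assert tree_included(("a", [("c", [])]), ("a", [("b", [("c", [])])]))
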
arxiv-674705
cs/0608125
Decidability of Type-checking in the Calculus of Algebraic Constructions with Size Annotations
<|reference_start|>Decidability of Type-checking in the Calculus of Algebraic Constructions with Size Annotations: Since Val Tannen's pioneer work on the combination of simply-typed lambda-calculus and first-order rewriting (LICS'88), many authors have contributed to this subject by extending it to richer typed lambda-calculi and rewriting paradigms, culminating in calculi like the Calculus of Algebraic Constructions. These works provide theoretical foundations for type-theoretic proof assistants where functions and predicates are defined by oriented higher-order equations. This kind of definition subsumes induction-based definitions, is easier to write and provides more automation. On the other hand, checking that user-defined rewrite rules are strongly normalizing and confluent, and preserve the decidability of type-checking when combined with beta-reduction, is more difficult. Most termination criteria rely on the term structure. In a previous work, we extended the notion of ``sized types'', studied by several authors in the simpler framework of ML-like languages, to dependent types and higher-order rewriting, and proved that it preserves strong normalization. The main contribution of the present paper is twofold. First, we prove that, in the Calculus of Algebraic Constructions with size annotations, the problems of type inference and type-checking are decidable, provided that the sets of constraints generated by size annotations are satisfiable and admit most general solutions. Second, we prove the latter properties for a size algebra rich enough for capturing usual induction-based definitions and much more.<|reference_end|>
arxiv
@article{blanqui2006decidability, title={Decidability of Type-checking in the Calculus of Algebraic Constructions with Size Annotations}, author={Frédéric Blanqui (INRIA Lorraine - LORIA)}, journal={arXiv preprint arXiv:cs/0608125}, year={2006}, doi={10.1007/11538363\_11}, archivePrefix={arXiv}, eprint={cs/0608125}, primaryClass={cs.LO cs.PL} }
blanqui2006decidability
arxiv-674706
cs/0609001
A Robust Solution Procedure for Hyperelastic Solids with Large Boundary Deformation
<|reference_start|>A Robust Solution Procedure for Hyperelastic Solids with Large Boundary Deformation: Compressible Mooney-Rivlin theory has been used to model hyperelastic solids, such as rubber and porous polymers, and more recently soft biomedical tissues, undergoing large elastic deformations. We propose a solution procedure for Lagrangian finite element discretization of a static nonlinear compressible Mooney-Rivlin hyperelastic solid. We consider the case in which the boundary condition is a large prescribed deformation, so that mesh tangling becomes an obstacle for straightforward algorithms. Our solution procedure involves a largely geometric technique to untangle the mesh: solution of a sequence of linear systems to obtain initial guesses for interior nodal positions for which no element is inverted. After the mesh is untangled, we take Newton iterations to converge to a mechanical equilibrium. The Newton iterations are safeguarded by a line search similar to one used in optimization. Our computational results indicate that the algorithm is up to 70 times faster than a straightforward Newton continuation procedure and is also more robust (i.e., able to tolerate much larger deformations). For a few extremely large deformations, the deformed mesh could only be computed through the use of an expensive Newton continuation method while using a tight convergence tolerance and taking very small steps.<|reference_end|>
arxiv
@article{shontz2006a, title={A Robust Solution Procedure for Hyperelastic Solids with Large Boundary Deformation}, author={Suzanne M. Shontz, Stephen A. Vavasis}, journal={arXiv preprint arXiv:cs/0609001}, year={2006}, doi={10.1007/s00366-011-0225-y}, archivePrefix={arXiv}, eprint={cs/0609001}, primaryClass={cs.NA cs.CE} }
shontz2006a
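
Note: the safeguarded Newton iteration mentioned in the abstract above can be illustrated generically: take the Newton step on the residual and accept it only after a backtracking line search on the squared residual norm, as in optimization. This is a generic sketch under assumed user-supplied f and jac, not the paper's finite element solver.

import numpy as np

def safeguarded_newton(f, jac, x0, tol=1e-10, max_iter=50):
    """Solve f(x) = 0 with Newton steps safeguarded by a backtracking line search."""
    x = np.asarray(x0, dtype=float)
    for _ in range(max_iter):
        r = f(x)
        if np.linalg.norm(r) < tol:
            break
        step = np.linalg.solve(jac(x), -r)      # Newton direction
        t, merit = 1.0, np.dot(r, r)            # merit function: ||f(x)||^2
        while np.dot(f(x + t * step), f(x + t * step)) > merit and t > 1e-8:
            t *= 0.5                            # halve the step until the residual decreases
        x = x + t * step
    return x

# Usage: root of f(x) = x^3 - 1 near x0 = 0.2, where a full Newton step overshoots badly.
root = safeguarded_newton(lambda x: x**3 - 1, lambda x: np.diag(3 * x**2), np.array([0.2]))
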
arxiv-674707
cs/0609002
On the confluence of lambda-calculus with conditional rewriting
<|reference_start|>On the confluence of lambda-calculus with conditional rewriting: The confluence of untyped lambda-calculus with unconditional rewriting has already been studied in various directions. In this paper, we investigate the confluence of lambda-calculus with conditional rewriting and provide general results in two directions. First, we treat the case where conditional rules are algebraic. This extends results of Muller and Dougherty for unconditional rewriting. Two cases are considered, depending on whether beta-reduction is allowed in the evaluation of conditions. Moreover, Dougherty's result is improved from the assumption of strongly normalizing beta-reduction to weakly normalizing beta-reduction. We also provide examples showing that outside these conditions, modularity of confluence is difficult to achieve. Second, we go beyond the algebraic framework and get new confluence results using an extended notion of orthogonality that takes advantage of the conditional part of rewrite rules.<|reference_end|>
arxiv
@article{blanqui2006on, title={On the confluence of lambda-calculus with conditional rewriting}, author={Frédéric Blanqui (INRIA Lorraine - LORIA), Claude Kirchner (INRIA Lorraine - LORIA), Colin Riba (INRIA Lorraine - LORIA)}, journal={arXiv preprint arXiv:cs/0609002}, year={2006}, doi={10.1007/11690634\_26}, archivePrefix={arXiv}, eprint={cs/0609002}, primaryClass={cs.LO cs.PL} }
blanqui2006on
arxiv-674708
cs/0609003
In Quest of Image Semantics: Are We Looking for It Under the Right Lamppost?
<|reference_start|>In Quest of Image Semantics: Are We Looking for It Under the Right Lamppost?: In recent years we have witnessed a dramatic growth of research focused on semantic image understanding. Indeed, without understanding image content, successful accomplishment of any image-processing task is simply inconceivable. Until recently, the ultimate need for such understanding has been met by the knowledge that a domain expert or a vision system supervisor has contributed to every image-processing application. The advent of the Internet has drastically changed this situation. Internet sources of visual information are diffused and dispersed over the whole Web, so the duty of information content discovery and evaluation must now be relegated to an image understanding agent (a machine or a computer program) capable of performing image content assessment at a remote image location. The development of Content Based Image Retrieval (CBIR) techniques, launched about ten years ago, was a move in the right direction. Unfortunately, very little progress has been made since then. The reason for this can be seen in a range of long-lasting misconceptions to which CBIR designers continue to adhere. I hope my arguments will help them change their minds.<|reference_end|>
arxiv
@article{diamant2006in, title={In Quest of Image Semantics: Are We Looking for It Under the Right Lamppost?}, author={Emanuel Diamant}, journal={arXiv preprint arXiv:cs/0609003}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609003}, primaryClass={cs.CV cs.IR} }
diamant2006in
arxiv-674709
cs/0609004
Equality of complexity classes P and NP: Linear programming formulation of the quadratic assignment problem
<|reference_start|>Equality of complexity classes P and NP: Linear programming formulation of the quadratic assignment problem: In this paper, we present a polynomial-sized linear programming formulation of the Quadratic Assignment Problem (QAP). The proposed linear program is a network flow-based model. Hence, it provides for the solution of the QAP in polynomial time. Computational testing and results are discussed.<|reference_end|>
arxiv
@article{diaby2006equality, title={Equality of complexity classes P and NP: Linear programming formulation of the quadratic assignment problem}, author={Moustapha Diaby}, journal={arXiv preprint arXiv:cs/0609004}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609004}, primaryClass={cs.CC cs.DM} }
diaby2006equality
arxiv-674710
cs/0609005
The traveling salesman problem: A Linear programming formulation
<|reference_start|>The traveling salesman problem: A Linear programming formulation: In this paper, we present a polynomial-sized linear programming formulation of the Traveling Salesman Problem (TSP). The proposed linear program is a network flow-based model. Numerical implementation issues and results are discussed. (The exposition and proofs are much more detailed in an edition which I wrote in collaboration with Dr. M.H. Karwan in 2012-2014. That edition is available at http://users.business.uconn.edu/mdiaby/P=NPProofPapers/tspPaper.pdf)<|reference_end|>
arxiv
@article{diaby2006the, title={The traveling salesman problem: A Linear programming formulation}, author={Moustapha Diaby}, journal={arXiv preprint arXiv:cs/0609005}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609005}, primaryClass={cs.CC cs.DM} }
diaby2006the
arxiv-674711
cs/0609006
New Quasi-Cyclic Codes from Simplex Codes
<|reference_start|>New Quasi-Cyclic Codes from Simplex Codes: As a generalization of cyclic codes, quasi-cyclic (QC) codes contain many good linear codes. But the quasi-cyclic codes studied so far are mainly limited to one-generator (1-generator) QC codes. In this correspondence, 2-generator and 3-generator QC codes are studied, and many good, new QC codes are constructed from simplex codes. Some new binary QC codes and related codes that improve the bounds on the maximum minimum distance for binary linear codes are constructed. They are 5-generator QC [93, 17, 34] and [254, 23, 102] codes, and related [96, 17, 36] and [256, 23, 104] codes.<|reference_end|>
arxiv
@article{chen2006new, title={New Quasi-Cyclic Codes from Simplex Codes}, author={Eric Zhi Chen}, journal={arXiv preprint arXiv:cs/0609006}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609006}, primaryClass={cs.IT math.IT} }
chen2006new
arxiv-674712
cs/0609007
A Massive Local Rules Search Approach to the Classification Problem
<|reference_start|>A Massive Local Rules Search Approach to the Classification Problem: An approach to the classification problem of machine learning, based on building local classification rules, is developed. The local rules are considered as projections of the global classification rules onto the event we want to classify. A massive global optimization algorithm is used to optimize the quality criterion. The algorithm, which has polynomial complexity in the typical case, is used to find all high-quality local rules. Its other distinctive features are the integration of attribute-level selection (for ordered attributes) with rule searching, and an original strategy for resolving conflicting rules. The algorithm is practical; it was tested on a number of data sets from the UCI repository, and a comparison with other predicting techniques is presented.<|reference_end|>
arxiv
@article{malyshkin2006a, title={A Massive Local Rules Search Approach to the Classification Problem}, author={Vladislav Malyshkin, Ray Bakhramov, Andrey Gorodetsky}, journal={arXiv preprint arXiv:cs/0609007}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609007}, primaryClass={cs.LG} }
malyshkin2006a
arxiv-674713
cs/0609008
On the freeze quantifier in Constraint LTL: decidability and complexity
<|reference_start|>On the freeze quantifier in Constraint LTL: decidability and complexity: Constraint LTL, a generalisation of LTL over Presburger constraints, is often used as a formal language to specify the behavior of operational models with constraints. The freeze quantifier can be part of the language, as in some real-time logics, but this variable-binding mechanism is quite general and ubiquitous in many logical languages (first-order temporal logics, hybrid logics, logics for sequence diagrams, navigation logics, logics with lambda-abstraction etc.). We show that Constraint LTL over the simple domain (N,=) augmented with the freeze quantifier is undecidable, which is a surprising result in view of the poor language for constraints (only equality tests). Many versions of freeze-free Constraint LTL are decidable over domains with qualitative predicates, and our undecidability result actually establishes $\Sigma_1^1$-completeness. On the positive side, we provide complexity results when the domain is finite (EXPSPACE-completeness) or when the formulae are flat in a sense introduced in the paper. Our undecidability results are sharp (i.e. with restrictions on the number of variables) and all our complexity characterisations ensure completeness with respect to some complexity class (mainly PSPACE and EXPSPACE).<|reference_end|>
arxiv
@article{demri2006on, title={On the freeze quantifier in Constraint LTL: decidability and complexity}, author={Stéphane Demri, Ranko Lazic, David Nowak}, journal={Information and Computation, 205(1):2-24, January 2007}, year={2006}, doi={10.1016/j.ic.2006.08.003}, archivePrefix={arXiv}, eprint={cs/0609008}, primaryClass={cs.LO cs.CC} }
demri2006on
arxiv-674714
cs/0609009
Finding heaviest H-subgraphs in real weighted graphs, with applications
<|reference_start|>Finding heaviest H-subgraphs in real weighted graphs, with applications: For a graph G with real weights assigned to the vertices (edges), the MAX H-SUBGRAPH problem is to find an H-subgraph of G with maximum total weight, if one exists. The all-pairs MAX H-SUBGRAPH problem is to find, for every pair of vertices u,v, a maximum H-subgraph containing both u and v, if one exists. Our main results are new strongly polynomial algorithms for the all-pairs MAX H-SUBGRAPH problem for vertex weighted graphs. We also give improved algorithms for the MAX H-SUBGRAPH problem for edge weighted graphs, and various related problems, including computing the first k most significant bits of the distance product of two matrices. Some of our algorithms are based, in part, on fast matrix multiplication.<|reference_end|>
arxiv
@article{vassilevska2006finding, title={Finding heaviest H-subgraphs in real weighted graphs, with applications}, author={Virginia Vassilevska, Ryan Williams and Raphael Yuster}, journal={arXiv preprint arXiv:cs/0609009}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609009}, primaryClass={cs.DS cs.DM} }
vassilevska2006finding
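
Note: for the smallest interesting pattern H = K_3, the vertex-weighted MAX H-SUBGRAPH problem above asks for a triangle of maximum total vertex weight. A direct O(n^3) sketch follows; the paper's point is to do substantially better, in part via fast matrix multiplication. The boolean-matrix encoding is an assumption made for illustration.

def max_weight_triangle(adj, w):
    """adj: n x n boolean adjacency matrix; w: list of vertex weights.
    Returns (best_weight, (i, j, k)) or None if the graph is triangle-free."""
    n = len(w)
    best = None
    for i in range(n):
        for j in range(i + 1, n):
            if not adj[i][j]:
                continue
            for k in range(j + 1, n):
                if adj[i][k] and adj[j][k]:          # i, j, k form a triangle
                    total = w[i] + w[j] + w[k]
                    if best is None or total > best[0]:
                        best = (total, (i, j, k))
    return best
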
arxiv-674715
cs/0609010
An effective edge-directed frequency filter for removal of aliasing in upsampled images
<|reference_start|>An effective edge-directed frequency filter for removal of aliasing in upsampled images: Raster images can have a range of various distortions connected to their raster structure. Upsampling them might in effect substantially reproduce the raster structure of the original image, an artifact known as aliasing. The upsampling itself may introduce aliasing into the upsampled image as well. The presented method attempts to remove the aliasing using frequency filters based on the discrete fast Fourier transform, applied directionally in certain regions placed along the edges in the image. As opposed to some anisotropic smoothing methods, the presented algorithm aims to selectively reduce only the aliasing, preserving the sharpness of image details. The method can be used as a post-processing filter along with various upsampling algorithms. It was experimentally shown that the method can improve the visual quality of upsampled images.<|reference_end|>
arxiv
@article{rataj2006an, title={An effective edge-directed frequency filter for removal of aliasing in upsampled images}, author={Artur Rataj}, journal={arXiv preprint arXiv:cs/0609010}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609010}, primaryClass={cs.CV} }
rataj2006an
arxiv-674716
cs/0609011
Scheduling for Stable and Reliable Communication over Multiaccess Channels and Degraded Broadcast Channels
<|reference_start|>Scheduling for Stable and Reliable Communication over Multiaccess Channels and Degraded Broadcast Channels: Information-theoretic arguments focus on modeling the reliability of information transmission, assuming availability of infinite data at sources, thus ignoring randomness in message generation times at the respective sources. However, in information transport networks, not only is reliable transmission important, but also stability, i.e., finiteness of the mean delay incurred by messages from the time of generation to the time of successful reception. Usually, delay analysis is done separately using queueing-theoretic arguments, whereas reliable information transmission is studied using information theory. In this thesis, we investigate these two important aspects of data communication jointly by suitably combining models from the two fields. In particular, we model scheduled communication of messages that arrive according to a random process, (i) over multiaccess channels, with either independent decoding or joint decoding, and (ii) over degraded broadcast channels. The scheduling policies proposed permit up to a certain maximum number of messages for simultaneous transmission. In the first part of the thesis, we develop a multi-class discrete-time processor-sharing queueing model, and then investigate the stability of this queue. In particular, we model the queue by a discrete-time Markov chain defined on a countable state space, and then establish (i) a sufficient condition for $c$-regularity of the chain, and hence positive recurrence and finiteness of the stationary mean of the function $c$ of the state, and (ii) a sufficient condition for transience of the chain. These stability results form the basis for the conclusions drawn in the thesis.<|reference_end|>
arxiv
@article{sayee2006scheduling, title={Scheduling for Stable and Reliable Communication over Multiaccess Channels and Degraded Broadcast Channels}, author={K.C.V. Kalyanarama Sesha Sayee}, journal={arXiv preprint arXiv:cs/0609011}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609011}, primaryClass={cs.NI cs.IT math.IT} }
sayee2006scheduling
arxiv-674717
cs/0609012
Baire Categories on Small Complexity Classes and Meager-Comeager Laws
<|reference_start|>Baire Categories on Small Complexity Classes and Meager-Comeager Laws: We introduce two resource-bounded Baire category notions on small complexity classes such as P, SUBEXP, and PSPACE and on probabilistic classes such as BPP, which differ in how the corresponding finite extension strategies are computed. We give an alternative characterization of small sets via resource-bounded Banach-Mazur games. As an application of the first notion, we show that for almost every language A (i.e. all except a meager class) computable in subexponential time, P(A)=BPP(A). We also show that almost all languages in PSPACE do not have small nonuniform complexity. We then switch to the second Baire category notion (called locally-computable), and show that the class SPARSE is meager in P. We show that in contrast to the resource-bounded measure case, meager-comeager laws can be obtained for many standard complexity classes, relative to locally-computable Baire category on BPP and PSPACE. Another topic where locally-computable Baire categories differ from resource-bounded measure is weak-completeness: we show that there is no weak-completeness notion in P based on locally-computable Baire categories, i.e. every P-weakly-complete set is complete for P. We also prove that the class of complete sets for P under Turing-logspace reductions is meager in P, if P is not equal to DSPACE(log n), and that the same holds unconditionally for quasi-poly time. Finally we observe that locally-computable Baire categories are incomparable with all existing resource-bounded measure notions on small complexity classes, which might explain why those two settings seem to differ so fundamentally.<|reference_end|>
arxiv
@article{moser2006baire, title={Baire Categories on Small Complexity Classes and Meager-Comeager Laws}, author={Philippe Moser}, journal={arXiv preprint arXiv:cs/0609012}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609012}, primaryClass={cs.CC} }
moser2006baire
arxiv-674718
cs/0609013
Combining typing and size constraints for checking the termination of higher-order conditional rewrite systems
<|reference_start|>Combining typing and size constraints for checking the termination of higher-order conditional rewrite systems: In a previous work, the first author extended to higher-order rewriting and dependent types the use of size annotations in types, a termination proof technique called type-based or size-based termination, initially developed for ML-like programs. Here, we go one step further by considering conditional rewriting and explicit quantifications and constraints on size annotations. This allows us to describe more precisely how the size of the output of a function depends on the size of its inputs. Hence, we can check the termination of more functions. We first give a general type-checking algorithm based on constraint solving. Then, we give a termination criterion with constraints in Presburger arithmetic. To our knowledge, this is the first termination criterion for higher-order conditional rewriting that takes the conditions into account.<|reference_end|>
arxiv
@article{blanqui2006combining, title={Combining typing and size constraints for checking the termination of higher-order conditional rewrite systems}, author={Frédéric Blanqui (INRIA Lorraine - LORIA), Colin Riba (INRIA Lorraine - LORIA)}, journal={In 13th International Conference on Logic for Programming, Artificial Intelligence and Reasoning - LPAR 2006}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609013}, primaryClass={cs.LO} }
blanqui2006combining
arxiv-674719
cs/0609014
A simple stability condition for RED using TCP mean-field modeling
<|reference_start|>A simple stability condition for RED using TCP mean-field modeling: Congestion on the Internet is an old problem but still a subject of intensive research. The TCP protocol with its AIMD (Additive Increase and Multiplicative Decrease) behavior hides very challenging problems; one of them is to understand the interaction between a large number of users with delayed feedback. This article focuses on two modeling issues of TCP which turned out to be important for tackling concrete scenarios when implementing the model proposed in [Baccelli McDonald Reynier 02]: firstly, the modeling of the maximum TCP window size, since this maximum can be reached quickly in many practical cases; secondly, the delay structure, since the usual Little-like formula behaves poorly when queuing delays are variable and may dramatically change the evolution of the predicted queue size, which makes it useless for studying drop-tail or RED (Random Early Detection) mechanisms. With the proposed TCP modeling improvements, we are able to look at a concrete example where RED should be used in FIFO routers instead of letting the default drop-tail behavior happen. We study mathematically the fixed points of the window size distribution and the local stability of RED. An interesting case is when RED operates at the limit where congestion starts: it then avoids unwanted loss of bandwidth and delay variations.<|reference_end|>
arxiv
@article{reynier2006a, title={A simple stability condition for RED using TCP mean-field modeling}, author={Julien Reynier (INRIA Rocquencourt)}, journal={arXiv preprint arXiv:cs/0609014}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609014}, primaryClass={cs.NI math.PR} }
reynier2006a
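
Note: for reference, the RED mechanism whose stability is analyzed above uses, in its classic form, an exponentially weighted average queue size and a drop probability that ramps linearly between two thresholds. The sketch below is the standard textbook marking rule with illustrative parameter values, not the paper's mean-field model.

def red_drop_probability(avg_queue, min_th=5.0, max_th=15.0, max_p=0.1):
    """Classic RED: no drops below min_th, certain drop at or above max_th,
    and a linear ramp of the drop probability in between."""
    if avg_queue < min_th:
        return 0.0
    if avg_queue >= max_th:
        return 1.0
    return max_p * (avg_queue - min_th) / (max_th - min_th)

def update_avg(avg_queue, instant_queue, w=0.002):
    """Exponentially weighted moving average of the instantaneous queue size."""
    return (1 - w) * avg_queue + w * instant_queue
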
arxiv-674720
cs/0609015
Residual Finite Tree Automata
<|reference_start|>Residual Finite Tree Automata: Tree automata based algorithms are essential in many fields of computer science, such as verification, specification, and program analysis. They have also become essential for databases with the development of standards such as XML. In this paper, we define new classes of nondeterministic tree automata, namely residual finite tree automata (RFTA). In the bottom-up case, we obtain a new characterization of regular tree languages. In the top-down case, we obtain a subclass of regular tree languages which contains the class of languages recognized by deterministic top-down tree automata. RFTA also come with the property of existence of canonical nondeterministic tree automata.<|reference_end|>
arxiv
@article{carme2006residual, title={Residual Finite Tree Automata}, author={J. Carme (INRIA Futurs), R. Gilleron (INRIA Futurs), A. Lemay (INRIA Futurs), A. Terlutte (INRIA Futurs), M. Tommasi (INRIA Futurs)}, journal={arXiv preprint arXiv:cs/0609015}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609015}, primaryClass={cs.CC cs.LO} }
carme2006residual
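
Note: to fix ideas about the objects above, a bottom-up nondeterministic tree automaton can be run by computing, for each subtree, its set of reachable states. A minimal sketch under assumed encodings (trees as (symbol, children) tuples; transitions keyed by a symbol and a tuple of child states); the residual automata the paper defines are a refinement on top of this.

from itertools import product

def run(tree, delta, final):
    """Return (reachable root states, accepted?).
    delta: dict mapping (symbol, tuple_of_child_states) -> set of states."""
    symbol, children = tree
    child_state_sets = [run(c, delta, final)[0] for c in children]
    states = set()
    for combo in product(*child_state_sets):   # every nondeterministic choice of child states
        states |= delta.get((symbol, combo), set())
    return states, bool(states & final)

# Example: accept binary trees built from f(.,.) whose leaves are all 'a'.
delta = {("a", ()): {"q"}, ("f", ("q", "q")): {"q"}}
tree = ("f", [("a", []), ("a", [])])
states, accepted = run(tree, delta, {"q"})    # accepted == True
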
arxiv-674721
cs/0609016
ANAP: Anonymous Authentication Protocol in Mobile Ad hoc Networks
<|reference_start|>ANAP: Anonymous Authentication Protocol in Mobile Ad hoc Networks: The pervasiveness of wireless communication has recently drawn significant research attention to mobile ad hoc networks (MANETs), owing to their innate capability of instant communication in many time- and mission-critical applications. However, their natural advantages of networking in civilian and military environments make them vulnerable to security threats. Support for anonymity in MANETs, a challenge orthogonal to security, is the one we face in this paper. We propose a new anonymous authentication protocol for mobile ad hoc networks, enhanced with a distributed reputation system. Its main objective is to provide mechanisms concealing the real identity of communicating nodes while resisting known attacks. The distributed reputation system is incorporated for trust management and malicious-behavior detection in the network. The end-to-end anonymous authentication is conducted in a three-pass handshake based on asymmetric and symmetric key cryptography. After the authentication phase has finished successfully, multiple secure anonymous data channels are established. Anonymity is guaranteed by randomly chosen pseudonyms owned by a user. Nodes of the network are publicly identified and are independent of users' pseudonyms. In this paper we present an example of the protocol implementation.<|reference_end|>
arxiv
@article{ciszkowski2006anap:, title={ANAP: Anonymous Authentication Protocol in Mobile Ad hoc Networks}, author={Tomasz Ciszkowski, Zbigniew Kotulski}, journal={arXiv preprint arXiv:cs/0609016}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609016}, primaryClass={cs.CR cs.NI} }
ciszkowski2006anap:
arxiv-674722
cs/0609017
On some winning strategies for the Iterated Prisoner's Dilemma or Mr Nice Guy and the Cosa Nostra
<|reference_start|>On some winning strategies for the Iterated Prisoner's Dilemma or Mr Nice Guy and the Cosa Nostra: We submitted two kinds of strategies to the iterated prisoner's dilemma (IPD) competitions organized by Graham Kendall, Paul Darwen and Xin Yao in 2004 and 2005. Our strategies performed exceedingly well in both years. One type is an intelligent and optimistic enhanced version of the well known TitForTat strategy which we named OmegaTitForTat. It recognizes common behaviour patterns and detects and recovers from repairable mutual defection deadlock situations, otherwise behaving much like TitForTat. The second type consists of a set of strategies working together as a team. These group strategies have one distinguished individual Godfather strategy that plays OmegaTitForTat against non-members while heavily profiting from the behaviour of the other members of its group, the Hitmen. The Hitmen willingly let themselves be abused by their Godfather while lowering the scores of all other players as much as possible, thus further maximizing the performance of their Godfather relative to the other participants. The study of collusion in the simplified framework of the iterated prisoner's dilemma allows us to draw parallels to many common aspects of reality, both in Nature and in human society, and therefore further extends the scope of the iterated prisoner's dilemma as a metaphor for the study of cooperative behaviour in a new and natural direction. We further provide evidence that it is unavoidable that such group strategies will dominate all future iterated prisoner's dilemma competitions, as they can be stealthily camouflaged as non-group strategies with arbitrary subtlety. Moreover, we show that the general problem of recognizing stealthily colluding strategies is undecidable in the theoretical sense.<|reference_end|>
arxiv
@article{slany2006on, title={On some winning strategies for the Iterated Prisoner's Dilemma or Mr. Nice Guy and the Cosa Nostra}, author={Wolfgang Slany and Wolfgang Kienreich}, journal={arXiv preprint arXiv:cs/0609017}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609017}, primaryClass={cs.GT} }
slany2006on
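
Note: the baseline both strategy types build on is easy to state in code: TitForTat cooperates first and then mirrors the opponent's previous move. Below is a minimal iterated prisoner's dilemma loop with the standard payoff matrix; OmegaTitForTat's pattern recognition and the Godfather/Hitmen coordination are in the paper and are not reproduced here.

C, D = "C", "D"
PAYOFF = {(C, C): (3, 3), (C, D): (0, 5), (D, C): (5, 0), (D, D): (1, 1)}

def tit_for_tat(my_history, opp_history):
    return C if not opp_history else opp_history[-1]   # start nice, then mirror

def always_defect(my_history, opp_history):
    return D

def play(strategy_a, strategy_b, rounds=200):
    hist_a, hist_b, score_a, score_b = [], [], 0, 0
    for _ in range(rounds):
        a, b = strategy_a(hist_a, hist_b), strategy_b(hist_b, hist_a)
        pa, pb = PAYOFF[(a, b)]
        hist_a.append(a); hist_b.append(b)
        score_a += pa; score_b += pb
    return score_a, score_b

# TitForTat concedes only the first round to a pure defector:
# play(tit_for_tat, always_defect) == (199, 204)
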
arxiv-674723
cs/0609018
Bilayer Low-Density Parity-Check Codes for Decode-and-Forward in Relay Channels
<|reference_start|>Bilayer Low-Density Parity-Check Codes for Decode-and-Forward in Relay Channels: This paper describes an efficient implementation of binning for the relay channel using low-density parity-check (LDPC) codes. We devise bilayer LDPC codes to approach the theoretically promised rate of the decode-and-forward relaying strategy by incorporating relay-generated information bits in specially designed bilayer graphical code structures. While conventional LDPC codes are sensitively tuned to operate efficiently at a certain channel parameter, the proposed bilayer LDPC codes are capable of working at two different channel parameters and two different rates: that at the relay and that at the destination. To analyze the performance of bilayer LDPC codes, bilayer density evolution is devised as an extension of the standard density evolution algorithm. Based on bilayer density evolution, a design methodology is developed for the bilayer codes in which the degree distribution is iteratively improved using linear programming. Further, in order to approach the theoretical decode-and-forward rate for a wide range of channel parameters, this paper proposes two different forms of bilayer codes, the bilayer-expurgated and bilayer-lengthened codes. It is demonstrated that a properly designed bilayer LDPC code can achieve an asymptotic infinite-length threshold within a 0.24 dB gap to the Shannon limits of two different channels simultaneously for a wide range of channel parameters. By practical code construction, finite-length bilayer codes are shown to be able to approach within a 0.6 dB gap to the theoretical decode-and-forward rate of the relay channel at a block length of $10^5$ and a bit-error probability (BER) of $10^{-4}$. Finally, it is demonstrated that a generalized version of the proposed bilayer code construction is applicable to relay networks with multiple relays.<|reference_end|>
arxiv
@article{razaghi2006bilayer, title={Bilayer Low-Density Parity-Check Codes for Decode-and-Forward in Relay Channels}, author={Peyman Razaghi, Wei Yu}, journal={arXiv preprint arXiv:cs/0609018}, year={2006}, doi={10.1109/TIT.2007.904983}, archivePrefix={arXiv}, eprint={cs/0609018}, primaryClass={cs.IT math.IT} }
razaghi2006bilayer
arxiv-674724
cs/0609019
Improving Term Extraction with Terminological Resources
<|reference_start|>Improving Term Extraction with Terminological Resources: Studies of different term extractors on a corpus of the biomedical domain revealed decreased performance when they are applied to highly technical texts. The difficulty or impossibility of customising them to new domains is an additional limitation. In this paper, we propose to use external terminologies to influence generic linguistic data in order to improve the quality of the extraction. The tool we implemented exploits testified terms at different steps of the process: chunking, parsing and extraction of term candidates. Experiments reported here show that, using this method, more term candidates can be acquired with a higher level of reliability. We further describe the extraction process involving endogenous disambiguation implemented in the term extractor YaTeA.<|reference_end|>
arxiv
@article{aubin2006improving, title={Improving Term Extraction with Terminological Resources}, author={Sophie Aubin (LIPN), Thierry Hamon (LIPN)}, journal={Advances in Natural Language Processing 5th International Conference on NLP, FinTAL 2006 (2006) 380}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609019}, primaryClass={cs.CL} }
aubin2006improving
arxiv-674725
cs/0609020
Fast algorithms for computing isogenies between elliptic curves
<|reference_start|>Fast algorithms for computing isogenies between elliptic curves: We survey algorithms for computing isogenies between elliptic curves defined over a field of characteristic either 0 or a large prime. We introduce a new algorithm that computes an isogeny of degree $\ell$ ($\ell$ different from the characteristic) in time quasi-linear with respect to $\ell$. This is based in particular on fast algorithms for power series expansion of the Weierstrass $\wp$-function and related functions.<|reference_end|>
arxiv
@article{bostan2006fast, title={Fast algorithms for computing isogenies between elliptic curves}, author={Alin Bostan (INRIA Rocquencourt), Bruno Salvy (INRIA Rocquencourt), Francois Morain (LIX, INRIA Futurs), Eric Schost (LIX)}, journal={arXiv preprint arXiv:cs/0609020}, year={2006}, doi={10.1090/S0025-5718-08-02066-8}, archivePrefix={arXiv}, eprint={cs/0609020}, primaryClass={cs.CC cs.SC math.NT} }
bostan2006fast
arxiv-674726
cs/0609021
Non uniform (hyper/multi)coherence spaces
<|reference_start|>Non uniform (hyper/multi)coherence spaces: In (hyper)coherence semantics, proofs/terms are cliques in (hyper)graphs. Intuitively, vertices represent results of computations and the edge relation witnesses the ability of being assembled into a same piece of data or a same (strongly) stable function, at arrow types. In (hyper)coherence semantics, the argument of a (strongly) stable functional is always a (strongly) stable function. As a consequence, comparatively to the relational semantics, where there is no edge relation, some vertices are missing. Recovering these vertices is essential for the purpose of reconstructing proofs/terms from their interpretations. It shall also be useful for the comparison with other semantics, like game semantics. In [BE01], Bucciarelli and Ehrhard introduced a so-called non uniform coherence space semantics where no vertex is missing. By constructing the co-free exponential we give a new version of this latter semantics, together with non uniform versions of hypercoherences and multicoherences, a new semantics where an edge is a finite multiset. Thanks to the co-free construction, these non uniform semantics are deterministic in the sense that the intersection of a clique and of an anti-clique contains at most one vertex (a result of interaction), and they extensionally collapse onto the corresponding uniform semantics.<|reference_end|>
arxiv
@article{boudes2006non, title={Non uniform (hyper/multi)coherence spaces}, author={Pierre Boudes (LIPN)}, journal={arXiv preprint arXiv:cs/0609021}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609021}, primaryClass={cs.LO} }
boudes2006non
arxiv-674727
cs/0609022
Dichotomies and Duality in First-order Model Checking Problems
<|reference_start|>Dichotomies and Duality in First-order Model Checking Problems: We study the complexity of the model checking problem, for fixed model A, over certain fragments L of first-order logic. These are sometimes known as the expression complexities of L. We obtain various complexity classification theorems for these logics L as each ranges over models A, in the spirit of the dichotomy conjecture for the Constraint Satisfaction Problem -- which itself may be seen as the model checking problem for existential conjunctive positive first-order logic.<|reference_end|>
arxiv
@article{martin2006dichotomies, title={Dichotomies and Duality in First-order Model Checking Problems}, author={Barnaby Martin}, journal={arXiv preprint arXiv:cs/0609022}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609022}, primaryClass={cs.LO cs.CC} }
martin2006dichotomies
arxiv-674728
cs/0609023
Novel Reversible TSG Gate and Its Application for Designing Components of Primitive Reversible/Quantum ALU
<|reference_start|>Novel Reversible TSG Gate and Its Application for Designing Components of Primitive Reversible/Quantum ALU: In recent years, reversible logic has emerged as a promising computing paradigm with applications in low power CMOS, quantum computing, nanotechnology, and optical computing. Classical gates such as AND, OR, and EXOR are not reversible. This paper utilizes a new 4 * 4 reversible gate called the TSG gate to build the components of a primitive reversible/quantum ALU. The most significant aspect of the TSG gate is that it can work singly as a reversible full adder, that is, a reversible full adder can now be implemented with a single gate only. A novel reversible 4:2 compressor is also designed from the TSG gate, which is later used to design a novel 8x8 reversible Wallace tree multiplier. It is proved that the adder, 4:2 compressor and multiplier architectures designed using the TSG gate are better than their counterparts available in the literature, in terms of number of reversible gates and garbage outputs. To the best of our knowledge, this is the first attempt to design a reversible 4:2 compressor and a reversible Wallace tree multiplier. Thus, this paper provides a starting point for building more complex systems which can execute complicated operations using reversible logic.<|reference_end|>
arxiv
@article{thapliyal2006novel, title={Novel Reversible TSG Gate and Its Application for Designing Components of Primitive Reversible/Quantum ALU}, author={Himanshu Thapliyal, M. B. Srinivas}, journal={arXiv preprint arXiv:cs/0609023}, year={2006}, doi={10.1109/ICICS.2005.1689293}, archivePrefix={arXiv}, eprint={cs/0609023}, primaryClass={cs.AR} }
thapliyal2006novel
arxiv-674729
cs/0609024
Linux, Open Source and Unicode
<|reference_start|>Linux, Open Source and Unicode: This paper has been withdrawn.<|reference_end|>
arxiv
@article{prashant2006linux, title={Linux, Open Source and Unicode}, author={Prashant}, journal={arXiv preprint arXiv:cs/0609024}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609024}, primaryClass={cs.SE} }
prashant2006linux
arxiv-674730
cs/0609025
A XML Schema Definition based Universal User Interface
<|reference_start|>A XML Schema Definition based Universal User Interface: This article has been withdrawn for a revision of its contents.<|reference_end|>
arxiv
@article{prashant2006a, title={A XML Schema Definition based Universal User Interface}, author={Prashant}, journal={arXiv preprint arXiv:cs/0609025}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609025}, primaryClass={cs.SE} }
prashant2006a
arxiv-674731
cs/0609026
Rarest First and Choke Algorithms Are Enough
<|reference_start|>Rarest First and Choke Algorithms Are Enough: The performance of peer-to-peer file replication comes from its piece and peer selection strategies. Two such strategies have been introduced by the BitTorrent protocol: the rarest first and choke algorithms. Whereas it is commonly accepted that BitTorrent performs well, recent studies have proposed the replacement of the rarest first and choke algorithms in order to improve efficiency and fairness. In this paper, we use results from real experiments to argue that the replacement of the rarest first and choke algorithms cannot be justified in the context of peer-to-peer file replication in the Internet. We instrumented a BitTorrent client and ran experiments on real torrents with different characteristics. Our experimental evaluation is peer-oriented, instead of tracker-oriented, which allows us to get detailed information on all exchanged messages and protocol events. We go beyond the mere observation of the good efficiency of both algorithms. We show that the rarest first algorithm guarantees close to ideal diversity of the pieces among peers. In particular, in our experiments, replacing the rarest first algorithm with source or network coding solutions cannot be justified. We also show that the choke algorithm in its latest version fosters reciprocation and is robust to free riders. In particular, the choke algorithm is fair and its replacement with a bit-level tit-for-tat solution is not appropriate. Finally, we identify new areas of improvement for efficient peer-to-peer file replication protocols.<|reference_end|>
arxiv
@article{legout2006rarest, title={Rarest First and Choke Algorithms Are Enough}, author={Arnaud Legout (INRIA Sophia Antipolis / INRIA Rhône-Alpes), Guillaume Urvoy-Keller (EURECOM), Pietro Michiardi (EURECOM)}, journal={In ACM SIGCOMM/USENIX IMC'2006}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609026}, primaryClass={cs.NI} }
legout2006rarest
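
Note: the rarest first policy discussed above is a one-liner once piece availability is counted: among the pieces the local peer still needs and its neighbors offer, pick one with the fewest copies in the neighborhood, breaking ties at random. A minimal sketch under assumed data encodings; the instrumentation and choke algorithm details are in the paper.

import random

def rarest_first(my_pieces, peers_pieces):
    """my_pieces: set of piece indices already held.
    peers_pieces: list of sets, one per connected peer.
    Returns the index of a rarest needed piece, or None."""
    counts = {}
    for pieces in peers_pieces:
        for p in pieces:
            if p not in my_pieces:
                counts[p] = counts.get(p, 0) + 1   # availability among neighbors
    if not counts:
        return None
    rarest = min(counts.values())
    return random.choice([p for p, c in counts.items() if c == rarest])
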
arxiv-674732
cs/0609027
A Case for Peering of Content Delivery Networks
<|reference_start|>A Case for Peering of Content Delivery Networks: The proliferation of Content Delivery Networks (CDNs) reveals that existing content networks are owned and operated by individual companies. As a consequence, closed delivery networks have evolved which do not cooperate with other CDNs, and in practice, islands of CDNs are formed. Moreover, the logical separation between contents and services in this context results in two content networking domains. But present trends in content networks and content networking capabilities give rise to interest in interconnecting content networks. Finding ways for distinct content networks to coordinate and cooperate with other content networks is necessary for better overall service. In addition, meeting the QoS requirements of users according to the negotiated Service Level Agreements between the user and the content network is a pressing issue in this context. In this article, we present an open, scalable and Service-Oriented Architecture based system to assist the creation of open Content and Service Delivery Networks (CSDNs) that scale and support sharing of resources with other CSDNs.<|reference_end|>
arxiv
@article{buyya2006a, title={A Case for Peering of Content Delivery Networks}, author={Rajkumar Buyya, Al-Mukaddim Khan Pathan, James Broberg and Zahir Tari}, journal={arXiv preprint arXiv:cs/0609027}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609027}, primaryClass={cs.DC} }
buyya2006a
arxiv-674733
cs/0609028
VLSI Implementation of RSA Encryption System Using Ancient Indian Vedic Mathematics
<|reference_start|>VLSI Implementation of RSA Encryption System Using Ancient Indian Vedic Mathematics: This paper proposes the hardware implementation of the RSA encryption/decryption algorithm using the algorithms of Ancient Indian Vedic Mathematics that have been modified to improve performance. The recently proposed hierarchical overlay multiplier architecture is used in the RSA circuitry for the multiplication operation. The most significant aspect of the paper is the development of a division architecture based on the Straight Division algorithm of Ancient Indian Vedic Mathematics and embedding it in the RSA encryption/decryption circuitry for improved efficiency. The coding is done in Verilog HDL and the FPGA synthesis is done using the Xilinx Spartan library. The results show that the RSA circuitry implemented using Vedic division and multiplication is efficient in terms of area/speed compared to its implementation using conventional multiplication and division architectures.<|reference_end|>
arxiv
@article{thapliyal2006vlsi, title={VLSI Implementation of RSA Encryption System Using Ancient Indian Vedic Mathematics}, author={Himanshu Thapliyal and M.B Srinivas}, journal={arXiv preprint arXiv:cs/0609028}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609028}, primaryClass={cs.AR} }
thapliyal2006vlsi
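
Note: Vedic multipliers of the kind referenced above are commonly based on the Urdhva-Tiryakbhyam ("vertically and crosswise") pattern: each output digit position accumulates the cross products of input digits whose indices sum to that position, followed by carry propagation. A plain Python sketch of that pattern follows; the paper's Straight Division architecture is not reproduced.

def urdhva_multiply(x_digits, y_digits, base=10):
    """Multiply two digit lists (least significant digit first) using the
    vertically-and-crosswise accumulation pattern."""
    sums = [0] * (len(x_digits) + len(y_digits))
    for i, xd in enumerate(x_digits):
        for j, yd in enumerate(y_digits):
            sums[i + j] += xd * yd                # crosswise partial products
    result, carry = [], 0
    for s in sums:
        carry, digit = divmod(s + carry, base)    # ripple the carries
        result.append(digit)
    while carry:
        carry, digit = divmod(carry, base)
        result.append(digit)
    while len(result) > 1 and result[-1] == 0:
        result.pop()                              # strip leading zeros
    return result

# 12 * 13 = 156: urdhva_multiply([2, 1], [3, 1]) == [6, 5, 1]
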
arxiv-674734
cs/0609029
Reversible Programmable Logic Array (RPLA) using Fredkin & Feynman Gates for Industrial Electronics and Applications
<|reference_start|>Reversible Programmable Logic Array (RPLA) using Fredkin & Feynman Gates for Industrial Electronics and Applications: In recent years, reversible logic has emerged as a promising computing paradigm with applications in low power CMOS, quantum computing, nanotechnology, and optical computing. Classical gates such as AND, OR, and EXOR are not reversible. In this paper, the authors propose a reversible programmable logic array (RPLA) architecture using the reversible Fredkin and Feynman gates. The proposed RPLA has n inputs and m outputs and can realize m functions of n variables. In order to demonstrate the design of the RPLA, a 3-input RPLA is designed, which can realize any of the $2^8$ functions formed from combinations of the 8 minterms ($2^3$). Furthermore, the application of the designed 3-input RPLA is shown by implementing the full adder and full subtractor functions through it.<|reference_end|>
arxiv
@article{thapliyal2006reversible, title={Reversible Programmable Logic Array (RPLA) using Fredkin & Feynman Gates for Industrial Electronics and Applications}, author={Himanshu Thapliyal and Hamid R. Arabnia}, journal={arXiv preprint arXiv:cs/0609029}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609029}, primaryClass={cs.AR} }
thapliyal2006reversible
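
Note: the two building blocks named in the title above are small bijections on bit vectors: the Feynman gate is a controlled NOT, and the Fredkin gate is a controlled swap. The sketch below models both and brute-force checks the defining property of a reversible gate, namely that its truth table is a bijection.

from itertools import product

def feynman(a, b):
    """Feynman (CNOT) gate: 2x2 reversible, second output is a XOR b."""
    return (a, a ^ b)

def fredkin(a, b, c):
    """Fredkin (controlled swap) gate: swaps b and c when a = 1."""
    return (a, c, b) if a else (a, b, c)

def is_reversible(gate, arity):
    """A gate is reversible iff its truth table is a bijection on {0,1}^arity."""
    outputs = {gate(*bits) for bits in product((0, 1), repeat=arity)}
    return len(outputs) == 2 ** arity

assert is_reversible(feynman, 2) and is_reversible(fredkin, 3)
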
arxiv-674735
cs/0609030
Space Division Multiple Access with a Sum Feedback Rate Constraint
<|reference_start|>Space Division Multiple Access with a Sum Feedback Rate Constraint: On a multi-antenna broadcast channel, simultaneous transmission to multiple users by joint beamforming and scheduling is capable of achieving high throughput, which grows double logarithmically with the number of users. The sum rate for channel state information (CSI) feedback, however, increases linearly with the number of users, reducing the effective uplink capacity. To address this problem, a novel space division multiple access (SDMA) design is proposed, where the sum feedback rate is upper-bounded by a constant. This design consists of algorithms for CSI quantization, threshold based CSI feedback, and joint beamforming and scheduling. The key feature of the proposed approach is the use of feedback thresholds to select feedback users with large channel gains and small CSI quantization errors such that the sum feedback rate constraint is satisfied. Despite this constraint, the proposed SDMA design is shown to achieve a sum capacity growth rate close to the optimal one. Moreover, the feedback overflow probability for this design is found to decrease exponentially with the difference between the allowable and the average sum feedback rates. Numerical results show that the proposed SDMA design is capable of attaining higher sum capacities than existing ones, even though the sum feedback rate is bounded.<|reference_end|>
arxiv
@article{huang2006space, title={Space Division Multiple Access with a Sum Feedback Rate Constraint}, author={Kaibin Huang, Robert W. Heath, Jr and Jeffrey G. Andrews}, journal={arXiv preprint arXiv:cs/0609030}, year={2006}, doi={10.1109/TSP.2007.894245}, archivePrefix={arXiv}, eprint={cs/0609030}, primaryClass={cs.IT cs.NI math.IT} }
huang2006space
arxiv-674736
cs/0609031
Approximation Algorithms for the Bipartite Multi-cut Problem
<|reference_start|>Approximation Algorithms for the Bipartite Multi-cut Problem: We introduce the {\it Bipartite Multi-cut} problem. This is a generalization of the {\it st-Min-cut} problem, is similar to the {\it Multi-cut} problem (except for more stringent requirements) and also turns out to be an immediate generalization of the {\it Min UnCut} problem. We prove that this problem is {\bf NP}-hard and then present LP and SDP based approximation algorithms. While the LP algorithm is based on the Garg-Vazirani-Yannakakis algorithm for {\it Multi-cut}, the SDP algorithm uses the {\it Structure Theorem} of $\ell_2^2$ Metrics.<|reference_end|>
arxiv
@article{kenkre2006approximation, title={Approximation Algorithms for the Bipartite Multi-cut Problem}, author={Sreyash Kenkre and Sundar Vishwanathan}, journal={arXiv preprint arXiv:cs/0609031}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609031}, primaryClass={cs.CC cs.DS} }
kenkre2006approximation
arxiv-674737
cs/0609032
CR-precis: A deterministic summary structure for update data streams
<|reference_start|>CR-precis: A deterministic summary structure for update data streams: We present the CR-precis structure, a general-purpose, deterministic and sub-linear data structure for summarizing \emph{update} data streams. The CR-precis structure yields the \emph{first deterministic sub-linear space/time algorithms for update streams} for answering a variety of fundamental stream queries, such as, (a) point queries, (b) range queries, (c) finding approximate frequent items, (d) finding approximate quantiles, (e) finding approximate hierarchical heavy hitters, (f) estimating inner-products, (g) near-optimal $B$-bucket histograms, etc.<|reference_end|>
arxiv
@article{ganguly2006cr-precis:, title={CR-precis: A deterministic summary structure for update data streams}, author={Sumit Ganguly and Anirban Majumder}, journal={arXiv preprint arXiv:cs/0609032}, year={2006}, number={IIT Kanpur, July 1 2006}, archivePrefix={arXiv}, eprint={cs/0609032}, primaryClass={cs.DS} }
ganguly2006cr-precis:
arxiv-674738
cs/0609033
Choosing Colors for Geometric Graphs via Color Space Embeddings
<|reference_start|>Choosing Colors for Geometric Graphs via Color Space Embeddings: Graph drawing research traditionally focuses on producing geometric embeddings of graphs satisfying various aesthetic constraints. After the geometric embedding is specified, there is an additional step that is often overlooked or ignored: assigning display colors to the graph's vertices. We study the additional aesthetic criterion of assigning distinct colors to vertices of a geometric graph so that the colors assigned to adjacent vertices are as different from one another as possible. We formulate this as a problem involving perceptual metrics in color space and we develop algorithms for solving this problem by embedding the graph in color space. We also present an application of this work to a distributed load-balancing visualization problem.<|reference_end|>
arxiv
@article{dillencourt2006choosing, title={Choosing Colors for Geometric Graphs via Color Space Embeddings}, author={Michael B. Dillencourt and David Eppstein and Michael T. Goodrich}, journal={arXiv preprint arXiv:cs/0609033}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609033}, primaryClass={cs.CG} }
dillencourt2006choosing
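
Note: a minimal sketch of the embedding idea above: place the graph's vertices at random points of a color cube and iteratively push the endpoints of each edge apart, clipping to the cube, so adjacent vertices receive maximally distinct colors. Plain RGB with Euclidean distance is an assumption made for brevity; the paper works with perceptual color-space metrics and different algorithms.

import random

def color_embed(n, edges, iters=500, step=0.05):
    """Assign each of n vertices an (r, g, b) point in [0,1]^3,
    repeatedly pushing the endpoints of every edge apart."""
    pos = [[random.random() for _ in range(3)] for _ in range(n)]
    for _ in range(iters):
        for u, v in edges:
            d = [pos[u][k] - pos[v][k] for k in range(3)]
            norm = max(1e-9, sum(x * x for x in d) ** 0.5)
            for k in range(3):                    # repel adjacent vertices
                push = step * d[k] / norm
                pos[u][k] = min(1.0, max(0.0, pos[u][k] + push))
                pos[v][k] = min(1.0, max(0.0, pos[v][k] - push))
    return pos

# colors = color_embed(4, [(0, 1), (1, 2), (2, 3), (3, 0)])  # a 4-cycle
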
arxiv-674739
cs/0609034
Social Decision Making with Multi-Relational Networks and Grammar-Based Particle Swarms
<|reference_start|>Social Decision Making with Multi-Relational Networks and Grammar-Based Particle Swarms: Social decision support systems are able to aggregate the local perspectives of a diverse group of individuals into a global social decision. This paper presents a multi-relational network ontology and grammar-based particle swarm algorithm capable of aggregating the decisions of millions of individuals. This framework supports a diverse problem space and a broad range of vote aggregation algorithms. These algorithms account for individual expertise and representation across different domains of the group problem space. Individuals are able to pose and categorize problems, generate potential solutions, choose trusted representatives, and vote for particular solutions. Ultimately, via a social decision making algorithm, the system aggregates all the individual votes into a single collective decision.<|reference_end|>
arxiv
@article{rodriguez2006social, title={Social Decision Making with Multi-Relational Networks and Grammar-Based Particle Swarms}, author={Marko A. Rodriguez}, journal={Hawaii International Conference on Systems Science (HICSS), pages 39-49, Waikoloa, Hawaii, IEEE Computer Society, ISSN: 1530-1605, January 2007}, year={2006}, doi={10.1109/HICSS.2007.487}, number={LA-UR-06-2139}, archivePrefix={arXiv}, eprint={cs/0609034}, primaryClass={cs.CY cs.HC} }
rodriguez2006social
arxiv-674740
cs/0609035
Rational Secret Sharing and Multiparty Computation: Extended Abstract
<|reference_start|>Rational Secret Sharing and Multiparty Computation: Extended Abstract: We consider the problems of secret sharing and multiparty computation, assuming that agents prefer to get the secret (resp., function value) to not getting it, and secondarily, prefer that as few as possible of the other agents get it. We show that, under these assumptions, neither secret sharing nor multiparty function computation is possible using a mechanism that has a fixed running time. However, we show that both are possible using randomized mechanisms with constant expected running time.<|reference_end|>
arxiv
@article{halpern2006rational, title={Rational Secret Sharing and Multiparty Computation: Extended Abstract}, author={Joseph Y. Halpern and Vanessa Teague}, journal={arXiv preprint arXiv:cs/0609035}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609035}, primaryClass={cs.GT cs.CR cs.DC} }
halpern2006rational
arxiv-674741
cs/0609036
Reduced Area Low Power High Throughput BCD Adders for IEEE 754r Format
<|reference_start|>Reduced Area Low Power High Throughput BCD Adders for IEEE 754r Format: IEEE 754r is the ongoing revision to the IEEE 754 floating-point standard, and a major enhancement to the standard is the addition of decimal formats. First, this paper proposes novel two-transistor AND and OR gates. The proposed AND gate has no power supply and can therefore be referred to as the Powerless AND gate. Similarly, the proposed two-transistor OR gate has no ground and can be referred to as the Groundless OR gate. Second, two novel BCD adders for the IEEE 754r format, called the carry-skip and carry look-ahead BCD adders, are proposed. In order to design the carry look-ahead BCD adder, a novel 4-bit carry look-ahead adder called NCLA is proposed, which forms the basic building block of the proposed carry look-ahead BCD adder. Finally, the proposed two-transistor AND and OR gates are used to provide optimized small-area, low-power, high-throughput circuitry for the proposed BCD adders.<|reference_end|>
arxiv
@article{thapliyal2006reduced, title={Reduced Area Low Power High Throughput BCD Adders for IEEE 754r Format}, author={Himanshu Thapliyal, Hamid R. Arabnia and M. B. Srinivas}, journal={arXiv preprint arXiv:cs/0609036}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609036}, primaryClass={cs.AR} }
thapliyal2006reduced
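At the digit level, all the BCD adder variants in the abstract implement the same correction rule: add two decimal digits in binary and add 6 whenever the 4-bit sum is not a valid BCD digit. The sketch below models only that arithmetic rule, not the proposed gate-level circuits.

```python
def bcd_add(a_digits, b_digits):
    """Add two BCD numbers given as lists of decimal digits, least significant first.

    Each 4-bit digit sum is corrected by +6 when it exceeds 9; this is
    exactly the step the carry-skip and carry look-ahead circuits accelerate.
    """
    out, carry = [], 0
    for a, b in zip(a_digits, b_digits):
        s = a + b + carry
        if s > 9:          # binary sum is not a valid BCD digit
            s += 6         # +6 correction wraps into the next decade
            carry = 1
            s &= 0xF       # keep the low 4 bits as the BCD digit
        else:
            carry = 0
        out.append(s)
    if carry:
        out.append(1)
    return out

# 58 + 47 = 105, digits least significant first
assert bcd_add([8, 5], [7, 4]) == [5, 0, 1]
```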
arxiv-674742
cs/0609037
(HO)RPO Revisited
<|reference_start|>(HO)RPO Revisited: The notion of computability closure has been introduced for proving the termination of the combination of higher-order rewriting and beta-reduction. It is also used for strengthening the higher-order recursive path ordering. In the present paper, we study in more detail the relations between the computability closure and the (higher-order) recursive path ordering. We show that the first-order recursive path ordering is equal to an ordering naturally defined from the computability closure. In the higher-order case, we get an ordering containing the higher-order recursive path ordering whose well-foundedness relies on the correctness of the computability closure. This provides a simple way to extend the higher-order recursive path ordering to richer type systems.<|reference_end|>
arxiv
@article{blanqui2006(ho)rpo, title={(HO)RPO Revisited}, author={Frédéric Blanqui (INRIA Lorraine - LORIA)}, journal={arXiv preprint arXiv:cs/0609037}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609037}, primaryClass={cs.LO} }
blanqui2006(ho)rpo
arxiv-674743
cs/0609038
On Performance of Event-to-Sink Transport in Transmit-Only Sensor Networks
<|reference_start|>On Performance of Event-to-Sink Transport in Transmit-Only Sensor Networks: We consider a hybrid wireless sensor network with regular and transmit-only sensors. The transmit-only sensors do not have a receiver circuit, and hence are cheaper and less energy-consuming, but their transmissions cannot be coordinated. Regular sensors, also called cluster-heads, are responsible for receiving information from the transmit-only sensors and forwarding it to sinks. The main goal of such a hybrid network is to reduce the cost of deployment while satisfying certain performance constraints (minimum coverage, sensing rate, etc.). In this paper we are interested in the communication between transmit-only sensors and cluster-heads. We develop a detailed analytical model of the physical and MAC layers using tools from queuing theory and stochastic geometry. (The MAC model, which we call Erlang's loss model with interference, might be of independent interest, as it is adequate for any non-slotted, i.e., unsynchronized, wireless communication channel.) We give an explicit formula for the frequency of successful packet reception by a cluster-head, given the sensors' locations. We further define packet admission policies at a cluster-head, and we calculate the optimal policies for different performance criteria. Finally we show that the proposed hybrid network, using the optimal policies, can achieve substantial cost savings as compared to conventional architectures.<|reference_end|>
arxiv
@article{blaszczyszyn2006on, title={On Performance of Event-to-Sink Transport in Transmit-Only Sensor Networks}, author={Bartlomiej Bartek Blaszczyszyn (INRIA Rocquencourt), Bozidar Radunovic (INRIA Rocquencourt)}, journal={Proc. of IEEE Infocom 2008}, year={2006}, doi={10.1109/INFOCOM.2008.176}, archivePrefix={arXiv}, eprint={cs/0609038}, primaryClass={cs.NI math.PR} }
blaszczyszyn2006on
arxiv-674744
cs/0609039
Higher-Order Termination: from Kruskal to Computability
<|reference_start|>Higher-Order Termination: from Kruskal to Computability: Termination is a major question in both logic and computer science. In logic, termination is at the heart of proof theory where it is usually called strong normalization (of cut elimination). In computer science, termination has always been an important issue for showing programs correct. In the early days of logic, strong normalization was usually shown by assigning ordinals to expressions in such a way that eliminating a cut would yield an expression with a smaller ordinal. In the early days of verification, computer scientists used similar ideas, interpreting the arguments of a program call by a natural number, such as their size. Showing that the size of the arguments decreases for each recursive call gives a termination proof of the program, which is however rather weak since it can only yield quite small ordinals. In the sixties, Tait invented a new method for showing cut elimination of natural deduction, based on a predicate over the set of terms, such that the membership of an expression in the predicate implied the strong normalization property for that expression. The predicate being defined by induction on types, or even as a fixpoint, this method could yield much larger ordinals. Later generalized by Girard under the name of reducibility or computability candidates, it proved very effective in proving the strong normalization property of typed lambda-calculi...<|reference_end|>
arxiv
@article{blanqui2006higher-order, title={Higher-Order Termination: from Kruskal to Computability}, author={Frédéric Blanqui (INRIA Lorraine - LORIA), Jean-Pierre Jouannaud (LIX), Albert Rubio}, journal={In 13th International Conference on Logic for Programming, Artificial Intelligence and Reasoning - LPAR 2006 4246 (2006)}, year={2006}, doi={10.1007/11916277_1}, archivePrefix={arXiv}, eprint={cs/0609039}, primaryClass={cs.LO} }
blanqui2006higher-order
arxiv-674745
cs/0609040
Elgot Algebras
<|reference_start|>Elgot Algebras: Denotational semantics can be based on algebras with additional structure (order, metric, etc.) which makes it possible to interpret recursive specifications. It was the idea of Elgot to base denotational semantics on iterative theories instead, i.e., theories in which abstract recursive specifications are required to have unique solutions. Later Bloom and Esik studied iteration theories and iteration algebras in which a specified solution has to obey certain axioms. We propose so-called Elgot algebras as a convenient structure for semantics in the present paper. An Elgot algebra is an algebra with a specified solution for every system of flat recursive equations. That specification satisfies two simple and well motivated axioms: functoriality (stating that solutions are stable under renaming of recursion variables) and compositionality (stating how to perform simultaneous recursion). These two axioms stem canonically from Elgot's iterative theories: We prove that the category of Elgot algebras is the Eilenberg-Moore category of the monad given by a free iterative theory.<|reference_end|>
arxiv
@article{adamek2006elgot, title={Elgot Algebras}, author={Jiri Adamek, Stefan Milius, Jiri Velebil}, journal={Logical Methods in Computer Science, Volume 2, Issue 5 (November 8, 2006) lmcs:2235}, year={2006}, doi={10.2168/LMCS-2(5:4)2006}, archivePrefix={arXiv}, eprint={cs/0609040}, primaryClass={cs.LO math.CT} }
adamek2006elgot
arxiv-674746
cs/0609041
Primitive operations for the construction and reorganization of minimally persistent formations
<|reference_start|>Primitive operations for the construction and reorganization of minimally persistent formations: In this paper, we study the construction and transformation of two-dimensional persistent graphs. Persistence is a generalization to directed graphs of the undirected notion of rigidity. In the context of moving autonomous agent formations, persistence characterizes the efficacy of a directed structure of unilateral distances constraints seeking to preserve a formation shape. Analogously to the powerful results about Henneberg sequences in minimal rigidity theory, we propose different types of directed graph operations allowing one to sequentially build any minimally persistent graph (i.e. persistent graph with a minimal number of edges for a given number of vertices), each intermediate graph being also minimally persistent. We also consider the more generic problem of obtaining one minimally persistent graph from another, which corresponds to the on-line reorganization of an autonomous agent formation. We prove that we can obtain any minimally persistent formation from any other one by a sequence of elementary local operations such that minimal persistence is preserved throughout the reorganization process.<|reference_end|>
arxiv
@article{hendrickx2006primitive, title={Primitive operations for the construction and reorganization of minimally persistent formations}, author={Julien M. Hendrickx, Baris Fidan, Changbin Yu, Brian D.O. Anderson and Vincent D. Blondel}, journal={arXiv preprint arXiv:cs/0609041}, year={2006}, number={CESAME research report 2006.62}, archivePrefix={arXiv}, eprint={cs/0609041}, primaryClass={cs.MA} }
hendrickx2006primitive
arxiv-674747
cs/0609042
On Divergence-Power Inequalities
<|reference_start|>On Divergence-Power Inequalities: Expressions for Shannon EPI-type Divergence-Power Inequalities (DPIs) are given for two cases of stationary random processes: time-discrete and band-limited time-continuous. The new expressions connect the divergence rate of the sum of independent processes, the individual divergence rate of each process, and their power spectral densities. All divergences are between a process and a Gaussian process with the same second-order statistics, and are assumed to be finite. A new proof of the Shannon entropy-power inequality (EPI), based on the relationship between divergence and causal minimum mean-square error (CMMSE) in Gaussian channels with large signal-to-noise ratio, is also shown.<|reference_end|>
arxiv
@article{binia2006on, title={On Divergence-Power Inequalities}, author={Jacob Binia}, journal={arXiv preprint arXiv:cs/0609042}, year={2006}, doi={10.1109/TIT.2006.890715}, archivePrefix={arXiv}, eprint={cs/0609042}, primaryClass={cs.IT math.IT} }
binia2006on
arxiv-674748
cs/0609043
Challenging the principle of compositionality in interpreting natural language texts
<|reference_start|>Challenging the principle of compositionality in interpreting natural language texts: The paper aims at emphasizing that, even relaxed, the hypothesis of compositionality has to face many problems when used for interpreting natural language texts. Rather than fixing these problems within the compositional framework, we believe that a more radical change is necessary, and propose another approach.<|reference_end|>
arxiv
@article{gayral2006challenging, title={Challenging the principle of compositionality in interpreting natural language texts}, author={Françoise Gayral (LIPN), Daniel Kayser (LIPN), François Lévy (LIPN)}, journal={Conference on Compositionality, Concepts and Cognition, Germany (2004)}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609043}, primaryClass={cs.CL} }
gayral2006challenging
arxiv-674749
cs/0609044
The role of time in considering collections
<|reference_start|>The role of time in considering collections: The paper concerns the understanding of plurals in the framework of Artificial Intelligence and emphasizes the role of time. The construction of collection(s) and their evolution across time is often crucial and has to be accounted for. The paper contrasts a "de dicto" collection, which can be considered as persisting across situations even if its members change, with a "de re" collection, whose composition does not vary through time. It gives different criteria for choosing between the two interpretations (de re and de dicto), depending on the context of utterance.<|reference_end|>
arxiv
@article{gayral2006the, title={The role of time in considering collections}, author={Françoise Gayral (LIPN), Daniel Kayser (LIPN), François Lévy (LIPN)}, journal={Journées de Sémantique et Modélisation, France (2004)}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609044}, primaryClass={cs.CL} }
gayral2006the
arxiv-674750
cs/0609045
Metric entropy in competitive on-line prediction
<|reference_start|>Metric entropy in competitive on-line prediction: Competitive on-line prediction (also known as universal prediction of individual sequences) is a strand of learning theory avoiding making any stochastic assumptions about the way the observations are generated. The predictor's goal is to compete with a benchmark class of prediction rules, which is often a proper Banach function space. Metric entropy provides a unifying framework for competitive on-line prediction: the numerous known upper bounds on the metric entropy of various compact sets in function spaces readily imply bounds on the performance of on-line prediction strategies. This paper discusses strengths and limitations of the direct approach to competitive on-line prediction via metric entropy, including comparisons to other approaches.<|reference_end|>
arxiv
@article{vovk2006metric, title={Metric entropy in competitive on-line prediction}, author={Vladimir Vovk}, journal={arXiv preprint arXiv:cs/0609045}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609045}, primaryClass={cs.LG} }
vovk2006metric
arxiv-674751
cs/0609046
Exhausting Error-Prone Patterns in LDPC Codes
<|reference_start|>Exhausting Error-Prone Patterns in LDPC Codes: It is proved in this work that exhaustively determining bad patterns in arbitrary, finite low-density parity-check (LDPC) codes, including stopping sets for binary erasure channels (BECs) and trapping sets (also known as near-codewords) for general memoryless symmetric channels, is an NP-complete problem, and efficient algorithms are provided for codes of practical short lengths (n ≈ 500). By exploiting the sparse connectivity of LDPC codes, the stopping sets of size <=13 and the trapping sets of size <=11 can be efficiently exhaustively determined for the first time, and the resulting exhaustive list is of great importance for code analysis and finite code optimization. The featured tree-based narrowing search distinguishes this algorithm from existing ones for which inexhaustive methods are employed. One important byproduct is a pair of upper bounds on the bit-error rate (BER) and frame-error rate (FER) iterative decoding performance of arbitrary codes over BECs that can be evaluated for any value of the erasure probability, including both the waterfall and the error floor regions. The tightness of these upper bounds and the exhaustion capability of the proposed algorithm are proved when combining an optimal leaf-finding module with the tree-based search. These upper bounds also provide a worst-case-performance guarantee which is crucial to optimizing LDPC codes for extremely low error rate applications, e.g., optical/satellite communications. Extensive numerical experiments are conducted that include both randomly and algebraically constructed LDPC codes, the results of which demonstrate the superior efficiency of the exhaustion algorithm and its significant value for finite length code optimization.<|reference_end|>
arxiv
@article{wang2006exhausting, title={Exhausting Error-Prone Patterns in LDPC Codes}, author={Chih-Chun Wang (1), Sanjeev R. Kulkarni (2), H. Vincent Poor (2) ((1) Purdue University, (2) Princeton University)}, journal={arXiv preprint arXiv:cs/0609046}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609046}, primaryClass={cs.IT cs.DS math.IT} }
wang2006exhausting
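For intuition about what is being exhausted: a stopping set of a binary parity-check matrix H is a set S of variable nodes such that no check node has exactly one neighbor in S. The brute-force sketch below enumerates them on a toy matrix; it is emphatically not the paper's tree-based narrowing search, which is what makes sizes up to 13 feasible at n ≈ 500.

```python
from itertools import combinations
import numpy as np

def stopping_sets(H, max_size):
    """Brute-force all nonempty stopping sets of size <= max_size.

    H is a binary parity-check matrix (checks x variables). S is a
    stopping set iff every check touching S touches it at least twice.
    """
    m, n = H.shape
    found = []
    for k in range(1, max_size + 1):
        for S in combinations(range(n), k):
            counts = H[:, list(S)].sum(axis=1)
            if not np.any(counts == 1):   # no check sees S exactly once
                found.append(S)
    return found

# Toy parity-check matrix: only the full support is a stopping set.
H = np.array([[1, 1, 0, 0],
              [0, 1, 1, 0],
              [0, 0, 1, 1]])
print(stopping_sets(H, 4))   # -> [(0, 1, 2, 3)]
```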
arxiv-674752
cs/0609047
Topology Control and Network Lifetime in Three-Dimensional Wireless Sensor Networks
<|reference_start|>Topology Control and Network Lifetime in Three-Dimensional Wireless Sensor Networks: Coverage and connectivity issues of three-dimensional (3D) networks are addressed in [2], but that work assumes that a node can be placed at any arbitrary location. In this work, we drop that assumption and instead assume that nodes are uniformly and densely deployed in a 3D space. We want to devise a mechanism that keeps some nodes active and puts other nodes to sleep so that the number of active nodes at any time is minimized (and thus network lifetime is maximized), while maintaining full coverage and connectivity. One simple way to do that is to partition the 3D space into cells, and have only one node in each cell remain active at a time. Our results show that the number of active nodes is minimized when the shape of each cell is a truncated octahedron. This requires the sensing range to be at least 0.542326 times the transmission radius; the corresponding value is 0.5, 0.53452 and 0.5 for the cube, hexagonal prism, and rhombic dodecahedron, respectively. However, the number of active nodes at a time for the cube, hexagonal prism and rhombic dodecahedron models is respectively 2.372239, 1.82615 and 1.49468 times that of the truncated octahedron model. So the truncated octahedron model clearly has the highest network lifetime. We also provide a distributed topology control algorithm that each sensor node can use to determine its cell id using a constant number of local arithmetic operations, provided that the sensor node knows its location. We also validate our results by simulation.<|reference_end|>
arxiv
@article{alam2006topology, title={Topology Control and Network Lifetime in Three-Dimensional Wireless Sensor Networks}, author={S. M. Nazrul Alam and Zygmunt J. Haas}, journal={arXiv preprint arXiv:cs/0609047}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609047}, primaryClass={cs.NI cs.CG} }
alam2006topology
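The truncated octahedra that tile 3D space are the Voronoi cells of the body-centered cubic (BCC) lattice, so one plausible constant-time cell-id computation (an assumption; the paper's own indexing scheme is not reproduced here) maps a node's location to the nearest BCC lattice point:

```python
import numpy as np

def bcc_cell_id(p, r):
    """Map a point to the center of its truncated-octahedron cell.

    The BCC lattice with spacing r is the union of the integer lattice
    r*Z^3 and its translate by (r/2, r/2, r/2); the cell id is whichever
    of the two nearest sublattice points is closer. This takes a constant
    number of arithmetic operations, as the abstract requires.
    """
    p = np.asarray(p, dtype=float)
    cand1 = np.round(p / r) * r                   # corner sublattice
    cand2 = (np.round(p / r - 0.5) + 0.5) * r     # body-center sublattice
    if np.sum((p - cand1) ** 2) <= np.sum((p - cand2) ** 2):
        return tuple(cand1)
    return tuple(cand2)

print(bcc_cell_id((0.9, 0.2, 0.4), r=1.0))   # -> (1.0, 0.0, 0.0)
```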
arxiv-674753
cs/0609048
On the logical definability of certain graph and poset languages
<|reference_start|>On the logical definability of certain graph and poset languages: We show that it is equivalent, for certain sets of finite graphs, to be definable in CMS (counting monadic second-order logic, a natural extension of monadic second-order logic), and to be recognizable in an algebraic framework induced by the notion of modular decomposition of a finite graph. More precisely, we consider the set $F_\infty$ of composition operations on graphs which occur in the modular decomposition of finite graphs. If $F$ is a subset of $F_{\infty}$, we say that a graph is an $F$-graph if it can be decomposed using only operations in $F$. A set of $F$-graphs is recognizable if it is a union of classes in a finite-index equivalence relation which is preserved by the operations in $F$. We show that if $F$ is finite and its elements enjoy only a limited amount of commutativity (a property which we call weak rigidity), then recognizability is equivalent to CMS-definability. This requirement is weak enough to be satisfied whenever all $F$-graphs are posets, that is, transitive dags. In particular, our result generalizes Kuske's recent result on series-parallel poset languages.<|reference_end|>
arxiv
@article{weil2006on, title={On the logical definability of certain graph and poset languages}, author={Pascal Weil (LaBRI)}, journal={Journal of Automata, Languages and Combinatorics 9 (2004) 147-165}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609048}, primaryClass={cs.LO} }
weil2006on
arxiv-674754
cs/0609049
Scanning and Sequential Decision Making for Multi-Dimensional Data - Part I: the Noiseless Case
<|reference_start|>Scanning and Sequential Decision Making for Multi-Dimensional Data - Part I: the Noiseless Case: We investigate the problem of scanning and prediction ("scandiction", for short) of multidimensional data arrays. This problem arises in several aspects of image and video processing, such as predictive coding, where an image is compressed by coding the error sequence resulting from scandicting it. Thus, it is natural to ask what is the optimal method to scan and predict a given image, what is the resulting minimum prediction loss, and whether there exist specific scandiction schemes which are universal in some sense. Specifically, we investigate the following problems: First, modeling the data array as a random field, we wish to examine whether there exists a scandiction scheme which is independent of the field's distribution, yet asymptotically achieves the same performance as if this distribution was known. This question is answered in the affirmative for the set of all spatially stationary random fields and under mild conditions on the loss function. We then discuss the scenario where a non-optimal scanning order is used, yet accompanied by an optimal predictor, and derive bounds on the excess loss compared to optimal scanning and prediction. This paper is the first part of a two-part paper on sequential decision making for multi-dimensional data. It deals with clean, noiseless data arrays. The second part deals with noisy data arrays, namely, with the case where the decision maker observes only a noisy version of the data, yet it is judged with respect to the original, clean data.<|reference_end|>
arxiv
@article{cohen2006scanning, title={Scanning and Sequential Decision Making for Multi-Dimensional Data - Part I: the Noiseless Case}, author={Asaf Cohen, Neri Merhav and Tsachy Weissman}, journal={arXiv preprint arXiv:cs/0609049}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609049}, primaryClass={cs.IT cs.LG math.IT} }
cohen2006scanning
arxiv-674755
cs/0609050
Exact Spectral Analysis of Single-h and Multi-h CPM Signals through PAM decomposition and Matrix Series Evaluation
<|reference_start|>Exact Spectral Analysis of Single-h and Multi-h CPM Signals through PAM decomposition and Matrix Series Evaluation: In this paper we address the problem of closed-form spectral evaluation of CPM. We show that the multi-h CPM signal can be conveniently generated by a PTI SM. The output is governed by a Markov chain with the unusual peculiarity of being cyclostationary and reducible; this also holds in the single-h context. Judicious reinterpretation of the result leads to a formalization through a stationary and irreducible Markov chain, whose spectral evaluation is known in closed form from the literature. This paper has two major outcomes. First, unlike the literature, we obtain a PSD in true closed form. Second, we give novel insights into the CPM format.<|reference_end|>
arxiv
@article{cariolaro2006exact, title={Exact Spectral Analysis of Single-h and Multi-h CPM Signals through PAM decomposition and Matrix Series Evaluation}, author={G. Cariolaro, T. Erseghe, N. Laurenti}, journal={IEEE Transactions on Communications 59 (7), 1893-1903}, year={2006}, doi={10.1109/TCOMM.2011.050911.100631}, archivePrefix={arXiv}, eprint={cs/0609050}, primaryClass={cs.IT math.IT} }
cariolaro2006exact
arxiv-674756
cs/0609051
Multilingual person name recognition and transliteration
<|reference_start|>Multilingual person name recognition and transliteration: We present an exploratory tool that extracts person names from multilingual news collections, matches name variants referring to the same person, and infers relationships between people based on the co-occurrence of their names in related news. A novel feature is the matching of name variants across languages and writing systems, including names written with the Greek, Cyrillic and Arabic writing system. Due to our highly multilingual setting, we use an internal standard representation for name representation and matching, instead of adopting the traditional bilingual approach to transliteration. This work is part of the news analysis system NewsExplorer that clusters an average of 25,000 news articles per day to detect related news within the same and across different languages.<|reference_end|>
arxiv
@article{pouliquen2006multilingual, title={Multilingual person name recognition and transliteration}, author={Bruno Pouliquen, Ralf Steinberger, Camelia Ignat, Irina Temnikova, Anna Widiger, Wajdi Zaghouani, Jan Zizka}, journal={Journal CORELA - Cognition, Representation, Langage. Numeros speciaux, Le traitement lexicographique des noms propres. December 2005. ISSN 1638-5748}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609051}, primaryClass={cs.CL cs.IR} }
pouliquen2006multilingual
arxiv-674757
cs/0609052
Undecidability of the unification and admissibility problems for modal and description logics
<|reference_start|>Undecidability of the unification and admissibility problems for modal and description logics: We show that the unification problem `is there a substitution instance of a given formula that is provable in a given logic?' is undecidable for the basic modal logics K and K4 extended with the universal modality. It follows that the admissibility problem for inference rules is undecidable for these logics as well. These are the first examples of standard decidable modal logics for which the unification and admissibility problems are undecidable. We also prove undecidability of the unification and admissibility problems for K and K4 with at least two modal operators and nominals (instead of the universal modality), thereby showing that these problems are undecidable for basic hybrid logics. Recently, unification has been introduced as an important reasoning service for description logics. The undecidability proof for K with nominals can be used to show the undecidability of unification for Boolean description logics with nominals (such as ALCO and SHIQO). The undecidability proof for K with the universal modality can be used to show that the unification problem relative to role boxes is undecidable for Boolean description logics with transitive roles, inverse roles, and role hierarchies (such as SHI and SHIQ).<|reference_end|>
arxiv
@article{wolter2006undecidability, title={Undecidability of the unification and admissibility problems for modal and description logics}, author={Frank Wolter and Michael Zakharyaschev}, journal={arXiv preprint arXiv:cs/0609052}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609052}, primaryClass={cs.LO cs.AI} }
wolter2006undecidability
arxiv-674758
cs/0609053
Navigating multilingual news collections using automatically extracted information
<|reference_start|>Navigating multilingual news collections using automatically extracted information: We are presenting a text analysis tool set that allows analysts in various fields to sieve through large collections of multilingual news items quickly and to find information that is of relevance to them. For a given document collection, the tool set automatically clusters the texts into groups of similar articles, extracts names of places, people and organisations, lists the user-defined specialist terms found, links clusters and entities, and generates hyperlinks. Through its daily news analysis operating on thousands of articles per day, the tool also learns relationships between people and other entities. The fully functional prototype system allows users to explore and navigate multilingual document collections across languages and time.<|reference_end|>
arxiv
@article{steinberger2006navigating, title={Navigating multilingual news collections using automatically extracted information}, author={Ralf Steinberger, Bruno Pouliquen, Camelia Ignat (European Commission - Joint Research Centre)}, journal={Proceedings of the 27th International Conference 'Information Technology Interfaces' (ITI'2005). Cavtat / Dubrovnik}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609053}, primaryClass={cs.CL cs.IR} }
steinberger2006navigating
arxiv-674759
cs/0609054
High Data-Rate Single-Symbol ML Decodable Distributed STBCs for Cooperative Networks
<|reference_start|>High Data-Rate Single-Symbol ML Decodable Distributed STBCs for Cooperative Networks: High data-rate Distributed Orthogonal Space-Time Block Codes (DOSTBCs) which achieve single-symbol decodability and full diversity order are proposed in this paper. An upper bound on the data-rate of the DOSTBC is derived; it is approximately twice that of the conventional repetition-based cooperative strategy. In order to facilitate systematic constructions of DOSTBCs achieving this upper bound on the data-rate, some special DOSTBCs, which have diagonal noise covariance matrices at the destination terminal, are investigated. These codes are referred to as the row-monomial DOSTBCs. An upper bound on the data-rate of the row-monomial DOSTBC is derived; it is equal to or slightly smaller than that of the DOSTBC. Lastly, systematic construction methods for row-monomial DOSTBCs achieving the upper bound on the data-rate are presented.<|reference_end|>
arxiv
@article{yi2006high, title={High Data-Rate Single-Symbol ML Decodable Distributed STBCs for Cooperative Networks}, author={Zhihang Yi and Il-Min Kim}, journal={arXiv preprint arXiv:cs/0609054}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609054}, primaryClass={cs.IT math.IT} }
yi2006high
arxiv-674760
cs/0609055
Coding for Additive White Noise Channels with Feedback Corrupted by Uniform Quantization or Bounded Noise
<|reference_start|>Coding for Additive White Noise Channels with Feedback Corrupted by Uniform Quantization or Bounded Noise: We present simple coding strategies, which are variants of the Schalkwijk-Kailath scheme, for communicating reliably over additive white noise channels in the presence of corrupted feedback. More specifically, we consider a framework comprising an additive white forward channel and a backward link which is used for feedback. We consider two types of corruption mechanisms in the backward link. The first is quantization noise, i.e., the encoder receives the quantized values of the past outputs of the forward channel. The quantization is uniform, memoryless and time invariant (that is, symbol-by-symbol scalar quantization), with bounded quantization error. The second corruption mechanism is an arbitrarily distributed additive bounded noise in the backward link. Here we allow symbol-by-symbol encoding at the input to the backward channel. We propose simple explicit schemes that guarantee positive information rate, in bits per channel use, with positive error exponent. If the forward channel is additive white Gaussian then our schemes achieve capacity, in the limit of diminishing amplitude of the noise components at the backward link, while guaranteeing that the probability of error converges to zero as a doubly exponential function of the block length. Furthermore, if the forward channel is additive white Gaussian and the backward link consists of an additive bounded noise channel, with signal-to-noise ratio (SNR) constrained symbol-by-symbol encoding, then our schemes are also capacity-achieving in the limit of high SNR.<|reference_end|>
arxiv
@article{martins2006coding, title={Coding for Additive White Noise Channels with Feedback Corrupted by Uniform Quantization or Bounded Noise}, author={Nuno C Martins, Tsachy Weissman}, journal={arXiv preprint arXiv:cs/0609055}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609055}, primaryClass={cs.IT math.IT} }
martins2006coding
arxiv-674761
cs/0609056
Matrix Games, Linear Programming, and Linear Approximation
<|reference_start|>Matrix Games, Linear Programming, and Linear Approximation: The following four classes of computational problems are equivalent: solving matrix games, solving linear programs, best $l^{\infty}$ linear approximation, best $l^1$ linear approximation.<|reference_end|>
arxiv
@article{vaserstein2006matrix, title={Matrix Games, Linear Programming, and Linear Approximation}, author={L. N. Vaserstein}, journal={arXiv preprint arXiv:cs/0609056}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609056}, primaryClass={cs.GT cs.AI} }
vaserstein2006matrix
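One direction of the equivalence is easy to make concrete: the value of a matrix game is the optimum of a small linear program. A sketch using scipy (an illustrative tool choice, not taken from the paper):

```python
import numpy as np
from scipy.optimize import linprog

def game_value(A):
    """Value and optimal mixed strategy of the row player for payoff matrix A.

    Maximize v subject to x^T A >= v componentwise, sum(x) = 1, x >= 0.
    Variables are z = [x_1..x_m, v]; linprog minimizes, so we minimize -v.
    """
    m, n = A.shape
    c = np.zeros(m + 1)
    c[-1] = -1.0
    A_ub = np.hstack([-A.T, np.ones((n, 1))])   # v - (A^T x)_j <= 0
    b_ub = np.zeros(n)
    A_eq = np.array([[1.0] * m + [0.0]])        # probabilities sum to 1
    b_eq = np.array([1.0])
    bounds = [(0, None)] * m + [(None, None)]
    res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds=bounds)
    return res.x[-1], res.x[:m]

# Matching pennies: value 0, uniform optimal strategy.
v, x = game_value(np.array([[1.0, -1.0], [-1.0, 1.0]]))
print(round(v, 6), np.round(x, 3))
```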
arxiv-674762
cs/0609057
Concurrently Non-Malleable Zero Knowledge in the Authenticated Public-Key Model
<|reference_start|>Concurrently Non-Malleable Zero Knowledge in the Authenticated Public-Key Model: We consider a class of zero-knowledge protocols that are of interest for their practical applications within networks like the Internet: efficient zero-knowledge arguments of knowledge that remain secure against concurrent man-in-the-middle attacks. In an effort to reduce the setup assumptions required for such arguments, we consider a model, which we call the Authenticated Public-Key (APK) model. The APK model seems to significantly reduce the setup assumptions made by the CRS model (as no trusted party or honest execution of a centralized algorithm is required), and can be seen as a slightly stronger variation of the Bare Public-Key (BPK) model from \cite{CGGM,MR}, and a weaker variation of the registered public-key model used in \cite{BCNP}. We then define and study man-in-the-middle attacks in the APK model. Our main result is a constant-round concurrent non-malleable zero-knowledge argument of knowledge for any polynomial-time relation (associated with a language in $\mathcal{NP}$), under the (minimal) assumption of the existence of a one-way function family. Furthermore, we show time-efficient instantiations of our protocol based on known number-theoretic assumptions. We also note a negative result with respect to further reducing the setup assumptions of our protocol to those in the (unauthenticated) BPK model, by showing that concurrently non-malleable zero-knowledge arguments of knowledge in the BPK model are only possible for trivial languages.<|reference_end|>
arxiv
@article{deng2006concurrently, title={Concurrently Non-Malleable Zero Knowledge in the Authenticated Public-Key Model}, author={Yi Deng, Giovanni Di Crescenzo and Dongdai Lin}, journal={arXiv preprint arXiv:cs/0609057}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609057}, primaryClass={cs.CR} }
deng2006concurrently
arxiv-674763
cs/0609058
The JRC-Acquis: A multilingual aligned parallel corpus with 20+ languages
<|reference_start|>The JRC-Acquis: A multilingual aligned parallel corpus with 20+ languages: We present a new, unique and freely available parallel corpus containing European Union (EU) documents of a mostly legal nature. It is available in all 20 official EU languages, with additional documents being available in the languages of the EU candidate countries. The corpus consists of almost 8,000 documents per language, with an average size of nearly 9 million words per language. Pair-wise paragraph alignment information produced by two different aligners (Vanilla and HunAlign) is available for all 190+ language pair combinations. Most texts have been manually classified according to the EUROVOC subject domains so that the collection can also be used to train and test multi-label classification algorithms and keyword-assignment software. The corpus is encoded in XML, according to the Text Encoding Initiative Guidelines. Due to the large number of parallel texts in many languages, the JRC-Acquis is particularly suitable for carrying out all types of cross-language research, as well as for testing and benchmarking text analysis software across different languages (for instance for alignment, sentence splitting and term extraction).<|reference_end|>
arxiv
@article{steinberger2006the, title={The JRC-Acquis: A multilingual aligned parallel corpus with 20+ languages}, author={Ralf Steinberger, Bruno Pouliquen, Anna Widiger, Camelia Ignat, Tomaz Erjavec, Dan Tufis, Daniel Varga}, journal={Proceedings of the 5th International Conference on Language Resources and Evaluation (LREC'2006), pp. 2142-2147. Genoa, Italy, 24-26 May 2006}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609058}, primaryClass={cs.CL} }
steinberger2006the
arxiv-674764
cs/0609059
Automatic annotation of multilingual text collections with a conceptual thesaurus
<|reference_start|>Automatic annotation of multilingual text collections with a conceptual thesaurus: Automatic annotation of documents with controlled vocabulary terms (descriptors) from a conceptual thesaurus is not only useful for document indexing and retrieval. The mapping of texts onto the same thesaurus furthermore allows to establish links between similar documents. This is also a substantial requirement of the Semantic Web. This paper presents an almost language-independent system that maps documents written in different languages onto the same multilingual conceptual thesaurus, EUROVOC. Conceptual thesauri differ from Natural Language Thesauri in that they consist of relatively small controlled lists of words or phrases with a rather abstract meaning. To automatically identify which thesaurus descriptors describe the contents of a document best, we developed a statistical, associative system that is trained on texts that have previously been indexed manually. In addition to describing the large number of empirically optimised parameters of the fully functional application, we present the performance of the software according to a human evaluation by professional indexers.<|reference_end|>
arxiv
@article{pouliquen2006automatic, title={Automatic annotation of multilingual text collections with a conceptual thesaurus}, author={Bruno Pouliquen, Ralf Steinberger, Camelia Ignat}, journal={Proceedings of the Workshop 'Ontologies and Information Extraction' at the Summer School 'The Semantic Web and Language Technology - Its Potential and Practicalities' (EUROLAN'2003), pp 9-28. Bucharest, Romania, 28 July - 8 August 2003}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609059}, primaryClass={cs.CL cs.IR} }
pouliquen2006automatic
arxiv-674765
cs/0609060
Automatic Identification of Document Translations in Large Multilingual Document Collections
<|reference_start|>Automatic Identification of Document Translations in Large Multilingual Document Collections: Texts and their translations are a rich linguistic resource that can be used to train and test statistics-based Machine Translation systems and many other applications. In this paper, we present a working system that can identify translations and other very similar documents among a large number of candidates, by representing the document contents with a vector of thesaurus terms from a multilingual thesaurus, and by then measuring the semantic similarity between the vectors. Tests on different text types have shown that the system can detect translations with over 96% precision in a large search space of 820 documents or more. The system was tuned to ignore language-specific similarities and to give similar documents in a second language the same similarity score as equivalent documents in the same language. The application can also be used to detect cross-lingual document plagiarism.<|reference_end|>
arxiv
@article{pouliquen2006automatic, title={Automatic Identification of Document Translations in Large Multilingual Document Collections}, author={Bruno Pouliquen, Ralf Steinberger, Camelia Ignat}, journal={Proceedings of the International Conference 'Recent Advances in Natural Language Processing' (RANLP'2003), pp. 401-408. Borovets, Bulgaria, 10 - 12 September 2003}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609060}, primaryClass={cs.CL cs.IR} }
pouliquen2006automatic
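The similarity measurement described above reduces to comparing language-independent descriptor-weight vectors, for instance by cosine similarity. A minimal sketch, with invented descriptor ids and weights:

```python
from math import sqrt

def cosine(u, v):
    """Cosine similarity of two sparse descriptor-weight vectors (dicts)."""
    dot = sum(w * v.get(d, 0.0) for d, w in u.items())
    nu = sqrt(sum(w * w for w in u.values()))
    nv = sqrt(sum(w * w for w in v.values()))
    return dot / (nu * nv) if nu and nv else 0.0

# Thesaurus-descriptor vectors for an English text and a candidate French
# translation; descriptor ids and weights are purely illustrative.
doc_en = {"fisheries": 0.8, "quota": 0.5, "trade": 0.3}
doc_fr = {"fisheries": 0.7, "quota": 0.6, "environment": 0.1}
print(round(cosine(doc_en, doc_fr), 3))   # a high score flags a likely translation
```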
arxiv-674766
cs/0609061
Cross-lingual keyword assignment
<|reference_start|>Cross-lingual keyword assignment: This paper presents a language-independent approach to controlled vocabulary keyword assignment using the EUROVOC thesaurus. Due to the multilingual nature of EUROVOC, the keywords for a document written in one language can be displayed in all eleven official European Union languages. The mapping of documents written in different languages to the same multilingual thesaurus furthermore allows cross-language document comparison. The assignment of the controlled vocabulary thesaurus descriptors is achieved by applying a statistical method that uses a collection of manually indexed documents to identify, for each thesaurus descriptor, a large number of lemmas that are statistically associated to the descriptor. These associated words are then used during the assignment procedure to identify a ranked list of those EUROVOC terms that are most likely to be good keywords for a given document. The paper also describes the challenges of this task and discusses the achieved results of the fully functional prototype.<|reference_end|>
arxiv
@article{steinberger2006cross-lingual, title={Cross-lingual keyword assignment}, author={Ralf Steinberger (European Commission - Joint Research Centre)}, journal={Proceedings of the XVII Conference of the Spanish Society for Natural Language Processing (SEPLN-2001). Procesamiento del Lenguaje Natural, Revista No. 27, pp. 273-280. Jaen, Spain, 12-14 September 2001. ISSN 1135-5948}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609061}, primaryClass={cs.CL cs.IR} }
steinberger2006cross-lingual
arxiv-674767
cs/0609062
Nominal Logic Programming
<|reference_start|>Nominal Logic Programming: Nominal logic is an extension of first-order logic which provides a simple foundation for formalizing and reasoning about abstract syntax modulo consistent renaming of bound names (that is, alpha-equivalence). This article investigates logic programming based on nominal logic. We describe some typical nominal logic programs, and develop the model-theoretic, proof-theoretic, and operational semantics of such programs. Besides being of interest for ensuring the correct behavior of implementations, these results provide a rigorous foundation for techniques for analysis and reasoning about nominal logic programs, as we illustrate via examples.<|reference_end|>
arxiv
@article{cheney2006nominal, title={Nominal Logic Programming}, author={James Cheney, Christian Urban}, journal={ACM Transactions on Programming Languages and Systems 30(5):26, August 2008}, year={2006}, doi={10.1145/1387673.1387675}, archivePrefix={arXiv}, eprint={cs/0609062}, primaryClass={cs.PL cs.LO} }
cheney2006nominal
arxiv-674768
cs/0609063
Extending an Information Extraction tool set to Central and Eastern European languages
<|reference_start|>Extending an Information Extraction tool set to Central and Eastern European languages: In a highly multilingual and multicultural environment such as in the European Commission with soon over twenty official languages, there is an urgent need for text analysis tools that use minimal linguistic knowledge so that they can be adapted to many languages without much human effort. We are presenting two such Information Extraction tools that have already been adapted to various Western and Eastern European languages: one for the recognition of date expressions in text, and one for the detection of geographical place names and the visualisation of the results in geographical maps. An evaluation of the performance has produced very satisfying results.<|reference_end|>
arxiv
@article{ignat2006extending, title={Extending an Information Extraction tool set to Central and Eastern European languages}, author={Camelia Ignat, Bruno Pouliquen, Antonio Ribeiro, Ralf Steinberger (European Commission - Joint Research Centre)}, journal={Proceedings of the International Workshop 'Information Extraction for Slavonic and other Central and Eastern European Languages' (IESL-2003), held at RANLP-2003, pp. 33-39. Borovets, Bulgaria, 8 - 9 September 2003}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609063}, primaryClass={cs.CL cs.IR} }
ignat2006extending
arxiv-674769
cs/0609064
Exploiting multilingual nomenclatures and language-independent text features as an interlingua for cross-lingual text analysis applications
<|reference_start|>Exploiting multilingual nomenclatures and language-independent text features as an interlingua for cross-lingual text analysis applications: We are proposing a simple, but efficient basic approach for a number of multilingual and cross-lingual language technology applications that are not limited to the usual two or three languages, but that can be applied with relatively little effort to larger sets of languages. The approach consists of using existing multilingual linguistic resources such as thesauri, nomenclatures and gazetteers, as well as exploiting the existence of additional more or less language-independent text items such as dates, currency expressions, numbers, names and cognates. Mapping texts onto the multilingual resources and identifying word token links between texts in different languages are basic ingredients for applications such as cross-lingual document similarity calculation, multilingual clustering and categorisation, cross-lingual document retrieval, and tools to provide cross-lingual information access.<|reference_end|>
arxiv
@article{steinberger2006exploiting, title={Exploiting multilingual nomenclatures and language-independent text features as an interlingua for cross-lingual text analysis applications}, author={Ralf Steinberger, Bruno Pouliquen, Camelia Ignat (European Commission - Joint Research Centre)}, journal={Information Society 2004 (IS-2004) - Proceedings B of the 7th International Multiconference - Language Technologies, pages 2-12. Ljubljana, Slovenia, 13-14 October 2004}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609064}, primaryClass={cs.CL cs.IR} }
steinberger2006exploiting
arxiv-674770
cs/0609065
Geocoding multilingual texts: Recognition, disambiguation and visualisation
<|reference_start|>Geocoding multilingual texts: Recognition, disambiguation and visualisation: We are presenting a method to recognise geographical references in free text. Our tool must work on various languages with a minimum of language-dependent resources, except a gazetteer. The main difficulty is to disambiguate these place names by distinguishing places from persons and by selecting the most likely place out of a list of homographic place names world-wide. The system uses a number of language-independent clues and heuristics to disambiguate place name homographs. The final aim is to index texts with the countries and cities they mention and to automatically visualise this information on geographical maps using various tools.<|reference_end|>
arxiv
@article{pouliquen2006geocoding, title={Geocoding multilingual texts: Recognition, disambiguation and visualisation}, author={Bruno Pouliquen, Marco Kimler, Ralf Steinberger, Camelia Ignat, Tamara Oellinger, Ken Blackler, Flavio Fuart, Wajdi Zaghouani, Anna Widiger, Ann-Charlotte Forslund, Clive Best (European Commission - Joint Research Centre)}, journal={Proceedings of the 5th International Conference on Language Resources and Evaluation (LREC-2006), pp. 53-58. Genoa, Italy, 24-26 May 2006}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609065}, primaryClass={cs.CL cs.IR} }
pouliquen2006geocoding
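A hedged sketch of the kind of language-independent disambiguation heuristics the abstract mentions: gazetteer candidates for a homographic place name are scored by place size and by agreement with countries already found in the text. The gazetteer entries and heuristic weights below are invented for illustration.

```python
def disambiguate(name, gazetteer, context_countries):
    """Pick the most plausible place for an ambiguous name.

    Illustrative heuristics: prefer larger places, and strongly boost
    candidates whose country is already mentioned elsewhere in the text.
    """
    def score(place):
        s = place["population"]
        if place["country"] in context_countries:
            s *= 100          # context boost (weight is a made-up choice)
        return s
    return max(gazetteer.get(name, []), key=score, default=None)

# Toy gazetteer: 'Paris' is a homograph.
gazetteer = {"Paris": [
    {"country": "FR", "population": 2_100_000, "lat": 48.86, "lon": 2.35},
    {"country": "US", "population": 25_000, "lat": 33.66, "lon": -95.56},
]}
# Surrounding text mentions the US, so the Texas candidate wins.
print(disambiguate("Paris", gazetteer, context_countries={"US"}))
```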
arxiv-674771
cs/0609066
Building and displaying name relations using automatic unsupervised analysis of newspaper articles
<|reference_start|>Building and displaying name relations using automatic unsupervised analysis of newspaper articles: We present a tool that, from automatically recognised names, tries to infer inter-person relations in order to present associated people on maps. Based on an in-house Named Entity Recognition tool, applied on clusters of an average of 15,000 news articles per day, in 15 different languages, we build a knowledge base that allows extracting statistical co-occurrences of persons and visualising them on a per-person page or in various graphs.<|reference_end|>
arxiv
@article{pouliquen2006building, title={Building and displaying name relations using automatic unsupervised analysis of newspaper articles}, author={Bruno Pouliquen, Ralf Steinberger, Camelia Ignat, Tamara Oellinger (European Commission - Joint Research Centre)}, journal={Proceedings of the 8th International Conference on the Statistical Analysis of Textual Data (JADT-2006). Besancon, 19-21 April 2006}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609066}, primaryClass={cs.CL cs.IR} }
pouliquen2006building
arxiv-674772
cs/0609067
A tool set for the quick and efficient exploration of large document collections
<|reference_start|>A tool set for the quick and efficient exploration of large document collections: We are presenting a set of multilingual text analysis tools that can help analysts in any field to explore large document collections quickly in order to determine whether the documents contain information of interest, and to find the relevant text passages. The automatic tool, which currently exists as a fully functional prototype, is expected to be particularly useful when users repeatedly have to sieve through large collections of documents such as those downloaded automatically from the internet. The proposed system takes a whole document collection as input. It first carries out some automatic analysis tasks (named entity recognition, geo-coding, clustering, term extraction), annotates the texts with the generated meta-information and stores the meta-information in a database. The system then generates a zoomable and hyperlinked geographic map enhanced with information on entities and terms found. When the system is used on a regular basis, it builds up a historical database that contains information on which names have been mentioned together with which other names or places, and users can query this database to retrieve information extracted in the past.<|reference_end|>
arxiv
@article{ignat2006a, title={A tool set for the quick and efficient exploration of large document collections}, author={Camelia Ignat, Bruno Pouliquen, Ralf Steinberger, Tomaz Erjavec (European Commission - Joint Research Centre)}, journal={Proceedings of the Symposium on Safeguards and Nuclear Material Management. 27th Annual Meeting of the European SAfeguards Research and Development Association (ESARDA-2005). London, UK, 10-12 May 2005}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609067}, primaryClass={cs.CL cs.IR} }
ignat2006a
arxiv-674773
cs/0609068
The heterogeneity of inter-contact time distributions: its importance for routing in delay tolerant networks
<|reference_start|>The heterogeneity of inter-contact time distributions: its importance for routing in delay tolerant networks: Prior work on routing in delay tolerant networks (DTNs) has commonly made the assumption that each pair of nodes shares the same inter-contact time distribution as every other pair. The main argument in this paper is that researchers should also be looking at heterogeneous inter-contact time distributions. We demonstrate the presence of such heterogeneity in the often-used Dartmouth Wi-Fi data set. We also show that DTN routing can benefit from knowing these distributions. We first introduce a new stochastic model focusing on the inter-contact time distributions between all pairs of nodes, which we validate on real connectivity patterns. We then analytically derive the mean delivery time for a bundle of information traversing the network for simple single copy routing schemes. The purpose is to examine the theoretic impact of heterogeneous inter-contact time distributions. Finally, we show that we can exploit this user diversity to improve routing performance.<|reference_end|>
arxiv
@article{conan2006the, title={The heterogeneity of inter-contact time distributions: its importance for routing in delay tolerant networks}, author={Vania Conan, Jeremie Leguay, Timur Friedman}, journal={arXiv preprint arXiv:cs/0609068}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609068}, primaryClass={cs.NI} }
conan2006the
arxiv-674774
cs/0609069
Coverage and Connectivity in Three-Dimensional Networks
<|reference_start|>Coverage and Connectivity in Three-Dimensional Networks: Most wireless terrestrial networks are designed based on the assumption that the nodes are deployed on a two-dimensional (2D) plane. However, this 2D assumption is not valid in underwater, atmospheric, or space communications. In fact, recent interest in underwater acoustic ad hoc and sensor networks hints at the need to understand how to design networks in 3D. Unfortunately, the design of 3D networks is surprisingly more difficult than the design of 2D networks. For example, proofs of Kelvin's conjecture and Kepler's conjecture required centuries of research to achieve breakthroughs, whereas their 2D counterparts are trivial to solve. In this paper, we consider the coverage and connectivity issues of 3D networks, where the goal is to find a node placement strategy with 100% sensing coverage of a 3D space, while minimizing the number of nodes required for surveillance. Our results indicate that the use of the Voronoi tessellation of 3D space to create truncated octahedral cells results in the best strategy. In this truncated octahedron placement strategy, the transmission range must be at least 1.7889 times the sensing range in order to maintain connectivity among nodes. If the transmission range is between 1.4142 and 1.7889 times the sensing range, then a hexagonal prism placement strategy or a rhombic dodecahedron placement strategy should be used. Although the required number of nodes in the hexagonal prism and the rhombic dodecahedron placement strategies is the same, this number is 43.25% higher than the number of nodes required by the truncated octahedron placement strategy. We verify by simulation that our placement strategies indeed guarantee ubiquitous coverage. We believe that our approach and our results presented in this paper could be used for extending the processes of 2D network design to 3D networks.<|reference_end|>
arxiv
@article{alam2006coverage, title={Coverage and Connectivity in Three-Dimensional Networks}, author={S. M. Nazrul Alam and Zygmunt J. Haas}, journal={arXiv preprint arXiv:cs/0609069}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609069}, primaryClass={cs.NI} }
alam2006coverage
arxiv-674775
cs/0609070
Exploring Computer Science Concepts with a Ready-made Computer Game Framework
<|reference_start|>Exploring Computer Science Concepts with a Ready-made Computer Game Framework: The prevailing interest in computer games among college students, both for entertainment and as a possible career path, is a major reason for the increasing prevalence of computer game design courses in computer science curricula. Because implementing a computer game requires strong programming skills, game design courses are most often restricted to more advanced computer science students. This paper reports on a ready-made game design and experimentation framework, implemented in Java, that makes game programming more widely accessible. This framework, called Labyrinth, enables students at all programming skill levels to participate in computer game design. We describe the architecture of the framework and discuss programming projects suitable for a wide variety of computer science courses, from capstone to non-major.<|reference_end|>
arxiv
@article{distasio2006exploring, title={Exploring Computer Science Concepts with a Ready-made Computer Game Framework}, author={Joseph Distasio and Thomas P. Way}, journal={arXiv preprint arXiv:cs/0609070}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609070}, primaryClass={cs.OH} }
distasio2006exploring
arxiv-674776
cs/0609071
A kernel method for canonical correlation analysis
<|reference_start|>A kernel method for canonical correlation analysis: Canonical correlation analysis is a technique to extract common features from a pair of multivariate data sets. In complex situations, however, it does not extract useful features because of its linearity. On the other hand, the kernel method used in support vector machines is an efficient approach to improving such a linear method. In this paper, we investigate the effectiveness of applying the kernel method to canonical correlation analysis.<|reference_end|>
arxiv
@article{akaho2006a, title={A kernel method for canonical correlation analysis}, author={Shotaro Akaho}, journal={arXiv preprint arXiv:cs/0609071}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609071}, primaryClass={cs.LG cs.CV} }
akaho2006a
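A minimal sketch of kernel CCA in the spirit of the abstract: replace the data by centered Gram matrices and solve a regularized generalized eigenproblem. The RBF kernel width, the regularizer, and the toy data are assumptions made only for illustration.

import numpy as np
from scipy.linalg import eigh

def rbf_gram(X, gamma):
    sq = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
    return np.exp(-gamma * sq)

def center(K):
    n = K.shape[0]
    H = np.eye(n) - np.ones((n, n)) / n   # centering matrix
    return H @ K @ H

def kcca(X, Y, gamma=1.0, reg=1e-3):
    n = X.shape[0]
    Kx, Ky = center(rbf_gram(X, gamma)), center(rbf_gram(Y, gamma))
    # Generalized eigenproblem for the leading canonical correlation.
    A = np.block([[np.zeros((n, n)), Kx @ Ky],
                  [Ky @ Kx, np.zeros((n, n))]])
    B = np.block([[Kx @ Kx + reg * np.eye(n), np.zeros((n, n))],
                  [np.zeros((n, n)), Ky @ Ky + reg * np.eye(n)]])
    vals, vecs = eigh(A, B)
    return vals[-1], vecs[:n, -1], vecs[n:, -1]   # rho, alpha, beta

rng = np.random.default_rng(0)
t = rng.uniform(0, 2 * np.pi, 100)
X = np.c_[np.cos(t), np.sin(t)] + 0.05 * rng.standard_normal((100, 2))
Y = np.c_[t, t ** 2] + 0.05 * rng.standard_normal((100, 2))
print("leading kernel canonical correlation:", kcca(X, Y)[0])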
arxiv-674777
cs/0609072
The Connectivity of Boolean Satisfiability: Computational and Structural Dichotomies
<|reference_start|>The Connectivity of Boolean Satisfiability: Computational and Structural Dichotomies: Boolean satisfiability problems are an important benchmark for questions about complexity, algorithms, heuristics and threshold phenomena. Recent work on heuristics, and the satisfiability threshold has centered around the structure and connectivity of the solution space. Motivated by this work, we study structural and connectivity-related properties of the space of solutions of Boolean satisfiability problems and establish various dichotomies in Schaefer's framework. On the structural side, we obtain dichotomies for the kinds of subgraphs of the hypercube that can be induced by the solutions of Boolean formulas, as well as for the diameter of the connected components of the solution space. On the computational side, we establish dichotomy theorems for the complexity of the connectivity and st-connectivity questions for the graph of solutions of Boolean formulas. Our results assert that the intractable side of the computational dichotomies is PSPACE-complete, while the tractable side - which includes but is not limited to all problems with polynomial time algorithms for satisfiability - is in P for the st-connectivity question, and in coNP for the connectivity question. The diameter of components can be exponential for the PSPACE-complete cases, whereas in all other cases it is linear; thus, small diameter and tractability of the connectivity problems are remarkably aligned. The crux of our results is an expressibility theorem showing that in the tractable cases, the subgraphs induced by the solution space possess certain good structural properties, whereas in the intractable cases, the subgraphs can be arbitrary.<|reference_end|>
arxiv
@article{gopalan2006the, title={The Connectivity of Boolean Satisfiability: Computational and Structural Dichotomies}, author={Parikshit Gopalan, Phokion G. Kolaitis, Elitza Maneva, Christos H. Papadimitriou}, journal={Extended abstract in Proceedings of ICALP 2006, pp 346-357}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609072}, primaryClass={cs.CC} }
gopalan2006the
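The connectivity questions studied above can be made concrete by brute force on a small instance: enumerate the satisfying assignments, join those at Hamming distance 1, and count the components of the induced subgraph of the hypercube. The toy formula below is arbitrary.

from itertools import product

n = 4
clauses = [(1, 2), (-1, 3), (2, -3, 4)]   # ints: +v / -v literals

def satisfies(assign):   # assign: tuple of 0/1, variables are 1-based
    return all(any((lit > 0) == bool(assign[abs(lit) - 1]) for lit in c)
               for c in clauses)

sols = [a for a in product((0, 1), repeat=n) if satisfies(a)]
adj = {s: [t for t in sols
           if sum(x != y for x, y in zip(s, t)) == 1] for s in sols}

seen, components = set(), 0
for s in sols:
    if s not in seen:
        components += 1
        stack = [s]
        while stack:
            u = stack.pop()
            if u not in seen:
                seen.add(u)
                stack.extend(adj[u])
print(len(sols), "solutions in", components, "connected component(s)")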
arxiv-674778
cs/0609073
Optimal power allocation for downlink cooperative cellular networks
<|reference_start|>Optimal power allocation for downlink cooperative cellular networks: This paper has been withdrawn by the author<|reference_end|>
arxiv
@article{pischella2006optimal, title={Optimal power allocation for downlink cooperative cellular networks}, author={Mylene Pischella, Jean-Claude Belfiore}, journal={arXiv preprint arXiv:cs/0609073}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609073}, primaryClass={cs.IT math.IT} }
pischella2006optimal
arxiv-674779
cs/0609074
A Non-anchored Unified Naming System for Ad Hoc Computing Environments
<|reference_start|>A Non-anchored Unified Naming System for Ad Hoc Computing Environments: A ubiquitous computing environment consists of many resources that need to be identified by users and applications. Users and developers require some way to identify resources by human-readable names. In addition, ubiquitous computing environments impose additional requirements, such as the ability to work well in ad hoc situations and the provision of names that depend on context. The Non-anchored Unified Naming (NUN) system was designed to satisfy these requirements. It is based on relative naming among resources and provides the ability to name arbitrary types of resources. By having resources themselves take part in naming, resources are able to contribute their specialized knowledge to the name resolution process, making context-dependent mapping of names to resources possible. The ease with which new resource types can be added makes it simple to incorporate new types of contextual information within names. In this paper, we describe the naming system and evaluate its use.<|reference_end|>
arxiv
@article{chung2006a, title={A Non-anchored Unified Naming System for Ad Hoc Computing Environments}, author={Yoo Chul Chung and Dongman Lee}, journal={arXiv preprint arXiv:cs/0609074}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609074}, primaryClass={cs.DC cs.AR cs.NI} }
chung2006a
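A toy sketch of the relative-naming idea in the abstract, where each resource resolves the next name component itself, so context-dependent mappings become possible. Every class and name here is invented for illustration; this is not the NUN system's actual API.

class Resource:
    def __init__(self, name):
        self.name, self.links = name, {}

    def link(self, label, resource):
        self.links[label] = resource

    def resolve(self, path):
        # Each hop delegates to the current resource's own knowledge.
        node = self
        for label in path.split("/"):
            node = node.links[label]
        return node

office = Resource("office")
printer_a, printer_b = Resource("laser-1"), Resource("inkjet-2")
office.link("printer", printer_a)       # context: the default printer here
printer_a.link("backup", printer_b)
print(office.resolve("printer/backup").name)   # -> inkjet-2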
arxiv-674780
cs/0609075
On factorization and solution of multidimensional linear partial differential equations
<|reference_start|>On factorization and solution of multidimensional linear partial differential equations: We describe a method of obtaining closed-form complete solutions of certain second-order linear partial differential equations with more than two independent variables. This method generalizes the classical method of Laplace transformations of second-order hyperbolic equations in the plane and is based on an idea given by Ulisse Dini in 1902.<|reference_end|>
arxiv
@article{tsarev2006on, title={On factorization and solution of multidimensional linear partial differential equations}, author={S.P. Tsarev}, journal={arXiv preprint arXiv:cs/0609075}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609075}, primaryClass={cs.SC nlin.SI} }
tsarev2006on
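For orientation, the classical planar situation that the paper generalizes can be stated compactly; this is standard material on Laplace invariants, not the paper's new multidimensional result. For the hyperbolic equation

\[
u_{xy} + a(x,y)\,u_x + b(x,y)\,u_y + c(x,y)\,u = 0,
\qquad h = a_x + ab - c, \quad k = b_y + ab - c,
\]

one has the factorizations

\[
h = 0 \;\Longrightarrow\; (\partial_x + b)(\partial_y + a)\,u = 0,
\qquad
k = 0 \;\Longrightarrow\; (\partial_y + a)(\partial_x + b)\,u = 0,
\]

and in either case the equation is integrated by two quadratures; when neither invariant vanishes, the Laplace transformation passes to a new equation, in the hope that an invariant of some iterate vanishes.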
arxiv-674781
cs/0609076
Asymptotic Spectral Distribution of Crosscorrelation Matrix in Asynchronous CDMA
<|reference_start|>Asymptotic Spectral Distribution of Crosscorrelation Matrix in Asynchronous CDMA: Asymptotic spectral distribution (ASD) of the crosscorrelation matrix is investigated for a random spreading short/long-code asynchronous direct sequence-code division multiple access (DS-CDMA) system. The discrete-time decision statistics are obtained as the output samples of a bank of symbol matched filters of all users. The crosscorrelation matrix is studied when the number of symbols transmitted by each user tends to infinity. Two levels of asynchronism are considered. One is symbol-asynchronous but chip-synchronous, and the other is chip-asynchronous. The existence of a nonrandom ASD is proved by moment convergence theorem, where the focus is on the derivation of asymptotic eigenvalue moments (AEM) of the crosscorrelation matrix. A combinatorics approach based on noncrossing partition of set partition theory is adopted for AEM computation. The spectral efficiency and the minimum mean-square-error (MMSE) achievable by a linear receiver of asynchronous CDMA are plotted by AEM using a numerical method.<|reference_end|>
arxiv
@article{hwang2006asymptotic, title={Asymptotic Spectral Distribution of Crosscorrelation Matrix in Asynchronous CDMA}, author={Chien-Hwa Hwang}, journal={arXiv preprint arXiv:cs/0609076}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609076}, primaryClass={cs.IT math.IT} }
hwang2006asymptotic
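The asymptotic eigenvalue moments that the abstract computes combinatorially can be estimated by simple Monte Carlo for a finite system; the sketch below assumes synchronous chips and illustrative sizes, so it only conveys the flavor of the quantities involved.

import numpy as np

rng = np.random.default_rng(1)
N, K, trials = 64, 32, 200          # spreading gain, users, averaging runs
moments = np.zeros(4)
for _ in range(trials):
    S = rng.choice([-1.0, 1.0], size=(N, K)) / np.sqrt(N)   # random spreading
    R = S.T @ S                      # K x K crosscorrelation matrix
    Rp = np.eye(K)
    for k in range(4):
        Rp = Rp @ R
        moments[k] += np.trace(Rp) / K / trials
print("empirical moments E[tr(R^k)/K], k=1..4:", moments.round(3))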
arxiv-674782
cs/0609077
On Intentional Attacks and Protections in Complex Communication Networks
<|reference_start|>On Intentional Attacks and Protections in Complex Communication Networks: Motivated by recent developments in the theory of complex networks, we examine the robustness of communication networks under an intentional attack that takes down network nodes in decreasing order of their nodal degrees. In this paper, we study two different effects that have been largely missed in existing results: (i) some communication networks, like the Internet, are too large for anyone to have global information on their topologies, which makes an accurate intentional attack practically impossible; and (ii) most attacks in communication networks propagate from one node to its neighborhood node(s), utilizing local network-topology information only. We show that incomplete global information has different impacts on the intentional attack in different circumstances, while local information-based attacks can actually be highly efficient. Such insights should be helpful for the future development of efficient network attack/protection schemes.<|reference_end|>
arxiv
@article{xiao2006on, title={On Intentional Attacks and Protections in Complex Communication Networks}, author={Shi Xiao and Gaoxi Xiao}, journal={arXiv preprint arXiv:cs/0609077}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609077}, primaryClass={cs.NI} }
xiao2006on
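A simulation sketch of the degree-ordered intentional attack analyzed above: repeatedly remove the currently highest-degree node and track the giant component. The scale-free topology and the sizes are illustrative, and networkx is assumed available.

import networkx as nx

G = nx.barabasi_albert_graph(2000, 3, seed=0)   # scale-free toy topology
giant = []
for _ in range(200):
    v = max(G.degree, key=lambda d: d[1])[0]    # current highest-degree node
    G.remove_node(v)
    giant.append(max(len(c) for c in nx.connected_components(G)))
print("giant component after 50/100/200 removals:",
      giant[49], giant[99], giant[199])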
arxiv-674783
cs/0609078
A Continuum Theory for Unstructured Mesh Generation in Two Dimensions
<|reference_start|>A Continuum Theory for Unstructured Mesh Generation in Two Dimensions: A continuum description of unstructured meshes in two dimensions, both for planar and curved surface domains, is proposed. The meshes described are those which, in the limit of an increasingly finer mesh (smaller cells), and away from irregular vertices, have ideally-shaped cells (squares or equilateral triangles), and can therefore be completely described by two local properties: local cell size and local edge directions. The connection between the two properties is derived by defining a Riemannian manifold whose geodesics trace the edges of the mesh. A function $\phi$, proportional to the logarithm of the cell size, is shown to obey the Poisson equation, with localized charges corresponding to irregular vertices. The problem of finding a suitable manifold for a given domain is thus shown to exactly reduce to an Inverse Poisson problem on $\phi$, of finding a distribution of localized charges adhering to the conditions derived for boundary alignment. Possible applications to mesh generation are discussed.<|reference_end|>
arxiv
@article{bunin2006a, title={A Continuum Theory for Unstructured Mesh Generation in Two Dimensions}, author={Guy Bunin}, journal={arXiv preprint arXiv:cs/0609078}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609078}, primaryClass={cs.CG} }
bunin2006a
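The paper's central relation, a log-cell-size function phi obeying a Poisson equation with localized charges at irregular vertices, can be visualized with a small finite-difference solve; the grid, the charge placement, and the sign convention below are illustrative guesses.

import numpy as np

n = 65
phi = np.zeros((n, n))
rho = np.zeros((n, n))
rho[n // 2, n // 2] = 1.0          # one localized "irregular vertex" charge

# Jacobi iterations for laplacian(phi) = -rho with phi = 0 on the boundary.
for _ in range(5000):
    phi[1:-1, 1:-1] = 0.25 * (phi[2:, 1:-1] + phi[:-2, 1:-1] +
                              phi[1:-1, 2:] + phi[1:-1, :-2] +
                              rho[1:-1, 1:-1])
cell_size = np.exp(phi)            # local cell size ~ exp(phi), up to scale
print("cell size near charge vs. corner:",
      cell_size[n // 2, n // 2].round(3), cell_size[5, 5].round(3))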
arxiv-674784
cs/0609079
Modern Statistics by Kriging
<|reference_start|>Modern Statistics by Kriging: We present statistics (S-statistics) based only on the random variable (not the random value), with the mean squared error of mean estimation as the concept of error.<|reference_end|>
arxiv
@article{suslo2006modern, title={Modern Statistics by Kriging}, author={Tomasz Suslo}, journal={arXiv preprint arXiv:cs/0609079}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609079}, primaryClass={cs.NA cs.CE} }
suslo2006modern
arxiv-674785
cs/0609080
Solution of a Problem of Barendregt on Sensible lambda-Theories
<|reference_start|>Solution of a Problem of Barendregt on Sensible lambda-Theories: H is the theory extending β-conversion by identifying all closed unsolvables. Hω is the closure of this theory under the ω-rule (and β-conversion). A long-standing conjecture of H. Barendregt states that the provable equations of Hω form a Π^1_1-complete set. Here we prove that conjecture.<|reference_end|>
arxiv
@article{intrigila2006solution, title={Solution of a Problem of Barendregt on Sensible lambda-Theories}, author={Benedetto Intrigila and Richard Statman}, journal={Logical Methods in Computer Science, Volume 2, Issue 4 (October 18, 2006) lmcs:2241}, year={2006}, doi={10.2168/LMCS-2(4:5)2006}, archivePrefix={arXiv}, eprint={cs/0609080}, primaryClass={cs.LO} }
intrigila2006solution
arxiv-674786
cs/0609081
Recurrence relations and fast algorithms
<|reference_start|>Recurrence relations and fast algorithms: We construct fast algorithms for evaluating transforms associated with families of functions which satisfy recurrence relations. These include algorithms both for computing the coefficients in linear combinations of the functions, given the values of these linear combinations at certain points, and, vice versa, for evaluating such linear combinations at those points, given the coefficients in the linear combinations; such procedures are also known as analysis and synthesis of series of certain special functions. The algorithms of the present paper are efficient in the sense that their computational costs are proportional to n (ln n) (ln(1/epsilon))^3, where n is the amount of input and output data, and epsilon is the precision of computations. Stated somewhat more precisely, we find a positive real number C such that, for any positive integer n > 10, the algorithms require at most C n (ln n) (ln(1/epsilon))^3 floating-point operations and words of memory to evaluate at n appropriately chosen points any linear combination of n special functions, given the coefficients in the linear combination, where epsilon is the precision of computations.<|reference_end|>
arxiv
@article{tygert2006recurrence, title={Recurrence relations and fast algorithms}, author={Mark Tygert}, journal={Recurrence relations and fast algorithms, Applied and Computational Harmonic Analysis, 28 (1): 121-128, 2010}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609081}, primaryClass={cs.CE cs.NA} }
tygert2006recurrence
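For functions defined by a three-term recurrence, the textbook synthesis is Clenshaw's backward recurrence, which evaluates one point in O(n); the fast algorithms of the abstract accelerate many-point versions of exactly this kind of evaluation. The sketch below (for Chebyshev polynomials) is the baseline, not the paper's scheme.

import numpy as np

def clenshaw_chebyshev(coeffs, x):
    """Evaluate sum_k coeffs[k] * T_k(x) via backward recurrence."""
    b1 = b2 = 0.0
    for c in reversed(coeffs[1:]):
        b1, b2 = 2.0 * x * b1 - b2 + c, b1
    return x * b1 - b2 + coeffs[0]

coeffs = [1.0, 0.5, -0.25, 0.125]
x = 0.3
# Reference value via T_k(x) = cos(k * arccos(x)) on [-1, 1].
ref = sum(c * np.cos(k * np.arccos(x)) for k, c in enumerate(coeffs))
print(clenshaw_chebyshev(coeffs, x), "vs", ref)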
arxiv-674787
cs/0609082
Classifying extrema using intervals
<|reference_start|>Classifying extrema using intervals: We present a straightforward and verified method of deciding whether an n-dimensional point x (n>=1), such that \nabla f(x)=0, is a local minimizer, a local maximizer, or just a saddle point of a real-valued function f. The method scales linearly with the dimensionality of the problem and never produces false results.<|reference_end|>
arxiv
@article{gutowski2006classifying, title={Classifying extrema using intervals}, author={Marek W. Gutowski}, journal={arXiv preprint arXiv:cs/0609082}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609082}, primaryClass={cs.MS cs.CC cs.NA} }
gutowski2006classifying
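One natural way to realize such a verified classification, sketched under the assumption that interval enclosures of the Hessian entries at x are available, is to bound the eigenvalues with Gershgorin discs over intervals; this is in the spirit of the abstract but not necessarily the paper's exact procedure.

def classify(H):
    """H[i][j] = (lo, hi) interval enclosure of the Hessian entry."""
    n = len(H)
    pos = neg = True
    for i in range(n):
        radius = sum(max(abs(H[i][j][0]), abs(H[i][j][1]))
                     for j in range(n) if j != i)
        lo, hi = H[i][i]
        if lo - radius <= 0:       # disc may touch the non-positive axis
            pos = False
        if hi + radius >= 0:       # disc may touch the non-negative axis
            neg = False
    if pos:
        return "local minimum"     # all eigenvalues certainly positive
    if neg:
        return "local maximum"     # all eigenvalues certainly negative
    return "indefinite or undecided"

H = [[(4.0, 4.2), (-0.3, 0.3)],
     [(-0.3, 0.3), (2.5, 2.7)]]    # made-up interval Hessian
print(classify(H))                 # -> local minimum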
arxiv-674788
cs/0609083
k-Colorability of P5-free graphs
<|reference_start|>k-Colorability of P5-free graphs: A polynomial time algorithm that determines for a fixed integer k whether or not a P5-free graph can be k-colored is presented in this paper. If such a coloring exists, the algorithm will produce a valid k-coloring.<|reference_end|>
arxiv
@article{hoang2006k-colorability, title={k-Colorability of P5-free graphs}, author={C. T. Hoang, J. Sawada, X. Shu}, journal={arXiv preprint arXiv:cs/0609083}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609083}, primaryClass={cs.DM cs.DS} }
hoang2006k-colorability
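For contrast with the paper's polynomial-time algorithm, here is the generic exponential backtracking check of k-colorability, which also returns a valid coloring when one exists; the P5-free structure is what the paper exploits to avoid this blow-up.

def k_colorable(adj, k):
    n = len(adj)
    color = [-1] * n
    def extend(v):
        if v == n:
            return True
        for c in range(k):
            if all(color[u] != c for u in adj[v]):
                color[v] = c
                if extend(v + 1):
                    return True
        color[v] = -1
        return False
    return color if extend(0) else None

cycle5 = [[1, 4], [0, 2], [1, 3], [2, 4], [3, 0]]   # C5, which is P5-free
print("2-coloring:", k_colorable(cycle5, 2))        # None: odd cycle
print("3-coloring:", k_colorable(cycle5, 3))        # a valid coloring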
arxiv-674789
cs/0609084
Non-photorealistic image rendering with a labyrinthine tiling
<|reference_start|>Non-photorealistic image rendering with a labyrinthine tiling: The paper describes a new image-processing method for non-photorealistic rendering. The algorithm is based on the random generation of gray tones and on competing statistical requirements. The gray-tone value of each pixel in the starting image is replaced by selecting among randomly generated tone values, according to the statistics of the nearest-neighbor and next-nearest-neighbor pixels. Two competing conditions for replacing the tone values - one imposed on the local mean value, the other on the local variance - produce a peculiar pattern in the image. This pattern has the aspect of a labyrinthine tiling. For certain subjects, the pattern enhances the look of the image.<|reference_end|>
arxiv
@article{sparavigna2006non-photorealistic, title={Non-photorealistic image rendering with a labyrinthine tiling}, author={A. Sparavigna, B. Montrucchio}, journal={arXiv preprint arXiv:cs/0609084}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609084}, primaryClass={cs.GR} }
sparavigna2006non-photorealistic
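A loose reimplementation sketch of the rendering loop the abstract describes: each pixel tone is replaced by the best of several random candidates, scored by two competing local criteria (closeness to the neighborhood mean versus preserving its variance). The window size, candidate count, and weighting are guesses, not the paper's parameters.

import numpy as np

rng = np.random.default_rng(0)
img = rng.integers(0, 256, (64, 64)).astype(float)   # stand-in input image

out = img.copy()
for y in range(1, 63):
    for x in range(1, 63):
        nb = out[y-1:y+2, x-1:x+2]
        mean, var = nb.mean(), nb.var()
        cands = rng.integers(0, 256, 8)
        # Competing requirements: stay near the local mean, but also keep
        # the local variance from collapsing.
        score = np.abs(cands - mean) - 0.5 * np.abs(np.abs(cands - mean)
                                                    - np.sqrt(var))
        out[y, x] = cands[int(np.argmin(score))]
print(out[:3, :3])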
arxiv-674790
cs/0609085
Improved Approximate String Matching and Regular Expression Matching on Ziv-Lempel Compressed Texts
<|reference_start|>Improved Approximate String Matching and Regular Expression Matching on Ziv-Lempel Compressed Texts: We study the approximate string matching and regular expression matching problem for the case when the text to be searched is compressed with the Ziv-Lempel adaptive dictionary compression schemes. We present a time-space trade-off that leads to algorithms improving the previously known complexities for both problems. In particular, we significantly improve the space bounds, which in practical applications are likely to be a bottleneck.<|reference_end|>
arxiv
@article{bille2006improved, title={Improved Approximate String Matching and Regular Expression Matching on Ziv-Lempel Compressed Texts}, author={Philip Bille, Rolf Fagerberg, Inge Li Goertz}, journal={arXiv preprint arXiv:cs/0609085}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609085}, primaryClass={cs.DS} }
bille2006improved
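As background for what "Ziv-Lempel compressed" means here, a minimal LZ78-style parse is easy to write down; the compressed-matching algorithms of the paper operate over this phrase/dictionary structure and are far more involved.

def lz78_parse(text):
    dictionary, phrases = {"": 0}, []
    current = ""
    for ch in text:
        if current + ch in dictionary:
            current += ch
        else:
            phrases.append((dictionary[current], ch))   # (back-ref, new char)
            dictionary[current + ch] = len(dictionary)
            current = ""
    if current:   # flush a trailing phrase that matched an existing entry
        phrases.append((dictionary[current[:-1]], current[-1]))
    return phrases

print(lz78_parse("abababbabb"))
# -> [(0, 'a'), (0, 'b'), (1, 'b'), (3, 'b'), (3, 'b')]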
arxiv-674791
cs/0609086
About the Capacity of Flat and Self-Organized Ad Hoc and Hybrid Networks
<|reference_start|>About the Capacity of Flat and Self-Organized Ad Hoc and Hybrid Networks: The specific challenges of ad hoc networking foster a strong research effort on efficient protocol design. Routing protocols based on a self-organized structure have been studied principally for the robustness and scalability they provide. On the other hand, self-organization schemes may decrease the network capacity, since they concentrate the traffic on privileged links. This paper presents four models for evaluating the capacity of a routing scheme on 802.11-like networks. Our approach consists in modeling the radio resource sharing principles of 802.11-like MAC protocols as a set of linear constraints. We implement two models of fairness: the first assumes that nodes have fair access to the channel, while the second assumes fair access on the radio links. We then develop a pessimistic and an optimistic scenario of spatial reuse of the medium, yielding a lower bound and an upper bound on the network capacity for each fairness case. Our models are independent of the routing protocol and therefore provide a relevant framework for comparing protocols. We apply our models to a comparative analysis of the well-known shortest-path-based flat routing protocol OLSR against two main self-organized structure approaches, the VSR and Wu & Li protocols. This study concludes with an assessment of the relevance of self-organized approaches from the network-capacity point of view.<|reference_end|>
arxiv
@article{rivano2006about, title={About the Capacity of Flat and Self-Organized Ad Hoc and Hybrid Networks}, author={Hervé Rivano (INRIA Sophia Antipolis), Fabrice Theoleyre (INRIA Rhône-Alpes), Fabrice Valois (INRIA Rhône-Alpes)}, journal={arXiv preprint arXiv:cs/0609086}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609086}, primaryClass={cs.NI} }
rivano2006about
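A toy instance of the modeling approach just described: encode channel sharing and a fairness requirement as linear constraints and maximize throughput with an LP solver. The three-link clique and the particular fairness inequality are invented for illustration.

import numpy as np
from scipy.optimize import linprog

# Variables: flows f1, f2, f3 on three links that all interfere with each
# other (one 802.11-style clique): f1 + f2 + f3 <= 1 unit of channel time.
# A fairness-style bound: link 1 may not exceed link 2.
c = -np.ones(3)                         # maximize f1 + f2 + f3
A_ub = [[1, 1, 1],                      # shared-channel clique constraint
        [1, -1, 0]]                     # f1 <= f2
b_ub = [1.0, 0.0]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(0, None)] * 3)
print("optimal flows:", res.x, "total:", -res.fun)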
arxiv-674792
cs/0609087
A comparative analysis of the geometrical surface texture of a real and virtual model of a tooth flank of a cylindrical gear
<|reference_start|>A comparative analysis of the geometrical surface texture of a real and virtual model of a tooth flank of a cylindrical gear: The paper presents the methodology of modelling tooth flanks of cylindrical gears in the Cad environment. The modelling consists in a computer simulation of gear generation. A model of tooth flanks is an envelope curve of a family of envelopes that originate from the rolling motion of a solid tool model in relation to a solid model of the cylindrical gear. The surface stereometry and topography of the tooth flanks, hobbed and chiselled by Fellows method, are compared to their numerical models. Metrological measurements of the real gears were carried out using a coordinated measuring machine and a two - and a three-dimensional profilometer. A computer simulation of the gear generation was performed in the Mechanical Desktop environment.<|reference_end|>
arxiv
@article{michalski2006a, title={A comparative analysis of the geometrical surface texture of a real and virtual model of a tooth flank of a cylindrical gear}, author={Jacek Michalski, Leszek Skoczylas}, journal={arXiv preprint arXiv:cs/0609087}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609087}, primaryClass={cs.CE} }
michalski2006a
arxiv-674793
cs/0609088
Deriving the Normalized Min-Sum Algorithm from Cooperative Optimization
<|reference_start|>Deriving the Normalized Min-Sum Algorithm from Cooperative Optimization: The normalized min-sum algorithm can achieve near-optimal performance when decoding LDPC codes. However, understanding the mathematical principle underlying the algorithm remains a critical question. Traditionally, the normalized min-sum algorithm has been viewed as a good approximation to the sum-product algorithm, the best known algorithm for decoding LDPC codes and Turbo codes. This paper offers an alternative approach to understanding the normalized min-sum algorithm. The algorithm is derived directly from cooperative optimization, a newly discovered general method for global/combinatorial optimization. This approach provides another theoretical basis for the algorithm and offers new insights into its power and limitations. It also gives us a general framework for designing new decoding algorithms.<|reference_end|>
arxiv
@article{huang2006deriving, title={Deriving the Normalized Min-Sum Algorithm from Cooperative Optimization}, author={Xiaofei Huang}, journal={arXiv preprint arXiv:cs/0609088}, year={2006}, doi={10.1109/ITW2.2006.323788}, archivePrefix={arXiv}, eprint={cs/0609088}, primaryClass={cs.IT math.IT} }
huang2006deriving
arxiv-674794
cs/0609089
Fast Min-Sum Algorithms for Decoding of LDPC over GF(q)
<|reference_start|>Fast Min-Sum Algorithms for Decoding of LDPC over GF(q): In this paper, we present a fast min-sum algorithm for decoding LDPC codes over GF(q). Our algorithm differs from the one presented by David Declercq and Marc Fossorier at ISIT '05 only in the way it speeds up the horizontal scan of the min-sum algorithm. Declercq and Fossorier's algorithm speeds up the computation by reducing the number of configurations, while ours uses dynamic programming instead. Compared with the configuration-reduction algorithm, the dynamic-programming one is simpler to design because it has fewer parameters to tune. Furthermore, it does not suffer the performance degradation caused by configuration reduction, because it searches the whole configuration space efficiently through dynamic programming. Both algorithms have the same level of complexity and use simple operations suitable for hardware implementations.<|reference_end|>
arxiv
@article{huang2006fast, title={Fast Min-Sum Algorithms for Decoding of LDPC over GF(q)}, author={Xiaofei Huang, Suquan Ding, Zhixing Yang, Youshou Wu}, journal={arXiv preprint arXiv:cs/0609089}, year={2006}, doi={10.1109/ITW2.2006.323764}, archivePrefix={arXiv}, eprint={cs/0609089}, primaryClass={cs.IT math.IT} }
huang2006fast
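A sketch of a dynamic-programming check-node update of the kind the abstract proposes for the horizontal scan, written for a toy case (q prime, all parity coefficients equal to 1): forward and backward tables over partial sums replace the enumeration of all q^(d-1) configurations.

def check_update(msgs, q):
    """msgs[i][a]: cost that edge i carries symbol a. Returns extrinsic
    costs out[j][s] = min total cost of the other edges summing to s.
    For the parity constraint sum == 0 (mod q), the extrinsic cost of
    edge j taking value a is then out[j][(-a) % q]."""
    d = len(msgs)
    INF = float("inf")
    # fwd[i][s]: cheapest way for edges 0..i-1 to sum to s (mod q).
    fwd = [[INF] * q for _ in range(d + 1)]
    bwd = [[INF] * q for _ in range(d + 1)]
    fwd[0][0] = bwd[d][0] = 0.0
    for i in range(d):
        for s in range(q):
            for a in range(q):
                v = fwd[i][s] + msgs[i][a]
                if v < fwd[i + 1][(s + a) % q]:
                    fwd[i + 1][(s + a) % q] = v
    for i in range(d - 1, -1, -1):
        for s in range(q):
            for a in range(q):
                v = bwd[i + 1][s] + msgs[i][a]
                if v < bwd[i][(s + a) % q]:
                    bwd[i][(s + a) % q] = v
    return [[min(fwd[j][s] + bwd[j + 1][(t - s) % q] for s in range(q))
             for t in range(q)] for j in range(d)]

print(check_update([[0, 1, 3], [0, 2, 1], [1, 0, 2]], 3))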
arxiv-674795
cs/0609090
Single-Scan Min-Sum Algorithms for Fast Decoding of LDPC Codes
<|reference_start|>Single-Scan Min-Sum Algorithms for Fast Decoding of LDPC Codes: Many implementations for decoding LDPC codes are based on the (normalized/offset) min-sum algorithm due to its satisfactory performance and simplicity of operations. Usually, each iteration of the min-sum algorithm consists of two scans, the horizontal scan and the vertical scan. This paper presents a single-scan version of the min-sum algorithm to speed up the decoding process. It can also reduce memory usage or wiring, because it needs only the addressing from check nodes to variable nodes, while the original min-sum algorithm requires that addressing plus the addressing from variable nodes to check nodes. To cut memory usage or wiring down further, another version of the single-scan min-sum algorithm is presented, in which the messages of the algorithm are represented by single-bit values instead of fixed-point ones. A software implementation has shown that the single-scan min-sum algorithm is more than twice as fast as the original min-sum algorithm.<|reference_end|>
arxiv
@article{huang2006single-scan, title={Single-Scan Min-Sum Algorithms for Fast Decoding of LDPC Codes}, author={Xiaofei Huang}, journal={arXiv preprint arXiv:cs/0609090}, year={2006}, doi={10.1109/ITW2.2006.323774}, archivePrefix={arXiv}, eprint={cs/0609090}, primaryClass={cs.IT math.IT} }
huang2006single-scan
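A compact sketch of the single-scan idea for a binary (normalized) min-sum decoder: only the check-to-variable messages are stored, and the variable-to-check inputs are recovered on the fly from the posterior totals. The toy code, the normalization factor 0.8, and the iteration count are illustrative assumptions.

import numpy as np

H = np.array([[1, 1, 0, 1, 0, 0],      # toy parity-check matrix
              [0, 1, 1, 0, 1, 0],
              [1, 0, 0, 0, 1, 1],
              [0, 0, 1, 1, 0, 1]])
alpha = 0.8                             # normalization factor

def decode(llr, iters=20):
    m, n = H.shape
    msg = np.zeros((m, n))              # check-to-variable messages only
    for _ in range(iters):
        total = llr + msg.sum(axis=0)   # posterior LLR of each variable
        for c in range(m):
            idx = np.flatnonzero(H[c])
            ext = total[idx] - msg[c, idx]   # variable-to-check, on the fly
            sgn = np.prod(np.sign(ext + 1e-12))   # offset avoids zero signs
            mag = np.abs(ext)
            for t, v in enumerate(idx):
                s = sgn * np.sign(ext[t] + 1e-12)  # sign of the other edges
                msg[c, v] = alpha * s * np.delete(mag, t).min()
        hard = ((llr + msg.sum(axis=0)) < 0).astype(int)
        if not ((H @ hard) % 2).any():  # all checks satisfied: stop early
            break
    return hard

print(decode(np.array([2.0, -0.5, 1.5, 1.0, 0.8, -0.3])))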
arxiv-674796
cs/0609091
Using shifted conjugacy in braid-based cryptography
<|reference_start|>Using shifted conjugacy in braid-based cryptography: Conjugacy is not the only possible primitive for designing braid-based protocols. To illustrate this principle, we describe a Fiat-Shamir-style authentication protocol that can be implemented using any binary operation that satisfies the left self-distributive law. Conjugation is an example of such an operation, but there are others, in particular the shifted conjugation on Artin's braid group B_∞, and the finite Laver tables. In both cases, the underlying structures have a high combinatorial complexity, and they lead to difficult problems.<|reference_end|>
arxiv
@article{dehornoy2006using, title={Using shifted conjugacy in braid-based cryptography}, author={Patrick Dehornoy (LMNO)}, journal={arXiv preprint arXiv:cs/0609091}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609091}, primaryClass={cs.CR} }
dehornoy2006using
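The finite Laver tables mentioned above are completely determined by the left self-distributive (LSD) law x*(y*z) = (x*y)*(x*z) together with x*1 = x+1 (cyclically), which makes them easy to compute; the sketch builds A_n and spot-checks the LSD law. The authentication protocol itself is not reproduced here.

def laver_table(n):
    size = 2 ** n
    T = [[0] * (size + 1) for _ in range(size + 1)]   # 1-based indexing
    T[size] = list(range(size + 1))                   # 2^n * y = y
    for x in range(size - 1, 0, -1):
        T[x][1] = x + 1                               # x*1 = x+1
        for y in range(1, size):
            # x*(y+1) = (x*y)*(x+1); since x*y > x, that row is already done.
            T[x][y + 1] = T[T[x][y]][x + 1]
    return T

T = laver_table(2)        # the 4-element Laver table A_2
for x in range(1, 5):
    print([T[x][y] for y in range(1, 5)])
# Spot-check the left self-distributive law on A_2:
assert all(T[x][T[y][z]] == T[T[x][y]][T[x][z]]
           for x in range(1, 5) for y in range(1, 5) for z in range(1, 5))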
arxiv-674797
cs/0609092
Analysis of Equality Relationships for Imperative Programs
<|reference_start|>Analysis of Equality Relationships for Imperative Programs: In this article, we discuss a flow-sensitive analysis of equality relationships for imperative programs. We describe its semantic domains, general-purpose operations over abstract computational states (term evaluation and identification, semantic completion, the widening operator, etc.), and the semantic transformers corresponding to program constructs. We summarize our experiences with this analysis over the last few years, paying particular attention to the analysis of automatically generated code. Among other illustrative examples, we consider a program for which the analysis diverges without a widening operator, and results of analyzing residual programs produced by an automatic partial evaluator. An example of the analysis of a program generated by this evaluator is given.<|reference_end|>
arxiv
@article{emelyanov2006analysis, title={Analysis of Equality Relationships for Imperative Programs}, author={P. Emelyanov}, journal={arXiv preprint arXiv:cs/0609092}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609092}, primaryClass={cs.PL} }
emelyanov2006analysis
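A deliberately tiny flow-sensitive sketch of equality tracking across assignments, to fix intuitions; the paper's analysis additionally handles term evaluation and identification, semantic completion, and widening. The statement encoding is invented.

def analyze(stmts):
    eq = {}                                  # var -> representative term
    for lhs, rhs in stmts:                   # e.g. ("x", "y") means x := y
        # An assignment kills previous facts involving lhs ...
        eq = {v: t for v, t in eq.items() if t != lhs and v != lhs}
        # ... then records the new equality, resolving rhs if known.
        eq[lhs] = eq.get(rhs, rhs)
        print(f"after {lhs} := {rhs}:  {eq}")

analyze([("x", "1"), ("y", "x"), ("x", "2"), ("z", "y")])
# Note that y keeps the fact y = 1 even after x is reassigned, because the
# equality was resolved to the constant at assignment time.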
arxiv-674798
cs/0609093
PAC Learning Mixtures of Axis-Aligned Gaussians with No Separation Assumption
<|reference_start|>PAC Learning Mixtures of Axis-Aligned Gaussians with No Separation Assumption: We propose and analyze a new vantage point for the learning of mixtures of Gaussians: namely, the PAC-style model of learning probability distributions introduced by Kearns et al. Here the task is to construct a hypothesis mixture of Gaussians that is statistically indistinguishable from the actual mixture generating the data; specifically, the KL-divergence should be at most epsilon. In this scenario, we give a poly(n/epsilon)-time algorithm that learns the class of mixtures of any constant number of axis-aligned Gaussians in n-dimensional Euclidean space. Our algorithm makes no assumptions about the separation between the means of the Gaussians, nor does it have any dependence on the minimum mixing weight. This is in contrast to learning results known in the ``clustering'' model, where such assumptions are unavoidable. Our algorithm relies on the method of moments, and a subalgorithm developed in previous work by the authors (FOCS 2005) for a discrete mixture-learning problem.<|reference_end|>
arxiv
@article{feldman2006pac, title={PAC Learning Mixtures of Axis-Aligned Gaussians with No Separation Assumption}, author={Jon Feldman, Ryan O'Donnell, Rocco A. Servedio}, journal={Proceedings of 19th Annual Conference on Learning Theory (COLT), Pittsburgh, PA, pp. 20--34, 2006}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609093}, primaryClass={cs.LG} }
feldman2006pac
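The success criterion in this PAC model is a small KL divergence between the target mixture f and the hypothesis g, which is easy to check by Monte Carlo; the sketch below evaluates that criterion for 1-D Gaussian mixtures and does not implement the learning algorithm itself.

import numpy as np
from scipy.stats import norm

def mixture_pdf(x, ws, mus, sigmas):
    return sum(w * norm.pdf(x, m, s) for w, m, s in zip(ws, mus, sigmas))

rng = np.random.default_rng(0)
target = ([0.3, 0.7], [-2.0, 1.0], [1.0, 0.5])
hypoth = ([0.32, 0.68], [-1.9, 1.0], [1.1, 0.5])   # pretend learner output

# Sample from the target mixture, then KL(f||g) ~ mean of log(f/g).
comp = rng.choice(2, size=50000, p=target[0])
xs = rng.normal(np.array(target[1])[comp], np.array(target[2])[comp])
kl = np.mean(np.log(mixture_pdf(xs, *target) / mixture_pdf(xs, *hypoth)))
print("estimated KL(f || g):", kl)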
arxiv-674799
cs/0609094
An Improved Sphere-Packing Bound Targeting Codes of Short to Moderate Block Lengths and Applications
<|reference_start|>An Improved Sphere-Packing Bound Targeting Codes of Short to Moderate Block Lengths and Applications: This paper derives an improved sphere-packing (ISP) bound targeting codes of short to moderate block lengths. We first review the 1967 sphere-packing (SP67) bound for discrete memoryless channels, and a recent improvement by Valembois and Fossorier. These concepts are used for the derivation of a new lower bound on the decoding error probability (referred to as the ISP bound) which is uniformly tighter than the SP67 bound and its recent improved version. Under a mild condition, the ISP bound is applicable to general memoryless channels, and some of its applications are exemplified. Its tightness is studied by comparing it with bounds on the ML decoding error probability. It is exemplified that the ISP bound suggests an interesting alternative to the 1959 sphere-packing (SP59) bound of Shannon for the Gaussian channel, especially for digital modulations of high spectral efficiency.<|reference_end|>
arxiv
@article{wiechman2006an, title={An Improved Sphere-Packing Bound Targeting Codes of Short to Moderate Block Lengths and Applications}, author={Gil Wiechman and Igal Sason}, journal={arXiv preprint arXiv:cs/0609094}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609094}, primaryClass={cs.IT math.IT} }
wiechman2006an
arxiv-674800
cs/0609095
Free Choice Petri Nets without frozen tokens and Bipolar Synchronization Systems
<|reference_start|>Free Choice Petri Nets without frozen tokens and Bipolar Synchronization Systems: Bipolar synchronization systems (BP-systems) constitute a class of coloured Petri nets, well suited for modeling the control flow of discrete, dynamical systems. Every BP-system has an underlying ordinary Petri net, which is a T-system. Moreover, it has a second ordinary net attached, which is a free-choice system. We prove that a BP-system is live and safe if the T-system and the free-choice system are live and safe and if the free-choice system has no frozen tokens. This result is the converse of a theorem of Genrich and Thiagarajan and proves an older conjecture. The proof compares the different Petri nets by means of Petri net morphisms and makes use of the classical theory of free-choice systems.<|reference_end|>
arxiv
@article{wehler2006free, title={Free Choice Petri Nets without frozen tokens and Bipolar Synchronization Systems}, author={Joachim Wehler}, journal={arXiv preprint arXiv:cs/0609095}, year={2006}, archivePrefix={arXiv}, eprint={cs/0609095}, primaryClass={cs.LO} }
wehler2006free
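A small reachability-based sketch of the kinds of properties the theorem speaks about, for ordinary place/transition nets: explore the reachable markings, flag any marking that puts more than one token on a place (violating 1-safety), and record which transitions are ever enabled (a quasi-liveness check, weaker than liveness proper). The net encoding and the two-transition cycle are illustrative.

def explore(pre, post, m0, bound=10000):
    """pre/post: per-transition lists of places consumed/produced."""
    seen, stack, safe, fired = set(), [tuple(m0)], True, set()
    while stack:
        m = stack.pop()
        if m in seen:
            continue
        seen.add(m)
        for t, (ins, outs) in enumerate(zip(pre, post)):
            if all(m[p] >= 1 for p in ins):        # transition t is enabled
                fired.add(t)
                m2 = list(m)
                for p in ins:
                    m2[p] -= 1
                for p in outs:
                    m2[p] += 1
                if any(k > 1 for k in m2):         # more than one token
                    safe = False
                stack.append(tuple(m2))
        if len(seen) > bound:
            break
    return safe, fired

# p0 -> t0 -> p1 -> t1 -> p0   (a live and safe T-system cycle)
safe, fired = explore(pre=[[0], [1]], post=[[1], [0]], m0=[1, 0])
print("1-safe:", safe,
      "- transitions enabled at some reachable marking:", sorted(fired))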