corpus_id
stringlengths 7
12
| paper_id
stringlengths 9
16
| title
stringlengths 1
261
| abstract
stringlengths 70
4.02k
| source
stringclasses 1
value | bibtex
stringlengths 208
20.9k
| citation_key
stringlengths 6
100
|
---|---|---|---|---|---|---|
arxiv-672701 | cs/0503030 | A Suffix Tree Approach to Email Filtering | <|reference_start|>A Suffix Tree Approach to Email Filtering: We present an approach to email filtering based on the suffix tree data structure. A method for the scoring of emails using the suffix tree is developed and a number of scoring and score normalisation functions are tested. Our results show that the character level representation of emails and classes facilitated by the suffix tree can significantly improve classification accuracy when compared with the currently popular methods, such as naive Bayes. We believe the method can be extended to the classification of documents in other domains.<|reference_end|> | arxiv | @article{pampapathi2005a,
title={A Suffix Tree Approach to Email Filtering},
author={Rajesh M. Pampapathi and Boris Mirkin and Mark Levene},
journal={arXiv preprint arXiv:cs/0503030},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503030},
primaryClass={cs.AI cs.CL}
} | pampapathi2005a |
arxiv-672702 | cs/0503031 | On the Scalability of Cooperative Time Synchronization in Pulse-Connected Networks | <|reference_start|>On the Scalability of Cooperative Time Synchronization in Pulse-Connected Networks: The problem of time synchronization in dense wireless networks is considered. Well established synchronization techniques suffer from an inherent scalability problem in that synchronization errors grow with an increasing number of hops across the network. In this work, a model for communication in wireless networks is first developed, and then the model is used to define a new time synchronization mechanism. A salient feature of the proposed method is that, in the regime of asymptotically dense networks, it can average out all random errors and maintain global synchronization in the sense that all nodes in the multi-hop network can see identical timing signals. This is irrespective of the distance separating any two nodes.<|reference_end|> | arxiv | @article{hu2005on,
title={On the Scalability of Cooperative Time Synchronization in
Pulse-Connected Networks},
author={An-swol Hu and Sergio D. Servetto (Cornell University)},
journal={IEEE Trans. Inform. Theory, 52(6):2725-2748, 2006.},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503031},
primaryClass={cs.IT math.IT nlin.AO}
} | hu2005on |
arxiv-672703 | cs/0503032 | Complexity and Approximation of Fixing Numerical Attributes in Databases Under Integrity Constraints | <|reference_start|>Complexity and Approximation of Fixing Numerical Attributes in Databases Under Integrity Constraints: Consistent query answering is the problem of computing the answers from a database that are consistent with respect to certain integrity constraints that the database as a whole may fail to satisfy. Those answers are characterized as those that are invariant under minimal forms of restoring the consistency of the database. In this context, we study the problem of repairing databases by fixing integer numerical values at the attribute level with respect to denial and aggregation constraints. We introduce a quantitative definition of database fix, and investigate the complexity of several decision and optimization problems, including DFP, i.e. the existence of fixes within a given distance from the original instance, and CQA, i.e. deciding consistency of answers to aggregate conjunctive queries under different semantics. We provide sharp complexity bounds, identify relevant tractable cases; and introduce approximation algorithms for some of those that are intractable. More specifically, we obtain results like undecidability of existence of fixes for aggregation constraints; MAXSNP-hardness of DFP, but a good approximation algorithm for a relevant special case; and intractability but good approximation for CQA for aggregate queries for one database atom denials (plus built-ins).<|reference_end|> | arxiv | @article{bertossi2005complexity,
title={Complexity and Approximation of Fixing Numerical Attributes in Databases
Under Integrity Constraints},
author={L. Bertossi (1), L. Bravo (1), E. Franconi (2), A. Lopatenko (2 and 3)
((1) Carleton University (2) Free University of Bozen--Bolzano, (3)
University of Manchester)},
journal={arXiv preprint arXiv:cs/0503032},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503032},
primaryClass={cs.DB cs.CC}
} | bertossi2005complexity |
arxiv-672704 | cs/0503033 | An Introduction to the Summarization of Evolving Events: Linear and Non-linear Evolution | <|reference_start|>An Introduction to the Summarization of Evolving Events: Linear and Non-linear Evolution: This paper examines the summarization of events that evolve through time. It discusses different types of evolution taking into account the time in which the incidents of an event are happening and the different sources reporting on the specific event. It proposes an approach for multi-document summarization which employs ``messages'' for representing the incidents of an event and cross-document relations that hold between messages according to certain conditions. The paper also outlines the current version of the summarization system we are implementing to realize this approach.<|reference_end|> | arxiv | @article{afantenos2005an,
title={An Introduction to the Summarization of Evolving Events: Linear and
Non-linear Evolution},
author={Stergos D. Afantenos and Konstantina Liontou and Maria Salapata and
Vangelis Karkaletsis},
journal={Edited by Bernadete Sharp, Proceedings of the 2nd International
Workshop on Natural Language Understanding and Cognitive Science, NLUCS 2005.
Miami, Florida, USA: INSTICC Press. pp 91-99.},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503033},
primaryClass={cs.CL cs.IR}
} | afantenos2005an |
arxiv-672705 | cs/0503034 | Comment on "Some non-conventional ideas about algorithmic complexity" | <|reference_start|>Comment on "Some non-conventional ideas about algorithmic complexity": We comment on a recent paper by D'Abramo [Chaos, Solitons & Fractals, 25 (2005) 29], focusing on the author's statement that an algorithm can produce a list of strings containing at least one string whose algorithmic complexity is greater than that of the entire list. We show that this statement, although perplexing, is not as paradoxical as it seems when the definition of algorithmic complexity is applied correctly.<|reference_end|> | arxiv | @article{poulin2005comment,
title={Comment on "Some non-conventional ideas about algorithmic complexity"},
author={David Poulin and Hugo Touchette},
journal={arXiv preprint arXiv:cs/0503034},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503034},
primaryClass={cs.CC}
} | poulin2005comment |
arxiv-672706 | cs/0503035 | The egalitarian sharing rule in provision of public projects | <|reference_start|>The egalitarian sharing rule in provision of public projects: In this note we consider a society that partitions itself into disjoint jurisdictions, each choosing a location of its public project and a taxation scheme to finance it. The set of public project is multi-dimensional, and their costs could vary from jurisdiction to jurisdiction. We impose two principles, egalitarianism, that requires the equalization of the total cost for all agents in the same jurisdiction, and efficiency, that implies the minimization of the aggregate total cost within jurisdiction. We show that these two principles always yield a core-stable partition but a Nash stable partition may fail to exist.<|reference_end|> | arxiv | @article{bogomolnaia2005the,
title={The egalitarian sharing rule in provision of public projects},
author={Anna Bogomolnaia and Michel Le Breton and Alexei Savvateev and Shlomo Weber},
journal={arXiv preprint arXiv:cs/0503035},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503035},
primaryClass={cs.GT}
} | bogomolnaia2005the |
arxiv-672707 | cs/0503036 | Timed Analysis of Security Protocols | <|reference_start|>Timed Analysis of Security Protocols: We propose a method for engineering security protocols that are aware of timing aspects. We study a simplified version of the well-known Needham Schroeder protocol and the complete Yahalom protocol, where timing information allows the study of different attack scenarios. We model check the protocols using UPPAAL. Further, a taxonomy is obtained by studying and categorising protocols from the well known Clark Jacob library and the Security Protocol Open Repository (SPORE) library. Finally, we present some new challenges and threats that arise when considering time in the analysis, by providing a novel protocol that uses time challenges and exposing a timing attack over an implementation of an existing security protocol.<|reference_end|> | arxiv | @article{corin2005timed,
title={Timed Analysis of Security Protocols},
author={R. Corin and S. Etalle and P. H. Hartel and A. Mader},
journal={arXiv preprint arXiv:cs/0503036},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503036},
primaryClass={cs.CR}
} | corin2005timed |
arxiv-672708 | cs/0503037 | Mining Top-k Approximate Frequent Patterns | <|reference_start|>Mining Top-k Approximate Frequent Patterns: Frequent pattern (itemset) mining in transactional databases is one of the most well-studied problems in data mining. One obstacle that limits the practical usage of frequent pattern mining is the extremely large number of patterns generated. Such a large size of the output collection makes it difficult for users to understand and use in practice. Even restricting the output to the border of the frequent itemset collection does not help much in alleviating the problem. In this paper we address the issue of overwhelmingly large output size by introducing and studying the following problem: mining top-k approximate frequent patterns. The union of the power sets of these k sets should satisfy the following conditions: (1) including itemsets with larger support as many as possible and (2) including itemsets with smaller support as less as possible. An integrated objective function is designed to combine these two objectives. Consequently, we derive the upper bounds on objective function and present an approximate branch-and-bound method for finding the feasible solution. We give empirical evidence showing that our formulation and approximation methods work well in practice.<|reference_end|> | arxiv | @article{he2005mining,
title={Mining Top-k Approximate Frequent Patterns},
author={Zengyou He},
journal={arXiv preprint arXiv:cs/0503037},
year={2005},
number={TR-2005-0315},
archivePrefix={arXiv},
eprint={cs/0503037},
primaryClass={cs.DB cs.AI}
} | he2005mining |
arxiv-672709 | cs/0503038 | On a Kronecker products sum distance bounds | <|reference_start|>On a Kronecker products sum distance bounds: A binary linear error correcting codes represented by two code families Kronecker products sum are considered. The dimension and distance of new code is investigated. Upper and lower bounds of distance are obtained. Some examples are given. It is shown that some classic constructions are the private cases of considered one. The subclass of codes with equal lower and upper distance bounds is allocated.<|reference_end|> | arxiv | @article{grigoryants2005on,
title={On a Kronecker products sum distance bounds},
author={Armen Grigoryants},
journal={arXiv preprint arXiv:cs/0503038},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503038},
primaryClass={cs.IT math.IT}
} | grigoryants2005on |
arxiv-672710 | cs/0503039 | Notes for Miscellaneous Lectures | <|reference_start|>Notes for Miscellaneous Lectures: Here I share a few notes I used in various course lectures, talks, etc. Some may be just calculations that in the textbooks are more complicated, scattered, or less specific; others may be simple observations I found useful or curious.<|reference_end|> | arxiv | @article{levin2005notes,
title={Notes for Miscellaneous Lectures},
author={Leonid A. Levin},
journal={Earlier version in: Pillars of Computer Science, LNCS, Springer,
2008},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503039},
primaryClass={cs.DM}
} | levin2005notes |
arxiv-672711 | cs/0503040 | Uplink Throughput in a Single-Macrocell/Single-Microcell CDMA System, with Application to Data Access Points | <|reference_start|>Uplink Throughput in a Single-Macrocell/Single-Microcell CDMA System, with Application to Data Access Points: This paper studies a two-tier CDMA system in which the microcell base is converted into a data access point (DAP), i.e., a limited-range base station that provides high-speed access to one user at a time. The microcell (or DAP) user operates on the same frequency as the macrocell users and has the same chip rate. However, it adapts its spreading factor, and thus its data rate, in accordance with interference conditions. By contrast, the macrocell serves multiple simultaneous data users, each with the same fixed rate. The achieveable throughput for individual microcell users is examined and a simple, accurate approximation for its probability distribution is presented. Computations for average throughputs, both per-user and total, are also presented. The numerical results highlight the impact of a desensitivity parameter used in the base-selection process.<|reference_end|> | arxiv | @article{kishore2005uplink,
title={Uplink Throughput in a Single-Macrocell/Single-Microcell CDMA System,
with Application to Data Access Points},
author={Shalinee Kishore and Stuart C. Schwartz and Larry J. Greenstein and H.
Vincent Poor},
journal={arXiv preprint arXiv:cs/0503040},
year={2005},
doi={10.1109/TWC.2005.852144},
archivePrefix={arXiv},
eprint={cs/0503040},
primaryClass={cs.IT math.IT}
} | kishore2005uplink |
arxiv-672712 | cs/0503041 | Soft Handoff and Uplink Capacity in a Two-Tier CDMA System | <|reference_start|>Soft Handoff and Uplink Capacity in a Two-Tier CDMA System: This paper examines the effect of soft handoff on the uplink user capacity of a CDMA system consisting of a single macrocell in which a single hotspot microcell is embedded. The users of these two base stations operate over the same frequency band. In the soft handoff scenario studied here, both macrocell and microcell base stations serve each system user and the two received copies of a desired user's signal are summed using maximal ratio combining. Exact and approximate analytical methods are developed to compute uplink user capacity. Simulation results demonstrate a 20% increase in user capacity compared to hard handoff. In addition, simple, approximate methods are presented for estimating soft handoff capacity and are shown to be quite accurate.<|reference_end|> | arxiv | @article{kishore2005soft,
title={Soft Handoff and Uplink Capacity in a Two-Tier CDMA System},
author={Shalinee Kishore and Larry J. Greenstein and H. Vincent Poor and
Stuart C. Schwartz},
journal={arXiv preprint arXiv:cs/0503041},
year={2005},
doi={10.1109/TWC.2005.850319},
archivePrefix={arXiv},
eprint={cs/0503041},
primaryClass={cs.IT math.IT}
} | kishore2005soft |
arxiv-672713 | cs/0503042 | Uplink User Capacity in a CDMA System with Hotspot Microcells: Effects of Finite Transmit Power and Dispersion | <|reference_start|>Uplink User Capacity in a CDMA System with Hotspot Microcells: Effects of Finite Transmit Power and Dispersion: This paper examines the uplink user capacity in a two-tier code division multiple access (CDMA) system with hotspot microcells when user terminal power is limited and the wireless channel is finitely-dispersive. A finitely-dispersive channel causes variable fading of the signal power at the output of the RAKE receiver. First, a two-cell system composed of one macrocell and one embedded microcell is studied and analytical methods are developed to estimate the user capacity as a function of a dimensionless parameter that depends on the transmit power constraint and cell radius. Next, novel analytical methods are developed to study the effect of variable fading, both with and without transmit power constraints. Finally, the analytical methods are extended to estimate uplink user capacity for multicell CDMA systems, composed of multiple macrocells and multiple embedded microcells. In all cases, the analysis-based estimates are compared with and confirmed by simulation results.<|reference_end|> | arxiv | @article{kishore2005uplink,
title={Uplink User Capacity in a CDMA System with Hotspot Microcells: Effects
of Finite Transmit Power and Dispersion},
author={Shalinee Kishore and Larry J. Greenstein and H. Vincent Poor and
Stuart C. Schwartz},
journal={arXiv preprint arXiv:cs/0503042},
year={2005},
doi={10.1109/TWC.2006.1611065},
archivePrefix={arXiv},
eprint={cs/0503042},
primaryClass={cs.IT math.IT}
} | kishore2005uplink |
arxiv-672714 | cs/0503043 | Complexity Issues in Finding Succinct Solutions of PSPACE-Complete Problems | <|reference_start|>Complexity Issues in Finding Succinct Solutions of PSPACE-Complete Problems: We study the problem of deciding whether some PSPACE-complete problems have models of bounded size. Contrary to problems in NP, models of PSPACE-complete problems may be exponentially large. However, such models may take polynomial space in a succinct representation. For example, the models of a QBF are explicitely represented by and-or trees (which are always of exponential size) but can be succinctely represented by circuits (which can be polynomial or exponential). We investigate the complexity of deciding the existence of such succinct models when a bound on size is given.<|reference_end|> | arxiv | @article{liberatore2005complexity,
title={Complexity Issues in Finding Succinct Solutions of PSPACE-Complete
Problems},
author={Paolo Liberatore},
journal={arXiv preprint arXiv:cs/0503043},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503043},
primaryClass={cs.AI cs.CC cs.LO}
} | liberatore2005complexity |
arxiv-672715 | cs/0503044 | Generating Hard Satisfiable Formulas by Hiding Solutions Deceptively | <|reference_start|>Generating Hard Satisfiable Formulas by Hiding Solutions Deceptively: To test incomplete search algorithms for constraint satisfaction problems such as 3-SAT, we need a source of hard, but satisfiable, benchmark instances. A simple way to do this is to choose a random truth assignment A, and then choose clauses randomly from among those satisfied by A. However, this method tends to produce easy problems, since the majority of literals point toward the ``hidden'' assignment A. Last year, Achlioptas, Jia and Moore proposed a problem generator that cancels this effect by hiding both A and its complement. While the resulting formulas appear to be just as hard for DPLL algorithms as random 3-SAT formulas with no hidden assignment, they can be solved by WalkSAT in only polynomial time. Here we propose a new method to cancel the attraction to A, by choosing a clause with t > 0 literals satisfied by A with probability proportional to q^t for some q < 1. By varying q, we can generate formulas whose variables have no bias, i.e., which are equally likely to be true or false; we can even cause the formula to ``deceptively'' point away from A. We present theoretical and experimental results suggesting that these formulas are exponentially hard both for DPLL algorithms and for incomplete algorithms such as WalkSAT.<|reference_end|> | arxiv | @article{jia2005generating,
title={Generating Hard Satisfiable Formulas by Hiding Solutions Deceptively},
author={Haixia Jia and Cristopher Moore and Doug Strain},
journal={arXiv preprint arXiv:cs/0503044},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503044},
primaryClass={cs.AI cond-mat.other cond-mat.stat-mech}
} | jia2005generating |
arxiv-672716 | cs/0503045 | Contextual Constraint Modeling in Grid Application Workflows | <|reference_start|>Contextual Constraint Modeling in Grid Application Workflows: This paper introduces a new mechanism for specifying constraints in distributed workflows. By introducing constraints in a contextual form, it is shown how different people and groups within collaborative communities can cooperatively constrain workflows. A comparison with existing state-of-the-art workflow systems is made. These ideas are explored in practice with an illustrative example from High Energy Physics.<|reference_end|> | arxiv | @article{graham2005contextual,
title={Contextual Constraint Modeling in Grid Application Workflows},
author={G. E. Graham (1), M. Anzar Afaq (1), David Evans (1), Gerald Guglielmo
(1), Eric Wicklund (1), Peter Love (2) ((1) Fermi National Accelerator
Laboratory, (2) University of Lancaster)},
journal={arXiv preprint arXiv:cs/0503045},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503045},
primaryClass={cs.DC}
} | graham2005contextual |
arxiv-672717 | cs/0503046 | Hiding Satisfying Assignments: Two are Better than One | <|reference_start|>Hiding Satisfying Assignments: Two are Better than One: The evaluation of incomplete satisfiability solvers depends critically on the availability of hard satisfiable instances. A plausible source of such instances consists of random k-SAT formulas whose clauses are chosen uniformly from among all clauses satisfying some randomly chosen truth assignment A. Unfortunately, instances generated in this manner tend to be relatively easy and can be solved efficiently by practical heuristics. Roughly speaking, as the formula's density increases, for a number of different algorithms, A acts as a stronger and stronger attractor. Motivated by recent results on the geometry of the space of satisfying truth assignments of random k-SAT and NAE-k-SAT formulas, we introduce a simple twist on this basic model, which appears to dramatically increase its hardness. Namely, in addition to forbidding the clauses violated by the hidden assignment A, we also forbid the clauses violated by its complement, so that both A and complement of A are satisfying. It appears that under this "symmetrization'' the effects of the two attractors largely cancel out, making it much harder for algorithms to find any truth assignment. We give theoretical and experimental evidence supporting this assertion.<|reference_end|> | arxiv | @article{achlioptas2005hiding,
title={Hiding Satisfying Assignments: Two are Better than One},
author={Dimitris Achlioptas and Haixia Jia and Cristopher Moore},
journal={arXiv preprint arXiv:cs/0503046},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503046},
primaryClass={cs.AI cond-mat.dis-nn cond-mat.stat-mech cs.CC}
} | achlioptas2005hiding |
arxiv-672718 | cs/0503047 | On Multiflows in Random Unit-Disk Graphs, and the Capacity of Some Wireless Networks | <|reference_start|>On Multiflows in Random Unit-Disk Graphs, and the Capacity of Some Wireless Networks: We consider the capacity problem for wireless networks. Networks are modeled as random unit-disk graphs, and the capacity problem is formulated as one of finding the maximum value of a multicommodity flow. In this paper, we develop a proof technique based on which we are able to obtain a tight characterization of the solution to the linear program associated with the multiflow problem, to within constants independent of network size. We also use this proof method to analyze network capacity for a variety of transmitter/receiver architectures, for which we obtain some conclusive results. These results contain as a special case (and strengthen) those of Gupta and Kumar for random networks, for which a new derivation is provided using only elementary counting and discrete probability tools.<|reference_end|> | arxiv | @article{peraki2005on,
title={On Multiflows in Random Unit-Disk Graphs, and the Capacity of Some
Wireless Networks},
author={Christina Peraki, Sergio D. Servetto (Cornell University)},
journal={arXiv preprint arXiv:cs/0503047},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503047},
primaryClass={cs.IT math.IT}
} | peraki2005on |
arxiv-672719 | cs/0503048 | Is entanglement necessary to have unconditional security in quantum bit commitment ? | <|reference_start|>Is entanglement necessary to have unconditional security in quantum bit commitment ?: A simple un-entanglement based quantum bit commitment scheme is presented. Although commitment is unconditionally secure but concealment is not.<|reference_end|> | arxiv | @article{mitra2005is,
title={Is entanglement necessary to have unconditional security in quantum bit
commitment ?},
author={Arindam Mitra},
journal={arXiv preprint arXiv:cs/0503048},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503048},
primaryClass={cs.CR}
} | mitra2005is |
arxiv-672720 | cs/0503049 | Enforcing and Defying Associativity, Commutativity, Totality, and Strong Noninvertibility for One-Way Functions in Complexity Theory | <|reference_start|>Enforcing and Defying Associativity, Commutativity, Totality, and Strong Noninvertibility for One-Way Functions in Complexity Theory: Rabi and Sherman [RS97,RS93] proved that the hardness of factoring is a sufficient condition for there to exist one-way functions (i.e., p-time computable, honest, p-time noninvertible functions; this paper is in the worst-case model, not the average-case model) that are total, commutative, and associative but not strongly noninvertible. In this paper we improve the sufficient condition to ``P does not equal NP.'' More generally, in this paper we completely characterize which types of one-way functions stand or fall together with (plain) one-way functions--equivalently, stand or fall together with P not equaling NP. We look at the four attributes used in Rabi and Sherman's seminal work on algebraic properties of one-way functions (see [RS97,RS93]) and subsequent papers--strongness (of noninvertibility), totality, commutativity, and associativity--and for each attribute, we allow it to be required to hold, required to fail, or ``don't care.'' In this categorization there are 3^4 = 81 potential types of one-way functions. We prove that each of these 81 feature-laden types stand or fall together with the existence of (plain) one-way functions.<|reference_end|> | arxiv | @article{hemaspaandra2005enforcing,
title={Enforcing and Defying Associativity, Commutativity, Totality, and Strong
Noninvertibility for One-Way Functions in Complexity Theory},
author={Lane A. Hemaspaandra and Joerg Rothe and Amitabh Saxena},
journal={arXiv preprint arXiv:cs/0503049},
year={2005},
number={URCS-TR-2005-854},
archivePrefix={arXiv},
eprint={cs/0503049},
primaryClass={cs.CC}
} | hemaspaandra2005enforcing |
arxiv-672721 | cs/0503050 | Systematic Method for Path-Complete White Box Testing | <|reference_start|>Systematic Method for Path-Complete White Box Testing: A systematic, language-independent method of finding a minimal set of paths covering the code of a sequential program is proposed for application in White Box testing. Execution of all paths from the set ensures also statement coverage. Execution fault marks problematic areas of the code. The method starts from a UML activity diagram of a program. The diagram is transformed into a directed graph: graph's nodes substitute decision and action points; graph's directed edges substitute action arrows. The number of independent paths equals easy-to-compute cyclomatic complexity of the graph. Association of a vector to each path creates a path vector space. Independence of the paths is equivalent to linear independence of the vectors. It is sufficient to test any base of the path space to complete the procedure. An effective algorithm for choosing the base paths is presented.<|reference_end|> | arxiv | @article{makaruk2005systematic,
title={Systematic Method for Path-Complete White Box Testing},
author={Hanna Makaruk and Robert Owczarek and Nikita Sakhanenko},
journal={arXiv preprint arXiv:cs/0503050},
year={2005},
number={LA-UR-05-1850},
archivePrefix={arXiv},
eprint={cs/0503050},
primaryClass={cs.SE}
} | makaruk2005systematic |
arxiv-672722 | cs/0503051 | Construction of Small Worlds in the Physical Topology of Wireless Networks | <|reference_start|>Construction of Small Worlds in the Physical Topology of Wireless Networks: The concept of small worlds is introduced into the physical topology of wireless networks in this work. A. Helmy provided two con- struction schemes of small worlds for the wireless networks, link rewiring and link addition, but he mainly focused on the virtual topology. Based on the broadcasting nature of the radio transmission, we propose a con- struction scheme of small worlds for the physical topology of Multiple- Input Multiple-Output (MIMO) wireless networks. Besides the topology- related topics, we also evaluate the reduction of the power required by a request.<|reference_end|> | arxiv | @article{zhou2005construction,
title={Construction of Small Worlds in the Physical Topology of Wireless
Networks},
author={Fengfeng Zhou, Guoliang Chen, and Yinlong Xu},
journal={arXiv preprint arXiv:cs/0503051},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503051},
primaryClass={cs.NI}
} | zhou2005construction |
arxiv-672723 | cs/0503052 | Zeta-Dimension | <|reference_start|>Zeta-Dimension: The zeta-dimension of a set A of positive integers is the infimum s such that the sum of the reciprocals of the s-th powers of the elements of A is finite. Zeta-dimension serves as a fractal dimension on the positive integers that extends naturally usefully to discrete lattices such as the set of all integer lattice points in d-dimensional space. This paper reviews the origins of zeta-dimension (which date to the eighteenth and nineteenth centuries) and develops its basic theory, with particular attention to its relationship with algorithmic information theory. New results presented include extended connections between zeta-dimension and classical fractal dimensions, a gale characterization of zeta-dimension, and a theorem on the zeta-dimensions of pointwise sums and products of sets of positive integers.<|reference_end|> | arxiv | @article{doty2005zeta-dimension,
title={Zeta-Dimension},
author={David Doty and Xiaoyang Gu and Jack H. Lutz and Elvira Mayordomo and
Philippe Moser},
journal={arXiv preprint arXiv:cs/0503052},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503052},
primaryClass={cs.CC cs.IT math.IT}
} | doty2005zeta-dimension |
arxiv-672724 | cs/0503053 | A hybrid MLP-PNN architecture for fast image superresolution | <|reference_start|>A hybrid MLP-PNN architecture for fast image superresolution: Image superresolution methods process an input image sequence of a scene to obtain a still image with increased resolution. Classical approaches to this problem involve complex iterative minimization procedures, typically with high computational costs. In this paper is proposed a novel algorithm for super-resolution that enables a substantial decrease in computer load. First, a probabilistic neural network architecture is used to perform a scattered-point interpolation of the image sequence data. The network kernel function is optimally determined for this problem by a multi-layer perceptron trained on synthetic data. Network parameters dependence on sequence noise level is quantitatively analyzed. This super-sampled image is spatially filtered to correct finite pixel size effects, to yield the final high-resolution estimate. Results on a real outdoor sequence are presented, showing the quality of the proposed method.<|reference_end|> | arxiv | @article{miravet2005a,
title={A hybrid MLP-PNN architecture for fast image superresolution},
author={Carlos Miravet and Francisco B. Rodriguez (EPS-UAM, Madrid, Spain)},
journal={Lect. Notes Comput. Sc. 2714 (2003) 401-408},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503053},
primaryClass={cs.CV cs.MM}
} | miravet2005a |
arxiv-672725 | cs/0503054 | Analytic Definition of Curves and Surfaces by Parabolic Blending | <|reference_start|>Analytic Definition of Curves and Surfaces by Parabolic Blending: A procedure for interpolating between specified points of a curve or surface is described. The method guarantees slope continuity at all junctions. A surface panel divided into p x q contiguous patches is completely specified by the coordinates of (p+1) x (q+1) points. Each individual patch, however, depends parametrically on the coordinates of 16 points, allowing shape flexibility and global conformity.<|reference_end|> | arxiv | @article{overhauser2005analytic,
title={Analytic Definition of Curves and Surfaces by Parabolic Blending},
author={A. W. Overhauser},
journal={arXiv preprint arXiv:cs/0503054},
year={2005},
number={SL 68-40},
archivePrefix={arXiv},
eprint={cs/0503054},
primaryClass={cs.GR}
} | overhauser2005analytic |
arxiv-672726 | cs/0503055 | Optimality in Goal-Dependent Analysis of Sharing | <|reference_start|>Optimality in Goal-Dependent Analysis of Sharing: We face the problems of correctness, optimality and precision for the static analysis of logic programs, using the theory of abstract interpretation. We propose a framework with a denotational, goal-dependent semantics equipped with two unification operators for forward unification (calling a procedure) and backward unification (returning from a procedure). The latter is implemented through a matching operation. Our proposal clarifies and unifies many different frameworks and ideas on static analysis of logic programming in a single, formal setting. On the abstract side, we focus on the domain Sharing by Jacobs and Langen and provide the best correct approximation of all the primitive semantic operators, namely, projection, renaming, forward and backward unification. We show that the abstract unification operators are strictly more precise than those in the literature defined over the same abstract domain. In some cases, our operators are more precise than those developed for more complex domains involving linearity and freeness. To appear in Theory and Practice of Logic Programming (TPLP)<|reference_end|> | arxiv | @article{amato2005optimality,
title={Optimality in Goal-Dependent Analysis of Sharing},
author={Gianluca Amato, Francesca Scozzari},
journal={Theory and Practice of Logic Programming, volume 9, issue 05, pp.
617-689, 2009},
year={2005},
doi={10.1017/S1471068409990111},
archivePrefix={arXiv},
eprint={cs/0503055},
primaryClass={cs.PL cs.LO}
} | amato2005optimality |
arxiv-672727 | cs/0503056 | Semi-automatic vectorization of linear networks on rasterized cartographic maps | <|reference_start|>Semi-automatic vectorization of linear networks on rasterized cartographic maps: A system for semi-automatic vectorization of linear networks (roads, rivers, etc.) on rasterized cartographic maps is presented. In this system, human intervention is limited to a graphic, interactive selection of the color attributes of the information to be obtained. Using this data, the system performs a preliminary extraction of the linear network, which is subsequently completed, refined and vectorized by means of an automatic procedure. Results on maps of different sources and scales are included. ----- Se presenta un sistema semi-automatico de vectorizacion de redes de objetos lineales (carreteras, rios, etc.) en mapas cartograficos digitalizados. En este sistema, la intervencion humana queda reducida a la seleccion grafica interactiva de los atributos de color de la informacion a obtener. Con estos datos, el sistema realiza una extraccion preliminar de la red lineal, que se completa, refina y vectoriza mediante un procedimiento automatico. Se presentan resultados de la aplicacion del sistema sobre imagenes digitalizadas de mapas de distinta procedencia y escala.<|reference_end|> | arxiv | @article{miravet2005semi-automatic,
title={Semi-automatic vectorization of linear networks on rasterized
cartographic maps},
author={Carlos Miravet, Enrique Coiras and Javier Santamaria (SENER, Madrid,
Spain)},
journal={Revista de Teledeteccion, 10 (1998)},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503056},
primaryClass={cs.CV cs.MM}
} | miravet2005semi-automatic |
arxiv-672728 | cs/0503057 | Exact and Approximation Algorithms for DNA Tag Set Design | <|reference_start|>Exact and Approximation Algorithms for DNA Tag Set Design: In this paper we propose new solution methods for designing tag sets for use in universal DNA arrays. First, we give integer linear programming formulations for two previous formalizations of the tag set design problem, and show that these formulations can be solved to optimality for instance sizes of practical interest by using general purpose optimization packages. Second, we note the benefits of periodic tags, and establish an interesting connection between the tag design problem and the problem of packing the maximum number of vertex-disjoint directed cycles in a given graph. We show that combining a simple greedy cycle packing algorithm with a previously proposed alphabetic tree search strategy yields an increase of over 40% in the number of tags compared to previous methods.<|reference_end|> | arxiv | @article{mandoiu2005exact,
title={Exact and Approximation Algorithms for DNA Tag Set Design},
author={Ion I. Mandoiu and Dragos Trinca},
journal={arXiv preprint arXiv:cs/0503057},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503057},
primaryClass={cs.DS}
} | mandoiu2005exact |
arxiv-672729 | cs/0503058 | On the Stopping Distance and the Stopping Redundancy of Codes | <|reference_start|>On the Stopping Distance and the Stopping Redundancy of Codes: It is now well known that the performance of a linear code $C$ under iterative decoding on a binary erasure channel (and other channels) is determined by the size of the smallest stopping set in the Tanner graph for $C$. Several recent papers refer to this parameter as the \emph{stopping distance} $s$ of $C$. This is somewhat of a misnomer since the size of the smallest stopping set in the Tanner graph for $C$ depends on the corresponding choice of a parity-check matrix. It is easy to see that $s \le d$, where $d$ is the minimum Hamming distance of $C$, and we show that it is always possible to choose a parity-check matrix for $C$ (with sufficiently many dependent rows) such that $s = d$. We thus introduce a new parameter, termed the \emph{stopping redundancy} of $C$, defined as the minimum number of rows in a parity-check matrix $H$ for $C$ such that the corresponding stopping distance $s(H)$ attains its largest possible value, namely $s(H) = d$. We then derive general bounds on the stopping redundancy of linear codes. We also examine several simple ways of constructing codes from other codes, and study the effect of these constructions on the stopping redundancy. Specifically, for the family of binary Reed-Muller codes (of all orders), we prove that their stopping redundancy is at most a constant times their conventional redundancy. We show that the stopping redundancies of the binary and ternary extended Golay codes are at most 35 and 22, respectively. Finally, we provide upper and lower bounds on the stopping redundancy of MDS codes.<|reference_end|> | arxiv | @article{schwartz2005on,
title={On the Stopping Distance and the Stopping Redundancy of Codes},
author={Moshe Schwartz and Alexander Vardy},
journal={arXiv preprint arXiv:cs/0503058},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503058},
primaryClass={cs.IT cs.DM math.IT}
} | schwartz2005on |
arxiv-672730 | cs/0503059 | Les repr\'esentations g\'en\'etiques d'objets : simples analogies ou mod\`eles pertinents ? Le point de vue de l' "\'evolutique"<br>–––<br>Genetic representations of objects : simple analogies or efficient models ? The "evolutic" point of view | <|reference_start|>Les repr\'esentations g\'en\'etiques d'objets : simples analogies ou mod\`eles pertinents ? Le point de vue de l' "\'evolutique"<br>–––<br>Genetic representations of objects : simple analogies or efficient models ? The "evolutic" point of view: Depuis une trentaine d'ann\'{e}es, les ing\'{e}nieurs utilisent couramment des analogies avec l'\'{e}volution naturelle pour optimiser des dispositifs techniques. Le plus souvent, ces m\'{e}thodes "g\'{e}n\'{e}tiques" ou "\'{e}volutionnaires" sont consid\'{e}r\'{e}es uniquement du point de vue pratique, comme des m\'{e}thodes d'optimisation performantes, qu'on peut utiliser \`{a} la place d'autres m\'{e}thodes (gradients, simplexes, ...). Dans cet article, nous essayons de montrer que les sciences et les techniques, mais aussi les organisations humaines, et g\'{e}n\'{e}ralement tous les syst\`{e}mes complexes, ob\'{e}issent \`{a} des lois d'\'{e}volution dont la g\'{e}n\'{e}tique est un bon mod\`{e}le repr\'{e}sentatif, m\^{e}me si g\^{e}nes et chromosomes sont "virtuels" : ainsi loin d'\^{e}tre seulement un outil ponctuel d'aide \`{a} la synth\`{e}se de solutions technologiques, la repr\'{e}sentation g\'{e}n\'{e}tique est-elle un mod\`{e}le dynamique global de l'\'{e}volution du monde fa\c{c}onn\'{e} par l'agitation humaine.––––For thirty years, engineers commonly use analogies with natural evolution to optimize technical devices. More often that not, these "genetic" or "evolutionary" methods are only view as efficient tools, which could replace other optimization techniques (gradient methods, simplex, ...).
In this paper, we try to show that sciences, techniques, human organizations, and more generally all complex systems, obey to evolution rules, whose the genetic is a good representative model, even if genes and chromosomes are "virtual". Thus, the genetic representation is not only a specific tool helping for the design of technological solutions, but also a global and dynamic model for the action of the human agitation on our world.<|reference_end|> | arxiv | @article{krähenbühl2005les,
title={Les repr\'{e}sentations g\'{e}n\'{e}tiques d'objets : simples analogies
ou mod\`{e}les pertinents ? Le point de vue de l'
  "\'{e}volutique".<br>–––<br>Genetic representations of
objects : simple analogies or efficient models ? The "evolutic" point of view},
  author={Laurent Kr\"ahenb\"uhl (CEGELY)},
journal={arXiv preprint arXiv:cs/0503059},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503059},
primaryClass={cs.AI nlin.AO}
} | krähenbühl2005les |
arxiv-672731 | cs/0503060 | Multi-Dimensional Hash Chains and Application to Micropayment Schemes | <|reference_start|>Multi-Dimensional Hash Chains and Application to Micropayment Schemes: One-way hash chains have been used in many micropayment schemes due to their simplicity and efficiency. In this paper we introduce the notion of multi-dimensional hash chains, which is a new generalization of traditional one-way hash chains. We show that this construction has storage-computational complexity of O(logN) per chain element, which is comparable with the best result reported in recent literature. Based on multi-dimensional hash chains, we then propose two cash-like micropayment schemes, which have a number of advantages in terms of efficiency and security. We also point out some possible improvements to PayWord and similar schemes by using multi-dimensional hash chains<|reference_end|> | arxiv | @article{nguyen2005multi-dimensional,
title={Multi-Dimensional Hash Chains and Application to Micropayment Schemes},
author={Quan Son Nguyen (Hanoi University of Technology, Vietnam)},
journal={arXiv preprint arXiv:cs/0503060},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503060},
primaryClass={cs.CR}
} | nguyen2005multi-dimensional |
arxiv-672732 | cs/0503061 | Integrity Constraints in Trust Management | <|reference_start|>Integrity Constraints in Trust Management: We introduce the use, monitoring, and enforcement of integrity constraints in trust management-style authorization systems. We consider what portions of the policy state must be monitored to detect violations of integrity constraints. Then we address the fact that not all participants in a trust management system can be trusted to assist in such monitoring, and show how many integrity constraints can be monitored in a conservative manner so that trusted participants detect and report if the system enters a policy state from which evolution in unmonitored portions of the policy could lead to a constraint violation.<|reference_end|> | arxiv | @article{etalle2005integrity,
title={Integrity Constraints in Trust Management},
author={Sandro Etalle and William H. Winsborough},
journal={arXiv preprint arXiv:cs/0503061},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503061},
primaryClass={cs.CR cs.DB}
} | etalle2005integrity |
arxiv-672733 | cs/0503062 | On the Complexity of Nonrecursive XQuery and Functional Query Languages on Complex Values | <|reference_start|>On the Complexity of Nonrecursive XQuery and Functional Query Languages on Complex Values: This paper studies the complexity of evaluating functional query languages for complex values such as monad algebra and the recursion-free fragment of XQuery. We show that monad algebra with equality restricted to atomic values is complete for the class TA[2^{O(n)}, O(n)] of problems solvable in linear exponential time with a linear number of alternations. The monotone fragment of monad algebra with atomic value equality but without negation is complete for nondeterministic exponential time. For monad algebra with deep equality, we establish TA[2^{O(n)}, O(n)] lower and exponential-space upper bounds. Then we study a fragment of XQuery, Core XQuery, that seems to incorporate all the features of a query language on complex values that are traditionally deemed essential. A close connection between monad algebra on lists and Core XQuery (with ``child'' as the only axis) is exhibited, and it is shown that these languages are expressively equivalent up to representation issues. We show that Core XQuery is just as hard as monad algebra w.r.t. combined complexity, and that it is in TC0 if the query is assumed fixed.<|reference_end|> | arxiv | @article{koch2005on,
title={On the Complexity of Nonrecursive XQuery and Functional Query Languages
on Complex Values},
author={Christoph Koch},
journal={arXiv preprint arXiv:cs/0503062},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503062},
primaryClass={cs.DB cs.CC}
} | koch2005on |
arxiv-672734 | cs/0503063 | Randomly Spread CDMA: Asymptotics via Statistical Physics | <|reference_start|>Randomly Spread CDMA: Asymptotics via Statistical Physics: This paper studies randomly spread code-division multiple access (CDMA) and multiuser detection in the large-system limit using the replica method developed in statistical physics. Arbitrary input distributions and flat fading are considered. A generic multiuser detector in the form of the posterior mean estimator is applied before single-user decoding. The generic detector can be particularized to the matched filter, decorrelator, linear MMSE detector, the jointly or the individually optimal detector, and others. It is found that the detection output for each user, although in general asymptotically non-Gaussian conditioned on the transmitted symbol, converges as the number of users go to infinity to a deterministic function of a "hidden" Gaussian statistic independent of the interferers. Thus the multiuser channel can be decoupled: Each user experiences an equivalent single-user Gaussian channel, whose signal-to-noise ratio suffers a degradation due to the multiple-access interference. The uncoded error performance (e.g., symbol-error-rate) and the mutual information can then be fully characterized using the degradation factor, also known as the multiuser efficiency, which can be obtained by solving a pair of coupled fixed-point equations identified in this paper. Based on a general linear vector channel model, the results are also applicable to MIMO channels such as in multiantenna systems.<|reference_end|> | arxiv | @article{guo2005randomly,
title={Randomly Spread CDMA: Asymptotics via Statistical Physics},
author={Dongning Guo and Sergio Verdu},
journal={arXiv preprint arXiv:cs/0503063},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503063},
primaryClass={cs.IT math.IT}
} | guo2005randomly |
arxiv-672735 | cs/0503064 | Minimum-Cost Multicast over Coded Packet Networks | <|reference_start|>Minimum-Cost Multicast over Coded Packet Networks: We consider the problem of establishing minimum-cost multicast connections over coded packet networks, i.e. packet networks where the contents of outgoing packets are arbitrary, causal functions of the contents of received packets. We consider both wireline and wireless packet networks as well as both static multicast (where membership of the multicast group remains constant for the duration of the connection) and dynamic multicast (where membership of the multicast group changes in time, with nodes joining and leaving the group). For static multicast, we reduce the problem to a polynomial-time solvable optimization problem, and we present decentralized algorithms for solving it. These algorithms, when coupled with existing decentralized schemes for constructing network codes, yield a fully decentralized approach for achieving minimum-cost multicast. By contrast, establishing minimum-cost static multicast connections over routed packet networks is a very difficult problem even using centralized computation, except in the special cases of unicast and broadcast connections. For dynamic multicast, we reduce the problem to a dynamic programming problem and apply the theory of dynamic programming to suggest how it may be solved.<|reference_end|> | arxiv | @article{lun2005minimum-cost,
title={Minimum-Cost Multicast over Coded Packet Networks},
author={Desmond S. Lun, Niranjan Ratnakar, Muriel Medard, Ralf Koetter, David
R. Karger, Tracey Ho, Ebad Ahmed, Fang Zhao},
journal={IEEE Trans. Inform. Theory, vol. 52, no. 6, pp. 2608-2623, June
2006},
year={2005},
doi={10.1109/TIT.2006.874523},
archivePrefix={arXiv},
eprint={cs/0503064},
primaryClass={cs.IT cs.NI math.IT}
} | lun2005minimum-cost |
arxiv-672736 | cs/0503065 | Data-Structure Rewriting | <|reference_start|>Data-Structure Rewriting: We tackle the problem of data-structure rewriting including pointer redirections. We propose two basic rewrite steps: (i) Local Redirection and Replacement steps the aim of which is redirecting specific pointers determined by means of a pattern, as well as adding new information to an existing data ; and (ii) Global Redirection steps which are aimed to redirect all pointers targeting a node towards another one. We define these two rewriting steps following the double pushout approach. We define first the category of graphs we consider and then define rewrite rules as pairs of graph homomorphisms of the form "L <- K ->R". Unfortunately, inverse pushouts (complement pushouts) are not unique in our setting and pushouts do not always exist. Therefore, we define rewriting steps so that a rewrite rule can always be performed once a matching is found.<|reference_end|> | arxiv | @article{duval2005data-structure,
title={Data-Structure Rewriting},
author={Dominique Duval (LMC - IMAG), Rachid Echahed (Leibniz - IMAG),
Frederic Prost (Leibniz - IMAG)},
journal={arXiv preprint arXiv:cs/0503065},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503065},
primaryClass={cs.PL cs.DS}
} | duval2005data-structure |
arxiv-672737 | cs/0503066 | A Practical Approach for Circuit Routing on Dynamic Reconfigurable Devices | <|reference_start|>A Practical Approach for Circuit Routing on Dynamic Reconfigurable Devices: Management of communication by on-line routing in new FPGAs with a large amount of logic resources and partial reconfigurability is a new challenging problem. A Network-on-Chip (NoC) typically uses packet routing mechanism, which has often unsafe data transfers, and network interface overhead. In this paper, circuit routing for such dynamic NoCs is investigated, and a practical 1-dimensional network with an efficient routing algorithm is proposed and implemented. Also, this concept has been extended to the 2-dimensional case. The implementation results show the low area overhead and high performance of this network.<|reference_end|> | arxiv | @article{ahmadinia2005a,
title={A Practical Approach for Circuit Routing on Dynamic Reconfigurable
Devices},
author={Ali Ahmadinia and Christophe Bobda and Ji Ding and Mateusz Majer and
Juergen Teich and Sandor P. Fekete and Jan van der Veen},
journal={arXiv preprint arXiv:cs/0503066},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503066},
primaryClass={cs.AR}
} | ahmadinia2005a |
arxiv-672738 | cs/0503067 | Contextual equivalence for higher-order pi-calculus revisited | <|reference_start|>Contextual equivalence for higher-order pi-calculus revisited: The higher-order pi-calculus is an extension of the pi-calculus to allow communication of abstractions of processes rather than names alone. It has been studied intensively by Sangiorgi in his thesis where a characterisation of a contextual equivalence for higher-order pi-calculus is provided using labelled transition systems and normal bisimulations. Unfortunately the proof technique used there requires a restriction of the language to only allow finite types. We revisit this calculus and offer an alternative presentation of the labelled transition system and a novel proof technique which allows us to provide a fully abstract characterisation of contextual equivalence using labelled transitions and bisimulations for higher-order pi-calculus with recursive types also.<|reference_end|> | arxiv | @article{jeffrey2005contextual,
title={Contextual equivalence for higher-order pi-calculus revisited},
author={Alan Jeffrey and Julian Rathke},
journal={Logical Methods in Computer Science, Volume 1, Issue 1 (April 21,
2005) lmcs:2274},
year={2005},
doi={10.2168/LMCS-1(1:4)2005},
archivePrefix={arXiv},
eprint={cs/0503067},
primaryClass={cs.PL}
} | jeffrey2005contextual |
arxiv-672739 | cs/0503068 | A Survey of Reverse Engineering and Program Comprehension | <|reference_start|>A Survey of Reverse Engineering and Program Comprehension: Reverse engineering has been a standard practice in the hardware community for some time. It has only been within the last ten years that reverse engineering, or "program comprehension", has grown into the current sub-discipline of software engineering. Traditional software engineering is primarily focused on the development and design of new software. However, most programmers work on software that other people have designed and developed. Up to 50% of a software maintainers time can be spent determining the intent of source code. The growing demand to reevaluate and reimplement legacy software systems, brought on by the proliferation of clientserver and World Wide Web technologies, has underscored the need for reverse engineering tools and techniques. This paper introduces the terminology of reverse engineering and gives some of the obstacles that make reverse engineering difficult. Although reverse engineering remains heavily dependent on the human component, a number of automated tools are presented that aid the reverse engineer.<|reference_end|> | arxiv | @article{nelson2005a,
title={A Survey of Reverse Engineering and Program Comprehension},
author={Michael L. Nelson},
journal={arXiv preprint arXiv:cs/0503068},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503068},
primaryClass={cs.SE}
} | nelson2005a |
arxiv-672740 | cs/0503069 | mod_oai: An Apache Module for Metadata Harvesting | <|reference_start|>mod_oai: An Apache Module for Metadata Harvesting: We describe mod_oai, an Apache 2.0 module that implements the Open Archives Initiative Protocol for Metadata Harvesting (OAI-PMH). OAIPMH is the de facto standard for metadata exchange in digital libraries and allows repositories to expose their contents in a structured, application-neutral format with semantics optimized for accurate incremental harvesting. Current implementations of OAI-PMH are either separate applications that access an existing repository, or are built-in to repository software packages. mod_oai is different in that it optimizes harvesting web content by building OAI-PMH capability into the Apache server. We discuss the implications of adding harvesting capability to an Apache server and describe our initial experimental results accessing a departmental web site using both web crawling and OAIPMH harvesting techniques.<|reference_end|> | arxiv | @article{nelson2005mod_oai:,
title={mod_oai: An Apache Module for Metadata Harvesting},
author={Michael L. Nelson, Herbert Van de Sompel, Xiaoming Liu, Terry L.
Harrison, Nathan McFarland},
journal={arXiv preprint arXiv:cs/0503069},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503069},
primaryClass={cs.DL}
} | nelson2005mod_oai: |
arxiv-672741 | cs/0503070 | Improved message passing for inference in densely connected systems | <|reference_start|>Improved message passing for inference in densely connected systems: An improved inference method for densely connected systems is presented. The approach is based on passing condensed messages between variables, representing macroscopic averages of microscopic messages. We extend previous work that showed promising results in cases where the solution space is contiguous to cases where fragmentation occurs. We apply the method to the signal detection problem of Code Division Multiple Access (CDMA) for demonstrating its potential. A highly efficient practical algorithm is also derived on the basis of insight gained from the analysis.<|reference_end|> | arxiv | @article{neirotti2005improved,
title={Improved message passing for inference in densely connected systems},
author={Juan P. Neirotti and David Saad},
journal={arXiv preprint arXiv:cs/0503070},
year={2005},
doi={10.1209/epl/i2005-10148-5},
archivePrefix={arXiv},
eprint={cs/0503070},
primaryClass={cs.IT cond-mat.dis-nn math.IT}
} | neirotti2005improved |
arxiv-672742 | cs/0503071 | Consistency in Models for Distributed Learning under Communication Constraints | <|reference_start|>Consistency in Models for Distributed Learning under Communication Constraints: Motivated by sensor networks and other distributed settings, several models for distributed learning are presented. The models differ from classical works in statistical pattern recognition by allocating observations of an independent and identically distributed (i.i.d.) sampling process amongst members of a network of simple learning agents. The agents are limited in their ability to communicate to a central fusion center and thus, the amount of information available for use in classification or regression is constrained. For several basic communication models in both the binary classification and regression frameworks, we question the existence of agent decision rules and fusion rules that result in a universally consistent ensemble. The answers to this question present new issues to consider with regard to universal consistency. Insofar as these models present a useful picture of distributed scenarios, this paper addresses the issue of whether or not the guarantees provided by Stone's Theorem in centralized environments hold in distributed settings.<|reference_end|> | arxiv | @article{predd2005consistency,
title={Consistency in Models for Distributed Learning under Communication
Constraints},
author={Joel B. Predd, Sanjeev R. Kulkarni, and H. Vincent Poor},
journal={arXiv preprint arXiv:cs/0503071},
year={2005},
doi={10.1109/TIT.2005.860420},
archivePrefix={arXiv},
eprint={cs/0503071},
primaryClass={cs.IT cs.LG math.IT}
} | predd2005consistency |
arxiv-672743 | cs/0503072 | Distributed Learning in Wireless Sensor Networks | <|reference_start|>Distributed Learning in Wireless Sensor Networks: The problem of distributed or decentralized detection and estimation in applications such as wireless sensor networks has often been considered in the framework of parametric models, in which strong assumptions are made about a statistical description of nature. In certain applications, such assumptions are warranted and systems designed from these models show promise. However, in other scenarios, prior knowledge is at best vague and translating such knowledge into a statistical model is undesirable. Applications such as these pave the way for a nonparametric study of distributed detection and estimation. In this paper, we review recent work of the authors in which some elementary models for distributed learning are considered. These models are in the spirit of classical work in nonparametric statistics and are applicable to wireless sensor networks.<|reference_end|> | arxiv | @article{predd2005distributed,
title={Distributed Learning in Wireless Sensor Networks},
author={Joel B. Predd, Sanjeev R. Kulkarni, and H. Vincent Poor},
journal={arXiv preprint arXiv:cs/0503072},
year={2005},
doi={10.1109/MSP.2006.1657817},
archivePrefix={arXiv},
eprint={cs/0503072},
primaryClass={cs.IT cs.LG math.IT}
} | predd2005distributed |
arxiv-672744 | cs/0503073 | Tensor manipulation in GPL Maxima | <|reference_start|>Tensor manipulation in GPL Maxima: GPL Maxima is an open-source computer algebra system based on DOE-MACSYMA. GPL Maxima included two tensor manipulation packages from DOE-MACSYMA, but these were in various states of disrepair. One of the two packages, CTENSOR, implemented component-based tensor manipulation; the other, ITENSOR, treated tensor symbols as opaque, manipulating them based on their index properties. The present paper describes the state in which these packages were found, the steps that were needed to make the packages fully functional again, and the new functionality that was implemented to make them more versatile. A third package, ATENSOR, was also implemented; fully compatible with the identically named package in the commercial version of MACSYMA, ATENSOR implements abstract tensor algebras.<|reference_end|> | arxiv | @article{toth2005tensor,
title={Tensor manipulation in GPL Maxima},
author={Viktor Toth},
journal={arXiv preprint arXiv:cs/0503073},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503073},
primaryClass={cs.SC}
} | toth2005tensor |
arxiv-672745 | cs/0503074 | A File System Abstraction for Sense and Respond Systems | <|reference_start|>A File System Abstraction for Sense and Respond Systems: The heterogeneity and resource constraints of sense-and-respond systems pose significant challenges to system and application development. In this paper, we present a flexible, intuitive file system abstraction for organizing and managing sense-and-respond systems based on the Plan 9 design principles. A key feature of this abstraction is the ability to support multiple views of the system via filesystem namespaces. Constructed logical views present an application-specific representation of the network, thus enabling high-level programming of the network. Concurrently, structural views of the network enable resource-efficient planning and execution of tasks. We present and motivate the design using several examples, outline research challenges and our research plan to address them, and describe the current state of implementation.<|reference_end|> | arxiv | @article{tilak2005a,
title={A File System Abstraction for Sense and Respond Systems},
author={Sameer Tilak, Bhanu Pisupati, Kenneth Chiu, Geoffrey Brown, Nael
Abu-Ghazaleh},
journal={arXiv preprint arXiv:cs/0503074},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503074},
primaryClass={cs.NI cs.OS}
} | tilak2005a |
arxiv-672746 | cs/0503075 | Statistical Modelling of Information Sharing: Community, Membership and Content | <|reference_start|>Statistical Modelling of Information Sharing: Community, Membership and Content: File-sharing systems, like many online and traditional information sharing communities (e.g. newsgroups, BBS, forums, interest clubs), are dynamical systems in nature. As peers get in and out of the system, the information content made available by the prevailing membership varies continually in amount as well as composition, which in turn affects all peers' join/leave decisions. As a result, the dynamics of membership and information content are strongly coupled, suggesting interesting issues about growth, sustenance and stability. In this paper, we propose to study such communities with a simple statistical model of an information sharing club. Carrying their private payloads of information goods as potential supply to the club, peers join or leave on the basis of whether the information they demand is currently available. Information goods are chunked and typed, as in a file sharing system where peers contribute different files, or a forum where messages are grouped by topics or threads. Peers' demand and supply are then characterized by statistical distributions over the type domain. This model reveals interesting critical behaviour with multiple equilibria. A sharp growth threshold is derived: the club may grow towards a sustainable equilibrium only if the value of an order parameter is above the threshold, or shrink to emptiness otherwise. The order parameter is composite and comprises the peer population size, the level of their contributed supply, the club's efficiency in information search, the spread of supply and demand over the type domain, as well as the goodness of match between them.<|reference_end|> | arxiv | @article{ng2005statistical,
title={Statistical Modelling of Information Sharing: Community, Membership and
Content},
author={W.-Y. Ng, W.K. Lin, D.M. Chiu},
journal={arXiv preprint arXiv:cs/0503075},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503075},
primaryClass={cs.NI cond-mat.stat-mech physics.soc-ph}
} | ng2005statistical |
arxiv-672747 | cs/0503076 | Geometric Models of Rolling-Shutter Cameras | <|reference_start|>Geometric Models of Rolling-Shutter Cameras: Cameras with rolling shutters are becoming more common as low-power, low-cost CMOS sensors are being used more frequently in cameras. The rolling shutter means that not all scanlines are exposed over the same time interval. The effects of a rolling shutter are noticeable when either the camera or objects in the scene are moving and can lead to systematic biases in projection estimation. We develop a general projection equation for a rolling shutter camera and show how it is affected by different types of camera motion. In the case of fronto-parallel motion, we show how that camera can be modeled as an X-slit camera. We also develop approximate projection equations for a non-zero angular velocity about the optical axis and approximate the projection equation for a constant velocity screw motion. We demonstrate how the rolling shutter effects the projective geometry of the camera and in turn the structure-from-motion.<|reference_end|> | arxiv | @article{meingast2005geometric,
title={Geometric Models of Rolling-Shutter Cameras},
author={Marci Meingast, Christopher Geyer, Shankar Sastry},
journal={arXiv preprint arXiv:cs/0503076},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503076},
primaryClass={cs.CV cs.RO}
} | meingast2005geometric |
arxiv-672748 | cs/0503077 | Weighted Automata in Text and Speech Processing | <|reference_start|>Weighted Automata in Text and Speech Processing: Finite-state automata are a very effective tool in natural language processing. However, in a variety of applications and especially in speech precessing, it is necessary to consider more general machines in which arcs are assigned weights or costs. We briefly describe some of the main theoretical and algorithmic aspects of these machines. In particular, we describe an efficient composition algorithm for weighted transducers, and give examples illustrating the value of determinization and minimization algorithms for weighted automata.<|reference_end|> | arxiv | @article{mohri2005weighted,
title={Weighted Automata in Text and Speech Processing},
author={Mehryar Mohri, Fernando Pereira, Michael Riley},
journal={Mehryar Mohri, Fernando Pereira, and Michael Riley. Weighted
Automata in Text and Speech Processing. In Proceedings of the 12th biennial
European Conference on Artificial Intelligence (ECAI-96), Workshop on
Extended finite state models of language. Budapest, Hungary, 1996. John Wiley
and Sons, Chichester},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503077},
primaryClass={cs.CL cs.HC}
} | mohri2005weighted |
arxiv-672749 | cs/0503078 | Obtaining Membership Functions from a Neuron Fuzzy System extended by Kohonen Network | <|reference_start|>Obtaining Membership Functions from a Neuron Fuzzy System extended by Kohonen Network: This article presents the Neo-Fuzzy-Neuron Modified by Kohonen Network (NFN-MK), an hybrid computational model that combines fuzzy system technique and artificial neural networks. Its main task consists in the automatic generation of membership functions, in particular, triangle forms, aiming a dynamic modeling of a system. The model is tested by simulating real systems, here represented by a nonlinear mathematical function. Comparison with the results obtained by traditional neural networks, and correlated studies of neurofuzzy systems applied in system identification area, shows that the NFN-MK model has a similar performance, despite its greater simplicity.<|reference_end|> | arxiv | @article{pagliosa2005obtaining,
title={Obtaining Membership Functions from a Neuron Fuzzy System extended by
Kohonen Network},
author={Angelo Luis Pagliosa, Claudio Cesar de Sa and Fernando D. Sasse},
journal={arXiv preprint arXiv:cs/0503078},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503078},
primaryClass={cs.NE}
} | pagliosa2005obtaining |
arxiv-672750 | cs/0503079 | Space-time databases modeling global semantic networks | <|reference_start|>Space-time databases modeling global semantic networks: This paper represents an approach to creating global knowledge systems, using new philosophy and infrastructure of global distributed semantic network (frame knowledge representation system) based on the space-time database construction. The main idea of the space-time database environment introduced in the paper is to bind a document (an information frame, a knowledge) to a special kind of entity, that we call permanent entity, -- an object without history and evolution, described by a "point" in the generalized, informational space-time (not an evolving object in the real space having history). For documents (information) it means that document content is unchangeable, and documents are absolutely persistent. This approach leads to new knowledge representation and retreival techniques. We discuss the way of applying the concept to a global distributed scientific library and scientific workspace. Some practical aspects of the work are elaborated by the open IT project at http://sourceforge.net/projects/gil/.<|reference_end|> | arxiv | @article{prikhod'ko2005space-time,
title={Space-time databases modeling global semantic networks},
author={A. A. Prikhod'ko, N. A. Prikhod'ko},
journal={arXiv preprint arXiv:cs/0503079},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503079},
primaryClass={cs.IT cs.IR math.IT}
} | prikhod'ko2005space-time |
arxiv-672751 | cs/0503080 | Enforcing Semantic Integrity on Untrusted Clients in Networked Virtual Environments | <|reference_start|>Enforcing Semantic Integrity on Untrusted Clients in Networked Virtual Environments: During the last years, large-scale simulations of realistic physical environments which support the interaction of multiple participants over the Internet have become increasingly available and economically significant, most notably in the computer gaming industry. Such systems, commonly called networked virtual environments (NVEs), are usually based on a client-server architecture where for performance reasons and bandwidth restrictions, the simulation is partially deferred to the clients. This inevitable architectural choice renders the simulation vulnerable to attacks against the semantic integrity of the simulation: malicious clients may attempt to compromise the physical and logical laws governing the simulation, or to alter the causality of events a posteriori. In this paper, we initiate the systematic study of semantic integrity in NVEs from a security point of view. We argue that naive policies to enforce semantic integrity involve intolerable network load, and are therefore not practically feasible. We present a new semantic integrity protocol based on cryptographic primitives which enables the server system to audit the local computations of the clients on demand. Our approach facilitates low network and CPU load, incurs reasonable engineering overhead, and maximally decouples the auditing process from the soft real time constraints of the simulation.<|reference_end|> | arxiv | @article{hermann2005enforcing,
title={Enforcing Semantic Integrity on Untrusted Clients in Networked Virtual
Environments},
author={Uwe Hermann, Stefan Katzenbeisser, Christian Schallhart, Helmut Veith},
journal={arXiv preprint arXiv:cs/0503080},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503080},
primaryClass={cs.CR}
} | hermann2005enforcing |
arxiv-672752 | cs/0503081 | An Optimization Model for Outlier Detection in Categorical Data | <|reference_start|>An Optimization Model for Outlier Detection in Categorical Data: The task of outlier detection is to find small groups of data objects that are exceptional when compared with rest large amount of data. Detection of such outliers is important for many applications such as fraud detection and customer migration. Most existing methods are designed for numeric data. They will encounter problems with real-life applications that contain categorical data. In this paper, we formally define the problem of outlier detection in categorical data as an optimization problem from a global viewpoint. Moreover, we present a local-search heuristic based algorithm for efficiently finding feasible solutions. Experimental results on real datasets and large synthetic datasets demonstrate the superiority of our model and algorithm.<|reference_end|> | arxiv | @article{he2005an,
title={An Optimization Model for Outlier Detection in Categorical Data},
author={Zengyou He, Xiaofei Xu, Shengchun Deng},
journal={arXiv preprint arXiv:cs/0503081},
year={2005},
number={Tr-05-0329},
archivePrefix={arXiv},
eprint={cs/0503081},
primaryClass={cs.DB cs.AI}
} | he2005an |
arxiv-672753 | cs/0503082 | Spines of Random Constraint Satisfaction Problems: Definition and Connection with Computational Complexity | <|reference_start|>Spines of Random Constraint Satisfaction Problems: Definition and Connection with Computational Complexity: We study the connection between the order of phase transitions in combinatorial problems and the complexity of decision algorithms for such problems. We rigorously show that, for a class of random constraint satisfaction problems, a limited connection between the two phenomena indeed exists. Specifically, we extend the definition of the spine order parameter of Bollobas et al. to random constraint satisfaction problems, rigorously showing that for such problems a discontinuity of the spine is associated with a $2^{\Omega(n)}$ resolution complexity (and thus a $2^{\Omega(n)}$ complexity of DPLL algorithms) on random instances. The two phenomena have a common underlying cause: the emergence of ``large'' (linear size) minimally unsatisfiable subformulas of a random formula at the satisfiability phase transition. We present several further results that add weight to the intuition that random constraint satisfaction problems with a sharp threshold and a continuous spine are ``qualitatively similar to random 2-SAT''. Finally, we argue that it is the spine rather than the backbone parameter whose continuity has implications for the decision complexity of combinatorial problems, and we provide experimental evidence that the two parameters can behave in a different manner.<|reference_end|> | arxiv | @article{istrate2005spines,
title={Spines of Random Constraint Satisfaction Problems: Definition and
Connection with Computational Complexity},
author={Gabriel Istrate, Stefan Boettcher, Allon G. Percus},
journal={arXiv preprint arXiv:cs/0503082},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503082},
primaryClass={cs.CC cond-mat.dis-nn cs.AI}
} | istrate2005spines |
arxiv-672754 | cs/0503083 | Coarse and Sharp Thresholds of Boolean Constraint Satisfaction Problems | <|reference_start|>Coarse and Sharp Thresholds of Boolean Constraint Satisfaction Problems: We study threshold properties of random constraint satisfaction problems under a probabilistic model due to Molloy. We give a sufficient condition for the existence of a sharp threshold that leads (for boolean constraints) to a necessary and sufficient for the existence of a sharp threshold in the case where constraint templates are applied with equal probability, solving thus an open problem of Creignou and Daude.<|reference_end|> | arxiv | @article{istrate2005coarse,
title={Coarse and Sharp Thresholds of Boolean Constraint Satisfaction Problems},
author={Gabriel Istrate},
journal={arXiv preprint arXiv:cs/0503083},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503083},
primaryClass={cs.DM cs.CC}
} | istrate2005coarse |
arxiv-672755 | cs/0503084 | The Peculiarities of Nonstationary Formation of Inhomogeneous Structures of Charged Particles in the Electrodiffusion Processes | <|reference_start|>The Peculiarities of Nonstationary Formation of Inhomogeneous Structures of Charged Particles in the Electrodiffusion Processes: In this paper the distribution of charged particles is constructed under the approximation of ambipolar diffusion. The results of mathematical modelling in two-dimensional case taking into account the velocities of the system are presented.<|reference_end|> | arxiv | @article{nefyodov2005the,
title={The Peculiarities of Nonstationary Formation of Inhomogeneous Structures
of Charged Particles in the Electrodiffusion Processes},
author={P. Nefyodov, V. Reztsov, O. Riabinina},
journal={arXiv preprint arXiv:cs/0503084},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503084},
primaryClass={cs.CE}
} | nefyodov2005the |
arxiv-672756 | cs/0503085 | Dynamic Shannon Coding | <|reference_start|>Dynamic Shannon Coding: We present a new algorithm for dynamic prefix-free coding, based on Shannon coding. We give a simple analysis and prove a better upper bound on the length of the encoding produced than the corresponding bound for dynamic Huffman coding. We show how our algorithm can be modified for efficient length-restricted coding, alphabetic coding and coding with unequal letter costs.<|reference_end|> | arxiv | @article{gagie2005dynamic,
title={Dynamic Shannon Coding},
author={Travis Gagie},
journal={arXiv preprint arXiv:cs/0503085},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503085},
primaryClass={cs.IT math.IT}
} | gagie2005dynamic |
arxiv-672757 | cs/0503086 | Segmentation of the Homogeneity of a Signal Using a Piecewise Linear Recognition Tool | <|reference_start|>Segmentation of the Homogeneity of a Signal Using a Piecewise Linear Recognition Tool: In this paper a new method of detection of homogeneous zones and singularity parts of a 1D signal is proposed. The entropy function is used to transform signal in piecewise linear one. The multiple regression permits to detect lines and project them in the Hough parameters space in order to easily recognise homogeneous zone and abrupt changes of the signal. Two application examples are analysed, the first is a classical fractal signal and the other is issued from a dynamic mechanical study.<|reference_end|> | arxiv | @article{morlier2005segmentation,
title={Segmentation of the Homogeneity of a Signal Using a Piecewise Linear
Recognition Tool},
author={Joseph Morlier (LRBB)},
journal={arXiv preprint arXiv:cs/0503086},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503086},
primaryClass={cs.NA}
} | morlier2005segmentation |
arxiv-672758 | cs/0503087 | Dynamic Simulation of Construction Machinery: Towards an Operator Model | <|reference_start|>Dynamic Simulation of Construction Machinery: Towards an Operator Model: In dynamic simulation of complete wheel loaders, one interesting aspect, specific for the working task, is the momentary power distribution between drive train and hydraulics, which is balanced by the operator. This paper presents the initial results to a simulation model of a human operator. Rather than letting the operator model follow a predefined path with control inputs at given points, it follows a collection of general rules that together describe the machine's working cycle in a generic way. The advantage of this is that the working task description and the operator model itself are independent of the machine's technical parameters. Complete sub-system characteristics can thus be changed without compromising the relevance and validity of the simulation. Ultimately, this can be used to assess a machine's total performance, fuel efficiency and operability already in the concept phase of the product development process.<|reference_end|> | arxiv | @article{filla2005dynamic,
title={Dynamic Simulation of Construction Machinery: Towards an Operator Model},
author={Reno Filla (1), Allan Ericsson (1), Jan-Ove Palmberg (2) ((1) Volvo
Wheel Loaders AB, (2) Linkoping University)},
journal={International Fluid Power Exhibition 2005 Technical Conference,
pp. 429-438},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503087},
primaryClass={cs.CE}
} | filla2005dynamic |
arxiv-672759 | cs/0503088 | General non-asymptotic and asymptotic formulas in channel resolvability and identification capacity and their application to wire-tap channel | <|reference_start|>General non-asymptotic and asymptotic formulas in channel resolvability and identification capacity and their application to wire-tap channel: Several non-asymptotic formulas are established in channel resolvability and identification capacity, and they are applied to wire-tap channel. By using these formulas, the $\epsilon$ capacities of the above three problems are considered in the most general setting, where no structural assumptions such as the stationary memoryless property are made on a channel. As a result, we solve an open problem proposed in Han & Verdu and Han. Moreover, we obtain lower bounds of the exponents of error probability and the wire-tapper's information in wire-tap channel.<|reference_end|> | arxiv | @article{hayashi2005general,
title={General non-asymptotic and asymptotic formulas in channel resolvability
and identification capacity and their application to wire-tap channel},
author={Masahito Hayashi},
journal={IEEE Transactions on Information Theory, Vol. 52, No. 4, 1562-1575
(2006)},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503088},
primaryClass={cs.IT math.IT}
} | hayashi2005general |
arxiv-672760 | cs/0503089 | Second order asymptotics in fixed-length source coding and intrinsic randomness | <|reference_start|>Second order asymptotics in fixed-length source coding and intrinsic randomness: Second order asymptotics of fixed-length source coding and intrinsic randomness is discussed with a constant error constraint. There was a difference between optimal rates of fixed-length source coding and intrinsic randomness, which never occurred in the first order asymptotics. In addition, the relation between uniform distribution and compressed data is discussed based on this fact. These results are valid for general information sources as well as independent and identical distributions. A universal code attaining the second order optimal rate is also constructed.<|reference_end|> | arxiv | @article{hayashi2005second,
title={Second order asymptotics in fixed-length source coding and intrinsic
randomness},
author={Masahito Hayashi},
journal={Transactions on Information Theory, 54, 4619 - 4637 (2008)},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503089},
primaryClass={cs.IT math.IT}
} | hayashi2005second |
arxiv-672761 | cs/0503090 | Effects of variations of load distribution on network performance | <|reference_start|>Effects of variations of load distribution on network performance: This paper is concerned with the characterization of the relationship between topology and traffic dynamics. We use a model of network generation that allows the transition from random to scale free networks. Specifically, we consider three different topological types of network: random, scale-free with \gamma = 3, scale-free with \gamma = 2. By using a novel LRD traffic generator, we observe best performance, in terms of transmission rates and delivered packets, in the case of random networks. We show that, even if scale-free networks are characterized by shorter characteristic-path- length (the lower the exponent, the lower the path-length), they show worst performances in terms of communication. We conjecture this could be explained in terms of changes in the load distribution, defined here as the number of shortest paths going through a given vertex. In fact, that distribu- tion is characterized by (i) a decreasing mean (ii) an increas- ing standard deviation, as the networks becomes scale-free (especially scale-free networks with low exponents). The use of a degree-independent server also discriminates against a scale-free structure. As a result, since the model is un- controlled, most packets will go through the same vertices, favoring the onset of congestion.<|reference_end|> | arxiv | @article{arrowsmith2005effects,
title={Effects of variations of load distribution on network performance},
author={David Arrowsmith, Mario di Bernardo, Francesco Sorrentino},
journal={arXiv preprint arXiv:cs/0503090},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503090},
primaryClass={cs.NI}
} | arrowsmith2005effects |
arxiv-672762 | cs/0503091 | Resource Bounded Unprovability of Computational Lower Bounds | <|reference_start|>Resource Bounded Unprovability of Computational Lower Bounds: This paper introduces new notions of asymptotic proofs, PT(polynomial-time)-extensions, PTM(polynomial-time Turing machine)-omega-consistency, etc. on formal theories of arithmetic including PA (Peano Arithmetic). This paper shows that P not= NP (more generally, any super-polynomial-time lower bound in PSPACE) is unprovable in a PTM-omega-consistent theory T, where T is a consistent PT-extension of PA. This result gives a unified view to the existing two major negative results on proving P not= NP, Natural Proofs and relativizable proofs, through the two manners of characterization of PTM-omega-consistency. We also show that the PTM-omega-consistency of T cannot be proven in any PTM-omega-consistent theory S, where S is a consistent PT-extension of T.<|reference_end|> | arxiv | @article{okamoto2005resource,
title={Resource Bounded Unprovability of Computational Lower Bounds},
author={Tatsuaki Okamoto, Ryo Kashima},
journal={arXiv preprint arXiv:cs/0503091},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503091},
primaryClass={cs.CC cs.LO}
} | okamoto2005resource |
arxiv-672763 | cs/0503092 | Monotonic and Nonmonotonic Preference Revision | <|reference_start|>Monotonic and Nonmonotonic Preference Revision: We study here preference revision, considering both the monotonic case where the original preferences are preserved and the nonmonotonic case where the new preferences may override the original ones. We use a relational framework in which preferences are represented using binary relations (not necessarily finite). We identify several classes of revisions that preserve order axioms, for example the axioms of strict partial or weak orders. We consider applications of our results to preference querying in relational databases.<|reference_end|> | arxiv | @article{chomicki2005monotonic,
title={Monotonic and Nonmonotonic Preference Revision},
author={Jan Chomicki, Joyce Song},
journal={arXiv preprint arXiv:cs/0503092},
year={2005},
archivePrefix={arXiv},
eprint={cs/0503092},
primaryClass={cs.DB cs.AI}
} | chomicki2005monotonic |
arxiv-672764 | cs/0504001 | Probabilistic and Team PFIN-type Learning: General Properties | <|reference_start|>Probabilistic and Team PFIN-type Learning: General Properties: We consider the probability hierarchy for Popperian FINite learning and study the general properties of this hierarchy. We prove that the probability hierarchy is decidable, i.e. there exists an algorithm that receives p_1 and p_2 and answers whether PFIN-type learning with the probability of success p_1 is equivalent to PFIN-type learning with the probability of success p_2. To prove our result, we analyze the topological structure of the probability hierarchy. We prove that it is well-ordered in descending ordering and order-equivalent to ordinal epsilon_0. This shows that the structure of the hierarchy is very complicated. Using similar methods, we also prove that, for PFIN-type learning, team learning and probabilistic learning are of the same power.<|reference_end|> | arxiv | @article{ambainis2005probabilistic,
title={Probabilistic and Team PFIN-type Learning: General Properties},
author={Andris Ambainis},
journal={Journal of Computer and System Sciences, 74(4):457-489, 2008},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504001},
primaryClass={cs.LG}
} | ambainis2005probabilistic |
arxiv-672765 | cs/0504002 | On the Effect of Fading on Ad hoc Networking | <|reference_start|>On the Effect of Fading on Ad hoc Networking: Most MANET (Mobile Ad hoc NETwork) research assumes idealized propagation models. Experimental results have shown significant divergence from simulation results due to the effect of signal fading in realistic wireless communication channels. In this paper, we characterize the impact of fading on protocol performance. We first study the effect of fading on MAC performance and show that its effect can be dominating. One of our important conclusions is that eliminating RTS/CTS packets results in more effective operation under fading. We also identify an unfairness problem that arises due to backoffs in the presence of fading. Moreover, fading results in several subtle interactions between the MAC and routing layers. We identify several of these problems and make observations about effective approaches for addressing them. For example, the criteria for determining the best path should not only consider the link status but also the link order. In addition, because routing protocols rely on MAC level transmission failure (when the retry limit is exceeded), route failure errors are often generated unnecessarily. Finally, because MAC level broadcasts are unreliable, they are especially vulnerable to fading. We analyze these effects and outline preliminary solutions to them.<|reference_end|> | arxiv | @article{han2005on,
title={On the Effect of Fading on Ad hoc Networking},
author={Seon-Yeong Han and Nael Abu-Ghazaleh},
journal={arXiv preprint arXiv:cs/0504002},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504002},
primaryClass={cs.NI}
} | han2005on |
arxiv-672766 | cs/0504003 | Multiple Description Quantization via Gram-Schmidt Orthogonalization | <|reference_start|>Multiple Description Quantization via Gram-Schmidt Orthogonalization: The multiple description (MD) problem has received considerable attention as a model of information transmission over unreliable channels. A general framework for designing efficient multiple description quantization schemes is proposed in this paper. We provide a systematic treatment of the El Gamal-Cover (EGC) achievable MD rate-distortion region, and show that any point in the EGC region can be achieved via a successive quantization scheme along with quantization splitting. For the quadratic Gaussian case, the proposed scheme has an intrinsic connection with the Gram-Schmidt orthogonalization, which implies that the whole Gaussian MD rate-distortion region is achievable with a sequential dithered lattice-based quantization scheme as the dimension of the (optimal) lattice quantizers becomes large. Moreover, this scheme is shown to be universal for all i.i.d. smooth sources with performance no worse than that for an i.i.d. Gaussian source with the same variance and asymptotically optimal at high resolution. A class of low-complexity MD scalar quantizers in the proposed general framework also is constructed and is illustrated geometrically; the performance is analyzed in the high resolution regime, which exhibits a noticeable improvement over the existing MD scalar quantization schemes.<|reference_end|> | arxiv | @article{chen2005multiple,
title={Multiple Description Quantization via Gram-Schmidt Orthogonalization},
author={Jun Chen, Chao Tian, Toby Berger, Sheila Hemami},
journal={arXiv preprint arXiv:cs/0504003},
year={2005},
doi={10.1109/TIT.2006.885498},
archivePrefix={arXiv},
eprint={cs/0504003},
primaryClass={cs.IT math.IT}
} | chen2005multiple |
arxiv-672767 | cs/0504004 | Statistical analysis of quality measures for mobile ad hoc networks | <|reference_start|>Statistical analysis of quality measures for mobile ad hoc networks: How can the quality of a mobile ad hoc network (MANET) be quantified? This work aims at an answer based on the lower network layers, i.e. on connectivity between the wireless nodes, using statistical methods. A number of different quality measures are introduced and classified according to their scaling behaviour. They are analysed in a statistical model of a 1-dimensional MANET system (corresponding e.g. to cars on a road). Neglecting boundary effects, the model turns out to be exactly solvable, so that explicit analytical results for the quality levels can be obtained both at fixed system size and in the limit of large systems. In particular, this improves estimates known in the literature for the probability of connectedness of 1-dimensional MANETs.<|reference_end|> | arxiv | @article{bostelmann2005statistical,
title={Statistical analysis of quality measures for mobile ad hoc networks},
author={Henning Bostelmann},
journal={arXiv preprint arXiv:cs/0504004},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504004},
primaryClass={cs.NI cs.DM}
} | bostelmann2005statistical |
arxiv-672768 | cs/0504005 | Fast Codes for Large Alphabets | <|reference_start|>Fast Codes for Large Alphabets: We address the problem of constructing a fast lossless code in the case when the source alphabet is large. The main idea of the new scheme may be described as follows. We group letters with small probabilities in subsets (acting as super letters) and use time consuming coding for these subsets only, whereas letters in the subsets have the same code length and therefore can be coded fast. The described scheme can be applied to sources with known and unknown statistics.<|reference_end|> | arxiv | @article{ryabko2005fast,
title={Fast Codes for Large Alphabets},
author={Boris Ryabko, Jaakko Astola, Karen Egiazarian},
journal={arXiv preprint arXiv:cs/0504005},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504005},
primaryClass={cs.IT math.IT}
} | ryabko2005fast |
arxiv-672769 | cs/0504006 | Using Information Theory Approach to Randomness Testing | <|reference_start|>Using Information Theory Approach to Randomness Testing: We address the problem of detecting deviations of binary sequence from randomness,which is very important for random number (RNG) and pseudorandom number generators (PRNG). Namely, we consider a null hypothesis $H_0$ that a given bit sequence is generated by Bernoulli source with equal probabilities of 0 and 1 and the alternative hypothesis $H_1$ that the sequence is generated by a stationary and ergodic source which differs from the source under $H_0$. We show that data compression methods can be used as a basis for such testing and describe two new tests for randomness, which are based on ideas of universal coding. Known statistical tests and suggested ones are applied for testing PRNGs. Those experiments show that the power of the new tests is greater than of many known algorithms.<|reference_end|> | arxiv | @article{ryabko2005using,
title={Using Information Theory Approach to Randomness Testing},
author={B. Ya. Ryabko, V.A. Monarev},
journal={arXiv preprint arXiv:cs/0504006},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504006},
primaryClass={cs.IT math.IT}
} | ryabko2005using |
arxiv-672770 | cs/0504007 | The Bandwidth Exchange Architecture | <|reference_start|>The Bandwidth Exchange Architecture: New applications for the Internet such as video on demand, grid computing etc. depend on the availability of high bandwidth connections with acceptable Quality of Service (QoS). There appears to be, therefore, a requirement for a market where bandwidth-related transactions can take place. For this market to be effective, it must be efficient for both the provider (seller) and the user (buyer) of the bandwidth. This implies that: (a) the buyer must have a wide choice of providers that operate in a competitive environment, (b) the seller must be assured that a QoS transaction will be paid by the customer, and (c) the QoS transaction establishment must have low overheads so that it may be used by individual customers without a significant burden to the provider. In order to satisfy these requirements, we propose a framework that allows customers to purchase bandwidth using an open market where providers advertise links and capacities and customers bid for these services. The model is close to that of a commodities market that offers both advance bookings (futures) and a spot market. We explore the mechanisms that can support such a model.<|reference_end|> | arxiv | @article{turner2005the,
title={The Bandwidth Exchange Architecture},
author={David Michael Turner, Vassilis Prevelakis, Angelos D. Keromytis},
journal={arXiv preprint arXiv:cs/0504007},
year={2005},
number={DU-CS-05-03},
archivePrefix={arXiv},
eprint={cs/0504007},
primaryClass={cs.NI cs.CR}
} | turner2005the |
arxiv-672771 | cs/0504008 | Super Object Oriented Programming | <|reference_start|>Super Object Oriented Programming: This submission has been withdrawn at the request of the author.<|reference_end|> | arxiv | @article{g2005super,
title={Super Object Oriented Programming},
author={Raju Renjit. G},
journal={arXiv preprint arXiv:cs/0504008},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504008},
primaryClass={cs.PL}
} | g2005super |
arxiv-672772 | cs/0504009 | Towards a Group Theoretic Quantum Encryption Scheme Based on Generalized Hidden Subgroup Problem | <|reference_start|>Towards a Group Theoretic Quantum Encryption Scheme Based on Generalized Hidden Subgroup Problem: This paper introduces a completely new approach to encryption based on group theoretic quantum framework. Quantum cryptography has essentially focused only on key distribution and proceeded with classical encryption algorithm with the generated key. Here, we present a first step towards a quantum encryption scheme based on the solution for the hidden subgroup problem. The shared secret key K from QKD evolves as a generator for a subgroup H of a group G, in combination of the plain text data modeled as group elements. The key K helps in regeneration of the plain data on the receiver's side based on subgroup reconstruction. This paper models all quantum computations using group representations. A non-constructive proof is attempted towards the security of the encryption scheme. We also address the issues involved in a such a venture into the realms of Quantum data encryption.<|reference_end|> | arxiv | @article{srinivasan2005towards,
title={Towards a Group Theoretic Quantum Encryption Scheme Based on Generalized
Hidden Subgroup Problem},
author={N. Srinivasan, C. Sanjeevakumar, L. Sudarsan, M. Kasi Rajan, R.
Venkatesh},
journal={arXiv preprint arXiv:cs/0504009},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504009},
primaryClass={cs.DM cs.CR}
} | srinivasan2005towards |
arxiv-672773 | cs/0504010 | Reversible Fault-Tolerant Logic | <|reference_start|>Reversible Fault-Tolerant Logic: It is now widely accepted that the CMOS technology implementing irreversible logic will hit a scaling limit beyond 2016, and that the increased power dissipation is a major limiting factor. Reversible computing can potentially require arbitrarily small amounts of energy. Recently several nano-scale devices which have the potential to scale, and which naturally perform reversible logic, have emerged. This paper addresses several fundamental issues that need to be addressed before any nano-scale reversible computing systems can be realized, including reliability and performance trade-offs and architecture optimization. Many nano-scale devices will be limited to only near neighbor interactions, requiring careful optimization of circuits. We provide efficient fault-tolerant (FT) circuits when restricted to both 2D and 1D. Finally, we compute bounds on the entropy (and hence, heat) generated by our FT circuits and provide quantitative estimates on how large can we make our circuits before we lose any advantage over irreversible computing.<|reference_end|> | arxiv | @article{boykin2005reversible,
title={Reversible Fault-Tolerant Logic},
author={P. Oscar Boykin, Vwani P. Roychowdhury},
journal={arXiv preprint arXiv:cs/0504010},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504010},
primaryClass={cs.IT math.IT quant-ph}
} | boykin2005reversible |
arxiv-672774 | cs/0504011 | Average Coset Weight Distribution of Combined LDPC Matrix Ensemble | <|reference_start|>Average Coset Weight Distribution of Combined LDPC Matrix Ensemble: In this paper, the average coset weight distribution (ACWD) of structured ensembles of LDPC (Low-density Parity-Check) matrix, which is called combined ensembles, is discussed. A combined ensemble is composed of a set of simpler ensembles such as a regular bipartite ensemble. Two classes of combined ensembles have prime importance; a stacked ensemble and a concatenated ensemble, which consists of set of stacked matrices and concatenated matrices, respectively. The ACWD formulas of these ensembles is shown in this paper. Such formulas are key tools to evaluate the ACWD of a complex combined ensemble. From the ACWD of an ensemble, we can obtain some detailed properties of a code (e.g., weight of coset leaders) which is not available from an average weight distribution. Moreover, it is shown that the analysis based on the ACWD is indispensable to evaluate the average weight distribution of some classes of combined ensembles.<|reference_end|> | arxiv | @article{wadayama2005average,
title={Average Coset Weight Distribution of Combined LDPC Matrix Ensemble},
author={Tadashi Wadayama},
journal={arXiv preprint arXiv:cs/0504011},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504011},
primaryClass={cs.IT math.IT}
} | wadayama2005average |
arxiv-672775 | cs/0504012 | Improving Spam Detection Based on Structural Similarity | <|reference_start|>Improving Spam Detection Based on Structural Similarity: We propose a new detection algorithm that uses structural relationships between senders and recipients of email as the basis for the identification of spam messages. Users and receivers are represented as vectors in their reciprocal spaces. A measure of similarity between vectors is constructed and used to group users into clusters. Knowledge of their classification as past senders/receivers of spam or legitimate mail, coming from an auxiliary detection algorithm, is then used to label these clusters probabilistically. This knowledge comes from an auxiliary algorithm. The measure of similarity between the sender and receiver sets of a new message to the center vector of clusters is then used to assess the possibility of that message being legitimate or spam. We show that the proposed algorithm is able to correct part of the false positives (legitimate messages classified as spam) using a testbed of one week smtp log.<|reference_end|> | arxiv | @article{gomes2005improving,
title={Improving Spam Detection Based on Structural Similarity},
author={Luiz H. Gomes, Fernando D. O. Castro, Rodrigo B. Almeida, Luis M. A.
Bettencourt, Virgilio A. F. Almeida, Jussara M. Almeida},
journal={arXiv preprint arXiv:cs/0504012},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504012},
primaryClass={cs.CR}
} | gomes2005improving |
arxiv-672776 | cs/0504013 | Pseudocodewords of Tanner graphs | <|reference_start|>Pseudocodewords of Tanner graphs: This paper presents a detailed analysis of pseudocodewords of Tanner graphs. Pseudocodewords arising on the iterative decoder's computation tree are distinguished from pseudocodewords arising on finite degree lifts. Lower bounds on the minimum pseudocodeword weight are presented for the BEC, BSC, and AWGN channel. Some structural properties of pseudocodewords are examined, and pseudocodewords and graph properties that are potentially problematic with min-sum iterative decoding are identified. An upper bound on the minimum degree lift needed to realize a particular irreducible lift-realizable pseudocodeword is given in terms of its maximal component, and it is shown that all irreducible lift-realizable pseudocodewords have components upper bounded by a finite value $t$ that is dependent on the graph structure. Examples and different Tanner graph representations of individual codes are examined and the resulting pseudocodeword distributions and iterative decoding performances are analyzed. The results obtained provide some insights in relating the structure of the Tanner graph to the pseudocodeword distribution and suggest ways of designing Tanner graphs with good minimum pseudocodeword weight.<|reference_end|> | arxiv | @article{kelley2005pseudocodewords,
title={Pseudocodewords of Tanner graphs},
author={Christine A. Kelley and Deepak Sridhara},
journal={arXiv preprint arXiv:cs/0504013},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504013},
primaryClass={cs.IT math.IT}
} | kelley2005pseudocodewords |
arxiv-672777 | cs/0504014 | Network Information Flow with Correlated Sources | <|reference_start|>Network Information Flow with Correlated Sources: In this paper, we consider a network communications problem in which multiple correlated sources must be delivered to a single data collector node, over a network of noisy independent point-to-point channels. We prove that perfect reconstruction of all the sources at the sink is possible if and only if, for all partitions of the network nodes into two subsets S and S^c such that the sink is always in S^c, we have that H(U_S|U_{S^c}) < \sum_{i\in S,j\in S^c} C_{ij}. Our main finding is that in this setup a general source/channel separation theorem holds, and that Shannon information behaves as a classical network flow, identical in nature to the flow of water in pipes. At first glance, it might seem surprising that separation holds in a fairly general network situation like the one we study. A closer look, however, reveals that the reason for this is that our model allows only for independent point-to-point channels between pairs of nodes, and not multiple-access and/or broadcast channels, for which separation is well known not to hold. This ``information as flow'' view provides an algorithmic interpretation for our results, among which perhaps the most important one is the optimality of implementing codes using a layered protocol stack.<|reference_end|> | arxiv | @article{barros2005network,
title={Network Information Flow with Correlated Sources},
author={Joao Barros (1) and Sergio D. Servetto (2) ((1) University of Porto,
Portugal; (2) Cornell University)},
journal={IEEE Trans. Inform. Theory, 52(1):155-170, 2006.},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504014},
primaryClass={cs.IT math.IT}
} | barros2005network |
arxiv-672778 | cs/0504015 | Design of Block Transceivers with Decision Feedback Detection | <|reference_start|>Design of Block Transceivers with Decision Feedback Detection: This paper presents a method for jointly designing the transmitter-receiver pair in a block-by-block communication system that employs (intra-block) decision feedback detection. We provide closed-form expressions for transmitter-receiver pairs that simultaneously minimize the arithmetic mean squared error (MSE) at the decision point (assuming perfect feedback), the geometric MSE, and the bit error rate of a uniformly bit-loaded system at moderate-to-high signal-to-noise ratios. Separate expressions apply for the ``zero-forcing'' and ``minimum MSE'' (MMSE) decision feedback structures. In the MMSE case, the proposed design also maximizes the Gaussian mutual information and suggests that one can approach the capacity of the block transmission system using (independent instances of) the same (Gaussian) code for each element of the block. Our simulation studies indicate that the proposed transceivers perform significantly better than standard transceivers, and that they retain their performance advantages in the presence of error propagation.<|reference_end|> | arxiv | @article{xu2005design,
title={Design of Block Transceivers with Decision Feedback Detection},
author={Fang Xu, Tim Davidson, Jian-Kang Zhang and K. Max Wong},
journal={arXiv preprint arXiv:cs/0504015},
year={2005},
doi={10.1109/TSP.2005.861779},
archivePrefix={arXiv},
eprint={cs/0504015},
primaryClass={cs.IT math.IT}
} | xu2005design |
arxiv-672779 | cs/0504016 | Shortened Array Codes of Large Girth | <|reference_start|>Shortened Array Codes of Large Girth: One approach to designing structured low-density parity-check (LDPC) codes with large girth is to shorten codes with small girth in such a manner that the deleted columns of the parity-check matrix contain all the variables involved in short cycles. This approach is especially effective if the parity-check matrix of a code is a matrix composed of blocks of circulant permutation matrices, as is the case for the class of codes known as array codes. We show how to shorten array codes by deleting certain columns of their parity-check matrices so as to increase their girth. The shortening approach is based on the observation that for array codes, and in fact for a slightly more general class of LDPC codes, the cycles in the corresponding Tanner graph are governed by certain homogeneous linear equations with integer coefficients. Consequently, we can selectively eliminate cycles from an array code by only retaining those columns from the parity-check matrix of the original code that are indexed by integer sequences that do not contain solutions to the equations governing those cycles. We provide Ramsey-theoretic estimates for the maximum number of columns that can be retained from the original parity-check matrix with the property that the sequence of their indices avoid solutions to various types of cycle-governing equations. This translates to estimates of the rate penalty incurred in shortening a code to eliminate cycles. Simulation results show that for the codes considered, shortening them to increase the girth can lead to significant gains in signal-to-noise ratio in the case of communication over an additive white Gaussian noise channel.<|reference_end|> | arxiv | @article{milenkovic2005shortened,
title={Shortened Array Codes of Large Girth},
author={Olgica Milenkovic, Navin Kashyap, David Leyba},
journal={arXiv preprint arXiv:cs/0504016},
year={2005},
doi={10.1109/TIT.2006.878179},
archivePrefix={arXiv},
eprint={cs/0504016},
primaryClass={cs.DM cs.IT math.IT}
} | milenkovic2005shortened |
arxiv-672780 | cs/0504017 | A new SISO algorithm with application to turbo equalization | <|reference_start|>A new SISO algorithm with application to turbo equalization: In this paper we propose a new soft-input soft-output equalization algorithm, offering very good performance/complexity tradeoffs. It follows the structure of the BCJR algorithm, but dynamically constructs a simplified trellis during the forward recursion. In each trellis section, only the M states with the strongest forward metric are preserved, similar to the M-BCJR algorithm. Unlike the M-BCJR, however, the remaining states are not deleted, but rather merged into the surviving states. The new algorithm compares favorably with the reduced-state BCJR algorithm, offering better performance and more flexibility, particularly for systems with higher order modulations.<|reference_end|> | arxiv | @article{sikora2005a,
title={A new SISO algorithm with application to turbo equalization},
author={Marcin Sikora and Daniel J. Costello Jr},
journal={arXiv preprint arXiv:cs/0504017},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504017},
primaryClass={cs.IT math.IT}
} | sikora2005a |
arxiv-672781 | cs/0504018 | A Rule-Based Logic for Quantum Information | <|reference_start|>A Rule-Based Logic for Quantum Information: In the present article, we explore a new approach for the study of orthomodular lattices, where we replace the problematic conjunction by a binary operator, called the Sasaki projection. We present a characterization of orthomodular lattices based on the use of an algebraic version of the Sasaki projection operator (together with orthocomplementation) rather than on the conjunction. We then define a new logic, which we call Sasaki Orthologic, which is closely related to quantum logic, and provide a rule-based definition of this logic.<|reference_end|> | arxiv | @article{brunet2005a,
title={A Rule-Based Logic for Quantum Information},
author={Olivier Brunet (Leibniz - IMAG)},
journal={arXiv preprint arXiv:cs/0504018},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504018},
primaryClass={cs.LO quant-ph}
} | brunet2005a |
arxiv-672782 | cs/0504019 | Efficient Authenticated Encryption Schemes with Public Verifiability | <|reference_start|>Efficient Authenticated Encryption Schemes with Public Verifiability: An authenticated encryption scheme allows messages to be encrypted and authenticated simultaneously. In 2003, Ma and Chen proposed such a scheme with public verifiability. That is, in their scheme the receiver can efficiently prove to a third party that a message is indeed originated from a specific sender. In this paper, we first identify two security weaknesses in the Ma-Chen authenticated encryption scheme. Then, based on the Schnorr signature, we proposed an efficient and secure improved scheme such that all the desired security requirements are satisfied.<|reference_end|> | arxiv | @article{wang2005efficient,
title={Efficient Authenticated Encryption Schemes with Public Verifiability},
author={Guilin Wang, Feng Bao, Changshe Ma, and Kefei Chen},
journal={arXiv preprint arXiv:cs/0504019},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504019},
primaryClass={cs.CR}
} | wang2005efficient |
arxiv-672783 | cs/0504020 | The Viterbi Algorithm: A Personal History | <|reference_start|>The Viterbi Algorithm: A Personal History: The story of the Viterbi algorithm (VA) is told from a personal perspective. Applications both within and beyond communications are discussed. In brief summary, the VA has proved to be an extremely important algorithm in a surprising variety of fields.<|reference_end|> | arxiv | @article{forney2005the,
title={The Viterbi Algorithm: A Personal History},
author={G. David Forney Jr},
journal={arXiv preprint arXiv:cs/0504020},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504020},
primaryClass={cs.IT math.IT}
} | forney2005the |
arxiv-672784 | cs/0504021 | Near Perfect Decoding of LDPC Codes | <|reference_start|>Near Perfect Decoding of LDPC Codes: Cooperative optimization is a new way for finding global optima of complicated functions of many variables. It has some important properties not possessed by any conventional optimization methods. It has been successfully applied in solving many large scale optimization problems in image processing, computer vision, and computational chemistry. This paper shows the application of this optimization principle in decoding LDPC codes, which is another hard combinatorial optimization problem. In our experiments, it significantly out-performed the sum-product algorithm, the best known method for decoding LDPC codes. Compared to the sum-product algorithm, our algorithm reduced the error rate further by three fold, improved the speed by six times, and lowered error floors dramatically in the decoding.<|reference_end|> | arxiv | @article{huang2005near,
title={Near Perfect Decoding of LDPC Codes},
author={Xiaofei Huang},
journal={arXiv preprint arXiv:cs/0504021},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504021},
primaryClass={cs.IT math.IT}
} | huang2005near |
arxiv-672785 | cs/0504022 | A Matter of Opinion: Sentiment Analysis and Business Intelligence (position paper) | <|reference_start|>A Matter of Opinion: Sentiment Analysis and Business Intelligence (position paper): A general-audience introduction to the area of "sentiment analysis", the computational treatment of subjective, opinion-oriented language (an example application is determining whether a review is "thumbs up" or "thumbs down"). Some challenges, applications to business-intelligence tasks, and potential future directions are described.<|reference_end|> | arxiv | @article{lee2005a,
title={A Matter of Opinion: Sentiment Analysis and Business Intelligence
(position paper)},
author={Lillian Lee},
journal={Presented at the IBM Faculty Summit on the Architecture of
On-Demand Business, May 2004},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504022},
primaryClass={cs.CL}
} | lee2005a |
arxiv-672786 | cs/0504023 | Correlation Clustering with a Fixed Number of Clusters | <|reference_start|>Correlation Clustering with a Fixed Number of Clusters: We continue the investigation of problems concerning correlation clustering or clustering with qualitative information, which is a clustering formulation that has been studied recently. The basic setup here is that we are given as input a complete graph on n nodes (which correspond to nodes to be clustered) whose edges are labeled + (for similar pairs of items) and - (for dissimilar pairs of items). Thus we have only as input qualitative information on similarity and no quantitative distance measure between items. The quality of a clustering is measured in terms of its number of agreements, which is simply the number of edges it correctly classifies, that is the sum of number of - edges whose endpoints it places in different clusters plus the number of + edges both of whose endpoints it places within the same cluster. In this paper, we study the problem of finding clusterings that maximize the number of agreements, and the complementary minimization version where we seek clusterings that minimize the number of disagreements. We focus on the situation when the number of clusters is stipulated to be a small constant k. Our main result is that for every k, there is a polynomial time approximation scheme for both maximizing agreements and minimizing disagreements. (The problems are NP-hard for every k >= 2.) The main technical work is for the minimization version, as the PTAS for maximizing agreements follows along the lines of the property tester for Max k-CUT. In contrast, when the number of clusters is not specified, the problem of minimizing disagreements was shown to be APX-hard, even though the maximization version admits a PTAS.<|reference_end|> | arxiv | @article{giotis2005correlation,
title={Correlation Clustering with a Fixed Number of Clusters},
author={Ioannis Giotis and Venkatesan Guruswami},
journal={arXiv preprint arXiv:cs/0504023},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504023},
primaryClass={cs.DS}
} | giotis2005correlation |
arxiv-672787 | cs/0504024 | Constraint-Based Qualitative Simulation | <|reference_start|>Constraint-Based Qualitative Simulation: We consider qualitative simulation involving a finite set of qualitative relations in presence of complete knowledge about their interrelationship. We show how it can be naturally captured by means of constraints expressed in temporal logic and constraint satisfaction problems. The constraints relate at each stage the 'past' of a simulation with its 'future'. The benefit of this approach is that it readily leads to an implementation based on constraint technology that can be used to generate simulations and to answer queries about them.<|reference_end|> | arxiv | @article{apt2005constraint-based,
title={Constraint-Based Qualitative Simulation},
author={Krzysztof R. Apt, Sebastian Brand},
journal={arXiv preprint arXiv:cs/0504024},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504024},
primaryClass={cs.AI cs.LO}
} | apt2005constraint-based |
arxiv-672788 | cs/0504025 | Incorporating LINQ, State Diagrams Templating and Package Extension Into Java | <|reference_start|>Incorporating LINQ, State Diagrams Templating and Package Extension Into Java: This submission has been withdrawn at the request of the author.<|reference_end|> | arxiv | @article{g2005incorporating,
title={Incorporating LINQ, State Diagrams Templating and Package Extension Into
Java},
author={Raju Renjit. G},
journal={arXiv preprint arXiv:cs/0504025},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504025},
primaryClass={cs.PL}
} | g2005incorporating |
arxiv-672789 | cs/0504026 | Searching Monotone Multi-dimensional Arrays | <|reference_start|>Searching Monotone Multi-dimensional Arrays: In this paper we investigate the problem of searching monotone multi-dimensional arrays. We generalize Linial and Saks' search algorithm \cite{LS1} for monotone 3-dimensional arrays to $d$-dimensions with $d\geq 4$. Our new search algorithm is asymptotically optimal for $d=4$.<|reference_end|> | arxiv | @article{cheng2005searching,
title={Searching Monotone Multi-dimensional Arrays},
author={Yongxi Cheng, Xiaoming Sun, Yiqun Lisa Yin},
journal={arXiv preprint arXiv:cs/0504026},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504026},
primaryClass={cs.DS cs.DM}
} | cheng2005searching |
arxiv-672790 | cs/0504027 | Linear Datalog and Bounded Path Duality of Relational Structures | <|reference_start|>Linear Datalog and Bounded Path Duality of Relational Structures: In this paper we systematically investigate the connections between logics with a finite number of variables, structures of bounded pathwidth, and linear Datalog Programs. We prove that, in the context of Constraint Satisfaction Problems, all these concepts correspond to different mathematical embodiments of a unique robust notion that we call bounded path duality. We also study the computational complexity implications of the notion of bounded path duality. We show that every constraint satisfaction problem $\csp(\best)$ with bounded path duality is solvable in NL and that this notion explains in a uniform way all families of CSPs known to be in NL. Finally, we use the results developed in the paper to identify new problems in NL.<|reference_end|> | arxiv | @article{dalmau2005linear,
title={Linear Datalog and Bounded Path Duality of Relational Structures},
author={Victor Dalmau},
journal={Logical Methods in Computer Science, Volume 1, Issue 1 (April 29,
2005) lmcs:2275},
year={2005},
doi={10.2168/LMCS-1(1:5)2005},
archivePrefix={arXiv},
eprint={cs/0504027},
primaryClass={cs.LO cs.CC}
} | dalmau2005linear |
arxiv-672791 | cs/0504028 | On Extrinsic Information of Good Codes Operating Over Discrete Memoryless Channels | <|reference_start|>On Extrinsic Information of Good Codes Operating Over Discrete Memoryless Channels: We show that the Extrinsic Information about the coded bits of any good (capacity achieving) code operating over a wide class of discrete memoryless channels (DMC) is zero when channel capacity is below the code rate and positive constant otherwise, that is, the Extrinsic Information Transfer (EXIT) chart is a step function of channel quality, for any capacity achieving code. It follows that, for a common class of iterative receivers where the error correcting decoder must operate at first iteration at rate above capacity (such as in turbo equalization, turbo channel estimation, parallel and serial concatenated coding and the like), classical good codes which achieve capacity over the DMC are not effective and should be replaced by different new ones. Another meaning of the results is that a good code operating at rate above channel capacity falls apart into its individual transmitted symbols in the sense that all the information about a coded transmitted symbol is contained in the corresponding received symbol and no information about it can be inferred from the other received symbols. The binary input additive white Gaussian noise channel is treated in part 1 of this report. Part 2 extends the results to the symmetric binary channel and to the binary erasure channel and provides a heuristic extension to a wider class of channel models.<|reference_end|> | arxiv | @article{peleg2005on,
title={On Extrinsic Information of Good Codes Operating Over Discrete
Memoryless Channels},
author={Michael Peleg, Amichai Sanderovich and Shlomo Shamai},
journal={arXiv preprint arXiv:cs/0504028},
year={2005},
number={CCIT-525},
archivePrefix={arXiv},
eprint={cs/0504028},
primaryClass={cs.IT math.IT}
} | peleg2005on |
arxiv-672792 | cs/0504029 | Fast Distributed Algorithms for Computing Separable Functions | <|reference_start|>Fast Distributed Algorithms for Computing Separable Functions: The problem of computing functions of values at the nodes in a network in a totally distributed manner, where nodes do not have unique identities and make decisions based only on local information, has applications in sensor, peer-to-peer, and ad-hoc networks. The task of computing separable functions, which can be written as linear combinations of functions of individual variables, is studied in this context. Known iterative algorithms for averaging can be used to compute the normalized values of such functions, but these algorithms do not extend in general to the computation of the actual values of separable functions. The main contribution of this paper is the design of a distributed randomized algorithm for computing separable functions. The running time of the algorithm is shown to depend on the running time of a minimum computation algorithm used as a subroutine. Using a randomized gossip mechanism for minimum computation as the subroutine yields a complete totally distributed algorithm for computing separable functions. For a class of graphs with small spectral gap, such as grid graphs, the time used by the algorithm to compute averages is of a smaller order than the time required by a known iterative averaging scheme.<|reference_end|> | arxiv | @article{mosk-aoyama2005fast,
title={Fast Distributed Algorithms for Computing Separable Functions},
author={Damon Mosk-Aoyama and Devavrat Shah},
journal={arXiv preprint arXiv:cs/0504029},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504029},
primaryClass={cs.NI cs.DC cs.DS}
} | mosk-aoyama2005fast |
arxiv-672793 | cs/0504030 | Sufficient conditions for convergence of the Sum-Product Algorithm | <|reference_start|>Sufficient conditions for convergence of the Sum-Product Algorithm: We derive novel conditions that guarantee convergence of the Sum-Product algorithm (also known as Loopy Belief Propagation or simply Belief Propagation) to a unique fixed point, irrespective of the initial messages. The computational complexity of the conditions is polynomial in the number of variables. In contrast with previously existing conditions, our results are directly applicable to arbitrary factor graphs (with discrete variables) and are shown to be valid also in the case of factors containing zeros, under some additional conditions. We compare our bounds with existing ones, numerically and, if possible, analytically. For binary variables with pairwise interactions, we derive sufficient conditions that take into account local evidence (i.e., single variable factors) and the type of pair interactions (attractive or repulsive). It is shown empirically that this bound outperforms existing bounds.<|reference_end|> | arxiv | @article{mooij2005sufficient,
title={Sufficient conditions for convergence of the Sum-Product Algorithm},
author={Joris M. Mooij, Hilbert J. Kappen},
journal={IEEE Transactions on Information Theory, 53(12):4422-4437 Dec.
2007},
year={2005},
doi={10.1109/TIT.2007.909166},
archivePrefix={arXiv},
eprint={cs/0504030},
primaryClass={cs.IT cs.AI math.IT}
} | mooij2005sufficient |
arxiv-672794 | cs/0504031 | Convexity Analysis of Snake Models Based on Hamiltonian Formulation | <|reference_start|>Convexity Analysis of Snake Models Based on Hamiltonian Formulation: This paper presents a convexity analysis for the dynamic snake model based on the Potential Energy functional and the Hamiltonian formulation of the classical mechanics. First we see the snake model as a dynamical system whose singular points are the borders we seek. Next we show that a necessary condition for a singular point to be an attractor is that the energy functional is strictly convex in a neighborhood of it, that means, if the singular point is a local minimum of the potential energy. As a consequence of this analysis, a local expression relating the dynamic parameters and the rate of convergence arises. Such results link the convexity analysis of the potential energy and the dynamic snake model and point forward to the necessity of a physical quantity whose convexity analysis is related to the dynamic and which incorporate the velocity space. Such a quantity is exactly the (conservative) Hamiltonian of the system.<|reference_end|> | arxiv | @article{giraldi2005convexity,
title={Convexity Analysis of Snake Models Based on Hamiltonian Formulation},
  author={Gilson Antonio Giraldi and Antonio Alberto Fernandes de Oliveira},
journal={arXiv preprint arXiv:cs/0504031},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504031},
primaryClass={cs.CV cs.GR}
} | giraldi2005convexity |
arxiv-672795 | cs/0504032 | Critical Point for Maximum Likelihood Decoding of Linear Block Codes | <|reference_start|>Critical Point for Maximum Likelihood Decoding of Linear Block Codes: In this letter, the SNR value at which the error performance curve of a soft decision maximum likelihood decoder reaches the slope corresponding to the code minimum distance is determined for a random code. Based on this value, referred to as the critical point, new insight about soft bounded distance decoding of random-like codes (and particularly Reed-Solomon codes) is provided.<|reference_end|> | arxiv | @article{fossorier2005critical,
title={Critical Point for Maximum Likelihood Decoding of Linear Block Codes},
author={Marc Fossorier},
journal={arXiv preprint arXiv:cs/0504032},
year={2005},
doi={10.1109/LCOMM.2005.1506713},
archivePrefix={arXiv},
eprint={cs/0504032},
primaryClass={cs.IT math.IT}
} | fossorier2005critical |
arxiv-672796 | cs/0504033 | Resource Management Services for a Grid Analysis Environment | <|reference_start|>Resource Management Services for a Grid Analysis Environment: Selecting optimal resources for submitting jobs on a computational Grid or accessing data from a data grid is one of the most important tasks of any Grid middleware. Most modern Grid software today satisfies this responsibility and gives a best-effort performance to solve this problem. Almost all decisions regarding scheduling and data access are made by the software automatically, giving users little or no control over the entire process. To solve this problem, a more interactive set of services and middleware is desired that provides users more information about Grid weather, and gives them more control over the decision making process. This paper presents a set of services that have been developed to provide more interactive resource management capabilities within the Grid Analysis Environment (GAE) being developed collaboratively by Caltech, NUST and several other institutes. These include a steering service, a job monitoring service and an estimator service that have been designed and written using a common Grid-enabled Web Services framework named Clarens. The paper also presents a performance analysis of the developed services to show that they have indeed resulted in a more interactive and powerful system for user-centric Grid-enabled physics analysis.<|reference_end|> | arxiv | @article{ali2005resource,
title={Resource Management Services for a Grid Analysis Environment},
  author={Arshad Ali and Ashiq Anjum and Tahir Azim and Julian Bunn and Atif
  Mehmood and Richard McClatchey and Harvey Newman and Waqas ur Rehman and
  Conrad Steenberg and Michael Thomas and Frank van Lingen and Ian Willers
  and Muhammad Adeel Zafar},
journal={arXiv preprint arXiv:cs/0504033},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504033},
primaryClass={cs.DC}
} | ali2005resource |
arxiv-672797 | cs/0504034 | Heterogeneous Relational Databases for a Grid-enabled Analysis Environment | <|reference_start|>Heterogeneous Relational Databases for a Grid-enabled Analysis Environment: Grid based systems require a database access mechanism that can provide seamless homogeneous access to the requested data through a virtual data access system, i.e. a system which can take care of tracking the data that is stored in geographically distributed heterogeneous databases. This system should provide an integrated view of the data that is stored in the different repositories by using a virtual data access mechanism, i.e. a mechanism which can hide the heterogeneity of the backend databases from the client applications. This paper focuses on accessing data stored in disparate relational databases through a web service interface, and exploits the features of a Data Warehouse and Data Marts. We present a middleware that enables applications to access data stored in geographically distributed relational databases without being aware of their physical locations and underlying schema. A web service interface is provided to enable applications to access this middleware in a language and platform independent way. A prototype implementation was created based on Clarens [4], Unity [7] and POOL [8]. This ability to access the data stored in the distributed relational databases transparently is likely to be a very powerful one for Grid users, especially the scientific community wishing to collate and analyze data distributed over the Grid.<|reference_end|> | arxiv | @article{ali2005heterogeneous,
title={Heterogeneous Relational Databases for a Grid-enabled Analysis
Environment},
  author={Arshad Ali and Ashiq Anjum and Tahir Azim and Julian Bunn and Saima
  Iqbal and Richard McClatchey and Harvey Newman and S. Yousaf Shah and Tony
  Solomonides and Conrad Steenberg and Michael Thomas and Frank van Lingen
  and Ian Willers},
journal={arXiv preprint arXiv:cs/0504034},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504034},
primaryClass={cs.DC}
} | ali2005heterogeneous |
arxiv-672798 | cs/0504035 | Fitness Uniform Deletion: A Simple Way to Preserve Diversity | <|reference_start|>Fitness Uniform Deletion: A Simple Way to Preserve Diversity: A commonly experienced problem with population based optimisation methods is the gradual decline in population diversity that tends to occur over time. This can slow a system's progress or even halt it completely if the population converges on a local optimum from which it cannot escape. In this paper we present the Fitness Uniform Deletion Scheme (FUDS), a simple but somewhat unconventional approach to this problem. Under FUDS the deletion operation is modified to only delete those individuals which are "common" in the sense that there exist many other individuals of similar fitness in the population. This makes it impossible for the population to collapse to a collection of highly related individuals with similar fitness. Our experimental results on a range of optimisation problems confirm this, in particular for deceptive optimisation problems the performance is significantly more robust to variation in the selection intensity.<|reference_end|> | arxiv | @article{legg2005fitness,
title={Fitness Uniform Deletion: A Simple Way to Preserve Diversity},
author={Shane Legg and Marcus Hutter},
journal={Proc. Genetic and Evolutionary Computation Conference (GECCO 2005)
  1271--1278},
year={2005},
number={IDSIA-11-04},
archivePrefix={arXiv},
eprint={cs/0504035},
primaryClass={cs.NE cs.AI}
} | legg2005fitness |
arxiv-672799 | cs/0504036 | Scientific impact quantity and quality: Analysis of two sources of bibliographic data | <|reference_start|>Scientific impact quantity and quality: Analysis of two sources of bibliographic data: Attempts to understand the consequence of any individual scientist's activity within the long-term trajectory of science is one of the most difficult questions within the philosophy of science. Because scientific publications play such as central role in the modern enterprise of science, bibliometric techniques which measure the ``impact'' of an individual publication as a function of the number of citations it receives from subsequent authors have provided some of the most useful empirical data on this question. Until recently, Thompson/ISI has provided the only source of large-scale ``inverted'' bibliographic data of the sort required for impact analysis. In the end of 2004, Google introduced a new service, GoogleScholar, making much of this same data available. Here we analyze 203 publications, collectively cited by more than 4000 other publications. We show surprisingly good agreement between data citation counts provided by the two services. Data quality across the systems is analyzed, and potentially useful complementarities between are considered. The additional robustness offered by multiple sources of such data promises to increase the utility of these measurements as open citation protocols and open access increase their impact on electronic scientific publication practices.<|reference_end|> | arxiv | @article{belew2005scientific,
title={Scientific impact quantity and quality: Analysis of two sources of
bibliographic data},
author={Richard K. Belew},
journal={arXiv preprint arXiv:cs/0504036},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504036},
primaryClass={cs.IR cs.DL}
} | belew2005scientific |
arxiv-672800 | cs/0504037 | Bayesian Restoration of Digital Images Employing Markov Chain Monte Carlo a Review | <|reference_start|>Bayesian Restoration of Digital Images Employing Markov Chain Monte Carlo a Review: A review of Bayesian restoration of digital images based on Monte Carlo techniques is presented. The topics covered include Likelihood, Prior and Posterior distributions, Poisson, Binay symmetric channel, and Gaussian channel models of Likelihood distribution,Ising and Potts spin models of Prior distribution, restoration of an image through Posterior maximization, statistical estimation of a true image from Posterior ensembles, Markov Chain Monte Carlo methods and cluster algorithms.<|reference_end|> | arxiv | @article{murthy2005bayesian,
title={Bayesian Restoration of Digital Images Employing Markov Chain Monte
Carlo a Review},
  author={K. P. N. Murthy and M. Janani and B. Shenbga Priya},
journal={arXiv preprint arXiv:cs/0504037},
year={2005},
archivePrefix={arXiv},
eprint={cs/0504037},
primaryClass={cs.CV cond-mat.stat-mech physics.comp-ph}
} | murthy2005bayesian |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.