corpus_id: string, lengths 7 to 12
paper_id: string, lengths 9 to 16
title: string, lengths 1 to 261
abstract: string, lengths 70 to 4.02k
source: string, 1 distinct value
bibtex: string, lengths 208 to 20.9k
citation_key: string, lengths 6 to 100
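The seven fields above describe one record per paper; the rows that follow repeat them in this order (corpus_id, paper_id, title, abstract, source, bibtex, citation_key). As a rough illustrative sketch only, assuming the records are published as a Hugging Face-style dataset, they could be loaded and parsed as shown below; the dataset path is a placeholder, not something stated in this preview.

```python
from datasets import load_dataset

# Placeholder path: the actual dataset name is not given in this preview.
ds = load_dataset("some-org/arxiv-citation-records", split="train")

for row in ds.select(range(3)):
    # Each record carries the seven fields listed in the schema above.
    print(row["corpus_id"], row["paper_id"], row["citation_key"])
    # Abstracts are wrapped in <|reference_start|> ... <|reference_end|> markers.
    abstract = row["abstract"]
    abstract = abstract.removeprefix("<|reference_start|>")
    abstract = abstract.removesuffix("<|reference_end|>")
    print(row["title"], "->", abstract[:80], "...")
```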
arxiv-5701
0812.1843
Identification of parameters underlying emotions and a classification of emotions
<|reference_start|>Identification of parameters underlying emotions and a classification of emotions: The standard classification of emotions involves categorizing the expression of emotions. In this paper, parameters underlying some emotions are identified and a new classification based on these parameters is suggested.<|reference_end|>
arxiv
@article{kumar2008identification, title={Identification of parameters underlying emotions and a classification of emotions}, author={N. Arvind Kumar}, journal={arXiv preprint arXiv:0812.1843}, year={2008}, archivePrefix={arXiv}, eprint={0812.1843}, primaryClass={cs.AI} }
kumar2008identification
arxiv-5702
0812.1857
Dependence Balance Based Outer Bounds for Gaussian Networks with Cooperation and Feedback
<|reference_start|>Dependence Balance Based Outer Bounds for Gaussian Networks with Cooperation and Feedback: We obtain new outer bounds on the capacity regions of the two-user multiple access channel with generalized feedback (MAC-GF) and the two-user interference channel with generalized feedback (IC-GF). These outer bounds are based on the idea of dependence balance which was proposed by Hekstra and Willems [1]. To illustrate the usefulness of our outer bounds, we investigate three different channel models. We first consider a Gaussian MAC with noisy feedback (MAC-NF), where transmitter $k$, $k=1,2$, receives a feedback $Y_{F_{k}}$, which is the channel output $Y$ corrupted with additive white Gaussian noise $Z_{k}$. As the feedback noise variances become large, one would expect the feedback to become useless, which is not reflected by the cut-set bound. We demonstrate that our outer bound improves upon the cut-set bound for all non-zero values of the feedback noise variances. Moreover, in the limit as $\sigma_{Z_{k}}^{2}\to \infty$, $k=1,2$, our outer bound collapses to the capacity region of the Gaussian MAC without feedback. Secondly, we investigate a Gaussian MAC with user-cooperation (MAC-UC), where each transmitter receives an additive white Gaussian noise corrupted version of the channel input of the other transmitter [2]. For this channel model, the cut-set bound is sensitive to the cooperation noises, but not sensitive enough. For all non-zero values of cooperation noise variances, our outer bound strictly improves upon the cut-set outer bound. Thirdly, we investigate a Gaussian IC with user-cooperation (IC-UC). For this channel model, the cut-set bound is again sensitive to cooperation noise variances but not sensitive enough. We demonstrate that our outer bound strictly improves upon the cut-set bound for all non-zero values of cooperation noise variances.<|reference_end|>
arxiv
@article{tandon2008dependence, title={Dependence Balance Based Outer Bounds for Gaussian Networks with Cooperation and Feedback}, author={Ravi Tandon, Sennur Ulukus}, journal={arXiv preprint arXiv:0812.1857}, year={2008}, doi={10.1109/TIT.2011.2145150}, archivePrefix={arXiv}, eprint={0812.1857}, primaryClass={cs.IT math.IT} }
tandon2008dependence
arxiv-5703
0812.1869
Convex Sparse Matrix Factorizations
<|reference_start|>Convex Sparse Matrix Factorizations: We present a convex formulation of dictionary learning for sparse signal decomposition. Convexity is obtained by replacing the usual explicit upper bound on the dictionary size by a convex rank-reducing term similar to the trace norm. In particular, our formulation introduces an explicit trade-off between size and sparsity of the decomposition of rectangular matrices. Using a large set of synthetic examples, we compare the estimation abilities of the convex and non-convex approaches, showing that while the convex formulation has a single local minimum, this may lead in some cases to performance which is inferior to the local minima of the non-convex formulation.<|reference_end|>
arxiv
@article{bach2008convex, title={Convex Sparse Matrix Factorizations}, author={Francis Bach (INRIA Rocquencourt), Julien Mairal (INRIA Rocquencourt), Jean Ponce (INRIA Rocquencourt)}, journal={arXiv preprint arXiv:0812.1869}, year={2008}, archivePrefix={arXiv}, eprint={0812.1869}, primaryClass={cs.LG} }
bach2008convex
arxiv-5704
0812.1908
A new metric for robustness with respect to virus spread
<|reference_start|>A new metric for robustness with respect to virus spread: The robustness of a network depends on the type of attack we are considering. In this paper we focus on the spread of viruses on networks. It is common practice to use the epidemic threshold as a measure for robustness. Because the epidemic threshold is inversely proportional to the largest eigenvalue of the adjacency matrix, it seems easy to compare the robustness of two networks. We will show in this paper that the comparison of the robustness with respect to virus spread for two networks actually depends on the value of the effective spreading rate tau. For this reason we propose a new metric, the viral conductance, which takes into account the complete range of values tau can take. In this paper we determine the viral conductance of regular graphs, complete bipartite graphs and a number of realistic networks.<|reference_end|>
arxiv
@article{kooij2008a, title={A new metric for robustness with respect to virus spread}, author={Robert Kooij, Phillip Schumm, Caterina Scoglio}, journal={arXiv preprint arXiv:0812.1908}, year={2008}, archivePrefix={arXiv}, eprint={0812.1908}, primaryClass={cs.DM} }
kooij2008a
arxiv-5705
0812.1915
Dynamic Complexity of Formal Languages
<|reference_start|>Dynamic Complexity of Formal Languages: The paper investigates the power of the dynamic complexity classes DynFO, DynQF and DynPROP over string languages. The latter two classes contain problems that can be maintained using quantifier-free first-order updates, with and without auxiliary functions, respectively. It is shown that the languages maintainable in DynPROP are exactly the regular languages, even when allowing arbitrary precomputation. This enables lower bounds for DynPROP and separates DynPROP from DynQF and DynFO. Further, it is shown that any context-free language can be maintained in DynFO and a number of specific context-free languages, for example all Dyck-languages, are maintainable in DynQF. Furthermore, the dynamic complexity of regular tree languages is investigated and some results concerning arbitrary structures are obtained: there exist first-order definable properties which are not maintainable in DynPROP. On the other hand, any existential first-order property can be maintained in DynQF when allowing precomputation.<|reference_end|>
arxiv
@article{gelade2008dynamic, title={Dynamic Complexity of Formal Languages}, author={Wouter Gelade, Marcel Marquardt, Thomas Schwentick}, journal={arXiv preprint arXiv:0812.1915}, year={2008}, archivePrefix={arXiv}, eprint={0812.1915}, primaryClass={cs.CC cs.DS cs.LO} }
gelade2008dynamic
arxiv-5706
0812.1951
The convex hull of a regular set of integer vectors is polyhedral and effectively computable
<|reference_start|>The convex hull of a regular set of integer vectors is polyhedral and effectively computable: Number Decision Diagrams (NDD) provide a natural finite symbolic representation for regular sets of integer vectors encoded as strings of digit vectors (least or most significant digit first). The convex hull of the set of vectors represented by an NDD is proved to be an effectively computable convex polyhedron.<|reference_end|>
arxiv
@article{finkel2008the, title={The convex hull of a regular set of integer vectors is polyhedral and effectively computable}, author={Alain Finkel (LSV), Jérôme Leroux (LaBRI)}, journal={Information Processing Letters 96, 1 (2005) 30 - 35}, year={2008}, doi={10.1016/j.ipl.2005.04.004}, archivePrefix={arXiv}, eprint={0812.1951}, primaryClass={cs.CG cs.DS cs.LO} }
finkel2008the
arxiv-5707
0812.1967
Decomposition of Decidable First-Order Logics over Integers and Reals
<|reference_start|>Decomposition of Decidable First-Order Logics over Integers and Reals: We tackle the issue of representing infinite sets of real-valued vectors. This paper introduces an operator for combining integer and real sets. Using this operator, we decompose three well-known logics extending Presburger with reals. Our decomposition splits a logic into two parts: one integer, and one decimal (i.e., on the interval [0,1]). We also give a basis for an implementation of our representation.<|reference_end|>
arxiv
@article{bouchy2008decomposition, title={Decomposition of Decidable First-Order Logics over Integers and Reals}, author={Florent Bouchy (LSV), Alain Finkel (LSV), Jérôme Leroux (LaBRI)}, journal={Temporal Representation and Reasoning, 2008. TIME '08. 15th International Symposium on, Montreal, QC : Canada (2008)}, year={2008}, doi={10.1109/TIME.2008.22}, archivePrefix={arXiv}, eprint={0812.1967}, primaryClass={cs.LO} }
bouchy2008decomposition
arxiv-5708
0812.1986
Control software analysis, part II: Closed-loop analysis
<|reference_start|>Control software analysis, part II: Closed-loop analysis: The analysis and proper documentation of the properties of closed-loop control software differs in many respects from the analysis of the same software running open-loop. Issues of physical system representations arise, and it is desired that such representations remain independent of the representations of the control program. For that purpose, a concurrent program representation of the plant and the control processes is proposed, although the closed-loop system is sufficiently serialized to enable a sequential analysis. While dealing with closed-loop system properties, it is also shown by means of examples how special treatment of nonlinearities extends from the analysis of control specifications to code analysis.<|reference_end|>
arxiv
@article{feron2008control, title={Control software analysis, part II: Closed-loop analysis}, author={Eric Feron and Fernando Alegre}, journal={arXiv preprint arXiv:0812.1986}, year={2008}, archivePrefix={arXiv}, eprint={0812.1986}, primaryClass={cs.SE cs.PL} }
feron2008control
arxiv-5709
0812.2011
Accelerated Data-Flow Analysis
<|reference_start|>Accelerated Data-Flow Analysis: Acceleration in symbolic verification consists in computing the exact effect of some control-flow loops in order to speed up the iterative fix-point computation of reachable states. Even if no termination guarantee is provided in theory, successful results were obtained in practice by different tools implementing this framework. In this paper, the acceleration framework is extended to data-flow analysis. Compared to a classical widening/narrowing-based abstract interpretation, the loss of precision is controlled here by the choice of the abstract domain and does not depend on the way the abstract value is computed. Our approach is geared towards precision, but we do not lose efficiency along the way. Indeed, we provide a cubic-time acceleration-based algorithm for solving interval constraints with full multiplication.<|reference_end|>
arxiv
@article{leroux2008accelerated, title={Accelerated Data-Flow Analysis}, author={Jérôme Leroux (LaBRI), Grégoire Sutre (LaBRI)}, journal={Static Analysis, Kongens Lyngby : Danemark (2007)}, year={2008}, doi={10.1007/978-3-540-74061-2_12}, archivePrefix={arXiv}, eprint={0812.2011}, primaryClass={cs.DS} }
leroux2008accelerated
arxiv-5710
0812.2014
Convex Hull of Arithmetic Automata
<|reference_start|>Convex Hull of Arithmetic Automata: Arithmetic automata recognize infinite words of digits denoting decompositions of real and integer vectors. These automata are known to be expressive and efficient enough to represent the whole set of solutions of complex linear constraints combining both integral and real variables. In this paper, the closed convex hull of arithmetic automata is proved to be rational polyhedral. Moreover, an algorithm computing the linear constraints defining these convex sets is provided. Such an algorithm is useful for effectively extracting geometrical properties of the whole set of solutions of complex constraints symbolically represented by arithmetic automata.<|reference_end|>
arxiv
@article{leroux2008convex, title={Convex Hull of Arithmetic Automata}, author={Jérôme Leroux (LaBRI)}, journal={Static Analysis, Valencia : Espagne (2008)}, year={2008}, doi={10.1007/978-3-540-69166-2_4}, archivePrefix={arXiv}, eprint={0812.2014}, primaryClass={cs.DS} }
leroux2008convex
arxiv-5711
0812.2049
Consensus Answers for Queries over Probabilistic Databases
<|reference_start|>Consensus Answers for Queries over Probabilistic Databases: We address the problem of finding a "best" deterministic query answer to a query over a probabilistic database. For this purpose, we propose the notion of a consensus world (or a consensus answer) which is a deterministic world (answer) that minimizes the expected distance to the possible worlds (answers). This problem can be seen as a generalization of the well-studied inconsistent information aggregation problems (e.g. rank aggregation) to probabilistic databases. We consider this problem for various types of queries including SPJ queries, top-k queries, group-by aggregate queries, and clustering. For different distance metrics, we obtain polynomial time optimal or approximation algorithms for computing the consensus answers (or prove NP-hardness). Most of our results are for a general probabilistic database model, called the and/xor tree model, which significantly generalizes previous probabilistic database models like x-tuples and block-independent disjoint models, and is of independent interest.<|reference_end|>
arxiv
@article{li2008consensus, title={Consensus Answers for Queries over Probabilistic Databases}, author={Jian Li, Amol Deshpande}, journal={arXiv preprint arXiv:0812.2049}, year={2008}, archivePrefix={arXiv}, eprint={0812.2049}, primaryClass={cs.DB} }
li2008consensus
arxiv-5712
0812.2094
Interoperability between Heterogeneous Federation Architectures: Illustration with SAML and WS-Federation
<|reference_start|>Interoperability between Heterogeneous Federation Architectures: Illustration with SAML and WS-Federation: Digital identity management within and across information systems, together with service-oriented architectures, is at the root of identity federation. This kind of security architecture aims at enabling information system interoperability. Existing architectures, however, do not consider interoperability of heterogeneous federation architectures, which rely on different federation protocols. In this paper, we try to initiate an in-depth reflection on this issue, through the comparison of two main federation architecture specifications: SAML and WS-Federation. We first propose an overall outline of identity federation. We then address the issue of interoperability for federation architectures that use different federation protocols. Afterwards, we compare SAML and WS-Federation. Finally, we define the ways of convergence, and therefore of interoperability.<|reference_end|>
arxiv
@article{ates2008interoperability, title={Interoperability between Heterogeneous Federation Architectures: Illustration with SAML and WS-Federation}, author={Mikaël Ates (DIOM), Christophe Gravier (DIOM), Jérémy Lardon (DIOM), Jacques Fayolle (DIOM), B. Sauviac (DIOM)}, journal={Third International IEEE Conference on Signal-Image Technologies and Internet-Based System (SITIS 07), Shanghai : Chine (2007)}, year={2008}, doi={10.1109/SITIS.2007.148}, archivePrefix={arXiv}, eprint={0812.2094}, primaryClass={cs.CR} }
ates2008interoperability
arxiv-5713
0812.2115
Performance of a greedy algorithm for edge covering by cliques in interval graphs
<|reference_start|>Performance of a greedy algorithm for edge covering by cliques in interval graphs: In this paper a greedy algorithm to detect conflict cliques in interval graphs and circular-arc graphs is analyzed. In a graph, a stable set requires that at most one vertex is chosen for each edge. This is equivalent to requiring that at most one vertex is chosen for each maximal clique. We show that this algorithm finds all maximal cliques for interval graphs, i.e. it can compute the convex hull of the stable set polytope. In the case of circular-arc graphs, the algorithm is not able to detect all maximal cliques, yet it remains correct. This problem occurs in the context of railway scheduling. A train requests the allocation of a railway infrastructure resource for a specific time interval. As one is looking for conflict-free train schedules, the resource allocation intervals used in a schedule must not overlap. The conflict-free choices of used intervals for each resource correspond to stable sets in the interval graph associated to the allocation time intervals.<|reference_end|>
arxiv
@article{caimi2008performance, title={Performance of a greedy algorithm for edge covering by cliques in interval graphs}, author={Gabrio Caimi, Holger Flier, Martin Fuchsberger, Marc Nunkesser}, journal={arXiv preprint arXiv:0812.2115}, year={2008}, archivePrefix={arXiv}, eprint={0812.2115}, primaryClass={cs.DM cs.DS} }
caimi2008performance
arxiv-5714
0812.2137
A Factor 3/2 Approximation for Generalized Steiner Tree Problem with Distances One and Two
<|reference_start|>A Factor 3/2 Approximation for Generalized Steiner Tree Problem with Distances One and Two: We design a 3/2 approximation algorithm for the Generalized Steiner Tree problem (GST) in metrics with distances 1 and 2. This is the first polynomial time approximation algorithm for a wide class of non-geometric metric GST instances with approximation factor below 2.<|reference_end|>
arxiv
@article{berman2008a, title={A Factor 3/2 Approximation for Generalized Steiner Tree Problem with Distances One and Two}, author={Piotr Berman, Marek Karpinski, Alex Zelikovsky}, journal={arXiv preprint arXiv:0812.2137}, year={2008}, archivePrefix={arXiv}, eprint={0812.2137}, primaryClass={cs.CC cs.DM cs.DS} }
berman2008a
arxiv-5715
0812.2164
Optimization of Decentralized Scheduling for Physic Applications in Grid Environments
<|reference_start|>Optimization of Decentralized Scheduling for Physic Applications in Grid Environments: This paper presents a scheduling framework that is configured for, and used in, physics systems. Our work addresses the problem of scheduling various computationally intensive and data-intensive applications that are required for extracting information from satellite images. The proposed solution allows mapping of image processing applications onto available resources. The scheduling is done at the level of groups of concurrent applications. The framework demonstrates very good behavior for scheduling and executing groups of applications, while also achieving a near-optimal utilization of the resources.<|reference_end|>
arxiv
@article{pop2008optimization, title={Optimization of Decentralized Scheduling for Physic Applications in Grid Environments}, author={Florin Pop}, journal={D. Iordache, P. Sterian (eds.), Proceedings of the 4th edition of the Colloquium Mathematics in Engineering and Numerical Physics, pp. 150-153, October 6-8, Ed. Printech, 2007, ISBN: 978-973-718-761-1}, year={2008}, archivePrefix={arXiv}, eprint={0812.2164}, primaryClass={cs.DC physics.comp-ph} }
pop2008optimization
arxiv-5716
0812.2195
Equivalence of SQL Queries in Presence of Embedded Dependencies
<|reference_start|>Equivalence of SQL Queries in Presence of Embedded Dependencies: We consider the problem of finding equivalent minimal-size reformulations of SQL queries in presence of embedded dependencies [1]. Our focus is on select-project-join (SPJ) queries with equality comparisons, also known as safe conjunctive (CQ) queries, possibly with grouping and aggregation. For SPJ queries, the semantics of the SQL standard treat query answers as multisets (a.k.a. bags), whereas the stored relations may be treated either as sets, which is called bag-set semantics for query evaluation, or as bags, which is called bag semantics. (Under set semantics, both query answers and stored relations are treated as sets.) In the context of the above Query-Reformulation Problem, we develop a comprehensive framework for equivalence of CQ queries under bag and bag-set semantics in presence of embedded dependencies, and make a number of conceptual and technical contributions. Specifically, we develop equivalence tests for CQ queries in presence of arbitrary sets of embedded dependencies under bag and bag-set semantics, under the condition that chase [9] under set semantics (set-chase) on the inputs terminates. We also present equivalence tests for aggregate CQ queries in presence of embedded dependencies. We use our equivalence tests to develop sound and complete (whenever set-chase on the inputs terminates) algorithms for solving instances of the Query-Reformulation Problem with CQ queries under each of bag and bag-set semantics, as well as for instances of the problem with aggregate queries.<|reference_end|>
arxiv
@article{chirkova2008equivalence, title={Equivalence of SQL Queries in Presence of Embedded Dependencies}, author={Rada Chirkova, Michael Genesereth}, journal={arXiv preprint arXiv:0812.2195}, year={2008}, number={NCSU CSC TR-2008-27}, archivePrefix={arXiv}, eprint={0812.2195}, primaryClass={cs.DB} }
chirkova2008equivalence
arxiv-5717
0812.2202
Greedy Signal Recovery Review
<|reference_start|>Greedy Signal Recovery Review: The two major approaches to sparse recovery are L1-minimization and greedy methods. Recently, Needell and Vershynin developed Regularized Orthogonal Matching Pursuit (ROMP), which has bridged the gap between these two approaches. ROMP is the first stable greedy algorithm providing uniform guarantees. Even more recently, Needell and Tropp developed the stable greedy algorithm Compressive Sampling Matching Pursuit (CoSaMP). CoSaMP provides uniform guarantees and improves upon the stability bounds and RIC requirements of ROMP. CoSaMP offers rigorous bounds on computational cost and storage. In many cases, the running time is just O(N log N), where N is the ambient dimension of the signal. This review summarizes these major advances.<|reference_end|>
arxiv
@article{needell2008greedy, title={Greedy Signal Recovery Review}, author={D. Needell, J. A. Tropp and R. Vershynin}, journal={Proc. Asilomar Conference on Signals, Systems, and Computers, Pacific Grove, CA Oct. 2008}, year={2008}, archivePrefix={arXiv}, eprint={0812.2202}, primaryClass={math.NA cs.IT math.IT} }
needell2008greedy
arxiv-5718
0812.2257
Unfolding Convex Polyhedra via Quasigeodesic Star Unfoldings
<|reference_start|>Unfolding Convex Polyhedra via Quasigeodesic Star Unfoldings: We extend the notion of a star unfolding to be based on a simple quasigeodesic loop Q rather than on a point. This gives a new general method to unfold the surface of any convex polyhedron P to a simple, planar polygon: shortest paths from all vertices of P to Q are cut, and all but one segment of Q is cut.<|reference_end|>
arxiv
@article{itoh2008unfolding, title={Unfolding Convex Polyhedra via Quasigeodesic Star Unfoldings}, author={Jin-ichi Itoh, Joseph O'Rourke, Costin Vîlcu}, journal={arXiv preprint arXiv:0812.2257}, year={2008}, number={Smith 091}, archivePrefix={arXiv}, eprint={0812.2257}, primaryClass={cs.CG} }
itoh2008unfolding
arxiv-5719
0812.2275
Secrecy capacity of a class of orthogonal relay eavesdropper channels
<|reference_start|>Secrecy capacity of a class of orthogonal relay eavesdropper channels: The secrecy capacity of relay channels with orthogonal components is studied in the presence of an additional passive eavesdropper node. The relay and destination receive signals from the source on two orthogonal channels such that the destination also receives transmissions from the relay on its channel. The eavesdropper can overhear either one or both of the orthogonal channels. Inner and outer bounds on the secrecy capacity are developed for both the discrete memoryless and the Gaussian channel models. For the discrete memoryless case, the secrecy capacity is shown to be achieved by a partial decode-and-forward (PDF) scheme when the eavesdropper can overhear only one of the two orthogonal channels. Two new outer bounds are presented for the Gaussian model using recent capacity results for a Gaussian multi-antenna point-to-point channel with a multi-antenna eavesdropper. The outer bounds are shown to be tight for two sub-classes of channels. The first sub-class is one in which the source and relay are clustered and the eavesdropper receives signals only on the channel from the source and the relay to the destination, for which the PDF strategy is optimal. The second is a sub-class in which the source does not transmit to the relay, for which a noise-forwarding strategy is optimal.<|reference_end|>
arxiv
@article{aggarwal2008secrecy, title={Secrecy capacity of a class of orthogonal relay eavesdropper channels}, author={Vaneet Aggarwal and Lalitha Sankar and A. Robert Calderbank and H. Vincent Poor}, journal={Eurasip Journal on Wireless Communications and Networking, Special Issue on Wireless Physical Layer Security, vol. 2009, Jun. 2009.}, year={2008}, doi={10.1155/2009/494696}, archivePrefix={arXiv}, eprint={0812.2275}, primaryClass={cs.IT math.IT} }
aggarwal2008secrecy
arxiv-5720
0812.2277
An Efficient PTAS for Two-Strategy Anonymous Games
<|reference_start|>An Efficient PTAS for Two-Strategy Anonymous Games: We present a novel polynomial time approximation scheme for two-strategy anonymous games, in which the players' utility functions, although potentially different, do not differentiate among the identities of the other players. Our algorithm computes an $\epsilon$-approximate Nash equilibrium of an $n$-player 2-strategy anonymous game in time $\mathrm{poly}(n)\,(1/\epsilon)^{O(1/\epsilon^2)}$, which significantly improves upon the running time $n^{O(1/\epsilon^2)}$ required by the algorithm of Daskalakis & Papadimitriou, 2007. The improved running time is based on a new structural understanding of approximate Nash equilibria: We show that, for any $\epsilon$, there exists an $\epsilon$-approximate Nash equilibrium in which either only $O(1/\epsilon^3)$ players randomize, or all players who randomize use the same mixed strategy. To show this result we employ tools from the literature on Stein's Method.<|reference_end|>
arxiv
@article{daskalakis2008an, title={An Efficient PTAS for Two-Strategy Anonymous Games}, author={Constantinos Daskalakis}, journal={arXiv preprint arXiv:0812.2277}, year={2008}, archivePrefix={arXiv}, eprint={0812.2277}, primaryClass={cs.GT} }
daskalakis2008an
arxiv-5721
0812.2288
Congestion Control Protocol for Wireless Sensor Networks Handling Prioritized Heterogeneous Traffic
<|reference_start|>Congestion Control Protocol for Wireless Sensor Networks Handling Prioritized Heterogeneous Traffic: Heterogeneous applications could be assimilated within the same wireless sensor network with the aid of modern motes that have multiple sensor boards on a single radio board. Different types of data generated from such types of motes might have different transmission characteristics in terms of priority, transmission rate, required bandwidth, tolerable packet loss, delay demands etc. Considering a sensor network consisting of such multi-purpose nodes, in this paper we propose Prioritized Heterogeneous Traffic-oriented Congestion Control Protocol (PHTCCP) which ensures efficient rate control for prioritized heterogeneous traffic. Our protocol uses intra-queue and inter-queue priorities for ensuring feasible transmission rates of heterogeneous data. It also guarantees efficient link utilization by using dynamic transmission rate adjustment. Detailed analysis and simulation results are presented along with the description of our protocol to demonstrate its effectiveness in handling prioritized heterogeneous traffic in wireless sensor networks.<|reference_end|>
arxiv
@article{monowar2008congestion, title={Congestion Control Protocol for Wireless Sensor Networks Handling Prioritized Heterogeneous Traffic}, author={Muhammad Mostafa Monowar, Md. Obaidur Rahman, Al-Sakib Khan Pathan, and Choong Seon Hong}, journal={Proceedings of SMPE'08 in conjunction with MobiQuitous 2008}, year={2008}, archivePrefix={arXiv}, eprint={0812.2288}, primaryClass={cs.NI} }
monowar2008congestion
arxiv-5722
0812.2291
Characterizing Truthful Multi-Armed Bandit Mechanisms
<|reference_start|>Characterizing Truthful Multi-Armed Bandit Mechanisms: We consider a multi-round auction setting motivated by pay-per-click auctions for Internet advertising. In each round the auctioneer selects an advertiser and shows her ad, which is then either clicked or not. An advertiser derives value from clicks; the value of a click is her private information. Initially, neither the auctioneer nor the advertisers have any information about the likelihood of clicks on the advertisements. The auctioneer's goal is to design a (dominant strategies) truthful mechanism that (approximately) maximizes the social welfare. If the advertisers bid their true private values, our problem is equivalent to the "multi-armed bandit problem", and thus can be viewed as a strategic version of the latter. In particular, for both problems the quality of an algorithm can be characterized by "regret", the difference in social welfare between the algorithm and the benchmark which always selects the same "best" advertisement. We investigate how the design of multi-armed bandit algorithms is affected by the restriction that the resulting mechanism must be truthful. We find that truthful mechanisms have certain strong structural properties -- essentially, they must separate exploration from exploitation -- and they incur much higher regret than the optimal multi-armed bandit algorithms. Moreover, we provide a truthful mechanism which (essentially) matches our lower bound on regret.<|reference_end|>
arxiv
@article{babaioff2008characterizing, title={Characterizing Truthful Multi-Armed Bandit Mechanisms}, author={Moshe Babaioff, Yogeshwer Sharma, Aleksandrs Slivkins}, journal={arXiv preprint arXiv:0812.2291}, year={2008}, archivePrefix={arXiv}, eprint={0812.2291}, primaryClass={cs.DS cs.GT cs.LG} }
babaioff2008characterizing
arxiv-5723
0812.2298
Efficient Isomorphism Testing for a Class of Group Extensions
<|reference_start|>Efficient Isomorphism Testing for a Class of Group Extensions: The group isomorphism problem asks whether two given groups are isomorphic or not. Whereas the case where both groups are abelian is well understood and can be solved efficiently, very little is known about the complexity of isomorphism testing for nonabelian groups. In this paper we study this problem for a class of groups corresponding to one of the simplest ways of constructing nonabelian groups from abelian groups: the groups that are extensions of an abelian group A by a cyclic group of order m. We present an efficient algorithm solving the group isomorphism problem for all the groups of this class such that the order of A is coprime with m. More precisely, our algorithm runs in time almost linear in the orders of the input groups and works in the general setting where the groups are given as black-boxes.<|reference_end|>
arxiv
@article{gall2008efficient, title={Efficient Isomorphism Testing for a Class of Group Extensions}, author={Francois Le Gall}, journal={Proceedings of the 26th International Symposium on Theoretical Aspects of Computer Science (STACS 2009), pp. 625-636, 2009}, year={2008}, doi={10.4230/LIPIcs.STACS.2009.1830}, archivePrefix={arXiv}, eprint={0812.2298}, primaryClass={cs.DS cs.CC math.GR quant-ph} }
gall2008efficient
arxiv-5724
0812.2301
Cooperative Hybrid ARQ Protocols: Unified Frameworks for Protocol Analysis
<|reference_start|>Cooperative Hybrid ARQ Protocols: Unified Frameworks for Protocol Analysis: Cooperative hybrid-ARQ (HARQ) protocols, which can exploit the spatial and temporal diversities, have been widely studied. The efficiency of cooperative HARQ protocols is higher than that of cooperative protocols, because retransmissions are only performed when necessary. We classify cooperative HARQ protocols into three decode-and-forward based HARQ (DF-HARQ) protocols and two amplify-and-forward based (AF-HARQ) protocols. To compare these protocols and obtain the optimum parameters, two unified frameworks are developed for protocol analysis. Using the frameworks, we can evaluate and compare the maximum throughput and outage probabilities according to the SNR, the relay location, and the delay constraint for the protocols.<|reference_end|>
arxiv
@article{byun2008cooperative, title={Cooperative Hybrid ARQ Protocols: Unified Frameworks for Protocol Analysis}, author={Ilmu Byun, and Kwang Soon Kim}, journal={arXiv preprint arXiv:0812.2301}, year={2008}, archivePrefix={arXiv}, eprint={0812.2301}, primaryClass={cs.IT math.IT} }
byun2008cooperative
arxiv-5725
0812.2309
Classification of Cell Images Using MPEG-7-influenced Descriptors and Support Vector Machines in Cell Morphology
<|reference_start|>Classification of Cell Images Using MPEG-7-influenced Descriptors and Support Vector Machines in Cell Morphology: Counting and classifying blood cells is an important diagnostic tool in medicine. Support Vector Machines (SVM) are increasingly popular and efficient and could replace artificial neural network systems. Here a method to classify blood cells using SVM is proposed. A set of statistics on images is implemented in C++. The MPEG-7 descriptors Scalable Color Descriptor, Color Structure Descriptor, Color Layout Descriptor and Homogeneous Texture Descriptor are extended in size and combined with textural features corresponding to textural properties perceived visually by humans. These statistics are collected from a set of images of human blood cells. An SVM is implemented and trained to classify the cell images. The cell images come from a CellaVision DM-96 machine, which classifies cells from microscopy images. The output images and classification of the CellaVision machine are taken as ground truth, a truth that is 90-95% correct. The problem is divided in two: the primary and the simplified. The primary problem is to classify the same classes as the CellaVision machine. The simplified problem is to distinguish between the five most common types of white blood cells. An encouraging result is achieved in both cases, with error rates of 10.8% and 3.1%, considering that the SVM is misled by the errors in the ground truth. The conclusion is that further investigation of performance is worthwhile.<|reference_end|>
arxiv
@article{abenius2008classification, title={Classification of Cell Images Using MPEG-7-influenced Descriptors and Support Vector Machines in Cell Morphology}, author={Tobias Abenius}, journal={arXiv preprint arXiv:0812.2309}, year={2008}, number={ISSN 1651-6389}, archivePrefix={arXiv}, eprint={0812.2309}, primaryClass={stat.AP cs.CV stat.ML} }
abenius2008classification
arxiv-5726
0812.2313
Urologic robots and future directions
<|reference_start|>Urologic robots and future directions: PURPOSE OF REVIEW: Robot-assisted laparoscopic surgery in urology has gained immense popularity with the daVinci system, but many research teams are working on new robots. The purpose of this study is to review current urologic robots and present future development directions. RECENT FINDINGS: Future systems are expected to advance in two directions: improvements of remote manipulation robots and developments of image-guided robots. SUMMARY: The final goal of robots is to allow safer and more homogeneous outcomes with less variability of surgeon performance, as well as new tools to perform tasks on the basis of medical transcutaneous imaging, in a less invasive way, at lower costs. It is expected that improvements to remote manipulation systems will include augmented reality, haptic feedback, size reduction, and the development of new tools for natural orifice translumenal endoscopic surgery. The paradigm of image-guided robots is close to clinical availability and the most advanced robots are presented with end-user technical assessments. It is also notable that the potential of robots lies much further ahead than the accomplishments of the daVinci system. The integration of imaging with robotics holds a substantial promise, because this can accomplish tasks otherwise impossible. Image-guided robots have the potential to offer a paradigm shift.<|reference_end|>
arxiv
@article{mozer2008urologic, title={Urologic robots and future directions}, author={Pierre Mozer (TIMC, URObotics), Jocelyne Troccaz (TIMC), Dan Stoianovici (URObotics)}, journal={Current Opinion in Urology 19, 1 (2009) 114-9}, year={2008}, doi={10.1097/MOU.0b013e32831cc1ba}, archivePrefix={arXiv}, eprint={0812.2313}, primaryClass={cs.RO} }
mozer2008urologic
arxiv-5727
0812.2324
The MIMO Iterative Waterfilling Algorithm
<|reference_start|>The MIMO Iterative Waterfilling Algorithm: This paper considers the non-cooperative maximization of mutual information in the vector Gaussian interference channel in a fully distributed fashion via game theory. This problem has been widely studied in a number of works during the past decade for frequency-selective channels, and recently for the more general MIMO case, for which the state-of-the-art results are valid only for nonsingular square channel matrices. Surprisingly, these results do not hold true when the channel matrices are rectangular and/or rank deficient. The goal of this paper is to provide a complete characterization of the MIMO game for arbitrary channel matrices, in terms of conditions guaranteeing both the uniqueness of the Nash equilibrium and the convergence of asynchronous distributed iterative waterfilling algorithms. Our analysis hinges on new technical intermediate results, such as a new expression for the MIMO waterfilling projection valid (also) for singular matrices, a mean-value theorem for complex matrix-valued functions, and a general contraction theorem for the multiuser MIMO waterfilling mapping valid for arbitrary channel matrices. The quite surprising result is that uniqueness/convergence conditions in the case of tall (possibly singular) channel matrices are more restrictive than those required in the case of (full rank) fat channel matrices. We also propose a modified game and algorithm with milder conditions for the uniqueness of the equilibrium and convergence, and virtually the same performance (in terms of Nash equilibria) of the original game.<|reference_end|>
arxiv
@article{scutari2008the, title={The MIMO Iterative Waterfilling Algorithm}, author={Gesualdo Scutari, Daniel P. Palomar, and Sergio Barbarossa}, journal={arXiv preprint arXiv:0812.2324}, year={2008}, doi={10.1109/TSP.2009.2013894}, archivePrefix={arXiv}, eprint={0812.2324}, primaryClass={cs.IT cs.GT math.IT} }
scutari2008the
arxiv-5728
0812.2379
On the Decoder Error Probability of Rank Metric Codes and Constant-Dimension Codes
<|reference_start|>On the Decoder Error Probability of Rank Metric Codes and Constant-Dimension Codes: Rank metric codes and constant-dimension codes (CDCs) have been considered for error control in random network coding. Since decoder errors are more detrimental to system performance than decoder failures, in this paper we investigate the decoder error probability (DEP) of bounded distance decoders (BDDs) for rank metric codes and CDCs. For rank metric codes, we consider a channel motivated by network coding, where errors with the same row space are equiprobable. Over such channels, we establish upper bounds on the DEPs of BDDs, determine the exact DEP of BDDs for maximum rank distance (MRD) codes, and show that MRD codes have the greatest DEPs up to a scalar. To evaluate the DEPs of BDDs for CDCs, we first establish some fundamental geometric properties of the projective space. Using these geometric properties, we then consider BDDs in both subspace and injection metrics and derive analytical expressions of their DEPs for CDCs, over a symmetric operator channel, as functions of their distance distributions. Finally, we focus on CDCs obtained by lifting rank metric codes and establish two important results: First, we derive asymptotically tight upper bounds on the DEPs of BDDs in both metrics; Second, we show that the DEPs for KK codes are the greatest up to a scalar among all CDCs obtained by lifting rank metric codes.<|reference_end|>
arxiv
@article{gadouleau2008on, title={On the Decoder Error Probability of Rank Metric Codes and Constant-Dimension Codes}, author={Maximilien Gadouleau and Zhiyuan Yan}, journal={arXiv preprint arXiv:0812.2379}, year={2008}, archivePrefix={arXiv}, eprint={0812.2379}, primaryClass={cs.IT math.IT} }
gadouleau2008on
arxiv-5729
0812.2386
A note on regular Ramsey graphs
<|reference_start|>A note on regular Ramsey graphs: We prove that there is an absolute constant $C>0$ so that for every natural $n$ there exists a triangle-free \emph{regular} graph with no independent set of size at least $C\sqrt{n\log n}$.<|reference_end|>
arxiv
@article{alon2008a, title={A note on regular Ramsey graphs}, author={Noga Alon, Sonny Ben-Shimon, Michael Krivelevich}, journal={Journal of Graph Theory, 64 (3):244--249, 2010}, year={2008}, doi={10.1002/jgt.20453}, archivePrefix={arXiv}, eprint={0812.2386}, primaryClass={math.CO cs.DM} }
alon2008a
arxiv-5730
0812.2388
Physics of risk and uncertainty in quantum decision making
<|reference_start|>Physics of risk and uncertainty in quantum decision making: The Quantum Decision Theory, developed recently by the authors, is applied to clarify the role of risk and uncertainty in decision making and in particular in relation to the phenomenon of dynamic inconsistency. By formulating this notion in precise mathematical terms, we distinguish three types of inconsistency: time inconsistency, planning paradox, and inconsistency occurring in some discounting effects. While time inconsistency is well accounted for in classical decision theory, the planning paradox is in contradiction with classical utility theory. It finds a natural explanation in the framework of the Quantum Decision Theory. Different types of discounting effects are analyzed and shown to enjoy a straightforward explanation within the suggested theory. We also introduce a general methodology based on self-similar approximation theory for deriving the evolution equations for the probabilities of future prospects. This provides a novel classification of possible discount factors, which include the previously known cases (exponential or hyperbolic discounting), but also predicts a novel class of discount factors that decay to a strictly positive constant for very large future time horizons. This class may be useful to deal with very long-term discounting situations associated with intergenerational public policy choices, encompassing issues such as global warming and nuclear waste disposal.<|reference_end|>
arxiv
@article{yukalov2008physics, title={Physics of risk and uncertainty in quantum decision making}, author={V.I. Yukalov and D. Sornette}, journal={Eur. Phys. J. B 71 (2009) 533-548}, year={2008}, doi={10.1140/epjb/e2009-00245-9}, archivePrefix={arXiv}, eprint={0812.2388}, primaryClass={physics.soc-ph cs.AI quant-ph} }
yukalov2008physics
arxiv-5731
0812.2390
Completeness for Flat Modal Fixpoint Logics
<|reference_start|>Completeness for Flat Modal Fixpoint Logics: This paper exhibits a general and uniform method to prove completeness for certain modal fixpoint logics. Given a set $\Gamma$ of modal formulas of the form $\gamma(x, p_1, \ldots, p_n)$, where $x$ occurs only positively in $\gamma$, the language $L_\sharp(\Gamma)$ is obtained by adding to the language of polymodal logic a connective $\sharp_\gamma$ for each $\gamma \in \Gamma$. The term $\sharp_\gamma(\varphi_1, \ldots, \varphi_n)$ is meant to be interpreted as the least fixed point of the functional interpretation of the term $\gamma(x, \varphi_1, \ldots, \varphi_n)$. We consider the following problem: given $\Gamma$, construct an axiom system which is sound and complete with respect to the concrete interpretation of the language $L_\sharp(\Gamma)$ on Kripke frames. We prove two results that solve this problem. First, let $K_\sharp(\Gamma)$ be the logic obtained from the basic polymodal $K$ by adding a Kozen-Park style fixpoint axiom and a least fixpoint rule for each fixpoint connective $\sharp_\gamma$. Provided that each indexing formula $\gamma$ satisfies the syntactic criterion of being untied in $x$, we prove this axiom system to be complete. Second, addressing the general case, we prove the soundness and completeness of an extension $K^+(\Gamma)$ of $K_\sharp(\Gamma)$. This extension is obtained via an effective procedure that, given an indexing formula $\gamma$ as input, returns a finite set of axioms and derivation rules for $\sharp_\gamma$, of size bounded by the length of $\gamma$. Thus the axiom system $K^+(\Gamma)$ is finite whenever $\Gamma$ is finite.<|reference_end|>
arxiv
@article{santocanale2008completeness, title={Completeness for Flat Modal Fixpoint Logics}, author={Luigi Santocanale (LIF), Yde Venema (ILLC)}, journal={arXiv preprint arXiv:0812.2390}, year={2008}, archivePrefix={arXiv}, eprint={0812.2390}, primaryClass={cs.LO math.LO} }
santocanale2008completeness
arxiv-5732
0812.2405
A New Trend in Optimization on Multi Overcomplete Dictionary toward Inpainting
<|reference_start|>A New Trend in Optimization on Multi Overcomplete Dictionary toward Inpainting: Recently, great attention has been directed toward overcomplete dictionaries and the sparse representations they can provide. In a wide variety of signal processing problems, sparsity serves as a crucial property leading to high performance. Inpainting, the process of reconstructing lost or deteriorated parts of images or videos, is an interesting application which can be handled by suitably decomposing an image through a combination of overcomplete dictionaries. This paper addresses a novel technique for such a decomposition and investigates it through the inpainting of images. Simulations are presented to demonstrate the validity of our approach.<|reference_end|>
arxiv
@article{valiollahzadeh2008a, title={A New Trend in Optimization on Multi Overcomplete Dictionary toward Inpainting}, author={SeyyedMajid Valiollahzadeh, Mohammad Nazari, Massoud Babaie-Zadeh, Christian Jutten}, journal={arXiv preprint arXiv:0812.2405}, year={2008}, number={ICASSP 2009}, archivePrefix={arXiv}, eprint={0812.2405}, primaryClass={cs.MM cs.AI} }
valiollahzadeh2008a
arxiv-5733
0812.2409
Sensing Models and Its Impact on Network Coverage in Wireless Sensor Network
<|reference_start|>Sensing Models and Its Impact on Network Coverage in Wireless Sensor Network: Network coverage of a wireless sensor network (WSN) means how well an area of interest is monitored by the deployed network. It depends mainly on the sensing model of the nodes. In this paper, we present three types of sensing models, viz. the Boolean sensing model, the shadow-fading sensing model and the Elfes sensing model. We investigate the impact of sensing models on network coverage. We also investigate network coverage based on a Poisson node distribution. A comparative study between regular and random node placement has also been presented in this paper. This study will be useful for coverage analysis of WSNs.<|reference_end|>
arxiv
@article{hossain2008sensing, title={Sensing Models and Its Impact on Network Coverage in Wireless Sensor Network}, author={Ashraf Hossain, S. Chakrabarti and P. K. Biswas}, journal={arXiv preprint arXiv:0812.2409}, year={2008}, doi={10.1109/ICIINFS.2008.4798455}, archivePrefix={arXiv}, eprint={0812.2409}, primaryClass={cs.IT math.IT} }
hossain2008sensing
arxiv-5734
0812.2411
Probabilistic SVM/GMM Classifier for Speaker-Independent Vowel Recognition in Continues Speech
<|reference_start|>Probabilistic SVM/GMM Classifier for Speaker-Independent Vowel Recognition in Continues Speech: In this paper, we discuss the issues in automatic recognition of vowels in the Persian language. The present work focuses on a new statistical method for the recognition of vowels as basic units of syllables. First we describe a vowel detection system, then briefly discuss how the detected vowels can be fed to the recognition unit. In pattern recognition, Support Vector Machines (SVM), as a discriminative classifier, and Gaussian mixture models (GMM), as a generative classifier, are the two most popular techniques. Current state-of-the-art systems try to combine them to achieve more classification power and improve the performance of recognition systems. The main idea of the study is to combine probabilistic SVM and traditional GMM pattern classification with some characteristics of speech, such as band-pass energy, to achieve a better classification rate. This idea has been analytically formulated and tested on a FarsDat-based vowel recognition system. The results show remarkable increases in recognition accuracy. The tests have been carried out with the various proposed vowel recognition algorithms and the results have been compared.<|reference_end|>
arxiv
@article{nazari2008probabilistic, title={Probabilistic SVM/GMM Classifier for Speaker-Independent Vowel Recognition in Continues Speech}, author={Mohammad Nazari, Abolghasem Sayadiyan, SeyedMajid Valiollahzadeh}, journal={arXiv preprint arXiv:0812.2411}, year={2008}, number={ICASSP 2009}, archivePrefix={arXiv}, eprint={0812.2411}, primaryClass={cs.MM cs.AI} }
nazari2008probabilistic
arxiv-5735
0812.2423
On the Expressive Power of 2-Stack Visibly Pushdown Automata
<|reference_start|>On the Expressive Power of 2-Stack Visibly Pushdown Automata: Visibly pushdown automata are input-driven pushdown automata that recognize some non-regular context-free languages while preserving the nice closure and decidability properties of finite automata. Visibly pushdown automata with multiple stacks have been considered recently by La Torre, Madhusudan, and Parlato, who exploit the concept of visibility further to obtain a rich automata class that can even express properties beyond the class of context-free languages. At the same time, their automata are closed under boolean operations, have a decidable emptiness and inclusion problem, and enjoy a logical characterization in terms of a monadic second-order logic over words with an additional nesting structure. These results require a restricted version of visibly pushdown automata with multiple stacks whose behavior can be split up into a fixed number of phases. In this paper, we consider 2-stack visibly pushdown automata (i.e., visibly pushdown automata with two stacks) in their unrestricted form. We show that they are expressively equivalent to the existential fragment of monadic second-order logic. Furthermore, it turns out that monadic second-order quantifier alternation forms an infinite hierarchy with respect to words with multiple nestings. Combining these results, we conclude that 2-stack visibly pushdown automata are not closed under complementation. Finally, we discuss the expressive power of Büchi 2-stack visibly pushdown automata running on infinite (nested) words. Extending the logic by an infinity quantifier, we can likewise establish equivalence to existential monadic second-order logic.<|reference_end|>
arxiv
@article{bollig2008on, title={On the Expressive Power of 2-Stack Visibly Pushdown Automata}, author={Benedikt Bollig}, journal={Logical Methods in Computer Science, Volume 4, Issue 4 (December 24, 2008) lmcs:1101}, year={2008}, doi={10.2168/LMCS-4(4:16)2008}, archivePrefix={arXiv}, eprint={0812.2423}, primaryClass={cs.LO} }
bollig2008on
arxiv-5736
0812.2454
On the statistical physics of directed polymers in a random medium and their relation to tree codes
<|reference_start|>On the statistical physics of directed polymers in a random medium and their relation to tree codes: Using well-known results from statistical physics, concerning the almost-sure behavior of the free energy of directed polymers in a random medium, we prove that random tree codes achieve the distortion-rate function almost surely under a certain symmetry condition.<|reference_end|>
arxiv
@article{merhav2008on, title={On the statistical physics of directed polymers in a random medium and their relation to tree codes}, author={Neri Merhav}, journal={arXiv preprint arXiv:0812.2454}, year={2008}, archivePrefix={arXiv}, eprint={0812.2454}, primaryClass={cs.IT math.IT} }
merhav2008on
arxiv-5737
0812.2458
Square Complex Orthogonal Designs with no Zero Entry for any $2^m$ Antennas
<|reference_start|>Square Complex Orthogonal Designs with no Zero Entry for any $2^m$ Antennas: Space-time block codes from square complex orthogonal designs (SCOD) have been extensively studied and most of the existing SCODs contain a large number of zeros. The zeros in the designs result in a high peak-to-average power ratio and also impose a severe constraint on hardware implementation of the code, since some of the transmitting antennas must be turned off whenever a zero is transmitted. Recently, SCODs with no zero entry have been constructed for $2^a$ transmit antennas whenever $a+1$ is a power of 2. Though there exist codes for 4 and 16 transmit antennas with no zero entry, there is no general method of construction which gives codes for any number of transmit antennas. In this paper, we construct SCODs with all entries non-zero for any number of transmit antennas that is a power of 2. Simulation results show that the codes constructed in this paper outperform the existing codes for the same number of antennas under a peak power constraint while performing the same under an average power constraint.<|reference_end|>
arxiv
@article{das2008square, title={Square Complex Orthogonal Designs with no Zero Entry for any $2^m$ Antennas}, author={Smarajit Das and B. Sundar Rajan}, journal={arXiv preprint arXiv:0812.2458}, year={2008}, archivePrefix={arXiv}, eprint={0812.2458}, primaryClass={cs.IT math.IT} }
das2008square
arxiv-5738
0812.2466
Van der Waerden's Theorem and Avoidability in Words
<|reference_start|>Van der Waerden's Theorem and Avoidability in Words: Pirillo and Varricchio, and independently, Halbeisen and Hungerbuhler considered the following problem, open since 1994: Does there exist an infinite word w over a finite subset of Z such that w contains no two consecutive blocks of the same length and sum? We consider some variations on this problem in the light of van der Waerden's theorem on arithmetic progressions.<|reference_end|>
arxiv
@article{au2008van, title={Van der Waerden's Theorem and Avoidability in Words}, author={Yu-Hin Au, Aaron Robertson, Jeffrey Shallit}, journal={arXiv preprint arXiv:0812.2466}, year={2008}, archivePrefix={arXiv}, eprint={0812.2466}, primaryClass={math.CO cs.FL} }
au2008van
arxiv-5739
0812.2518
Strongly Multiplicative and 3-Multiplicative Linear Secret Sharing Schemes
<|reference_start|>Strongly Multiplicative and 3-Multiplicative Linear Secret Sharing Schemes: Strongly multiplicative linear secret sharing schemes (LSSS) have been a powerful tool for constructing secure multiparty computation protocols. However, it remains open whether or not there exist efficient constructions of strongly multiplicative LSSS from general LSSS. In this paper, we propose the new concept of a 3-multiplicative LSSS, and establish its relationship with strongly multiplicative LSSS. More precisely, we show that any 3-multiplicative LSSS is a strongly multiplicative LSSS, but the converse is not true; and that any strongly multiplicative LSSS can be efficiently converted into a 3-multiplicative LSSS. Furthermore, we apply 3-multiplicative LSSS to the computation of unbounded fan-in multiplication, which reduces its round complexity to four (from five of the previous protocol based on strongly multiplicative LSSS). We also give two constructions of 3-multiplicative LSSS from Reed-Muller codes and algebraic geometric codes. We believe that the construction and verification of 3-multiplicative LSSS are easier than those of strongly multiplicative LSSS. This presents a step forward in settling the open problem of efficient constructions of strongly multiplicative LSSS from general LSSS.<|reference_end|>
arxiv
@article{zhang2008strongly, title={Strongly Multiplicative and 3-Multiplicative Linear Secret Sharing Schemes}, author={Zhifang Zhang, Mulan Liu, Yeow Meng Chee, San Ling, and Huaxiong Wang}, journal={Advances in Cryptology - Asiacrypt 2008, vol. 5350 of Lecture Notes in Computer Science, pp. 19-36, Springer-Verlag, 2008}, year={2008}, doi={10.1007/978-3-540-89255-7}, archivePrefix={arXiv}, eprint={0812.2518}, primaryClass={cs.CR} }
zhang2008strongly
arxiv-5740
0812.2529
Kalinahia: Considering Quality of Service to Design and Execute Distributed Multimedia Applications
<|reference_start|>Kalinahia: Considering Quality of Service to Design and Execute Distributed Multimedia Applications: One of the current challenges of Information Systems is to ensure semi-structured data transmission, such as multimedia data, in a distributed and pervasive environment. Information Systems must then guarantee users a quality of service ensuring data accessibility whatever the hardware and network conditions may be. They must also guarantee information coherence and particularly intelligibility, which imposes a personalization of the service. Within this framework, we propose a design method based on original models of multimedia applications and quality of service. We also define a supervision platform, Kalinahia, using a user-centered heuristic allowing us to define at any moment which configuration of software components constitutes the best answer to users' wishes in terms of service.<|reference_end|>
arxiv
@article{laplace2008kalinahia:, title={Kalinahia: Considering Quality of Service to Design and Execute Distributed Multimedia Applications}, author={Sophie Laplace (LIUPPA), Marc Dalmau (LIUPPA), Philippe Roose (LIUPPA)}, journal={IEEE/IFIP Int'l Conference on Network Management and Management Symposium, Salvador de Bahia, Brazil (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0812.2529}, primaryClass={cs.MM} }
laplace2008kalinahia:
arxiv-5741
0812.2535
Pattern Recognition and Memory Mapping using Mirroring Neural Networks
<|reference_start|>Pattern Recognition and Memory Mapping using Mirroring Neural Networks: In this paper, we present a new kind of learning implementation to recognize patterns using the concept of the Mirroring Neural Network (MNN), which can extract information from distinct sensory input patterns and perform pattern recognition tasks. It is also capable of being used as an advanced associative memory wherein image data is associated with voice inputs in an unsupervised manner. Since the architecture is hierarchical and modular, it has the potential of being used to devise learning engines of ever increasing complexity.<|reference_end|>
arxiv
@article{deepthi2008pattern, title={Pattern Recognition and Memory Mapping using Mirroring Neural Networks}, author={Dasika Ratna Deepthi and K.Eswaran}, journal={Paper No 336, IEEE, ICETiC 2009, International Conference on Emerging Trends in Computing}, year={2008}, archivePrefix={arXiv}, eprint={0812.2535}, primaryClass={cs.AI cs.NE} }
deepthi2008pattern
arxiv-5742
0812.2543
Perturbation analysis of an M/M/1 queue in a diffusion random environment
<|reference_start|>Perturbation analysis of an M/M/1 queue in a diffusion random environment: We study in this paper an $M/M/1$ queue whose server rate depends upon the state of an independent Ornstein-Uhlenbeck diffusion process $(X(t))$ so that its value at time $t$ is $\mu \phi(X(t))$, where $\phi(x)$ is some bounded function and $\mu>0$. We first establish the differential system for the conditional probability density functions of the couple $(L(t),X(t))$ in the stationary regime, where $L(t)$ is the number of customers in the system at time $t$. By assuming that $\phi(x)$ is defined by $\phi(x) = 1-\varepsilon ((x\wedge a/\varepsilon)\vee(-b/\varepsilon))$ for some positive real numbers $a$, $b$ and $\varepsilon$, we show that the above differential system has a unique solution under some condition on $a$ and $b$. We then show that this solution is close, in some appropriate sense, to the solution to the differential system obtained when $\phi$ is replaced with $\Phi(x)=1-\varepsilon x$ for sufficiently small $\varepsilon$. We finally perform a perturbation analysis of this latter solution for small $\varepsilon$. This allows us to check at the first order the validity of the so-called reduced service rate approximation, stating that everything happens as if the server rate were constant and equal to $\mu(1-\varepsilon \mathbb{E}(X(t)))$.<|reference_end|>
arxiv
@article{fricker2008perturbation, title={Perturbation analysis of an M/M/1 queue in a diffusion random environment}, author={Christine Fricker (INRIA Rocquencourt), Fabrice Guillemin (FT R&D), Philippe Robert (INRIA Rocquencourt)}, journal={arXiv preprint arXiv:0812.2543}, year={2008}, archivePrefix={arXiv}, eprint={0812.2543}, primaryClass={cs.NI} }
fricker2008perturbation
arxiv-5743
0812.2544
Inference of Flow Statistics via Packet Sampling in the Internet
<|reference_start|>Inference of Flow Statistics via Packet Sampling in the Internet: We show in this note that by deterministic packet sampling, the tail of the distribution of the original flow size can be obtained by rescaling that of the sampled flow size. To recover information on the flow size distribution lost through packet sampling, we propose some heuristics based on measurements from different backbone IP networks. These heuristic arguments allow us to recover the complete flow size distribution.<|reference_end|>
arxiv
@article{chabchoub2008inference, title={Inference of Flow Statistics via Packet Sampling in the Internet}, author={Yousra Chabchoub (INRIA Rocquencourt), Christine Fricker (INRIA Rocquencourt), Fabrice Guillemin, Philippe Robert (INRIA Rocquencourt)}, journal={arXiv preprint arXiv:0812.2544}, year={2008}, archivePrefix={arXiv}, eprint={0812.2544}, primaryClass={cs.NI} }
chabchoub2008inference
arxiv-5744
0812.2546
An identification problem in an urn and ball model with heavy tailed distributions
<|reference_start|>An identification problem in an urn and ball model with heavy tailed distributions: We consider in this paper an urn and ball problem with replacement, where balls are with different colors and are drawn uniformly from a unique urn. The numbers of balls with a given color are i.i.d. random variables with a heavy tailed probability distribution, for instance a Pareto or a Weibull distribution. We draw a small fraction $p\ll 1$ of the total number of balls. The basic problem addressed in this paper is to know to which extent we can infer the total number of colors and the distribution of the number of balls with a given color. By means of Le Cam's inequality and Chen-Stein method, bounds for the total variation norm between the distribution of the number of balls drawn with a given color and the Poisson distribution with the same mean are obtained. We then show that the distribution of the number of balls drawn with a given color has the same tail as that of the original number of balls. We finally establish explicit bounds between the two distributions when each ball is drawn with fixed probability $p$.<|reference_end|>
arxiv
@article{fricker2008an, title={An identification problem in an urn and ball model with heavy tailed distributions}, author={Christine Fricker (INRIA Rocquencourt), Fabrice Guillemin, Philippe Robert (INRIA Rocquencourt)}, journal={arXiv preprint arXiv:0812.2546}, year={2008}, archivePrefix={arXiv}, eprint={0812.2546}, primaryClass={cs.NI} }
fricker2008an
arxiv-5745
0812.2559
A Separation Algorithm for Improved LP-Decoding of Linear Block Codes
<|reference_start|>A Separation Algorithm for Improved LP-Decoding of Linear Block Codes: Maximum Likelihood (ML) decoding is the optimal decoding algorithm for arbitrary linear block codes and can be written as an Integer Programming (IP) problem. Feldman et al. relaxed this IP problem and presented Linear Programming (LP) based decoding algorithm for linear block codes. In this paper, we propose a new IP formulation of the ML decoding problem and solve the IP with generic methods. The formulation uses indicator variables to detect violated parity checks. We derive Gomory cuts from our formulation and use them in a separation algorithm to find ML codewords. We further propose an efficient method of finding cuts induced by redundant parity checks (RPC). Under certain circumstances we can guarantee that these RPC cuts are valid and cut off the fractional optimal solutions of LP decoding. We demonstrate on two LDPC codes and one BCH code that our separation algorithm performs significantly better than LP decoding.<|reference_end|>
arxiv
@article{tanatmis2008a, title={A Separation Algorithm for Improved LP-Decoding of Linear Block Codes}, author={Akin Tanatmis, Stefan Ruzika, Horst W. Hamacher, Mayur Punekar, Frank Kienle and Norbert Wehn}, journal={arXiv preprint arXiv:0812.2559}, year={2008}, archivePrefix={arXiv}, eprint={0812.2559}, primaryClass={cs.IT math.IT} }
tanatmis2008a
arxiv-5746
0812.2563
A Sparse Flat Extension Theorem for Moment Matrices
<|reference_start|>A Sparse Flat Extension Theorem for Moment Matrices: In this note we prove a generalization of the flat extension theorem of Curto and Fialkow for truncated moment matrices. It applies to moment matrices indexed by an arbitrary set of monomials and its border, assuming that this set is connected to 1. When formulated in a basis-free setting, this gives an equivalent result for truncated Hankel operators.<|reference_end|>
arxiv
@article{laurent2008a, title={A Sparse Flat Extension Theorem for Moment Matrices}, author={Monique Laurent (CWI), Bernard Mourrain (INRIA Sophia Antipolis)}, journal={arXiv preprint arXiv:0812.2563}, year={2008}, archivePrefix={arXiv}, eprint={0812.2563}, primaryClass={cs.SC math.AC} }
laurent2008a
arxiv-5747
0812.2567
An $O(\log n\over \log\log n)$ Upper Bound on the Price of Stability for Undirected Shapley Network Design Games
<|reference_start|>An $O(\log n\over \log\log n)$ Upper Bound on the Price of Stability for Undirected Shapley Network Design Games: In this paper, we consider the Shapley network design game on undirected networks. In this game, we have an edge weighted undirected network $G(V,E)$ and $n$ selfish players where player $i$ wants to choose a path from source vertex $s_i$ to destination vertex $t_i$. The cost of each edge is equally split among players who pass it. The price of stability is defined as the ratio of the cost of the best Nash equilibrium to that of the optimal solution. We present an $O(\log n/\log\log n)$ upper bound on price of stability for the single sink case, i.e, $t_i=t$ for all $i$.<|reference_end|>
arxiv
@article{li2008an, title={An $O({\log n\over \log\log n})$ Upper Bound on the Price of Stability for Undirected Shapley Network Design Games}, author={Jian Li}, journal={Information Processing Letters, Volume 109, Issue 15, July 2009, Pages 876-878}, year={2008}, doi={10.1016/j.ipl.2009.04.015}, archivePrefix={arXiv}, eprint={0812.2567}, primaryClass={cs.GT} }
li2008an
arxiv-5748
0812.2574
Feature Selection By KDDA For SVM-Based MultiView Face Recognition
<|reference_start|>Feature Selection By KDDA For SVM-Based MultiView Face Recognition: Applications such as face recognition that deal with high-dimensional data need a mapping technique that introduces a representation of low-dimensional features with enhanced discriminatory power and a proper classifier, able to classify those complex features. Most traditional Linear Discriminant Analysis methods suffer from the disadvantage that their optimality criteria are not directly related to the classification ability of the obtained feature representation. Moreover, their classification accuracy is affected by the "small sample size" problem which is often encountered in FR tasks. In this short paper, we combine a nonlinear kernel-based mapping of data, called KDDA, with a Support Vector Machine classifier to deal with both of these shortcomings in an efficient and cost-effective manner. The method proposed here is compared, in terms of classification accuracy, to other commonly used FR methods on the UMIST face database. Results indicate that the performance of the proposed method is overall superior to those of traditional FR approaches, such as the Eigenfaces, Fisherfaces, and D-LDA methods, and traditional linear classifiers.<|reference_end|>
arxiv
@article{valiollahzadeh2008feature, title={Feature Selection By KDDA For SVM-Based MultiView Face Recognition}, author={Seyyed Majid Valiollahzadeh, Abolghasem Sayadiyan, Mohammad Nazari}, journal={arXiv preprint arXiv:0812.2574}, year={2008}, number={IEEE SETIT 2007}, archivePrefix={arXiv}, eprint={0812.2574}, primaryClass={cs.CV cs.LG} }
valiollahzadeh2008feature
arxiv-5749
0812.2575
Face Detection Using Adaboosted SVM-Based Component Classifier
<|reference_start|>Face Detection Using Adaboosted SVM-Based Component Classifier: Recently, Adaboost has been widely used to improve the accuracy of any given learning algorithm. In this paper we focus on designing an algorithm that employs a combination of Adaboost with Support Vector Machines as weak component classifiers to be used in the face detection task. To obtain a set of effective SVM weak-learner classifiers, this algorithm adaptively adjusts the kernel parameter in SVM instead of using a fixed one. The proposed combination outperforms SVM in generalization on imbalanced classification problems. The method proposed here is compared, in terms of classification accuracy, to other commonly used Adaboost methods, such as Decision Trees and Neural Networks, on the CMU+MIT face database. Results indicate that the performance of the proposed method is overall superior to previous Adaboost approaches.<|reference_end|>
arxiv
@article{valiollahzadeh2008face, title={Face Detection Using Adaboosted SVM-Based Component Classifier}, author={Seyyed Majid Valiollahzadeh, Abolghasem Sayadiyan, Mohammad Nazari}, journal={arXiv preprint arXiv:0812.2575}, year={2008}, number={ICEIS Portugal 2007}, archivePrefix={arXiv}, eprint={0812.2575}, primaryClass={cs.CV cs.LG} }
valiollahzadeh2008face
arxiv-5750
0812.2599
Learning Low Rank Matrices from O(n) Entries
<|reference_start|>Learning Low Rank Matrices from O(n) Entries: How many random entries of an n by m, rank r matrix are necessary to reconstruct the matrix within an accuracy d? We address this question in the case of a random matrix with bounded rank, whereby the observed entries are chosen uniformly at random. We prove that, for any d>0, C(r,d)n observations are sufficient. Finally we discuss the question of reconstructing the matrix efficiently, and demonstrate through extensive simulations that this task can be accomplished in nPoly(log n) operations, for small rank.<|reference_end|>
arxiv
@article{keshavan2008learning, title={Learning Low Rank Matrices from O(n) Entries}, author={Raghunandan H. Keshavan, Andrea Montanari, Sewoong Oh}, journal={arXiv preprint arXiv:0812.2599}, year={2008}, archivePrefix={arXiv}, eprint={0812.2599}, primaryClass={cs.DS} }
keshavan2008learning
arxiv-5751
0812.2602
The statistical restricted isometry property and the Wigner semicircle distribution of incoherent dictionaries
<|reference_start|>The statistical restricted isometry property and the Wigner semicircle distribution of incoherent dictionaries: In this article we present a statistical version of the Candes-Tao restricted isometry property (SRIP for short) which holds in general for any incoherent dictionary which is a disjoint union of orthonormal bases. In addition, we show that, under appropriate normalization, the eigenvalues of the associated Gram matrix fluctuate around 1 according to the Wigner semicircle distribution. The result is then applied to various dictionaries that arise naturally in the setting of finite harmonic analysis, giving, in particular, a better understanding of a remark of Applebaum-Howard-Searle-Calderbank concerning RIP for the Heisenberg dictionary of chirp-like functions.<|reference_end|>
arxiv
@article{gurevich2008the, title={The statistical restricted isometry property and the Wigner semicircle distribution of incoherent dictionaries}, author={Shamgar Gurevich (University of California Berkeley) and Ronny Hadani (University of Chicago)}, journal={arXiv preprint arXiv:0812.2602}, year={2008}, archivePrefix={arXiv}, eprint={0812.2602}, primaryClass={cs.IT cs.DM math.IT math.PR} }
gurevich2008the
arxiv-5752
0812.2636
Approximating the least hypervolume contributor: NP-hard in general, but fast in practice
<|reference_start|>Approximating the least hypervolume contributor: NP-hard in general, but fast in practice: The hypervolume indicator is an increasingly popular set measure to compare the quality of two Pareto sets. The basic ingredient of most hypervolume indicator based optimization algorithms is the calculation of the hypervolume contribution of single solutions regarding a Pareto set. We show that exact calculation of the hypervolume contribution is #P-hard while its approximation is NP-hard. The same holds for the calculation of the minimal contribution. We also prove that it is NP-hard to decide whether a solution has the least hypervolume contribution. Even deciding whether the contribution of a solution is at most $(1+\varepsilon)$ times the minimal contribution is NP-hard. This implies that it is neither possible to efficiently find the least contributing solution (unless $P = NP$) nor to approximate it (unless $NP = BPP$). Nevertheless, in the second part of the paper we present a fast approximation algorithm for this problem. We prove that for arbitrarily given $\varepsilon,\delta>0$ it calculates a solution with contribution at most $(1+\varepsilon)$ times the minimal contribution with probability at least $(1-\delta)$. Though it cannot run in polynomial time for all instances, it performs extremely fast on various benchmark datasets. The algorithm solves very large problem instances which are intractable for exact algorithms (e.g., 10000 solutions in 100 dimensions) within a few seconds.<|reference_end|>
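As a point of reference for the approximation idea in the abstract above, here is a minimal Monte Carlo sketch (not the authors' algorithm) that estimates the hypervolume contribution of one point of a maximization front by sampling the box spanned by the reference point and the point itself. The toy front, reference point and sample count are illustrative assumptions.

```python
import random

def mc_contribution(p, others, ref, samples=200_000):
    """Monte Carlo estimate of the hypervolume contribution of point p
    (maximization w.r.t. reference point ref): sample the box [ref, p]
    uniformly and keep the fraction dominated by no other front point."""
    d = len(p)
    box_volume = 1.0
    for k in range(d):
        box_volume *= p[k] - ref[k]
    hits = 0
    for _ in range(samples):
        s = [random.uniform(ref[k], p[k]) for k in range(d)]
        if not any(all(q[k] >= s[k] for k in range(d)) for q in others):
            hits += 1
    return box_volume * hits / samples

front = [(1.0, 4.0), (2.0, 3.0), (4.0, 1.0)]
p, others = front[1], [front[0], front[2]]
print(mc_contribution(p, others, ref=(0.0, 0.0)))  # close to the exact value 2.0
```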
arxiv
@article{bringmann2008approximating, title={Approximating the least hypervolume contributor: NP-hard in general, but fast in practice}, author={Karl Bringmann, Tobias Friedrich}, journal={arXiv preprint arXiv:0812.2636}, year={2008}, doi={10.1016/j.tcs.2010.09.026}, archivePrefix={arXiv}, eprint={0812.2636}, primaryClass={cs.DS cs.CC} }
bringmann2008approximating
arxiv-5753
0812.2702
Standard Logics Are Valuation-Nonmonotonic
<|reference_start|>Standard Logics Are Valuation-Nonmonotonic: It has recently been discovered that both quantum and classical propositional logics can be modelled by classes of non-orthomodular and thus non-distributive lattices that properly contain standard orthomodular and Boolean classes, respectively. In this paper we prove that these logics are complete even for those classes of the former lattices from which the standard orthomodular lattices and Boolean algebras are excluded. We also show that neither quantum nor classical computers can be founded on the latter models. It follows that logics are "valuation-nonmonotonic" in the sense that their possible models (corresponding to their possible hardware implementations) and the valuations for them drastically change when we add new conditions to their defining conditions. These valuations can even be completely separated by putting them into disjoint lattice classes by a technique presented in the paper.<|reference_end|>
arxiv
@article{pavicic2008standard, title={Standard Logics Are Valuation-Nonmonotonic}, author={Mladen Pavicic and Norman D. Megill}, journal={Journal of Logic and Computation, 18 (6) 959-982 (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0812.2702}, primaryClass={cs.LO cs.AI quant-ph} }
pavicic2008standard
arxiv-5754
0812.2709
Variations on a theme by Schalkwijk and Kailath
<|reference_start|>Variations on a theme by Schalkwijk and Kailath: Schalkwijk and Kailath (1966) developed a class of block codes for Gaussian channels with ideal feedback for which the probability of decoding error decreases as a second-order exponent in block length for rates below capacity. This well-known but surprising result is explained and simply derived here in terms of a result by Elias (1956) concerning the minimum mean-square distortion achievable in transmitting a single Gaussian random variable over multiple uses of the same Gaussian channel. A simple modification of the Schalkwijk-Kailath scheme is then shown to have an error probability that decreases with an exponential order which is linearly increasing with block length. In the infinite bandwidth limit, this scheme produces zero error probability using bounded expected energy at all rates below capacity. A lower bound on error probability for the finite bandwidth case is then derived in which the error probability decreases with an exponential order which is linearly increasing in block length at the same rate as the upper bound.<|reference_end|>
arxiv
@article{gallager2008variations, title={Variations on a theme by Schalkwijk and Kailath}, author={Robert G. Gallager, Baris Nakiboglu}, journal={IEEE Transactions on Information Theory, 56(1):6-17, Jan 2010}, year={2008}, doi={10.1109/TIT.2009.2034896}, archivePrefix={arXiv}, eprint={0812.2709}, primaryClass={cs.IT math.IT} }
gallager2008variations
arxiv-5755
0812.2719
Secret Sharing over Fast-Fading MIMO Wiretap Channels
<|reference_start|>Secret Sharing over Fast-Fading MIMO Wiretap Channels: Secret sharing over the fast-fading MIMO wiretap channel is considered. A source and a destination try to share secret information over a fast-fading MIMO channel in the presence of a wiretapper who also makes channel observations that are different from but correlated to those made by the destination. An interactive authenticated unrestricted public channel is also available for use by the source and destination in the secret sharing process. This falls under the "channel-type model with wiretapper" considered by Ahlswede and Csiszar. A minor extension of their result (to continuous channel alphabets) is employed to evaluate the key capacity of the fast-fading MIMO wiretap channel. The effects of spatial dimensionality provided by the use of multiple antennas at the source, destination, and wiretapper are then investigated.<|reference_end|>
arxiv
@article{wong2008secret, title={Secret Sharing over Fast-Fading MIMO Wiretap Channels}, author={Tan F. Wong, Matthieu Bloch, and John M. Shea}, journal={arXiv preprint arXiv:0812.2719}, year={2008}, archivePrefix={arXiv}, eprint={0812.2719}, primaryClass={cs.IT math.IT} }
wong2008secret
arxiv-5756
0812.2726
Universal Behavior in Large-scale Aggregation of Independent Noisy Observations
<|reference_start|>Universal Behavior in Large-scale Aggregation of Independent Noisy Observations: Aggregation of noisy observations involves a difficult tradeoff between observation quality, which can be increased by increasing the number of observations, and aggregation quality which decreases if the number of observations is too large. We clarify this behavior for a prototypical system in which arbitrarily large numbers of observations exceeding the system capacity can be aggregated using lossy data compression. We show the existence of a scaling relation between the collective error and the system capacity, and show that large scale lossy aggregation can outperform lossless aggregation above a critical level of observation noise. Further, we show that universal results for scaling and critical value of noise which are independent of system capacity can be obtained by considering asymptotic behavior when the system capacity increases toward infinity.<|reference_end|>
arxiv
@article{murayama2008universal, title={Universal Behavior in Large-scale Aggregation of Independent Noisy Observations}, author={Tatsuto Murayama and Peter Davis}, journal={arXiv preprint arXiv:0812.2726}, year={2008}, doi={10.1209/0295-5075/87/48003}, archivePrefix={arXiv}, eprint={0812.2726}, primaryClass={cs.IT math.IT} }
murayama2008universal
arxiv-5757
0812.2734
Asteroids in rooted and directed path graphs
<|reference_start|>Asteroids in rooted and directed path graphs: An asteroidal triple is a stable set of three vertices such that each pair is connected by a path avoiding the neighborhood of the third vertex. Asteroidal triples play a central role in a classical characterization of interval graphs by Lekkerkerker and Boland. Their result says that a chordal graph is an interval graph if and only if it contains no asteroidal triple. In this paper, we prove an analogous theorem for directed path graphs which are the intersection graphs of directed paths in a directed tree. For this purpose, we introduce the notion of a strong path. Two non-adjacent vertices are linked by a strong path if either they have a common neighbor or they are the endpoints of two vertex-disjoint chordless paths satisfying certain conditions. A strong asteroidal triple is an asteroidal triple such that each pair is linked by a strong path. We prove that a chordal graph is a directed path graph if and only if it contains no strong asteroidal triple. We also introduce a related notion of asteroidal quadruple, and conjecture a characterization of rooted path graphs which are the intersection graphs of directed paths in a rooted tree.<|reference_end|>
arxiv
@article{cameron2008asteroids, title={Asteroids in rooted and directed path graphs}, author={Kathie Cameron, Chinh Hoàng, Benjamin Lévêque}, journal={arXiv preprint arXiv:0812.2734}, year={2008}, archivePrefix={arXiv}, eprint={0812.2734}, primaryClass={cs.DM} }
cameron2008asteroids
arxiv-5758
0812.2769
Geometric scaling: a simple preconditioner for certain linear systems with discontinuous coefficients
<|reference_start|>Geometric scaling: a simple preconditioner for certain linear systems with discontinuous coefficients: Linear systems with large differences between coefficients ("discontinuous coefficients") arise in many cases in which partial differential equations (PDEs) model physical phenomena involving heterogeneous media. The standard approach to solving such problems is to use domain decomposition techniques, with domain boundaries conforming to the boundaries between the different media. This approach can be difficult to implement when the geometry of the domain boundaries is complicated or the grid is unstructured. This work examines the simple preconditioning technique of scaling the equations by dividing each equation by the Lp-norm of its coefficients. This preconditioning is called geometric scaling (GS). It has long been known that diagonal scaling can be useful in improving convergence, but there is no study on the general usefulness of this approach for discontinuous coefficients. GS was tested on several nonsymmetric linear systems with discontinuous coefficients derived from convection-diffusion elliptic PDEs with small to moderate convection terms. It is shown that GS improved the convergence properties of restarted GMRES and Bi-CGSTAB, with and without the ILUT preconditioner. GS was also shown to improve the distribution of the eigenvalues by reducing their concentration around the origin very significantly.<|reference_end|>
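For concreteness, a minimal sketch of the scaling step described above, assuming a dense NumPy system: dividing each row of A and the corresponding entry of b by the row's Lp-norm leaves the solution unchanged while equilibrating coefficient magnitudes. The toy matrix and the choice p=2 are illustrative only, not taken from the paper's test problems.

```python
import numpy as np

def geometric_scaling(A, b, p=2):
    """Divide each equation (row of A and entry of b) by the Lp-norm of its
    coefficients; a diagonal scaling that leaves the solution unchanged."""
    norms = np.linalg.norm(A, ord=p, axis=1)
    norms[norms == 0] = 1.0                # leave all-zero rows untouched
    return A / norms[:, None], b / norms

# Toy system with "discontinuous" coefficients (rows of very different scale).
A = np.array([[1e6, 2e6], [1.0, 3.0]])
b = np.array([3e6, 4.0])
As, bs = geometric_scaling(A, b)
print(np.linalg.norm(As, axis=1))          # each row now has unit L2-norm
print(np.linalg.solve(As, bs))             # same solution as the original system
```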
arxiv
@article{gordon2008geometric, title={Geometric scaling: a simple preconditioner for certain linear systems with discontinuous coefficients}, author={Dan Gordon (Univ. of Haifa), Rachel Gordon (Technion-Israel Inst. of Technology)}, journal={arXiv preprint arXiv:0812.2769}, year={2008}, archivePrefix={arXiv}, eprint={0812.2769}, primaryClass={cs.MS cs.NA} }
gordon2008geometric
arxiv-5759
0812.2775
Optimal Succinctness for Range Minimum Queries
<|reference_start|>Optimal Succinctness for Range Minimum Queries: For a static array A of n ordered objects, a range minimum query asks for the position of the minimum between two specified array indices. We show how to preprocess A into a scheme of size 2n+o(n) bits that allows to answer range minimum queries on A in constant time. This space is asymptotically optimal in the important setting where access to A is not permitted after the preprocessing step. Our scheme can be computed in linear time, using only n + o(n) additional bits at construction time. An interesting by-product is that we also improve on LCA-computation in BPS- or DFUDS-encoded trees.<|reference_end|>
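For readers unfamiliar with the query itself, the following sketch shows its semantics with a standard (non-succinct) sparse table: O(n log n) words of preprocessing and O(1)-time position queries. It is only a baseline for context, not the 2n + o(n)-bit scheme of the paper.

```python
class SparseTableRMQ:
    """Baseline RMQ: O(n log n) words, O(1) position queries (not succinct)."""
    def __init__(self, a):
        self.a = a
        n = len(a)
        self.table = [list(range(n))]      # table[j][i] = argmin of a[i .. i+2^j-1]
        j = 1
        while (1 << j) <= n:
            prev, cur = self.table[j - 1], []
            for i in range(n - (1 << j) + 1):
                l, r = prev[i], prev[i + (1 << (j - 1))]
                cur.append(l if a[l] <= a[r] else r)
            self.table.append(cur)
            j += 1

    def query(self, i, j):
        """Position of the minimum of a[i..j], inclusive, in O(1)."""
        k = (j - i + 1).bit_length() - 1
        l, r = self.table[k][i], self.table[k][j - (1 << k) + 1]
        return l if self.a[l] <= self.a[r] else r

a = [3, 1, 4, 1, 5, 9, 2, 6]
print(SparseTableRMQ(a).query(2, 6))       # 3: a[3] = 1 is the minimum of a[2..6]
```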
arxiv
@article{fischer2008optimal, title={Optimal Succinctness for Range Minimum Queries}, author={Johannes Fischer}, journal={arXiv preprint arXiv:0812.2775}, year={2008}, archivePrefix={arXiv}, eprint={0812.2775}, primaryClass={cs.DS} }
fischer2008optimal
arxiv-5760
0812.2785
Prediction of Platinum Prices Using Dynamically Weighted Mixture of Experts
<|reference_start|>Prediction of Platinum Prices Using Dynamically Weighted Mixture of Experts: Neural networks are powerful tools for classification and regression in static environments. This paper describes a technique for creating an ensemble of neural networks that adapts dynamically to changing conditions. The model separates the input space into four regions and each network is given a weight in each region based on its performance on samples from that region. The ensemble adapts dynamically by constantly adjusting these weights based on the current performance of the networks. The data set used is a collection of financial indicators with the goal of predicting the future platinum price. An ensemble with no weightings does not improve on the naive estimate of no weekly change; our weighting algorithm gives an average percentage error of 63% for twenty weeks of prediction.<|reference_end|>
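A rough sketch of the dynamic weighting idea from the abstract above, under assumptions not stated there (a one-dimensional input split into four equal bins, toy experts in place of trained networks, and an inverse-absolute-error update rule). It only illustrates "one weight per expert per region, adjusted as performance is observed", not the paper's exact algorithm.

```python
import numpy as np

# Toy stand-ins for trained networks; the real system would use neural nets.
experts = [lambda x: 0.0, lambda x: x, lambda x: 2 * x - 0.5]
n_regions = 4
weights = np.ones((len(experts), n_regions))     # one weight per (expert, region)

def region_of(x):
    return min(n_regions - 1, int(x * n_regions))    # input assumed to lie in [0, 1)

def predict(x):
    r = region_of(x)
    w = weights[:, r]
    return float(np.dot(w, [f(x) for f in experts]) / w.sum())

def update(x, y_true, eta=0.2):
    """After the target is observed, move each expert's weight in this region
    toward the inverse of its absolute error, so better experts gain weight."""
    r = region_of(x)
    for i, f in enumerate(experts):
        weights[i, r] = (1 - eta) * weights[i, r] + eta / (1.0 + abs(f(x) - y_true))

for x, y in [(0.1, 0.1), (0.3, 0.3), (0.8, 0.8)]:    # pretend the target equals x
    update(x, y)
print(predict(0.1))                                  # weighted prediction in region 0
```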
arxiv
@article{lubinsky2008prediction, title={Prediction of Platinum Prices Using Dynamically Weighted Mixture of Experts}, author={Baruch Lubinsky, Bekir Genc and Tshilidzi Marwala}, journal={arXiv preprint arXiv:0812.2785}, year={2008}, archivePrefix={arXiv}, eprint={0812.2785}, primaryClass={cs.AI} }
lubinsky2008prediction
arxiv-5761
0812.2851
The Violation Heap: A Relaxed Fibonacci-Like Heap
<|reference_start|>The Violation Heap: A Relaxed Fibonacci-Like Heap: We give a priority queue that achieves the same amortized bounds as Fibonacci heaps. Namely, find-min requires O(1) worst-case time, insert, meld and decrease-key require O(1) amortized time, and delete-min requires $O(\log n)$ amortized time. Our structure is simple and promises an efficient practical behavior when compared to other known Fibonacci-like heaps. The main idea behind our construction is to propagate rank updates instead of performing cascaded cuts following a decrease-key operation, allowing for a relaxed structure.<|reference_end|>
arxiv
@article{elmasry2008the, title={The Violation Heap: A Relaxed Fibonacci-Like Heap}, author={Amr Elmasry}, journal={arXiv preprint arXiv:0812.2851}, year={2008}, archivePrefix={arXiv}, eprint={0812.2851}, primaryClass={cs.DS} }
elmasry2008the
arxiv-5762
0812.2868
Minimax Trees in Linear Time
<|reference_start|>Minimax Trees in Linear Time: A minimax tree is similar to a Huffman tree except that, instead of minimizing the weighted average of the leaves' depths, it minimizes the maximum of any leaf's weight plus its depth. Golumbic (1976) introduced minimax trees and gave a Huffman-like, $O(n \log n)$-time algorithm for building them. Drmota and Szpankowski (2002) gave another $O(n \log n)$-time algorithm, which checks the Kraft Inequality in each step of a binary search. In this paper we show how Drmota and Szpankowski's algorithm can be made to run in linear time on a word RAM with $\Omega(\log n)$-bit words. We also discuss how our solution applies to problems in data compression, group testing and circuit design.<|reference_end|>
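For background, the Huffman-like combining rule usually attributed to Golumbic can be sketched in a few lines: repeatedly merge the two smallest weights into a new node whose weight is their maximum plus one; the last remaining weight is the minimax cost. This is the O(n log n) baseline the paper improves on, not its linear-time word-RAM algorithm.

```python
import heapq

def minimax_cost(weights):
    """Huffman-like rule for minimax trees: repeatedly merge the two smallest
    weights w1 <= w2 into a new node of weight max(w1, w2) + 1.  The last
    weight left is the minimum over trees of max(leaf weight + leaf depth)."""
    heap = list(weights)
    heapq.heapify(heap)
    while len(heap) > 1:
        w1, w2 = heapq.heappop(heap), heapq.heappop(heap)
        heapq.heappush(heap, max(w1, w2) + 1)
    return heap[0]

print(minimax_cost([0, 0, 0, 0]))  # 2: a balanced tree of depth 2
print(minimax_cost([5, 1, 1, 1]))  # 6: the heavy leaf sits at depth 1
```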
arxiv
@article{gawrychowski2008minimax, title={Minimax Trees in Linear Time}, author={Pawel Gawrychowski and Travis Gagie}, journal={arXiv preprint arXiv:0812.2868}, year={2008}, archivePrefix={arXiv}, eprint={0812.2868}, primaryClass={cs.DS} }
gawrychowski2008minimax
arxiv-5763
0812.2870
How to eat 4/9 of a pizza
<|reference_start|>How to eat 4/9 of a pizza: Given two players alternately picking pieces of a pizza sliced by radial cuts, in such a way that after the first piece is taken every subsequent chosen piece is adjacent to some previously taken piece, we provide a strategy for the starting player to get 4/9 of the pizza. This is best possible and settles a conjecture of Peter Winkler.<|reference_end|>
arxiv
@article{knauer2008how, title={How to eat 4/9 of a pizza}, author={Kolja Knauer, Piotr Micek, Torsten Ueckerdt}, journal={arXiv preprint arXiv:0812.2870}, year={2008}, archivePrefix={arXiv}, eprint={0812.2870}, primaryClass={cs.DM math.CO} }
knauer2008how
arxiv-5764
0812.2874
A Data Model for Integrating Heterogeneous Medical Data in the Health-e-Child Project
<|reference_start|>A Data Model for Integrating Heterogeneous Medical Data in the Health-e-Child Project: There has been much research activity in recent times about providing the data infrastructures needed for the provision of personalised healthcare. In particular the requirement of integrating multiple, potentially distributed, heterogeneous data sources in the medical domain for the use of clinicians has set challenging goals for the healthgrid community. The approach advocated in this paper surrounds the provision of an Integrated Data Model plus links to/from ontologies to homogenize biomedical (from genomic, through cellular, disease, patient and population-related) data in the context of the EC Framework 6 Health-e-Child project. Clinical requirements are identified, the design approach in constructing the model is detailed and the integrated model described in the context of examples taken from that project. Pointers are given to future work relating the model to medical ontologies and challenges to the use of fully integrated models and ontologies are identified.<|reference_end|>
arxiv
@article{branson2008a, title={A Data Model for Integrating Heterogeneous Medical Data in the Health-e-Child Project}, author={Andrew Branson, Tamas Hauer, Richard McClatchey, Dmitry Rogulin and Jetendr Shamdasani}, journal={arXiv preprint arXiv:0812.2874}, year={2008}, archivePrefix={arXiv}, eprint={0812.2874}, primaryClass={cs.DB} }
branson2008a
arxiv-5765
0812.2879
Ontology Assisted Query Reformulation Using Semantic and Assertion Capabilities of OWL-DL Ontologies
<|reference_start|>Ontology Assisted Query Reformulation Using Semantic and Assertion Capabilities of OWL-DL Ontologies: End users of recent biomedical information systems are often unaware of the storage structure and access mechanisms of the underlying data sources and can require simplified mechanisms for writing domain specific complex queries. This research aims to assist users and their applications in formulating queries without requiring complete knowledge of the information structure of underlying data sources. To achieve this, query reformulation techniques and algorithms have been developed that can interpret ontology-based search criteria and associated domain knowledge in order to reformulate a relational query. These query reformulation algorithms exploit the semantic relationships and assertion capabilities of OWL-DL based domain ontologies for query reformulation. In this paper, this approach is applied to the integrated database schema of the EU funded Health-e-Child (HeC) project with the aim of providing ontology assisted query reformulation techniques to simplify the global access that is needed to millions of medical records across the UK and Europe.<|reference_end|>
arxiv
@article{munir2008ontology, title={Ontology Assisted Query Reformulation Using Semantic and Assertion Capabilities of OWL-DL Ontologies}, author={Kamran Munir, Mohammed Odeh and Richard McClatchey}, journal={arXiv preprint arXiv:0812.2879}, year={2008}, archivePrefix={arXiv}, eprint={0812.2879}, primaryClass={cs.DB} }
munir2008ontology
arxiv-5766
0812.2891
On the Value of a Social Network
<|reference_start|>On the Value of a Social Network: In this paper we investigate the value of a social network with respect to the probability mechanism underlying its structure. Specifically, we compute the value for small-world and scale-free networks. We provide evidence in support of the value being given by Zipf's law.<|reference_end|>
arxiv
@article{chalasani2008on, title={On the Value of a Social Network}, author={Sandeep Chalasani}, journal={arXiv preprint arXiv:0812.2891}, year={2008}, archivePrefix={arXiv}, eprint={0812.2891}, primaryClass={cs.NI} }
chalasani2008on
arxiv-5767
0812.2892
Sparse Component Analysis (SCA) in Random-valued and Salt and Pepper Noise Removal
<|reference_start|>Sparse Component Analysis (SCA) in Random-valued and Salt and Pepper Noise Removal: In this paper, we propose a new method for impulse noise removal from images. It uses the sparsity of images in the Discrete Cosine Transform (DCT) domain. The zeros in this domain give us the exact mathematical equation to reconstruct the pixels that are corrupted by random-valued impulse noise. The proposed method can also detect and correct the corrupted pixels. Moreover, in the simpler case where the salt and pepper noise corresponds to the brightest and darkest pixels in the image, we propose a simpler version of our method. In addition, we suggest combining the traditional median filter with our method to yield better results when the percentage of corrupted samples is high.<|reference_end|>
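A minimal sketch of the simpler salt-and-pepper case mentioned above (extreme-valued pixels detected and replaced by a local median); the DCT-based reconstruction for random-valued noise is not reproduced here, and the window size and demo image are arbitrary assumptions.

```python
import numpy as np

def remove_salt_and_pepper(img, lo=0, hi=255, win=1):
    """Replace pixels taking the extreme values lo/hi (assumed to be noise)
    by the median of their (2*win+1)^2 neighborhood; other pixels are kept."""
    out = img.copy()
    padded = np.pad(img, win, mode='edge')
    rows, cols = np.nonzero((img == lo) | (img == hi))
    for r, c in zip(rows, cols):
        window = padded[r:r + 2 * win + 1, c:c + 2 * win + 1]
        out[r, c] = int(np.median(window))
    return out

# Tiny demo: a smooth ramp with one "pepper" and one "salt" pixel injected.
img = np.tile(np.arange(10, 90, 10, dtype=np.uint8), (8, 1))
img[2, 3], img[5, 6] = 0, 255
clean = remove_salt_and_pepper(img)
print(clean[2, 3], clean[5, 6])   # 40 70: the ramp values restored at both noisy pixels
```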
arxiv
@article{zayyani2008sparse, title={Sparse Component Analysis (SCA) in Random-valued and Salt and Pepper Noise Removal}, author={Hadi. Zayyani, Seyyedmajid Valiollahzadeh, Massoud. Babaie-Zadeh}, journal={arXiv preprint arXiv:0812.2892}, year={2008}, number={ICEE 2008}, archivePrefix={arXiv}, eprint={0812.2892}, primaryClass={cs.CV} }
zayyani2008sparse
arxiv-5768
0812.2926
New parallel programming language design: a bridge between brain models and multi-core/many-core computers?
<|reference_start|>New parallel programming language design: a bridge between brain models and multi-core/many-core computers?: The recurrent theme of this paper is that sequences of long temporal patterns, as opposed to sequences of simple statements, are to be fed into computation devices, be they (newly proposed) models of brain activity or multi-core/many-core computers. In such models, parts of these long temporal patterns are already committed while others are predicted. This combination of matching patterns and making predictions appears to be a key element in producing intelligent processing in brain models and in achieving efficient speculative execution on multi-core/many-core computers. A bridge between these far-apart models of computation could be provided by appropriate design of massively parallel, interactive programming languages. Agapia is a recently proposed language of this kind, where user-controlled long high-level temporal structures occur at the interaction interfaces of processes. In this paper Agapia is used to link HTM brain models with TRIPS multi-core/many-core architectures.<|reference_end|>
arxiv
@article{stefanescu2008new, title={New parallel programming language design: a bridge between brain models and multi-core/many-core computers?}, author={Gheorghe Stefanescu and Camelia Chira}, journal={arXiv preprint arXiv:0812.2926}, year={2008}, archivePrefix={arXiv}, eprint={0812.2926}, primaryClass={cs.PL cs.AI} }
stefanescu2008new
arxiv-5769
0812.2967
Shape Fitting on Point Sets with Probability Distributions
<|reference_start|>Shape Fitting on Point Sets with Probability Distributions: A typical computational geometry problem begins: Consider a set P of n points in R^d. However, many applications today work with input that is not precisely known, for example when the data is sensed and has some known error model. What if we do not know the set P exactly, but rather we have a probability distribution mu_p governing the location of each point p in P? Consider a set of (non-fixed) points P, and let mu_P be the probability distribution of this set. We study several measures (e.g. the radius of the smallest enclosing ball, or the area of the smallest enclosing box) with respect to mu_P. The solutions to these problems do not, as in the traditional case, consist of a single answer, but rather a distribution of answers. We describe several data structures that approximate distributions of answers for shape fitting problems. We provide simple and efficient randomized algorithms for computing all of these data structures, which are easy to implement and practical. We provide some experimental results to assert this. We also provide more involved deterministic algorithms for some of these data structures that run in time polynomial in n and 1/eps, where eps is the approximation factor.<|reference_end|>
arxiv
@article{loffler2008shape, title={Shape Fitting on Point Sets with Probability Distributions}, author={Maarten Loffler and Jeff M. Phillips}, journal={arXiv preprint arXiv:0812.2967}, year={2008}, archivePrefix={arXiv}, eprint={0812.2967}, primaryClass={cs.CG} }
loffler2008shape
arxiv-5770
0812.2969
A Growing Self-Organizing Network for Reconstructing Curves and Surfaces
<|reference_start|>A Growing Self-Organizing Network for Reconstructing Curves and Surfaces: Self-organizing networks such as Neural Gas, Growing Neural Gas and many others have been adopted in actual applications for both dimensionality reduction and manifold learning. Typically, in these applications, the structure of the adapted network yields a good estimate of the topology of the unknown subspace from which the input data points are sampled. The approach presented here takes a different perspective, namely by assuming that the input space is a manifold of known dimension. In return, the new type of growing self-organizing network presented gains the ability to adapt itself in a way that may guarantee the effective and stable recovery of the exact topological structure of the input manifold.<|reference_end|>
arxiv
@article{piastra2008a, title={A Growing Self-Organizing Network for Reconstructing Curves and Surfaces}, author={Marco Piastra}, journal={2009 International Joint Conference on Neural Networks (IJCNN 2009), pp. 2533-2540, 14-19 June 2009}, year={2008}, doi={10.1109/IJCNN.2009.5178709}, archivePrefix={arXiv}, eprint={0812.2969}, primaryClass={cs.NE cs.AI} }
piastra2008a
arxiv-5771
0812.2971
Cyclotomic FFT of Length 2047 Based on a Novel 11-point Cyclic Convolution
<|reference_start|>Cyclotomic FFT of Length 2047 Based on a Novel 11-point Cyclic Convolution: In this manuscript, we propose a novel 11-point cyclic convolution algorithm based on alternate Fourier transform. With the proposed bilinear form, we construct a length-2047 cyclotomic FFT.<|reference_end|>
arxiv
@article{wagh2008cyclotomic, title={Cyclotomic FFT of Length 2047 Based on a Novel 11-point Cyclic Convolution}, author={Meghanad D. Wagh, Ning Chen, and Zhiyuan Yan}, journal={arXiv preprint arXiv:0812.2971}, year={2008}, archivePrefix={arXiv}, eprint={0812.2971}, primaryClass={cs.IT math.IT} }
wagh2008cyclotomic
arxiv-5772
0812.2988
The Korrontea Data Modeling
<|reference_start|>The Korrontea Data Modeling: The needs of multimedia systems have evolved with their architecture, which is now distributed across heterogeneous contexts. A critical issue lies in the fact that they handle, process, and transmit multimedia data. This data carries several properties which should be considered, since they hold a considerable part of its semantics, for instance lip synchronization in a video. In this paper, we focus on the definition of a model as a basic abstraction for describing and modeling media in multimedia systems by taking into account their properties. This model will be used in software architectures in order to handle data in an efficient way. The provided model is an interesting solution for the integration of media into applications; we propose to consider and handle them in a uniform way. The model comes with synchronization policies to ensure synchronous transport of media. We therefore use it in a component model that we develop for the design and deployment of distributed multimedia systems.<|reference_end|>
arxiv
@article{bouix2008the, title={The Korrontea Data Modeling}, author={Emmanuel Bouix (LIUPPA), Philippe Roose (LIUPPA), Marc Dalmau (LIUPPA)}, journal={Ambisys, Quebec City : Canada (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0812.2988}, primaryClass={cs.MM} }
bouix2008the
arxiv-5773
0812.2989
Heterogeneous component interactions: Sensors integration into multimedia applications
<|reference_start|>Heterogeneous component interactions: Sensors integration into multimedia applications: Resource-constrained embedded and mobile devices are becoming increasingly common. In recent years, mobile and ubiquitous devices such as wireless sensors, able to sense their physical environment, have appeared. Such devices make it possible to propose applications that adapt to users' needs as the context evolves. This implies the collaboration of sensors and software components which differ in their nature and their communication mechanisms. This paper proposes a unified component model in order to easily design applications based on software components and sensors without worrying about their nature. It then presents a state of the art of the communication problems linked to heterogeneous components and proposes an interaction mechanism which ensures information exchange between wireless sensors and software components.<|reference_end|>
arxiv
@article{louberry2008heterogeneous, title={Heterogeneous component interactions: Sensors integration into multimedia applications}, author={Christine Louberry (LIUPPA), Philippe Roose (LIUPPA), Marc Dalmau (LIUPPA)}, journal={Journal of Networks, Issue N6, Academy Publisher 3, 4 (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0812.2989}, primaryClass={cs.MM} }
louberry2008heterogeneous
arxiv-5774
0812.2990
Tree-width of hypergraphs and surface duality
<|reference_start|>Tree-width of hypergraphs and surface duality: In Graph Minor III, Robertson and Seymour conjecture that the tree-width of a planar graph and that of its dual differ by at most one. We prove that given a hypergraph H on a surface of Euler genus k, the tree-width of H^* is at most the maximum of tw(H) + 1 + k and the maximum size of a hyperedge of H^*.<|reference_end|>
arxiv
@article{mazoit2008tree-width, title={Tree-width of hypergraphs and surface duality}, author={Frédéric Mazoit (LaBRI)}, journal={arXiv preprint arXiv:0812.2990}, year={2008}, archivePrefix={arXiv}, eprint={0812.2990}, primaryClass={cs.DM} }
mazoit2008tree-width
arxiv-5775
0812.2991
Analyse et structuration automatique des guides de bonnes pratiques cliniques : essai d'évaluation
<|reference_start|>Analyse et structuration automatique des guides de bonnes pratiques cliniques : essai d'évaluation: Health practice guidelines are supposed to unify practices and propose recommendations to physicians. This paper describes GemFrame, a system capable of semi-automatically filling an XML template from free texts in the clinical domain. The XML template includes semantic information not explicitly encoded in the text (pairs of conditions and actions/recommendations). Therefore, there is a need to compute the exact scope of conditions over text sequences expressing the required actions. We present a system developed for this task. We show that it yields good performance when applied to the analysis of French practice guidelines. We conclude with a precise evaluation of the tool.<|reference_end|>
arxiv
@article{bouffier2008analyse, title={Analyse et structuration automatique des guides de bonnes pratiques cliniques : essai d'\'evaluation}, author={Amanda Bouffier (LIPN), Thierry Poibeau (LIPN), Catherine Duclos (LIM&Bio)}, journal={arXiv preprint arXiv:0812.2991}, year={2008}, archivePrefix={arXiv}, eprint={0812.2991}, primaryClass={cs.AI} }
bouffier2008analyse
arxiv-5776
0812.3066
Beyond Bandlimited Sampling: Nonlinearities, Smoothness and Sparsity
<|reference_start|>Beyond Bandlimited Sampling: Nonlinearities, Smoothness and Sparsity: Sampling theory has benefited from a surge of research in recent years, due in part to the intense research in wavelet theory and the connections made between the two fields. In this survey we present several extensions of the Shannon theorem, that have been developed primarily in the past two decades, which treat a wide class of input signals as well as nonideal sampling and nonlinear distortions. This framework is based on viewing sampling in a broader sense of projection onto appropriate subspaces, and then choosing the subspaces to yield interesting new possibilities. For example, our results can be used to uniformly sample non-bandlimited signals, and to perfectly compensate for nonlinear effects.<|reference_end|>
arxiv
@article{eldar2008beyond, title={Beyond Bandlimited Sampling: Nonlinearities, Smoothness and Sparsity}, author={Y. C. Eldar, T. Michaeli}, journal={arXiv preprint arXiv:0812.3066}, year={2008}, archivePrefix={arXiv}, eprint={0812.3066}, primaryClass={cs.IT math.IT} }
eldar2008beyond
arxiv-5777
0812.3068
Branching Bisimilarity with Explicit Divergence
<|reference_start|>Branching Bisimilarity with Explicit Divergence: We consider the relational characterisation of branching bisimilarity with explicit divergence. We prove that it is an equivalence and that it coincides with the original definition of branching bisimilarity with explicit divergence in terms of coloured traces. We also establish a correspondence with several variants of an action-based modal logic with until- and divergence modalities.<|reference_end|>
arxiv
@article{van glabbeek2008branching, title={Branching Bisimilarity with Explicit Divergence}, author={Rob van Glabbeek, Bas Luttik and Nikola Trcka}, journal={arXiv preprint arXiv:0812.3068}, year={2008}, number={CS-R 08-25}, archivePrefix={arXiv}, eprint={0812.3068}, primaryClass={cs.LO} }
van glabbeek2008branching
arxiv-5778
0812.3070
A Computational Model to Disentangle Semantic Information Embedded in Word Association Norms
<|reference_start|>A Computational Model to Disentangle Semantic Information Embedded in Word Association Norms: Two well-known databases of semantic relationships between pairs of words used in psycholinguistics, feature-based and association-based, are studied as complex networks. We propose an algorithm to disentangle feature based relationships from free association semantic networks. The algorithm uses the rich topology of the free association semantic network to produce a new set of relationships between words similar to those observed in feature production norms.<|reference_end|>
arxiv
@article{borge2008a, title={A Computational Model to Disentangle Semantic Information Embedded in Word Association Norms}, author={J. Borge, A. Arenas}, journal={arXiv preprint arXiv:0812.3070}, year={2008}, archivePrefix={arXiv}, eprint={0812.3070}, primaryClass={cs.CL cs.AI physics.data-an physics.soc-ph} }
borge2008a
arxiv-5779
0812.3120
Mode Switching for MIMO Broadcast Channel Based on Delay and Channel Quantization
<|reference_start|>Mode Switching for MIMO Broadcast Channel Based on Delay and Channel Quantization: Imperfect channel state information degrades the performance of multiple-input multiple-output (MIMO) communications; its effect on single-user (SU) and multi-user (MU) MIMO transmissions are quite different. In particular, MU-MIMO suffers from residual inter-user interference due to imperfect channel state information while SU-MIMO only suffers from a power loss. This paper compares the throughput loss of both SU and MU MIMO on the downlink due to delay and channel quantization. Accurate closed-form approximations are derived for the achievable rates for both SU and MU MIMO. It is shown that SU-MIMO is relatively robust to delayed and quantized channel information, while MU MIMO with zero-forcing precoding loses spatial multiplexing gain with a fixed delay or fixed codebook size. Based on derived achievable rates, a mode switching algorithm is proposed that switches between SU and MU MIMO modes to improve the spectral efficiency, based on the average signal-to-noise ratio (SNR), the normalized Doppler frequency, and the channel quantization codebook size. The operating regions for SU and MU modes with different delays and codebook sizes are determined, which can be used to select the preferred mode. It is shown that the MU mode is active only when the normalized Doppler frequency is very small and the codebook size is large.<|reference_end|>
arxiv
@article{zhang2008mode, title={Mode Switching for MIMO Broadcast Channel Based on Delay and Channel Quantization}, author={Jun Zhang, Robert W. Heath Jr., Marios Kountouris, and Jeffrey G. Andrews}, journal={arXiv preprint arXiv:0812.3120}, year={2008}, doi={10.1155/2009/802548}, archivePrefix={arXiv}, eprint={0812.3120}, primaryClass={cs.IT math.IT} }
zhang2008mode
arxiv-5780
0812.3124
Achievable Throughput of Multi-mode Multiuser MIMO with Imperfect CSI Constraints
<|reference_start|>Achievable Throughput of Multi-mode Multiuser MIMO with Imperfect CSI Constraints: For the multiple-input multiple-output (MIMO) broadcast channel with imperfect channel state information (CSI), neither the capacity nor the optimal transmission technique have been fully discovered. In this paper, we derive achievable ergodic rates for a MIMO fading broadcast channel when CSI is delayed and quantized. It is shown that we should not support too many users with spatial division multiplexing due to the residual inter-user interference caused by imperfect CSI. Based on the derived achievable rates, we propose a multi-mode transmission strategy to maximize the throughput, which adaptively adjusts the number of active users based on the channel statistics information.<|reference_end|>
arxiv
@article{zhang2008achievable, title={Achievable Throughput of Multi-mode Multiuser MIMO with Imperfect CSI Constraints}, author={Jun Zhang, Marios Kountouris, Jeffrey G. Andrews and Robert W. Heath Jr}, journal={arXiv preprint arXiv:0812.3124}, year={2008}, doi={10.1109/ISIT.2009.5205915}, archivePrefix={arXiv}, eprint={0812.3124}, primaryClass={cs.IT math.IT} }
zhang2008achievable
arxiv-5781
0812.3137
Compressive sensing: a paradigm shift in signal processing
<|reference_start|>Compressive sensing: a paradigm shift in signal processing: We survey a new paradigm in signal processing known as "compressive sensing". Contrary to old practices of data acquisition and reconstruction based on the Shannon-Nyquist sampling principle, the new theory shows that it is possible to reconstruct images or signals of scientific interest accurately and even exactly from a number of samples which is far smaller than the desired resolution of the image/signal, e.g., the number of pixels in the image. This new technique draws from results in several fields of mathematics, including algebra, optimization, probability theory, and harmonic analysis. We will discuss some of the key mathematical ideas behind compressive sensing, as well as its implications to other fields: numerical analysis, information theory, theoretical computer science, and engineering.<|reference_end|>
arxiv
@article{holtz2008compressive, title={Compressive sensing: a paradigm shift in signal processing}, author={Olga Holtz}, journal={arXiv preprint arXiv:0812.3137}, year={2008}, archivePrefix={arXiv}, eprint={0812.3137}, primaryClass={math.HO cs.DS cs.NA math.NA math.OC} }
holtz2008compressive
arxiv-5782
0812.3145
Binary Classification Based on Potentials
<|reference_start|>Binary Classification Based on Potentials: We introduce a simple and computationally trivial method for binary classification based on the evaluation of potential functions. We demonstrate that despite the conceptual and computational simplicity of the method its performance can match or exceed that of standard Support Vector Machine methods.<|reference_end|>
arxiv
@article{boczko2008binary, title={Binary Classification Based on Potentials}, author={Erik Boczko, Andrew DiLullo and Todd Young}, journal={arXiv preprint arXiv:0812.3145}, year={2008}, archivePrefix={arXiv}, eprint={0812.3145}, primaryClass={cs.LG} }
boczko2008binary
arxiv-5783
0812.3147
Comparison of Binary Classification Based on Signed Distance Functions with Support Vector Machines
<|reference_start|>Comparison of Binary Classification Based on Signed Distance Functions with Support Vector Machines: We investigate the performance of a simple signed distance function (SDF) based method by direct comparison with standard SVM packages, as well as K-nearest neighbor and RBFN methods. We present experimental results comparing the SDF approach with other classifiers on both synthetic geometric problems and five benchmark clinical microarray data sets. On both geometric problems and microarray data sets, the non-optimized SDF based classifiers perform just as well or slightly better than well-developed, standard SVM methods. These results demonstrate the potential accuracy of SDF-based methods on some types of problems.<|reference_end|>
arxiv
@article{boczko2008comparison, title={Comparison of Binary Classification Based on Signed Distance Functions with Support Vector Machines}, author={Erik M. Boczko, Todd Young, Minhui Zie, and Di Wu}, journal={arXiv preprint arXiv:0812.3147}, year={2008}, archivePrefix={arXiv}, eprint={0812.3147}, primaryClass={cs.LG cs.CG} }
boczko2008comparison
arxiv-5784
0812.3186
Bounds for the discrete correlation of infinite sequences on k symbols and generalized Rudin-Shapiro sequences
<|reference_start|>Bounds for the discrete correlation of infinite sequences on k symbols and generalized Rudin-Shapiro sequences: Motivated by the known autocorrelation properties of the Rudin-Shapiro sequence, we study the discrete correlation among infinite sequences over a finite alphabet, where we just take into account whether two symbols are identical. We show by combinatorial means that sequences cannot be "too" different, and by an explicit construction generalizing the Rudin-Shapiro sequence, we show that we can achieve the maximum possible difference.<|reference_end|>
arxiv
@article{grant2008bounds, title={Bounds for the discrete correlation of infinite sequences on k symbols and generalized Rudin-Shapiro sequences}, author={E. Grant, J. Shallit, T. Stoll}, journal={arXiv preprint arXiv:0812.3186}, year={2008}, doi={10.4064/aa140-4-5}, archivePrefix={arXiv}, eprint={0812.3186}, primaryClass={math.CO cs.FL math.NT} }
grant2008bounds
arxiv-5785
0812.3214
Two conjectures such that the proof of any one of them will lead to the proof that P = NP
<|reference_start|>Two conjectures such that the proof of any one of them will lead to the proof that P = NP: In this paper we define a construct called a time-graph. A complete time-graph of order n is the Cartesian product of a complete graph with n vertices and a linear graph with n vertices. A time-graph of order n is given by a subset of the set of edges E(n) of such a graph. The notion of a Hamiltonian time-graph is defined in a natural way and we define the Hamiltonian time-graph problem (HAMTG) as: Given a time-graph, is it Hamiltonian? We show that the Hamiltonian path problem (HAMP) can be transformed to HAMTG in polynomial time. We then define certain vector spaces of functions from E(n) and E(n) x E(n) to B = {0,1}, the field of two elements, and derive certain properties of these spaces. We give two conjectures about these spaces and prove that if any one of these conjectures is true, we obtain a polynomial-time algorithm for the Hamiltonian path problem. Since the Hamiltonian path problem is NP-complete, we obtain a proof that P = NP provided any one of the two conjectures is true.<|reference_end|>
arxiv
@article{dutta2008two, title={Two conjectures such that the proof of any one of them will lead to the proof that P = NP}, author={Malay Dutta (Tezpur University India)}, journal={arXiv preprint arXiv:0812.3214}, year={2008}, archivePrefix={arXiv}, eprint={0812.3214}, primaryClass={cs.CC} }
dutta2008two
arxiv-5786
0812.3226
BiopSym: a simulator for enhanced learning of ultrasound-guided prostate biopsy
<|reference_start|>BiopSym: a simulator for enhanced learning of ultrasound-guided prostate biopsy: This paper describes a simulator of ultrasound-guided prostate biopsies for cancer diagnosis. When performing biopsy series, the clinician has to move the ultrasound probe and to mentally integrate the real-time two-dimensional images into a three-dimensional (3D) representation of the anatomical environment. Such a 3D representation is necessary to sample the prostate regularly in order to maximize the probability of detecting a cancer, if one is present. To make the training of young physicians easier and faster, we developed a simulator that combines images computed from recorded three-dimensional ultrasound data with haptic feedback. The paper presents the first version of this simulator.<|reference_end|>
arxiv
@article{sclaverano2008biopsym:, title={BiopSym: a simulator for enhanced learning of ultrasound-guided prostate biopsy}, author={Stefano Sclaverano (TIMC), Gr\'egoire Chevreau (TIMC), Lucile Vadcard (LSE), Pierre Mozer, Jocelyne Troccaz (TIMC)}, journal={Medicine Meets Virtual Reality, Los Angeles, United States (2009)}, year={2008}, doi={10.3233/978-1-58603-964-6-301}, archivePrefix={arXiv}, eprint={0812.3226}, primaryClass={cs.RO} }
sclaverano2008biopsym:
arxiv-5787
0812.3232
Maximum Sum-Rate of MIMO Multiuser Scheduling with Linear Receivers
<|reference_start|>Maximum Sum-Rate of MIMO Multiuser Scheduling with Linear Receivers: We analyze scheduling algorithms for multiuser communication systems with users having multiple antennas and linear receivers. When there is no feedback of channel information, we consider a common round robin scheduling algorithm, and derive new exact and high signal-to-noise ratio (SNR) maximum sum-rate results for the maximum ratio combining (MRC) and minimum mean squared error (MMSE) receivers. We also present new analysis of MRC, zero forcing (ZF) and MMSE receivers in the low SNR regime. When there are limited feedback capabilities in the system, we consider a common practical scheduling scheme based on signal-to-interference-and-noise ratio (SINR) feedback at the transmitter. We derive new accurate approximations for the maximum sum-rate, for the cases of MRC, ZF and MMSE receivers. We also derive maximum sum-rate scaling laws, which reveal that the maximum sum-rate of all three linear receivers converge to the same value for a large number of users, but at different rates.<|reference_end|>
arxiv
@article{louie2008maximum, title={Maximum Sum-Rate of MIMO Multiuser Scheduling with Linear Receivers}, author={Raymond H. Y. Louie, Matthew R. McKay, Iain B. Collings}, journal={arXiv preprint arXiv:0812.3232}, year={2008}, doi={10.1109/TCOMM.2009.11.080210}, archivePrefix={arXiv}, eprint={0812.3232}, primaryClass={cs.IT math.IT} }
louie2008maximum
arxiv-5788
0812.3249
Chain-Based Representations for Solid and Physical Modeling
<|reference_start|>Chain-Based Representations for Solid and Physical Modeling: In this paper we show that the (co)chain complex associated with a decomposition of the computational domain, commonly called a mesh in computational science and engineering, can be represented by a block-bidiagonal matrix that we call the Hasse matrix. Moreover, we show that topology-preserving mesh refinements, produced by the action of (the simplest) Euler operators, can be reduced to multilinear transformations of the Hasse matrix representing the complex. Our main result is a new representation of the (co)chain complex underlying field computations, a representation that provides new insights into the transformations induced by local mesh refinements. Our approach is based on first principles and is general in that it applies to most representational domains that can be characterized as cell complexes, without any restrictions on their type, dimension, codimension, orientability, manifoldness, connectedness.<|reference_end|>
arxiv
@article{dicarlo2008chain-based, title={Chain-Based Representations for Solid and Physical Modeling}, author={Antonio DiCarlo, Franco Milicchio, Alberto Paoluzzi, and Vadim Shapiro}, journal={arXiv preprint arXiv:0812.3249}, year={2008}, archivePrefix={arXiv}, eprint={0812.3249}, primaryClass={cs.CG} }
dicarlo2008chain-based
arxiv-5789
0812.3259
Approximate conditional distributions of distances between nodes in a two-dimensional sensor network
<|reference_start|>Approximate conditional distributions of distances between nodes in a two-dimensional sensor network: When we represent a network of sensors in Euclidean space by a graph, there are two distances between any two nodes that we may consider. One of them is the Euclidean distance. The other is the distance between the two nodes in the graph, defined to be the number of edges on a shortest path between them. In this paper, we consider a network of sensors placed uniformly at random in a two-dimensional region and study two conditional distributions related to these distances. The first is the probability distribution of distances in the graph, conditioned on Euclidean distances; the other is the probability density function associated with Euclidean distances, conditioned on distances in the graph. We study these distributions both analytically (when feasible) and by means of simulations. To the best of our knowledge, our results constitute the first of their kind and open up the possibility of discovering improved solutions to certain sensor-network problems, such as sensor localization.<|reference_end|>
arxiv
@article{leao2008approximate, title={Approximate conditional distributions of distances between nodes in a two-dimensional sensor network}, author={Rodrigo S. C. Leao, Valmir C. Barbosa}, journal={Lecture Notes in Computer Science 5513 (2009), 324-338}, year={2008}, doi={10.1007/978-3-642-02205-0_23}, archivePrefix={arXiv}, eprint={0812.3259}, primaryClass={cs.NI} }
leao2008approximate
arxiv-5790
0812.3285
On Successive Refinement for the Kaspi/Heegard-Berger Problem
<|reference_start|>On Successive Refinement for the Kaspi/Heegard-Berger Problem: Consider a source that produces independent copies of a triplet of jointly distributed random variables, $\{X_{i},Y_{i},Z_{i}\}_{i=1}^{\infty}$. The process $\{X_{i}\}$ is observed at the encoder, and is supposed to be reproduced at two decoders, where $\{Y_{i}\}$ and $\{Z_{i}\}$ are observed, in either a causal or non-causal manner. The communication between the encoder and the decoders is carried out in two successive stages. In the first stage, the transmission is available to both decoders and the source is reconstructed according to the received bit-stream and the individual side information (SI). In the second stage, additional information is sent to both decoders and the source reconstructions are refined according to the transmissions at both stages and the available SI. It is desired to find the necessary and sufficient conditions on the communication rates between the encoder and decoders, so that the distortions incurred (at each stage) will not exceed given thresholds. For the case of non-degraded causal SI at the decoders, an exact single-letter characterization of the achievable region is derived for the case of pure source-coding. Then, for the case of communication carried over independent DMS's with random states known causally/non-causally at the encoder and with causal SI about the source at the decoders, a single-letter characterization of all achievable distortions in both stages is provided and it is shown that the separation theorem holds. Finally, for non-causal degraded SI, inner and outer bounds to the achievable rate-distortion region are derived. These bounds are shown to be tight for certain cases of reconstruction requirements at the decoders, thereby shedding some light on the problem of successive refinement with non-degraded SI at the decoders.<|reference_end|>
arxiv
@article{maor2008on, title={On Successive Refinement for the Kaspi/Heegard-Berger Problem}, author={Alina Maor and Neri Merhav}, journal={arXiv preprint arXiv:0812.3285}, year={2008}, number={Technical Report, CCIT Pub. no. 711, EE Pub. no. 1668, December 2008}, archivePrefix={arXiv}, eprint={0812.3285}, primaryClass={cs.IT math.IT} }
maor2008on
arxiv-5791
0812.3306
Worst-Case Optimal Adaptive Prefix Coding
<|reference_start|>Worst-Case Optimal Adaptive Prefix Coding: A common complaint about adaptive prefix coding is that it is much slower than static prefix coding. Karpinski and Nekrich recently took an important step towards resolving this: they gave an adaptive Shannon coding algorithm that encodes each character in $O(1)$ amortized time and decodes it in $O(\log H)$ amortized time, where $H$ is the empirical entropy of the input string $s$. For comparison, Gagie's adaptive Shannon coder and both Knuth's and Vitter's adaptive Huffman coders all use $\Theta(H)$ amortized time for each character. In this paper we give an adaptive Shannon coder that both encodes and decodes each character in $O(1)$ worst-case time. As with both previous adaptive Shannon coders, we store $s$ in at most $(H + 1)|s| + o(|s|)$ bits. We also show that this encoding length is worst-case optimal up to the lower-order term.<|reference_end|>
arxiv
@article{gagie2008worst-case, title={Worst-Case Optimal Adaptive Prefix Coding}, author={Travis Gagie and Yakov Nekrich}, journal={arXiv preprint arXiv:0812.3306}, year={2008}, archivePrefix={arXiv}, eprint={0812.3306}, primaryClass={cs.IT math.IT} }
gagie2008worst-case
arxiv-5792
0812.3404
Diversity-Multiplexing Tradeoff for the MIMO Static Half-Duplex Relay
<|reference_start|>Diversity-Multiplexing Tradeoff for the MIMO Static Half-Duplex Relay: In this work, we investigate the diversity-multiplexing tradeoff (DMT) of the multiple-antenna (MIMO) static half-duplex relay channel. A general expression is derived for the DMT upper bound, which can be achieved by a compress-and-forward protocol at the relay, under certain assumptions. The DMT expression is given as the solution of a minimization problem in general, and an explicit expression is found when the relay channel is symmetric in terms of the number of antennas, i.e., the source and the destination have n antennas each, and the relay has m antennas. It is observed that the static half-duplex DMT matches the full-duplex DMT when the relay has a single antenna, and is strictly below the full-duplex DMT when the relay has multiple antennas. In addition, the derivation of the upper bound involves a new asymptotic study of spherical integrals (that is, integrals with respect to the Haar measure on the unitary group U(n)), which is a topic of mathematical interest in itself.<|reference_end|>
arxiv
@article{leveque2008diversity-multiplexing, title={Diversity-Multiplexing Tradeoff for the MIMO Static Half-Duplex Relay}, author={Olivier Leveque, Christophe Vignat, Melda Yuksel}, journal={arXiv preprint arXiv:0812.3404}, year={2008}, archivePrefix={arXiv}, eprint={0812.3404}, primaryClass={cs.IT math.IT} }
leveque2008diversity-multiplexing
arxiv-5793
0812.3429
Quantum Predictive Learning and Communication Complexity with Single Input
<|reference_start|>Quantum Predictive Learning and Communication Complexity with Single Input: We define a new model of quantum learning that we call Predictive Quantum (PQ). This is a quantum analogue of PAC, where during the testing phase the student is only required to answer a polynomial number of testing queries. We demonstrate a relational concept class that is efficiently learnable in PQ, while in any "reasonable" classical model an exponential amount of training data would be required. This is the first unconditional separation between quantum and classical learning. We show that our separation is the best possible in several ways; in particular, there is no analogous result for a functional class, nor for several weaker versions of quantum learning. In order to demonstrate the tightness of our separation, we consider a special case of one-way communication that we call single-input mode, where Bob receives no input. Somewhat surprisingly, this setting becomes nontrivial when relational communication tasks are considered. In particular, any problem with two-sided input can be transformed into a single-input relational problem of equal classical one-way cost. We show that the situation is different in the quantum case, where the same transformation can make the communication complexity exponentially larger. This happens if and only if the original problem has an exponential gap between quantum and classical one-way communication costs. We believe that these auxiliary results might be of independent interest.<|reference_end|>
arxiv
@article{gavinsky2008quantum, title={Quantum Predictive Learning and Communication Complexity with Single Input}, author={Dmytro Gavinsky}, journal={arXiv preprint arXiv:0812.3429}, year={2008}, archivePrefix={arXiv}, eprint={0812.3429}, primaryClass={quant-ph cs.LG} }
gavinsky2008quantum
arxiv-5794
0812.3447
Completion Time Minimization and Robust Power Control in Wireless Packet Networks
<|reference_start|>Completion Time Minimization and Robust Power Control in Wireless Packet Networks: A wireless packet network is considered in which each user transmits a stream of packets to its destination. The transmit power of each user interferes with the transmission of all other users. A convex cost function of the completion times of the user packets is minimized by optimally allocating the users' transmission power subject to their respective power constraints. At all ranges of SINR, completion time minimization can be formulated as a convex optimization problem and hence can be efficiently solved. In particular, although the feasible rate region of the wireless network is non-convex, its corresponding completion time region is shown to be convex. When channel knowledge is imperfect, robust power control is considered based on the channel fading distribution subject to outage probability constraints. The problem is shown to be convex when the fading distribution is log-concave in exponentiated channel power gains; e.g., when each user is under independent Rayleigh, Nakagami, or log-normal fading. Applying the optimization frameworks in a wireless cellular network, the average completion time is significantly reduced as compared to full power transmission.<|reference_end|>
arxiv
@article{ng2008completion, title={Completion Time Minimization and Robust Power Control in Wireless Packet Networks}, author={Chris T. K. Ng, Muriel Medard, Asuman Ozdaglar}, journal={arXiv preprint arXiv:0812.3447}, year={2008}, archivePrefix={arXiv}, eprint={0812.3447}, primaryClass={cs.IT math.IT} }
ng2008completion
arxiv-5795
0812.3465
Linearly Parameterized Bandits
<|reference_start|>Linearly Parameterized Bandits: We consider bandit problems involving a large (possibly infinite) collection of arms, in which the expected reward of each arm is a linear function of an $r$-dimensional random vector $\mathbf{Z} \in \mathbb{R}^r$, where $r \geq 2$. The objective is to minimize the cumulative regret and Bayes risk. When the set of arms corresponds to the unit sphere, we prove that the regret and Bayes risk are of order $\Theta(r \sqrt{T})$, by establishing a lower bound for an arbitrary policy, and showing that a matching upper bound is obtained through a policy that alternates between exploration and exploitation phases. The phase-based policy is also shown to be effective if the set of arms satisfies a strong convexity condition. For the case of a general set of arms, we describe a near-optimal policy whose regret and Bayes risk admit upper bounds of the form $O(r \sqrt{T} \log^{3/2} T)$.<|reference_end|>
arxiv
@article{rusmevichientong2008linearly, title={Linearly Parameterized Bandits}, author={Paat Rusmevichientong and John N. Tsitsiklis}, journal={arXiv preprint arXiv:0812.3465}, year={2008}, archivePrefix={arXiv}, eprint={0812.3465}, primaryClass={cs.LG} }
rusmevichientong2008linearly
arxiv-5796
0812.3478
Automatic Construction of Lightweight Domain Ontologies for Chemical Engineering Risk Management
<|reference_start|>Automatic Construction of Lightweight Domain Ontologies for Chemical Engineering Risk Management: The need for domain ontologies in mission-critical applications such as risk management and hazard identification is becoming more and more pressing. Most research on ontology learning conducted in academia remains unrealistic for real-world applications. One of the main problems is the dependence on non-incremental, rare knowledge and textual resources, and manually-crafted patterns and rules. This paper reports work in progress aiming to address such undesirable dependencies during ontology construction. Initial experiments using a working prototype of the system revealed promising potential for automatically constructing high-quality domain ontologies using real-world texts.<|reference_end|>
arxiv
@article{wong2008automatic, title={Automatic Construction of Lightweight Domain Ontologies for Chemical Engineering Risk Management}, author={Wilson Wong, Wei Liu, Saujoe Liaw, Nicoletta Balliu, Hongwei Wu, Moses Tade}, journal={arXiv preprint arXiv:0812.3478}, year={2008}, archivePrefix={arXiv}, eprint={0812.3478}, primaryClass={cs.AI} }
wong2008automatic
arxiv-5797
0812.3550
XML Static Analyzer User Manual
<|reference_start|>XML Static Analyzer User Manual: This document describes how to use the XML static analyzer in practice. It provides informal documentation for using the XML reasoning solver implementation. The solver allows automated verification of properties that are expressed as logical formulas over trees. A logical formula may, for instance, express structural constraints or navigation properties (e.g., path existence and node selection) in finite trees. Logical formulas can be expressed using the syntax of XPath expressions, DTDs, XML Schemas, and Relax NG definitions.<|reference_end|>
arxiv
@article{geneves2008xml, title={XML Static Analyzer User Manual}, author={Pierre Geneves and Nabil Layaida}, journal={arXiv preprint arXiv:0812.3550}, year={2008}, number={RR-6726}, archivePrefix={arXiv}, eprint={0812.3550}, primaryClass={cs.PL cs.DB cs.LO cs.SE} }
geneves2008xml
arxiv-5798
0812.3563
Questions & Answers for TEI Newcomers
<|reference_start|>Questions & Answers for TEI Newcomers: This paper provides an introduction to the Text Encoding Initiative (TEI), aimed at newcomers who have to deal with a digital document project and want to know whether the TEI environment can fulfil their needs. To this end, we avoid a strictly technical presentation of the TEI and concentrate on the actual issues that such projects face, drawing a parallel with the situation within two institutions. While a quick walkthrough of the TEI technical framework is provided, the paper ends by showing the essential role of the community in the actual technical contributions that are being brought to the TEI.<|reference_end|>
arxiv
@article{romary2008questions, title={Questions & Answers for TEI Newcomers}, author={Laurent Romary (LORIA)}, journal={Jahrbuch f\"ur Computerphilologie 10 (2009)}, year={2008}, archivePrefix={arXiv}, eprint={0812.3563}, primaryClass={cs.DL} }
romary2008questions
arxiv-5799
0812.3593
A randomized polynomial-time algorithm for the Spanning Hypertree Problem on 3-uniform hypergraphs
<|reference_start|>A randomized polynomial-time algorithm for the Spanning Hypertree Problem on 3-uniform hypergraphs: Consider the problem of determining whether there exists a spanning hypertree in a given k-uniform hypergraph. This problem is trivially in P for k=2, and is NP-complete for k >= 4, whereas for k=3, there exists a polynomial-time algorithm based on Lovasz' theory of polymatroid matching. Here we give a completely different, randomized polynomial-time algorithm in the case k=3. The main ingredients are a Pfaffian formula by Vaintrob and one of the authors (G.M.) for a polynomial that enumerates spanning hypertrees with some signs, and a lemma on the number of roots of polynomials over a finite field.<|reference_end|>
arxiv
@article{caracciolo2008a, title={A randomized polynomial-time algorithm for the Spanning Hypertree Problem on 3-uniform hypergraphs}, author={Sergio Caracciolo, Gregor Masbaum, Alan D. Sokal, Andrea Sportiello}, journal={arXiv preprint arXiv:0812.3593}, year={2008}, archivePrefix={arXiv}, eprint={0812.3593}, primaryClass={cs.CC math.CO} }
caracciolo2008a
arxiv-5800
0812.3632
Optimal detection of homogeneous segment of observations in stochastic sequence
<|reference_start|>Optimal detection of homogeneous segment of observations in stochastic sequence: A Markov process is observed. At a random moment $\theta$, the distribution of the observed sequence changes. Using a probability-maximizing approach, the optimal stopping rule for detecting the change is identified. An explicit solution is obtained.<|reference_end|>
arxiv
@article{sarnowski2008optimal, title={Optimal detection of homogeneous segment of observations in stochastic sequence}, author={Wojciech Sarnowski, Krzysztof Szajowski}, journal={Stochastics An International Journal of Probability and Stochastic Processes, Vol. 83, Issue 4-6, 2011, pp. 569-581}, year={2008}, doi={10.1080/17442508.2010.540015}, number={Institute of Mathematics, Polish Academy of Science 696}, archivePrefix={arXiv}, eprint={0812.3632}, primaryClass={math.PR cs.IT math.IT math.ST stat.TH} }
sarnowski2008optimal