corpus_id
stringlengths 7
12
| paper_id
stringlengths 9
16
| title
stringlengths 1
261
| abstract
stringlengths 70
4.02k
| source
stringclasses 1
value | bibtex
stringlengths 208
20.9k
| citation_key
stringlengths 6
100
|
---|---|---|---|---|---|---|
arxiv-6501 | 0902.3861 | A Few Remarks About Formal Development of Secure Systems | <|reference_start|>A Few Remarks About Formal Development of Secure Systems: Formal methods provide remarkable tools allowing for high levels of confidence in the correctness of developments. Their use is therefore encouraged, when not required, for the development of systems in which safety or security is mandatory. But effectively specifying a secure system or deriving a secure implementation can be tricky. We propose a review of some classical `gotchas' and other possible sources of concerns with the objective to improve the confidence in formal developments, or at least to better assess the actual confidence level.<|reference_end|> | arxiv | @article{jaeger2009a,
title={A Few Remarks About Formal Development of Secure Systems},
author={Eric Jaeger (LIP6, Dcssi/SDS/Lti), Th{\'e}r{\`e}se Hardin (LIP6)},
journal={High Assurance Systems Engineering Symposium, Nanjing : Chine
(2008)},
year={2009},
doi={10.1109/HASE.2008.49},
archivePrefix={arXiv},
eprint={0902.3861},
primaryClass={cs.LO}
} | jaeger2009a |
arxiv-6502 | 0902.3865 | Yet Another Deep Embedding of B:Extending de Bruijn Notations | <|reference_start|>Yet Another Deep Embedding of B:Extending de Bruijn Notations: We present Bicoq3, a deep embedding of the B system in Coq, focusing on the technical aspects of the development. The main subjects discussed are related to the representation of sets and maps, the use of induction principles, and the introduction of a new de Bruijn notation providing solutions to various problems related to the mechanisation of languages and logics.<|reference_end|> | arxiv | @article{jaeger2009yet,
title={Yet Another Deep Embedding of B:Extending de Bruijn Notations},
author={Eric Jaeger (LIP6, Dcssi/SDS/Lti), Th{\'e}r{\`e}se Hardin (LIP6)},
journal={arXiv preprint arXiv:0902.3865},
year={2009},
archivePrefix={arXiv},
eprint={0902.3865},
primaryClass={cs.LO}
} | jaeger2009yet |
arxiv-6503 | 0902.3883 | Directed Graph Representation of Half-Rate Additive Codes over GF(4) | <|reference_start|>Directed Graph Representation of Half-Rate Additive Codes over GF(4): We show that (n,2^n) additive codes over GF(4) can be represented as directed graphs. This generalizes earlier results on self-dual additive codes over GF(4), which correspond to undirected graphs. Graph representation reduces the complexity of code classification, and enables us to classify additive (n,2^n) codes over GF(4) of length up to 7. From this we also derive classifications of isodual and formally self-dual codes. We introduce new constructions of circulant and bordered circulant directed graph codes, and show that these codes will always be isodual. A computer search of all such codes of length up to 26 reveals that these constructions produce many codes of high minimum distance. In particular, we find new near-extremal formally self-dual codes of length 11 and 13, and isodual codes of length 24, 25, and 26 with better minimum distance than the best known self-dual codes.<|reference_end|> | arxiv | @article{danielsen2009directed,
title={Directed Graph Representation of Half-Rate Additive Codes over GF(4)},
author={Lars Eirik Danielsen and Matthew G. Parker},
journal={Des. Codes Cryptogr. 59, pp. 119-130, 2011},
year={2009},
doi={10.1007/s10623-010-9469-6},
archivePrefix={arXiv},
eprint={0902.3883},
primaryClass={math.CO cs.IT math.IT}
} | danielsen2009directed |
arxiv-6504 | 0902.3958 | Antichains for the Automata-Based Approach to Model-Checking | <|reference_start|>Antichains for the Automata-Based Approach to Model-Checking: We propose and evaluate antichain algorithms to solve the universality and language inclusion problems for nondeterministic Buechi automata, and the emptiness problem for alternating Buechi automata. To obtain those algorithms, we establish the existence of simulation pre-orders that can be exploited to efficiently evaluate fixed points on the automata defined during the complementation step (that we keep implicit in our approach). We evaluate the performance of the algorithm to check the universality of Buechi automata using the random automaton model recently proposed by Tabakov and Vardi. We show that on the difficult instances of this probabilistic model, our algorithm outperforms the standard ones by several orders of magnitude.<|reference_end|> | arxiv | @article{doyen2009antichains,
title={Antichains for the Automata-Based Approach to Model-Checking},
author={Laurent Doyen and Jean-Francois Raskin},
journal={Logical Methods in Computer Science, Volume 5, Issue 1 (March 2,
2009) lmcs:1027},
year={2009},
doi={10.2168/LMCS-5(1:5)2009},
archivePrefix={arXiv},
eprint={0902.3958},
primaryClass={cs.LO}
} | doyen2009antichains |
arxiv-6505 | 0902.3979 | Optimal Control of a Single Queue with Retransmissions: Delay-Dropping Tradeoffs | <|reference_start|>Optimal Control of a Single Queue with Retransmissions: Delay-Dropping Tradeoffs: A single queue incorporating a retransmission protocol is investigated, assuming that the sequence of per effort success probabilities in the Automatic Retransmission reQuest (ARQ) chain is a priori defined and no channel state information at the transmitter is available. A Markov Decision Problem with an average cost criterion is formulated where the possible actions are to either continue the retransmission process of an erroneous packet at the next time slot or to drop the packet and move on to the next packet awaiting for transmission. The cost per slot is a linear combination of the current queue length and a penalty term in case dropping is chosen as action. The investigation seeks policies that provide the best possible average packet delay-dropping trade-off for Quality of Service guarantees. An optimal deterministic stationary policy is shown to exist, several structural properties of which are obtained. Based on that, a class of suboptimal <L,K>-policies is introduced. These suggest that it is almost optimal to use a K-truncated ARQ protocol as long as the queue length is lower than L, else send all packets in one shot. The work concludes with an evaluation of the optimal delay-dropping tradeoff using dynamic programming and a comparison between the optimal and suboptimal policies.<|reference_end|> | arxiv | @article{giovanidis2009optimal,
title={Optimal Control of a Single Queue with Retransmissions: Delay-Dropping
Tradeoffs},
author={Anastasios Giovanidis, Gerhard Wunder and Joerg Buehler},
journal={IEEE Transactions on Wireless Communications (Volume:8 , Issue: 7
), pp. 3736--3746, July 2009},
year={2009},
doi={10.1109/TWC.2009.080959},
archivePrefix={arXiv},
eprint={0902.3979},
primaryClass={cs.MM}
} | giovanidis2009optimal |
arxiv-6506 | 0902.4042 | Algebraic operators for querying pattern bases | <|reference_start|>Algebraic operators for querying pattern bases: The objectives of this research work which is intimately related to pattern discovery and management are threefold: (i) handle the problem of pattern manipulation by defining operations on patterns, (ii) study the problem of enriching and updating a pattern set (e.g., concepts, rules) when changes occur in the user's needs and the input data (e.g., object/attribute insertion or elimination, taxonomy utilization), and (iii) approximate a "presumed" concept using a related pattern space so that patterns can augment data with knowledge. To conduct our work, we use formal concept analysis (FCA) as a framework for pattern discovery and management and we take a joint database-FCA perspective by defining operators similar in spirit to relational algebra operators, investigating approximation in concept lattices and exploiting existing work related to operations on contexts and lattices to formalize such operators.<|reference_end|> | arxiv | @article{missaoui2009algebraic,
title={Algebraic operators for querying pattern bases},
author={Rokia Missaoui, Leonard Kwuida, Mohamed Quafafou, Jean Vaillancourt},
journal={arXiv preprint arXiv:0902.4042},
year={2009},
archivePrefix={arXiv},
eprint={0902.4042},
primaryClass={cs.DB cs.IR}
} | missaoui2009algebraic |
arxiv-6507 | 0902.4045 | Sparse Recovery of Positive Signals with Minimal Expansion | <|reference_start|>Sparse Recovery of Positive Signals with Minimal Expansion: We investigate the sparse recovery problem of reconstructing a high-dimensional non-negative sparse vector from lower dimensional linear measurements. While much work has focused on dense measurement matrices, sparse measurement schemes are crucial in applications, such as DNA microarrays and sensor networks, where dense measurements are not practically feasible. One possible construction uses the adjacency matrices of expander graphs, which often leads to recovery algorithms much more efficient than $\ell_1$ minimization. However, to date, constructions based on expanders have required very high expansion coefficients which can potentially make the construction of such graphs difficult and the size of the recoverable sets small. In this paper, we construct sparse measurement matrices for the recovery of non-negative vectors, using perturbations of the adjacency matrix of an expander graph with much smaller expansion coefficient. We present a necessary and sufficient condition for $\ell_1$ optimization to successfully recover the unknown vector and obtain expressions for the recovery threshold. For certain classes of measurement matrices, this necessary and sufficient condition is further equivalent to the existence of a "unique" vector in the constraint set, which opens the door to alternative algorithms to $\ell_1$ minimization. We further show that the minimal expansion we use is necessary for any graph for which sparse recovery is possible and that therefore our construction is tight. We finally present a novel recovery algorithm that exploits expansion and is much faster than $\ell_1$ optimization. Finally, we demonstrate through theoretical bounds, as well as simulation, that our method is robust to noise and approximate sparsity.<|reference_end|> | arxiv | @article{khajehnejad2009sparse,
title={Sparse Recovery of Positive Signals with Minimal Expansion},
author={M.Amin Khajehnejad, Alexandros G. Dimakis, Weiyu Xu, Babak Hassibi},
journal={arXiv preprint arXiv:0902.4045},
year={2009},
archivePrefix={arXiv},
eprint={0902.4045},
primaryClass={cs.IT math.IT}
} | khajehnejad2009sparse |
arxiv-6508 | 0902.4060 | Network of two-Chinese-character compound words in Japanese language | <|reference_start|>Network of two-Chinese-character compound words in Japanese language: Some statistical properties of a network of two-Chinese-character compound words in Japanese language are reported. In this network, a node represents a Chinese character and an edge represents a two-Chinese-character compound word. It is found that this network has properties of "small-world" and "scale-free." A network formed by only Chinese characters for common use ({\it joyo-kanji} in Japanese), which is regarded as a subclass of the original network, also has small-world property. However, a degree distribution of the network exhibits no clear power law. In order to reproduce disappearance of the power-law property, a model for a selecting process of the Chinese characters for common use is proposed.<|reference_end|> | arxiv | @article{yamamoto2009network,
title={Network of two-Chinese-character compound words in Japanese language},
author={Ken Yamamoto, Yoshihiro Yamazaki},
journal={Physica A 388, 2555-2560 (2009)},
year={2009},
doi={10.1016/j.physa.2009.02.032},
archivePrefix={arXiv},
eprint={0902.4060},
primaryClass={cs.CL physics.soc-ph}
} | yamamoto2009network |
arxiv-6509 | 0902.4073 | Dipole and Quadrupole Moments in Image Processing | <|reference_start|>Dipole and Quadrupole Moments in Image Processing: This paper proposes an algorithm for image processing, obtained by adapting to image maps the definitions of two well-known physical quantities. These quantities are the dipole and quadrupole moments of a charge distribution. We will see how it is possible to define dipole and quadrupole moments for the gray-tone maps and apply them in the development of algorithms for edge detection.<|reference_end|> | arxiv | @article{sparavigna2009dipole,
title={Dipole and Quadrupole Moments in Image Processing},
author={Amelia Sparavigna},
journal={arXiv preprint arXiv:0902.4073},
year={2009},
archivePrefix={arXiv},
eprint={0902.4073},
primaryClass={cs.CV}
} | sparavigna2009dipole |
arxiv-6510 | 0902.4091 | Determining the closed forms of the $O(a_s^3)$ anomalous dimensions and Wilson coefficients from Mellin moments by means of computer algebra | <|reference_start|>Determining the closed forms of the $O(a_s^3)$ anomalous dimensions and Wilson coefficients from Mellin moments by means of computer algebra: Single scale quantities, as anomalous dimensions and hard scattering cross sections, in renormalizable Quantum Field Theories are found to obey difference equations of finite order in Mellin space. It is often easier to calculate fixed moments for these quantities compared to a direct attempt to derive them in terms of harmonic sums and their generalizations involving the Mellin parameter $N$. Starting from a sufficiently large number of given moments, we establish linear recurrence relations of lowest possible order with polynomial coefficients of usually high degree. Then these recurrence equations are solved in terms of d'Alembertian solutions where the involved nested sums are represented in optimal nested depth. Given this representation, it is then an easy task to express the result in terms of harmonic sums. In this process we compactify the result such that no algebraic relations occur among the sums involved. We demonstrate the method for the QCD unpolarized anomalous dimensions and massless Wilson coefficients to 3--loop order treating the contributions for individual color coefficients. For the most complicated subproblem 5114 moments were needed in order to produce a recurrence of order 35 whose coefficients have degrees up to 938. About four months of CPU time were needed to establish and solve the recurrences for the anomalous dimensions and Wilson coefficients on a 2 GHz machine requiring less than 10 GB of memory. No algorithm is known yet to provide such a high number of moments for 3--loop quantities. 
Yet the method presented shows that it is possible to establish and solve recurrences of rather large order and degree, occurring in physics problems, uniquely, fast and reliably with computer algebra.<|reference_end|> | arxiv | @article{blümlein2009determining,
title={Determining the closed forms of the $O(a_s^3)$ anomalous dimensions and
Wilson coefficients from Mellin moments by means of computer algebra},
author={J. Bl{\"u}mlein, M. Kauers, S. Klein, and C. Schneider},
journal={Comput.Phys.Commun.180:2143-2165,2009},
year={2009},
doi={10.1016/j.cpc.2009.06.020},
number={DESY 09-002, SFB/CPP-09-22},
archivePrefix={arXiv},
eprint={0902.4091},
primaryClass={hep-ph cs.SC math-ph math.AG math.CO math.MP}
} | blümlein2009determining |
arxiv-6511 | 0902.4095 | From Moments to Functions in Quantum Chromodynamics | <|reference_start|>From Moments to Functions in Quantum Chromodynamics: Single-scale quantities, like the QCD anomalous dimensions and Wilson coefficients, obey difference equations. Therefore their analytic form can be determined from a finite number of moments. We demonstrate this in an explicit calculation by establishing and solving large scale recursions by means of computer algebra for the anomalous dimensions and Wilson coefficients in unpolarized deeply inelastic scattering from their Mellin moments to 3-loop order.<|reference_end|> | arxiv | @article{blümlein2009from,
title={From Moments to Functions in Quantum Chromodynamics},
author={J. Bl{\"u}mlein, M. Kauers, S. Klein, and C. Schneider},
journal={PoS ACAT08:106,2008},
year={2009},
number={DESY 09-011, SFB-CPP-09/17},
archivePrefix={arXiv},
eprint={0902.4095},
primaryClass={hep-ph cs.SC math-ph math.AG math.CO math.MP}
} | blümlein2009from |
arxiv-6512 | 0902.4098 | Coordination in multiagent systems and Laplacian spectra of digraphs | <|reference_start|>Coordination in multiagent systems and Laplacian spectra of digraphs: Constructing and studying distributed control systems requires the analysis of the Laplacian spectra and the forest structure of directed graphs. In this paper, we present some basic results of this analysis partially obtained by the present authors. We also discuss the application of these results to decentralized control and touch upon some problems of spectral graph theory.<|reference_end|> | arxiv | @article{chebotarev2009coordination,
title={Coordination in multiagent systems and Laplacian spectra of digraphs},
author={Pavel Chebotarev and Rafig Agaev},
journal={Automation and Remote Control, Vol.70 (2009), No.3, P. 469-483},
year={2009},
doi={10.1134/S0005117909030126},
archivePrefix={arXiv},
eprint={0902.4098},
primaryClass={cs.MA cs.DM math.CO math.OC}
} | chebotarev2009coordination |
arxiv-6513 | 0902.4106 | Filter and nested-lattice code design for fading MIMO channels with side-information | <|reference_start|>Filter and nested-lattice code design for fading MIMO channels with side-information: Linear-assignment Gel'fand-Pinsker coding (LA-GPC) is a coding technique for channels with interference known only at the transmitter, where the known interference is treated as side-information (SI). As a special case of LA-GPC, dirty paper coding has been shown to be able to achieve the optimal interference-free rate for interference channels with perfect channel state information at the transmitter (CSIT). In the cases where only the channel distribution information at the transmitter (CDIT) is available, LA-GPC also has good (sometimes optimal) performance in a variety of fast and slow fading SI channels. In this paper, we design the filters in nested-lattice based coding to make it achieve the same rate performance as LA-GPC in multiple-input multiple-output (MIMO) channels. Compared with the random Gaussian codebooks used in previous works, our resultant coding schemes have an algebraic structure and can be implemented in practical systems. A simulation in a slow-fading channel is also provided, and near interference-free error performance is obtained. The proposed coding schemes can serve as the fundamental building blocks to achieve the promised rate performance of MIMO Gaussian broadcast channels with CDIT or perfect CSIT<|reference_end|> | arxiv | @article{lin2009filter,
title={Filter and nested-lattice code design for fading MIMO channels with
side-information},
author={Shih-Chun Lin, Pin-Hsun Lin, Chung-Pi Lee, Hsuan-Jung Su},
journal={IEEE Transactions on Communications, vol. 59, No. 6, pp. 1489--1494,
June 2011},
year={2009},
doi={10.1109/TCOMM.2011.050211.090113A},
archivePrefix={arXiv},
eprint={0902.4106},
primaryClass={cs.IT math.IT}
} | lin2009filter |
arxiv-6514 | 0902.4127 | Prediction with expert evaluators' advice | <|reference_start|>Prediction with expert evaluators' advice: We introduce a new protocol for prediction with expert advice in which each expert evaluates the learner's and his own performance using a loss function that may change over time and may be different from the loss functions used by the other experts. The learner's goal is to perform better or not much worse than each expert, as evaluated by that expert, for all experts simultaneously. If the loss functions used by the experts are all proper scoring rules and all mixable, we show that the defensive forecasting algorithm enjoys the same performance guarantee as that attainable by the Aggregating Algorithm in the standard setting and known to be optimal. This result is also applied to the case of "specialist" (or "sleeping") experts. In this case, the defensive forecasting algorithm reduces to a simple modification of the Aggregating Algorithm.<|reference_end|> | arxiv | @article{chernov2009prediction,
title={Prediction with expert evaluators' advice},
author={Alexey Chernov and Vladimir Vovk},
journal={arXiv preprint arXiv:0902.4127},
year={2009},
archivePrefix={arXiv},
eprint={0902.4127},
primaryClass={cs.LG}
} | chernov2009prediction |
arxiv-6515 | 0902.4157 | Efficient Greedy Geographical Non-Planar Routing with Reactive Deflection | <|reference_start|>Efficient Greedy Geographical Non-Planar Routing with Reactive Deflection: We present a novel geographical routing scheme for spontaneous wireless mesh networks. Greedy geographical routing has many advantages, but suffers from packet losses occurring at the border of voids. In this paper, we propose a flexible greedy routing scheme that can be adapted to any variant of geographical routing and works for any connectivity graph, not necessarily Unit Disk Graphs. The idea is to reactively detect voids, backtrack packets, and propagate information on blocked sectors to reduce packet loss. We also propose an extrapolating algorithm to reduce the latency of void discovery and to limit route stretch. Performance evaluation via simulation shows that our modified greedy routing avoids most of packet losses.<|reference_end|> | arxiv | @article{theoleyre2009efficient,
title={Efficient Greedy Geographical Non-Planar Routing with Reactive
Deflection},
author={Fabrice Theoleyre (LIG), Eryk Schiller (LIG), Andrzej Duda (LIG)},
journal={arXiv preprint arXiv:0902.4157},
year={2009},
archivePrefix={arXiv},
eprint={0902.4157},
primaryClass={cs.NI}
} | theoleyre2009efficient |
arxiv-6516 | 0902.4177 | Convolutional Codes for Network-Error Correction | <|reference_start|>Convolutional Codes for Network-Error Correction: In this work, we introduce convolutional codes for network-error correction in the context of coherent network coding. We give a construction of convolutional codes that correct a given set of error patterns, as long as consecutive errors are separated by a certain interval. We also give some bounds on the field size and the number of errors that can get corrected in a certain interval. Compared to previous network error correction schemes, using convolutional codes is seen to have advantages in field size and decoding technique. Some examples are discussed which illustrate the several possible situations that arise in this context.<|reference_end|> | arxiv | @article{prasad2009convolutional,
title={Convolutional Codes for Network-Error Correction},
author={K. Prasad, B. Sundar Rajan},
journal={arXiv preprint arXiv:0902.4177},
year={2009},
archivePrefix={arXiv},
eprint={0902.4177},
primaryClass={cs.IT math.IT}
} | prasad2009convolutional |
arxiv-6517 | 0902.4185 | Quiet Planting in the Locked Constraint Satisfaction Problems | <|reference_start|>Quiet Planting in the Locked Constraint Satisfaction Problems: We study the planted ensemble of locked constraint satisfaction problems. We describe the connection between the random and planted ensembles. The use of the cavity method is combined with arguments from reconstruction on trees and first and second moment considerations; in particular the connection with the reconstruction on trees appears to be crucial. Our main result is the location of the hard region in the planted ensemble. In a part of that hard region instances have with high probability a single satisfying assignment.<|reference_end|> | arxiv | @article{zdeborová2009quiet,
title={Quiet Planting in the Locked Constraint Satisfaction Problems},
author={Lenka Zdeborov{\'a} and Florent Krzakala},
journal={SIAM J. Discrete Math. 25, 750-770 (2011)},
year={2009},
doi={10.1137/090750755},
archivePrefix={arXiv},
eprint={0902.4185},
primaryClass={cond-mat.stat-mech cond-mat.dis-nn cs.CC}
} | zdeborová2009quiet |
arxiv-6518 | 0902.4218 | On graph theoretic results underlying the analysis of consensus in multi-agent systems | <|reference_start|>On graph theoretic results underlying the analysis of consensus in multi-agent systems: This note corrects a pretty serious mistake and some inaccuracies in "Consensus and cooperation in networked multi-agent systems" by R. Olfati-Saber, J.A. Fax, and R.M. Murray, published in Vol. 95 of the Proceedings of the IEEE (2007, No. 1, P. 215-233). It also mentions several stronger results applicable to the class of problems under consideration and addresses the issue of priority whose interpretation in the above-mentioned paper is not exact.<|reference_end|> | arxiv | @article{chebotarev2009on,
title={On graph theoretic results underlying the analysis of consensus in
multi-agent systems},
author={Pavel Chebotarev},
journal={Proceedings of the IEEE, Vol. 98, No. 7, July 2010},
year={2009},
doi={10.1109/JPROC.2010.2049911},
archivePrefix={arXiv},
eprint={0902.4218},
primaryClass={cs.MA cs.DM math.CO math.OC}
} | chebotarev2009on |
arxiv-6519 | 0902.4221 | Semantic Network Layering | <|reference_start|>Semantic Network Layering: The stack in various forms has been widely used as an architectural template for networking systems. Recently the stack has been subject to criticism for a lack of flexibility. However, when it comes right down to it nobody has offered a truly compelling alternative. Various cross-layer optimizations have been proposed, but these optimizations are frequently hacks to achieve a particular goal and offer no direct insight into why the existing network stack is inadequate. We propose that a fundamental problem with the existing network stack is that it attempts to layer functionality that is not well-suited to layering. In this work we use a "bottom up" model of information computation, storage, and transfer and the "top down" goals of networking systems to formulate a modular decomposition of networking systems. Based on this modular decomposition we propose a semantic layered structure for networking systems that eliminates many awkward cross-layer interactions that arise in the canonical layered stack.<|reference_end|> | arxiv | @article{neufeld2009semantic,
title={Semantic Network Layering},
author={Michael Neufeld, Craig Partridge},
journal={arXiv preprint arXiv:0902.4221},
year={2009},
archivePrefix={arXiv},
eprint={0902.4221},
primaryClass={cs.NI}
} | neufeld2009semantic |
arxiv-6520 | 0902.4228 | Multiplicative updates For Non-Negative Kernel SVM | <|reference_start|>Multiplicative updates For Non-Negative Kernel SVM: We present multiplicative updates for solving hard and soft margin support vector machines (SVM) with non-negative kernels. They follow as a natural extension of the updates for non-negative matrix factorization. No additional param- eter setting, such as choosing learning, rate is required. Ex- periments demonstrate rapid convergence to good classifiers. We analyze the rates of asymptotic convergence of the up- dates and establish tight bounds. We test the performance on several datasets using various non-negative kernels and report equivalent generalization errors to that of a standard SVM.<|reference_end|> | arxiv | @article{potluru2009multiplicative,
title={Multiplicative updates For Non-Negative Kernel SVM},
author={Vamsi K. Potluru, Sergey M. Plis, Morten Morup, Vince D. Calhoun,
Terran Lane},
journal={arXiv preprint arXiv:0902.4228},
year={2009},
archivePrefix={arXiv},
eprint={0902.4228},
primaryClass={cs.LG}
} | potluru2009multiplicative |
arxiv-6521 | 0902.4246 | Constant-Weight and Constant-Charge Binary Run-Length Limited Codes | <|reference_start|>Constant-Weight and Constant-Charge Binary Run-Length Limited Codes: Constant-weight and constant-charge binary sequences with constrained run length of zeros are introduced. For these sequences, the weight and the charge distribution are found. Then, recurrent and direct formulas for calculating the number of these sequences are obtained. With considering these numbers of constant-weight and constant-charge RLL sequences as coefficients of convergent power series, generating functions are derived. The fact, that generating function for enumerating constant-charge RLL sequences does not have a closed form, is proved. Implementation of encoding and decoding procedures using Cover's enumerative scheme is shown. On the base of obtained results, some examples, such as enumeration of running-digital-sum (RDS) constrained RLL sequences or peak-shifts control capability are also provided.<|reference_end|> | arxiv | @article{kurmaev2009constant-weight,
title={Constant-Weight and Constant-Charge Binary Run-Length Limited Codes},
author={Oleg Kurmaev},
journal={arXiv preprint arXiv:0902.4246},
year={2009},
doi={10.1109/TIT.2011.2145490},
archivePrefix={arXiv},
eprint={0902.4246},
primaryClass={cs.IT math.IT}
} | kurmaev2009constant-weight |
arxiv-6522 | 0902.4250 | Fundamental limit of sample generalized eigenvalue based detection of signals in noise using relatively few signal-bearing and noise-only samples | <|reference_start|>Fundamental limit of sample generalized eigenvalue based detection of signals in noise using relatively few signal-bearing and noise-only samples: The detection problem in statistical signal processing can be succinctly formulated: Given m (possibly) signal bearing, n-dimensional signal-plus-noise snapshot vectors (samples) and N statistically independent n-dimensional noise-only snapshot vectors, can one reliably infer the presence of a signal? This problem arises in the context of applications as diverse as radar, sonar, wireless communications, bioinformatics, and machine learning and is the critical first step in the subsequent signal parameter estimation phase. The signal detection problem can be naturally posed in terms of the sample generalized eigenvalues. The sample generalized eigenvalues correspond to the eigenvalues of the matrix formed by "whitening" the signal-plus-noise sample covariance matrix with the noise-only sample covariance matrix. In this article we prove a fundamental asymptotic limit of sample generalized eigenvalue based detection of signals in arbitrarily colored noise when there are relatively few signal bearing and noise-only samples. Numerical simulations highlight the accuracy of our analytical prediction and permit us to extend our heuristic definition of the effective number of identifiable signals in colored noise. We discuss implications of our result for the detection of weak and/or closely spaced signals in sensor array processing, abrupt change detection in sensor networks, and clustering methodologies in machine learning.<|reference_end|> | arxiv | @article{rao2009fundamental,
title={Fundamental limit of sample generalized eigenvalue based detection of
signals in noise using relatively few signal-bearing and noise-only samples},
author={N. Raj Rao and Jack W. Silverstein},
journal={arXiv preprint arXiv:0902.4250},
year={2009},
archivePrefix={arXiv},
eprint={0902.4250},
primaryClass={cs.IT math.IT}
} | rao2009fundamental |
arxiv-6523 | 0902.4291 | From Theory to Practice: Sub-Nyquist Sampling of Sparse Wideband Analog Signals | <|reference_start|>From Theory to Practice: Sub-Nyquist Sampling of Sparse Wideband Analog Signals: Conventional sub-Nyquist sampling methods for analog signals exploit prior information about the spectral support. In this paper, we consider the challenging problem of blind sub-Nyquist sampling of multiband signals, whose unknown frequency support occupies only a small portion of a wide spectrum. Our primary design goals are efficient hardware implementation and low computational load on the supporting digital processing. We propose a system, named the modulated wideband converter, which first multiplies the analog signal by a bank of periodic waveforms. The product is then lowpass filtered and sampled uniformly at a low rate, which is orders of magnitude smaller than Nyquist. Perfect recovery from the proposed samples is achieved under certain necessary and sufficient conditions. We also develop a digital architecture, which allows either reconstruction of the analog input, or processing of any band of interest at a low rate, that is, without interpolating to the high Nyquist rate. Numerical simulations demonstrate many engineering aspects: robustness to noise and mismodeling, potential hardware simplifications, realtime performance for signals with time-varying support and stability to quantization effects. We compare our system with two previous approaches: periodic nonuniform sampling, which is bandwidth limited by existing hardware devices, and the random demodulator, which is restricted to discrete multitone signals and has a high computational load. In the broader context of Nyquist sampling, our scheme has the potential to break through the bandwidth barrier of state-of-the-art analog conversion technologies such as interleaved converters.<|reference_end|> | arxiv | @article{mishali2009from,
title={From Theory to Practice: Sub-{Nyquist} Sampling of Sparse Wideband
  Analog Signals},
author={Mishali, Moshe and Eldar, Yonina C.},
journal={arXiv preprint arXiv:0902.4291},
year={2009},
doi={10.1109/JSTSP.2010.2042414},
archivePrefix={arXiv},
eprint={0902.4291},
primaryClass={cs.IT math.IT}
} | mishali2009from
arxiv-6524 | 0902.4337 | Probabilistic Matching of Planar Regions | <|reference_start|>Probabilistic Matching of Planar Regions: We analyze a probabilistic algorithm for matching shapes modeled by planar regions under translations and rigid motions (rotation and translation). Given shapes $A$ and $B$, the algorithm computes a transformation $t$ such that with high probability the area of overlap of $t(A)$ and $B$ is close to maximal. In the case of polygons, we give a time bound that does not depend significantly on the number of vertices.<|reference_end|> | arxiv | @article{alt2009probabilistic,
title={Probabilistic Matching of Planar Regions},
author={Alt, Helmut and Scharf, Ludmila and Schymura, Daria},
journal={arXiv preprint arXiv:0902.4337},
year={2009},
archivePrefix={arXiv},
eprint={0902.4337},
primaryClass={cs.CG}
} | alt2009probabilistic
arxiv-6525 | 0902.4348 | On ground word problem of term equation systems | <|reference_start|>On ground word problem of term equation systems: We give semi-decision procedures for the ground word problem of variable preserving term equation systems and term equation systems. They are natural improvements of two well known trivial semi-decision procedures. We show the correctness of our procedures.<|reference_end|> | arxiv | @article{vagvolgyi2009on,
title = {On ground word problem of term equation systems},
author = {Vagvolgyi, Sandor},
journal = {arXiv preprint arXiv:0902.4348},
year = {2009},
archivePrefix = {arXiv},
eprint = {0902.4348},
primaryClass = {cs.LO}
} | vagvolgyi2009on
arxiv-6526 | 0902.4394 | Circulant and Toeplitz matrices in compressed sensing | <|reference_start|>Circulant and Toeplitz matrices in compressed sensing: Compressed sensing seeks to recover a sparse vector from a small number of linear and non-adaptive measurements. While most work so far focuses on Gaussian or Bernoulli random measurements we investigate the use of partial random circulant and Toeplitz matrices in connection with recovery by $\ell_1$-minization. In contrast to recent work in this direction we allow the use of an arbitrary subset of rows of a circulant and Toeplitz matrix. Our recovery result predicts that the necessary number of measurements to ensure sparse reconstruction by $\ell_1$-minimization with random partial circulant or Toeplitz matrices scales linearly in the sparsity up to a $\log$-factor in the ambient dimension. This represents a significant improvement over previous recovery results for such matrices. As a main tool for the proofs we use a new version of the non-commutative Khintchine inequality.<|reference_end|> | arxiv | @article{rauhut2009circulant,
title={Circulant and {Toeplitz} matrices in compressed sensing},
author={Rauhut, Holger},
journal={arXiv preprint arXiv:0902.4394},
year={2009},
archivePrefix={arXiv},
eprint={0902.4394},
primaryClass={cs.IT math.IT}
} | rauhut2009circulant
arxiv-6527 | 0902.4447 | Percolation Processes and Wireless Network Resilience to Degree-Dependent and Cascading Node Failures | <|reference_start|>Percolation Processes and Wireless Network Resilience to Degree-Dependent and Cascading Node Failures: We study the problem of wireless network resilience to node failures from a percolation-based perspective. In practical wireless networks, it is often the case that the failure probability of a node depends on its degree (number of neighbors). We model this phenomenon as a degree-dependent site percolation process on random geometric graphs. In particular, we obtain analytical conditions for the existence of phase transitions within this model. Furthermore, in networks carrying traffic load, the failure of one node can result in redistribution of the load onto other nearby nodes. If these nodes fail due to excessive load, then this process can result in a cascading failure. Using a simple but descriptive model, we show that the cascading failure problem for large-scale wireless networks is equivalent to a degree-dependent site percolation on random geometric graphs. We obtain analytical conditions for cascades in this model.<|reference_end|> | arxiv | @article{kong2009percolation,
title = {Percolation Processes and Wireless Network Resilience to
  Degree-Dependent and Cascading Node Failures},
author = {Kong, Zhenning and Yeh, Edmund M.},
journal = {arXiv preprint arXiv:0902.4447},
year = {2009},
archivePrefix = {arXiv},
eprint = {0902.4447},
primaryClass = {cs.NI cs.IT math.IT}
} | kong2009percolation
arxiv-6528 | 0902.4449 | Connectivity, Percolation, and Information Dissemination in Large-Scale Wireless Networks with Dynamic Links | <|reference_start|>We investigate the problem of disseminating broadcast messages in wireless networks with time-varying links from a percolation-based perspective. Using a model of wireless networks based on random geometric graphs with dynamic on-off links, we show that the delay for disseminating broadcast information exhibits two behavioral regimes, corresponding to the phase transition of the underlying network connectivity. When the dynamic network is in the subcritical phase, ignoring propagation delays, the delay scales linearly with the Euclidean distance between the sender and the receiver. When the dynamic network is in the supercritical phase, the delay scales sub-linearly with the distance. Finally, we show that in the presence of a non-negligible propagation delay, the delay for information dissemination scales linearly with the Euclidean distance in both the subcritical and supercritical regimes, with the rates for the linear scaling being different in the two regimes.<|reference_end|> | arxiv | @article{kong2009connectivity,
title={Connectivity, Percolation, and Information Dissemination in Large-Scale
  Wireless Networks with Dynamic Links},
author={Kong, Zhenning and Yeh, Edmund M.},
journal={arXiv preprint arXiv:0902.4449},
year={2009},
archivePrefix={arXiv},
eprint={0902.4449},
primaryClass={cs.IT cs.NI math.IT math.PR}
} | kong2009connectivity
arxiv-6529 | 0902.4460 | Strategies of Voting in Stochastic Environment: Egoism and Collectivism | <|reference_start|>Strategies of Voting in Stochastic Environment: Egoism and Collectivism: Consideration was given to a model of social dynamics controlled by successive collective decisions based on the threshold majority procedures. The current system state is characterized by the vector of participants' capitals (utilities). At each step, the voters can either retain their status quo or accept the proposal which is a vector of the algebraic increments in the capitals of the participants. In this version of the model, the vector is generated stochastically. Comparative utility of two social attitudes--egoism and collectivism--was analyzed. It was established that, except for some special cases, the collectivists have advantages, which makes realizable the following scenario: on the conditions of protecting the corporate interests, a group is created which is joined then by the egoists attracted by its achievements. At that, group egoism approaches altruism. Additionally, one of the considered variants of collectivism handicaps manipulation of voting by the organizers.<|reference_end|> | arxiv | @article{borzenko2009strategies,
title={Strategies of Voting in Stochastic Environment: Egoism and Collectivism},
author={Borzenko, V. I. and Lezina, Z. M. and Loginov, A. K. and Tsodikova,
  Ya. Yu. and Chebotarev, P. Yu.},
journal={Automation and Remote Control, 2006, Vol. 67, No. 2, pp. 311-328.
  Original Russian text published in Avtomatika i Telemekhanika, 2006, No. 2,
  pp. 154-173},
year={2009},
doi={10.1134/S0005117906020093},
archivePrefix={arXiv},
eprint={0902.4460},
primaryClass={math.OC cs.MA math.PR}
} | borzenko2009strategies
arxiv-6530 | 0902.4463 | SAPPORO: A way to turn your graphics cards into a GRAPE-6 | <|reference_start|>SAPPORO: A way to turn your graphics cards into a GRAPE-6: We present Sapporo, a library for performing high-precision gravitational N-body simulations on NVIDIA Graphical Processing Units (GPUs). Our library mimics the GRAPE-6 library, and N-body codes currently running on GRAPE-6 can switch to Sapporo by a simple relinking of the library. The precision of our library is comparable to that of GRAPE-6, even though internally the GPU hardware is limited to single precision arithmetics. This limitation is effectively overcome by emulating double precision for calculating the distance between particles. The performance loss of this operation is small (< 20%) compared to the advantage of being able to run at high precision. We tested the library using several GRAPE-6-enabled N-body codes, in particular with Starlab and phiGRAPE. We measured peak performance of 800 Gflop/s for running with 10^6 particles on a PC with four commercial G92 architecture GPUs (two GeForce 9800GX2). As a production test, we simulated a 32k Plummer model with equal mass stars well beyond core collapse. The simulation took 41 days, during which the mean performance was 113 Gflop/s. The GPU did not show any problems from running in a production environment for such an extended period of time.<|reference_end|> | arxiv | @article{gaburov2009sapporo:,
title={{SAPPORO}: A way to turn your graphics cards into a {GRAPE-6}},
author={Gaburov, Evghenii and Harfst, Stefan and Portegies Zwart, Simon},
journal={arXiv preprint arXiv:0902.4463},
year={2009},
doi={10.1016/j.newast.2009.03.002},
archivePrefix={arXiv},
eprint={0902.4463},
primaryClass={astro-ph.IM cs.DC}
} | gaburov2009sapporo:
arxiv-6531 | 0902.4481 | Stability of Finite Population ALOHA with Variable Packets | <|reference_start|>Stability of Finite Population ALOHA with Variable Packets: ALOHA is one of the most basic Medium Access Control (MAC) protocols and represents a foundation for other more sophisticated distributed and asynchronous MAC protocols, e.g., CSMA. In this paper, unlike in the traditional work that focused on mean value analysis, we study the distributional properties of packet transmission delays over an ALOHA channel. We discover a new phenomenon showing that a basic finite population ALOHA model with variable size (exponential) packets is characterized by power law transmission delays, possibly even resulting in zero throughput. These results are in contrast to the classical work that shows exponential delays and positive throughput for finite population ALOHA with fixed packets. Furthermore, we characterize a new stability condition that is entirely derived from the tail behavior of the packet and backoff distributions that may not be determined by mean values. The power law effects and the possible instability might be diminished, or perhaps eliminated, by reducing the variability of packets. However, we show that even a slotted (synchronized) ALOHA with packets of constant size can exhibit power law delays when the number of active users is random. From an engineering perspective, our results imply that the variability of packet sizes and number of active users need to be taken into consideration when designing robust MAC protocols, especially for ad-hoc/sensor networks where other factors, such as link failures and mobility, might further compound the problem.<|reference_end|> | arxiv | @article{jelenkovic2009stability,
title={Stability of Finite Population {ALOHA} with Variable Packets},
author={Jelenkovic, Predrag R. and Tan, Jian},
journal={arXiv preprint arXiv:0902.4481},
year={2009},
number={EE2009-02-20},
archivePrefix={arXiv},
eprint={0902.4481},
primaryClass={cs.PF cs.IT math.IT}
} | jelenkovic2009stability
arxiv-6532 | 0902.4508 | Exponential Sums, Cyclic Codes and Sequences: the Odd Characteristic Kasami Case | <|reference_start|>Exponential Sums, Cyclic Codes and Sequences: the Odd Characteristic Kasami Case: Let $q=p^n$ with $n=2m$ and $p$ be an odd prime. Let $0\leq k\leq n-1$ and $k\neq m$. In this paper we determine the value distribution of following exponential(character) sums \[\sum\limits_{x\in \bF_q}\zeta_p^{\Tra_1^m (\alpha x^{p^{m}+1})+\Tra_1^n(\beta x^{p^k+1})}\quad(\alpha\in \bF_{p^m},\beta\in \bF_{q})\] and \[\sum\limits_{x\in \bF_q}\zeta_p^{\Tra_1^m (\alpha x^{p^{m}+1})+\Tra_1^n(\beta x^{p^k+1}+\ga x)}\quad(\alpha\in \bF_{p^m},\beta,\ga\in \bF_{q})\] where $\Tra_1^n: \bF_q\ra \bF_p$ and $\Tra_1^m: \bF_{p^m}\ra\bF_p$ are the canonical trace mappings and $\zeta_p=e^{\frac{2\pi i}{p}}$ is a primitive $p$-th root of unity. As applications: (1). We determine the weight distribution of the cyclic codes $\cC_1$ and $\cC_2$ over $\bF_{p^t}$ with parity-check polynomials $h_2(x)h_3(x)$ and $h_1(x)h_2(x)h_3(x)$ respectively where $t$ is a divisor of $d=\gcd(m,k)$, and $h_1(x)$, $h_2(x)$ and $h_3(x)$ are the minimal polynomials of $\pi^{-1}$, $\pi^{-(p^k+1)}$ and $\pi^{-(p^m+1)}$ over $\bF_{p^t}$ respectively for a primitive element $\pi$ of $\bF_q$. (2). We determine the correlation distribution among a family of m-sequences. This paper extends the results in \cite{Zen Li}.<|reference_end|> | arxiv | @article{luo2009exponential,
title={Exponential Sums, Cyclic Codes and Sequences: the Odd Characteristic
  {Kasami} Case},
author={Luo, Jinquan and Tang, Yuansheng and Wang, Hongyu},
journal={arXiv preprint arXiv:0902.4508},
year={2009},
archivePrefix={arXiv},
eprint={0902.4508},
primaryClass={cs.IT cs.DM math.CO math.IT}
} | luo2009exponential
arxiv-6533 | 0902.4509 | Cyclic Codes and Sequences from a Class of Dembowski-Ostrom Functions | <|reference_start|>Cyclic Codes and Sequences from a Class of Dembowski-Ostrom Functions: Let $q=p^n$ with $p$ be an odd prime. Let $0\leq k\leq n-1$ and $k\neq n/2$. In this paper we determine the value distribution of following exponential(character) sums \[\sum\limits_{x\in \bF_q}\zeta_p^{\Tra_1^n(\alpha x^{p^{3k}+1}+\beta x^{p^k+1})}\quad(\alpha\in \bF_{p^m},\beta\in \bF_{q})\] and \[\sum\limits_{x\in \bF_q}\zeta_p^{\Tra_1^n(\alpha x^{p^{3k}+1}+\beta x^{p^k+1}+\ga x)}\quad(\alpha\in \bF_{p^m},\beta,\ga\in \bF_{q})\] where $\Tra_1^n: \bF_q\ra \bF_p$ and $\Tra_1^m: \bF_{p^m}\ra\bF_p$ are the canonical trace mappings and $\zeta_p=e^{\frac{2\pi i}{p}}$ is a primitive $p$-th root of unity. As applications: (1). We determine the weight distribution of the cyclic codes $\cC_1$ and $\cC_2$ over $\bF_{p^t}$ with parity-check polynomials $h_2(x)h_3(x)$ and $h_1(x)h_2(x)h_3(x)$ respectively where $t$ is a divisor of $d=\gcd(n,k)$, and $h_1(x)$, $h_2(x)$ and $h_3(x)$ are the minimal polynomials of $\pi^{-1}$, $\pi^{-(p^k+1)}$ and $\pi^{-(p^{3k}+1)}$ over $\bF_{p^t}$ respectively for a primitive element $\pi$ of $\bF_q$. (2). We determine the correlation distribution among a family of m-sequences.<|reference_end|> | arxiv | @article{luo2009cyclic,
title={Cyclic Codes and Sequences from a Class of {Dembowski-Ostrom} Functions},
author={Luo, Jinquan and Ling, San and Xing, Chaoping},
journal={arXiv preprint arXiv:0902.4509},
year={2009},
archivePrefix={arXiv},
eprint={0902.4509},
primaryClass={cs.IT cs.DM math.CO math.IT}
} | luo2009cyclic
arxiv-6534 | 0902.4510 | Cyclic Codes and Sequences: the Generalized Kasami Case | <|reference_start|>Cyclic Codes and Sequences: the Generalized Kasami Case: Let $q=2^n$ with $n=2m$ . Let $1\leq k\leq n-1$ and $k\neq m$. In this paper we determine the value distribution of following exponential sums \[\sum\limits_{x\in \bF_q}(-1)^{\Tra_1^m (\alpha x^{2^{m}+1})+\Tra_1^n(\beta x^{2^k+1})}\quad(\alpha\in \bF_{2^m},\beta\in \bF_{q})\] and \[\sum\limits_{x\in \bF_q}(-1)^{\Tra_1^m (\alpha x^{2^{m}+1})+\Tra_1^n(\beta x^{2^k+1}+\ga x)}\quad(\alpha\in \bF_{2^m},\beta,\ga\in \bF_{q})\] where $\Tra_1^n: \bF_q\ra \bF_2$ and $\Tra_1^m: \bF_{p^m}\ra\bF_2$ are the canonical trace mappings. As applications: (1). We determine the weight distribution of the binary cyclic codes $\cC_1$ and $\cC_2$ with parity-check polynomials $h_2(x)h_3(x)$ and $h_1(x)h_2(x)h_3(x)$ respectively where $h_1(x)$, $h_2(x)$ and $h_3(x)$ are the minimal polynomials of $\pi^{-1}$, $\pi^{-(2^k+1)}$ and $\pi^{-(2^m+1)}$ over $\bF_{2}$ respectively for a primitive element $\pi$ of $\bF_q$. (2). We determine the correlation distribution among a family of m-sequences. This paper is the binary version of Luo, Tang and Wang\cite{Luo Tan} and extends the results in Kasami\cite{Kasa1}, Van der Vlugt\cite{Vand2} and Zeng, Liu and Hu\cite{Zen Liu}.<|reference_end|> | arxiv | @article{luo2009cyclic,
title={Cyclic Codes and Sequences: the Generalized {Kasami} Case},
author={Luo, Jinquan and Wang, Hongyu and Tang, Yuansheng},
journal={arXiv preprint arXiv:0902.4510},
year={2009},
archivePrefix={arXiv},
eprint={0902.4510},
primaryClass={cs.IT cs.DM math.CO math.IT}
} | luo2009cyclic
arxiv-6535 | 0902.4511 | Cyclic Codes and Sequences from Kasami-Welch Functions | <|reference_start|>Cyclic Codes and Sequences from Kasami-Welch Functions: Let $q=2^n$, $0\leq k\leq n-1$ and $k\neq n/2$. In this paper we determine the value distribution of following exponential sums \[\sum\limits_{x\in \bF_q}(-1)^{\Tra_1^n(\alpha x^{2^{3k}+1}+\beta x^{2^k+1})}\quad(\alpha,\beta\in \bF_{q})\] and \[\sum\limits_{x\in \bF_q}(-1)^{\Tra_1^n(\alpha x^{2^{3k}+1}+\beta x^{2^k+1}+\ga x)}\quad(\alpha,\beta,\ga\in \bF_{q})\] where $\Tra_1^n: \bF_{2^n}\ra \bF_2$ is the canonical trace mapping. As applications: (1). We determine the weight distribution of the binary cyclic codes $\cC_1$ and $\cC_2$ with parity-check polynomials $h_2(x)h_3(x)$ and $h_1(x)h_2(x)h_3(x)$ respectively where $h_1(x)$, $h_2(x)$ and $h_3(x)$ are the minimal polynomials of $\pi^{-1}$, $\pi^{-(2^k+1)}$ and $\pi^{-(2^{3k}+1)}$ respectively for a primitive element $\pi$ of $\bF_q$. (2). We determine the correlation distribution among a family of binary m-sequences.<|reference_end|> | arxiv | @article{luo2009cyclic,
title={Cyclic Codes and Sequences from {Kasami-Welch} Functions},
author={Luo, Jinquan and Ling, San and Xing, Chaoping},
journal={arXiv preprint arXiv:0902.4511},
year={2009},
archivePrefix={arXiv},
eprint={0902.4511},
primaryClass={cs.IT cs.DM math.CO math.IT}
} | luo2009cyclic
arxiv-6536 | 0902.4514 | Analytical Expression of the Expected Values of Capital at Voting in the Stochastic Environment | <|reference_start|>Analytical Expression of the Expected Values of Capital at Voting in the Stochastic Environment: In the simplest version of the model of group decision making in the stochastic environment, the participants are segregated into egoists and a group of collectivists. A "proposal of the environment" is a stochastically generated vector of algebraic increments of participants' capitals. The social dynamics is determined by the sequence of proposals accepted by a majority voting (with a threshold) of the participants. In this paper, we obtain analytical expressions for the expected values of capitals for all the participants, including collectivists and egoists. In addition, distinctions between some principles of group voting are discussed.<|reference_end|> | arxiv | @article{chebotarev2009analytical,
title = {Analytical Expression of the Expected Values of Capital at Voting in
  the Stochastic Environment},
author = {Chebotarev, Pavel},
journal = {Automation and Remote Control, 2006, Vol. 67, No. 2, pp. 480-492.
  Original Russian text published in Avtomatika i Telemekhanika, 2006, No. 3,
  pp. 152-165},
year = {2009},
doi = {10.1134/S000511790603012X},
archivePrefix = {arXiv},
eprint = {0902.4514},
primaryClass = {math.OC cs.MA cs.SI cs.SY math.PR}
} | chebotarev2009analytical
arxiv-6537 | 0902.4521 | Are Tensor Decomposition Solutions Unique? On the global convergence of HOSVD and ParaFac algorithms | <|reference_start|>Are Tensor Decomposition Solutions Unique? On the global convergence of HOSVD and ParaFac algorithms: For tensor decompositions such as HOSVD and ParaFac, the objective functions are nonconvex. This implies, theoretically, there exists a large number of local optimas: starting from different starting point, the iteratively improved solution will converge to different local solutions. This non-uniqueness present a stability and reliability problem for image compression and retrieval. In this paper, we present the results of a comprehensive investigation of this problem. We found that although all tensor decomposition algorithms fail to reach a unique global solution on random data and severely scrambled data; surprisingly however, on all real life several data sets (even with substantial scramble and occlusions), HOSVD always produce the unique global solution in the parameter region suitable to practical applications, while ParaFac produce non-unique solutions. We provide an eigenvalue based rule for the assessing the solution uniqueness.<|reference_end|> | arxiv | @article{luo2009are,
title={Are Tensor Decomposition Solutions Unique? On the global convergence of
  {HOSVD} and {ParaFac} algorithms},
author={Luo, Dijun and Huang, Heng and Ding, Chris},
journal={arXiv preprint arXiv:0902.4521},
year={2009},
archivePrefix={arXiv},
eprint={0902.4521},
primaryClass={cs.CV cs.AI}
} | luo2009are
arxiv-6538 | 0902.4527 | EXtensible Animator for Mobile Simulations: EXAMS | <|reference_start|>EXtensible Animator for Mobile Simulations: EXAMS: One of the most widely used simulation environments for mobile wireless networks is the Network Simulator 2 (NS-2). However NS-2 stores its outcome in a text file, so there is a need for a visualization tool to animate the simulation of the wireless network. The purpose of this tool is to help the researcher examine in detail how the wireless protocol works both on a network and a node basis. It is clear that much of this information is protocol dependent and cannot be depicted properly by a general purpose animation process. Existing animation tools do not provide this level of information neither permit the specific protocol to control the animation at all. EXAMS is an NS-2 visualization tool for mobile simulations which makes possible the portrayal of NS-2 internal information like transmission properties and node data structures. This is mainly possible due to EXAMS extensible architecture which separates the animation process into a general and a protocol specific part. The latter can be developed independently by the protocol designer and loaded on demand. These and other useful characteristics of the EXAMS tool can be an invaluable help for a researcher in order to investigate and debug a mobile networking protocol.<|reference_end|> | arxiv | @article{livathinos2009extensible,
title={{EXtensible} Animator for Mobile Simulations: {EXAMS}},
author={Livathinos, Nikolaos S.},
journal={arXiv preprint arXiv:0902.4527},
year={2009},
archivePrefix={arXiv},
eprint={0902.4527},
primaryClass={cs.NI cs.PF}
} | livathinos2009extensible
arxiv-6539 | 0902.4535 | Electronical Health Record's Systems Interoperability | <|reference_start|>Electronical Health Record's Systems Interoperability: Understanding the importance that the electronic medical health records system has, with its various structural types and grades, has led to the elaboration of a series of standards and quality control methods, meant to control its functioning. In time, the electronic health records system has evolved along with the medical data change of structure. Romania has not yet managed to fully clarify this concept, various definitions still being encountered, such as "Patient's electronic chart", "Electronic health file". A slow change from functional interoperability (OSI level 6) to semantic interoperability (level 7) is being aimed at the moment. This current article will try to present the main electronic files models, from a functional interoperability system's possibility to be created perspective.<|reference_end|> | arxiv | @article{apostol2009electronical,
title={Electronical Health Record's Systems. Interoperability},
author={Apostol, Simona Angela and Catu, Cosmin and Vernic, Corina},
journal={Ann. Univ. Tibiscus Comp. Sci. Series 6 (2008), 7-20},
year={2009},
archivePrefix={arXiv},
eprint={0902.4535},
primaryClass={cs.DB}
} | apostol2009electronical
arxiv-6540 | 0902.4572 | A Multipath Energy-Aware On demand Source Routing Protocol for Mobile Ad-Hoc Networks | <|reference_start|>A Multipath Energy-Aware On demand Source Routing Protocol for Mobile Ad-Hoc Networks: Energy consumption is the most challenging issue in routing protocol design for mobile ad-hoc networks (MANETs), since mobile nodes are battery powered. Furthermore, replacing or recharging batteries is often impossible in critical environments such as in military or rescue missions. In a MANET, the energy depletion of a node does not affect the node itself only, but the overall network lifetime. In this paper, we present multipath and energy-aware on demand source routing (MEA-DSR) protocol, which exploits route diversity and information about batteries-energy levels for balancing energy consumption between mobile nodes. Simulation results, have shown that MEA-DSR protocol is more energy efficient than DSR in almost mobility scenarios.<|reference_end|> | arxiv | @article{chettibi2009a,
title={A Multipath Energy-Aware On demand Source Routing Protocol for Mobile
  Ad-Hoc Networks},
author={Chettibi, Saloua and Benmohamed, M.},
journal={1st Workshop on Next Generation Networks: Mobility, WNGN, Maroc
  (2009)},
year={2009},
archivePrefix={arXiv},
eprint={0902.4572},
primaryClass={cs.NI}
} | chettibi2009a
arxiv-6541 | 0902.4577 | Using Distributed Rate-Splitting Game to Approach Rate Region Boundary of the Gaussian Interference Channel | <|reference_start|>Using Distributed Rate-Splitting Game to Approach Rate Region Boundary of the Gaussian Interference Channel: Determining how to approach the rate boundary of the Gaussian interference channel in practical system is a big concern. In this paper, a distributed rate-splitting (DRS) scheme is proposed to approach the rate region boundary of the Gaussian interference channel. It is shown that the DRS scheme can be formulated as a non-cooperative game. We introduce the Stackelberg equilibrium (SE) with multiple leaders as the equilibrium point of the non-cooperative game. Therefore, an iterative multiple waterlevels water-filling algorithm (IML-WFA) is developed to efficiently reach the SE of the non-cooperative game. The existence of SE is established for the game. Numerical examples show that the rate-tuples achieved by the DRS are very close to the boundary of the well-known HK region.<|reference_end|> | arxiv | @article{jing2009using,
title={Using Distributed Rate-Splitting Game to Approach Rate Region Boundary
  of the {Gaussian} Interference Channel},
author={Jing, Zhenhai and Bai, Baoming and Ma, Xiao},
journal={arXiv preprint arXiv:0902.4577},
year={2009},
archivePrefix={arXiv},
eprint={0902.4577},
primaryClass={cs.IT math.IT}
} | jing2009using
arxiv-6542 | 0902.4640 | The comparison of tree-sibling time consistent phylogenetic networks is graph isomorphism-complete | <|reference_start|>The comparison of tree-sibling time consistent phylogenetic networks is graph isomorphism-complete: In a previous work, we gave a metric on the class of semibinary tree-sibling time consistent phylogenetic networks that is computable in polynomial time; in particular, the problem of deciding if two networks of this kind are isomorphic is in P. In this paper, we show that if we remove the semibinarity condition above, then the problem becomes much harder. More precisely, we proof that the isomorphism problem for generic tree-sibling time consistent phylogenetic networks is polynomially equivalent to the graph isomorphism problem. Since the latter is believed to be neither in P nor NP-complete, the chances are that it is impossible to define a metric on the class of all tree-sibling time consistent phylogenetic networks that can be computed in polynomial time.<|reference_end|> | arxiv | @article{cardona2009the,
title={The comparison of tree-sibling time consistent phylogenetic networks is
  graph isomorphism-complete},
author={Cardona, Gabriel and Llabres, Merce and Rossello, Francesc and
  Valiente, Gabriel},
journal={arXiv preprint arXiv:0902.4640},
year={2009},
archivePrefix={arXiv},
eprint={0902.4640},
primaryClass={q-bio.PE cs.DM}
} | cardona2009the
arxiv-6543 | 0902.4647 | Source-Channel Coding and Separation for Generalized Communication Systems | <|reference_start|>Source-Channel Coding and Separation for Generalized Communication Systems: We consider transmission of stationary and ergodic sources over non-ergodic composite channels with channel state information at the receiver (CSIR). Previously we introduced alternate capacity definitions to Shannon capacity, including the capacity versus outage and the expected capacity. These generalized definitions relax the constraint of Shannon capacity that all transmitted information must be decoded at the receiver. In this work alternate end-to-end distortion metrics such as the distortion versus outage and the expected distortion are introduced to relax the constraint that a single distortion level has to be maintained for all channel states. For transmission of stationary and ergodic sources over stationary and ergodic channels, the classical Shannon separation theorem enables separate design of source and channel codes and guarantees optimal performance. For generalized communication systems, we show that different end-to-end distortion metrics lead to different conclusions about separation optimality even for the same source and channel models.<|reference_end|> | arxiv | @article{liang2009source-channel,
title={Source-Channel Coding and Separation for Generalized Communication
  Systems},
author={Liang, Yifan and Goldsmith, Andrea and Effros, Michelle},
journal={arXiv preprint arXiv:0902.4647},
year={2009},
archivePrefix={arXiv},
eprint={0902.4647},
primaryClass={cs.IT math.IT}
} | liang2009source-channel
arxiv-6544 | 0902.4658 | Toward Understanding Friendship in Online Social Networks | <|reference_start|>Toward Understanding Friendship in Online Social Networks: All major on-line social networks, such as MySpace, Facebook, LiveJournal, and Orkut, are built around the concept of friendship. It is not uncommon for a social network participant to have over 100 friends. A natural question arises: are they all real friends of hers, or does she mean something different when she calls them "friends?" Speaking in other words, what is the relationship between off-line (real, traditional) friendship and its on-line (virtual) namesake? In this paper, we use sociological data to suggest that there is a significant difference between the concepts of virtual and real friendships. We further investigate the structure of on-line friendship and observe that it follows the Pareto (or double Pareto) distribution and is subject to age stratification but not to gender segregation. We introduce the concept of digital personality that quantifies the willingness of a social network participant to engage in virtual friendships.<|reference_end|> | arxiv | @article{zinoviev2009toward,
title={Toward Understanding Friendship in Online Social Networks},
author={Zinoviev, Dmitry and Duong, Vy},
journal={arXiv preprint arXiv:0902.4658},
year={2009},
archivePrefix={arXiv},
eprint={0902.4658},
primaryClass={cs.CY}
} | zinoviev2009toward
arxiv-6545 | 0902.4663 | Dipole Vectors in Images Processing | <|reference_start|>Dipole Vectors in Images Processing: Instead of evaluating the gradient field of the brightness map of an image, we propose the use of dipole vectors. This approach is obtained by adapting to the image gray-tone distribution the definition of the dipole moment of charge distributions. We will show how to evaluate the dipoles and obtain a vector field, which can be a good alternative to the gradient field in pattern recognition.<|reference_end|> | arxiv | @article{sparavigna2009dipole,
title={Dipole Vectors in Images Processing},
author={Amelia Sparavigna},
journal={arXiv preprint arXiv:0902.4663},
year={2009},
archivePrefix={arXiv},
eprint={0902.4663},
primaryClass={cs.CV}
} | sparavigna2009dipole |
arxiv-6546 | 0902.4682 | Lectures on Jacques Herbrand as a Logician | <|reference_start|>Lectures on Jacques Herbrand as a Logician: We give some lectures on the work on formal logic of Jacques Herbrand, and sketch his life and his influence on automated theorem proving. The intended audience ranges from students interested in logic over historians to logicians. Besides the well-known correction of Herbrand's False Lemma by Goedel and Dreben, we also present the hardly known unpublished correction of Heijenoort and its consequences on Herbrand's Modus Ponens Elimination. Besides Herbrand's Fundamental Theorem and its relation to the Loewenheim-Skolem-Theorem, we carefully investigate Herbrand's notion of intuitionism in connection with his notion of falsehood in an infinite domain. We sketch Herbrand's two proofs of the consistency of arithmetic and his notion of a recursive function, and last but not least, present the correct original text of his unification algorithm with a new translation.<|reference_end|> | arxiv | @article{wirth2009lectures,
title={Lectures on Jacques Herbrand as a Logician},
author={Claus-Peter Wirth and Joerg Siekmann and Christoph Benzmueller and
Serge Autexier},
journal={arXiv preprint arXiv:0902.4682},
year={2009},
number={SEKI Report SR-2009-01},
archivePrefix={arXiv},
eprint={0902.4682},
primaryClass={cs.LO cs.AI}
} | wirth2009lectures |
arxiv-6547 | 0902.4723 | Degrees of Undecidability in Rewriting | <|reference_start|>Degrees of Undecidability in Rewriting: Undecidability of various properties of first order term rewriting systems is well-known. An undecidable property can be classified by the complexity of the formula defining it. This gives rise to a hierarchy of distinct levels of undecidability, starting from the arithmetical hierarchy classifying properties using first order arithmetical formulas and continuing into the analytic hierarchy, where also quantification over function variables is allowed. In this paper we consider properties of first order term rewriting systems and classify them in this hierarchy. Weak and strong normalization for single terms turn out to be Sigma-0-1-complete, while their uniform versions as well as dependency pair problems with minimality flag are Pi-0-2-complete. We find that confluence is Pi-0-2-complete both for single terms and uniform. Unexpectedly weak confluence for ground terms turns out to be harder than weak confluence for open terms. The former property is Pi-0-2-complete while the latter is Sigma-0-1-complete (and thereby recursively enumerable). The most surprising result is on dependency pair problems without minimality flag: we prove this to be Pi-1-1-complete, which means that this property exceeds the arithmetical hierarchy and is essentially analytic.<|reference_end|> | arxiv | @article{endrullis2009degrees,
title={Degrees of Undecidability in Rewriting},
author={Joerg Endrullis and Herman Geuvers and Hans Zantema},
journal={arXiv preprint arXiv:0902.4723},
year={2009},
archivePrefix={arXiv},
eprint={0902.4723},
primaryClass={cs.LO cs.CC}
} | endrullis2009degrees |
arxiv-6548 | 0902.4730 | Minimal Economic Distributed Computing | <|reference_start|>Minimal Economic Distributed Computing: In an ideal distributed computing infrastructure, users would be able to use diverse distributed computing resources in a simple coherent way, with guaranteed security and efficient use of shared resources in accordance with the wishes of the owners of the resources. Our strategy for approaching this ideal is to first find the simplest structure within which these goals can plausibly be achieved. This structure, we find, is given by a particular recursive distributive lattice freely constructed from a presumed partially ordered set of all data in the infrastructure. Minor syntactic adjustments to the resulting algebra yields a simple language resembling a UNIX shell, a concept of execution and an interprocess protocol. Persons, organizations and servers within the system express their interests explicitly via a hierarchical currency. The currency provides a common framework for treating authentication, access control and resource sharing as economic problems while also introducing a new dimension for improving the infrastructure over time by designing system components which compete with each other to earn the currency. We explain these results, discuss experience with an implementation called egg and point out areas where more research is needed.<|reference_end|> | arxiv | @article{youssef2009minimal,
title={Minimal Economic Distributed Computing},
author={Saul Youssef and John Brunelle and John Huth and David C. Parkes and
Margo Seltzer and Jim Shank},
journal={arXiv preprint arXiv:0902.4730},
year={2009},
archivePrefix={arXiv},
eprint={0902.4730},
primaryClass={cs.DC}
} | youssef2009minimal |
arxiv-6549 | 0902.4779 | Simulation and Performance Analysis of MP-OLSR for Mobile Ad hoc Networks | <|reference_start|>Simulation and Performance Analysis of MP-OLSR for Mobile Ad hoc Networks: Mobile ad hoc networks (MANETs) consist of a collection of wireless mobile nodes which dynamically exchange data without reliance on a fixed base station or a wired backbone network, which makes routing a crucial issue for the design of a ad hoc networks. In this paper we discussed a hybrid multipath routing protocol named MP-OLSR. It is based on the link state algorithm and employs periodic exchange of messages to maintain topology information of the networks. In the mean time, it updates the routing table in an on-demand scheme and forwards the packets in multiple paths which have been determined at the source. If a link failure is detected, the algorithm recovers the route automatically. Concerning the instability of the wireless networks, the redundancy coding is used to improve the delivery ratio. The simulation in NS2 shows that the new protocol can effectively improve the performance of the networks.<|reference_end|> | arxiv | @article{yi2009simulation,
title={Simulation and Performance Analysis of MP-OLSR for Mobile Ad hoc
Networks},
author={Jiazi Yi (IRCCyN) and Eddy Cizeron (IRCCyN) and Salima Hamma (IRCCyN)
and Beno{\^i}t Parrein (IRCCyN)},
journal={IEEE WCNC 2008, Las Vegas : \'Etats-Unis d'Am\'erique (2008)},
year={2009},
archivePrefix={arXiv},
eprint={0902.4779},
primaryClass={cs.NI}
} | yi2009simulation |
arxiv-6550 | 0902.4781 | Implementation of Multipath and Multiple Description Coding in OLSR | <|reference_start|>Implementation of Multipath and Multiple Description Coding in OLSR: In this paper we discussed the application and the implementation of multipath routing and multiple description coding (MDC) extension of OLSR, called MP-OLSR. It is based on the link state algorithm and employs periodic exchange of messages to maintain topology information of the networks. In the mean time, it updates the routing table in an on-demand scheme and forwards the packets in multiple paths which have been determined at the source. If a link failure is detected, the algorithm recovers the route automatically. Concerning the instability of the wireless networks, the multiple description coding is used to improve reliability of the network transmission, and several methods are proposed to allocate the redundancy in different paths. The simulation in NS2 shows that the new protocol can effectively improve the performance of the networks. The implementation of MP-OLSR is also proposed in the end.<|reference_end|> | arxiv | @article{yi2009implementation,
title={Implementation of Multipath and Multiple Description Coding in OLSR},
author={Jiazi Yi (IRCCyN) and Eddy Cizeron (IRCCyN) and Salima Hamma (IRCCyN)
and Beno{\^i}t Parrein (IRCCyN) and Pascal Lesage (IRCCyN)},
journal={4th OLSR Interop/Work Shop, Ottawa : Canada (2008)},
year={2009},
archivePrefix={arXiv},
eprint={0902.4781},
primaryClass={cs.NI}
} | yi2009implementation |
arxiv-6551 | 0902.4822 | Lightweight Task Analysis for Cache-Aware Scheduling on Heterogeneous Clusters | <|reference_start|>Lightweight Task Analysis for Cache-Aware Scheduling on Heterogeneous Clusters: We present a novel characterization of how a program stresses cache. This characterization permits fast performance prediction in order to simulate and assist task scheduling on heterogeneous clusters. It is based on the estimation of stack distance probability distributions. The analysis requires the observation of a very small subset of memory accesses, and yields a reasonable to very accurate prediction in constant time.<|reference_end|> | arxiv | @article{grehant2009lightweight,
title={Lightweight Task Analysis for Cache-Aware Scheduling on Heterogeneous
Clusters},
author={Xavier Grehant and Sverre Jarp},
journal={arXiv preprint arXiv:0902.4822},
year={2009},
archivePrefix={arXiv},
eprint={0902.4822},
primaryClass={cs.DC cs.PF}
} | grehant2009lightweight |
arxiv-6552 | 0902.4881 | Controllability and observabiliy of an artificial advection-diffusion problem | <|reference_start|>Controllability and observabiliy of an artificial advection-diffusion problem: In this paper we study the controllability of an artificial advection-diffusion system through the boundary. Suitable Carleman estimates give us the observability on the adjoint system in the one dimensional case. We also study some basic properties of our problem such as backward uniqueness and we get an intuitive result on the control cost for vanishing viscosity.<|reference_end|> | arxiv | @article{cornilleau2009controllability,
title={Controllability and observabiliy of an artificial advection-diffusion
problem},
author={Pierre Cornilleau and Sergio Guerrero},
journal={Mathematics of Control, Signals, and Systems July 2012, Volume 24,
Issue 3, pp 265-294},
year={2009},
doi={10.1007/s00498-012-0076-0},
archivePrefix={arXiv},
eprint={0902.4881},
primaryClass={math.OC cs.SY math.AP}
} | cornilleau2009controllability |
arxiv-6553 | 0903.0034 | Measuring Independence of Datasets | <|reference_start|>Measuring Independence of Datasets: A data stream model represents setting where approximating pairwise, or $k$-wise, independence with sublinear memory is of considerable importance. In the streaming model the joint distribution is given by a stream of $k$-tuples, with the goal of testing correlations among the components measured over the entire stream. In the streaming model, Indyk and McGregor (SODA 08) recently gave exciting new results for measuring pairwise independence. The Indyk and McGregor methods provide $\log{n}$-approximation under statistical distance between the joint and product distributions in the streaming model. Indyk and McGregor leave, as their main open question, the problem of improving their $\log n$-approximation for the statistical distance metric. In this paper we solve the main open problem posed by of Indyk and McGregor for the statistical distance for pairwise independence and extend this result to any constant $k$. In particular, we present an algorithm that computes an $(\epsilon, \delta)$-approximation of the statistical distance between the joint and product distributions defined by a stream of $k$-tuples. Our algorithm requires $O(({1\over \epsilon}\log({nm\over \delta}))^{(30+k)^k})$ memory and a single pass over the data stream.<|reference_end|> | arxiv | @article{braverman2009measuring,
title={Measuring Independence of Datasets},
author={Vladimir Braverman and Rafail Ostrovsky},
journal={arXiv preprint arXiv:0903.0034},
year={2009},
archivePrefix={arXiv},
eprint={0903.0034},
primaryClass={cs.DS cs.DB cs.IR cs.PF}
} | braverman2009measuring |
arxiv-6554 | 0903.0035 | ScALPEL: A Scalable Adaptive Lightweight Performance Evaluation Library for application performance monitoring | <|reference_start|>ScALPEL: A Scalable Adaptive Lightweight Performance Evaluation Library for application performance monitoring: As supercomputers continue to grow in scale and capabilities, it is becoming increasingly difficult to isolate processor and system level causes of performance degradation. Over the last several years, a significant number of performance analysis and monitoring tools have been built/proposed. However, these tools suffer from several important shortcomings, particularly in distributed environments. In this paper we present ScALPEL, a Scalable Adaptive Lightweight Performance Evaluation Library for application performance monitoring at the functional level. Our approach provides several distinct advantages. First, ScALPEL is portable across a wide variety of architectures, and its ability to selectively monitor functions presents low run-time overhead, enabling its use for large-scale production applications. Second, it is run-time configurable, enabling both dynamic selection of functions to profile as well as events of interest on a per function basis. Third, our approach is transparent in that it requires no source code modifications. Finally, ScALPEL is implemented as a pluggable unit by reusing existing performance monitoring frameworks such as Perfmon and PAPI and extending them to support both sequential and MPI applications.<|reference_end|> | arxiv | @article{pyla2009scalpel:,
title={ScALPEL: A Scalable Adaptive Lightweight Performance Evaluation Library
for application performance monitoring},
author={Hari K. Pyla and Bharath Ramesh and Calvin J. Ribbens and Srinidhi
Varadarajan},
journal={arXiv preprint arXiv:0903.0035},
year={2009},
archivePrefix={arXiv},
eprint={0903.0035},
primaryClass={cs.DC cs.PF}
} | pyla2009scalpel: |
arxiv-6555 | 0903.0041 | Learning DTW Global Constraint for Time Series Classification | <|reference_start|>Learning DTW Global Constraint for Time Series Classification: 1-Nearest Neighbor with the Dynamic Time Warping (DTW) distance is one of the most effective classifiers on time series domain. Since the global constraint has been introduced in speech community, many global constraint models have been proposed including Sakoe-Chiba (S-C) band, Itakura Parallelogram, and Ratanamahatana-Keogh (R-K) band. The R-K band is a general global constraint model that can represent any global constraints with arbitrary shape and size effectively. However, we need a good learning algorithm to discover the most suitable set of R-K bands, and the current R-K band learning algorithm still suffers from an 'overfitting' phenomenon. In this paper, we propose two new learning algorithms, i.e., band boundary extraction algorithm and iterative learning algorithm. The band boundary extraction is calculated from the bound of all possible warping paths in each class, and the iterative learning is adjusted from the original R-K band learning. We also use a Silhouette index, a well-known clustering validation technique, as a heuristic function, and the lower bound function, LB_Keogh, to enhance the prediction speed. Twenty datasets, from the Workshop and Challenge on Time Series Classification, held in conjunction of the SIGKDD 2007, are used to evaluate our approach.<|reference_end|> | arxiv | @article{niennattrakul2009learning,
title={Learning DTW Global Constraint for Time Series Classification},
author={Vit Niennattrakul and Chotirat Ann Ratanamahatana},
journal={arXiv preprint arXiv:0903.0041},
year={2009},
archivePrefix={arXiv},
eprint={0903.0041},
primaryClass={cs.AI}
} | niennattrakul2009learning |
arxiv-6556 | 0903.0050 | Succinctness of two-way probabilistic and quantum finite automata | <|reference_start|>Succinctness of two-way probabilistic and quantum finite automata: We prove that two-way probabilistic and quantum finite automata (2PFA's and 2QFA's) can be considerably more concise than both their one-way versions (1PFA's and 1QFA's), and two-way nondeterministic finite automata (2NFA's). For this purpose, we demonstrate several infinite families of regular languages which can be recognized with some fixed probability greater than $ {1/2} $ by just tuning the transition amplitudes of a 2QFA (and, in one case, a 2PFA) with a constant number of states, whereas the sizes of the corresponding 1PFA's, 1QFA's and 2NFA's grow without bound. We also show that 2QFA's with mixed states can support highly efficient probability amplification. The weakest known model of computation where quantum computers recognize more languages with bounded error than their classical counterparts is introduced.<|reference_end|> | arxiv | @article{yakaryilmaz2009succinctness,
title={Succinctness of two-way probabilistic and quantum finite automata},
author={Abuzer Yakaryilmaz and A. C. Cem Say},
journal={Discrete Mathematics & Theoretical Computer Science, Vol 12, No 4
(2010)},
year={2009},
archivePrefix={arXiv},
eprint={0903.0050},
primaryClass={cs.CC}
} | yakaryilmaz2009succinctness |
arxiv-6557 | 0903.0051 | Non Linear System for a Veritable PID Substitute | <|reference_start|>Non Linear System for a Veritable PID Substitute: The paper deals with a non-linear system largely used in biology, which, in certain conditions and for particular coefficient values, becomes linear, with a linear diagram over a large range of time. It can be used as a veritable regulator in systems' control<|reference_end|> | arxiv | @article{bucur2009non,
title={Non Linear System for a Veritable PID Substitute},
author={Petre Bucur and Lucian Luca},
journal={Ann. Univ. Tibiscus Comp. Sci. Series 6 (2008), 21-24},
year={2009},
archivePrefix={arXiv},
eprint={0903.0051},
primaryClass={cs.DM}
} | bucur2009non |
arxiv-6558 | 0903.0053 | Workflow Patterns in Process Modeling | <|reference_start|>Workflow Patterns in Process Modeling: This paper proposes an introduction to one of the newest modelling methods, an executable model based on workflows. We present the terminology for some basic workflow patterns, as described in the Workflow Management Coalition Terminology and Glossary.<|reference_end|> | arxiv | @article{fortis2009workflow,
title={Workflow Patterns in Process Modeling},
author={Alexandra Fortis and Florin Fortis},
journal={Ann. Univ. Tibiscus Comp. Sci. Series 6 (2008), 81-94},
year={2009},
archivePrefix={arXiv},
eprint={0903.0053},
primaryClass={cs.SE}
} | fortis2009workflow |
arxiv-6559 | 0903.0054 | Considerations on Resource Usage in Exceptions and Failures in Workflows | <|reference_start|>Considerations on Resource Usage in Exceptions and Failures in Workflows: The paper presents a description of some point of view of different authors related to the failures and exceptions that appear in workflows, as a direct consequence of unavailability of resources involved in the workflow. Each of these interpretations is typical for a certain situation, depending on the authors' interpretation of failures and exceptions in workflows modeling real dynamical systems.<|reference_end|> | arxiv | @article{fortis2009considerations,
title={Considerations on Resource Usage in Exceptions and Failures in Workflows},
author={Alexandra Fortis and Alexandru Cicortas and Victoria Iordan},
journal={Ann. Univ. Tibiscus, Comp. Sci. Series 6 (2008), 69-80},
year={2009},
archivePrefix={arXiv},
eprint={0903.0054},
primaryClass={cs.SE}
} | fortis2009considerations |
arxiv-6560 | 0903.0061 | Separable Implementation of L2-Orthogonal STC CPM with Fast Decoding | <|reference_start|>Separable Implementation of L2-Orthogonal STC CPM with Fast Decoding: In this paper we present an alternative separable implementation of L2-orthogonal space-time codes (STC) for continuous phase modulation (CPM). In this approach, we split the STC CPM transmitter into a single conventional CPM modulator and a correction filter bank. While the CPM modulator is common to all transmit antennas, the correction filter bank applies different correction units to each antenna. Thereby desirable code properties as orthogonality and full diversity are achievable with just a slightly larger bandwidth demand. This new representation has three main advantages. First, it allows to easily generalize the orthogonality condition to any arbitrary number of transmit antennas. Second, for a quite general set of correction functions that we detail, it can be proved that full diversity is achieved. Third, by separating the modulation and correction steps inside the receiver, a simpler receiver can be designed as a bank of data independent inverse correction filters followed by a single CPM demodulator. Therefore, in this implementation, only one correlation filter bank for the detection of all transmitted signals is necessary. The decoding effort grows only linearly with the number of transmit antennas.<|reference_end|> | arxiv | @article{hesse2009separable,
title={Separable Implementation of L2-Orthogonal STC CPM with Fast Decoding},
author={Matthias Hesse and Jerome Lebrun and Lutz Lampe (UBC) and Luc Deneire},
journal={IEEE International Conference on Communications (2009)},
year={2009},
doi={10.1109/ICC.2009.5199452},
archivePrefix={arXiv},
eprint={0903.0061},
primaryClass={cs.IT math.IT}
} | hesse2009separable |
arxiv-6561 | 0903.0064 | Manipulation Robustness of Collaborative Filtering Systems | <|reference_start|>Manipulation Robustness of Collaborative Filtering Systems: A collaborative filtering system recommends to users products that similar users like. Collaborative filtering systems influence purchase decisions, and hence have become targets of manipulation by unscrupulous vendors. We provide theoretical and empirical results demonstrating that while common nearest neighbor algorithms, which are widely used in commercial systems, can be highly susceptible to manipulation, two classes of collaborative filtering algorithms which we refer to as linear and asymptotically linear are relatively robust. These results provide guidance for the design of future collaborative filtering systems.<|reference_end|> | arxiv | @article{yan2009manipulation,
title={Manipulation Robustness of Collaborative Filtering Systems},
author={Xiang Yan and Benjamin Van Roy},
journal={arXiv preprint arXiv:0903.0064},
year={2009},
archivePrefix={arXiv},
eprint={0903.0064},
primaryClass={cs.LG cs.IT math.IT}
} | yan2009manipulation |
arxiv-6562 | 0903.0069 | Improved identity-based identification using correcting codes | <|reference_start|>Improved identity-based identification using correcting codes: In this paper, a new identity-based identification scheme based on error-correcting codes is proposed. Two well known code-based schemes are combined : the signature scheme by Courtois, Finiasz and Sendrier and an identification scheme by Stern. A proof of security for the scheme in the Random Oracle Model is given.<|reference_end|> | arxiv | @article{cayrel2009improved,
title={Improved identity-based identification using correcting codes},
author={Pierre-Louis Cayrel and Philippe Gaborit and David Galindo and Marc Girault},
journal={arXiv preprint arXiv:0903.0069},
year={2009},
archivePrefix={arXiv},
eprint={0903.0069},
primaryClass={cs.CR}
} | cayrel2009improved |
arxiv-6563 | 0903.0094 | Dynamic Conjectures in Random Access Networks Using Bio-inspired Learning | <|reference_start|>Dynamic Conjectures in Random Access Networks Using Bio-inspired Learning: This paper considers a conjecture-based distributed learning approach that enables autonomous nodes to independently optimize their transmission probabilities in random access networks. We model the interaction among multiple self-interested nodes as a game. It is well-known that the Nash equilibria in this game result in zero throughput for all the nodes if they take myopic best-response, thereby leading to a network collapse. This paper enables nodes to behave as intelligent entities which can proactively gather information, form internal conjectures on how their competitors would react to their actions, and update their beliefs according to their local observations. In this way, nodes are capable to autonomously "learn" the behavior of their competitors, optimize their own actions, and eventually cultivate reciprocity in the random access network. To characterize the steady-state outcome, the conjectural equilibrium is introduced. Inspired by the biological phenomena of "derivative action" and "gradient dynamics", two distributed conjecture-based action update mechanisms are proposed to stabilize the random access network. The sufficient conditions that guarantee the proposed conjecture-based learning algorithms to converge are derived. Moreover, it is shown that all the achievable operating points in the throughput region are essentially stable conjectural equilibria corresponding to different conjectures. We investigate how the conjectural equilibrium can be selected in heterogeneous networks and how the proposed methods can be extended to ad-hoc networks. 
Simulations verify that the system performance significantly outperforms existing protocols, such as IEEE 802.11 DCF protocol and the PMAC protocol, in terms of throughput, fairness, convergence, and stability.<|reference_end|> | arxiv | @article{su2009dynamic,
title={Dynamic Conjectures in Random Access Networks Using Bio-inspired
Learning},
author={Yi Su and Mihaela van der Schaar},
journal={arXiv preprint arXiv:0903.0094},
year={2009},
archivePrefix={arXiv},
eprint={0903.0094},
primaryClass={cs.GT}
} | su2009dynamic |
arxiv-6564 | 0903.0096 | Modeling Multi-Cell IEEE 80211 WLANs with Application to Channel Assignment | <|reference_start|>Modeling Multi-Cell IEEE 80211 WLANs with Application to Channel Assignment: We provide a simple and accurate analytical model for multi-cell infrastructure IEEE 802.11 WLANs. Our model applies if the cell radius, $R$, is much smaller than the carrier sensing range, $R_{cs}$. We argue that, the condition $R_{cs} >> R$ is likely to hold in a dense deployment of Access Points (APs) where, for every client or station (STA), there is an AP very close to the STA such that the STA can associate with the AP at a high physical rate. We develop a scalable cell level model for such WLANs with saturated AP and STA queues as well as for TCP-controlled long file downloads. The accuracy of our model is demonstrated by comparison with ns-2 simulations. We also demonstrate how our analytical model could be applied in conjunction with a Learning Automata (LA) algorithm for optimal channel assignment. Based on the insights provided by our analytical model, we propose a simple decentralized algorithm which provides static channel assignments that are Nash equilibria in pure strategies for the objective of maximizing normalized network throughput. Our channel assignment algorithm requires neither any explicit knowledge of the topology nor any message passing, and provides assignments in only as many steps as there are channels. In contrast to prior work, our approach to channel assignment is based on the throughput metric.<|reference_end|> | arxiv | @article{panda2009modeling,
title={Modeling Multi-Cell IEEE 802.11 WLANs with Application to Channel
Assignment},
author={Manoj K. Panda and Anurag Kumar},
journal={arXiv preprint arXiv:0903.0096},
year={2009},
archivePrefix={arXiv},
eprint={0903.0096},
primaryClass={cs.NI cs.PF}
} | panda2009modeling |
arxiv-6565 | 0903.0099 | Spectral Efficiency Optimized Adaptive Transmission for Cognitive Radios in an Interference Channel | <|reference_start|>Spectral Efficiency Optimized Adaptive Transmission for Cognitive Radios in an Interference Channel: In this paper, we consider a primary and a cognitive user transmitting over a wireless fading interference channel. The primary user transmits with a constant power and utilizes an adaptive modulation and coding (AMC) scheme satisfying a bit error rate requirement. We propose a link adaptation scheme to maximize the average spectral efficiency of the cognitive radio, while a minimum required spectral efficiency for the primary user is provisioned. The resulting problem is constrained to also satisfy a bit error rate requirement and a power constraint for the cognitive link. The AMC mode selection and power control at the cognitive transmitter is optimized based on the scaled signal to noise plus interference ratio feedback of both links. The problem is then cast as a nonlinear discrete optimization problem for which a fast and efficient suboptimum solution is presented. We also present a scheme with rate adaption and a constant power. An important characteristic of the proposed schemes is that no negotiation between the users is required. Comparisons with underlay and interweave approaches to cognitive radio with adaptive transmission demonstrate the efficiency of the proposed solutions.<|reference_end|> | arxiv | @article{taki2009spectral,
title={Spectral Efficiency Optimized Adaptive Transmission for Cognitive Radios
in an Interference Channel},
author={Mehrdad Taki and Farshad Lahouti},
journal={arXiv preprint arXiv:0903.0099},
year={2009},
archivePrefix={arXiv},
eprint={0903.0099},
primaryClass={cs.IT math.IT}
} | taki2009spectral |
arxiv-6566 | 0903.0116 | Heaps Simplified | <|reference_start|>Heaps Simplified: The heap is a basic data structure used in a wide variety of applications, including shortest path and minimum spanning tree algorithms. In this paper we explore the design space of comparison-based, amortized-efficient heap implementations. From a consideration of dynamic single-elimination tournaments, we obtain the binomial queue, a classical heap implementation, in a simple and natural way. We give four equivalent ways of representing heaps arising from tournaments, and we obtain two new variants of binomial queues, a one-tree version and a one-pass version. We extend the one-pass version to support key decrease operations, obtaining the {\em rank-pairing heap}, or {\em rp-heap}. Rank-pairing heaps combine the performance guarantees of Fibonacci heaps with simplicity approaching that of pairing heaps. Like pairing heaps, rank-pairing heaps consist of trees of arbitrary structure, but these trees are combined by rank, not by list position, and rank changes, but not structural changes, cascade during key decrease operations.<|reference_end|> | arxiv | @article{haeupler2009heaps,
title={Heaps Simplified},
author={Bernhard Haeupler and Siddhartha Sen and Robert E. Tarjan},
journal={arXiv preprint arXiv:0903.0116},
year={2009},
archivePrefix={arXiv},
eprint={0903.0116},
primaryClass={cs.DS}
} | haeupler2009heaps |
arxiv-6567 | 0903.0126 | Villager's dilemma | <|reference_start|>Villager's dilemma: With deeper study of the Game Theory, some conditions of Prisoner's Dilemma is no longer suitable of games in real life. So we try to develop a new model-Villager's Dilemma which has more realistic conditions to stimulate the process of game. It is emphasize that Prisoner's Dilemma is an exception which is lack of universality and the importance of rules in the game. And it puts forward that to let the rule maker take part in the game and specifies game players can stop the game as they like. This essay describes the basic model, the villager's dilemma (VD) and put some extended use of it, and points out the importance of rules and the effect it has on the result of the game. It briefly describes the disadvantage of Prisoner's Dilemma and advantage Villager's Dilemma has. It summarizes the premise and scope of application of Villager's Dilemma, and provides theory foundation for making rules for game and forecast of the future of the game.<|reference_end|> | arxiv | @article{he2009villager's,
title={Villager's dilemma},
author={Beihang He},
journal={arXiv preprint arXiv:0903.0126},
year={2009},
archivePrefix={arXiv},
eprint={0903.0126},
primaryClass={cs.GT}
} | he2009villager's |
arxiv-6568 | 0903.0134 | Recognition of Regular Shapes in Satelite Images | <|reference_start|>Recognition of Regular Shapes in Satelite Images: This paper has been withdrawn by the author ali pourmohammad.<|reference_end|> | arxiv | @article{eskandari2009recognition,
title={Recognition of Regular Shapes in Satelite Images},
author={Ahmad Reza Eskandari and Ali Pourmohammad},
journal={arXiv preprint arXiv:0903.0134},
year={2009},
archivePrefix={arXiv},
eprint={0903.0134},
primaryClass={cs.CV}
} | eskandari2009recognition |
arxiv-6569 | 0903.0136 | A polynomial graph extension procedure for improving graph isomorphism algorithms | <|reference_start|>A polynomial graph extension procedure for improving graph isomorphism algorithms: We present in this short note a polynomial graph extension procedure that can be used to improve any graph isomorphism algorithm. This construction propagates new constraints from the isomorphism constraints of the input graphs (denoted by $G(V,E)$ and $G'(V',E')$). Thus, information from the edge structures of $G$ and $G'$ is "hashed" into the weighted edges of the extended graphs. A bijective mapping is an isomorphism of the initial graphs if and only if it is an isomorphism of the extended graphs. As such, the construction enables the identification of pair of vertices $i\in V$ and $i'\in V'$ that can not be mapped by any isomorphism $h^*:V \to V'$ (e.g. if the extended edges of $i$ and $i'$ are different). A forbidding matrix $F$, that encodes all pairs of incompatible mappings $(i,i')$, is constructed in order to be used by a different algorithm. Moreover, tests on numerous graph classes show that the matrix $F$ might leave only one compatible element for each $i \in V$.<|reference_end|> | arxiv | @article{porumbel2009a,
title={A polynomial graph extension procedure for improving graph isomorphism
algorithms},
author={Daniel Cosmin Porumbel},
journal={arXiv preprint arXiv:0903.0136},
year={2009},
archivePrefix={arXiv},
eprint={0903.0136},
primaryClass={cs.DS}
} | porumbel2009a |
arxiv-6570 | 0903.0153 | Document Relevance Evaluation via Term Distribution Analysis Using Fourier Series Expansion | <|reference_start|>Document Relevance Evaluation via Term Distribution Analysis Using Fourier Series Expansion: In addition to the frequency of terms in a document collection, the distribution of terms plays an important role in determining the relevance of documents for a given search query. In this paper, term distribution analysis using Fourier series expansion as a novel approach for calculating an abstract representation of term positions in a document corpus is introduced. Based on this approach, two methods for improving the evaluation of document relevance are proposed: (a) a function-based ranking optimization representing a user defined document region, and (b) a query expansion technique based on overlapping the term distributions in the top-ranked documents. Experimental results demonstrate the effectiveness of the proposed approach in providing new possibilities for optimizing the retrieval process.<|reference_end|> | arxiv | @article{galeas2009document,
title={Document Relevance Evaluation via Term Distribution Analysis Using
Fourier Series Expansion},
author={Patricio Galeas (1), Ralph Kretschmer (2), Bernd Freisleben (1) ((1)
University of Marburg, Germany, (2) Kretschmer Software, Siegen, Germany)},
journal={Proceedings of the 2009 Joint international Conference on Digital
Libraries (Austin, TX, USA, June 15 - 19, 2009). JCDL '09. ACM, New York, NY,
277-284},
year={2009},
doi={10.1145/1555400.1555446},
archivePrefix={arXiv},
eprint={0903.0153},
primaryClass={cs.IR}
} | galeas2009document |
arxiv-6571 | 0903.0173 | Optimal Interdiction of Unreactive Markovian Evaders | <|reference_start|>Optimal Interdiction of Unreactive Markovian Evaders: The interdiction problem arises in a variety of areas including military logistics, infectious disease control, and counter-terrorism. In the typical formulation of network interdiction, the task of the interdictor is to find a set of edges in a weighted network such that the removal of those edges would maximally increase the cost to an evader of traveling on a path through the network. Our work is motivated by cases in which the evader has incomplete information about the network or lacks planning time or computational power, e.g. when authorities set up roadblocks to catch bank robbers, the criminals do not know all the roadblock locations or the best path to use for their escape. We introduce a model of network interdiction in which the motion of one or more evaders is described by Markov processes and the evaders are assumed not to react to interdiction decisions. The interdiction objective is to find an edge set of size B, that maximizes the probability of capturing the evaders. We prove that similar to the standard least-cost formulation for deterministic motion this interdiction problem is also NP-hard. But unlike that problem our interdiction problem is submodular and the optimal solution can be approximated within 1-1/e using a greedy algorithm. Additionally, we exploit submodularity through a priority evaluation strategy that eliminates the linear complexity scaling in the number of network edges and speeds up the solution by orders of magnitude. Taken together the results bring closer the goal of finding realistic solutions to the interdiction problem on global-scale networks.<|reference_end|> | arxiv | @article{gutfraind2009optimal,
title={Optimal Interdiction of Unreactive Markovian Evaders},
author={Alexander Gutfraind and Aric Hagberg and Feng Pan},
journal={CPAIOR 2009},
year={2009},
number={LA-UR-09-00560},
archivePrefix={arXiv},
eprint={0903.0173},
primaryClass={cs.DM cs.CC cs.DS}
} | gutfraind2009optimal |
arxiv-6572 | 0903.0174 | Accelerating and Evaluation of Syntactic Parsing in Natural Language Question Answering Systems | <|reference_start|>Accelerating and Evaluation of Syntactic Parsing in Natural Language Question Answering Systems: With the development of Natural Language Processing (NLP), more and more systems want to adopt NLP in User Interface Module to process user input, in order to communicate with user in a natural way. However, this raises a speed problem. That is, if NLP module can not process sentences in durable time delay, users will never use the system. As a result, systems which are strict with processing time, such as dialogue systems, web search systems, automatic customer service systems, especially real-time systems, have to abandon NLP module in order to get a faster system response. This paper aims to solve the speed problem. In this paper, at first, the construction of a syntactic parser which is based on corpus machine learning and statistics model is introduced, and then a speed problem analysis is performed on the parser and its algorithms. Based on the analysis, two accelerating methods, Compressed POS Set and Syntactic Patterns Pruning, are proposed, which can effectively improve the time efficiency of parsing in NLP module. To evaluate different parameters in the accelerating algorithms, two new factors, PT and RT, are introduced and explained in detail. Experiments are also completed to prove and test these methods, which will surely contribute to the application of NLP.<|reference_end|> | arxiv | @article{chen2009accelerating,
title={Accelerating and Evaluation of Syntactic Parsing in Natural Language
Question Answering Systems},
author={Zhe Chen and Dunwei Wen},
journal={arXiv preprint arXiv:0903.0174},
year={2009},
archivePrefix={arXiv},
eprint={0903.0174},
primaryClass={cs.AI cs.HC}
} | chen2009accelerating |
arxiv-6573 | 0903.0194 | A Graph Analysis of the Linked Data Cloud | <|reference_start|>A Graph Analysis of the Linked Data Cloud: The Linked Data community is focused on integrating Resource Description Framework (RDF) data sets into a single unified representation known as the Web of Data. The Web of Data can be traversed by both man and machine and shows promise as the \textit{de facto} standard for integrating data world wide much like the World Wide Web is the \textit{de facto} standard for integrating documents. On February 27$^\text{th}$ of 2009, an updated Linked Data cloud visualization was made publicly available. This visualization represents the various RDF data sets currently in the Linked Data cloud and their interlinking relationships. For the purposes of this article, this visual representation was manually transformed into a directed graph and analyzed.<|reference_end|> | arxiv | @article{rodriguez2009a,
title={A Graph Analysis of the Linked Data Cloud},
author={Marko A. Rodriguez},
journal={arXiv preprint arXiv:0903.0194},
year={2009},
number={KRS-2009-01},
archivePrefix={arXiv},
eprint={0903.0194},
primaryClass={cs.CY cs.AI cs.SC}
} | rodriguez2009a |
arxiv-6574 | 0903.0197 | Rotation Distance is Fixed-Parameter Tractable | <|reference_start|>Rotation Distance is Fixed-Parameter Tractable: Rotation distance between trees measures the number of simple operations it takes to transform one tree into another. There are no known polynomial-time algorithms for computing rotation distance. In the case of ordered rooted trees, we show that the rotation distance between two ordered trees is fixed-parameter tractable, in the parameter, k, the rotation distance. The proof relies on the kernalization of the initial trees to trees with size bounded by 7k.<|reference_end|> | arxiv | @article{cleary2009rotation,
title={Rotation Distance is Fixed-Parameter Tractable},
author={Sean Cleary and Katherine St. John},
journal={Inform. Process. Lett. 109 (2009), no. 16, 918-922},
year={2009},
archivePrefix={arXiv},
eprint={0903.0197},
primaryClass={cs.DS}
} | cleary2009rotation |
arxiv-6575 | 0903.0199 | A Linear-Time Approximation Algorithm for Rotation Distance | <|reference_start|>A Linear-Time Approximation Algorithm for Rotation Distance: Rotation distance between rooted binary trees measures the number of simple operations it takes to transform one tree into another. There are no known polynomial-time algorithms for computing rotation distance. We give an efficient, linear-time approximation algorithm, which estimates the rotation distance, within a provable factor of 2, between ordered rooted binary trees. .<|reference_end|> | arxiv | @article{cleary2009a,
title={A Linear-Time Approximation Algorithm for Rotation Distance},
author={Sean Cleary and Katherine St. John},
journal={J. Graph Algorithms Appl. 14 (2010), no. 2, 385-390},
year={2009},
archivePrefix={arXiv},
eprint={0903.0199},
primaryClass={cs.DS}
} | cleary2009a |
arxiv-6576 | 0903.0200 | Faith in the Algorithm, Part 1: Beyond the Turing Test | <|reference_start|>Faith in the Algorithm, Part 1: Beyond the Turing Test: Since the Turing test was first proposed by Alan Turing in 1950, the primary goal of artificial intelligence has been predicated on the ability for computers to imitate human behavior. However, the majority of uses for the computer can be said to fall outside the domain of human abilities and it is exactly outside of this domain where computers have demonstrated their greatest contribution to intelligence. Another goal for artificial intelligence is one that is not predicated on human mimicry, but instead, on human amplification. This article surveys various systems that contribute to the advancement of human and social intelligence.<|reference_end|> | arxiv | @article{rodriguez2009faith,
title={Faith in the Algorithm, Part 1: Beyond the Turing Test},
author={Marko A. Rodriguez and Alberto Pepe},
journal={Proceedings of the AISB Symposium on Computing and Philosophy, The
Society for the Study of Artificial Intelligence and Simulation of Behaviour,
Edinburgh, Scotland, April 2009.},
year={2009},
number={LA-UR-09-00052},
archivePrefix={arXiv},
eprint={0903.0200},
primaryClass={cs.CY cs.AI}
} | rodriguez2009faith |
arxiv-6577 | 0903.0207 | A Systematic Framework for Dynamically Optimizing Multi-User Wireless Video Transmission | <|reference_start|>A Systematic Framework for Dynamically Optimizing Multi-User Wireless Video Transmission: In this paper, we formulate the collaborative multi-user wireless video transmission problem as a multi-user Markov decision process (MUMDP) by explicitly considering the users' heterogeneous video traffic characteristics, time-varying network conditions and the resulting dynamic coupling between the wireless users. These environment dynamics are often ignored in existing multi-user video transmission solutions. To comply with the decentralized nature of wireless networks, we propose to decompose the MUMDP into local MDPs using Lagrangian relaxation. Unlike in conventional multi-user video transmission solutions stemming from the network utility maximization framework, the proposed decomposition enables each wireless user to individually solve its own dynamic cross-layer optimization (i.e. the local MDP) and the network coordinator to update the Lagrangian multipliers (i.e. resource prices) based on not only current, but also future resource needs of all users, such that the long-term video quality of all users is maximized. However, solving the MUMDP requires statistical knowledge of the experienced environment dynamics, which is often unavailable before transmission time. To overcome this obstacle, we then propose a novel online learning algorithm, which allows the wireless users to update their policies in multiple states during one time slot. This is different from conventional learning solutions, which often update one state per time slot. The proposed learning algorithm can significantly improve the learning performance, thereby dramatically improving the video quality experienced by the wireless users over time. 
Our simulation results demonstrate the efficiency of the proposed MUMDP framework as compared to conventional multi-user video transmission solutions.<|reference_end|> | arxiv | @article{fu2009a,
title={A Systematic Framework for Dynamically Optimizing Multi-User Wireless
Video Transmission},
author={Fangwen Fu and Mihaela van der Schaar},
journal={arXiv preprint arXiv:0903.0207},
year={2009},
archivePrefix={arXiv},
eprint={0903.0207},
primaryClass={cs.MM}
} | fu2009a |
arxiv-6578 | 0903.0211 | Range and Roots: Two Common Patterns for Specifying and Propagating Counting and Occurrence Constraints | <|reference_start|>Range and Roots: Two Common Patterns for Specifying and Propagating Counting and Occurrence Constraints: We propose Range and Roots which are two common patterns useful for specifying a wide range of counting and occurrence constraints. We design specialised propagation algorithms for these two patterns. Counting and occurrence constraints specified using these patterns thus directly inherit a propagation algorithm. To illustrate the capabilities of the Range and Roots constraints, we specify a number of global constraints taken from the literature. Preliminary experiments demonstrate that propagating counting and occurrence constraints using these two patterns leads to a small loss in performance when compared to specialised global constraints and is competitive with alternative decompositions using elementary constraints.<|reference_end|> | arxiv | @article{bessiere2009range,
title={Range and Roots: Two Common Patterns for Specifying and Propagating
Counting and Occurrence Constraints},
author={Christian Bessiere and Emmanuel Hebrard and Brahim Hnich and Zeynep
Kiziltan and Toby Walsh},
journal={arXiv preprint arXiv:0903.0211},
year={2009},
archivePrefix={arXiv},
eprint={0903.0211},
primaryClass={cs.AI}
} | bessiere2009range |
arxiv-6579 | 0903.0276 | Impact of Cognitive Radio on Future Management of Spectrum | <|reference_start|>Impact of Cognitive Radio on Future Management of Spectrum: Cognitive radio is a breakthrough technology which is expected to have a profound impact on the way radio spectrum will be accessed, managed and shared in the future. In this paper I examine some of the implications of cognitive radio for future management of spectrum. Both a near-term view involving the opportunistic spectrum access model and a longer-term view involving a self-regulating dynamic spectrum access model within a society of cognitive radios are discussed.<|reference_end|> | arxiv | @article{nekovee2009impact,
title={Impact of Cognitive Radio on Future Management of Spectrum},
author={Maziar Nekovee},
journal={arXiv preprint arXiv:0903.0276},
year={2009},
archivePrefix={arXiv},
eprint={0903.0276},
primaryClass={cs.AI cs.GT}
} | nekovee2009impact |
arxiv-6580 | 0903.0279 | An introduction to DSmT | <|reference_start|>An introduction to DSmT: The management and combination of uncertain, imprecise, fuzzy and even paradoxical or high conflicting sources of information has always been, and still remains today, of primal importance for the development of reliable modern information systems involving artificial reasoning. In this introduction, we present a survey of our recent theory of plausible and paradoxical reasoning, known as Dezert-Smarandache Theory (DSmT), developed for dealing with imprecise, uncertain and conflicting sources of information. We focus our presentation on the foundations of DSmT and on its most important rules of combination, rather than on browsing specific applications of DSmT available in literature. Several simple examples are given throughout this presentation to show the efficiency and the generality of this new approach.<|reference_end|> | arxiv | @article{dezert2009an,
title={An introduction to DSmT},
author={Jean Dezert (ONERA) and Florentin Smarandache},
journal={arXiv preprint arXiv:0903.0279},
year={2009},
archivePrefix={arXiv},
eprint={0903.0279},
primaryClass={cs.AI}
} | dezert2009an |
arxiv-6581 | 0903.0302 | Asymptotic Improvement of the Binary Gilbert-Varshamov Bound on the Code Rate | <|reference_start|>Asymptotic Improvement of the Binary Gilbert-Varshamov Bound on the Code Rate: We compute the code parameters for binary linear codes obtained by greedy constructing the parity check matrix. Then we show that these codes improve the Gilbert-Varshamov (GV) bound on the code size and rate. This result counter proves the conjecture on the asymptotical exactness of the binary GV bound.<|reference_end|> | arxiv | @article{spasov2009asymptotic,
title={Asymptotic Improvement of the Binary Gilbert-Varshamov Bound on the Code
Rate},
author={Dejan Spasov and Marjan Gusev},
journal={arXiv preprint arXiv:0903.0302},
year={2009},
archivePrefix={arXiv},
eprint={0903.0302},
primaryClass={cs.IT math.IT}
} | spasov2009asymptotic |
arxiv-6582 | 0903.0307 | Polar Codes are Optimal for Lossy Source Coding | <|reference_start|>Polar Codes are Optimal for Lossy Source Coding: We consider lossy source compression of a binary symmetric source using polar codes and the low-complexity successive encoding algorithm. It was recently shown by Arikan that polar codes achieve the capacity of arbitrary symmetric binary-input discrete memoryless channels under a successive decoding strategy. We show the equivalent result for lossy source compression, i.e., we show that this combination achieves the rate-distortion bound for a binary symmetric source. We further show the optimality of polar codes for various problems including the binary Wyner-Ziv and the binary Gelfand-Pinsker problem<|reference_end|> | arxiv | @article{korada2009polar,
title={Polar Codes are Optimal for Lossy Source Coding},
author={Satish Babu Korada and Rudiger Urbanke},
journal={arXiv preprint arXiv:0903.0307},
year={2009},
archivePrefix={arXiv},
eprint={0903.0307},
primaryClass={cs.IT math.IT}
} | korada2009polar |
arxiv-6583 | 0903.0308 | Algorithms for Marketing-Mix Optimization | <|reference_start|>Algorithms for Marketing-Mix Optimization: Algorithms for determining quality/cost/price tradeoffs in saturated markets are considered. A product is modeled by $d$ real-valued qualities whose sum determines the unit cost of producing the product. This leads to the following optimization problem: given a set of $n$ customers, each of whom has certain minimum quality requirements and a maximum price they are willing to pay, design a new product and select a price for that product in order to maximize the resulting profit. An $O(n\log n)$ time algorithm is given for the case, $d=1$, of linear products, and $O(n(\log n)^{d+1})$ time approximation algorithms are given for products with any constant number, $d$, of qualities. To achieve the latter result, an $O(nk^{d-1})$ bound on the complexity of an arrangement of homothetic simplices in $\R^d$ is given, where $k$ is the maximum number of simplices that all contain a single points.<|reference_end|> | arxiv | @article{gudmundsson2009algorithms,
title={Algorithms for Marketing-Mix Optimization},
author={Joachim Gudmundsson and Pat Morin and Michiel Smid},
journal={arXiv preprint arXiv:0903.0308},
year={2009},
archivePrefix={arXiv},
eprint={0903.0308},
primaryClass={cs.CG}
} | gudmundsson2009algorithms |
arxiv-6584 | 0903.0314 | Granularity-Adaptive Proof Presentation | <|reference_start|>Granularity-Adaptive Proof Presentation: When mathematicians present proofs they usually adapt their explanations to their didactic goals and to the (assumed) knowledge of their addressees. Modern automated theorem provers, in contrast, present proofs usually at a fixed level of detail (also called granularity). Often these presentations are neither intended nor suitable for human use. A challenge therefore is to develop user- and goal-adaptive proof presentation techniques that obey common mathematical practice. We present a flexible and adaptive approach to proof presentation that exploits machine learning techniques to extract a model of the specific granularity of proof examples and employs this model for the automated generation of further proofs at an adapted level of granularity.<|reference_end|> | arxiv | @article{schiller2009granularity-adaptive,
title={Granularity-Adaptive Proof Presentation},
author={Marvin Schiller and Christoph Benzmueller},
journal={arXiv preprint arXiv:0903.0314},
year={2009},
number={SEKI Working-Paper SWP-2009-01},
archivePrefix={arXiv},
eprint={0903.0314},
primaryClass={cs.AI}
} | schiller2009granularity-adaptive |
arxiv-6585 | 0903.0353 | General Game Management Agent | <|reference_start|>General Game Management Agent: The task of managing general game playing in a multi-agent system is the problem addressed in this paper. It is considered to be done by an agent. There are many reasons for constructing such an agent, called general game management agent. This agent manages strategic interactions between other agents - players, natural or also artificial. The agent records the interaction for further benchmarking and analysis. He can also be used for a kind of restricted communications. His behavior is defined by a game description written in a logic-based language. The language, we present for this application, is more expressive than the language GDL, which is already used for such purposes. Our language can represent imperfect information and time dependent elements of a game. Time dependent elements like delays and timeouts are of crucial importance for interactions between players with bounded processing power like humans. We provide examples to show the feasibility of our approach. A way for game theoretical solving of an interaction description in our language is considered as future work.<|reference_end|> | arxiv | @article{tagiew2009general,
title={General Game Management Agent},
author={Rustam Tagiew},
journal={arXiv preprint arXiv:0903.0353},
year={2009},
archivePrefix={arXiv},
eprint={0903.0353},
primaryClass={cs.GT cs.MA}
} | tagiew2009general |
arxiv-6586 | 0903.0367 | How to Play Unique Games on Expanders | <|reference_start|>How to Play Unique Games on Expanders: In this note we improve a recent result by Arora, Khot, Kolla, Steurer, Tulsiani, and Vishnoi on solving the Unique Games problem on expanders. Given a $(1-\varepsilon)$-satisfiable instance of Unique Games with the constraint graph $G$, our algorithm finds an assignment satisfying at least a $1- C \varepsilon/h_G$ fraction of all constraints if $\varepsilon < c \lambda_G$ where $h_G$ is the edge expansion of $G$, $\lambda_G$ is the second smallest eigenvalue of the Laplacian of $G$, and $C$ and $c$ are some absolute constants.<|reference_end|> | arxiv | @article{makarychev2009how,
title={How to Play Unique Games on Expanders},
author={Konstantin Makarychev and Yury Makarychev},
journal={arXiv preprint arXiv:0903.0367},
year={2009},
archivePrefix={arXiv},
eprint={0903.0367},
primaryClass={cs.DS}
} | makarychev2009how |
arxiv-6587 | 0903.0391 | De-amortized Cuckoo Hashing: Provable Worst-Case Performance and Experimental Results | <|reference_start|>De-amortized Cuckoo Hashing: Provable Worst-Case Performance and Experimental Results: Cuckoo hashing is a highly practical dynamic dictionary: it provides amortized constant insertion time, worst case constant deletion time and lookup time, and good memory utilization. However, with a noticeable probability during the insertion of n elements some insertion requires \Omega(log n) time. Whereas such an amortized guarantee may be suitable for some applications, in other applications (such as high-performance routing) this is highly undesirable. Recently, Kirsch and Mitzenmacher (Allerton '07) proposed a de-amortization of cuckoo hashing using various queueing techniques that preserve its attractive properties. Kirsch and Mitzenmacher demonstrated a significant improvement to the worst case performance of cuckoo hashing via experimental results, but they left open the problem of constructing a scheme with provable properties. In this work we follow Kirsch and Mitzenmacher and present a de-amortization of cuckoo hashing that provably guarantees constant worst case operations. Specifically, for any sequence of polynomially many operations, with overwhelming probability over the randomness of the initialization phase, each operation is performed in constant time. Our theoretical analysis and experimental results indicate that the scheme is highly efficient, and provides a practical alternative to the only other known dynamic dictionary with such worst case guarantees, due to Dietzfelbinger and Meyer auf der Heide (ICALP '90).<|reference_end|> | arxiv | @article{arbitman2009de-amortized,
title={De-amortized Cuckoo Hashing: Provable Worst-Case Performance and
Experimental Results},
author={Yuriy Arbitman and Moni Naor and Gil Segev},
journal={arXiv preprint arXiv:0903.0391},
year={2009},
archivePrefix={arXiv},
eprint={0903.0391},
primaryClass={cs.DS}
} | arbitman2009de-amortized |
arxiv-6588 | 0903.0419 | Random hypergraphs and their applications | <|reference_start|>Random hypergraphs and their applications: In the last few years we have witnessed the emergence, primarily in on-line communities, of new types of social networks that require for their representation more complex graph structures than have been employed in the past. One example is the folksonomy, a tripartite structure of users, resources, and tags -- labels collaboratively applied by the users to the resources in order to impart meaningful structure on an otherwise undifferentiated database. Here we propose a mathematical model of such tripartite structures which represents them as random hypergraphs. We show that it is possible to calculate many properties of this model exactly in the limit of large network size and we compare the results against observations of a real folksonomy, that of the on-line photography web site Flickr. We show that in some cases the model matches the properties of the observed network well, while in others there are significant differences, which we find to be attributable to the practice of multiple tagging, i.e., the application by a single user of many tags to one resource, or one tag to many resources.<|reference_end|> | arxiv | @article{ghoshal2009random,
title={Random hypergraphs and their applications},
author={Gourab Ghoshal and Vinko Zlatic and Guido Caldarelli and M. E. J. Newman},
journal={Phys. Rev. E 79, 066118 (2009)},
year={2009},
doi={10.1103/PhysRevE.79.066118},
archivePrefix={arXiv},
eprint={0903.0419},
primaryClass={physics.soc-ph cs.DL}
} | ghoshal2009random |
arxiv-6589 | 0903.0422 | Deductive Inference for the Interiors and Exteriors of Horn Theories | <|reference_start|>Deductive Inference for the Interiors and Exteriors of Horn Theories: In this paper, we investigate the deductive inference for the interiors and exteriors of Horn knowledge bases, where the interiors and exteriors were introduced by Makino and Ibaraki to study stability properties of knowledge bases. We present a linear time algorithm for the deduction for the interiors and show that it is co-NP-complete for the deduction for the exteriors. Under model-based representation, we show that the deduction problem for interiors is NP-complete while the one for exteriors is co-NP-complete. As for Horn envelopes of the exteriors, we show that it is linearly solvable under model-based representation, while it is co-NP-complete under formula-based representation. We also discuss the polynomially solvable cases for all the intractable problems.<|reference_end|> | arxiv | @article{makino2009deductive,
title={Deductive Inference for the Interiors and Exteriors of Horn Theories},
author={Kazuhisa Makino and Hirotaka Ono},
journal={arXiv preprint arXiv:0903.0422},
year={2009},
archivePrefix={arXiv},
eprint={0903.0422},
primaryClass={cs.AI cs.CC cs.DS cs.LO}
} | makino2009deductive |
arxiv-6590 | 0903.0443 | Design Guidelines for Training-based MIMO Systems with Feedback | <|reference_start|>Design Guidelines for Training-based MIMO Systems with Feedback: In this paper, we study the optimal training and data transmission strategies for block fading multiple-input multiple-output (MIMO) systems with feedback. We consider both the channel gain feedback (CGF) system and the channel covariance feedback (CCF) system. Using an accurate capacity lower bound as a figure of merit, we investigate the optimization problems on the temporal power allocation to training and data transmission as well as the training length. For CGF systems without feedback delay, we prove that the optimal solutions coincide with those for non-feedback systems. Moreover, we show that these solutions stay nearly optimal even in the presence of feedback delay. This finding is important for practical MIMO training design. For CCF systems, the optimal training length can be less than the number of transmit antennas, which is verified through numerical analysis. Taking this fact into account, we propose a simple yet near optimal transmission strategy for CCF systems, and derive the optimal temporal power allocation over pilot and data transmission.<|reference_end|> | arxiv | @article{zhou2009design,
title={Design Guidelines for Training-based MIMO Systems with Feedback},
author={Xiangyun Zhou and Parastoo Sadeghi and Tharaka A. Lamahewa and Salman
Durrani},
journal={IEEE Transactions on Signal Processing, vol. 57, no. 10, pp.
4014-4026, Oct. 2009},
year={2009},
doi={10.1109/TSP.2009.2023930},
archivePrefix={arXiv},
eprint={0903.0443},
primaryClass={cs.IT math.IT}
} | zhou2009design |
arxiv-6591 | 0903.0445 | Raptor Codes Based Distributed Storage Algorithms for Wireless Sensor Networks | <|reference_start|>Raptor Codes Based Distributed Storage Algorithms for Wireless Sensor Networks: We consider a distributed storage problem in a large-scale wireless sensor network with $n$ nodes among which $k$ acquire (sense) independent data. The goal is to disseminate the acquired information throughout the network so that each of the $n$ sensors stores one possibly coded packet and the original $k$ data packets can be recovered later in a computationally simple way from any $(1+\epsilon)k$ of nodes for some small $\epsilon>0$. We propose two Raptor codes based distributed storage algorithms for solving this problem. In the first algorithm, all the sensors have the knowledge of $n$ and $k$. In the second one, we assume that no sensor has such global information.<|reference_end|> | arxiv | @article{aly2009raptor,
title={{Raptor} Codes Based Distributed Storage Algorithms for Wireless Sensor
Networks},
author={Salah A. Aly and Zhenning Kong and Emina Soljanin},
journal={arXiv preprint arXiv:0903.0445},
year={2009},
doi={10.1109/ISIT.2008.4595350},
archivePrefix={arXiv},
eprint={0903.0445},
primaryClass={cs.IT cs.DS cs.NI math.IT}
} | aly2009raptor |
arxiv-6592 | 0903.0460 | Filtering Algorithms for the Multiset Ordering Constraint | <|reference_start|>Filtering Algorithms for the Multiset Ordering Constraint: Constraint programming (CP) has been used with great success to tackle a wide variety of constraint satisfaction problems which are computationally intractable in general. Global constraints are one of the important factors behind the success of CP. In this paper, we study a new global constraint, the multiset ordering constraint, which is shown to be useful in symmetry breaking and searching for leximin optimal solutions in CP. We propose efficient and effective filtering algorithms for propagating this global constraint. We show that the algorithms are sound and complete and we discuss possible extensions. We also consider alternative propagation methods based on existing constraints in CP toolkits. Our experimental results on a number of benchmark problems demonstrate that propagating the multiset ordering constraint via a dedicated algorithm can be very beneficial.<|reference_end|> | arxiv | @article{frisch2009filtering,
title={Filtering Algorithms for the Multiset Ordering Constraint},
author={Alan Frisch and Brahim Hnich and Zeynep Kiziltan and Ian Miguel and Toby Walsh},
journal={Artificial Intelligence, 173 (2), 299-328, 2009},
year={2009},
doi={10.1016/j.artint.2008.11.001},
archivePrefix={arXiv},
eprint={0903.0460},
primaryClass={cs.AI cs.DS}
} | frisch2009filtering |
arxiv-6593 | 0903.0465 | Breaking Value Symmetry | <|reference_start|>Breaking Value Symmetry: Symmetry is an important factor in solving many constraint satisfaction problems. One common type of symmetry is when we have symmetric values. In a recent series of papers, we have studied methods to break value symmetries. Our results identify computational limits on eliminating value symmetry. For instance, we prove that pruning all symmetric values is NP-hard in general. Nevertheless, experiments show that much value symmetry can be broken in practice. These results may be useful to researchers in planning, scheduling and other areas as value symmetry occurs in many different domains.<|reference_end|> | arxiv | @article{walsh2009breaking,
title={Breaking Value Symmetry},
author={Toby Walsh},
journal={AAAI 2008: 1585-1588},
year={2009},
archivePrefix={arXiv},
eprint={0903.0465},
primaryClass={cs.AI}
} | walsh2009breaking |
arxiv-6594 | 0903.0467 | The Parameterized Complexity of Global Constraints | <|reference_start|>The Parameterized Complexity of Global Constraints: We argue that parameterized complexity is a useful tool with which to study global constraints. In particular, we show that many global constraints which are intractable to propagate completely have natural parameters which make them fixed-parameter tractable and which are easy to compute. This tractability tends either to be the result of a simple dynamic program or of a decomposition which has a strong backdoor of bounded size. This strong backdoor is often a cycle cutset. We also show that parameterized complexity can be used to study other aspects of constraint programming like symmetry breaking. For instance, we prove that value symmetry is fixed-parameter tractable to break in the number of symmetries. Finally, we argue that parameterized complexity can be used to derive results about the approximability of constraint propagation.<|reference_end|> | arxiv | @article{bessiere2009the,
title={The Parameterized Complexity of Global Constraints},
author={Christian Bessiere and Emmanuel Hebrard and Brahim Hnich and Zeynep
Kiziltan and Toby Walsh},
journal={AAAI-2008, 235-240, 2008},
year={2009},
archivePrefix={arXiv},
eprint={0903.0467},
primaryClass={cs.AI cs.CC}
} | bessiere2009the |
arxiv-6595 | 0903.0470 | Decompositions of Grammar Constraints | <|reference_start|>Decompositions of Grammar Constraints: A wide range of constraints can be compactly specified using automata or formal languages. In a sequence of recent papers, we have shown that an effective means to reason with such specifications is to decompose them into primitive constraints. We can then, for instance, use state of the art SAT solvers and profit from their advanced features like fast unit propagation, clause learning, and conflict-based search heuristics. This approach holds promise for solving combinatorial problems in scheduling, rostering, and configuration, as well as problems in more diverse areas like bioinformatics, software testing and natural language processing. In addition, decomposition may be an effective method to propagate other global constraints.<|reference_end|> | arxiv | @article{quimper2009decompositions,
title={Decompositions of Grammar Constraints},
author={Claude-Guy Quimper and Toby Walsh},
journal={AAAI 2008: 1567-1570},
year={2009},
archivePrefix={arXiv},
eprint={0903.0470},
primaryClass={cs.AI cs.FL}
} | quimper2009decompositions |
arxiv-6596 | 0903.0471 | SLIDE: A Useful Special Case of the CARDPATH Constraint | <|reference_start|>SLIDE: A Useful Special Case of the CARDPATH Constraint: We study the CardPath constraint. This ensures a given constraint holds a number of times down a sequence of variables. We show that SLIDE, a special case of CardPath where the slid constraint must hold always, can be used to encode a wide range of sliding sequence constraints including CardPath itself. We consider how to propagate SLIDE and provide a complete propagator for CardPath. Since propagation is NP-hard in general, we identify special cases where propagation takes polynomial time. Our experiments demonstrate that using SLIDE to encode global constraints can be as efficient and effective as specialised propagators.<|reference_end|> | arxiv | @article{bessiere2009slide:,
title={{SLIDE}: A Useful Special Case of the {CARDPATH} Constraint},
author={Christian Bessiere and Emmanuel Hebrard and Brahim Hnich and Zeynep Kiziltan and Toby Walsh},
journal={ECAI 2008: 475-479},
year={2009},
doi={10.3233/978-1-58603-891-5-475},
archivePrefix={arXiv},
eprint={0903.0471},
primaryClass={cs.AI cs.CC}
} | bessiere2009slide: |
arxiv-6597 | 0903.0475 | Reformulating Global Grammar Constraints | <|reference_start|>Reformulating Global Grammar Constraints: An attractive mechanism to specify global constraints in rostering and other domains is via formal languages. For instance, the Regular and Grammar constraints specify constraints in terms of the languages accepted by an automaton and a context-free grammar respectively. Taking advantage of the fixed length of the constraint, we give an algorithm to transform a context-free grammar into an automaton. We then study the use of minimization techniques to reduce the size of such automata and speed up propagation. We show that minimizing such automata after they have been unfolded and domains initially reduced can give automata that are more compact than minimizing before unfolding and reducing. Experimental results show that such transformations can improve the size of rostering problems that we can 'model and run'.<|reference_end|> | arxiv | @article{katsirelos2009reformulating,
title={Reformulating Global Grammar Constraints},
author={George Katsirelos and Nina Narodytska and Toby Walsh},
journal={arXiv preprint arXiv:0903.0475},
year={2009},
archivePrefix={arXiv},
eprint={0903.0475},
primaryClass={cs.AI}
} | katsirelos2009reformulating |
arxiv-6598 | 0903.0479 | Combining Symmetry Breaking and Global Constraints | <|reference_start|>Combining Symmetry Breaking and Global Constraints: We propose a new family of constraints which combine together lexicographical ordering constraints for symmetry breaking with other common global constraints. We give a general purpose propagator for this family of constraints, and show how to improve its complexity by exploiting properties of the included global constraints.<|reference_end|> | arxiv | @article{katsirelos2009combining,
title={Combining Symmetry Breaking and Global Constraints},
author={George Katsirelos and Nina Narodytska and Toby Walsh},
journal={arXiv preprint arXiv:0903.0479},
year={2009},
archivePrefix={arXiv},
eprint={0903.0479},
primaryClass={cs.AI}
} | katsirelos2009combining |
arxiv-6599 | 0903.0519 | Teacher's Evaluation - a Component of Quality Assessment System | <|reference_start|>Teacher's Evaluation - a Component of Quality Assessment System: One of the most important activities to increase the importance and the responsibility of the higher education is the quality management, assessment and evaluation. Starting from 2006, a national mechanism was created in Romania and all the educational institutions have to apply a concrete algorithm to ensure the internal evaluation, the external evaluation and, the most important, to increase the quality of the educational process. This paper presents the implementation of the quality assessment in "Tibiscus" University of Timisoara, particularly at the Faculty of Computers and Applied Computer Science.<|reference_end|> | arxiv | @article{karnyanszky2009teacher's,
title={Teacher's Evaluation - a Component of Quality Assessment System},
author={Tiberiu Marius Karnyanszky and Laurentiu Dan Lacrama and Lucian Luca and Ioana Iacob},
journal={Ann. Univ. Tibiscus Comp. Sci. Series 6 (2008), 107 - 112},
year={2009},
archivePrefix={arXiv},
eprint={0903.0519},
primaryClass={cs.CY}
} | karnyanszky2009teacher's |
arxiv-6600 | 0903.0520 | MANETS: High mobility can make up for low transmission power | <|reference_start|>MANETS: High mobility can make up for low transmission power: We consider a Mobile Ad-hoc NETworks (MANET) formed by "n" nodes that move independently at random over a finite square region of the plane. Nodes exchange data if they are at distance at most "r" within each other, where r>0 is the node transmission radius. The "flooding time" is the number of time steps required to broadcast a message from a source node to every node of the network. Flooding time is an important measure of the speed of information spreading in dynamic networks. We derive a nearly-tight upper bound on the flooding time which is a decreasing function of the maximal "velocity" of the nodes. It turns out that, when the node velocity is sufficiently high, even if the node transmission radius "r" is far below the "connectivity threshold", the flooding time does not asymptotically depend on "r". This implies that flooding can be very fast even though every "snapshot" (i.e. the static random geometric graph at any fixed time) of the MANET is fully disconnected. Data reach all nodes quickly despite these ones use very low transmission power. Our result is the first analytical evidence of the fact that high, random node mobility strongly speed-up information spreading and, at the same time, let nodes save energy.<|reference_end|> | arxiv | @article{clementi2009manets:,
title={{MANETS}: High mobility can make up for low transmission power},
author={Andrea E. F. Clementi and Francesco Pasquale and Riccardo Silvestri},
journal={arXiv preprint arXiv:0903.0520},
year={2009},
archivePrefix={arXiv},
eprint={0903.0520},
primaryClass={cs.DM cs.PF}
} | clementi2009manets: |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.