corpus_id
stringlengths 7
12
| paper_id
stringlengths 9
16
| title
stringlengths 1
261
| abstract
stringlengths 70
4.02k
| source
stringclasses 1
value | bibtex
stringlengths 208
20.9k
| citation_key
stringlengths 6
100
|
---|---|---|---|---|---|---|
arxiv-2001 | 0712.0165 | On the Accepting Power of 2-Tape B\"uchi Automata | <|reference_start|>On the Accepting Power of 2-Tape B\"uchi Automata: We show that, from a topological point of view, 2-tape B\"uchi automata have the same accepting power than Turing machines equipped with a B\"uchi acceptance condition. In particular, we show that for every non null recursive ordinal alpha, there exist some Sigma^0_alpha-complete and some Pi^0_alpha-complete infinitary rational relations accepted by 2-tape B\"uchi automata. This very surprising result gives answers to questions of W. Thomas [Automata and Quantifier Hierarchies, in: Formal Properties of Finite automata and Applications, Ramatuelle, 1988, LNCS 386, Springer, 1989, p.104-119], of P. Simonnet [Automates et Th\'eorie Descriptive, Ph. D. Thesis, Universit\'e Paris 7, March 1992], and of H. Lescow and W. Thomas [Logical Specifications of Infinite Computations, In: "A Decade of Concurrency", LNCS 803, Springer, 1994, p. 583-621].<|reference_end|> | arxiv | @article{finkel2007on,
title={On the Accepting Power of 2-Tape B\"uchi Automata},
author={Olivier Finkel (ELM)},
journal={23rd International Symposium on Theoretical Aspects of Computer
Science, STACS 2006, France (2006)},
year={2007},
archivePrefix={arXiv},
eprint={0712.0165},
primaryClass={cs.CC cs.LO math.LO}
} | finkel2007on |
arxiv-2002 | 0712.0171 | A Spectral Approach to Analyzing Belief Propagation for 3-Coloring | <|reference_start|>A Spectral Approach to Analyzing Belief Propagation for 3-Coloring: Contributing to the rigorous understanding of BP, in this paper we relate the convergence of BP to spectral properties of the graph. This encompasses a result for random graphs with a ``planted'' solution; thus, we obtain the first rigorous result on BP for graph coloring in the case of a complex graphical structure (as opposed to trees). In particular, the analysis shows how Belief Propagation breaks the symmetry between the $3!$ possible permutations of the color classes.<|reference_end|> | arxiv | @article{coja-oghlan2007a,
title={A Spectral Approach to Analyzing Belief Propagation for 3-Coloring},
author={Amin Coja-Oghlan and Elchanan Mossel and Dan Vilenchik},
journal={Combinatorics, Probability and Computing 18 (2009) 881 - 912},
year={2007},
doi={10.1017/S096354830900981X},
archivePrefix={arXiv},
eprint={0712.0171},
primaryClass={cs.CC cs.AI cs.DM}
} | coja-oghlan2007a |
arxiv-2003 | 0712.0271 | Distributed Arithmetic Coding for the Asymmetric Slepian-Wolf problem | <|reference_start|>Distributed Arithmetic Coding for the Asymmetric Slepian-Wolf problem: Distributed source coding schemes are typically based on the use of channels codes as source codes. In this paper we propose a new paradigm, termed "distributed arithmetic coding", which exploits the fact that arithmetic codes are good source as well as channel codes. In particular, we propose a distributed binary arithmetic coder for Slepian-Wolf coding with decoder side information, along with a soft joint decoder. The proposed scheme provides several advantages over existing Slepian-Wolf coders, especially its good performance at small block lengths, and the ability to incorporate arbitrary source models in the encoding process, e.g. context-based statistical models. We have compared the performance of distributed arithmetic coding with turbo codes and low-density parity-check codes, and found that the proposed approach has very competitive performance.<|reference_end|> | arxiv | @article{grangetto2007distributed,
title={Distributed Arithmetic Coding for the Asymmetric Slepian-Wolf problem},
author={M. Grangetto and E. Magli and G. Olmo},
journal={arXiv preprint arXiv:0712.0271},
year={2007},
archivePrefix={arXiv},
eprint={0712.0271},
primaryClass={cs.IT math.IT}
} | grangetto2007distributed |
arxiv-2004 | 0712.0305 | The analytic computability of the Shannon transform for a large class of random matrix channels | <|reference_start|>The analytic computability of the Shannon transform for a large class of random matrix channels: We define a class of "algebraic" random matrix channels for which one can generically compute the limiting Shannon transform using numerical techniques and often enumerate the low SNR series expansion coefficients in closed form. We describe this class, the coefficient enumeration techniques and compare theory with simulations.<|reference_end|> | arxiv | @article{rao2007the,
title={The analytic computability of the Shannon transform for a large class of
random matrix channels},
author={N. Raj Rao},
journal={arXiv preprint arXiv:0712.0305},
year={2007},
archivePrefix={arXiv},
eprint={0712.0305},
primaryClass={cs.IT math.IT}
} | rao2007the |
arxiv-2005 | 0712.0392 | Collaborative Gain in Resource Sharing Communication Networks | <|reference_start|>Collaborative Gain in Resource Sharing Communication Networks: This paper has been withdrawn<|reference_end|> | arxiv | @article{akhavan-astaneh2007collaborative,
title={Collaborative Gain in Resource Sharing Communication Networks},
author={Saeed Akhavan-Astaneh and Saeed Gazor},
journal={arXiv preprint arXiv:0712.0392},
year={2007},
archivePrefix={arXiv},
eprint={0712.0392},
primaryClass={cs.IT math.IT}
} | akhavan-astaneh2007collaborative |
arxiv-2006 | 0712.0411 | Period of the d-Sequence Based Random Number Generator | <|reference_start|>Period of the d-Sequence Based Random Number Generator: This paper presents an expression to compute the exact period of a recursive random number generator based on d-sequences. Using the multi-recursive version of this generator we can produce large number of pseudorandom sequences.<|reference_end|> | arxiv | @article{thippireddy2007period,
title={Period of the d-Sequence Based Random Number Generator},
author={Suresh Thippireddy and Sandeep Chalasani},
journal={arXiv preprint arXiv:0712.0411},
year={2007},
archivePrefix={arXiv},
eprint={0712.0411},
primaryClass={cs.CR}
} | thippireddy2007period |
arxiv-2007 | 0712.0451 | A Reactive Tabu Search Algorithm for Stimuli Generation in Psycholinguistics | <|reference_start|>A Reactive Tabu Search Algorithm for Stimuli Generation in Psycholinguistics: The generation of meaningless "words" matching certain statistical and/or linguistic criteria is frequently needed for experimental purposes in Psycholinguistics. Such stimuli receive the name of pseudowords or nonwords in the Cognitive Neuroscience literature. The process for building nonwords sometimes has to be based on linguistic units such as syllables or morphemes, resulting in a numerical explosion of combinations when the size of the nonwords is increased. In this paper, a reactive tabu search scheme is proposed to generate nonwords of variable size. The approach builds pseudowords by using a modified Metaheuristic algorithm based on a local search procedure enhanced by a feedback-based scheme. Experimental results show that the new algorithm is a practical and effective tool for nonword generation.<|reference_end|> | arxiv | @article{de lara2007a,
title={A Reactive Tabu Search Algorithm for Stimuli Generation in
Psycholinguistics},
author={Chinea Manrique De Lara, Alejandro},
journal={Artificial Intelligence in Science and Technology AISAT 2004
Conference},
year={2007},
archivePrefix={arXiv},
eprint={0712.0451},
primaryClass={cs.AI cs.CC cs.DM cs.LG}
} | de lara2007a |
arxiv-2008 | 0712.0499 | Simrank++: Query rewriting through link analysis of the click graph | <|reference_start|>Simrank++: Query rewriting through link analysis of the click graph: We focus on the problem of query rewriting for sponsored search. We base rewrites on a historical click graph that records the ads that have been clicked on in response to past user queries. Given a query q, we first consider Simrank as a way to identify queries similar to q, i.e., queries whose ads a user may be interested in. We argue that Simrank fails to properly identify query similarities in our application, and we present two enhanced versions of Simrank: one that exploits weights on click graph edges and another that exploits ``evidence.'' We experimentally evaluate our new schemes against Simrank, using actual click graphs and queries from Yahoo!, and using a variety of metrics. Our results show that the enhanced methods can yield more and better query rewrites.<|reference_end|> | arxiv | @article{antonellis2007simrank++:,
title={Simrank++: Query rewriting through link analysis of the click graph},
author={Ioannis Antonellis and Hector Garcia-Molina and Chi-Chao Chang},
journal={arXiv preprint arXiv:0712.0499},
year={2007},
number={Stanford University, Infolab TR 2007-32},
archivePrefix={arXiv},
eprint={0712.0499},
primaryClass={cs.DL cs.DB cs.IR}
} | antonellis2007simrank++: |
arxiv-2009 | 0712.0531 | Prostate biopsies guided by three-dimensional real-time (4-D) transrectal ultrasonography on a phantom: comparative study versus two-dimensional transrectal ultrasound-guided biopsies | <|reference_start|>Prostate biopsies guided by three-dimensional real-time (4-D) transrectal ultrasonography on a phantom: comparative study versus two-dimensional transrectal ultrasound-guided biopsies: OBJECTIVE: This study evaluated the accuracy in localisation and distribution of real-time three-dimensional (4-D) ultrasound-guided biopsies on a prostate phantom. METHODS: A prostate phantom was created. A three-dimensional real-time ultrasound system with a 5.9MHz probe was used, making it possible to see several reconstructed orthogonal viewing planes in real time. Fourteen operators performed biopsies first under 2-D then 4-D transurethral ultrasound (TRUS) guidance (336 biopsies). The biopsy path was modelled using segmentation in a 3-D ultrasonographic volume. Special software was used to visualise the biopsy paths in a reference prostate and assess the sampled area. A comparative study was performed to examine the accuracy of the entry points and target of the needle. Distribution was assessed by measuring the volume sampled and a redundancy ratio of the sampled prostate. RESULTS: A significant increase in accuracy in hitting the target zone was identified using 4-D ultrasonography as compared to 2-D. There was no increase in the sampled volume or improvement in the biopsy distribution with 4-D ultrasonography as compared to 2-D. CONCLUSION: The 4-D TRUS guidance appears to show, on a synthetic model, an improvement in location accuracy and in the ability to reproduce a protocol. The biopsy distribution does not seem improved.<|reference_end|> | arxiv | @article{long2007prostate,
title={Prostate biopsies guided by three-dimensional real-time (4-D)
transrectal ultrasonography on a phantom: comparative study versus
two-dimensional transrectal ultrasound-guided biopsies},
author={Jean-Alexandre Long (TIMC) and Vincent Daanen (TIMC) and Alexandre
Moreau-Gaudry (TIMC, CHU-Grenoble CIC) and Jocelyne Troccaz (TIMC) and
Jean-Jacques Rambeaud and Jean-Luc Descotes},
journal={European Urology 52, 4 (2007) 1097-104},
year={2007},
doi={10.1016/j.eururo.2006.11.034},
archivePrefix={arXiv},
eprint={0712.0531},
primaryClass={cs.OH}
} | long2007prostate |
arxiv-2010 | 0712.0541 | New Construction of A Family of Quasi-Twisted Two-Weight Codes | <|reference_start|>New Construction of A Family of Quasi-Twisted Two-Weight Codes: Based on cyclic and consta-cyclic simplex codes, a new explicit construction of a family of two-weight codes is presented. These two-weight codes obtained are in the form of 2-generator quasi-cyclic, or quasi-twisted structure. Based on this construction, new optimal binary quasi-cyclic [195, 8, 96], [210, 8, 104] and [240, 8, 120] codes, and good QC ternary [208, 6, 135] and [221, 6, 144] codes are thus obtained. It is also shown that many codes among the family meet the Griesmer bound and therefore are optimal.<|reference_end|> | arxiv | @article{chen2007new,
title={New Construction of A Family of Quasi-Twisted Two-Weight Codes},
author={Eric Z. Chen},
journal={IEEE Trans. Inform. Theory, December 2008},
year={2007},
archivePrefix={arXiv},
eprint={0712.0541},
primaryClass={cs.IT math.IT}
} | chen2007new |
arxiv-2011 | 0712.0554 | Spanners of Complete $k$-Partite Geometric Graphs | <|reference_start|>Spanners of Complete $k$-Partite Geometric Graphs: We address the following problem: Given a complete $k$-partite geometric graph $K$ whose vertex set is a set of $n$ points in $\mathbb{R}^d$, compute a spanner of $K$ that has a ``small'' stretch factor and ``few'' edges. We present two algorithms for this problem. The first algorithm computes a $(5+\epsilon)$-spanner of $K$ with O(n) edges in $O(n \log n)$ time. The second algorithm computes a $(3+\epsilon)$-spanner of $K$ with $O(n \log n)$ edges in $O(n \log n)$ time. The latter result is optimal: We show that for any $2 \leq k \leq n - \Theta(\sqrt{n \log n})$, spanners with $O(n \log n)$ edges and stretch factor less than 3 do not exist for all complete $k$-partite geometric graphs.<|reference_end|> | arxiv | @article{bose2007spanners,
title={Spanners of Complete $k$-Partite Geometric Graphs},
author={Prosenjit Bose and Paz Carmi and Mathieu Couture and Anil Maheshwari
and Pat Morin and Michiel Smid},
journal={arXiv preprint arXiv:0712.0554},
year={2007},
archivePrefix={arXiv},
eprint={0712.0554},
primaryClass={cs.CG}
} | bose2007spanners |
arxiv-2012 | 0712.0616 | Upper Bounds for the Number of Hamiltonian Cycles | <|reference_start|>Upper Bounds for the Number of Hamiltonian Cycles: An upper bound for the number of Hamiltonian cycles of symmetric diagraphs is established first in this paper, which is tighter than the famous Minc's bound and the Br$\acute{e}$gman's bound. A transformation on graphs is proposed, so that counting the number of Hamiltonian cycles of an undirected graph can be done by counting the number of Hamiltonian cycles of its corresponding symmetric directed graph. In this way, an upper bound for the number of Hamiltonian cycles of undirected graphs is also obtained.<|reference_end|> | arxiv | @article{zhang2007upper,
title={Upper Bounds for the Number of Hamiltonian Cycles},
author={Jinshan Zhang},
journal={arXiv preprint arXiv:0712.0616},
year={2007},
archivePrefix={arXiv},
eprint={0712.0616},
primaryClass={cs.DM}
} | zhang2007upper |
arxiv-2013 | 0712.0653 | Equations of States in Singular Statistical Estimation | <|reference_start|>Equations of States in Singular Statistical Estimation: Learning machines which have hierarchical structures or hidden variables are singular statistical models because they are nonidentifiable and their Fisher information matrices are singular. In singular statistical models, neither the Bayes a posteriori distribution converges to the normal distribution nor the maximum likelihood estimator satisfies asymptotic normality. This is the main reason why it has been difficult to predict their generalization performances from trained states. In this paper, we study four errors, (1) Bayes generalization error, (2) Bayes training error, (3) Gibbs generalization error, and (4) Gibbs training error, and prove that there are mathematical relations among these errors. The formulas proved in this paper are equations of states in statistical estimation because they hold for any true distribution, any parametric model, and any a priori distribution. Also we show that Bayes and Gibbs generalization errors are estimated by Bayes and Gibbs training errors, and propose widely applicable information criteria which can be applied to both regular and singular statistical models.<|reference_end|> | arxiv | @article{watanabe2007equations,
title={Equations of States in Singular Statistical Estimation},
author={Sumio Watanabe},
journal={arXiv preprint arXiv:0712.0653},
year={2007},
archivePrefix={arXiv},
eprint={0712.0653},
primaryClass={cs.LG}
} | watanabe2007equations |
arxiv-2014 | 0712.0693 | Cryptanalysis of an image encryption scheme based on the Hill cipher | <|reference_start|>Cryptanalysis of an image encryption scheme based on the Hill cipher: This paper studies the security of an image encryption scheme based on the Hill cipher and reports its following problems: 1) there is a simple necessary and sufficient condition that makes a number of secret keys invalid; 2) it is insensitive to the change of the secret key; 3) it is insensitive to the change of the plain-image; 4) it can be broken with only one known/chosen-plaintext; 5) it has some other minor defects.<|reference_end|> | arxiv | @article{li2007cryptanalysis,
title={Cryptanalysis of an image encryption scheme based on the Hill cipher},
author={Chengqing Li and Dan Zhang and Guanrong Chen},
journal={arXiv preprint arXiv:0712.0693},
year={2007},
doi={10.1631/jzus.A0720102},
archivePrefix={arXiv},
eprint={0712.0693},
primaryClass={cs.CR}
} | li2007cryptanalysis |
arxiv-2015 | 0712.0744 | Computational Chemotaxis in Ants and Bacteria over Dynamic Environments | <|reference_start|>Computational Chemotaxis in Ants and Bacteria over Dynamic Environments: Chemotaxis can be defined as an innate behavioural response by an organism to a directional stimulus, in which bacteria, and other single-cell or multicellular organisms direct their movements according to certain chemicals in their environment. This is important for bacteria to find food (e.g., glucose) by swimming towards the highest concentration of food molecules, or to flee from poisons. Based on self-organized computational approaches and similar stigmergic concepts we derive a novel swarm intelligent algorithm. What strikes from these observations is that both eusocial insects as ant colonies and bacteria have similar natural mechanisms based on stigmergy in order to emerge coherent and sophisticated patterns of global collective behaviour. Keeping in mind the above characteristics we will present a simple model to tackle the collective adaptation of a social swarm based on real ant colony behaviors (SSA algorithm) for tracking extrema in dynamic environments and highly multimodal complex functions described in the well-know De Jong test suite. Later, for the purpose of comparison, a recent model of artificial bacterial foraging (BFOA algorithm) based on similar stigmergic features is described and analyzed. Final results indicate that the SSA collective intelligence is able to cope and quickly adapt to unforeseen situations even when over the same cooperative foraging period, the community is requested to deal with two different and contradictory purposes, while outperforming BFOA in adaptive speed. Results indicate that the present approach deals well in severe Dynamic Optimization problems.<|reference_end|> | arxiv | @article{ramos2007computational,
title={Computational Chemotaxis in Ants and Bacteria over Dynamic Environments},
author={Vitorino Ramos and C. M. Fernandes and A. C. Rosa and A. Abraham},
journal={arXiv preprint arXiv:0712.0744},
year={2007},
doi={10.1109/CEC.2007.4424594},
archivePrefix={arXiv},
eprint={0712.0744},
primaryClass={cs.MA cs.AI q-bio.PE q-bio.QM}
} | ramos2007computational |
arxiv-2016 | 0712.0769 | Towards 3D ultrasound image based soft tissue tracking: a transrectal ultrasound prostate image alignment system | <|reference_start|>Towards 3D ultrasound image based soft tissue tracking: a transrectal ultrasound prostate image alignment system: The emergence of real-time 3D ultrasound (US) makes it possible to consider image-based tracking of subcutaneous soft tissue targets for computer guided diagnosis and therapy. We propose a 3D transrectal US based tracking system for precise prostate biopsy sample localisation. The aim is to improve sample distribution, to enable targeting of unsampled regions for repeated biopsies, and to make post-interventional quality controls possible. Since the patient is not immobilized, since the prostate is mobile and due to the fact that probe movements are only constrained by the rectum during biopsy acquisition, the tracking system must be able to estimate rigid transformations that are beyond the capture range of common image similarity measures. We propose a fast and robust multi-resolution attribute-vector registration approach that combines global and local optimization methods to solve this problem. Global optimization is performed on a probe movement model that reduces the dimensionality of the search space and thus renders optimization efficient. The method was tested on 237 prostate volumes acquired from 14 different patients for 3D to 3D and 3D to orthogonal 2D slices registration. The 3D-3D version of the algorithm converged correctly in 96.7% of all cases in 6.5s with an accuracy of 1.41mm (r.m.s.) and 3.84mm (max). The 3D to slices method yielded a success rate of 88.9% in 2.3s with an accuracy of 1.37mm (r.m.s.) and 4.3mm (max).<|reference_end|> | arxiv | @article{baumann2007towards,
title={Towards 3D ultrasound image based soft tissue tracking: a transrectal
ultrasound prostate image alignment system},
author={Michael Baumann (TIMC) and Pierre Mozer (TIMC) and Vincent Daanen
(TIMC) and Jocelyne Troccaz (TIMC)},
journal={Dans Proceedings of MICCAI 2007 - Medical Image Computing and
Computer Assisted Interventions 2007, Brisbane : Australie (2007)},
year={2007},
archivePrefix={arXiv},
eprint={0712.0769},
primaryClass={cs.OH physics.med-ph}
} | baumann2007towards |
arxiv-2017 | 0712.0804 | Minimum Cost Homomorphisms to Locally Semicomplete and Quasi-Transitive Digraphs | <|reference_start|>Minimum Cost Homomorphisms to Locally Semicomplete and Quasi-Transitive Digraphs: For digraphs $G$ and $H$, a homomorphism of $G$ to $H$ is a mapping $f:\ V(G)\dom V(H)$ such that $uv\in A(G)$ implies $f(u)f(v)\in A(H)$. If, moreover, each vertex $u \in V(G)$ is associated with costs $c_i(u), i \in V(H)$, then the cost of a homomorphism $f$ is $\sum_{u\in V(G)}c_{f(u)}(u)$. For each fixed digraph $H$, the minimum cost homomorphism problem for $H$, denoted MinHOM($H$), can be formulated as follows: Given an input digraph $G$, together with costs $c_i(u)$, $u\in V(G)$, $i\in V(H)$, decide whether there exists a homomorphism of $G$ to $H$ and, if one exists, to find one of minimum cost. Minimum cost homomorphism problems encompass (or are related to) many well studied optimization problems such as the minimum cost chromatic partition and repair analysis problems. We focus on the minimum cost homomorphism problem for locally semicomplete digraphs and quasi-transitive digraphs which are two well-known generalizations of tournaments. Using graph-theoretic characterization results for the two digraph classes, we obtain a full dichotomy classification of the complexity of minimum cost homomorphism problems for both classes.<|reference_end|> | arxiv | @article{gupta2007minimum,
title={Minimum Cost Homomorphisms to Locally Semicomplete and Quasi-Transitive
Digraphs},
author={A. Gupta and G. Gutin and M. Karimi and E. J. Kim and A. Rafiey},
journal={arXiv preprint arXiv:0712.0804},
year={2007},
archivePrefix={arXiv},
eprint={0712.0804},
primaryClass={cs.DM}
} | gupta2007minimum |
arxiv-2018 | 0712.0811 | The Fast Fibonacci Decompression Algorithm | <|reference_start|>The Fast Fibonacci Decompression Algorithm: Data compression has been widely applied in many data processing areas. Compression methods use variable-size codes with the shorter codes assigned to symbols or groups of symbols that appear in the data frequently. Fibonacci coding, as a representative of these codes, is used for compressing small numbers. Time consumption of a decompression algorithm is not usually as important as the time of a compression algorithm. However, efficiency of the decompression may be a critical issue in some cases. For example, a real-time compression of tree data structures follows this issue. Tree's pages are decompressed during every reading from a secondary storage into the main memory. In this case, the efficiency of a decompression algorithm is extremely important. We have developed a Fast Fibonacci decompression for this purpose. Our approach is up to $3.5\times$ faster than the original implementation.<|reference_end|> | arxiv | @article{baca2007the,
title={The Fast Fibonacci Decompression Algorithm},
author={R. Baca and V. Snasel and J. Platos and M. Kratky and E. El-Qawasmeh},
journal={arXiv preprint arXiv:0712.0811},
year={2007},
archivePrefix={arXiv},
eprint={0712.0811},
primaryClass={cs.PF cs.OH}
} | baca2007the |
arxiv-2019 | 0712.0836 | Evolving localizations in reaction-diffusion cellular automata | <|reference_start|>Evolving localizations in reaction-diffusion cellular automata: We consider hexagonal cellular automata with immediate cell neighbourhood and three cell-states. Every cell calculates its next state depending on the integral representation of states in its neighbourhood, i.e. how many neighbours are in each one state. We employ evolutionary algorithms to breed local transition functions that support mobile localizations (gliders), and characterize sets of the functions selected in terms of quasi-chemical systems. Analysis of the set of functions evolved allows to speculate that mobile localizations are likely to emerge in the quasi-chemical systems with limited diffusion of one reagent, a small number of molecules is required for amplification of travelling localizations, and reactions leading to stationary localizations involve relatively equal amount of quasi-chemical species. Techniques developed can be applied in cascading signals in nature-inspired spatially extended computing devices, and phenomenological studies and classification of non-linear discrete systems.<|reference_end|> | arxiv | @article{adamatzky2007evolving,
title={Evolving localizations in reaction-diffusion cellular automata},
author={Andrew Adamatzky and Larry Bull and Pierre Collet and Emmanuel Sapin},
journal={International Journal of Modern Physics C (IJMPC) Volume: 19,
Issue: 4 (April 2008) pp. 557-567},
year={2007},
doi={10.1142/S0129183108012376},
archivePrefix={arXiv},
eprint={0712.0836},
primaryClass={cs.AI}
} | adamatzky2007evolving |
arxiv-2020 | 0712.0840 | A Universal Kernel for Learning Regular Languages | <|reference_start|>A Universal Kernel for Learning Regular Languages: We give a universal kernel that renders all the regular languages linearly separable. We are not able to compute this kernel efficiently and conjecture that it is intractable, but we do have an efficient $\eps$-approximation.<|reference_end|> | arxiv | @article{leonid2007a,
title={A Universal Kernel for Learning Regular Languages},
author={Leonid (Aryeh) Kontorovich},
journal={The 5th International Workshop on Mining and Learning with Graphs,
2007},
year={2007},
archivePrefix={arXiv},
eprint={0712.0840},
primaryClass={cs.LG cs.DM}
} | leonid2007a |
arxiv-2021 | 0712.0871 | Balancing forward and feedback error correction for erasure channels with unreliable feedback | <|reference_start|>Balancing forward and feedback error correction for erasure channels with unreliable feedback: The traditional information theoretic approach to studying feedback is to consider ideal instantaneous high-rate feedback of the channel outputs to the encoder. This was acceptable in classical work because the results were negative: Shannon pointed out that even perfect feedback often does not improve capacity and in the context of symmetric DMCs, Dobrushin showed that it does not improve the fixed block-coding error exponents in the interesting high rate regime. However, it has recently been shown that perfect feedback does allow great improvements in the asymptotic tradeoff between end-to-end delay and probability of error, even for symmetric channels at high rate. Since gains are claimed with ideal instantaneous feedback, it is natural to wonder whether these improvements remain if the feedback is unreliable or otherwise limited. Here, packet-erasure channels are considered on both the forward and feedback links. First, the feedback channel is considered as a given and a strategy is given to balance forward and feedback error correction in the suitable information-theoretic limit of long end-to-end delays. At high enough rates, perfect-feedback performance is asymptotically attainable despite having only unreliable feedback! Second, the results are interpreted in the zero- sum case of "half-duplex" nodes where the allocation of bandwidth or time to the feedback channel comes at the direct expense of the forward channel. It turns out that even here, feedback is worthwhile since dramatically lower asymptotic delays are possible by appropriately balancing forward and feedback error correction. 
The results easily generalize to channels with strictly positive zero-undeclared-error capacities.<|reference_end|> | arxiv | @article{sahai2007balancing,
title={Balancing forward and feedback error correction for erasure channels
with unreliable feedback},
author={Anant Sahai},
journal={arXiv preprint arXiv:0712.0871},
year={2007},
archivePrefix={arXiv},
eprint={0712.0871},
primaryClass={cs.IT math.IT}
} | sahai2007balancing |
arxiv-2022 | 0712.0873 | The price of ignorance: The impact of side-information on delay for lossless source-coding | <|reference_start|>The price of ignorance: The impact of side-information on delay for lossless source-coding: Inspired by the context of compressing encrypted sources, this paper considers the general tradeoff between rate, end-to-end delay, and probability of error for lossless source coding with side-information. The notion of end-to-end delay is made precise by considering a sequential setting in which source symbols are revealed in real time and need to be reconstructed at the decoder within a certain fixed latency requirement. Upper bounds are derived on the reliability functions with delay when side-information is known only to the decoder as well as when it is also known at the encoder. When the encoder is not ignorant of the side-information (including the trivial case when there is no side-information), it is possible to have substantially better tradeoffs between delay and probability of error at all rates. This shows that there is a fundamental price of ignorance in terms of end-to-end delay when the encoder is not aware of the side information. This effect is not visible if only fixed-block-length codes are considered. In this way, side-information in source-coding plays a role analogous to that of feedback in channel coding. While the theorems in this paper are asymptotic in terms of long delays and low probabilities of error, an example is used to show that the qualitative effects described here are significant even at short and moderate delays.<|reference_end|> | arxiv | @article{chang2007the,
title={The price of ignorance: The impact of side-information on delay for
lossless source-coding},
author={Cheng Chang and Anant Sahai},
journal={arXiv preprint arXiv:0712.0873},
year={2007},
archivePrefix={arXiv},
eprint={0712.0873},
primaryClass={cs.IT math.IT}
} | chang2007the |
arxiv-2023 | 0712.0917 | Some properties of finite meadows | <|reference_start|>Some properties of finite meadows: The aim of this note is to describe the structure of finite meadows. We will show that the class of finite meadows is the closure of the class of finite fields under finite products. As a corollary, we obtain a unique representation of minimal meadows in terms of prime fields.<|reference_end|> | arxiv | @article{bethke2007some,
title={Some properties of finite meadows},
author={Inge Bethke and Piet Rodenburg},
journal={arXiv preprint arXiv:0712.0917},
year={2007},
archivePrefix={arXiv},
eprint={0712.0917},
primaryClass={math.RA cs.SC}
} | bethke2007some |
arxiv-2024 | 0712.0932 | Dimensionality Reduction and Reconstruction using Mirroring Neural Networks and Object Recognition based on Reduced Dimension Characteristic Vector | <|reference_start|>Dimensionality Reduction and Reconstruction using Mirroring Neural Networks and Object Recognition based on Reduced Dimension Characteristic Vector: In this paper, we present a Mirroring Neural Network architecture to perform non-linear dimensionality reduction and Object Recognition using a reduced lowdimensional characteristic vector. In addition to dimensionality reduction, the network also reconstructs (mirrors) the original high-dimensional input vector from the reduced low-dimensional data. The Mirroring Neural Network architecture has more number of processing elements (adalines) in the outer layers and the least number of elements in the central layer to form a converging-diverging shape in its configuration. Since this network is able to reconstruct the original image from the output of the innermost layer (which contains all the information about the input pattern), these outputs can be used as object signature to classify patterns. The network is trained to minimize the discrepancy between actual output and the input by back propagating the mean squared error from the output layer to the input layer. After successfully training the network, it can reduce the dimension of input vectors and mirror the patterns fed to it. The Mirroring Neural Network architecture gave very good results on various test patterns.<|reference_end|> | arxiv | @article{deepthi2007dimensionality,
title={Dimensionality Reduction and Reconstruction using Mirroring Neural
Networks and Object Recognition based on Reduced Dimension Characteristic
Vector},
author={Dasika Ratna Deepthi, Sujeet Kuchibhotla and K.Eswaran},
journal={IEEE International Conference On Advances in Computer Vision and
Information Tech. (IEEE, ACVIT-07), pp. 348 - 353 (2007)},
year={2007},
archivePrefix={arXiv},
eprint={0712.0932},
primaryClass={cs.CV cs.AI cs.NE}
} | deepthi2007dimensionality |
arxiv-2025 | 0712.0938 | Automatic Pattern Classification by Unsupervised Learning Using Dimensionality Reduction of Data with Mirroring Neural Networks | <|reference_start|>Automatic Pattern Classification by Unsupervised Learning Using Dimensionality Reduction of Data with Mirroring Neural Networks: This paper proposes an unsupervised learning technique by using Multi-layer Mirroring Neural Network and Forgy's clustering algorithm. Multi-layer Mirroring Neural Network is a neural network that can be trained with generalized data inputs (different categories of image patterns) to perform non-linear dimensionality reduction and the resultant low-dimensional code is used for unsupervised pattern classification using Forgy's algorithm. By adapting the non-linear activation function (modified sigmoidal function) and initializing the weights and bias terms to small random values, mirroring of the input pattern is initiated. In training, the weights and bias terms are changed in such a way that the input presented is reproduced at the output by back propagating the error. The mirroring neural network is capable of reducing the input vector to a great degree (approximately 1/30th the original size) and also able to reconstruct the input pattern at the output layer from this reduced code units. The feature set (output of central hidden layer) extracted from this network is fed to Forgy's algorithm, which classify input data patterns into distinguishable classes. In the implementation of Forgy's algorithm, initial seed points are selected in such a way that they are distant enough to be perfectly grouped into different categories. Thus a new method of unsupervised learning is formulated and demonstrated in this paper. This method gave impressive results when applied to classification of different image patterns.<|reference_end|> | arxiv | @article{deepthi2007automatic,
title={Automatic Pattern Classification by Unsupervised Learning Using
Dimensionality Reduction of Data with Mirroring Neural Networks},
author={Dasika Ratna Deepthi, G.R.Aditya Krishna and K. Eswaran},
journal={IEEE International Conference on Advances in Computer Vision and
Information Tech. (IEEE, ACVIT-07), pp. 354 - 360 (2007)},
year={2007},
archivePrefix={arXiv},
eprint={0712.0938},
primaryClass={cs.LG cs.AI cs.NE}
} | deepthi2007automatic |
arxiv-2026 | 0712.0948 | A Common View on Strong, Uniform, and Other Notions of Equivalence in Answer-Set Programming | <|reference_start|>A Common View on Strong, Uniform, and Other Notions of Equivalence in Answer-Set Programming: Logic programming under the answer-set semantics nowadays deals with numerous different notions of program equivalence. This is due to the fact that equivalence for substitution (known as strong equivalence) and ordinary equivalence are different concepts. The former holds, given programs P and Q, iff P can be faithfully replaced by Q within any context R, while the latter holds iff P and Q provide the same output, that is, they have the same answer sets. Notions in between strong and ordinary equivalence have been introduced as theoretical tools to compare incomplete programs and are defined by either restricting the syntactic structure of the considered context programs R or by bounding the set A of atoms allowed to occur in R (relativized equivalence).For the latter approach, different A yield properly different equivalence notions, in general. For the former approach, however, it turned out that any ``reasonable'' syntactic restriction to R coincides with either ordinary, strong, or uniform equivalence. In this paper, we propose a parameterization for equivalence notions which takes care of both such kinds of restrictions simultaneously by bounding, on the one hand, the atoms which are allowed to occur in the rule heads of the context and, on the other hand, the atoms which are allowed to occur in the rule bodies of the context. We introduce a general semantical characterization which includes known ones as SE-models (for strong equivalence) or UE-models (for uniform equivalence) as special cases. Moreover,we provide complexity bounds for the problem in question and sketch a possible implementation method. To appear in Theory and Practice of Logic Programming (TPLP).<|reference_end|> | arxiv | @article{woltran2007a,
title={A Common View on Strong, Uniform, and Other Notions of Equivalence in
Answer-Set Programming},
author={Stefan Woltran},
journal={arXiv preprint arXiv:0712.0948},
year={2007},
archivePrefix={arXiv},
eprint={0712.0948},
primaryClass={cs.AI cs.LO}
} | woltran2007a |
arxiv-2027 | 0712.0975 | Random quantum codes from Gaussian ensembles and an uncertainty relation | <|reference_start|>Random quantum codes from Gaussian ensembles and an uncertainty relation: Using random Gaussian vectors and an information-uncertainty relation, we give a proof that the coherent information is an achievable rate for entanglement transmission through a noisy quantum channel. The codes are random subspaces selected according to the Haar measure, but distorted as a function of the sender's input density operator. Using large deviations techniques, we show that classical data transmitted in either of two Fourier-conjugate bases for the coding subspace can be decoded with low probability of error. A recently discovered information-uncertainty relation then implies that the quantum mutual information for entanglement encoded into the subspace and transmitted through the channel will be high. The monogamy of quantum correlations finally implies that the environment of the channel cannot be significantly coupled to the entanglement, and concluding, which ensures the existence of a decoding by the receiver.<|reference_end|> | arxiv | @article{hayden2007random,
title={Random quantum codes from Gaussian ensembles and an uncertainty relation},
author={Patrick Hayden, Peter W. Shor, Andreas Winter},
journal={Open Syst. Inf. Dyn. 15 (2008) 71-89},
year={2007},
doi={10.1142/S1230161208000079},
archivePrefix={arXiv},
eprint={0712.0975},
primaryClass={quant-ph cs.IT math.IT}
} | hayden2007random |
arxiv-2028 | 0712.1014 | Characterization Of A Class Of Graphs Related To Pairs Of Disjoint Matchings | <|reference_start|>Characterization Of A Class Of Graphs Related To Pairs Of Disjoint Matchings: For a given graph consider a pair of disjoint matchings the union of which contains as many edges as possible. Furthermore, consider the relation of the cardinalities of a maximum matching and the largest matching in those pairs. It is known that this relation does not exceed 5/4 for any graph. We characterize the class of graphs for which this relation is precisely 5/4. Our characterization implies that these graphs contain a spanning subgraph, every component of which is the minimal graph of this class.<|reference_end|> | arxiv | @article{tserunyan2007characterization,
title={Characterization Of A Class Of Graphs Related To Pairs Of Disjoint
Matchings},
author={A. V. Tserunyan},
journal={Discrete Mathematics 309 (2009) 693--713},
year={2007},
doi={10.1016/j.disc.2008.01.004},
archivePrefix={arXiv},
eprint={0712.1014},
primaryClass={cs.DM}
} | tserunyan2007characterization |
arxiv-2029 | 0712.1037 | The Importance of Being First: Position Dependent Citation Rates on arXiv:astro-ph | <|reference_start|>The Importance of Being First: Position Dependent Citation Rates on arXiv:astro-ph: We study the dependence of citation counts of e-prints published on the arXiv:astro-ph server on their position in the daily astro-ph listing. Using the SPIRES literature database we reconstruct the astro-ph listings from July 2002 to December 2005 and determine citation counts for e-prints from their ADS entry. We use Zipf plots to analyze the citation distributions for each astro-ph position. We find that e-prints appearing at or near the top of the astro-ph mailings receive significantly more citations than those further down the list. This difference is significant at the 7 sigma level and on average amounts to two times more citations for papers at the top than those further down the listing. We propose three possible non-exclusive explanations for this positional citation effect and try to test them. We conclude that self-promotion by authors plays a role in the observed effect but cannot exclude that increased visibility at the top of the daily listings contributes to higher citation counts as well. We can rule out that the positional dependence of citations is caused by the coincidence of the submission deadline with the working hours of a geographically constrained set of intrinsically higher cited authors. We discuss several ways of mitigating the observed effect, including splitting astro-ph into several subject classes, randomizing the order of e-prints, and a novel approach to sorting entries by relevance to individual readers.<|reference_end|> | arxiv | @article{dietrich2007the,
title={The Importance of Being First: Position Dependent Citation Rates on
arXiv:astro-ph},
author={J. P. Dietrich},
journal={arXiv preprint arXiv:0712.1037},
year={2007},
doi={10.1086/527522},
archivePrefix={arXiv},
eprint={0712.1037},
primaryClass={astro-ph cs.DL}
} | dietrich2007the |
arxiv-2030 | 0712.1097 | On Using Unsatisfiability for Solving Maximum Satisfiability | <|reference_start|>On Using Unsatisfiability for Solving Maximum Satisfiability: Maximum Satisfiability (MaxSAT) is a well-known optimization pro- blem, with several practical applications. The most widely known MAXS AT algorithms are ineffective at solving hard problems instances from practical application domains. Recent work proposed using efficient Boolean Satisfiability (SAT) solvers for solving the MaxSAT problem, based on identifying and eliminating unsatisfiable subformulas. However, these algorithms do not scale in practice. This paper analyzes existing MaxSAT algorithms based on unsatisfiable subformula identification. Moreover, the paper proposes a number of key optimizations to these MaxSAT algorithms and a new alternative algorithm. The proposed optimizations and the new algorithm provide significant performance improvements on MaxSAT instances from practical applications. Moreover, the efficiency of the new generation of unsatisfiability-based MaxSAT solvers becomes effectively indexed to the ability of modern SAT solvers to proving unsatisfiability and identifying unsatisfiable subformulas.<|reference_end|> | arxiv | @article{marques-silva2007on,
title={On Using Unsatisfiability for Solving Maximum Satisfiability},
author={Joao Marques-Silva, Jordi Planes},
journal={arXiv preprint arXiv:0712.1097},
year={2007},
archivePrefix={arXiv},
eprint={0712.1097},
primaryClass={cs.AI cs.DS}
} | marques-silva2007on |
arxiv-2031 | 0712.1163 | Efficient modularity optimization by multistep greedy algorithm and vertex mover refinement | <|reference_start|>Efficient modularity optimization by multistep greedy algorithm and vertex mover refinement: Identifying strongly connected substructures in large networks provides insight into their coarse-grained organization. Several approaches based on the optimization of a quality function, e.g., the modularity, have been proposed. We present here a multistep extension of the greedy algorithm (MSG) that allows the merging of more than one pair of communities at each iteration step. The essential idea is to prevent the premature condensation into few large communities. Upon convergence of the MSG a simple refinement procedure called "vertex mover" (VM) is used for reassigning vertices to neighboring communities to improve the final modularity value. With an appropriate choice of the step width, the combined MSG-VM algorithm is able to find solutions of higher modularity than those reported previously. The multistep extension does not alter the scaling of computational cost of the greedy algorithm.<|reference_end|> | arxiv | @article{schuetz2007efficient,
title={Efficient modularity optimization by multistep greedy algorithm and
vertex mover refinement},
author={Philipp Schuetz and Amedeo Caflisch},
journal={Phys. Rev. E 77,046112 (2008)},
year={2007},
doi={10.1103/PhysRevE.77.046112},
archivePrefix={arXiv},
eprint={0712.1163},
primaryClass={cs.DS cond-mat.dis-nn cs.DM physics.soc-ph}
} | schuetz2007efficient |
arxiv-2032 | 0712.1167 | Transactional WaveCache: Towards Speculative and Out-of-Order DataFlow Execution of Memory Operations | <|reference_start|>Transactional WaveCache: Towards Speculative and Out-of-Order DataFlow Execution of Memory Operations: The WaveScalar is the first DataFlow Architecture that can efficiently provide the sequential memory semantics required by imperative languages. This work presents an alternative memory ordering mechanism for this architecture, the Transaction WaveCache. Our mechanism maintains the execution order of memory operations within blocks of code, called Waves, but adds the ability to speculatively execute, out-of-order, operations from different waves. This ordering mechanism is inspired by progress in supporting Transactional Memories. Waves are considered as atomic regions and executed as nested transactions. If a wave has finished the execution of all its memory operations, as soon as the previous waves are committed, it can be committed. If a hazard is detected in a speculative Wave, all the following Waves (children) are aborted and re-executed. We evaluate the WaveCache on a set artificial benchmarks. If the benchmark does not access memory often, we could achieve speedups of around 90%. Speedups of 33.1% and 24% were observed on more memory intensive applications, and slowdowns up to 16% arise if memory bandwidth is a bottleneck. For an application full of WAW, WAR and RAW hazards, a speedup of 139.7% was verified.<|reference_end|> | arxiv | @article{marzulo2007transactional,
title={Transactional WaveCache: Towards Speculative and Out-of-Order DataFlow
Execution of Memory Operations},
  author={Leandro A. J. Marzulo, Felipe M. G. Fran\c{c}a and V\'itor Santos
  Costa},
journal={arXiv preprint arXiv:0712.1167},
year={2007},
archivePrefix={arXiv},
eprint={0712.1167},
primaryClass={cs.AR cs.DC}
} | marzulo2007transactional |
arxiv-2033 | 0712.1169 | Opportunistic Relaying in Wireless Networks | <|reference_start|>Opportunistic Relaying in Wireless Networks: Relay networks having $n$ source-to-destination pairs and $m$ half-duplex relays, all operating in the same frequency band in the presence of block fading, are analyzed. This setup has attracted significant attention and several relaying protocols have been reported in the literature. However, most of the proposed solutions require either centrally coordinated scheduling or detailed channel state information (CSI) at the transmitter side. Here, an opportunistic relaying scheme is proposed, which alleviates these limitations. The scheme entails a two-hop communication protocol, in which sources communicate with destinations only through half-duplex relays. The key idea is to schedule at each hop only a subset of nodes that can benefit from \emph{multiuser diversity}. To select the source and destination nodes for each hop, it requires only CSI at receivers (relays for the first hop, and destination nodes for the second hop) and an integer-value CSI feedback to the transmitters. For the case when $n$ is large and $m$ is fixed, it is shown that the proposed scheme achieves a system throughput of $m/2$ bits/s/Hz. In contrast, the information-theoretic upper bound of $(m/2)\log \log n$ bits/s/Hz is achievable only with more demanding CSI assumptions and cooperation between the relays. Furthermore, it is shown that, under the condition that the product of block duration and system bandwidth scales faster than $\log n$, the achievable throughput of the proposed scheme scales as $\Theta ({\log n})$. Notably, this is proven to be the optimal throughput scaling even if centralized scheduling is allowed, thus proving the optimality of the proposed scheme in the scaling law sense.<|reference_end|> | arxiv | @article{cui2007opportunistic,
title={Opportunistic Relaying in Wireless Networks},
author={Shengshan Cui, Alexander M. Haimovich, Oren Somekh and H. Vincent Poor},
journal={arXiv preprint arXiv:0712.1169},
year={2007},
doi={10.1109/TIT.2009.2030435},
archivePrefix={arXiv},
eprint={0712.1169},
primaryClass={cs.IT math.IT}
} | cui2007opportunistic |
arxiv-2034 | 0712.1182 | Cumulative and Averaging Fission of Beliefs | <|reference_start|>Cumulative and Averaging Fission of Beliefs: Belief fusion is the principle of combining separate beliefs or bodies of evidence originating from different sources. Depending on the situation to be modelled, different belief fusion methods can be applied. Cumulative and averaging belief fusion is defined for fusing opinions in subjective logic, and for fusing belief functions in general. The principle of fission is the opposite of fusion, namely to eliminate the contribution of a specific belief from an already fused belief, with the purpose of deriving the remaining belief. This paper describes fission of cumulative belief as well as fission of averaging belief in subjective logic. These operators can for example be applied to belief revision in Bayesian belief networks, where the belief contribution of a given evidence source can be determined as a function of a given fused belief and its other contributing beliefs.<|reference_end|> | arxiv | @article{josang2007cumulative,
title={Cumulative and Averaging Fission of Beliefs},
author={Audun Josang},
journal={arXiv preprint arXiv:0712.1182},
year={2007},
archivePrefix={arXiv},
eprint={0712.1182},
primaryClass={cs.AI cs.LO}
} | josang2007cumulative |
arxiv-2035 | 0712.1189 | Implementation, Compilation, Optimization of Object-Oriented Languages, Programs and Systems - Report on the Workshop ICOOOLPS'2007 at ECOOP'07 | <|reference_start|>Implementation, Compilation, Optimization of Object-Oriented Languages, Programs and Systems - Report on the Workshop ICOOOLPS'2007 at ECOOP'07: ICOOOLPS'2007 was the second edition of the ECOOP-ICOOOLPS workshop. ICOOOLPS intends to bring researchers and practitioners both from academia and industry together, with a spirit of openness, to try and identify and begin to address the numerous and very varied issues of optimization. After a first successful edition, this second one put a stronger emphasis on exchanges and discussions amongst the participants, progressing on the bases set last year in Nantes. The workshop attendance was a success, since the 30-people limit we had set was reached about 2 weeks before the workshop itself. Some of the discussions (e.g. annotations) were so successful that they would required even more time than we were able to dedicate to them. That's one area we plan to further improve for the next edition.<|reference_end|> | arxiv | @article{zendra2007implementation,,
title={Implementation, Compilation, Optimization of Object-Oriented Languages,
Programs and Systems - Report on the Workshop ICOOOLPS'2007 at ECOOP'07},
author={Olivier Zendra (INRIA Lorraine - LORIA), Eric Jul (DIKU), Roland
Ducournau (LIRMM), Etienne Gagnon, Richard E. Jones, Chandra Krintz (RACE
LAB), Philippe Mulet, Jan Vitek (S3L)},
journal={ECOOP 2007 Workshop Reader Springer (Ed.) (2008)},
year={2007},
archivePrefix={arXiv},
eprint={0712.1189},
primaryClass={cs.PL cs.SE}
} | zendra2007implementation, |
arxiv-2036 | 0712.1205 | Lambda-RBAC: Programming with Role-Based Access Control | <|reference_start|>Lambda-RBAC: Programming with Role-Based Access Control: We study mechanisms that permit program components to express role constraints on clients, focusing on programmatic security mechanisms, which permit access controls to be expressed, in situ, as part of the code realizing basic functionality. In this setting, two questions immediately arise: (1) The user of a component faces the issue of safety: is a particular role sufficient to use the component? (2) The component designer faces the dual issue of protection: is a particular role demanded in all execution paths of the component? We provide a formal calculus and static analysis to answer both questions.<|reference_end|> | arxiv | @article{jagadeesan2007lambda-rbac:,
title={Lambda-RBAC: Programming with Role-Based Access Control},
author={Radha Jagadeesan, Alan Jeffrey, Corin Pitcher, James Riely},
journal={Logical Methods in Computer Science, Volume 4, Issue 1 (January 9,
2008) lmcs:1195},
year={2007},
doi={10.2168/LMCS-4(1:2)2008},
archivePrefix={arXiv},
eprint={0712.1205},
primaryClass={cs.PL cs.CR}
} | jagadeesan2007lambda-rbac: |
arxiv-2037 | 0712.1224 | Evaluating the Utility of Anonymized Network Traces for Intrusion Detection | <|reference_start|>Evaluating the Utility of Anonymized Network Traces for Intrusion Detection: Anonymization is the process of removing or hiding sensitive information in logs. Anonymization allows organizations to share network logs while not exposing sensitive information. However, there is an inherent trade off between the amount of information revealed in the log and the usefulness of the log to the client (the utility of a log). There are many anonymization techniques, and there are many ways to anonymize a particular log (that is, which fields to anonymize and how). Different anonymization policies will result in logs with varying levels of utility for analysis. In this paper we explore the effect of different anonymization policies on logs. We provide an empirical analysis of the effect of varying anonymization policies by looking at the number of alerts generated by an Intrusion Detection System. This is the first work to thoroughly evaluate the effect of single field anonymization policies on a data set. Our main contributions are to determine a set of fields that have a large impact on the utility of a log.<|reference_end|> | arxiv | @article{lakkaraju2007evaluating,
title={Evaluating the Utility of Anonymized Network Traces for Intrusion
Detection},
author={Kiran Lakkaraju, Adam Slagell},
journal={arXiv preprint arXiv:0712.1224},
year={2007},
archivePrefix={arXiv},
eprint={0712.1224},
primaryClass={cs.CR}
} | lakkaraju2007evaluating |
arxiv-2038 | 0712.1279 | Kleene, Rogers and Rice Theorems Revisited in C and in Bash | <|reference_start|>Kleene, Rogers and Rice Theorems Revisited in C and in Bash: The recursion theorem in the weak form {e}(z)=x(e,z) (universal function not needed) and in Rogers form {n}(z)={{x}(n)}(z) and Rice theorem are proved a first time using programs in C, and a second time with scripts in Bash.<|reference_end|> | arxiv | @article{caporaso2007kleene,,
title={Kleene, Rogers and Rice Theorems Revisited in C and in Bash},
author={Salvatore Caporaso and Nicola Corriero},
journal={arXiv preprint arXiv:0712.1279},
year={2007},
archivePrefix={arXiv},
eprint={0712.1279},
primaryClass={cs.LO}
} | caporaso2007kleene, |
arxiv-2039 | 0712.1309 | Complex base numeral systems | <|reference_start|>Complex base numeral systems: In this paper will be introduced large, probably complete family of complex base systems, which are 'proper' - for each point of the space there is a representation which is unique for all but some zero measure set. The condition defining this family is the periodicity - we get periodic covering of the plane by fractals in hexagonal-type structure, what can be used for example in image compression. There will be introduced full methodology of analyzing and using this approach - both for the integer part: periodic lattice and the fractional: attractor of some IFS, for which the convex hull or properties like dimension of the boundary can be found analytically. There will be also shown how to generalize this approach to higher dimensions and found some proper systems in dimension 3.<|reference_end|> | arxiv | @article{duda2007complex,
title={Complex base numeral systems},
author={Jarek Duda},
journal={arXiv preprint arXiv:0712.1309},
year={2007},
archivePrefix={arXiv},
eprint={0712.1309},
primaryClass={math.DS cs.DM}
} | duda2007complex |
arxiv-2040 | 0712.1310 | About Algorithm for Transformation of Logic Functions (ATLF) | <|reference_start|>About Algorithm for Transformation of Logic Functions (ATLF): In this article the algorithm for transformation of logic functions which are given by truth tables is considered. The suggested algorithm allows the transformation of many-valued logic functions with the required number of variables and can be looked in this sense as universal.<|reference_end|> | arxiv | @article{cherbanski2007about,
title={About Algorithm for Transformation of Logic Functions (ATLF)},
author={Lev Cherbanski},
journal={arXiv preprint arXiv:0712.1310},
year={2007},
archivePrefix={arXiv},
eprint={0712.1310},
primaryClass={cs.LO cs.AI}
} | cherbanski2007about |
arxiv-2041 | 0712.1337 | Axiomatizing rational power series | <|reference_start|>Axiomatizing rational power series: Iteration semirings are Conway semirings satisfying Conway's group identities. We show that the semirings $\N^{\rat}\llangle \Sigma^* \rrangle$ of rational power series with coefficients in the semiring $\N$ of natural numbers are the free partial iteration semirings. Moreover, we characterize the semirings $\N_\infty^{\rat}\llangle \Sigma^* \rrangle$ as the free semirings in the variety of iteration semirings defined by three additional simple identities, where $\N_\infty$ is the completion of $\N$ obtained by adding a point of infinity. We also show that this latter variety coincides with the variety generated by the complete, or continuous semirings. As a consequence of these results, we obtain that the semirings $\N_\infty^{\rat}\llangle \Sigma^* \rrangle$, equipped with the sum order, are free in the class of symmetric inductive $^*$-semirings. This characterization corresponds to Kozen's axiomatization of regular languages.<|reference_end|> | arxiv | @article{bloom2007axiomatizing,
title={Axiomatizing rational power series},
author={S. L. Bloom, Z. Esik},
journal={arXiv preprint arXiv:0712.1337},
year={2007},
archivePrefix={arXiv},
eprint={0712.1337},
primaryClass={cs.LO cs.DM}
} | bloom2007axiomatizing |
arxiv-2042 | 0712.1339 | Joint Receiver and Transmitter Optimization for Energy-Efficient CDMA Communications | <|reference_start|>Joint Receiver and Transmitter Optimization for Energy-Efficient CDMA Communications: This paper focuses on the cross-layer issue of joint multiuser detection and resource allocation for energy efficiency in wireless CDMA networks. In particular, assuming that a linear multiuser detector is adopted in the uplink receiver, the case considered is that in which each terminal is allowed to vary its transmit power, spreading code, and uplink receiver in order to maximize its own utility, which is defined as the ratio of data throughput to transmit power. Resorting to a game-theoretic formulation, a non-cooperative game for utility maximization is formulated, and it is proved that a unique Nash equilibrium exists, which, under certain conditions, is also Pareto-optimal. Theoretical results concerning the relationship between the problems of SINR maximization and MSE minimization are given, and, resorting to the tools of large system analysis, a new distributed power control algorithm is implemented, based on very little prior information about the user of interest. The utility profile achieved by the active users in a large CDMA system is also computed, and, moreover, the centralized socially optimum solution is analyzed. Considerations on the extension of the proposed framework to a multi-cell scenario are also briefly detailed. Simulation results confirm that the proposed non-cooperative game largely outperforms competing alternatives, and that it exhibits a quite small performance loss with respect to the socially optimum solution, and only in the case in which the users number exceeds the processing gain. Finally, results also show an excellent agreement between the theoretical closed-form formulas based on large system analysis and the outcome of numerical experiments.<|reference_end|> | arxiv | @article{buzzi2007joint,
title={Joint Receiver and Transmitter Optimization for Energy-Efficient CDMA
Communications},
author={Stefano Buzzi and H. Vincent Poor},
journal={arXiv preprint arXiv:0712.1339},
year={2007},
archivePrefix={arXiv},
eprint={0712.1339},
primaryClass={cs.IT cs.GT math.IT}
} | buzzi2007joint |
arxiv-2043 | 0712.1345 | Sequential operators in computability logic | <|reference_start|>Sequential operators in computability logic: Computability logic (CL) (see http://www.cis.upenn.edu/~giorgi/cl.html) is a semantical platform and research program for redeveloping logic as a formal theory of computability, as opposed to the formal theory of truth which it has more traditionally been. Formulas in CL stand for (interactive) computational problems, understood as games between a machine and its environment; logical operators represent operations on such entities; and "truth" is understood as existence of an effective solution, i.e., of an algorithmic winning strategy. The formalism of CL is open-ended, and may undergo series of extensions as the study of the subject advances. The main groups of operators on which CL has been focused so far are the parallel, choice, branching, and blind operators. The present paper introduces a new important group of operators, called sequential. The latter come in the form of sequential conjunction and disjunction, sequential quantifiers, and sequential recurrences. As the name may suggest, the algorithmic intuitions associated with this group are those of sequential computations, as opposed to the intuitions of parallel computations associated with the parallel group of operations: playing a sequential combination of games means playing its components in a sequential fashion, one after one. The main technical result of the present paper is a sound and complete axiomatization of the propositional fragment of computability logic whose vocabulary, together with negation, includes all three -- parallel, choice and sequential -- sorts of conjunction and disjunction. An extension of this result to the first-order level is also outlined.<|reference_end|> | arxiv | @article{japaridze2007sequential,
title={Sequential operators in computability logic},
author={Giorgi Japaridze},
journal={Information and Computation 206 (2008), pp. 1443-1475},
year={2007},
doi={10.1016/j.ic.2008.10.001},
archivePrefix={arXiv},
eprint={0712.1345},
primaryClass={cs.LO cs.AI math.LO}
} | japaridze2007sequential |
arxiv-2044 | 0712.1359 | Borel Ranks and Wadge Degrees of Context Free Omega Languages | <|reference_start|>Borel Ranks and Wadge Degrees of Context Free Omega Languages: We show that, from a topological point of view, considering the Borel and the Wadge hierarchies, 1-counter B\"uchi automata have the same accepting power than Turing machines equipped with a B\"uchi acceptance condition. In particular, for every non null recursive ordinal alpha, there exist some Sigma^0_alpha-complete and some Pi^0_alpha-complete omega context free languages accepted by 1-counter B\"uchi automata, and the supremum of the set of Borel ranks of context free omega languages is the ordinal gamma^1_2 which is strictly greater than the first non recursive ordinal. This very surprising result gives answers to questions of H. Lescow and W. Thomas [Logical Specifications of Infinite Computations, In:"A Decade of Concurrency", LNCS 803, Springer, 1994, p. 583-621].<|reference_end|> | arxiv | @article{finkel2007borel,
title={Borel Ranks and Wadge Degrees of Context Free Omega Languages},
author={Olivier Finkel (ELM)},
journal={Mathematical Structures in Computer Science 16 (5) (2006) 813-840},
year={2007},
archivePrefix={arXiv},
eprint={0712.1359},
primaryClass={cs.LO cs.GT math.LO}
} | finkel2007borel |
arxiv-2045 | 0712.1363 | Undecidable Problems About Timed Automata | <|reference_start|>Undecidable Problems About Timed Automata: We solve some decision problems for timed automata which were recently raised by S. Tripakis in [ Folk Theorems on the Determinization and Minimization of Timed Automata, in the Proceedings of the International Workshop FORMATS'2003, LNCS, Volume 2791, p. 182-188, 2004 ] and by E. Asarin in [ Challenges in Timed Languages, From Applied Theory to Basic Theory, Bulletin of the EATCS, Volume 83, p. 106-120, 2004 ]. In particular, we show that one cannot decide whether a given timed automaton is determinizable or whether the complement of a timed regular language is timed regular. We show that the problem of the minimization of the number of clocks of a timed automaton is undecidable. It is also undecidable whether the shuffle of two timed regular languages is timed regular. We show that in the case of timed B\"uchi automata accepting infinite timed words some of these problems are Pi^1_1-hard, hence highly undecidable (located beyond the arithmetical hierarchy).<|reference_end|> | arxiv | @article{finkel2007undecidable,
title={Undecidable Problems About Timed Automata},
author={Olivier Finkel (ELM)},
journal={Dans Proceedings of the 4th International Conference on Formal
Modelling and Analysis of Timed Systems - FORMATS'06, France (2006)},
year={2007},
archivePrefix={arXiv},
eprint={0712.1363},
primaryClass={cs.LO cs.CC math.LO}
} | finkel2007undecidable |
arxiv-2046 | 0712.1365 | Population stratification using a statistical model on hypergraphs | <|reference_start|>Population stratification using a statistical model on hypergraphs: Population stratification is a problem encountered in several areas of biology and public health. We tackle this problem by mapping a population and its elements attributes into a hypergraph, a natural extension of the concept of graph or network to encode associations among any number of elements. On this hypergraph, we construct a statistical model reflecting our intuition about how the elements attributes can emerge from a postulated population structure. Finally, we introduce the concept of stratification representativeness as a mean to identify the simplest stratification already containing most of the information about the population structure. We demonstrate the power of this framework stratifying an animal and a human population based on phenotypic and genotypic properties, respectively.<|reference_end|> | arxiv | @article{vazquez2007population,
title={Population stratification using a statistical model on hypergraphs},
author={Alexei Vazquez},
journal={Phys. Rev. E 77, 066106 (2008)},
year={2007},
doi={10.1103/PhysRevE.77.066106},
archivePrefix={arXiv},
eprint={0712.1365},
primaryClass={q-bio.PE cs.AI physics.data-an}
} | vazquez2007population |
arxiv-2047 | 0712.1400 | Birthday attack to discrete logarithm | <|reference_start|>Birthday attack to discrete logarithm: The discrete logarithm in a finite group of large order has been widely applied in public key cryptosystem. In this paper, we will present a probabilistic algorithm for discrete logarithm.<|reference_end|> | arxiv | @article{li2007birthday,
title={Birthday attack to discrete logarithm},
author={An-Ping Li},
journal={arXiv preprint arXiv:0712.1400},
year={2007},
archivePrefix={arXiv},
eprint={0712.1400},
primaryClass={cs.CR}
} | li2007birthday |
arxiv-2048 | 0712.1402 | Reconstruction of Markov Random Fields from Samples: Some Easy Observations and Algorithms | <|reference_start|>Reconstruction of Markov Random Fields from Samples: Some Easy Observations and Algorithms: Markov random fields are used to model high dimensional distributions in a number of applied areas. Much recent interest has been devoted to the reconstruction of the dependency structure from independent samples from the Markov random fields. We analyze a simple algorithm for reconstructing the underlying graph defining a Markov random field on $n$ nodes and maximum degree $d$ given observations. We show that under mild non-degeneracy conditions it reconstructs the generating graph with high probability using $\Theta(d \epsilon^{-2}\delta^{-4} \log n)$ samples where $\epsilon,\delta$ depend on the local interactions. For most local interaction $\eps,\delta$ are of order $\exp(-O(d))$. Our results are optimal as a function of $n$ up to a multiplicative constant depending on $d$ and the strength of the local interactions. Our results seem to be the first results for general models that guarantee that {\em the} generating model is reconstructed. Furthermore, we provide explicit $O(n^{d+2} \epsilon^{-2}\delta^{-4} \log n)$ running time bound. In cases where the measure on the graph has correlation decay, the running time is $O(n^2 \log n)$ for all fixed $d$. We also discuss the effect of observing noisy samples and show that as long as the noise level is low, our algorithm is effective. On the other hand, we construct an example where large noise implies non-identifiability even for generic noise and interactions. Finally, we briefly show that in some simple cases, models with hidden nodes can also be recovered.<|reference_end|> | arxiv | @article{bresler2007reconstruction,
title={Reconstruction of Markov Random Fields from Samples: Some Easy
Observations and Algorithms},
author={Guy Bresler, Elchanan Mossel, Allan Sly},
journal={arXiv preprint arXiv:0712.1402},
year={2007},
archivePrefix={arXiv},
eprint={0712.1402},
primaryClass={cs.CC cs.LG}
} | bresler2007reconstruction |
arxiv-2049 | 0712.1442 | On types of growth for graph-different permutations | <|reference_start|>On types of growth for graph-different permutations: We consider an infinite graph G whose vertex set is the set of natural numbers and adjacency depends solely on the difference between vertices. We study the largest cardinality of a set of permutations of [n] any pair of which differ somewhere in a pair of adjacent vertices of G and determine it completely in an interesting special case. We give estimates for other cases and compare the results in case of complementary graphs. We also explore the close relationship between our problem and the concept of Shannon capacity "within a given type".<|reference_end|> | arxiv | @article{körner2007on,
title={On types of growth for graph-different permutations},
author={J\'anos K\"orner and G\'abor Simonyi and Blerina Sinaimeri},
journal={arXiv preprint arXiv:0712.1442},
year={2007},
archivePrefix={arXiv},
eprint={0712.1442},
primaryClass={math.CO cs.IT math.IT}
} | körner2007on |
arxiv-2050 | 0712.1499 | On the computational complexity of cut-reduction | <|reference_start|>On the computational complexity of cut-reduction: Using appropriate notation systems for proofs, cut-reduction can often be rendered feasible on these notations, and explicit bounds can be given. Developing a suitable notation system for Bounded Arithmetic, and applying these bounds, all the known results on definable functions of certain such theories can be reobtained in a uniform way.<|reference_end|> | arxiv | @article{aehlig2007on,
title={On the computational complexity of cut-reduction},
author={Klaus Aehlig, Arnold Beckmann},
journal={arXiv preprint arXiv:0712.1499},
year={2007},
number={CSR15-2007},
archivePrefix={arXiv},
eprint={0712.1499},
primaryClass={cs.LO cs.CC}
} | aehlig2007on |
arxiv-2051 | 0712.1519 | Discrete Nondeterminism and Nash Equilibria for Strategy-Based Games | <|reference_start|>Discrete Nondeterminism and Nash Equilibria for Strategy-Based Games: Several notions of game enjoy a Nash-like notion of equilibrium without guarantee of existence. There are different ways of weakening a definition of Nash-like equilibrium in order to guarantee the existence of a weakened equilibrium. Nash's approach to the problem for strategic games is probabilistic, \textit{i.e.} continuous, and static. CP and BR approaches for CP and BR games are discrete and dynamic. This paper proposes an approach that lies between those two different approaches: a discrete and static approach. multi strategic games are introduced as a formalism that is able to express both sequential and simultaneous decision-making, which promises a good modelling power. multi strategic games are a generalisation of strategic games and sequential graph games that still enjoys a Cartesian product structure, \textit{i.e.} where agent actually choose their strategies. A pre-fixed point result allows guaranteeing existence of discrete and non deterministic equilibria. On the one hand, these equilibria can be computed with polynomial (low) complexity. On the other hand, they are effective in terms of recommendation, as shown by a numerical example.<|reference_end|> | arxiv | @article{roux2007discrete,
title={Discrete Nondeterminism and Nash Equilibria for Strategy-Based Games},
author={St\'ephane Le Roux (LIP)},
journal={arXiv preprint arXiv:0712.1519},
year={2007},
archivePrefix={arXiv},
eprint={0712.1519},
primaryClass={cs.GT}
} | roux2007discrete |
arxiv-2052 | 0712.1521 | Graphs and Path Equilibria | <|reference_start|>Graphs and Path Equilibria: The quest for optimal/stable paths in graphs has gained attention in a few practical or theoretical areas. To take part in this quest this chapter adopts an equilibrium-oriented approach that is abstract and general: it works with (quasi-arbitrary) arc-labelled digraphs, and it assumes very little about the structure of the sought paths and the definition of equilibrium, \textit{i.e.} optimality/stability. In this setting, this chapter presents a sufficient condition for equilibrium existence for every graph; it also presents a necessary condition for equilibrium existence for every graph. The necessary condition does not imply the sufficient condition a priori. However, the chapter pinpoints their logical difference and thus identifies what work remains to be done. Moreover, the necessary and the sufficient conditions coincide when the definition of optimality relates to a total order, which provides a full-equivalence property. These results are applied to network routing.<|reference_end|> | arxiv | @article{roux2007graphs,
title={Graphs and Path Equilibria},
author={St\'ephane Le Roux (LIP)},
journal={arXiv preprint arXiv:0712.1521},
year={2007},
archivePrefix={arXiv},
eprint={0712.1521},
primaryClass={cs.GT}
} | roux2007graphs |
arxiv-2053 | 0712.1529 | Ontology and Formal Semantics - Integration Overdue | <|reference_start|>Ontology and Formal Semantics - Integration Overdue: In this note we suggest that difficulties encountered in natural language semantics are, for the most part, due to the use of mere symbol manipulation systems that are devoid of any content. In such systems, where there is hardly any link with our common-sense view of the world, and it is quite difficult to envision how one can formally account for the considerable amount of content that is often implicit, but almost never explicitly stated in our everyday discourse. The solution, in our opinion, is a compositional semantics grounded in an ontology that reflects our commonsense view of the world and the way we talk about it in ordinary language. In the compositional logic we envision there are ontological (or first-intension) concepts, and logical (or second-intension) concepts, and where the ontological concepts include not only Davidsonian events, but other abstract objects as well (e.g., states, processes, properties, activities, attributes, etc.) It will be demonstrated here that in such a framework, a number of challenges in the semantics of natural language (e.g., metonymy, intensionality, metaphor, etc.) can be properly and uniformly addressed.<|reference_end|> | arxiv | @article{saba2007ontology,
title={Ontology and Formal Semantics - Integration Overdue},
author={Walid S. Saba},
journal={arXiv preprint arXiv:0712.1529},
year={2007},
archivePrefix={arXiv},
eprint={0712.1529},
primaryClass={cs.AI cs.CL}
} | saba2007ontology |
arxiv-2054 | 0712.1532 | Hard constraint satisfaction problems have hard gaps at location 1 | <|reference_start|>Hard constraint satisfaction problems have hard gaps at location 1: An instance of Max CSP is a finite collection of constraints on a set of variables, and the goal is to assign values to the variables that maximises the number of satisfied constraints. Max CSP captures many well-known problems (such as Max k-SAT and Max Cut) and is consequently NP-hard. Thus, it is natural to study how restrictions on the allowed constraint types (or constraint languages) affect the complexity and approximability of Max CSP. The PCP theorem is equivalent to the existence of a constraint language for which Max CSP has a hard gap at location 1, i.e. it is NP-hard to distinguish between satisfiable instances and instances where at most some constant fraction of the constraints are satisfiable. All constraint languages, for which the CSP problem (i.e., the problem of deciding whether all constraints can be satisfied) is currently known to be NP-hard, have a certain algebraic property. We prove that any constraint language with this algebraic property makes Max CSP have a hard gap at location 1 which, in particular, implies that such problems cannot have a PTAS unless P = NP. We then apply this result to Max CSP restricted to a single constraint type; this class of problems contains, for instance, Max Cut and Max DiCut. Assuming P $\neq$ NP, we show that such problems do not admit PTAS except in some trivial cases. Our results hold even if the number of occurrences of each variable is bounded by a constant. We use these results to partially answer open questions and strengthen results by Engebretsen et al. [Theor. Comput. Sci., 312 (2004), pp. 17--45], Feder et al. [Discrete Math., 307 (2007), pp. 386--392], Krokhin and Larose [Proc. Principles and Practice of Constraint Programming (2005), pp. 388--402], and Jonsson and Krokhin [J. Comput. System Sci., 73 (2007), pp. 
691--702]<|reference_end|> | arxiv | @article{jonsson2007hard,
title={Hard constraint satisfaction problems have hard gaps at location 1},
author={Peter Jonsson and Andrei Krokhin and Fredrik Kuivinen},
journal={arXiv preprint arXiv:0712.1532},
year={2007},
archivePrefix={arXiv},
eprint={0712.1532},
primaryClass={cs.CC}
} | jonsson2007hard |
arxiv-2055 | 0712.1549 | Dynamic Multilevel Graph Visualization | <|reference_start|>Dynamic Multilevel Graph Visualization: We adapt multilevel, force-directed graph layout techniques to visualizing dynamic graphs in which vertices and edges are added and removed in an online fashion (i.e., unpredictably). We maintain multiple levels of coarseness using a dynamic, randomized coarsening algorithm. To ensure the vertices follow smooth trajectories, we employ dynamics simulation techniques, treating the vertices as point particles. We simulate fine and coarse levels of the graph simultaneously, coupling the dynamics of adjacent levels. Projection from coarser to finer levels is adaptive, with the projection determined by an affine transformation that evolves alongside the graph layouts. The result is a dynamic graph visualizer that quickly and smoothly adapts to changes in a graph.<|reference_end|> | arxiv | @article{veldhuizen2007dynamic,
title={Dynamic Multilevel Graph Visualization},
author={Todd L. Veldhuizen},
journal={arXiv preprint arXiv:0712.1549},
year={2007},
archivePrefix={arXiv},
eprint={0712.1549},
primaryClass={cs.GR cs.DM}
} | veldhuizen2007dynamic |
arxiv-2056 | 0712.1609 | Distributed Consensus Algorithms in Sensor Networks: Quantized Data and Random Link Failures | <|reference_start|>Distributed Consensus Algorithms in Sensor Networks: Quantized Data and Random Link Failures: The paper studies the problem of distributed average consensus in sensor networks with quantized data and random link failures. To achieve consensus, dither (small noise) is added to the sensor states before quantization. When the quantizer range is unbounded (countable number of quantizer levels), stochastic approximation shows that consensus is asymptotically achieved with probability one and in mean square to a finite random variable. We show that the meansquared error (m.s.e.) can be made arbitrarily small by tuning the link weight sequence, at a cost of the convergence rate of the algorithm. To study dithered consensus with random links when the range of the quantizer is bounded, we establish uniform boundedness of the sample paths of the unbounded quantizer. This requires characterization of the statistical properties of the supremum taken over the sample paths of the state of the quantizer. This is accomplished by splitting the state vector of the quantizer in two components: one along the consensus subspace and the other along the subspace orthogonal to the consensus subspace. The proofs use maximal inequalities for submartingale and supermartingale sequences. From these, we derive probability bounds on the excursions of the two subsequences, from which probability bounds on the excursions of the quantizer state vector follow. The paper shows how to use these probability bounds to design the quantizer parameters and to explore tradeoffs among the number of quantizer levels, the size of the quantization steps, the desired probability of saturation, and the desired level of accuracy $\epsilon$ away from consensus. 
Finally, the paper illustrates the quantizer design with a numerical study.<|reference_end|> | arxiv | @article{kar2007distributed,
title={Distributed Consensus Algorithms in Sensor Networks: Quantized Data and
Random Link Failures},
author={Soummya Kar and Jose M. F. Moura},
journal={arXiv preprint arXiv:0712.1609},
year={2007},
archivePrefix={arXiv},
eprint={0712.1609},
primaryClass={cs.MA cs.IT math.IT}
} | kar2007distributed |
arxiv-2057 | 0712.1655 | Virtual Laboratories and Virtual Worlds | <|reference_start|>Virtual Laboratories and Virtual Worlds: Since we cannot put stars in a laboratory, astrophysicists had to wait till the invention of computers before becoming laboratory scientists. For half a century now, we have been conducting experiments in our virtual laboratories. However, we ourselves have remained behind the keyboard, with the screen of the monitor separating us from the world we are simulating. Recently, 3D on-line technology, developed first for games but now deployed in virtual worlds like Second Life, is beginning to make it possible for astrophysicists to enter their virtual labs themselves, in virtual form as avatars. This has several advantages, from new possibilities to explore the results of the simulations to a shared presence in a virtual lab with remote collaborators on different continents. I will report my experiences with the use of Qwaq Forums, a virtual world developed by a new company (see http://www.qwaq.com)<|reference_end|> | arxiv | @article{hut2007virtual,
title={Virtual Laboratories and Virtual Worlds},
author={Piet Hut (IAS, Princeton)},
journal={arXiv preprint arXiv:0712.1655},
year={2007},
doi={10.1017/S1743921308016153},
archivePrefix={arXiv},
eprint={0712.1655},
primaryClass={astro-ph cs.HC physics.comp-ph}
} | hut2007virtual |
arxiv-2058 | 0712.1658 | Program algebra with a jump-shift instruction | <|reference_start|>Program algebra with a jump-shift instruction: We study sequential programs that are instruction sequences with jump-shift instructions in the setting of PGA (ProGram Algebra). Jump-shift instructions preceding a jump instruction increase the position to jump to. The jump-shift instruction is not found in programming practice. Its merit is that the expressive power of PGA extended with the jump-shift instruction, is not reduced if the reach of jump instructions is bounded. This is used to show that there exists a finite-state execution mechanism that by making use of a counter can produce each finite-state thread from some program that is a finite or periodic infinite sequence of instructions from a finite set.<|reference_end|> | arxiv | @article{bergstra2007program,
title={Program algebra with a jump-shift instruction},
author={J. A. Bergstra, C. A. Middelburg},
journal={Journal of Applied Logic, 6(4):553--563, 2008},
year={2007},
doi={10.1016/j.jal.2008.07.001},
number={PRG0711},
archivePrefix={arXiv},
eprint={0712.1658},
primaryClass={cs.PL}
} | bergstra2007program |
arxiv-2059 | 0712.1659 | Non-linear and Linear Broadcasting with QoS Requirements: Tractable Approaches for Bounded Channel Uncertainties | <|reference_start|>Non-linear and Linear Broadcasting with QoS Requirements: Tractable Approaches for Bounded Channel Uncertainties: We consider the downlink of a cellular system in which the base station employs multiple transmit antennas, each receiver has a single antenna, and the users specify. We consider communication schemes in which the users have certain Quality of Service (QoS) requirements. We study the design of robust broadcasting schemes that minimize the transmission power necessary to guarantee that the QoS requirements are satisfied for all channels within bounded uncertainty regions around the transmitter's estimate of each user's channel. Each user's QoS requirement is formulated as a constraint on the mean square error (MSE) in its received signal, and we show that these MSE constraints imply constraints on the received SINR. Using the MSE constraints, we present a unified design approach for robust linear and non-linear transceivers with QoS requirements. The proposed designs overcome the limitations of existing approaches that provide conservative designs or are only applicable to the case of linear precoding. Furthermore, we provide computationally-efficient design formulations for a rather general model of channel uncertainty that subsumes many natural choices for the uncertainty region. We also consider the problem of the robust counterpart to precoding schemes that maximize the fidelity of the weakest user's signal subject to a power constraint. For this problem, we provide quasi-convex formulations, for both linear and non-linear transceivers, that can be efficiently solved using a one-dimensional bisection search. 
Our numerical results demonstrate that in the presence of CSI uncertainty, the proposed designs provide guarantees for a larger range of QoS requirements than the existing approaches, and require less transmission power in providing these guarantees.<|reference_end|> | arxiv | @article{shenouda2007non-linear,
title={Non-linear and Linear Broadcasting with QoS Requirements: Tractable
Approaches for Bounded Channel Uncertainties},
author={Michael Botros Shenouda and Timothy N. Davidson},
journal={arXiv preprint arXiv:0712.1659},
year={2007},
archivePrefix={arXiv},
eprint={0712.1659},
primaryClass={cs.IT math.IT}
} | shenouda2007non-linear |
arxiv-2060 | 0712.1662 | Link Scheduling in STDMA Wireless Networks: A Line Graph Approach | <|reference_start|>Link Scheduling in STDMA Wireless Networks: A Line Graph Approach: We consider point to point link scheduling in Spatial Time Division Multiple Access (STDMA) wireless networks under the physical interference model. We propose a novel link scheduling algorithm based on a line graph representation of the network, by embedding the interferences between pairs of nodes into the edge weights of the line graph. Our algorithm achieves lower schedule length and lower run time complexity than existing algorithms.<|reference_end|> | arxiv | @article{kumar2007link,
title={Link Scheduling in STDMA Wireless Networks: A Line Graph Approach},
author={N. Praneeth Kumar and Ashutosh Deepak Gore and Abhay Karandikar},
journal={arXiv preprint arXiv:0712.1662},
year={2007},
archivePrefix={arXiv},
eprint={0712.1662},
primaryClass={cs.NI}
} | kumar2007link |
arxiv-2061 | 0712.1759 | A Web-based System for Observing and Analyzing Computer Mediated Communications | <|reference_start|>A Web-based System for Observing and Analyzing Computer Mediated Communications: Tracking data of user's activities resulting from Computer Mediated Communication (CMC) tools (forum, chat, etc.) is often carried out in an ad-hoc manner, which either confines the reusability of data in different purposes or makes data exploitation difficult. Our research works are biased toward methodological challenges involved in designing and developing a generic system for tracking user's activities while interacting with asynchronous communication tools like discussion forums. We present in this paper, an approach for building a Web-based system for observing and analyzing user activity on any type of discussion forums.<|reference_end|> | arxiv | @article{may2007a,
title={A Web-based System for Observing and Analyzing Computer Mediated
Communications},
author={Madeth May (LIESP) and S\'ebastien George (LIESP) and Patrick
Pr\'ev\^ot (LIESP)},
journal={Dans Proceedings of the IEEE/WIC/ACM International Conference on
Web Intelligence (WI 2006) - IEEE/WIC/ACM International Conference on Web
Intelligence (WI 2006, Hong Kong : Chine (2006)},
year={2007},
archivePrefix={arXiv},
eprint={0712.1759},
primaryClass={cs.HC}
} | may2007a |
arxiv-2062 | 0712.1765 | Solving Simple Stochastic Games with Few Random Vertices | <|reference_start|>Solving Simple Stochastic Games with Few Random Vertices: Simple stochastic games are two-player zero-sum stochastic games with turn-based moves, perfect information, and reachability winning conditions. We present two new algorithms computing the values of simple stochastic games. Both of them rely on the existence of optimal permutation strategies, a class of positional strategies derived from permutations of the random vertices. The "permutation-enumeration" algorithm performs an exhaustive search among these strategies, while the "permutation-improvement" algorithm is based on successive improvements, \`a la Hoffman-Karp. Our algorithms improve previously known algorithms in several aspects. First they run in polynomial time when the number of random vertices is fixed, so the problem of solving simple stochastic games is fixed-parameter tractable when the parameter is the number of random vertices. Furthermore, our algorithms do not require the input game to be transformed into a stopping game. Finally, the permutation-enumeration algorithm does not use linear programming, while the permutation-improvement algorithm may run in polynomial time.<|reference_end|> | arxiv | @article{gimbert2007solving,
title={Solving Simple Stochastic Games with Few Random Vertices},
author={Hugo Gimbert (LaBRI) and Florian Horn (LIAFA, Cwi)},
journal={Logical Methods in Computer Science, Volume 5, Issue 2 (May 25,
2009) lmcs:1119},
year={2007},
doi={10.2168/LMCS-5(2:9)2009},
archivePrefix={arXiv},
eprint={0712.1765},
primaryClass={cs.GT}
} | gimbert2007solving |
arxiv-2063 | 0712.1768 | Conceptions et usages des plates-formes de formation, Revue Sciences et Technologies de l'Information et de la Communication pour l'\'Education et la Formation | <|reference_start|>Conceptions et usages des plates-formes de formation, Revue Sciences et Technologies de l'Information et de la Communication pour l'\'Education et la Formation: Educative platforms are at the heart of the development of online education. They can not only be reduced to technological aspects. Underlying models impact teaching and learning from the preparing of lessons to the learning sessions. Research related to these platforms are numerous and their stakes are important. For these reasons, we launched a call to a special issue on "Designs and uses of educative platforms" An educative platform is a computer system designed to automate various functions relating to the organization of the course, to the management of their content, to the monitoring of learners and supervision of persons in charge of various formations (Office de la langue fran\c{c}aise, 2005). So educative platforms are Learning Management Systems (LMS) which are specific to education contexts.<|reference_end|> | arxiv | @article{george2007conceptions,
title={Conceptions et usages des plates-formes de formation, Revue Sciences et
Technologies de l'Information et de la Communication pour l'\'Education et la
Formation},
author={S\'ebastien George (LIESP) and Alain Derycke (TRIGONE)},
journal={Sciences et Technologies de l'Information et de la Communication
pour l'Education et la Formation 12 (2006) 51-64},
year={2007},
archivePrefix={arXiv},
eprint={0712.1768},
primaryClass={cs.HC}
} | george2007conceptions |
arxiv-2064 | 0712.1775 | On Computation of Error Locations and Values in Hermitian Codes | <|reference_start|>On Computation of Error Locations and Values in Hermitian Codes: We obtain a technique to reduce the computational complexity associated with decoding of Hermitian codes. In particular, we propose a method to compute the error locations and values using an uni-variate error locator and an uni-variate error evaluator polynomial. To achieve this, we introduce the notion of Semi-Erasure Decoding of Hermitian codes and prove that decoding of Hermitian codes can always be performed using semi-erasure decoding. The central results are: * Searching for error locations require evaluating an univariate error locator polynomial over $q^2$ points as in Chien search for Reed-Solomon codes. * Forney's formula for error value computation in Reed-Solomon codes can directly be applied to compute the error values in Hermitian codes. The approach develops from the idea that transmitting a modified form of the information may be more efficient that the information itself.<|reference_end|> | arxiv | @article{agarwal2007on,
title={On Computation of Error Locations and Values in Hermitian Codes},
author={Rachit Agarwal},
journal={arXiv preprint arXiv:0712.1775},
year={2007},
archivePrefix={arXiv},
eprint={0712.1775},
primaryClass={cs.IT math.IT}
} | agarwal2007on |
arxiv-2065 | 0712.1800 | Conception d'outils de communication sp\'ecifiques au contexte \'educatif | <|reference_start|>Conception d'outils de communication sp\'ecifiques au contexte \'educatif: In a distance learning context, providing usual communication tools (forum, chat, ...) is not always enough to create efficient interactions between learners and to favour collective knowledge building. A solution consists in setting-up collective activities which encourage learners to communicate. But, even in that case, tools can sometimes become a barrier to communication. We present in this paper examples of specific tools that are designed in order to favour and to guide communications in an educational context, but also to foster interactions during learning activities that are not inherently collaborative. We describe synchronous communication tools (semi-structured chat), asynchronous tools (temporally structured forum, contextual forum) and a system which promotes mutual aid between learners.<|reference_end|> | arxiv | @article{george2007conception,
title={Conception d'outils de communication sp\'ecifiques au contexte
\'educatif},
author={S\'ebastien George (LIESP) and C\'ecile Bothorel (TECH/EASY)},
journal={Sciences et Technologies de l'Information et de la Communication
pour l'Education et la Formation 13 (2007) 317-344},
year={2007},
archivePrefix={arXiv},
eprint={0712.1800},
primaryClass={cs.HC}
} | george2007conception |
arxiv-2066 | 0712.1803 | Tournament MAC with Constant Size Congestion Window for WLAN | <|reference_start|>Tournament MAC with Constant Size Congestion Window for WLAN: In the context of radio distributed networks, we present a generalized approach for the Medium Access Control (MAC) with fixed congestion window. Our protocol is quite simple to analyze and can be used in a lot of different situations. We give mathematical evidence showing that our performance is tight, in the sense that no protocol with fixed congestion window can do better. We also place ourselves in the WiFi/WiMAX framework, and show experimental results enlightening collision reduction of 14% to 21% compared to the best known other methods. We show channel capacity improvement, and fairness considerations.<|reference_end|> | arxiv | @article{galtier2007tournament,
title={Tournament MAC with Constant Size Congestion Window for WLAN},
author={Jerome Galtier (INRIA Sophia Antipolis)},
journal={arXiv preprint arXiv:0712.1803},
year={2007},
archivePrefix={arXiv},
eprint={0712.1803},
primaryClass={cs.NI}
} | galtier2007tournament |
arxiv-2067 | 0712.1854 | Back-of-the-Envelope Computation of Throughput Distributions in CSMA Wireless Networks | <|reference_start|>Back-of-the-Envelope Computation of Throughput Distributions in CSMA Wireless Networks: This work started out with our accidental discovery of a pattern of throughput distributions among links in IEEE 802.11 networks from experimental results. This pattern gives rise to an easy computation method, which we term back-of-the-envelop (BoE) computation, because for many network configurations, very accurate results can be obtained within minutes, if not seconds, by simple hand computation. BoE beats prior methods in terms of both speed and accuracy. While the computation procedure of BoE is simple, explaining why it works is by no means trivial. Indeed the majority of our investigative efforts have been devoted to the construction of a theory to explain BoE. This paper models an ideal CSMA network as a set of interacting on-off telegraph processes. In developing the theory, we discovered a number of analytical techniques and observations that have eluded prior research, such as that the carrier-sensing interactions among links in an ideal CSMA network result in a system state evolution that is time-reversible; and that the probability distribution of the system state is insensitive to the distributions of the "on" and "off" durations given their means, and is a Markov random field. We believe these theoretical frameworks are useful not just for explaining BoE, but could also be a foundation for a fundamental understanding of how links in CSMA networks interact. Last but not least, because of their basic nature, we surmise that some of the techniques and results developed in this paper may be applicable to not just CSMA networks, but also to other physical and engineering systems consisting of entities interacting with each other in time and space.<|reference_end|> | arxiv | @article{liew2007back-of-the-envelope,
title={Back-of-the-Envelope Computation of Throughput Distributions in CSMA
Wireless Networks},
author={S.C. Liew, C. Kai, J. Leung, B. Wong},
journal={arXiv preprint arXiv:0712.1854},
year={2007},
archivePrefix={arXiv},
eprint={0712.1854},
primaryClass={cs.NI cs.PF}
} | liew2007back-of-the-envelope |
arxiv-2068 | 0712.1863 | Constructing Bio-molecular Databases on a DNA-based Computer | <|reference_start|>Constructing Bio-molecular Databases on a DNA-based Computer: Codd [Codd 1970] wrote the first paper in which the model of a relational database was proposed. Adleman [Adleman 1994] wrote the first paper in which DNA strands in a test tube were used to solve an instance of the Hamiltonian path problem. From [Adleman 1994], it is obviously indicated that for storing information in molecules of DNA allows for an information density of approximately 1 bit per cubic nm (nanometer) and a dramatic improvement over existing storage media such as video tape which store information at a density of approximately 1 bit per 1012 cubic nanometers. This paper demonstrates that biological operations can be applied to construct bio-molecular databases where data records in relational tables are encoded as DNA strands. In order to achieve the goal, DNA algorithms are proposed to perform eight operations of relational algebra (calculus) on bio-molecular relational databases, which include Cartesian product, union, set difference, selection, projection, intersection, join and division. Furthermore, this work presents clear evidence of the ability of molecular computing to perform data retrieval operations on bio-molecular relational databases.<|reference_end|> | arxiv | @article{chang2007constructing,
title={Constructing Bio-molecular Databases on a DNA-based Computer},
author={Weng-Long Chang, Michael (Shan-Hui) Ho, and Minyi Guo},
journal={arXiv preprint arXiv:0712.1863},
year={2007},
archivePrefix={arXiv},
eprint={0712.1863},
primaryClass={cs.NE cs.DB q-bio.OT}
} | chang2007constructing |
arxiv-2069 | 0712.1869 | Two-connected graphs with prescribed three-connected components | <|reference_start|>Two-connected graphs with prescribed three-connected components: We adapt the classical 3-decomposition of any 2-connected graph to the case of simple graphs (no loops or multiple edges). By analogy with the block-cutpoint tree of a connected graph, we deduce from this decomposition a bicolored tree tc(g) associated with any 2-connected graph g, whose white vertices are the 3-components of g (3-connected components or polygons) and whose black vertices are bonds linking together these 3-components, arising from separating pairs of vertices of g. Two fundamental relationships on graphs and networks follow from this construction. The first one is a dissymmetry theorem which leads to the expression of the class B=B(F) of 2-connected graphs, all of whose 3-connected components belong to a given class F of 3-connected graphs, in terms of various rootings of B. The second one is a functional equation which characterizes the corresponding class R=R(F) of two-pole networks all of whose 3-connected components are in F. All the rootings of B are then expressed in terms of F and R. There follow corresponding identities for all the associated series, in particular the edge index series. Numerous enumerative consequences are discussed.<|reference_end|> | arxiv | @article{gagarin2007two-connected,
title={Two-connected graphs with prescribed three-connected components},
author={Andrei Gagarin (1), Gilbert Labelle (2), Pierre Leroux (2) and Timothy
Walsh (2) ((1) Acadia Un. Wolfville N. S. Canada, (2) LaCIM UQAM Montreal Qc
Canada)},
journal={Adv. in Appl. Math. 43 (2009), no. 1, pp. 46-74},
year={2007},
doi={10.1016/j.aam.2009.01.002},
archivePrefix={arXiv},
eprint={0712.1869},
primaryClass={math.CO cs.DM}
} | gagarin2007two-connected |
arxiv-2070 | 0712.1875 | Critique du rapport signal \`a bruit en th\'eorie de l'information -- A critical appraisal of the signal to noise ratio in information theory | <|reference_start|>Critique du rapport signal \`a bruit en th\'eorie de l'information -- A critical appraisal of the signal to noise ratio in information theory: The signal to noise ratio, which plays such an important role in information theory, is shown to become pointless in digital communications where - symbols are modulating carriers, which are solutions of linear differential equations with polynomial coefficients, - demodulations is achieved thanks to new algebraic estimation techniques. Operational calculus, differential algebra and nonstandard analysis are the main mathematical tools.<|reference_end|> | arxiv | @article{fliess2007critique,
title={Critique du rapport signal \`a bruit en th\'eorie de l'information -- A
critical appraisal of the signal to noise ratio in information theory},
author={Michel Fliess (INRIA Futurs)},
journal={arXiv preprint arXiv:0712.1875},
year={2007},
archivePrefix={arXiv},
eprint={0712.1875},
primaryClass={cs.IT math.IT math.LO math.PR math.RA quant-ph}
} | fliess2007critique |
arxiv-2071 | 0712.1878 | Hierarchy construction schemes within the Scale set framework | <|reference_start|>Hierarchy construction schemes within the Scale set framework: Segmentation algorithms based on an energy minimisation framework often depend on a scale parameter which balances a fit to data and a regularising term. Irregular pyramids are defined as a stack of graphs successively reduced. Within this framework, the scale is often defined implicitly as the height in the pyramid. However, each level of an irregular pyramid can not usually be readily associated to the global optimum of an energy or a global criterion on the base level graph. This last drawback is addressed by the scale set framework designed by Guigues. The methods designed by this author allow to build a hierarchy and to design cuts within this hierarchy which globally minimise an energy. This paper studies the influence of the construction scheme of the initial hierarchy on the resulting optimal cuts. We propose one sequential and one parallel method with two variations within both. Our sequential methods provide partitions near the global optima while parallel methods require less execution times than the sequential method of Guigues even on sequential machines.<|reference_end|> | arxiv | @article{pruvot2007hierarchy,
title={Hierarchy construction schemes within the Scale set framework},
author={Jean Hugues Pruvot (GREYC), Luc Brun (GREYC)},
journal={Dans Graph-Based Representations in Pattern Recognition - Graph
based Representation 2007, Alicante : Espagne (2007)},
year={2007},
archivePrefix={arXiv},
eprint={0712.1878},
primaryClass={cs.CV}
} | pruvot2007hierarchy |
arxiv-2072 | 0712.1916 | Ranking forestry journals using the h-index | <|reference_start|>Ranking forestry journals using the h-index: An expert ranking of forestry journals was compared with journal impact factors and h-indices computed from the ISI Web of Science and internet-based data. Citations reported by Google Scholar appear to offer the most efficient way to rank all journals objectively, in a manner consistent with other indicators. This h-index exhibited a high correlation with the journal impact factor (r=0.92), but is not confined to journals selected by any particular commercial provider. A ranking of 180 forestry journals is presented, on the basis of this index.<|reference_end|> | arxiv | @article{vanclay2007ranking,
title={Ranking forestry journals using the h-index},
author={Jerome K. Vanclay},
journal={Journal of Informetrics 2 (2008) 326-334},
year={2007},
doi={10.1016/j.joi.2008.07.002},
archivePrefix={arXiv},
eprint={0712.1916},
primaryClass={cs.DL}
} | vanclay2007ranking |
arxiv-2073 | 0712.1928 | Distribution of Edge Load in Scale-free Trees | <|reference_start|>Distribution of Edge Load in Scale-free Trees: Node betweenness has been studied recently by a number of authors, but until now less attention has been paid to edge betweenness. In this paper, we present an exact analytic study of edge betweenness in evolving scale-free and non-scale-free trees. We aim at the probability distribution of edge betweenness under the condition that a local property, the in-degree of the ``younger'' node of a randomly selected edge, is known. En route to the conditional distribution of edge betweenness the exact joint distribution of cluster size and in-degree, and its one dimensional marginal distributions have been presented in the paper as well. From the derived probability distributions the expectation values of different quantities have been calculated. Our results provide an exact solution not only for infinite, but for finite networks as well.<|reference_end|> | arxiv | @article{fekete2007distribution,
title={Distribution of Edge Load in Scale-free Trees},
author={Attila Fekete, G\'abor Vattay, Ljupco Kocarev},
journal={Phys. Rev. E 73, 046102 (2006)},
year={2007},
doi={10.1103/PhysRevE.73.046102},
archivePrefix={arXiv},
eprint={0712.1928},
primaryClass={cs.NI cond-mat.other}
} | fekete2007distribution |
arxiv-2074 | 0712.1959 | Delaunay Edge Flips in Dense Surface Triangulations | <|reference_start|>Delaunay Edge Flips in Dense Surface Triangulations: Delaunay flip is an elegant, simple tool to convert a triangulation of a point set to its Delaunay triangulation. The technique has been researched extensively for full dimensional triangulations of point sets. However, an important case of triangulations which are not full dimensional is surface triangulations in three dimensions. In this paper we address the question of converting a surface triangulation to a subcomplex of the Delaunay triangulation with edge flips. We show that the surface triangulations which closely approximate a smooth surface with uniform density can be transformed to a Delaunay triangulation with a simple edge flip algorithm. The condition on uniformity becomes less stringent with increasing density of the triangulation. If the condition is dropped completely, the flip algorithm still terminates although the output surface triangulation becomes "almost Delaunay" instead of exactly Delaunay.<|reference_end|> | arxiv | @article{cheng2007delaunay,
title={Delaunay Edge Flips in Dense Surface Triangulations},
author={Siu-Wing Cheng and Tamal K. Dey},
journal={arXiv preprint arXiv:0712.1959},
year={2007},
archivePrefix={arXiv},
eprint={0712.1959},
primaryClass={cs.CG cs.DS}
} | cheng2007delaunay |
arxiv-2075 | 0712.1987 | A New Outer Bound and the Noisy-Interference Sum-Rate Capacity for Gaussian Interference Channels | <|reference_start|>A New Outer Bound and the Noisy-Interference Sum-Rate Capacity for Gaussian Interference Channels: A new outer bound on the capacity region of Gaussian interference channels is developed. The bound combines and improves existing genie-aided methods and is shown to give the sum-rate capacity for noisy interference as defined in this paper. Specifically, it is shown that if the channel coefficients and power constraints satisfy a simple condition then single-user detection at each receiver is sum-rate optimal, i.e., treating the interference as noise incurs no loss in performance. This is the first concrete (finite signal-to-noise ratio) capacity result for the Gaussian interference channel with weak to moderate interference. Furthermore, for certain mixed (weak and strong) interference scenarios, the new outer bounds give a corner point of the capacity region.<|reference_end|> | arxiv | @article{shang2007a,
title={A New Outer Bound and the Noisy-Interference Sum-Rate Capacity for
Gaussian Interference Channels},
author={Xiaohu Shang, Gerhard Kramer, and Biao Chen},
journal={arXiv preprint arXiv:0712.1987},
year={2007},
doi={10.1109/TIT.2008.2009793},
archivePrefix={arXiv},
eprint={0712.1987},
primaryClass={cs.IT math.IT}
} | shang2007a |
arxiv-2076 | 0712.1994 | Knowledge Engineering Technique for Cluster Development | <|reference_start|>Knowledge Engineering Technique for Cluster Development: After the concept of industry cluster was tangibly applied in many countries, SMEs trended to link to each other to maintain their competitiveness in the market. The major key success factors of the cluster are knowledge sharing and collaboration between partners. This knowledge is collected in form of tacit and explicit knowledge from experts and institutions within the cluster. The objective of this study is about enhancing the industry cluster with knowledge management by using knowledge engineering which is one of the most important method for managing knowledge. This work analyzed three well known knowledge engineering methods, i.e. MOKA, SPEDE and CommonKADS, and compares the capability to be implemented in the cluster context. Then, we selected one method and proposed the adapted methodology. At the end of this paper, we validated and demonstrated the proposed methodology with some primary result by using case study of handicraft cluster in Thailand.<|reference_end|> | arxiv | @article{sureephong2007knowledge,
title={Knowledge Engineering Technique for Cluster Development},
author={Pradorn Sureephong (LIESP, CAMT), Nopasit Chakpitak (CAMT), Yacine
Ouzrout (LIESP), Gilles Neubert (LIESP), Abdelaziz Bouras (LIESP)},
journal={Dans Proceeding of Knowledge Science, Engineering and Management
(KSEM 07) - Knowledge Science, Engineering and Management (KSEM 07),
Melbourne : Australie (2007)},
year={2007},
archivePrefix={arXiv},
eprint={0712.1994},
primaryClass={cs.OH}
} | sureephong2007knowledge |
arxiv-2077 | 0712.1996 | A case study of the difficulty of quantifier elimination in constraint databases: the alibi query in moving object databases | <|reference_start|>A case study of the difficulty of quantifier elimination in constraint databases: the alibi query in moving object databases: In the constraint database model, spatial and spatio-temporal data are stored by boolean combinations of polynomial equalities and inequalities over the real numbers. The relational calculus augmented with polynomial constraints is the standard first-order query language for constraint databases. Although the expressive power of this query language has been studied extensively, the difficulty of the efficient evaluation of queries, usually involving some form of quantifier elimination, has received considerably less attention. The inefficiency of existing quantifier-elimination software and the intrinsic difficulty of quantifier elimination have proven to be a bottle-neck for real-world implementations of constraint database systems. In this paper, we focus on a particular query, called the \emph{alibi query}, that asks whether two moving objects whose positions are known at certain moments in time, could have possibly met, given certain speed constraints. This query can be seen as a constraint database query and its evaluation relies on the elimination of a block of three existential quantifiers. Implementations of general purpose elimination algorithms are in the specific case, for practical purposes, too slow in answering the alibi query and fail completely in the parametric case. The main contribution of this paper is an analytical solution to the parametric alibi query, which can be used to answer this query in the specific case in constant time. We also give an analytic solution to the alibi query at a fixed moment in time. 
The solutions we propose are based on geometric argumentation and they illustrate the fact that some practical problems require creative solutions, where at least in theory, existing systems could provide a solution.<|reference_end|> | arxiv | @article{kuijpers2007a,
title={A case study of the difficulty of quantifier elimination in constraint
databases: the alibi query in moving object databases},
author={Bart Kuijpers, Walied Othman, Rafael Grimson},
journal={arXiv preprint arXiv:0712.1996},
year={2007},
archivePrefix={arXiv},
eprint={0712.1996},
primaryClass={cs.LO cs.CC cs.DB}
} | kuijpers2007a |
arxiv-2078 | 0712.2054 | Distributed Fair Scheduling Using Variable Transmission Lengths in Carrier-Sensing-based Wireless Networks | <|reference_start|>Distributed Fair Scheduling Using Variable Transmission Lengths in Carrier-Sensing-based Wireless Networks: The fairness of IEEE 802.11 wireless networks (including Wireless LAN and Ad-hoc networks) is hard to predict and control because of the randomness and complexity of the MAC contentions and dynamics. Moreover, asymmetric channel conditions such as those caused by capture and channel errors often lead to severe unfairness among stations. In this paper we propose a novel distributed scheduling algorithm that we call VLS, for ``{\em variable-length scheduling}'', that provides weighted fairness to all stations despite the imperfections of the MAC layer and physical channels. Distinct features of VLS include the use of variable transmission lengths based on distributed observations, compatibility with 802.11's contention window algorithm, opportunistic scheduling to achieve high throughput in time-varying wireless environments, and flexibility and ease of implementation. Also, VLS makes the throughput of each station more smooth, which is appealing to real-time applications such as video and voice. Although the paper mostly assumes 802.11 protocol, the idea generally applies to wireless networks based on CSMA (Carrier Sensing Multiple Access).<|reference_end|> | arxiv | @article{jiang2007distributed,
title={Distributed Fair Scheduling Using Variable Transmission Lengths in
Carrier-Sensing-based Wireless Networks},
author={Libin Jiang, Jean Walrand},
journal={arXiv preprint arXiv:0712.2054},
year={2007},
archivePrefix={arXiv},
eprint={0712.2054},
primaryClass={cs.NI}
} | jiang2007distributed |
arxiv-2079 | 0712.2063 | An axiomatic approach to intrinsic dimension of a dataset | <|reference_start|>An axiomatic approach to intrinsic dimension of a dataset: We perform a deeper analysis of an axiomatic approach to the concept of intrinsic dimension of a dataset proposed by us in the IJCNN'07 paper (arXiv:cs/0703125). The main features of our approach are that a high intrinsic dimension of a dataset reflects the presence of the curse of dimensionality (in a certain mathematically precise sense), and that dimension of a discrete i.i.d. sample of a low-dimensional manifold is, with high probability, close to that of the manifold. At the same time, the intrinsic dimension of a sample is easily corrupted by moderate high-dimensional noise (of the same amplitude as the size of the manifold) and suffers from prohibitively high computational complexity (computing it is an $NP$-complete problem). We outline a possible way to overcome these difficulties.<|reference_end|> | arxiv | @article{pestov2007an,
title={An axiomatic approach to intrinsic dimension of a dataset},
author={Vladimir Pestov},
journal={Neural Networks 21, 2-3 (2008), 204-213.},
year={2007},
archivePrefix={arXiv},
eprint={0712.2063},
primaryClass={cs.IR}
} | pestov2007an |
arxiv-2080 | 0712.2083 | VoIP over Multiple IEEE 802.11 Wireless LANs | <|reference_start|>VoIP over Multiple IEEE 802.11 Wireless LANs: Prior work indicates that 802.11 is extremely inefficient for VoIP transport. Only 12 and 60 VoIP sessions can be supported in an 802.11b and an 802.11g WLAN, respectively. This paper shows that the bad news does not stop there. When there are multiple WLANs in the vicinity of each other, the already-low VoIP capacity can be further eroded in a significant manner. For example, in a 5-by-5, 25-cell multi-WLAN network, the VoIP capacities for 802.11b and 802.11g are only 1.63 and 10.34 sessions per AP, respectively. This paper investigates several solutions to improve the VoIP capacity. Based on a conflict graph model, we propose a clique-analytical call-admission scheme, which increases the VoIP capacity by 52% and 37% in 802.11b and 802.11g respectively. If all the three orthogonal frequency channels available in 11b and 11g are used, the capacity can be nearly tripled by the call-admission scheme. This paper also proposes for the first time the use of coarse-grained time-division multiple access (CoTDMA) in conjunction with the basic 802.11 CSMA to eliminate the performance-degrading exposed-node and hidden-node problems. We find that CoTDMA can further increase the VoIP capacity in the multi-WLAN scenario by an additional 35%.<|reference_end|> | arxiv | @article{chan2007voip,
title={VoIP over Multiple IEEE 802.11 Wireless LANs},
author={A. Chan, S. C. Liew},
journal={arXiv preprint arXiv:0712.2083},
year={2007},
archivePrefix={arXiv},
eprint={0712.2083},
primaryClass={cs.NI}
} | chan2007voip |
arxiv-2081 | 0712.2094 | Hinged Dissections Exist | <|reference_start|>Hinged Dissections Exist: We prove that any finite collection of polygons of equal area has a common hinged dissection. That is, for any such collection of polygons there exists a chain of polygons hinged at vertices that can be folded in the plane continuously without self-intersection to form any polygon in the collection. This result settles the open problem about the existence of hinged dissections between pairs of polygons that goes back implicitly to 1864 and has been studied extensively in the past ten years. Our result generalizes and indeed builds upon the result from 1814 that polygons have common dissections (without hinges). We also extend our common dissection result to edge-hinged dissections of solid 3D polyhedra that have a common (unhinged) dissection, as determined by Dehn's 1900 solution to Hilbert's Third Problem. Our proofs are constructive, giving explicit algorithms in all cases. For a constant number of planar polygons, both the number of pieces and running time required by our construction are pseudopolynomial. This bound is the best possible, even for unhinged dissections. Hinged dissections have possible applications to reconfigurable robotics, programmable matter, and nanomanufacturing.<|reference_end|> | arxiv | @article{abbott2007hinged,
title={Hinged Dissections Exist},
author={Timothy G. Abbott, Zachary Abel, David Charlton, Erik D. Demaine,
Martin L. Demaine, Scott D. Kominers},
journal={Proceedings of the Twenty-fourth Annual Symposium on Computational
Geometry (2008): 110-119.},
year={2007},
doi={10.1145/1377676.1377695},
archivePrefix={arXiv},
eprint={0712.2094},
primaryClass={cs.CG}
} | abbott2007hinged |
arxiv-2082 | 0712.2099 | MRI/TRUS data fusion for brachytherapy | <|reference_start|>MRI/TRUS data fusion for brachytherapy: BACKGROUND: Prostate brachytherapy consists in placing radioactive seeds for tumour destruction under transrectal ultrasound imaging (TRUS) control. It requires prostate delineation from the images for dose planning. Because ultrasound imaging is patient- and operator-dependent, we have proposed to fuse MRI data to TRUS data to make image processing more reliable. The technical accuracy of this approach has already been evaluated. METHODS: We present work in progress concerning the evaluation of the approach from the dosimetry viewpoint. The objective is to determine what impact this system may have on the treatment of the patient. Dose planning is performed from initial TRUS prostate contours and evaluated on contours modified by data fusion. RESULTS: For the eight patients included, we demonstrate that TRUS prostate volume is most often underestimated and that dose is overestimated in a correlated way. However, dose constraints are still verified for those eight patients. CONCLUSIONS: This confirms our initial hypothesis.<|reference_end|> | arxiv | @article{daanen2007mri/trus,
title={MRI/TRUS data fusion for brachytherapy},
author={V. Daanen (TIMC), J. Gastaldo, J. Y. Giraud, P. Fourneret, J. L.
Descotes, M. Bolla, D. Collomb, Jocelyne Troccaz (TIMC)},
journal={International Journal of Medical Robotics and Computer Assisted
Surgery 2, 3 (2006) 256-61},
year={2007},
doi={10.1002/rcs.95},
archivePrefix={arXiv},
eprint={0712.2099},
primaryClass={cs.OH}
} | daanen2007mri/trus |
arxiv-2083 | 0712.2100 | Medical image computing and computer-aided medical interventions applied to soft tissues Work in progress in urology | <|reference_start|>Medical image computing and computer-aided medical interventions applied to soft tissues Work in progress in urology: Until recently, Computer-Aided Medical Interventions (CAMI) and Medical Robotics have focused on rigid and non deformable anatomical structures. Nowadays, special attention is paid to soft tissues, raising complex issues due to their mobility and deformation. Mini-invasive digestive surgery was probably one of the first fields where soft tissues were handled through the development of simulators, tracking of anatomical structures and specific assistance robots. However, other clinical domains, for instance urology, are concerned. Indeed, laparoscopic surgery, new tumour destruction techniques (e.g. HIFU, radiofrequency, or cryoablation), increasingly early detection of cancer, and use of interventional and diagnostic imaging modalities, recently opened new challenges to the urologist and scientists involved in CAMI. This resulted in the last five years in a very significant increase of research and developments of computer-aided urology systems. In this paper, we propose a description of the main problems related to computer-aided diagnostic and therapy of soft tissues and give a survey of the different types of assistance offered to the urologist: robotization, image fusion, surgical navigation. Both research projects and operational industrial systems are discussed.<|reference_end|> | arxiv | @article{troccaz2007medical,
title={Medical image computing and computer-aided medical interventions applied
to soft tissues. Work in progress in urology},
author={Jocelyne Troccaz (TIMC), Michael Baumann (TIMC), Peter Berkelman
(TIMC), Philippe Cinquin (TIMC), Vincent Daanen (TIMC), Antoine Leroy (TIMC),
Maud Marchal (TIMC), Yohan Payan (TIMC), Emmanuel Promayon (TIMC), Sandrine
Voros (TIMC), St\'ephane Bart (TIMC), Michel Bolla, Emmanuel
Chartier-Kastler, Jean-Luc Descotes, Andr\'ee Dusserre, Jean-Yves Giraud,
Jean-Alexandre Long (TIMC), Ronan Moalic, Pierre Mozer (TIMC)},
journal={Proceedings of the IEEE 94, 9 (2006) 1665-1677},
year={2007},
archivePrefix={arXiv},
eprint={0712.2100},
primaryClass={cs.OH cs.RO}
} | troccaz2007medical |
arxiv-2084 | 0712.2113 | On the deployment of Mobile Trusted Modules | <|reference_start|>On the deployment of Mobile Trusted Modules: In its recently published TCG Mobile Reference Architecture, the TCG Mobile Phone Work Group specifies a new concept to enable trust into future mobile devices. For this purpose, the TCG devises a trusted mobile platform as a set of trusted engines on behalf of different stakeholders supported by a physical trust-anchor. In this paper, we present our perception on this emerging specification. We propose an approach for the practical design and implementation of this concept and how to deploy it to a trustworthy operating platform. In particular we propose a method for the take-ownership of a device by the user and the migration (i.e., portability) of user credentials between devices.<|reference_end|> | arxiv | @article{schmidt2007on,
title={On the deployment of Mobile Trusted Modules},
author={Andreas U. Schmidt, Nicolai Kuntze and Michael Kasper},
journal={arXiv preprint arXiv:0712.2113},
year={2007},
doi={10.1109/WCNC.2008.553},
archivePrefix={arXiv},
eprint={0712.2113},
primaryClass={cs.CR}
} | schmidt2007on |
arxiv-2085 | 0712.2141 | Numerical Sensitivity and Efficiency in the Treatment of Epistemic and Aleatory Uncertainty | <|reference_start|>Numerical Sensitivity and Efficiency in the Treatment of Epistemic and Aleatory Uncertainty: The treatment of both aleatory and epistemic uncertainty by recent methods often requires an high computational effort. In this abstract, we propose a numerical sampling method allowing to lighten the computational burden of treating the information by means of so-called fuzzy random variables.<|reference_end|> | arxiv | @article{chojnacki2007numerical,
title={Numerical Sensitivity and Efficiency in the Treatment of Epistemic and
Aleatory Uncertainty},
author={Eric Chojnacki (IRSN), Jean Baccou (IRSN), S\'ebastien Destercke
(IRSN, IRIT)},
journal={Fifth International Conference on Sensitivity Analysis of Model
Output, Budapest : Hongrie (2007)},
year={2007},
archivePrefix={arXiv},
eprint={0712.2141},
primaryClass={cs.AI math.PR}
} | chojnacki2007numerical |
arxiv-2086 | 0712.2168 | Study of conditions of use of E-services accessible to visually disabled persons | <|reference_start|>Study of conditions of use of E-services accessible to visually disabled persons: The aim of this paper is to determine the expectations that French-speaking disabled persons have for electronic administrative sites (utility). At the same time, it is a matter of identifying the difficulties of use that the manipulation of these E-services poses concretely for blind people (usability) and of evaluating the psychosocial impacts on the way of life of these people with specific needs. We show that the lack of numerical accessibility is likely to accentuate the social exclusion of which these people are victim by establishing a numerical glass ceiling.<|reference_end|> | arxiv | @article{bobiller-chaumon2007study,
title={Study of conditions of use of E-services accessible to visually disabled
persons},
author={Marc-Eric Bobiller-Chaumon (GRePS), Michel Dubois (LIP - PC2S),
Fran\c{c}oise Sandoz-Guermond (LIESP)},
journal={Dans CEUR Workshop Proceedings - DEGAS'07 : Workshop of Design \&
Evaluation of e-Government Applications and services, Rio de Janeiro :
Br\'esil (2006)},
year={2007},
archivePrefix={arXiv},
eprint={0712.2168},
primaryClass={cs.HC}
} | bobiller-chaumon2007study |
arxiv-2087 | 0712.2182 | Optimal codes for correcting a single (wrap-around) burst of errors | <|reference_start|>Optimal codes for correcting a single (wrap-around) burst of errors: In 2007, Martinian and Trott presented codes for correcting a burst of erasures with a minimum decoding delay. Their construction employs [n,k] codes that can correct any burst of erasures (including wrap-around bursts) of length n-k. They raised the question of whether such [n,k] codes exist for all integers k and n with 1<= k <= n and all fields (in particular, for the binary field). In this note, we answer this question affirmatively by giving two recursive constructions and a direct one.<|reference_end|> | arxiv | @article{hollmann2007optimal,
title={Optimal codes for correcting a single (wrap-around) burst of errors},
author={Henk D.L. Hollmann and Ludo M.G.M. Tolhuizen},
journal={arXiv preprint arXiv:0712.2182},
year={2007},
archivePrefix={arXiv},
eprint={0712.2182},
primaryClass={cs.IT math.IT}
} | hollmann2007optimal |
arxiv-2088 | 0712.2183 | Apports des d\'emarches d'inspection et des tests d'usage dans l'\'evaluation de l'accessibilit\'e de E-services | <|reference_start|>Apports des d\'emarches d'inspection et des tests d'usage dans l'\'evaluation de l'accessibilit\'e de E-services: This article proposes to describe and compare the contributions of various techniques of evaluation of the accessibility of E-services carried out starting from (i) methods of inspection (on the basis of traditional ergonomic criteria and accessibility) and (ii) of tests of use. It shows that these are the latter which show the best rate of identification of the problems of uses for the people with disabilities<|reference_end|> | arxiv | @article{bobiller-chaumon2007apports,
title={Apports des d\'emarches d'inspection et des tests d'usage dans
l'\'evaluation de l'accessibilit\'e de E-services},
author={Marc-Eric Bobiller-Chaumon (GRePS), Fran\c{c}oise Sandoz-Guermond
(LIESP)},
journal={Dans Actes du congr\`es ERGO IA'2006 - ERGO'IA : L'humain comme
facteur de performance des syst\`emes complexes, Biarritz : France (2006)},
year={2007},
archivePrefix={arXiv},
eprint={0712.2183},
primaryClass={cs.HC}
} | bobiller-chaumon2007apports |
arxiv-2089 | 0712.2223 | Entanglement-Assisted Quantum Convolutional Coding | <|reference_start|>Entanglement-Assisted Quantum Convolutional Coding: We show how to protect a stream of quantum information from decoherence induced by a noisy quantum communication channel. We exploit preshared entanglement and a convolutional coding structure to develop a theory of entanglement-assisted quantum convolutional coding. Our construction produces a Calderbank-Shor-Steane (CSS) entanglement-assisted quantum convolutional code from two arbitrary classical binary convolutional codes. The rate and error-correcting properties of the classical convolutional codes directly determine the corresponding properties of the resulting entanglement-assisted quantum convolutional code. We explain how to encode our CSS entanglement-assisted quantum convolutional codes starting from a stream of information qubits, ancilla qubits, and shared entangled bits.<|reference_end|> | arxiv | @article{wilde2007entanglement-assisted,
title={Entanglement-Assisted Quantum Convolutional Coding},
author={Mark M. Wilde and Todd A. Brun},
journal={Physical Review A 81, 042333 (2010)},
year={2007},
doi={10.1103/PhysRevA.81.042333},
number={CSI-07-12-01},
archivePrefix={arXiv},
eprint={0712.2223},
primaryClass={quant-ph cs.IT math.IT}
} | wilde2007entanglement-assisted |
arxiv-2090 | 0712.2231 | Trust for Location-based Authorisation | <|reference_start|>Trust for Location-based Authorisation: We propose a concept for authorisation using the location of a mobile device and the enforcement of location-based policies. Mobile devices enhanced by Trusted Computing capabilities operate an autonomous and secure location trigger and policy enforcement entity. Location determination is two-tiered, integrating cell-based triggering at handover with precision location measurement by the device.<|reference_end|> | arxiv | @article{schmidt2007trust,
title={Trust for Location-based Authorisation},
author={Andreas U. Schmidt, Nicolai Kuntze and Joerg Abendroth},
journal={arXiv preprint arXiv:0712.2231},
year={2007},
doi={10.1109/WCNC.2008.552},
archivePrefix={arXiv},
eprint={0712.2231},
primaryClass={cs.CR}
} | schmidt2007trust |
arxiv-2091 | 0712.2235 | A Dynamic ID-based Remote User Authentication Scheme | <|reference_start|>A Dynamic ID-based Remote User Authentication Scheme: Password-based authentication schemes are the most widely used techniques for remote user authentication. Many static ID-based remote user authentication schemes both with and without smart cards have been proposed. Most of the schemes do not allow the users to choose and change their passwords, and maintain a verifier table to verify the validity of the user login. In this paper we present a dynamic ID-based remote user authentication scheme using smart cards. Our scheme allows the users to choose and change their passwords freely, and do not maintain any verifier table. The scheme is secure against ID-theft, and can resist the replay attacks, forgery attacks, guessing attacks, insider attacks and stolen verifier attacks.<|reference_end|> | arxiv | @article{das2007a,
title={A Dynamic ID-based Remote User Authentication Scheme},
author={Manik Lal Das, Ashutosh Saxena, and Ved P. Gulati},
journal={IEEE Transactions on Consumer Electronics, Vol. 50, No. 2, 2004,
pp. 629-631},
year={2007},
doi={10.1109/TCE.2004.1309441},
archivePrefix={arXiv},
eprint={0712.2235},
primaryClass={cs.CR}
} | das2007a |
arxiv-2092 | 0712.2245 | Exact and Approximate Expressions for the Probability of Undetected Error of Varshamov-Tenengol'ts Codes | <|reference_start|>Exact and Approximate Expressions for the Probability of Undetected Error of Varshamov-Tenengol'ts Codes: Computation of the undetected error probability for error correcting codes over the Z-channel is an important issue, explored only in part in previous literature. In this paper we consider the case of Varshamov-Tenengol'ts codes, by presenting some analytical, numerical, and heuristic methods for unveiling this additional feature. Possible comparisons with Hamming codes are also shown and discussed.<|reference_end|> | arxiv | @article{baldi2007exact,
title={Exact and Approximate Expressions for the Probability of Undetected
Error of Varshamov-Tenengol'ts Codes},
author={Marco Baldi, Franco Chiaraluce and Torleiv Kl{\o}ve},
journal={IEEE Transactions on Information Theory, ISSN 0018-9448, Vol. 54,
No. 11, pp. 5019-5029, Nov. 2008},
year={2007},
doi={10.1109/TIT.2008.929912},
archivePrefix={arXiv},
eprint={0712.2245},
primaryClass={cs.IT math.IT}
} | baldi2007exact |
arxiv-2093 | 0712.2255 | Human-Machine Symbiosis, 50 Years On | <|reference_start|>Human-Machine Symbiosis, 50 Years On: Licklider advocated in 1960 the construction of computers capable of working symbiotically with humans to address problems not easily addressed by humans working alone. Since that time, many of the advances that he envisioned have been achieved, yet the time spent by human problem solvers in mundane activities remains large. I propose here four areas in which improved tools can further advance the goal of enhancing human intellect: services, provenance, knowledge communities, and automation of problem-solving protocols.<|reference_end|> | arxiv | @article{foster2007human-machine,
title={Human-Machine Symbiosis, 50 Years On},
author={Ian Foster},
journal={arXiv preprint arXiv:0712.2255},
year={2007},
archivePrefix={arXiv},
eprint={0712.2255},
primaryClass={cs.DC cs.CE cs.HC}
} | foster2007human-machine |
arxiv-2094 | 0712.2262 | The Earth System Grid: Supporting the Next Generation of Climate Modeling Research | <|reference_start|>The Earth System Grid: Supporting the Next Generation of Climate Modeling Research: Understanding the earth's climate system and how it might be changing is a preeminent scientific challenge. Global climate models are used to simulate past, present, and future climates, and experiments are executed continuously on an array of distributed supercomputers. The resulting data archive, spread over several sites, currently contains upwards of 100 TB of simulation data and is growing rapidly. Looking toward mid-decade and beyond, we must anticipate and prepare for distributed climate research data holdings of many petabytes. The Earth System Grid (ESG) is a collaborative interdisciplinary project aimed at addressing the challenge of enabling management, discovery, access, and analysis of these critically important datasets in a distributed and heterogeneous computational environment. The problem is fundamentally a Grid problem. Building upon the Globus toolkit and a variety of other technologies, ESG is developing an environment that addresses authentication, authorization for data access, large-scale data transport and management, services and abstractions for high-performance remote data access, mechanisms for scalable data replication, cataloging with rich semantic and syntactic information, data discovery, distributed monitoring, and Web-based portals for using the system.<|reference_end|> | arxiv | @article{bernholdt2007the,
title={The Earth System Grid: Supporting the Next Generation of Climate
Modeling Research},
author={David Bernholdt, Shishir Bharathi, David Brown, Kasidit Chanchio,
Meili Chen, Ann Chervenak, Luca Cinquini, Bob Drach, Ian Foster, Peter Fox,
Jose Garcia, Carl Kesselman, Rob Markel, Don Middleton, Veronika Nefedova,
Line Pouchard, Arie Shoshani, Alex Sim, Gary Strand, Dean Williams},
journal={arXiv preprint arXiv:0712.2262},
year={2007},
archivePrefix={arXiv},
eprint={0712.2262},
primaryClass={cs.CE cs.DC cs.NI}
} | bernholdt2007the |
arxiv-2095 | 0712.2274 | Distributed MAC Strategy for Exploiting Multi-user Diversity in Multi-rate IEEE 802.11 Wireless LANs | <|reference_start|>Distributed MAC Strategy for Exploiting Multi-user Diversity in Multi-rate IEEE 802.11 Wireless LANs: Fast rate adaptation has been established as an effective way to improve the PHY-layer raw data rate of wireless networks. However, within the current IEEE 802.11 legacy, MAC-layer throughput is dominated by users with the lowest data rates, resulting in underutilization of bandwidth. In this paper, we propose and analyze a novel distributed MAC strategy, referred to as Rate-aware DCF (R-DCF), to leverage the potential of rate adaptation in IEEE 802.11 WLANs. The key feature of R-DCF is that by introducing different mini slots according to the instantaneous channel conditions, only contending stations with the highest data rate can access the channel. In this way, the R-DCF protocol not only exploits multi-user diversity in a fully distributed manner but also reduces the loss of throughput due to collisions. Through analysis, we develop an analytical model to derive the throughput of R-DCF in general multi-rate WLANs. Using the analytical model we investigate the performance of R-DCF protocol in various network settings with different rate adaptation strategies and channel variations. Based on the analysis, we further derive the maximal throughput achievable by R-DCF. For practical implementation, an offline adaptive backoff method is developed to achieve a close-to-optimal performance at low runtime complexity. The superiority of R-DCF is proven via extensive analyses and simulations.<|reference_end|> | arxiv | @article{chen2007distributed,
title={Distributed MAC Strategy for Exploiting Multi-user Diversity in
Multi-rate IEEE 802.11 Wireless LANs},
author={Da Rui Chen and Ying Jun (Angela) Zhang},
journal={arXiv preprint arXiv:0712.2274},
year={2007},
archivePrefix={arXiv},
eprint={0712.2274},
primaryClass={cs.NI}
} | chen2007distributed |
arxiv-2096 | 0712.2302 | Data access optimizations for highly threaded multi-core CPUs with multiple memory controllers | <|reference_start|>Data access optimizations for highly threaded multi-core CPUs with multiple memory controllers: Processor and system architectures that feature multiple memory controllers are prone to show bottlenecks and erratic performance numbers on codes with regular access patterns. Although such effects are well known in the form of cache thrashing and aliasing conflicts, they become more severe when memory access is involved. Using the new Sun UltraSPARC T2 processor as a prototypical multi-core design, we analyze performance patterns in low-level and application benchmarks and show ways to circumvent bottlenecks by careful data layout and padding.<|reference_end|> | arxiv | @article{hager2007data,
title={Data access optimizations for highly threaded multi-core CPUs with
multiple memory controllers},
author={Georg Hager, Thomas Zeiser, Gerhard Wellein},
journal={arXiv preprint arXiv:0712.2302},
year={2007},
archivePrefix={arXiv},
eprint={0712.2302},
primaryClass={cs.DC cs.PF}
} | hager2007data |
arxiv-2097 | 0712.2371 | Maximum-rate, Minimum-Decoding-Complexity STBCs from Clifford Algebras | <|reference_start|>Maximum-rate, Minimum-Decoding-Complexity STBCs from Clifford Algebras: It is well known that Space-Time Block Codes (STBCs) from orthogonal designs (ODs) are single-symbol decodable/symbol-by-symbol decodable (SSD) and are obtainable from unitary matrix representations of Clifford algebras. However, SSD codes are obtainable from designs that are not orthogonal also. Recently, two such classes of SSD codes have been studied: (i) Coordinate Interleaved Orthogonal Designs (CIODs) and (ii) Minimum-Decoding-Complexity (MDC) STBCs from Quasi-ODs (QODs). Codes from ODs, CIODs and MDC-QODs are mutually non-intersecting classes of codes. The class of CIODs have {\it non-unitary weight matrices} when written as a Linear Dispersion Code (LDC) proposed by Hassibi and Hochwald, whereas several known SSD codes including CODs have {\it unitary weight matrices}. In this paper, we obtain SSD codes with unitary weight matrices (that are not CODs) called Clifford Unitary Weight SSDs (CUW-SSDs) from matrix representations of Clifford algebras. A main result of this paper is the derivation of an achievable upper bound on the rate of any unitary weight SSD code as $\frac{a}{2^{a-1}}$ for $2^a$ antennas which is larger than that of the CODs which is $\frac{a+1}{2^a}$. It is shown that several known classes of SSD codes are CUW-SSD codes and CUW-SSD codes meet this upper bound. Also, for the codes of this paper conditions on the signal sets which ensure full-diversity and expressions for the coding gain are presented. A large class of SSD codes with non-unitary weight matrices are obtained which include CIODs as a proper subclass.<|reference_end|> | arxiv | @article{karmakar2007maximum-rate,
title={Maximum-rate, Minimum-Decoding-Complexity STBCs from Clifford Algebras},
author={Sanjay Karmakar and B. Sundar Rajan},
journal={arXiv preprint arXiv:0712.2371},
year={2007},
archivePrefix={arXiv},
eprint={0712.2371},
primaryClass={cs.IT math.IT}
} | karmakar2007maximum-rate |
arxiv-2098 | 0712.2384 | Multi-group ML Decodable Collocated and Distributed Space Time Block Codes | <|reference_start|>Multi-group ML Decodable Collocated and Distributed Space Time Block Codes: In this paper, collocated and distributed space-time block codes (DSTBCs) which admit multi-group maximum likelihood (ML) decoding are studied. First the collocated case is considered and the problem of constructing space-time block codes (STBCs) which optimally tradeoff rate and ML decoding complexity is posed. Recently, sufficient conditions for multi-group ML decodability have been provided in the literature and codes meeting these sufficient conditions were called Clifford Unitary Weight (CUW) STBCs. An algebraic framework based on extended Clifford algebras is proposed to study CUW STBCs and using this framework, the optimal tradeoff between rate and ML decoding complexity of CUW STBCs is obtained for few specific cases. Code constructions meeting this tradeoff optimally are also provided. The paper then focuses on multi-group ML decodable DSTBCs for application in synchronous wireless relay networks and three constructions of four-group ML decodable DSTBCs are provided. Finally, the OFDM based Alamouti space-time coded scheme proposed by Li-Xia for a 2 relay asynchronous relay network is extended to a more general transmission scheme that can achieve full asynchronous cooperative diversity for arbitrary number of relays. It is then shown how differential encoding at the source can be combined with the proposed transmission scheme to arrive at a new transmission scheme that can achieve full cooperative diversity in asynchronous wireless relay networks with no channel information and also no timing error knowledge at the destination node. Four-group decodable DSTBCs applicable in the proposed OFDM based transmission scheme are also given.<|reference_end|> | arxiv | @article{rajan2007multi-group,
title={Multi-group ML Decodable Collocated and Distributed Space Time Block
Codes},
author={G. Susinder Rajan and B. Sundar Rajan},
journal={arXiv preprint arXiv:0712.2384},
year={2007},
archivePrefix={arXiv},
eprint={0712.2384},
primaryClass={cs.IT cs.DM math.IT math.RA}
} | rajan2007multi-group |
arxiv-2099 | 0712.2389 | Decomposition During Search for Propagation-Based Constraint Solvers | <|reference_start|>Decomposition During Search for Propagation-Based Constraint Solvers: We describe decomposition during search (DDS), an integration of And/Or tree search into propagation-based constraint solvers. The presented search algorithm dynamically decomposes sub-problems of a constraint satisfaction problem into independent partial problems, avoiding redundant work. The paper discusses how DDS interacts with key features that make propagation-based solvers successful: constraint propagation, especially for global constraints, and dynamic search heuristics. We have implemented DDS for the Gecode constraint programming library. Two applications, solution counting in graph coloring and protein structure prediction, exemplify the benefits of DDS in practice.<|reference_end|> | arxiv | @article{mann2007decomposition,
title={Decomposition During Search for Propagation-Based Constraint Solvers},
author={Martin Mann and Guido Tack and Sebastian Will},
journal={arXiv preprint arXiv:0712.2389},
year={2007},
archivePrefix={arXiv},
eprint={0712.2389},
primaryClass={cs.AI}
} | mann2007decomposition |
arxiv-2100 | 0712.2430 | Limits to consistent on-line forecasting for ergodic time series | <|reference_start|>Limits to consistent on-line forecasting for ergodic time series: This study concerns problems of time-series forecasting under the weakest of assumptions. Related results are surveyed and are points of departure for the developments here, some of which are new and others are new derivations of previous findings. The contributions in this study are all negative, showing that various plausible prediction problems are unsolvable, or in other cases, are not solvable by predictors which are known to be consistent when mixing conditions hold.<|reference_end|> | arxiv | @article{gyorfi2007limits,
title={Limits to consistent on-line forecasting for ergodic time series},
author={L. Gyorfi, G. Morvai, and S. Yakowitz},
journal={IEEE Trans. Inform. Theory 44 (1998), no. 2, 886--892},
year={2007},
doi={10.1109/18.661540},
archivePrefix={arXiv},
eprint={0712.2430},
primaryClass={math.PR cs.IT math.IT}
} | gyorfi2007limits |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.