corpus_id
stringlengths 7
12
| paper_id
stringlengths 9
16
| title
stringlengths 1
261
| abstract
stringlengths 70
4.02k
| source
stringclasses 1
value | bibtex
stringlengths 208
20.9k
| citation_key
stringlengths 6
100
|
---|---|---|---|---|---|---|
arxiv-1701 | 0711.0114 | Geometric Spanners With Small Chromatic Number | <|reference_start|>Geometric Spanners With Small Chromatic Number: Given an integer $k \geq 2$, we consider the problem of computing the smallest real number $t(k)$ such that for each set $P$ of points in the plane, there exists a $t(k)$-spanner for $P$ that has chromatic number at most $k$. We prove that $t(2) = 3$, $t(3) = 2$, $t(4) = \sqrt{2}$, and give upper and lower bounds on $t(k)$ for $k>4$. We also show that for any $\epsilon >0$, there exists a $(1+\epsilon)t(k)$-spanner for $P$ that has $O(|P|)$ edges and chromatic number at most $k$. Finally, we consider an on-line variant of the problem where the points of $P$ are given one after another, and the color of a point must be assigned at the moment the point is given. In this setting, we prove that $t(2) = 3$, $t(3) = 1+ \sqrt{3}$, $t(4) = 1+ \sqrt{2}$, and give upper and lower bounds on $t(k)$ for $k>4$.<|reference_end|> | arxiv | @article{bose2007geometric,
title={Geometric Spanners With Small Chromatic Number},
author={Prosenjit Bose, Paz Carmi, Mathieu Couture, Anil Maheshwari, Michiel
Smid and Norbert Zeh},
journal={arXiv preprint arXiv:0711.0114},
year={2007},
number={TR-07-15},
archivePrefix={arXiv},
eprint={0711.0114},
primaryClass={cs.CG}
} | bose2007geometric |
arxiv-1702 | 0711.0128 | Security Analysis of a Remote User Authentication Scheme with Smart Cards | <|reference_start|>Security Analysis of a Remote User Authentication Scheme with Smart Cards: Yoon et al. proposed a new efficient remote user authentication scheme using smart cards to solve the security problems of W. C. Ku and S. M. Chen scheme. This paper reviews Yoon et al. scheme and then proves that the password change phase of Yoon et al. scheme is still insecure. This paper also proves that the Yoon et al. is still vulnerable to parallel session attack.<|reference_end|> | arxiv | @article{kumar2007security,
title={Security Analysis of a Remote User Authentication Scheme with Smart
Cards},
author={Manoj Kumar},
journal={arXiv preprint arXiv:0711.0128},
year={2007},
archivePrefix={arXiv},
eprint={0711.0128},
primaryClass={cs.CR}
} | kumar2007security |
arxiv-1703 | 0711.0189 | A Tutorial on Spectral Clustering | <|reference_start|>A Tutorial on Spectral Clustering: In recent years, spectral clustering has become one of the most popular modern clustering algorithms. It is simple to implement, can be solved efficiently by standard linear algebra software, and very often outperforms traditional clustering algorithms such as the k-means algorithm. On the first glance spectral clustering appears slightly mysterious, and it is not obvious to see why it works at all and what it really does. The goal of this tutorial is to give some intuition on those questions. We describe different graph Laplacians and their basic properties, present the most common spectral clustering algorithms, and derive those algorithms from scratch by several different approaches. Advantages and disadvantages of the different spectral clustering algorithms are discussed.<|reference_end|> | arxiv | @article{von luxburg2007a,
title={A Tutorial on Spectral Clustering},
author={Ulrike von Luxburg},
journal={Statistics and Computing 17(4), 2007},
year={2007},
archivePrefix={arXiv},
eprint={0711.0189},
primaryClass={cs.DS cs.LG}
} | von luxburg2007a |
arxiv-1704 | 0711.0194 | Coinductive Proof Principles for Stochastic Processes | <|reference_start|>Coinductive Proof Principles for Stochastic Processes: We give an explicit coinduction principle for recursively-defined stochastic processes. The principle applies to any closed property, not just equality, and works even when solutions are not unique. The rule encapsulates low-level analytic arguments, allowing reasoning about such processes at a higher algebraic level. We illustrate the use of the rule in deriving properties of a simple coin-flip process.<|reference_end|> | arxiv | @article{kozen2007coinductive,
title={Coinductive Proof Principles for Stochastic Processes},
author={Dexter Kozen},
journal={Logical Methods in Computer Science, Volume 3, Issue 4 (November
12, 2007) lmcs:1098},
year={2007},
doi={10.2168/LMCS-3(4:8)2007},
archivePrefix={arXiv},
eprint={0711.0194},
primaryClass={cs.LO}
} | kozen2007coinductive |
arxiv-1705 | 0711.0237 | Zero-rate feedback can achieve the empirical capacity | <|reference_start|>Zero-rate feedback can achieve the empirical capacity: The utility of limited feedback for coding over an individual sequence of DMCs is investigated. This study complements recent results showing how limited or noisy feedback can boost the reliability of communication. A strategy with fixed input distribution $P$ is given that asymptotically achieves rates arbitrarily close to the mutual information induced by $P$ and the state-averaged channel. When the capacity achieving input distribution is the same over all channel states, this achieves rates at least as large as the capacity of the state averaged channel, sometimes called the empirical capacity.<|reference_end|> | arxiv | @article{eswaran2007zero-rate,
title={Zero-rate feedback can achieve the empirical capacity},
author={Krishnan Eswaran, Anand D. Sarwate, Anant Sahai, and Michael Gastpar},
journal={arXiv preprint arXiv:0711.0237},
year={2007},
doi={10.1109/TIT.2009.2034779},
archivePrefix={arXiv},
eprint={0711.0237},
primaryClass={cs.IT math.IT}
} | eswaran2007zero-rate |
arxiv-1706 | 0711.0251 | Faster Algorithms for Online Topological Ordering | <|reference_start|>Faster Algorithms for Online Topological Ordering: We present two algorithms for maintaining the topological order of a directed acyclic graph with n vertices, under an online edge insertion sequence of m edges. Efficient algorithms for online topological ordering have many applications, including online cycle detection, which is to discover the first edge that introduces a cycle under an arbitrary sequence of edge insertions in a directed graph. In this paper we present efficient algorithms for the online topological ordering problem. We first present a simple algorithm with running time O(n^{5/2}) for the online topological ordering problem. This is the current fastest algorithm for this problem on dense graphs, i.e., when m > n^{5/3}. We then present an algorithm with running time O((m + nlog n)\sqrt{m}); this is more efficient for sparse graphs. Our results yield an improved upper bound of O(min(n^{5/2}, (m + nlog n)sqrt{m})) for the online topological ordering problem.<|reference_end|> | arxiv | @article{kavitha2007faster,
title={Faster Algorithms for Online Topological Ordering},
author={Telikepalli Kavitha and Rogers Mathew},
journal={arXiv preprint arXiv:0711.0251},
year={2007},
number={IISC-CSA-TR-2007-12},
archivePrefix={arXiv},
eprint={0711.0251},
primaryClass={cs.DS}
} | kavitha2007faster |
arxiv-1707 | 0711.0259 | Diversification in the Internet Economy:The Role of For-Profit Mediators | <|reference_start|>Diversification in the Internet Economy:The Role of For-Profit Mediators: We investigate market forces that would lead to the emergence of new classes of players in the sponsored search market. We report a 3-fold diversification triggered by two inherent features of the sponsored search market, namely, capacity constraints and collusion-vulnerability of current mechanisms. In the first scenario, we present a comparative study of two models motivated by capacity constraints - one where the additional capacity is provided by for-profit agents, who compete for slots in the original auction, draw traffic, and run their own sub-auctions, and the other, where the additional capacity is provided by the auctioneer herself, by essentially acting as a mediator and running a single combined auction. This study was initiated by us in \cite{SRGR07}, where the mediator-based model was studied. In the present work, we study the auctioneer-based model and show that this model seems inferior to the mediator-based model in terms of revenue or efficiency guarantee due to added capacity. In the second scenario, we initiate a game theoretic study of current sponsored search auctions, involving incentive driven mediators who exploit the fact that these mechanisms are not collusion-resistant. In particular, we show that advertisers can improve their payoffs by using the services of the mediator compared to directly participating in the auction, and that the mediator can also obtain monetary benefit, without violating incentive constraints from the advertisers who do not use its services. We also point out that the auctioneer can not do very much via mechanism design to avoid such for-profit mediation without losing badly in terms of revenue, and therefore, the mediators are likely to prevail.<|reference_end|> | arxiv | @article{singh2007diversification,
title={Diversification in the Internet Economy:The Role of For-Profit Mediators},
author={Sudhir Kumar Singh, Vwani P. Roychowdhury, Himawan Gunadhi, Behnam A.
Rezaei},
journal={arXiv preprint arXiv:0711.0259},
year={2007},
archivePrefix={arXiv},
eprint={0711.0259},
primaryClass={cs.GT}
} | singh2007diversification |
arxiv-1708 | 0711.0261 | Gradient Descent Bit Flipping Algorithms for Decoding LDPC Codes | <|reference_start|>Gradient Descent Bit Flipping Algorithms for Decoding LDPC Codes: A novel class of bit-flipping (BF) algorithms for decoding low-density parity-check (LDPC) codes is presented. The proposed algorithms, which are called gradient descent bit flipping (GDBF) algorithms, can be regarded as simplified gradient descent algorithms. Based on gradient descent formulation, the proposed algorithms are naturally derived from a simple non-linear objective function.<|reference_end|> | arxiv | @article{wadayama2007gradient,
title={Gradient Descent Bit Flipping Algorithms for Decoding LDPC Codes},
author={Tadashi Wadayama, Keisuke Nakamura, Masayuki Yagita, Yuuki Funahashi,
Shogo Usami, Ichi Takumi},
journal={arXiv preprint arXiv:0711.0261},
year={2007},
archivePrefix={arXiv},
eprint={0711.0261},
primaryClass={cs.IT math.IT}
} | wadayama2007gradient |
arxiv-1709 | 0711.0277 | Bandwidth Partitioning in Decentralized Wireless Networks | <|reference_start|>Bandwidth Partitioning in Decentralized Wireless Networks: This paper addresses the following question, which is of interest in the design of a multiuser decentralized network. Given a total system bandwidth of W Hz and a fixed data rate constraint of R bps for each transmission, how many frequency slots N of size W/N should the band be partitioned into in order to maximize the number of simultaneous links in the network? Dividing the available spectrum results in two competing effects. On the positive side, a larger N allows for more parallel, noninterfering communications to take place in the same area. On the negative side, a larger N increases the SINR requirement for each link because the same information rate must be achieved over less bandwidth. Exploring this tradeoff and determining the optimum value of N in terms of the system parameters is the focus of the paper. Using stochastic geometry, the optimal SINR threshold - which directly corresponds to the optimal spectral efficiency - is derived for both the low SNR (power-limited) and high SNR (interference-limited) regimes. This leads to the optimum choice of the number of frequency bands N in terms of the path loss exponent, power and noise spectral density, desired rate, and total bandwidth.<|reference_end|> | arxiv | @article{jindal2007bandwidth,
title={Bandwidth Partitioning in Decentralized Wireless Networks},
author={Nihar Jindal, Jeffrey G. Andrews, Steven Weber},
journal={arXiv preprint arXiv:0711.0277},
year={2007},
doi={10.1109/T-WC.2008.071220},
archivePrefix={arXiv},
eprint={0711.0277},
primaryClass={cs.IT math.IT}
} | jindal2007bandwidth |
arxiv-1710 | 0711.0301 | Throughput Optimal On-Line Algorithms for Advanced Resource Reservation in Ultra High-Speed Networks | <|reference_start|>Throughput Optimal On-Line Algorithms for Advanced Resource Reservation in Ultra High-Speed Networks: Advanced channel reservation is emerging as an important feature of ultra high-speed networks requiring the transfer of large files. Applications include scientific data transfers and database backup. In this paper, we present two new, on-line algorithms for advanced reservation, called BatchAll and BatchLim, that are guaranteed to achieve optimal throughput performance, based on multi-commodity flow arguments. Both algorithms are shown to have polynomial-time complexity and provable bounds on the maximum delay for 1+epsilon bandwidth augmented networks. The BatchLim algorithm returns the completion time of a connection immediately as a request is placed, but at the expense of a slightly looser competitive ratio than that of BatchAll. We also present a simple approach that limits the number of parallel paths used by the algorithms while provably bounding the maximum reduction factor in the transmission throughput. We show that, although the number of different paths can be exponentially large, the actual number of paths needed to approximate the flow is quite small and proportional to the number of edges in the network. Simulations for a number of topologies show that, in practice, 3 to 5 parallel paths are sufficient to achieve close to optimal performance. The performance of the competitive algorithms are also compared to a greedy benchmark, both through analysis and simulation.<|reference_end|> | arxiv | @article{cohen2007throughput,
title={Throughput Optimal On-Line Algorithms for Advanced Resource Reservation
in Ultra High-Speed Networks},
author={Reuven Cohen, Niloofar Fazlollahi and David Starobinski},
journal={arXiv preprint arXiv:0711.0301},
year={2007},
archivePrefix={arXiv},
eprint={0711.0301},
primaryClass={cs.NI}
} | cohen2007throughput |
arxiv-1711 | 0711.0311 | Improving the LP bound of a MILP by branching concurrently | <|reference_start|>Improving the LP bound of a MILP by branching concurrently: We'll measure the differences of the dual variables and the gain of the objective function when creating new problems, which each has one inequality more than the starting LP-instance. These differences of the dual variables are naturally connected to the branches. Then we'll choose those differences of dual variables, so that for all combinations of choices at the connected branches, all dual inequalities will hold for sure. By adding the gain of each chosen branching, we get a total gain, which gives a better limit of the original problem. By this technique it is also possible to create cuts.<|reference_end|> | arxiv | @article{buesching2007improving,
title={Improving the LP bound of a MILP by branching concurrently},
author={H. Georg Buesching},
journal={arXiv preprint arXiv:0711.0311},
year={2007},
archivePrefix={arXiv},
eprint={0711.0311},
primaryClass={cs.DM cs.DS}
} | buesching2007improving |
arxiv-1712 | 0711.0314 | Resource and Application Models for Advanced Grid Schedulers | <|reference_start|>Resource and Application Models for Advanced Grid Schedulers: As Grid computing is becoming an inevitable future, managing, scheduling and monitoring dynamic, heterogeneous resources will present new challenges. Solutions will have to be agile and adaptive, support self-organization and autonomous management, while maintaining optimal resource utilisation. Presented in this paper are basic principles and architectural concepts for efficient resource allocation in heterogeneous Grid environment.<|reference_end|> | arxiv | @article{lazarevic2007resource,
title={Resource and Application Models for Advanced Grid Schedulers},
author={Aleksandar Lazarevic, Lionel Sacks},
journal={London Communications Symposium 2003},
year={2007},
archivePrefix={arXiv},
eprint={0711.0314},
primaryClass={cs.DC}
} | lazarevic2007resource |
arxiv-1713 | 0711.0315 | Measuring and Monitoring Grid Resource Utilisation | <|reference_start|>Measuring and Monitoring Grid Resource Utilisation: Effective resource utilisation monitoring and highly granular yet adaptive measurements are prerequisites for a more efficient Grid scheduler. We present a suite of measurement applications able to monitor per-process resource utilisation, and a customisable tool for emulating observed utilisation models.<|reference_end|> | arxiv | @article{lazarevic2007measuring,
title={Measuring and Monitoring Grid Resource Utilisation},
author={Aleksandar Lazarevic, Lionel Sacks},
journal={London Communications Symposium 2004},
year={2007},
archivePrefix={arXiv},
eprint={0711.0315},
primaryClass={cs.DC}
} | lazarevic2007measuring |
arxiv-1714 | 0711.0316 | A Study of Grid Applications: Scheduling Perspective | <|reference_start|>A Study of Grid Applications: Scheduling Perspective: As the Grid evolves from a high performance cluster middleware to a multipurpose utility computing framework, a good understanding of Grid applications, their statistics and utilisation patterns is required. This study looks at job execution times and resource utilisations in a Grid environment, and their significance in cluster and network dimensioning, local level scheduling and resource management.<|reference_end|> | arxiv | @article{lazarevic2007a,
title={A Study of Grid Applications: Scheduling Perspective},
author={Aleksandar Lazarevic, Lionel Sacks},
journal={London Communications Symposium 2005},
year={2007},
archivePrefix={arXiv},
eprint={0711.0316},
primaryClass={cs.DC}
} | lazarevic2007a |
arxiv-1715 | 0711.0325 | Self-Organising management of Grid environments | <|reference_start|>Self-Organising management of Grid environments: This paper presents basic concepts, architectural principles and algorithms for efficient resource and security management in cluster computing environments and the Grid. The work presented in this paper is funded by BTExacT and the EPSRC project SO-GRM (GR/S21939).<|reference_end|> | arxiv | @article{liabotis2007self-organising,
title={Self-Organising management of Grid environments},
author={Ioannis Liabotis, Ognjen Prnjat, Tope Olukemi, Adrian Li Mow Ching,
Aleksandar Lazarevic, Lionel Sacks, Mike Fisher, Paul McKee},
journal={International Symposium on Telecommunications 2003},
year={2007},
archivePrefix={arXiv},
eprint={0711.0325},
primaryClass={cs.DC}
} | liabotis2007self-organising |
arxiv-1716 | 0711.0326 | Enabling Adaptive Grid Scheduling and Resource Management | <|reference_start|>Enabling Adaptive Grid Scheduling and Resource Management: Wider adoption of the Grid concept has led to an increasing amount of federated computational, storage and visualisation resources being available to scientists and researchers. Distributed and heterogeneous nature of these resources renders most of the legacy cluster monitoring and management approaches inappropriate, and poses new challenges in workflow scheduling on such systems. Effective resource utilisation monitoring and highly granular yet adaptive measurements are prerequisites for a more efficient Grid scheduler. We present a suite of measurement applications able to monitor per-process resource utilisation, and a customisable tool for emulating observed utilisation models. We also outline our future work on a predictive and probabilistic Grid scheduler. The research is undertaken as part of UK e-Science EPSRC sponsored project SO-GRM (Self-Organising Grid Resource Management) in cooperation with BT.<|reference_end|> | arxiv | @article{lazarevic2007enabling,
title={Enabling Adaptive Grid Scheduling and Resource Management},
author={Aleksandar Lazarevic, Lionel Sacks, Ognjen Prnjat},
journal={International Symposium on Integrated Network Management 2005},
year={2007},
archivePrefix={arXiv},
eprint={0711.0326},
primaryClass={cs.DC}
} | lazarevic2007enabling |
arxiv-1717 | 0711.0327 | Managing Uncertainty: A Case for Probabilistic Grid Scheduling | <|reference_start|>Managing Uncertainty: A Case for Probabilistic Grid Scheduling: The Grid technology is evolving into a global, service-orientated architecture, a universal platform for delivering future high demand computational services. Strong adoption of the Grid and the utility computing concept is leading to an increasing number of Grid installations running a wide range of applications of different size and complexity. In this paper we address the problem of elivering deadline/economy based scheduling in a heterogeneous application environment using statistical properties of job historical executions and its associated meta-data. This approach is motivated by a study of six-month computational load generated by Grid applications in a multi-purpose Grid cluster serving a community of twenty e-Science projects. The observed job statistics, resource utilisation and user behaviour is discussed in the context of management approaches and models most suitable for supporting a probabilistic and autonomous scheduling architecture.<|reference_end|> | arxiv | @article{lazarevic2007managing,
title={Managing Uncertainty: A Case for Probabilistic Grid Scheduling},
author={Aleksandar Lazarevic, Lionel Sacks, Ognjen Prnjat},
journal={arXiv preprint arXiv:0711.0327},
year={2007},
archivePrefix={arXiv},
eprint={0711.0327},
primaryClass={cs.DC}
} | lazarevic2007managing |
arxiv-1718 | 0711.0344 | Automatic Coding Rule Conformance Checking Using Logic Programs | <|reference_start|>Automatic Coding Rule Conformance Checking Using Logic Programs: Some approaches to increasing program reliability involve a disciplined use of programming languages so as to minimise the hazards introduced by error-prone features. This is realised by writing code that is constrained to a subset of the a priori admissible programs, and that, moreover, may use only a subset of the language. These subsets are determined by a collection of so-called coding rules.<|reference_end|> | arxiv | @article{marpons-ucero2007automatic,
title={Automatic Coding Rule Conformance Checking Using Logic Programs},
  author={Guillem Marpons-Ucero, Julio Mari\~no, \'Angel Herranz, Lars-{\AA}ke
  Fredlund, Manuel Carro, Juan Jos\'e Moreno-Navarro},
journal={arXiv preprint arXiv:0711.0344},
year={2007},
archivePrefix={arXiv},
eprint={0711.0344},
primaryClass={cs.PL cs.SE}
} | marpons-ucero2007automatic |
arxiv-1719 | 0711.0345 | A Prolog-based Environment for Reasoning about Programming Languages (Extended abstract) | <|reference_start|>A Prolog-based Environment for Reasoning about Programming Languages (Extended abstract): ECLAIR is a Prolog-based prototype system aiming to provide a functionally complete environment for the study, development and evaluation of programming language analysis and implementation tools. In this paper, we sketch the overall structure of the system, outlining the main methodologies and technologies underlying its components. We also discuss the appropriateness of Prolog as the implementation language for the system: besides highlighting its strengths, we also point out a few potential weaknesses, hinting at possible solutions.<|reference_end|> | arxiv | @article{bagnara2007a,
title={A Prolog-based Environment for Reasoning about Programming Languages
(Extended abstract)},
author={Roberto Bagnara, Patricia Hill, Enea Zaffanella},
journal={arXiv preprint arXiv:0711.0345},
year={2007},
archivePrefix={arXiv},
eprint={0711.0345},
primaryClass={cs.PL cs.SE}
} | bagnara2007a |
arxiv-1720 | 0711.0348 | Compiling ER Specifications into Declarative Programs | <|reference_start|>Compiling ER Specifications into Declarative Programs: This paper proposes an environment to support high-level database programming in a declarative programming language. In order to ensure safe database updates, all access and update operations related to the database are generated from high-level descriptions in the entity- relationship (ER) model. We propose a representation of ER diagrams in the declarative language Curry so that they can be constructed by various tools and then translated into this representation. Furthermore, we have implemented a compiler from this representation into a Curry program that provides access and update operations based on a high-level API for database programming.<|reference_end|> | arxiv | @article{braßel2007compiling,
title={Compiling ER Specifications into Declarative Programs},
  author={Bernd Bra{\ss}el, Michael Hanus and Marion Muller},
journal={arXiv preprint arXiv:0711.0348},
year={2007},
archivePrefix={arXiv},
eprint={0711.0348},
primaryClass={cs.PL cs.SE}
} | braßel2007compiling |
arxiv-1721 | 0711.0350 | Intermittent estimation of stationary time series | <|reference_start|>Intermittent estimation of stationary time series: Let $\{X_n\}_{n=0}^{\infty}$ be a stationary real-valued time series with unknown distribution. Our goal is to estimate the conditional expectation of $X_{n+1}$ based on the observations $X_i$, $0\le i\le n$ in a strongly consistent way. Bailey and Ryabko proved that this is not possible even for ergodic binary time series if one estimates at all values of $n$. We propose a very simple algorithm which will make prediction infinitely often at carefully selected stopping times chosen by our rule. We show that under certain conditions our procedure is strongly (pointwise) consistent, and $L_2$ consistent without any condition. An upper bound on the growth of the stopping times is also presented in this paper.<|reference_end|> | arxiv | @article{morvai2007intermittent,
title={Intermittent estimation of stationary time series},
author={G. Morvai and B. Weiss},
journal={Test 13 (2004), no. 2, 525--542},
year={2007},
archivePrefix={arXiv},
eprint={0711.0350},
primaryClass={math.PR cs.IT math.IT}
} | morvai2007intermittent |
arxiv-1722 | 0711.0351 | Noise threshold for universality of 2-input gates | <|reference_start|>Noise threshold for universality of 2-input gates: Evans and Pippenger showed in 1998 that noisy gates with 2 inputs are universal for arbitrary computation (i.e. can compute any function with bounded error), if all gates fail independently with probability epsilon and epsilon<theta, where theta is roughly 8.856%. We show that formulas built from gates with 2 inputs, in which each gate fails with probability at least theta cannot be universal. Hence, there is a threshold on the tolerable noise for formulas with 2-input gates and it is theta. We conjecture that the same threshold also holds for circuits.<|reference_end|> | arxiv | @article{unger2007noise,
title={Noise threshold for universality of 2-input gates},
author={Falk Unger},
journal={arXiv preprint arXiv:0711.0351},
year={2007},
archivePrefix={arXiv},
eprint={0711.0351},
primaryClass={cs.IT cs.CC math.IT}
} | unger2007noise |
arxiv-1723 | 0711.0366 | Shannon Theoretic Limits on Noisy Compressive Sampling | <|reference_start|>Shannon Theoretic Limits on Noisy Compressive Sampling: In this paper, we study the number of measurements required to recover a sparse signal in ${\mathbb C}^M$ with $L$ non-zero coefficients from compressed samples in the presence of noise. For a number of different recovery criteria, we prove that $O(L)$ (an asymptotically linear multiple of $L$) measurements are necessary and sufficient if $L$ grows linearly as a function of $M$. This improves on the existing literature that is mostly focused on variants of a specific recovery algorithm based on convex programming, for which $O(L\log(M-L))$ measurements are required. We also show that $O(L\log(M-L))$ measurements are required in the sublinear regime ($L = o(M)$).<|reference_end|> | arxiv | @article{akçakaya2007shannon,
title={Shannon Theoretic Limits on Noisy Compressive Sampling},
  author={Mehmet Ak{\c{c}}akaya and Vahid Tarokh},
journal={arXiv preprint arXiv:0711.0366},
year={2007},
archivePrefix={arXiv},
eprint={0711.0366},
primaryClass={cs.IT math.IT}
} | akçakaya2007shannon |
arxiv-1724 | 0711.0367 | Nonparametric inference for ergodic, stationary time series | <|reference_start|>Nonparametric inference for ergodic, stationary time series: The setting is a stationary, ergodic time series. The challenge is to construct a sequence of functions, each based on only finite segments of the past, which together provide a strongly consistent estimator for the conditional probability of the next observation, given the infinite past. Ornstein gave such a construction for the case that the values are from a finite set, and recently Algoet extended the scheme to time series with coordinates in a Polish space. The present study relates a different solution to the challenge. The algorithm is simple and its verification is fairly transparent. Some extensions to regression, pattern recognition, and on-line forecasting are mentioned.<|reference_end|> | arxiv | @article{morvai2007nonparametric,
title={Nonparametric inference for ergodic, stationary time series},
author={G. Morvai, S. Yakowitz, and L. Gyorfi},
journal={Ann. Statist. 24 (1996), no. 1, 370--379},
year={2007},
archivePrefix={arXiv},
eprint={0711.0367},
primaryClass={math.PR cs.IT math.IT}
} | morvai2007nonparametric |
arxiv-1725 | 0711.0436 | An example of algebraization of analysis and Fibonacci cobweb poset characterization | <|reference_start|>An example of algebraization of analysis and Fibonacci cobweb poset characterization: In recent Kwasniewski's papers inspired by O. V. Viskov it was shown that the $\psi$-calculus in parts appears to be almost automatic, natural extension of classical operator calculus of Rota - Mullin or equivalently - of umbral calculus of Roman and Rota. At the same time this calculus is an example of the algebraization of the analysis - here restricted to the algebra of polynomials. The first part of the article is the review of the recent author's contribution. The main definitions and theorems of Finite Fibonomial Operator Calculus which is a special case of $\psi$-extented Rota's finite operator calculus are presented there. In the second part the characterization of Fibonacci Cobweb poset P as DAG and oDAG is given. The dim 2 poset such that its Hasse diagram coincide with digraf of P is constructed.<|reference_end|> | arxiv | @article{krot-sieniawska2007an,
title={An example of algebraization of analysis and Fibonacci cobweb poset
characterization},
author={Ewa Krot-Sieniawska},
journal={arXiv preprint arXiv:0711.0436},
year={2007},
archivePrefix={arXiv},
eprint={0711.0436},
primaryClass={math.CO cs.DM math.GM}
} | krot-sieniawska2007an |
arxiv-1726 | 0711.0471 | Prediction for discrete time series | <|reference_start|>Prediction for discrete time series: Let $\{X_n\}$ be a stationary and ergodic time series taking values from a finite or countably infinite set ${\cal X}$. Assume that the distribution of the process is otherwise unknown. We propose a sequence of stopping times $\lambda_n$ along which we will be able to estimate the conditional probability $P(X_{\lambda_n+1}=x|X_0,...,X_{\lambda_n})$ from data segment $(X_0,...,X_{\lambda_n})$ in a pointwise consistent way for a restricted class of stationary and ergodic finite or countably infinite alphabet time series which includes among others all stationary and ergodic finitarily Markovian processes. If the stationary and ergodic process turns out to be finitarily Markovian (among others, all stationary and ergodic Markov chains are included in this class) then $ \lim_{n\to \infty} {n\over \lambda_n}>0$ almost surely. If the stationary and ergodic process turns out to possess finite entropy rate then $\lambda_n$ is upperbounded by a polynomial, eventually almost surely.<|reference_end|> | arxiv | @article{morvai2007prediction,
title={Prediction for discrete time series},
author={G. Morvai and B. Weiss},
journal={Probab. Theory Related Fields 132 (2005), no. 1, 1--12},
year={2007},
archivePrefix={arXiv},
eprint={0711.0471},
primaryClass={math.PR cs.IT math.IT}
} | morvai2007prediction |
arxiv-1727 | 0711.0472 | Order estimation of Markov chains | <|reference_start|>Order estimation of Markov chains: We describe estimators $\chi_n(X_0,X_1,...,X_n)$, which when applied to an unknown stationary process taking values from a countable alphabet ${\cal X}$, converge almost surely to $k$ in case the process is a $k$-th order Markov chain and to infinity otherwise.<|reference_end|> | arxiv | @article{morvai2007order,
title={Order estimation of Markov chains},
author={G. Morvai and B. Weiss},
journal={IEEE Trans. Inform. Theory 51 (2005), no. 4, 1496--1497},
year={2007},
archivePrefix={arXiv},
eprint={0711.0472},
primaryClass={math.PR cs.IT math.IT}
} | morvai2007order |
arxiv-1728 | 0711.0486 | Triangular Peg Solitaire Unlimited | <|reference_start|>Triangular Peg Solitaire Unlimited: Triangular peg solitaire is a well-known one-person game or puzzle. When one peg captures many pegs consecutively, this is called a sweep. We investigate whether the game can end in a dramatic fashion, with one peg sweeping all remaining pegs off the board. For triangular boards of side 6 and 8 (with 21 and 36 holes, respectively) the geometrically longest sweep can occur as the final move in a game. On larger triangular boards, we demonstrate how to construct solutions that finish with arbitrarily long sweeps. We also consider the problem of finding solutions that minimize the total number of moves (where a move is one or more consecutive jumps by the same peg).<|reference_end|> | arxiv | @article{bell2007triangular,
title={Triangular Peg Solitaire Unlimited},
author={George I. Bell},
journal={The Games and Puzzles Journal, Issue 36, November-December 2004
http://gpj.connectfree.co.uk/gpjr.htm},
year={2007},
archivePrefix={arXiv},
eprint={0711.0486},
primaryClass={math.CO cs.DM}
} | bell2007triangular |
arxiv-1729 | 0711.0528 | Web-based Interface in Public Cluster | <|reference_start|>Web-based Interface in Public Cluster: A web-based interface dedicated for cluster computer which is publicly accessible for free is introduced. The interface plays an important role to enable secure public access, while providing user-friendly computational environment for end-users and easy maintainance for administrators as well. The whole architecture which integrates both aspects of hardware and software is briefly explained. It is argued that the public cluster is globally a unique approach, and could be a new kind of e-learning system especially for parallel programming communities.<|reference_end|> | arxiv | @article{akbar2007web-based,
title={Web-based Interface in Public Cluster},
author={Z. Akbar and L. T. Handoko},
journal={arXiv preprint arXiv:0711.0528},
year={2007},
number={FISIKALIPI-07016},
archivePrefix={arXiv},
eprint={0711.0528},
primaryClass={cs.DC cs.CY}
} | akbar2007web-based |
arxiv-1730 | 0711.0538 | Spreadsheet Engineering: A Research Framework | <|reference_start|>Spreadsheet Engineering: A Research Framework: Spreadsheet engineering adapts the lessons of software engineering to spreadsheets, providing eight principles as a framework for organizing spreadsheet programming recommendations. Spreadsheets raise issues inadequately addressed by software engineering. Spreadsheets are a powerful modeling language, allowing strategic rapid model change, and enabling exploratory modeling. Spreadsheets users learn slowly with experience because they focus on the problem domain not programming. The heterogeneity of spreadsheet users requires a taxonomy to guide recommendations. Deployment of best practices is difficult and merits research.<|reference_end|> | arxiv | @article{grossman2007spreadsheet,
title={Spreadsheet Engineering: A Research Framework},
author={Thomas A. Grossman},
journal={Proc. European Spreadsheet Risks Int. Grp. 2002 23-34 ISBN 1 86166
182 7},
year={2007},
archivePrefix={arXiv},
eprint={0711.0538},
primaryClass={cs.SE}
} | grossman2007spreadsheet |
arxiv-1731 | 0711.0557 | Kerdock Codes for Limited Feedback Precoded MIMO Systems | <|reference_start|>Kerdock Codes for Limited Feedback Precoded MIMO Systems: A codebook based limited feedback strategy is a practical way to obtain partial channel state information at the transmitter in a precoded multiple-input multiple-output (MIMO) wireless system. Conventional codebook designs use Grassmannian packing, equiangular frames, vector quantization, or Fourier based constructions. While the capacity and error rate performance of conventional codebook constructions have been extensively investigated, constructing these codebooks is notoriously difficult relying on techniques such as nonlinear search or iterative algorithms. Further, the resulting codebooks may not have a systematic structure to facilitate storage of the codebook and low search complexity. In this paper, we propose a new systematic codebook design based on Kerdock codes and mutually unbiased bases. The proposed Kerdock codebook consists of multiple mutually unbiased unitary bases matrices with quaternary entries and the identity matrix. We propose to derive the beamforming and precoding codebooks from this base codebook, eliminating the requirement to store multiple codebooks. The propose structure requires little memory to store and, as we show, the quaternary structure facilitates codeword search. We derive the chordal distance for two antenna and four antenna codebooks, showing that the proposed codebooks compare favorably with prior designs. Monte Carlo simulations are used to compare achievable rates and error rates for different codebooks sizes.<|reference_end|> | arxiv | @article{inoue2007kerdock,
title={Kerdock Codes for Limited Feedback Precoded MIMO Systems},
author={Takao Inoue and Heath, Jr., Robert W.},
journal={arXiv preprint arXiv:0711.0557},
year={2007},
archivePrefix={arXiv},
eprint={0711.0557},
primaryClass={cs.IT math.IT}
} | inoue2007kerdock |
arxiv-1732 | 0711.0574 | Singular Curves in the Joint Space and Cusp Points of 3-RPR parallel manipulators | <|reference_start|>Singular Curves in the Joint Space and Cusp Points of 3-RPR parallel manipulators: This paper investigates the singular curves in the joint space of a family of planar parallel manipulators. It focuses on special points, referred to as cusp points, which may appear on these curves. Cusp points play an important role in the kinematic behavior of parallel manipulators since they make possible a nonsingular change of assembly mode. The purpose of this study is twofold. First, it exposes a method to compute joint space singular curves of 3-RPR planar parallel manipulators. Second, it presents an algorithm for detecting and computing all cusp points in the joint space of these same manipulators.<|reference_end|> | arxiv | @article{zein2007singular,
title={Singular Curves in the Joint Space and Cusp Points of 3-RPR parallel
manipulators},
author={Mazen Zein (IRCCyN) and Philippe Wenger (IRCCyN) and Damien Chablat (IRCCyN)},
journal={Robotica 25, 6 (2007) 717-724},
year={2007},
doi={10.1017/S0263574707003785},
archivePrefix={arXiv},
eprint={0711.0574},
primaryClass={cs.RO}
} | zein2007singular |
arxiv-1733 | 0711.0607 | Exploring the Composition of Unit Test Suites | <|reference_start|>Exploring the Composition of Unit Test Suites: In agile software development, test code can considerably contribute to the overall source code size. Being a valuable asset both in terms of verification and documentation, the composition of a test suite needs to be well understood in order to identify opportunities as well as weaknesses for further evolution. In this paper, we argue that the visualization of structural characteristics is a viable means to support the exploration of test suites. Thanks to general agreement on a limited set of key test design principles, such visualizations are relatively easy to interpret. In particular, we present visualizations that support testers in (i) locating test cases; (ii) examining the relation between test code and production code; and (iii) studying the composition of and dependencies within test cases. By means of two case studies, we demonstrate how visual patterns help to identify key test suite characteristics. This approach forms the first step in assisting a developer to build up understanding about test suites beyond code reading.<|reference_end|> | arxiv | @article{van rompaey2007exploring,
title={Exploring the Composition of Unit Test Suites},
author={Bart Van Rompaey and Serge Demeyer},
journal={arXiv preprint arXiv:0711.0607},
year={2007},
number={UA TR2007-01},
archivePrefix={arXiv},
eprint={0711.0607},
primaryClass={cs.SE}
} | van rompaey2007exploring |
arxiv-1734 | 0711.0618 | PIDoc: Wiki style Literate Programming for Prolog | <|reference_start|>PIDoc: Wiki style Literate Programming for Prolog: This document introduces PlDoc, a literate programming system for Prolog. Starting point for PlDoc was minimal distraction from the programming task and maximal immediate reward, attempting to seduce the programmer to use the system. Minimal distraction is achieved using structured comments that are as closely as possible related to common Prolog documentation practices. Immediate reward is provided by a web interface powered from the Prolog development environment that integrates searching and browsing application and system documentation. When accessed from localhost, it is possible to go from documentation shown in a browser to the source code displayed in the user's editor of choice.<|reference_end|> | arxiv | @article{wielemaker2007pidoc:,
title={PlDoc: Wiki style Literate Programming for Prolog},
author={Jan Wielemaker and Anjo Anjewierden},
journal={arXiv preprint arXiv:0711.0618},
year={2007},
archivePrefix={arXiv},
eprint={0711.0618},
primaryClass={cs.PL cs.SE}
} | wielemaker2007pidoc: |
arxiv-1735 | 0711.0643 | A parallel gravitational N-body kernel | <|reference_start|>A parallel gravitational N-body kernel: We describe source code level parallelization for the {\tt kira} direct gravitational $N$-body integrator, the workhorse of the {\tt starlab} production environment for simulating dense stellar systems. The parallelization strategy, called ``j-parallelization'', involves the partition of the computational domain by distributing all particles in the system among the available processors. Partial forces on the particles to be advanced are calculated in parallel by their parent processors, and are then summed in a final global operation. Once total forces are obtained, the computing elements proceed to the computation of their particle trajectories. We report the results of timing measurements on four different parallel computers, and compare them with theoretical predictions. The computers employ either a high-speed interconnect, a NUMA architecture to minimize the communication overhead or are distributed in a grid. The code scales well in the domain tested, which ranges from 1024 - 65536 stars on 1 - 128 processors, providing satisfactory speedup. Running the production environment on a grid becomes inefficient for more than 60 processors distributed across three sites.<|reference_end|> | arxiv | @article{zwart2007a,
title={A parallel gravitational N-body kernel},
author={Simon Portegies Zwart (Amsterdam) and Steve McMillan (Drexel) and Derek
Groen (Amsterdam) and Alessia Gualandris (Rochester) and Michael Sipior (Astron)
and Willem Vermin (SARA)},
journal={arXiv preprint arXiv:0711.0643},
year={2007},
doi={10.1016/j.newast.2007.11.002},
archivePrefix={arXiv},
eprint={0711.0643},
primaryClass={astro-ph cs.DC}
} | zwart2007a |
arxiv-1736 | 0711.0666 | Discriminative Phoneme Sequences Extraction for Non-Native Speaker's Origin Classification | <|reference_start|>Discriminative Phoneme Sequences Extraction for Non-Native Speaker's Origin Classification: In this paper we present an automated method for the classification of the origin of non-native speakers. The origin of non-native speakers could be identified by a human listener based on the detection of typical pronunciations for each nationality. Thus we suppose the existence of several phoneme sequences that might allow the classification of the origin of non-native speakers. Our new method is based on the extraction of discriminative sequences of phonemes from a non-native English speech database. These sequences are used to construct a probabilistic classifier for the speakers' origin. The existence of discriminative phone sequences in non-native speech is a significant result of this work. The system that we have developed achieved a significant correct classification rate of 96.3% and a significant error reduction compared to some other tested techniques.<|reference_end|> | arxiv | @article{bouselmi2007discriminative,
title={Discriminative Phoneme Sequences Extraction for Non-Native Speaker's
Origin Classification},
author={Ghazi Bouselmi (INRIA Lorraine - LORIA) and Dominique Fohr (INRIA
Lorraine - LORIA) and Irina Illina (INRIA Lorraine - LORIA) and Jean-Paul Haton
(INRIA Lorraine - LORIA)},
journal={Dans ISSPA, International Symposium on Signal Processing and its
Applications (2007)},
year={2007},
archivePrefix={arXiv},
eprint={0711.0666},
primaryClass={cs.CL}
} | bouselmi2007discriminative |
arxiv-1737 | 0711.0692 | On the defence notion | <|reference_start|>On the defence notion: 'Trojan horses', 'logic bombs', 'armoured viruses' and 'cryptovirology' are terms recalling war gears. In fact, concepts of attack and defence drive the world of computer virology, which looks like a war universe in an information society. This war has several shapes, from invasions of a network by worms, to military and industrial espionage ...<|reference_end|> | arxiv | @article{bonfante2007on,
title={On the defence notion},
author={Anne Bonfante (INRIA Lorraine - LORIA) and Jean-Yves Marion (INRIA
Lorraine - LORIA)},
journal={Journal in Computer Virology 3, 4 (2007) 247-251},
year={2007},
doi={10.1007/s11416-007-0058-9},
archivePrefix={arXiv},
eprint={0711.0692},
primaryClass={cs.CR}
} | bonfante2007on |
arxiv-1738 | 0711.0694 | Performance Bounds for Lambda Policy Iteration and Application to the Game of Tetris | <|reference_start|>Performance Bounds for Lambda Policy Iteration and Application to the Game of Tetris: We consider the discrete-time infinite-horizon optimal control problem formalized by Markov Decision Processes. We revisit the work of Bertsekas and Ioffe, that introduced $\lambda$ Policy Iteration, a family of algorithms parameterized by $\lambda$ that generalizes the standard algorithms Value Iteration and Policy Iteration, and has some deep connections with the Temporal Differences algorithm TD($\lambda$) described by Sutton and Barto. We deepen the original theory developped by the authors by providing convergence rate bounds which generalize standard bounds for Value Iteration described for instance by Puterman. Then, the main contribution of this paper is to develop the theory of this algorithm when it is used in an approximate form and show that this is sound. Doing so, we extend and unify the separate analyses developped by Munos for Approximate Value Iteration and Approximate Policy Iteration. Eventually, we revisit the use of this algorithm in the training of a Tetris playing controller as originally done by Bertsekas and Ioffe. We provide an original performance bound that can be applied to such an undiscounted control problem. Our empirical results are different from those of Bertsekas and Ioffe (which were originally qualified as "paradoxical" and "intriguing"), and much more conform to what one would expect from a learning experiment. We discuss the possible reason for such a difference.<|reference_end|> | arxiv | @article{scherrer2007performance,
title={Performance Bounds for Lambda Policy Iteration and Application to the
Game of Tetris},
author={Bruno Scherrer (INRIA Lorraine - LORIA)},
journal={arXiv preprint arXiv:0711.0694},
year={2007},
archivePrefix={arXiv},
eprint={0711.0694},
primaryClass={cs.AI cs.RO}
} | scherrer2007performance |
arxiv-1739 | 0711.0705 | Feedback Capacity of the Compound Channel | <|reference_start|>Feedback Capacity of the Compound Channel: In this work we find the capacity of a compound finite-state channel with time-invariant deterministic feedback. The model we consider involves the use of fixed length block codes. Our achievability result includes a proof of the existence of a universal decoder for the family of finite-state channels with feedback. As a consequence of our capacity result, we show that feedback does not increase the capacity of the compound Gilbert-Elliot channel. Additionally, we show that for a stationary and uniformly ergodic Markovian channel, if the compound channel capacity is zero without feedback then it is zero with feedback. Finally, we use our result on the finite-state channel to show that the feedback capacity of the memoryless compound channel is given by $\inf_{\theta} \max_{Q_X} I(X;Y|\theta)$.<|reference_end|> | arxiv | @article{shrader2007feedback,
title={Feedback Capacity of the Compound Channel},
author={Brooke Shrader and Haim Permuter},
journal={arXiv preprint arXiv:0711.0705},
year={2007},
doi={10.1109/TIT.2009.2023727},
archivePrefix={arXiv},
eprint={0711.0705},
primaryClass={cs.IT math.IT}
} | shrader2007feedback |
arxiv-1740 | 0711.0708 | A Rank-Metric Approach to Error Control in Random Network Coding | <|reference_start|>A Rank-Metric Approach to Error Control in Random Network Coding: The problem of error control in random linear network coding is addressed from a matrix perspective that is closely related to the subspace perspective of K\"otter and Kschischang. A large class of constant-dimension subspace codes is investigated. It is shown that codes in this class can be easily constructed from rank-metric codes, while preserving their distance properties. Moreover, it is shown that minimum distance decoding of such subspace codes can be reformulated as a generalized decoding problem for rank-metric codes where partial information about the error is available. This partial information may be in the form of erasures (knowledge of an error location but not its value) and deviations (knowledge of an error value but not its location). Taking erasures and deviations into account (when they occur) strictly increases the error correction capability of a code: if $\mu$ erasures and $\delta$ deviations occur, then errors of rank $t$ can always be corrected provided that $2t \leq d - 1 + \mu + \delta$, where $d$ is the minimum rank distance of the code. For Gabidulin codes, an important family of maximum rank distance codes, an efficient decoding algorithm is proposed that can properly exploit erasures and deviations. In a network coding application where $n$ packets of length $M$ over $F_q$ are transmitted, the complexity of the decoding algorithm is given by $O(dM)$ operations in an extension field $F_{q^n}$.<|reference_end|> | arxiv | @article{silva2007a,
title={A Rank-Metric Approach to Error Control in Random Network Coding},
author={Danilo Silva and Frank R. Kschischang and Ralf K{\"o}tter},
journal={IEEE Transactions on Information Theory, vol. 54, no. 9, pp.
3951-3967, Sep. 2008},
year={2007},
doi={10.1109/TIT.2008.928291},
archivePrefix={arXiv},
eprint={0711.0708},
primaryClass={cs.IT math.IT}
} | silva2007a |
arxiv-1741 | 0711.0711 | Information-Theoretic Security in Wireless Networks | <|reference_start|>Information-Theoretic Security in Wireless Networks: This paper summarizes recent contributions of the authors and their co-workers in the area of information-theoretic security.<|reference_end|> | arxiv | @article{liang2007information-theoretic,
title={Information-Theoretic Security in Wireless Networks},
author={Yingbin Liang and H. Vincent Poor and Shlomo Shamai (Shitz)},
journal={arXiv preprint arXiv:0711.0711},
year={2007},
archivePrefix={arXiv},
eprint={0711.0711},
primaryClass={cs.IT cs.CR math.IT}
} | liang2007information-theoretic |
arxiv-1742 | 0711.0784 | Addendum to Research MMMCV; A Man/Microbio/Megabio/Computer Vision | <|reference_start|>Addendum to Research MMMCV; A Man/Microbio/Megabio/Computer Vision: In October 2007, a Research Proposal for the University of Sydney, Australia, the author suggested that biovie-physical phenomenon as `electrodynamic dependant biological vision', is governed by relativistic quantum laws and biovision. The phenomenon on the basis of `biovielectroluminescence', satisfies man/microbio/megabio/computer vision (MMMCV), as a robust candidate for physical and visual sciences. The general aim of this addendum is to present a refined text of Sections 1-3 of that proposal and highlighting the contents of its Appendix in form of a `Mechanisms' Section. We then briefly remind in an article aimed for December 2007, by appending two more equations into Section 3, a theoretical II-time scenario as a time model well-proposed for the phenomenon. The time model within the core of the proposal, plays a significant role in emphasizing the principle points on Objectives no. 1-8, Sub-hypothesis 3.1.2, mentioned in Article [arXiv:0710.0410]. It also expresses the time concept in terms of causing quantized energy f(|E|) of time |t|, emit in regard to shortening the probability of particle loci as predictable patterns of particle's un-occurred motion, a solution to Heisenberg's uncertainty principle (HUP) into a simplistic manner. We conclude that, practical frames via a time algorithm to this model, fixates such predictable patterns of motion of scenery bodies onto recordable observation points of a MMMCV system. It even suppresses/predicts superposition phenomena coming from a human subject and/or other bio-subjects for any decision making event, e.g., brainwave quantum patterns based on vision. 
Maintaining the existential probability of Riemann surfaces of II-time scenarios in the context of biovielectroluminescence, makes motion-prediction a possibility.<|reference_end|> | arxiv | @article{alipour2007addendum,
title={Addendum to Research MMMCV; A Man/Microbio/Megabio/Computer Vision},
author={Philip B. Alipour},
journal={arXiv preprint arXiv:0711.0784},
year={2007},
archivePrefix={arXiv},
eprint={0711.0784},
primaryClass={cs.CV cs.CE}
} | alipour2007addendum |
arxiv-1743 | 0711.0811 | Combined Acoustic and Pronunciation Modelling for Non-Native Speech Recognition | <|reference_start|>Combined Acoustic and Pronunciation Modelling for Non-Native Speech Recognition: In this paper, we present several adaptation methods for non-native speech recognition. We have tested pronunciation modelling, MLLR and MAP non-native pronunciation adaptation and HMM models retraining on the HIWIRE foreign accented English speech database. The ``phonetic confusion'' scheme we have developed consists in associating to each spoken phone several sequences of confused phones. In our experiments, we have used different combinations of acoustic models representing the canonical and the foreign pronunciations: spoken and native models, models adapted to the non-native accent with MAP and MLLR. The joint use of pronunciation modelling and acoustic adaptation led to further improvements in recognition accuracy. The best combination of the above mentioned techniques resulted in a relative word error reduction ranging from 46% to 71%.<|reference_end|> | arxiv | @article{bouselmi2007combined,
title={Combined Acoustic and Pronunciation Modelling for Non-Native Speech
Recognition},
author={Ghazi Bouselmi (INRIA Lorraine - LORIA) and Dominique Fohr (INRIA
Lorraine - LORIA) and Irina Illina (INRIA Lorraine - LORIA)},
journal={Dans InterSpeech 2007 (2007)},
year={2007},
archivePrefix={arXiv},
eprint={0711.0811},
primaryClass={cs.CL}
} | bouselmi2007combined |
arxiv-1744 | 0711.0829 | Instruction sequences with indirect jumps | <|reference_start|>Instruction sequences with indirect jumps: We study sequential programs that are instruction sequences with direct and indirect jump instructions. The intuition is that indirect jump instructions are jump instructions where the position of the instruction to jump to is the content of some memory cell. We consider several kinds of indirect jump instructions. For each kind, we define the meaning of programs with indirect jump instructions of that kind by means of a translation into programs without indirect jump instructions. For each kind, the intended behaviour of a program with indirect jump instructions of that kind under execution is the behaviour of the translated program under execution on interaction with some memory device.<|reference_end|> | arxiv | @article{bergstra2007instruction,
title={Instruction sequences with indirect jumps},
author={J. A. Bergstra and C. A. Middelburg},
journal={Scientific Annals of Computer Science, 17:19--46, 2007.
http://www.infoiasi.ro/bin/download/Annals/XVII/XVII_1.pdf},
year={2007},
number={PRG0709},
archivePrefix={arXiv},
eprint={0711.0829},
primaryClass={cs.PL}
} | bergstra2007instruction |
arxiv-1745 | 0711.0834 | An interface group for process components | <|reference_start|>An interface group for process components: We take a process component as a pair of an interface and a behaviour. We study the composition of interacting process components in the setting of process algebra. We formalize the interfaces of interacting process components by means of an interface group. An interesting feature of the interface group is that it allows for distinguishing between expectations and promises in interfaces of process components. This distinction comes into play in case components with both client and server behaviour are involved.<|reference_end|> | arxiv | @article{bergstra2007an,
title={An interface group for process components},
author={J. A. Bergstra and C. A. Middelburg},
journal={Fundamenta Informaticae, 99(4):355--382, 2010},
year={2007},
doi={10.3233/FI-2010-254},
number={PRG0707},
archivePrefix={arXiv},
eprint={0711.0834},
primaryClass={cs.LO}
} | bergstra2007an |
arxiv-1746 | 0711.0836 | Machine structure oriented control code logic | <|reference_start|>Machine structure oriented control code logic: Control code is a concept that is closely related to a frequently occurring practitioner's view on what is a program: code that is capable of controlling the behaviour of some machine. We present a logical approach to explain issues concerning control codes that are independent of the details of the behaviours that are controlled. Using this approach, such issues can be explained at a very abstract level. We illustrate this among other things by means of an example about the production of a new compiler from an existing one. The approach is based on abstract machine models, called machine structures. We introduce a model of systems that provide execution environments for the executable codes of machine structures and use it to go into portability of control codes.<|reference_end|> | arxiv | @article{bergstra2007machine,
title={Machine structure oriented control code logic},
author={J. A. Bergstra and C. A. Middelburg},
journal={Acta Informatica, 46(5):375--401, 2009},
year={2007},
doi={10.1007/s00236-009-0099-2},
number={PRG0704},
archivePrefix={arXiv},
eprint={0711.0836},
primaryClass={cs.SE}
} | bergstra2007machine |
arxiv-1747 | 0711.0838 | On the operating unit size of load/store architectures | <|reference_start|>On the operating unit size of load/store architectures: We introduce a strict version of the concept of a load/store instruction set architecture in the setting of Maurer machines. We take the view that transformations on the states of a Maurer machine are achieved by applying threads as considered in thread algebra to the Maurer machine. We study how the transformations on the states of the main memory of a strict load/store instruction set architecture that can be achieved by applying threads depend on the operating unit size, the cardinality of the instruction set, and the maximal number of states of the threads.<|reference_end|> | arxiv | @article{bergstra2007on,
title={On the operating unit size of load/store architectures},
author={J. A. Bergstra and C. A. Middelburg},
journal={Mathematical Structures in Computer Science, 20(3):395--417, 2010},
year={2007},
doi={10.1017/S0960129509990314},
number={PRG0703},
archivePrefix={arXiv},
eprint={0711.0838},
primaryClass={cs.AR}
} | bergstra2007on |
arxiv-1748 | 0711.0840 | A thread calculus with molecular dynamics | <|reference_start|>A thread calculus with molecular dynamics: We present a theory of threads, interleaving of threads, and interaction between threads and services with features of molecular dynamics, a model of computation that bears on computations in which dynamic data structures are involved. Threads can interact with services of which the states consist of structured data objects and computations take place by means of actions which may change the structure of the data objects. The features introduced include restriction of the scope of names used in threads to refer to data objects. Because that feature makes it troublesome to provide a model based on structural operational semantics and bisimulation, we construct a projective limit model for the theory.<|reference_end|> | arxiv | @article{bergstra2007a,
title={A thread calculus with molecular dynamics},
author={J. A. Bergstra and C. A. Middelburg},
journal={Information and Computation, 208(7):817-844, 2010},
year={2007},
doi={10.1016/j.ic.2010.01.004},
archivePrefix={arXiv},
eprint={0711.0840},
primaryClass={cs.LO}
} | bergstra2007a |
arxiv-1749 | 0711.0892 | Routing in Outer Space: Improved Security and Energy-Efficiency in Multi-Hop Wireless Networks | <|reference_start|>Routing in Outer Space: Improved Security and Energy-Efficiency in Multi-Hop Wireless Networks: In this paper we consider security-related and energy-efficiency issues in multi-hop wireless networks. We start our work from the observation, known in the literature, that shortest path routing creates congested areas in multi-hop wireless networks. These areas are critical--they generate both security and energy efficiency issues. We attack these problems and set out routing in outer space, a new routing mechanism that transforms any shortest path routing protocol (or approximated versions of it) into a new protocol that, in case of uniform traffic, guarantees that every node of the network is responsible for relaying the same number of messages, on expectation. We can show that a network that uses routing in outer space does not have congested areas, does not have the associated security-related issues, does not encourage selfish positioning, and, in spite of using more energy globally, lives longer of the same network using the original routing protocol.<|reference_end|> | arxiv | @article{mei2007routing,
title={Routing in Outer Space: Improved Security and Energy-Efficiency in
Multi-Hop Wireless Networks},
author={Alessandro Mei and Julinda Stefa},
journal={arXiv preprint arXiv:0711.0892},
year={2007},
archivePrefix={arXiv},
eprint={0711.0892},
primaryClass={cs.NI}
} | mei2007routing |
arxiv-1750 | 0711.0917 | SWI-Prolog and the Web | <|reference_start|>SWI-Prolog and the Web: Where Prolog is commonly seen as a component in a Web application that is either embedded or communicates using a proprietary protocol, we propose an architecture where Prolog communicates to other components in a Web application using the standard HTTP protocol. By avoiding embedding in external Web servers development and deployment become much easier. To support this architecture, in addition to the transfer protocol, we must also support parsing, representing and generating the key Web document types such as HTML, XML and RDF. This paper motivates the design decisions in the libraries and extensions to Prolog for handling Web documents and protocols. The design has been guided by the requirement to handle large documents efficiently. The described libraries support a wide range of Web applications ranging from HTML and XML documents to Semantic Web RDF processing. To appear in Theory and Practice of Logic Programming (TPLP)<|reference_end|> | arxiv | @article{wielemaker2007swi-prolog,
title={SWI-Prolog and the Web},
author={Jan Wielemaker and Zhisheng Huang and Lourens van der Meij},
journal={arXiv preprint arXiv:0711.0917},
year={2007},
archivePrefix={arXiv},
eprint={0711.0917},
primaryClass={cs.PL cs.SC}
} | wielemaker2007swi-prolog |
arxiv-1751 | 0711.1016 | An On-the-fly Tableau-based Decision Procedure for PDL-Satisfiability | <|reference_start|>An On-the-fly Tableau-based Decision Procedure for PDL-Satisfiability: We present a tableau-based algorithm for deciding satisfiability for propositional dynamic logic (PDL) which builds a finite rooted tree with ancestor loops and passes extra information from children to parents to separate good loops from bad loops during backtracking. It is easy to implement, with potential for parallelisation, because it constructs a pseudo-model ``on the fly'' by exploring each tableau branch independently. But its worst-case behaviour is 2EXPTIME rather than EXPTIME. A prototype implementation in the TWB (http://twb.rsise.anu.edu.au) is available.<|reference_end|> | arxiv | @article{abate2007an,
title={An On-the-fly Tableau-based Decision Procedure for PDL-Satisfiability},
author={Pietro Abate and Rajeev Gor{\'e} and Florian Widmann},
journal={arXiv preprint arXiv:0711.1016},
year={2007},
archivePrefix={arXiv},
eprint={0711.1016},
primaryClass={cs.LO}
} | abate2007an |
arxiv-1752 | 0711.1038 | Am\'elioration des Performances des Syst\`emes Automatiques de Reconnaissance de la Parole pour la Parole Non Native | <|reference_start|>Am\'elioration des Performances des Syst\`emes Automatiques de Reconnaissance de la Parole pour la Parole Non Native: In this article, we present an approach for non native automatic speech recognition (ASR). We propose two methods to adapt existing ASR systems to the non-native accents. The first method is based on the modification of acoustic models through integration of acoustic models from the mother tong. The phonemes of the target language are pronounced in a similar manner to the native language of speakers. We propose to combine the models of confused phonemes so that the ASR system could recognize both concurrent pronounciations. The second method we propose is a refinment of the pronounciation error detection through the introduction of graphemic constraints. Indeed, non native speakers may rely on the writing of words in their uttering. Thus, the pronounctiation errors might depend on the characters composing the words. The average error rate reduction that we observed is (22.5%) relative for the sentence error rate, and 34.5% (relative) in word error rate.<|reference_end|> | arxiv | @article{bouselmi2007am\'elioration,
title={Am\'elioration des Performances des Syst\`emes Automatiques de
Reconnaissance de la Parole pour la Parole Non Native},
author={Ghazi Bouselmi (INRIA Lorraine - LORIA) and Dominique Fohr (INRIA
Lorraine - LORIA) and Irina Illina (INRIA Lorraine - LORIA) and Jean-Paul
Haton (INRIA Lorraine - LORIA)},
journal={Dans TAIMA'07, Traitement et Analyse de l'Information : M\'ethodes
et Applications (2007)},
year={2007},
archivePrefix={arXiv},
eprint={0711.1038},
primaryClass={cs.CL}
} | bouselmi2007am\'elioration |
arxiv-1753 | 0711.1055 | Simple Recursive Games | <|reference_start|>Simple Recursive Games: We define the class of "simple recursive games". A simple recursive game is defined as a simple stochastic game (a notion due to Anne Condon), except that we allow arbitrary real payoffs but disallow moves of chance. We study the complexity of solving simple recursive games and obtain an almost-linear time comparison-based algorithm for computing an equilibrium of such a game. The existence of a linear time comparison-based algorithm remains an open problem.<|reference_end|> | arxiv | @article{andersson2007simple,
title={Simple Recursive Games},
author={Daniel Andersson and Kristoffer Arnsfelt Hansen and Peter Bro
Miltersen and Troels Bjerre Sorensen},
journal={arXiv preprint arXiv:0711.1055},
year={2007},
archivePrefix={arXiv},
eprint={0711.1055},
primaryClass={cs.GT cs.DS}
} | andersson2007simple |
arxiv-1754 | 0711.1056 | Bounds on the Number of Iterations for Turbo-Like Ensembles over the Binary Erasure Channe | <|reference_start|>Bounds on the Number of Iterations for Turbo-Like Ensembles over the Binary Erasure Channe: This paper provides simple lower bounds on the number of iterations which is required for successful message-passing decoding of some important families of graph-based code ensembles (including low-density parity-check codes and variations of repeat-accumulate codes). The transmission of the code ensembles is assumed to take place over a binary erasure channel, and the bounds refer to the asymptotic case where we let the block length tend to infinity. The simplicity of the bounds derived in this paper stems from the fact that they are easily evaluated and are expressed in terms of some basic parameters of the ensemble which include the fraction of degree-2 variable nodes, the target bit erasure probability and the gap between the channel capacity and the design rate of the ensemble. This paper demonstrates that the number of iterations which is required for successful message-passing decoding scales at least like the inverse of the gap (in rate) to capacity, provided that the fraction of degree-2 variable nodes of these turbo-like ensembles does not vanish (hence, the number of iterations becomes unbounded as the gap to capacity vanishes).<|reference_end|> | arxiv | @article{sason2007bounds,
title={Bounds on the Number of Iterations for Turbo-Like Ensembles over the
Binary Erasure Channel},
author={Igal Sason and Gil Wiechman},
journal={arXiv preprint arXiv:0711.1056},
year={2007},
archivePrefix={arXiv},
eprint={0711.1056},
primaryClass={cs.IT math.IT}
} | sason2007bounds |
arxiv-1755 | 0711.1161 | Joint Source-Channel Codes for MIMO Block Fading Channels | <|reference_start|>Joint Source-Channel Codes for MIMO Block Fading Channels: We consider transmission of a continuous amplitude source over an L-block Rayleigh fading $M_t \times M_r$ MIMO channel when the channel state information is only available at the receiver. Since the channel is not ergodic, Shannon's source-channel separation theorem becomes obsolete and the optimal performance requires a joint source -channel approach. Our goal is to minimize the expected end-to-end distortion, particularly in the high SNR regime. The figure of merit is the distortion exponent, defined as the exponential decay rate of the expected distortion with increasing SNR. We provide an upper bound and lower bounds for the distortion exponent with respect to the bandwidth ratio among the channel and source bandwidths. For the lower bounds, we analyze three different strategies based on layered source coding concatenated with progressive, superposition or hybrid digital/analog transmission. In each case, by adjusting the system parameters we optimize the distortion exponent as a function of the bandwidth ratio. We prove that the distortion exponent upper bound can be achieved when the channel has only one degree of freedom, that is L=1, and $\min\{M_t,M_r\}=1$. When we have more degrees of freedom, our achievable distortion exponents meet the upper bound for only certain ranges of the bandwidth ratio. We demonstrate that our results, which were derived for a complex Gaussian source, can be extended to more general source distributions as well.<|reference_end|> | arxiv | @article{gunduz2007joint,
title={Joint Source-Channel Codes for MIMO Block Fading Channels},
author={Deniz Gunduz and Elza Erkip},
journal={arXiv preprint arXiv:0711.1161},
year={2007},
archivePrefix={arXiv},
eprint={0711.1161},
primaryClass={cs.IT math.IT}
} | gunduz2007joint |
arxiv-1756 | 0711.1177 | Considerations on P vs NP | <|reference_start|>Considerations on P vs NP: In order to prove that the P of problems is different to the NP class, we consider the satisfability problem of propositional calculus formulae, which is an NP-complete problem. It is shown that, for every search algorithm A, there is a set E(A) containing propositional calculus formulae, each of which requires the algorithm A to take non-polynomial time to find the truth-values of its propositional letters satisfying it. Moreover, E(A)'s size is an exponential function of n, which makes it impossible to detect such formulae in a polynomial time. Hence, the satisfability problem does not have a polynomial complexity<|reference_end|> | arxiv | @article{vonreckow2007considerations,
title={Considerations on P vs NP},
author={Alfredo von Reckow},
journal={arXiv preprint arXiv:0711.1177},
year={2007},
archivePrefix={arXiv},
eprint={0711.1177},
primaryClass={cs.CC cs.LO}
} | vonreckow2007considerations
arxiv-1757 | 0711.1189 | Clique Minors in Cartesian Products of Graphs | <|reference_start|>Clique Minors in Cartesian Products of Graphs: A "clique minor" in a graph G can be thought of as a set of connected subgraphs in G that are pairwise disjoint and pairwise adjacent. The "Hadwiger number" h(G) is the maximum cardinality of a clique minor in G. This paper studies clique minors in the Cartesian product G*H. Our main result is a rough structural characterisation theorem for Cartesian products with bounded Hadwiger number. It implies that if the product of two sufficiently large graphs has bounded Hadwiger number then it is one of the following graphs: - a planar grid with a vortex of bounded width in the outerface, - a cylindrical grid with a vortex of bounded width in each of the two `big' faces, or - a toroidal grid. Motivation for studying the Hadwiger number of a graph includes Hadwiger's Conjecture, which states that the chromatic number chi(G) <= h(G). It is open whether Hadwiger's Conjecture holds for every Cartesian product. We prove that if |V(H)|-1 >= chi(G) >= chi(H) then Hadwiger's Conjecture holds for G*H. On the other hand, we prove that Hadwiger's Conjecture holds for all Cartesian products if and only if it holds for all G * K_2. We then show that h(G * K_2) is tied to the treewidth of G. We also develop connections with pseudoachromatic colourings and connected dominating sets that imply near-tight bounds on the Hadwiger number of grid graphs (Cartesian products of paths) and Hamming graphs (Cartesian products of cliques).<|reference_end|> | arxiv | @article{wood2007clique,
title={Clique Minors in Cartesian Products of Graphs},
author={David R. Wood},
journal={New York J. Mathematics 17:627-682, 2011},
year={2007},
archivePrefix={arXiv},
eprint={0711.1189},
primaryClass={math.CO cs.DM}
} | wood2007clique |
arxiv-1758 | 0711.1226 | Dynamic aspects of individual design activities A cognitive ergonomics viewpoint | <|reference_start|>Dynamic aspects of individual design activities A cognitive ergonomics viewpoint: This paper focuses on the use of knowledge possessed by designers. Data collection was based on observations (by the cognitive ergonomics researcher) and simultaneous verbalisations (by the designers) in empirical studies conducted in the context of industrial design projects. The contribution of this research is typical of cognitive ergonomics, in that it provides data on actual activities implemented by designers in their actual work situation (rather than on prescribed and/or idealised processes and methods). Data presented concern global strategies (the way in which designers actually organise their activity) and local strategies (reuse in design). Results from cognitive ergonomics and other research that challenges the way in which people are supposed to work with existing systems are generally not received warmly. Abundant corroboration of such results is required before industry may consider taking them into account. The opportunistic organisation of design activity is taken here as an example of this reluctance. The results concerning this aspect of design have been verified repeatedly, but only prototypes and experimental systems implementing some of the requirements formulated on their basis, are under development.<|reference_end|> | arxiv | @article{visser2007dynamic,
title={Dynamic aspects of individual design activities. A cognitive ergonomics
viewpoint},
author={Willemien Visser (INRIA Rocquencourt)},
journal={Human behaviour in design Springer Verlag (Ed.) (2003) 87-96},
year={2007},
archivePrefix={arXiv},
eprint={0711.1226},
primaryClass={cs.HC}
} | visser2007dynamic |
arxiv-1759 | 0711.1227 | Designing as Construction of Representations: A Dynamic Viewpoint in Cognitive Design Research | <|reference_start|>Designing as Construction of Representations: A Dynamic Viewpoint in Cognitive Design Research: This article presents a cognitively oriented viewpoint on design. It focuses on cognitive, dynamic aspects of real design, i.e., the actual cognitive activity implemented by designers during their work on professional design projects. Rather than conceiving de-signing as problem solving - Simon's symbolic information processing (SIP) approach - or as a reflective practice or some other form of situated activity - the situativity (SIT) approach - we consider that, from a cognitive viewpoint, designing is most appropriately characterised as a construction of representations. After a critical discussion of the SIP and SIT approaches to design, we present our view-point. This presentation concerns the evolving nature of representations regarding levels of abstraction and degrees of precision, the function of external representations, and specific qualities of representation in collective design. Designing is described at three levels: the organisation of the activity, its strategies, and its design-representation construction activities (different ways to generate, trans-form, and evaluate representations). Even if we adopt a "generic design" stance, we claim that design can take different forms depending on the nature of the artefact, and we propose some candidates for dimensions that allow a distinction to be made between these forms of design. We discuss the potential specificity of HCI design, and the lack of cognitive design research occupied with the quality of design. We close our discussion of representational structures and activities by an outline of some directions regarding their functional linkages.<|reference_end|> | arxiv | @article{visser2007designing,
title={Designing as Construction of Representations: A Dynamic Viewpoint in
Cognitive Design Research},
author={Willemien Visser (INRIA Rocquencourt)},
journal={Human-Computer Interaction 21, 1 (2006) 103-152},
year={2007},
archivePrefix={arXiv},
eprint={0711.1227},
primaryClass={cs.HC}
} | visser2007designing |
arxiv-1760 | 0711.1231 | Optimizing Latency and Reliability of Pipeline Workflow Applications | <|reference_start|>Optimizing Latency and Reliability of Pipeline Workflow Applications: Mapping applications onto heterogeneous platforms is a difficult challenge, even for simple application patterns such as pipeline graphs. The problem is even more complex when processors are subject to failure during the execution of the application. In this paper, we study the complexity of a bi-criteria mapping which aims at optimizing the latency (i.e., the response time) and the reliability (i.e., the probability that the computation will be successful) of the application. Latency is minimized by using faster processors, while reliability is increased by replicating computations on a set of processors. However, replication increases latency (additional communications, slower processors). The application fails to be executed only if all the processors fail during execution. While simple polynomial algorithms can be found for fully homogeneous platforms, the problem becomes NP-hard when tackling heterogeneous platforms. This is yet another illustration of the additional complexity added by heterogeneity.<|reference_end|> | arxiv | @article{benoit2007optimizing,
title={Optimizing Latency and Reliability of Pipeline Workflow Applications},
author={Anne Benoit (INRIA Rh\^one-Alpes / LIP Laboratoire d'Informatique du
Parall\'elisme, LIP) and Veronika Rehn-Sonigo (INRIA Rh\^one-Alpes / LIP
Laboratoire d'Informatique du Parall\'elisme, LIP) and Yves Robert (INRIA
Rh\^one-Alpes / LIP Laboratoire d'Informatique du Parall\'elisme, LIP)},
journal={arXiv preprint arXiv:0711.1231},
year={2007},
archivePrefix={arXiv},
eprint={0711.1231},
primaryClass={cs.DC}
} | benoit2007optimizing |
arxiv-1761 | 0711.1242 | The Price of Selfish Stackelberg Leadership in a Network Game | <|reference_start|>The Price of Selfish Stackelberg Leadership in a Network Game: We study a class of games in which a finite number of agents each controls a quantity of flow to be routed through a network, and are able to split their own flow between multiple paths through the network. Recent work on this model has contrasted the social cost of Nash equilibria with the best possible social cost. Here we show that additional costs are incurred in situations where a selfish ``leader'' agent allocates his flow, and then commits to that choice so that other agents are compelled to minimise their own cost based on the first agent's choice. We find that even in simple networks, the leader can often improve his own cost at the expense of increased social cost. Focusing on the 2-player case, we give upper and lower bounds on the worst-case additional cost incurred.<|reference_end|> | arxiv | @article{goldberg2007the,
title={The Price of Selfish Stackelberg Leadership in a Network Game},
author={P. W. Goldberg and P. Polpinit},
journal={arXiv preprint arXiv:0711.1242},
year={2007},
archivePrefix={arXiv},
eprint={0711.1242},
primaryClass={cs.GT}
} | goldberg2007the |
arxiv-1762 | 0711.1269 | Fair Scheduling in OFDMA-based Wireless Systems with QoS Constraints | <|reference_start|>Fair Scheduling in OFDMA-based Wireless Systems with QoS Constraints: In this work we consider the problem of downlink resource allocation for proportional fairness of long term received rates of data users and quality of service for real time sessions in an OFDMA-based wireless system. The base station allocates available power and bandwidth to individual users based on long term average received rates, QoS based rate constraints and channel conditions. We solve the underlying constrained optimization problem and propose an algorithm that achieves the optimal allocation. Numerical evaluation results show that the proposed algorithm provides better QoS to voice and video sessions while providing more and fair rates to data users in comparison with existing schemes.<|reference_end|> | arxiv | @article{girici2007fair,
title={Fair Scheduling in OFDMA-based Wireless Systems with QoS Constraints},
author={Tolga Girici and Chenxi Zhu and Jonathan R. Agre and Anthony Ephremides},
journal={International OFDM Workshop, Hamburg Germany on Aug. 30th 2007
(Inowo 07)},
year={2007},
archivePrefix={arXiv},
eprint={0711.1269},
primaryClass={cs.NI}
} | girici2007fair |
arxiv-1763 | 0711.1273 | Practical Resource Allocation Algorithms for QoS in OFDMA-based Wireless Systems | <|reference_start|>Practical Resource Allocation Algorithms for QoS in OFDMA-based Wireless Systems: In this work we propose an efficient resource allocation algorithm for OFDMA based wireless systems supporting heterogeneous traffic. The proposed algorithm provides proportionally fairness to data users and short term rate guarantees to real-time users. Based on the QoS requirements, buffer occupancy and channel conditions, we propose a scheme for rate requirement determination for delay constrained sessions. Then we formulate and solve the proportional fair rate allocation problem subject to those rate requirements and power/bandwidth constraints. Simulations results show that the proposed algorithm provides significant improvement with respect to the benchmark algorithm.<|reference_end|> | arxiv | @article{girici2007practical,
title={Practical Resource Allocation Algorithms for QoS in OFDMA-based Wireless
Systems},
author={Tolga Girici and Chenxi Zhu and Jonathan R. Agre and Anthony Ephremides},
journal={To be presented at 2nd IEEE International Broadband Wireless
Access Workshop. Las Vegas, Nevada USA Jan 12 2008},
year={2007},
doi={10.1109/ccnc08.2007.209},
archivePrefix={arXiv},
eprint={0711.1273},
primaryClass={cs.NI}
} | girici2007practical |
arxiv-1764 | 0711.1290 | Conception individuelle et collective Approche de l'ergonomie cognitive [Individual and Collective Design The Cognitive-Ergonomics Approach] | <|reference_start|>Conception individuelle et collective Approche de l'ergonomie cognitive [Individual and Collective Design The Cognitive-Ergonomics Approach]: This text presents the cognitive-ergonomics approach to design, in both its individual and collective form. It focuses on collective design with respect to individual design. The theoretical framework adopted is that of information processing, specified for design problems. The cognitive characteristics of design problems are presented: the effects of their ill-defined character and of the different types of representation implemented in solving these problems, amongst others the more or less "satisficing" character of the different possible solutions. The text first describes the cognitive activities implemented in both individual and collective design: different types of control activities and of the executive activities of solution development and evaluation. Specific collective-design characteristics are then presented: co-design and distributed-design activities, temporo-operative and cognitive synchronisation, and different types of argumentation, of co-designers' intervention modes in the design process, of solution-proposals evaluation. The paper concludes by a confrontation between the two types of design, individual and collective.<|reference_end|> | arxiv | @article{visser2007conception,
title={Conception individuelle et collective. Approche de l'ergonomie cognitive
[Individual and Collective Design. The Cognitive-Ergonomics Approach]},
author={Willemien Visser (INRIA Rocquencourt)},
journal={Cognition et cr\'eation. Explorations cognitives des processus de
conception (Cognition and creation. Cognitive explorations of design
processes) Mardaga (Ed.) (2002) 311-327},
year={2007},
archivePrefix={arXiv},
eprint={0711.1290},
primaryClass={cs.OH}
} | visser2007conception |
arxiv-1765 | 0711.1295 | On the performance of Golden space-time trellis coded modulation over MIMO block fading channels | <|reference_start|>On the performance of Golden space-time trellis coded modulation over MIMO block fading channels: The Golden space-time trellis coded modulation (GST-TCM) scheme was proposed in \cite{Hong06} for a high rate $2\times 2$ multiple-input multiple-output (MIMO) system over slow fading channels. In this letter, we present the performance analysis of GST-TCM over block fading channels, where the channel matrix is constant over a fraction of the codeword length and varies from one fraction to another, independently. In practice, it is not useful to design such codes for specific block fading channel parameters and a robust solution is preferable. We then show both analytically and by simulation that the GST-TCM designed for slow fading channels are indeed robust to all block fading channel conditions.<|reference_end|> | arxiv | @article{viterbo2007on,
title={On the performance of Golden space-time trellis coded modulation over
MIMO block fading channels},
author={Emanuele Viterbo and Yi Hong},
journal={arXiv preprint arXiv:0711.1295},
year={2007},
archivePrefix={arXiv},
eprint={0711.1295},
primaryClass={cs.IT math.IT}
} | viterbo2007on |
arxiv-1766 | 0711.1360 | Analytical approach to bit-string models of language evolution | <|reference_start|>Analytical approach to bit-string models of language evolution: A formulation of bit-string models of language evolution, based on differential equations for the population speaking each language, is introduced and preliminarily studied. Connections with replicator dynamics and diffusion processes are pointed out. The stability of the dominance state, where most of the population speaks a single language, is analyzed within a mean-field-like approximation, while the homogeneous state, where the population is evenly distributed among languages, can be exactly studied. This analysis discloses the existence of a bistability region, where dominance coexists with homogeneity as possible asymptotic states. Numerical resolution of the differential system validates these findings.<|reference_end|> | arxiv | @article{zanette2007analytical,
title={Analytical approach to bit-string models of language evolution},
author={Damian H. Zanette},
journal={arXiv preprint arXiv:0711.1360},
year={2007},
doi={10.1142/S0129183108012340},
archivePrefix={arXiv},
eprint={0711.1360},
primaryClass={physics.soc-ph cs.CL}
} | zanette2007analytical |
arxiv-1767 | 0711.1383 | On Minimal Tree Realizations of Linear Codes | <|reference_start|>On Minimal Tree Realizations of Linear Codes: A tree decomposition of the coordinates of a code is a mapping from the coordinate set to the set of vertices of a tree. A tree decomposition can be extended to a tree realization, i.e., a cycle-free realization of the code on the underlying tree, by specifying a state space at each edge of the tree, and a local constraint code at each vertex of the tree. The constraint complexity of a tree realization is the maximum dimension of any of its local constraint codes. A measure of the complexity of maximum-likelihood decoding for a code is its treewidth, which is the least constraint complexity of any of its tree realizations. It is known that among all tree realizations of a code that extends a given tree decomposition, there exists a unique minimal realization that minimizes the state space dimension at each vertex of the underlying tree. In this paper, we give two new constructions of these minimal realizations. As a by-product of the first construction, a generalization of the state-merging procedure for trellis realizations, we obtain the fact that the minimal tree realization also minimizes the local constraint code dimension at each vertex of the underlying tree. The second construction relies on certain code decomposition techniques that we develop. We further observe that the treewidth of a code is related to a measure of graph complexity, also called treewidth. We exploit this connection to resolve a conjecture of Forney's regarding the gap between the minimum trellis constraint complexity and the treewidth of a code. We present a family of codes for which this gap can be arbitrarily large.<|reference_end|> | arxiv | @article{kashyap2007on,
title={On Minimal Tree Realizations of Linear Codes},
author={Navin Kashyap},
journal={arXiv preprint arXiv:0711.1383},
year={2007},
doi={10.1109/TIT.2009.2023718},
archivePrefix={arXiv},
eprint={0711.1383},
primaryClass={cs.IT math.IT}
} | kashyap2007on |
arxiv-1768 | 0711.1401 | Towards a Sound Theory of Adaptation for the Simple Genetic Algorithm | <|reference_start|>Towards a Sound Theory of Adaptation for the Simple Genetic Algorithm: The pace of progress in the fields of Evolutionary Computation and Machine Learning is currently limited -- in the former field, by the improbability of making advantageous extensions to evolutionary algorithms when their capacity for adaptation is poorly understood, and in the latter by the difficulty of finding effective semi-principled reductions of hard real-world problems to relatively simple optimization problems. In this paper we explain why a theory which can accurately explain the simple genetic algorithm's remarkable capacity for adaptation has the potential to address both these limitations. We describe what we believe to be the impediments -- historic and analytic -- to the discovery of such a theory and highlight the negative role that the building block hypothesis (BBH) has played. We argue based on experimental results that a fundamental limitation which is widely believed to constrain the SGA's adaptive ability (and is strongly implied by the BBH) is in fact illusionary and does not exist. The SGA therefore turns out to be more powerful than it is currently thought to be. We give conditions under which it becomes feasible to numerically approximate and study the multivariate marginals of the search distribution of an infinite population SGA over multiple generations even when its genomes are long, and explain why this analysis is relevant to the riddle of the SGA's remarkable adaptive abilities.<|reference_end|> | arxiv | @article{burjorjee2007towards,
title={Towards a Sound Theory of Adaptation for the Simple Genetic Algorithm},
author={Keki Burjorjee},
journal={arXiv preprint arXiv:0711.1401},
year={2007},
archivePrefix={arXiv},
eprint={0711.1401},
primaryClass={cs.NE cs.AI}
} | burjorjee2007towards |
arxiv-1769 | 0711.1466 | Predicting relevant empty spots in social interaction | <|reference_start|>Predicting relevant empty spots in social interaction: An empty spot refers to an empty hard-to-fill space which can be found in the records of the social interaction, and is the clue to the persons in the underlying social network who do not appear in the records. This contribution addresses a problem to predict relevant empty spots in social interaction. Homogeneous and inhomogeneous networks are studied as a model underlying the social interaction. A heuristic predictor function approach is presented as a new method to address the problem. Simulation experiment is demonstrated over a homogeneous network. A test data in the form of baskets is generated from the simulated communication. Precision to predict the empty spots is calculated to demonstrate the performance of the presented approach.<|reference_end|> | arxiv | @article{maeno2007predicting,
title={Predicting relevant empty spots in social interaction},
author={Yoshiharu Maeno and Yukio Ohsawa},
journal={Journal of Systems Science and Complexity vol.21, pp.161-171
(2008)},
year={2007},
archivePrefix={arXiv},
eprint={0711.1466},
primaryClass={cs.AI}
} | maeno2007predicting |
arxiv-1770 | 0711.1478 | A constructive Borel-Cantelli Lemma Constructing orbits with required statistical properties | <|reference_start|>A constructive Borel-Cantelli Lemma Constructing orbits with required statistical properties: In the general context of computable metric spaces and computable measures we prove a kind of constructive Borel-Cantelli lemma: given a sequence (constructive in some way) of sets $A_{i}$ with effectively summable measures, there are computable points which are not contained in infinitely many $A_{i}$. As a consequence of this we obtain the existence of computable points which follow the \emph{typical statistical behavior} of a dynamical system (they satisfy the Birkhoff theorem) for a large class of systems, having computable invariant measure and a certain ``logarithmic'' speed of convergence of Birkhoff averages over Lipshitz observables. This is applied to uniformly hyperbolic systems, piecewise expanding maps, systems on the interval with an indifferent fixed point and it directly implies the existence of computable numbers which are normal with respect to any base.<|reference_end|> | arxiv | @article{galatolo2007a,
title={A constructive Borel-Cantelli Lemma. Constructing orbits with required
statistical properties},
author={Stefano Galatolo and Mathieu Hoyrup and Cristobal Rojas},
journal={arXiv preprint arXiv:0711.1478},
year={2007},
archivePrefix={arXiv},
eprint={0711.1478},
primaryClass={math.CA cs.IT math.DS math.IT math.PR math.ST stat.TH}
} | galatolo2007a |
arxiv-1771 | 0711.1533 | N3Logic: A Logical Framework For the World Wide Web | <|reference_start|>N3Logic: A Logical Framework For the World Wide Web: The Semantic Web drives towards the use of the Web for interacting with logically interconnected data. Through knowledge models such as Resource Description Framework (RDF), the Semantic Web provides a unifying representation of richly structured data. Adding logic to the Web implies the use of rules to make inferences, choose courses of action, and answer questions. This logic must be powerful enough to describe complex properties of objects but not so powerful that agents can be tricked by being asked to consider a paradox. The Web has several characteristics that can lead to problems when existing logics are used, in particular, the inconsistencies that inevitably arise due to the openness of the Web, where anyone can assert anything. N3Logic is a logic that allows rules to be expressed in a Web environment. It extends RDF with syntax for nested graphs and quantified variables and with predicates for implication and accessing resources on the Web, and functions including cryptographic, string, math. The main goal of N3Logic is to be a minimal extension to the RDF data model such that the same language can be used for logic and data. In this paper, we describe N3Logic and illustrate through examples why it is an appropriate logic for the Web.<|reference_end|> | arxiv | @article{berners-lee2007n3logic:,
title={N3Logic: A Logical Framework For the World Wide Web},
author={Tim Berners-Lee and Dan Connolly and Lalana Kagal and Yosi Scharf and
Jim Hendler},
journal={arXiv preprint arXiv:0711.1533},
year={2007},
archivePrefix={arXiv},
eprint={0711.1533},
primaryClass={cs.NI}
} | berners-lee2007n3logic: |
arxiv-1772 | 0711.1565 | Channel Code Design with Causal Side Information at the Encoder | <|reference_start|>Channel Code Design with Causal Side Information at the Encoder: The problem of channel code design for the $M$-ary input AWGN channel with additive $Q$-ary interference where the sequence of i.i.d. interference symbols is known causally at the encoder is considered. The code design criterion at high SNR is derived by defining a new distance measure between the input symbols of the Shannon's \emph{associated} channel. For the case of binary-input channel, i.e., M=2, it is shown that it is sufficient to use only two (out of $2^Q$) input symbols of the \emph{associated} channel in the encoding as far as the distance spectrum of code is concerned. This reduces the problem of channel code design for the binary-input AWGN channel with known interference at the encoder to design of binary codes for the binary symmetric channel where the Hamming distance among codewords is the major factor in the performance of the code.<|reference_end|> | arxiv | @article{farmanbar2007channel,
title={Channel Code Design with Causal Side Information at the Encoder},
author={Hamid Farmanbar and Shahab Oveis Gharan and Amir Keyvan Khandani},
journal={arXiv preprint arXiv:0711.1565},
year={2007},
archivePrefix={arXiv},
eprint={0711.1565},
primaryClass={cs.IT math.IT}
} | farmanbar2007channel |
arxiv-1773 | 0711.1569 | Capacity as a Fundamental Metric for Mechanism Design in the Information Economy | <|reference_start|>Capacity as a Fundamental Metric for Mechanism Design in the Information Economy: The auction theory literature has so far focused mostly on the design of mechanisms that takes the revenue or the efficiency as a yardstick. However, scenarios where the {\it capacity}, which we define as \textit{``the number of bidders the auctioneer wants to have a positive probability of getting the item''}, is a fundamental concern are ubiquitous in the information economy. For instance, in sponsored search auctions (SSA's) or in online ad-exchanges, the true value of an ad-slot for an advertiser is inherently derived from the conversion-rate, which in turn depends on whether the advertiser actually obtained the ad-slot or not; thus, unless the capacity of the underlying auction is large, key parameters, such as true valuations and advertiser-specific conversion rates, will remain unknown or uncertain leading to inherent inefficiencies in the system. In general, the same holds true for all information goods/digital goods. We initiate a study of mechanisms, which take capacity as a yardstick, in addition to revenue/efficiency. We show that in the case of a single indivisible item one simple way to incorporate capacity constraints is via designing mechanisms to sell probability distributions, and that under certain conditions, such optimal probability distributions could be identified using a Linear programming approach. We define a quantity called {\it price of capacity} to capture the tradeoff between capacity and revenue/efficiency. We also study the case of sponsored search auctions. Finally, we discuss how general such an approach via probability spikes can be made, and potential directions for future investigations.<|reference_end|> | arxiv | @article{singh2007capacity,
title={Capacity as a Fundamental Metric for Mechanism Design in the Information
Economy},
author={Sudhir Kumar Singh and Vwani P. Roychowdhury},
journal={arXiv preprint arXiv:0711.1569},
year={2007},
archivePrefix={arXiv},
eprint={0711.1569},
primaryClass={cs.GT}
} | singh2007capacity |
arxiv-1774 | 0711.1573 | Outage-Efficient Downlink Transmission Without Transmit Channel State Information | <|reference_start|>Outage-Efficient Downlink Transmission Without Transmit Channel State Information: This paper investigates downlink transmission over a quasi-static fading Gaussian broadcast channel (BC), to model delay-sensitive applications over slowly time-varying fading channels. System performance is characterized by outage achievable rate regions. In contrast to most previous work, here the problem is studied under the key assumption that the transmitter only knows the probability distributions of the fading coefficients, but not their realizations. For scalar-input channels, two coding schemes are proposed. The first scheme is called blind dirty paper coding (B-DPC), which utilizes a robustness property of dirty paper coding to perform precoding at the transmitter. The second scheme is called statistical superposition coding (S-SC), in which each receiver adaptively performs successive decoding with the process statistically governed by the realized fading. Both B-DPC and S-SC schemes lead to the same outage achievable rate region, which always dominates that of time-sharing, irrespective of the particular fading distributions. The S-SC scheme can be extended to BCs with multiple transmit antennas.<|reference_end|> | arxiv | @article{zhang2007outage-efficient,
title={Outage-Efficient Downlink Transmission Without Transmit Channel State
Information},
author={Wenyi Zhang and Shivaprasad Kotagiri and J. Nicholas Laneman},
journal={arXiv preprint arXiv:0711.1573},
year={2007},
archivePrefix={arXiv},
eprint={0711.1573},
primaryClass={cs.IT math.IT}
} | zhang2007outage-efficient |
arxiv-1775 | 0711.1605 | Asymptotic Capacity of Wireless Ad Hoc Networks with Realistic Links under a Honey Comb Topology | <|reference_start|>Asymptotic Capacity of Wireless Ad Hoc Networks with Realistic Links under a Honey Comb Topology: We consider the effects of Rayleigh fading and lognormal shadowing in the physical interference model for all the successful transmissions of traffic across the network. New bounds are derived for the capacity of a given random ad hoc wireless network that reflect packet drop or capture probability of the transmission links. These bounds are based on a simplified network topology termed as honey-comb topology under a given routing and scheduling scheme.<|reference_end|> | arxiv | @article{asnani2007asymptotic,
title={Asymptotic Capacity of Wireless Ad Hoc Networks with Realistic Links
under a Honey Comb Topology},
author={Himanshu Asnani and Abhay Karandikar},
journal={arXiv preprint arXiv:0711.1605},
year={2007},
archivePrefix={arXiv},
eprint={0711.1605},
primaryClass={cs.IT math.IT}
} | asnani2007asymptotic |
arxiv-1776 | 0711.1669 | Applying Software Defect Estimations: Using a Risk Matrix for Tuning Test Effort | <|reference_start|>Applying Software Defect Estimations: Using a Risk Matrix for Tuning Test Effort: Applying software defect esimation techniques and presenting this information in a compact and impactful decision table can clearly illustrate to collaborative groups how critical this position is in the overall development cycle. The Test Risk Matrix described here has proven to be a valuable addition to the management tools and approaches used in developing large scale software on several releases. Use of this matrix in development planning meetings can clarify the attendant risks and possible consequences of carrying out or bypassing specific test activities.<|reference_end|> | arxiv | @article{cusick2007applying,
title={Applying Software Defect Estimations: Using a Risk Matrix for Tuning
Test Effort},
author={James Cusick},
journal={arXiv preprint arXiv:0711.1669},
year={2007},
archivePrefix={arXiv},
eprint={0711.1669},
primaryClass={cs.SE cs.OH}
} | cusick2007applying |
arxiv-1777 | 0711.1682 | Data Structures for Mergeable Trees | <|reference_start|>Data Structures for Mergeable Trees: Motivated by an application in computational topology, we consider a novel variant of the problem of efficiently maintaining dynamic rooted trees. This variant requires merging two paths in a single operation. In contrast to the standard problem, in which only one tree arc changes at a time, a single merge operation can change many arcs. In spite of this, we develop a data structure that supports merges on an n-node forest in O(log^2 n) amortized time and all other standard tree operations in O(log n) time (amortized, worst-case, or randomized depending on the underlying data structure). For the special case that occurs in the motivating application, in which arbitrary arc deletions (cuts) are not allowed, we give a data structure with an O(log n) time bound per operation. This is asymptotically optimal under certain assumptions. For the even-more special case in which both cuts and parent queries are disallowed, we give an alternative O(log n)-time solution that uses standard dynamic trees as a black box. This solution also applies to the motivating application. Our methods use previous work on dynamic trees in various ways, but the analysis of each algorithm requires novel ideas. We also investigate lower bounds for the problem under various assumptions.<|reference_end|> | arxiv | @article{georgiadis2007data,
title={Data Structures for Mergeable Trees},
author={Loukas Georgiadis and Haim Kaplan and Nira Shafrir and Robert E.
Tarjan and Renato F. Werneck},
journal={arXiv preprint arXiv:0711.1682},
year={2007},
archivePrefix={arXiv},
eprint={0711.1682},
primaryClass={cs.DS}
} | georgiadis2007data |
arxiv-1778 | 0711.1723 | An analysis of a random algorithm for estimating all the matchings | <|reference_start|>An analysis of a random algorithm for estimating all the matchings: Counting the number of all the matchings on a bipartite graph has been transformed into calculating the permanent of a matrix obtained from the extended bipartite graph by Yan Huo, and Rasmussen presents a simple approach (RM) to approximate the permanent, which just yields a critical ratio O($n\omega(n)$) for almost all the 0-1 matrices, provided it's a simple promising practical way to compute this #P-complete problem. In this paper, the performance of this method will be shown when it's applied to compute all the matchings based on that transformation. The critical ratio will be proved to be very large with a certain probability, owning an increasing factor larger than any polynomial of $n$ even in the sense for almost all the 0-1 matrices. Hence, RM fails to work well when counting all the matchings via computing the permanent of the matrix. In other words, we must carefully utilize the known methods of estimating the permanent to count all the matchings through that transformation.<|reference_end|> | arxiv | @article{zhang2007an,
title={An analysis of a random algorithm for estimating all the matchings},
author={Jinshan Zhang and Yan Huo and Fengshan Bai},
journal={arXiv preprint arXiv:0711.1723},
year={2007},
archivePrefix={arXiv},
eprint={0711.1723},
primaryClass={cs.CC cs.DM}
} | zhang2007an |
arxiv-1779 | 0711.1765 | Kinematic calibration of orthoglide-type mechanisms | <|reference_start|>Kinematic calibration of orthoglide-type mechanisms: The paper proposes a novel calibration approach for the Orthoglide-type mechanisms based on observations of the manipulator leg parallelism during mo-tions between the prespecified test postures. It employs a low-cost measuring system composed of standard comparator indicators attached to the universal magnetic stands. They are sequentially used for measuring the deviation of the relevant leg location while the manipulator moves the TCP along the Cartesian axes. Using the measured differences, the developed algorithm estimates the joint offsets that are treated as the most essential parameters to be adjusted. The sensitivity of the meas-urement methods and the calibration accuracy are also studied. Experimental re-sults are presented that demonstrate validity of the proposed calibration technique<|reference_end|> | arxiv | @article{pashkevich2007kinematic,
title={Kinematic calibration of orthoglide-type mechanisms},
author={Anatoly Pashkevich ({ROBOTIC Laboratory, Irccyn}) and Damien Chablat
(IRCCyN) and Philippe Wenger (IRCCyN)},
journal={Information Control Problems in Manufacturing 2006, Elsevier (Ed.)
(2006) 149-154},
year={2007},
archivePrefix={arXiv},
eprint={0711.1765},
primaryClass={cs.RO}
} | pashkevich2007kinematic |
arxiv-1780 | 0711.1766 | Achieving the Gaussian Rate-Distortion Function by Prediction | <|reference_start|>Achieving the Gaussian Rate-Distortion Function by Prediction: The "water-filling" solution for the quadratic rate-distortion function of a stationary Gaussian source is given in terms of its power spectrum. This formula naturally lends itself to a frequency domain "test-channel" realization. We provide an alternative time-domain realization for the rate-distortion function, based on linear prediction. This solution has some interesting implications, including the optimality at all distortion levels of pre/post filtered vector-quantized differential pulse code modulation (DPCM), and a duality relationship with decision-feedback equalization (DFE) for inter-symbol interference (ISI) channels.<|reference_end|> | arxiv | @article{zamir2007achieving,
title={Achieving the Gaussian Rate-Distortion Function by Prediction},
author={Ram Zamir and Yuval Kochman and Uri Erez},
journal={arXiv preprint arXiv:0711.1766},
year={2007},
archivePrefix={arXiv},
eprint={0711.1766},
primaryClass={cs.IT math.IT}
} | zamir2007achieving |
arxiv-1781 | 0711.1786 | A Mobile Computing Architecture for Numerical Simulation | <|reference_start|>A Mobile Computing Architecture for Numerical Simulation: The domain of numerical simulation is a place where the parallelization of numerical code is common. The definition of a numerical context means the configuration of resources such as memory, processor load and communication graph, with an evolving feature: the resources availability. A feature is often missing: the adaptability. It is not predictable and the adaptable aspect is essential. Without calling into question these implementations of these codes, we create an adaptive use of these implementations. Because the execution has to be driven by the availability of main resources, the components of a numeric computation have to react when their context changes. This paper offers a new architecture, a mobile computing architecture, based on mobile agents and JavaSpace. At the end of this paper, we apply our architecture to several case studies and obtain our first results.<|reference_end|> | arxiv | @article{dumont2007a,
title={A Mobile Computing Architecture for Numerical Simulation},
author={Cyril Dumont (LACL) and Fabrice Mourlin (LACL)},
journal={arXiv preprint arXiv:0711.1786},
year={2007},
archivePrefix={arXiv},
eprint={0711.1786},
primaryClass={cs.DC}
} | dumont2007a |
arxiv-1782 | 0711.1814 | Building Rules on Top of Ontologies for the Semantic Web with Inductive Logic Programming | <|reference_start|>Building Rules on Top of Ontologies for the Semantic Web with Inductive Logic Programming: Building rules on top of ontologies is the ultimate goal of the logical layer of the Semantic Web. To this aim an ad-hoc mark-up language for this layer is currently under discussion. It is intended to follow the tradition of hybrid knowledge representation and reasoning systems such as $\mathcal{AL}$-log that integrates the description logic $\mathcal{ALC}$ and the function-free Horn clausal language \textsc{Datalog}. In this paper we consider the problem of automating the acquisition of these rules for the Semantic Web. We propose a general framework for rule induction that adopts the methodological apparatus of Inductive Logic Programming and relies on the expressive and deductive power of $\mathcal{AL}$-log. The framework is valid whatever the scope of induction (description vs. prediction) is. Yet, for illustrative purposes, we also discuss an instantiation of the framework which aims at description and turns out to be useful in Ontology Refinement. Keywords: Inductive Logic Programming, Hybrid Knowledge Representation and Reasoning Systems, Ontologies, Semantic Web. Note: To appear in Theory and Practice of Logic Programming (TPLP)<|reference_end|> | arxiv | @article{lisi2007building,
title={Building Rules on Top of Ontologies for the Semantic Web with Inductive
Logic Programming},
author={Francesca A. Lisi},
journal={arXiv preprint arXiv:0711.1814},
year={2007},
archivePrefix={arXiv},
eprint={0711.1814},
primaryClass={cs.AI cs.LG}
} | lisi2007building |
arxiv-1783 | 0711.1827 | The Three-Color and Two-Color Tantrix(TM) Rotation Puzzle Problems are NP-Complete via Parsimonious Reductions | <|reference_start|>The Three-Color and Two-Color Tantrix(TM) Rotation Puzzle Problems are NP-Complete via Parsimonious Reductions: Holzer and Holzer (Discrete Applied Mathematics 144(3):345--358, 2004) proved that the Tantrix(TM) rotation puzzle problem with four colors is NP-complete, and they showed that the infinite variant of this problem is undecidable. In this paper, we study the three-color and two-color Tantrix(TM) rotation puzzle problems (3-TRP and 2-TRP) and their variants. Restricting the number of allowed colors to three (respectively, to two) reduces the set of available Tantrix(TM) tiles from 56 to 14 (respectively, to 8). We prove that 3-TRP and 2-TRP are NP-complete, which answers a question raised by Holzer and Holzer in the affirmative. Since our reductions are parsimonious, it follows that the problems Unique-3-TRP and Unique-2-TRP are DP-complete under randomized reductions. We also show that the another-solution problems associated with 4-TRP, 3-TRP, and 2-TRP are NP-complete. Finally, we prove that the infinite variants of 3-TRP and 2-TRP are undecidable.<|reference_end|> | arxiv | @article{baumeister2007the,
title={The Three-Color and Two-Color Tantrix(TM) Rotation Puzzle Problems are
NP-Complete via Parsimonious Reductions},
author={Dorothea Baumeister and Joerg Rothe},
journal={arXiv preprint arXiv:0711.1827},
year={2007},
archivePrefix={arXiv},
eprint={0711.1827},
primaryClass={cs.CC}
} | baumeister2007the |
arxiv-1784 | 0711.1856 | Testing Kak's Conjecture on Binary Reciprocal of Primes and Cryptographic Applications | <|reference_start|>Testing Kak's Conjecture on Binary Reciprocal of Primes and Cryptographic Applications: This note considers reciprocal of primes in binary representation and shows that the conjecture that 0s exceed 1s in most cases continues to hold for primes less one million. The conjecture has also been tested for ternary representation with similar results. Some applications of this result to cryptography are discussed.<|reference_end|> | arxiv | @article{gangasani2007testing,
title={Testing Kak's Conjecture on Binary Reciprocal of Primes and
Cryptographic Applications},
author={Sumanth Kumar Reddy Gangasani},
journal={arXiv preprint arXiv:0711.1856},
year={2007},
archivePrefix={arXiv},
eprint={0711.1856},
primaryClass={cs.CR}
} | gangasani2007testing |
arxiv-1785 | 0711.1890 | A Geometric Interpretation of Fading in Wireless Networks: Theory and Applications | <|reference_start|>A Geometric Interpretation of Fading in Wireless Networks: Theory and Applications: In wireless networks with random node distribution, the underlying point process model and the channel fading process are usually considered separately. A unified framework is introduced that permits the geometric characterization of fading by incorporating the fading process into the point process model. Concretely, assuming nodes are distributed in a stationary Poisson point process in $\R^d$, the properties of the point processes that describe the path loss with fading are analyzed. The main applications are connectivity and broadcasting.<|reference_end|> | arxiv | @article{haenggi2007a,
title={A Geometric Interpretation of Fading in Wireless Networks: Theory and
Applications},
author={Martin Haenggi},
journal={arXiv preprint arXiv:0711.1890},
year={2007},
archivePrefix={arXiv},
eprint={0711.1890},
primaryClass={cs.IT math.IT}
} | haenggi2007a |
arxiv-1786 | 0711.1986 | Performance bounds and codes design criteria for channel decoding with a-priori information | <|reference_start|>Performance bounds and codes design criteria for channel decoding with a-priori information: In this article we focus on the problem of channel decoding in presence of a-priori information. In particular, assuming that the a-priori information reliability is not perfectly estimated at the receiver, we derive a novel analytical framework for evaluating the decoder's performance. It is derived the important result that a "good code", i.e., a code which allows to fully exploit the potential benefit of a-priori information, must associate information sequences with high Hamming weights to codewords with low Hamming weights. Basing on the proposed analysis, we analyze the performance of convolutional codes, random codes, and turbo codes. Moreover, we consider the transmission of correlated binary sources from independent nodes, a problem which has several practical applications, e.g. in the case of sensor networks. In this context, we propose a very simple joint source-channel turbo decoding scheme where each decoder works by exploiting a-priori information given by the other decoder. In the case of block fading channels, it is shown that the inherent correlation between information signals provide a form of non-cooperative diversity, thus allowing joint source-channel decoding to outperform separation-based schemes.<|reference_end|> | arxiv | @article{abrardo2007performance,
title={Performance bounds and codes design criteria for channel decoding with
a-priori information},
author={Andrea Abrardo},
journal={arXiv preprint arXiv:0711.1986},
year={2007},
archivePrefix={arXiv},
eprint={0711.1986},
primaryClass={cs.IT math.IT}
} | abrardo2007performance |
arxiv-1787 | 0711.1993 | Entropy of capacities on lattices and set systems | <|reference_start|>Entropy of capacities on lattices and set systems: We propose a definition for the entropy of capacities defined on lattices. Classical capacities are monotone set functions and can be seen as a generalization of probability measures. Capacities on lattices address the general case where the family of subsets is not necessarily the Boolean lattice of all subsets. Our definition encompasses the classical definition of Shannon for probability measures, as well as the entropy of Marichal defined for classical capacities. Some properties and examples are given.<|reference_end|> | arxiv | @article{honda2007entropy,
title={Entropy of capacities on lattices and set systems},
author={Aoi Honda and Michel Grabisch (CES)},
journal={Information Sciences 176 (2006) 3472-3489},
year={2007},
archivePrefix={arXiv},
eprint={0711.1993},
primaryClass={cs.DM math.ST stat.TH}
} | honda2007entropy |
arxiv-1788 | 0711.2010 | A Polynomial Time Algorithm for Graph Isomorphism | <|reference_start|>A Polynomial Time Algorithm for Graph Isomorphism: We claimed that there is a polynomial algorithm to test if two graphs are isomorphic. But the algorithm is wrong. It only tests if the adjacency matrices of two graphs have the same eigenvalues. There is a counterexample of two non-isomorphic graphs with the same eigenvalues.<|reference_end|> | arxiv | @article{czerwinski2007a,
title={A Polynomial Time Algorithm for Graph Isomorphism},
author={Reiner Czerwinski},
journal={arXiv preprint arXiv:0711.2010},
year={2007},
archivePrefix={arXiv},
eprint={0711.2010},
primaryClass={cs.CC}
} | czerwinski2007a |
arxiv-1789 | 0711.2023 | Empirical Evaluation of Four Tensor Decomposition Algorithms | <|reference_start|>Empirical Evaluation of Four Tensor Decomposition Algorithms: Higher-order tensor decompositions are analogous to the familiar Singular Value Decomposition (SVD), but they transcend the limitations of matrices (second-order tensors). SVD is a powerful tool that has achieved impressive results in information retrieval, collaborative filtering, computational linguistics, computational vision, and other fields. However, SVD is limited to two-dimensional arrays of data (two modes), and many potential applications have three or more modes, which require higher-order tensor decompositions. This paper evaluates four algorithms for higher-order tensor decomposition: Higher-Order Singular Value Decomposition (HO-SVD), Higher-Order Orthogonal Iteration (HOOI), Slice Projection (SP), and Multislice Projection (MP). We measure the time (elapsed run time), space (RAM and disk space requirements), and fit (tensor reconstruction accuracy) of the four algorithms, under a variety of conditions. We find that standard implementations of HO-SVD and HOOI do not scale up to larger tensors, due to increasing RAM requirements. We recommend HOOI for tensors that are small enough for the available RAM and MP for larger tensors.<|reference_end|> | arxiv | @article{turney2007empirical,
title={Empirical Evaluation of Four Tensor Decomposition Algorithms},
author={Peter D. Turney (National Research Council of Canada)},
journal={arXiv preprint arXiv:0711.2023},
year={2007},
number={ERB-1152, NRC-49877},
archivePrefix={arXiv},
eprint={0711.2023},
primaryClass={cs.LG cs.CL cs.IR}
} | turney2007empirical |
arxiv-1790 | 0711.2050 | Two Families of Quantum Codes Derived from Cyclic Codes | <|reference_start|>Two Families of Quantum Codes Derived from Cyclic Codes: We characterize the affine-invariant maximal extended cyclic codes. Then by the CSS construction, we derive from these codes a family of pure quantum codes. Also for ordnq even, a new family of degenerate quantum stabilizer codes is derived from the classical duadic codes. This answer an open problem asked by Aly et al.<|reference_end|> | arxiv | @article{guenda2007two,
title={Two Families of Quantum Codes Derived from Cyclic Codes},
author={Kenza Guenda},
journal={arXiv preprint arXiv:0711.2050},
year={2007},
archivePrefix={arXiv},
eprint={0711.2050},
primaryClass={cs.IT math.IT}
} | guenda2007two |
arxiv-1791 | 0711.2058 | Computer Model of a "Sense of Humour" I General Algorithm | <|reference_start|>Computer Model of a "Sense of Humour" I General Algorithm: A computer model of a "sense of humour" is proposed. The humorous effect is interpreted as a specific malfunction in the course of information processing due to the need for the rapid deletion of the false version transmitted into consciousness. The biological function of a sense of humour consists in speeding up the bringing of information into consciousness and in fuller use of the resources of the brain.<|reference_end|> | arxiv | @article{suslov2007computer,
title={Computer Model of a "Sense of Humour". I. General Algorithm},
author={I. M. Suslov ({P.L.Kapitza Institute for Physical Problems, Moscow,
Russia})},
journal={Biofizika SSSR 37, 318 (1992) [Biophysics 37, 242 (1992)]},
year={2007},
archivePrefix={arXiv},
eprint={0711.2058},
primaryClass={q-bio.NC cs.AI}
} | suslov2007computer |
arxiv-1792 | 0711.2061 | Computer Model of a "Sense of Humour" II Realization in Neural Networks | <|reference_start|>Computer Model of a "Sense of Humour" II Realization in Neural Networks: The computer realization of a "sense of humour" requires the creation of an algorithm for solving the "linguistic problem", i.e. the problem of recognizing a continuous sequence of polysemantic images. Such algorithm may be realized in the Hopfield model of a neural network after its proper modification.<|reference_end|> | arxiv | @article{suslov2007computer,
title={Computer Model of a "Sense of Humour". II. Realization in Neural
Networks},
author={I. M. Suslov ({P.L.Kapitza Institute for Physical Problems, Moscow,
Russia})},
journal={Biofizika SSSR 37, 325 (1992) [Biophysics 37, 249 (1992)]},
year={2007},
archivePrefix={arXiv},
eprint={0711.2061},
primaryClass={q-bio.NC cs.AI}
} | suslov2007computer |
arxiv-1793 | 0711.2062 | Autoregressive Time Series Forecasting of Computational Demand | <|reference_start|>Autoregressive Time Series Forecasting of Computational Demand: We study the predictive power of autoregressive moving average models when forecasting demand in two shared computational networks, PlanetLab and Tycoon. Demand in these networks is very volatile, and predictive techniques to plan usage in advance can improve the performance obtained drastically. Our key finding is that a random walk predictor performs best for one-step-ahead forecasts, whereas ARIMA(1,1,0) and adaptive exponential smoothing models perform better for two and three-step-ahead forecasts. A Monte Carlo bootstrap test is proposed to evaluate the continuous prediction performance of different models with arbitrary confidence and statistical significance levels. Although the prediction results differ between the Tycoon and PlanetLab networks, we observe very similar overall statistical properties, such as volatility dynamics.<|reference_end|> | arxiv | @article{sandholm2007autoregressive,
title={Autoregressive Time Series Forecasting of Computational Demand},
author={Thomas Sandholm},
journal={arXiv preprint arXiv:0711.2062},
year={2007},
archivePrefix={arXiv},
eprint={0711.2062},
primaryClass={cs.DC}
} | sandholm2007autoregressive |
arxiv-1794 | 0711.2087 | Query Evaluation and Optimization in the Semantic Web | <|reference_start|>Query Evaluation and Optimization in the Semantic Web: We address the problem of answering Web ontology queries efficiently. An ontology is formalized as a Deductive Ontology Base (DOB), a deductive database that comprises the ontology's inference axioms and facts. A cost-based query optimization technique for DOB is presented. A hybrid cost model is proposed to estimate the cost and cardinality of basic and inferred facts. Cardinality and cost of inferred facts are estimated using an adaptive sampling technique, while techniques of traditional relational cost models are used for estimating the cost of basic facts and conjunctive ontology queries. Finally, we implement a dynamic-programming optimization algorithm to identify query evaluation plans that minimize the number of intermediate inferred facts. We modeled a subset of the Web ontology language OWL Lite as a DOB, and performed an experimental study to analyze the predictive capacity of our cost model and the benefits of the query optimization technique. Our study has been conducted over synthetic and real-world OWL ontologies, and shows that the techniques are accurate and improve query performance. To appear in Theory and Practice of Logic Programming (TPLP).<|reference_end|> | arxiv | @article{ruckhaus2007query,
title={Query Evaluation and Optimization in the Semantic Web},
author={Edna Ruckhaus and Eduardo Ruiz and Maria-Esther Vidal},
journal={arXiv preprint arXiv:0711.2087},
year={2007},
archivePrefix={arXiv},
eprint={0711.2087},
primaryClass={cs.DB cs.LO}
} | ruckhaus2007query |
arxiv-1795 | 0711.2102 | Patterns of iid Sequences and Their Entropy - Part II: Bounds for Some Distributions | <|reference_start|>Patterns of iid Sequences and Their Entropy - Part II: Bounds for Some Distributions: A pattern of a sequence is a sequence of integer indices with each index describing the order of first occurrence of the respective symbol in the original sequence. In a recent paper, tight general bounds on the block entropy of patterns of sequences generated by independent and identically distributed (i.i.d.) sources were derived. In this paper, precise approximations are provided for the pattern block entropies for patterns of sequences generated by i.i.d. uniform and monotonic distributions, including distributions over the integers, and the geometric distribution. Numerical bounds on the pattern block entropies of these distributions are provided even for very short blocks. Tight bounds are obtained even for distributions that have infinite i.i.d. entropy rates. The approximations are obtained using general bounds and their derivation techniques. Conditional index entropy is also studied for distributions over smaller alphabets.<|reference_end|> | arxiv | @article{shamir2007patterns,
title={Patterns of i.i.d. Sequences and Their Entropy - Part II: Bounds for
Some Distributions},
author={Gil I. Shamir},
journal={arXiv preprint arXiv:0711.2102},
year={2007},
archivePrefix={arXiv},
eprint={0711.2102},
primaryClass={cs.IT math.IT}
} | shamir2007patterns |
arxiv-1796 | 0711.2104 | On the Information Rates of the Plenoptic Function | <|reference_start|>On the Information Rates of the Plenoptic Function: The {\it plenoptic function} (Adelson and Bergen, 91) describes the visual information available to an observer at any point in space and time. Samples of the plenoptic function (POF) are seen in video and in general visual content, and represent large amounts of information. In this paper we propose a stochastic model to study the compression limits of the plenoptic function. In the proposed framework, we isolate the two fundamental sources of information in the POF: the one representing the camera motion and the other representing the information complexity of the "reality" being acquired and transmitted. The sources of information are combined, generating a stochastic process that we study in detail. We first propose a model for ensembles of realities that do not change over time. The proposed model is simple in that it enables us to derive precise coding bounds in the information-theoretic sense that are sharp in a number of cases of practical interest. For this simple case of static realities and camera motion, our results indicate that coding practice is in accordance with optimal coding from an information-theoretic standpoint. The model is further extended to account for visual realities that change over time. We derive bounds on the lossless and lossy information rates for this dynamic reality model, stating conditions under which the bounds are tight. Examples with synthetic sources suggest that in the presence of scene dynamics, simple hybrid coding using motion/displacement estimation with DPCM performs considerably suboptimally relative to the true rate-distortion bound.<|reference_end|> | arxiv | @article{cunha2007on,
title={On the Information Rates of the Plenoptic Function},
author={Arthur Cunha, Minh Do, and Martin Vetterli},
journal={arXiv preprint arXiv:0711.2104},
year={2007},
archivePrefix={arXiv},
eprint={0711.2104},
primaryClass={cs.IT cs.CV math.IT math.PR}
} | cunha2007on |
arxiv-1797 | 0711.2112 | Bi-capacities -- Part II: the Choquet integral | <|reference_start|>Bi-capacities -- Part II: the Choquet integral: Bi-capacities arise as a natural generalization of capacities (or fuzzy measures) in a context of decision making where underlying scales are bipolar. They are able to capture a wide variety of decision behaviours, encompassing models such as Cumulative Prospect Theory (CPT). The aim of this paper in two parts is to present the machinery behind bi-capacities, and thus remains on a rather theoretical level, although some parts are firmly rooted in decision theory, notably cooperative game theory. The present second part focuses on the definition of Choquet integral. We give several expressions of it, including an expression w.r.t. the M\"obius transform. This permits to express the Choquet integral for 2-additive bi-capacities w.r.t. the interaction index.<|reference_end|> | arxiv | @article{grabisch2007bi-capacities,
title={Bi-capacities -- Part II: the Choquet integral},
author={Michel Grabisch (CES), Christophe Labreuche (TRT)},
journal={Fuzzy Sets and Systems (2005) 237-259},
year={2007},
archivePrefix={arXiv},
eprint={0711.2112},
primaryClass={cs.DM cs.GT}
} | grabisch2007bi-capacities |
arxiv-1798 | 0711.2114 | Bi-capacities -- Part I: definition, M\"obius transform and interaction | <|reference_start|>Bi-capacities -- Part I: definition, M\"obius transform and interaction: Bi-capacities arise as a natural generalization of capacities (or fuzzy measures) in a context of decision making where underlying scales are bipolar. They are able to capture a wide variety of decision behaviours, encompassing models such as Cumulative Prospect Theory (CPT). The aim of this paper in two parts is to present the machinery behind bi-capacities, and thus remains on a rather theoretical level, although some parts are firmly rooted in decision theory, notably cooperative game theory. The present first part is devoted to the introduction of bi-capacities and the structure on which they are defined. We define the M\"obius transform of bi-capacities, by just applying the well known theory of M\"obius functions as established by Rota to the particular case of bi-capacities. Then, we introduce derivatives of bi-capacities, by analogy with what was done for pseudo-Boolean functions (another view of capacities and set functions), and this is the key point to introduce the Shapley value and the interaction index for bi-capacities. This is done in a cooperative game theoretic perspective. In summary, all familiar notions used for fuzzy measures are available in this more general framework.<|reference_end|> | arxiv | @article{grabisch2007bi-capacities,
title={Bi-capacities -- Part I: definition, M\"obius transform and interaction},
author={Michel Grabisch (CES), Christophe Labreuche (TRT)},
journal={Fuzzy Sets and Systems (2005) 211-236},
year={2007},
archivePrefix={arXiv},
eprint={0711.2114},
primaryClass={cs.DM cs.GT}
} | grabisch2007bi-capacities |
arxiv-1799 | 0711.2115 | Derivative of functions over lattices as a basis for the notion of interaction between attributes | <|reference_start|>Derivative of functions over lattices as a basis for the notion of interaction between attributes: The paper proposes a general notion of interaction between attributes, which can be applied to many fields in decision making and data analysis. It generalizes the notion of interaction defined for criteria modelled by capacities, by considering functions defined on lattices. For a given problem, the lattice contains for each attribute the partially ordered set of remarkable points or levels. The interaction is based on the notion of derivative of a function defined on a lattice, and appears as a generalization of the Shapley value or other probabilistic values.<|reference_end|> | arxiv | @article{grabisch2007derivative,
title={Derivative of functions over lattices as a basis for the notion of
interaction between attributes},
author={Michel Grabisch (CES), Christophe Labreuche (TRT)},
journal={Annals of Mathematics and Artificial Intelligence 49 (2007)
151-170},
year={2007},
archivePrefix={arXiv},
eprint={0711.2115},
primaryClass={cs.DM cs.GT}
} | grabisch2007derivative |
arxiv-1800 | 0711.2116 | A numerical approach for 3D manufacturing tolerances synthesis | <|reference_start|>A numerical approach for 3D manufacturing tolerances synthesis: Making a product conform to the functional requirements indicated by the customer suppose to be able to manage the manufacturing process chosen to realise the parts. A simulation step is generally performed to verify that the expected generated deviations fit with these requirements. It is then necessary to assess the actual deviations of the process in progress. This is usually done by the verification of the conformity of the workpiece to manufacturing tolerances at the end of each set-up. It is thus necessary to determine these manufacturing tolerances. This step is called "manufacturing tolerance synthesis". In this paper, a numerical method is proposed to perform 3D manufacturing tolerances synthesis. This method uses the result of the numerical analysis of tolerances to determine influent mall displacement of surfaces. These displacements are described by small displacements torsors. An algorithm is then proposed to determine suitable ISO manufacturing tolerances.<|reference_end|> | arxiv | @article{vignat2007a,
title={A numerical approach for 3D manufacturing tolerances synthesis},
author={Fr\'ed\'eric Vignat (LGS), Fran\c{c}ois Villeneuve (LGS)},
journal={Dans Proceedings of the 10th CIRP International Seminar on
Computer Aided Tolerancing - 10th CIRP International Seminar on Computer
Aided Tolerancing, Erlangen : Allemagne (2007)},
year={2007},
archivePrefix={arXiv},
eprint={0711.2116},
primaryClass={cs.CE}
} | vignat2007a |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.