corpus_id: string (7-12 chars)
paper_id: string (9-16 chars)
title: string (1-261 chars)
abstract: string (70-4.02k chars)
source: string (1 distinct value)
bibtex: string (208-20.9k chars)
citation_key: string (6-100 chars)
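A minimal sketch of how records with this schema might be loaded and cleaned, assuming the rows are published as a Hugging Face dataset; the dataset path below is a placeholder, and stripping the <|reference_start|>/<|reference_end|> wrappers from the abstract field is an illustrative convenience, not part of the original schema:

```python
# Minimal sketch (assumptions: the rows above are available as a Hugging Face
# dataset; "user/arxiv-citation-corpus" is a placeholder path, not the real
# identifier of this corpus).
from datasets import load_dataset

ds = load_dataset("user/arxiv-citation-corpus", split="train")

def clean_abstract(text: str) -> str:
    """Strip the <|reference_start|>/<|reference_end|> wrappers around the abstract."""
    return (
        text.replace("<|reference_start|>", "")
            .replace("<|reference_end|>", "")
            .strip()
    )

record = ds[0]
print(record["corpus_id"], record["paper_id"])    # e.g. arxiv-3501 0804.4714
print(record["title"])
print(clean_abstract(record["abstract"])[:200])   # first 200 chars of the cleaned abstract
print(record["citation_key"])                     # matches the key inside the bibtex field
```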
arxiv-3501
0804.4714
Network Structure and Dynamics, and Emergence of Robustness by Stabilizing Selection in an Artificial Genome
<|reference_start|>Network Structure and Dynamics, and Emergence of Robustness by Stabilizing Selection in an Artificial Genome: Genetic regulation is a key component in development, but a clear understanding of the structure and dynamics of genetic networks is not yet at hand. In this work we investigate these properties within an artificial genome model originally introduced by Reil. We analyze statistical properties of randomly generated genomes both on the sequence- and network level, and show that this model correctly predicts the frequency of genes in genomes as found in experimental data. Using an evolutionary algorithm based on stabilizing selection for a phenotype, we show that robustness against single base mutations, as well as against random changes in initial network states that mimic stochastic fluctuations in environmental conditions, can emerge in parallel. Evolved genomes exhibit characteristic patterns on both sequence and network level.<|reference_end|>
arxiv
@article{rohlf2008network, title={Network Structure and Dynamics, and Emergence of Robustness by Stabilizing Selection in an Artificial Genome}, author={Thimo Rohlf and Chris Winkler}, journal={arXiv preprint arXiv:0804.4714}, year={2008}, archivePrefix={arXiv}, eprint={0804.4714}, primaryClass={q-bio.MN cond-mat.dis-nn cs.NE q-bio.GN q-bio.PE} }
rohlf2008network
arxiv-3502
0804.4717
Intelligent Unmanned Explorer for Deep Space Exploration
<|reference_start|>Intelligent Unmanned Explorer for Deep Space Exploration: Asteroids and comets have received remarkable attention worldwide. In small-body exploration in particular, detailed in-situ surface exploration by a tiny rover is an effective and fruitful means and is expected to make strong contributions to scientific studies. JAXA ISAS is promoting the MUSES-C mission, the world's first sample-return attempt to and from a near-Earth asteroid. The Hayabusa spacecraft in the MUSES-C mission carried a tiny rover, which was expected to perform in-situ surface exploration by hopping. This paper describes the system design, mobility and intelligence of the developed unmanned explorer. This paper also presents the ground experimental results and the flight results.<|reference_end|>
arxiv
@article{kubota2008intelligent, title={Intelligent Unmanned Explorer for Deep Space Exploration}, author={T. Kubota and T. Yoshimitsu}, journal={Proceedings of the International Conference on Intelligent Unmanned System (ICIUS 2007), Bali, Indonesia, October 24-25, 2007, Paper No. ICIUS2007-A004-OP}, year={2008}, archivePrefix={arXiv}, eprint={0804.4717}, primaryClass={cs.RO} }
kubota2008intelligent
arxiv-3503
0804.4740
An Affine-invariant Time-dependent Triangulation of Spatio-temporal Data
<|reference_start|>An Affine-invariant Time-dependent Triangulation of Spatio-temporal Data: In the geometric data model for spatio-temporal data, introduced by Chomicki and Revesz, spatio-temporal data are modelled as a finite collection of triangles that are transformed by time-dependent affinities of the plane. To facilitate querying and animation of spatio-temporal data, we present a normal form for data in the geometric data model. We propose an algorithm for constructing this normal form via a spatio-temporal triangulation of geometric data objects. This triangulation algorithm generates new geometric data objects that partition the given objects both in space and in time. A particular property of the proposed partition is that it is invariant under time-dependent affine transformations, and hence independent of the particular choice of coordinate system used to describe the spatio-temporal data. We can show that our algorithm works correctly and has a polynomial time complexity (of reasonably low degree in the number of input triangles and the maximal degree of the polynomial functions that describe the transformation functions). We also discuss several possible applications of this spatio-temporal triangulation.<|reference_end|>
arxiv
@article{haesevoets2008an, title={An Affine-invariant Time-dependent Triangulation of Spatio-temporal Data}, author={Sofie Haesevoets and Bart Kuijpers}, journal={arXiv preprint arXiv:0804.4740}, year={2008}, archivePrefix={arXiv}, eprint={0804.4740}, primaryClass={cs.CG cs.DB} }
haesevoets2008an
arxiv-3504
0804.4741
The Effect of Structural Diversity of an Ensemble of Classifiers on Classification Accuracy
<|reference_start|>The Effect of Structural Diversity of an Ensemble of Classifiers on Classification Accuracy: This paper aims to showcase the measure of structural diversity of an ensemble of 9 classifiers and then map a relationship between this structural diversity and accuracy. The structural diversity was induced by having different architectures or structures of the classifiers. Genetic algorithms (GA) were used to derive the relationship between diversity and classification accuracy by evolving the classifiers and then picking 9 classifiers out of an ensemble of 60 classifiers. It was found that as the ensemble became more diverse, the accuracy improved; however, beyond a certain diversity measure the accuracy began to drop. The Kohavi-Wolpert variance method is used to measure the diversity of the ensemble. A method of voting is used to aggregate the results from each classifier. The lowest error was observed at a diversity measure of 0.16 with a mean square error of 0.274, taking 0.2024 as the maximum diversity measured. The parameters that were varied were the number of hidden nodes, the learning rate and the activation function.<|reference_end|>
arxiv
@article{masisi2008the, title={The Effect of Structural Diversity of an Ensemble of Classifiers on Classification Accuracy}, author={Lesedi Masisi, Fulufhelo V. Nelwamondo and Tshilidzi Marwala}, journal={arXiv preprint arXiv:0804.4741}, year={2008}, archivePrefix={arXiv}, eprint={0804.4741}, primaryClass={cs.LG} }
masisi2008the
arxiv-3505
0804.4744
Lattice Problems, Gauge Functions and Parameterized Algorithms
<|reference_start|>Lattice Problems, Gauge Functions and Parameterized Algorithms: Given a k-dimensional subspace M\subseteq \R^n and a full rank integer lattice L\subseteq \R^n, the \emph{subspace avoiding problem} SAP is to find a shortest vector in L\setminus M. Treating k as a parameter, we obtain new parameterized approximation and exact algorithms for SAP based on the AKS sieving technique. More precisely, we give a randomized $(1+\epsilon)$-approximation algorithm for parameterized SAP that runs in time 2^{O(n)}.(1/\epsilon)^k, where the parameter k is the dimension of the subspace M. Thus, we obtain a 2^{O(n)} time algorithm for \epsilon=2^{-O(n/k)}. We also give a 2^{O(n+k\log k)} exact algorithm for the parameterized SAP for any \ell_p norm. Several of our algorithms work for all gauge functions as metric with some natural restrictions, in particular for all \ell_p norms. We also prove an \Omega(2^n) lower bound on the query complexity of AKS sieving based exact algorithms for SVP that accesses the gauge function as oracle.<|reference_end|>
arxiv
@article{arvind2008lattice, title={Lattice Problems, Gauge Functions and Parameterized Algorithms}, author={V. Arvind and Pushkar S. Joglekar}, journal={arXiv preprint arXiv:0804.4744}, year={2008}, archivePrefix={arXiv}, eprint={0804.4744}, primaryClass={cs.CC cs.DS} }
arvind2008lattice
arxiv-3506
0804.4749
Study of improving nano-contouring performance by employing cross-coupling controller
<|reference_start|>Study of improving nano-contouring performance by employing cross-coupling controller: For tracking-stage path planning, we design a two-axis cross-coupling control system which uses a PI controller to compensate the contour error between the axes. The stage adopted in this paper was designed by our laboratory (Precision Machine Center of National Formosa University). The cross-coupling controller calculates the actuating signal of each axis by combining the multi-axis position errors. Hence, the cross-coupling controller improves the stage tracking ability and decreases the contour error. The experiments show excellent stage motion. This finding confirms that the proposed method is a powerful and efficient tool for improving stage tracking ability. The stage tracking was also found to reduce the contour error of two types of circular paths to approximately 25 nm.<|reference_end|>
arxiv
@article{jywe2008study, title={Study of improving nano-contouring performance by employing cross-coupling controller}, author={Wen Yuh Jywe, Shih Shin Chen, Hung-Shu Wang, Chien Hung Liu, Hsin Hung Jwo, Yun Feng Teng, and Tung Hsien Hsieh}, journal={Proceedings of the International Conference on Intelligent Unmanned System (ICIUS 2007), Bali, Indonesia, October 24-25, 2007, Paper No. ICIUS2007-C002}, year={2008}, archivePrefix={arXiv}, eprint={0804.4749}, primaryClass={cs.RO} }
jywe2008study
arxiv-3507
0804.4750
The Numerical Control Design for a Pair of Dubins Vehicles
<|reference_start|>The Numerical Control Design for a Pair of Dubins Vehicles: In this paper, a model of a pair of Dubins vehicles is considered. The vehicles move from an initial position and orientation to a final position and orientation. Along the motion, the two vehicles are not allowed to collide; however, they also may not move too far from each other. The optimal control of the vehicles is found using Pontryagin's Maximum Principle (PMP). The PMP leads to a Hamiltonian system consisting of a system of differential equations and its adjoint. The original differential equation has initial and final conditions, but the adjoint system does not. This classical difficulty is solved numerically by a steepest gradient descent method. Some simulation results are presented in this paper.<|reference_end|>
arxiv
@article{tjahjana2008the, title={The Numerical Control Design for a Pair of Dubins Vehicles}, author={Heru Tjahjana, Iwan Pranoto, Hari Muhammad, J. Naiborhu, and Miswanto}, journal={Proceedings of the International Conference on Intelligent Unmanned System (ICIUS 2007), Bali, Indonesia, October 24-25, 2007, Paper No. ICIUS2007-C003}, year={2008}, archivePrefix={arXiv}, eprint={0804.4750}, primaryClass={cs.RO} }
tjahjana2008the
arxiv-3508
0804.4752
Simulation of Dynamic Yaw Stability Derivatives of a Bird Using CFD
<|reference_start|>Simulation of Dynamic Yaw Stability Derivatives of a Bird Using CFD: Simulation results on dynamic yaw stability derivatives of a gull bird by means of computational fluid dynamics are presented. Two different kinds of motions are used for determining the dynamic yaw stability derivatives $C_{N_r}$ and $C_{N_\beta}$. Concerning the first one, simple lateral translation and yaw rotary motions in yaw are considered. The second one consists of combined motions. To determine dynamic yaw stability derivatives of the bird, the simulation of an unsteady flow with a bird model showing a harmonic motion is performed. The unsteady flow solution for each time step is obtained by solving unsteady Euler equations based on a finite volume approach for a smaller reduced frequency. Then, an evaluation of unsteady forces and moments for one cycle is conducted using harmonic Fourier analysis. The results on the dynamic yaw stability derivatives for both simulations of the model motion show a good agreement.<|reference_end|>
arxiv
@article{moelyadi2008simulation, title={Simulation of Dynamic Yaw Stability Derivatives of a Bird Using CFD}, author={M.A. Moelyadi, and G. Sachs}, journal={arXiv preprint arXiv:0804.4752}, year={2008}, number={ICIUS2007-B012}, archivePrefix={arXiv}, eprint={0804.4752}, primaryClass={cs.RO} }
moelyadi2008simulation
arxiv-3509
0804.4753
Wavelet Based Iterative Learning Control with Fuzzy PD Feedback for Position Tracking of A Pneumatic Servo System
<|reference_start|>Wavelet Based Iterative Learning Control with Fuzzy PD Feedback for Position Tracking of A Pneumatic Servo System: In this paper, a wavelet-based iterative learning control (WILC) scheme with Fuzzy PD feedback is presented for a pneumatic control system with nonsmooth nonlinearities and uncertain parameters. The wavelet transform is employed to extract the learnable dynamics from the measured output signal before it can be used to update the control profile. The wavelet transform is adopted to decompose the original signal into many low-resolution signals that contain the learnable and unlearnable parts. The desired control profile is then compared with the learnable part of the transformed signal. Thus, the effects of unlearnable dynamics on the controlled system can be attenuated by a Fuzzy PD feedback controller. As for the rules of the Fuzzy PD controller in the feedback loop, a genetic algorithm (GA) is employed to search for the optimal inference rules. A proportional-valve controlled pneumatic cylinder actuator system is used as the control target for simulation. Simulation results have shown a much-improved position-tracking performance.<|reference_end|>
arxiv
@article{huang2008wavelet, title={Wavelet Based Iterative Learning Control with Fuzzy PD Feedback for Position Tracking of A Pneumatic Servo System}, author={C. E. Huang, and J. S. Chen}, journal={Proceedings of the International Conference on Intelligent Unmanned System (ICIUS 2007), Bali, Indonesia, October 24-25, 2007, Paper No. ICIUS2007-A001}, year={2008}, archivePrefix={arXiv}, eprint={0804.4753}, primaryClass={cs.RO} }
huang2008wavelet
arxiv-3510
0804.4754
Positive Real Synthesis of Networked Control System An LMI Approach
<|reference_start|>Positive Real Synthesis of Networked Control System An LMI Approach: This paper presents positive real analysis and synthesis for Networked Control Systems (NCS) in discrete time. Based on the definition of passivity, a sufficient condition for the NCS is given via a stochastic Lyapunov functional. A state-feedback controller is designed to guarantee the stability of the NCS and closed-loop positive realness. It is shown that a mode-dependent positive real controller exists if a set of coupled linear matrix inequalities has solutions. The controller can then be constructed in terms of the solutions.<|reference_end|>
arxiv
@article{riyanto2008positive, title={Positive Real Synthesis of Networked Control System An LMI Approach}, author={Bambang Riyanto, and Imam Arifin}, journal={Proceedings of the International Conference on Intelligent Unmanned System (ICIUS 2007), Bali, Indonesia, October 24-25, 2007, Paper No. ICIUS2007-C005}, year={2008}, archivePrefix={arXiv}, eprint={0804.4754}, primaryClass={cs.RO} }
riyanto2008positive
arxiv-3511
0804.4757
Analysis of Stability, Response and LQR Controller Design of a Small Scale Helicopter Dynamics
<|reference_start|>Analysis of Stability, Response and LQR Controller Design of a Small Scale Helicopter Dynamics: This paper presents how to use a feedback controller with a helicopter dynamics state-space model. A simplified analysis is presented for LQR controller design of small-scale helicopters for axial and forward flights. Our approach is simple and gives a basic understanding of how to develop a controller for stabilizing linear helicopter flight dynamics.<|reference_end|>
arxiv
@article{dharmayanda2008analysis, title={Analysis of Stability, Response and LQR Controller Design of a Small Scale Helicopter Dynamics}, author={Hardian Reza Dharmayanda, Taesam Kang, Young Jae Lee, and Sangkyung Sung}, journal={Proceedings of the International Conference on Intelligent Unmanned System (ICIUS 2007), Bali, Indonesia, October 24-25, 2007, Paper No. ICIUS2007-C006}, year={2008}, archivePrefix={arXiv}, eprint={0804.4757}, primaryClass={cs.RO} }
dharmayanda2008analysis
arxiv-3512
0804.4759
Design and control of dynamical quantum processes in ortho para H2 conversion on surfaces
<|reference_start|>Design and control of dynamical quantum processes in ortho para H2 conversion on surfaces: We present here a novel, cost-effective method for increasing and controlling the ortho-para H2 (o-p H2) conversion yield. First, we invoke two processes derived from fundamental surface-science insights, based on the effect of molecular orientation on the hydrogen-solid surface reaction, i.e., dynamical quantum filtering and steering, and apply them to enhance the o-p H2 conversion yield. Second, we find an important factor that can significantly influence the yield, i.e., the inhomogeneity of the spin density distribution. This factor gives us a promising possibility to increase the yield and to find the best catalyst, e.g., to design materials that can function as catalysts for the o-p H2 conversion.<|reference_end|>
arxiv
@article{muhida2008design, title={Design and control of dynamical quantum processes in ortho para H2 conversion on surfaces}, author={Rifki Muhida, Riza Muhida, Wilson A. Dino, Hiroshi Nakanishi, Hideaki Kasai}, journal={Proceedings of the International Conference on Intelligent Unmanned System (ICIUS 2007), Bali, Indonesia, October 24-25, 2007, Paper No. ICIUS2007-C008}, year={2008}, archivePrefix={arXiv}, eprint={0804.4759}, primaryClass={cs.RO} }
muhida2008design
arxiv-3513
0804.4774
A Projection Method for Derivation of Non-Shannon-Type Information Inequalities
<|reference_start|>A Projection Method for Derivation of Non-Shannon-Type Information Inequalities: In 1998, Zhang and Yeung found the first unconditional non-Shannon-type information inequality. Recently, Dougherty, Freiling and Zeger gave six new unconditional non-Shannon-type information inequalities. This work generalizes their work and provides a method to systematically derive non-Shannon-type information inequalities. An application of this method reveals new 4-variable non-Shannon-type information inequalities.<|reference_end|>
arxiv
@article{xu2008a, title={A Projection Method for Derivation of Non-Shannon-Type Information Inequalities}, author={Weidong Xu, Jia Wang, Jun Sun}, journal={arXiv preprint arXiv:0804.4774}, year={2008}, archivePrefix={arXiv}, eprint={0804.4774}, primaryClass={cs.IT math.IT} }
xu2008a
arxiv-3514
0804.4799
A Few More Quadratic APN Functions
<|reference_start|>A Few More Quadratic APN Functions: We present two infinite families of APN functions where the degree of the field is divisible by 3 but not 9. Our families contain two already known families as special cases. We also discuss the inequivalence proof (by computation) which shows that these functions are new.<|reference_end|>
arxiv
@article{bracken2008a, title={A Few More Quadratic APN Functions}, author={Carl Bracken, Eimear Byrne, Nadya Markin and Gary McGuire}, journal={arXiv preprint arXiv:0804.4799}, year={2008}, archivePrefix={arXiv}, eprint={0804.4799}, primaryClass={cs.IT math.IT} }
bracken2008a
arxiv-3515
0804.4808
Solving Time of Least Square Systems in Sigma-Pi Unit Networks
<|reference_start|>Solving Time of Least Square Systems in Sigma-Pi Unit Networks: The solving of least square systems is a useful operation in neurocomputational modeling of learning, pattern matching, and pattern recognition. In these last two cases, the solution must be obtained on-line, thus the time required to solve a system in a plausible neural architecture is critical. This paper presents a recurrent network of Sigma-Pi neurons, whose solving time increases at most like the logarithm of the system size, and of its condition number, which provides plausible computation times for biological systems.<|reference_end|>
arxiv
@article{courrieu2008solving, title={Solving Time of Least Square Systems in Sigma-Pi Unit Networks}, author={Pierre Courrieu (LPC)}, journal={Neural Information Processing - Letters and Reviews 4, 3 (2004) 39-45}, year={2008}, archivePrefix={arXiv}, eprint={0804.4808}, primaryClass={cs.NE} }
courrieu2008solving
arxiv-3516
0804.4809
Fast Computation of Moore-Penrose Inverse Matrices
<|reference_start|>Fast Computation of Moore-Penrose Inverse Matrices: Many neural learning algorithms require to solve large least square systems in order to obtain synaptic weights. Moore-Penrose inverse matrices allow for solving such systems, even with rank deficiency, and they provide minimum-norm vectors of synaptic weights, which contribute to the regularization of the input-output mapping. It is thus of interest to develop fast and accurate algorithms for computing Moore-Penrose inverse matrices. In this paper, an algorithm based on a full rank Cholesky factorization is proposed. The resulting pseudoinverse matrices are similar to those provided by other algorithms. However the computation time is substantially shorter, particularly for large systems.<|reference_end|>
arxiv
@article{courrieu2008fast, title={Fast Computation of Moore-Penrose Inverse Matrices}, author={Pierre Courrieu (LPC)}, journal={Neural Information Processing - Letters and Reviews 8, 2 (2005) 25-29}, year={2008}, archivePrefix={arXiv}, eprint={0804.4809}, primaryClass={cs.NE} }
courrieu2008fast
arxiv-3517
0804.4815
Tight local approximation results for max-min linear programs
<|reference_start|>Tight local approximation results for max-min linear programs: In a bipartite max-min LP, we are given a bipartite graph $\myG = (V \cup I \cup K, E)$, where each agent $v \in V$ is adjacent to exactly one constraint $i \in I$ and exactly one objective $k \in K$. Each agent $v$ controls a variable $x_v$. For each $i \in I$ we have a nonnegative linear constraint on the variables of adjacent agents. For each $k \in K$ we have a nonnegative linear objective function of the variables of adjacent agents. The task is to maximise the minimum of the objective functions. We study local algorithms where each agent $v$ must choose $x_v$ based on input within its constant-radius neighbourhood in $\myG$. We show that for every $\epsilon>0$ there exists a local algorithm achieving the approximation ratio ${\Delta_I (1 - 1/\Delta_K)} + \epsilon$. We also show that this result is the best possible -- no local algorithm can achieve the approximation ratio ${\Delta_I (1 - 1/\Delta_K)}$. Here $\Delta_I$ is the maximum degree of a vertex $i \in I$, and $\Delta_K$ is the maximum degree of a vertex $k \in K$. As a methodological contribution, we introduce the technique of graph unfolding for the design of local approximation algorithms.<|reference_end|>
arxiv
@article{floréen2008tight, title={Tight local approximation results for max-min linear programs}, author={Patrik Floréen, Marja Hassinen, Petteri Kaski, Jukka Suomela}, journal={arXiv preprint arXiv:0804.4815}, year={2008}, doi={10.1007/978-3-540-92862-1_2}, archivePrefix={arXiv}, eprint={0804.4815}, primaryClass={cs.DC} }
floréen2008tight
arxiv-3518
0804.4819
The Minimum Backlog Problem
<|reference_start|>The Minimum Backlog Problem: We study the minimum backlog problem (MBP). This online problem arises, e.g., in the context of sensor networks. We focus on two main variants of MBP. The discrete MBP is a 2-person game played on a graph $G=(V,E)$. The player is initially located at a vertex of the graph. In each time step, the adversary pours a total of one unit of water into cups that are located on the vertices of the graph, arbitrarily distributing the water among the cups. The player then moves from her current vertex to an adjacent vertex and empties the cup at that vertex. The player's objective is to minimize the backlog, i.e., the maximum amount of water in any cup at any time. The geometric MBP is a continuous-time version of the MBP: the cups are points in the two-dimensional plane, the adversary pours water continuously at a constant rate, and the player moves in the plane with unit speed. Again, the player's objective is to minimize the backlog. We show that the competitive ratio of any algorithm for the MBP has a lower bound of $\Omega(D)$, where $D$ is the diameter of the graph (for the discrete MBP) or the diameter of the point set (for the geometric MBP). Therefore we focus on determining a strategy for the player that guarantees a uniform upper bound on the absolute value of the backlog. For the absolute value of the backlog there is a trivial lower bound of $\Omega(D)$, and the deamortization analysis of Dietz and Sleator gives an upper bound of $O(D\log N)$ for $N$ cups. Our main result is a tight upper bound for the geometric MBP: we show that there is a strategy for the player that guarantees a backlog of $O(D)$, independently of the number of cups.<|reference_end|>
arxiv
@article{bender2008the, title={The Minimum Backlog Problem}, author={Michael A. Bender, Sándor P. Fekete, Alexander Kröller, Vincenzo Liberatore, Joseph S. B. Mitchell, Valentin Polishchuk, Jukka Suomela}, journal={Theoretical Computer Science 605 (2015), 51-61}, year={2008}, doi={10.1016/j.tcs.2015.08.027}, archivePrefix={arXiv}, eprint={0804.4819}, primaryClass={cs.DS} }
bender2008the
arxiv-3519
0804.4859
The communication complexity of non-signaling distributions
<|reference_start|>The communication complexity of non-signaling distributions: We study a model of communication complexity that encompasses many well-studied problems, including classical and quantum communication complexity, the complexity of simulating distributions arising from bipartite measurements of shared quantum states, and XOR games. In this model, Alice gets an input x, Bob gets an input y, and their goal is to each produce an output a,b distributed according to some pre-specified joint distribution p(a,b|x,y). We introduce a new technique based on affine combinations of lower-complexity distributions. Specifically, we introduce two complexity measures, one which gives lower bounds on classical communication, and one for quantum communication. These measures can be expressed as convex optimization problems. We show that the dual formulations have a striking interpretation, since they coincide with maximum violations of Bell and Tsirelson inequalities. The dual expressions are closely related to the winning probability of XOR games. These lower bounds subsume many known communication complexity lower bound methods, most notably the recent lower bounds of Linial and Shraibman for the special case of Boolean functions. We show that the gap between the quantum and classical lower bounds is at most linear in the size of the support of the distribution, and does not depend on the size of the inputs. This translates into a bound on the gap between maximal Bell and Tsirelson inequality violations, which was previously known only for the case of distributions with Boolean outcomes and uniform marginals. Finally, we give an exponential upper bound on quantum and classical communication complexity in the simultaneous messages model, for any non-signaling distribution. One consequence is a simple proof that any quantum distribution can be approximated with a constant number of bits of communication.<|reference_end|>
arxiv
@article{degorre2008the, title={The communication complexity of non-signaling distributions}, author={Julien Degorre, Marc Kaplan, Sophie Laplante and Jérémie Roland}, journal={In MFCS'09, LNCS, vol 5734, 270-281 (2009). Quantum Information & Computation, 11(7&8):649-676 (2011)}, year={2008}, doi={10.1007/978-3-642-03816-7_24}, archivePrefix={arXiv}, eprint={0804.4859}, primaryClass={quant-ph cs.CC} }
degorre2008the
arxiv-3520
0804.4865
Characterizing Video Responses in Social Networks
<|reference_start|>Characterizing Video Responses in Social Networks: Video sharing sites, such as YouTube, use video responses to enhance the social interactions among their users. The video response feature allows users to interact and converse through video, by creating a video sequence that begins with an opening video and followed by video responses from other users. Our characterization is over 3.4 million videos and 400,000 video responses collected from YouTube during a 7-day period. We first analyze the characteristics of the video responses, such as popularity, duration, and geography. We then examine the social networks that emerge from the video response interactions.<|reference_end|>
arxiv
@article{benevenuto2008characterizing, title={Characterizing Video Responses in Social Networks}, author={Fabricio Benevenuto, Fernando Duarte, Tiago Rodrigues, Virgilio Almeida, Jussara Almeida, Keith Ross}, journal={arXiv preprint arXiv:0804.4865}, year={2008}, archivePrefix={arXiv}, eprint={0804.4865}, primaryClass={cs.MM cs.CY cs.HC} }
benevenuto2008characterizing
arxiv-3521
0804.4866
Sum-Capacity of Ergodic Fading Interference and Compound Multiaccess Channels
<|reference_start|>Sum-Capacity of Ergodic Fading Interference and Compound Multiaccess Channels: The problem of resource allocation is studied for two-sender two-receiver fading Gaussian interference channels (IFCs) and compound multiaccess channels (C-MACs). The senders in an IFC communicate with their own receiver (unicast) while those in a C-MAC communicate with both receivers (multicast). The instantaneous fading state between every transmit-receive pair in this network is assumed to be known at all transmitters and receivers. Under an average power constraint at each source, the sum-capacity of the C-MAC and the power policy that achieves this capacity is developed. The conditions defining the classes of strong and very strong ergodic IFCs are presented and the multicast sum-capacity is shown to be tight for both classes.<|reference_end|>
arxiv
@article{sankar2008sum-capacity, title={Sum-Capacity of Ergodic Fading Interference and Compound Multiaccess Channels}, author={Lalitha Sankar, Elza Erkip, and H. Vincent Poor}, journal={arXiv preprint arXiv:0804.4866}, year={2008}, doi={10.1109/ISIT.2008.4595485}, archivePrefix={arXiv}, eprint={0804.4866}, primaryClass={cs.IT math.IT} }
sankar2008sum-capacity
arxiv-3522
0804.4881
Search Space Contraction in Canonical Labeling of Graphs
<|reference_start|>Search Space Contraction in Canonical Labeling of Graphs: The individualization-refinement paradigm for computing a canonical labeling and the automorphism group of a graph is investigated. A new algorithmic design aimed at reducing the size of the associated search space is introduced, and a new tool, named "Traces", is presented, together with experimental results and comparisons with existing software, such as McKay's "nauty". It is shown that the approach presented here leads to a huge reduction in the search space, thereby making computation feasible for several classes of graphs which are hard for all the main canonical labeling tools in the literature.<|reference_end|>
arxiv
@article{piperno2008search, title={Search Space Contraction in Canonical Labeling of Graphs}, author={Adolfo Piperno}, journal={arXiv preprint arXiv:0804.4881}, year={2008}, archivePrefix={arXiv}, eprint={0804.4881}, primaryClass={cs.DS cs.DM} }
piperno2008search
arxiv-3523
0804.4885
SimDialog: A visual game dialog editor
<|reference_start|>SimDialog: A visual game dialog editor: SimDialog is a visual editor for dialog in computer games. This paper presents the design of SimDialog, illustrating how script writers and non-programmers can easily create dialog for video games with complex branching structures and dynamic response characteristics. The system creates dialog as a directed graph. This allows for play using the dialog with a state-based cause and effect system that controls selection of non-player character responses and can provide a basic scoring mechanism for games.<|reference_end|>
arxiv
@article{owen2008simdialog:, title={SimDialog: A visual game dialog editor}, author={C. Owen, F. Biocca, C. Bohil, J. Conley}, journal={arXiv preprint arXiv:0804.4885}, year={2008}, archivePrefix={arXiv}, eprint={0804.4885}, primaryClass={cs.HC cs.AI} }
owen2008simdialog:
arxiv-3524
0804.4896
Monotony in Service Orchestrations
<|reference_start|>Monotony in Service Orchestrations: Web Service orchestrations are compositions of different Web Services to form a new service. The services called during the orchestration guarantee a given performance to the orchestrator, usually in the form of contracts. These contracts can be used by the orchestrator to deduce the contract it can offer to its own clients, by performing contract composition. An implicit assumption in contract based QoS management is: "the better the component services perform, the better the orchestration's performance will be". Thus, contract based QoS management for Web services orchestrations implicitly assumes monotony. In some orchestrations, however, monotony can be violated, i.e., the performance of the orchestration improves when the performance of a component service degrades. This is highly undesirable since it can render the process of contract composition inconsistent. In this paper we define monotony for orchestrations modelled by Colored Occurrence Nets (CO-nets) and we characterize the classes of monotonic orchestrations. We show that few orchestrations are indeed monotonic, mostly since latency can be traded for quality of data. We also propose a sound refinement of monotony, called conditional monotony, which forbids this kind of cheating and show that conditional monotony is widely satisfied by orchestrations. This finding leads to reconsidering the way SLAs should be formulated.<|reference_end|>
arxiv
@article{bouillard2008monotony, title={Monotony in Service Orchestrations}, author={Anne Bouillard (IRISA), Sidney Rosario (IRISA), Albert Benveniste (IRISA), Stefan Haar (IRISA)}, journal={arXiv preprint arXiv:0804.4896}, year={2008}, number={RR-6528}, archivePrefix={arXiv}, eprint={0804.4896}, primaryClass={cs.NI} }
bouillard2008monotony
arxiv-3525
0804.4898
A Quadratic Loss Multi-Class SVM
<|reference_start|>A Quadratic Loss Multi-Class SVM: Using a support vector machine requires to set two types of hyperparameters: the soft margin parameter C and the parameters of the kernel. To perform this model selection task, the method of choice is cross-validation. Its leave-one-out variant is known to produce an estimator of the generalization error which is almost unbiased. Its major drawback rests in its time requirement. To overcome this difficulty, several upper bounds on the leave-one-out error of the pattern recognition SVM have been derived. Among those bounds, the most popular one is probably the radius-margin bound. It applies to the hard margin pattern recognition SVM, and by extension to the 2-norm SVM. In this report, we introduce a quadratic loss M-SVM, the M-SVM^2, as a direct extension of the 2-norm SVM to the multi-class case. For this machine, a generalized radius-margin bound is then established.<|reference_end|>
arxiv
@article{monfrini2008a, title={A Quadratic Loss Multi-Class SVM}, author={Emmanuel Monfrini (LORIA), Yann Guermeur (LORIA)}, journal={arXiv preprint arXiv:0804.4898}, year={2008}, archivePrefix={arXiv}, eprint={0804.4898}, primaryClass={cs.LG} }
monfrini2008a
arxiv-3526
0805.0012
Joint Physical Layer Coding and Network Coding for Bi-Directional Relaying
<|reference_start|>Joint Physical Layer Coding and Network Coding for Bi-Directional Relaying: We consider the problem of two transmitters wishing to exchange information through a relay in the middle. The channels between the transmitters and the relay are assumed to be synchronized, average power constrained additive white Gaussian noise channels with a real input with signal-to-noise ratio (SNR) of snr. An upper bound on the capacity is 1/2 log(1+ snr) bits per transmitter per use of the medium-access phase and broadcast phase of the bi-directional relay channel. We show that using lattice codes and lattice decoding, we can obtain a rate of 1/2 log(0.5 + snr) bits per transmitter, which is essentially optimal at high SNRs. The main idea is to decode the sum of the codewords modulo a lattice at the relay followed by a broadcast phase which performs Slepian-Wolf coding with structured codes. For asymptotically low SNR's, jointly decoding the two transmissions at the relay (MAC channel) is shown to be optimal. We also show that if the two transmitters use identical lattices with minimum angle decoding, we can achieve the same rate of 1/2 log(0.5 + snr). The proposed scheme can be thought of as a joint physical layer, network layer code which outperforms other recently proposed analog network coding schemes.<|reference_end|>
arxiv
@article{wilson2008joint, title={Joint Physical Layer Coding and Network Coding for Bi-Directional Relaying}, author={Makesh Pravin Wilson, Krishna Narayanan, Henry Pfister and Alex Sprintson}, journal={arXiv preprint arXiv:0805.0012}, year={2008}, archivePrefix={arXiv}, eprint={0805.0012}, primaryClass={cs.IT math.IT} }
wilson2008joint
arxiv-3527
0805.0022
Straight Skeletons of Three-Dimensional Polyhedra
<|reference_start|>Straight Skeletons of Three-Dimensional Polyhedra: This paper studies the straight skeleton of polyhedra in three dimensions. We first address voxel-based polyhedra (polycubes), formed as the union of a collection of cubical (axis-aligned) voxels. We analyze the ways in which the skeleton may intersect each voxel of the polyhedron, and show that the skeleton may be constructed by a simple voxel-sweeping algorithm taking constant time per voxel. In addition, we describe a more complex algorithm for straight skeletons of voxel-based polyhedra, which takes time proportional to the area of the surfaces of the straight skeleton rather than the volume of the polyhedron. We also consider more general polyhedra with axis-parallel edges and faces, and show that any n-vertex polyhedron of this type has a straight skeleton with O(n^2) features. We provide algorithms for constructing the straight skeleton, with running time O(min(n^2 log n, k log^{O(1)} n)) where k is the output complexity. Next, we discuss the straight skeleton of a general nonconvex polyhedron. We show that it has an ambiguity issue, and suggest a consistent method to resolve it. We prove that the straight skeleton of a general polyhedron has a superquadratic complexity in the worst case. Finally, we report on an implementation of a simple algorithm for the general case.<|reference_end|>
arxiv
@article{barequet2008straight, title={Straight Skeletons of Three-Dimensional Polyhedra}, author={Gill Barequet, David Eppstein, Michael T. Goodrich, and Amir Vaxman}, journal={arXiv preprint arXiv:0805.0022}, year={2008}, archivePrefix={arXiv}, eprint={0805.0022}, primaryClass={cs.CG} }
barequet2008straight
arxiv-3528
0805.0034
Diversity Order Gain with Noisy Feedback in Multiple Access Channels
<|reference_start|>Diversity Order Gain with Noisy Feedback in Multiple Access Channels: In this paper, we study the effect of feedback channel noise on the diversity-multiplexing tradeoff in multiuser MIMO systems using quantized feedback, where each user has m transmit antennas and the base-station receiver has n antennas. We derive an achievable tradeoff and use it to show that in SNR-symmetric channels, a single bit of imperfect feedback is sufficient to double the maximum diversity order to 2mn compared to when there is no feedback (maximum is mn at multiplexing gain of zero). Further, additional feedback bits do not increase this maximum diversity order beyond 2mn. Finally, the above diversity order gain of mn over non-feedback systems can also be achieved for higher multiplexing gains, albeit requiring more than one bit of feedback.<|reference_end|>
arxiv
@article{aggarwal2008diversity, title={Diversity Order Gain with Noisy Feedback in Multiple Access Channels}, author={Vaneet Aggarwal, Ashutosh Sabharwal}, journal={arXiv preprint arXiv:0805.0034}, year={2008}, doi={10.1109/ISIT.2008.4595017}, archivePrefix={arXiv}, eprint={0805.0034}, primaryClass={cs.IT math.IT} }
aggarwal2008diversity
arxiv-3529
0805.0050
On the k-pairs problem
<|reference_start|>On the k-pairs problem: We consider network coding rates for directed and undirected $k$-pairs networks. For directed networks, meagerness is known to be an upper bound on network coding rates. We show that network coding rate can be $\Theta(|V|)$ multiplicative factor smaller than meagerness. For the undirected case, we show some progress in the direction of the $k$-pairs conjecture.<|reference_end|>
arxiv
@article{al-bashabsheh2008on, title={On the k-pairs problem}, author={Ali Al-Bashabsheh, Abbas Yongacoglu}, journal={arXiv preprint arXiv:0805.0050}, year={2008}, archivePrefix={arXiv}, eprint={0805.0050}, primaryClass={cs.IT math.IT} }
al-bashabsheh2008on
arxiv-3530
0805.0051
Communicating the sum of sources over a network
<|reference_start|>Communicating the sum of sources over a network: We consider a network (that is capable of network coding) with a set of sources and terminals, where each terminal is interested in recovering the sum of the sources. Considering directed acyclic graphs with unit capacity edges and independent, unit-entropy sources, we show the rate region when (a) there are two sources and $n$ terminals, and (b) $n$ sources and two terminals. In these cases as long as there exists at least one path from each source to each terminal we demonstrate that there exists a valid assignment of coding vectors to the edges such that the terminals can recover the sum of the sources.<|reference_end|>
arxiv
@article{ramamoorthy2008communicating, title={Communicating the sum of sources over a network}, author={Aditya Ramamoorthy}, journal={arXiv preprint arXiv:0805.0051}, year={2008}, archivePrefix={arXiv}, eprint={0805.0051}, primaryClass={cs.IT math.IT} }
ramamoorthy2008communicating
arxiv-3531
0805.0053
Particle Filtering for Large Dimensional State Spaces with Multimodal Observation Likelihoods
<|reference_start|>Particle Filtering for Large Dimensional State Spaces with Multimodal Observation Likelihoods: We study efficient importance sampling techniques for particle filtering (PF) when either (a) the observation likelihood (OL) is frequently multimodal or heavy-tailed, or (b) the state space dimension is large or both. When the OL is multimodal, but the state transition pdf (STP) is narrow enough, the optimal importance density is usually unimodal. Under this assumption, many techniques have been proposed. But when the STP is broad, this assumption does not hold. We study how existing techniques can be generalized to situations where the optimal importance density is multimodal, but is unimodal conditioned on a part of the state vector. Sufficient conditions to test for the unimodality of this conditional posterior are derived. The number of particles, N, to accurately track using a PF increases with state space dimension, thus making any regular PF impractical for large dimensional tracking problems. We propose a solution that partially addresses this problem. An important class of large dimensional problems with multimodal OL is tracking spatially varying physical quantities such as temperature or pressure in a large area using a network of sensors which may be nonlinear and/or may have non-negligible failure probabilities.<|reference_end|>
arxiv
@article{vaswani2008particle, title={Particle Filtering for Large Dimensional State Spaces with Multimodal Observation Likelihoods}, author={Namrata Vaswani}, journal={IEEE Trans. Sig. Proc., vol. 56(10-1), pp. 4583-4597, Oct. 2008}, year={2008}, doi={10.1109/TSP.2008.925969}, archivePrefix={arXiv}, eprint={0805.0053}, primaryClass={cs.IT math.IT math.ST stat.ME stat.TH} }
vaswani2008particle
arxiv-3532
0805.0065
Communication Requirements for Generating Correlated Random Variables
<|reference_start|>Communication Requirements for Generating Correlated Random Variables: Two familiar notions of correlation are rediscovered as extreme operating points for simulating a discrete memoryless channel, in which a channel output is generated based only on a description of the channel input. Wyner's "common information" coincides with the minimum description rate needed. However, when common randomness independent of the input is available, the necessary description rate reduces to Shannon's mutual information. This work characterizes the optimal tradeoff between the amount of common randomness used and the required rate of description.<|reference_end|>
arxiv
@article{cuff2008communication, title={Communication Requirements for Generating Correlated Random Variables}, author={Paul Cuff (Stanford University)}, journal={arXiv preprint arXiv:0805.0065}, year={2008}, archivePrefix={arXiv}, eprint={0805.0065}, primaryClass={cs.IT cs.GT math.IT math.PR} }
cuff2008communication
arxiv-3533
0805.0087
Universe Detectors for Sybil Defense in Ad Hoc Wireless Networks
<|reference_start|>Universe Detectors for Sybil Defense in Ad Hoc Wireless Networks: The Sybil attack in unknown port networks such as wireless is not considered tractable. A wireless node is not capable of independently differentiating the universe of real nodes from the universe of arbitrary non-existent fictitious nodes created by the attacker. Similar to failure detectors, we propose to use universe detectors to help nodes determine which universe is real. In this paper, we (i) define several variants of the neighborhood discovery problem under Sybil attack, (ii) propose a set of matching universe detectors, (iii) demonstrate the necessity of additional topological constraints (node density and communication range) for the problems to be solvable, and (iv) present SAND -- an algorithm that solves these problems with the help of appropriate universe detectors; this solution demonstrates that the proposed universe detectors are the weakest detectors possible for each problem.<|reference_end|>
arxiv
@article{vora2008universe, title={Universe Detectors for Sybil Defense in Ad Hoc Wireless Networks}, author={Adnan Vora, Mikhail Nesterenko, Sébastien Tixeuil (LIP6), Sylvie Delaët (LRI)}, journal={arXiv preprint arXiv:0805.0087}, year={2008}, number={RR-6529}, archivePrefix={arXiv}, eprint={0805.0087}, primaryClass={cs.DC cs.CR cs.NI} }
vora2008universe
arxiv-3534
0805.0092
Cellular Systems with Full-Duplex Compress-and-Forward Relaying and Cooperative Base Stations
<|reference_start|>Cellular Systems with Full-Duplex Compress-and-Forward Relaying and Cooperative Base Stations: In this paper the advantages provided by multicell processing of signals transmitted by mobile terminals (MTs) which are received via dedicated relay terminals (RTs) are studied. It is assumed that each RT is capable of full-duplex operation and receives the transmission of adjacent relay terminals. Focusing on intra-cell TDMA and non-fading channels, a simplified relay-aided uplink cellular model based on a model introduced by Wyner is considered. Assuming a nomadic application in which the RTs are oblivious to the MTs' codebooks, a form of distributed compress-and-forward (CF) scheme with decoder side information is employed. The per-cell sum-rate of the CF scheme is derived and is given as a solution of a simple fixed point equation. This achievable rate reveals that the CF scheme is able to completely eliminate the inter-relay interference, and it approaches a ``cut-set-like'' upper bound for strong RTs transmission power. The CF rate is also shown to surpass the rate of an amplify-and-forward scheme via numerical calculations for a wide range of the system parameters.<|reference_end|>
arxiv
@article{somekh2008cellular, title={Cellular Systems with Full-Duplex Compress-and-Forward Relaying and Cooperative Base Stations}, author={Oren Somekh, Osvaldo Simeone, H. Vincent Poor, and Shlomo Shamai (Shitz)}, journal={arXiv preprint arXiv:0805.0092}, year={2008}, doi={10.1109/ISIT.2008.4595357}, archivePrefix={arXiv}, eprint={0805.0092}, primaryClass={cs.IT math.IT} }
somekh2008cellular
arxiv-3535
0805.0108
The Gaussian Wiretap Channel with a Helping Interferer
<|reference_start|>The Gaussian Wiretap Channel with a Helping Interferer: Due to the broadcast nature of the wireless medium, wireless communication is susceptible to adversarial eavesdropping. This paper describes how eavesdropping can potentially be defeated by exploiting the superposition nature of the wireless medium. A Gaussian wire-tap channel with a helping interferer (WTC-HI) is considered in which a transmitter sends confidential messages to its intended receiver in the presence of a passive eavesdropper and with the help of an interferer. The interferer, which does not know the confidential message, assists the confidential message transmission by sending a signal that is independent of the transmitted message. An achievable secrecy rate and a Sato-type upper bound on the secrecy capacity are given for the Gaussian WTC-HI. Through numerical analysis, it is found that the upper bound is close to the achievable secrecy rate when the interference is weak for symmetric interference channels, and under more general conditions for asymmetric Gaussian interference channels.<|reference_end|>
arxiv
@article{tang2008the, title={The Gaussian Wiretap Channel with a Helping Interferer}, author={Xiaojun Tang, Ruoheng Liu, Predrag Spasojevic, H. Vincent Poor}, journal={arXiv preprint arXiv:0805.0108}, year={2008}, archivePrefix={arXiv}, eprint={0805.0108}, primaryClass={cs.IT cs.CR math.IT} }
tang2008the
arxiv-3536
0805.0120
Nonnegative Matrix Factorization via Rank-One Downdate
<|reference_start|>Nonnegative Matrix Factorization via Rank-One Downdate: Nonnegative matrix factorization (NMF) was popularized as a tool for data mining by Lee and Seung in 1999. NMF attempts to approximate a matrix with nonnegative entries by a product of two low-rank matrices, also with nonnegative entries. We propose an algorithm called rank-one downdate (R1D) for computing an NMF that is partly motivated by singular value decomposition. This algorithm computes the dominant singular values and vectors of adaptively determined submatrices of a matrix. On each iteration, R1D extracts a rank-one submatrix from the dataset according to an objective function. We establish a theoretical result that maximizing this objective function corresponds to correctly classifying articles in a nearly separable corpus. We also provide computational experiments showing the success of this method in identifying features in realistic datasets.<|reference_end|>
arxiv
@article{biggs2008nonnegative, title={Nonnegative Matrix Factorization via Rank-One Downdate}, author={Michael Biggs, Ali Ghodsi, Stephen Vavasis}, journal={arXiv preprint arXiv:0805.0120}, year={2008}, archivePrefix={arXiv}, eprint={0805.0120}, primaryClass={cs.IR cs.NA} }
biggs2008nonnegative
arxiv-3537
0805.0129
On some entropy functionals derived from R\'enyi information divergence
<|reference_start|>On some entropy functionals derived from R\'enyi information divergence: We consider the maximum entropy problems associated with R\'enyi $Q$-entropy, subject to two kinds of constraints on expected values. The constraints considered are a constraint on the standard expectation, and a constraint on the generalized expectation as encountered in nonextensive statistics. The optimum maximum entropy probability distributions, which can exhibit a power-law behaviour, are derived and characterized. The R\'enyi entropy of the optimum distributions can be viewed as a function of the constraint. This defines two families of entropy functionals in the space of possible expected values. General properties of these functionals, including nonnegativity, minimum, convexity, are documented. Their relationships as well as numerical aspects are also discussed. Finally, we work out some specific cases for the reference measure $Q(x)$ and recover in a limit case some well-known entropies.<|reference_end|>
arxiv
@article{bercher2008on, title={On some entropy functionals derived from R\'enyi information divergence}, author={Jean-François Bercher (LSS, IGM-LabInfo)}, journal={Information Sciences 178, 12 (2008) 2489-2506}, year={2008}, doi={10.1016/j.ins.2008.02.003}, archivePrefix={arXiv}, eprint={0805.0129}, primaryClass={cs.IT cond-mat.other math.IT} }
bercher2008on
arxiv-3538
0805.0131
Diversity-Multiplexing Tradeoff in Selective-Fading Multiple-Access MIMO Channels
<|reference_start|>Diversity-Multiplexing Tradeoff in Selective-Fading Multiple-Access MIMO Channels: We establish the optimal diversity-multiplexing (DM) tradeoff of coherent selective-fading multiple-access multiple-input multiple-output (MIMO) channels and provide corresponding code design criteria. As a byproduct, on the conceptual level, we find an interesting relation between the DM tradeoff framework and the notion of dominant error event regions which was first introduced in the AWGN case by Gallager, IEEE Trans. IT, 1985. This relation allows to accurately characterize the error mechanisms in MIMO fading multiple-access channels. In particular, we find that, for a given rate tuple, the maximum achievable diversity order is determined by the error event that dominates the total error probability exponentially in SNR. Finally, we show that the distributed space-time code construction proposed recently by Badr and Belfiore, Int. Zurich Seminar on Commun., 2008, satisfies the code design criteria derived in this paper.<|reference_end|>
arxiv
@article{coronel2008diversity-multiplexing, title={Diversity-Multiplexing Tradeoff in Selective-Fading Multiple-Access MIMO Channels}, author={Pedro Coronel and Markus Gärtner and Helmut Bölcskei}, journal={arXiv preprint arXiv:0805.0131}, year={2008}, archivePrefix={arXiv}, eprint={0805.0131}, primaryClass={cs.IT math.IT} }
coronel2008diversity-multiplexing
arxiv-3539
0805.0149
On Recovery of Sparse Signals via $\ell_1$ Minimization
<|reference_start|>On Recovery of Sparse Signals via $\ell_1$ Minimization: This article considers constrained $\ell_1$ minimization methods for the recovery of high dimensional sparse signals in three settings: noiseless, bounded error and Gaussian noise. A unified and elementary treatment is given in these noise settings for two $\ell_1$ minimization methods: the Dantzig selector and $\ell_1$ minimization with an $\ell_2$ constraint. The results of this paper improve the existing results in the literature by weakening the conditions and tightening the error bounds. The improvement on the conditions shows that signals with larger support can be recovered accurately. This paper also establishes connections between restricted isometry property and the mutual incoherence property. Some results of Candes, Romberg and Tao (2006) and Donoho, Elad, and Temlyakov (2006) are extended.<|reference_end|>
arxiv
@article{cai2008on, title={On Recovery of Sparse Signals via $\ell_1$ Minimization}, author={T. Tony Cai, Guangwu Xu, and Jun Zhang}, journal={arXiv preprint arXiv:0805.0149}, year={2008}, archivePrefix={arXiv}, eprint={0805.0149}, primaryClass={cs.LG} }
cai2008on
arxiv-3540
0805.0154
The Tsallis entropy and the Shannon entropy of a universal probability
<|reference_start|>The Tsallis entropy and the Shannon entropy of a universal probability: We study the properties of Tsallis entropy and Shannon entropy from the point of view of algorithmic randomness. In algorithmic information theory, there are two equivalent ways to define the program-size complexity K(s) of a given finite binary string s. In the standard way, K(s) is defined as the length of the shortest input string for the universal self-delimiting Turing machine to output s. In the other way, the so-called universal probability m is introduced first, and then K(s) is defined as -log_2 m(s) without reference to the concept of program-size. In this paper, we investigate the properties of the Shannon entropy, the power sum, and the Tsallis entropy of a universal probability by means of the notion of program-size complexity. We determine the convergence or divergence of each of these three quantities, and evaluate its degree of randomness if it converges.<|reference_end|>
arxiv
@article{tadaki2008the, title={The Tsallis entropy and the Shannon entropy of a universal probability}, author={Kohtaro Tadaki}, journal={2008 IEEE International Symposium on Information Theory, Toronto, ON (2008) 2111-2115}, year={2008}, doi={10.1109/ISIT.2008.4595362}, archivePrefix={arXiv}, eprint={0805.0154}, primaryClass={cs.IT cs.CC math.IT} }
tadaki2008the
arxiv-3541
0805.0162
Morphing of Triangular Meshes in Shape Space
<|reference_start|>Morphing of Triangular Meshes in Shape Space: We present a novel approach to morph between two isometric poses of the same non-rigid object given as triangular meshes. We model the morphs as linear interpolations in a suitable shape space $\mathcal{S}$. For triangulated 3D polygons, we prove that interpolating linearly in this shape space corresponds to the most isometric morph in $\mathbb{R}^3$. We then extend this shape space to arbitrary triangulations in 3D using a heuristic approach and show the practical use of the approach using experiments. Furthermore, we discuss a modified shape space that is useful for isometric skeleton morphing. All of the newly presented approaches solve the morphing problem without the need to solve a minimization problem.<|reference_end|>
arxiv
@article{wuhrer2008morphing, title={Morphing of Triangular Meshes in Shape Space}, author={Stefanie Wuhrer, Prosenjit Bose, Chang Shu, Joseph O'Rourke, Alan Brunton}, journal={International Journal of Shape Modeling, 16(1-2):195-212, 2010}, year={2008}, doi={10.1142/S0218654310001341}, archivePrefix={arXiv}, eprint={0805.0162}, primaryClass={cs.CG cs.GR} }
wuhrer2008morphing
arxiv-3542
0805.0173
A Computer Search for N1L-Configurations
<|reference_start|>A Computer Search for N1L-Configurations: In an earlier paper the author defined N1L configurations, and stated a conjecture concerning them which would lead to an improvement by a constant factor to the sphere-packing bound for linear double error correcting codes. Here a computer search is presented, in an effort to gather evidence on the conjecture.<|reference_end|>
arxiv
@article{dowd2008a, title={A Computer Search for N1L-Configurations}, author={Martin Dowd}, journal={arXiv preprint arXiv:0805.0173}, year={2008}, archivePrefix={arXiv}, eprint={0805.0173}, primaryClass={cs.IT math.IT} }
dowd2008a
arxiv-3543
0805.0184
Information, Energy and Density for Ad Hoc Sensor Networks over Correlated Random Fields: Large Deviations Analysis
<|reference_start|>Information, Energy and Density for Ad Hoc Sensor Networks over Correlated Random Fields: Large Deviations Analysis: Using large deviations results that characterize the amount of information per node on a two-dimensional (2-D) lattice, asymptotic behavior of a sensor network deployed over a correlated random field for statistical inference is investigated. Under a 2-D hidden Gauss-Markov random field model with symmetric first order conditional autoregression, the behavior of the total information [nats] and energy efficiency [nats/J] defined as the ratio of total gathered information to the required energy is obtained as the coverage area, node density and energy vary.<|reference_end|>
arxiv
@article{sung2008information, title={Information, Energy and Density for Ad Hoc Sensor Networks over Correlated Random Fields: Large Deviations Analysis}, author={Youngchul Sung, H. Vincent Poor and Heejung Yu}, journal={arXiv preprint arXiv:0805.0184}, year={2008}, doi={10.1109/ISIT.2008.4595256}, archivePrefix={arXiv}, eprint={0805.0184}, primaryClass={cs.IT math.IT} }
sung2008information
arxiv-3544
0805.0192
Specification of an extensible and portable file format for electronic structure and crystallographic data
<|reference_start|>Specification of an extensible and portable file format for electronic structure and crystallographic data: In order to allow different software applications, in constant evolution, to interact and exchange data, flexible file formats are needed. A file format specification for different types of content has been elaborated to allow communication of data for the software developed within the European Network of Excellence "NANOQUANTA", focusing on first-principles calculations of materials and nanosystems. It might be used by other software as well, and is described here in detail. The format relies on the NetCDF binary input/output library, already used in many different scientific communities, that provides flexibility as well as portability across languages and platforms. Thanks to NetCDF, the content can be accessed by keywords, ensuring the file format is extensible and backward compatible.<|reference_end|>
arxiv
@article{gonze2008specification, title={Specification of an extensible and portable file format for electronic structure and crystallographic data}, author={X. Gonze (1,2), C.-O. Almbladh (1,3), A. Cucca (1,4), D. Caliste (1,2,5), C. Freysoldt (1,6), M. A. L. Marques (1,7,8), V. Olevano (1,4,9), Y. Pouillon (1,2,10), M.J. Verstraete (1,11) ((1) European Theoretical Spectroscopy Facility, (2) Université Catholique de Louvain, Louvain-la-Neuve, Belgium (3) University of Lund, Lund, Sweden (4) LSI, CNRS-CEA, Ecole Polytechnique, Palaiseau, France, (5) C.E.A. Grenoble, Grenoble, France, (6) Fritz-Haber-Institut, Berlin, Germany, (7) U. Lyon 1, Villeurbanne, France, (8) U. Coimbra, Coimbra, Portugal, (9) Institut NEEL, CNRS and U. Joseph Fourier, Grenoble, France, (10) Universidad del Pais Vasco UPV/EHU, Donostia-San Sebastián, Spain, (11) U. York, York, United Kingdom)}, journal={arXiv preprint arXiv:0805.0192}, year={2008}, archivePrefix={arXiv}, eprint={0805.0192}, primaryClass={cs.DL cond-mat.mtrl-sci cs.DB} }
gonze2008specification
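A minimal sketch of the kind of keyword-addressed NetCDF file the abstract describes, written with the Python netCDF4 bindings; the dimension and variable names used here are illustrative stand-ins, not the normative names defined by the specification.

```python
from netCDF4 import Dataset
import numpy as np

# Write a small NetCDF file whose content is addressed by named dimensions
# and variables (the names below are illustrative only).
with Dataset("example_etsf.nc", "w") as nc:
    nc.description = "toy example of a keyword-addressed NetCDF file"
    nc.createDimension("number_of_atoms", 2)
    nc.createDimension("number_of_reduced_dimensions", 3)

    pos = nc.createVariable(
        "reduced_atom_positions", "f8",
        ("number_of_atoms", "number_of_reduced_dimensions"))
    pos[:] = np.array([[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]])
    pos.units = "dimensionless"

# Read back by name; variables a reader does not know about are simply ignored.
with Dataset("example_etsf.nc") as nc:
    print(nc.variables["reduced_atom_positions"][:])
```

Because readers look up content by name, a file written with extra variables remains usable by software that only understands an older subset, which is the extensibility and backward compatibility the abstract emphasises.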
arxiv-3545
0805.0197
Flatness of the Energy Landscape for Horn Clauses
<|reference_start|>Flatness of the Energy Landscape for Horn Clauses: The Little-Hopfield neural network programmed with Horn clauses is studied. We argue that the energy landscape of the system, corresponding to the inconsistency function for logical interpretations of the sets of Horn clauses, has minimal ruggedness. This is supported by computer simulations.<|reference_end|>
arxiv
@article{sathasivam2008flatness, title={Flatness of the Energy Landscape for Horn Clauses}, author={Saratha Sathasivam (USM), Wan Ahmad Tajuddin Wan Abdullah (Univ. Malaya)}, journal={Matematika 23 (2007) 147-156}, year={2008}, archivePrefix={arXiv}, eprint={0805.0197}, primaryClass={cond-mat.dis-nn cs.NE} }
sathasivam2008flatness
arxiv-3546
0805.0200
(m,k)-firm constraints and DBP scheduling: impact of the initial k-sequence and exact schedulability test
<|reference_start|>(m,k)-firm constraints and DBP scheduling: impact of the initial k-sequence and exact schedulability test: In this paper we study the scheduling of (m,k)-firm synchronous periodic task systems using the Distance Based Priority (DBP) scheduler. We first show three phenomena: (i) choosing, for each task, the initial k-sequence 1^k is not optimal, (ii) we can even start the scheduling from a (fictive) error state (in regard to the initial k-sequence) and (iii) the period of feasible DBP-schedules is not necessarily the task hyper-period. We then show that any feasible DBP-schedule is periodic and we upper-bound the length of that period. Lastly, based on our periodicity result we provide an exact schedulability test.<|reference_end|>
arxiv
@article{goossens2008(m,k)-firm, title={(m,k)-firm constraints and DBP scheduling: impact of the initial k-sequence and exact schedulability test}, author={Joël Goossens}, journal={arXiv preprint arXiv:0805.0200}, year={2008}, archivePrefix={arXiv}, eprint={0805.0200}, primaryClass={cs.OS} }
goossens2008(m,k)-firm
arxiv-3547
0805.0202
A Pseudo-Boolean Solution to the Maximum Quartet Consistency Problem
<|reference_start|>A Pseudo-Boolean Solution to the Maximum Quartet Consistency Problem: Determining the evolutionary history of a given biological data is an important task in biological sciences. Given a set of quartet topologies over a set of taxa, the Maximum Quartet Consistency (MQC) problem consists of computing a global phylogeny that satisfies the maximum number of quartets. A number of solutions have been proposed for the MQC problem, including Dynamic Programming, Constraint Programming, and more recently Answer Set Programming (ASP). ASP is currently the most efficient approach for optimally solving the MQC problem. This paper proposes encoding the MQC problem with pseudo-Boolean (PB) constraints. The use of PB allows solving the MQC problem with efficient PB solvers, and also allows considering different modeling approaches for the MQC problem. Initial results are promising, and suggest that PB can be an effective alternative for solving the MQC problem.<|reference_end|>
arxiv
@article{morgado2008a, title={A Pseudo-Boolean Solution to the Maximum Quartet Consistency Problem}, author={Antonio Morgado, Joao Marques-Silva}, journal={arXiv preprint arXiv:0805.0202}, year={2008}, archivePrefix={arXiv}, eprint={0805.0202}, primaryClass={cs.AI cs.LO} }
morgado2008a
arxiv-3548
0805.0231
CMA-ES with Two-Point Step-Size Adaptation
<|reference_start|>CMA-ES with Two-Point Step-Size Adaptation: We combine a refined version of two-point step-size adaptation with the covariance matrix adaptation evolution strategy (CMA-ES). Additionally, we suggest polished formulae for the learning rate of the covariance matrix and the recombination weights. In contrast to cumulative step-size adaptation or to the 1/5-th success rule, the refined two-point adaptation (TPA) does not rely on any internal model of optimality. In contrast to conventional self-adaptation, the TPA will achieve a better target step-size in particular with large populations. The disadvantage of TPA is that it relies on two additional objective function evaluations per generation.<|reference_end|>
arxiv
@article{hansen2008cma-es, title={CMA-ES with Two-Point Step-Size Adaptation}, author={Nikolaus Hansen (INRIA Futurs)}, journal={arXiv preprint arXiv:0805.0231}, year={2008}, number={RR-6527}, archivePrefix={arXiv}, eprint={0805.0231}, primaryClass={cs.NE} }
hansen2008cma-es
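The core idea of two-point step-size adaptation can be illustrated in a stripped-down (mu/mu, lambda)-ES without covariance adaptation: after the mean moves, probe a shorter and a longer step along the realised mean shift and adjust sigma towards whichever is better. The sketch below is only a schematic reading of that idea; the constants and the exact update rule are guesses and not the formulae proposed in the report.

```python
import numpy as np

def tpa_es(f, x0, sigma0=1.0, lam=10, iters=200, alpha=0.5, beta=1.5, damp=0.3):
    """Toy (mu/mu, lambda)-ES with a schematic two-point step-size adaptation."""
    rng = np.random.default_rng(0)
    mean = np.array(x0, dtype=float)
    sigma = sigma0
    mu = lam // 2
    for _ in range(iters):
        z = rng.standard_normal((lam, mean.size))
        pop = mean + sigma * z
        order = np.argsort([f(x) for x in pop])
        new_mean = pop[order[:mu]].mean(axis=0)
        shift = new_mean - mean
        # Two extra evaluations: a shorter and a longer step along the mean shift.
        f_short = f(mean + alpha * shift)
        f_long = f(mean + beta * shift)
        # If the longer step is better, the step size is too small, and vice versa.
        sigma *= np.exp(damp if f_long < f_short else -damp)
        mean = new_mean
    return mean, sigma

# Example on the sphere function.
sphere = lambda x: float(np.dot(x, x))
xbest, _ = tpa_es(sphere, x0=np.ones(5))
print(sphere(xbest))
```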
arxiv-3549
0805.0241
Asymptotically Good LDPC Convolutional Codes Based on Protographs
<|reference_start|>Asymptotically Good LDPC Convolutional Codes Based on Protographs: LDPC convolutional codes have been shown to be capable of achieving the same capacity-approaching performance as LDPC block codes with iterative message-passing decoding. In this paper, asymptotic methods are used to calculate a lower bound on the free distance for several ensembles of asymptotically good protograph-based LDPC convolutional codes. Further, we show that the free distance to constraint length ratio of the LDPC convolutional codes exceeds the minimum distance to block length ratio of corresponding LDPC block codes.<|reference_end|>
arxiv
@article{mitchell2008asymptotically, title={Asymptotically Good LDPC Convolutional Codes Based on Protographs}, author={David G. M. Mitchell, Ali E. Pusane, Kamil Sh. Zigangirov, and Daniel J. Costello, Jr}, journal={arXiv preprint arXiv:0805.0241}, year={2008}, doi={10.1109/ISIT.2008.4595143}, archivePrefix={arXiv}, eprint={0805.0241}, primaryClass={cs.IT math.IT} }
mitchell2008asymptotically
arxiv-3550
0805.0268
Towards Exploring Fundamental Limits of System-Specific Cryptanalysis Within Limited Attack Classes: Application to ABSG
<|reference_start|>Towards Exploring Fundamental Limits of System-Specific Cryptanalysis Within Limited Attack Classes: Application to ABSG: A new approach on cryptanalysis is proposed where the goal is to explore the fundamental limits of a specific class of attacks against a particular cryptosystem. As a first step, the approach is applied on ABSG, which is an LFSR-based stream cipher where irregular decimation techniques are utilized. Consequently, under some mild assumptions, which are common in cryptanalysis, the tight lower bounds on the algorithmic complexity of successful Query-Based Key-Recovery attacks are derived for two different setups of practical interest. The proofs rely on the concept of ``typicality'' of information theory.<|reference_end|>
arxiv
@article{altug2008towards, title={Towards Exploring Fundamental Limits of System-Specific Cryptanalysis Within Limited Attack Classes: Application to ABSG}, author={Yucel Altug and M. Kivanc Mihcak}, journal={arXiv preprint arXiv:0805.0268}, year={2008}, archivePrefix={arXiv}, eprint={0805.0268}, primaryClass={cs.CR cs.IT math.IT} }
altug2008towards
arxiv-3551
0805.0272
Capacity of The Discrete-Time Non-Coherent Memoryless Gaussian Channels at Low SNR
<|reference_start|>Capacity of The Discrete-Time Non-Coherent Memoryless Gaussian Channels at Low SNR: We address the capacity of a discrete-time memoryless Gaussian channel, where the channel state information (CSI) is neither available at the transmitter nor at the receiver. The optimal capacity-achieving input distribution at low signal-to-noise ratio (SNR) is precisely characterized, and the exact capacity of a non-coherent channel is derived. The derived relations allow a better understanding of the capacity of non-coherent channels at low SNR. Then, we compute the non-coherence penalty and give a more precise characterization of the sub-linear term in SNR. Finally, in order to get more insight on how the optimal input varies with SNR, upper and lower bounds on the non-zero mass point location of the capacity-achieving input are given.<|reference_end|>
arxiv
@article{rezki2008capacity, title={Capacity of The Discrete-Time Non-Coherent Memoryless Gaussian Channels at Low SNR}, author={Z. Rezki and David Haccoun and François Gagnon}, journal={arXiv preprint arXiv:0805.0272}, year={2008}, archivePrefix={arXiv}, eprint={0805.0272}, primaryClass={cs.IT math.IT} }
rezki2008capacity
arxiv-3552
0805.0307
Disentangling Visibility and Self-Promotion Bias in the arXiv:astro-ph Positional Citation Effect
<|reference_start|>Disentangling Visibility and Self-Promotion Bias in the arXiv:astro-ph Positional Citation Effect: We established in an earlier study that articles listed at or near the top of the daily arXiv:astro-ph mailings receive on average significantly more citations than articles further down the list. In our earlier work we were not able to decide whether this positional citation effect was due to author self-promotion of intrinsically more citable papers or whether papers are cited more often simply because they are at the top of the astro-ph listing. Using new data we can now disentangle both effects. Based on their submission times we separate articles into a self-promoted sample and a sample of articles that achieved a high rank on astro-ph by chance and compare their citation distributions with those of articles on lower astro-ph positions. We find that the positional citation effect is a superposition of self-promotion and visibility bias.<|reference_end|>
arxiv
@article{dietrich2008disentangling, title={Disentangling Visibility and Self-Promotion Bias in the arXiv:astro-ph Positional Citation Effect}, author={J. P. Dietrich}, journal={arXiv preprint arXiv:0805.0307}, year={2008}, doi={10.1086/589836}, archivePrefix={arXiv}, eprint={0805.0307}, primaryClass={astro-ph cs.DL} }
dietrich2008disentangling
arxiv-3553
0805.0330
Alternating Automata on Data Trees and XPath Satisfiability
<|reference_start|>Alternating Automata on Data Trees and XPath Satisfiability: A data tree is an unranked ordered tree whose every node is labelled by a letter from a finite alphabet and an element ("datum") from an infinite set, where the latter can only be compared for equality. The article considers alternating automata on data trees that can move downward and rightward, and have one register for storing data. The main results are that nonemptiness over finite data trees is decidable but not primitive recursive, and that nonemptiness of safety automata is decidable but not elementary. The proofs use nondeterministic tree automata with faulty counters. Allowing upward moves, leftward moves, or two registers, each causes undecidability. As corollaries, decidability is obtained for two data-sensitive fragments of the XPath query language.<|reference_end|>
arxiv
@article{jurdzinski2008alternating, title={Alternating Automata on Data Trees and XPath Satisfiability}, author={Marcin Jurdzinski and Ranko Lazic}, journal={arXiv preprint arXiv:0805.0330}, year={2008}, archivePrefix={arXiv}, eprint={0805.0330}, primaryClass={cs.LO cs.DB cs.FL} }
jurdzinski2008alternating
arxiv-3554
0805.0337
On Distributed Function Computation in Structure-Free Random Networks
<|reference_start|>On Distributed Function Computation in Structure-Free Random Networks: We consider in-network computation of MAX in a structure-free random multihop wireless network. Nodes do not know their relative or absolute locations and use the Aloha MAC protocol. For one-shot computation, we describe a protocol in which the MAX value becomes available at the origin in $O(\sqrt{n/\log n})$ slots with high probability. This is within a constant factor of that required by the best coordinated protocol. A minimal structure (knowledge of hop-distance from the sink) is imposed on the network and with this structure, we describe a protocol for pipelined computation of MAX that achieves a rate of $\Omega(1/(\log^2 n)).$<|reference_end|>
arxiv
@article{kamath2008on, title={On Distributed Function Computation in Structure-Free Random Networks}, author={Sudeep Kamath and D. Manjunath}, journal={arXiv preprint arXiv:0805.0337}, year={2008}, archivePrefix={arXiv}, eprint={0805.0337}, primaryClass={cs.IT math.IT} }
kamath2008on
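A toy simulation can make the setting concrete: nodes hold values, transmit under a slotted-Aloha rule, and forward the running maximum until the sink learns the global MAX. The sketch below uses a line network and a simplistic collision model, so it illustrates the problem only; it is not the protocol or the random multihop model analysed in the paper.

```python
import random

def simulate_max_line(n=50, p=0.3, seed=1):
    """Toy slotted-Aloha simulation of MAX dissemination on a line network."""
    random.seed(seed)
    values = [random.random() for _ in range(n)]
    known = list(values)            # running max known at each node
    true_max = max(values)
    slots = 0
    while known[0] < true_max:      # node 0 plays the role of the sink
        slots += 1
        tx = [random.random() < p for _ in range(n)]
        new = list(known)
        for i in range(n):
            nbrs = [j for j in (i - 1, i + 1) if 0 <= j < n]
            senders = [j for j in nbrs if tx[j]]
            # Reception succeeds only if exactly one neighbour transmits and
            # node i itself is silent (half-duplex, collision model).
            if len(senders) == 1 and not tx[i]:
                new[i] = max(new[i], known[senders[0]])
        known = new
    return slots

print("slots until the sink knows MAX:", simulate_max_line())
```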
arxiv-3555
0805.0360
Prediction and Mitigation of Crush Conditions in Emergency Evacuations
<|reference_start|>Prediction and Mitigation of Crush Conditions in Emergency Evacuations: Several simulation environments exist for the simulation of large-scale evacuations of buildings, ships, or other enclosed spaces. These offer sophisticated tools for the study of human behaviour, the recreation of environmental factors such as fire or smoke, and the inclusion of architectural or structural features, such as elevators, pillars and exits. Although such simulation environments can provide insights into crowd behaviour, they lack the ability to examine potentially dangerous forces building up within a crowd. These are commonly referred to as crush conditions, and are a common cause of death in emergency evacuations. In this paper, we describe a methodology for the prediction and mitigation of crush conditions. The paper is organised as follows. We first establish the need for such a model, defining the main factors that lead to crush conditions, and describing several exemplar case studies. We then examine current methods for studying crush, and describe their limitations. From this, we develop a three-stage hybrid approach, using a combination of techniques. We conclude with a brief discussion of the potential benefits of our approach.<|reference_end|>
arxiv
@article{harding2008prediction, title={Prediction and Mitigation of Crush Conditions in Emergency Evacuations}, author={Peter J. Harding, Martyn Amos and Steve Gwynne}, journal={arXiv preprint arXiv:0805.0360}, year={2008}, archivePrefix={arXiv}, eprint={0805.0360}, primaryClass={cs.CE cs.MA} }
harding2008prediction
arxiv-3556
0805.0375
Wireless Secrecy in Cellular Systems with Infrastructure--Aided Cooperation
<|reference_start|>Wireless Secrecy in Cellular Systems with Infrastructure--Aided Cooperation: In cellular systems, confidentiality of uplink transmission with respect to eavesdropping terminals can be ensured by creating intentional interference via scheduling of concurrent downlink transmissions. In this paper, this basic idea is explored from an information-theoretic standpoint by focusing on a two-cell scenario where the involved base stations are connected via a finite-capacity backbone link. A number of transmission strategies are considered that aim at improving uplink confidentiality under constraints on the downlink rate that acts as an interfering signal. The strategies differ mainly in the way the backbone link is exploited by the cooperating downlink- to the uplink-operated base stations. Achievable rates are derived for both the Gaussian (unfaded) and the fading cases, under different assumptions on the channel state information available at different nodes. Numerical results are also provided to corroborate the analysis. Overall, the analysis reveals that a combination of scheduling and base station cooperation is a promising means to improve transmission confidentiality in cellular systems.<|reference_end|>
arxiv
@article{popovski2008wireless, title={Wireless Secrecy in Cellular Systems with Infrastructure--Aided Cooperation}, author={Petar Popovski and Osvaldo Simeone}, journal={arXiv preprint arXiv:0805.0375}, year={2008}, archivePrefix={arXiv}, eprint={0805.0375}, primaryClass={cs.IT cs.CR math.IT} }
popovski2008wireless
arxiv-3557
0805.0389
Algorithms for Probabilistically-Constrained Models of Risk-Averse Stochastic Optimization with Black-Box Distributions
<|reference_start|>Algorithms for Probabilistically-Constrained Models of Risk-Averse Stochastic Optimization with Black-Box Distributions: We consider various stochastic models that incorporate the notion of risk-averseness into the standard 2-stage recourse model, and develop novel techniques for solving the algorithmic problems arising in these models. A key notable feature of our work that distinguishes it from work in some other related models, such as the (standard) budget model and the (demand-) robust model, is that we obtain results in the black-box setting, that is, where one is given only sampling access to the underlying distribution. Our first model, which we call the risk-averse budget model, incorporates the notion of risk-averseness via a probabilistic constraint that restricts the probability (according to the underlying distribution) with which the second-stage cost may exceed a given budget B to at most a given input threshold \rho. We also consider a closely-related model that we call the risk-averse robust model, where we seek to minimize the first-stage cost and the (1-\rho)-quantile of the second-stage cost. We obtain approximation algorithms for a variety of combinatorial optimization problems including the set cover, vertex cover, multicut on trees, min cut, and facility location problems, in the risk-averse budget and robust models with black-box distributions. We obtain near-optimal solutions that preserve the budget approximately and incur a small blow-up of the probability threshold (both of which are unavoidable). To the best of our knowledge, these are the first approximation results for problems involving probabilistic constraints and black-box distributions. A major component of our results is a fully polynomial approximation scheme for solving the LP-relaxation of the risk-averse problem.<|reference_end|>
arxiv
@article{swamy2008algorithms, title={Algorithms for Probabilistically-Constrained Models of Risk-Averse Stochastic Optimization with Black-Box Distributions}, author={Chaitanya Swamy}, journal={arXiv preprint arXiv:0805.0389}, year={2008}, archivePrefix={arXiv}, eprint={0805.0389}, primaryClass={cs.DS cs.CC cs.DM} }
swamy2008algorithms
arxiv-3558
0805.0438
Network-based consensus averaging with general noisy channels
<|reference_start|>Network-based consensus averaging with general noisy channels: This paper focuses on the consensus averaging problem on graphs under general noisy channels. We study a particular class of distributed consensus algorithms based on damped updates, and using the ordinary differential equation method, we prove that the updates converge almost surely to exact consensus for finite variance noise. Our analysis applies to various types of stochastic disturbances, including errors in parameters, transmission noise, and quantization noise. Under a suitable stability condition, we prove that the error is asymptotically Gaussian, and we show how the asymptotic covariance is specified by the graph Laplacian. For additive parameter noise, we show how the scaling of the asymptotic MSE is controlled by the spectral gap of the Laplacian.<|reference_end|>
arxiv
@article{rajagopal2008network-based, title={Network-based consensus averaging with general noisy channels}, author={Ram Rajagopal, Martin J. Wainwright}, journal={arXiv preprint arXiv:0805.0438}, year={2008}, archivePrefix={arXiv}, eprint={0805.0438}, primaryClass={cs.IT math.IT} }
rajagopal2008network-based
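The class of algorithms analysed here is easy to state: each node repeatedly moves towards its neighbours' values with a decaying step size, while every update is hit by noise. The following sketch shows one such damped update driven by the graph Laplacian; the step-size schedule and the additive-Gaussian noise model are illustrative choices within the general setting described in the abstract.

```python
import numpy as np

def noisy_consensus(L, x0, steps=20000, seed=0):
    """Damped consensus updates x_{t+1} = x_t - a_t (L x_t + noise)."""
    rng = np.random.default_rng(seed)
    x = np.array(x0, dtype=float)
    for t in range(steps):
        a = 1.0 / (t + 2)                      # decaying step size
        noise = 0.1 * rng.standard_normal(x.size)
        x = x - a * (L @ x + noise)
    return x

# Ring graph on 5 nodes: Laplacian L = D - A.
A = np.roll(np.eye(5), 1, axis=1) + np.roll(np.eye(5), -1, axis=1)
L = np.diag(A.sum(axis=1)) - A
x0 = np.arange(5, dtype=float)
print(noisy_consensus(L, x0), "initial average:", x0.mean())
```

With decaying steps the nodes settle on a common value close to the initial average; the leftover offset comes from the accumulated noise, whose asymptotic behaviour is what the paper characterises via the Laplacian.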
arxiv-3559
0805.0444
Two-enqueuer queue in Common2
<|reference_start|>Two-enqueuer queue in Common2: The question of whether all shared objects with consensus number 2 belong to Common2, the set of objects that can be implemented in a wait-free manner by any type of consensus number 2, was first posed by Herlihy. In the absence of general results, several researchers have obtained implementations for restricted-concurrency versions of FIFO queues. We present the first Common2 algorithm for a queue with two enqueuers and any number of dequeuers.<|reference_end|>
arxiv
@article{eisenstat2008two-enqueuer, title={Two-enqueuer queue in Common2}, author={David Eisenstat}, journal={arXiv preprint arXiv:0805.0444}, year={2008}, archivePrefix={arXiv}, eprint={0805.0444}, primaryClass={cs.DC} }
eisenstat2008two-enqueuer
arxiv-3560
0805.0455
From a set of parts to an indivisible whole Part II: Operations in an open comparative mode
<|reference_start|>From a set of parts to an indivisible whole Part II: Operations in an open comparative mode: This paper describes a new method, HGV2C, for pattern analysis. The HGV2C method involves the construction of a computer ego (CE) based on an individual object that can be either a part of the system under analysis or a newly created object based on a certain hypothesis. The CE provides a capability to analyze data from a specific standpoint, e.g. from a viewpoint of a certain object. The CE is constructed from two identical copies of a query object, and its functioning mechanism involves: a hypothesis-parameter (HP) and infothyristor (IT). HP is a parameter that is introduced into an existing set of parameters. The HP value for one of the clones of a query object is set to equal 1, whereas for another clone it is greater than 1. The IT is based on the previously described algorithm of iterative averaging and performs three functions: 1) computation of a similarity matrix for the group of three objects including two clones of a query object and a target object; 2) division of the group into two alternative subgroups; and 3) a successive increase of the HP weight in the totality of all the parameters. Initially, both clones of the query object appear together in one of the subgroups as all of their parameter values, except the HP, are identical. At a certain point of the HP multiplication, one of the clones moves to the group of the target object. A respective number of the HP multiplications represents the dissimilarity (D) between the query and target objects. The product of D multiplied by the difference in HP values of the clones is strictly constant and linearly increases as the difference in HP values of the clones decreases. This new approach to knowledge representation is demonstrated on the example of population pyramids of 220 countries.<|reference_end|>
arxiv
@article{andreev2008from, title={From a set of parts to an indivisible whole. Part II: Operations in an open comparative mode}, author={Leonid Andreev}, journal={arXiv preprint arXiv:0805.0455}, year={2008}, archivePrefix={arXiv}, eprint={0805.0455}, primaryClass={cs.OH} }
andreev2008from
arxiv-3561
0805.0459
Phase transition in SONFIS&SORST
<|reference_start|>Phase transition in SONFIS&SORST: In this study, we introduce the general frame of MAny Connected Intelligent Particles Systems (MACIPS). Connections and interconnections between particles give rise to complex behavior in such a merely simple system (a system within a system). Contributions of natural computing, under information granulation theory, are the main topics of this spacious skeleton. Upon this clue, we organize two algorithms involving a few prominent intelligent computing and approximate reasoning methods: self organizing feature map (SOM), Neuro-Fuzzy Inference System and Rough Set Theory (RST). Over this, we show how our algorithms can be taken as a linkage of government-society interaction, where the government adopts various fashions of behavior: solid (absolute) or flexible. So, the transition of such a society from order to disorder, driven by changes of the connectivity parameters (noise), is inferred. Add to this, one may find an indirect mapping among financial systems and eventual market fluctuations with MACIPS. Keywords: phase transition, SONFIS, SORST, many connected intelligent particles system, society-government interaction<|reference_end|>
arxiv
@article{owladeghaffari2008phase, title={Phase transition in SONFIS&SORST}, author={Hamed Owladeghaffari}, journal={arXiv preprint arXiv:0805.0459}, year={2008}, doi={10.1007/978-3-540-88425-5_35}, archivePrefix={arXiv}, eprint={0805.0459}, primaryClass={cs.AI} }
owladeghaffari2008phase
arxiv-3562
0805.0498
The Tractability of Model-Checking for LTL: The Good, the Bad, and the Ugly Fragments
<|reference_start|>The Tractability of Model-Checking for LTL: The Good, the Bad, and the Ugly Fragments: In a seminal paper from 1985, Sistla and Clarke showed that the model-checking problem for Linear Temporal Logic (LTL) is either NP-complete or PSPACE-complete, depending on the set of temporal operators used. If, in contrast, the set of propositional operators is restricted, the complexity may decrease. This paper systematically studies the model-checking problem for LTL formulae over restricted sets of propositional and temporal operators. For almost all combinations of temporal and propositional operators, we determine whether the model-checking problem is tractable (in P) or intractable (NP-hard). We then focus on the tractable cases, showing that they all are NL-complete or even logspace solvable. This leads to a surprising gap in complexity between tractable and intractable cases. It is worth noting that our analysis covers an infinite set of problems, since there are infinitely many sets of propositional operators.<|reference_end|>
arxiv
@article{bauland2008the, title={The Tractability of Model-Checking for LTL: The Good, the Bad, and the Ugly Fragments}, author={Michael Bauland (Knipp GmbH, Germany), Martin Mundhenk (Univ. Jena, Germany), Thomas Schneider (Univ. of Manchester, UK), Henning Schnoor (Univ. Kiel, Germany), Ilka Schnoor (Univ. Luebeck, Germany), Heribert Vollmer (Univ. Hannover, Germany)}, journal={arXiv preprint arXiv:0805.0498}, year={2008}, number={ECCC Report TR08-028}, archivePrefix={arXiv}, eprint={0805.0498}, primaryClass={cs.LO cs.CC} }
bauland2008the
arxiv-3563
0805.0501
Decoding Generalized Concatenated Codes Using Interleaved Reed-Solomon Codes
<|reference_start|>Decoding Generalized Concatenated Codes Using Interleaved Reed-Solomon Codes: Generalized Concatenated codes are a code construction consisting of a number of outer codes whose code symbols are protected by an inner code. As outer codes, we assume the most frequently used Reed-Solomon codes; as inner code, we assume some linear block code which can be decoded up to half its minimum distance. Decoding up to half the minimum distance of Generalized Concatenated codes is classically achieved by the Blokh-Zyablov-Dumer algorithm, which iteratively decodes by first using the inner decoder to get an estimate of the outer code words and then using an outer error/erasure decoder with a varying number of erasures determined by a set of pre-calculated thresholds. In this paper, a modified version of the Blokh-Zyablov-Dumer algorithm is proposed, which exploits the fact that a number of outer Reed-Solomon codes with average minimum distance d can be grouped into one single Interleaved Reed-Solomon code which can be decoded beyond d/2. This allows to skip a number of decoding iterations on the one hand and to reduce the complexity of each decoding iteration significantly - while maintaining the decoding performance - on the other.<|reference_end|>
arxiv
@article{senger2008decoding, title={Decoding Generalized Concatenated Codes Using Interleaved Reed-Solomon Codes}, author={Christian Senger, Vladimir Sidorenko, Martin Bossert, Victor Zyablov}, journal={arXiv preprint arXiv:0805.0501}, year={2008}, doi={10.1109/ISIT.2008.4595300}, archivePrefix={arXiv}, eprint={0805.0501}, primaryClass={cs.IT math.IT} }
senger2008decoding
arxiv-3564
0805.0507
Spread Codes and Spread Decoding in Network Coding
<|reference_start|>Spread Codes and Spread Decoding in Network Coding: In this paper we introduce the class of Spread Codes for the use in random network coding. Spread Codes are based on the construction of spreads in finite projective geometry. The major contribution of the paper is an efficient decoding algorithm of spread codes up to half the minimum distance.<|reference_end|>
arxiv
@article{manganiello2008spread, title={Spread Codes and Spread Decoding in Network Coding}, author={Felice Manganiello, Elisa Gorla and Joachim Rosenthal}, journal={Proceedings of the 2008 IEEE International Symposium on Information Theory, Toronto, ON, Canada, July 6 - 11, 2008}, year={2008}, doi={10.1109/ISIT.2008.4595113}, archivePrefix={arXiv}, eprint={0805.0507}, primaryClass={cs.IT math.IT} }
manganiello2008spread
arxiv-3565
0805.0510
Iterative Hard Thresholding for Compressed Sensing
<|reference_start|>Iterative Hard Thresholding for Compressed Sensing: Compressed sensing is a technique to sample compressible signals below the Nyquist rate, whilst still allowing near optimal reconstruction of the signal. In this paper we present a theoretical analysis of the iterative hard thresholding algorithm when applied to the compressed sensing recovery problem. We show that the algorithm has the following properties (made more precise in the main text of the paper) - It gives near-optimal error guarantees. - It is robust to observation noise. - It succeeds with a minimum number of observations. - It can be used with any sampling operator for which the operator and its adjoint can be computed. - The memory requirement is linear in the problem size. - Its computational complexity per iteration is of the same order as the application of the measurement operator or its adjoint. - It requires a fixed number of iterations depending only on the logarithm of a form of signal to noise ratio of the signal. - Its performance guarantees are uniform in that they only depend on properties of the sampling operator and signal sparsity.<|reference_end|>
arxiv
@article{blumensath2008iterative, title={Iterative Hard Thresholding for Compressed Sensing}, author={Thomas Blumensath and Mike E. Davies}, journal={arXiv preprint arXiv:0805.0510}, year={2008}, archivePrefix={arXiv}, eprint={0805.0510}, primaryClass={cs.IT cs.NA math.IT math.NA} }
blumensath2008iterative
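The algorithm itself is a two-line iteration: take a gradient step towards the data, then keep only the k largest entries. The sketch below is a minimal implementation of that iteration; the step size, the scaling of the measurement matrix, and the demo dimensions are illustrative choices, and the guarantees listed in the abstract require conditions (such as a restricted isometry property) that the random matrix here only satisfies with high probability.

```python
import numpy as np

def hard_threshold(x, k):
    """Keep the k largest-magnitude entries of x and zero out the rest."""
    out = np.zeros_like(x)
    idx = np.argsort(np.abs(x))[-k:]
    out[idx] = x[idx]
    return out

def iht(y, Phi, k, iters=300, mu=1.0):
    """Iterative hard thresholding: x <- H_k(x + mu * Phi^T (y - Phi x))."""
    x = np.zeros(Phi.shape[1])
    for _ in range(iters):
        x = hard_threshold(x + mu * Phi.T @ (y - Phi @ x), k)
    return x

# Toy demo (illustrative sizes): recover a 5-sparse vector of length 256
# from 100 Gaussian measurements with roughly unit-norm columns.
rng = np.random.default_rng(0)
n, m, k = 256, 100, 5
Phi = rng.standard_normal((m, n)) / np.sqrt(m)
x_true = np.zeros(n)
x_true[rng.choice(n, k, replace=False)] = rng.standard_normal(k)
y = Phi @ x_true
x_hat = iht(y, Phi, k)
# The relative error is typically small when enough measurements are taken.
print("relative error:", np.linalg.norm(x_hat - x_true) / np.linalg.norm(x_true))
```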
arxiv-3566
0805.0514
Efficient recovering of operation tables of black box groups and rings
<|reference_start|>Efficient recovering of operation tables of black box groups and rings: People have been studying the following problem: Given a finite set S with a hidden (black box) binary operation * on S which might come from a group law, and suppose you have access to an oracle that you can ask for the operation x*y of single pairs (x,y) you choose. What is the minimal number of queries to the oracle until the whole binary operation is recovered, i.e. you know x*y for all x,y in S? This problem can trivially be solved by using |S|^2 queries to the oracle, so the question arises under which circumstances you can succeed with a significantly smaller number of queries. In this presentation we give a lower bound on the number of queries needed for general binary operations. On the other hand, we present algorithms solving this problem by using |S| queries, provided that * is an abelian group operation. We also investigate black box rings and give lower and upper bounds for the number of queries needed to solve product recovering in this case.<|reference_end|>
arxiv
@article{zumbragel2008efficient, title={Efficient recovering of operation tables of black box groups and rings}, author={Jens Zumbragel, Gerard Maze, Joachim Rosenthal}, journal={arXiv preprint arXiv:0805.0514}, year={2008}, archivePrefix={arXiv}, eprint={0805.0514}, primaryClass={cs.IT cs.DM math.GR math.IT} }
zumbragel2008efficient
arxiv-3567
0805.0516
The Gaussian MAC with Conferencing Encoders
<|reference_start|>The Gaussian MAC with Conferencing Encoders: We derive the capacity region of the Gaussian version of Willems's two-user MAC with conferencing encoders. This setting differs from the classical MAC in that, prior to each transmission block, the two transmitters can communicate with each other over noise-free bit-pipes of given capacities. The derivation requires a new technique for proving the optimality of Gaussian input distributions in certain mutual information maximizations under a Markov constraint. We also consider a Costa-type extension of the Gaussian MAC with conferencing encoders. In this extension, the channel can be described as a two-user MAC with Gaussian noise and Gaussian interference where the interference is known non-causally to the encoders but not to the decoder. We show that as in Costa's setting the interference sequence can be perfectly canceled, i.e., that the capacity region without interference can be achieved.<|reference_end|>
arxiv
@article{bross2008the, title={The Gaussian MAC with Conferencing Encoders}, author={Shraga I. Bross, Amos Lapidoth, Michele A. Wigger}, journal={arXiv preprint arXiv:0805.0516}, year={2008}, archivePrefix={arXiv}, eprint={0805.0516}, primaryClass={cs.IT math.IT} }
bross2008the
arxiv-3568
0805.0517
Analysis of the Deterministic Polynomial Time Solvability of the 0-1-Knapsack Problem
<|reference_start|>Analysis of the Deterministic Polynomial Time Solvability of the 0-1-Knapsack Problem: Previously the author has demonstrated that a representative polynomial search partition is required to solve a NP-complete problem in deterministic polynomial time. It has also been demonstrated that finding such a partition can only be done in deterministic polynomial time if the form of the problem provides a simple method for producing the partition. It is the purpose of this article to demonstrate that no deterministic polynomial time method exists to produce a representative polynomial search partition for the Knapsack problem.<|reference_end|>
arxiv
@article{meek2008analysis, title={Analysis of the Deterministic Polynomial Time Solvability of the 0-1-Knapsack Problem}, author={Jerrald Meek}, journal={arXiv preprint arXiv:0805.0517}, year={2008}, archivePrefix={arXiv}, eprint={0805.0517}, primaryClass={cs.CC} }
meek2008analysis
arxiv-3569
0805.0521
On the Capacity of Free-Space Optical Intensity Channels
<|reference_start|>On the Capacity of Free-Space Optical Intensity Channels: New upper and lower bounds are presented on the capacity of the free-space optical intensity channel. This channel is characterized by inputs that are nonnegative (representing the transmitted optical intensity) and by outputs that are corrupted by additive white Gaussian noise (because in free space the disturbances arise from many independent sources). Due to battery and safety reasons the inputs are simultaneously constrained in both their average and peak power. For a fixed ratio of the average power to the peak power the difference between the upper and the lower bounds tends to zero as the average power tends to infinity, and the ratio of the upper and lower bounds tends to one as the average power tends to zero. The case where only an average-power constraint is imposed on the input is treated separately. In this case, the difference of the upper and lower bound tends to 0 as the average power tends to infinity, and their ratio tends to a constant as the power tends to zero.<|reference_end|>
arxiv
@article{lapidoth2008on, title={On the Capacity of Free-Space Optical Intensity Channels}, author={Amos Lapidoth, Stefan M. Moser, Michele A. Wigger}, journal={arXiv preprint arXiv:0805.0521}, year={2008}, archivePrefix={arXiv}, eprint={0805.0521}, primaryClass={cs.IT math.IT} }
lapidoth2008on
arxiv-3570
0805.0541
Climate modification directed by control theory
<|reference_start|>Climate modification directed by control theory: Climate modification measures to counteract global warming have received renewed attention in recent years. Most current research only discusses the impact of these measures on climate, but how to design such a climate regulator is still unknown. This paper shows that control theory can give systematic direction for climate modification. However, the control analysis also reveals that climate modification should only be regarded as a last-ditch measure.<|reference_end|>
arxiv
@article{liang2008climate, title={Climate modification directed by control theory}, author={Wang Liang}, journal={arXiv preprint arXiv:0805.0541}, year={2008}, archivePrefix={arXiv}, eprint={0805.0541}, primaryClass={cs.OH} }
liang2008climate
arxiv-3571
0805.0560
An Approach to Learning Research with a Wireless Sensor Network in an Outdoor Setting
<|reference_start|>An Approach to Learning Research with a Wireless Sensor Network in an Outdoor Setting: Automated collection of environmental data may be accomplished with wireless sensor networks (WSNs). In this paper, a general discussion of WSNs is given for the gathering of data for educational research. WSNs have the capability to enhance the scope of a researcher to include multiple streams of data: environmental, location, cyberdata, video, and RFID. The location of data stored in a database can allow reconstruction of the learning activity for the evaluation of significance at a later time. A brief overview of the technology forms the basis of an exploration of a setting used for outdoor learning.<|reference_end|>
arxiv
@article{anderson2008an, title={An Approach to Learning Research with a Wireless Sensor Network in an Outdoor Setting}, author={Tom Adam Frederic Anderson, Yean-Fu Wen}, journal={arXiv preprint arXiv:0805.0560}, year={2008}, archivePrefix={arXiv}, eprint={0805.0560}, primaryClass={cs.CY} }
anderson2008an
arxiv-3572
0805.0577
Infinity-Norm Sphere-Decoding
<|reference_start|>Infinity-Norm Sphere-Decoding: The most promising approaches for efficient detection in multiple-input multiple-output (MIMO) wireless systems are based on sphere-decoding (SD). The conventional (and optimum) norm that is used to conduct the tree traversal step in SD is the l-2 norm. It was, however, recently observed that using the l-infinity norm instead reduces the hardware complexity of SD considerably at only a marginal performance loss. These savings result from a reduction in the length of the critical path in the circuit and the silicon area required for metric computation, but are also, as observed previously through simulation results, a consequence of a reduction in the computational (i.e., algorithmic) complexity. The aim of this paper is an analytical performance and computational complexity analysis of l-infinity norm SD. For i.i.d. Rayleigh fading MIMO channels, we show that l-infinity norm SD achieves full diversity order with an asymptotic SNR gap, compared to l-2 norm SD, that increases at most linearly in the number of receive antennas. Moreover, we provide a closed-form expression for the computational complexity of l-infinity norm SD based on which we establish that its complexity scales exponentially in the system size. Finally, we characterize the tree pruning behavior of l-infinity norm SD and show that it behaves fundamentally different from that of l-2 norm SD.<|reference_end|>
arxiv
@article{seethaler2008infinity-norm, title={Infinity-Norm Sphere-Decoding}, author={Dominik Seethaler and Helmut Bölcskei}, journal={arXiv preprint arXiv:0805.0577}, year={2008}, archivePrefix={arXiv}, eprint={0805.0577}, primaryClass={cs.IT math.IT} }
seethaler2008infinity-norm
arxiv-3573
0805.0585
Discrete Mathematics for Computer Science, Some Notes
<|reference_start|>Discrete Mathematics for Computer Science, Some Notes: These are notes on discrete mathematics for computer scientists. The presentation is somewhat unconventional. Indeed I begin with a discussion of the basic rules of mathematical reasoning and of the notion of proof formalized in a natural deduction system ``a la Prawitz''. The rest of the material is more or less traditional but I emphasize partial functions more than usual (after all, programs may not terminate for all input) and I provide a fairly complete account of the basic concepts of graph theory.<|reference_end|>
arxiv
@article{gallier2008discrete, title={Discrete Mathematics for Computer Science, Some Notes}, author={Jean Gallier}, journal={arXiv preprint arXiv:0805.0585}, year={2008}, archivePrefix={arXiv}, eprint={0805.0585}, primaryClass={cs.DM cs.LO} }
gallier2008discrete
arxiv-3574
0805.0589
Cascaded Orthogonal Space-Time Block Codes for Wireless Multi-Hop Relay Networks
<|reference_start|>Cascaded Orthogonal Space-Time Block Codes for Wireless Multi-Hop Relay Networks: Distributed space-time block coding is a diversity technique to mitigate the effects of fading in multi-hop wireless networks, where multiple relay stages are used by a source to communicate with its destination. This paper proposes a new distributed space-time block code called the cascaded orthogonal space-time block code (COSTBC) for the case where the source and destination are equipped with multiple antennas and each relay stage has one or more single antenna relays. Each relay stage is assumed to have receive channel state information (CSI) for all the channels from the source to itself, while the destination is assumed to have receive CSI for all the channels. To construct the COSTBC, multiple orthogonal space-time block codes are used in cascade by the source and each relay stage. In the COSTBC, each relay stage separates the constellation symbols of the orthogonal space-time block code sent by the preceding relay stage using its CSI, and then transmits another orthogonal space-time block code to the next relay stage. COSTBCs are shown to achieve the maximum diversity gain in a multi-hop wireless network with flat Rayleigh fading channels. Several explicit constructions of COSTBCs are also provided for two-hop wireless networks with two and four source antennas and relay nodes. It is also shown that COSTBCs require minimum decoding complexity thanks to the connection to orthogonal space-time block codes.<|reference_end|>
arxiv
@article{vaze2008cascaded, title={Cascaded Orthogonal Space-Time Block Codes for Wireless Multi-Hop Relay Networks}, author={Rahul Vaze and Robert W. Heath Jr}, journal={arXiv preprint arXiv:0805.0589}, year={2008}, archivePrefix={arXiv}, eprint={0805.0589}, primaryClass={cs.IT math.IT} }
vaze2008cascaded
arxiv-3575
0805.0612
Upper bounds for alpha-domination parameters
<|reference_start|>Upper bounds for alpha-domination parameters: In this paper, we provide a new upper bound for the alpha-domination number. This result generalises the well-known Caro-Roditty bound for the domination number of a graph. The same probabilistic construction is used to generalise another well-known upper bound for the classical domination in graphs. We also prove similar upper bounds for the alpha-rate domination number, which combines the concepts of alpha-domination and k-tuple domination.<|reference_end|>
arxiv
@article{gagarin2008upper, title={Upper bounds for alpha-domination parameters}, author={Andrei Gagarin, Anush Poghosyan, and Vadim E. Zverovich}, journal={Graphs Combin. 25 (2009), no. 4, pp. 513-520}, year={2008}, doi={10.1007/s00373-009-0864-6}, archivePrefix={arXiv}, eprint={0805.0612}, primaryClass={math.CO cs.DM} }
gagarin2008upper
arxiv-3576
0805.0615
On Expanded Cyclic Codes
<|reference_start|>On Expanded Cyclic Codes: The paper has a threefold purpose. The first purpose is to present an explicit description of expanded cyclic codes defined in $\GF(q^m)$. The proposed explicit construction of expanded generator matrix and expanded parity check matrix maintains the symbol-wise algebraic structure and thus keeps many important original characteristics. The second purpose of this paper is to identify a class of constant-weight cyclic codes. Specifically, we show that a well-known class of $q$-ary BCH codes excluding the all-zero codeword are constant-weight cyclic codes. Moreover, we show this class of codes achieve the Plotkin bound. The last purpose of the paper is to characterize expanded cyclic codes utilizing the proposed expanded generator matrix and parity check matrix. We characterize the properties of component codewords of a codeword and particularly identify the precise conditions under which a codeword can be represented by a subbasis. Our developments reveal an alternative while more general view on the subspace subcodes of Reed-Solomon codes. With the new insights, we present an improved lower bound on the minimum distance of an expanded cyclic code by exploiting the generalized concatenated structure. We also show that the fixed-rate binary expanded Reed-Solomon codes are asymptotically "bad", in the sense that the ratio of minimum distance over code length diminishes with code length going to infinity. It overturns the prevalent conjecture that they are "good" codes and deviates from the ensemble of generalized Reed-Solomon codes which asymptotically achieves the Gilbert-Varshamov bound.<|reference_end|>
arxiv
@article{wu2008on, title={On Expanded Cyclic Codes}, author={Yingquan Wu}, journal={arXiv preprint arXiv:0805.0615}, year={2008}, archivePrefix={arXiv}, eprint={0805.0615}, primaryClass={cs.IT cs.CC math.IT math.RA} }
wu2008on
arxiv-3577
0805.0642
Order to Disorder Transitions in Hybrid Intelligent Systems: a Hatch to the Interactions of Nations -Governments
<|reference_start|>Order to Disorder Transitions in Hybrid Intelligent Systems: a Hatch to the Interactions of Nations -Governments: In this study, under the general frame of MAny Connected Intelligent Particles Systems (MACIPS), we reproduce two new simple subsets of such an intelligent complex network, namely hybrid intelligent systems, involving a few prominent intelligent computing and approximate reasoning methods: self organizing feature map (SOM), Neuro-Fuzzy Inference System and Rough Set Theory (RST). Over this, we show how our algorithms can be construed as a linkage of government-society interaction, where the government adopts various fashions of behavior: solid (absolute) or flexible. So, the transition of such a society from order to disorder, driven by changes of the connectivity parameters (noise), is inferred. Add to this, one may find an indirect mapping among financial systems and eventual market fluctuations with MACIPS.<|reference_end|>
arxiv
@article{owladeghaffari2008order, title={Order to Disorder Transitions in Hybrid Intelligent Systems: a Hatch to the Interactions of Nations -Governments}, author={Hamed Owladeghaffari}, journal={arXiv preprint arXiv:0805.0642}, year={2008}, archivePrefix={arXiv}, eprint={0805.0642}, primaryClass={cs.AI cs.IT math.IT} }
owladeghaffari2008order
arxiv-3578
0805.0648
3D Building Model Fitting Using A New Kinetic Framework
<|reference_start|>3D Building Model Fitting Using A New Kinetic Framework: We describe a new approach to fit the polyhedron describing a 3D building model to the point cloud of a Digital Elevation Model (DEM). We introduce a new kinetic framework that hides from its user the combinatorial complexity of determining or maintaining the polyhedron topology, allowing the design of a simple variational optimization. This new kinetic framework allows the manipulation of a bounded polyhedron with simple faces by specifying the target plane equations of each of its faces. It proceeds by evolving continuously from the polyhedron defined by its initial topology and its initial plane equations to a polyhedron that is as topologically close as possible to the initial polyhedron but with the new plane equations. This kinetic framework handles internally the necessary topological changes that may be required to keep the faces simple and the polyhedron bounded. For each intermediate configuration where the polyhedron loses the simplicity of its faces or its boundedness, the simplest topological modification that is able to reestablish the simplicity and the boundedness is performed.<|reference_end|>
arxiv
@article{brédif20083d, title={3D Building Model Fitting Using A New Kinetic Framework}, author={Mathieu Brédif, Didier Boldo, Marc Pierrot-Deseilligny, Henri Maître}, journal={arXiv preprint arXiv:0805.0648}, year={2008}, archivePrefix={arXiv}, eprint={0805.0648}, primaryClass={cs.CG} }
brédif20083d
arxiv-3579
0805.0650
Plat_Forms -- a contest: The web development platform comparison
<|reference_start|>Plat_Forms -- a contest: The web development platform comparison: "Plat_Forms" is a competition in which top-class teams of three programmers compete to implement the same requirements for a web-based system within 30 hours, each team using a different technology platform (Java EE, .NET, PHP, Perl, Python, or Ruby on Rails). The results will provide new insights into the real (rather than purported) pros, cons, and emergent properties of each platform. The evaluation will analyze many aspects of each solution, both external (usability, functionality, reliability, performance, etc.) and internal (structure, understandability, flexibility, etc.).<|reference_end|>
arxiv
@article{prechelt2008plat_forms, title={Plat_Forms -- a contest: The web development platform comparison}, author={Lutz Prechelt}, journal={arXiv preprint arXiv:0805.0650}, year={2008}, number={Technical Report TR-B-06-11}, archivePrefix={arXiv}, eprint={0805.0650}, primaryClass={cs.SE} }
prechelt2008plat_forms
arxiv-3580
0805.0697
Stochastic Optimization Approaches for Solving Sudoku
<|reference_start|>Stochastic Optimization Approaches for Solving Sudoku: In this paper the Sudoku problem is solved using stochastic search techniques, namely: Cultural Genetic Algorithm (CGA), Repulsive Particle Swarm Optimization (RPSO), Quantum Simulated Annealing (QSA) and the Hybrid method that combines Genetic Algorithm with Simulated Annealing (HGASA). The results obtained show that the CGA, QSA and HGASA are able to solve the Sudoku puzzle, with CGA finding a solution in 28 seconds, QSA in 65 seconds and HGASA in 1.447 seconds. This is mainly because HGASA combines the parallel searching of GA with the flexibility of SA. The RPSO was found to be unable to solve the puzzle.<|reference_end|>
arxiv
@article{perez2008stochastic, title={Stochastic Optimization Approaches for Solving Sudoku}, author={Meir Perez and Tshilidzi Marwala}, journal={arXiv preprint arXiv:0805.0697}, year={2008}, doi={10.1016/j.eswa.2012.04.019}, archivePrefix={arXiv}, eprint={0805.0697}, primaryClass={cs.NE} }
perez2008stochastic
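To make the setting concrete, here is one common simulated-annealing formulation of Sudoku: fill each 3x3 block with its missing digits so blocks stay valid by construction, then swap free cells inside a block and anneal on the number of row and column violations. This is a generic SA sketch, not the CGA/RPSO/QSA/HGASA implementations benchmarked in the paper, and the temperature schedule is an arbitrary choice.

```python
import math, random

def block_cells(b):
    r0, c0 = 3 * (b // 3), 3 * (b % 3)
    return [(r0 + i, c0 + j) for i in range(3) for j in range(3)]

def initial_fill(grid):
    """Fill each 3x3 block with the digits it is missing (blocks stay valid)."""
    g = [row[:] for row in grid]
    for b in range(9):
        cells = block_cells(b)
        missing = [d for d in range(1, 10) if d not in [g[r][c] for r, c in cells]]
        random.shuffle(missing)
        for (r, c) in cells:
            if g[r][c] == 0:
                g[r][c] = missing.pop()
    return g

def cost(g):
    """Number of missing digits over all rows and columns (0 means solved)."""
    c = 0
    for i in range(9):
        c += 9 - len(set(g[i]))
        c += 9 - len(set(row[i] for row in g))
    return c

def solve_sa(grid, t0=1.0, cooling=0.9999, steps=200000):
    """Simulated-annealing Sudoku solver (one common SA formulation)."""
    fixed = [[v != 0 for v in row] for row in grid]
    g = initial_fill(grid)
    t, c = t0, cost(g)
    for _ in range(steps):
        if c == 0:
            return g
        b = random.randrange(9)
        free = [(r, cc) for r, cc in block_cells(b) if not fixed[r][cc]]
        if len(free) < 2:
            continue
        (r1, c1), (r2, c2) = random.sample(free, 2)
        g[r1][c1], g[r2][c2] = g[r2][c2], g[r1][c1]
        new_c = cost(g)
        if new_c <= c or random.random() < math.exp((c - new_c) / t):
            c = new_c
        else:
            g[r1][c1], g[r2][c2] = g[r2][c2], g[r1][c1]  # undo the swap
        t *= cooling
    return g  # may be unsolved if annealing got stuck

# Usage (hypothetical puzzle): solution = solve_sa(puzzle), where `puzzle`
# is a 9x9 list of ints with 0 marking blanks; restart with a fresh
# temperature if the annealing stalls.
```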
arxiv-3581
0805.0730
Quasiperiodic and Lyndon episturmian words
<|reference_start|>Quasiperiodic and Lyndon episturmian words: Recently the second two authors characterized quasiperiodic Sturmian words, proving that a Sturmian word is non-quasiperiodic if and only if it is an infinite Lyndon word. Here we extend this study to episturmian words (a natural generalization of Sturmian words) by describing all the quasiperiods of an episturmian word, which yields a characterization of quasiperiodic episturmian words in terms of their "directive words". Even further, we establish a complete characterization of all episturmian words that are Lyndon words. Our main results show that, unlike the Sturmian case, there is a much wider class of episturmian words that are non-quasiperiodic, besides those that are infinite Lyndon words. Our key tools are morphisms and directive words, in particular "normalized" directive words, which we introduced in an earlier paper. Also of importance is the use of "return words" to characterize quasiperiodic episturmian words, since such a method could be useful in other contexts.<|reference_end|>
arxiv
@article{glen2008quasiperiodic, title={Quasiperiodic and Lyndon episturmian words}, author={Amy Glen, Florence Levé, Gwénaël Richomme}, journal={Theoretical Computer Science 409 (2008) 578-600}, year={2008}, doi={10.1016/j.tcs.2008.09.056}, archivePrefix={arXiv}, eprint={0805.0730}, primaryClass={math.CO cs.DM} }
glen2008quasiperiodic
arxiv-3582
0805.0740
Diversity-Integration Trade-offs in MIMO Detection
<|reference_start|>Diversity-Integration Trade-offs in MIMO Detection: In this work, a MIMO detection problem is considered. At first, we derive the Generalized Likelihood Ratio Test (GLRT) for arbitrary transmitted signals and arbitrary time-correlation of the disturbance. Then, we investigate design criteria for the transmitted waveforms in both power-unlimited and power-limited systems and we study the interplay among the rank of the optimized code matrix, the number of transmit diversity paths and the amount of energy integrated along each path. The results show that increasing the rank of the code matrix allows generating a larger number of diversity paths at the price of reducing the average signal-to-clutter level along each path.<|reference_end|>
arxiv
@article{de maio2008diversity-integration, title={Diversity-Integration Trade-offs in MIMO Detection}, author={Antonio De Maio, Marco Lops, Luca Venturino}, journal={IEEE Transactions on Signal Processing, Vol. 56, no. 10, pp. 5051-5061, October 2008}, year={2008}, doi={10.1109/TSP.2008.928693}, archivePrefix={arXiv}, eprint={0805.0740}, primaryClass={cs.OH cs.IT math.IT} }
de maio2008diversity-integration
arxiv-3583
0805.0747
Pruning Attribute Values From Data Cubes with Diamond Dicing
<|reference_start|>Pruning Attribute Values From Data Cubes with Diamond Dicing: Data stored in a data warehouse are inherently multidimensional, but most data-pruning techniques (such as iceberg and top-k queries) are unidimensional. However, analysts need to issue multidimensional queries. For example, an analyst may need to select not just the most profitable stores or--separately--the most profitable products, but simultaneous sets of stores and products fulfilling some profitability constraints. To fill this need, we propose a new operator, the diamond dice. Because of the interaction between dimensions, the computation of diamonds is challenging. We present the first diamond-dicing experiments on large data sets. Experiments show that we can compute diamond cubes over fact tables containing 100 million facts in less than 35 minutes using a standard PC.<|reference_end|>
arxiv
@article{webb2008pruning, title={Pruning Attribute Values From Data Cubes with Diamond Dicing}, author={Hazel Webb, Owen Kaser, Daniel Lemire}, journal={arXiv preprint arXiv:0805.0747}, year={2008}, number={TR-08-011 (UNB Saint John)}, archivePrefix={arXiv}, eprint={0805.0747}, primaryClass={cs.DB cs.DS} }
webb2008pruning
arxiv-3584
0805.0766
Sponsored Search Auctions with Markovian Users
<|reference_start|>Sponsored Search Auctions with Markovian Users: Sponsored search involves running an auction among advertisers who bid in order to have their ad shown next to search results for specific keywords. Currently, the most popular auction for sponsored search is the "Generalized Second Price" (GSP) auction in which advertisers are assigned to slots in the decreasing order of their "score," which is defined as the product of their bid and click-through rate. In the past few years, there has been significant research on the game-theoretic issues that arise in an advertiser's interaction with the mechanism as well as possible redesigns of the mechanism, but this ranking order has remained standard. From a search engine's perspective, the fundamental question is: what is the best assignment of advertisers to slots? Here "best" could mean "maximizing user satisfaction," "most efficient," "revenue-maximizing," "simplest to interact with," or a combination of these. To answer this question we need to understand the behavior of a search engine user when she sees the displayed ads, since that defines the commodity the advertisers are bidding on, and its value. Most prior work has assumed that the probability of a user clicking on an ad is independent of the other ads shown on the page. We propose a simple Markovian user model that does not make this assumption. We then present an algorithm to determine the most efficient assignment under this model, which turns out to be different than that of GSP. A truthful auction then follows from an application of the Vickrey-Clarke-Groves (VCG) mechanism. Further, we show that our assignment has many of the desirable properties of GSP that makes bidding intuitive. At the technical core of our result are a number of insights about the structure of the optimal assignment.<|reference_end|>
arxiv
@article{aggarwal2008sponsored, title={Sponsored Search Auctions with Markovian Users}, author={Gagan Aggarwal, Jon Feldman, S. Muthukrishnan and Martin Pal}, journal={arXiv preprint arXiv:0805.0766}, year={2008}, archivePrefix={arXiv}, eprint={0805.0766}, primaryClass={cs.GT} }
aggarwal2008sponsored
arxiv-3585
0805.0782
A new queueing strategy for the Adversarial Queueing Theory
<|reference_start|>A new queueing strategy for the Adversarial Queueing Theory: In today's Internet and TCP/IP networks, the queueing of packets is commonly implemented using the protocol FIFO (First In First Out). Unfortunately, FIFO performs poorly in the Adversarial Queueing Theory. Other queueing strategies have been studied in this model, and alternative strategies such as LIS (Longest In System) achieve better results. This article introduces a new queueing protocol called the interval-strategy, which is based on the reduction from dynamic to static routing. We discuss the maximum system time of a packet and estimate, using up-to-date results, how it can be bounded. We determine the maximum amount of time that a packet can spend in the network (i.e. the worst-case system time), and argue that the universal instability of the presented interval-strategy follows from these results. We prove that the interval-strategy is universally unstable when a large group of queueing strategies is used for queueing. Finally, we calculate the maximum time of the static routing needed to obtain a universally stable and polynomially - in fact linearly - bounded interval-strategy. We close - in order to check this upper bound - with up-to-date results on the delivery times in static routing.<|reference_end|>
arxiv
@article{hilker2008a, title={A new queueing strategy for the Adversarial Queueing Theory}, author={Michael Hilker and Christoph Schommer}, journal={Proceedings, IPSI-2005, December 2005, Bled, Slovenia}, year={2008}, archivePrefix={arXiv}, eprint={0805.0782}, primaryClass={cs.NI} }
hilker2008a
arxiv-3586
0805.0783
Relational Parametricity and Separation Logic
<|reference_start|>Relational Parametricity and Separation Logic: Separation logic is a recent extension of Hoare logic for reasoning about programs with references to shared mutable data structures. In this paper, we provide a new interpretation of the logic for a programming language with higher types. Our interpretation is based on Reynolds's relational parametricity, and it provides a formal connection between separation logic and data abstraction.<|reference_end|>
arxiv
@article{birkedal2008relational, title={Relational Parametricity and Separation Logic}, author={Lars Birkedal and Hongseok Yang}, journal={Logical Methods in Computer Science, Volume 4, Issue 2 (May 15, 2008) lmcs:825}, year={2008}, doi={10.2168/LMCS-4(2:6)2008}, archivePrefix={arXiv}, eprint={0805.0783}, primaryClass={cs.LO} }
birkedal2008relational
arxiv-3587
0805.0785
AGNOSCO - Identification of Infected Nodes with artificial Ant Colonies
<|reference_start|>AGNOSCO - Identification of Infected Nodes with artificial Ant Colonies: If a computer node is infected by a virus, worm or backdoor, then this is a security risk for the entire network to which the node belongs. Existing Network Intrusion Detection Systems (NIDS) provide a certain amount of support for the identification of such infected nodes but require large amounts of communication and computational power. In this article, we present a novel approach called AGNOSCO to support the identification of infected nodes through the use of artificial ant colonies. It is shown that AGNOSCO overcomes the communication and computational power problem while identifying infected nodes properly.<|reference_end|>
arxiv
@article{hilker2008agnosco, title={AGNOSCO - Identification of Infected Nodes with artificial Ant Colonies}, author={Michael Hilker and Christoph Schommer}, journal={Proceedings of the 6th International Conference on Recent Advances in Soft Computing (RASC2006), July 2006, Canterbury, United Kingdom}, year={2008}, archivePrefix={arXiv}, eprint={0805.0785}, primaryClass={cs.AI cs.MA} }
hilker2008agnosco
arxiv-3588
0805.0802
An Information-Theoretical View of Network-Aware Malware Attacks
<|reference_start|>An Information-Theoretical View of Network-Aware Malware Attacks: This work investigates three aspects: (a) a network vulnerability as the non-uniform vulnerable-host distribution, (b) threats, i.e., intelligent malwares that exploit such a vulnerability, and (c) defense, i.e., challenges for fighting the threats. We first study five large data sets and observe consistent clustered vulnerable-host distributions. We then present a new metric, referred to as the non-uniformity factor, which quantifies the unevenness of a vulnerable-host distribution. This metric is essentially the Renyi information entropy and better characterizes the non-uniformity of a distribution than the Shannon entropy. Next, we analyze the propagation speed of network-aware malwares in view of information theory. In particular, we draw a relationship between Renyi entropies and randomized epidemic malware-scanning algorithms. We find that the infection rates of malware-scanning methods are characterized by the Renyi entropies that relate to the information bits in a non-uniform vulnerable-host distribution extracted by a randomized scanning algorithm. Meanwhile, we show that a representative network-aware malware can increase the spreading speed by exactly or nearly a non-uniformity factor when compared to a random-scanning malware at an early stage of malware propagation. This quantifies how much more rapidly the Internet can be infected at the early stage when a malware exploits an uneven vulnerable-host distribution as a network-wide vulnerability. Furthermore, we analyze the effectiveness of defense strategies on the spread of network-aware malwares. Our results demonstrate that counteracting network-aware malwares is a significant challenge for the strategies that include host-based defense and IPv6.<|reference_end|>
arxiv
@article{chen2008an, title={An Information-Theoretical View of Network-Aware Malware Attacks}, author={Zesheng Chen and Chuanyi Ji}, journal={arXiv preprint arXiv:0805.0802}, year={2008}, archivePrefix={arXiv}, eprint={0805.0802}, primaryClass={cs.CR cs.IT cs.NI math.IT} }
chen2008an
arxiv-3589
0805.0845
Presentation of a Game Semantics for First-Order Propositional Logic
<|reference_start|>Presentation of a Game Semantics for First-Order Propositional Logic: Game semantics aim at describing the interactive behaviour of proofs by interpreting formulas as games on which proofs induce strategies. In this article, we introduce a game semantics for a fragment of first order propositional logic. One of the main difficulties that has to be faced when constructing such semantics is to make them precise by characterizing definable strategies - that is, strategies which actually behave like a proof. This characterization is usually done by restricting the model to strategies satisfying subtle combinatorial conditions such as innocence, whose preservation under composition is often difficult to show. Here, we present an original methodology to achieve this task, which requires combining tools from game semantics, rewriting theory and categorical algebra. We introduce a diagrammatic presentation of definable strategies by means of generators and relations: we show that these strategies can be generated from a finite set of ``atomic'' strategies and that the equality between strategies generated in this way admits a finite axiomatization. These generators satisfy laws which are a variation of the laws of bialgebras, thus bridging algebra and denotational semantics in a clean and unexpected way.<|reference_end|>
arxiv
@article{mimram2008presentation, title={Presentation of a Game Semantics for First-Order Propositional Logic}, author={Samuel Mimram (PPS)}, journal={arXiv preprint arXiv:0805.0845}, year={2008}, archivePrefix={arXiv}, eprint={0805.0845}, primaryClass={cs.LO math.CT math.LO} }
mimram2008presentation
arxiv-3590
0805.0849
SANA - Network Protection through artificial Immunity
<|reference_start|>SANA - Network Protection through artificial Immunity: Current network protection systems use a collection of intelligent components - e.g. classifiers or rule-based firewall systems - to detect intrusions and anomalies and to secure a network against viruses, worms, or trojans. However, these systems rely on individual components and support an architecture with little collaboration among the protection components. They provide little administrative support for maintenance, yet offer a large number of individual single points of failure - an ideal situation for network attacks to succeed. In this work, we discuss the required features, the performance, and the problems of a distributed protection system called SANA. It consists of a cooperative architecture and is motivated by the human immune system, where the components correspond to artificial immune cells that are connected for their collaborative work. SANA promises better protection against intruders than commonly known protection systems through adaptive self-management, while using resources efficiently by intelligently reducing redundant tasks. We introduce a library of several novel and commonly used protection components and evaluate the performance of SANA with a proof-of-concept implementation.<|reference_end|>
arxiv
@article{hilker2008sana, title={SANA - Network Protection through artificial Immunity}, author={Michael Hilker and Christoph Schommer}, journal={Proceedings of the 2nd International Workshop on Theory of Computer Viruses (TCV 2007), May 2007, Nancy, France}, year={2008}, archivePrefix={arXiv}, eprint={0805.0849}, primaryClass={cs.CR cs.MA} }
hilker2008sana
arxiv-3591
0805.0850
Service Oriented Architecture in Network Security - a novel Organisation in Security Systems
<|reference_start|>Service Oriented Architecture in Network Security - a novel Organisation in Security Systems: Current network security systems are a collection of various security components that are installed directly in the operating system. These check the whole node for suspicious behaviour. Armouring intrusions, for example, have the ability to hide themselves from these checks. In this paper we present an alternative organisation of security systems. The node is completely virtualized with current virtualization systems, so that the operating system with its applications is separated from the security system. The security system then checks the node from the outside, and the right security components are provided through a service oriented architecture. Because the node runs in a virtual machine, infected nodes can be halted, duplicated, and moved to other machines for further analysis and for legal purposes. This organisation is analysed in this article, and a preliminary implementation showing promising results is discussed.<|reference_end|>
arxiv
@article{hilker2008service, title={Service Oriented Architecture in Network Security - a novel Organisation in Security Systems}, author={Michael Hilker and Christoph Schommer}, journal={Proceedings of the 3rd International Workshop on Theory of Computer Viruses (TCV 2008), May 2008, Nancy, France}, year={2008}, archivePrefix={arXiv}, eprint={0805.0850}, primaryClass={cs.CR cs.DC} }
hilker2008service
arxiv-3592
0805.0851
Bounds for self-stabilization in unidirectional networks
<|reference_start|>Bounds for self-stabilization in unidirectional networks: A distributed algorithm is self-stabilizing if after faults and attacks hit the system and place it in some arbitrary global state, the system recovers from this catastrophic situation without external intervention in finite time. Unidirectional networks preclude many common techniques in self-stabilization from being used, such as preserving local predicates. In this paper, we investigate the intrinsic complexity of achieving self-stabilization in unidirectional networks, and focus on the classical vertex coloring problem. When deterministic solutions are considered, we prove a lower bound of $n$ states per process (where $n$ is the network size) and a recovery time of at least $n(n-1)/2$ actions in total. We present a deterministic algorithm with matching upper bounds that performs in arbitrary graphs. When probabilistic solutions are considered, we observe that at least $\Delta + 1$ states per process and a recovery time of $\Omega(n)$ actions in total are required (where $\Delta$ denotes the maximal degree of the underlying simple undirected graph). We present a probabilistically self-stabilizing algorithm that uses $\mathtt{k}$ states per process, where $\mathtt{k}$ is a parameter of the algorithm. When $\mathtt{k}=\Delta+1$, the algorithm recovers in expected $O(\Delta n)$ actions. When $\mathtt{k}$ may grow arbitrarily, the algorithm recovers in expected $O(n)$ actions in total. Thus, our algorithm can be made optimal with respect to space or time complexity.<|reference_end|>
arxiv
@article{bernard2008bounds, title={Bounds for self-stabilization in unidirectional networks}, author={Samuel Bernard (LIP6), Stéphane Devismes (LRI), Maria Gradinariu Potop-Butucaru (LIP6, INRIA Rocquencourt), Sébastien Tixeuil (LIP6)}, journal={arXiv preprint arXiv:0805.0851}, year={2008}, number={RR-6524}, archivePrefix={arXiv}, eprint={0805.0851}, primaryClass={cs.DS cs.CC cs.DC cs.NI} }
bernard2008bounds
arxiv-3593
0805.0854
Sub-$\mu$ structured Lotus Surfaces Manufacturing
<|reference_start|>Sub-$\mu$ structured Lotus Surfaces Manufacturing: Sub-micro structured surfaces allow modifying the behavior of polymer films or components. Especially in microfluidics, a lotus-like characteristic is requested for many applications. Structure details with a high aspect ratio are necessary to decouple the bottom and the top of the functional layer. Unlike stochastic methods, patterning with a LIGA mold insert makes it possible to structure surfaces very uniformly or even with controlled variations (e.g. with gradients). In this paper we present the process chain to realize polymer sub-micro structures with a minimum lateral feature size of 400 nm and heights of up to 4 micrometers.<|reference_end|>
arxiv
@article{worgull2008sub-$\mu$, title={Sub-$\mu$ structured Lotus Surfaces Manufacturing}, author={M. Worgull, M. Heckele, T. Mappes, B. Matthis, G. Tosello, T. Metz, J. Gavillet (LETI), P. Koltay, H. N. Hansen}, journal={Dans Symposium on Design, Test, Integration and Packaging of MEMS/MOEMS - DTIP 2008, Nice : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0805.0854}, primaryClass={cs.OH} }
worgull2008sub-$\mu$
arxiv-3594
0805.0855
Linear and Non Linear Behaviour of Mechanical Resonators for Optimized Inertial Electromagnetic Microgenerators
<|reference_start|>Linear and Non Linear Behaviour of Mechanical Resonators for Optimized Inertial Electromagnetic Microgenerators: The need for wearable or abandoned microsystems, as well as the trend to a lower power consumption of electronic devices, makes miniaturized renewable energy generators a viable alternative to batteries. Among the different alternatives, an interesting option is the use of inertial microgenerators for energy scavenging from vibrations present in the environment. These devices constitute perpetual energy sources without the need for refilling, thus being well suited for abandoned sensors, wireless systems or microsystems which must be embedded within the structure, without outside physical connections. Different electromagnetic energy scavenging devices have been described in the literature [1,2,3], based on the use of a velocity damped resonator, which is well suited for harvesting of vibrational energy induced by the operation of machines. These vibrations are characterized by a well defined frequency (in the range between a few Hz and a few kHz) and low displacement amplitudes. Adjusting the resonant frequency of the system to that of the vibrations allows amplification of these low amplitude displacements. Moreover, for these applications, the use of an electromagnetic device has the potential advantages of a good level of compatibility with Si Microsystem technology, as well as the possibility of relatively high electromechanical coupling with simple designs.<|reference_end|>
arxiv
@article{serre2008linear, title={Linear and Non Linear Behaviour of Mechanical Resonators for Optimized Inertial Electromagnetic Microgenerators}, author={C. Serre, A. Pérez-Rodríguez, N. Fondevilla, E. Martincic (IEF), J.R. Morante, J. Montserrat, J. Esteve}, journal={Dans Symposium on Design, Test, Integration and Packaging of MEMS/MOEMS - DTIP 2008, Nice : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0805.0855}, primaryClass={cs.OH} }
serre2008linear
arxiv-3595
0805.0856
Design And Fabrication of Condenser Microphone Using Wafer Transfer And Micro-electroplating Technique
<|reference_start|>Design And Fabrication of Condenser Microphone Using Wafer Transfer And Micro-electroplating Technique: A novel fabrication process, which uses wafer transfer and micro-electroplating technique, has been proposed and tested. In this paper, the effects of the diaphragm thickness and stress, the air-gap thickness, and the area ratio of acoustic holes to backplate on the sensitivity of the condenser microphone have been demonstrated since the performance of the microphone depends on these parameters. The microphone diaphragm has been designed with a diameter and thickness of 1.9 mm and 0.6 $\mu$m, respectively, an air-gap thickness of 10 $\mu$m, and a 24% area ratio of acoustic holes to backplate. To obtain a lower initial stress, the material used for the diaphragm is polyimide. The measured sensitivities of the microphone at the bias voltages of 24 V and 12 V are -45.3 and -50.2 dB/Pa (at 1 kHz), respectively. The fabricated microphone shows a flat frequency response extending to 20 kHz.<|reference_end|>
arxiv
@article{shu2008design, title={Design And Fabrication of Condenser Microphone Using Wafer Transfer And Micro-electroplating Technique}, author={Zhen-Zhun Shu, Ming-Li Ke, Guan-Wei Chen, Ray Hua Horng, Chao-Chih Chang, Jean-Yih Tsai, Chung-Ching Lai, Ji-Liang Chen}, journal={Dans Symposium on Design, Test, Integration and Packaging of MEMS/MOEMS - DTIP 2008, Nice : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0805.0856}, primaryClass={cs.OH} }
shu2008design
arxiv-3596
0805.0857
Porous Alumina Based Capacitive MEMS RH Sensor
<|reference_start|>Porous Alumina Based Capacitive MEMS RH Sensor: The aim of a joint research and development project at the BME and HWU is to produce a cheap, reliable, low-power and CMOS-MEMS process compatible capacitive type relative humidity (RH) sensor that can be incorporated into a state-of-the-art, wireless sensor network. In this paper we discuss the preparation of our new capacitive structure based on post-CMOS MEMS processes and the methods which were used to characterize the thin film porous alumina sensing layer. The average sensitivity is approx. 15 pF/RH%, which is more than an order of magnitude higher than the values found in the literature. The sensor is equipped with integrated resistive heating, which can be used for maintenance to reduce drift, or for keeping the sensing layer at elevated temperature, as an alternative method for temperature-dependence cancellation.<|reference_end|>
arxiv
@article{juhasz2008porous, title={Porous Alumina Based Capacitive MEMS RH Sensor}, author={L. Juhasz, A. Vass-Varnai, Veronika Timar-Horvath, Marc Desmulliez, Resh Dhariwal}, journal={Dans Symposium on Design, Test, Integration and Packaging of MEMS/MOEMS - DTIP 2008, Nice : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0805.0857}, primaryClass={cs.OH} }
juhasz2008porous
arxiv-3597
0805.0858
Integrated RF MEMS/CMOS Devices
<|reference_start|>Integrated RF MEMS/CMOS Devices: A maskless post-processing technique for CMOS chips is developed that enables the fabrication of RF MEMS parallel-plate capacitors with a high quality factor and a very compact size. Simulations and measured results are presented for several MEMS/CMOS capacitors. A 2-pole coupled line tunable bandpass filter with a center frequency of 9.5 GHz is designed, fabricated and tested. A tuning range of 17% is achieved using integrated variable MEMS/CMOS capacitors with a quality factor exceeding 20. The tunable filter occupies a chip area of 1.2 x 2.1 mm2.<|reference_end|>
arxiv
@article{mansour2008integrated, title={Integrated RF MEMS/CMOS Devices}, author={R. R. Mansour, S. Fouladi, M. Bakeri-Kassem}, journal={Dans Symposium on Design, Test, Integration and Packaging of MEMS/MOEMS - DTIP 2008, Nice : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0805.0858}, primaryClass={cs.OH} }
mansour2008integrated
arxiv-3598
0805.0859
Design Methodology and Manufacture of a Microinductor
<|reference_start|>Design Methodology and Manufacture of a Microinductor: Potential core materials to supersede ferrite in the 0.5-10 MHz frequency range are investigated. The performance of electrodeposited nickel-iron and cobalt-iron-copper alloys and the commercial alloy Vitrovac 6025 has been assessed through their inclusion within a custom-made solenoid microinductor. Although the present inductor, at 500 kHz, achieves 77% power efficiency at a power density of 24.7 W/cm3, an optimized process predicts a power efficiency of 97% at a power density of 30.83 W/cm3. The principal issues regarding microinductor design and performance are discussed.<|reference_end|>
arxiv
@article{flynn2008design, title={Design Methodology and Manufacture of a Microinductor}, author={D. Flynn, Marc Desmulliez}, journal={Dans Symposium on Design, Test, Integration and Packaging of MEMS/MOEMS - DTIP 2008, Nice : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0805.0859}, primaryClass={cs.OH} }
flynn2008design
arxiv-3599
0805.0860
Megasonic Enhanced Electrodeposition
<|reference_start|>Megasonic Enhanced Electrodeposition: A novel way of filling high aspect ratio vertical interconnections (microvias) with an aspect ratio of >2:1 is presented. High frequency acoustic streaming at megasonic frequencies enables the Nernst diffusion layer to be reduced to the sub-micron range, thereby allowing conformal electrodeposition in deep grooves. Higher throughput and better control over the deposition properties are possible for the manufacturing of interconnections and metal-based MEMS.<|reference_end|>
arxiv
@article{kaufmann2008megasonic, title={Megasonic Enhanced Electrodeposition}, author={Jens Georg Kaufmann, Marc Desmulliez, D. Price}, journal={Dans Symposium on Design, Test, Integration and Packaging of MEMS/MOEMS - DTIP 2008, Nice : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0805.0860}, primaryClass={cs.OH} }
kaufmann2008megasonic
arxiv-3600
0805.0861
UV Direct-Writing of Metals on Polyimide
<|reference_start|>UV Direct-Writing of Metals on Polyimide: Conductive micro-patterned copper tracks were fabricated by UV direct-writing of a nanoparticle silver seed layer followed by selective electroless copper deposition. Silver ions were first incorporated into a hydrolyzed polyimide surface layer by wet chemical treatment. A photoreactive polymer coating, methoxy poly(ethylene glycol) (MPEG), was coated on top of the substrate prior to UV irradiation. Electrons released through the interaction between the MPEG molecules and UV photons allowed the reduction of the silver ions across the MPEG/doped polyimide interface. The resultant silver seed layer has a cluster morphology which is suitable for the initiation of electroless plating. Initial results showed that the deposited copper tracks were in good agreement with the track width on the photomask, and that laser direct-writing can also fabricate metal tracks with smaller line widths with good accuracy. The facile fabrication presented here can be carried out in air, at atmospheric pressure, and on contoured surfaces.<|reference_end|>
arxiv
@article{ng2008uv, title={UV Direct-Writing of Metals on Polyimide}, author={Jack Hoyd-Gigg Ng, Marc Desmulliez, Aongus Mccarthy, Himanshu Suyal, Kevin Prior, Duncan P. Hand}, journal={Dans Symposium on Design, Test, Integration and Packaging of MEMS/MOEMS - DTIP 2008, Nice : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0805.0861}, primaryClass={cs.OH} }
ng2008uv