corpus_id       stringlengths   7–12
paper_id        stringlengths   9–16
title           stringlengths   1–261
abstract        stringlengths   70–4.02k
source          stringclasses   1 value
bibtex          stringlengths   208–20.9k
citation_key    stringlengths   6–100
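For orientation before the records themselves, the sketch below shows one way a dataset with this schema could be loaded and inspected using the Hugging Face datasets library. The repository path is a placeholder, not the dataset's actual name.

    # Minimal sketch, assuming a Hugging Face-hosted dataset with the columns listed above.
    # "example-org/arxiv-citation-corpus" is a hypothetical path; substitute the real repository name.
    from datasets import load_dataset

    ds = load_dataset("example-org/arxiv-citation-corpus", split="train")

    record = ds[0]
    for column in ("corpus_id", "paper_id", "title", "source", "citation_key"):
        print(column, "->", record[column])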
arxiv-675101
cs/0611054
How Random is a Coin Toss? Bayesian Inference and the Symbolic Dynamics of Deterministic Chaos
<|reference_start|>How Random is a Coin Toss? Bayesian Inference and the Symbolic Dynamics of Deterministic Chaos: Symbolic dynamics has proven to be an invaluable tool in analyzing the mechanisms that lead to unpredictability and random behavior in nonlinear dynamical systems. Surprisingly, a discrete partition of continuous state space can produce a coarse-grained description of the behavior that accurately describes the invariant properties of an underlying chaotic attractor. In particular, measures of the rate of information production--the topological and metric entropy rates--can be estimated from the outputs of Markov or generating partitions. Here we develop Bayesian inference for k-th order Markov chains as a method for finding generating partitions and estimating entropy rates from finite samples of discretized data produced by coarse-grained dynamical systems.<|reference_end|>
arxiv
@article{strelioff2006how, title={How Random is a Coin Toss? Bayesian Inference and the Symbolic Dynamics of Deterministic Chaos}, author={Christopher C. Strelioff and James P. Crutchfield}, journal={arXiv preprint arXiv:cs/0611054}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611054}, primaryClass={cs.LG cs.IT math.IT nlin.CD} }
strelioff2006how
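In each record, the abstract field wraps the paper title and abstract text between <|reference_start|> and <|reference_end|> markers, with the title repeated as a prefix ending in ": ". The sketch below (same caveats as above; parse_abstract is an illustrative helper, not part of any published API) strips the markers and removes the title prefix by matching against the record's own title field, which is safer than splitting on the first colon because titles may themselves contain one.

    import re

    # Illustrative helper: strip the <|reference_start|>/<|reference_end|> markers and
    # drop the leading "Title: " prefix using the record's separate title field.
    def parse_abstract(field: str, title: str) -> str:
        m = re.match(r"<\|reference_start\|>(.*)<\|reference_end\|>", field, re.DOTALL)
        body = m.group(1) if m else field
        prefix = title + ": "
        return body[len(prefix):] if body.startswith(prefix) else body

    example = "<|reference_start|>A Sample Title: This is the abstract text.<|reference_end|>"
    print(parse_abstract(example, "A Sample Title"))  # -> "This is the abstract text."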
arxiv-675102
cs/0611055
A Low-Footprint Class Loading Mechanism for Embedded Java Virtual Machines
<|reference_start|>A Low-Footprint Class Loading Mechanism for Embedded Java Virtual Machines: This paper shows that it is possible to dramatically reduce the memory consumption of classes loaded in an embedded Java virtual machine without reducing its functionalities. We describe how to pack the constant pool by deleting entries which are only used during the class loading process. We present some benchmarks which demonstrate the efficiency of this mechanism. We finally suggest some additional optimizations which can be applied if some restrictions to the functionalities of the virtual machine can be tolerated.<|reference_end|>
arxiv
@article{rippert2006a, title={A Low-Footprint Class Loading Mechanism for Embedded Java Virtual Machines}, author={Christophe Rippert (INRIA Futurs), Alexandre Courbot (INRIA Futurs), Gilles Grimaud (INRIA Futurs, LIFL)}, journal={Dans 3rd ACM International Conference on the Principles and Practice of Programming in Java (2004)}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611055}, primaryClass={cs.OS} }
rippert2006a
arxiv-675103
cs/0611056
Using Existing Network Simulators for Power-Aware Self-Organizing Wireless Sensor Network Protocols
<|reference_start|>Using Existing Network Simulators for Power-Aware Self-Organizing Wireless Sensor Network Protocols: In this document, we compare three existing simulation platforms (OPNET Modeler, Network Simulator 2, Georgia Tech Sensor Network Simulator). Our comparative study focuses mainly on ease of use, scalability, ease of implementing a power consumption model, and physical layer modeling accuracy. Conclusions of this study are presented, and will help us decide which simulation environment to use for evaluating power-aware self-organizing sensor network protocols.<|reference_end|>
arxiv
@article{watteyne2006using, title={Using Existing Network Simulators for Power-Aware Self-Organizing Wireless Sensor Network Protocols}, author={Thomas Watteyne (INRIA Rh\^one-Alpes)}, journal={arXiv preprint arXiv:cs/0611056}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611056}, primaryClass={cs.NI} }
watteyne2006using
arxiv-675104
cs/0611057
Formalising Sylow's theorems in Coq
<|reference_start|>Formalising Sylow's theorems in Coq: This report presents a formalisation of Sylow's theorems done in Coq. The formalisation has been done in a couple of weeks on top of Georges Gonthier's ssreflect \cite{ssreflect}. There were two ideas behind formalising Sylow's theorems. The first one was to get familiar with Georges' way of doing proofs. The second one was to contribute to the collective effort to formalise a large subset of group theory in Coq with some non-trivial proofs.<|reference_end|>
arxiv
@article{thery2006formalising, title={Formalising Sylow's theorems in Coq}, author={Laurent Thery (INRIA Sophia Antipolis)}, journal={arXiv preprint arXiv:cs/0611057}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611057}, primaryClass={cs.LO} }
thery2006formalising
arxiv-675105
cs/0611058
Advances in Self Organising Maps
<|reference_start|>Advances in Self Organising Maps: The Self-Organizing Map (SOM) with its related extensions is the most popular artificial neural algorithm for use in unsupervised learning, clustering, classification and data visualization. Over 5,000 publications have been reported in the open literature, and many commercial projects employ the SOM as a tool for solving hard real-world problems. Every two years, the "Workshop on Self-Organizing Maps" (WSOM) covers the new developments in the field. The WSOM series of conferences was initiated in 1997 by Prof. Teuvo Kohonen, and has been successfully organized in 1997 and 1999 by the Helsinki University of Technology, in 2001 by the University of Lincolnshire and Humberside, and in 2003 by the Kyushu Institute of Technology. The Université Paris I Panthéon Sorbonne (SAMOS-MATISSE research centre) organized WSOM 2005 in Paris on September 5-8, 2005.<|reference_end|>
arxiv
@article{cottrell2006advances, title={Advances in Self Organising Maps}, author={Marie Cottrell (CES, SAMOS), Michel Verleysen (DICE)}, journal={Neural Networks Volume 19, Issues 6-7 (2006) 721-722}, year={2006}, doi={10.1016/j.neunet.2006.05.011}, archivePrefix={arXiv}, eprint={cs/0611058}, primaryClass={cs.NE math.ST nlin.AO stat.TH} }
cottrell2006advances
arxiv-675106
cs/0611059
Is the cyclic prefix necessary?
<|reference_start|>Is the cyclic prefix necessary?: We show that one can do away with the cyclic prefix (CP) for SC-FDE and OFDM at the cost of a moderate increase in the complexity of a DFT-based receiver. Such an approach effectively deals with the decrease in the number of channel uses due to the introduction of the CP. It is shown that the SINR for SC-FDE remains the same asymptotically with the proposed receiver without CP as that of the conventional receiver with CP. The results are shown for $N_t$ transmit antennas and $N_r$ receive antennas where $N_r \geq N_t$.<|reference_end|>
arxiv
@article{sharma2006is, title={Is the cyclic prefix necessary?}, author={Naresh Sharma and Ashok Armen Tikku}, journal={arXiv preprint arXiv:cs/0611059}, year={2006}, doi={10.1109/ISIT.2006.261688}, archivePrefix={arXiv}, eprint={cs/0611059}, primaryClass={cs.IT math.IT} }
sharma2006is
arxiv-675107
cs/0611060
The effect of 'Open Access' upon citation impact: An analysis of ArXiv's Condensed Matter Section
<|reference_start|>The effect of 'Open Access' upon citation impact: An analysis of ArXiv's Condensed Matter Section: This article statistically analyses how the citation impact of articles deposited in the Condensed Matter section of the preprint server ArXiv (hosted by Cornell University), and subsequently published in a scientific journal, compares to that of articles in the same journal that were not deposited in that archive. Its principal aim is to further illustrate and roughly estimate the effect of two factors, 'early view' and 'quality bias', upon differences in citation impact between these two sets of papers, using citation data from Thomson Scientific's Web of Science. It presents estimates for a number of journals in the field of condensed matter physics. In order to discriminate between an 'open access' effect and an early view effect, longitudinal citation data was analysed covering a time period as long as 7 years. Quality bias was measured by calculating ArXiv citation impact differentials at the level of individual authors publishing in a journal, taking into account co-authorship. The analysis provided evidence of a strong quality bias and early view effect. Correcting for these effects, there is, in a sample of 6 condensed matter physics journals studied in detail, no sign of a general 'open access advantage' of papers deposited in ArXiv. The study does provide evidence that ArXiv accelerates citation, due to the fact that ArXiv makes papers available earlier rather than that it makes papers freely available.<|reference_end|>
arxiv
@article{moed2006the, title={The effect of 'Open Access' upon citation impact: An analysis of ArXiv's Condensed Matter Section}, author={Henk F. Moed}, journal={arXiv preprint arXiv:cs/0611060}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611060}, primaryClass={cs.DL cs.IR physics.soc-ph} }
moed2006the
arxiv-675108
cs/0611061
Multivariate Integral Perturbation Techniques - I (Theory)
<|reference_start|>Multivariate Integral Perturbation Techniques - I (Theory): We present a quasi-analytic perturbation expansion for multivariate N-dimensional Gaussian integrals. The perturbation expansion is an infinite series of lower-dimensional integrals (one-dimensional in the simplest approximation). This perturbative idea can also be applied to multivariate Student-t integrals. We evaluate the perturbation expansion explicitly through 2nd order, and discuss the convergence, including enhancement using Pade approximants. Brief comments on potential applications in finance are given, including options, models for credit risk and derivatives, and correlation sensitivities.<|reference_end|>
arxiv
@article{dash2006multivariate, title={Multivariate Integral Perturbation Techniques - I (Theory)}, author={Jan W. Dash}, journal={arXiv preprint arXiv:cs/0611061}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611061}, primaryClass={cs.CE cs.NA} }
dash2006multivariate
arxiv-675109
cs/0611062
A framework for compositional verification of security protocols
<|reference_start|>A framework for compositional verification of security protocols: Automatic security protocol analysis is currently feasible only for small protocols. Since larger protocols quite often are composed of many small protocols, compositional analysis is an attractive, but non-trivial approach. We have developed a framework for compositional analysis of a large class of security protocols. The framework is intended to facilitate automatic as well as manual verification of large structured security protocols. Our approach is to verify properties of component protocols in a multi-protocol environment, then deduce properties about the composed protocol. To reduce the complexity of multi-protocol verification, we introduce a notion of protocol independence and prove a number of theorems that enable analysis of independent component protocols in isolation. To illustrate the applicability of our framework to real-world protocols, we study a key establishment sequence in WiMax consisting of three subprotocols. Except for a small amount of trivial reasoning, the analysis is done using automatic tools.<|reference_end|>
arxiv
@article{andova2006a, title={A framework for compositional verification of security protocols}, author={Suzana Andova, Cas Cremers, Kristian Gjosteen, Sjouke Mauw, Stig F. Mjolsnes, Sasa Radomirovic}, journal={arXiv preprint arXiv:cs/0611062}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611062}, primaryClass={cs.CR} }
andova2006a
arxiv-675110
cs/0611063
Characterizing Optimal Adword Auctions
<|reference_start|>Characterizing Optimal Adword Auctions: We present a number of models for the adword auctions used for pricing advertising slots on search engines such as Google, Yahoo! etc. We begin with a general problem formulation which allows the privately known valuation per click to be a function of both the identity of the advertiser and the slot. We present a compact characterization of the set of all deterministic incentive compatible direct mechanisms for this model. This new characterization allows us to conclude that there are incentive compatible mechanisms for this auction with a multi-dimensional type-space that are {\em not} affine maximizers. Next, we discuss two interesting special cases: slot independent valuation and slot independent valuation up to a privately known slot and zero thereafter. For both of these special cases, we characterize revenue maximizing and efficiency maximizing mechanisms and show that these mechanisms can be computed with a worst case computational complexity $O(n^2m^2)$ and $O(n^2m^3)$ respectively, where $n$ is number of bidders and $m$ is number of slots. Next, we characterize optimal rank based allocation rules and propose a new mechanism that we call the customized rank based allocation. We report the results of a numerical study that compare the revenue and efficiency of the proposed mechanisms. The numerical results suggest that customized rank-based allocation rule is significantly superior to the rank-based allocation rules.<|reference_end|>
arxiv
@article{iyengar2006characterizing, title={Characterizing Optimal Adword Auctions}, author={Garud Iyengar and Anuj Kumar}, journal={arXiv preprint arXiv:cs/0611063}, year={2006}, number={CORC Technical Report TR-2006-04 at Computational Optimization Research Center at Columbia University}, archivePrefix={arXiv}, eprint={cs/0611063}, primaryClass={cs.GT} }
iyengar2006characterizing
arxiv-675111
cs/0611064
Distributed Link Scheduling with Constant Overhead
<|reference_start|>Distributed Link Scheduling with Constant Overhead: This paper proposes a new class of simple, distributed algorithms for scheduling in wireless networks. The algorithms generate new schedules in a distributed manner via simple local changes to existing schedules. The class is parameterized by integers $k\geq 1$. We show that algorithm $k$ of our class achieves $k/(k+2)$ of the capacity region, for every $k\geq 1$. The algorithms have small and constant worst-case overheads: in particular, algorithm $k$ generates a new schedule using {\em (a)} time less than $4k+2$ round-trip times between neighboring nodes in the network, and {\em (b)} at most three control transmissions by any given node, for any $k$. The control signals are explicitly specified, and face the same interference effects as normal data transmissions. Our class of distributed wireless scheduling algorithms are the first ones guaranteed to achieve any fixed fraction of the capacity region while using small and constant overheads that do not scale with network size. The parameter $k$ explicitly captures the tradeoff between control overhead and scheduler throughput performance and provides a tuning knob protocol designers can use to harness this trade-off in practice.<|reference_end|>
arxiv
@article{sanghavi2006distributed, title={Distributed Link Scheduling with Constant Overhead}, author={Sujay Sanghavi, Loc Bui, R. Srikant}, journal={arXiv preprint arXiv:cs/0611064}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611064}, primaryClass={cs.NI cs.PF} }
sanghavi2006distributed
arxiv-675112
cs/0611065
On the security of new key exchange protocols based on the triple decomposition problem
<|reference_start|>On the security of new key exchange protocols based on the triple decomposition problem: We show that two new key exchange protocols with security based on the triple DP may have security based on the MSCSP.<|reference_end|>
arxiv
@article{chowdhury2006on, title={On the security of new key exchange protocols based on the triple decomposition problem}, author={M. M. Chowdhury}, journal={arXiv preprint arXiv:cs/0611065}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611065}, primaryClass={cs.CR} }
chowdhury2006on
arxiv-675113
cs/0611066
A modular eballot system - V0.6
<|reference_start|>A modular eballot system - V0.6: We consider a reasonably simple voting system which can be implemented for web-based ballots. Simplicity, modularity and the requirement of compatibility with current web browsers lead to a system which satisfies a set of security requirements for a ballot system which is not complete but sufficient in many cases. Due to weak-eligibility and vote-selling, this system cannot be used for political or similar ballots.<|reference_end|>
arxiv
@article{pasquinucci2006a, title={A modular eballot system - V0.6}, author={Andrea Pasquinucci}, journal={arXiv preprint arXiv:cs/0611066}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611066}, primaryClass={cs.CR} }
pasquinucci2006a
arxiv-675114
cs/0611067
Implementing the modular eballot system V0.6
<|reference_start|>Implementing the modular eballot system V0.6: We describe a practical implementation of the modular eballot system proposed in ref. [1].<|reference_end|>
arxiv
@article{pasquinucci2006implementing, title={Implementing the modular eballot system V0.6}, author={Andrea Pasquinucci}, journal={arXiv preprint arXiv:cs/0611067}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611067}, primaryClass={cs.CR} }
pasquinucci2006implementing
arxiv-675115
cs/0611068
Wikipedia: organisation from a bottom-up approach
<|reference_start|>Wikipedia: organisation from a bottom-up approach: Wikipedia can be considered as an extreme form of a self-managing team, as a means of labour division. One could expect that this bottom-up approach, with the absence of top-down organisational control, would lead to chaos, but our analysis shows that this is not the case. In the Dutch Wikipedia, an integrated and coherent data structure is created, while at the same time users succeed in distributing roles by self-selection. Some users focus on an area of expertise, while others edit over the whole encyclopedic range. This leads to our conclusion that Wikipedia, in general, is a successful example of a self-managing team.<|reference_end|>
arxiv
@article{spek2006wikipedia:, title={Wikipedia: organisation from a bottom-up approach}, author={Sander Spek, Eric Postma and H. Jaap van den Herik}, journal={arXiv preprint arXiv:cs/0611068}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611068}, primaryClass={cs.DL cs.CY} }
spek2006wikipedia:
arxiv-675116
cs/0611069
Scaling Construction Grammar up to Production Systems: the SCIM
<|reference_start|>Scaling Construction Grammar up to Production Systems: the SCIM: While a great deal of effort has been devoted to the development of fully integrated modular understanding systems, little research has focused on the problem of unifying existing linguistic formalisms with cognitive processing models. The Situated Constructional Interpretation Model is one of these attempts. In this model, the notion of "construction" has been adapted in order to be able to mimic the behavior of Production Systems. The Construction Grammar approach establishes a model of the relations between linguistic forms and meaning, by means of constructions. The latter can be considered as pairings from a topologically structured space to an unstructured space, in some way a special kind of production rule.<|reference_end|>
arxiv
@article{pitel2006scaling, title={Scaling Construction Grammar up to Production Systems: the SCIM}, author={Guillaume Pitel (INRIA Lorraine - LORIA)}, journal={Dans Scalable Natural Language Understanding 2006 (2006)}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611069}, primaryClass={cs.CL} }
pitel2006scaling
arxiv-675117
cs/0611070
Hierarchical Cooperation Achieves Optimal Capacity Scaling in Ad Hoc Networks
<|reference_start|>Hierarchical Cooperation Achieves Optimal Capacity Scaling in Ad Hoc Networks: n source and destination pairs randomly located in an area want to communicate with each other. Signals transmitted from one user to another at distance r apart are subject to a power loss of r^{-alpha}, as well as a random phase. We identify the scaling laws of the information theoretic capacity of the network. In the case of dense networks, where the area is fixed and the density of nodes increasing, we show that the total capacity of the network scales linearly with n. This improves on the best known achievability result of n^{2/3} of Aeron and Saligrama, 2006. In the case of extended networks, where the density of nodes is fixed and the area increasing linearly with n, we show that this capacity scales as n^{2-alpha/2} for 2<alpha<3 and sqrt{n} for alpha>3. The best known earlier result (Xie and Kumar 2006) identified the scaling law for alpha > 4. Thus, much better scaling than multihop can be achieved in dense networks, as well as in extended networks with low attenuation. The performance gain is achieved by intelligent node cooperation and distributed MIMO communication. The key ingredient is a hierarchical and digital architecture for nodal exchange of information for realizing the cooperation.<|reference_end|>
arxiv
@article{ozgur2006hierarchical, title={Hierarchical Cooperation Achieves Optimal Capacity Scaling in Ad Hoc Networks}, author={Ayfer Ozgur, Olivier Leveque, David Tse}, journal={arXiv preprint arXiv:cs/0611070}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611070}, primaryClass={cs.IT math.IT} }
ozgur2006hierarchical
arxiv-675118
cs/0611071
Capabilities Engineering: Constructing Change-Tolerant Systems
<|reference_start|>Capabilities Engineering: Constructing Change-Tolerant Systems: We propose a Capabilities-based approach for building long-lived, complex systems that have lengthy development cycles. User needs and technology evolve during these extended development periods, and thereby, inhibit a fixed requirements-oriented solution specification. In effect, for complex emergent systems, the traditional approach of baselining requirements results in an unsatisfactory system. Therefore, we present an alternative approach, Capabilities Engineering, which mathematically exploits the structural semantics of the Function Decomposition graph - a representation of user needs - to formulate Capabilities. For any given software system, the set of derived Capabilities embodies change-tolerant characteristics. More specifically, each individual Capability is a functional abstraction constructed to be highly cohesive and to be minimally coupled with its neighbors. Moreover, the Capability set is chosen to accommodate an incremental development approach, and to reflect the constraints of technology feasibility and implementation schedules. We discuss our validation activities to empirically prove that the Capabilities-based approach results in change-tolerant systems.<|reference_end|>
arxiv
@article{ravichandar2006capabilities, title={Capabilities Engineering: Constructing Change-Tolerant Systems}, author={Ramya Ravichandar, James D. Arthur, Shawn A. Bohner}, journal={arXiv preprint arXiv:cs/0611071}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611071}, primaryClass={cs.SE} }
ravichandar2006capabilities
arxiv-675119
cs/0611072
Reconciling Synthesis and Decomposition: A Composite Approach to Capability Identification
<|reference_start|>Reconciling Synthesis and Decomposition: A Composite Approach to Capability Identification: Stakeholders' expectations and technology constantly evolve during the lengthy development cycles of a large-scale computer based system. Consequently, the traditional approach of baselining requirements results in an unsatisfactory system because it is ill-equipped to accommodate such change. In contrast, systems constructed on the basis of Capabilities are more change-tolerant; Capabilities are functional abstractions that are neither as amorphous as user needs nor as rigid as system requirements. Alternatively, Capabilities are aggregates that capture desired functionality from the users' needs, and are designed to exhibit desirable software engineering characteristics of high cohesion, low coupling and optimum abstraction levels. To formulate these functional abstractions we develop and investigate two algorithms for Capability identification: Synthesis and Decomposition. The synthesis algorithm aggregates detailed rudimentary elements of the system to form Capabilities. In contrast, the decomposition algorithm determines Capabilities by recursively partitioning the overall mission of the system into more detailed entities. Empirical analysis on a small computer based library system reveals that neither approach is sufficient by itself. However, a composite algorithm based on a complementary approach reconciling the two polar perspectives results in a more feasible set of Capabilities. In particular, the composite algorithm formulates Capabilities using the cohesion and coupling measures as defined by the decomposition algorithm and the abstraction level as determined by the synthesis algorithm.<|reference_end|>
arxiv
@article{ravichandar2006reconciling, title={Reconciling Synthesis and Decomposition: A Composite Approach to Capability Identification}, author={Ramya Ravichandar, James D. Arthur, Robert P. Broadwater}, journal={arXiv preprint arXiv:cs/0611072}, year={2006}, doi={10.1109/ECBS.2007.61}, archivePrefix={arXiv}, eprint={cs/0611072}, primaryClass={cs.SE} }
ravichandar2006reconciling
arxiv-675120
cs/0611073
Prefix Codes for Power Laws with Countable Support
<|reference_start|>Prefix Codes for Power Laws with Countable Support: In prefix coding over an infinite alphabet, methods that consider specific distributions generally consider those that decline more quickly than a power law (e.g., Golomb coding). Particular power-law distributions, however, model many random variables encountered in practice. For such random variables, compression performance is judged via estimates of expected bits per input symbol. This correspondence introduces a family of prefix codes with an eye towards near-optimal coding of known distributions. Compression performance is precisely estimated for well-known probability distributions using these codes and using previously known prefix codes. One application of these near-optimal codes is an improved representation of rational numbers.<|reference_end|>
arxiv
@article{baer2006prefix, title={Prefix Codes for Power Laws with Countable Support}, author={Michael B. Baer}, journal={Information Theory, 2008. ISIT 2008. IEEE International Symposium on}, year={2006}, doi={10.1109/ISIT.2008.4595434}, archivePrefix={arXiv}, eprint={cs/0611073}, primaryClass={cs.IT math.IT} }
baer2006prefix
arxiv-675121
cs/0611074
On "P = NP: Linear Programming Formulation of the Traveling Salesman Problem": A reply to Hofman's Claim of a "Counter-Example"
<|reference_start|>On "P = NP: Linear Programming Formulation of the Traveling Salesman Problem": A reply to Hofman's Claim of a "Counter-Example": We show that Hofman's claim of a "counter-example" to Diaby's LP formulation of the TSP is invalid.<|reference_end|>
arxiv
@article{diaby2006on, title={On "P = NP: Linear Programming Formulation of the Traveling Salesman Problem": A reply to Hofman's Claim of a "Counter-Example"}, author={Moustapha Diaby}, journal={arXiv preprint arXiv:cs/0611074}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611074}, primaryClass={cs.CC cs.DM} }
diaby2006on
arxiv-675122
cs/0611075
Proportional Fairness in Multi-channel Multi-rate Wireless Networks-Part I: The Case of Deterministic Channels
<|reference_start|>Proportional Fairness in Multi-channel Multi-rate Wireless Networks-Part I: The Case of Deterministic Channels: This is Part I of a two-part paper series that studies the use of the proportional fairness (PF) utility function as the basis for capacity allocation and scheduling in multi-channel multi-rate wireless networks. The contributions of Part I are threefold. (i) First, we lay down the theoretical foundation for PF. Specifically, we present the fundamental properties and physical/economic interpretation of PF. We show by general mathematical arguments that PF leads to equal airtime allocation to users for the single-channel case; and equal equivalent airtime allocation to users for the multi-channel case, where the equivalent airtime enjoyed by a user is a weighted sum of the airtimes enjoyed by the user on all channels, with the weight of a channel being the price or value of that channel. We also establish the Pareto efficiency of PF solutions. (ii) Second, we derive characteristics of PF solutions that are useful for the construction of PF-optimization algorithms. We present several PF-optimization algorithms, including a fast algorithm that is amenable to parallel implementation. (iii) Third, we study the use of PF utility for capacity allocation in large-scale WiFi networks consisting of many adjacent wireless LANs. We find that the PF solution simultaneously achieves higher system throughput, better fairness, and lower outage probability with respect to the default solution given by today's 802.11 commercial products. Part II of this paper series extends our investigation to the time-varying-channel case in which the data rates enjoyed by users over the channels vary dynamically over time<|reference_end|>
arxiv
@article{liew2006proportional, title={Proportional Fairness in Multi-channel Multi-rate Wireless Networks-Part I: The Case of Deterministic Channels}, author={Soung Chang Liew and Ying Jun Zhang}, journal={arXiv preprint arXiv:cs/0611075}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611075}, primaryClass={cs.NI cs.IT cs.PF math.IT} }
liew2006proportional
arxiv-675123
cs/0611076
Proportional Fairness in Multi-channel Multi-rate Wireless Networks-Part II: The Case of Time-Varying Channels
<|reference_start|>Proportional Fairness in Multi-channel Multi-rate Wireless Networks-Part II: The Case of Time-Varying Channels: This is Part II of a two-part paper series that studies the use of the proportional fairness (PF) utility function as the basis for capacity allocation and scheduling in multi-channel multi-rate wireless networks. The contributions of Part II are twofold. (i) First, we extend the problem formulation, theoretical results, and algorithms to the case of time-varying channels, where opportunistic capacity allocation and scheduling can be exploited to improve system performance. We lay down the theoretical foundation for optimization that "couples" the time-varying characteristic of channels with the requirements of the underlying applications into one consideration. In particular, the extent to which opportunistic optimization is possible is not just a function of how fast the channel characteristics vary, but also a function of the elasticity of the underlying applications for delayed capacity allocation. (ii) Second, building upon our theoretical framework and results, we study subcarrier allocation and scheduling in orthogonal frequency division multiplexing (OFDM) cellular wireless networks. We introduce the concept of a W-normalized Doppler frequency to capture the extent to which opportunistic scheduling can be exploited to achieve throughput-fairness performance gain. We show that a "look-back PF" scheduling can strike a good balance between system throughput and fairness while taking the underlying application requirements into account.<|reference_end|>
arxiv
@article{liew2006proportional, title={Proportional Fairness in Multi-channel Multi-rate Wireless Networks-Part II: The Case of Time-Varying Channels}, author={Soung Chang Liew and Ying Jun Zhang}, journal={arXiv preprint arXiv:cs/0611076}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611076}, primaryClass={cs.PF cs.IT cs.NI math.IT} }
liew2006proportional
arxiv-675124
cs/0611077
Evolutionary Optimization in an Algorithmic Setting
<|reference_start|>Evolutionary Optimization in an Algorithmic Setting: Evolutionary processes have proved very useful for solving optimization problems. In this work, we build a formalization of the notion of cooperation and competition of multiple systems working toward a common optimization goal of the population using evolutionary computation techniques. We argue that evolutionary algorithms are more expressive than conventional recursive algorithms. Three subclasses of evolutionary algorithms are proposed here: bounded finite, unbounded finite and infinite types. Some results on completeness, optimality and search decidability for the above classes are presented. A natural extension of the Evolutionary Turing Machine model developed in this paper allows one to mathematically represent and study properties of cooperation and competition in a population of optimized species.<|reference_end|>
arxiv
@article{burgin2006evolutionary, title={Evolutionary Optimization in an Algorithmic Setting}, author={Mark Burgin and Eugene Eberbach}, journal={arXiv preprint arXiv:cs/0611077}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611077}, primaryClass={cs.NE cs.AI} }
burgin2006evolutionary
arxiv-675125
cs/0611078
Safety Evaluation of Critical Applications Distributed on TDMA-Based Networks
<|reference_start|>Safety Evaluation of Critical Applications Distributed on TDMA-Based Networks: Critical embedded systems have to provide a high level of dependability. In the automotive domain, for example, TDMA protocols are largely recommended because of their deterministic behavior. Nevertheless, under transient environmental perturbations, the loss of communication cycles may occur with a certain probability and, consequently, the system may fail. This paper analyzes the impact of transient perturbations (especially due to Electromagnetic Interferences) on the dependability of systems distributed on TDMA-based networks. The dependability of such a system is modeled as that of "consecutive-k-out-of-n:F" systems and we provide an efficient way for its evaluation.<|reference_end|>
arxiv
@article{simonot-lion2006safety, title={Safety Evaluation of Critical Applications Distributed on TDMA-Based Networks}, author={Fran\c{c}oise Simonot-Lion (INRIA Lorraine - LORIA), Fran\c{c}ois Simonot (IECN), Ye-Qiong Song (INRIA Lorraine - LORIA)}, journal={Dans Third Taiwanese-French Conference on Information Technology, TFIT'2006 (2006)}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611078}, primaryClass={cs.OH} }
simonot-lion2006safety
arxiv-675126
cs/0611079
Managing network congestion with a Kohonen-based RED queue
<|reference_start|>Managing network congestion with a Kohonen-based RED queue: The behaviour of the TCP AIMD algorithm is known to cause queue length oscillations when congestion occurs at a router output link. Indeed, due to these queueing variations, end-to-end applications experience large delay jitter. Many studies have proposed efficient Active Queue Management (AQM) mechanisms in order to reduce queue oscillations and stabilize the queue length. These AQM are mostly improvements of the Random Early Detection (RED) model. Unfortunately, these enhancements do not react in a similar manner for various network conditions and are strongly sensitive to their initial setting parameters. Although this paper proposes a solution to overcome the difficulties of setting these parameters by using a Kohonen neural network model, another goal of this study is to investigate whether cognitive intelligence could be placed in the core network to solve such stability problem. In our context, we use results from the neural network area to demonstrate that our proposal, named Kohonen-RED (KRED), enables a stable queue length without complex parameters setting and passive measurements.<|reference_end|>
arxiv
@article{lochin2006managing, title={Managing network congestion with a Kohonen-based RED queue}, author={Emmanuel Lochin and Bruno Talavera}, journal={arXiv preprint arXiv:cs/0611079}, year={2006}, doi={10.1016/j.engappai.2010.10.012}, archivePrefix={arXiv}, eprint={cs/0611079}, primaryClass={cs.NI cs.NE} }
lochin2006managing
arxiv-675127
cs/0611080
A Multi-server Scheduling Framework for Resource Allocation in Wireless Multi-carrier Networks
<|reference_start|>A Multi-server Scheduling Framework for Resource Allocation in Wireless Multi-carrier Networks: Multiuser resource allocation has recently been recognized as an effective methodology for enhancing the power and spectrum efficiency in OFDM (orthogonal frequency division multiplexing) systems. It is, however, not directly applicable to current packet-switched networks, because (i) most existing packet-scheduling schemes are based on a single-server model and do not serve multiple users at the same time; and (ii) the conventional separate design of MAC (medium access control) packet scheduling and PHY (physical) resource allocation yields inefficient resource utilization. In this paper, we propose a cross-layer resource allocation algorithm based on a novel multi-server scheduling framework to achieve overall high system power efficiency in packet-switched OFDM networks. Our contribution is four fold: (i) we propose and analyze a MPGPS (multi-server packetized general processor sharing) service discipline that serves multiple users at the same time and facilitates multiuser resource allocation; (ii) we present a MPGPS-based joint MAC-PHY resource allocation scheme that incorporates packet scheduling, subcarrier allocation, and power allocation in an integrated framework; (iii) by investigating the fundamental tradeoff between multiuser-diversity and queueing performance, we present an A-MPGPS (adaptive MPGPS) service discipline that strikes balance between power efficiency and queueing performance; and (iv) we extend MPGPS to an O-MPGPS (opportunistic MPGPS) service discipline to further enhance the resource utilization efficiency.<|reference_end|>
arxiv
@article{zhang2006a, title={A Multi-server Scheduling Framework for Resource Allocation in Wireless Multi-carrier Networks}, author={Ying Jun Zhang}, journal={arXiv preprint arXiv:cs/0611080}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611080}, primaryClass={cs.NA cs.CE cs.NI cs.PF} }
zhang2006a
arxiv-675128
cs/0611081
The Importance of the Algorithmic Information Theory to Construct a Possible Example Where NP # P - II: An Irreducible Sentence
<|reference_start|>The Importance of the Algorithmic Information Theory to Construct a Possible Example Where NP # P - II: An Irreducible Sentence: In this short communication we discuss the relation between disentangled states and algorithmic information theory, aiming to construct an irreducible sentence whose length increases in a non-polynomial way as the number of qubits increases.<|reference_end|>
arxiv
@article{ramos2006the, title={The Importance of the Algorithmic Information Theory to Construct a Possible Example Where NP # P - II: An Irreducible Sentence}, author={Rubens Viana Ramos}, journal={arXiv preprint arXiv:cs/0611081}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611081}, primaryClass={cs.CC cs.IT math.IT} }
ramos2006the
arxiv-675129
cs/0611082
The Computational Complexity of the Traveling Salesman Problem
<|reference_start|>The Computational Complexity of the Traveling Salesman Problem: In this note, we show that the Traveling Salesman Problem cannot be solved in polynomial time on a classical computer.<|reference_end|>
arxiv
@article{feinstein2006the, title={The Computational Complexity of the Traveling Salesman Problem}, author={Craig Alan Feinstein}, journal={Global Journal of Computer Science and Technology, Volume 11 Issue 23, December 2011, pp 1-2}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611082}, primaryClass={cs.CC} }
feinstein2006the
arxiv-675130
cs/0611083
Environment of development of the programs of parametric creating of the drawings in CAD-system of renovation of the enterprises
<|reference_start|>Environment of development of the programs of parametric creating of the drawings in CAD-system of renovation of the enterprises: The main ideas, data structures, and the structure and realization of operations on them in an environment for developing programs that parametrically create drawings are considered, for the needs of an automated design engineering system for the renovation of enterprises. An example of such a program, and an example of applying this environment to create the drawing of a base for equipment in the CAD system TechnoCAD GlassX, are presented.<|reference_end|>
arxiv
@article{migunov2006environment, title={Environment of development of the programs of parametric creating of the drawings in CAD-system of renovation of the enterprises}, author={Vladimir V. Migunov}, journal={arXiv preprint arXiv:cs/0611083}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611083}, primaryClass={cs.CE} }
migunov2006environment
arxiv-675131
cs/0611084
Large Scale In Silico Screening on Grid Infrastructures
<|reference_start|>Large Scale In Silico Screening on Grid Infrastructures: Large-scale grid infrastructures for in silico drug discovery open opportunities of particular interest to neglected and emerging diseases. In 2005 and 2006, we have been able to deploy large scale in silico docking within the framework of the WISDOM initiative against Malaria and Avian Flu requiring about 105 years of CPU on the EGEE, Auvergrid and TWGrid infrastructures. These achievements demonstrated the relevance of large-scale grid infrastructures for virtual screening by molecular docking. This also allowed us to evaluate the performance of the grid infrastructures and to identify specific issues raised by large-scale deployment.<|reference_end|>
arxiv
@article{jacq2006large, title={Large Scale In Silico Screening on Grid Infrastructures}, author={N. Jacq (LPC-Clermont), V. Breton (LPC-Clermont), H.-Y. Chen, L.-Y. Ho, M. Hofmann, H.-C. Lee, Y. Legr\'e (LPC-Clermont), S.-C. Lin, A. Maass, E. Medernach, I. Merelli, L. Milanesi, G. Rastelli, M. Reichstadt (LPC-Clermont), J. Salzemann (LPC-Clermont), H. Schwichtenberg, M. Sridhar, V. Kasam (LPC-Clermont), Y.-T. Wu, M. Zimmermann}, journal={arXiv preprint arXiv:cs/0611084}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611084}, primaryClass={cs.DC q-bio.QM} }
jacq2006large
arxiv-675132
cs/0611085
Fuzzy Logic Classification of Imaging Laser Desorption Fourier Transform Mass Spectrometry Data
<|reference_start|>Fuzzy Logic Classification of Imaging Laser Desorption Fourier Transform Mass Spectrometry Data: A fuzzy logic based classification engine has been developed for classifying mass spectra obtained with an imaging internal source Fourier transform mass spectrometer (I^2LD-FTMS). Traditionally, an operator uses the relative abundance of ions with specific mass-to-charge (m/z) ratios to categorize spectra. An operator does this by comparing the spectrum of m/z versus abundance of an unknown sample against a library of spectra from known samples. Automated positioning and acquisition allow I^2LD-FTMS to acquire data from very large grids; this would require classification of up to 3600 spectra per hour to keep pace with the acquisition. The tedious job of classifying numerous spectra generated in an I^2LD-FTMS imaging application can be replaced by a fuzzy rule base if the cues an operator uses can be encapsulated. We present the translation of linguistic rules to a fuzzy classifier for mineral phases in basalt. This paper also describes a method for gathering statistics on ions, which are not currently used in the rule base, but which may be candidates for making the rule base more accurate and complete or to form new rule bases based on data obtained from known samples. A spatial method for classifying spectra with low membership values, based on neighboring sample classifications, is also presented.<|reference_end|>
arxiv
@article{mcjunkin2006fuzzy, title={Fuzzy Logic Classification of Imaging Laser Desorption Fourier Transform Mass Spectrometry Data}, author={Timothy R. McJunkin and Jill R. Scott}, journal={arXiv preprint arXiv:cs/0611085}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611085}, primaryClass={cs.AI} }
mcjunkin2006fuzzy
arxiv-675133
cs/0611086
Reliable Multi-Path Routing Schemes for Real-Time Streaming
<|reference_start|>Reliable Multi-Path Routing Schemes for Real-Time Streaming: In off-line streaming, packet level erasure resilient Forward Error Correction (FEC) codes rely on the unrestricted buffering time at the receiver. In real-time streaming, the extremely short playback buffering time makes FEC inefficient for protecting a single path communication against long link failures. It has been shown that one alternative path added to a single path route makes packet level FEC applicable even when the buffering time is limited. Further path diversity, however, increases the number of underlying links increasing the total link failure rate, requiring from the sender possibly more FEC packets. We introduce a scalar coefficient for rating a multi-path routing topology of any complexity. It is called Redundancy Overall Requirement (ROR) and is proportional to the total number of adaptive FEC packets required for protection of the communication. With the capillary routing algorithm, introduced in this paper we build thousands of multi-path routing patterns. By computing their ROR coefficients, we show that contrary to the expectations the overall requirement in FEC codes is reduced when the further diversity of dual-path routing is achieved by the capillary routing algorithm.<|reference_end|>
arxiv
@article{gabrielyan2006reliable, title={Reliable Multi-Path Routing Schemes for Real-Time Streaming}, author={Emin Gabrielyan, Roger D. Hersch}, journal={arXiv preprint arXiv:cs/0611086}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611086}, primaryClass={cs.NI cs.IT math.IT} }
gabrielyan2006reliable
arxiv-675134
cs/0611087
A Combined LIFO-Priority Scheme for Overload Control of E-commerce Web Servers
<|reference_start|>A Combined LIFO-Priority Scheme for Overload Control of E-commerce Web Servers: E-commerce Web-servers often face overload conditions during which revenue-generating requests may be dropped or abandoned due to an increase in the browsing requests. In this paper we present a simple, yet effective, mechanism for overload control of E-commerce Web-servers. We develop an E-commerce workload model that separates the browsing requests from revenue-generating transaction requests. During overload, we apply LIFO discipline in the browsing queues and use a dynamic priority model to service them. The transaction queues are given absolute priority over the browsing queues. This is called the LIFO-Pri scheduling discipline. Experimental results show that LIFO-Pri dramatically improves the overall Web-server throughput while also increasing the completion rate of revenue-generating requests. The Web-server was able to operate at nearly 60% of its maximum capacity even when offered load was 1.5 times its capacity. Further, when compared to a single queue FIFO system, there was a seven-fold increase in the number of completed revenue-generating requests during overload.<|reference_end|>
arxiv
@article{singhmar2006a, title={A Combined LIFO-Priority Scheme for Overload Control of E-commerce Web Servers}, author={Naresh Singhmar, Vipul Mathur, Varsha Apte, D. Manjunath}, journal={arXiv preprint arXiv:cs/0611087}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611087}, primaryClass={cs.PF} }
singhmar2006a
arxiv-675135
cs/0611088
T-Theory Applications to Online Algorithms for the Server Problem
<|reference_start|>T-Theory Applications to Online Algorithms for the Server Problem: Although largely unnoticed by the online algorithms community, T-theory, a field of discrete mathematics, has contributed to the development of several online algorithms for the k-server problem. A brief summary of the k-server problem, and some important application concepts of T-theory, are given. Additionally, a number of known k-server results are restated using the established terminology of T-theory. Lastly, a previously unpublished 3-competitiveness proof, using T-theory, for the Harmonic algorithm for two servers is presented.<|reference_end|>
arxiv
@article{larmore2006t-theory, title={T-Theory Applications to Online Algorithms for the Server Problem}, author={Lawrence L. Larmore and James A. Oravec}, journal={arXiv preprint arXiv:cs/0611088}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611088}, primaryClass={cs.DS cs.DM} }
larmore2006t-theory
arxiv-675136
cs/0611089
The Extraction and Complexity Limits of Graphical Models for Linear Codes
<|reference_start|>The Extraction and Complexity Limits of Graphical Models for Linear Codes: Two broad classes of graphical modeling problems for codes can be identified in the literature: constructive and extractive problems. The former class of problems concern the construction of a graphical model in order to define a new code. The latter class of problems concern the extraction of a graphical model for a (fixed) given code. The design of a new low-density parity-check code for some given criteria (e.g. target block length and code rate) is an example of a constructive problem. The determination of a graphical model for a classical linear block code which implies a decoding algorithm with desired performance and complexity characteristics is an example of an extractive problem. This work focuses on extractive graphical model problems and aims to lay out some of the foundations of the theory of such problems for linear codes. The primary focus of this work is a study of the space of all graphical models for a (fixed) given code. The tradeoff between cyclic topology and complexity in this space is characterized via the introduction of a new bound: the tree-inducing cut-set bound. The proposed bound provides a more precise characterization of this tradeoff than that which can be obtained using existing tools (e.g. the Cut-Set Bound) and can be viewed as a generalization of the square-root bound for tail-biting trellises to graphical models with arbitrary cyclic topologies. Searching the space of graphical models for a given code is then enabled by introducing a set of basic graphical model transformation operations which are shown to span this space. Finally, heuristics for extracting novel graphical models for linear block codes using these transformations are investigated.<|reference_end|>
arxiv
@article{halford2006the, title={The Extraction and Complexity Limits of Graphical Models for Linear Codes}, author={Thomas R. Halford and Keith M. Chugg}, journal={arXiv preprint arXiv:cs/0611089}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611089}, primaryClass={cs.IT math.IT} }
halford2006the
arxiv-675137
cs/0611090
Algebraic Soft-Decision Decoding of Reed-Solomon Codes Using Bit-level Soft Information
<|reference_start|>Algebraic Soft-Decision Decoding of Reed-Solomon Codes Using Bit-level Soft Information: The performance of algebraic soft-decision decoding of Reed-Solomon codes using bit-level soft information is investigated. Optimal multiplicity assignment strategies of algebraic soft-decision decoding with infinite cost are first studied over erasure channels and the binary symmetric channel. The corresponding decoding radii are calculated in closed forms and tight bounds on the error probability are derived. The multiplicity assignment strategy and the corresponding performance analysis are then generalized to characterize the decoding region of algebraic soft-decision decoding over a mixed error and bit-level erasure channel. The bit-level decoding region of the proposed multiplicity assignment strategy is shown to be significantly larger than that of conventional Berlekamp-Massey decoding. As an application, a bit-level generalized minimum distance decoding algorithm is proposed. The proposed decoding compares favorably with many other Reed-Solomon soft-decision decoding algorithms over various channels. Moreover, owing to the simplicity of the proposed bit-level generalized minimum distance decoding, its performance can be tightly bounded using order statistics.<|reference_end|>
arxiv
@article{jiang2006algebraic, title={Algebraic Soft-Decision Decoding of Reed-Solomon Codes Using Bit-level Soft Information}, author={Jing Jiang and Krishna R. Narayanan}, journal={arXiv preprint arXiv:cs/0611090}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611090}, primaryClass={cs.IT math.IT} }
jiang2006algebraic
arxiv-675138
cs/0611091
Lossy Bulk Synchronous Parallel Processing Model for Very Large Scale Grids
<|reference_start|>Lossy Bulk Synchronous Parallel Processing Model for Very Large Scale Grids: The performance of a parallel algorithm in a very large scale grid is significantly influenced by the underlying Internet protocols and inter-connectivity. Many grid programming platforms use TCP due to its reliability, usually with some optimizations to reduce its costs. However, TCP does not perform well in a high bandwidth and high delay network environment. On the other hand, UDP is the fastest protocol available because it omits the connection setup process, acknowledgments and retransmissions, sacrificing reliable transfer. Many new bulk data transfer schemes using UDP for data transmission such as RBUDP, Tsunami, and SABUL have been introduced and shown to have better performance compared to TCP. In this paper, we consider the use of UDP and examine the relationship between packet loss and speedup with respect to the number of grid nodes. Our measurements suggest that packet loss rates between 5%-15% on average are not uncommon between PlanetLab nodes that are widely distributed over the Internet. We show that transmitting multiple copies of the same packet produces higher speedup. We show the minimum number of packet duplications required to maximize the possible speedup for a given number of nodes using a BSP based model. Our work demonstrates that by using an appropriate number of packet copies, we can increase the performance of a parallel program.<|reference_end|>
arxiv
@article{sundararajan2006lossy, title={Lossy Bulk Synchronous Parallel Processing Model for Very Large Scale Grids}, author={Elankovan Sundararajan, Aaron Harwood, Kotagiri Ramamohanarao}, journal={arXiv preprint arXiv:cs/0611091}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611091}, primaryClass={cs.DC cs.CC cs.PF} }
sundararajan2006lossy
arxiv-675139
cs/0611092
Design approaches in technology enhanced learning
<|reference_start|>Design approaches in technology enhanced learning: Design is critical to the successful development of any interactive learning environment (ILE). Moreover, in technology enhanced learning (TEL), the design process requires input from many diverse areas of expertise. As such, anyone undertaking tool development is required to directly address the design challenge from multiple perspectives. We provide a motivation and rationale for design approaches for learning technologies that draws upon Simon's seminal proposition of Design Science (Simon, 1969). We then review the application of Design Experiments (Brown, 1992) and Design Patterns (Alexander et al., 1977) and argue that a patterns approach has the potential to address many of the critical challenges faced by learning technologists.<|reference_end|>
arxiv
@article{mor2006design, title={Design approaches in technology enhanced learning}, author={Yishay Mor and Niall Winters}, journal={arXiv preprint arXiv:cs/0611092}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611092}, primaryClass={cs.SE cs.CY} }
mor2006design
arxiv-675140
cs/0611093
Effectiveness of Garbage Collection in MIT/GNU Scheme
<|reference_start|>Effectiveness of Garbage Collection in MIT/GNU Scheme: Scheme uses garbage collection for heap memory management. Ideally, garbage collectors should be able to reclaim all dead objects, i.e. objects that will not be used in future. However, garbage collectors collect only those dead objects that are not reachable from any program variable. Dead objects that are reachable from program variables are not reclaimed. In this paper we describe our experiments to measure the effectiveness of garbage collection in MIT/GNU Scheme. We compute the drag time of objects, i.e. the time for which an object remains in heap memory after its last use. The number of dead objects and the drag time together indicate opportunities for improving garbage collection. Our experiments reveal that up to 26% of dead objects remain in memory. The average drag time is up to 37% of execution time. Overall, we observe memory saving potential ranging from 9% to 65%.<|reference_end|>
arxiv
@article{karkare2006effectiveness, title={Effectiveness of Garbage Collection in MIT/GNU Scheme}, author={Amey Karkare, Amitabha Sanyal, Uday Khedker}, journal={arXiv preprint arXiv:cs/0611093}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611093}, primaryClass={cs.PL cs.PF} }
karkare2006effectiveness
arxiv-675141
cs/0611094
Reducing Order Enforcement Cost in Complex Query Plans
<|reference_start|>Reducing Order Enforcement Cost in Complex Query Plans: Algorithms that exploit sort orders are widely used to implement joins, grouping, duplicate elimination and other set operations. Query optimizers traditionally deal with sort orders by using the notion of interesting orders. The number of interesting orders is unfortunately factorial in the number of participating attributes. Optimizer implementations use heuristics to prune the number of interesting orders, but the quality of the heuristics is unclear. Increasingly complex decision support queries and increasing use of covering indices, which provide multiple alternative sort orders for relations, motivate us to better address the problem of optimization with interesting orders. We show that even a simplified version of optimization with sort orders is NP-hard and provide principled heuristics for choosing interesting orders. We have implemented the proposed techniques in a Volcano-style cost-based optimizer, and our performance study shows significant improvements in estimated cost. We also executed our plans on a widely used commercial database system, and on PostgreSQL, and found that actual execution times for our plans were significantly better than for plans generated by those systems in several cases.<|reference_end|>
arxiv
@article{guravannavar2006reducing, title={Reducing Order Enforcement Cost in Complex Query Plans}, author={Ravindra Guravannavar, S Sudarshan, Ajit A Diwan, Ch. Sobhan Babu}, journal={arXiv preprint arXiv:cs/0611094}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611094}, primaryClass={cs.DB} }
guravannavar2006reducing
arxiv-675142
cs/0611095
Dense Gaussian Sensor Networks: Minimum Achievable Distortion and the Order Optimality of Separation
<|reference_start|>Dense Gaussian Sensor Networks: Minimum Achievable Distortion and the Order Optimality of Separation: We investigate the optimal performance of dense sensor networks by studying the joint source-channel coding problem. The overall goal of the sensor network is to take measurements from an underlying random process, code and transmit those measurement samples to a collector node in a cooperative multiple access channel with potential feedback, and reconstruct the entire random process at the collector node. We provide lower and upper bounds for the minimum achievable expected distortion when the underlying random process is Gaussian. When the Gaussian random process satisfies some general conditions, we evaluate the lower and upper bounds explicitly, and show that they are of the same order for a wide range of power constraints. Thus, for these random processes, under these power constraints, we express the minimum achievable expected distortion as a function of the power constraint. Further, we show that the achievability scheme that achieves the lower bound on the distortion is a separation-based scheme that is composed of multi-terminal rate-distortion coding and amplify-and-forward channel coding. Therefore, we conclude that separation is order-optimal for the dense Gaussian sensor network scenario under consideration, when the underlying random process satisfies some general conditions.<|reference_end|>
arxiv
@article{liu2006dense, title={Dense Gaussian Sensor Networks: Minimum Achievable Distortion and the Order Optimality of Separation}, author={Nan Liu and Sennur Ulukus}, journal={arXiv preprint arXiv:cs/0611095}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611095}, primaryClass={cs.IT math.IT} }
liu2006dense
arxiv-675143
cs/0611096
On the Rate Distortion Function of Certain Sources with a Proportional Mean-Square Error Distortion Measure
<|reference_start|>On the Rate Distortion Function of Certain Sources with a Proportional Mean-Square Error Distortion Measure: New bounds on the rate distortion function of certain non-Gaussian sources, with a proportional-weighted mean-square error (MSE) distortion measure, are given. The growth, g, of the rate distortion function, as a result of changing from a non-weighted MSE distortion measure to a proportional-weighted distortion criterion is analyzed. It is shown that for a small distortion, d, the growth, g, and the difference between the rate distortion functions of a Gaussian memoryless source and a source with memory, both with the same marginal statistics and MSE distortion measure, share the same lower bound. Several examples and applications are also given.<|reference_end|>
arxiv
@article{binia2006on, title={On the Rate Distortion Function of Certain Sources with a Proportional Mean-Square Error Distortion Measure}, author={Jacob Binia}, journal={arXiv preprint arXiv:cs/0611096}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611096}, primaryClass={cs.IT math.IT} }
binia2006on
arxiv-675144
cs/0611097
Conditionally Cycle-Free Generalized Tanner Graphs: Theory and Application to High-Rate Serially Concatenated Codes
<|reference_start|>Conditionally Cycle-Free Generalized Tanner Graphs: Theory and Application to High-Rate Serially Concatenated Codes: Generalized Tanner graphs have been implicitly studied by a number of authors under the rubric of generalized parity-check matrices. This work considers the conditioning of binary hidden variables in such models in order to break all cycles and thus derive optimal soft-in soft-out (SISO) decoding algorithms. Conditionally cycle-free generalized Tanner graphs are shown to imply optimal SISO decoding algorithms for the first-order Reed-Muller codes and their duals - the extended Hamming codes - which are substantially less complex than conventional bit-level trellis decoding. The study of low-complexity optimal SISO decoding algorithms for the family of extended Hamming codes is practically motivated. Specifically, it is shown that extended Hamming codes offer an attractive alternative to high-rate convolutional codes in terms of both performance and complexity for use in very high-rate, very low-floor, serially concatenated coding schemes.<|reference_end|>
arxiv
@article{halford2006conditionally, title={Conditionally Cycle-Free Generalized Tanner Graphs: Theory and Application to High-Rate Serially Concatenated Codes}, author={Thomas R. Halford and Keith M. Chugg}, journal={arXiv preprint arXiv:cs/0611097}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611097}, primaryClass={cs.IT math.IT} }
halford2006conditionally
arxiv-675145
cs/0611098
Analysis of an Efficient Distributed Algorithm for Mutual Exclusion (Average-Case Analysis of Path Reversal)
<|reference_start|>Analysis of an Efficient Distributed Algorithm for Mutual Exclusion (Average-Case Analysis of Path Reversal): The algorithm analysed by Na\"{i}mi, Trehel and Arnold was the very first distributed algorithm to solve the mutual exclusion problem in complete networks by using a dynamic logical tree structure as its basic distributed data structure, viz. a path reversal transformation in rooted n-node trees; besides, it was also the first one to achieve a logarithmic average-case message complexity. The present paper proposes a direct and general approach to compute the moments of the cost of path reversal. It basically uses one-one correspondences between combinatorial structures and the associated probability generating functions: the expected cost of path reversal is thus proved to be exactly $H_{n-1}$. Moreover, the time and message complexity of the algorithm as well as randomized bounds on its worst-case message complexity in arbitrary networks are also given. The average-case analysis of path reversal and the analysis of this distributed algorithm for mutual exclusion are thus fully completed in the paper. The general techniques used should also prove applicable and fruitful when adapted to the most efficient recent tree-based distributed algorithms for mutual exclusion, which require powerful tools, particularly for average-case analyses.<|reference_end|>
arxiv
@article{lavault2006analysis, title={Analysis of an Efficient Distributed Algorithm for Mutual Exclusion (Average-Case Analysis of Path Reversal)}, author={Christian Lavault (IRISA / INRIA Rennes)}, journal={LNCS 634 (1992) 133-144}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611098}, primaryClass={cs.DC cs.DS} }
lavault2006analysis
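Editor's toy simulation of the path-reversal transformation analysed in the entry above, under the simplifying assumption that each request comes from a node chosen uniformly at random (the paper's probabilistic model is stated more carefully); the empirical average reversal cost can then be compared against the harmonic number H_{n-1}.

```python
import random

def path_reversal_cost(parent, requester):
    """Walk from the requester up to the current root, then make every node on
    that path (including the old root) point to the requester, which becomes
    the new root. Returns the number of edges traversed."""
    path, node = [], requester
    while parent[node] != node:      # the root points to itself
        path.append(node)
        node = parent[node]
    path.append(node)                # the old root
    for v in path:
        parent[v] = requester
    return len(path) - 1

n, trials = 64, 100_000
parent = [0] * n                     # hypothetical initial tree: a star rooted at node 0
rnd = random.Random(1)
costs = [path_reversal_cost(parent, rnd.randrange(n)) for _ in range(trials)]
harmonic = sum(1.0 / k for k in range(1, n))   # H_{n-1}
print(sum(costs) / trials, harmonic)
```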
arxiv-675146
cs/0611099
On the space complexity of one-pass compression
<|reference_start|>On the space complexity of one-pass compression: We study how much memory one-pass compression algorithms need to compete with the best multi-pass algorithms. We call a one-pass algorithm an $f(n, \ell)$-footprint compressor if, given $n$, $\ell$ and an $n$-ary string $S$, it stores $S$ in $(O(H_\ell(S)) + o(\log n))\,|S| + O(n^{\ell + 1} \log n)$ bits -- where $H_\ell(S)$ is the $\ell$th-order empirical entropy of $S$ -- while using at most $f(n, \ell)$ bits of memory. We prove that, for any $\epsilon > 0$ and some $f(n, \ell) \in O(n^{\ell + \epsilon} \log n)$, there is an $f(n, \ell)$-footprint compressor; on the other hand, there is no $f(n, \ell)$-footprint compressor for $f(n, \ell) \in o(n^\ell \log n)$.<|reference_end|>
arxiv
@article{gagie2006on, title={On the space complexity of one-pass compression}, author={Travis Gagie}, journal={arXiv preprint arXiv:cs/0611099}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611099}, primaryClass={cs.IT math.IT} }
gagie2006on
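For orientation, a small editor's sketch of the $\ell$th-order empirical entropy $H_\ell(S)$ appearing in the bound above, computed directly from context counts. It normalizes by the number of positions that have a full-length context, a simplification (definitions in the literature sometimes normalize by $|S|$); the example string and orders are arbitrary.

```python
from collections import Counter, defaultdict
from math import log2

def empirical_entropy(s: str, order: int) -> float:
    """Average, over positions, of the zeroth-order entropy of the symbol
    following each length-`order` context, in bits per symbol."""
    contexts = defaultdict(Counter)
    for i in range(order, len(s)):
        contexts[s[i - order:i]][s[i]] += 1
    total = sum(sum(c.values()) for c in contexts.values())
    bits = 0.0
    for counter in contexts.values():
        m = sum(counter.values())
        bits -= sum(cnt * log2(cnt / m) for cnt in counter.values())
    return bits / total if total else 0.0

s = "abracadabraabracadabra"
print(empirical_entropy(s, order=0), empirical_entropy(s, order=2))
```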
arxiv-675147
cs/0611100
Model Theory of Ultrafinitism I: Fuzzy Initial Segments of Arithmetics
<|reference_start|>Model Theory of Ultrafinitism I: Fuzzy Initial Segments of Arithmetics: This article is the first of an intended series of works on the model theory of Ultrafinitism. It is roughly divided into two parts. The first one addresses some of the issues related to ultrafinitistic programs, as well as some of the core ideas proposed thus far. The second part of the paper presents a model of ultrafinitistic arithmetics based on the notion of fuzzy initial segments of the standard natural numbers series. We also introduce a proof theory and a semantics for ultrafinitism through which feasibly consistent theories can be treated on the same footing as their classically consistent counterparts. We conclude with a brief sketch of a foundational program, that aims at reproducing the transfinite within the finite realm.<|reference_end|>
arxiv
@article{mannucci2006model, title={Model Theory of Ultrafinitism I: Fuzzy Initial Segments of Arithmetics}, author={Mirco A. Mannucci, Rose M. Cherubin}, journal={arXiv preprint arXiv:cs/0611100}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611100}, primaryClass={cs.LO} }
mannucci2006model
arxiv-675148
cs/0611101
Fourier meets M\"obius: fast subset convolution
<|reference_start|>Fourier meets M\"obius: fast subset convolution: We present a fast algorithm for the subset convolution problem: given functions f and g defined on the lattice of subsets of an n-element set N, compute their subset convolution f*g, defined for all S\subseteq N by (f * g)(S) = \sum_{T \subseteq S}f(T) g(S\setminus T), where addition and multiplication are carried out in an arbitrary ring. Via M\"{o}bius transform and inversion, our algorithm evaluates the subset convolution in O(n^2 2^n) additions and multiplications, substantially improving upon the straightforward O(3^n) algorithm. Specifically, if the input functions have an integer range {-M,-M+1,...,M}, their subset convolution over the ordinary sum-product ring can be computed in O^*(2^n log M) time; the notation O^* suppresses polylogarithmic factors. Furthermore, using a standard embedding technique we can compute the subset convolution over the max-sum or min-sum semiring in O^*(2^n M) time. To demonstrate the applicability of fast subset convolution, we present the first O^*(2^k n^2 + n m) algorithm for the minimum Steiner tree problem in graphs with n vertices, k terminals, and m edges with bounded integer weights, improving upon the O^*(3^k n + 2^k n^2 + n m) time bound of the classical Dreyfus-Wagner algorithm. We also discuss extensions to recent O^*(2^n)-time algorithms for covering and partitioning problems (Bj\"{o}rklund and Husfeldt, FOCS 2006; Koivisto, FOCS 2006).<|reference_end|>
arxiv
@article{björklund2006fourier, title={Fourier meets M\"{o}bius: fast subset convolution}, author={Andreas Bj"orklund, Thore Husfeldt, Petteri Kaski, Mikko Koivisto}, journal={arXiv preprint arXiv:cs/0611101}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611101}, primaryClass={cs.DS cs.DM math.CO} }
björklund2006fourier
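Editor's illustration of the ranked zeta/Möbius scheme outlined in the abstract above (O(n^2 2^n) ring operations); it follows the abstract's description but is a sketch, not the authors' code.

```python
import random

def subset_convolution(f, g, n):
    """h(S) = sum over T subseteq S of f[T] * g[S \\ T], for all S, computed
    in O(n^2 * 2^n) ring operations via ranked zeta and Moebius transforms."""
    N = 1 << n
    pc = [bin(S).count("1") for S in range(N)]
    fz = [[f[S] if pc[S] == k else 0 for S in range(N)] for k in range(n + 1)]
    gz = [[g[S] if pc[S] == k else 0 for S in range(N)] for k in range(n + 1)]
    for rows in (fz, gz):                          # ranked zeta transforms
        for k in range(n + 1):
            for i in range(n):
                for S in range(N):
                    if S & (1 << i):
                        rows[k][S] += rows[k][S ^ (1 << i)]
    hz = [[sum(fz[j][S] * gz[k - j][S] for j in range(k + 1)) for S in range(N)]
          for k in range(n + 1)]                   # rank-wise pointwise products
    for k in range(n + 1):                         # ranked Moebius inversion
        for i in range(n):
            for S in range(N):
                if S & (1 << i):
                    hz[k][S] -= hz[k][S ^ (1 << i)]
    return [hz[pc[S]][S] for S in range(N)]

# Sanity check against the straightforward O(3^n) evaluation.
n = 4
f = [random.randint(0, 9) for _ in range(1 << n)]
g = [random.randint(0, 9) for _ in range(1 << n)]
brute = [sum(f[T] * g[S ^ T] for T in range(1 << n) if T & S == T)
         for S in range(1 << n)]
assert subset_convolution(f, g, n) == brute
```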
arxiv-675149
cs/0611102
Extending the Trusted Path in Client-Server Interaction
<|reference_start|>Extending the Trusted Path in Client-Server Interaction: We present a method to secure the complete path between a server and the local human user at a network node. This is useful for scenarios like internet banking, electronic signatures, or online voting. Protection of input authenticity and output integrity and authenticity is accomplished by a combination of traditional and novel technologies, e.g., SSL, ActiveX, and DirectX. Our approach does not require administrative privileges to deploy and is hence suitable for consumer applications. Results are based on the implementation of a proof-of-concept application for the Windows platform.<|reference_end|>
arxiv
@article{langweg2006extending, title={Extending the Trusted Path in Client-Server Interaction}, author={Hanno Langweg and Tommy Kristiansen}, journal={arXiv preprint arXiv:cs/0611102}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611102}, primaryClass={cs.CR} }
langweg2006extending
arxiv-675150
cs/0611103
Barriers and local minima in energy landscapes of stochastic local search
<|reference_start|>Barriers and local minima in energy landscapes of stochastic local search: A local search algorithm operating on an instance of a Boolean constraint satisfaction problem (in particular, k-SAT) can be viewed as a stochastic process traversing successive adjacent states in an ``energy landscape'' defined by the problem instance on the n-dimensional Boolean hypercube. We investigate analytically the worst-case topography of such landscapes in the context of satisfiable k-SAT via a random ensemble of satisfiable ``k-regular'' linear equations modulo 2. We show that for each fixed k=3,4,..., the typical k-SAT energy landscape induced by an instance drawn from the ensemble has a set of 2^{\Omega(n)} local energy minima, each separated by an unconditional \Omega(n) energy barrier from each of the O(1) ground states, that is, solution states with zero energy. The main technical aspect of the analysis is that a random k-regular 0/1 matrix constitutes a strong boundary expander with almost full GF(2)-linear rank, a property which also enables us to prove a 2^{\Omega(n)} lower bound for the expected number of steps required by the focused random walk heuristic to solve typical instances drawn from the ensemble. These results paint a grim picture of the worst-case topography of k-SAT for local search, and constitute apparently the first rigorous analysis of the growth of energy barriers in a random ensemble of k-SAT landscapes as the number of variables n is increased.<|reference_end|>
arxiv
@article{kaski2006barriers, title={Barriers and local minima in energy landscapes of stochastic local search}, author={Petteri Kaski}, journal={arXiv preprint arXiv:cs/0611103}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611103}, primaryClass={cs.CC cond-mat.stat-mech} }
kaski2006barriers
arxiv-675151
cs/0611104
Learning and discrimination through STDP in a top-down modulated associative memory
<|reference_start|>Learning and discrimination through STDP in a top-down modulated associative memory: This article underlines the learning and discrimination capabilities of a model of associative memory based on artificial networks of spiking neurons. Inspired from neuropsychology and neurobiology, the model implements top-down modulations, as in neocortical layer V pyramidal neurons, with a learning rule based on synaptic plasticity (STDP), for performing a multimodal association learning task. A temporal correlation method of analysis proves the ability of the model to associate specific activity patterns to different samples of stimulation. Even in the absence of initial learning and with continuously varying weights, the activity patterns become stable enough for discrimination.<|reference_end|>
arxiv
@article{mouraud2006learning, title={Learning and discrimination through STDP in a top-down modulated associative memory}, author={Anthony Mouraud (ISC, GRIMAAG), H'el`ene Paugam-Moisy (ISC)}, journal={Proceedings of 14 European Symposium on Artificial Neural Networks (ESANN 2006) (03/2006) 611-616}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611104}, primaryClass={cs.NE cs.AI} }
mouraud2006learning
arxiv-675152
cs/0611105
Clustering and Sharing Incentives in BitTorrent Systems
<|reference_start|>Clustering and Sharing Incentives in BitTorrent Systems: Peer-to-peer protocols play an increasingly instrumental role in Internet content distribution. Consequently, it is important to gain a full understanding of how these protocols behave in practice and how their parameters impact overall performance. We present the first experimental investigation of the peer selection strategy of the popular BitTorrent protocol in an instrumented private torrent. By observing the decisions of more than 40 nodes, we validate three BitTorrent properties that, though widely believed to hold, have not been demonstrated experimentally. These include the clustering of similar-bandwidth peers, the effectiveness of BitTorrent's sharing incentives, and the peers' high average upload utilization. In addition, our results show that BitTorrent's new choking algorithm in seed state provides uniform service to all peers, and that an underprovisioned initial seed leads to the absence of peer clustering and less effective sharing incentives. Based on our observations, we provide guidelines for seed provisioning by content providers, and discuss a tracker protocol extension that addresses an identified limitation of the protocol.<|reference_end|>
arxiv
@article{legout2006clustering, title={Clustering and Sharing Incentives in BitTorrent Systems}, author={Arnaud Legout (INRIA Sophia Antipolis / INRIA Rh^one-Alpes), Nikitas Liogkas, Eddie Kohler, Lixia Zhang}, journal={arXiv preprint arXiv:cs/0611105}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611105}, primaryClass={cs.NI} }
legout2006clustering
arxiv-675153
cs/0611106
Mixing and non-mixing local minima of the entropy contrast for blind source separation
<|reference_start|>Mixing and non-mixing local minima of the entropy contrast for blind source separation: In this paper, both non-mixing and mixing local minima of the entropy are analyzed from the viewpoint of blind source separation (BSS); they correspond respectively to acceptable and spurious solutions of the BSS problem. The contribution of this work is twofold. First, a Taylor development is used to show that the \textit{exact} output entropy cost function has a non-mixing minimum when this output is proportional to \textit{any} of the non-Gaussian sources, and not only when the output is proportional to the lowest entropic source. Second, in order to prove that mixing entropy minima exist when the source densities are strongly multimodal, an entropy approximator is proposed. The latter has the major advantage that an error bound can be provided. Even if this approximator (and the associated bound) is used here in the BSS context, it can be applied for estimating the entropy of any random variable with multimodal density.<|reference_end|>
arxiv
@article{vrins2006mixing, title={Mixing and non-mixing local minima of the entropy contrast for blind source separation}, author={F. Vrins, D.-T. Pham and M. Verleysen}, journal={arXiv preprint arXiv:cs/0611106}, year={2006}, doi={10.1109/TIT.2006.890716}, archivePrefix={arXiv}, eprint={cs/0611106}, primaryClass={cs.IT math.IT} }
vrins2006mixing
arxiv-675154
cs/0611107
Rectangular Layouts and Contact Graphs
<|reference_start|>Rectangular Layouts and Contact Graphs: Contact graphs of isothetic rectangles unify many concepts from applications including VLSI and architectural design, computational geometry, and GIS. Minimizing the area of their corresponding {\em rectangular layouts} is a key problem. We study the area-optimization problem and show that it is NP-hard to find a minimum-area rectangular layout of a given contact graph. We present O(n)-time algorithms that construct $O(n^2)$-area rectangular layouts for general contact graphs and $O(n\log n)$-area rectangular layouts for trees. (For trees, this is an $O(\log n)$-approximation algorithm.) We also present an infinite family of graphs (rsp., trees) that require $\Omega(n^2)$ (rsp., $\Omega(n\log n)$) area. We derive these results by presenting a new characterization of graphs that admit rectangular layouts using the related concept of {\em rectangular duals}. A corollary to our results relates the class of graphs that admit rectangular layouts to {\em rectangle of influence drawings}.<|reference_end|>
arxiv
@article{buchsbaum2006rectangular, title={Rectangular Layouts and Contact Graphs}, author={Adam L. Buchsbaum, Emden R. Gansner, Cecilia M. Procopiuc, Suresh Venkatasubramanian}, journal={arXiv preprint arXiv:cs/0611107}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611107}, primaryClass={cs.DS cs.DM} }
buchsbaum2006rectangular
arxiv-675155
cs/0611108
On the Complexity of Processing Massive, Unordered, Distributed Data
<|reference_start|>On the Complexity of Processing Massive, Unordered, Distributed Data: An existing approach for dealing with massive data sets is to stream over the input in few passes and perform computations with sublinear resources. This method does not work for truly massive data where even making a single pass over the data with a processor is prohibitive. Successful log processing systems in practice such as Google's MapReduce and Apache's Hadoop use multiple machines. They efficiently perform a certain class of highly distributable computations defined by local computations that can be applied in any order to the input. Motivated by the success of these systems, we introduce a simple algorithmic model for massive, unordered, distributed (mud) computation. We initiate the study of understanding its computational complexity. Our main result is a positive one: any unordered function that can be computed by a streaming algorithm can also be computed with a mud algorithm, with comparable space and communication complexity. We extend this result to some useful classes of approximate and randomized streaming algorithms. We also give negative results, using communication complexity arguments to prove that extensions to private randomness, promise problems and indeterminate functions are impossible. We believe that the line of research we introduce in this paper has the potential for tremendous impact. The distributed systems that motivate our work successfully process data at an unprecedented scale, distributed over hundreds or even thousands of machines, and perform hundreds of such analyses each day. The mud model (and its generalizations) inspire a set of complexity-theoretic questions that lie at their heart.<|reference_end|>
arxiv
@article{feldman2006on, title={On the Complexity of Processing Massive, Unordered, Distributed Data}, author={Jon Feldman, S. Muthukrishnan, Anastasios Sidiropoulos, Cliff Stein, Zoya Svitkina}, journal={arXiv preprint arXiv:cs/0611108}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611108}, primaryClass={cs.CC cs.DC} }
feldman2006on
arxiv-675156
cs/0611109
An approach to RAID-6 based on cyclic groups of a prime order
<|reference_start|>An approach to RAID-6 based on cyclic groups of a prime order: As the size of data storing arrays of disks grows, it becomes vital to protect data against double disk failures. A popular method of protection is via the Reed-Solomon (RS) code with two parity words. In the present paper we construct alternative examples of linear block codes protecting against two erasures. Our construction is based on an abstract notion of cone. Concrete cones are constructed via matrix representations of cyclic groups of prime order. In particular, this construction produces EVENODD code. Interesting conditions on the prime number arise in our analysis of these codes. At the end, we analyse an assembly implementation of the corresponding system on a general purpose processor and compare its write and recovery speed with the standard DP-RAID system.<|reference_end|>
arxiv
@article{jackson2006an, title={An approach to RAID-6 based on cyclic groups of a prime order}, author={R. Jackson, D. Rumynin, O. Zaboronski}, journal={arXiv preprint arXiv:cs/0611109}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611109}, primaryClass={cs.IT math.IT math.NT} }
jackson2006an
arxiv-675157
cs/0611110
The Implications of Network-Centric Software Systems on Software Architecture: A Critical Evaluation
<|reference_start|>The Implications of Network-Centric Software Systems on Software Architecture: A Critical Evaluation: The purpose of this paper is to evaluate the impact of emerging network-centric software systems on the field of software architecture. We first develop an insight concerning the term "network-centric" by presenting its origin and its implications within the context of software architecture. On the basis of this insight, we present our definition of a network-centric framework and its distinguishing characteristics. We then enumerate the challenges that face the field of software architecture as software development shifts from a platform-centric to a network-centric model. In order to face these challenges, we propose a formal approach embodied in a new architectural style that supports overcoming these challenges at the architectural level. Finally, we conclude by presenting an illustrative example to demonstrate the usefulness of the concepts of network centricity, summarizing our contributions, and linking our approach to future work that needs to be done in this area.<|reference_end|>
arxiv
@article{chigani2006the, title={The Implications of Network-Centric Software Systems on Software Architecture: A Critical Evaluation}, author={Amine Chigani, James D. Arthur}, journal={arXiv preprint arXiv:cs/0611110}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611110}, primaryClass={cs.SE} }
chigani2006the
arxiv-675158
cs/0611111
Distributed Control of Microscopic Robots in Biomedical Applications
<|reference_start|>Distributed Control of Microscopic Robots in Biomedical Applications: Current developments in molecular electronics, motors and chemical sensors could enable constructing large numbers of devices able to sense, compute and act in micron-scale environments. Such microscopic machines, of sizes comparable to bacteria, could simultaneously monitor entire populations of cells individually in vivo. This paper reviews plausible capabilities for microscopic robots and the physical constraints due to operation in fluids at low Reynolds number, diffusion-limited sensing and thermal noise from Brownian motion. Simple distributed controls are then presented in the context of prototypical biomedical tasks, which require control decisions on millisecond time scales. The resulting behaviors illustrate trade-offs among speed, accuracy and resource use. A specific example is monitoring for patterns of chemicals in a flowing fluid released at chemically distinctive sites. Information collected from a large number of such devices allows estimating properties of cell-sized chemical sources in a macroscopic volume. The microscopic devices moving with the fluid flow in small blood vessels can detect chemicals released by tissues in response to localized injury or infection. We find the devices can readily discriminate a single cell-sized chemical source from the background chemical concentration, providing high-resolution sensing in both time and space. By contrast, such a source would be difficult to distinguish from background when diluted throughout the blood volume as obtained with a blood sample.<|reference_end|>
arxiv
@article{hogg2006distributed, title={Distributed Control of Microscopic Robots in Biomedical Applications}, author={Tad Hogg}, journal={arXiv preprint arXiv:cs/0611111}, year={2006}, doi={10.1007/978-1-4471-5113-5_8}, archivePrefix={arXiv}, eprint={cs/0611111}, primaryClass={cs.RO cs.MA} }
hogg2006distributed
arxiv-675159
cs/0611112
Channel Coding: The Road to Channel Capacity
<|reference_start|>Channel Coding: The Road to Channel Capacity: Starting from Shannon's celebrated 1948 channel coding theorem, we trace the evolution of channel coding from Hamming codes to capacity-approaching codes. We focus on the contributions that have led to the most significant improvements in performance vs. complexity for practical applications, particularly on the additive white Gaussian noise (AWGN) channel. We discuss algebraic block codes, and why they did not prove to be the way to get to the Shannon limit. We trace the antecedents of today's capacity-approaching codes: convolutional codes, concatenated codes, and other probabilistic coding schemes. Finally, we sketch some of the practical applications of these codes.<|reference_end|>
arxiv
@article{costello2006channel, title={Channel Coding: The Road to Channel Capacity}, author={Daniel J. Costello Jr., and G. David Forney Jr}, journal={arXiv preprint arXiv:cs/0611112}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611112}, primaryClass={cs.IT math.IT} }
costello2006channel
arxiv-675160
cs/0611113
An Anthological Review of Research Utilizing MontyLingua, a Python-Based End-to-End Text Processor
<|reference_start|>An Anthological Review of Research Utilizing MontyLingua, a Python-Based End-to-End Text Processor: MontyLingua, an integral part of ConceptNet, which is currently the largest commonsense knowledge base, is an English text processor developed using the Python programming language at the MIT Media Lab. The main feature of MontyLingua is its coverage of all aspects of English text processing, from raw input text to semantic meanings and summary generation, yet each component in MontyLingua is loosely coupled to the others at the architectural and code level, which enables individual components to be used independently or substituted. However, there has been no review exploring the role of MontyLingua in recent research work utilizing it. This paper reviews the use of and roles played by MontyLingua and its components in research work published in 19 articles between October 2004 and August 2006. We observed a diversified use of MontyLingua in many different areas, both generic and domain-specific. Although use of the text summarization component was not observed, we are optimistic that it will have a crucial role in managing the current trend of information overload in future research.<|reference_end|>
arxiv
@article{ling2006an, title={An Anthological Review of Research Utilizing MontyLingua, a Python-Based End-to-End Text Processor}, author={Maurice HT Ling}, journal={Ling, Maurice HT. 2006. An Anthological Review of Research Utilizing MontyLingua, a Python-Based End-to-End Text Processor. The Python Papers 1 (1): 5-13}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611113}, primaryClass={cs.CL} }
ling2006an
arxiv-675161
cs/0611114
Very Sparse Stable Random Projections, Estimators and Tail Bounds for Stable Random Projections
<|reference_start|>Very Sparse Stable Random Projections, Estimators and Tail Bounds for Stable Random Projections: This paper will focus on three different aspects in improving the current practice of stable random projections. Firstly, we propose {\em very sparse stable random projections} to significantly reduce the processing and storage cost, by replacing the $\alpha$-stable distribution with a mixture of a symmetric $\alpha$-Pareto distribution (with probability $\beta$, $0<\beta\leq1$) and a point mass at the origin (with a probability $1-\beta$). This leads to a significant $\frac{1}{\beta}$-fold speedup for small $\beta$. Secondly, we provide an improved estimator for recovering the original $l_\alpha$ norms from the projected data. The standard estimator is based on the (absolute) sample median, while we suggest using the geometric mean. The geometric mean estimator we propose is strictly unbiased and is easier to study. Moreover, the geometric mean estimator is more accurate, especially non-asymptotically. Thirdly, we provide an adequate answer to the basic question of how many projections (samples) are needed for achieving some pre-specified level of accuracy. Earlier work (Indyk, FOCS 2000; Indyk, IEEE TKDE 2003) did not provide a criterion that can be used in practice. The geometric mean estimator we propose allows us to derive sharp tail bounds which can be expressed in exponential forms with constants explicitly given.<|reference_end|>
arxiv
@article{li2006very, title={Very Sparse Stable Random Projections, Estimators and Tail Bounds for Stable Random Projections}, author={Ping Li}, journal={arXiv preprint arXiv:cs/0611114}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611114}, primaryClass={cs.DS cs.IT cs.LG math.IT} }
li2006very
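Editor's rough sketch of the sampling step for the very sparse projection entries described above: each entry is zero with probability $1-\beta$ and a symmetric $\alpha$-Pareto variate otherwise. The geometric-mean estimate shown at the end omits the exact scaling constant derived in the paper, so it only tracks the $l_\alpha$ norm up to that constant; $\beta$, $\alpha$ and the dimensions below are hypothetical.

```python
import numpy as np

def very_sparse_stable_matrix(d, k, alpha=1.0, beta=0.1, seed=None):
    """d x k matrix whose entries are 0 with probability 1-beta and otherwise
    a symmetric alpha-Pareto variate (|x| = U**(-1/alpha) with a random sign)."""
    rng = np.random.default_rng(seed)
    nonzero = rng.random((d, k)) < beta
    magnitude = rng.random((d, k)) ** (-1.0 / alpha)
    sign = rng.choice([-1.0, 1.0], size=(d, k))
    return np.where(nonzero, sign * magnitude, 0.0)

rng = np.random.default_rng(0)
x = rng.standard_normal(1000)                 # hypothetical data vector
R = very_sparse_stable_matrix(1000, 50, alpha=1.0, beta=0.1, seed=1)
proj = x @ R
geometric_mean = np.exp(np.mean(np.log(np.abs(proj) + 1e-12)))
print(geometric_mean)   # proportional (up to the paper's constant) to the norm estimate
```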
arxiv-675162
cs/0611115
A higher-order active contour model of a `gas of circles' and its application to tree crown extraction
<|reference_start|>A higher-order active contour model of a `gas of circles' and its application to tree crown extraction: Many image processing problems involve identifying the region in the image domain occupied by a given entity in the scene. Automatic solution of these problems requires models that incorporate significant prior knowledge about the shape of the region. Many methods for including such knowledge run into difficulties when the topology of the region is unknown a priori, for example when the entity is composed of an unknown number of similar objects. Higher-order active contours (HOACs) represent one method for the modelling of non-trivial prior knowledge about shape without necessarily constraining region topology, via the inclusion of non-local interactions between region boundary points in the energy defining the model. The case of an unknown number of circular objects arises in a number of domains, e.g. medical, biological, nanotechnological, and remote sensing imagery. Regions composed of an a priori unknown number of circles may be referred to as a `gas of circles'. In this report, we present a HOAC model of a `gas of circles'. In order to guarantee stable circles, we conduct a stability analysis via a functional Taylor expansion of the HOAC energy around a circular shape. This analysis fixes one of the model parameters in terms of the others and constrains the rest. In conjunction with a suitable likelihood energy, we apply the model to the extraction of tree crowns from aerial imagery, and show that the new model outperforms other techniques.<|reference_end|>
arxiv
@article{horvath2006a, title={A higher-order active contour model of a `gas of circles' and its application to tree crown extraction}, author={Peter Horvath (INRIA Sophia Antipolis), Ian Jermyn (INRIA Sophia Antipolis), Zoltan Kato, Josiane Zerubia (INRIA Sophia Antipolis)}, journal={arXiv preprint arXiv:cs/0611115}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611115}, primaryClass={cs.CV} }
horvath2006a
arxiv-675163
cs/0611116
Discovering Network Topology in the Presence of Byzantine Faults
<|reference_start|>Discovering Network Topology in the Presence of Byzantine Faults: We study the problem of Byzantine-robust topology discovery in an arbitrary asynchronous network. We formally state the weak and strong versions of the problem. The weak version requires that either each node discovers the topology of the network or at least one node detects the presence of a faulty node. The strong version requires that each node discovers the topology regardless of faults. We focus on non-cryptographic solutions to these problems. We explore their bounds. We prove that the weak topology discovery problem is solvable only if the connectivity of the network exceeds the number of faults in the system. Similarly, we show that the strong version of the problem is solvable only if the network connectivity is more than twice the number of faults. We present solutions to both versions of the problem. The presented algorithms match the established graph connectivity bounds. The algorithms do not require the individual nodes to know either the diameter or the size of the network. The message complexity of both programs is low polynomial with respect to the network size. We describe how our solutions can be extended to add the property of termination, handle topology changes and perform neighborhood discovery.<|reference_end|>
arxiv
@article{nesterenko2006discovering, title={Discovering Network Topology in the Presence of Byzantine Faults}, author={Mikhail Nesterenko and S'ebastien Tixeuil}, journal={13th Colloquium on Structural Information and Communication Complexity (SIROCCO), LNCS Volume 4056 pp. 212-226, Chester, UK, July 2006}, year={2006}, doi={10.1007/11780823_17}, archivePrefix={arXiv}, eprint={cs/0611116}, primaryClass={cs.DC cs.DS cs.OS} }
nesterenko2006discovering
arxiv-675164
cs/0611117
2FACE: Bi-Directional Face Traversal for Efficient Geometric Routing
<|reference_start|>2FACE: Bi-Directional Face Traversal for Efficient Geometric Routing: We propose bi-directional face traversal algorithm $2FACE$ to shorten the path the message takes to reach the destination in geometric routing. Our algorithm combines the practicality of the best single-direction traversal algorithms with the worst case message complexity of $O(|E|)$, where $E$ is the number of network edges. We apply $2FACE$ to a variety of geometric routing algorithms. Our simulation results indicate that bi-directional face traversal decreases the latency of message delivery two to three times compared to single direction face traversal. The thus selected path approaches the shortest possible route. This gain in speed comes with a similar message overhead increase. We describe an algorithm which compensates for this message overhead by recording the preferable face traversal direction. Thus, if a source has several messages to send to the destination, the subsequent messages follow the shortest route. Our simulation results show that with most geometric routing algorithms the message overhead of finding the short route by bi-directional face traversal is compensated within two to four repeat messages.<|reference_end|>
arxiv
@article{miyashita20062face:, title={2FACE: Bi-Directional Face Traversal for Efficient Geometric Routing}, author={Mark Miyashita and Mikhail Nesterenko}, journal={arXiv preprint arXiv:cs/0611117}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611117}, primaryClass={cs.DC cs.DS cs.OS} }
miyashita20062face:
arxiv-675165
cs/0611118
A Neutrosophic Description Logic
<|reference_start|>A Neutrosophic Description Logic: Description Logics (DLs) are appropriate, widely used logics for managing structured knowledge. They allow reasoning about individuals and concepts, i.e. sets of individuals with common properties. Typically, DLs are limited to dealing with crisp, well-defined concepts, that is, concepts for which the question of whether an individual is an instance of them has a yes/no answer. More often than not, the concepts encountered in the real world do not have precisely defined criteria of membership: we may say that an individual is an instance of a concept only to a certain degree, depending on the individual's properties. The DLs that deal with such fuzzy concepts are called fuzzy DLs. In order to deal with fuzzy, incomplete, indeterminate and inconsistent concepts, we need to extend the fuzzy DLs, combining neutrosophic logic with a classical DL. In particular, concepts become neutrosophic (here neutrosophic means fuzzy, incomplete, indeterminate, and inconsistent), and thus reasoning about neutrosophic concepts is supported. We define its syntax and its semantics, and describe its properties.<|reference_end|>
arxiv
@article{wang2006a, title={A Neutrosophic Description Logic}, author={Haibin Wang, Andre Rogatko, Florentin Smarandache, Rajshekhar Sunderraman}, journal={Proceedings of 2006 IEEE International Conference on Granular Computing, edited by Yan-Qing Zhang and Tsau Young Lin, Georgia State University, Atlanta, pp. 305-308, 2006}, year={2006}, doi={10.1142/S1793005708001100}, archivePrefix={arXiv}, eprint={cs/0611118}, primaryClass={cs.AI} }
wang2006a
arxiv-675166
cs/0611119
Expressiveness of Metric modalities for continuous time
<|reference_start|>Expressiveness of Metric modalities for continuous time: We prove a conjecture by A. Pnueli and strengthen it showing a sequence of "counting modalities" none of which is expressible in the temporal logic generated by the previous modalities, over the real line, or over the positive reals. Moreover, there is no finite temporal logic that can express all of them over the real line, so that no finite metric temporal logic is expressively complete.<|reference_end|>
arxiv
@article{hirshfeld2006expressiveness, title={Expressiveness of Metric modalities for continuous time}, author={Yoram Hirshfeld and Alexander Rabinovich}, journal={Logical Methods in Computer Science, Volume 3, Issue 1 (February 23, 2007) lmcs:2225}, year={2006}, doi={10.2168/LMCS-3(1:3)2007}, archivePrefix={arXiv}, eprint={cs/0611119}, primaryClass={cs.LO} }
hirshfeld2006expressiveness
arxiv-675167
cs/0611120
Wireless Information-Theoretic Security - Part I: Theoretical Aspects
<|reference_start|>Wireless Information-Theoretic Security - Part I: Theoretical Aspects: In this two-part paper, we consider the transmission of confidential data over wireless wiretap channels. The first part presents an information-theoretic problem formulation in which two legitimate partners communicate over a quasi-static fading channel and an eavesdropper observes their transmissions through another independent quasi-static fading channel. We define the secrecy capacity in terms of outage probability and provide a complete characterization of the maximum transmission rate at which the eavesdropper is unable to decode any information. In sharp contrast with known results for Gaussian wiretap channels (without feedback), our contribution shows that in the presence of fading information-theoretic security is achievable even when the eavesdropper has a better average signal-to-noise ratio (SNR) than the legitimate receiver - fading thus turns out to be a friend and not a foe. The issue of imperfect channel state information is also addressed. Practical schemes for wireless information-theoretic security are presented in Part II, which in some cases comes close to the secrecy capacity limits given in this paper.<|reference_end|>
arxiv
@article{bloch2006wireless, title={Wireless Information-Theoretic Security - Part I: Theoretical Aspects}, author={Matthieu Bloch, Joao Barros, Miguel R. D. Rodrigues, and Steven W. McLaughlin}, journal={arXiv preprint arXiv:cs/0611120}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611120}, primaryClass={cs.IT math.IT} }
bloch2006wireless
arxiv-675168
cs/0611121
Wireless Information-Theoretic Security - Part II: Practical Implementation
<|reference_start|>Wireless Information-Theoretic Security - Part II: Practical Implementation: In Part I of this two-part paper on confidential communication over wireless channels, we studied the fundamental security limits of quasi-static fading channels from the point of view of outage secrecy capacity with perfect and imperfect channel state information. In Part II, we develop a practical secret key agreement protocol for Gaussian and quasi-static fading wiretap channels. The protocol uses a four-step procedure to secure communications: establish common randomness via an opportunistic transmission, perform message reconciliation, establish a common key via privacy amplification, and use of the key. We introduce a new reconciliation procedure that uses multilevel coding and optimized low density parity check codes which in some cases comes close to achieving the secrecy capacity limits established in Part I. Finally, we develop new metrics for assessing average secure key generation rates and show that our protocol is effective in secure key renewal.<|reference_end|>
arxiv
@article{bloch2006wireless, title={Wireless Information-Theoretic Security - Part II: Practical Implementation}, author={Matthieu Bloch, Joao Barros, Miguel R. D. Rodrigues and Steven W. McLaughlin}, journal={arXiv preprint arXiv:cs/0611121}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611121}, primaryClass={cs.IT math.IT} }
bloch2006wireless
arxiv-675169
cs/0611122
Knowledge Representation Concepts for Automated SLA Management
<|reference_start|>Knowledge Representation Concepts for Automated SLA Management: Outsourcing of complex IT infrastructure to IT service providers has increased substantially during the past years. IT service providers must be able to fulfil their service-quality commitments based upon predefined Service Level Agreements (SLAs) with the service customer. They need to manage, execute and maintain thousands of SLAs for different customers and different types of services, which needs new levels of flexibility and automation not available with the current technology. The complexity of contractual logic in SLAs requires new forms of knowledge representation to automatically draw inferences and execute contractual agreements. A logic-based approach provides several advantages including automated rule chaining allowing for compact knowledge representation as well as flexibility to adapt to rapidly changing business requirements. We suggest adequate logical formalisms for representation and enforcement of SLA rules and describe a proof-of-concept implementation. The article describes selected formalisms of the ContractLog KR and their adequacy for automated SLA management and presents results of experiments to demonstrate flexibility and scalability of the approach.<|reference_end|>
arxiv
@article{paschke2006knowledge, title={Knowledge Representation Concepts for Automated SLA Management}, author={Adrian Paschke, Martin Bichler}, journal={arXiv preprint arXiv:cs/0611122}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611122}, primaryClass={cs.SE cs.AI cs.LO cs.PL} }
paschke2006knowledge
arxiv-675170
cs/0611123
Functional Bregman Divergence and Bayesian Estimation of Distributions
<|reference_start|>Functional Bregman Divergence and Bayesian Estimation of Distributions: A class of distortions termed functional Bregman divergences is defined, which includes squared error and relative entropy. A functional Bregman divergence acts on functions or distributions, and generalizes the standard Bregman divergence for vectors and a previous pointwise Bregman divergence that was defined for functions. A recently published result showed that the mean minimizes the expected Bregman divergence. The new functional definition enables the extension of this result to the continuous case to show that the mean minimizes the expected functional Bregman divergence over a set of functions or distributions. It is shown how this theorem applies to the Bayesian estimation of distributions. Estimation of the uniform distribution from independent and identically drawn samples is used as a case study.<|reference_end|>
arxiv
@article{frigyik2006functional, title={Functional Bregman Divergence and Bayesian Estimation of Distributions}, author={B. A. Frigyik, S. Srivastava, and M. R. Gupta}, journal={arXiv preprint arXiv:cs/0611123}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611123}, primaryClass={cs.IT cs.LG math.IT} }
frigyik2006functional
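For orientation, the editor adds the familiar finite-dimensional Bregman divergence that the functional definition above generalizes; this is a standard identity, not the paper's functional construction. For a strictly convex, differentiable $\varphi$,

```latex
d_{\varphi}(x, y) \;=\; \varphi(x) - \varphi(y) - \langle \nabla \varphi(y),\, x - y \rangle .
```

Taking $\varphi(x) = \|x\|^2$ gives squared error, $d_{\varphi}(x, y) = \|x - y\|^2$, while $\varphi(x) = \sum_i x_i \log x_i$ (negative entropy) gives generalized relative entropy, which reduces to the KL divergence for probability vectors. The functional divergence in the paper replaces the gradient term with an analogous derivative of the functional, acting on functions or distributions.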
arxiv-675171
cs/0611124
Low-rank matrix factorization with attributes
<|reference_start|>Low-rank matrix factorization with attributes: We develop a new collaborative filtering (CF) method that combines both previously known users' preferences, i.e. standard CF, as well as product/user attributes, i.e. classical function approximation, to predict a given user's interest in a particular product. Our method is a generalized low rank matrix completion problem, where we learn a function whose inputs are pairs of vectors -- the standard low rank matrix completion problem being a special case where the inputs to the function are the row and column indices of the matrix. We solve this generalized matrix completion problem using tensor product kernels for which we also formally generalize standard kernel properties. Benchmark experiments on movie ratings show the advantages of our generalized matrix completion method over the standard matrix completion one with no information about movies or people, as well as over standard multi-task or single task learning methods.<|reference_end|>
arxiv
@article{abernethy2006low-rank, title={Low-rank matrix factorization with attributes}, author={Jacob Abernethy, Francis Bach, Theodoros Evgeniou, Jean-Philippe Vert}, journal={arXiv preprint arXiv:cs/0611124}, year={2006}, number={N-24/06/MM}, archivePrefix={arXiv}, eprint={cs/0611124}, primaryClass={cs.LG cs.AI cs.IR} }
abernethy2006low-rank
arxiv-675172
cs/0611125
Relay Channels with Confidential Messages
<|reference_start|>Relay Channels with Confidential Messages: We consider a relay channel where a relay helps the transmission of messages from one sender to one receiver. The relay is considered not only as a sender that helps the message transmission but also as a wiretapper who can obtain some knowledge about the transmitted messages. In this paper we study the coding problem of the relay channel under the situation that some of the transmitted messages are confidential to the relay. The security of such confidential messages is measured by the conditional entropy. The rate region is defined by the set of transmission rates for which messages are reliably transmitted and the security of confidential messages is larger than a prescribed level. In this paper we give two definitions of the rate region. We first define the rate region in the case of a deterministic encoder and call it the deterministic rate region. Next, we define the rate region in the case of a stochastic encoder and call it the stochastic rate region. We derive explicit inner and outer bounds for the above two rate regions and present a class of relay channels where the two bounds match. Furthermore, we show that a stochastic encoder can enlarge the rate region. We also evaluate the deterministic rate region of the Gaussian relay channel with confidential messages.<|reference_end|>
arxiv
@article{oohama2006relay, title={Relay Channels with Confidential Messages}, author={Yasutada Oohama}, journal={arXiv preprint arXiv:cs/0611125}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611125}, primaryClass={cs.IT math.IT} }
oohama2006relay
arxiv-675173
cs/0611126
Hereditary Discrepancies in Different Numbers of Colors II
<|reference_start|>Hereditary Discrepancies in Different Numbers of Colors II: We bound the hereditary discrepancy of a hypergraph $\mathcal{H}$ in two colors in terms of its hereditary discrepancy in $c$ colors. We show that $\mathrm{herdisc}(\mathcal{H},2) \le K c \, \mathrm{herdisc}(\mathcal{H},c)$, where $K$ is some absolute constant. This bound is sharp.<|reference_end|>
arxiv
@article{doerr2006hereditary, title={Hereditary Discrepancies in Different Numbers of Colors II}, author={Benjamin Doerr, Mahmoud Fouz}, journal={arXiv preprint arXiv:cs/0611126}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611126}, primaryClass={cs.DM} }
doerr2006hereditary
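Editor's reminder of the quantities in the bound above, using one common convention for multicolor discrepancy (normalizations differ by constant factors across papers, so this is only for orientation and is not quoted from the paper): for a hypergraph $\mathcal{H} = (V, \mathcal{E})$ and a coloring $\chi : V \to [c]$,

```latex
\operatorname{disc}(\mathcal{H}, \chi) = \max_{E \in \mathcal{E}} \, \max_{i \in [c]}
  \Bigl| \, |E \cap \chi^{-1}(i)| - \tfrac{|E|}{c} \, \Bigr|, \qquad
\operatorname{disc}(\mathcal{H}, c) = \min_{\chi : V \to [c]} \operatorname{disc}(\mathcal{H}, \chi), \qquad
\operatorname{herdisc}(\mathcal{H}, c) = \max_{W \subseteq V} \operatorname{disc}(\mathcal{H}|_{W}, c),
```

where $\mathcal{H}|_{W}$ denotes the sub-hypergraph induced on $W$.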
arxiv-675174
cs/0611127
Coupling Methodology within the Software Platform Alliances
<|reference_start|>Coupling Methodology within the Software Platform Alliances: CEA, ANDRA and EDF are jointly developing the software platform ALLIANCES, whose aim is to produce a tool for the simulation of nuclear waste storage and disposal repositories. This type of simulation deals with highly coupled thermo-hydro-mechanical and chemical (T-H-M-C) processes. A key objective of Alliances is to provide the capability to develop coupling algorithms between existing codes. The aim of this paper is to present the coupling methodology used in the context of this software platform.<|reference_end|>
arxiv
@article{montarnal2006coupling, title={Coupling Methodology within the Software Platform Alliances}, author={Philippe Montarnal, Alain Dimier, Estelle Deville, Erwan Adam, J'er^ome Gaombalet, Alain Bengaouer, Laurent Loth, Cl'ement Chavant}, journal={Computational Methods for Coupled Problems in Science and Engineering (04/2005) CD-ROM}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611127}, primaryClass={cs.MS cs.CE} }
montarnal2006coupling
arxiv-675175
cs/0611128
Scale-Free Overlay Topologies with Hard Cutoffs for Unstructured Peer-to-Peer Networks
<|reference_start|>Scale-Free Overlay Topologies with Hard Cutoffs for Unstructured Peer-to-Peer Networks: In unstructured peer-to-peer (P2P) networks, the overlay topology (or connectivity graph) among peers is a crucial component in addition to the peer/data organization and search. Topological characteristics have a profound impact on the efficiency of search on such unstructured P2P networks as well as other networks. It is well known that search on small-world topologies of N nodes can be as efficient as O(ln N), while scale-free (power-law) topologies offer even better search efficiencies, as good as O(ln ln N), for a range of degree-distribution exponents. However, generation and maintenance of such scale-free topologies are hard to realize in distributed and potentially uncooperative environments such as P2P networks. A key limitation of scale-free topologies is the high load (i.e. high degree) on a very small number of hub nodes. In a typical unstructured P2P network, peers are not willing to maintain high degrees/loads, as they may not want to store a large number of entries for construction of the overlay topology. So, to achieve fairness and practicality among all peers, hard cutoffs on the number of entries are imposed by the individual peers, which limits the scale-freeness of the overall topology. Thus, the efficiency of flooding search decreases as the size of the hard cutoff does. We investigate the construction of scale-free topologies with hard cutoffs (i.e. without any major hubs) and the effect of these hard cutoffs on search efficiency. Interestingly, we observe that the efficiency of normalized flooding and random walk search algorithms increases as the hard cutoff decreases.<|reference_end|>
arxiv
@article{guclu2006scale-free, title={Scale-Free Overlay Topologies with Hard Cutoffs for Unstructured Peer-to-Peer Networks}, author={Hasan Guclu and Murat Yuksel}, journal={arXiv preprint arXiv:cs/0611128}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611128}, primaryClass={cs.NI cs.DC} }
guclu2006scale-free
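Editor's sketch of building an overlay with a power-law degree sequence truncated at a hard cutoff, via the configuration model in networkx; the exponent, cutoff and network size are hypothetical, and this is not the authors' construction or search protocol.

```python
import random
import networkx as nx

def truncated_powerlaw_degrees(n, gamma=2.5, kmin=2, kcut=20, seed=None):
    """Sample n degrees from P(k) proportional to k**(-gamma) on [kmin, kcut]."""
    rnd = random.Random(seed)
    support = list(range(kmin, kcut + 1))
    weights = [k ** (-gamma) for k in support]
    degrees = rnd.choices(support, weights=weights, k=n)
    if sum(degrees) % 2:         # the configuration model needs an even degree sum
        degrees[0] += 1
    return degrees

degrees = truncated_powerlaw_degrees(1000, gamma=2.5, kmin=2, kcut=20, seed=1)
G = nx.configuration_model(degrees, seed=1)
G = nx.Graph(G)                  # collapse parallel edges
G.remove_edges_from(nx.selfloop_edges(G))
print(G.number_of_nodes(), G.number_of_edges(), max(d for _, d in G.degree()))
```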
arxiv-675176
cs/0611129
Shannon's secrecy system with informed receivers and its application to systematic coding for wiretapped channels
<|reference_start|>Shannon's secrecy system with informed receivers and its application to systematic coding for wiretapped channels: Shannon's secrecy system is studied in a setting where both the legitimate decoder and the wiretapper have access to side information sequences correlated to the source, but the wiretapper receives both the coded information and the side information via channels that are more noisy than the respective channels of the legitimate decoder, which in turn also shares a secret key with the encoder. A single-letter characterization is provided for the achievable region in the space of five figures of merit: the equivocation at the wiretapper, the key rate, the distortion of the source reconstruction at the legitimate receiver, the bandwidth expansion factor of the coded channels, and the average transmission cost (generalized power). Beyond the fact that this is an extension of earlier studies, it also provides a framework for studying fundamental performance limits of systematic codes in the presence of a wiretap channel. The best achievable performance of systematic codes is then compared to that of a general code in several respects, and a few examples are given.<|reference_end|>
arxiv
@article{merhav2006shannon's, title={Shannon's secrecy system with informed receivers and its application to systematic coding for wiretapped channels}, author={Neri Merhav}, journal={arXiv preprint arXiv:cs/0611129}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611129}, primaryClass={cs.IT math.IT} }
merhav2006shannon's
arxiv-675177
cs/0611130
Quantitative Analysis of the Publishing Landscape in High-Energy Physics
<|reference_start|>Quantitative Analysis of the Publishing Landscape in High-Energy Physics: World-wide collaboration in high-energy physics (HEP) is a tradition which dates back several decades, with scientific publications mostly coauthored by scientists from different countries. This coauthorship phenomenon makes it difficult to identify precisely the ``share'' of each country in HEP scientific production. One year's worth of HEP scientific articles published in peer-reviewed journals is analysed and their authors are uniquely assigned to countries. This method allows the first correct estimation on a ``pro rata'' basis of the share of HEP scientific publishing among several countries and institutions. The results provide an interesting insight into the geographical collaborative patterns of the HEP community. The HEP publishing landscape is further analysed to provide information on the journals favoured by the HEP community and on the geographical variation of their author bases. These results provide quantitative input to the ongoing debate on the possible transition of HEP publishing to an Open Access model.<|reference_end|>
arxiv
@article{mele2006quantitative, title={Quantitative Analysis of the Publishing Landscape in High-Energy Physics}, author={Salvatore Mele, David Dallman, Jens Vigen, Joanne Yeomans}, journal={JHEP0612:S01,2006}, year={2006}, doi={10.1088/1126-6708/2006/12/S01}, number={CERN-OPEN-2006-065}, archivePrefix={arXiv}, eprint={cs/0611130}, primaryClass={cs.DL hep-ex hep-ph hep-th} }
mele2006quantitative
arxiv-675178
cs/0611131
Scatter Networks: A New Approach for Analyzing Information Scatter on the Web
<|reference_start|>Scatter Networks: A New Approach for Analyzing Information Scatter on the Web: Information on any given topic is often scattered across the web. Previously this scatter has been characterized through the distribution of a set of facts (i.e. pieces of information) across web pages, showing that typically a few pages contain many facts on the topic, while many pages contain just a few. While such approaches have revealed important scatter phenomena, they are lossy in that they conceal how specific facts (e.g. rare facts) occur in specific types of pages (e.g. fact-rich pages). To reveal such regularities, we construct bi-partite networks, consisting of two types of vertices: the facts contained in webpages and the webpages themselves. Such a representation enables the application of a series of network analysis techniques, revealing structural features such as connectivity, robustness, and clustering. We discuss the implications of each of these features to the users' ability to find comprehensive information online. Finally, we compare the bipartite graph structure of webpages and facts with the hyperlink structure between the webpages.<|reference_end|>
arxiv
@article{adamic2006scatter, title={Scatter Networks: A New Approach for Analyzing Information Scatter on the Web}, author={Lada A. Adamic, Suresh K. Bhavnani and Xiaolin Shi}, journal={arXiv preprint arXiv:cs/0611131}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611131}, primaryClass={cs.IR} }
adamic2006scatter
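The abstract above builds bipartite fact-webpage networks. A toy sketch of that representation (hypothetical facts and pages, not the paper's corpus), assuming networkx:

    import networkx as nx

    pages_to_facts = {                 # hypothetical extraction result
        "page_A": {"f1", "f2", "f3", "f4"},
        "page_B": {"f1", "f2"},
        "page_C": {"f1"},
        "page_D": {"f5"},
    }

    B = nx.Graph()
    for page, facts in pages_to_facts.items():
        B.add_node(page, kind="page")
        for fact in facts:
            B.add_node(fact, kind="fact")
            B.add_edge(page, fact)

    page_deg = {n: d for n, d in B.degree() if B.nodes[n]["kind"] == "page"}
    fact_deg = {n: d for n, d in B.degree() if B.nodes[n]["kind"] == "fact"}
    print("facts per page:", page_deg)   # a few fact-rich pages, many sparse ones
    print("pages per fact:", fact_deg)   # common facts vs. rare facts

Degree distributions on the two vertex sets then quantify scatter: how facts are spread over pages and how comprehensive individual pages are.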
arxiv-675179
cs/0611132
The specifications making in complex CAD-system of renovation of the enterprises on the basis of modules in the drawing and electronic catalogues
<|reference_start|>The specifications making in complex CAD-system of renovation of the enterprises on the basis of modules in the drawing and electronic catalogues: The paper describes experience with automating the making of specifications in projects for the renovation of industrial enterprises, based on special modules in the drawing that contain a visible image and additional parameters, and on electronic catalogues.<|reference_end|>
arxiv
@article{migunov2006the, title={The specifications making in complex CAD-system of renovation of the enterprises on the basis of modules in the drawing and electronic catalogues}, author={Vladimir V. Migunov}, journal={arXiv preprint arXiv:cs/0611132}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611132}, primaryClass={cs.CE} }
migunov2006the
arxiv-675180
cs/0611133
The modelling of the automation schemes of technological processes in CAD-system of renovation of the enterprises
<|reference_start|>The modelling of the automation schemes of technological processes in CAD-system of renovation of the enterprises: According to the requirements of the Russian standards, automation schemes are necessary in practically every renovation project for industrial buildings and facilities in which technological processes are carried out. The model representations of the automation schemes in the CAD-system TechnoCAD GlassX are described. The models follow the principle of excluding repeated input operations.<|reference_end|>
arxiv
@article{migunov2006the, title={The modelling of the automation schemes of technological processes in CAD-system of renovation of the enterprises}, author={Vladimir V. Migunov}, journal={arXiv preprint arXiv:cs/0611133}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611133}, primaryClass={cs.CE} }
migunov2006the
arxiv-675181
cs/0611134
Hard Disk Drive as a Magnetomechanical Logic Device
<|reference_start|>Hard Disk Drive as a Magnetomechanical Logic Device: We consider the conditions under which two binary numbers can be superimposed on the same track using different recording magnetic fields. As a result, the average magnetization of a longitudinal medium along the track can have three states: -M, 0 and +M. The possibility of performing logic operations on these states is considered. We demonstrate OR, AND, XOR and NOT operations and discuss a modification of a recording device.<|reference_end|>
arxiv
@article{safonov2006hard, title={Hard Disk Drive as a Magnetomechanical Logic Device}, author={Vladimir L. Safonov}, journal={arXiv preprint arXiv:cs/0611134}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611134}, primaryClass={cs.OH cs.AR} }
safonov2006hard
arxiv-675182
cs/0611135
Genetic Programming for Kernel-based Learning with Co-evolving Subsets Selection
<|reference_start|>Genetic Programming for Kernel-based Learning with Co-evolving Subsets Selection: Support Vector Machines (SVMs) are well-established Machine Learning (ML) algorithms. They rely on the fact that i) linear learning can be formalized as a well-posed optimization problem; ii) non-linear learning can be brought into linear learning thanks to the kernel trick and the mapping of the initial search space onto a high dimensional feature space. The kernel is designed by the ML expert and it governs the efficiency of the SVM approach. In this paper, a new approach for the automatic design of kernels by Genetic Programming, called the Evolutionary Kernel Machine (EKM), is presented. EKM combines a well-founded fitness function inspired by the margin criterion, and a co-evolution framework ensuring the computational scalability of the approach. Empirical validation on standard ML benchmarks demonstrates that EKM is competitive with state-of-the-art SVMs with tuned hyper-parameters.<|reference_end|>
arxiv
@article{gagné2006genetic, title={Genetic Programming for Kernel-based Learning with Co-evolving Subsets Selection}, author={Christian Gagn'e (INRIA Futurs, ISI), Marc Schoenauer (INRIA Futurs, LRI), Mich`ele Sebag (LRI), Marco Tomassini (ISI)}, journal={Dans PPSN'06, 4193 (2006) 1008-1017}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611135}, primaryClass={cs.AI} }
gagné2006genetic
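EKM itself evolves kernel expressions by Genetic Programming; that search is not reproduced here. The sketch below only illustrates the surrounding machinery, under the assumption that scikit-learn is used: SVC accepts any callable kernel k(X, Y) returning a Gram matrix, so an evolved kernel could be plugged in exactly like the hand-written stand-in below.

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    from sklearn.svm import SVC

    def candidate_kernel(X, Y):
        # hand-written stand-in for an evolved kernel: RBF-like term plus polynomial term
        sq = ((X[:, None, :] - Y[None, :, :]) ** 2).sum(-1)
        return np.exp(-0.5 * sq) + (X @ Y.T + 1.0) ** 2

    X, y = make_classification(n_samples=300, n_features=10, random_state=0)
    Xtr, Xte, ytr, yte = train_test_split(X, y, random_state=0)
    clf = SVC(kernel=candidate_kernel, C=1.0).fit(Xtr, ytr)
    print("test accuracy:", clf.score(Xte, yte))

A GP-based search would replace candidate_kernel with expressions assembled from primitive operations and score them with the margin-inspired fitness mentioned in the abstract.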
arxiv-675183
cs/0611136
Neural Computation with Rings of Quasiperiodic Oscillators
<|reference_start|>Neural Computation with Rings of Quasiperiodic Oscillators: We describe the use of quasiperiodic oscillators for computation and control of robots. We also describe their relationship to central pattern generators in simple organisms and develop a group theory for describing the dynamics of these systems.<|reference_end|>
arxiv
@article{rietman2006neural, title={Neural Computation with Rings of Quasiperiodic Oscillators}, author={E.A. Rietman and R.W. Hillis}, journal={arXiv preprint arXiv:cs/0611136}, year={2006}, number={PSI SR-1278}, archivePrefix={arXiv}, eprint={cs/0611136}, primaryClass={cs.RO} }
rietman2006neural
arxiv-675184
cs/0611137
Rhythms of social interaction: messaging within a massive online network
<|reference_start|>Rhythms of social interaction: messaging within a massive online network: We have analyzed the fully-anonymized headers of 362 million messages exchanged by 4.2 million users of Facebook, an online social network of college students, during a 26 month interval. The data reveal a number of strong daily and weekly regularities which provide insights into the time use of college students and their social lives, including seasonal variations. We also examined how factors such as school affiliation and informal online friend lists affect the observed behavior and temporal patterns. Finally, we show that Facebook users appear to be clustered by school with respect to their temporal messaging patterns.<|reference_end|>
arxiv
@article{golder2006rhythms, title={Rhythms of social interaction: messaging within a massive online network}, author={Scott Golder, Dennis M. Wilkinson, Bernardo A. Huberman}, journal={arXiv preprint arXiv:cs/0611137}, year={2006}, doi={10.1007/978-1-84628-905-7_3}, archivePrefix={arXiv}, eprint={cs/0611137}, primaryClass={cs.CY physics.soc-ph} }
golder2006rhythms
arxiv-675185
cs/0611138
Functional Brain Imaging with Multi-Objective Multi-Modal Evolutionary Optimization
<|reference_start|>Functional Brain Imaging with Multi-Objective Multi-Modal Evolutionary Optimization: Functional brain imaging is a source of spatio-temporal data mining problems. A new framework hybridizing multi-objective and multi-modal optimization is proposed to formalize these data mining problems, and addressed through Evolutionary Computation (EC). The merits of EC for spatio-temporal data mining are demonstrated as the approach facilitates the modelling of the experts' requirements, and flexibly accommodates their changing goals.<|reference_end|>
arxiv
@article{krmicek2006functional, title={Functional Brain Imaging with Multi-Objective Multi-Modal Evolutionary Optimization}, author={Vojtech Krmicek (INRIA Futurs, LRI), Mich`ele Sebag (INRIA Futurs, LRI)}, journal={Dans PPSN'06, 4193 (2006) 382-391}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611138}, primaryClass={cs.AI} }
krmicek2006functional
arxiv-675186
cs/0611139
Static Safety for an Actor Dedicated Process Calculus by Abstract Interpretation
<|reference_start|>Static Safety for an Actor Dedicated Process Calculus by Abstract Interpretation: The actor model eases the definition of concurrent programs with non uniform behaviors. Static analysis of such a model was previously done in a data-flow oriented way, with type systems. This approach was based on constraint set resolution and was not able to deal with precise properties for communications of behaviors. We present here a new approach, control-flow oriented, based on the abstract interpretation framework, able to deal with communication of behaviors. Within our new analyses, we are able to verify most of the previous properties we observed as well as new ones, principally based on occurrence counting.<|reference_end|>
arxiv
@article{garoche2006static, title={Static Safety for an Actor Dedicated Process Calculus by Abstract Interpretation}, author={Pierre-Lo"ic Garoche (IRIT), Marc Pantel (IRIT), Xavier Thirioux (IRIT)}, journal={Formal Methods for Open Object-Based Distributed Systems (26/05/2006) 78-92}, year={2006}, doi={10.1007/11768869_8}, archivePrefix={arXiv}, eprint={cs/0611139}, primaryClass={cs.DC} }
garoche2006static
arxiv-675187
cs/0611140
On the Benefits of Inoculation, an Example in Train Scheduling
<|reference_start|>On the Benefits of Inoculation, an Example in Train Scheduling: The local reconstruction of a railway schedule following a small perturbation of the traffic, seeking minimization of the total accumulated delay, is a very difficult and tightly constrained combinatorial problem. Notoriously enough, the railway company's public image degrades proportionally to the amount of daily delays, and the same goes for its profit! This paper describes an inoculation procedure which greatly enhances an evolutionary algorithm for train re-scheduling. The procedure consists in building the initial population around a pre-computed solution based on problem-related information available beforehand. The optimization is performed by adapting times of departure and arrival, as well as allocation of tracks, for each train at each station. This is achieved by a permutation-based evolutionary algorithm that relies on a semi-greedy heuristic scheduler to gradually reconstruct the schedule by inserting trains one after another. Experimental results are presented on various instances of a large real-world case involving around 500 trains and more than 1 million constraints. In terms of competition with the commercial mathematical programming tool ILOG CPLEX, it appears that within a large class of instances, excluding trivial instances as well as too difficult ones, and with very few exceptions, a clever initialization turns an encouraging failure into a clear-cut success, auguring substantial financial savings.<|reference_end|>
arxiv
@article{semet2006on, title={On the Benefits of Inoculation, an Example in Train Scheduling}, author={Yann Semet (INRIA Futurs), Marc Schoenauer (INRIA Futurs)}, journal={Dans GECCO-2006 (2006)}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611140}, primaryClass={cs.AI cs.NE} }
semet2006on
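A toy Python sketch of the inoculation idea described above (not the authors' scheduler or representation): the initial population is seeded with small random perturbations of a pre-computed permutation instead of purely random individuals.

    import random

    def inoculated_population(seed_perm, pop_size, n_swaps=3, rng=None):
        rng = rng or random.Random(0)
        population = [list(seed_perm)]          # keep the seed solution itself
        while len(population) < pop_size:
            indiv = list(seed_perm)
            for _ in range(n_swaps):            # a few random swaps around the seed
                i, j = rng.randrange(len(indiv)), rng.randrange(len(indiv))
                indiv[i], indiv[j] = indiv[j], indiv[i]
            population.append(indiv)
        return population

    seed = list(range(20))                      # stand-in for a heuristic train ordering
    pop = inoculated_population(seed, pop_size=50)
    print(len(pop), pop[1][:10])

In the paper the permutations feed a semi-greedy heuristic scheduler that rebuilds the timetable train by train; that decoding step is omitted here.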
arxiv-675188
cs/0611141
A Generic Global Constraint based on MDDs
<|reference_start|>A Generic Global Constraint based on MDDs: The paper suggests the use of Multi-Valued Decision Diagrams (MDDs) as the supporting data structure for a generic global constraint. We give an algorithm for maintaining generalized arc consistency (GAC) on this constraint that amortizes the cost of the GAC computation over a root-to-terminal path in the search tree. The technique used is an extension of the GAC algorithm for the regular language constraint on finite length input. Our approach adds support for skipped variables, maintains the reduced property of the MDD dynamically and provides domain entailment detection. Finally we also show how to adapt the approach to constraint types that are closely related to MDDs, such as AOMDDs and Case DAGs.<|reference_end|>
arxiv
@article{tiedemann2006a, title={A Generic Global Constraint based on MDDs}, author={Peter Tiedemann, Henrik Reif Andersen and Rasmus Pagh}, journal={arXiv preprint arXiv:cs/0611141}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611141}, primaryClass={cs.AI} }
tiedemann2006a
arxiv-675189
cs/0611142
A Symbolic Intruder Model for Hash-Collision Attacks
<|reference_start|>A Symbolic Intruder Model for Hash-Collision Attacks: In recent years, several practical methods have been published to compute collisions on some commonly used hash functions. In this paper we present a method to take into account, at the symbolic level, that an intruder actively attacking a protocol execution may use these collision algorithms in reasonable time during the attack. Our decision procedure relies on the reduction of constraint solving for an intruder exploiting the collision properties of hash functions to constraint solving for an intruder operating on words.<|reference_end|>
arxiv
@article{chevalier2006a, title={A Symbolic Intruder Model for Hash-Collision Attacks}, author={Yannick Chevalier (IRIT), Mounira Kourjieh (IRIT)}, journal={CSTVA'06, France (25/09/2006)}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611142}, primaryClass={cs.CR} }
chevalier2006a
arxiv-675190
cs/0611143
An informational approach to the global optimization of expensive-to-evaluate functions
<|reference_start|>An informational approach to the global optimization of expensive-to-evaluate functions: In many global optimization problems motivated by engineering applications, the number of function evaluations is severely limited by time or cost. To ensure that each evaluation contributes to the localization of good candidates for the role of global minimizer, a sequential choice of evaluation points is usually carried out. In particular, when Kriging is used to interpolate past evaluations, the uncertainty associated with the lack of information on the function can be expressed and used to compute a number of criteria accounting for the interest of an additional evaluation at any given point. This paper introduces minimizer entropy as a new Kriging-based criterion for the sequential choice of points at which the function should be evaluated. Based on \emph{stepwise uncertainty reduction}, it accounts for the informational gain on the minimizer expected from a new evaluation. The criterion is approximated using conditional simulations of the Gaussian process model behind Kriging, and then inserted into an algorithm similar in spirit to the \emph{Efficient Global Optimization} (EGO) algorithm. An empirical comparison is carried out between our criterion and \emph{expected improvement}, one of the reference criteria in the literature. Experimental results indicate major evaluation savings over EGO. Finally, the method, which we call IAGO (for Informational Approach to Global Optimization) is extended to robust optimization problems, where both the factors to be tuned and the function evaluations are corrupted by noise.<|reference_end|>
arxiv
@article{villemonteix2006an, title={An informational approach to the global optimization of expensive-to-evaluate functions}, author={Julien Villemonteix, Emmanuel Vazquez, Eric Walter}, journal={arXiv preprint arXiv:cs/0611143}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611143}, primaryClass={cs.NA} }
villemonteix2006an
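The sketch below covers only the baseline criterion the abstract compares against, expected improvement for a Kriging predictor with mean mu(x) and standard deviation sigma(x); IAGO's minimizer-entropy criterion, which relies on conditional simulations of the Gaussian process, is not reproduced. numpy and scipy are assumed.

    import numpy as np
    from scipy.stats import norm

    def expected_improvement(mu, sigma, f_min):
        # EI(x) = E[max(f_min - F(x), 0)] with F(x) ~ N(mu, sigma^2), for minimization
        sigma = np.maximum(sigma, 1e-12)
        z = (f_min - mu) / sigma
        return (f_min - mu) * norm.cdf(z) + sigma * norm.pdf(z)

    mu = np.array([0.2, 0.0, -0.1])      # hypothetical Kriging predictions
    sigma = np.array([0.05, 0.3, 0.01])  # hypothetical Kriging standard deviations
    print(expected_improvement(mu, sigma, f_min=0.05))

An EGO-style loop evaluates the function where the chosen criterion is maximal, refits the Kriging model, and repeats until the evaluation budget is exhausted.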
arxiv-675191
cs/0611144
Coding Improves the Optimal Delay-Throughput Trade-offs in Mobile Ad-Hoc Networks: Two-Dimensional IID Mobility Models
<|reference_start|>Coding Improves the Optimal Delay-Throughput Trade-offs in Mobile Ad-Hoc Networks: Two-Dimensional IID Mobility Models: In this paper, we investigate the delay-throughput trade-offs in mobile ad-hoc networks under two-dimensional i.i.d. mobility models. We consider two mobility time-scales: (i) Fast mobility where node mobility is at the same time-scale as data transmissions; (ii) Slow mobility where node mobility is assumed to occur at a much slower time-scale than data transmissions. Given a delay constraint $D,$ the main results are as follows: (1) For the two-dimensional i.i.d. mobility model with fast mobiles, the maximum throughput per source-destination (S-D) pair is shown to be $O(\sqrt{D/n}),$ where $n$ is the number of mobiles. (2) For the two-dimensional i.i.d. mobility model with slow mobiles, the maximum throughput per S-D pair is shown to be $O(\sqrt[3]{D/n}).$ (3) For each case, we propose a joint coding-scheduling algorithm to achieve the optimal delay-throughput trade-offs.<|reference_end|>
arxiv
@article{ying2006coding, title={Coding Improves the Optimal Delay-Throughput Trade-offs in Mobile Ad-Hoc Networks: Two-Dimensional I.I.D. Mobility Models}, author={Lei Ying, Sichao Yang and R. Srikant}, journal={arXiv preprint arXiv:cs/0611144}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611144}, primaryClass={cs.NI cs.IT math.IT} }
ying2006coding
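For reference, the two regimes stated in the abstract can be put side by side as per-pair throughput scalings under delay constraint $D$ with $n$ nodes: $O(\sqrt{D/n})$ for fast mobility and $O(\sqrt[3]{D/n})$ for slow mobility. Note that whenever $D < n$, $\sqrt[3]{D/n} > \sqrt{D/n}$, so the slow-mobility scaling is the larger of the two.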
arxiv-675192
cs/0611145
A Unified View of TD Algorithms; Introducing Full-Gradient TD and Equi-Gradient Descent TD
<|reference_start|>A Unified View of TD Algorithms; Introducing Full-Gradient TD and Equi-Gradient Descent TD: This paper addresses the issue of policy evaluation in Markov Decision Processes, using linear function approximation. It provides a unified view of algorithms such as TD(lambda), LSTD(lambda), iLSTD, residual-gradient TD. It is asserted that they all consist in minimizing a gradient function and differ by the form of this function and their means of minimizing it. Two new schemes are introduced in that framework: Full-gradient TD which uses a generalization of the principle introduced in iLSTD, and EGD TD, which reduces the gradient by successive equi-gradient descents. These three algorithms form a new intermediate family with the interesting property of making much better use of the samples than TD while keeping a gradient descent scheme, which is useful for complexity issues and optimistic policy iteration.<|reference_end|>
arxiv
@article{loth2006a, title={A Unified View of TD Algorithms; Introducing Full-Gradient TD and Equi-Gradient Descent TD}, author={Manuel Loth (INRIA Futurs), Philippe Preux (INRIA Futurs)}, journal={Dans European Symposium on Artificial Neural Networks (2006)}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611145}, primaryClass={cs.LG} }
loth2006a
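As background for the unified view above, here is textbook TD(lambda) with linear function approximation and accumulating eligibility traces, the common baseline the paper starts from; the new full-gradient and equi-gradient variants are not reproduced here.

    import numpy as np

    def td_lambda_episode(transitions, phi, w, alpha=0.05, gamma=0.99, lam=0.8):
        # transitions: iterable of (state, reward, next_state, done)
        e = np.zeros_like(w)                      # eligibility trace
        for s, r, s_next, done in transitions:
            v = phi(s) @ w
            v_next = 0.0 if done else phi(s_next) @ w
            delta = r + gamma * v_next - v        # TD error
            e = gamma * lam * e + phi(s)
            w = w + alpha * delta * e
        return w

    # toy usage on a small chain with one-hot features (illustrative only)
    phi = lambda s: np.eye(5)[s]
    w = np.zeros(5)
    episode = [(0, 0.0, 1, False), (1, 0.0, 2, False), (2, 1.0, 3, True)]
    w = td_lambda_episode(episode, phi, w)
    print(w)

In the paper's reading, LSTD(lambda), iLSTD and the new variants differ mainly in the gradient function they minimize and in how they minimize it over the collected samples.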
arxiv-675193
cs/0611146
Linear-Codes-Based Lossless Joint Source-Channel Coding for Multiple-Access Channels
<|reference_start|>Linear-Codes-Based Lossless Joint Source-Channel Coding for Multiple-Access Channels: A general lossless joint source-channel coding (JSCC) scheme based on linear codes and random interleavers for multiple-access channels (MACs) is presented and then analyzed in this paper. By the information-spectrum approach and the code-spectrum approach, it is shown that a linear code with a good joint spectrum can be used to establish limit-approaching lossless JSCC schemes for correlated general sources and general MACs, where the joint spectrum is a generalization of the input-output weight distribution. Some properties of linear codes with good joint spectra are investigated. A formula on the "distance" property of linear codes with good joint spectra is derived, based on which, it is further proved that, the rate of any systematic codes with good joint spectra cannot be larger than the reciprocal of the corresponding alphabet cardinality, and any sparse generator matrices cannot yield linear codes with good joint spectra. The problem of designing arbitrary rate coding schemes is also discussed. A novel idea called "generalized puncturing" is proposed, which makes it possible that one good low-rate linear code is enough for the design of coding schemes with multiple rates. Finally, various coding problems of MACs are reviewed in a unified framework established by the code-spectrum approach, under which, criteria and candidates of good linear codes in terms of spectrum requirements for such problems are clearly presented.<|reference_end|>
arxiv
@article{yang2006linear-codes-based, title={Linear-Codes-Based Lossless Joint Source-Channel Coding for Multiple-Access Channels}, author={Shengtian Yang, Yan Chen, Peiliang Qiu}, journal={IEEE Trans. Inf. Theory 55 (2009) 1468-1486}, year={2006}, doi={10.1109/TIT.2009.2013009}, archivePrefix={arXiv}, eprint={cs/0611146}, primaryClass={cs.IT math.IT} }
yang2006linear-codes-based
arxiv-675194
cs/0611147
P is not equal to NP
<|reference_start|>P is not equal to NP: This submission has been withdrawn at the request of the author.<|reference_end|>
arxiv
@article{g2006p, title={P is not equal to NP}, author={Raju Renjit. G}, journal={arXiv preprint arXiv:cs/0611147}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611147}, primaryClass={cs.CC} }
g2006p
arxiv-675195
cs/0611148
Next Generation Language Resources using GRID
<|reference_start|>Next Generation Language Resources using GRID: This paper presents a case study concerning the challenges and requirements posed by next generation language resources, realized as an overall model of open, distributed and collaborative language infrastructure. If a sort of "new paradigm" is required, we think that the emerging and still evolving technology connected to Grid computing is a very interesting and suitable one for a concrete realization of this vision. Given the current limitations of Grid computing, it is very important to test the new environment on basic language analysis tools, in order to get a feeling for the potentialities and possible limitations connected with its use in NLP. For this reason, we have done some experiments on a module of Linguistic Miner, i.e. the extraction of linguistic patterns from restricted domain corpora.<|reference_end|>
arxiv
@article{calzolari2006next, title={Next Generation Language Resources using GRID}, author={Federico Calzolari, Eva Sassolini, Manuela Sassi, Sebastiana Cucurullo, Eugenio Picchi, Francesca Bertagna, Alessandro Enea, Monica Monachini, Claudia Soria, Nicoletta Calzolari}, journal={Language Resources and Evaluation Conference LREC 2006 proceedings pp.1858-1861, Genoa [Italy]}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611148}, primaryClass={cs.DC cs.CL} }
calzolari2006next
arxiv-675196
cs/0611149
Comparison between Networked Control System behaviour based on CAN and Switched Ethernet networks
<|reference_start|>Comparison between Networked Control System behaviour based on CAN and Switched Ethernet networks: Distributed control systems are increasingly used in many industrial applications. These systems are often referred to as "Networked control systems". The goal of this paper is to show the influence of the network on feedback control systems. Two networks are considered: a Switched Ethernet network and the CAN fieldbus. The first represents a non-deterministic network and the second a deterministic one. Several scenarios are studied to analyse the stability of the system with respect to different network parameters (packet losses, congestion and frame priority). The TrueTime simulator is used in this work.<|reference_end|>
arxiv
@article{brahimi2006comparison, title={Comparison between Networked Control System behaviour based on CAN and Switched Ethernet networks}, author={Belynda Brahimi (CRAN), Eric Rondeau (CRAN), Christophe Aubrun (CRAN)}, journal={2nd Workshop on Networked Control Systems : Tolerant to fault (23/11/2006) 7 pages}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611149}, primaryClass={cs.NI} }
brahimi2006comparison
arxiv-675197
cs/0611150
A Novel Bayesian Classifier using Copula Functions
<|reference_start|>A Novel Bayesian Classifier using Copula Functions: A useful method for representing Bayesian classifiers is through \emph{discriminant functions}. Here, using copula functions, we propose a new model for discriminants. This model provides a rich and generalized class of decision boundaries. These decision boundaries significantly boost the classification accuracy especially for high dimensional feature spaces. We strengthen our analysis through simulation results.<|reference_end|>
arxiv
@article{sathe2006a, title={A Novel Bayesian Classifier using Copula Functions}, author={Saket Sathe}, journal={arXiv preprint arXiv:cs/0611150}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611150}, primaryClass={cs.LG cs.AI cs.IR} }
sathe2006a
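The paper's specific discriminant model is not reproduced here; as a generic illustration of a copula-based Bayes classifier, the sketch below glues per-feature Gaussian marginals together with a Gaussian copula and classifies by the largest class-conditional log-density plus log-prior (with Gaussian marginals this reduces to a full multivariate Gaussian, but other marginal fits could be swapped in). numpy and scipy are assumed.

    import numpy as np
    from scipy.stats import norm

    class GaussianCopulaBayes:
        def fit(self, X, y):
            self.classes_ = np.unique(y)
            self.params_ = {}
            for c in self.classes_:
                Xc = X[y == c]
                mu, sd = Xc.mean(0), Xc.std(0) + 1e-9
                z = (Xc - mu) / sd                    # normal scores of the marginals
                R = np.corrcoef(z, rowvar=False) + 1e-6 * np.eye(X.shape[1])
                self.params_[c] = (mu, sd, R, np.log(len(Xc) / len(X)))
            return self

        def _log_density(self, X, mu, sd, R):
            # log Gaussian-copula density plus sum of log marginal densities
            z = (X - mu) / sd
            Rinv = np.linalg.inv(R)
            copula = -0.5 * np.log(np.linalg.det(R)) \
                     - 0.5 * np.einsum("ni,ij,nj->n", z, Rinv - np.eye(len(mu)), z)
            return copula + norm.logpdf(X, mu, sd).sum(1)

        def predict(self, X):
            scores = np.column_stack([
                self._log_density(X, *p[:3]) + p[3]
                for p in (self.params_[c] for c in self.classes_)
            ])
            return self.classes_[scores.argmax(1)]

    X = np.random.randn(200, 3); y = np.repeat([0, 1], 100); X[y == 1] += 1.0
    print((GaussianCopulaBayes().fit(X, y).predict(X) == y).mean())

The decision boundary comes from differences of such log-densities, which is the kind of richer discriminant function the abstract refers to.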
arxiv-675198
cs/0611151
Collaborative design : managing task interdependencies and multiple perspectives
<|reference_start|>Collaborative design : managing task interdependencies and multiple perspectives: This paper focuses on two characteristics of collaborative design with respect to cooperative work: the importance of work interdependencies linked to the nature of design problems; and the fundamental function of design cooperative work arrangement which is the confrontation and combination of perspectives. These two intrinsic characteristics of the design work stress specific cooperative processes: coordination processes in order to manage task interdependencies, establishment of common ground and negotiation mechanisms in order to manage the integration of multiple perspectives in design.<|reference_end|>
arxiv
@article{détienne2006collaborative, title={Collaborative design : managing task interdependencies and multiple perspectives}, author={Franc{c}oise D'etienne (INRIA)}, journal={Interacting With Computers 18, 1 (2006) 1-20}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611151}, primaryClass={cs.HC} }
détienne2006collaborative
arxiv-675199
cs/0611152
Viewpoints in co-design: a field study in concurrent engineering
<|reference_start|>Viewpoints in co-design: a field study in concurrent engineering: We present a field study aimed at analysing the use of viewpoints in co-design meetings. A viewpoint is a representation characterised by a certain combination of constraints. Three types of viewpoints are distinguished: prescribed viewpoint, discipline-specific viewpoint and integrated viewpoint. The contribution of our work consists in characterising the viewpoints of various stakeholders involved in co-design ("design office" disciplines, and production and maintenance disciplines), the dynamics of viewpoints confrontation and the cooperative modes that enable these different viewpoints to be integrated.<|reference_end|>
arxiv
@article{détienne2006viewpoints, title={Viewpoints in co-design: a field study in concurrent engineering}, author={Franc{c}oise D'etienne (INRIA), G'eraldine Martin, Elisabeth Lavigne}, journal={Design Studies 26, 3 (2005) 215-241}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611152}, primaryClass={cs.HC} }
détienne2006viewpoints
arxiv-675200
cs/0611153
Changing our view on design evaluation meetings methodology: a study of software technical review meetings
<|reference_start|>Changing our view on design evaluation meetings methodology: a study of software technical review meetings: By contrast to design meetings, design evaluation meetings (DEMs) have generally been considered as situations in which, according to DEMs methodologies, design activities are quite marginal. In a study of DEMs in software development, i.e. in technical review meetings following a particular review methodology, we showed: (i) the occurrence of design activities as part of an argumentation process; (ii) the relative importance of cognitive synchronisation as a prerequisite for evaluation; (iii) the important role played in evaluation by argumentation that makes explicit the underlying design rationale (DR). On the basis of our results, we discuss the potential for using DR methodologies in this kind of meetings.<|reference_end|>
arxiv
@article{d'astous2006changing, title={Changing our view on design evaluation meetings methodology: a study of software technical review meetings}, author={Patrick D'Astous, Franc{c}oise D'etienne (INRIA), Willemien Visser (INRIA), Pierre Robillard}, journal={Design Studies (2004)}, year={2006}, archivePrefix={arXiv}, eprint={cs/0611153}, primaryClass={cs.HC} }
d'astous2006changing