corpus_id | paper_id | title | abstract | source | bibtex | citation_key
---|---|---|---|---|---|---
arxiv-677001 | nlin/0408040 | Notes on information geometry and evolutionary processes | <|reference_start|>Notes on information geometry and evolutionary processes: In order to analyze and extract different structural properties of distributions, one can introduce different coordinate systems over the manifold of distributions. In Evolutionary Computation, the Walsh bases and the Building Block Bases are often used to describe populations, which simplifies the analysis of evolutionary operators acting on populations. Quite independently of these approaches, information geometry has been developed as a geometric way to analyze different order dependencies between random variables (e.g., neural activations or genes). In these notes I briefly review the essentials of various coordinate bases and of information geometry. The goal is to give an overview and make the approaches comparable. Besides introducing meaningful coordinate bases, information geometry also offers an explicit way to distinguish different order interactions and it offers a geometric view on the manifold and thereby also on operators that apply on the manifold. For instance, uniform crossover can be interpreted as an orthogonal projection of a population along an m-geodesic, monotonically reducing the theta-coordinates that describe interactions between genes.<|reference_end|> | arxiv | @article{toussaint2004notes,
title={Notes on information geometry and evolutionary processes},
author={Marc Toussaint},
journal={arXiv preprint arXiv:nlin/0408040},
year={2004},
archivePrefix={arXiv},
eprint={nlin/0408040},
primaryClass={nlin.AO cs.NE}
} | toussaint2004notes |
arxiv-677002 | nlin/0409013 | Epistemic communities: description and hierarchic categorization | <|reference_start|>Epistemic communities: description and hierarchic categorization: Social scientists have shown an increasing interest in understanding the structure of knowledge communities, and particularly the organization of "epistemic communities", that is, groups of agents sharing common knowledge concerns. However, most existing approaches are based only on either social relationships or semantic similarity, while there has been virtually no attempt to link social and semantic aspects. In this paper, we introduce a formal framework addressing this issue and propose a method based on Galois lattices (or concept lattices) for categorizing epistemic communities in an automated and hierarchically structured fashion. Suggesting that our process allows us to rebuild a whole community structure and taxonomy, and notably fields and subfields gathering a certain proportion of agents, we eventually apply it to empirical data to exhibit these alleged structural properties, and successfully compare our results with categories spontaneously given by domain experts.<|reference_end|> | arxiv | @article{roth2004epistemic,
title={Epistemic communities: description and hierarchic categorization},
author={Camille Roth (CREA), Paul Bourgine (CREA)},
journal={Mathematical Population Studies 12(2) (2005) 107-130},
year={2004},
doi={10.1080/08898480590931404},
archivePrefix={arXiv},
eprint={nlin/0409013},
primaryClass={nlin.AO cs.IR}
} | roth2004epistemic |
arxiv-677003 | nlin/0410062 | A method to discern complexity in two-dimensional patterns generated by coupled map lattices | <|reference_start|>A method to discern complexity in two-dimensional patterns generated by coupled map lattices: Complex patterns generated by the time evolution of a one-dimensional digitalized coupled map lattice are quantitatively analyzed. A method for discerning complexity among the different patterns is implemented. The quantitative results indicate two zones in parameter space where the dynamics shows the most complex patterns. These zones are located on the two edges of an absorbent region where the system displays spatio-temporal intermittency.<|reference_end|> | arxiv | @article{sanchez2004a,
title={A method to discern complexity in two-dimensional patterns generated by
coupled map lattices},
author={Juan Sanchez and Ricardo Lopez-Ruiz},
journal={arXiv preprint arXiv:nlin/0410062},
year={2004},
doi={10.1016/j.physa.2005.02.058},
archivePrefix={arXiv},
eprint={nlin/0410062},
primaryClass={nlin.PS cond-mat.dis-nn cs.CG math.DS nlin.CD q-bio.QM}
} | sanchez2004a |
arxiv-677004 | nlin/0411063 | Detecting synchronization in spatially extended discrete systems by complexity measurements | <|reference_start|>Detecting synchronization in spatially extended discrete systems by complexity measurements: The synchronization of two stochastically coupled one-dimensional cellular automata (CA) is analyzed. It is shown that the transition to synchronization is characterized by a dramatic increase of the statistical complexity of the patterns generated by the difference automaton. This singular behavior is verified to be present in several CA rules displaying complex behavior.<|reference_end|> | arxiv | @article{sánchez2004detecting,
title={Detecting synchronization in spatially extended discrete systems by
complexity measurements},
author={Juan R. Sánchez and Ricardo López-Ruiz},
journal={arXiv preprint arXiv:nlin/0411063},
year={2004},
archivePrefix={arXiv},
eprint={nlin/0411063},
primaryClass={nlin.CG cond-mat.dis-nn cs.MA math.DS nlin.PS q-bio.QM}
} | sánchez2004detecting |
arxiv-677005 | nlin/0411066 | Self-Organizing Traffic Lights | <|reference_start|>Self-Organizing Traffic Lights: Steering traffic in cities is a very complex task, since improving efficiency involves the coordination of many actors. Traditional approaches attempt to optimize traffic lights for a particular density and configuration of traffic. The disadvantage of this lies in the fact that traffic densities and configurations change constantly. Traffic seems to be an adaptation problem rather than an optimization problem. We propose a simple and feasible alternative, in which traffic lights self-organize to improve traffic flow. We use a multi-agent simulation to study three self-organizing methods, which are able to outperform traditional rigid and adaptive methods. Using simple rules and no direct communication, traffic lights are able to self-organize and adapt to changing traffic conditions, reducing waiting times, number of stopped cars, and increasing average speeds.<|reference_end|> | arxiv | @article{gershenson2004self-organizing,
title={Self-Organizing Traffic Lights},
author={Carlos Gershenson},
journal={Complex Systems 16(1): 29-53. 2005},
year={2004},
archivePrefix={arXiv},
eprint={nlin/0411066},
primaryClass={nlin.AO cond-mat.stat-mech cs.AI cs.MA}
} | gershenson2004self-organizing |
arxiv-677006 | nlin/0501018 | Return-Map Cryptanalysis Revisited | <|reference_start|>Return-Map Cryptanalysis Revisited: As a powerful cryptanalysis tool, the method of return-map attacks can be used to extract secret messages masked by chaos in secure communication schemes. Recently, a simple defensive mechanism was presented to enhance the security of chaotic parameter modulation schemes against return-map attacks. Two techniques are combined in the proposed defensive mechanism: multistep parameter modulation and alternative driving of two different transmitter variables. This paper re-studies the security of this proposed defensive mechanism against return-map attacks, and points out that the security was much over-estimated in the original publication for both ciphertext-only attack and known/chosen-plaintext attacks. It is found that a deterministic relationship exists between the shape of the return map and the modulated parameter, and that such a relationship can be used to dramatically enhance return-map attacks, thereby making it quite easy to break the defensive mechanism.<|reference_end|> | arxiv | @article{li2005return-map,
title={Return-Map Cryptanalysis Revisited},
author={Shujun Li, Guanrong Chen and Gonzalo Álvarez},
journal={International Journal of Bifurcation and Chaos, vol. 16, no. 5,
pp. 1557-1568, 2006},
year={2005},
doi={10.1142/S0218127406015507},
archivePrefix={arXiv},
eprint={nlin/0501018},
primaryClass={nlin.CD cs.CR}
} | li2005return-map |
arxiv-677007 | nlin/0502027 | Two-body problem on a sphere. Reduction, stochasticity, periodic orbits | <|reference_start|>Two-body problem on a sphere. Reduction, stochasticity, periodic orbits: We consider the problem of two interacting particles on a sphere. The potential of the interaction depends on the distance between the particles. The case of Newtonian-type potentials is studied in most detail. We reduce this system to a system with two degrees of freedom and give a number of remarkable periodic orbits. We also discuss integrability and stochastization of the motion.<|reference_end|> | arxiv | @article{borisov2005two-body,
title={Two-body problem on a sphere. Reduction, stochasticity, periodic orbits},
author={A.V. Borisov, I.S. Mamaev, A.A. Kilin},
journal={Regular and Chaotic Dynamics, 2004 Volume 9 Number 3},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0502027},
primaryClass={nlin.CD cs.OH nlin.SI}
} | borisov2005two-body |
arxiv-677008 | nlin/0503024 | Galton Board | <|reference_start|>Galton Board: In this paper, we present results of simulations of a model of the Galton board for various degrees of elasticity of the ball-to-nail collision.<|reference_end|> | arxiv | @article{kozlov2005galton,
title={Galton Board},
author={V. V. Kozlov, M. Yu. Mitrofanova},
journal={Regular and Chaotic Dynamics, 2003 Volume 8 Number 4},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0503024},
primaryClass={nlin.CD cs.OH physics.class-ph}
} | kozlov2005galton |
arxiv-677009 | nlin/0504059 | Discrete Physics: a new way to look at cryptography | <|reference_start|>Discrete Physics: a new way to look at cryptography: This paper shows that Physics is very close to the substitution-diffusion paradigm of symmetric ciphers. Based on this analogy, we propose a new cryptographic algorithm. Statistical Physics gives design principles to devise fast, scalable and secure encryption systems. In particular, increasing space dimension and considering larger data blocks improve both speed and security, allowing us to reach high throughput (larger than 10Gb/s on dedicated HW). The physical approach enlarges the way to look at cryptography and is expected to bring new tools and concepts to better understand and quantify security aspects.<|reference_end|> | arxiv | @article{chopard2005discrete,
title={Discrete Physics: a new way to look at cryptography},
author={B.Chopard and S.Marconi},
journal={arXiv preprint arXiv:nlin/0504059},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0504059},
primaryClass={nlin.CG cs.CR}
} | chopard2005discrete |
arxiv-677010 | nlin/0505009 | A General Methodology for Designing Self-Organizing Systems | <|reference_start|>A General Methodology for Designing Self-Organizing Systems: Our technologies complexify our environments. Thus, new technologies need to deal with more and more complexity. Several efforts have been made to deal with this complexity using the concept of self-organization. However, in order to promote its use and understanding, we must first have a pragmatic understanding of complexity and self-organization. This paper presents a conceptual framework for speaking about self-organizing systems. The aim is to provide a methodology useful for designing and controlling systems developed to solve complex problems. First, practical notions of complexity and self-organization are given. Then, starting from the agent metaphor, a conceptual framework is presented. This provides formal ways of speaking about "satisfaction" of elements and systems. The main premise of the methodology claims that reducing the "friction" or "interference" of interactions between elements of a system will result in a higher "satisfaction" of the system, i.e. better performance. The methodology discusses different ways in which this can be achieved. A case study on self-organizing traffic lights illustrates the ideas presented in the paper.<|reference_end|> | arxiv | @article{gershenson2005a,
title={A General Methodology for Designing Self-Organizing Systems},
author={Carlos Gershenson},
journal={arXiv preprint arXiv:nlin/0505009},
year={2005},
number={ECCO working paper 2005-05},
archivePrefix={arXiv},
eprint={nlin/0505009},
primaryClass={nlin.AO cs.GL cs.SE physics.soc-ph}
} | gershenson2005a |
arxiv-677011 | nlin/0505043 | A network analysis of committees in the United States House of Representatives | <|reference_start|>A network analysis of committees in the United States House of Representatives: Network theory provides a powerful tool for the representation and analysis of complex systems of interacting agents. Here we investigate the United States House of Representatives network of committees and subcommittees, with committees connected according to ``interlocks'' or common membership. Analysis of this network reveals clearly the strong links between different committees, as well as the intrinsic hierarchical structure within the House as a whole. We show that network theory, combined with the analysis of roll call votes using singular value decomposition, successfully uncovers political and organizational correlations between committees in the House without the need to incorporate other political information.<|reference_end|> | arxiv | @article{porter2005a,
title={A network analysis of committees in the United States House of
Representatives},
author={Mason A. Porter, Peter J. Mucha, M.E.J. Newman, and Casey M. Warmbrand},
journal={PNAS, Vol. 102, No. 20: 7057-7062 (2005)},
year={2005},
doi={10.1073/pnas.0500191102},
archivePrefix={arXiv},
eprint={nlin/0505043},
primaryClass={nlin.AO cs.MA math.ST physics.data-an physics.soc-ph stat.TH}
} | porter2005a |
arxiv-677012 | nlin/0506030 | Chaos in computer performance | <|reference_start|>Chaos in computer performance: Modern computer microprocessors are composed of hundreds of millions of transistors that interact through intricate protocols. Their performance during program execution may be highly variable and present aperiodic oscillations. In this paper, we apply current nonlinear time series analysis techniques to the performances of modern microprocessors during the execution of prototypical programs. Our results present pieces of evidence strongly supporting that the high variability of the performance dynamics during the execution of several programs display low-dimensional deterministic chaos, with sensitivity to initial conditions comparable to textbook models. Taken together, these results show that the instantaneous performances of modern microprocessors constitute a complex (or at least complicated) system and would benefit from analysis with modern tools of nonlinear and complexity science.<|reference_end|> | arxiv | @article{berry2005chaos,
title={Chaos in computer performance},
author={Hugues Berry (ALCHEMY), Daniel Gracia Pérez (ALCHEMY), Olivier Temam
(ALCHEMY)},
journal={arXiv preprint arXiv:nlin/0506030},
year={2005},
doi={10.1063/1.2159147},
archivePrefix={arXiv},
eprint={nlin/0506030},
primaryClass={nlin.AO cond-mat.dis-nn cond-mat.stat-mech cs.AR nlin.CD}
} | berry2005chaos |
arxiv-677013 | nlin/0506061 | Transmitting a signal by amplitude modulation in a chaotic network | <|reference_start|>Transmitting a signal by amplitude modulation in a chaotic network: We discuss the ability of a network with nonlinear relays and chaotic dynamics to transmit signals, on the basis of a linear response theory developed by Ruelle for dissipative systems. We show in particular how the dynamics interfere with the graph topology to produce an effective transmission network, whose topology depends on the signal, and cannot be directly read off the ``wired'' network. This leads one to reconsider notions such as ``hubs''. Then, we show examples where, with a suitable choice of the carrier frequency (resonance), one can transmit a signal from one node to another by amplitude modulation, in spite of chaos. Also, we give an example where a signal, transmitted to any node via different paths, can only be recovered by a couple of specific nodes. This opens the possibility for encoding data in a way such that the recovery of the signal requires the knowledge of the carrier frequency and can be performed only at some specific node.<|reference_end|> | arxiv | @article{cessac2005transmitting,
title={Transmitting a signal by amplitude modulation in a chaotic network},
author={B. Cessac, J.A. Sepulchre},
journal={Chaos, 16, 013104 (2006).},
year={2005},
doi={10.1063/1.2126813},
archivePrefix={arXiv},
eprint={nlin/0506061},
primaryClass={nlin.CD cond-mat.stat-mech cs.NE}
} | cessac2005transmitting |
arxiv-677014 | nlin/0507021 | Measuring Generalized Preferential Attachment in Dynamic Social Networks | <|reference_start|>Measuring Generalized Preferential Attachment in Dynamic Social Networks: The mechanism of preferential attachment underpins most recent social network formation models. Yet few authors attempt to check or quantify assumptions on this mechanism. We call generalized preferential attachment any kind of preference to interact with other agents with respect to any node property. We then introduce tools for measuring empirically and characterizing comprehensively such phenomena, and apply these tools to a socio-semantic network of scientific collaborations, investigating in particular homophilic behavior. This opens the way to a whole class of realistic and credible social network morphogenesis models.<|reference_end|> | arxiv | @article{roth2005measuring,
title={Measuring Generalized Preferential Attachment in Dynamic Social Networks},
author={Camille Roth (CREA)},
journal={arXiv preprint arXiv:nlin/0507021},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0507021},
primaryClass={nlin.AO cond-mat.dis-nn cs.DM cs.NI physics.soc-ph}
} | roth2005measuring |
arxiv-677015 | nlin/0508006 | Metamimetic Games: Modeling Metadynamics in Social Cognition | <|reference_start|>Metamimetic Games: Modeling Metadynamics in Social Cognition: Imitation is fundamental to the understanding of social system dynamics. But the diversity of imitation rules employed by modelers proves that the modeling of mimetic processes cannot avoid the traditional problem of endogenization of all the choices, including that of the mimetic rules. Starting from the remark that human reflexive capacities are the ground for a new class of mimetic rules, I propose a formal framework, metamimetic games, that enables one to endogenize the distribution of imitation rules while being human-specific. The corresponding concepts of equilibrium - counterfactually stable state - and attractor are introduced. Finally, I give an interpretation of social differentiation in terms of cultural co-evolution among a set of possible motivations, which departs from the traditional view of optimization indexed to criteria that exist prior to the activity of agents.<|reference_end|> | arxiv | @article{chavalarias2005metamimetic,
title={Metamimetic Games: Modeling Metadynamics in Social Cognition},
author={David Chavalarias (CREA)},
journal={Journal of Artificial Societies and Social Simulation vol. 9, no.
2 (2006) 32 p},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0508006},
primaryClass={nlin.AO cs.MA nlin.CG}
} | chavalarias2005metamimetic |
arxiv-677016 | nlin/0509007 | Lattices for Dynamic, Hierarchic & Overlapping Categorization: the Case of Epistemic Communities | <|reference_start|>Lattices for Dynamic, Hierarchic & Overlapping Categorization: the Case of Epistemic Communities: We present a method for hierarchic categorization and taxonomy evolution description. We focus on the structure of epistemic communities (ECs), or groups of agents sharing common knowledge concerns. Introducing a formal framework based on Galois lattices, we categorize ECs in an automated and hierarchically structured way and propose criteria for selecting the most relevant epistemic communities - for instance, ECs gathering a certain proportion of agents and thus prototypical of major fields. This process produces a manageable, insightful taxonomy of the community. Then, the longitudinal study of these static pictures makes possible an historical description. In particular, we capture stylized facts such as field progress, decline, specialization, interaction (merging or splitting), and paradigm emergence. The detection of such patterns in social networks could fruitfully be applied to other contexts.<|reference_end|> | arxiv | @article{roth2005lattices,
title={Lattices for Dynamic, Hierarchic & Overlapping Categorization: the Case
of Epistemic Communities},
author={Camille Roth (CREA), Paul Bourgine (CREA)},
journal={arXiv preprint arXiv:nlin/0509007},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0509007},
primaryClass={nlin.AO cs.AI cs.DL cs.IR}
} | roth2005lattices |
arxiv-677017 | nlin/0511015 | Combinatorial Approach to Object Analysis | <|reference_start|>Combinatorial Approach to Object Analysis: We present a perceptual mathematical model for image and signal analysis. A resemblance measure is defined, and submitted to an innovative combinatorial optimization algorithm. Numerical simulations are also presented.<|reference_end|> | arxiv | @article{kanhouche2005combinatorial,
title={Combinatorial Approach to Object Analysis},
author={Rami Kanhouche (CMLA)},
journal={arXiv preprint arXiv:nlin/0511015},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0511015},
primaryClass={nlin.AO cs.LG}
} | kanhouche2005combinatorial |
arxiv-677018 | nlin/0511018 | The Role of Redundancy in the Robustness of Random Boolean Networks | <|reference_start|>The Role of Redundancy in the Robustness of Random Boolean Networks: Evolution depends on the possibility of successfully exploring fitness landscapes via mutation and recombination. With these search procedures, exploration is difficult in "rugged" fitness landscapes, where small mutations can drastically change functionalities in an organism. Random Boolean networks (RBNs), being general models, can be used to explore theories of how evolution can take place in rugged landscapes; or even change the landscapes. In this paper, we study the effect that redundant nodes have on the robustness of RBNs. Using computer simulations, we have found that the addition of redundant nodes to RBNs increases their robustness. We conjecture that redundancy is a way of "smoothening" fitness landscapes. Therefore, redundancy can facilitate evolutionary searches. However, too much redundancy could reduce the rate of adaptation of an evolutionary process. Our results also provide supporting evidence in favour of Kauffman's conjecture (Kauffman, 2000, p.195).<|reference_end|> | arxiv | @article{gershenson2005the,
title={The Role of Redundancy in the Robustness of Random Boolean Networks},
author={Carlos Gershenson, Stuart A. Kauffman, and Ilya Shmulevich},
journal={arXiv preprint arXiv:nlin/0511018},
year={2005},
number={ECCO Working Paper 2005-08},
archivePrefix={arXiv},
eprint={nlin/0511018},
primaryClass={nlin.AO cond-mat.stat-mech cs.CC nlin.CG physics.bio-ph q-bio.MN q-bio.QM}
} | gershenson2005the |
arxiv-677019 | nlin/0511061 | Self-synchronization of Cellular Automata: an attempt to control patterns | <|reference_start|>Self-synchronization of Cellular Automata: an attempt to control patterns: The search for stable patterns in the evolution of cellular automata is implemented using stochastic synchronization between the present structures of the system and its preceding configurations. For most of the known evolution rules with complex behavior a dynamic competition among all the possible stable patterns is established and no stationary regime is reached. For the particular rule coded by the decimal number 18, a self-synchronization phenomenon can be obtained, even when strong modifications to the synchronization method are applied.<|reference_end|> | arxiv | @article{sanchez2005self-synchronization,
title={Self-synchronization of Cellular Automata: an attempt to control
patterns},
author={J.R. Sanchez and R. Lopez-Ruiz},
journal={arXiv preprint arXiv:nlin/0511061},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0511061},
primaryClass={nlin.CG cond-mat.dis-nn cs.DM math.DS}
} | sanchez2005self-synchronization |
arxiv-677020 | nlin/0511072 | Connectivity and Cost Trade-offs in Multihop Wireless Networks | <|reference_start|>Connectivity and Cost Trade-offs in Multihop Wireless Networks: Ad-hoc wireless networks are of increasing importance in communication and are frequently constrained by energy use. Here we propose a distributed, non-hierarchical adaptive method using preferential detachment for adjusting node transmission power to reduce overall power consumption without violating network load limitations. We derive a cost and path length trade-off diagram that establishes the bounds of effectiveness of the adaptive strategy and compare it with uniform node transmission strategy for several node topologies. We achieve cost savings as high as 90% for specific topologies.<|reference_end|> | arxiv | @article{lim2005connectivity,
title={Connectivity and Cost Trade-offs in Multihop Wireless Networks},
author={May Lim, Dan Braha, Sanith Wijesinghe, Stephenson Tucker, Yaneer
Bar-Yam},
journal={arXiv preprint arXiv:nlin/0511072},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0511072},
primaryClass={nlin.AO cs.NI}
} | lim2005connectivity |
arxiv-677021 | nlin/0512015 | A Predictive Theory of Games | <|reference_start|>A Predictive Theory of Games: Conventional noncooperative game theory hypothesizes that the joint strategy of a set of players in a game must satisfy an "equilibrium concept". All other joint strategies are considered impossible; the only issue is what equilibrium concept is "correct". This hypothesis violates the desiderata underlying probability theory. Indeed, probability theory renders moot the problem of what equilibrium concept is correct - every joint strategy can arise with non-zero probability. Rather than a first-principles derivation of an equilibrium concept, game theory requires a first-principles derivation of a distribution over joint (mixed) strategies. This paper shows how information theory can provide such a distribution over joint strategies. If a scientist external to the game wants to distill such a distribution to a point prediction, that prediction should be set by decision theory, using their (!) loss function. So the predicted joint strategy - the "equilibrium concept" - varies with the external scientist's loss function. It is shown here that in many games, having a probability distribution with support restricted to Nash equilibria - as stipulated by conventional game theory - is impossible. It is also shown how to: i) Derive an information-theoretic quantification of a player's degree of rationality; ii) Derive bounded rationality as a cost of computation; iii) Elaborate the close formal relationship between game theory and statistical physics; iv) Use this relationship to extend game theory to allow stochastically varying numbers of players.<|reference_end|> | arxiv | @article{wolpert2005a,
title={A Predictive Theory of Games},
author={David H. Wolpert},
journal={arXiv preprint arXiv:nlin/0512015},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0512015},
primaryClass={nlin.AO cond-mat.stat-mech cs.GT math.GM math.PR}
} | wolpert2005a |
arxiv-677022 | nlin/0512017 | Symmetry pattern transition in cellular automata with complex behavior | <|reference_start|>Symmetry pattern transition in cellular automata with complex behavior: A transition from asymmetric to symmetric patterns in time-dependent extended systems is described. It is found that one-dimensional cellular automata, started from fully random initial conditions, can be forced to evolve into complex symmetrical patterns by stochastically coupling a proportion $p$ of pairs of sites located at equal distance from the center of the lattice. A nontrivial critical value of $p$ must be surpassed in order to obtain symmetrical patterns during the evolution. This strategy is able to classify the cellular automata rules with complex behavior into those that support time-dependent symmetric patterns and those that do not.<|reference_end|> | arxiv | @article{sanchez2005symmetry,
title={Symmetry pattern transition in cellular automata with complex behavior},
author={J.R. Sanchez and R. Lopez-Ruiz},
journal={arXiv preprint arXiv:nlin/0512017},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0512017},
primaryClass={nlin.CG cond-mat.dis-nn cs.DM math.DS}
} | sanchez2005symmetry |
arxiv-677023 | nlin/0512048 | Modeling Endogenous Social Networks: the Example of Emergence and Stability of Cooperation without Refusal | <|reference_start|>Modeling Endogenous Social Networks: the Example of Emergence and Stability of Cooperation without Refusal: Aggregated phenomena in social sciences and economics are highly dependent on the way individuals interact. To help understand the interplay between socio-economic activities and underlying social networks, this paper studies a sequential prisoner's dilemma with binary choice. It provides analytical and computational insight into the role of endogenous networks in the emergence and sustainability of cooperation and exhibits an alternative to the choice-and-refusal mechanism that is often proposed to explain cooperation. The study focuses on heterogeneous equilibria and the emergence of cooperation from an all-defector state, the two stylized facts that this model successfully reconstructs.<|reference_end|> | arxiv | @article{chavalarias2005modeling,
title={Modeling Endogenous Social Networks: the Example of Emergence and
Stability of Cooperation without Refusal},
author={David Chavalarias (CREA)},
journal={arXiv preprint arXiv:nlin/0512048},
year={2005},
archivePrefix={arXiv},
eprint={nlin/0512048},
primaryClass={nlin.AO cond-mat.other cs.GT cs.MA cs.OH q-bio.OT q-bio.PE}
} | chavalarias2005modeling |
arxiv-677024 | nlin/0603076 | Approximating the Amplitude and Form of Limit Cycles in the Weakly Nonlinear Regime of Lienard Systems | <|reference_start|>Approximating the Amplitude and Form of Limit Cycles in the Weakly Nonlinear Regime of Lienard Systems: Li\'{e}nard equations, $\ddot{x}+\epsilon f(x)\dot{x}+x=0$, with $f(x)$ an even continuous function are considered. In the weakly nonlinear regime ($\epsilon\to 0$), the number and a zeroth-order (in $\epsilon$) approximation of the amplitude of limit cycles present in this type of system can be obtained by applying a methodology recently proposed by the authors [L\'opez-Ruiz R, L\'opez JL. Bifurcation curves of limit cycles in some Li\'enard systems. Int J Bifurcat Chaos 2000; 10:971-980]. In the present work, that method is carried forward to higher orders in $\epsilon$ and is embedded in a general recursive algorithm capable of approximating the form of the limit cycles and of correcting their amplitudes as an expansion in powers of $\epsilon$. Several examples showing the application of this scheme are given.<|reference_end|> | arxiv | @article{lopez2006approximating,
title={Approximating the Amplitude and Form of Limit Cycles in the Weakly
Nonlinear Regime of Lienard Systems},
author={Jose-Luis Lopez and Ricardo Lopez-Ruiz},
journal={arXiv preprint arXiv:nlin/0603076},
year={2006},
doi={10.1016/j.chaos.2006.04.031},
archivePrefix={arXiv},
eprint={nlin/0603076},
primaryClass={nlin.AO cs.DM math.DS}
} | lopez2006approximating |
arxiv-677025 | nlin/0605009 | Sufficient set of integrability conditions of an orthonomic system | <|reference_start|>Sufficient set of integrability conditions of an orthonomic system: Every orthonomic system of partial differential equations is known to possess a finite number of integrability conditions sufficient to ensure the validity of all. Herewith we offer an efficient algorithm to construct a sufficient set of integrability conditions free of redundancies.<|reference_end|> | arxiv | @article{marvan2006sufficient,
title={Sufficient set of integrability conditions of an orthonomic system},
author={M. Marvan},
journal={Found. Comput. Math. 9 (2009) 651-674},
year={2006},
doi={10.1007/s10208-008-9039-8},
archivePrefix={arXiv},
eprint={nlin/0605009},
primaryClass={nlin.SI cs.SC}
} | marvan2006sufficient |
arxiv-677026 | nlin/0605025 | The Limit Cycles of Lienard Equations in the Weakly Nonlinear Regime | <|reference_start|>The Limit Cycles of Lienard Equations in the Weakly Nonlinear Regime: Li\'enard equations of the form $\ddot{x}+\epsilon f(x)\dot{x}+x=0$, with $f(x)$ an even function, are considered in the weakly nonlinear regime ($\epsilon\to 0$). A perturbative algorithm for obtaining the number, amplitude and shape of the limit cycles of these systems is given. The validity of this algorithm is shown and several examples illustrating its application are given. In particular, an ${\mathcal O}(\epsilon^8)$ approximation for the amplitude of the van der Pol limit cycle is explicitly obtained.<|reference_end|> | arxiv | @article{lopez2006the,
title={The Limit Cycles of Lienard Equations in the Weakly Nonlinear Regime},
author={Jose-Luis Lopez and Ricardo Lopez-Ruiz},
journal={arXiv preprint arXiv:nlin/0605025},
year={2006},
archivePrefix={arXiv},
eprint={nlin/0605025},
primaryClass={nlin.AO cs.DM math.DS}
} | lopez2006the |
arxiv-677027 | nlin/0605029 | Three Logistic Models for the Ecological and Economic Interactions: Symbiosis, Predator-Prey and Competition | <|reference_start|>Three Logistic Models for the Ecological and Economic Interactions: Symbiosis, Predator-Prey and Competition: If one isolated species (corporation) is supposed to evolve following the logistic mapping, then we are tempted to think that the dynamics of two species (corporations) can be expressed by a coupled system of two discrete logistic equations. As three basic relationships between two species are present in Nature, namely symbiosis, predator-prey and competition, three different models are obtained. Each model is a cubic two-dimensional discrete logistic-type equation with its own dynamical properties: stationary regime, periodicity, quasi-periodicity and chaos. We also propose that these models could be useful for thinking about the different interactions occurring in the economic world, for instance the competition and collaboration between corporations. Furthermore, these models could be considered as basic ingredients for constructing more complex interactions in ecological and economic networks.<|reference_end|> | arxiv | @article{lopez-ruiz2006three,
title={Three Logistic Models for the Ecological and Economic Interactions:
Symbiosis, Predator-Prey and Competition},
author={Ricardo Lopez-Ruiz and Daniele Fournier-Prunaret},
journal={arXiv preprint arXiv:nlin/0605029},
year={2006},
archivePrefix={arXiv},
eprint={nlin/0605029},
primaryClass={nlin.AO cs.MA math.DS}
} | lopez-ruiz2006three |
arxiv-677028 | nlin/0608020 | Cryptanalysis of a chaotic block cipher with external key and its improved version | <|reference_start|>Cryptanalysis of a chaotic block cipher with external key and its improved version: Recently, Pareek et al. proposed a symmetric key block cipher using multiple one-dimensional chaotic maps. This paper reports some new findings on the security problems of this kind of chaotic cipher: 1) a number of weak keys exists; 2) some important intermediate data of the cipher are not sufficiently random; 3) the whole secret key can be broken by a known-plaintext attack with only 120 consecutive known plain-bytes in one known plaintext. In addition, it is pointed out that an improved version of the chaotic cipher proposed by Wei et al. still suffers from all the same security defects.<|reference_end|> | arxiv | @article{li2006cryptanalysis,
title={Cryptanalysis of a chaotic block cipher with external key and its
improved version},
author={Chengqing Li, Shujun Li, Gonzalo Álvarez, Guanrong Chen and
Kwok-Tung Lo},
journal={Chaos, Solitons & Fractals, vol. 37, no. 1, pp. 299-307, 2008},
year={2006},
doi={10.1016/j.chaos.2006.08.025},
archivePrefix={arXiv},
eprint={nlin/0608020},
primaryClass={nlin.CD cs.CR}
} | li2006cryptanalysis |
arxiv-677029 | nlin/0609033 | Fame Emerges as a Result of Small Memory | <|reference_start|>Fame Emerges as a Result of Small Memory: A dynamic memory model is proposed in which an agent ``learns'' a new agent by means of recommendation. The agents can also ``remember'' and ``forget''. The memory size is decreased while the population size is kept constant. ``Fame'' emerges as a few agents become very well known at the expense of the majority, who are completely forgotten. The minimum and the maximum of fame change linearly with the relative memory size. The network properties of the who-knows-who graph, which represents the state of the system, are investigated.<|reference_end|> | arxiv | @article{bingol2006fame,
title={Fame Emerges as a Result of Small Memory},
author={Haluk Bingol},
journal={Physical Review E 77, 036118, 2008},
year={2006},
doi={10.1103/PhysRevE.77.036118},
archivePrefix={arXiv},
eprint={nlin/0609033},
primaryClass={nlin.AO cs.CY cs.MA physics.soc-ph}
} | bingol2006fame |
arxiv-677030 | nlin/0609038 | From Neuron to Neural Networks dynamics | <|reference_start|>From Neuron to Neural Networks dynamics: This paper presents an overview of some techniques and concepts coming from dynamical system theory and used for the analysis of dynamical neural network models. In the first section, we describe the dynamics of the neuron, starting from the Hodgkin-Huxley description, which is in some sense the canonical description for the ``biological neuron''. We discuss some models reducing the Hodgkin-Huxley model to a two-dimensional dynamical system, keeping one of the main features of the neuron: its excitability. We then present examples of phase diagrams and bifurcation analysis for the Hodgkin-Huxley equations. Finally, we end this section with a dynamical system analysis of nerve impulse propagation along the axon. We then consider neuron couplings, with a brief description of synapses, synaptic plasticity and learning, in a second section. We also briefly discuss the delicate issue of causal action from one neuron to another when complex feedback effects and nonlinear dynamics are involved. The third section presents the limit of weak coupling and the use of normal form techniques to handle this situation. We then consider several examples of recurrent models with different types of synaptic interactions (symmetric, cooperative, random). We introduce various techniques coming from statistical physics and dynamical systems theory. A last section is devoted to a detailed example of a recurrent model where we go deep into the analysis of the dynamics and discuss the effect of learning on the neuron dynamics. We also present recent methods allowing the analysis of the nonlinear effects of the neural dynamics on signal propagation and causal action. An appendix, presenting the main notions of dynamical systems theory useful for the comprehension of the chapter, has been added for the convenience of the reader.<|reference_end|> | arxiv | @article{cessac2006from,
title={From Neuron to Neural Networks dynamics},
author={B. Cessac, M. Samuelides},
journal={EPJ Special Topics "Topics in Dynamical Neural Networks: From
Large Scale Neural Networks to Motor Control and Vision", Vol. 142, Num. 1,
7-88, (2007).},
year={2006},
archivePrefix={arXiv},
eprint={nlin/0609038},
primaryClass={nlin.AO cond-mat.dis-nn cs.NE}
} | cessac2006from |
arxiv-677031 | nlin/0609042 | A Formal Treatment of Generalized Preferential Attachment and its Empirical Validation | <|reference_start|>A Formal Treatment of Generalized Preferential Attachment and its Empirical Validation: Generalized preferential attachment is defined as the tendency of a vertex to acquire new links in the future with respect to a particular vertex property. Understanding which properties influence link acquisition tendency (LAT) gives us predictive power to estimate the future growth of the network and insight into the actual dynamics governing complex networks. In this study, we explore the effect of age and degree on LAT by analyzing data collected from a new complex-network growth dataset. We found that the LAT and the degree of a vertex are linearly correlated, in accordance with previous studies. Interestingly, the relation between the LAT and the age of a vertex is found to be in conflict with the known models of network growth. We identified three different periods in the network's lifetime where the relation between age and LAT is strongly positive, almost stationary, and negative, respectively.<|reference_end|> | arxiv | @article{herdagdelen2006a,
title={A Formal Treatment of Generalized Preferential Attachment and its
Empirical Validation},
author={Amac Herdagdelen, Eser Aygun, Haluk Bingol},
journal={EPL 78 No 6 (June 2007) 60007},
year={2006},
doi={10.1209/0295-5075/78/60007},
archivePrefix={arXiv},
eprint={nlin/0609042},
primaryClass={nlin.AO cond-mat.stat-mech cs.CY physics.data-an}
} | herdagdelen2006a |
arxiv-677032 | nlin/0610040 | Self-organizing traffic lights: A realistic simulation | <|reference_start|>Self-organizing traffic lights: A realistic simulation: We have previously shown in an abstract simulation (Gershenson, 2005) that self-organizing traffic lights can greatly improve traffic flow for any density. In this paper, we extend these results to a realistic setting, implementing self-organizing traffic lights in an advanced traffic simulator using real data from a Brussels avenue. On average, for different traffic densities, travel waiting times are reduced by 50% compared to the current green wave method.<|reference_end|> | arxiv | @article{cools2006self-organizing,
title={Self-organizing traffic lights: A realistic simulation},
author={Seung-Bae Cools, Carlos Gershenson, and Bart D'Hooghe (Vrije
Universiteit Brussel)},
journal={In Prokopenko, M. (Ed.). Self-Organization: Applied Multi-Agent
Systems, Chapter 3, pp. 41-49. Springer, London. 2007},
year={2006},
doi={10.1007/978-1-84628-982-8_3},
archivePrefix={arXiv},
eprint={nlin/0610040},
primaryClass={nlin.AO cond-mat.stat-mech cs.AI physics.comp-ph physics.soc-ph}
} | cools2006self-organizing |
arxiv-677033 | nlin/0611044 | Why the Maxwellian Distribution is the Attractive Fixed Point of the Boltzmann Equation | <|reference_start|>Why the Maxwellian Distribution is the Attractive Fixed Point of the Boltzmann Equation: The origin of the Boltzmann factor is revisited. An alternative derivation from the microcanonical picture is given. The Maxwellian distribution in a mono-dimensional ideal gas is obtained by following this insight. Other possible applications, for instance obtaining the wealth distribution in human society, are suggested in the remarks.<|reference_end|> | arxiv | @article{lopez-ruiz2006why,
title={Why the Maxwellian Distribution is the Attractive Fixed Point of the
Boltzmann Equation},
author={Ricardo Lopez-Ruiz and Xavier Calbet},
journal={arXiv preprint arXiv:nlin/0611044},
year={2006},
archivePrefix={arXiv},
eprint={nlin/0611044},
primaryClass={nlin.CD cond-mat.stat-mech cs.MA math.ST physics.class-ph stat.TH}
} | lopez-ruiz2006why |
arxiv-677034 | nlin/0611054 | A Model of a Trust-based Recommendation System on a Social Network | <|reference_start|>A Model of a Trust-based Recommendation System on a Social Network: In this paper, we present a model of a trust-based recommendation system on a social network. The idea of the model is that agents use their social network to reach information and their trust relationships to filter it. We investigate how the dynamics of trust among agents affect the performance of the system by comparing it to a frequency-based recommendation system. Furthermore, we identify the impact of network density, preference heterogeneity among agents, and knowledge sparseness to be crucial factors for the performance of the system. The system self-organises in a state with performance near to the optimum; the performance on the global level is an emergent property of the system, achieved without explicit coordination from the local interactions of agents.<|reference_end|> | arxiv | @article{walter2006a,
title={A Model of a Trust-based Recommendation System on a Social Network},
author={Frank E. Walter, Stefano Battiston, Frank Schweitzer},
journal={Journal of Autonomous Agents and Multi-Agent Systems, vol. 16, no.
1 (2008), pp. 57-74},
year={2006},
doi={10.1007/s10458-007-9021-x},
archivePrefix={arXiv},
eprint={nlin/0611054},
primaryClass={nlin.AO cs.IR physics.soc-ph}
} | walter2006a |
arxiv-677035 | nlin/0702001 | Bistability: a common feature in some "aggregates" of logistic maps | <|reference_start|>Bistability: a common feature in some "aggregates" of logistic maps: As argued by Anderson [Science 177, 393 (1972)], the "reductionist" hypothesis does not by any means imply a "constructionist" one. Hence, in general, the behavior of large and complex aggregates of elementary components cannot be understood or extrapolated from the properties of a few components. Following this insight, we have simulated different "aggregates" of logistic maps according to a particular coupling scheme. All these aggregates show a similar pattern of dynamical properties, namely bistable behavior, which is also found in a network of many units of the same type, independently of the number of components and of the interconnection topology. A qualitative relationship with brain-like systems is suggested.<|reference_end|> | arxiv | @article{lopez-ruiz2007bistability:,
title={Bistability: a common feature in some "aggregates" of logistic maps},
author={Ricardo Lopez-Ruiz and Daniele Fournier-Prunaret},
journal={arXiv preprint arXiv:nlin/0702001},
year={2007},
archivePrefix={arXiv},
eprint={nlin/0702001},
primaryClass={nlin.AO cs.NE}
} | lopez-ruiz2007bistability: |
arxiv-677036 | nlin/0703036 | Statistical User Model for the Internet Access | <|reference_start|>Statistical User Model for the Internet Access: A new statistics-based modeling approach to characterize a user's behavior on an Internet access link is presented. The real patterns of Internet traffic in a heterogeneous Campus Network are studied. We find three clearly different patterns of individual user behavior, study their common features and group users behaving alike into three clusters. This allows us to build a probabilistic mixture model that can explain the expected global behavior of the three different types of users. We discuss the implications of this emergent phenomenology in the field of multi-agent complex systems.<|reference_end|> | arxiv | @article{pellicer-lostao2007statistical,
title={Statistical User Model for the Internet Access},
author={Carmen Pellicer-Lostao, Daniel Morato and Ricardo Lopez-Ruiz},
journal={arXiv preprint arXiv:nlin/0703036},
year={2007},
archivePrefix={arXiv},
eprint={nlin/0703036},
primaryClass={nlin.AO cond-mat.stat-mech cs.MA cs.NI}
} | pellicer-lostao2007statistical |
arxiv-677037 | nlin/0703050 | Competition of Self-Organized Rotating Spiral Autowaves in a Nonequilibrium Dissipative System of Three-Level Phaser | <|reference_start|>Competition of Self-Organized Rotating Spiral Autowaves in a Nonequilibrium Dissipative System of Three-Level Phaser: We present results of cellular automata based investigations of rotating spiral autowaves in a nonequilibrium excitable medium which models three-level paramagnetic microwave phonon laser (phaser). The computational model is described in arXiv:cond-mat/0410460v2 and arXiv:cond-mat/0602345v1 . We have observed several new scenarios of self-organization, competition and dynamical stabilization of rotating spiral autowaves under conditions of cross-relaxation between three-level active centers. In particular, phenomena of inversion of topological charge, as well as processes of regeneration and replication of rotating spiral autowaves in various excitable media were revealed and visualized for mesoscopic-scale areas of phaser-type active systems, which model real phaser devices.<|reference_end|> | arxiv | @article{makovetskii2007competition,
title={Competition of Self-Organized Rotating Spiral Autowaves in a
Nonequilibrium Dissipative System of Three-Level Phaser},
author={D. N. Makovetskii},
journal={Proc. 6-th Intl. Kharkov Symp. "Physics and Engineering of
Microwaves, Millimeter and Submillimeter Waves (MSMW'2007)", Kharkov, June
25-30, 2007. Vol.2. Report VI-16},
year={2007},
archivePrefix={arXiv},
eprint={nlin/0703050},
primaryClass={nlin.CG cs.NE nlin.AO}
} | makovetskii2007competition |
arxiv-677038 | nucl-ex/0111003 | A slow control system for the Garfield apparatus | <|reference_start|>A slow control system for the Garfield apparatus: The major part of the GARFIELD apparatus electronics is monitored and set up through a slow control system, which has been developed at LNL. A software package based on LabVIEW has been dedicated to the setting and control of 16-channel integrated Amplifiers and Constant Fraction Discriminators. GPIB controllers and GPIB-ENET interfaces have been used for the communication between the Personal Computer and the front-end of the electronics.<|reference_end|> | arxiv | @article{giacchini2001a,
title={A slow control system for the Garfield apparatus},
author={M. Giacchini, F. Gramegna, S. Bertocco},
journal={eConf C011127 (2001) TUAP014},
year={2001},
archivePrefix={arXiv},
eprint={nucl-ex/0111003},
primaryClass={nucl-ex cs.NI}
} | giacchini2001a |
arxiv-677039 | nucl-th/0605001 | Isospin asymmetry in nuclei and nuclear symmetry energy | <|reference_start|>Isospin asymmetry in nuclei and nuclear symmetry energy: The volume and surface symmetry parts of the nuclear symmetry energy and other coefficients of the liquid droplet model are determined from the measured atomic masses by the maximum likelihood estimator. The volume symmetry energy coefficient extracted from finite nuclei provides a constraint on the nuclear symmetry energy. This approach also yields the neutron skin of a finite nucleus through its relationship with the volume and surface symmetry terms and the Coulomb energy coefficient. The description of nuclear matter from the isoscalar and isovector components of the density dependent M3Y effective interaction provide a value of the symmetry energy that is consistent with the empirical value of the symmetry energy extracted from measured atomic masses and with other modern theoretical descriptions of nuclear matter.<|reference_end|> | arxiv | @article{mukhopadhyay2006isospin,
title={Isospin asymmetry in nuclei and nuclear symmetry energy},
author={Tapan Mukhopadhyay and D.N. Basu},
journal={Acta Phys. Polon. B 38:3225-3236, 2007},
year={2006},
archivePrefix={arXiv},
eprint={nucl-th/0605001},
primaryClass={nucl-th cs.NA physics.data-an stat.AP}
} | mukhopadhyay2006isospin |
arxiv-677040 | physics/0001048 | High-resolution path-integral development of financial options | <|reference_start|>High-resolution path-integral development of financial options: The Black-Scholes theory of option pricing has been considered for many years as an important but very approximate zeroth-order description of actual market behavior. We generalize the functional form of the diffusion of these systems and also consider multi-factor models including stochastic volatility. Daily Eurodollar futures prices and implied volatilities are fit to determine exponents of functional behavior of diffusions using methods of global optimization, Adaptive Simulated Annealing (ASA), to generate tight fits across moving time windows of Eurodollar contracts. These short-time fitted distributions are then developed into long-time distributions using a robust non-Monte Carlo path-integral algorithm, PATHINT, to generate prices and derivatives commonly used by option traders.<|reference_end|> | arxiv | @article{ingber2000high-resolution,
title={High-resolution path-integral development of financial options},
author={Lester Ingber},
journal={Physica A 283 (3-4) pp. 529-558 (2000)},
year={2000},
doi={10.1016/S0378-4371(00)00229-6},
archivePrefix={arXiv},
eprint={physics/0001048},
primaryClass={physics.comp-ph cs.CE physics.data-an q-fin.PR}
} | ingber2000high-resolution |
arxiv-677041 | physics/0002054 | Evolution of differentiated expression patterns in digital organisms | <|reference_start|>Evolution of differentiated expression patterns in digital organisms: We investigate the evolutionary processes behind the development and optimization of multiple threads of execution in digital organisms using the avida platform, a software package that implements Darwinian evolution on populations of self-replicating computer programs. The system is seeded with a linearly executed ancestor capable only of reproducing its own genome, whereas its underlying language has the capacity for multiple threads of execution (i.e., simultaneous expression of sections of the genome.) We witness the evolution to multi-threaded organisms and track the development of distinct expression patterns. Additionally, we examine both the evolvability of multi-threaded organisms and the level of thread differentiation as a function of environmental complexity, and find that differentiation is more pronounced in complex environments.<|reference_end|> | arxiv | @article{ofria2000evolution,
title={Evolution of differentiated expression patterns in digital organisms},
author={Charles Ofria, Christoph Adami, Travis C. Collier, and Grace K. Hsu
(California Institute of Technology)},
journal={Lect. Notes Artif. Intell. 1674 (1999) 129-138},
year={2000},
archivePrefix={arXiv},
eprint={physics/0002054},
primaryClass={physics.bio-ph cs.NE q-bio.PE}
} | ofria2000evolution |
arxiv-677042 | physics/0004057 | The information bottleneck method | <|reference_start|>The information bottleneck method: We define the relevant information in a signal $x\in X$ as being the information that this signal provides about another signal $y\in Y$. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal $x$ requires more than just predicting $y$; it also requires specifying which features of $X$ play a role in the prediction. We formalize this problem as that of finding a short code for $X$ that preserves the maximum information about $Y$. That is, we squeeze the information that $X$ provides about $Y$ through a `bottleneck' formed by a limited set of codewords $\tilde{X}$. This constrained optimization problem can be seen as a generalization of rate distortion theory in which the distortion measure $d(x,\tilde{x})$ emerges from the joint statistics of $X$ and $Y$. This approach yields an exact set of self-consistent equations for the coding rules $X \to \tilde{X}$ and $\tilde{X} \to Y$. Solutions to these equations can be found by a convergent re-estimation method that generalizes the Blahut-Arimoto algorithm. Our variational principle provides a surprisingly rich framework for discussing a variety of problems in signal processing and learning, as will be described in detail elsewhere.<|reference_end|> | arxiv | @article{tishby2000the,
title={The information bottleneck method},
author={Naftali Tishby (Hebrew University and NEC Research Institute),
Fernando C. Pereira (ATT Shannon Laboratory), and William Bialek (NEC
Research Institute)},
journal={arXiv preprint arXiv:physics/0004057},
year={2000},
archivePrefix={arXiv},
eprint={physics/0004057},
primaryClass={physics.data-an cond-mat.dis-nn cs.LG nlin.AO}
} | tishby2000the |
arxiv-677043 | physics/0005058 | On the computational capabilities of physical systems part I: the impossibility of infallible computation | <|reference_start|>On the computational capabilities of physical systems part I: the impossibility of infallible computation: In this first of two papers, strong limits on the accuracy of physical computation are established. First it is proven that there cannot be a physical computer C to which one can pose any and all computational tasks concerning the physical universe. Next it is proven that no physical computer C can correctly carry out any computational task in the subset of such tasks that can be posed to C. As a particular example, this means that there cannot be a physical computer that can, for any physical system external to that computer, take the specification of that external system's state as input and then correctly predict its future state before that future state actually occurs. The results also mean that there cannot exist an infallible, general-purpose observation apparatus, and that there cannot be an infallible, general-purpose control apparatus. These results do not rely on systems that are infinite and/or non-classical, and/or that obey chaotic dynamics. They also hold even if one uses an infinitely fast, infinitely dense computer, with computational powers greater than that of a Turing Machine.<|reference_end|> | arxiv | @article{wolpert2000on,
title={On the computational capabilities of physical systems part I: the
impossibility of infallible computation},
author={David H. Wolpert},
journal={arXiv preprint arXiv:physics/0005058},
year={2000},
archivePrefix={arXiv},
eprint={physics/0005058},
primaryClass={physics.comp-ph cond-mat.stat-mech cs.CC math-ph math.MP physics.gen-ph}
} | wolpert2000on |
arxiv-677044 | physics/0005059 | On the computational capabilities of physical systems part II: relationship with conventional computer science | <|reference_start|>In the first of this pair of papers, it was proven that no physical computer can correctly carry out all computational tasks that can be posed to it. The generality of this result follows from its use of a novel definition of computation, ``physical computation''. This second paper of the pair elaborates the mathematical structure and impossibility results associated with physical computation. Analogues of Chomsky hierarchy results concerning universal Turing Machines and the Halting theorem are derived, as are results concerning the (im)possibility of certain kinds of error-correcting codes. In addition, an analogue of algorithmic information complexity, ``prediction complexity'', is elaborated. A task-independent bound is derived on how much the prediction complexity of a computational task can differ for two different universal physical computers used to solve that task, a bound similar to the ``encoding'' bound governing how much the algorithmic information complexity of a Turing machine calculation can differ for two universal Turing machines. Finally, it is proven that either the Hamiltonian of our universe proscribes a certain type of computation, or prediction complexity is unique (unlike algorithmic information complexity).<|reference_end|> | arxiv | @article{wolpert2000on,
title={On the computational capabilities of physical systems part II:
relationship with conventional computer science},
author={David H. Wolpert},
journal={arXiv preprint arXiv:physics/0005059},
year={2000},
archivePrefix={arXiv},
eprint={physics/0005059},
primaryClass={physics.comp-ph cond-mat.stat-mech cs.CC math.OC physics.gen-ph}
} | wolpert2000on |
arxiv-677045 | physics/0005062 | Applying MDL to Learning Best Model Granularity | <|reference_start|>The Minimum Description Length (MDL) principle is solidly based on a provably ideal method of inference using Kolmogorov complexity. We test how the theory behaves in practice on a general problem in model selection: that of learning the best model granularity. The performance of a model depends critically on the granularity, for example the choice of precision of the parameters. Too high precision generally involves modeling of accidental noise, and too low precision may lead to confusion of models that should be distinguished. This precision is often determined ad hoc. In MDL the best model is the one that most compresses a two-part code of the data set: this embodies ``Occam's Razor.'' In two quite different experimental settings the theoretical value determined using MDL coincides with the best value found experimentally. In the first experiment the task is to recognize isolated handwritten characters in one subject's handwriting, irrespective of size and orientation. Based on a new modification of elastic matching, using multiple prototypes per character, the optimal prediction rate is predicted for the learned parameter (length of sampling interval) considered most likely by MDL, which is shown to coincide with the best value found experimentally. In the second experiment the task is to model a robot arm with two degrees of freedom using a three layer feed-forward neural network where we need to determine the number of nodes in the hidden layer giving best modeling performance. The optimal model (the one that extrapolates best on unseen examples) is predicted for the number of nodes in the hidden layer considered most likely by MDL, which again is found to coincide with the best value found experimentally.<|reference_end|> | arxiv | @article{gao2000applying,
title={Applying MDL to Learning Best Model Granularity},
author={Qiong Gao (Chinese Academy of Sciences), Ming Li (University of
California, Santa Barbara), Paul Vitanyi (CWI and University of Amsterdam)},
journal={arXiv preprint arXiv:physics/0005062},
year={2000},
archivePrefix={arXiv},
eprint={physics/0005062},
primaryClass={physics.data-an cs.AI cs.CV}
} | gao2000applying |
arxiv-677046 | physics/0005074 | Evolution of Biological Complexity | <|reference_start|>Evolution of Biological Complexity: In order to make a case for or against a trend in the evolution of complexity in biological evolution, complexity needs to be both rigorously defined and measurable. A recent information-theoretic (but intuitively evident) definition identifies genomic complexity with the amount of information a sequence stores about its environment. We investigate the evolution of genomic complexity in populations of digital organisms and monitor in detail the evolutionary transitions that increase complexity. We show that because natural selection forces genomes to behave as a natural ``Maxwell Demon'', within a fixed environment genomic complexity is forced to increase.<|reference_end|> | arxiv | @article{adami2000evolution,
title={Evolution of Biological Complexity},
author={Christoph Adami (Caltech), Charles Ofria (MSU), and Travis C. Collier
(UCLA)},
journal={Proc. Nat. Acad. Sci (USA) 97 (2000) 4463},
year={2000},
doi={10.1073/pnas.97.9.4463},
archivePrefix={arXiv},
eprint={physics/0005074},
primaryClass={physics.bio-ph cond-mat.stat-mech cs.CC nlin.AO physics.data-an q-bio.PE}
} | adami2000evolution |
arxiv-677047 | physics/0007070 | Predictability, complexity and learning | <|reference_start|>We define {\em predictive information} $I_{\rm pred} (T)$ as the mutual information between the past and the future of a time series. Three qualitatively different behaviors are found in the limit of large observation times $T$: $I_{\rm pred} (T)$ can remain finite, grow logarithmically, or grow as a fractional power law. If the time series allows us to learn a model with a finite number of parameters, then $I_{\rm pred} (T)$ grows logarithmically with a coefficient that counts the dimensionality of the model space. In contrast, power--law growth is associated, for example, with the learning of infinite parameter (or nonparametric) models such as continuous functions with smoothness constraints. There are connections between the predictive information and measures of complexity that have been defined both in learning theory and in the analysis of physical systems through statistical mechanics and dynamical systems theory. Further, in the same way that entropy provides the unique measure of available information consistent with some simple and plausible conditions, we argue that the divergent part of $I_{\rm pred} (T)$ provides the unique measure for the complexity of dynamics underlying a time series. Finally, we discuss how these ideas may be useful in different problems in physics, statistics, and biology.<|reference_end|> | arxiv | @article{bialek2000predictability,
title={Predictability, complexity and learning},
author={William Bialek, Ilya Nemenman, and Naftali Tishby},
journal={Neural Computation 13, 2409-2463 (2001)},
year={2000},
archivePrefix={arXiv},
eprint={physics/0007070},
primaryClass={physics.data-an cond-mat.dis-nn cond-mat.other cs.LG nlin.AO q-bio.OT}
} | bialek2000predictability |
arxiv-677048 | physics/0007075 | Optimization of Trading Physics Models of Markets | <|reference_start|>Optimization of Trading Physics Models of Markets: We describe an end-to-end real-time S&P futures trading system. Inner-shell stochastic nonlinear dynamic models are developed, and Canonical Momenta Indicators (CMI) are derived from a fitted Lagrangian used by outer-shell trading models dependent on these indicators. Recursive and adaptive optimization using Adaptive Simulated Annealing (ASA) is used for fitting parameters shared across these shells of dynamic and trading models.<|reference_end|> | arxiv | @article{ingber2000optimization,
title={Optimization of Trading Physics Models of Markets},
author={Lester Ingber and Radu Paul Mondescu},
journal={arXiv preprint arXiv:physics/0007075},
year={2000},
archivePrefix={arXiv},
eprint={physics/0007075},
primaryClass={physics.comp-ph cond-mat.stat-mech cs.CE physics.data-an q-fin.ST}
} | ingber2000optimization |
arxiv-677049 | physics/0009032 | Information theory and learning: a physical approach | <|reference_start|>Information theory and learning: a physical approach: We try to establish a unified information theoretic approach to learning and to explore some of its applications. First, we define {\em predictive information} as the mutual information between the past and the future of a time series, discuss its behavior as a function of the length of the series, and explain how other quantities of interest studied previously in learning theory - as well as in dynamical systems and statistical mechanics - emerge from this universally definable concept. We then prove that predictive information provides the {\em unique measure for the complexity} of dynamics underlying the time series and show that there are classes of models characterized by {\em power-law growth of the predictive information} that are qualitatively more complex than any of the systems that have been investigated before. Further, we investigate numerically the learning of a nonparametric probability density, which is an example of a problem with power-law complexity, and show that the proper Bayesian formulation of this problem provides for the `Occam' factors that punish overly complex models and thus allow one {\em to learn not only a solution within a specific model class, but also the class itself} using the data only and with very few a priori assumptions. We study a possible {\em information theoretic method} that regularizes the learning of an undersampled discrete variable, and show that learning in such a setup goes through stages of very different complexities. Finally, we discuss how all of these ideas may be useful in various problems in physics, statistics, and, most importantly, biology.<|reference_end|> | arxiv | @article{nemenman2000information,
title={Information theory and learning: a physical approach},
author={Ilya Nemenman},
journal={arXiv preprint arXiv:physics/0009032},
year={2000},
archivePrefix={arXiv},
eprint={physics/0009032},
primaryClass={physics.data-an cond-mat.dis-nn cs.LG nlin.AO}
} | nemenman2000information |
arxiv-677050 | physics/0011053 | Faster Evaluation of Multidimensional Integrals | <|reference_start|>In a recent paper Keister proposed two quadrature rules as alternatives to Monte Carlo for certain multidimensional integrals and reported his test results. In earlier work we had shown that the quasi-Monte Carlo method with generalized Faure points is very effective for a variety of high dimensional integrals occurring in mathematical finance. In this paper we report test results of this method on Keister's examples of dimension 9 and 25, and also for examples of dimension 60, 80 and 100. For the 25 dimensional integral we achieved an accuracy of 0.01 with less than 500 points while the two methods tested by Keister used more than 220,000 points. In all of our tests, for n sample points we obtained an empirical convergence rate proportional to n^{-1} rather than the n^{-1/2} of Monte Carlo.<|reference_end|> | arxiv | @article{papageorgiou2000faster,
title={Faster Evaluation of Multidimensional Integrals},
author={A. Papageorgiou and J. F. Traub},
journal={Computers in Physics, Nov/Dec, 1997, 574-578},
year={2000},
doi={10.1063/1.168616},
archivePrefix={arXiv},
eprint={physics/0011053},
primaryClass={physics.comp-ph cs.NA}
} | papageorgiou2000faster |
arxiv-677051 | physics/0101021 | Adaptive evolution on neutral networks | <|reference_start|>Adaptive evolution on neutral networks: We study the evolution of large but finite asexual populations evolving in fitness landscapes in which all mutations are either neutral or strongly deleterious. We demonstrate that despite the absence of higher fitness genotypes, adaptation takes place as regions with more advantageous distributions of neutral genotypes are discovered. Since these discoveries are typically rare events, the population dynamics can be subdivided into separate epochs, with rapid transitions between them. Within one epoch, the average fitness in the population is approximately constant. The transitions between epochs, however, are generally accompanied by a significant increase in the average fitness. We verify our theoretical considerations with two analytically tractable bitstring models.<|reference_end|> | arxiv | @article{wilke2001adaptive,
title={Adaptive evolution on neutral networks},
author={Claus O. Wilke (Caltech)},
journal={Bull. Math. Biol. 63:715-730, 2001},
year={2001},
archivePrefix={arXiv},
eprint={physics/0101021},
primaryClass={physics.bio-ph cond-mat.stat-mech cs.NE nlin.AO q-bio.PE}
} | wilke2001adaptive |
arxiv-677052 | physics/0102009 | Self-adaptive exploration in evolutionary search | <|reference_start|>Self-adaptive exploration in evolutionary search: We address a primary question of computational as well as biological research on evolution: How can an exploration strategy adapt in such a way as to exploit the information gained about the problem at hand? We first introduce an integrated formalism of evolutionary search which provides a unified view on different specific approaches. On this basis we discuss the implications of indirect modeling (via a ``genotype-phenotype mapping'') on the exploration strategy. Notions such as modularity, pleiotropy and functional phenotypic complex are discussed as implications. Then, rigorously reflecting the notion of self-adaptability, we introduce a new definition that captures self-adaptability of exploration: different genotypes that map to the same phenotype may represent (also topologically) different exploration strategies; self-adaptability requires a variation of exploration strategies along such a ``neutral space''. By this definition, the concept of neutrality becomes a central concern of this paper. Finally, we present examples of these concepts: For a specific grammar-type encoding, we observe a large variability of exploration strategies for a fixed phenotype, and a self-adaptive drift towards short representations with highly structured exploration strategy that matches the ``problem's structure''.<|reference_end|> | arxiv | @article{toussaint2001self-adaptive,
title={Self-adaptive exploration in evolutionary search},
author={Marc Toussaint},
journal={arXiv preprint arXiv:physics/0102009},
year={2001},
archivePrefix={arXiv},
eprint={physics/0102009},
primaryClass={physics.bio-ph cs.NE nlin.AO q-bio}
} | toussaint2001self-adaptive |
arxiv-677053 | physics/0106045 | A Continuous Model of Computation | <|reference_start|>Although the Turing-machine model of computation is widely used in computer science, it is fundamentally inadequate as a foundation for the theory of modern scientific computation. The real-number model is described as an alternative. Physicists often choose continuous mathematical models for problems ranging from the dynamical systems of classical physics to the operator equations and path integrals of quantum mechanics. These mathematical models use the real or complex number fields, and we argue that the real-number model of computation should be used in the study of the computational complexity of continuous mathematical models. The study of continuous complexity is called information-based complexity. In this expository article we apply information-based complexity to topics such as breaking the curse of dimensionality, approximating the calculation of path integrals, and solving ill-posed problems. Precise formulations of these ideas may be found in J. F. Traub and A. G. Werschulz, "Complexity and Information", Cambridge University Press, 1998.<|reference_end|> | arxiv | @article{traub2001a,
title={A Continuous Model of Computation},
author={J. F. Traub},
journal={Physics Today, May, 1999, 39-43},
year={2001},
archivePrefix={arXiv},
eprint={physics/0106045},
primaryClass={physics.comp-ph cs.NA math.NA}
} | traub2001a |
arxiv-677054 | physics/0209085 | The calculation of a normal force between multiparticle contacts using fractional operators | <|reference_start|>This paper deals with the complex problem of how to simulate multiparticle contacts. The collision process is responsible for the transfer and dissipation of energy in granular media. A novel model of the interaction force between particles has been proposed and tested. Such a model allows us to simulate multiparticle collisions and granular cohesion dynamics.<|reference_end|> | arxiv | @article{jacek2002the,
title={The calculation of a normal force between multiparticle contacts using
fractional operators},
author={Leszczynski Jacek},
journal={arXiv preprint arXiv:physics/0209085},
year={2002},
archivePrefix={arXiv},
eprint={physics/0209085},
primaryClass={physics.comp-ph cs.CE cs.NA math.NA physics.class-ph physics.geo-ph}
} | jacek2002the |
arxiv-677055 | physics/0210125 | Ownership and Trade from Evolutionary Games | <|reference_start|>Ownership and trade emerge from anarchy as evolutionarily stable strategies. In these evolutionary game models, ownership status provides an endogenous asymmetrizing criterion enabling cheaper resolution of property conflicts.<|reference_end|> | arxiv | @article{yee2002ownership,
title={Ownership and Trade from Evolutionary Games},
author={Kenton K. Yee},
journal={International Review of Law and Economics, Vol. 23, No. 2, pp.
183-197, 2003},
year={2002},
number={http://papers.ssrn.com/sol3/papers.cfm?abstract_id=319102},
archivePrefix={arXiv},
eprint={physics/0210125},
primaryClass={physics.bio-ph cs.GT nlin.AO nlin.CG physics.soc-ph q-bio.PE}
} | yee2002ownership |
arxiv-677056 | physics/0302034 | Power and beauty of interval methods | <|reference_start|>Interval calculus is a relatively new branch of mathematics. Initially understood as a set of tools to assess the quality of numerical calculations (rigorous control of rounding errors), it has since become a discipline in its own right. Interval methods are useful whenever we have to deal with uncertainties which can be rigorously bounded. Fuzzy sets, rough sets and probability calculus can perform similar tasks, yet only the interval methods are able to (dis)prove, with mathematical rigor, the (non)existence of desired solution(s). Several problems are known, not presented here, which cannot be effectively solved by any other means. This paper presents basic notions and main ideas of interval calculus and two examples of useful algorithms.<|reference_end|> | arxiv | @article{gutowski2003power,
title={Power and beauty of interval methods},
author={Marek W. Gutowski},
journal={arXiv preprint arXiv:physics/0302034},
year={2003},
archivePrefix={arXiv},
eprint={physics/0302034},
primaryClass={physics.data-an cs.DS physics.gen-ph}
} | gutowski2003power |
arxiv-677057 | physics/0304041 | Message Passing Fluids: molecules as processes in parallel computational fluids | <|reference_start|>In this paper we present the concept of MPF, Message Passing Fluid, an abstract fluid whose molecules move by means of the information they exchange with each other, on the basis of the rules and methods of a generalized Cellular Automaton. The model is intended to be simulated by means of message passing libraries in the field of parallel computing. We present a critical analysis of the necessary computational effort in a possible implementation of such an object.<|reference_end|> | arxiv | @article{argentini2003message,
title={Message Passing Fluids: molecules as processes in parallel computational
fluids},
author={Gianluca Argentini},
journal={Recent Advances in Parallel Virtual Machine and Message Passing
Interface: 10th European PVM/MPI Users' Group Meeting, Venice, Italy,
September 29 - October 2, 2003. Proceedings, LNCS 2840, Springer-Verlag,
2003, pg. 550-554},
year={2003},
archivePrefix={arXiv},
eprint={physics/0304041},
primaryClass={physics.flu-dyn cs.DC physics.comp-ph}
} | argentini2003message |
arxiv-677058 | physics/0306002 | The NorduGrid architecture and tools | <|reference_start|>The NorduGrid project designed a Grid architecture with the primary goal of meeting the requirements of production tasks of the LHC experiments. While it is meant to be a rather generic Grid system, it puts emphasis on batch processing suitable for problems encountered in High Energy Physics. The NorduGrid architecture implementation uses the Globus Toolkit as the foundation for various components developed by the project. While introducing new services, the NorduGrid does not modify the Globus tools, such that the two can eventually co-exist. The NorduGrid topology is decentralized, avoiding a single point of failure. The NorduGrid architecture is thus a light-weight, non-invasive and dynamic one, while robust and scalable, capable of meeting the most challenging tasks of High Energy Physics.<|reference_end|> | arxiv | @article{eerola2003the,
title={The NorduGrid architecture and tools},
author={P. Eerola, T. Ekelof, M. Ellert, J. R. Hansen, A. Konstantinov, B.
Konya, J. L. Nielsen, F. Ould-Saada, O. Smirnova, A. Waananen},
journal={arXiv preprint arXiv:physics/0306002},
year={2003},
archivePrefix={arXiv},
eprint={physics/0306002},
primaryClass={physics.comp-ph cs.DC}
} | eerola2003the |
arxiv-677059 | physics/0307117 | Symbolic stochastic dynamical systems viewed as binary N-step Markov chains | <|reference_start|>Symbolic stochastic dynamical systems viewed as binary N-step Markov chains: A theory of systems with long-range correlations based on the consideration of binary N-step Markov chains is developed. In the model, the conditional probability that the i-th symbol in the chain equals zero (or unity) is a linear function of the number of unities among the preceding N symbols. The correlation and distribution functions as well as the variance of number of symbols in the words of arbitrary length L are obtained analytically and numerically. A self-similarity of the studied stochastic process is revealed and the similarity group transformation of the chain parameters is presented. The diffusion Fokker-Planck equation governing the distribution function of the L-words is explored. If the persistent correlations are not extremely strong, the distribution function is shown to be the Gaussian with the variance being nonlinearly dependent on L. The applicability of the developed theory to the coarse-grained written and DNA texts is discussed.<|reference_end|> | arxiv | @article{usatenko2003symbolic,
title={Symbolic stochastic dynamical systems viewed as binary N-step Markov
chains},
author={O. V. Usatenko, V. A. Yampol'skii, K. E. Kechedzhy, and S. S. Mel'nyk},
journal={arXiv preprint arXiv:physics/0307117},
year={2003},
doi={10.1103/PhysRevE.68.061107},
archivePrefix={arXiv},
eprint={physics/0307117},
primaryClass={physics.data-an cond-mat.stat-mech cs.CL math-ph math.MP nlin.AO physics.class-ph}
} | usatenko2003symbolic |
arxiv-677060 | physics/0308041 | Ensembles of Protein Molecules as Statistical Analog Computers | <|reference_start|>A class of analog computers built from large numbers of microscopic probabilistic machines is discussed. It is postulated that such computers are implemented in biological systems as ensembles of protein molecules. The formalism is based on an abstract computational model referred to as a Protein Molecule Machine (PMM). A PMM is a continuous-time first-order Markov system with real input and output vectors, a finite set of discrete states, and input-dependent conditional probability densities of state transitions. The output of a PMM is a function of its input and state. The components of the input vector, called generalized potentials, can be interpreted as membrane potential and concentrations of neurotransmitters. The components of the output vector, called generalized currents, can represent ion currents and the flows of second messengers. An Ensemble of PMMs (EPMM) is a set of independent identical PMMs with the same input vector, and the output vector equal to the sum of the output vectors of individual PMMs. The paper suggests that biological neurons have much more sophisticated computational resources than the presently popular models of artificial neurons.<|reference_end|> | arxiv | @article{eliashberg2003ensembles,
title={Ensembles of Protein Molecules as Statistical Analog Computers},
author={Victor Eliashberg},
journal={arXiv preprint arXiv:physics/0308041},
year={2003},
archivePrefix={arXiv},
eprint={physics/0308041},
primaryClass={physics.bio-ph cs.AI cs.NE physics.comp-ph physics.data-an q-bio.NC}
} | eliashberg2003ensembles |
arxiv-677061 | physics/0402078 | Self-generated Self-similar Traffic | <|reference_start|>Self-similarity in network traffic has been studied from several aspects: both at the user side and at the network side there are many sources of long range dependence. Recently, some dynamical origins have also been identified: the TCP adaptive congestion avoidance algorithm itself can produce chaotic and long range dependent throughput behavior if the loss rate is very high. In this paper we show that there is a close connection between the static and dynamic origins of self-similarity: parallel TCPs can generate the self-similarity themselves; they can introduce heavy fluctuations into the background traffic and produce a high effective loss rate, causing a long range dependent TCP flow even though the dropped packet ratio is low.<|reference_end|> | arxiv | @article{haga2004self-generated,
title={Self-generated Self-similar Traffic},
author={P. Haga, P. Pollner, G. Simon, I. Csabai, G. Vattay},
journal={Nonlinear Phenomena in Complex Systems Vol.6, No.4 2003},
year={2004},
archivePrefix={arXiv},
eprint={physics/0402078},
primaryClass={physics.data-an cond-mat.other cs.NI nlin.AO}
} | haga2004self-generated |
arxiv-677062 | physics/0405044 | Least Dependent Component Analysis Based on Mutual Information | <|reference_start|>We propose to use precise estimators of mutual information (MI) to find least dependent components in a linearly mixed signal. On the one hand this seems to lead to better blind source separation than with any other presently available algorithm. On the other hand it has the advantage, compared to other implementations of `independent' component analysis (ICA), some of which are based on crude approximations for MI, that the numerical values of the MI can be used for: (i) estimating residual dependencies between the output components; (ii) estimating the reliability of the output, by comparing the pairwise MIs with those of re-mixed components; (iii) clustering the output according to the residual interdependencies. For the MI estimator we use a recently proposed k-nearest neighbor based algorithm. For time sequences we combine this with delay embedding, in order to take into account non-trivial time correlations. After several tests with artificial data, we apply the resulting MILCA (Mutual Information based Least dependent Component Analysis) algorithm to a real-world dataset, the ECG of a pregnant woman. The software implementation of the MILCA algorithm is freely available at http://www.fz-juelich.de/nic/cs/software<|reference_end|> | arxiv | @article{stögbauer2004least,
title={Least Dependent Component Analysis Based on Mutual Information},
author={Harald St\"ogbauer, Alexander Kraskov, Sergey A. Astakhov, and Peter
Grassberger},
journal={Phys. Rev. E 70, 066123 (2004)},
year={2004},
doi={10.1103/PhysRevE.70.066123},
archivePrefix={arXiv},
eprint={physics/0405044},
primaryClass={physics.comp-ph cs.IT math.IT physics.data-an q-bio.QM}
} | stögbauer2004least |
arxiv-677063 | physics/0405154 | The ATLAS Tile Calorimeter Test Beam Monitoring Program | <|reference_start|>During the 2003 test beam session for the ATLAS Tile Calorimeter, a monitoring program was developed to ease the setup of correct running conditions and the assessment of data quality. The program has been built using the Online Software services provided by the ATLAS Online Software group. The first part of this note contains a brief overview of these services, followed by a full description of the Tile Calorimeter monitoring program architecture and features. Performance and future upgrades are discussed in the final part of this note.<|reference_end|> | arxiv | @article{adragna2004the,
title={The ATLAS Tile Calorimeter Test Beam Monitoring Program},
author={Paolo Adragna, Andrea Dotti, Chiara Roda (University of Pisa and
Istituto Nazionale di Fisica Nucleare, Sezione di Pisa)},
journal={arXiv preprint arXiv:physics/0405154},
year={2004},
number={ATL-TILECAL-2004-002},
archivePrefix={arXiv},
eprint={physics/0405154},
primaryClass={physics.ins-det cs.PF cs.SE}
} | adragna2004the |
arxiv-677064 | physics/0406023 | Maximum Entropy Multivariate Density Estimation: An exact goodness-of-fit approach | <|reference_start|>Maximum Entropy Multivariate Density Estimation: An exact goodness-of-fit approach: We consider the problem of estimating the population probability distribution given a finite set of multivariate samples, using the maximum entropy approach. In strict keeping with Jaynes' original definition, our precise formulation of the problem considers contributions only from the smoothness of the estimated distribution (as measured by its entropy) and the loss functional associated with its goodness-of-fit to the sample data, and in particular does not make use of any additional constraints that cannot be justified from the sample data alone. By mapping the general multivariate problem to a tractable univariate one, we are able to write down exact expressions for the goodness-of-fit of an arbitrary multivariate distribution to any given set of samples using both the traditional likelihood-based approach and a rigorous information-theoretic approach, thus solving a long-standing problem. As a corollary we also give an exact solution to the `forward problem' of determining the expected distributions of samples taken from a population with known probability distribution.<|reference_end|> | arxiv | @article{rahman2004maximum,
title={Maximum Entropy Multivariate Density Estimation: An exact
goodness-of-fit approach},
author={Sabbir Rahman and Mahbub Majumdar},
journal={arXiv preprint arXiv:physics/0406023},
year={2004},
number={Imperial/TP/3-04/14},
archivePrefix={arXiv},
eprint={physics/0406023},
primaryClass={physics.data-an cs.IT math.IT math.ST stat.TH}
} | rahman2004maximum |
arxiv-677065 | physics/0410226 | THE CAVES Project - Collaborative Analysis Versioning Environment System; THE CODESH Project - Collaborative Development Shell | <|reference_start|>THE CAVES Project - Collaborative Analysis Versioning Environment System; THE CODESH Project - Collaborative Development Shell: A key feature of collaboration in science and software development is to have a {\em log} of what and how is being done - for private use and reuse and for sharing selected parts with collaborators, which most often today are distributed geographically on an ever larger scale. Even better if this log is {\em automatic}, created on the fly while a scientist or software developer is working in a habitual way, without the need for extra efforts. The {\tt CAVES} and {\tt CODESH} projects address this problem in a novel way, building on the concepts of {\em virtual state} and {\em virtual transition} to provide an automatic persistent logbook for sessions of data analysis or software development in a collaborating group. A repository of sessions can be configured dynamically to record and make available the knowledge accumulated in the course of a scientific or software endeavor. Access can be controlled to define logbooks of private sessions and sessions shared within or between collaborating groups.<|reference_end|> | arxiv | @article{bourilkov2004the,
title={THE CAVES Project - Collaborative Analysis Versioning Environment
System; THE CODESH Project - Collaborative Development Shell},
author={Dimitri Bourilkov},
journal={Int.J.Mod.Phys. A20 (2005) 3889-3892},
year={2004},
doi={10.1142/S0217751X05027904},
number={GriPhyN 2004-73},
archivePrefix={arXiv},
eprint={physics/0410226},
primaryClass={physics.data-an cs.DC hep-ex physics.comp-ph}
} | bourilkov2004the |
arxiv-677066 | physics/0412029 | Spectral Mixture Decomposition by Least Dependent Component Analysis | <|reference_start|>A recently proposed mutual information based algorithm for decomposing data into least dependent components (MILCA) is applied to spectral analysis, namely to blind recovery of concentrations and pure spectra from their linear mixtures. The algorithm is based on precise estimates of mutual information between measured spectra, which allows one to assess and make use of actual statistical dependencies between them. We show that linear filtering performed by taking second derivatives effectively reduces the dependencies caused by overlapping spectral bands and, thereby, assists in resolving pure spectra. In combination with second derivative preprocessing and alternating least squares postprocessing, MILCA shows decomposition performance comparable with or superior to specialized chemometrics algorithms. The results are illustrated on a number of simulated and experimental (infrared and Raman) mixture problems, including spectroscopy of complex biological materials. MILCA is available online at http://www.fz-juelich.de/nic/cs/software<|reference_end|> | arxiv | @article{astakhov2004spectral,
title={Spectral Mixture Decomposition by Least Dependent Component Analysis},
author={Sergey A. Astakhov, Harald St\"ogbauer, Alexander Kraskov, Peter
Grassberger},
journal={arXiv preprint arXiv:physics/0412029},
year={2004},
archivePrefix={arXiv},
eprint={physics/0412029},
primaryClass={physics.data-an cs.IT math.IT physics.chem-ph}
} | astakhov2004spectral |
arxiv-677067 | physics/0501164 | Performance of an Operating High Energy Physics Data Grid: D0SAR-Grid | <|reference_start|>The D0 experiment at Fermilab's Tevatron will record several petabytes of data over the next five years in pursuing the goals of understanding nature and searching for the origin of mass. Computing resources required to analyze these data far exceed the capabilities of any one institution. Moreover, the widely scattered geographical distribution of D0 collaborators poses further serious difficulties for optimal use of human and computing resources. These difficulties will be exacerbated in future high energy physics experiments, like the LHC. The computing grid has long been recognized as a solution to these problems. This technology is being made a more immediate reality to end users in D0 by developing a grid in the D0 Southern Analysis Region (D0SAR), D0SAR-Grid, using all available resources within it and a home-grown local task manager, McFarm. We will present the architecture in which the D0SAR-Grid is implemented, the use of technology and the functionality of the grid, and the experience from operating the grid in simulation, reprocessing and data analyses for a currently running HEP experiment.<|reference_end|> | arxiv | @article{abbott2005performance,
title={Performance of an Operating High Energy Physics Data Grid: D0SAR-Grid},
author={B. Abbott, P. Baringer, T. Bolton, Z. Greenwood, E. Gregores, H. Kim,
C. Leangsuksun, D. Meyer, N. Mondal, S. Novaes, B. Quinn, H. Severini, P.
Skubic, J. Snow, M. Sosebee, J. Yu},
journal={Int.J.Mod.Phys.A20:3874-3876,2005},
year={2005},
doi={10.1142/S0217751X05027850},
archivePrefix={arXiv},
eprint={physics/0501164},
primaryClass={physics.data-an cs.DC physics.ins-det}
} | abbott2005performance |
arxiv-677068 | physics/0502149 | Fine Grid Numerical Solutions of Triangular Cavity Flow | <|reference_start|>Numerical solutions of 2-D steady incompressible flow inside a triangular cavity are presented. For the purpose of comparing our results with several triangular cavity studies that use different triangle geometries, a general triangle mapped onto a computational domain is considered. The Navier-Stokes equations in general curvilinear coordinates in streamfunction and vorticity formulation are numerically solved. Using a very fine grid mesh, the triangular cavity flow is solved for high Reynolds numbers. The results are compared with the numerical solutions found in the literature, as well as with analytical solutions. Detailed results are presented.<|reference_end|> | arxiv | @article{erturk2005fine,
title={Fine Grid Numerical Solutions of Triangular Cavity Flow},
author={E. Erturk and O. Gokcol},
journal={The European Physical Journal - Applied Physics 2007, Vol 38, pp
97-105},
year={2005},
doi={10.1051/epjap:2007057},
archivePrefix={arXiv},
eprint={physics/0502149},
primaryClass={physics.flu-dyn cs.NA math.NA physics.comp-ph}
} | erturk2005fine |
arxiv-677069 | physics/0504026 | Let Your CyberAlter Ego Share Information and Manage Spam | <|reference_start|>Almost all of us have multiple cyberspace identities, and these {\em cyber}alter egos are networked together to form a vast cyberspace social network. This network is distinct from the world-wide-web (WWW), which is being queried and mined to the tune of billions of dollars every day, and until recently, has gone largely unexplored. Empirically, the cyberspace social networks have been found to possess many of the same complex features that characterize their real counterparts, including scale-free degree distributions, low diameter, and extensive connectivity. We show that these topological features make the latent networks particularly suitable for explorations and management via local-only messaging protocols. {\em Cyber}alter egos can communicate via their direct links (i.e., using only their own address books) and set up a highly decentralized and scalable message passing network that can allow large-scale sharing of information and data. As one particular example of such collaborative systems, we provide a design of a spam filtering system, and our large-scale simulations show that the system achieves a spam detection rate close to 100%, while the false positive rate is kept around zero. This system has several advantages over other recent proposals: (i) It uses an already existing network, created by the same social dynamics that govern our daily lives, and no dedicated peer-to-peer (P2P) systems or centralized server-based systems need be constructed; (ii) It utilizes a percolation search algorithm that makes the query-generated traffic scalable; (iii) The network has a built-in trust system (just as in social networks) that can be used to thwart malicious attacks; (iv) It can be implemented right now as a plugin to popular email programs, such as MS Outlook, Eudora, and Sendmail.<|reference_end|> | arxiv | @article{kong2005let,
title={Let Your CyberAlter Ego Share Information and Manage Spam},
author={Joseph S. Kong, P. Oscar Boykin, Behnam A. Rezaei, Nima Sarshar, Vwani
P. Roychowdhury},
journal={Collaborative Spam Filtering Using E-Mail Networks, IEEE Computer,
Vol. 39, No. 8, pages 67-73, 2006},
year={2005},
doi={10.1109/MC.2006.257},
archivePrefix={arXiv},
eprint={physics/0504026},
primaryClass={physics.soc-ph cond-mat.dis-nn cs.CY cs.NI}
} | kong2005let |
arxiv-677070 | physics/0504185 | Frequency of occurrence of numbers in the World Wide Web | <|reference_start|>The distribution of numbers in human documents is determined by a variety of diverse natural and human factors, whose relative significance can be evaluated by studying the numbers' frequency of occurrence. Although it has been studied since the 1880's, this subject remains poorly understood. Here, we obtain the detailed statistics of numbers in the World Wide Web, finding that their distribution is a heavy-tailed dependence which splits into a set of power-law ones. In particular, we find that the frequency of numbers associated with western calendar years shows an uneven behavior: 2004 represents a `singular critical' point, appearing with a strikingly high frequency; as we move away from it, the decreasing frequency allows us to compare the amounts of existing information on the past and on the future. Moreover, while powers of ten occur extremely often, allowing us to obtain statistics up to the huge 10^127, `non-round' numbers occur in a much more limited range, the variations of their frequencies being dramatically different from standard statistical fluctuations. These findings provide a view of the array of numbers used by humans as a highly non-equilibrium and inhomogeneous system, and shed new light on an issue that, once fully investigated, could lead to a better understanding of many sociological and psychological phenomena.<|reference_end|> | arxiv | @article{dorogovtsev2005frequency,
title={Frequency of occurrence of numbers in the World Wide Web},
author={S.N. Dorogovtsev, J.F.F. Mendes, J.G. Oliveira},
journal={Physica A 360, 548 (2006)},
year={2005},
doi={10.1016/j.physa.2005.06.064},
archivePrefix={arXiv},
eprint={physics/0504185},
primaryClass={physics.soc-ph cond-mat.stat-mech cs.DB math.ST stat.TH}
} | dorogovtsev2005frequency |
arxiv-677071 | physics/0505121 | Numerical Solutions of 2-D Steady Incompressible Flow in a Driven Skewed Cavity | <|reference_start|>The benchmark test case for non-orthogonal grid mesh, the "driven skewed cavity flow", first introduced by Demirdzic et al. (1992, IJNMF, 15, 329) for skew angles of alpha=30 and alpha=45, is reintroduced with a wider variety of skew angles. The benchmark problem has a non-orthogonal, skewed grid mesh with skew angle (alpha). The governing 2-D steady incompressible Navier-Stokes equations in general curvilinear coordinates are solved for the driven skewed cavity flow with non-orthogonal grid mesh using a numerical method which is efficient and stable even at extreme skew angles. Highly accurate numerical solutions of the driven skewed cavity flow, solved using a fine grid (512x512) mesh, are presented for Reynolds numbers of 100 and 1000 for skew angles ranging between 15<alpha<165.<|reference_end|> | arxiv | @article{erturk2005numerical,
title={Numerical Solutions of 2-D Steady Incompressible Flow in a Driven Skewed
Cavity},
author={E. Erturk and B. Dursun},
journal={ZAMM - Journal of Applied Mathematics and Mechanics 2007, Vol 87,
pp 377-392},
year={2005},
doi={10.1002/zamm.200610322},
archivePrefix={arXiv},
eprint={physics/0505121},
primaryClass={physics.flu-dyn cs.NA math.NA physics.comp-ph}
} | erturk2005numerical |
arxiv-677072 | physics/0506155 | A low-cost parallel implementation of direct numerical simulation of wall turbulence | <|reference_start|>A numerical method for the direct numerical simulation of incompressible wall turbulence in rectangular and cylindrical geometries is presented. The distinctive feature resides in its design being targeted towards efficient distributed-memory parallel computing on commodity hardware. The adopted discretization is spectral in the two homogeneous directions; fourth-order accurate, compact finite-difference schemes over a variable-spacing mesh in the wall-normal direction are key to our parallel implementation. The parallel algorithm is designed in such a way as to minimize data exchange among the computing machines, and in particular to avoid taking a global transpose of the data during the pseudo-spectral evaluation of the non-linear terms. The computing machines can then be connected to each other through low-cost network devices. The code is optimized for memory requirements, which can moreover be subdivided among the computing nodes. The layout of a simple, dedicated and optimized computing system based on commodity hardware is described. The performance of the numerical method on this computing system is evaluated and compared with that of other codes described in the literature, as well as with that of the same code implementing a commonly employed strategy for the pseudo-spectral calculation.<|reference_end|> | arxiv | @article{luchini2005a,
title={A low-cost parallel implementation of direct numerical simulation of
wall turbulence},
author={Paolo Luchini and Maurizio Quadrio},
journal={arXiv preprint arXiv:physics/0506155},
year={2005},
doi={10.1016/j.jcp.2005.06.003},
archivePrefix={arXiv},
eprint={physics/0506155},
primaryClass={physics.flu-dyn cs.DC physics.comp-ph}
} | luchini2005a |
arxiv-677073 | physics/0509039 | The Dynamics of Viral Marketing | <|reference_start|>The Dynamics of Viral Marketing: We present an analysis of a person-to-person recommendation network, consisting of 4 million people who made 16 million recommendations on half a million products. We observe the propagation of recommendations and the cascade sizes, which we explain by a simple stochastic model. We analyze how user behavior varies within user communities defined by a recommendation network. Product purchases follow a 'long tail' where a significant share of purchases belongs to rarely sold items. We establish how the recommendation network grows over time and how effective it is from the viewpoint of the sender and receiver of the recommendations. While on average recommendations are not very effective at inducing purchases and do not spread very far, we present a model that successfully identifies communities, product and pricing categories for which viral marketing seems to be very effective.<|reference_end|> | arxiv | @article{leskovec2005the,
title={The Dynamics of Viral Marketing},
author={Jure Leskovec, Lada A. Adamic and Bernardo A. Huberman},
journal={Leskovec, J., Adamic, L. A., and Huberman, B. A. 2007. The
dynamics of viral marketing. ACM Transactions on the Web, 1, 1 (May 2007)},
year={2005},
doi={10.1145/1232722.1232727},
archivePrefix={arXiv},
eprint={physics/0509039},
primaryClass={physics.soc-ph cond-mat.stat-mech cs.DB cs.DS}
} | leskovec2005the |
arxiv-677074 | physics/0509075 | Sharp transition towards shared vocabularies in multi-agent systems | <|reference_start|>What processes can explain how very large populations are able to converge on the use of a particular word or grammatical construction without global coordination? Answering this question helps to understand why new language constructs usually propagate along an S-shaped curve with a rather sudden transition towards global agreement. It also helps to analyze and design new technologies that support or orchestrate self-organizing communication systems, such as recent social tagging systems for the web. The article introduces and studies a microscopic model of communicating autonomous agents performing language games without any central control. We show that the system undergoes a disorder/order transition, going through a sharp symmetry breaking process to reach a shared set of conventions. Before the transition, the system builds up non-trivial scale-invariant correlations, for instance in the distribution of competing synonyms, which display a Zipf-like law. These correlations make the system ready for the transition towards shared conventions, which, observed on the time-scale of collective behaviors, becomes sharper and sharper with system size. This surprising result not only explains why human language can scale up to very large populations but also suggests ways to optimize artificial semiotic dynamics.<|reference_end|> | arxiv | @article{baronchelli2005sharp,
title={Sharp transition towards shared vocabularies in multi-agent systems},
author={A. Baronchelli, M. Felici, E. Caglioti, V. Loreto and L. Steels},
journal={J. Stat. Mech. (2006) P06014},
year={2005},
doi={10.1088/1742-5468/2006/06/P06014},
archivePrefix={arXiv},
eprint={physics/0509075},
primaryClass={physics.soc-ph cond-mat.stat-mech cs.GT cs.MA}
} | baronchelli2005sharp |
arxiv-677075 | physics/0509134 | On the genre-fication of Music: a percolation approach (long version) | <|reference_start|>In this paper, we analyze web-downloaded data on people sharing their music library. By attributing to each music group usual music genres (Rock, Pop...), and analysing correlations between music groups of different genres with percolation-idea based methods, we probe the reality of these subdivisions and construct a music genre cartography, with a tree representation. We also show the diversity of music genres with Shannon entropy arguments, and discuss an alternative objective way to classify music that is based on the complex structure of the groups' audience. Finally, a link is drawn with the theory of hidden variables in complex networks.<|reference_end|> | arxiv | @article{lambiotte2005on,
title={On the genre-fication of Music: a percolation approach (long version)},
author={R. Lambiotte and M. Ausloos},
journal={Eur. Phys. J. B 50, 183-188 (2006)},
year={2005},
doi={10.1140/epjb/e2006-00115-0},
archivePrefix={arXiv},
eprint={physics/0509134},
primaryClass={physics.soc-ph cs.DL}
} | lambiotte2005on |
arxiv-677076 | physics/0509136 | Totally Secure Classical Communication Utilizing Johnson (-like) Noise and Kirchoff's Law | <|reference_start|>An absolutely secure, fast, inexpensive, robust, maintenance-free and low-power-consumption communication is proposed. The states of the information bit are represented by two resistance values. The sender and the receiver have such resistors available and they randomly select and connect one of them to the channel at the beginning of each clock period. The thermal noise voltage and current can be observed but Kirchoff's law provides only a second-order equation. A secure bit is communicated when the actual resistance values at the sender's side and the receiver's side differ. Then the second order equation yields the two resistance values, but the eavesdropper is unable to determine the actual locations of the resistors and to find out the state of the sender's bit. The receiver knows that the sender has the inverse of his bit, similarly to quantum entanglement. The eavesdropper can decode the message if, for each bit, she injects current into the wire and measures the voltage change and the current changes in the two directions. However, in this way she gets discovered by the very first bit she decodes. Instead of thermal noise, proper external noise generators should be used when the communication is not intended to be stealthy.<|reference_end|> | arxiv | @article{kish2005totally,
title={Totally Secure Classical Communication Utilizing Johnson (-like) Noise
and Kirchoff's Law},
author={Laszlo B. Kish},
journal={Physics Letters A 352 (March, 2006) 178-182},
year={2005},
doi={10.1016/j.physleta.2005.11.062},
archivePrefix={arXiv},
eprint={physics/0509136},
primaryClass={physics.class-ph cs.CR physics.gen-ph quant-ph}
} | kish2005totally |
arxiv-677077 | physics/0510117 | Modeling bursts and heavy tails in human dynamics | <|reference_start|>Current models of human dynamics, used from risk assessment to communications, assume that human actions are randomly distributed in time and thus well approximated by Poisson processes. We provide direct evidence that for five human activity patterns the timing of individual human actions follows non-Poisson statistics, characterized by bursts of rapidly occurring events separated by long periods of inactivity. We show that the bursty nature of human behavior is a consequence of a decision-based queueing process: when individuals execute tasks based on some perceived priority, the timing of the tasks will be heavy tailed, with most tasks being rapidly executed while a few experience very long waiting times. We discuss two queueing models that capture human activity. The first model assumes that there are no limitations on the number of tasks an individual can handle at any time, predicting that the waiting time of the individual tasks follows a heavy tailed distribution with exponent alpha=3/2. The second model imposes limitations on the queue length, resulting in alpha=1. We provide empirical evidence supporting the relevance of these two models to human activity patterns. Finally, we discuss possible extensions of the proposed queueing models and outline some future challenges in exploring the statistical mechanisms of human dynamics.<|reference_end|> | arxiv | @article{vazquez2005modeling,
title={Modeling bursts and heavy tails in human dynamics},
author={A. Vazquez, J. Gama Oliveira, Z. Dezso, K.-I. Goh, I. Kondor and A.-L.
Barabasi},
journal={Phys. Rev. E 73, 036127 (2006)},
year={2005},
doi={10.1103/PhysRevE.73.036127},
archivePrefix={arXiv},
eprint={physics/0510117},
primaryClass={physics.soc-ph cs.MA}
} | vazquez2005modeling |
arxiv-677078 | physics/0511064 | Logarithmic growth dynamics in software networks | <|reference_start|>In a recent paper, Krapivsky and Redner (Phys. Rev. E, 71 (2005) 036118) proposed a new growing network model with new nodes being attached to a randomly selected node, as well as to all ancestors of the target node. The model leads to a sparse graph with an average degree growing logarithmically with the system size. Here we present compelling evidence for software networks being the result of a similar class of growing dynamics. The predicted pattern of network growth, as well as the stationary in- and out-degree distributions, are consistent with the model. Our results confirm the view of large-scale software topology being generated through duplication-rewiring mechanisms. Implications of these findings are outlined.<|reference_end|> | arxiv | @article{valverde2005logarithmic,
title={Logarithmic growth dynamics in software networks},
author={Sergi Valverde, Ricard V. Sole},
journal={Europhys. Lett. 72 (5) (2005)},
year={2005},
doi={10.1209/epl/i2005-10314-9},
archivePrefix={arXiv},
eprint={physics/0511064},
primaryClass={physics.soc-ph cond-mat.dis-nn cs.SE}
} | valverde2005logarithmic |
arxiv-677079 | physics/0511201 | Strategies for fast convergence in semiotic dynamics | <|reference_start|>Semiotic dynamics is a novel field that studies how semiotic conventions spread and stabilize in a population of agents. This is a central issue both for theoretical and technological reasons, since large systems made up of communicating agents, like web communities or artificial embodied agent teams, are becoming widespread. In this paper we discuss a recently introduced simple multi-agent model which is able to account for the emergence of a shared vocabulary in a population of agents. In particular we introduce a new deterministic playing strategy for the agents that strongly improves the performance of the game in terms of faster convergence and reduced cognitive effort for the agents.<|reference_end|> | arxiv | @article{baronchelli2005strategies,
title={Strategies for fast convergence in semiotic dynamics},
author={Andrea Baronchelli, Luca Dall'Asta, Alain Barrat, Vittorio Loreto},
journal={Artificial Life X, edited by L. M. Rocha et al., pages 480-485,
MIT Press (2006)},
year={2005},
archivePrefix={arXiv},
eprint={physics/0511201},
primaryClass={physics.soc-ph cond-mat.stat-mech cs.GT cs.MA}
} | baronchelli2005strategies |
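For context, a minimal Python sketch of the basic Naming Game dynamics this paper builds on: random speaker-hearer pairs, invention on an empty inventory, collapse on success. It implements the standard random-word strategy rather than the paper's improved deterministic one, and the population size and step cap are arbitrary assumptions:

```python
import random

def naming_game(n_agents=200, max_steps=200_000):
    """Minimal mean-field Naming Game. Each agent holds a set of
    words; a random speaker utters a random word from its inventory
    (inventing one if empty). On success both players collapse their
    inventories to that word; on failure the hearer adds it."""
    inventories = [set() for _ in range(n_agents)]
    next_word = 0
    for step in range(1, max_steps + 1):
        s, h = random.sample(range(n_agents), 2)
        if not inventories[s]:
            inventories[s].add(next_word)   # invention
            next_word += 1
        word = random.choice(tuple(inventories[s]))
        if word in inventories[h]:          # success: both collapse
            inventories[s] = {word}
            inventories[h] = {word}
        else:                               # failure: hearer learns it
            inventories[h].add(word)
        if step % 100 == 0:                 # periodic consensus check
            words = set().union(*inventories)
            if len(words) == 1 and all(len(v) == 1 for v in inventories):
                return step
    return None

if __name__ == "__main__":
    print("converged after", naming_game(), "games")
```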
arxiv-677080 | physics/0512045 | Topology Induced Coarsening in Language Games | <|reference_start|>Topology Induced Coarsening in Language Games: We investigate how very large populations are able to reach a global consensus, out of local "microscopic" interaction rules, in the framework of a recently introduced class of models of semiotic dynamics, the so-called Naming Game. We compare in particular the convergence mechanism for interacting agents embedded in a low-dimensional lattice with respect to the mean-field case. We highlight that in low dimensions consensus is reached through a coarsening process which requires less cognitive effort from the agents than the mean-field case, but takes longer to complete. In 1-d the dynamics of the boundaries is mapped onto a truncated Markov process from which we analytically compute the diffusion coefficient. More generally we show that the convergence process requires a memory per agent scaling as N and lasts a time N^{1+2/d} in dimension d<5 (d=4 being the upper critical dimension), while in mean-field both memory and time scale as N^{3/2}, for a population of N agents. We present analytical and numerical evidence supporting this picture.<|reference_end|> | arxiv | @article{baronchelli2005topology,
title={Topology Induced Coarsening in Language Games},
author={A. Baronchelli, L. Dall'Asta, A. Barrat, V. Loreto},
journal={Phys. Rev. E 73, 015102(R) (2006)},
year={2005},
doi={10.1103/PhysRevE.73.015102},
archivePrefix={arXiv},
eprint={physics/0512045},
primaryClass={physics.soc-ph cond-mat.stat-mech cs.GT cs.MA}
} | baronchelli2005topology |
arxiv-677081 | physics/0601118 | Learning about knowledge: A complex network approach | <|reference_start|>Learning about knowledge: A complex network approach: This article describes an approach to modeling knowledge acquisition in terms of walks along complex networks. Each subset of knowledge is represented as a node, and relations between such knowledge are expressed as edges. Two types of edges are considered, corresponding to free and conditional transitions. The latter case implies that a node can only be reached after previously visiting a set of nodes (the required conditions). The process of knowledge acquisition can then be simulated by considering the number of nodes visited as a single agent moves along the network, starting from its lowest layer. It is shown that hierarchical networks, i.e. networks composed of successive interconnected layers, arise naturally as a consequence of compositions of the prerequisite relationships between the nodes. In order to avoid deadlocks, i.e. unreachable nodes, the subnetwork in each layer is assumed to be a connected component. Several configurations of such hierarchical knowledge networks are simulated and the performance of the moving agent quantified in terms of the percentage of visited nodes after each movement. The Barab\'asi-Albert and random models are considered for the layer and interconnecting subnetworks. Although all subnetworks in each realization have the same number of nodes, several interconnectivities, defined by the average node degree of the interconnection networks, have been considered. Two visiting strategies are investigated: random choice among the existing edges and preferential choice of so-far-untracked edges. A series of interesting results is obtained, including the identification of a series of plateaux of knowledge stagnation in the case of the preferential movement strategy in the presence of conditional edges.<|reference_end|> | arxiv | @article{costa2006learning,
title={Learning about knowledge: A complex network approach},
author={Luciano da Fontoura Costa},
journal={arXiv preprint arXiv:physics/0601118},
year={2006},
doi={10.1103/PhysRevE.74.026103},
archivePrefix={arXiv},
eprint={physics/0601118},
primaryClass={physics.soc-ph cond-mat.dis-nn cs.NE physics.comp-ph}
} | costa2006learning |
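A minimal Python sketch of a walk with conditional transitions as described above; the toy two-layer graph, the prerequisite set, and the step count are invented for illustration:

```python
import random

def knowledge_walk(edges, prereqs, start=0, steps=200):
    """Walk on a knowledge network with free and conditional edges.
    edges[v] lists v's neighbours; prereqs[v] is the set of nodes
    that must have been visited before v becomes reachable.
    Returns the fraction of visited nodes after each move."""
    visited = {start}
    pos = start
    coverage = []
    for _ in range(steps):
        open_nbrs = [v for v in edges[pos]
                     if prereqs.get(v, set()) <= visited]
        if open_nbrs:                      # otherwise: stagnation plateau
            pos = random.choice(open_nbrs)
            visited.add(pos)
        coverage.append(len(visited) / len(edges))
    return coverage

if __name__ == "__main__":
    # toy two-layer hierarchy: a ring of 10 nodes plus an upper node
    # (10) reachable from node 5 only after nodes 0..4 are known
    edges = {i: [(i - 1) % 10, (i + 1) % 10] for i in range(10)}
    edges[5] = edges[5] + [10]
    edges[10] = [5]
    prereqs = {10: set(range(5))}
    print(knowledge_walk(edges, prereqs)[-1])
```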
arxiv-677082 | physics/0601161 | Monte Carlo Algorithm for Least Dependent Non-Negative Mixture Decomposition | <|reference_start|>Monte Carlo Algorithm for Least Dependent Non-Negative Mixture Decomposition: We propose a simulated annealing algorithm (called SNICA for "stochastic non-negative independent component analysis") for blind decomposition of linear mixtures of non-negative sources with non-negative coefficients. The de-mixing is based on a Metropolis type Monte Carlo search for least dependent components, with the mutual information between recovered components as a cost function and their non-negativity as a hard constraint. Elementary moves are shears in two-dimensional subspaces and rotations in three-dimensional subspaces. The algorithm is geared at decomposing signals whose probability densities peak at zero, the case typical in analytical spectroscopy and multivariate curve resolution. The decomposition performance on large samples of synthetic mixtures and experimental data is much better than that of traditional blind source separation methods based on principal component analysis (MILCA, FastICA, RADICAL) and chemometrics techniques (SIMPLISMA, ALS, BTEM) The source codes of SNICA, MILCA and the MI estimator are freely available online at http://www.fz-juelich.de/nic/cs/software<|reference_end|> | arxiv | @article{astakhov2006monte,
title={Monte Carlo Algorithm for Least Dependent Non-Negative Mixture
Decomposition},
author={Sergey A. Astakhov, Harald St\"ogbauer, Alexander Kraskov, Peter
Grassberger},
journal={Analytical Chemistry; 2006; 78(5) pp 1620 - 1627},
year={2006},
doi={10.1021/ac051707c},
archivePrefix={arXiv},
eprint={physics/0601161},
primaryClass={physics.chem-ph cond-mat.stat-mech cs.IT math.IT math.PR math.ST physics.comp-ph physics.data-an stat.TH}
} | astakhov2006monte |
arxiv-677083 | physics/0601203 | Optimal Traffic Networks | <|reference_start|>Optimal Traffic Networks: Inspired by studies on the airports' network and the physical Internet, we propose a general model of weighted networks via an optimization principle. The topology of the optimal network turns out to be a spanning tree that minimizes a combination of topological and metric quantities. It is characterized by a strongly heterogeneous traffic, non-trivial correlations between distance and traffic and a broadly distributed centrality. A clear spatial hierarchical organization, with local hubs distributing traffic in smaller regions, emerges as a result of the optimization. Varying the parameters of the cost function, different classes of trees are recovered, including in particular the minimum spanning tree and the shortest path tree. These results suggest that a variational approach represents an alternative and possibly very meaningful path to the study of the structure of complex weighted networks.<|reference_end|> | arxiv | @article{barthelemy2006optimal,
title={Optimal Traffic Networks},
author={Marc Barthelemy and Alessandro Flammini},
journal={J. Stat. Mech. (2006) L07002},
year={2006},
doi={10.1088/1742-5468/2006/07/L07002},
archivePrefix={arXiv},
eprint={physics/0601203},
primaryClass={physics.soc-ph cond-mat.dis-nn cs.GL}
} | barthelemy2006optimal |
arxiv-677084 | physics/0602026 | Preferential attachment in the growth of social networks: the case of Wikipedia | <|reference_start|>Preferential attachment in the growth of social networks: the case of Wikipedia: We present an analysis of the statistical properties and growth of the free on-line encyclopedia Wikipedia. By representing topics as vertices and hyperlinks between them as edges, we can represent this encyclopedia as a directed graph. The topological properties of this graph are in close analogy with those of the World Wide Web, despite the very different growth mechanism. In particular we measure a scale-invariant distribution of the in- and out-degree, and we are able to reproduce these features by means of a simple statistical model. As a major consequence, Wikipedia growth can be described by local rules such as the preferential attachment mechanism, though users can act globally on the network.<|reference_end|> | arxiv | @article{capocci2006preferential,
title={Preferential attachment in the growth of social networks: the case of
Wikipedia},
author={A. Capocci, V.D.P. Servedio, F. Colaiori, L.S. Buriol, D. Donato, S.
Leonardi and G. Caldarelli},
journal={arXiv preprint arXiv:physics/0602026},
year={2006},
doi={10.1103/PhysRevE.74.036116},
archivePrefix={arXiv},
eprint={physics/0602026},
primaryClass={physics.soc-ph cs.OH}
} | capocci2006preferential |
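A minimal Python sketch of the local preferential-attachment mechanism invoked above; the network size, the number of edges per new node, and the (in-degree + 1) attachment kernel are illustrative assumptions:

```python
import random
from collections import Counter

def preferential_attachment(n=50_000, m=3):
    """Grow a directed graph: each new node sends m edges to targets
    chosen with probability proportional to (in-degree + 1). The
    targets list implements the proportional choice: every time a
    node receives an edge it is appended once more."""
    targets = list(range(m))            # seed nodes, weight 1 each
    indeg = Counter()
    for new in range(m, n):
        chosen = set()
        while len(chosen) < m:          # m distinct targets
            chosen.add(random.choice(targets))
        for t in chosen:
            indeg[t] += 1
            targets.append(t)
        targets.append(new)             # newcomer starts with weight 1
    return indeg

if __name__ == "__main__":
    dist = Counter(preferential_attachment().values())
    for k in (1, 2, 4, 8, 16, 32):
        print(k, dist.get(k, 0))        # heavy-tailed, roughly power law
```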
arxiv-677085 | physics/0602033 | Community Structure in the United States House of Representatives | <|reference_start|>Community Structure in the United States House of Representatives: We investigate the networks of committee and subcommittee assignments in the United States House of Representatives from the 101st--108th Congresses, with the committees connected by ``interlocks'' or common membership. We examine the community structure in these networks using several methods, revealing strong links between certain committees as well as an intrinsic hierarchical structure in the House as a whole. We identify structural changes, including additional hierarchical levels and higher modularity, resulting from the 1994 election, in which the Republican party earned majority status in the House for the first time in more than forty years. We also combine our network approach with analysis of roll call votes using singular value decomposition to uncover correlations between the political and organizational structure of House committees.<|reference_end|> | arxiv | @article{porter2006community,
title={Community Structure in the United States House of Representatives},
author={Mason A. Porter, Peter J. Mucha, M. E. J. Newman, and A. J. Friend},
journal={arXiv preprint arXiv:physics/0602033},
year={2006},
doi={10.1063/1.2390556},
archivePrefix={arXiv},
eprint={physics/0602033},
primaryClass={physics.soc-ph cond-mat.stat-mech cs.MA nlin.AO physics.data-an}
} | porter2006community |
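A small synthetic illustration of the singular-value-decomposition step mentioned above: on a toy roll-call matrix with two voting blocs, the leading singular vector separates the blocs. All data here are invented, not the congressional records used in the paper:

```python
import numpy as np

rng = np.random.default_rng(3)
# synthetic roll call: 60 legislators x 40 votes, two blocs voting
# "yea" (+1) / "nay" (-1) with bloc-dependent probabilities
bloc = np.repeat([0, 1], 30)
p_yea = np.where(bloc[:, None] == 0, 0.8, 0.2)
votes = np.where(rng.random((60, 40)) < p_yea, 1.0, -1.0)

U, S, Vt = np.linalg.svd(votes - votes.mean(0), full_matrices=False)
coord = U[:, 0] * S[0]                        # leading "partisan" axis
print(coord[:30].mean(), coord[30:].mean())   # opposite signs per bloc
```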
arxiv-677086 | physics/0603002 | Functional dissipation microarrays for classification | <|reference_start|>Functional dissipation microarrays for classification: In this article, we describe a new method of extracting information from signals, called functional dissipation, that proves to be very effective for enhancing classification of high resolution, texture-rich data. Our algorithm bypasses to some extent the need to have very specialized feature extraction techniques, and can potentially be used as an intermediate, feature enhancement step in any classification scheme. Functional dissipation is based on signal transforms, but uses the transforms recursively to uncover new features. We generate a variety of masking functions and 'extract' features with several generalized matching pursuit iterations. In each iteration, the recursive process modifies several coefficients of the transformed signal with the largest absolute values according to the specific masking function; in this way the greedy pursuit is turned into a slow, controlled dissipation of the structure of the signal that, for some masking functions, enhances separation among classes. Our case study in this paper is the classification of crystallization patterns of amino acid solutions affected by the addition of small quantities of proteins.<|reference_end|> | arxiv | @article{napoletani2006functional,
title={Functional dissipation microarrays for classification},
author={D. Napoletani, D. C. Struppa, T. Sauer, V. Morozov, N. Vsevolodov and
C. Bailey},
journal={Pattern Recognition 40(12): 3393-3400 (2007)},
year={2006},
archivePrefix={arXiv},
eprint={physics/0603002},
primaryClass={physics.data-an cs.CV}
} | napoletani2006functional |
arxiv-677087 | physics/0606053 | Optimal estimation for Large-Eddy Simulation of turbulence and application to the analysis of subgrid models | <|reference_start|>Optimal estimation for Large-Eddy Simulation of turbulence and application to the analysis of subgrid models: The tools of optimal estimation are applied to the study of subgrid models for Large-Eddy Simulation of turbulence. The concept of optimal estimator is introduced and its properties are analyzed in the context of applications to a priori tests of subgrid models. Attention is focused on the Cook and Riley model in the case of a scalar field in isotropic turbulence. Using DNS data, the relevance of the beta assumption is estimated by computing (i) generalized optimal estimators and (ii) the error brought by this assumption alone. Optimal estimators are computed for the subgrid variance using various sets of variables and various techniques (histograms and neural networks). It is shown that optimal estimators allow a thorough exploration of models. Neural networks prove to be relevant and very efficient in this framework, and further uses are suggested.<|reference_end|> | arxiv | @article{moreau2006optimal,
title={Optimal estimation for Large-Eddy Simulation of turbulence and
application to the analysis of subgrid models},
author={Antoine Moreau (LASMEA), Olivier Teytaud (LRI, INRIA Futurs),
Jean-Pierre Bertoglio (LMFA)},
journal={Physics of Fluids 18 (04/10/2006) 105101},
year={2006},
doi={10.1063/1.2357974},
archivePrefix={arXiv},
eprint={physics/0606053},
primaryClass={physics.class-ph cs.NE}
} | moreau2006optimal |
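The optimal estimator in the mean-square sense is the conditional expectation of the subgrid quantity given the chosen variables. The following Python sketch shows the histogram version on synthetic data; the target function, noise level, and bin count are assumptions, not the paper's DNS setup:

```python
import numpy as np

def optimal_estimator_1d(x, y, bins=64):
    """Histogram estimate of the optimal estimator E[y | x]: the
    conditional mean of y in each bin of x. Also returns the
    irreducible quadratic error this estimator leaves behind."""
    edges = np.linspace(x.min(), x.max(), bins + 1)
    idx = np.clip(np.digitize(x, edges) - 1, 0, bins - 1)
    sums = np.bincount(idx, weights=y, minlength=bins)
    counts = np.bincount(idx, minlength=bins)
    cond_mean = np.divide(sums, counts, out=np.zeros(bins),
                          where=counts > 0)
    residual = y - cond_mean[idx]
    return cond_mean, float(np.mean(residual ** 2))

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.uniform(-1, 1, 100_000)
    y = np.sin(3 * x) + 0.1 * rng.normal(size=x.size)  # toy subgrid target
    _, err = optimal_estimator_1d(x, y)
    print("irreducible error:", err)   # close to the noise variance 0.01
```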
arxiv-677088 | physics/0607116 | Utilisation de la substitution sensorielle par \'electro-stimulation linguale pour la pr\'evention des escarres chez les parapl\'egiques. Etude pr\'eliminaire | <|reference_start|>Utilisation de la substitution sensorielle par \'electro-stimulation linguale pour la pr\'evention des escarres chez les parapl\'egiques. Etude pr\'eliminaire: Pressure ulcers are recognized as a major health issue in individuals with spinal cord injuries and new approaches to prevent this pathology are necessary. An innovative health strategy is being developed through the use of computers and sensory substitution via the tongue in order to compensate for the sensory loss in the buttock area for individuals with paraplegia. This sensory compensation will enable individuals with spinal cord injuries to be aware of a localized excess of pressure at the skin/seat interface and, consequently, will enable them to prevent the formation of pressure ulcers by relieving the cutaneous area of suffering. This work reports an initial evaluation of this approach and the feasibility of creating an adapted behavior, with a change in pressure as a response to electro-stimulated information on the tongue. Obtained during a clinical study in 10 healthy seated subjects, the first results are encouraging, with 92% success in 100 performed tests. These results, which have to be completed and validated in the paraplegic population, may lead to a new approach to education in health to prevent the formation of pressure ulcers within this population. Keywords: Spinal Cord Injuries, Pressure Ulcer, Sensory Substitution, Health Education, Biomedical Informatics.<|reference_end|> | arxiv | @article{moreau-gaudry2006utilisation,
title={Utilisation de la substitution sensorielle par \'{e}lectro-stimulation
linguale pour la pr\'{e}vention des escarres chez les parapl\'{e}giques.
Etude pr\'{e}liminaire},
author={Alexandre Moreau-Gaudry (TIMC - IMAG), Fabien Robineau (TIMC - IMAG),
Pierre-Fr\'ed\'eric Andr\'e (CMUDD), Anne Prince (CMUDD), Pierre Pauget
(CMUDD), Jacques Demongeot (TIMC - IMAG), Yohan Payan (TIMC - IMAG)},
journal={L'escarre 30 (2006) 24-37},
year={2006},
archivePrefix={arXiv},
eprint={physics/0607116},
primaryClass={physics.med-ph cs.RO q-bio.NC}
} | moreau-gaudry2006utilisation |
arxiv-677089 | physics/0608166 | Information filtering via Iterative Refinement | <|reference_start|>Information filtering via Iterative Refinement: With the explosive growth of accessible information, especially on the Internet, evaluation-based filtering has become a crucial task. Various systems have been devised aiming to sort through large volumes of information and select what is likely to be more relevant. In this letter we analyse a new ranking method, where the reputation of information providers is determined self-consistently.<|reference_end|> | arxiv | @article{laureti2006information,
title={Information filtering via Iterative Refinement},
author={P. Laureti, L. Moret, Y.-C. Zhang, Y.-K. Yu},
journal={Europhys. Lett., 75 (6), 1006 (2006)},
year={2006},
doi={10.1209/epl/i2006-10204-8},
archivePrefix={arXiv},
eprint={physics/0608166},
primaryClass={physics.data-an cs.IR physics.soc-ph}
} | laureti2006information |
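A minimal Python sketch of one self-consistent refinement scheme in the spirit described above: object qualities as weighted averages of ratings, and rater weights as inverse error variances. The rating model and iteration count are assumptions, not the paper's exact algorithm:

```python
import numpy as np

def iterative_refinement(R, iters=50, eps=1e-8):
    """R[i, j] = rating of object j by user i (NaN = not rated).
    Alternates two steps until self-consistency: quality = weighted
    average of ratings; user weight = 1 / (mean squared deviation
    from the current qualities)."""
    rated = ~np.isnan(R)
    Rz = np.nan_to_num(R)
    w = np.ones(R.shape[0])
    for _ in range(iters):
        q = (w @ (Rz * rated)) / (w @ rated + eps)      # object qualities
        err = ((Rz - q) ** 2 * rated).sum(1) / (rated.sum(1) + eps)
        w = 1.0 / (err + eps)                           # rater reputations
    return q, w

if __name__ == "__main__":
    rng = np.random.default_rng(1)
    true_q = rng.uniform(0, 1, 30)
    noise = rng.uniform(0.01, 0.5, 10)                  # per-user noise level
    R = true_q + noise[:, None] * rng.normal(size=(10, 30))
    q, w = iterative_refinement(R)
    print(np.corrcoef(q, true_q)[0, 1])                 # close to 1
```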
arxiv-677090 | physics/0608185 | Updating Probabilities | <|reference_start|>Updating Probabilities: We show that Skilling's method of induction leads to a unique general theory of inductive inference, the method of Maximum relative Entropy (ME). The main tool for updating probabilities is the logarithmic relative entropy; other entropies such as those of Renyi or Tsallis are ruled out. We also show that Bayes updating is a special case of ME updating and thus, that the two are completely compatible.<|reference_end|> | arxiv | @article{caticha2006updating,
title={Updating Probabilities},
author={Ariel Caticha and Adom Giffin},
journal={arXiv preprint arXiv:physics/0608185},
year={2006},
doi={10.1063/1.2423258},
number={UAlbany-Physics 001},
archivePrefix={arXiv},
eprint={physics/0608185},
primaryClass={physics.data-an cond-mat.stat-mech cs.IT math.IT}
} | caticha2006updating |
arxiv-677091 | physics/0608293 | Automatic Trading Agent. RMT based Portfolio Theory and Portfolio Selection | <|reference_start|>Automatic Trading Agent. RMT based Portfolio Theory and Portfolio Selection: Portfolio theory is a very powerful tool in modern investment theory. It is helpful in estimating the risk of an investor's portfolio, which arises from our lack of information, uncertainty and incomplete knowledge of reality, which prevent a perfect prediction of future price changes. Despite its many advantages, this tool is not well known and not widely used among investors on the Warsaw Stock Exchange. The main reason for abandoning this method is its high level of complexity and the immense calculations it requires. The aim of this paper is to introduce an automatic decision-making system, which allows a single investor to use such complex methods of Modern Portfolio Theory (MPT). The key tool in MPT is the analysis of an empirical covariance matrix. This matrix, obtained from historical data, is biased by so much statistical uncertainty that it can be seen as random. By bringing into practice the ideas of Random Matrix Theory (RMT), the noise is removed or significantly reduced, so the future risk and return are better estimated and controlled. These concepts are applied to the Warsaw Stock Exchange Simulator http://gra.onet.pl. The result of the simulation is an 18% gain, compared with a corresponding 10% loss of the Warsaw Stock Exchange main index WIG.<|reference_end|> | arxiv | @article{snarska2006automatic,
title={Automatic Trading Agent. RMT based Portfolio Theory and Portfolio
Selection},
author={Malgorzata Snarska, Jakub Krzych},
journal={Acta Phys. Pol. B 37 (2006) 3145},
year={2006},
archivePrefix={arXiv},
eprint={physics/0608293},
primaryClass={physics.soc-ph cs.CE q-fin.PM stat.AP}
} | snarska2006automatic |
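A minimal Python sketch of the RMT cleaning step referred to above: eigenvalues of the empirical correlation matrix below the Marchenko-Pastur upper edge are treated as noise and flattened to their mean, preserving the trace. The synthetic one-factor data are an assumption for illustration:

```python
import numpy as np

def rmt_clean_correlation(returns):
    """Filter an empirical correlation matrix with the
    Marchenko-Pastur bound. Eigenvalues below
    lambda_max = (1 + sqrt(N/T))**2 are deemed noise and replaced
    by their mean, so total variance (the trace) is preserved."""
    T, N = returns.shape
    X = (returns - returns.mean(0)) / returns.std(0)
    C = (X.T @ X) / T
    lam, V = np.linalg.eigh(C)               # ascending eigenvalues
    lam_max = (1 + np.sqrt(N / T)) ** 2
    noise = lam < lam_max
    lam_clean = lam.copy()
    lam_clean[noise] = lam[noise].mean()      # keep trace, drop noise
    C_clean = V @ np.diag(lam_clean) @ V.T
    np.fill_diagonal(C_clean, 1.0)
    return C_clean

if __name__ == "__main__":
    rng = np.random.default_rng(7)
    market = rng.normal(size=(500, 1))        # one common "market" factor
    R = 0.3 * market + rng.normal(size=(500, 100))
    C = rmt_clean_correlation(R)
    print(np.linalg.eigvalsh(C)[-3:])         # one large mode survives
```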
arxiv-677092 | physics/0609097 | F.A.S.T. - Floor field- and Agent-based Simulation Tool | <|reference_start|>F.A.S.T. - Floor field- and Agent-based Simulation Tool: In this paper a model of pedestrian motion is presented. As an application, its parameters are fitted to one run of a primary school evacuation exercise. Simulations with these parameters are compared to further runs during the same exercise.<|reference_end|> | arxiv | @article{kretz2006f.a.s.t.,
title={F.A.S.T. - Floor field- and Agent-based Simulation Tool},
author={Tobias Kretz and Michael Schreckenberg},
journal={in Transport Simulation: Beyond Traditional Approaches, pp.
125-135 (2009)},
year={2006},
archivePrefix={arXiv},
eprint={physics/0609097},
primaryClass={physics.comp-ph cs.MA physics.soc-ph}
} | kretz2006f.a.s.t. |
arxiv-677093 | physics/0609217 | Stochastic Model for Power Grid Dynamics | <|reference_start|>Stochastic Model for Power Grid Dynamics: We introduce a stochastic model that describes the quasi-static dynamics of an electric transmission network under perturbations introduced by random load fluctuations, random removal of system components from service, random repair times for the failed components, and random response times to implement optimal system corrections for removing line overloads in a damaged or stressed transmission network. We use a linear approximation to the network flow equations and apply linear programming techniques that optimize the dispatching of generators and loads in order to eliminate the network overloads associated with a damaged system. We also provide a simple model for the operator's response to various contingency events that is not always optimal, due either to failure of the state estimation system or to an incorrect subjective assessment of the severity associated with these events. This further allows us to use a game-theoretic framework for casting the optimization of the operator's response into the choice of the optimal strategy which minimizes the operating cost. We use a simple strategy space which is the degree of tolerance to line overloads, an automatic control (optimization) parameter that can be adjusted to trade off automatic load shed without propagating cascades versus reduced load shed and an increased risk of propagating cascades. The tolerance parameter is chosen to describe a smooth transition from a risk-averse to a risk-taking strategy...<|reference_end|> | arxiv | @article{anghel2006stochastic,
title={Stochastic Model for Power Grid Dynamics},
author={Marian Anghel, Kenneth A. Werley, Adilson E. Motter},
journal={Proceedings of the Fortieth Hawaii International Conference on
System Sciences, January 3-6, 2007, Big Island, Hawaii},
year={2006},
archivePrefix={arXiv},
eprint={physics/0609217},
primaryClass={physics.soc-ph cond-mat.dis-nn cond-mat.other cs.OH}
} | anghel2006stochastic |
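A minimal Python sketch of the linear-programming dispatch step described above, on an invented three-bus DC network; all susceptances, limits, costs, and the shed penalty are illustrative assumptions, and the paper's full model adds cascades, repairs, and operator strategies on top of this step:

```python
import numpy as np
from scipy.optimize import linprog

# 3 buses, 3 identical lines of susceptance b and capacity f_max;
# generators at buses 1 and 2, a load d at bus 3.
# Variables x = [g1, g2, shed, th2, th3] (bus 1 is slack, th1 = 0).
b, f_max, d = 10.0, 60.0, 150.0
c = [1.0, 1.2, 1000.0, 0.0, 0.0]        # heavy penalty on load shed

# Nodal balance under the linearized (DC) flow equations:
A_eq = [[-1, 0, 0, -b, -b],             # bus 1: -b*th2 - b*th3 = g1
        [0, -1, 0, 2 * b, -b],          # bus 2: 2b*th2 - b*th3 = g2
        [0, 0, -1, -b, 2 * b]]          # bus 3: served load = d - shed
b_eq = [0.0, 0.0, -d]

# Line-flow limits |b*(th_i - th_j)| <= f_max, both signs:
A_ub = [[0, 0, 0, -b, 0], [0, 0, 0, b, 0],
        [0, 0, 0, 0, -b], [0, 0, 0, 0, b],
        [0, 0, 0, b, -b], [0, 0, 0, -b, b]]
b_ub = [f_max] * 6

bounds = [(0, 100), (0, 100), (0, d), (None, None), (None, None)]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
              bounds=bounds)
g1, g2, shed = res.x[:3]
print(f"g1={g1:.1f}  g2={g2:.1f}  shed={shed:.1f}")  # shed forced by limits
```

With these numbers, at most 120 MW can reach bus 3 through the two 60 MW lines, so the LP sheds the remaining 30 MW at minimum cost.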
arxiv-677094 | physics/0610051 | Structural Inference of Hierarchies in Networks | <|reference_start|>Structural Inference of Hierarchies in Networks: One property of networks that has received comparatively little attention is hierarchy, i.e., the property of having vertices that cluster together in groups, which then join to form groups of groups, and so forth, up through all levels of organization in the network. Here, we give a precise definition of hierarchical structure, give a generic model for generating arbitrary hierarchical structure in a random graph, and describe a statistically principled way to learn the set of hierarchical features that most plausibly explain a particular real-world network. By applying this approach to two example networks, we demonstrate its advantages for the interpretation of network data, the annotation of graphs with edge, vertex and community properties, and the generation of generic null models for further hypothesis testing.<|reference_end|> | arxiv | @article{clauset2006structural,
title={Structural Inference of Hierarchies in Networks},
author={Aaron Clauset and Cristopher Moore and M. E. J. Newman},
journal={Proc. 23rd International Conference on Machine Learning (ICML),
Workshop on Social Network Analysis, Pittsburgh PA, June 2006},
year={2006},
doi={10.1007/978-3-540-73133-7_1},
archivePrefix={arXiv},
eprint={physics/0610051},
primaryClass={physics.soc-ph cs.LG physics.data-an}
} | clauset2006structural |
arxiv-677095 | physics/0612122 | Ranking Scientific Publications Using a Simple Model of Network Traffic | <|reference_start|>Ranking Scientific Publications Using a Simple Model of Network Traffic: To account for strong aging characteristics of citation networks, we modify Google's PageRank algorithm by initially distributing random surfers exponentially with age, in favor of more recent publications. The output of this algorithm, which we call CiteRank, is interpreted as approximate traffic to individual publications in a simple model of how researchers find new information. We develop an analytical understanding of traffic flow in terms of an RPA-like model and optimize parameters of our algorithm to achieve the best performance. The results are compared for two rather different citation networks: all American Physical Society publications and the set of high-energy physics theory (hep-th) preprints. Despite major differences between these two networks, we find that their optimal parameters for the CiteRank algorithm are remarkably similar.<|reference_end|> | arxiv | @article{walker2006ranking,
title={Ranking Scientific Publications Using a Simple Model of Network Traffic},
author={Dylan Walker, Huafeng Xie, Koon-Kiu Yan, Sergei Maslov},
journal={J.Stat.Mech.0706:P06010,2007},
year={2006},
doi={10.1088/1742-5468/2007/06/P06010},
archivePrefix={arXiv},
eprint={physics/0612122},
primaryClass={physics.soc-ph cs.DL physics.data-an}
} | walker2006ranking |
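A minimal Python sketch of an age-biased PageRank in the spirit of CiteRank: random surfers start at papers with probability decaying exponentially with age and follow outgoing references with probability alpha. The decay time tau, alpha, and the toy citation network are assumptions:

```python
import numpy as np

def citerank(ages, cites, tau=2.5, alpha=0.5, iters=200):
    """Age-biased PageRank. ages[i] is the age of paper i; cites[i]
    lists the papers that paper i cites. Surfers start at paper i
    with weight exp(-age_i / tau) and follow a random outgoing
    reference with probability alpha, so traffic is sourced at
    recent papers and flows toward older, well-cited ones."""
    n = len(ages)
    rho = np.exp(-np.asarray(ages, dtype=float) / tau)
    rho /= rho.sum()
    score = rho.copy()
    for _ in range(iters):
        flow = np.zeros(n)
        for i, refs in enumerate(cites):
            if refs:
                flow[refs] += alpha * score[i] / len(refs)
        score = rho + flow        # fixed point of restart + propagation
    return score

if __name__ == "__main__":
    ages = [0, 1, 2, 5, 10]                    # paper 4 is the oldest
    cites = [[1, 2], [2, 3], [3], [4], []]     # edges point to cited work
    print(np.round(citerank(ages, cites), 3))
```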
arxiv-677096 | physics/0612134 | Effective networks for real-time distributed processing | <|reference_start|>Effective networks for real-time distributed processing: The problem of real-time processing is one of the most challenging current issues in computer science. Because of the large amount of data to be processed in a limited period of time, parallel and distributed systems are required, whose performance depends on a series of factors including the interconnectivity of the processing elements, the application model and the communication protocol. Given their flexibility for representing and modeling natural and human-made systems (such as the Internet and WWW), complex networks have become a primary choice in many research areas. The current work shows how the concepts and methods of complex networks can be used to develop realistic models and simulations of distributed real-time systems while taking into account two representative interconnection models: uniformly random and scale free (Barabasi-Albert), including the presence of background traffic of messages. The results obtained include the identification of the uniformly random interconnectivity scheme as substantially more efficient than its scale-free counterpart.<|reference_end|> | arxiv | @article{travieso2006effective,
title={Effective networks for real-time distributed processing},
author={Gonzalo Travieso and Luciano da Fontoura Costa},
journal={arXiv preprint arXiv:physics/0612134},
year={2006},
archivePrefix={arXiv},
eprint={physics/0612134},
primaryClass={physics.soc-ph cs.DC physics.comp-ph}
} | travieso2006effective |
arxiv-677097 | physics/0701012 | The Rise and Rise of Citation Analysis | <|reference_start|>The Rise and Rise of Citation Analysis: With the vast majority of scientific papers now available online, this paper describes how the Web is allowing physicists and information providers to measure more accurately the impact of these papers and their authors. It provides a historical background of citation analysis, the impact factor, new citation data sources (e.g., Google Scholar, Scopus, NASA's Astrophysics Data System Abstract Service, MathSciNet, ScienceDirect, SciFinder Scholar, Scitation/SPIN, and SPIRES-HEP), as well as the h-index, g-index, and a-index.<|reference_end|> | arxiv | @article{meho2006the,
title={The Rise and Rise of Citation Analysis},
author={L. I. Meho},
journal={arXiv preprint arXiv:physics/0701012},
year={2006},
archivePrefix={arXiv},
eprint={physics/0701012},
primaryClass={physics.soc-ph cs.DL}
} | meho2006the |
arxiv-677098 | physics/0701081 | Spatio-Temporal Electromagnetic Field Shapes and their Logical Processing | <|reference_start|>Spatio-Temporal Electromagnetic Field Shapes and their Logical Processing: This paper concerns spatio-temporal signals with topologically modulated electromagnetic fields. The carrier of the digital information is a topological scheme composed of the separatrix manifolds and equilibrium positions of the field. The signals, and the hardware developed for processing them in the space-time domain, are considered.<|reference_end|> | arxiv | @article{kouzaev2007spatio-temporal,
title={Spatio-Temporal Electromagnetic Field Shapes and their Logical
Processing},
author={G.A. Kouzaev},
journal={arXiv preprint arXiv:physics/0701081},
year={2007},
archivePrefix={arXiv},
eprint={physics/0701081},
primaryClass={physics.comp-ph cs.CV physics.gen-ph}
} | kouzaev2007spatio-temporal |
arxiv-677099 | physics/0701168 | A Gap in the Community-Size Distribution of a Large-Scale Social Networking Site | <|reference_start|>A Gap in the Community-Size Distribution of a Large-Scale Social Networking Site: Social networking sites (SNS) have recently been used by millions of people all over the world. An SNS is a society on the Internet, where people communicate and foster friendship with each other. We examine a nation-wide SNS (more than six million users at present), a mutually acknowledged friendship network with about a third of a million people and nearly two million links. By employing a community-extracting method developed by Newman and others, we found that there exists a range of community sizes in which only a few communities are detected. This novel feature cannot be explained by previous growth models of networks. We present a simple model with two processes of acquaintance, connecting nearest neighbors and random linkage. We show that the model can explain the gap in the community-size distribution as well as other statistical properties, including a long-tail degree distribution, high transitivity, its correlation with degree, and degree-degree correlation. The model can estimate the relative frequencies with which the two processes, which are ubiquitous in many social networks, operate in this SNS as well as in other societies.<|reference_end|> | arxiv | @article{yuta2007a,
title={A Gap in the Community-Size Distribution of a Large-Scale Social
Networking Site},
author={Kikuo Yuta, Naoaki Ono, and Yoshi Fujiwara},
journal={arXiv preprint arXiv:physics/0701168},
year={2007},
archivePrefix={arXiv},
eprint={physics/0701168},
primaryClass={physics.soc-ph cs.CY physics.data-an}
} | yuta2007a |
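A minimal Python sketch of a growth model with the two acquaintance processes named above, triadic closure ("connecting nearest neighbors") and random linkage; the parameters and attachment details are invented and are not the paper's calibrated model:

```python
import random
from collections import Counter

def grow_sns(n=30_000, p_close=0.8, p_rand=0.3):
    """Toy growth model with two acquaintance processes. Each
    newcomer befriends a random member (the anchor), then with
    probability p_close also one of the anchor's friends (triadic
    closure), and with probability p_rand one random member."""
    adj = {0: {1}, 1: {0}}
    for v in range(2, n):
        anchor = random.randrange(v)
        adj[v] = {anchor}
        adj[anchor].add(v)
        nbrs = [u for u in adj[anchor] if u != v]
        if nbrs and random.random() < p_close:
            u = random.choice(nbrs)          # friend-of-a-friend link
            adj[v].add(u)
            adj[u].add(v)
        if random.random() < p_rand:
            u = random.randrange(v)          # random linkage
            adj[v].add(u)
            adj[u].add(v)
    return adj

if __name__ == "__main__":
    degrees = Counter(len(friends) for friends in grow_sns().values())
    print(sorted(degrees.items())[:8])       # long-tailed distribution
```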
arxiv-677100 | physics/0702015 | Size reduction of complex networks preserving modularity | <|reference_start|>Size reduction of complex networks preserving modularity: The ubiquity of modular structure in real-world complex networks has been the focus of attention in many attempts to understand the interplay between network topology and functionality. The best approaches to the identification of modular structure are based on the optimization of a quality function known as modularity. However, this optimization is a hard task, since the computational complexity of the problem is in the NP-hard class. Here we propose an exact method for reducing the size of weighted (directed and undirected) complex networks while keeping their modularity invariant. This size reduction allows heuristic modularity-optimization algorithms to explore the modularity landscape more effectively. We compare the modularity obtained in several real complex networks by using the Extremal Optimization algorithm, before and after the size reduction, showing the improvement obtained. We speculate that the proposed analytical size reduction could be extended to an exact coarse graining of the network in the scope of real-space renormalization.<|reference_end|> | arxiv | @article{arenas2007size,
title={Size reduction of complex networks preserving modularity},
author={A. Arenas, J. Duch, A. Fernandez and S. Gomez},
journal={New Journal of Physics 9 (2007) 176},
year={2007},
doi={10.1088/1367-2630/9/6/176},
archivePrefix={arXiv},
eprint={physics/0702015},
primaryClass={physics.comp-ph cond-mat.other cs.DM physics.data-an q-bio.QM}
} | arenas2007size |
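A minimal Python sketch of the invariance being exploited here: collapsing a module into a single node whose self-loop carries the module's internal weight leaves the modularity Q unchanged. The toy graph and partition are assumptions; the paper proves this exactly for weighted directed and undirected networks:

```python
import numpy as np

def modularity(W, labels):
    """Newman modularity of a weighted undirected graph:
    Q = sum_c [ w_cc / w_tot - (s_c / w_tot)^2 ], with w_cc the
    internal weight of module c, s_c its total strength, and
    w_tot the sum of all matrix entries."""
    w_tot = W.sum()
    q = 0.0
    for c in set(labels):
        idx = np.flatnonzero(labels == c)
        w_cc = W[np.ix_(idx, idx)].sum()
        s_c = W[idx].sum()
        q += w_cc / w_tot - (s_c / w_tot) ** 2
    return q

def merge_module(W, labels, c):
    """Collapse module c into one node whose self-loop carries the
    internal weight: the size reduction that preserves Q."""
    idx = np.flatnonzero(labels == c)
    keep = np.flatnonzero(labels != c)
    row = W[idx][:, keep].sum(0)
    self_loop = W[np.ix_(idx, idx)].sum()
    W2 = np.block([[W[np.ix_(keep, keep)], row[:, None]],
                   [row[None, :], np.array([[self_loop]])]])
    return W2, np.append(labels[keep], c)

if __name__ == "__main__":
    # two triangles joined by one edge, one module each
    W = np.zeros((6, 6))
    for a, b in [(0, 1), (1, 2), (0, 2), (3, 4), (4, 5), (3, 5), (2, 3)]:
        W[a, b] = W[b, a] = 1.0
    labels = np.array([0, 0, 0, 1, 1, 1])
    W2, labels2 = merge_module(W, labels, 1)
    print(modularity(W, labels), modularity(W2, labels2))  # identical
```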