corpus_id
stringlengths
7
12
paper_id
stringlengths
9
16
title
stringlengths
1
261
abstract
stringlengths
70
4.02k
source
stringclasses
1 value
bibtex
stringlengths
208
20.9k
citation_key
stringlengths
6
100
arxiv-670901
cs/0212041
Robust Classification with Context-Sensitive Features
<|reference_start|>Robust Classification with Context-Sensitive Features: This paper addresses the problem of classifying observations when features are context-sensitive, especially when the testing set involves a context that is different from the training set. The paper begins with a precise definition of the problem, then general strategies are presented for enhancing the performance of classification algorithms on this type of problem. These strategies are tested on three domains. The first domain is the diagnosis of gas turbine engines. The problem is to diagnose a faulty engine in one context, such as warm weather, when the fault has previously been seen only in another context, such as cold weather. The second domain is speech recognition. The context is given by the identity of the speaker. The problem is to recognize words spoken by a new speaker, not represented in the training set. The third domain is medical prognosis. The problem is to predict whether a patient with hepatitis will live or die. The context is the age of the patient. For all three domains, exploiting context results in substantially more accurate classification.<|reference_end|>
arxiv
@article{turney2002robust, title={Robust Classification with Context-Sensitive Features}, author={Peter D. Turney (National Research Council of Canada)}, journal={Proceedings of the Sixth International Conference on Industrial and Engineering Applications of Artificial Intelligence and Expert Systems, Edinburgh, Scotland, (1993), 268-276}, year={2002}, number={NRC-35074}, archivePrefix={arXiv}, eprint={cs/0212041}, primaryClass={cs.LG cs.CV} }
turney2002robust
arxiv-670902
cs/0212042
Increasing Evolvability Considered as a Large-Scale Trend in Evolution
<|reference_start|>Increasing Evolvability Considered as a Large-Scale Trend in Evolution: Evolvability is the capacity to evolve. This paper introduces a simple computational model of evolvability and demonstrates that, under certain conditions, evolvability can increase indefinitely, even when there is no direct selection for evolvability. The model shows that increasing evolvability implies an accelerating evolutionary pace. It is suggested that the conditions for indefinitely increasing evolvability are satisfied in biological and cultural evolution. We claim that increasing evolvability is a large-scale trend in evolution. This hypothesis leads to testable predictions about biological and cultural evolution.<|reference_end|>
arxiv
@article{turney2002increasing, title={Increasing Evolvability Considered as a Large-Scale Trend in Evolution}, author={Peter D. Turney (National Research Council of Canada)}, journal={Proceedings of the 1999 Genetic and Evolutionary Computation Conference Workshop Program, (1999), 43-46}, year={2002}, number={NRC-43583}, archivePrefix={arXiv}, eprint={cs/0212042}, primaryClass={cs.NE cs.CE q-bio.PE} }
turney2002increasing
arxiv-670903
cs/0212043
Computing Conformal Structure of Surfaces
<|reference_start|>Computing Conformal Structure of Surfaces: This paper solves the problem of computing conformal structures of general 2-manifolds represented as triangle meshes. We compute conformal structures in the following way: first compute homology bases from simplicial complex structures, then construct dual cohomology bases and diffuse them to harmonic 1-forms. Next, we construct bases of holomorphic differentials. We then obtain period matrices by integrating holomorphic differentials along homology bases. We also study the global conformal mapping between genus zero surfaces and spheres, and between general meshes and planes. Our method of computing conformal structures can be applied to tackle fundamental problems in computer aid design and computer graphics, such as geometry classification and identification, and surface global parametrization.<|reference_end|>
arxiv
@article{gu2002computing, title={Computing Conformal Structure of Surfaces}, author={Xianfeng Gu and Shing-Tung Yau}, journal={arXiv preprint arXiv:cs/0212043}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212043}, primaryClass={cs.GR cs.CG} }
gu2002computing
arxiv-670904
cs/0212044
Solving a "Hard" Problem to Approximate an "Easy" One: Heuristics for Maximum Matchings and Maximum Traveling Salesman Problems
<|reference_start|>Solving a "Hard" Problem to Approximate an "Easy" One: Heuristics for Maximum Matchings and Maximum Traveling Salesman Problems: We consider geometric instances of the Maximum Weighted Matching Problem (MWMP) and the Maximum Traveling Salesman Problem (MTSP) with up to 3,000,000 vertices. Making use of a geometric duality relationship between MWMP, MTSP, and the Fermat-Weber-Problem (FWP), we develop a heuristic approach that yields in near-linear time solutions as well as upper bounds. Using various computational tools, we get solutions within considerably less than 1% of the optimum. An interesting feature of our approach is that, even though an FWP is hard to compute in theory and Edmonds' algorithm for maximum weighted matching yields a polynomial solution for the MWMP, the practical behavior is just the opposite, and we can solve the FWP with high accuracy in order to find a good heuristic solution for the MWMP.<|reference_end|>
arxiv
@article{fekete2002solving, title={Solving a "Hard" Problem to Approximate an "Easy" One: Heuristics for Maximum Matchings and Maximum Traveling Salesman Problems}, author={Sandor P. Fekete and Henk Meijer and Andre Rohe and Walter Tietze}, journal={Journal of Experimental Algorithms, 7 (2002), article 11.}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212044}, primaryClass={cs.DS} }
fekete2002solving
arxiv-670905
cs/0212045
Local Community Identification through User Access Patterns
<|reference_start|>Local Community Identification through User Access Patterns: Community identification algorithms have been used to enhance the quality of the services perceived by its users. Although algorithms for community identification have a widespread use in the Web, their application to portals or specific subsets of the Web has not been much studied. In this paper, we propose a technique for local community identification that takes into account user access behavior derived from access logs of servers in the Web. The technique takes a departure from the existing community algorithms since it changes the focus of interest, moving from authors to users. Our approach does not use relations imposed by authors (e.g. hyperlinks in the case of Web pages). It uses information derived from user accesses to a service in order to infer relationships. The communities identified are of great interest to content providers since they can be used to improve quality of their services. We also propose an evaluation methodology for analyzing the results obtained by the algorithm. We present two case studies based on actual data from two services: an online bookstore and an online radio. The case of the online radio is particularly relevant, because it emphasizes the contribution of the proposed algorithm to find out communities in an environment (i.e., streaming media service) without links, that represent the relations imposed by authors (e.g. hyperlinks in the case of Web pages).<|reference_end|>
arxiv
@article{almeida2002local, title={Local Community Identification through User Access Patterns}, author={Rodrigo B. Almeida and Virgilio A. F. Almeida}, journal={arXiv preprint arXiv:cs/0212045}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212045}, primaryClass={cs.IR cs.HC} }
almeida2002local
arxiv-670906
cs/0212046
Confluent Drawings: Visualizing Non-planar Diagrams in a Planar Way
<|reference_start|>Confluent Drawings: Visualizing Non-planar Diagrams in a Planar Way: In this paper, we introduce a new approach for drawing diagrams that have applications in software visualization. Our approach is to use a technique we call confluent drawing for visualizing non-planar diagrams in a planar way. This approach allows us to draw, in a crossing-free manner, graphs--such as software interaction diagrams--that would normally have many crossings. The main idea of this approach is quite simple: we allow groups of edges to be merged together and drawn as "tracks" (similar to train tracks). Producing such confluent diagrams automatically from a graph with many crossings is quite challenging, however, so we offer two heuristic algorithms to test if a non-planar graph can be drawn efficiently in a confluent way. In addition, we identify several large classes of graphs that can be completely categorized as being either confluently drawable or confluently non-drawable.<|reference_end|>
arxiv
@article{dickerson2002confluent, title={Confluent Drawings: Visualizing Non-planar Diagrams in a Planar Way}, author={Matthew Dickerson and David Eppstein and Michael T. Goodrich and Jeremy Meng}, journal={J. Graph Algorithms and Applications (special issue for GD'03) 9(1):31-52, 2005.}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212046}, primaryClass={cs.CG cs.SE} }
dickerson2002confluent
arxiv-670907
cs/0212047
On local equilibrium equations for clustering states
<|reference_start|>On local equilibrium equations for clustering states: In this note we show that local equilibrium equations (the generalization of the TAP equations or of the belief propagation equations) do have solutions in the colorable phase of the coloring problem. The same results extend to other optimization problems where the solutions has cost zero (e.g. K-satisfiability). On a random graph the solutions of the local equilibrium equations are associated to clusters of configurations (clustering states). On a random graph the local equilibrium equations have solutions almost everywhere in the uncolored phase; in this case we have to introduce the concept quasi-solution of the local equilibrium equations.<|reference_end|>
arxiv
@article{parisi2002on, title={On local equilibrium equations for clustering states}, author={Giorgio Parisi}, journal={arXiv preprint arXiv:cs/0212047}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212047}, primaryClass={cs.CC cond-mat.dis-nn cs.DS} }
parisi2002on
arxiv-670908
cs/0212048
Strategic polymorphism requires just two combinators!
<|reference_start|>Strategic polymorphism requires just two combinators!: In previous work, we introduced the notion of functional strategies: first-class generic functions that can traverse terms of any type while mixing uniform and type-specific behaviour. Functional strategies transpose the notion of term rewriting strategies (with coverage of traversal) to the functional programming paradigm. Meanwhile, a number of Haskell-based models and combinator suites were proposed to support generic programming with functional strategies. In the present paper, we provide a compact and matured reconstruction of functional strategies. We capture strategic polymorphism by just two primitive combinators. This is done without commitment to a specific functional language. We analyse the design space for implementational models of functional strategies. For completeness, we also provide an operational reference model for implementing functional strategies (in Haskell). We demonstrate the generality of our approach by reconstructing representative fragments of the Strafunski library for functional strategies.<|reference_end|>
arxiv
@article{laemmel2002strategic, title={Strategic polymorphism requires just two combinators!}, author={Ralf Laemmel and Joost Visser}, journal={arXiv preprint arXiv:cs/0212048}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212048}, primaryClass={cs.PL} }
laemmel2002strategic
arxiv-670909
cs/0212049
An Ehrenfeucht-Fraisse Game Approach to Collapse Results in Database Theory
<|reference_start|>An Ehrenfeucht-Fraisse Game Approach to Collapse Results in Database Theory: We present a new Ehrenfeucht-Fraisse game approach to collapse results in database theory and we show that, in principle, this approach suffices to prove every natural generic collapse result. Following this approach we can deal with certain infinite databases where previous, highly involved methods fail. We prove the natural generic collapse for Z-embeddable databases over any linearly ordered context structure with arbitrary monadic predicates, and for N-embeddable databases over the context structure (R,<,+,Mon_Q,Groups). Here, N, Z, R, denote the sets of natural numbers, integers, and real numbers, respectively. Groups is the collection of all subgroups of (R,+) that contain Z, and Mon_Q is the collection of all subsets of a particular infinite subset Q of N. Restricting the complexity of the formulas that may be used to formulate queries to Boolean combinations of purely existential first-order formulas, we even obtain the collapse for N-embeddable databases over any linearly ordered context structure with arbitrary predicates. Finally, we develop the notion of N-representable databases, which is a natural generalization of the classical notion of finitely representable databases. We show that natural generic collapse results for N-embeddable databases can be lifted to the larger class of N-representable databases. To obtain, in particular, the collapse result for (N,<,+,Mon_Q), we explicitly construct a winning strategy for the duplicator in the presence of the built-in addition relation +. This, as a side product, also leads to an Ehrenfeucht-Fraisse game proof of the theorem of Ginsburg and Spanier, stating that the spectra of FO(<,+)-sentences are semi-linear.<|reference_end|>
arxiv
@article{schweikardt2002an, title={An Ehrenfeucht-Fraisse Game Approach to Collapse Results in Database Theory}, author={Nicole Schweikardt}, journal={arXiv preprint arXiv:cs/0212049}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212049}, primaryClass={cs.LO cs.DB} }
schweikardt2002an
arxiv-670910
cs/0212050
Open Problems from CCCG 2002
<|reference_start|>Open Problems from CCCG 2002: A list of the problems presented on August 12, 2002 at the open-problem session of the 14th Canadian Conference on Computational Geometry held in Lethbridge, Alberta, Canada.<|reference_end|>
arxiv
@article{demaine2002open, title={Open Problems from CCCG 2002}, author={Erik D. Demaine and Joseph O'Rourke}, journal={arXiv preprint arXiv:cs/0212050}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212050}, primaryClass={cs.CG cs.DM} }
demaine2002open
arxiv-670911
cs/0212051
Exploiting Web Service Semantics: Taxonomies vs Ontologies
<|reference_start|>Exploiting Web Service Semantics: Taxonomies vs Ontologies: Comprehensive semantic descriptions of Web services are essential to exploit them in their full potential, that is, discovering them dynamically, and enabling automated service negotiation, composition and monitoring. The semantic mechanisms currently available in service registries which are based on taxonomies fail to provide the means to achieve this. Although the terms taxonomy and ontology are sometimes used interchangeably there is a critical difference. A taxonomy indicates only class/subclass relationship whereas an ontology describes a domain completely. The essential mechanisms that ontology languages provide include their formal specification (which allows them to be queried) and their ability to define properties of classes. Through properties very accurate descriptions of services can be defined and services can be related to other services or resources. In this paper, we discuss the advantages of describing service semantics through ontology languages and describe how to relate the semantics defined with the services advertised in service registries like UDDI and ebXML.<|reference_end|>
arxiv
@article{dogac2002exploitingweb, title={Exploiting Web Service Semantics: Taxonomies vs. Ontologies}, author={Asuman Dogac and Gokce Laleci and Yildiray Kabak and Ibrahim Cingil}, journal={IEEE Data Engineering Bulletin, Vol. 25, No. 4, December 2002}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212051}, primaryClass={cs.DB} }
dogac2002exploitingweb
arxiv-670912
cs/0212052
Improving the Functionality of UDDI Registries through Web Service Semantics
<|reference_start|>Improving the Functionality of UDDI Registries through Web Service Semantics: In this paper we describe a framework for exploiting the semantics of Web services through UDDI registries. As a part of this framework, we extend the DAML-S upper ontology to describe the functionality we find essential for e-businesses. This functionality includes relating the services with electronic catalogs, describing the complementary services and finding services according to the properties of products or services. Once the semantics is defined, there is a need for a mechanism in the service registry to relate it with the service advertised. The ontology model developed is general enough to be used with any service registry. However when it comes to relating the semantics with services advertised, the capabilities provided by the registry effects how this is achieved. We demonstrate how to integrate the described service semantics to UDDI registries.<|reference_end|>
arxiv
@article{dogac2002improving, title={Improving the Functionality of UDDI Registries through Web Service Semantics}, author={Asuman Dogac and Ibrahim Cingil and Gokce Laleci and Yildiray Kabak}, journal={3rd VLDB Workshop on Technologies for E-Services (TES-02), Hong Kong, China, August 23-24, 2002}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212052}, primaryClass={cs.DB} }
dogac2002improving
arxiv-670913
cs/0212053
Merging Locally Correct Knowledge Bases: A Preliminary Report
<|reference_start|>Merging Locally Correct Knowledge Bases: A Preliminary Report: Belief integration methods are often aimed at deriving a single and consistent knowledge base that retains as much as possible of the knowledge bases to integrate. The rationale behind this approach is the minimal change principle: the result of the integration process should differ as less as possible from the knowledge bases to integrate. We show that this principle can be reformulated in terms of a more general model of belief revision, based on the assumption that inconsistency is due to the mistakes the knowledge bases contain. Current belief revision strategies are based on a specific kind of mistakes, which however does not include all possible ones. Some alternative possibilities are discussed.<|reference_end|>
arxiv
@article{liberatore2002merging, title={Merging Locally Correct Knowledge Bases: A Preliminary Report}, author={Paolo Liberatore}, journal={arXiv preprint arXiv:cs/0212053}, year={2002}, archivePrefix={arXiv}, eprint={cs/0212053}, primaryClass={cs.AI cs.LO} }
liberatore2002merging
arxiv-670914
cs/0212054
Improved Compact Visibility Representation of Planar Graph via Schnyder's Realizer
<|reference_start|>Improved Compact Visibility Representation of Planar Graph via Schnyder's Realizer: Let $G$ be an $n$-node planar graph. In a visibility representation of $G$, each node of $G$ is represented by a horizontal line segment such that the line segments representing any two adjacent nodes of $G$ are vertically visible to each other. In the present paper we give the best known compact visibility representation of $G$. Given a canonical ordering of the triangulated $G$, our algorithm draws the graph incrementally in a greedy manner. We show that one of three canonical orderings obtained from Schnyder's realizer for the triangulated $G$ yields a visibility representation of $G$ no wider than $\frac{22n-40}{15}$. Our easy-to-implement O(n)-time algorithm bypasses the complicated subroutines for four-connected components and four-block trees required by the best previously known algorithm of Kant. Our result provides a negative answer to Kant's open question about whether $\frac{3n-6}{2}$ is a worst-case lower bound on the required width. Also, if $G$ has no degree-three (respectively, degree-five) internal node, then our visibility representation for $G$ is no wider than $\frac{4n-9}{3}$ (respectively, $\frac{4n-7}{3}$). Moreover, if $G$ is four-connected, then our visibility representation for $G$ is no wider than $n-1$, matching the best known result of Kant and He. As a by-product, we obtain a much simpler proof for a corollary of Wagner's Theorem on realizers, due to Bonichon, Sa\"{e}c, and Mosbah.<|reference_end|>
arxiv
@article{lin2002improved, title={Improved Compact Visibility Representation of Planar Graph via Schnyder's Realizer}, author={Ching-Chi Lin and Hsueh-I Lu and I-Fan Sun}, journal={SIAM Journal on Discrete Math, 18(1):19-29, 2004}, year={2002}, doi={10.1137/S0895480103420744}, archivePrefix={arXiv}, eprint={cs/0212054}, primaryClass={cs.DS cs.CG} }
lin2002improved
arxiv-670915
cs/0212055
Mathematical foundations of modern cryptography: computational complexity perspective
<|reference_start|>Mathematical foundations of modern cryptography: computational complexity perspective: Theoretical computer science has found fertile ground in many areas of mathematics. The approach has been to consider classical problems through the prism of computational complexity, where the number of basic computational steps taken to solve a problem is the crucial qualitative parameter. This new approach has led to a sequence of advances, in setting and solving new mathematical challenges as well as in harnessing discrete mathematics to the task of solving real-world problems. In this talk, I will survey the development of modern cryptography -- the mathematics behind secret communications and protocols -- in this light. I will describe the complexity theoretic foundations underlying the cryptographic tasks of encryption, pseudo-randomness number generators and functions, zero knowledge interactive proofs, and multi-party secure protocols. I will attempt to highlight the paradigms and proof techniques which unify these foundations, and which have made their way into the mainstream of complexity theory.<|reference_end|>
arxiv
@article{goldwasser2002mathematical, title={Mathematical foundations of modern cryptography: computational complexity perspective}, author={Shafi Goldwasser}, journal={Proceedings of the ICM, Beijing 2002, vol. 1, 245--272}, year={2002}, number={ICM-2002}, archivePrefix={arXiv}, eprint={cs/0212055}, primaryClass={cs.CR cs.CC} }
goldwasser2002mathematical
arxiv-670916
cs/0212056
On the Work of Madhu Sudan: the 2002 Nevanlinna Prize Winner
<|reference_start|>On the Work of Madhu Sudan: the 2002 Nevanlinna Prize Winner: Madhu Sudan's work spans many areas of computer science theory including computational complexity theory, the design of efficient algorithms, algorithmic coding theory, and the theory of program checking and correcting. Two results of Sudan stand out in the impact they have had on the mathematics of computation. The first work shows a probabilistic characterization of the class NP -- those sets for which short and easily checkable proofs of membership exist, and demonstrates consequences of this characterization to classifying the complexity of approximation problems. The second work shows a polynomial time algorithm for list decoding the Reed Solomon error correcting codes. This short note will be devoted to describing Sudan's work on probabilistically checkable proofs -- the so called {\it PCP theorem} and its implications.<|reference_end|>
arxiv
@article{goldwasser2002on, title={On the Work of Madhu Sudan: the 2002 Nevanlinna Prize Winner}, author={Shafi Goldwasser}, journal={Proceedings of the ICM, Beijing 2002, vol. 1, 105--115}, year={2002}, number={ICM-2002}, archivePrefix={arXiv}, eprint={cs/0212056}, primaryClass={cs.CC} }
goldwasser2002on
arxiv-670917
cs/0301001
Least squares fitting of circles and lines
<|reference_start|>Least squares fitting of circles and lines: We study theoretical and computational aspects of the least squares fit (LSF) of circles and circular arcs. First we discuss the existence and uniqueness of LSF and various parametrization schemes. Then we evaluate several popular circle fitting algorithms and propose a new one that surpasses the existing methods in reliability. We also discuss and compare direct (algebraic) circle fits.<|reference_end|>
arxiv
@article{chernov2003least, title={Least squares fitting of circles and lines}, author={N. Chernov and C. Lesort}, journal={arXiv preprint arXiv:cs/0301001}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301001}, primaryClass={cs.CV} }
chernov2003least
arxiv-670918
cs/0301002
Practical and Robust Stenciled Shadow Volumes for Hardware-Accelerated Rendering
<|reference_start|>Practical and Robust Stenciled Shadow Volumes for Hardware-Accelerated Rendering: Twenty-five years ago, Crow published the shadow volume approach for determining shadowed regions in a scene. A decade ago, Heidmann described a hardware-accelerated stencil buffer-based shadow volume algorithm. Unfortunately hardware-accelerated stenciled shadow volume techniques have not been widely adopted by 3D games and applications due in large part to the lack of robustness of described techniques. This situation persists despite widely available hardware support. Specifically what has been lacking is a technique that robustly handles various "hard" situations created by near or far plane clipping of shadow volumes. We describe a robust, artifact-free technique for hardware-accelerated rendering of stenciled shadow volumes. Assuming existing hardware, we resolve the issues otherwise caused by shadow volume near and far plane clipping through a combination of (1) placing the conventional far clip plane "at infinity", (2) rasterization with infinite shadow volume polygons via homogeneous coordinates, and (3) adopting a zfail stencil-testing scheme. Depth clamping, a new rasterization feature provided by NVIDIA's GeForce3, preserves existing depth precision by not requiring the far plane to be placed at infinity. We also propose two-sided stencil testing to improve the efficiency of rendering stenciled shadow volumes.<|reference_end|>
arxiv
@article{everitt2003practical, title={Practical and Robust Stenciled Shadow Volumes for Hardware-Accelerated Rendering}, author={Cass Everitt and Mark J. Kilgard}, journal={arXiv preprint arXiv:cs/0301002}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301002}, primaryClass={cs.GR cs.CG} }
everitt2003practical
arxiv-670919
cs/0301003
Flavor: A Language for Media Representation
<|reference_start|>Flavor: A Language for Media Representation: Flavor (Formal Language for Audio-Visual Object Representation) has been created as a language for describing coded multimedia bitstreams in a formal way so that the code for reading and writing bitstreams can be automatically generated. It is an extension of C++ and Java, in which the typing system incorporates bitstream representation semantics. This allows describing in a single place both the in-memory representation of data as well as their bitstream-level (compressed) representation. Flavor also comes with a translator that automatically generates standard C++ or Java code from the Flavor source code so that direct access to compressed multimedia information by application developers can be achieved with essentially zero programming. Flavor has gone through many enhancements and this paper fully describes the latest version of the language and the translator. The software has been made into an open source project as of Version 4.1, and the latest downloadable Flavor package is available at http://flavor.sourceforge.net.<|reference_end|>
arxiv
@article{eleftheriadis2003flavor:, title={Flavor: A Language for Media Representation}, author={Alexandros Eleftheriadis and Danny Hong}, journal={arXiv preprint arXiv:cs/0301003}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301003}, primaryClass={cs.PL} }
eleftheriadis2003flavor:
arxiv-670920
cs/0301004
Near Quadratic Matrix Multiplication Modulo Composites
<|reference_start|>Near Quadratic Matrix Multiplication Modulo Composites: We show how one can use non-prime-power, composite moduli for computing representations of the product of two $n\times n$ matrices using only $n^{2+o(1)}$ multiplications.<|reference_end|>
arxiv
@article{grolmusz2003near, title={Near Quadratic Matrix Multiplication Modulo Composites}, author={Vince Grolmusz}, journal={arXiv preprint arXiv:cs/0301004}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301004}, primaryClass={cs.CC cs.DM} }
grolmusz2003near
arxiv-670921
cs/0301005
Modelling Delay Jitter in Voice over IP
<|reference_start|>Modelling Delay Jitter in Voice over IP: It has been suggested in voice over IP that an appropriate choice of the distribution used in modeling the delay jitters, can improve the play-out algorithm. In this paper, we propose a tool using which, one can determine, at a given instance, which distribution model best explains the jitter distribution. This is done using Expectation Maximization, to choose amongst possible distribution models which include, the i.i.d exponential distribution, the gamma distribution etc.<|reference_end|>
arxiv
@article{ganesh2003modelling, title={Modelling Delay Jitter in Voice over IP}, author={R. Ganesh and B. Kaushik and R. Sadhu}, journal={arXiv preprint arXiv:cs/0301005}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301005}, primaryClass={cs.PF} }
ganesh2003modelling
arxiv-670922
cs/0301006
Temporal plannability by variance of the episode length
<|reference_start|>Temporal plannability by variance of the episode length: Optimization of decision problems in stochastic environments is usually concerned with maximizing the probability of achieving the goal and minimizing the expected episode length. For interacting agents in time-critical applications, learning of the possibility of scheduling of subtasks (events) or the full task is an additional relevant issue. Besides, there exist highly stochastic problems where the actual trajectories show great variety from episode to episode, but completing the task takes almost the same amount of time. The identification of sub-problems of this nature may promote e.g., planning, scheduling and segmenting Markov decision processes. In this work, formulae for the average duration as well as the standard deviation of the duration of events are derived. The emerging Bellman-type equation is a simple extension of Sobel's work (1982). Methods of dynamic programming as well as methods of reinforcement learning can be applied for our extension. Computer demonstration on a toy problem serve to highlight the principle.<|reference_end|>
arxiv
@article{takacs2003temporal, title={Temporal plannability by variance of the episode length}, author={Balint Takacs and Istvan Szita and Andras Lorincz}, journal={arXiv preprint arXiv:cs/0301006}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301006}, primaryClass={cs.AI} }
takacs2003temporal
arxiv-670923
cs/0301007
Kalman filter control in the reinforcement learning framework
<|reference_start|>Kalman filter control in the reinforcement learning framework: There is a growing interest in using Kalman-filter models in brain modelling. In turn, it is of considerable importance to make Kalman-filters amenable for reinforcement learning. In the usual formulation of optimal control it is computed off-line by solving a backward recursion. In this technical note we show that slight modification of the linear-quadratic-Gaussian Kalman-filter model allows the on-line estimation of optimal control and makes the bridge to reinforcement learning. Moreover, the learning rule for value estimation assumes a Hebbian form weighted by the error of the value estimation.<|reference_end|>
arxiv
@article{szita2003kalman, title={Kalman filter control in the reinforcement learning framework}, author={Istvan Szita, Andras Lorincz}, journal={arXiv preprint arXiv:cs/0301007}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301007}, primaryClass={cs.LG cs.AI} }
szita2003kalman
arxiv-670924
cs/0301008
Formal Concept Analysis and Resolution in Algebraic Domains
<|reference_start|>Formal Concept Analysis and Resolution in Algebraic Domains: We relate two formerly independent areas: Formal concept analysis and logic of domains. We will establish a correspondence between contextual attribute logic on formal contexts resp. concept lattices and a clausal logic on coherent algebraic cpos. We show how to identify the notion of formal concept in the domain theoretic setting. In particular, we show that a special instance of the resolution rule from the domain logic coincides with the concept closure operator from formal concept analysis. The results shed light on the use of contexts and domains for knowledge representation and reasoning purposes.<|reference_end|>
arxiv
@article{hitzler2003formal, title={Formal Concept Analysis and Resolution in Algebraic Domains}, author={Pascal Hitzler and Matthias Wendt (Artificial Intelligence Institute, Dresden University of Technology)}, journal={arXiv preprint arXiv:cs/0301008}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301008}, primaryClass={cs.LO cs.AI} }
hitzler2003formal
arxiv-670925
cs/0301009
A Script Language for Data Integration in Database
<|reference_start|>A Script Language for Data Integration in Database: A Script Language in this paper is designed to transform the original data into the target data by the computing formula. The Script Language can be translated into the corresponding SQL Language, and the computation is finally implemented by the first type of dynamic SQL. The Script Language has the operations of insert, update, delete, union, intersect, and minus for the table in the database.The Script Language is edited by a text file and you can easily modify the computing formula in the text file to deal with the situations when the computing formula have been changed. So you only need modify the text of the script language, but needn't change the programs that have complied.<|reference_end|>
arxiv
@article{zheng2003a, title={A Script Language for Data Integration in Database}, author={Qingguo Zheng}, journal={arXiv preprint arXiv:cs/0301009}, year={2003}, number={Report:9-1-1999}, archivePrefix={arXiv}, eprint={cs/0301009}, primaryClass={cs.DB} }
zheng2003a
arxiv-670926
cs/0301010
Comparisons and Computation of Well-founded Semantics for Disjunctive Logic Programs
<|reference_start|>Comparisons and Computation of Well-founded Semantics for Disjunctive Logic Programs: Much work has been done on extending the well-founded semantics to general disjunctive logic programs and various approaches have been proposed. However, these semantics are different from each other and no consensus is reached about which semantics is the most intended. In this paper we look at disjunctive well-founded reasoning from different angles. We show that there is an intuitive form of the well-founded reasoning in disjunctive logic programming which can be characterized by slightly modifying some existing approaches to defining disjunctive well-founded semantics, including program transformations, argumentation, unfounded sets (and resolution-like procedure). We also provide a bottom-up procedure for this semantics. The significance of our work is not only in clarifying the relationship among different approaches, but also sheds some light on what is an intended well-founded semantics for disjunctive logic programs.<|reference_end|>
arxiv
@article{wang2003comparisons, title={Comparisons and Computation of Well-founded Semantics for Disjunctive Logic Programs}, author={Kewen Wang, Lizhu Zhou}, journal={arXiv preprint arXiv:cs/0301010}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301010}, primaryClass={cs.AI} }
wang2003comparisons
arxiv-670927
cs/0301011
Open Network Handles Implemented in DNS
<|reference_start|>Open Network Handles Implemented in DNS: An Open Network Handle System (ONHS) provides an intermediate level of service between IP numbers and domain names. A handle adheres permanently to an owner, who may assign and reassign it to different addresses at will. But a handle is a number, carrying no significance in natural language. Any user desiring a handle may generate one from a public key. This memo describes a simple implementation of an Open Network Handle System using the security extensions to the Domain Name System (DNSSEC).<|reference_end|>
arxiv
@article{o'donnell2003open, title={Open Network Handles Implemented in DNS}, author={Michael J. O'Donnell}, journal={arXiv preprint arXiv:cs/0301011}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301011}, primaryClass={cs.NI} }
o'donnell2003open
arxiv-670928
cs/0301012
Hard satisfiable formulas for DPLL-type algorithms
<|reference_start|>Hard satisfiable formulas for DPLL-type algorithms: We address lower bounds on the time complexity of algorithms solving the propositional satisfiability problem. Namely, we consider two DPLL-type algorithms, enhanced with the unit clause and pure literal heuristics. Exponential lower bounds for solving satisfiability on provably satisfiable formulas are proven.<|reference_end|>
arxiv
@article{nikolenko2003hard, title={Hard satisfiable formulas for DPLL-type algorithms}, author={Sergey I. Nikolenko}, journal={arXiv preprint arXiv:cs/0301012}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301012}, primaryClass={cs.CC} }
nikolenko2003hard
arxiv-670929
cs/0301013
Independence Properties of Algorithmically Random Sequences
<|reference_start|>Independence Properties of Algorithmically Random Sequences: A bounded Kolmogorov-Loveland selection rule is an adaptive strategy for recursively selecting a subsequence of an infinite binary sequence; such a subsequence may be interpreted as the query sequence of a time-bounded Turing machine. In this paper we show that if A is an algorithmically random sequence, A_0 is selected from A via a bounded Kolmogorov-Loveland selection rule, and A_1 denotes the sequence of nonselected bits of A, then A_1 is independent of A_0; that is, A_1 is algorithmically random relative to A_0. This result has been used by Kautz and Miltersen [1] to show that relative to a random oracle, NP does not have p-measure zero (in the sense of Lutz [2]). [1] S. M. Kautz and P. B. Miltersen. Relative to a random oracle, NP is not small. Journal of Computer and System Sciences, 53:235-250, 1996. [2] J. H. Lutz. Almost everywhere high nonuniform complexity. Journal of Computer and System Sciences, 44:220-258, 1992.<|reference_end|>
arxiv
@article{kautz2003independence, title={Independence Properties of Algorithmically Random Sequences}, author={S. M. Kautz}, journal={arXiv preprint arXiv:cs/0301013}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301013}, primaryClass={cs.CC} }
kautz2003independence
arxiv-670930
cs/0301014
Convergence and Loss Bounds for Bayesian Sequence Prediction
<|reference_start|>Convergence and Loss Bounds for Bayesian Sequence Prediction: The probability of observing $x_t$ at time $t$, given past observations $x_1...x_{t-1}$ can be computed with Bayes' rule if the true generating distribution $\mu$ of the sequences $x_1x_2x_3...$ is known. If $\mu$ is unknown, but known to belong to a class $M$ one can base ones prediction on the Bayes mix $\xi$ defined as a weighted sum of distributions $\nu\in M$. Various convergence results of the mixture posterior $\xi_t$ to the true posterior $\mu_t$ are presented. In particular a new (elementary) derivation of the convergence $\xi_t/\mu_t\to 1$ is provided, which additionally gives the rate of convergence. A general sequence predictor is allowed to choose an action $y_t$ based on $x_1...x_{t-1}$ and receives loss $\ell_{x_t y_t}$ if $x_t$ is the next symbol of the sequence. No assumptions are made on the structure of $\ell$ (apart from being bounded) and $M$. The Bayes-optimal prediction scheme $\Lambda_\xi$ based on mixture $\xi$ and the Bayes-optimal informed prediction scheme $\Lambda_\mu$ are defined and the total loss $L_\xi$ of $\Lambda_\xi$ is bounded in terms of the total loss $L_\mu$ of $\Lambda_\mu$. It is shown that $L_\xi$ is bounded for bounded $L_\mu$ and $L_\xi/L_\mu\to 1$ for $L_\mu\to \infty$. Convergence of the instantaneous losses are also proven.<|reference_end|>
arxiv
@article{hutter2003convergence, title={Convergence and Loss Bounds for Bayesian Sequence Prediction}, author={Marcus Hutter}, journal={IEEE Transactions on Information Theory, 49:8 (2003) 2061--2067}, year={2003}, doi={10.1109/TIT.2003.814488}, number={IDSIA-09-01}, archivePrefix={arXiv}, eprint={cs/0301014}, primaryClass={cs.LG cs.AI math.PR} }
hutter2003convergence
arxiv-670931
cs/0301015
Some remarks on the survey decimation algorithm for K-satisfiability
<|reference_start|>Some remarks on the survey decimation algorithm for K-satisfiability: In this note we study the convergence of the survey decimation algorithm. An analytic formula for the reduction of the complexity during the decimation is derived. The limit of the converge of the algorithm are estimated in the random case: interesting phenomena appear near the boundary of convergence.<|reference_end|>
arxiv
@article{parisi2003some, title={Some remarks on the survey decimation algorithm for K-satisfiability}, author={Giorgio Parisi}, journal={arXiv preprint arXiv:cs/0301015}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301015}, primaryClass={cs.CC cond-mat.dis-nn cs.DS} }
parisi2003some
arxiv-670932
cs/0301016
Lower Bounds on the Bounded Coefficient Complexity of Bilinear Maps
<|reference_start|>Lower Bounds on the Bounded Coefficient Complexity of Bilinear Maps: We prove lower bounds of order $n\log n$ for both the problem to multiply polynomials of degree $n$, and to divide polynomials with remainder, in the model of bounded coefficient arithmetic circuits over the complex numbers. These lower bounds are optimal up to order of magnitude. The proof uses a recent idea of R. Raz [Proc. 34th STOC 2002] proposed for matrix multiplication. It reduces the linear problem to multiply a random circulant matrix with a vector to the bilinear problem of cyclic convolution. We treat the arising linear problem by extending J. Morgenstern's bound [J. ACM 20, pp. 305-306, 1973] in a unitarily invariant way. This establishes a new lower bound on the bounded coefficient complexity of linear forms in terms of the singular values of the corresponding matrix. In addition, we extend these lower bounds for linear and bilinear maps to a model of circuits that allows a restricted number of unbounded scalar multiplications.<|reference_end|>
arxiv
@article{buergisser2003lower, title={Lower Bounds on the Bounded Coefficient Complexity of Bilinear Maps}, author={Peter Buergisser and Martin Lotz}, journal={arXiv preprint arXiv:cs/0301016}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301016}, primaryClass={cs.CC} }
buergisser2003lower
arxiv-670933
cs/0301017
Completeness and Decidability Properties for Functional Dependencies in XML
<|reference_start|>Completeness and Decidability Properties for Functional Dependencies in XML: XML is of great importance in information storage and retrieval because of its recent emergence as a standard for data representation and interchange on the Internet. However XML provides little semantic content and as a result several papers have addressed the topic of how to improve the semantic expressiveness of XML. Among the most important of these approaches has been that of defining integrity constraints in XML. In a companion paper we defined strong functional dependencies in XML(XFDs). We also presented a set of axioms for reasoning about the implication of XFDs and showed that the axiom system is sound for arbitrary XFDs. In this paper we prove that the axioms are also complete for unary XFDs (XFDs with a single path on the l.h.s.). The second contribution of the paper is to prove that the implication problem for unary XFDs is decidable and to provide a linear time algorithm for it.<|reference_end|>
arxiv
@article{vincent2003completeness, title={Completeness and Decidability Properties for Functional Dependencies in XML}, author={Millist W. Vincent and Jixue Liu}, journal={arXiv preprint arXiv:cs/0301017}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301017}, primaryClass={cs.DB} }
vincent2003completeness
arxiv-670934
cs/0301018
Novel Runtime Systems Support for Adaptive Compositional Modeling on the Grid
<|reference_start|>Novel Runtime Systems Support for Adaptive Compositional Modeling on the Grid: Grid infrastructures and computing environments have progressed significantly in the past few years. The vision of truly seamless Grid usage relies on runtime systems support that is cognizant of the operational issues underlying grid computations and, at the same time, is flexible enough to accommodate diverse application scenarios. This paper addresses the twin aspects of Grid infrastructure and application support through a novel combination of two computational technologies: Weaves - a source-language independent parallel runtime compositional framework that operates through reverse-analysis of compiled object files, and runtime recommender systems that aid in dynamic knowledge-based application composition. Domain-specific adaptivity is exploited through a novel compositional system that supports runtime recommendation of code modules and a sophisticated checkpointing and runtime migration solution that can be transparently deployed over Grid infrastructures. A core set of "adaptivity schemas" are provided as templates for adaptive composition of large-scale scientific computations. Implementation issues, motivating application contexts, and preliminary results are described.<|reference_end|>
arxiv
@article{varadarajan2003novel, title={Novel Runtime Systems Support for Adaptive Compositional Modeling on the Grid}, author={Srinidhi Varadarajan and Naren Ramakrishnan}, journal={arXiv preprint arXiv:cs/0301018}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301018}, primaryClass={cs.CE cs.DC} }
varadarajan2003novel
arxiv-670935
cs/0301019
Smoothed Analysis of Interior-Point Algorithms: Termination
<|reference_start|>Smoothed Analysis of Interior-Point Algorithms: Termination: We perform a smoothed analysis of the termination phase of an interior-point method. By combining this analysis with the smoothed analysis of Renegar's interior-point algorithm by Dunagan, Spielman and Teng, we show that the smoothed complexity of an interior-point algorithm for linear programming is $O (m^{3} \log (m/\sigma))$. In contrast, the best known bound on the worst-case complexity of linear programming is $O (m^{3} L)$, where $L$ could be as large as $m$. We include an introduction to smoothed analysis and a tutorial on proof techniques that have been useful in smoothed analyses.<|reference_end|>
arxiv
@article{spielman2003smoothed, title={Smoothed Analysis of Interior-Point Algorithms: Termination}, author={Daniel A. Spielman and Shang-Hua Teng}, journal={arXiv preprint arXiv:cs/0301019}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301019}, primaryClass={cs.DS} }
spielman2003smoothed
arxiv-670936
cs/0301020
Differential Fault Analysis on AES
<|reference_start|>Differential Fault Analysis on AES: We explain how a differential fault analysis (DFA) works on AES 128, 192 or 256 bits.<|reference_end|>
arxiv
@article{dusart2003differential, title={Differential Fault Analysis on A.E.S}, author={P. Dusart, G. Letourneux, O. Vivolo}, journal={arXiv preprint arXiv:cs/0301020}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301020}, primaryClass={cs.CR} }
dusart2003differential
arxiv-670937
cs/0301021
PHORMA: Perfectly Hashable Order Restricted Multidimensional Arrays
<|reference_start|>PHORMA: Perfectly Hashable Order Restricted Multidimensional Arrays: In this paper we propose a simple and efficient data structure yielding a perfect hashing of quite general arrays. The data structure is named phorma, which is an acronym for perfectly hashable order restricted multidimensional array. Keywords: Perfect hash function, Digraph, Implicit enumeration, Nijenhuis-Wilf combinatorial family.<|reference_end|>
arxiv
@article{lins2003phorma:, title={PHORMA: Perfectly Hashable Order Restricted Multidimensional Arrays}, author={Lauro Lins, Sostenes Lins and Silvio Melo}, journal={arXiv preprint arXiv:cs/0301021}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301021}, primaryClass={cs.DS} }
lins2003phorma:
arxiv-670938
cs/0301022
Homomorphic public-key cryptosystems and encrypting boolean circuits
<|reference_start|>Homomorphic public-key cryptosystems and encrypting boolean circuits: In this paper homomorphic cryptosystems are designed for the first time over any finite group. Applying Barrington's construction we produce for any boolean circuit of the logarithmic depth its encrypted simulation of a polynomial size over an appropriate finitely generated group.<|reference_end|>
arxiv
@article{grigoriev2003homomorphic, title={Homomorphic public-key cryptosystems and encrypting boolean circuits}, author={Dima Grigoriev and Ilia Ponomarenko}, journal={arXiv preprint arXiv:cs/0301022}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301022}, primaryClass={cs.CR} }
grigoriev2003homomorphic
arxiv-670939
cs/0301023
A semantic framework for preference handling in answer set programming
<|reference_start|>A semantic framework for preference handling in answer set programming: We provide a semantic framework for preference handling in answer set programming. To this end, we introduce preference preserving consequence operators. The resulting fixpoint characterizations provide us with a uniform semantic framework for characterizing preference handling in existing approaches. Although our approach is extensible to other semantics by means of an alternating fixpoint theory, we focus here on the elaboration of preferences under answer set semantics. Alternatively, we show how these approaches can be characterized by the concept of order preservation. These uniform semantic characterizations provide us with new insights about interrelationships and moreover about ways of implementation.<|reference_end|>
arxiv
@article{schaub2003a, title={A semantic framework for preference handling in answer set programming}, author={Torsten Schaub and Kewen Wang}, journal={arXiv preprint arXiv:cs/0301023}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301023}, primaryClass={cs.AI} }
schaub2003a
arxiv-670940
cs/0301024
Complexity and Completeness of Immanants
<|reference_start|>Complexity and Completeness of Immanants: Immanants are polynomial functions of n by n matrices attached to irreducible characters of the symmetric group S_n, or equivalently to Young diagrams of size n. Immanants include determinants and permanents as extreme cases. Valiant proved that computation of permanents is a complete problem in his algebraic model of NP theory, i.e., it is VNP-complete. We prove that computation of immanants is VNP-complete if the immanants are attached to a family of diagrams whose separation is $\Omega(n^\delta)$ for some $\delta>0$. We define the separation of a diagram to be the largest number of overhanging boxes contained in a single row. Our theorem proves a conjecture of Buergisser for a large variety of families, and in particular we recover with new proofs his VNP-completeness results for hooks and rectangles.<|reference_end|>
arxiv
@article{brylinski2003complexity, title={Complexity and Completeness of Immanants}, author={Jean-Luc Brylinski and Ranee Brylinski}, journal={arXiv preprint arXiv:cs/0301024}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301024}, primaryClass={cs.CC math.CO} }
brylinski2003complexity
arxiv-670941
cs/0301025
PHORMA: Perfectly Hashed Order Restricted Multidimensional Array
<|reference_start|>PHORMA: Perfectly Hashed Order Restricted Multidimensional Array: In this paper we propose a simple and efficient strategy to obtain a data structure generator to accomplish a perfect hash of quite general order restricted multidimensional arrays named {\em phormas}. The constructor of such objects gets two parameters as input: an n-vector a of non negative integers and a boolean function B on the types of order restrictions on the coordinates of the valid n-vectors bounded by a. At compiler time, the phorma constructor builds, from the pair a,B, a digraph G(a,B) with a single source s and a single sink t such that the st-paths are in 1-1 correspondence with the members of the B-restricted a-bounded array A(a,B). Besides perfectly hashing A(a,B), G(a,B) is an instance of an NW-family. This permits other useful computational tasks on it.<|reference_end|>
arxiv
@article{lins2003phorma:, title={PHORMA: Perfectly Hashed Order Restricted Multidimensional Array}, author={Lauro Lins, Sostenes Lins and Silvio Melo}, journal={arXiv preprint arXiv:cs/0301025}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301025}, primaryClass={cs.DM} }
lins2003phorma:
arxiv-670942
cs/0301026
Double-Negation Elimination in Some Propositional Logics
<|reference_start|>Double-Negation Elimination in Some Propositional Logics: This article answers two questions (posed in the literature), each concerning the guaranteed existence of proofs free of double negation. A proof is free of double negation if none of its deduced steps contains a term of the form n(n(t)) for some term t, where n denotes negation. The first question asks for conditions on the hypotheses that, if satisfied, guarantee the existence of a double-negation-free proof when the conclusion is free of double negation. The second question asks about the existence of an axiom system for classical propositional calculus whose use, for theorems with a conclusion free of double negation, guarantees the existence of a double-negation-free proof. After giving conditions that answer the first question, we answer the second question by focusing on the Lukasiewicz three-axiom system. We then extend our studies to infinite-valued sentential calculus and to intuitionistic logic and generalize the notion of being double-negation free. The double-negation proofs of interest rely exclusively on the inference rule condensed detachment, a rule that combines modus ponens with an appropriately general rule of substitution. The automated reasoning program OTTER played an indispensable role in this study.<|reference_end|>
arxiv
@article{beeson2003double-negation, title={Double-Negation Elimination in Some Propositional Logics}, author={Michael Beeson, Robert Veroff, Larry Wos}, journal={arXiv preprint arXiv:cs/0301026}, year={2003}, number={Preprint ANL/MCS-P1014-1202}, archivePrefix={arXiv}, eprint={cs/0301026}, primaryClass={cs.LO} }
beeson2003double-negation
arxiv-670943
cs/0301027
A comparison of four approaches to the calculation of conservation laws
<|reference_start|>A comparison of four approaches to the calculation of conservation laws: The paper compares computational aspects of four approaches to compute conservation laws of single differential equations (DEs) or systems of them, ODEs and PDEs. The only restriction, required by two of the four corresponding computer algebra programs, is that each DE has to be solvable for a leading derivative. Extra constraints for the conservation laws can be specified. Examples include new conservation laws that are non-polynomial in the functions, that have an explicit variable dependence and families of conservation laws involving arbitrary functions. The following equations are investigated in examples: Ito, Liouville, Burgers, Kadomtsev-Petviashvili, Karney-Sen-Chu-Verheest, Boussinesq, Tzetzeica, Benney.<|reference_end|>
arxiv
@article{wolf2003a, title={A comparison of four approaches to the calculation of conservation laws}, author={Thomas Wolf}, journal={Euro. Jnl of Applied Mathematics, 13, part 2 (2002) 129-152}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301027}, primaryClass={cs.SC math-ph math.MP} }
wolf2003a
arxiv-670944
cs/0301028
The integration of systems of linear PDEs using conservation laws of syzygies
<|reference_start|>The integration of systems of linear PDEs using conservation laws of syzygies: A new integration technique is presented for systems of linear partial differential equations (PDEs) for which syzygies can be formulated that obey conservation laws. These syzygies come for free as a by-product of the differential Groebner Basis computation. Compared with the more obvious way of integrating a single equation and substituting the result in other equations the new technique integrates more than one equation at once and therefore introduces temporarily fewer new functions of integration that in addition depend on fewer variables. Especially for high order PDE systems in many variables the conventional integration technique may lead to an explosion of the number of functions of integration which is avoided with the new method. A further benefit is that redundant free functions in the solution are either prevented or that their number is at least reduced.<|reference_end|>
arxiv
@article{wolf2003the, title={The integration of systems of linear PDEs using conservation laws of syzygies}, author={Thomas Wolf}, journal={J. of Symb. Comp. 35, no 5 (2003) 499-526.}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301028}, primaryClass={cs.SC math.AP} }
wolf2003the
arxiv-670945
cs/0301029
Size reduction and partial decoupling of systems of equations
<|reference_start|>Size reduction and partial decoupling of systems of equations: A method is presented that reduces the number of terms of systems of linear equations (algebraic, ordinary and partial differential equations). As a byproduct these systems have a tendency to become partially decoupled and are more likely to be factorizable or integrable. A variation of this method is applicable to non-linear systems. Modifications to improve efficiency are given and examples are shown. This procedure can be used in connection with the computation of the radical of a differential ideal (differential Groebner basis).<|reference_end|>
arxiv
@article{wolf2003size, title={Size reduction and partial decoupling of systems of equations}, author={Thomas Wolf}, journal={J. Symb. Comp. 33, no 3 (2002) 367-383}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301029}, primaryClass={cs.SC} }
wolf2003size
arxiv-670946
cs/0301030
Bounds on the Number of Longest Common Subsequences
<|reference_start|>Bounds on the Number of Longest Common Subsequences: This paper performs the analysis necessary to bound the running time of known, efficient algorithms for generating all longest common subsequences. That is, we bound the running time as a function of input size for algorithms with time essentially proportional to the output size. This paper considers both the case of computing all distinct LCSs and the case of computing all LCS embeddings. Also included is an analysis of how much better the efficient algorithms are than the standard method of generating LCS embeddings. A full analysis is carried out with running times measured as a function of the total number of input characters, and much of the analysis is also provided for cases in which the two input sequences are of the same specified length or of two independently specified lengths.<|reference_end|>
arxiv
@article{greenberg2003bounds, title={Bounds on the Number of Longest Common Subsequences}, author={Ronald I. Greenberg}, journal={arXiv preprint arXiv:cs/0301030}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301030}, primaryClass={cs.DM cs.DS} }
greenberg2003bounds
arxiv-670947
cs/0301031
Fine-Grain Authorization for Resource Management in the Grid Environment
<|reference_start|>Fine-Grain Authorization for Resource Management in the Grid Environment: In this document we describe our work-in-progress for enabling fine-grain authorization of resource management. In particular, we address the needs of Virtual Organizations (VOs) to enforce their own policies in addition to those of the resource owners.<|reference_end|>
arxiv
@article{keahey2003fine-grain, title={Fine-Grain Authorization for Resource Management in the Grid Environment}, author={K. Keahey and V. Welch}, journal={arXiv preprint arXiv:cs/0301031}, year={2003}, number={ANL/MCS-P991-0802}, archivePrefix={arXiv}, eprint={cs/0301031}, primaryClass={cs.CR cs.DC} }
keahey2003fine-grain
arxiv-670948
cs/0301032
Subclassing errors, OOP, and practically checkable rules to prevent them
<|reference_start|>Subclassing errors, OOP, and practically checkable rules to prevent them: This paper considers an example of Object-Oriented Programming (OOP) leading to subtle errors that break separation of interface and implementations. A comprehensive principle that guards against such errors is undecidable. The paper introduces a set of mechanically verifiable rules that prevent these insidious problems. Although the rules seem restrictive, they are powerful and expressive, as we show on several familiar examples. The rules contradict both the spirit and the letter of the OOP. The present examples as well as available theoretical and experimental results pose a question if OOP is conducive to software development at all.<|reference_end|>
arxiv
@article{kiselyov2003subclassing, title={Subclassing errors, OOP, and practically checkable rules to prevent them}, author={Oleg Kiselyov}, journal={Proc. "Monterey Workshop 2001: Engineering Automation for Software Intensive System Integration," sponsored by ONR/AFOSR/ARO/DARPA. June 18-22, 2001, Monterey, CA. -- pp. 33-42}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301032}, primaryClass={cs.PL cs.SE} }
kiselyov2003subclassing
arxiv-670949
cs/0301033
Computational Grids in Action: The National Fusion Collaboratory
<|reference_start|>Computational Grids in Action: The National Fusion Collaboratory: The National Fusion Collaboratory (NFC) project was created to advance scientific understanding and innovation in magnetic fusion research by enabling more efficient use of existing experimental facilities through more effective integration of experiment, theory, and modeling. To achieve this objective, NFC introduced the concept of "network services", which build on top of computational Grids, and provide Fusion codes, together with their maintenance and hardware resources as a service to the community. This mode of operation requires the development of new authorization and enforcement capabilities. In addition, the nature of Fusion experiments places strident quality of service requirements on codes run during the experimental cycle. In this paper, we describe Grid computing requirements of the Fusion community, and present our first experiments in meeting those requirements.<|reference_end|>
arxiv
@article{keahey2003computational, title={Computational Grids in Action: The National Fusion Collaboratory}, author={K. Keahey, T. Fredian, Q. Peng, D. P. Schissel, M. Thompson, I. Foster, M. Greenwald, D. McCune}, journal={Future Generation Computer Systems 18 (2002), 1005-1015}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301033}, primaryClass={cs.DC} }
keahey2003computational
arxiv-670950
cs/0301034
Computing the Number of Longest Common Subsequences
<|reference_start|>Computing the Number of Longest Common Subsequences: This note provides very simple, efficient algorithms for computing the number of distinct longest common subsequences of two input strings and for computing the number of LCS embeddings.<|reference_end|>
arxiv
@article{greenberg2003computing, title={Computing the Number of Longest Common Subsequences}, author={Ronald I. Greenberg}, journal={arXiv preprint arXiv:cs/0301034}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301034}, primaryClass={cs.DS cs.DM} }
greenberg2003computing
arxiv-670951
cs/0301035
On the Complexity of Buffer Allocation in Message Passing Systems
<|reference_start|>On the Complexity of Buffer Allocation in Message Passing Systems: Message passing programs commonly use buffers to avoid unnecessary synchronizations and to improve performance by overlapping communication with computation. Unfortunately, using buffers makes the program no longer portable, potentially unable to complete on systems without a sufficient number of buffers. Effective buffer use entails that the minimum number needed for a safe execution be allocated. We explore a variety of problems related to buffer allocation for safe and efficient execution of message passing programs. We show that determining the minimum number of buffers or verifying a buffer assignment are intractable problems. However, we give a polynomial time algorithm to determine the minimum number of buffers needed to allow for asynchronous execution. We extend these results to several different buffering schemes, which in some cases make the problems tractable.<|reference_end|>
arxiv
@article{brodsky2003on, title={On the Complexity of Buffer Allocation in Message Passing Systems}, author={Alex Brodsky, Jan B. Pedersen, and Alan Wagner}, journal={arXiv preprint arXiv:cs/0301035}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301035}, primaryClass={cs.DC} }
brodsky2003on
arxiv-670952
cs/0301036
Algorithms using Java for Spreadsheet Dependent Cell Recomputation
<|reference_start|>Algorithms using Java for Spreadsheet Dependent Cell Recomputation: Java implementations of algorithms used by spreadsheets to automatically recompute the set of cells dependent on a changed cell are described using a mathematical model for spreadsheets based on graph theory. These solutions comprise part of a Java API that allows a client application to read, modify, and maintain spreadsheet data without using the spreadsheet application program that produced it. Features of the Java language that successfully improve the running time performance of the algorithms are also described.<|reference_end|>
arxiv
@article{francoeur2003algorithms, title={Algorithms using Java for Spreadsheet Dependent Cell Recomputation}, author={Joe Francoeur}, journal={arXiv preprint arXiv:cs/0301036}, year={2003}, archivePrefix={arXiv}, eprint={cs/0301036}, primaryClass={cs.DS cs.DM} }
francoeur2003algorithms
arxiv-670953
cs/0302001
Many Hard Examples in Exact Phase Transitions with Application to Generating Hard Satisfiable Instances
<|reference_start|>Many Hard Examples in Exact Phase Transitions with Application to Generating Hard Satisfiable Instances: This paper first analyzes the resolution complexity of two random CSP models (i.e. Model RB/RD) for which we can establish the existence of phase transitions and identify the threshold points exactly. By encoding CSPs into CNF formulas, it is proved that almost all instances of Model RB/RD have no tree-like resolution proofs of less than exponential size. Thus, we not only introduce new families of CNF formulas hard for resolution, which is a central task of Proof-Complexity theory, but also propose models with both many hard instances and exact phase transitions. Then, the implications of such models are addressed. It is shown both theoretically and experimentally that an application of Model RB/RD might be in the generation of hard satisfiable instances, which is not only of practical importance but also related to some open problems in cryptography such as generating one-way functions. Subsequently, a further theoretical support for the generation method is shown by establishing exponential lower bounds on the complexity of solving random satisfiable and forced satisfiable instances of RB/RD near the threshold. Finally, conclusions are presented, as well as a detailed comparison of Model RB/RD with the Hamiltonian cycle problem and random 3-SAT, which, respectively, exhibit three different kinds of phase transition behavior in NP-complete problems.<|reference_end|>
arxiv
@article{xu2003many, title={Many Hard Examples in Exact Phase Transitions with Application to Generating Hard Satisfiable Instances}, author={Ke Xu and Wei Li}, journal={arXiv preprint arXiv:cs/0302001}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302001}, primaryClass={cs.CC cond-mat.stat-mech cs.AI cs.DM} }
xu2003many
arxiv-670954
cs/0302002
Optimizing GoTools' Search Heuristics using Genetic Algorithms
<|reference_start|>Optimizing GoTools' Search Heuristics using Genetic Algorithms: GoTools is a program which solves life & death problems in the game of Go. This paper describes experiments using a Genetic Algorithm to optimize heuristic weights used by GoTools' tree-search. The complete set of heuristic weights is composed of different subgroups, each of which can be optimized with a suitable fitness function. As a useful side product, an MPI interface for FreePascal was implemented to allow the use of a parallelized fitness function running on a Beowulf cluster. The aim of this exercise is to optimize the current version of GoTools, and to make tools available in preparation of an extension of GoTools for solving open boundary life & death problems, which will introduce more heuristic parameters to be fine tuned.<|reference_end|>
arxiv
@article{pratola2003optimizing, title={Optimizing GoTools' Search Heuristics using Genetic Algorithms}, author={Matthew Pratola, Thomas Wolf}, journal={arXiv preprint arXiv:cs/0302002}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302002}, primaryClass={cs.NE} }
pratola2003optimizing
arxiv-670955
cs/0302003
Approximate analysis of search algorithms with "physical" methods
<|reference_start|>Approximate analysis of search algorithms with "physical" methods: An overview of some methods of statistical physics applied to the analysis of algorithms for optimization problems (satisfiability of Boolean constraints, vertex cover of graphs, decoding, ...) with distributions of random inputs is proposed. Two types of algorithms are analyzed: complete procedures with backtracking (Davis-Putnam-Loveland-Logeman algorithm) and incomplete, local search procedures (gradient descent, random walksat, ...). The study of complete algorithms makes use of physical concepts such as phase transitions, dynamical renormalization flow, growth processes, ... As for local search procedures, the connection between computational complexity and the structure of the cost function landscape is questioned, with emphasis on the notion of metastability.<|reference_end|>
arxiv
@article{cocco2003approximate, title={Approximate analysis of search algorithms with "physical" methods}, author={Simona Cocco, Remi Monasson, Andrea Montanari, Guilhem Semerjian}, journal={arXiv preprint arXiv:cs/0302003}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302003}, primaryClass={cs.CC cond-mat.stat-mech} }
cocco2003approximate
arxiv-670956
cs/0302004
Unique Pattern Matching in Strings
<|reference_start|>Unique Pattern Matching in Strings: Regular expression patterns are a key feature of document processing languages like Perl and XDuce. It is in this context that the first and longest match policies have been proposed to disambiguate the pattern matching process. We formally define a matching semantics with these policies and show that the generally accepted method of simulating longest match by first match and recursion is incorrect. We continue by solving the associated type inference problem, which consists in calculating for every subexpression the set of words the subexpression can still match when these policies are in effect, and show how this algorithm can be used to efficiently implement the matching process.<|reference_end|>
arxiv
@article{vansummeren2003unique, title={Unique Pattern Matching in Strings}, author={Stijn Vansummeren}, journal={arXiv preprint arXiv:cs/0302004}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302004}, primaryClass={cs.PL cs.DB} }
vansummeren2003unique
arxiv-670957
cs/0302005
Barnacle: An Assembly Algorithm for Clone-based Sequences of Whole Genomes
<|reference_start|>Barnacle: An Assembly Algorithm for Clone-based Sequences of Whole Genomes: We propose an assembly algorithm {\sc Barnacle} for sequences generated by the clone-based approach. We illustrate our approach by assembling the human genome. Our novel method abandons the original physical-mapping-first framework. As we show, {\sc Barnacle} more effectively resolves conflicts due to repeated sequences. The latter is the main difficulty of the sequence assembly problem. In addition, we are able to detect inconsistencies in the underlying data. We present and compare our results on the December 2001 freeze of the public working draft of the human genome with NCBI's assembly (Build 28). The assembly of December 2001 freeze of the public working draft generated by {\sc Barnacle} and the source code of {\sc Barnacle} are available at (http://www.cs.rutgers.edu/~vchoi).<|reference_end|>
arxiv
@article{choi2003barnacle:, title={Barnacle: An Assembly Algorithm for Clone-based Sequences of Whole Genomes}, author={Vicky Choi and Martin Farach-Colton}, journal={arXiv preprint arXiv:cs/0302005}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302005}, primaryClass={cs.DS cs.DM q-bio} }
choi2003barnacle:
arxiv-670958
cs/0302006
Grid Market Directory: A Web Services based Grid Service Publication Directory
<|reference_start|>Grid Market Directory: A Web Services based Grid Service Publication Directory: As Grids are emerging as the next generation service-oriented computing platforms, they need to support Grid economy that helps in the management of supply and demand for resources and offers an economic incentive for Grid resource providers. To enable this Grid economy, a market-like Grid environment including an infrastructure that supports the publication of services and their discovery is needed. As part of the Gridbus project, we proposed and have developed a Grid Market Directory (GMD) that serves as a registry for high-level service publication and discovery in Virtual Organisations.<|reference_end|>
arxiv
@article{yu2003grid, title={Grid Market Directory: A Web Services based Grid Service Publication Directory}, author={Jia Yu, Srikumar Venugopal, Rajkumar Buyya}, journal={arXiv preprint arXiv:cs/0302006}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302006}, primaryClass={cs.DC} }
yu2003grid
arxiv-670959
cs/0302007
G-Monitor: Gridbus web portal for monitoring and steering application execution on global grids
<|reference_start|>G-Monitor: Gridbus web portal for monitoring and steering application execution on global grids: Grids are experiencing a rapid growth in their application and along with this there is a requirement for a portal which is easy to use and scalable. We have responded to this requirement by developing an easy to use, scalable, web-based portal called G-Monitor. This paper proposes a generic architecture for a web portal into a grid environment and discusses our implementation and its application.<|reference_end|>
arxiv
@article{placek2003g-monitor:, title={G-Monitor: Gridbus web portal for monitoring and steering application execution on global grids}, author={Martin Placek and Rajkumar Buyya}, journal={arXiv preprint arXiv:cs/0302007}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302007}, primaryClass={cs.DC} }
placek2003g-monitor:
arxiv-670960
cs/0302008
Visual Environment for Rapid Composition of Parameter-Sweep Applications for Distributed Processing on Global Grids
<|reference_start|>Visual Environment for Rapid Composition of Parameter-Sweep Applications for Distributed Processing on Global Grids: Computational Grids are emerging as a platform for next-generation parallel and distributed computing. Large-scale parametric studies and parameter sweep applications find a natural place in the Grid's distribution model. There is little or no communication between jobs. The task of parallelizing and distributing existing applications is conceptually trivial. These properties of parametric studies make it an ideal place to start developing integrated development environments (IDEs) for rapidly Grid-enabling applications. However, the availability of IDEs for scientists to Grid-enable their applications, without the need of developing them as parallel applications explicitly, is still lacking. This paper presents a Java based IDE called Visual Parametric Tool (VPT), developed as part of the Gridbus project, for rapid creation of parameter sweep (data parallel/SPMD) applications. It supports automatic creation of parameter script and parameterisation of the input data files, which is compatible with the Nimrod-G parameter specification language. The usefulness of VPT is demonstrated by a case study on composition of molecular docking application as a parameter sweep application. Such applications can be deployed on clusters using the Nimrod/enFuzion system and on global Grids using the Nimrod-G grid resource broker.<|reference_end|>
arxiv
@article{burq2003visual, title={Visual Environment for Rapid Composition of Parameter-Sweep Applications for Distributed Processing on Global Grids}, author={Shoib Burq, Steve Melnikoff, Kim Branson, Rajkumar Buyya}, journal={arXiv preprint arXiv:cs/0302008}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302008}, primaryClass={cs.DC} }
burq2003visual
arxiv-670961
cs/0302009
Data Structure for a Time-Based Bandwidth Reservations Problem
<|reference_start|>Data Structure for a Time-Based Bandwidth Reservations Problem: We discuss a problem of handling resource reservations. The resource can be reserved for some time, it can be freed or it can be queried what is the largest amount of reserved resource during a time interval. We show that the problem has a lower bound of $\Omega(\log n)$ per operation on average and we give a matching upper bound algorithm. Our solution also solves a dynamic version of the related problems of a prefix sum and a partial sum.<|reference_end|>
arxiv
@article{brodnik2003data, title={Data Structure for a Time-Based Bandwidth Reservations Problem}, author={Andrej Brodnik (1 and 2), Andreas Nilsson (2) ((1) IMFM, Ljubljana, Slovenia, (2) University of Technology, Lulea, Sweden)}, journal={arXiv preprint arXiv:cs/0302009}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302009}, primaryClass={cs.DS cs.NI} }
brodnik2003data
arxiv-670962
cs/0302010
Authenticated Append-only Skip Lists
<|reference_start|>Authenticated Append-only Skip Lists: In this work we describe, design and analyze the security of a tamper-evident, append-only data structure for maintaining secure data sequences in a loosely coupled distributed system where individual system components may be mutually distrustful. The resulting data structure, called an Authenticated Append-Only Skip List (AASL), allows its maintainers to produce one-way digests of the entire data sequence, which they can publish to others as a commitment on the contents and order of the sequence. The maintainer can produce efficiently succinct proofs that authenticate a particular datum in a particular position of the data sequence against a published digest. AASLs are secure against tampering even by malicious data structure maintainers. First, we show that a maintainer cannot ``invent'' and authenticate data elements for the AASL after he has committed to the structure. Second, he cannot equivocate by being able to prove conflicting facts about a particular position of the data sequence. This is the case even when the data sequence grows with time and its maintainer publishes successive commitments at times of his own choosing. AASLs can be invaluable in reasoning about the integrity of system logs maintained by untrusted components of a loosely-coupled distributed system.<|reference_end|>
arxiv
@article{maniatis2003authenticated, title={Authenticated Append-only Skip Lists}, author={Petros Maniatis and Mary Baker}, journal={arXiv preprint arXiv:cs/0302010}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302010}, primaryClass={cs.CR cs.DC} }
maniatis2003authenticated
arxiv-670963
cs/0302011
Smoothed Analysis of Interior-Point Algorithms: Condition Number
<|reference_start|>Smoothed Analysis of Interior-Point Algorithms: Condition Number: We show that the smoothed complexity of the logarithm of Renegar's condition number is O(log (n/sigma)).<|reference_end|>
arxiv
@article{dunagan2003smoothed, title={Smoothed Analysis of Interior-Point Algorithms: Condition Number}, author={John Dunagan, Daniel A. Spielman, and Shang-Hua Teng}, journal={arXiv preprint arXiv:cs/0302011}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302011}, primaryClass={cs.DS cs.NA} }
dunagan2003smoothed
arxiv-670964
cs/0302012
The New AI: General & Sound & Relevant for Physics
<|reference_start|>The New AI: General & Sound & Relevant for Physics: Most traditional artificial intelligence (AI) systems of the past 50 years are either very limited, or based on heuristics, or both. The new millennium, however, has brought substantial progress in the field of theoretically optimal and practically feasible algorithms for prediction, search, inductive inference based on Occam's razor, problem solving, decision making, and reinforcement learning in environments of a very general type. Since inductive inference is at the heart of all inductive sciences, some of the results are relevant not only for AI and computer science but also for physics, provoking nontraditional predictions based on Zuse's thesis of the computer-generated universe.<|reference_end|>
arxiv
@article{schmidhuber2003the, title={The New AI: General & Sound & Relevant for Physics}, author={Juergen Schmidhuber}, journal={arXiv preprint arXiv:cs/0302012}, year={2003}, number={TR IDSIA-04-03}, archivePrefix={arXiv}, eprint={cs/0302012}, primaryClass={cs.AI cs.LG quant-ph} }
schmidhuber2003the
arxiv-670965
cs/0302013
Cg in Two Pages
<|reference_start|>Cg in Two Pages: Cg is a language for programming GPUs. This paper describes Cg briefly.<|reference_end|>
arxiv
@article{kilgard2003cg, title={Cg in Two Pages}, author={Mark J. Kilgard}, journal={arXiv preprint arXiv:cs/0302013}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302013}, primaryClass={cs.GR cs.PL} }
kilgard2003cg
arxiv-670966
cs/0302014
An Algorithm for Aligning Sentences in Bilingual Corpora Using Lexical Information
<|reference_start|>An Algorithm for Aligning Sentences in Bilingual Corpora Using Lexical Information: In this paper we describe an algorithm for aligning sentences with their translations in a bilingual corpus using lexical information of the languages. Existing efficient algorithms ignore word identities and consider only the sentence lengths (Brown, 1991; Gale and Church, 1993). For a sentence in the source language text, the proposed algorithm picks the most likely translation from the target language text using lexical information and certain heuristics. It does not do statistical analysis using sentence lengths. The algorithm is language independent. It also aids in detecting addition and deletion of text in translations. The algorithm gives comparable results with the existing algorithms in most of the cases while it does better in cases where statistical algorithms do not give good results.<|reference_end|>
arxiv
@article{bharati2003an, title={An Algorithm for Aligning Sentences in Bilingual Corpora Using Lexical Information}, author={Akshar Bharati, V.Sriram, A.Vamshi Krishna, Rajeev Sangal, S.M.Bendre}, journal={arXiv preprint arXiv:cs/0302014}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302014}, primaryClass={cs.CL} }
bharati2003an
arxiv-670967
cs/0302015
Unsupervised Learning in a Framework of Information Compression by Multiple Alignment, Unification and Search
<|reference_start|>Unsupervised Learning in a Framework of Information Compression by Multiple Alignment, Unification and Search: This paper describes a novel approach to unsupervised learning that has been developed within a framework of "information compression by multiple alignment, unification and search" (ICMAUS), designed to integrate learning with other AI functions such as parsing and production of language, fuzzy pattern recognition, probabilistic and exact forms of reasoning, and others.<|reference_end|>
arxiv
@article{wolff2003unsupervised, title={Unsupervised Learning in a Framework of Information Compression by Multiple Alignment, Unification and Search}, author={J. G. Wolff}, journal={arXiv preprint arXiv:cs/0302015}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302015}, primaryClass={cs.AI cs.LG} }
wolff2003unsupervised
arxiv-670968
cs/0302016
Data-sharing relationships in the Web
<|reference_start|>Data-sharing relationships in the Web: We propose a novel structure, the data-sharing graph, for characterizing sharing patterns in large-scale data distribution systems. We analyze this structure in two such systems and uncover small-world patterns for data-sharing relationships. Using the data-sharing graph for system characterization has potential both for basic science, because we can identify new structures emerging in real, dynamic networks; and for system design, because we can exploit these structures when designing data location and delivery mechanisms. We conjecture that similar patterns arise in other large-scale systems and that these patterns can be exploited for mechanism design.<|reference_end|>
arxiv
@article{iamnitchi2003data-sharing, title={Data-sharing relationships in the Web}, author={Adriana Iamnitchi, Matei Ripeanu, Ian Foster}, journal={arXiv preprint arXiv:cs/0302016}, year={2003}, number={University of Chicago TR-2003-01}, archivePrefix={arXiv}, eprint={cs/0302016}, primaryClass={cs.NI cond-mat} }
iamnitchi2003data-sharing
arxiv-670969
cs/0302017
A Proposal to Separate Handles from Names on the Internet
<|reference_start|>A Proposal to Separate Handles from Names on the Internet: Networked communications inherently depend on the ability of the sender of a message to indicate through some token how the message should be delivered to a particular recipient. The tokens that refer messages to recipients are variously known as routes, addresses, handles, and names, ordered by their relative nearness to network topology vs. human meaning. All four sorts of token refer in some way to a recipient, but they are controlled by different authorities and their meanings depend on different contextual parameters. Today's global Internet employs dynamically determined routes, IP addresses, and domain names. Domain names combine the functions of handles and names. The high value of domain names as names leads to substantial social and legal dispute about their assignment, degrading their value as handles. The time has come to provide a distinct open network handle system (ONHS), using handles that are not meaningful in natural language and are therefore not subject to the disputes surrounding the use of names. A handle service may be deployed easily as a handle domain within the current Domain Name System. In order to minimize the administrative load, and maximize their own autonomy, netizens may use public-key cryptography to assign their own handles.<|reference_end|>
arxiv
@article{o'donnell2003a, title={A Proposal to Separate Handles from Names on the Internet}, author={Michael J. O'Donnell}, journal={arXiv preprint arXiv:cs/0302017}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302017}, primaryClass={cs.NI} }
o'donnell2003a
arxiv-670970
cs/0302018
Guided Google: A Meta Search Engine and its Implementation using the Google Distributed Web Services
<|reference_start|>Guided Google: A Meta Search Engine and its Implementation using the Google Distributed Web Services: With the advent of the Internet, search engines have begun sprouting like mushrooms after a rainfall. Only in recent years, have developers become more innovative, and came up with guided searching facilities online. The goals of these applications are to help ease and guide the searching efforts of a novice web user toward their desired objectives. A number of implementations of such services are emerging. This paper proposes a guided meta-search engine, called "Guided Google", as it serves as an interface to the actual Google.com search engine, using the Google Web Services.<|reference_end|>
arxiv
@article{hoong2003guided, title={Guided Google: A Meta Search Engine and its Implementation using the Google Distributed Web Services}, author={Ding Choon Hoong, Rajkumar Buyya}, journal={arXiv preprint arXiv:cs/0302018}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302018}, primaryClass={cs.DC} }
hoong2003guided
arxiv-670971
cs/0302019
Economic and On Demand Brain Activity Analysis on Global Grids
<|reference_start|>Economic and On Demand Brain Activity Analysis on Global Grids: The lack of computational power within an organization for analyzing scientific data, and the distribution of knowledge (by scientists) and technologies (advanced scientific devices) are two major problems commonly observed in scientific disciplines. One such scientific discipline is brain science. The analysis of brain activity data gathered from the MEG (Magnetoencephalography) instrument is an important research topic in medical science since it helps doctors in identifying symptoms of diseases. The data needs to be analyzed exhaustively to efficiently diagnose and analyze brain functions and requires access to large-scale computational resources. The potential platform for solving such resource intensive applications is the Grid. This paper describes a MEG data analysis system developed by us, leveraging Grid technologies, primarily Nimrod-G, Gridbus, and Globus. This paper explains the application of economy-based grid scheduling algorithms to the problem domain for on-demand processing of analysis jobs.<|reference_end|>
arxiv
@article{buyya2003economic, title={Economic and On Demand Brain Activity Analysis on Global Grids}, author={R. Buyya, S. Date, Y. Mizuno-Matsumoto, S. Venugopal, D. Abramson}, journal={arXiv preprint arXiv:cs/0302019}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302019}, primaryClass={cs.DC} }
buyya2003economic
arxiv-670972
cs/0302020
Analytical formulations of Peer-to-Peer Connection Efficiency
<|reference_start|>Analytical formulations of Peer-to-Peer Connection Efficiency: Use of Peer-to-Peer (P2P) service networks introduces a new communication paradigm because peers are both clients and servers and so each peer may provide/request services to/from other peers. Empirical studies of P2P networks have been undertaken and reveal useful characteristics. However there is to date little analytical work to describe P2P networks with respect to their communication paradigm and their interconnections. This paper provides an analytical formulation and optimisation of peer connection efficiency, in terms of minimising the fraction of wasted connection time. Peer connection efficiency is analysed for both a uni- and multi-connected peer. Given this fundamental optimisation, the paper optimises the number of connections that peers should make use of as a function of network load, in terms of minimising the total queue size that requests in the P2P network experience. The results of this paper provide a basis for engineering high performance P2P interconnection networks. The optimisations are useful for reducing bandwidth and power consumption, e.g. in the case of peers being mobile devices with a limited power supply. Also these results could be used to determine when a (virtual) circuit should be switched to support a connection.<|reference_end|>
arxiv
@article{harwood2003analytical, title={Analytical formulations of Peer-to-Peer Connection Efficiency}, author={Aaron Harwood}, journal={arXiv preprint arXiv:cs/0302020}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302020}, primaryClass={cs.DC cs.AR cs.NI} }
harwood2003analytical
arxiv-670973
cs/0302021
Building an Open Language Archives Community on the OAI Foundation
<|reference_start|>Building an Open Language Archives Community on the OAI Foundation: The Open Language Archives Community (OLAC) is an international partnership of institutions and individuals who are creating a worldwide virtual library of language resources. The Dublin Core (DC) Element Set and the OAI Protocol have provided a solid foundation for the OLAC framework. However, we need more precision in community-specific aspects of resource description than is offered by DC. Furthermore, many of the institutions and individuals who might participate in OLAC do not have the technical resources to support the OAI protocol. This paper presents our solutions to these two problems. To address the first, we have developed an extensible application profile for language resource metadata. To address the second, we have implemented Vida (the virtual data provider) and Viser (the virtual service provider), which permit community members to provide data and services without having to implement the OAI protocol. These solutions are generic and could be adopted by other specialized subcommunities.<|reference_end|>
arxiv
@article{simons2003building, title={Building an Open Language Archives Community on the OAI Foundation}, author={Gary Simons and Steven Bird}, journal={Library Hi Tech 21(2), 2003}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302021}, primaryClass={cs.CL cs.DL} }
simons2003building
arxiv-670974
cs/0302022
Fault-tolerant routing in peer-to-peer systems
<|reference_start|>Fault-tolerant routing in peer-to-peer systems: We consider the problem of designing an overlay network and routing mechanism that permits finding resources efficiently in a peer-to-peer system. We argue that many existing approaches to this problem can be modeled as the construction of a random graph embedded in a metric space whose points represent resource identifiers, where the probability of a connection between two nodes depends only on the distance between them in the metric space. We study the performance of a peer-to-peer system where nodes are embedded at grid points in a simple metric space: a one-dimensional real line. We prove upper and lower bounds on the message complexity of locating particular resources in such a system, under a variety of assumptions about failures of either nodes or the connections between them. Our lower bounds in particular show that the use of inverse power-law distributions in routing, as suggested by Kleinberg (1999), is close to optimal. We also give efficient heuristics to dynamically maintain such a system as new nodes arrive and old nodes depart. Finally, we give experimental results that suggest promising directions for future work.<|reference_end|>
arxiv
@article{aspnes2003fault-tolerant, title={Fault-tolerant routing in peer-to-peer systems}, author={James Aspnes, Zoe Diamadi, and Gauri Shah}, journal={arXiv preprint arXiv:cs/0302022}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302022}, primaryClass={cs.DS cs.DC} }
aspnes2003fault-tolerant
arxiv-670975
cs/0302023
Segmentation, Indexing, and Visualization of Extended Instructional Videos
<|reference_start|>Segmentation, Indexing, and Visualization of Extended Instructional Videos: We present a new method for segmenting, and a new user interface for indexing and visualizing, the semantic content of extended instructional videos. Given a series of key frames from the video, we generate a condensed view of the data by clustering frames according to media type and visual similarities. Using various visual filters, key frames are first assigned a media type (board, class, computer, illustration, podium, and sheet). Key frames of media type board and sheet are then clustered based on contents via an algorithm with near-linear cost. A novel user interface, the result of two user studies, displays related topics using icons linked topologically, allowing users to quickly locate semantically related portions of the video. We analyze the accuracy of the segmentation tool on 17 instructional videos, each of which is from 75 to 150 minutes in duration (a total of 40 hours); the classification accuracy exceeds 96%.<|reference_end|>
arxiv
@article{haubold2003segmentation, title={Segmentation, Indexing, and Visualization of Extended Instructional Videos}, author={Alexander Haubold and John R. Kender}, journal={arXiv preprint arXiv:cs/0302023}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302023}, primaryClass={cs.IR cs.CV} }
haubold2003segmentation
arxiv-670976
cs/0302024
Analysis and Interface for Instructional Video
<|reference_start|>Analysis and Interface for Instructional Video: We present a new method for segmenting, and a new user interface for indexing and visualizing, the semantic content of extended instructional videos. Using various visual filters, key frames are first assigned a media type (board, class, computer, illustration, podium, and sheet). Key frames of media type board and sheet are then clustered based on contents via an algorithm with near-linear cost. A novel user interface, the result of two user studies, displays related topics using icons linked topologically, allowing users to quickly locate semantically related portions of the video. We analyze the accuracy of the segmentation tool on 17 instructional videos, each of which is from 75 to 150 minutes in duration (a total of 40 hours); it exceeds 96%.<|reference_end|>
arxiv
@article{haubold2003analysis, title={Analysis and Interface for Instructional Video}, author={Alexander Haubold and John R. Kender}, journal={Proceedings of 2003 IEEE International Conference on Multimedia \& Expo, Volume II, pages 705-708, July 2003}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302024}, primaryClass={cs.IR cs.CV} }
haubold2003analysis
arxiv-670977
cs/0302025
Cryptographic Randomized Response Techniques
<|reference_start|>Cryptographic Randomized Response Techniques: We develop cryptographically secure techniques to guarantee unconditional privacy for respondents to polls. Our constructions are efficient and practical, and are shown not to allow cheating respondents to affect the ``tally'' by more than their own vote -- which will be given the exact same weight as that of other respondents. We demonstrate solutions to this problem based on both traditional cryptographic techniques and quantum cryptography.<|reference_end|>
arxiv
@article{ambainis2003cryptographic, title={Cryptographic Randomized Response Techniques}, author={Andris Ambainis and Markus Jakobsson and Helger Lipmaa}, journal={arXiv preprint arXiv:cs/0302025}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302025}, primaryClass={cs.CC cs.CR cs.CY quant-ph} }
ambainis2003cryptographic
arxiv-670978
cs/0302026
Recursive function templates as a solution of linear algebra expressions in C++
<|reference_start|>Recursive function templates as a solution of linear algebra expressions in C++: The article deals with a kind of recursive function templates in C++, where the recursion is realized corresponding template parameters to achieve better computational performance. Some specialization of these template functions ends the recursion and can be implemented using optimized hardware dependent or independent routines. The method is applied in addition to the known expression templates technique to solve linear algebra expressions with the help of the BLAS library. The whole implementation produces a new library, which keeps object-oriented benefits and has a higher computational speed represented in the tests.<|reference_end|>
arxiv
@article{myrnyy2003recursive, title={Recursive function templates as a solution of linear algebra expressions in C++}, author={Volodymyr Myrnyy (Brandenburg University of Technology, Cottbus, Germany)}, journal={arXiv preprint arXiv:cs/0302026}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302026}, primaryClass={cs.MS cs.PL} }
myrnyy2003recursive
arxiv-670979
cs/0302027
Tiling space and slabs with acute tetrahedra
<|reference_start|>Tiling space and slabs with acute tetrahedra: We show it is possible to tile three-dimensional space using only tetrahedra with acute dihedral angles. We present several constructions to achieve this, including one in which all dihedral angles are less than $77.08^\circ$, and another which tiles a slab in space.<|reference_end|>
arxiv
@article{eppstein2003tiling, title={Tiling space and slabs with acute tetrahedra}, author={David Eppstein, John M. Sullivan, and Alper Ungor}, journal={Computational Geometry Theory \& Applications 27(3):237--255, 2004}, year={2003}, doi={10.1016/j.comgeo.2003.11.003}, archivePrefix={arXiv}, eprint={cs/0302027}, primaryClass={cs.CG math.MG} }
eppstein2003tiling
arxiv-670980
cs/0302028
The Boolean Functions Computed by Random Boolean Formulas OR How to Grow the Right Function
<|reference_start|>The Boolean Functions Computed by Random Boolean Formulas OR How to Grow the Right Function: Among their many uses, growth processes (probabilistic amplification), were used for constructing reliable networks from unreliable components, and deriving complexity bounds of various classes of functions. Hence, determining the initial conditions for such processes is an important and challenging problem. In this paper we characterize growth processes by their initial conditions and derive conditions under which results such as Valiant's (1984) hold. First, we completely characterize growth processes that use linear connectives. Second, by extending Savick\'y's (1990) analysis, via ``Restriction Lemmas'', we characterize growth processes that use monotone connectives, and show that our technique is applicable to growth processes that use other connectives as well. Additionally, we obtain explicit bounds on the convergence rates of several growth processes, including the growth process studied by Savick\'y (1990).<|reference_end|>
arxiv
@article{brodsky2003the, title={The Boolean Functions Computed by Random Boolean Formulas OR How to Grow the Right Function}, author={Alex Brodsky and Nicholas Pippenger}, journal={arXiv preprint arXiv:cs/0302028}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302028}, primaryClass={cs.DM cs.CC} }
brodsky2003the
arxiv-670981
cs/0302029
Defeasible Logic Programming: An Argumentative Approach
<|reference_start|>Defeasible Logic Programming: An Argumentative Approach: The work reported here introduces Defeasible Logic Programming (DeLP), a formalism that combines results of Logic Programming and Defeasible Argumentation. DeLP provides the possibility of representing information in the form of weak rules in a declarative manner, and a defeasible argumentation inference mechanism for warranting the entailed conclusions. In DeLP an argumentation formalism will be used for deciding between contradictory goals. Queries will be supported by arguments that could be defeated by other arguments. A query q will succeed when there is an argument A for q that is warranted, ie, the argument A that supports q is found undefeated by a warrant procedure that implements a dialectical analysis. The defeasible argumentation basis of DeLP allows to build applications that deal with incomplete and contradictory information in dynamic domains. Thus, the resulting approach is suitable for representing agent's knowledge and for providing an argumentation based reasoning mechanism to agents.<|reference_end|>
arxiv
@article{garcia2003defeasible, title={Defeasible Logic Programming: An Argumentative Approach}, author={Alejandro Javier Garcia and Guillermo Ricardo Simari}, journal={arXiv preprint arXiv:cs/0302029}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302029}, primaryClass={cs.AI} }
garcia2003defeasible
arxiv-670982
cs/0302030
The traveling salesman problem for cubic graphs
<|reference_start|>The traveling salesman problem for cubic graphs: We show how to find a Hamiltonian cycle in a graph of degree at most three with n vertices, in time O(2^{n/3}) ~= 1.260^n and linear space. Our algorithm can find the minimum weight Hamiltonian cycle (traveling salesman problem), in the same time bound. We can also count or list all Hamiltonian cycles in a degree three graph in time O(2^{3n/8}) ~= 1.297^n. We also solve the traveling salesman problem in graphs of degree at most four, by randomized and deterministic algorithms with runtime O((27/4)^{n/3}) ~= 1.890^n and O((27/4+epsilon)^{n/3}) respectively. Our algorithms allow the input to specify a set of forced edges which must be part of any generated cycle. Our cycle listing algorithm shows that every degree three graph has O(2^{3n/8}) Hamiltonian cycles; we also exhibit a family of graphs with 2^{n/3} Hamiltonian cycles per graph.<|reference_end|>
arxiv
@article{eppstein2003the, title={The traveling salesman problem for cubic graphs}, author={David Eppstein}, journal={J. Graph Algorithms and Applications 11(1):61-81, 2007}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302030}, primaryClass={cs.DS} }
eppstein2003the
arxiv-670983
cs/0302031
Relaxed Scheduling in Dynamic Skin Triangulation
<|reference_start|>Relaxed Scheduling in Dynamic Skin Triangulation: We introduce relaxed scheduling as a paradigm for mesh maintenance and demonstrate its applicability to triangulating a skin surface in $\Rspace^3$.<|reference_end|>
arxiv
@article{edelsbrunner2003relaxed, title={Relaxed Scheduling in Dynamic Skin Triangulation}, author={Herbert Edelsbrunner and Alper Ungor}, journal={arXiv preprint arXiv:cs/0302031}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302031}, primaryClass={cs.CG} }
edelsbrunner2003relaxed
arxiv-670984
cs/0302032
Empirical Methods for Compound Splitting
<|reference_start|>Empirical Methods for Compound Splitting: Compounded words are a challenge for NLP applications such as machine translation (MT). We introduce methods to learn splitting rules from monolingual and parallel corpora. We evaluate them against a gold standard and measure their impact on performance of statistical MT systems. Results show accuracy of 99.1% and performance gains for MT of 0.039 BLEU on a German-English noun phrase translation task.<|reference_end|>
arxiv
@article{koehn2003empirical, title={Empirical Methods for Compound Splitting}, author={Philipp Koehn and Kevin Knight}, journal={arXiv preprint arXiv:cs/0302032}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302032}, primaryClass={cs.CL} }
koehn2003empirical
arxiv-670985
cs/0302033
Experimental Software Schedulability Estimation For Varied Processor Frequencies
<|reference_start|>Experimental Software Schedulability Estimation For Varied Processor Frequencies: This paper describes a new approach to experimentally estimate the application schedulability for various processor frequencies. We use additional workload generated by an artificial high priority routine to simulate the frequency decrease of a processor. Then we estimate the schedulability of applications at different frequencies. The results of such estimation can be used to determine the frequencies and control algorithms of dynamic voltage scaling/dynamic frequency scaling (DVS/DFS) implementations. The paper presents a general problem description, the proposed schedulability estimation method, its analysis and evaluation.<|reference_end|>
arxiv
@article{fabritius2003experimental, title={Experimental Software Schedulability Estimation For Varied Processor Frequencies}, author={Sampsa Fabritius and Raimondas Lencevicius and Edu Metz and Alexander Ran}, journal={arXiv preprint arXiv:cs/0302033}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302033}, primaryClass={cs.SE cs.OS} }
fabritius2003experimental
arxiv-670986
cs/0302034
Interest Rate Model Calibration Using Semidefinite Programming
<|reference_start|>Interest Rate Model Calibration Using Semidefinite Programming: We show that, for the purpose of pricing Swaptions, the Swap rate and the corresponding Forward rates can be considered lognormal under a single martingale measure. Swaptions can then be priced as options on a basket of lognormal assets and an approximation formula is derived for such options. This formula is centered around a Black-Scholes price with an appropriate volatility, plus a correction term that can be interpreted as the expected tracking error. The calibration problem can then be solved very efficiently using semidefinite programming.<|reference_end|>
arxiv
@article{d'aspremont2003interest, title={Interest Rate Model Calibration Using Semidefinite Programming}, author={Alexandre d'Aspremont}, journal={Applied Mathematical Finance 10(3), pp. 183-213, September 2003}, year={2003}, number={CMAPX-491}, archivePrefix={arXiv}, eprint={cs/0302034}, primaryClass={cs.CE} }
d'aspremont2003interest
arxiv-670987
cs/0302035
Risk-Management Methods for the Libor Market Model Using Semidefinite Programming
<|reference_start|>Risk-Management Methods for the Libor Market Model Using Semidefinite Programming: When interest rate dynamics are described by the Libor Market Model as in BGM97, we show how some essential risk-management results can be obtained from the dual of the calibration program. In particular, if the objective is to maximize another swaption's price, we show that the optimal dual variables describe a hedging portfolio in the sense of \cite{Avel96}. In the general case, the local sensitivity of the covariance matrix to all market movement scenarios can be directly computed from the optimal dual solution. We also show how semidefinite programming can be used to manage the Gamma exposure of a portfolio.<|reference_end|>
arxiv
@article{d'aspremont2003risk-management, title={Risk-Management Methods for the Libor Market Model Using Semidefinite Programming}, author={Alexandre d'Aspremont}, journal={Journal of Computational Finance 8(4), pp. 77-99, Summer 2005}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302035}, primaryClass={cs.CE} }
d'aspremont2003risk-management
arxiv-670988
cs/0302036
Constraint-based analysis of composite solvers
<|reference_start|>Constraint-based analysis of composite solvers: Cooperative constraint solving is an area of constraint programming that studies the interaction between constraint solvers with the aim of discovering the interaction patterns that amplify the positive qualities of individual solvers. Automatisation and formalisation of such studies is an important issue of cooperative constraint solving. In this paper we present a constraint-based analysis of composite solvers that integrates reasoning about the individual solvers and the processed data. The idea is to approximate this reasoning by resolution of set constraints on the finite sets representing the predicates that express all the necessary properties. We illustrate application of our analysis to two important cooperation patterns: deterministic choice and loop.<|reference_end|>
arxiv
@article{petrov2003constraint-based, title={Constraint-based analysis of composite solvers}, author={Evgueni Petrov and Eric Monfroy}, journal={arXiv preprint arXiv:cs/0302036}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302036}, primaryClass={cs.AI} }
petrov2003constraint-based
arxiv-670989
cs/0302037
Hidden Polynomial(s) Cryptosystems
<|reference_start|>Hidden Polynomial(s) Cryptosystems: We propose variations of the class of hidden monomial cryptosystems in order to make it resistant to all known attacks. We use identities built upon a single bivariate polynomial equation with coefficients in a finite field. Indeed, it can be replaced by a ``small'' ideal, as well. Throughout, we set up probabilistic encryption protocols, too. The same ideas extend to digital signature algorithms, as well. Our schemes work as well on differential fields of positive characteristic, and elsewhere.<|reference_end|>
arxiv
@article{toli2003hidden, title={Hidden Polynomial(s) Cryptosystems}, author={Ilia Toli}, journal={arXiv preprint arXiv:cs/0302037}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302037}, primaryClass={cs.CR cs.SC} }
toli2003hidden
arxiv-670990
cs/0302038
Tight Logic Programs
<|reference_start|>Tight Logic Programs: This note is about the relationship between two theories of negation as failure -- one based on program completion, the other based on stable models, or answer sets. Francois Fages showed that if a logic program satisfies a certain syntactic condition, which is now called ``tightness,'' then its stable models can be characterized as the models of its completion. We extend the definition of tightness and Fages' theorem to programs with nested expressions in the bodies of rules, and study tight logic programs containing the definition of the transitive closure of a predicate.<|reference_end|>
arxiv
@article{erdem2003tight, title={Tight Logic Programs}, author={Esra Erdem and Vladimir Lifschitz}, journal={Theory and Practice of Logic Programming, 3(4--5):499--518, 2003.}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302038}, primaryClass={cs.AI cs.LO} }
erdem2003tight
arxiv-670991
cs/0302039
Kalman-filtering using local interactions
<|reference_start|>Kalman-filtering using local interactions: There is a growing interest in using Kalman-filter models for brain modelling. In turn, it is of considerable importance to represent Kalman-filter in connectionist forms with local Hebbian learning rules. To our best knowledge, Kalman-filter has not been given such local representation. It seems that the main obstacle is the dynamic adaptation of the Kalman-gain. Here, a connectionist representation is presented, which is derived by means of the recursive prediction error method. We show that this method gives rise to attractive local learning rules and can adapt the Kalman-gain.<|reference_end|>
arxiv
@article{poczos2003kalman-filtering, title={Kalman-filtering using local interactions}, author={Barnabas Poczos and Andras Lorincz}, journal={arXiv preprint arXiv:cs/0302039}, year={2003}, archivePrefix={arXiv}, eprint={cs/0302039}, primaryClass={cs.AI} }
poczos2003kalman-filtering
arxiv-670992
cs/0303001
When Crossings Count - Approximating the Minimum Spanning Tree
<|reference_start|>When Crossings Count - Approximating the Minimum Spanning Tree: In the first part of the paper, we present an (1+\mu)-approximation algorithm to the minimum-spanning tree of points in a planar arrangement of lines, where the metric is the number of crossings between the spanning tree and the lines. The expected running time is O((n/\mu^5) alpha^3(n) log^5 n), where \mu > 0 is a prescribed constant. In the second part of our paper, we show how to embed such a crossing metric, into high-dimensions, so that the distances are preserved. As a result, we can deploy a large collection of subquadratic approximations algorithms \cite{im-anntr-98,giv-rahdg-99} for problems involving points with the crossing metric as a distance function. Applications include matching, clustering, nearest-neighbor, and furthest-neighbor.<|reference_end|>
arxiv
@article{har-peled2003when, title={When Crossings Count - Approximating the Minimum Spanning Tree}, author={Sariel Har-Peled and Piotr Indyk}, journal={arXiv preprint arXiv:cs/0303001}, year={2003}, archivePrefix={arXiv}, eprint={cs/0303001}, primaryClass={cs.CG} }
har-peled2003when
arxiv-670993
cs/0303002
About compression of vocabulary in computer oriented languages
<|reference_start|>About compression of vocabulary in computer oriented languages: The author uses the entropy of the ideal Bose-Einstein gas to minimize losses in computer-oriented languages.<|reference_end|>
arxiv
@article{maslov2003about, title={About compression of vocabulary in computer oriented languages}, author={V. P. Maslov}, journal={arXiv preprint arXiv:cs/0303002}, year={2003}, archivePrefix={arXiv}, eprint={cs/0303002}, primaryClass={cs.CL} }
maslov2003about
arxiv-670994
cs/0303003
A first approach for a possible cellular automaton model of fluids dynamics
<|reference_start|>A first approach for a possible cellular automaton model of fluids dynamics: In this paper I present a first attempt for a possible description of fluids dynamics by mean of a cellular automata technique. With the use of simple and elementary rules, based on random behaviour either, the model permits to obtain the evolution in time for a two-dimensional grid, where one molecule of the material fluid can ideally place itself on a single geometric square. By mean of computational simulations, some realistic effects, here showed by use of digital pictures, have been obtained. In a subsequent step of this work I think to use a parallel program for a high performances computational simulation, for increasing the degree of realism of the digital rendering by mean of a three-dimensional grid too. For the execution of the simulations, numerical methods of resolution for differential equations have not been used.<|reference_end|>
arxiv
@article{argentini2003a, title={A first approach for a possible cellular automaton model of fluids dynamics}, author={Gianluca Argentini}, journal={arXiv preprint arXiv:cs/0303003}, year={2003}, archivePrefix={arXiv}, eprint={cs/0303003}, primaryClass={cs.CC cs.DC nlin.CG physics.comp-ph} }
argentini2003a
arxiv-670995
cs/0303004
Reliability Conditions in Quadrature Algorithms
<|reference_start|>Reliability Conditions in Quadrature Algorithms: The detection of insufficiently resolved or ill-conditioned integrand structures is critical for the reliability assessment of the quadrature rule outputs. We discuss a method of analysis of the profile of the integrand at the quadrature knots which allows inferences approaching the theoretical 100% rate of success, under error estimate sharpening. The proposed procedure is of the highest interest for the solution of parametric integrals arising in complex physical models.<|reference_end|>
arxiv
@article{adam2003reliability, title={Reliability Conditions in Quadrature Algorithms}, author={Gh. Adam and S. Adam and N.M. Plakida}, journal={arXiv preprint arXiv:cs/0303004}, year={2003}, doi={10.1016/S0010-4655(03)00282-0}, number={E17-2002-205 (JINR Dubna preprint, sept.2002; preliminary version of this paper)}, archivePrefix={arXiv}, eprint={cs/0303004}, primaryClass={cs.NA cs.MS physics.comp-ph} }
adam2003reliability
arxiv-670996
cs/0303005
Fair Solution to the Reader-Writer-Problem with Semaphores only
<|reference_start|>Fair Solution to the Reader-Writer-Problem with Semaphores only: The reader-writer-problem is a standard problem in concurrent programming. A resource is shared by several processes which need either inclusive reading or exclusive writing access. The known solutions to this problem typically involve a number of global counters and queues. Here a very simple algorithm is presented which needs only two semaphores for synchronisation and no other global objects. The approach yields a fair solution without starving.<|reference_end|>
arxiv
@article{ballhausen2003fair, title={Fair Solution to the Reader-Writer-Problem with Semaphores only}, author={H. Ballhausen}, journal={arXiv preprint arXiv:cs/0303005}, year={2003}, archivePrefix={arXiv}, eprint={cs/0303005}, primaryClass={cs.DC} }
ballhausen2003fair
arxiv-670997
cs/0303006
On the Notion of Cognition
<|reference_start|>On the Notion of Cognition: We discuss philosophical issues concerning the notion of cognition basing ourselves in experimental results in cognitive sciences, especially in computer simulations of cognitive systems. There have been debates on the "proper" approach for studying cognition, but we have realized that all approaches can be in theory equivalent. Different approaches model different properties of cognitive systems from different perspectives, so we can only learn from all of them. We also integrate ideas from several perspectives for enhancing the notion of cognition, such that it can contain other definitions of cognition as special cases. This allows us to propose a simple classification of different types of cognition.<|reference_end|>
arxiv
@article{gershenson2003on, title={On the Notion of Cognition}, author={Carlos Gershenson}, journal={arXiv preprint arXiv:cs/0303006}, year={2003}, archivePrefix={arXiv}, eprint={cs/0303006}, primaryClass={cs.AI} }
gershenson2003on
arxiv-670998
cs/0303007
Glottochronology and problems of protolanguage reconstruction
<|reference_start|>Glottochronology and problems of protolanguage reconstruction: A method of languages genealogical trees construction is proposed.<|reference_end|>
arxiv
@article{victor2003glottochronology, title={Glottochronology and problems of protolanguage reconstruction}, author={Kromer Victor}, journal={arXiv preprint arXiv:cs/0303007}, year={2003}, archivePrefix={arXiv}, eprint={cs/0303007}, primaryClass={cs.CL} }
victor2003glottochronology
arxiv-670999
cs/0303008
Solution of the Linear Ordering Problem (NP=P)
<|reference_start|>Solution of the Linear Ordering Problem (NP=P): A polynomial algorithm is obtained for the NP-complete linear ordering problem.<|reference_end|>
arxiv
@article{bolotashvili2003solution, title={Solution of the Linear Ordering Problem (NP=P)}, author={Givi Bolotashvili}, journal={arXiv preprint arXiv:cs/0303008}, year={2003}, archivePrefix={arXiv}, eprint={cs/0303008}, primaryClass={cs.CC cs.DM} }
bolotashvili2003solution
arxiv-671000
cs/0303009
Unfolding Partiality and Disjunctions in Stable Model Semantics
<|reference_start|>Unfolding Partiality and Disjunctions in Stable Model Semantics: The paper studies an implementation methodology for partial and disjunctive stable models where partiality and disjunctions are unfolded from a logic program so that an implementation of stable models for normal (disjunction-free) programs can be used as the core inference engine. The unfolding is done in two separate steps. Firstly, it is shown that partial stable models can be captured by total stable models using a simple linear and modular program transformation. Hence, reasoning tasks concerning partial stable models can be solved using an implementation of total stable models. Disjunctive partial stable models have been lacking implementations which now become available as the translation handles also the disjunctive case. Secondly, it is shown how total stable models of disjunctive programs can be determined by computing stable models for normal programs. Hence, an implementation of stable models of normal programs can be used as a core engine for implementing disjunctive programs. The feasibility of the approach is demonstrated by constructing a system for computing stable models of disjunctive programs using the smodels system as the core engine. The performance of the resulting system is compared to that of dlv which is a state-of-the-art special purpose system for disjunctive programs.<|reference_end|>
arxiv
@article{janhunen2003unfolding, title={Unfolding Partiality and Disjunctions in Stable Model Semantics}, author={T. Janhunen, I. Niemela, D. Seipel, P. Simons, J. You}, journal={arXiv preprint arXiv:cs/0303009}, year={2003}, archivePrefix={arXiv}, eprint={cs/0303009}, primaryClass={cs.AI} }
janhunen2003unfolding