corpus_id (string, 7-12 chars) | paper_id (string, 9-16 chars) | title (string, 1-261 chars) | abstract (string, 70-4.02k chars) | source (string, 1 class: arxiv) | bibtex (string, 208-20.9k chars) | citation_key (string, 6-100 chars) |
---|---|---|---|---|---|---|
arxiv-671301 | cs/0307029 | The ray attack, an inefficient trial to break RSA cryptosystems | <|reference_start|>The ray attack, an inefficient trial to break RSA cryptosystems: The basic properties of RSA cryptosystems and some classical attacks on them are described. Derived from geometric properties of the Euler functions, the Euler function rays, a new ansatz to attack RSA cryptosystems is presented. A resulting, albeit inefficient, algorithm is given. It essentially consists of a loop with starting value determined by the Euler function ray and with step width given by a function $\omega_e(n)$ being a multiple of the order $\mathrm{ord}_n(e)$, where $e$ denotes the public key exponent and $n$ the RSA modulus. For $n=pq$ and an estimate $r<\sqrt{pq}$ for the smaller prime factor $p$, the running time is given by $T(e,n,r) = O((r-p)\ln e \ln n \ln r).$<|reference_end|> | arxiv | @article{de vries2003the,
title={The ray attack, an inefficient trial to break RSA cryptosystems},
author={Andreas de Vries},
journal={arXiv preprint arXiv:cs/0307029},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307029},
primaryClass={cs.CR}
} | de vries2003the |
arxiv-671302 | cs/0307030 | Parsing and Generation with Tabulation and Compilation | <|reference_start|>Parsing and Generation with Tabulation and Compilation: The standard tabulation techniques for logic programming presuppose fixed order of computation. Some data-driven control should be introduced in order to deal with diverse contexts. The present paper describes a data-driven method of constraint transformation with a sort of compilation which subsumes accessibility check and last-call optimization, which characterize standard natural-language parsing techniques, semantic-head-driven generation, etc.<|reference_end|> | arxiv | @article{hasida2003parsing,
title={Parsing and Generation with Tabulation and Compilation},
author={Koiti Hasida and Takashi Miyata},
journal={arXiv preprint arXiv:cs/0307030},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307030},
primaryClass={cs.CL}
} | hasida2003parsing |
arxiv-671303 | cs/0307031 | Automatic Classification using Self-Organising Neural Networks in Astrophysical Experiments | <|reference_start|>Automatic Classification using Self-Organising Neural Networks in Astrophysical Experiments: Self-Organising Maps (SOMs) are effective tools in classification problems, and in recent years the even more powerful Dynamic Growing Neural Networks, a variant of SOMs, have been developed. Automatic Classification (also called clustering) is an important and difficult problem in many Astrophysical experiments, for instance, Gamma Ray Burst classification, or gamma-hadron separation. After a brief introduction to the classification problem, we discuss Self-Organising Maps in section 2. Section 3 discusses various models of growing neural networks, and finally in section 4 we discuss the research perspectives in growing neural networks for efficient classification in astrophysical problems.<|reference_end|> | arxiv | @article{boinee2003automatic,
title={Automatic Classification using Self-Organising Neural Networks in
Astrophysical Experiments},
author={P. Boinee, A. De Angelis, E. Milotti},
journal={S. Ciprini, A. De Angelis, P. Lubrano and O. Mansutti (eds.):
Proc. of ``Science with the New Generation of High Energy Gamma-ray
Experiments'' (Perugia, Italy, May 2003). Forum, Udine 2003, p. 177},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307031},
primaryClass={cs.NE astro-ph cs.AI}
} | boinee2003automatic |
arxiv-671304 | cs/0307032 | Data Management and Mining in Astrophysical Databases | <|reference_start|>Data Management and Mining in Astrophysical Databases: We analyse the issues involved in the management and mining of astrophysical data. The traditional approach to data management in the astrophysical field is not able to keep up with the increasing size of the data gathered by modern detectors. An essential role in the astrophysical research will be assumed by automatic tools for information extraction from large datasets, i.e. data mining techniques, such as clustering and classification algorithms. This asks for an approach to data management based on data warehousing, emphasizing the efficiency and simplicity of data access; efficiency is obtained using multidimensional access methods and simplicity is achieved by properly handling metadata. Clustering and classification techniques, on large datasets, pose additional requirements: computational and memory scalability with respect to the data size, interpretability and objectivity of clustering or classification results. In this study we address some possible solutions.<|reference_end|> | arxiv | @article{frailis2003data,
title={Data Management and Mining in Astrophysical Databases},
author={M. Frailis, A. De Angelis, V. Roberto},
journal={S. Ciprini, A. De Angelis, P. Lubrano and O. Mansutti (eds.):
Proc. of ``Science with the New Generation of High Energy Gamma-ray
Experiments'' (Perugia, Italy, May 2003). Forum, Udine 2003, p. 157},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307032},
primaryClass={cs.DB astro-ph physics.data-an}
} | frailis2003data |
arxiv-671305 | cs/0307033 | Excellence in Computer Simulation | <|reference_start|>Excellence in Computer Simulation: Excellent computer simulations are done for a purpose. The most valid purposes are to explore uncharted territory, to resolve a well-posed scientific or technical question, or to make a design choice. Stand-alone modeling can serve the first purpose. The other two goals need a full integration of the modeling effort into a scientific or engineering program. Some excellent work, much of it related to the Department of Energy Laboratories, is reviewed. Some less happy stories are recounted. In the past, some of the most impressive work has involved complexity and chaos. Prediction in a complex world requires a first principles understanding based upon the intersection of theory, experiment and simulation.<|reference_end|> | arxiv | @article{kadanoff2003excellence,
title={Excellence in Computer Simulation},
author={Leo P. Kadanoff},
journal={arXiv preprint arXiv:cs/0307033},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307033},
primaryClass={cs.NA physics.comp-ph}
} | kadanoff2003excellence |
arxiv-671306 | cs/0307034 | Range Mode and Range Median Queries on Lists and Trees | <|reference_start|>Range Mode and Range Median Queries on Lists and Trees: We consider algorithms for preprocessing labelled lists and trees so that, for any two nodes u and v, we can answer queries of the form: What is the mode or median label in the sequence of labels on the path from u to v?<|reference_end|> | arxiv | @article{krizanc2003range,
title={Range Mode and Range Median Queries on Lists and Trees},
author={Danny Krizanc, Pat Morin and Michiel Smid},
journal={arXiv preprint arXiv:cs/0307034},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307034},
primaryClass={cs.DS}
} | krizanc2003range |
arxiv-671307 | cs/0307035 | Adaptive Domain Model: Dealing With Multiple Attributes of Self-Managing Distributed Object Systems | <|reference_start|>Adaptive Domain Model: Dealing With Multiple Attributes of Self-Managing Distributed Object Systems: Self-managing software has emerged as modern systems have become more complex. Some distributed object systems may contain thousands of objects deployed on tens or even hundreds of hosts. Development and support of such systems often cost a lot. To solve this issue, systems capable of supporting multiple self-managing attributes should be created. In the paper, the Adaptive domain concept is introduced as an extension to the basic domain concept to support a generic adaptation environment for building distributed object systems with multiple self-managing attributes.<|reference_end|> | arxiv | @article{motuzenko2003adaptive,
title={Adaptive Domain Model: Dealing With Multiple Attributes of Self-Managing
Distributed Object Systems},
author={Pavel Motuzenko},
journal={arXiv preprint arXiv:cs/0307035},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307035},
primaryClass={cs.AR cs.DC}
} | motuzenko2003adaptive |
arxiv-671308 | cs/0307036 | Small-World File-Sharing Communities | <|reference_start|>Small-World File-Sharing Communities: Web caches, content distribution networks, peer-to-peer file sharing networks, distributed file systems, and data grids all have in common that they involve a community of users who generate requests for shared data. In each case, overall system performance can be improved significantly if we can first identify and then exploit interesting structure within a community's access patterns. To this end, we propose a novel perspective on file sharing based on the study of the relationships that form among users based on the files in which they are interested. We propose a new structure that captures common user interests in data, the data-sharing graph, and justify its utility with studies on three data-distribution systems: a high-energy physics collaboration, the Web, and the Kazaa peer-to-peer network. We find small-world patterns in the data-sharing graphs of all three communities. We analyze these graphs and propose some probable causes for these emergent small-world patterns. The significance of small-world patterns is twofold: it provides rigorous support to intuition and, perhaps most importantly, it suggests ways to design mechanisms that exploit these naturally emerging patterns.<|reference_end|> | arxiv | @article{iamnitchi2003small-world,
title={Small-World File-Sharing Communities},
author={Adriana Iamnitchi, Matei Ripeanu, Ian Foster},
journal={arXiv preprint arXiv:cs/0307036},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307036},
primaryClass={cs.DC cond-mat cs.NI}
} | iamnitchi2003small-world |
arxiv-671309 | cs/0307037 | Supporting Dynamic Ad hoc Collaboration Capabilities | <|reference_start|>Supporting Dynamic Ad hoc Collaboration Capabilities: Modern HENP experiments such as CMS and Atlas involve as many as 2000 collaborators around the world. Collaborations this large will be unable to meet often enough to support working closely together. Many of the tools currently available for collaboration focus on heavy-weight applications such as videoconferencing tools. While these are important, there is a more basic need for tools that support connecting physicists to work together on an ad hoc or continuous basis. Tools that support the day-to-day connectivity and underlying needs of a group of collaborators are important for providing light-weight, non-intrusive, and flexible ways to work collaboratively. Some example tools include messaging, file-sharing, and shared plot viewers. An important component of the environment is a scalable underlying communication framework. In this paper we will describe our current progress on building a dynamic and ad hoc collaboration environment and our vision for its evolution into a HENP collaboration environment.<|reference_end|> | arxiv | @article{agarwal2003supporting,
title={Supporting Dynamic Ad hoc Collaboration Capabilities},
author={D. Agarwal and K. Berket},
journal={ECONF C0303241:MONT011,2003},
year={2003},
number={LBNL-53355},
archivePrefix={arXiv},
eprint={cs/0307037},
primaryClass={cs.OH cs.AI}
} | agarwal2003supporting |
arxiv-671310 | cs/0307038 | Manifold Learning with Geodesic Minimal Spanning Trees | <|reference_start|>Manifold Learning with Geodesic Minimal Spanning Trees: In the manifold learning problem one seeks to discover a smooth low dimensional surface, i.e., a manifold embedded in a higher dimensional linear vector space, based on a set of measured sample points on the surface. In this paper we consider the closely related problem of estimating the manifold's intrinsic dimension and the intrinsic entropy of the sample points. Specifically, we view the sample points as realizations of an unknown multivariate density supported on an unknown smooth manifold. We present a novel geometrical probability approach, called the geodesic-minimal-spanning-tree (GMST), to obtaining asymptotically consistent estimates of the manifold dimension and the R\'{e}nyi $\alpha$-entropy of the sample density on the manifold. The GMST approach is striking in its simplicity and does not require reconstructing the manifold or estimating the multivariate density of the samples. The GMST method simply constructs a minimal spanning tree (MST) sequence using a geodesic edge matrix and uses the overall lengths of the MSTs to simultaneously estimate manifold dimension and entropy. We illustrate the GMST approach for dimension and entropy estimation of a human face dataset.<|reference_end|> | arxiv | @article{costa2003manifold,
title={Manifold Learning with Geodesic Minimal Spanning Trees},
author={Jose Costa and Alfred Hero},
journal={arXiv preprint arXiv:cs/0307038},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307038},
primaryClass={cs.CV cs.LG}
} | costa2003manifold |
arxiv-671311 | cs/0307039 | Modeling Business | <|reference_start|>Modeling Business: Business concepts are studied using a metamodel-based approach built on UML 2.0. The Notation Independent Business concepts metamodel is introduced. The approach offers a mapping between different business modeling notations which could be used for bridging BM tools and boosting the MDA approach.<|reference_end|> | arxiv | @article{vitolins2003modeling,
title={Modeling Business},
author={Valdis Vitolins, Audris Kalnins},
journal={Vitolins Valdis, Audris Kalnins. Modeling Business. Modeling and
Simulation of Business Systems, Kaunas University of Technology Press,
Vilnius, May 13-14, 2003, pp. 215-220.},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307039},
primaryClass={cs.CE}
} | vitolins2003modeling |
arxiv-671312 | cs/0307040 | Bridging the gap between modal temporal logics and constraint-based QSR as an ALC(D) spatio-temporalisation with weakly cyclic TBoxes | <|reference_start|>Bridging the gap between modal temporal logics and constraint-based QSR as an ALC(D) spatio-temporalisation with weakly cyclic TBoxes: The aim of this work is to provide a family of qualitative theories for spatial change in general, and for motion of spatial scenes in particular. To achieve this, we consider a spatio-temporalisation, MTALC(D_x), of the well-known ALC(D) family of Description Logics (DLs) with a concrete domain. In particular, the concrete domain D_x is generated by a qualitative spatial Relation Algebra (RA) x. We show the important result that satisfiability of an MTALC(D_x) concept with respect to a weakly cyclic TBox is decidable in nondeterministic exponential time, by reducing it to the emptiness problem of a weak alternating automaton augmented with spatial constraints, which we show to remain decidable, although the accepting condition of a run involves, in addition to the standard case, the consistency of a potentially infinite CSP (Constraint Satisfaction Problem). The result provides an effective tableaux-like satisfiability procedure which is discussed.<|reference_end|> | arxiv | @article{isli2003bridging,
title={Bridging the gap between modal temporal logics and constraint-based QSR
as an ALC(D) spatio-temporalisation with weakly cyclic TBoxes},
author={Amar Isli},
journal={arXiv preprint arXiv:cs/0307040},
year={2003},
number={Technical Report FBI-HH-M-311/02, Fachbereich Informatik,
Universitaet Hamburg},
archivePrefix={arXiv},
eprint={cs/0307040},
primaryClass={cs.AI cs.LO}
} | isli2003bridging |
arxiv-671313 | cs/0307041 | High-density and Secure Data Transmission via Linear Combinations | <|reference_start|>High-density and Secure Data Transmission via Linear Combinations: Suppose that there are $n$ Senders and $n$ Receivers. Our goal is to send long messages from Sender $i$ to Receiver $i$ such that no other receiver can retrieve the message intended for Receiver $i$. The task can easily be completed using $n$ private channels between the pairs. Solutions using one channel need either encryption or switching elements for routing the messages to their addressees. The main result of the present work is a description of a network in which the Senders and the Receivers are connected by only $n^{o(1)}$ channels; the encoding and decoding are nothing more than very fast linear combinations of the message bits; and there are no switching or routing elements in the network: only linear combinations are computed, over fixed connections (channels or wires). In the proofs we do not use {\em any} unproven cryptographical or complexity theoretical assumptions.<|reference_end|> | arxiv | @article{grolmusz2003high-density,
title={High-density and Secure Data Transmission via Linear Combinations},
author={Vince Grolmusz},
journal={arXiv preprint arXiv:cs/0307041},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307041},
primaryClass={cs.CC cs.AR}
} | grolmusz2003high-density |
arxiv-671314 | cs/0307042 | A Note on Objects Built From Bricks without Corners | <|reference_start|>A Note on Objects Built From Bricks without Corners: We report a small advance on a question raised by Robertson, Schweitzer, and Wagon in [RSW02]. They constructed a genus-13 polyhedron built from bricks without corners, and asked whether every genus-0 such polyhedron must have a corner. A brick is a parallelepiped, and a corner is a brick of degree three or less in the brick graph. We describe a genus-3 polyhedron built from bricks with no corner, narrowing the genus gap.<|reference_end|> | arxiv | @article{damian2003a,
title={A Note on Objects Built From Bricks without Corners},
author={Mirela Damian and Joseph O'Rourke},
journal={arXiv preprint arXiv:cs/0307042},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307042},
primaryClass={cs.CG cs.DM}
} | damian2003a |
arxiv-671315 | cs/0307043 | An Extension of the Lovasz Local Lemma, and its Applications to Integer Programming | <|reference_start|>An Extension of the Lovasz Local Lemma, and its Applications to Integer Programming: The Lovasz Local Lemma due to Erdos and Lovasz is a powerful tool in proving the existence of rare events. We present an extension of this lemma, which works well when the event to be shown to exist is a conjunction of individual events, each of which asserts that a random variable does not deviate much from its mean. As applications, we consider two classes of NP-hard integer programs: minimax and covering integer programs. A key technique, randomized rounding of linear relaxations, was developed by Raghavan and Thompson to derive good approximation algorithms for such problems. We use our extension of the Local Lemma to prove that randomized rounding produces, with non-zero probability, much better feasible solutions than known before, if the constraint matrices of these integer programs are column-sparse (e.g., routing using short paths, problems on hypergraphs with small dimension/degree). This complements certain well-known results from discrepancy theory. We also generalize the method of pessimistic estimators due to Raghavan, to obtain constructive (algorithmic) versions of our results for covering integer programs.<|reference_end|> | arxiv | @article{srinivasan2003an,
title={An Extension of the Lovasz Local Lemma, and its Applications to Integer
Programming},
author={Aravind Srinivasan},
journal={arXiv preprint arXiv:cs/0307043},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307043},
primaryClass={cs.DS}
} | srinivasan2003an |
arxiv-671316 | cs/0307044 | The Linguistic DS: Linguistic Description in MPEG-7 | <|reference_start|>The Linguistic DS: Linguistic Description in MPEG-7: MPEG-7 (Moving Picture Experts Group Phase 7) is an XML-based international standard on semantic description of multimedia content. This document discusses the Linguistic DS and related tools. The Linguistic DS is a tool, based on the GDA tag set (http://i-content.org/GDA/tagset.html), for semantic annotation of linguistic data in or associated with multimedia content. The current document text reflects `Study of FPDAM - MPEG-7 MDS Extensions' issued in March 2003, and not most of MPEG-7 MDS, for which the readers are referred to the first version of the MPEG-7 MDS document available from ISO (http://www.iso.org). Without that reference, however, this document should be mostly intelligible to those who are familiar with XML and linguistic theories. Comments are welcome and will be considered in the standardization process.<|reference_end|> | arxiv | @article{hasida2003the,
title={The Linguistic DS: Linguistic Description in MPEG-7},
author={Koiti Hasida},
journal={arXiv preprint arXiv:cs/0307044},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307044},
primaryClass={cs.CL}
} | hasida2003the |
arxiv-671317 | cs/0307045 | Flexible Camera Calibration Using a New Analytical Radial Undistortion Formula with Application to Mobile Robot Localization | <|reference_start|>Flexible Camera Calibration Using a New Analytical Radial Undistortion Formula with Application to Mobile Robot Localization: Most algorithms in 3D computer vision rely on the pinhole camera model because of its simplicity, whereas virtually all imaging devices introduce a certain amount of nonlinear distortion, of which the radial distortion is the most severe part. The common approach to radial distortion is by means of polynomial approximation, which introduces distortion-specific parameters into the camera model and requires estimation of these distortion parameters. The task of estimating radial distortion is to find a radial distortion model that allows easy undistortion as well as satisfactory accuracy. This paper presents a new radial distortion model with an easy analytical undistortion formula, which also belongs to the polynomial approximation category. Experimental results are presented to show that with this radial distortion model, satisfactory accuracy is achieved. An application of the new radial distortion model is non-iterative yellow line alignment with a calibrated camera on ODIS, a robot built in our CSOIS.<|reference_end|> | arxiv | @article{ma2003flexible,
title={Flexible Camera Calibration Using a New Analytical Radial Undistortion
Formula with Application to Mobile Robot Localization},
author={Lili Ma, YangQuan Chen and Kevin L. Moore (Utah State University)},
journal={arXiv preprint arXiv:cs/0307045},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307045},
primaryClass={cs.CV}
} | ma2003flexible |
arxiv-671318 | cs/0307046 | A New Analytical Radial Distortion Model for Camera Calibration | <|reference_start|>A New Analytical Radial Distortion Model for Camera Calibration: The common approach to radial distortion is by means of polynomial approximation, which introduces distortion-specific parameters into the camera model and requires estimation of these distortion parameters. The task of estimating radial distortion is to find a radial distortion model that allows easy undistortion as well as satisfactory accuracy. This paper presents a new radial distortion model with an easy analytical undistortion formula, which also belongs to the polynomial approximation category. Experimental results are presented to show that with this radial distortion model, satisfactory accuracy is achieved.<|reference_end|> | arxiv | @article{ma2003a,
title={A New Analytical Radial Distortion Model for Camera Calibration},
author={Lili Ma, YangQuan Chen, Kevin L. Moore},
journal={arXiv preprint arXiv:cs/0307046},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307046},
primaryClass={cs.CV}
} | ma2003a |
arxiv-671319 | cs/0307047 | Rational Radial Distortion Models with Analytical Undistortion Formulae | <|reference_start|>Rational Radial Distortion Models with Analytical Undistortion Formulae: The common approach to radial distortion is by means of polynomial approximation, which introduces distortion-specific parameters into the camera model and requires estimation of these distortion parameters. The task of estimating radial distortion is to find a radial distortion model that allows easy undistortion as well as satisfactory accuracy. This paper presents a new class of rational radial distortion models with easy analytical undistortion formulae. Experimental results are presented to show that with this class of rational radial distortion models, satisfactory and comparable accuracy is achieved.<|reference_end|> | arxiv | @article{ma2003rational,
title={Rational Radial Distortion Models with Analytical Undistortion Formulae},
author={Lili Ma, YangQuan Chen, Kevin L. Moore},
journal={arXiv preprint arXiv:cs/0307047},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307047},
primaryClass={cs.CV}
} | ma2003rational |
arxiv-671320 | cs/0307048 | Integrating cardinal direction relations and other orientation relations in Qualitative Spatial Reasoning | <|reference_start|>Integrating cardinal direction relations and other orientation relations in Qualitative Spatial Reasoning: We propose a calculus integrating two calculi well-known in Qualitative Spatial Reasoning (QSR): Frank's projection-based cardinal direction calculus, and a coarser version of Freksa's relative orientation calculus. An original constraint propagation procedure is presented, which implements the interaction between the two integrated calculi. The importance of taking into account the interaction is shown with a real example providing an inconsistent knowledge base, whose inconsistency (a) cannot be detected by reasoning separately about each of the two components of the knowledge, just because, taken separately, each is consistent, but (b) is detected by the proposed algorithm, thanks to the interaction knowledge propagated from each of the two components to the other.<|reference_end|> | arxiv | @article{isli2003integrating,
title={Integrating cardinal direction relations and other orientation relations
in Qualitative Spatial Reasoning},
author={Amar Isli},
journal={arXiv preprint arXiv:cs/0307048},
year={2003},
number={Technical report FBI-HH-M-304/01, Fachbereich Informatik,
Universitaet Hamburg},
archivePrefix={arXiv},
eprint={cs/0307048},
primaryClass={cs.AI}
} | isli2003integrating |
arxiv-671321 | cs/0307049 | Limit groups and groups acting freely on $\mathbb{R}^n$-trees | <|reference_start|>Limit groups and groups acting freely on $\mathbb{R}^n$-trees: We give a simple proof of the finite presentation of Sela's limit groups by using free actions on $\mathbb{R}^n$-trees. We first prove that Sela's limit groups do have a free action on an $\mathbb{R}^n$-tree. We then prove that a finitely generated group having a free action on an $\mathbb{R}^n$-tree can be obtained from free abelian groups and surface groups by a finite sequence of free products and amalgamations over cyclic groups. As a corollary, such a group is finitely presented, has a finite classifying space, its abelian subgroups are finitely generated and contains only finitely many conjugacy classes of non-cyclic maximal abelian subgroups.<|reference_end|> | arxiv | @article{guirardel2003limit,
title={Limit groups and groups acting freely on $\mathbb{R}^n$-trees},
author={Vincent Guirardel},
journal={arXiv preprint arXiv:cs/0307049},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307049},
primaryClass={cs.DL}
} | guirardel2003limit |
arxiv-671322 | cs/0307050 | A ternary Relation Algebra of directed lines | <|reference_start|>A ternary Relation Algebra of directed lines: We define a ternary Relation Algebra (RA) of relative position relations on two-dimensional directed lines (d-lines for short). A d-line has two degrees of freedom (DFs): a rotational DF (RDF), and a translational DF (TDF). The representation of the RDF of a d-line will be handled by an RA of 2D orientations, CYC_t, known in the literature. A second algebra, TA_t, which will handle the TDF of a d-line, will be defined. The two algebras, CYC_t and TA_t, will constitute, respectively, the rotational and the translational components of the RA, PA_t, of relative position relations on d-lines: the PA_t atoms will consist of those pairs <t,r> of a TA_t atom and a CYC_t atom that are compatible. We present in detail the RA PA_t, with its converse table, its rotation table and its composition tables. We show that a (polynomial) constraint propagation algorithm, known in the literature, is complete for a subset of PA_t relations including almost all of the atomic relations. We will discuss the application scope of the RA, which includes incidence geometry, GIS (Geographic Information Systems), shape representation, localisation in (multi-)robot navigation, and the representation of motion prepositions in NLP (Natural Language Processing). We then compare the RA to existing ones, such as an algebra for reasoning about rectangles parallel to the axes of an (orthogonal) coordinate system, a ``spatial Odyssey'' of Allen's interval algebra, and an algebra for reasoning about 2D segments.<|reference_end|> | arxiv | @article{isli2003a,
title={A ternary Relation Algebra of directed lines},
author={Amar Isli},
journal={arXiv preprint arXiv:cs/0307050},
year={2003},
number={Technical report FBI-HH-M-313/02, Fachbereich Informatik,
Universitaet Hamburg},
archivePrefix={arXiv},
eprint={cs/0307050},
primaryClass={cs.AI}
} | isli2003a |
arxiv-671323 | cs/0307051 | An Analytical Piecewise Radial Distortion Model for Precision Camera Calibration | <|reference_start|>An Analytical Piecewise Radial Distortion Model for Precision Camera Calibration: The common approach to radial distortion is by means of polynomial approximation, which introduces distortion-specific parameters into the camera model and requires estimation of these distortion parameters. The task of estimating radial distortion is to find a radial distortion model that allows easy undistortion as well as satisfactory accuracy. This paper presents a new piecewise radial distortion model with an easy analytical undistortion formula. The motivation for seeking a piecewise radial distortion model is that, when a camera turns out to be of low quality during manufacturing, the nonlinear radial distortion can be complex. Using low order polynomials to approximate the radial distortion might not be precise enough. On the other hand, higher order polynomials suffer from the inverse problem. With the new piecewise radial distortion function, more flexibility is obtained and the radial undistortion can be performed analytically. Experimental results are presented to show that with this new piecewise radial distortion model, better performance is achieved than with the single function. Furthermore, performance comparable to the conventional polynomial model using 2 coefficients can also be accomplished.<|reference_end|> | arxiv | @article{ma2003an,
title={An Analytical Piecewise Radial Distortion Model for Precision Camera
Calibration},
author={Lili Ma, YangQuan Chen, Kevin L. Moore},
journal={arXiv preprint arXiv:cs/0307051},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307051},
primaryClass={cs.CV}
} | ma2003an |
arxiv-671324 | cs/0307052 | Gridscape: A Tool for the Creation of Interactive and Dynamic Grid Testbed Web Portals | <|reference_start|>Gridscape: A Tool for the Creation of Interactive and Dynamic Grid Testbed Web Portals: The notion of grid computing has gained increasing popularity recently as a realistic solution to many of our large-scale data storage and processing needs. It enables the sharing, selection and aggregation of resources geographically distributed across collaborative organisations. Now more and more people are beginning to embrace grid computing and thus are seeing the need to set up their own grids and grid testbeds. With this comes the need for some means to view and monitor the status of the resources in these testbeds (e.g. a Web-based Grid portal). Generally, developers invest a substantial amount of time and effort developing custom monitoring software. To overcome this limitation, this paper proposes Gridscape, a tool that enables the rapid creation of interactive and dynamic testbed portals (without any programming effort). Gridscape primarily aims to provide a solution for those users who need to be able to create a grid testbed portal but don't necessarily have the time or resources to build a system of their own from scratch.<|reference_end|> | arxiv | @article{gibbins2003gridscape:,
title={Gridscape: A Tool for the Creation of Interactive and Dynamic Grid
Testbed Web Portals},
author={Hussein Gibbins and Rajkumar Buyya},
journal={arXiv preprint arXiv:cs/0307052},
year={2003},
number={July 2003 Research Report, GRIDS Lab @ The University of Melbourne},
archivePrefix={arXiv},
eprint={cs/0307052},
primaryClass={cs.DC}
} | gibbins2003gridscape: |
arxiv-671325 | cs/0307053 | Hamevol1.0: a C++ code for differential equations based on Runge-Kutta algorithm. An application to matter enhanced neutrino oscillation | <|reference_start|>Hamevol1.0: a C++ code for differential equations based on Runge-Kutta algorithm. An application to matter enhanced neutrino oscillation: We present a C++ implementation of a fifth order semi-implicit Runge-Kutta algorithm for solving Ordinary Differential Equations. This algorithm can be used for studying many different problems and in particular it can be applied for computing the evolution of any system whose Hamiltonian is known. We consider in particular the problem of calculating the neutrino oscillation probabilities in the presence of matter interactions. The time performance and the accuracy of this implementation are competitive with respect to the other analytical and numerical techniques used in the literature. The algorithm design and the salient features of the code are presented and discussed and some explicit examples of code application are given.<|reference_end|> | arxiv | @article{aliani2003hamevol1.0:,
title={Hamevol1.0: a C++ code for differential equations based on Runge-Kutta
algorithm. An application to matter enhanced neutrino oscillation},
author={P. Aliani, V. Antonelli, M. Picariello, Emilio Torrente-Lujan},
journal={arXiv preprint arXiv:cs/0307053},
year={2003},
number={IFUM-841-FT; CERN-TH-03-101 ; FT-UM-TH-03-06},
archivePrefix={arXiv},
eprint={cs/0307053},
primaryClass={cs.CE}
} | aliani2003hamevol1.0: |
arxiv-671326 | cs/0307054 | Contributions to the Development and Improvement of a Regulatory and Pre-Regulatory Digitally System for the Tools within Flexible Fabrication Systems | <|reference_start|>Contributions to the Development and Improvement of a Regulatory and Pre-Regulatory Digitally System for the Tools within Flexible Fabrication Systems: The paper reports the results obtained in the design and realization of a digital system intended to assist the equipment for regulating and pre-regulating tools and tool holders within flexible fabrication systems (FFS). Moreover, based on the present results, the same methodology can be applied to assist tools with respect to their integrity and to compensate for wear in the FFS framework.<|reference_end|> | arxiv | @article{putz2003contributions,
title={Contributions to the Development and Improvement of a Regulatory and
Pre-Regulatory Digitally System for the Tools within Flexible Fabrication
Systems},
author={Viorel Putz and Mihai V. Putz},
journal={arXiv preprint arXiv:cs/0307054},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307054},
primaryClass={cs.CE cs.SE}
} | putz2003contributions |
arxiv-671327 | cs/0307055 | Learning Analogies and Semantic Relations | <|reference_start|>Learning Analogies and Semantic Relations: We present an algorithm for learning from unlabeled text, based on the Vector Space Model (VSM) of information retrieval, that can solve verbal analogy questions of the kind found in the Scholastic Aptitude Test (SAT). A verbal analogy has the form A:B::C:D, meaning "A is to B as C is to D"; for example, mason:stone::carpenter:wood. SAT analogy questions provide a word pair, A:B, and the problem is to select the most analogous word pair, C:D, from a set of five choices. The VSM algorithm correctly answers 47% of a collection of 374 college-level analogy questions (random guessing would yield 20% correct). We motivate this research by relating it to work in cognitive science and linguistics, and by applying it to a difficult problem in natural language processing, determining semantic relations in noun-modifier pairs. The problem is to classify a noun-modifier pair, such as "laser printer", according to the semantic relation between the noun (printer) and the modifier (laser). We use a supervised nearest-neighbour algorithm that assigns a class to a given noun-modifier pair by finding the most analogous noun-modifier pair in the training data. With 30 classes of semantic relations, on a collection of 600 labeled noun-modifier pairs, the learning algorithm attains an F value of 26.5% (random guessing: 3.3%). With 5 classes of semantic relations, the F value is 43.2% (random: 20%). The performance is state-of-the-art for these challenging problems.<|reference_end|> | arxiv | @article{turney2003learning,
title={Learning Analogies and Semantic Relations},
author={Peter D. Turney (National Research Council of Canada), Michael L.
Littman (Rutgers University)},
journal={arXiv preprint arXiv:cs/0307055},
year={2003},
number={NRC-46488},
archivePrefix={arXiv},
eprint={cs/0307055},
primaryClass={cs.LG cs.CL cs.IR}
} | turney2003learning |
arxiv-671328 | cs/0307056 | From Statistical Knowledge Bases to Degrees of Belief | <|reference_start|>From Statistical Knowledge Bases to Degrees of Belief: An intelligent agent will often be uncertain about various properties of its environment, and when acting in that environment it will frequently need to quantify its uncertainty. For example, if the agent wishes to employ the expected-utility paradigm of decision theory to guide its actions, it will need to assign degrees of belief (subjective probabilities) to various assertions. Of course, these degrees of belief should not be arbitrary, but rather should be based on the information available to the agent. This paper describes one approach for inducing degrees of belief from very rich knowledge bases that can include information about particular individuals, statistical correlations, physical laws, and default rules. We call our approach the random-worlds method. The method is based on the principle of indifference: it treats all of the worlds the agent considers possible as being equally likely. It is able to integrate qualitative default reasoning with quantitative probabilistic reasoning by providing a language in which both types of information can be easily expressed. Our results show that a number of desiderata that arise in direct inference (reasoning from statistical information to conclusions about individuals) and default reasoning follow directly from the semantics of random worlds. For example, random worlds captures important patterns of reasoning such as specificity, inheritance, indifference to irrelevant information, and default assumptions of independence. Furthermore, the expressive power of the language used and the intuitive semantics of random worlds allow the method to deal with problems that are beyond the scope of many other non-deductive reasoning systems.<|reference_end|> | arxiv | @article{bacchus2003from,
title={From Statistical Knowledge Bases to Degrees of Belief},
author={Fahiem Bacchus, Adam Grove, Joseph Y. Halpern, and Daphne Koller},
journal={Artificial Intelligence 87:1-2, 1996, pp. 75-143},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307056},
primaryClass={cs.AI}
} | bacchus2003from |
arxiv-671329 | cs/0307057 | Secrecy in Multiagent Systems | <|reference_start|>Secrecy in Multiagent Systems: We introduce a general framework for reasoning about secrecy and privacy requirements in multiagent systems. Our definitions extend earlier definitions of secrecy and nondeducibility given by Shannon and Sutherland. Roughly speaking, one agent maintains secrecy with respect to another if the second agent cannot rule out any possibilities for the behavior or state of the first agent. We show that the framework can handle probability and nondeterminism in a clean way, is useful for reasoning about asynchronous systems as well as synchronous systems, and suggests generalizations of secrecy that may be useful for dealing with issues such as resource-bounded reasoning. We also show that a number of well-known attempts to characterize the absence of information flow are special cases of our definitions of secrecy.<|reference_end|> | arxiv | @article{o'neill2003secrecy,
title={Secrecy in Multiagent Systems},
author={Kevin R. O'Neill and Joseph Y. Halpern},
journal={arXiv preprint arXiv:cs/0307057},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307057},
primaryClass={cs.CR cs.LO}
} | o'neill2003secrecy |
arxiv-671330 | cs/0307058 | Efficient Instrumentation for Performance Profiling | <|reference_start|>Efficient Instrumentation for Performance Profiling: Performance profiling consists of tracing a software system during execution and then analyzing the obtained traces. However, traces themselves affect the performance of the system, distorting its execution. Therefore, there is a need to minimize the effect of the tracing on the underlying system's performance. To achieve this, the trace set needs to be optimized according to the performance profiling problem being solved. Our position is that such minimization can be achieved only by adding the software trace design and implementation to the overall software development process. In such a process, the performance analyst supplies the knowledge of performance measurement requirements, while the software developer supplies the knowledge of the software. Both of these are needed for an optimal trace placement.<|reference_end|> | arxiv | @article{metz2003efficient,
title={Efficient Instrumentation for Performance Profiling},
author={Edu Metz, Raimondas Lencevicius},
journal={arXiv preprint arXiv:cs/0307058},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307058},
primaryClass={cs.PF cs.SE}
} | metz2003efficient |
arxiv-671331 | cs/0307059 | Group Authentication Using The Naccache-Stern Public-Key Cryptosystem | <|reference_start|>Group Authentication Using The Naccache-Stern Public-Key Cryptosystem: A group authentication protocol authenticates pre-defined groups of individuals such that: (i) no individual is identified; (ii) no knowledge of which groups can be successfully authenticated is known to the verifier; and (iii) no sensitive data is exposed. The paper presents a group authentication protocol based on splitting the private keys of the Naccache-Stern public-key cryptosystem in such a way that the Boolean expression defining the authenticable groups is implicit in the split.<|reference_end|> | arxiv | @article{guthery2003group,
title={Group Authentication Using The Naccache-Stern Public-Key Cryptosystem},
author={Scott B. Guthery},
journal={arXiv preprint arXiv:cs/0307059},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307059},
primaryClass={cs.CR}
} | guthery2003group |
arxiv-671332 | cs/0307060 | Neural realisation of the SP theory: cell assemblies revisited | <|reference_start|>Neural realisation of the SP theory: cell assemblies revisited: This paper describes how the elements of the SP theory (Wolff, 2003a) may be realised with neural structures and processes. To the extent that this is successful, the insights that have been achieved in the SP theory - the integration and simplification of a range of phenomena in perception and cognition - may be incorporated in a neural view of brain function. These proposals may be seen as a development of Hebb's (1949) concept of a 'cell assembly'. By contrast with that concept and variants of it, the version described in this paper proposes that any one neuron can belong in one assembly and only one assembly. A distinctive feature of the present proposals is that any neuron or cluster of neurons within a cell assembly may serve as a proxy or reference for another cell assembly or class of cell assemblies. This device provides solutions to many of the problems associated with cell assemblies, it allows information to be stored in a compressed form, and it provides a robust mechanism by which assemblies may be connected to form hierarchies, grammars and other kinds of knowledge structure. Drawing on insights derived from the SP theory, the paper also describes how unsupervised learning may be achieved with neural structures and processes. This theory of learning overcomes weaknesses in the Hebbian concept of learning and it is, at the same time, compatible with the observations that Hebb's theory was designed to explain.<|reference_end|> | arxiv | @article{wolff2003neural,
title={Neural realisation of the SP theory: cell assemblies revisited},
author={J. Gerard Wolff},
journal={arXiv preprint arXiv:cs/0307060},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307060},
primaryClass={cs.AI cs.NE}
} | wolff2003neural |
arxiv-671333 | cs/0307061 | Boundary knot method for Laplace and biharmonic problems | <|reference_start|>Boundary knot method for Laplace and biharmonic problems: The boundary knot method (BKM) [1] is a meshless boundary-type radial basis function (RBF) collocation scheme, where the nonsingular general solution is used instead of the fundamental solution to evaluate the homogeneous solution, while the dual reciprocity method (DRM) is employed for the approximation of the particular solution. Despite the fact that there are no nonsingular RBF general solutions available for Laplace and biharmonic problems, this study shows that the method can be successfully applied to these problems. The high-order general and fundamental solutions of Burger and Winkler equations are also first presented here.<|reference_end|> | arxiv | @article{chen2003boundary,
title={Boundary knot method for Laplace and biharmonic problems},
author={W. Chen},
journal={arXiv preprint arXiv:cs/0307061},
year={2003},
number={Proc. of the 14th Nordic Seminar on Computational Mechanics, pp.
117-120, Lund, Sweden, Oct. 2001},
archivePrefix={arXiv},
eprint={cs/0307061},
primaryClass={cs.CE cs.MS}
} | chen2003boundary |
arxiv-671334 | cs/0307062 | Euclidean algorithms are Gaussian | <|reference_start|>Euclidean algorithms are Gaussian: This study provides new results about the probabilistic behaviour of a class of Euclidean algorithms: the asymptotic distribution of a whole class of cost-parameters associated with these algorithms is normal. For the cost corresponding to the number of steps, Hensley has already proved a Local Limit Theorem; we give a new proof, and extend his result to other Euclidean algorithms and to a large class of digit costs, obtaining a faster, optimal, rate of convergence. The paper is based on the dynamical systems methodology, and the main tool is the transfer operator. In particular, we use recent results of Dolgopyat.<|reference_end|> | arxiv | @article{baladi2003euclidean,
title={Euclidean algorithms are Gaussian},
author={Viviane Baladi and Brigitte Vallee},
journal={arXiv preprint arXiv:cs/0307062},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307062},
primaryClass={cs.DS cs.CC}
} | baladi2003euclidean |
arxiv-671335 | cs/0307063 | An Alternative to RDF-Based Languages for the Representation and Processing of Ontologies in the Semantic Web | <|reference_start|>An Alternative to RDF-Based Languages for the Representation and Processing of Ontologies in the Semantic Web: This paper describes an approach to the representation and processing of ontologies in the Semantic Web, based on the ICMAUS theory of computation and AI. This approach has strengths that complement those of languages based on the Resource Description Framework (RDF) such as RDF Schema and DAML+OIL. The main benefits of the ICMAUS approach are simplicity and comprehensibility in the representation of ontologies, an ability to cope with errors and uncertainties in knowledge, and a versatile reasoning system with capabilities in the kinds of probabilistic reasoning that seem to be required in the Semantic Web.<|reference_end|> | arxiv | @article{wolff2003an,
title={An Alternative to RDF-Based Languages for the Representation and
Processing of Ontologies in the Semantic Web},
author={J Gerard Wolff},
journal={arXiv preprint arXiv:cs/0307063},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307063},
primaryClass={cs.AI}
} | wolff2003an |
arxiv-671336 | cs/0307064 | Implementing an Agent Trade Server | <|reference_start|>Implementing an Agent Trade Server: An experimental server for stock trading autonomous agents is presented and made available, together with an agent shell for swift development. The server, written in Java, was implemented as proof-of-concept for an agent trade server for a real financial exchange.<|reference_end|> | arxiv | @article{boman2003implementing,
title={Implementing an Agent Trade Server},
author={Magnus Boman and Anna Sandin},
journal={arXiv preprint arXiv:cs/0307064},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307064},
primaryClass={cs.CE}
} | boman2003implementing |
arxiv-671337 | cs/0307065 | Application of interactive parallel visualization for commodity-based clusters using visualization APIs | <|reference_start|>Application of interactive parallel visualization for commodity-based clusters using visualization APIs: We present an efficient application for interactive high-performance parallel visualization that is inexpensive to develop. We extend popular APIs such as Open Inventor and VTK to support commodity-based cluster visualization. Our implementation follows a standard master/slave concept: the general idea is to have a ``Master'' node, which will intercept a sequential graphical user interface (GUI) and broadcast it to the ``Slave'' nodes. The interactions between the nodes are implemented using MPI. The parallel remote rendering uses Chromium. This paper is mainly a report of our implementation experiences. We present in detail the proposed model and key aspects of its implementation. Also, we present performance measurements, we benchmark and quantitatively demonstrate the dependence of the visualization speed on the data size and the network bandwidth, and we identify the singularities and draw conclusions on Chromium's sort-first rendering architecture. The most original part of this work is the combined use of Open Inventor and Chromium.<|reference_end|> | arxiv | @article{tomov2003application,
title={Application of interactive parallel visualization for commodity-based
clusters using visualization APIs},
author={Stanimire Tomov (1), Robert Bennett (1), Michael McGuigan (1), Arnold
Peskin (1), Gordon Smith (1), John Spiletic (1) ((1) Brookhaven National
Laboratory)},
journal={arXiv preprint arXiv:cs/0307065},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307065},
primaryClass={cs.GR}
} | tomov2003application |
arxiv-671338 | cs/0307066 | Augernome & XtremWeb: Monte Carlos computation on a global computing platform | <|reference_start|>Augernome & XtremWeb: Monte Carlos computation on a global computing platform: In this paper, we present XtremWeb, a Global Computing platform used to generate Monte Carlo showers in Auger, an HEP experiment to study the highest energy cosmic rays at Mallargue-Mendoza, Argentina. XtremWeb's main goal, as a Global Computing platform, is to compute distributed applications using the idle time of widely interconnected machines. It is especially dedicated to, but not limited to, multi-parameter applications such as Monte Carlo computations; its security mechanisms, which ensure not only host integrity but also result certification, and its fault-tolerance features encouraged us to test it and, finally, to deploy it to support our CPU needs for simulating showers. We first introduce Auger's computing needs and how Global Computing could help. We then detail XtremWeb's architecture and goals. The fourth and last part presents the benefits we have gained by choosing this platform. We conclude with what could be done next.<|reference_end|> | arxiv | @article{lodygensky2003augernome,
title={Augernome & XtremWeb: Monte Carlos computation on a global computing
platform},
author={Oleg Lodygensky, Gilles Fedak, Vincent Neri, Alain Cordier, Franck
Cappello},
journal={ECONF C0303241:THAT001,2003},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307066},
primaryClass={cs.DC}
} | lodygensky2003augernome |
arxiv-671339 | cs/0307067 | Sound search in a denotational semantics for first order logic | <|reference_start|>Sound search in a denotational semantics for first order logic: In this paper we adapt the definitions and results from Apt and Vermeulen on `First order logic as a constraint programming language' (in: Proceedings of LPAR2001, Baaz and Voronkov (eds.), Springer LNAI 2514) to include important ideas about search and choice into the system. We give motivating examples. Then we set up denotational semantics for first order logic as follows: the semantic universe includes states that consist of two components: a substitution, which can be seen as the computed answer; and a constraint satisfaction problem, which can be seen as the residue of the original problem, yet to be handled by constraint programming. The interaction between these components is regulated by an operator called: infer. In this paper we regard infer as an operator on sets of states to enable us to analyze ideas about search among states and choice between states. The precise adaptations of definitions and results are able to deal with the examples and we show that, given several reasonable conditions, the new definitions ensure soundness of the system with respect to the standard interpretation of first order logic. In this way the `reasonable conditions' can be read as conditions for sound search. We indicate briefly how to investigate efficiency of search in future research.<|reference_end|> | arxiv | @article{vermeulen2003sound,
title={Sound search in a denotational semantics for first order logic},
author={C.F.M. Vermeulen},
journal={arXiv preprint arXiv:cs/0307067},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307067},
primaryClass={cs.LO}
} | vermeulen2003sound |
arxiv-671340 | cs/0307068 | Web Access to Cultural Heritage for the Disabled | <|reference_start|>Web Access to Cultural Heritage for the Disabled: Physical disabled access is something that most cultural institutions such as museums consider very seriously. Indeed, there are normally legal requirements to do so. However, online disabled access is still a relatively novel and developing field. Many cultural organizations have not yet considered the issues in depth and web developers are not necessarily experts either. The interface for websites is normally tested with major browsers, but not with specialist software like text to audio converters for the blind or against the relevant accessibility and validation standards. We consider the current state of the art in this area, especially with respect to aspects of particular importance to the access of cultural heritage.<|reference_end|> | arxiv | @article{bowen2003web,
title={Web Access to Cultural Heritage for the Disabled},
author={Jonathan P. Bowen},
journal={Jonathan P. Bowen, Web Access to Cultural Heritage for the
Disabled. In James Hemsley, Vito Cappellini and Gerd Stanke (eds.), EVA 2003
London Conference Proceedings, University College London, UK, 22-26 July
2003, pages s1:1-11. Keynote address. ISBN 0-9543146-3-8},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307068},
primaryClass={cs.CY cs.HC cs.IR}
} | bowen2003web |
arxiv-671341 | cs/0307069 | A logic for reasoning about upper probabilities | <|reference_start|>A logic for reasoning about upper probabilities: We present a propositional logic which can be used to reason about the uncertainty of events, where the uncertainty is modeled by a set of probability measures assigning an interval of probability to each event. We give a sound and complete axiomatization for the logic, and show that the satisfiability problem is NP-complete, no harder than satisfiability for propositional logic.<|reference_end|> | arxiv | @article{halpern2003a,
title={A logic for reasoning about upper probabilities},
author={Joseph Y. Halpern and Riccardo Pucella},
journal={Journal of AI Research 17, 2001, pp. 57-81},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307069},
primaryClass={cs.AI cs.LO}
} | halpern2003a |
arxiv-671342 | cs/0307070 | Modeling Belief in Dynamic Systems, Part I: Foundations | <|reference_start|>Modeling Belief in Dynamic Systems, Part I: Foundations: Belief change is a fundamental problem in AI: Agents constantly have to update their beliefs to accommodate new observations. In recent years, there has been much work on axiomatic characterizations of belief change. We claim that a better understanding of belief change can be gained from examining appropriate semantic models. In this paper we propose a general framework in which to model belief change. We begin by defining belief in terms of knowledge and plausibility: an agent believes p if he knows that p is more plausible than its negation. We then consider some properties defining the interaction between knowledge and plausibility, and show how these properties affect the properties of belief. In particular, we show that by assuming two of the most natural properties, belief becomes a KD45 operator. Finally, we add time to the picture. This gives us a framework in which we can talk about knowledge, plausibility (and hence belief), and time, which extends the framework of Halpern and Fagin for modeling knowledge in multi-agent systems. We then examine the problem of ``minimal change''. This notion can be captured by using prior plausibilities, an analogue to prior probabilities, which can be updated by ``conditioning''. We show by example that conditioning on a plausibility measure can capture many scenarios of interest. In a companion paper, we show how the two best-studied scenarios of belief change, belief revision and belief update, fit into our framework.<|reference_end|> | arxiv | @article{friedman2003modeling,
title={Modeling Belief in Dynamic Systems, Part I: Foundations},
author={Nir Friedman and Joseph Y. Halpern},
journal={Artificial Intelligence 95:2, 1997, pp. 257-316},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307070},
primaryClass={cs.AI cs.LO}
} | friedman2003modeling |
arxiv-671343 | cs/0307071 | Modeling Belief in Dynamic Systems, Part II: Revisions and Update | <|reference_start|>Modeling Belief in Dynamic Systems, Part II: Revisions and Update: The study of belief change has been an active area in philosophy and AI. In recent years two special cases of belief change, belief revision and belief update, have been studied in detail. In a companion paper, we introduce a new framework to model belief change. This framework combines temporal and epistemic modalities with a notion of plausibility, allowing us to examine the change of beliefs over time. In this paper, we show how belief revision and belief update can be captured in our framework. This allows us to compare the assumptions made by each method, and to better understand the principles underlying them. In particular, it shows that Katsuno and Mendelzon's notion of belief update depends on several strong assumptions that may limit its applicability in artificial intelligence. Finally, our analysis allow us to identify a notion of minimal change that underlies a broad range of belief change operations including revision and update.<|reference_end|> | arxiv | @article{friedman2003modeling,
title={Modeling Belief in Dynamic Systems, Part II: Revisions and Update},
author={Nir Friedman and Joseph Y. Halpern},
journal={Artificial Intelligence 95:2, 1997, pp. 257-316},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307071},
primaryClass={cs.AI cs.LO}
} | friedman2003modeling |
arxiv-671344 | cs/0307072 | Camera Calibration: a USU Implementation | <|reference_start|>Camera Calibration: a USU Implementation: The task of camera calibration is to estimate the intrinsic and extrinsic parameters of a camera model. Though there are some restricted techniques to infer the 3-D information about the scene from uncalibrated cameras, effective camera calibration procedures will open up the possibility of using a wide range of existing algorithms for 3-D reconstruction and recognition. The applications of camera calibration include vision-based metrology, robust visual platooning and visual docking of mobile robots where the depth information is important.<|reference_end|> | arxiv | @article{ma2003camera,
title={Camera Calibration: a USU Implementation},
author={Lili Ma, YangQuan Chen, and Kevin L. Moore},
journal={arXiv preprint arXiv:cs/0307072},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307072},
primaryClass={cs.CV}
} | ma2003camera |
arxiv-671345 | cs/0307073 | Search and Navigation in Relational Databases | <|reference_start|>Search and Navigation in Relational Databases: We present a new application for keyword search within relational databases, which uses a novel algorithm to solve the join discovery problem by finding Memex-like trails through the graph of foreign key dependencies. It differs from previous efforts in the algorithms used, in the presentation mechanism and in the use of primary-key only database queries at query-time to maintain a fast response for users. We present examples using the DBLP data set.<|reference_end|> | arxiv | @article{wheeldon2003search,
title={Search and Navigation in Relational Databases},
author={Richard Wheeldon, Mark Levene and Kevin Keenoy},
journal={arXiv preprint arXiv:cs/0307073},
year={2003},
archivePrefix={arXiv},
eprint={cs/0307073},
primaryClass={cs.DB}
} | wheeldon2003search |
arxiv-671346 | cs/0308001 | Two- versus three-dimensional connectivity testing of first-order queries to semi-algebraic sets | <|reference_start|>Two- versus three-dimensional connectivity testing of first-order queries to semi-algebraic sets: This paper addresses the question whether one can determine the connectivity of a semi-algebraic set in three dimensions by testing the connectivity of a finite number of two-dimensional ``samples'' of the set, where these samples are defined by first-order queries. The question is answered negatively for two classes of first-order queries: cartesian-product-free, and positive one-pass.<|reference_end|> | arxiv | @article{geerts2003two-,
title={Two- versus three-dimensional connectivity testing of first-order
queries to semi-algebraic sets},
author={Floris Geerts, Lieven Smits, Jan Van den Bussche},
journal={A revised version has been published online (21 July 2005) in Acta
Informatica under the title "N- versus (N-1)-dimensional connectivity testing
of first-order queries to semi-algebraic sets"},
year={2003},
doi={10.1007/s00236-005-0171-5},
archivePrefix={arXiv},
eprint={cs/0308001},
primaryClass={cs.LO cs.CG cs.DB}
} | geerts2003two- |
arxiv-671347 | cs/0308002 | Quantifying and Visualizing Attribute Interactions | <|reference_start|>Quantifying and Visualizing Attribute Interactions: Interactions are patterns between several attributes in data that cannot be inferred from any subset of these attributes. While mutual information is a well-established approach to evaluating the interactions between two attributes, we surveyed its generalizations so as to quantify interactions between several attributes. We have chosen McGill's interaction information, which has been independently rediscovered a number of times under various names in various disciplines, because of its many intuitively appealing properties. We apply interaction information to visually present the most important interactions of the data. Visualization of interactions has provided insight into the structure of data on a number of domains, identifying redundant attributes and opportunities for constructing new features, discovering unexpected regularities in data, and has helped during the construction of predictive models; we illustrate the methods on numerous examples. A machine learning method that disregards interactions may get caught in two traps: myopia is caused by learning algorithms assuming independence in spite of interactions, whereas fragmentation arises from assuming an interaction in spite of independence.<|reference_end|> | arxiv | @article{jakulin2003quantifying,
title={Quantifying and Visualizing Attribute Interactions},
author={Aleks Jakulin and Ivan Bratko},
journal={arXiv preprint arXiv:cs/0308002},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308002},
primaryClass={cs.AI}
} | jakulin2003quantifying |
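Since the abstract above centers on McGill's interaction information, a minimal sketch of how it is computed may be useful; the three-variable XOR distribution below is a made-up illustration, not data from the paper.

```python
# Minimal sketch of McGill's interaction information,
# I(X;Y;Z) = I(X;Y|Z) - I(X;Y), computed via marginal entropies.
from collections import defaultdict
from math import log2

def entropy(joint, axes):
    """Entropy of the marginal over the given axes of a joint pmf."""
    marg = defaultdict(float)
    for outcome, p in joint.items():
        marg[tuple(outcome[a] for a in axes)] += p
    return -sum(p * log2(p) for p in marg.values() if p > 0)

def interaction_information(joint):
    """I(X;Y;Z) = -[H(X)+H(Y)+H(Z)] + [H(XY)+H(XZ)+H(YZ)] - H(XYZ)."""
    H = lambda axes: entropy(joint, axes)
    return (-(H((0,)) + H((1,)) + H((2,)))
            + (H((0, 1)) + H((0, 2)) + H((1, 2)))
            - H((0, 1, 2)))

# XOR-like example: Z = X xor Y gives a strongly positive interaction.
joint = {(x, y, x ^ y): 0.25 for x in (0, 1) for y in (0, 1)}
print(interaction_information(joint))  # 1.0 bit
```

Under this sign convention, positive values indicate synergy (as in XOR) and negative values indicate redundancy among the attributes.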
arxiv-671348 | cs/0308003 | A Family of Simplified Geometric Distortion Models for Camera Calibration | <|reference_start|>A Family of Simplified Geometric Distortion Models for Camera Calibration: The commonly used radial distortion model for camera calibration is in fact an assumption or a restriction. In practice, camera distortion could happen in a general geometrical manner that is not limited to the radial sense. This paper proposes a simplified geometrical distortion modeling method by using two different radial distortion functions in the two image axes. A family of simplified geometric distortion models is proposed, which are either simple polynomials or the rational functions of polynomials. Analytical geometric undistortion is possible using two of the distortion functions discussed in this paper and their performance can be improved by applying a piecewise fitting idea. Our experimental results show that the geometrical distortion models always perform better than their radial distortion counterparts. Furthermore, the proposed geometric modeling method is more appropriate for cameras whose distortion is not perfectly radially symmetric around the center of distortion.<|reference_end|> | arxiv | @article{ma2003a,
title={A Family of Simplified Geometric Distortion Models for Camera
Calibration},
author={Lili Ma, YangQuan Chen, and Kevin L. Moore},
journal={arXiv preprint arXiv:cs/0308003},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308003},
primaryClass={cs.CV}
} | ma2003a |
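The abstract's central idea, one radial distortion function per image axis, can be sketched as follows; the two-coefficient polynomial form and the names k1x through k2y are illustrative assumptions, not the paper's exact model family.

```python
# Hedged sketch: separate radial polynomials per image axis. The
# two-coefficient polynomial form and coefficient names are
# illustrative assumptions, not the paper's exact family.
def distort(xu, yu, k1x, k2x, k1y, k2y, cx=0.0, cy=0.0):
    """Map undistorted normalized coords (xu, yu) to distorted ones,
    applying a different radial polynomial along each axis."""
    dx, dy = xu - cx, yu - cy
    r2 = dx * dx + dy * dy
    xd = cx + dx * (1.0 + k1x * r2 + k2x * r2 * r2)  # x-axis function
    yd = cy + dy * (1.0 + k1y * r2 + k2y * r2 * r2)  # y-axis function
    return xd, yd

# With k1x == k1y and k2x == k2y this reduces to the usual radially
# symmetric model; unequal coefficients break that symmetry.
print(distort(0.5, 0.25, k1x=-0.2, k2x=0.05, k1y=-0.1, k2y=0.02))
```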
arxiv-671349 | cs/0308004 | DPG: A Cache-Efficient Accelerator for Sorting and for Join Operators | <|reference_start|>DPG: A Cache-Efficient Accelerator for Sorting and for Join Operators: We present a new algorithm for fast record retrieval, distribute-probe-gather, or DPG. DPG has important applications both in sorting and in joins. Current main memory sorting algorithms split their work into three phases: extraction of key-pointer pairs; sorting of the key-pointer pairs; and copying of the original records into the destination array according to the sorted key-pointer pairs. The copying in the last phase dominates today's sorting time. Hence, the use of DPG in the third phase provides an accelerator for existing sorting algorithms. DPG also provides two new join methods for foreign key joins: DPG-move join and DPG-sort join. The resulting join methods with DPG are faster because DPG join is cache-efficient and at the same time avoids the need for sorting or for hashing. The ideas presented for foreign key join can also be extended to faster record pair retrieval for spatial and temporal databases.<|reference_end|> | arxiv | @article{cooperman2003dpg:,
title={DPG: A Cache-Efficient Accelerator for Sorting and for Join Operators},
author={Gene Cooperman, Xiaoqin Ma and Viet Ha Nguyen},
journal={arXiv preprint arXiv:cs/0308004},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308004},
primaryClass={cs.DB cs.DS}
} | cooperman2003dpg: |
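For context, here is a minimal sketch of the three-phase key-pointer sort the abstract describes as the baseline; the cache-efficient DPG permutation itself is the paper's contribution and is not reproduced here.

```python
# Sketch of the baseline three-phase key-pointer sort described in the
# abstract; phase 3 is the copy pass that DPG accelerates.
def key_pointer_sort(records, key):
    # Phase 1: extract (key, index) pairs -- small and cache-friendly.
    pairs = [(key(rec), i) for i, rec in enumerate(records)]
    # Phase 2: sort only the pairs, never the full records.
    pairs.sort()
    # Phase 3: gather records into the destination array. This pass
    # makes one random access per record and dominates running time,
    # which is the cache bottleneck the distribute-probe-gather
    # strategy is designed to remove.
    return [records[i] for _, i in pairs]

rows = [{"id": 3, "v": "c"}, {"id": 1, "v": "a"}, {"id": 2, "v": "b"}]
print(key_pointer_sort(rows, key=lambda r: r["id"]))
```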
arxiv-671350 | cs/0308005 | Disabled Access for Museum Websites | <|reference_start|>Disabled Access for Museum Websites: Physical disabled access is something that most museums consider very seriously. Indeed, there are normally legal requirements to do so. However, online disabled access is still a relatively novel field. Most museums have not yet considered the issues in depth. The Human-Computer Interface for their websites is normally tested with major browsers, but not with specialist browsers or against the relevant accessibility and validation standards. We consider the current state of the art in this area and mention an accessibility survey of some museum websites.<|reference_end|> | arxiv | @article{bowen2003disabled,
title={Disabled Access for Museum Websites},
author={Jonathan P. Bowen},
journal={Jonathan P. Bowen. Disabled Access for Museum Websites. WWW2003:
The Twelfth International World Wide Web Conference, Budapest, Hungary, 20-24
May 2003. Conference poster},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308005},
primaryClass={cs.CY cs.HC cs.IR}
} | bowen2003disabled |
arxiv-671351 | cs/0308006 | Higher-Dimensional Packing with Order Constraints | <|reference_start|>Higher-Dimensional Packing with Order Constraints: We present a first exact study on higher-dimensional packing problems with order constraints. Problems of this type occur naturally in applications such as logistics or computer architecture and can be interpreted as higher-dimensional generalizations of scheduling problems. Using graph-theoretic structures to describe feasible solutions, we develop a novel exact branch-and-bound algorithm. This extends previous work by Fekete and Schepers; a key tool is a new order-theoretic characterization of feasible extensions of a partial order to a given complementarity graph that is tailor-made for use in a branch-and-bound environment. The usefulness of our approach is validated by computational results.<|reference_end|> | arxiv | @article{fekete2003higher-dimensional,
title={Higher-Dimensional Packing with Order Constraints},
author={Sandor P. Fekete and Ekkehard Koehler and Juergen Teich},
journal={arXiv preprint arXiv:cs/0308006},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308006},
primaryClass={cs.DS cs.DM}
} | fekete2003higher-dimensional |
arxiv-671352 | cs/0308007 | On Applying Or-Parallelism and Tabling to Logic Programs | <|reference_start|>On Applying Or-Parallelism and Tabling to Logic Programs: The past years have seen widening efforts at increasing Prolog's declarativeness and expressiveness. Tabling has proved to be a viable technique to efficiently overcome SLD's susceptibility to infinite loops and redundant subcomputations. Our research demonstrates that implicit or-parallelism is a natural fit for logic programs with tabling. To substantiate this belief, we have designed and implemented an or-parallel tabling engine -- OPTYap -- and we used a shared-memory parallel machine to evaluate its performance. To the best of our knowledge, OPTYap is the first implementation of a parallel tabling engine for logic programming systems. OPTYap builds on Yap's efficient sequential Prolog engine. Its execution model is based on the SLG-WAM for tabling, and on the environment copying for or-parallelism. Preliminary results indicate that the mechanisms proposed to parallelize search in the context of SLD resolution can indeed be effectively and naturally generalized to parallelize tabled computations, and that the resulting systems can achieve good performance on shared-memory parallel machines. More importantly, it emphasizes our belief that through applying or-parallelism and tabling to logic programs the range of applications for Logic Programming can be increased.<|reference_end|> | arxiv | @article{rocha2003on,
title={On Applying Or-Parallelism and Tabling to Logic Programs},
author={Ricardo Rocha and Fernando Silva and Vitor Santos Costa},
journal={arXiv preprint arXiv:cs/0308007},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308007},
primaryClass={cs.PL}
} | rocha2003on |
arxiv-671353 | cs/0308008 | A Grid Based Architecture for High-Performance NLP | <|reference_start|>A Grid Based Architecture for High-Performance NLP: We describe the design and early implementation of an extensible, component-based software architecture for natural language engineering applications which interfaces with high performance distributed computing services. The architecture leverages existing linguistic resource description and discovery mechanisms based on metadata descriptions, combining these in a compatible fashion with other software definition abstractions. Within this architecture, application design is highly flexible, allowing disparate components to be combined to suit the overall application functionality, and formally described independently of processing concerns. An application specification language provides abstraction from the programming environment and allows ease of interface with high performance computational grids via a broker.<|reference_end|> | arxiv | @article{hughes2003a,
title={A Grid Based Architecture for High-Performance NLP},
author={Baden Hughes and Steven Bird},
journal={arXiv preprint arXiv:cs/0308008},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308008},
primaryClass={cs.DC cs.CL}
} | hughes2003a |
arxiv-671354 | cs/0308009 | The Generalized Riemann or Henstock Integral Underpinning Multivariate Data Analysis: Application to Faint Structure Finding in Price Processes | <|reference_start|>The Generalized Riemann or Henstock Integral Underpinning Multivariate Data Analysis: Application to Faint Structure Finding in Price Processes: Practical data analysis involves many implicit or explicit assumptions about the good behavior of the data, and excludes consideration of various potentially pathological or limit cases. In this work, we present a new general theory of data, and of data processing, to bypass some of these assumptions. The new framework presented is focused on integration, and has direct applicability to expectation, distance, correlation, and aggregation. In a case study, we seek to reveal faint structure in financial data. Our new foundation for data encoding and handling offers increased justification for our conclusions.<|reference_end|> | arxiv | @article{muldowney2003the,
title={The Generalized Riemann or Henstock Integral Underpinning Multivariate
Data Analysis: Application to Faint Structure Finding in Price Processes},
author={Pat Muldowney and Fionn Murtagh},
journal={arXiv preprint arXiv:cs/0308009},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308009},
primaryClass={cs.CE cs.CV}
} | muldowney2003the |
arxiv-671355 | cs/0308010 | On the probabilistic approach to the random satisfiability problem | <|reference_start|>On the probabilistic approach to the random satisfiability problem: In this note I will review some of the recent results that have been obtained in the probabilistic approach to the random satisfiability problem. At the present moment the results are only heuristic. In the case of the random 3-satisfiability problem a phase transition from the satisfiable to the unsatisfiable phase is found at $\alpha=4.267$. There are other values of $\alpha$ that separate different regimes, and they will be described in detail. In this context the properties of the survey decimation algorithm will also be discussed.<|reference_end|> | arxiv | @article{parisi2003on,
title={On the probabilistic approach to the random satisfiability problem},
author={Giorgio Parisi},
journal={arXiv preprint arXiv:cs/0308010},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308010},
primaryClass={cs.CC cond-mat.dis-nn}
} | parisi2003on |
arxiv-671356 | cs/0308011 | Short Cycles Connectivity | <|reference_start|>Short Cycles Connectivity: Short cycles connectivity is a generalization of ordinary connectivity. Instead of by a path (a sequence of edges), two vertices have to be connected by a sequence of short cycles, in which two adjacent cycles have at least one common vertex. If all adjacent cycles in the sequence share at least one edge, we talk about edge short cycles connectivity. It is shown that the short cycles connectivity is an equivalence relation on the set of vertices, while the edge short cycles connectivity components determine an equivalence relation on the set of edges. Efficient algorithms for determining equivalence classes are presented. Short cycles connectivity can be extended to directed graphs (cyclic and transitive connectivity). For further generalization we can also consider connectivity by small cliques or other families of graphs.<|reference_end|> | arxiv | @article{batagelj2003short,
title={Short Cycles Connectivity},
author={V. Batagelj, M. Zaversnik},
journal={Discrete Mathematics 307 (3-5): 310-318, 6 February 2007},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308011},
primaryClass={cs.DS cs.DM}
} | batagelj2003short |
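A minimal sketch of the vertex relation defined in the abstract, restricted to the simplest short cycles (triangles), is given below; the paper treats general short cycles and more efficient algorithms, so the triangle restriction is an illustrative assumption.

```python
# Short cycles connectivity for the simplest cycle length (triangles):
# vertices of each triangle are unioned, and chains of vertex-sharing
# triangles then merge automatically, yielding the equivalence classes.
def triangle_components(n, edges):
    parent = list(range(n))
    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x
    def union(a, b):
        parent[find(a)] = find(b)
    adj = [set() for _ in range(n)]
    for u, v in edges:
        adj[u].add(v); adj[v].add(u)
    for u, v in edges:       # each triangle is found once per edge;
        for w in adj[u] & adj[v]:   # redundant unions are harmless
            union(u, v); union(v, w)
    return [find(v) for v in range(n)]

# Two triangles sharing vertex 2 form one class; vertex 5, which lies
# on no triangle, stays in its own singleton class.
edges = [(0,1),(1,2),(0,2),(2,3),(3,4),(2,4),(4,5)]
print(triangle_components(6, edges))
```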
arxiv-671357 | cs/0308012 | Constant-Depth Frege Systems with Counting Axioms Polynomially Simulate Nullstellensatz Refutations | <|reference_start|>Constant-Depth Frege Systems with Counting Axioms Polynomially Simulate Nullstellensatz Refutations: We show that constant-depth Frege systems with counting axioms modulo $m$ polynomially simulate Nullstellensatz refutations modulo $m$. Central to this is a new definition of reducibility from formulas to systems of polynomials with the property that, for most previously studied translations of formulas to systems of polynomials, a formula reduces to its translation. When combined with a previous result of the authors, this establishes the first size separation between Nullstellensatz and polynomial calculus refutations. We also obtain new, small refutations for certain CNFs by constant-depth Frege systems with counting axioms.<|reference_end|> | arxiv | @article{impagliazzo2003constant-depth,
title={Constant-Depth Frege Systems with Counting Axioms Polynomially Simulate
Nullstellensatz Refutations},
author={Russell Impagliazzo and Nathan Segerlind},
journal={arXiv preprint arXiv:cs/0308012},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308012},
primaryClass={cs.CC cs.LO}
} | impagliazzo2003constant-depth |
arxiv-671358 | cs/0308013 | A Robust and Computational Characterisation of Peer-to-Peer Database Systems | <|reference_start|>A Robust and Computational Characterisation of Peer-to-Peer Database Systems: In this paper we give a robust logical and computational characterisation of peer-to-peer database systems. We first define a precise model-theoretic semantics of a peer-to-peer system, which allows for local inconsistency handling. We then characterise the general computational properties for the problem of answering queries to such a peer-to-peer system. Finally, we devise tight complexity bounds and distributed procedures for the problem of answering queries in a few relevant special cases.<|reference_end|> | arxiv | @article{franconi2003a,
title={A Robust and Computational Characterisation of Peer-to-Peer Database
Systems},
author={Enrico Franconi, Gabriel Kuper, Andrei Lopatenko, Luciano Serafini},
journal={"International Workshop On Databases, Information Systems and
Peer-to-Peer Computing", 2003},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308013},
primaryClass={cs.DC cs.DB}
} | franconi2003a |
arxiv-671359 | cs/0308014 | On the expressive power of semijoin queries | <|reference_start|>On the expressive power of semijoin queries: The semijoin algebra is the variant of the relational algebra obtained by replacing the join operator by the semijoin operator. We provide an Ehrenfeucht-Fraiss\'{e} game, characterizing the discerning power of the semijoin algebra. This game gives a method for showing that queries are not expressible in the semijoin algebra.<|reference_end|> | arxiv | @article{leinders2003on,
title={On the expressive power of semijoin queries},
author={Dirk Leinders (Limburgs Universitair Centrum, Belgium), Jerzy
Tyszkiewicz (Warsaw University), Jan Van den Bussche (Limburgs Universitair
Centrum, Belgium)},
journal={arXiv preprint arXiv:cs/0308014},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308014},
primaryClass={cs.DB cs.LO}
} | leinders2003on |
arxiv-671360 | cs/0308015 | Rethinking OpenPGP PKI and OpenPGP Public Keyserver | <|reference_start|>Rethinking OpenPGP PKI and OpenPGP Public Keyserver: OpenPGP, an IETF Proposed Standard based on the PGP application, has its own Public Key Infrastructure (PKI) architecture which is different from the one based on X.509, another standard from ITU. This paper describes the OpenPGP PKI; the historical perspective as well as its current use. The current OpenPGP PKI issues include the capability of a PGP keyserver and its performance. PGP keyservers have been developed and operated by volunteers since the 1990s. The keyservers distribute, merge, and expire the OpenPGP public keys. Major keyserver managers from several countries have built the globally distributed network of PGP keyservers. However, the current PGP Public Keyserver (pksd) has some limitations. It does not fully support the OpenPGP format, is neither expandable nor flexible, and lacks any cluster technology. Finally we introduce the project on the next generation OpenPGP public keyserver called OpenPKSD, led by Hironobu Suzuki, one of the authors, and funded by the Japanese Information-technology Promotion Agency (IPA).<|reference_end|> | arxiv | @article{yamane2003rethinking,
title={Rethinking OpenPGP PKI and OpenPGP Public Keyserver},
author={Shinji Yamane, Jiahong Wang, Hironobu Suzuki, Norihisa Segawa and Yuko
Murayama},
journal={arXiv preprint arXiv:cs/0308015},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308015},
primaryClass={cs.CY cs.CR}
} | yamane2003rethinking |
arxiv-671361 | cs/0308016 | Collaborative Creation of Digital Content in Indian Languages | <|reference_start|>Collaborative Creation of Digital Content in Indian Languages: The world is passing through a major revolution called the information revolution, in which information and knowledge are becoming available to people in unprecedented amounts wherever and whenever they need it. Those societies which fail to take advantage of the new technology will be left behind, just like in the industrial revolution. The information revolution is based on two major technologies: computers and communication. These technologies have to be delivered in a COST EFFECTIVE manner, and in LANGUAGES accessible to people. One way to deliver them in a cost-effective manner is to make suitable technology choices, and to allow people access through shared resources. This could be done through street corner shops (for computer usage, e-mail etc.), schools, community centres and local library centres.<|reference_end|> | arxiv | @article{bharati2003collaborative,
title={Collaborative Creation of Digital Content in Indian Languages},
author={Akshar Bharati, Rajeev Sangal},
journal={arXiv preprint arXiv:cs/0308016},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308016},
primaryClass={cs.CL}
} | bharati2003collaborative |
arxiv-671362 | cs/0308017 | Information Revolution | <|reference_start|>Information Revolution: The world is passing through a major revolution called the information revolution, in which information and knowledge are becoming available to people in unprecedented amounts wherever and whenever they need it. Those societies which fail to take advantage of the new technology will be left behind, just like in the industrial revolution. The information revolution is based on two major technologies: computers and communication. These technologies have to be delivered in a COST EFFECTIVE manner, and in LANGUAGES accessible to people. One way to deliver them in a cost-effective manner is to make suitable technology choices (discussed later), and to allow people access through shared resources. This could be done through street corner shops (for computer usage, e-mail etc.), schools, community centers and local library centres.<|reference_end|> | arxiv | @article{bharati2003information,
title={Information Revolution},
author={Akshar Bharati, Vineet Chaitanya, Rajeev Sangal},
journal={Published as a keynote lecture in IRIL-99: Information Revolution
and Indian Languages, 12-14 Nov 1999},
year={2003},
number={LTRC-TR007},
archivePrefix={arXiv},
eprint={cs/0308017},
primaryClass={cs.CL}
} | bharati2003information |
arxiv-671363 | cs/0308018 | Anusaaraka: Overcoming the Language Barrier in India | <|reference_start|>Anusaaraka: Overcoming the Language Barrier in India: The anusaaraka system makes text in one Indian language accessible in another Indian language. In the anusaaraka approach, the load is so divided between man and computer that the language load is taken by the machine, and the interpretation of the text is left to the man. The machine presents an image of the source text in a language close to the target language. In the image, some constructions of the source language (which do not have equivalents) spill over to the output. Some special notation is also devised. The user after some training learns to read and understand the output. Because the Indian languages are close, the learning time of the output language is short, and is expected to be around 2 weeks. The output can also be post-edited by a trained user to make it grammatically correct in the target language. Style can also be changed, if necessary. Thus, in this scenario, it can function as a human assisted translation system. Currently, anusaarakas are being built from Telugu, Kannada, Marathi, Bengali and Punjabi to Hindi. They can be built for all Indian languages in the near future. Everybody must pitch in to build such systems connecting all Indian languages, using the free software model.<|reference_end|> | arxiv | @article{bharati2003anusaaraka:,
title={Anusaaraka: Overcoming the Language Barrier in India},
author={Akshar Bharati, Vineet Chaitanya, Amba P. Kulkarni, Rajeev Sangal, G
Umamaheshwara Rao},
journal={Published in "Anuvad: Approaches to Translation", Rukmini Bhaya
Nair, (editor), Sage, New Delhi, 2001},
year={2003},
number={LTRC-TR009},
archivePrefix={arXiv},
eprint={cs/0308018},
primaryClass={cs.CL}
} | bharati2003anusaaraka: |
arxiv-671364 | cs/0308019 | Language Access: An Information Based Approach | <|reference_start|>Language Access: An Information Based Approach: The anusaaraka system (a kind of machine translation system) makes text in one Indian language accessible through another Indian language. The machine presents an image of the source text in a language close to the target language. In the image, some constructions of the source language (which do not have equivalents in the target language) spill over to the output. Some special notation is also devised. Anusaarakas have been built from five pairs of languages: Telugu, Kannada, Marathi, Bengali and Punjabi to Hindi. They are available for use through email servers. Anusaarakas follow the principle of substitutability and reversibility of strings produced. This implies preservation of information while going from a source language to a target language. For narrow subject areas, specialized modules can be built by putting subject domain knowledge into the system, which produce good quality grammatical output. However, it should be remembered that such modules will work only in narrow areas, and will sometimes go wrong. In such a situation, anusaaraka output will still remain useful.<|reference_end|> | arxiv | @article{bharati2003language,
title={Language Access: An Information Based Approach},
author={Akshar Bharati, Vineet Chaitanya, Amba P. Kulkarni, Rajeev Sangal},
journal={Published in the proceedings of Knowledge Based Computer Systems
Conference, 2000, Tata McGraw Hill, New Delhi 2000},
year={2003},
number={LTRC-TR010},
archivePrefix={arXiv},
eprint={cs/0308019},
primaryClass={cs.CL}
} | bharati2003language |
arxiv-671365 | cs/0308020 | LERIL : Collaborative Effort for Creating Lexical Resources | <|reference_start|>LERIL : Collaborative Effort for Creating Lexical Resources: The paper reports on efforts taken to create lexical resources pertaining to Indian languages, using the collaborative model. The lexical resources being developed are: (1) Transfer lexicon and grammar from English to several Indian languages. (2) Dependency tree bank of annotated corpora for several Indian languages. The dependency trees are based on the Paninian model. (3) Bilingual dictionary of 'core meanings'.<|reference_end|> | arxiv | @article{bharati2003leril,
title={LERIL : Collaborative Effort for Creating Lexical Resources},
author={Akshar Bharati, Dipti M Sharma, Vineet Chaitanya, Amba P Kulkarni,
Rajeev Sangal, Durgesh D Rao},
journal={arXiv preprint arXiv:cs/0308020},
year={2003},
number={LTRC-TR015},
archivePrefix={arXiv},
eprint={cs/0308020},
primaryClass={cs.CL}
} | bharati2003leril |
arxiv-671366 | cs/0308021 | A Bernstein-Bezier Sufficient Condition for Invertibility of Polynomial Mapping Functions | <|reference_start|>A Bernstein-Bezier Sufficient Condition for Invertibility of Polynomial Mapping Functions: We propose a sufficient condition for invertibility of a polynomial mapping function defined on a cube or simplex. This condition is applicable to finite element analysis using curved meshes. The sufficient condition is based on an analysis of the Bernstein-B\'ezier form of the columns of the derivative.<|reference_end|> | arxiv | @article{vavasis2003a,
title={A Bernstein-Bezier Sufficient Condition for Invertibility of Polynomial
Mapping Functions},
author={Stephen Vavasis},
journal={arXiv preprint arXiv:cs/0308021},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308021},
primaryClass={cs.NA cs.CG}
} | vavasis2003a |
arxiv-671367 | cs/0308022 | Extending Dublin Core Metadata to Support the Description and Discovery of Language Resources | <|reference_start|>Extending Dublin Core Metadata to Support the Description and Discovery of Language Resources: As language data and associated technologies proliferate and as the language resources community expands, it is becoming increasingly difficult to locate and reuse existing resources. Are there any lexical resources for such-and-such a language? What tool works with transcripts in this particular format? What is a good format to use for linguistic data of this type? Questions like these dominate many mailing lists, since web search engines are an unreliable way to find language resources. This paper reports on a new digital infrastructure for discovering language resources being developed by the Open Language Archives Community (OLAC). At the core of OLAC is its metadata format, which is designed to facilitate description and discovery of all kinds of language resources, including data, tools, or advice. The paper describes OLAC metadata, its relationship to Dublin Core metadata, and its dissemination using the metadata harvesting protocol of the Open Archives Initiative.<|reference_end|> | arxiv | @article{bird2003extending,
title={Extending Dublin Core Metadata to Support the Description and Discovery
of Language Resources},
author={Steven Bird and Gary Simons},
journal={Computers and the Humanities, 37 (4), 2003},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308022},
primaryClass={cs.CL cs.DL}
} | bird2003extending |
arxiv-671368 | cs/0308023 | On the complexity of curve fitting algorithms | <|reference_start|>On the complexity of curve fitting algorithms: We study a popular algorithm for fitting polynomial curves to scattered data based on the least squares with gradient weights. We show that sometimes this algorithm admits a substantial reduction of complexity, and, furthermore, find precise conditions under which this is possible. It turns out that this is, indeed, possible when one fits circles but not ellipses or hyperbolas.<|reference_end|> | arxiv | @article{chernov2003on,
title={On the complexity of curve fitting algorithms},
author={N. Chernov, C. Lesort, N. Simanyi},
journal={Journal of Complexity, Vol. 20, Issue 4, August 2004, pp. 484-492},
year={2003},
doi={10.1016/j.jco.2004.01.004},
archivePrefix={arXiv},
eprint={cs/0308023},
primaryClass={cs.CC cs.CV}
} | chernov2003on |
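As background for the abstract's circle-fitting discussion, here is the classical algebraic (Kasa) circle fit, the unweighted baseline behind the gradient-weighted fits the paper analyzes; the paper's complexity reduction concerns the weighted variant, which this sketch does not implement.

```python
# Classical algebraic (Kasa) circle fit by linear least squares;
# the gradient-weighted variants the paper studies build on this form.
import numpy as np

def kasa_circle_fit(pts):
    """Fit x^2 + y^2 + B x + C y + D = 0 and return center (a, b)
    and radius r, using a = -B/2, b = -C/2, r = sqrt(a^2 + b^2 - D)."""
    x, y = pts[:, 0], pts[:, 1]
    A = np.column_stack([x, y, np.ones_like(x)])
    rhs = -(x**2 + y**2)
    (B, C, D), *_ = np.linalg.lstsq(A, rhs, rcond=None)
    a, b = -B / 2, -C / 2
    r = np.sqrt(a * a + b * b - D)
    return a, b, r

pts = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])
print(kasa_circle_fit(pts))  # approximately (0, 0, 1)
```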
arxiv-671369 | cs/0308024 | Relational Grid Monitoring Architecture (R-GMA) | <|reference_start|>Relational Grid Monitoring Architecture (R-GMA): We describe R-GMA (Relational Grid Monitoring Architecture) which has been developed within the European DataGrid Project as a Grid Information and Monitoring System. It is based on the GMA from GGF, which is a simple Consumer-Producer model. The special strength of this implementation comes from the power of the relational model. We offer a global view of the information as if each Virtual Organisation had one large relational database. We provide a number of different Producer types with different characteristics; for example some support streaming of information. We also provide combined Consumer/Producers, which are able to combine information and republish it. At the heart of the system is the mediator, which for any query is able to find and connect to the best Producers for the job. We have developed components to allow a measure of inter-working between MDS and R-GMA. We have used it both for information about the grid (primarily to find out what services are available at any one time) and for application monitoring. R-GMA has been deployed in various testbeds; we describe some preliminary results and experiences of this deployment.<|reference_end|> | arxiv | @article{byrom2003relational,
title={Relational Grid Monitoring Architecture (R-GMA)},
author={Rob Byrom, Brian Coghlan, Andrew W Cooke, Roney Cordenonsi, Linda
Cornwall, Abdeslem Djaoui, Laurence Field, Steve Fisher, Steve Hicks, Stuart
Kenny, Jason Leake, James Magowan, Werner Nutt, David O'Callaghan, Norbert
Podhorszki, John Ryan, Manish Soni, Paul Taylor, Antony J Wilson},
journal={arXiv preprint arXiv:cs/0308024},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308024},
primaryClass={cs.DC}
} | byrom2003relational |
arxiv-671370 | cs/0308025 | Controlled hierarchical filtering: Model of neocortical sensory processing | <|reference_start|>Controlled hierarchical filtering: Model of neocortical sensory processing: A model of sensory information processing is presented. The model assumes that learning of internal (hidden) generative models, which can predict the future and evaluate the precision of that prediction, is of central importance for information extraction. Furthermore, the model makes a bridge to goal-oriented systems and builds upon the structural similarity between the architecture of a robust controller and that of the hippocampal entorhinal loop. This generative control architecture is mapped to the neocortex and to the hippocampal entorhinal loop. Implicit memory phenomena, such as priming and prototype learning, are emerging features of the model. Mathematical theorems ensure stability and attractive learning properties of the architecture. Connections to reinforcement learning are also established: both the control network and the network with a hidden model converge to a (near) optimal policy under suitable conditions. Falsifying predictions, including the role of the feedback connections between neocortical areas, are made.<|reference_end|> | arxiv | @article{lorincz2003controlled,
title={Controlled hierarchical filtering: Model of neocortical sensory
processing},
author={Andras Lorincz},
journal={arXiv preprint arXiv:cs/0308025},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308025},
primaryClass={cs.NE cs.AI cs.LG q-bio.NC}
} | lorincz2003controlled |
arxiv-671371 | cs/0308026 | Improvements to time bracketed authentication | <|reference_start|>Improvements to time bracketed authentication: We describe a collection of techniques whereby audiovisual or other recordings of significant events can be made in a way that hinders falsification, pre-dating, or post-dating by interested parties, even by the makers and operators of the recording equipment. A central feature of these techniques is the interplay between private information, which by its nature is untrustworthy and susceptible to suppression or manipulation by interested parties, and public information, which is too widely known to be manipulated by anyone. While authenticated recordings may be infeasible to falsify, they can be abused in other ways, such as being used for blackmail or harassment; but susceptibility to these abuses can be reduced by encryption and secret sharing.<|reference_end|> | arxiv | @article{bennett2003improvements,
title={Improvements to time bracketed authentication},
author={Charles H. Bennett},
journal={arXiv preprint arXiv:cs/0308026},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308026},
primaryClass={cs.CR cs.CY}
} | bennett2003improvements |
arxiv-671372 | cs/0308027 | A Comparison of Secret Sharing Schemes Based on Latin Squares and RSA | <|reference_start|>A Comparison of Secret Sharing Schemes Based on Latin Squares and RSA: In recent years there has been a great deal of work done on secret sharing schemes. Secret Sharing Schemes allow for the division of keys so that an authorised set of users may access information. In this paper we wish to present a critical comparison of two of these schemes, based on Latin Squares [Cooper et al.] and RSA [Shoup]. These two protocols will be examined in terms of the positive and negative aspects of their security.<|reference_end|> | arxiv | @article{wagner2003a,
title={A Comparison of Secret Sharing Schemes Based on Latin Squares and RSA},
author={Liam Wagner},
journal={arXiv preprint arXiv:cs/0308027},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308027},
primaryClass={cs.CR cs.DM math.CO}
} | wagner2003a |
arxiv-671373 | cs/0308028 | Finding Traitors in Secure Networks Using Byzantine Agreements | <|reference_start|>Finding Traitors in Secure Networks Using Byzantine Agreements: Secure networks rely upon players to maintain security and reliability. However not every player can be assumed to have total loyalty and one must use methods to uncover traitors in such networks. We use the original concept of the Byzantine Generals Problem by Lamport, and the more formal Byzantine Agreement described by Linial, to find traitors in secure networks. By applying general fault-tolerance methods to develop a more formal design of secure networks we are able to uncover traitors amongst a group of players. We also propose methods to integrate this system with insecure channels. This new resiliency can be applied to broadcast and peer-to-peer secure communication systems where agents may be traitors or become unreliable due to faults.<|reference_end|> | arxiv | @article{wagner2003finding,
title={Finding Traitors in Secure Networks Using Byzantine Agreements},
author={Liam Wagner and Stuart McDonald},
journal={arXiv preprint arXiv:cs/0308028},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308028},
primaryClass={cs.CR cs.DC cs.GT cs.MA}
} | wagner2003finding |
arxiv-671374 | cs/0308029 | On Decidability of Expressive Description Logics with Composition of Roles in Number Restrictions | <|reference_start|>On Decidability of Expressive Description Logics with Composition of Roles in Number Restrictions: Description Logics are knowledge representation formalisms which have been used in a wide range of application domains. Owing to their appealing expressiveness, we consider in this paper extensions of the well-known concept language ALC allowing for number restrictions on complex role expressions. These have been first introduced by Baader and Sattler as ALCN(M) languages, with the adoption of role constructors M subset-of {o,-,And,Or}. In particular, they showed in 1999 that, although ALCN(o) is decidable, the addition of other operators may easily lead to undecidability: in fact, ALCN(o,And) and ALCN(o,-,Or) were proved undecidable. In this work, we further investigate the computational properties of the ALCN family, aiming at narrowing the decidability gap left open by Baader and Sattler's results. In particular, we will show that ALCN(o) extended with inverse roles both in number and in value restrictions becomes undecidable, whereas it can be safely extended with qualified number restrictions without losing decidability.<|reference_end|> | arxiv | @article{grandi2003on,
title={On Decidability of Expressive Description Logics with Composition of
Roles in Number Restrictions},
author={Fabio Grandi},
journal={arXiv preprint arXiv:cs/0308029},
year={2003},
number={CSITE-01-02 (rev. 8/03)},
archivePrefix={arXiv},
eprint={cs/0308029},
primaryClass={cs.LO}
} | grandi2003on |
arxiv-671375 | cs/0308030 | Learning in Multiagent Systems: An Introduction from a Game-Theoretic Perspective | <|reference_start|>Learning in Multiagent Systems: An Introduction from a Game-Theoretic Perspective: We introduce the topic of learning in multiagent systems. We first provide a quick introduction to the field of game theory, focusing on the equilibrium concepts of iterated dominance, and Nash equilibrium. We show some of the most relevant findings in the theory of learning in games, including theorems on fictitious play, replicator dynamics, and evolutionary stable strategies. The CLRI theory and n-level learning agents are introduced as attempts to apply some of these findings to the problem of engineering multiagent systems with learning agents. Finally, we summarize some of the remaining challenges in the field of learning in multiagent systems.<|reference_end|> | arxiv | @article{vidal2003learning,
title={Learning in Multiagent Systems: An Introduction from a Game-Theoretic
Perspective},
author={Jose M. Vidal},
journal={In Eduardo Alonso, editor, Adaptive Agents: LNAI 2636. Springer
Verlag, 2003},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308030},
primaryClass={cs.MA cs.AI}
} | vidal2003learning |
arxiv-671376 | cs/0308031 | Artificial Neural Networks for Beginners | <|reference_start|>Artificial Neural Networks for Beginners: The scope of this teaching package is to give a brief introduction to Artificial Neural Networks (ANNs) for people who have no previous knowledge of them. We first give a brief introduction to models of networks, and then describe ANNs in general terms. As an application, we explain the backpropagation algorithm, since it is widely used and many other algorithms are derived from it. The user should know algebra and the handling of functions and vectors. Differential calculus is recommendable, but not necessary. The contents of this package should be understood by people with high school education. It would be useful for people who are just curious about what ANNs are, or for people who want to become familiar with them, so that when they study them more fully, they will already have clear notions of ANNs. Also, people who only want to apply the backpropagation algorithm without a detailed and formal explanation of it will find this material useful. This work should not be seen as "Nets for dummies", but of course it is not a treatise. Much of the formality is skipped for the sake of simplicity. Detailed explanations and demonstrations can be found in the referred readings. The included exercises complement the understanding of the theory. The on-line resources are highly recommended for extending this brief introduction.<|reference_end|> | arxiv | @article{gershenson2003artificial,
title={Artificial Neural Networks for Beginners},
author={Carlos Gershenson},
journal={arXiv preprint arXiv:cs/0308031},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308031},
primaryClass={cs.NE cs.AI}
} | gershenson2003artificial |
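Since the abstract singles out backpropagation, a minimal numpy sketch of the algorithm follows; the one-hidden-layer architecture, learning rate, and XOR task are arbitrary illustrative choices.

```python
# Minimal backpropagation sketch: one hidden layer, sigmoid units,
# squared error, trained on XOR. Bias units are appended as constant
# inputs. Typically drives the outputs toward [0, 1, 1, 0].
import numpy as np

rng = np.random.default_rng(0)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
T = np.array([[0], [1], [1], [0]], dtype=float)
Xb = np.hstack([X, np.ones((4, 1))])        # bias input for layer 1
W1 = rng.normal(0, 1, (3, 4))
W2 = rng.normal(0, 1, (5, 1))

sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
for _ in range(20000):
    H = sigmoid(Xb @ W1)                    # forward, hidden layer
    Hb = np.hstack([H, np.ones((4, 1))])    # bias input for layer 2
    Y = sigmoid(Hb @ W2)                    # forward, output layer
    d2 = (Y - T) * Y * (1 - Y)              # output delta
    d1 = (d2 @ W2[:-1].T) * H * (1 - H)     # backpropagated delta
    W2 -= 0.5 * Hb.T @ d2                   # gradient-descent updates
    W1 -= 0.5 * Xb.T @ d1
print(Y.round(2).ravel())
```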
arxiv-671377 | cs/0308032 | Evaluation of text data mining for database curation: lessons learned from the KDD Challenge Cup | <|reference_start|>Evaluation of text data mining for database curation: lessons learned from the KDD Challenge Cup: MOTIVATION: The biological literature is a major repository of knowledge. Many biological databases draw much of their content from a careful curation of this literature. However, as the volume of literature increases, the burden of curation increases. Text mining may provide useful tools to assist in the curation process. To date, the lack of standards has made it impossible to determine whether text mining techniques are sufficiently mature to be useful. RESULTS: We report on a Challenge Evaluation task that we created for the Knowledge Discovery and Data Mining (KDD) Challenge Cup. We provided a training corpus of 862 articles consisting of journal articles curated in FlyBase, along with the associated lists of genes and gene products, as well as the relevant data fields from FlyBase. For the test, we provided a corpus of 213 new (`blind') articles; the 18 participating groups provided systems that flagged articles for curation, based on whether the article contained experimental evidence for gene expression products. We report on the evaluation results and describe the techniques used by the top performing groups. CONTACT: [email protected] KEYWORDS: text mining, evaluation, curation, genomics, data management<|reference_end|> | arxiv | @article{yeh2003evaluation,
title={Evaluation of text data mining for database curation: lessons learned
from the KDD Challenge Cup},
author={Alexander S. Yeh, Lynette Hirschman, Alexander A. Morgan},
journal={Bioinformatics Vol. 19 Suppl. 1 2003, pages i331-i339},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308032},
primaryClass={cs.CL q-bio.OT}
} | yeh2003evaluation |
arxiv-671378 | cs/0308033 | Coherent Keyphrase Extraction via Web Mining | <|reference_start|>Coherent Keyphrase Extraction via Web Mining: Keyphrases are useful for a variety of purposes, including summarizing, indexing, labeling, categorizing, clustering, highlighting, browsing, and searching. The task of automatic keyphrase extraction is to select keyphrases from within the text of a given document. Automatic keyphrase extraction makes it feasible to generate keyphrases for the huge number of documents that do not have manually assigned keyphrases. A limitation of previous keyphrase extraction algorithms is that the selected keyphrases are occasionally incoherent. That is, the majority of the output keyphrases may fit together well, but there may be a minority that appear to be outliers, with no clear semantic relation to the majority or to each other. This paper presents enhancements to the Kea keyphrase extraction algorithm that are designed to increase the coherence of the extracted keyphrases. The approach is to use the degree of statistical association among candidate keyphrases as evidence that they may be semantically related. The statistical association is measured using web mining. Experiments demonstrate that the enhancements improve the quality of the extracted keyphrases. Furthermore, the enhancements are not domain-specific: the algorithm generalizes well when it is trained on one domain (computer science documents) and tested on another (physics documents).<|reference_end|> | arxiv | @article{turney2003coherent,
title={Coherent Keyphrase Extraction via Web Mining},
author={Peter D. Turney (National Research Council of Canada)},
journal={Proceedings of the Eighteenth International Joint Conference on
Artificial Intelligence (IJCAI-03), (2003), Acapulco, Mexico, 434-439},
year={2003},
number={NRC-46496},
archivePrefix={arXiv},
eprint={cs/0308033},
primaryClass={cs.LG cs.CL cs.IR}
} | turney2003coherent |
arxiv-671379 | cs/0308034 | Fingerprint based bio-starter and bio-access | <|reference_start|>Fingerprint based bio-starter and bio-access: In this paper a safety and security system based on fingerprint technology is presented. The results suggest a new scenario where new cars can use a fingerprint sensor integrated in the car handle to allow access and in the dashboard as a starter button.<|reference_end|> | arxiv | @article{iovane2003fingerprint,
title={Fingerprint based bio-starter and bio-access},
author={G.Iovane, P.Giordano, C.Iovane, F.Rotulo},
journal={arXiv preprint arXiv:cs/0308034},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308034},
primaryClass={cs.CV}
} | iovane2003fingerprint |
arxiv-671380 | cs/0308035 | IS (Iris Security) | <|reference_start|>IS (Iris Security): In this paper a safety system based on iridology is presented. The results suggest a new scenario where the security problem in supervised and unsupervised areas can be treated with the present system and iris image recognition.<|reference_end|> | arxiv | @article{iovane2003is,
title={IS (Iris Security)},
author={G.Iovane, F.S.Tortoriello},
journal={arXiv preprint arXiv:cs/0308035},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308035},
primaryClass={cs.CV}
} | iovane2003is |
arxiv-671381 | cs/0308036 | The Rich-Club Phenomenon In The Internet Topology | <|reference_start|>The Rich-Club Phenomenon In The Internet Topology: We show that the Internet topology at the Autonomous System (AS) level has a rich--club phenomenon. The rich nodes, which are a small number of nodes with large numbers of links, are very well connected to each other. The rich--club is a core tier that we measured using the rich--club connectivity and the node--node link distribution. We obtained this core tier without any heuristic assumption between the ASes. The rich--club phenomenon is a simple qualitative way to differentiate between power law topologies and provides a criterion for new network models. To show this, we compared the measured rich--club of the AS graph with networks obtained using the Barab\'asi--Albert (BA) scale--free network model, the Fitness BA model and the Inet--3.0 model.<|reference_end|> | arxiv | @article{zhou2003the,
title={The Rich-Club Phenomenon In The Internet Topology},
author={Shi Zhou and Raul J Mondragon},
journal={IEEE Communications Letters, vol. 8, no. 3, pp.180-182, March
2004.},
year={2003},
doi={10.1109/LCOMM.2004.823426},
archivePrefix={arXiv},
eprint={cs/0308036},
primaryClass={cs.NI}
} | zhou2003the |
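The rich-club connectivity measurement described in the abstract can be sketched in a few lines; the definition used here (links among the r highest-degree nodes divided by the maximum possible r(r-1)/2) follows the abstract, while the toy hub graph is a made-up example.

```python
# Rich-club connectivity: sort nodes by degree, take the r "richest",
# and divide the links among them by the maximum possible r*(r-1)/2.
def rich_club_connectivity(edges, r):
    deg = {}
    for u, v in edges:
        deg[u] = deg.get(u, 0) + 1
        deg[v] = deg.get(v, 0) + 1
    rich = set(sorted(deg, key=deg.get, reverse=True)[:r])
    links = sum(1 for u, v in edges if u in rich and v in rich)
    return links / (r * (r - 1) / 2)

# Three hubs forming a clique, each with pendant leaves.
edges = [(0,1),(0,2),(1,2)] + [(0,3),(0,4),(1,5),(1,6),(2,7),(2,8)]
print(rich_club_connectivity(edges, 3))  # 1.0: hubs fully interlinked
```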
arxiv-671382 | cs/0308037 | Distributed and Parallel Net Imaging | <|reference_start|>Distributed and Parallel Net Imaging: A very complex vision system is developed to detect luminosity variations connected with the discovery of new planets in the Universe. Traditional imaging systems cannot manage such a large load. A private network is implemented to realize an automatic vision and decision architecture. It allows on-line discrimination of interesting events by using two levels of triggers. This system can even manage many Tbytes of data per day. The architecture avails itself of a distributed parallel network system based on a maximum of 256 standard workstations with Microsoft Windows as the OS.<|reference_end|> | arxiv | @article{iovane2003distributed,
title={Distributed and Parallel Net Imaging},
author={G.Iovane},
journal={arXiv preprint arXiv:cs/0308037},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308037},
primaryClass={cs.CV astro-ph cs.DC}
} | iovane2003distributed |
arxiv-671383 | cs/0308038 | Image Analysis in Astronomy for very large vision machine | <|reference_start|>Image Analysis in Astronomy for very large vision machine: A very complex hardware/software system is developed to detect luminosity variations connected with the discovery of new planets outside the Solar System. Traditional imaging approaches are very demanding in terms of computing time; hence, the implementation of an automatic vision and decision software architecture is presented. It allows on-line discrimination of interesting events by using two levels of triggers. A fundamental challenge was to work with very large CCD cameras (up to 16k*16k pixels) in line with very large telescopes. To this end, the architecture can use a distributed parallel network system based on a maximum of 256 standard workstations.<|reference_end|> | arxiv | @article{iovane2003image,
title={Image Analysis in Astronomy for very large vision machine},
author={G.Iovane},
journal={arXiv preprint arXiv:cs/0308038},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308038},
primaryClass={cs.CV astro-ph cs.DC}
} | iovane2003image |
arxiv-671384 | cs/0308039 | A new approach to relevancy in Internet searching - the "Vox Populi Algorithm" | <|reference_start|>A new approach to relevancy in Internet searching - the "Vox Populi Algorithm": In this paper we derive a new algorithm for Internet searching. The main idea of this algorithm is to extend existing algorithms with a component that reflects the interests of the users better than existing methods do. The "Vox Populi Algorithm" (VPA) creates feedback from the users to the content of the search index. The information derived from analysis of the users' queries is used to modify the existing crawling algorithms. The VPA controls the distribution of the resources of the crawler. Finally, we also discuss methods of suppressing unwanted content (spam).<|reference_end|> | arxiv | @article{schaale2003a,
title={A new approach to relevancy in Internet searching - the "Vox Populi
Algorithm"},
author={Andreas Schaale, Carsten Wulf-Mathies, Soenke Lieberam-Schmidt},
journal={arXiv preprint arXiv:cs/0308039},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308039},
primaryClass={cs.DS cond-mat.dis-nn cs.IR}
} | schaale2003a |
arxiv-671385 | cs/0308040 | Open source software and peer review | <|reference_start|>Open source software and peer review: We compare the open source model of software development to peer review in academia.<|reference_end|> | arxiv | @article{rai2003open,
title={Open source software and peer review},
author={Sanatan Rai},
journal={arXiv preprint arXiv:cs/0308040},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308040},
primaryClass={cs.SE cs.CY}
} | rai2003open |
arxiv-671386 | cs/0308041 | Static Data Structure for Discrete Advance Bandwidth Reservations on the Internet | <|reference_start|>Static Data Structure for Discrete Advance Bandwidth Reservations on the Internet: In this paper we present a discrete data structure for reservations of limited resources. A reservation is defined as a tuple consisting of the time interval of when the resource should be reserved, $I_R$, and the amount of the resource that is reserved, $B_R$, formally $R=\{I_R,B_R\}$. The data structure is similar to a segment tree. The maximum spanning interval of the data structure is fixed and defined in advance. The granularity and thereby the size of the intervals of the leaves is also defined in advance. The data structure is built only once. Neither nodes nor leaves are ever inserted, deleted or moved. Hence, the running time of the operations does not depend on the number of reservations previously made. The running time does not depend on the size of the interval of the reservation either. Let $n$ be the number of leaves in the data structure. In the worst case, the number of touched (i.e. traversed) nodes is in any operation $O(\log n)$, hence the running time of any operation is also $O(\log n)$.<|reference_end|> | arxiv | @article{brodnik2003static,
title={Static Data Structure for Discrete Advance Bandwidth Reservations on the
Internet},
author={Andrej Brodnik and Andreas Nilsson},
journal={arXiv preprint arXiv:cs/0308041},
year={2003},
number={IMFM-(2003)-PS-889},
archivePrefix={arXiv},
eprint={cs/0308041},
primaryClass={cs.DS}
} | brodnik2003static |
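A minimal sketch of such a static reservation tree: a segment tree over a fixed number of time slots with range-add (book bandwidth) and range-max (check load), each touching O(log n) nodes regardless of how many reservations were made before. The admission test against a fixed capacity and all names are our own assumptions; the paper's exact node layout may differ.

    import math

    class ReservationTree:
        def __init__(self, n_slots, capacity):
            self.n = 1 << max(0, math.ceil(math.log2(n_slots)))
            self.capacity = capacity
            self.mx = [0] * (2 * self.n)    # max reserved amount in subtree
            self.lazy = [0] * (2 * self.n)  # pending addition for whole subtree

        def _add(self, v, lo, hi, i, j, amt):
            if j < lo or hi < i:
                return
            if i <= lo and hi <= j:          # node interval fully covered
                self.mx[v] += amt
                self.lazy[v] += amt
                return
            mid = (lo + hi) // 2
            self._add(2 * v, lo, mid, i, j, amt)
            self._add(2 * v + 1, mid + 1, hi, i, j, amt)
            self.mx[v] = self.lazy[v] + max(self.mx[2 * v], self.mx[2 * v + 1])

        def _max(self, v, lo, hi, i, j):
            if j < lo or hi < i:
                return 0
            if i <= lo and hi <= j:
                return self.mx[v]
            mid = (lo + hi) // 2
            return self.lazy[v] + max(self._max(2 * v, lo, mid, i, j),
                                      self._max(2 * v + 1, mid + 1, hi, i, j))

        def reserve(self, i, j, amount):
            # Admit R = ([i, j], amount) only if no slot would exceed capacity.
            if self._max(1, 0, self.n - 1, i, j) + amount > self.capacity:
                return False
            self._add(1, 0, self.n - 1, i, j, amount)
            return True
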
arxiv-671387 | cs/0308042 | Centralized reward system gives rise to fast and efficient work sharing for intelligent Internet agents lacking direct communication | <|reference_start|>Centralized reward system gives rise to fast and efficient work sharing for intelligent Internet agents lacking direct communication: The WWW has a scale-free structure in which novel information is often difficult to locate; moreover, intelligent agents easily get trapped in this structure. Here a novel method is put forth that turns these traps into information repositories, or supplies: we populated an Internet environment with intelligent news foragers. Foraging has an associated cost, whereas foragers are rewarded if they detect novel, not yet discovered information. The intelligent news foragers crawl by using the estimated long-term cumulated reward, and also have a finite-sized memory: the list of the most promising supplies. Foragers form an artificial life community: the most successful ones are allowed to multiply, while unsuccessful ones die out. The specific property of this community is that there is no direct communication amongst the foragers beyond the centralized rewarding system. Still, fast division of work is achieved.<|reference_end|> | arxiv | @article{palotai2003centralized,
title={Centralized reward system gives rise to fast and efficient work sharing
for intelligent Internet agents lacking direct communication},
author={Zsolt Palotai, Sandor Mandusitz, Andras Lorincz},
journal={arXiv preprint arXiv:cs/0308042},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308042},
primaryClass={cs.IR}
} | palotai2003centralized |
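The mechanics just described -- value-driven crawling, a reward only for the first discoverer, a bounded list of promising supplies -- can be sketched as follows. The update rule and all parameters are our own stand-ins for the paper's unspecified reinforcement scheme; a selection step that clones wealthy foragers and removes poor ones would complete the artificial-life loop.

    import random

    SEEN = set()                      # the centralized novelty registry

    class Forager:
        def __init__(self, memory_size=5, lr=0.1, eps=0.1):
            self.value = {}           # estimated long-term reward per supply
            self.memory_size, self.lr, self.eps = memory_size, lr, eps
            self.wealth = 0.0

        def step(self, frontier, fetch_cost=0.01):
            # epsilon-greedy choice between exploring and the best known supply
            if random.random() < self.eps or not self.value:
                url = random.choice(frontier)
            else:
                url = max(self.value, key=self.value.get)
            reward = -fetch_cost
            if url not in SEEN:       # only the first discoverer is rewarded
                SEEN.add(url)
                reward += 1.0
            self.wealth += reward
            old = self.value.get(url, 0.0)
            self.value[url] = old + self.lr * (reward - old)
            if len(self.value) > self.memory_size:   # finite-sized memory
                del self.value[min(self.value, key=self.value.get)]
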
arxiv-671388 | cs/0308043 | Note on Needle in a Haystack | <|reference_start|>Note on Needle in a Haystack: Introduced below is a quantum database method, not only for retrieval but also for creation. It uses a particular structure of trues and falses in a state vector of $n$ qubits, permitting up to $2^{2^n}$ words, vastly more than is possible with classical bits. Several copies are produced so that later they can be destructively observed and a word determined with high probability. Grover's algorithm is proposed below to read out, nondestructively, the unknown contents of a given stored state vector using only one state vector.<|reference_end|> | arxiv | @article{burger2003note,
title={Note on Needle in a Haystack},
author={John Robert Burger},
journal={arXiv preprint arXiv:cs/0308043},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308043},
primaryClass={cs.ET quant-ph}
} | burger2003note |
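For scale, the Grover readout invoked above amplifies a marked basis state in about (pi/4)*sqrt(N) iterations. A small classical simulation of the standard oracle-plus-diffusion loop (not the paper's specific readout circuit):

    import math

    def grover(n_qubits, marked):
        N = 2 ** n_qubits
        amp = [1 / math.sqrt(N)] * N            # uniform superposition
        for _ in range(int(math.pi / 4 * math.sqrt(N))):
            amp[marked] = -amp[marked]          # oracle: phase-flip marked item
            mean = sum(amp) / N
            amp = [2 * mean - a for a in amp]   # diffusion: invert about mean
        return amp[marked] ** 2                 # probability of reading it out

    print(grover(6, marked=42))                 # ~0.997 for N = 64
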
arxiv-671389 | cs/0308044 | EqRank: A Self-Consistent Equivalence Relation on Graph Vertexes | <|reference_start|>EqRank: A Self-Consistent Equivalence Relation on Graph Vertexes: A new method of hierarchical clustering of graph vertexes is suggested. In the method, the graph partition is determined by an equivalence relation satisfying a recursive definition stating that vertexes are equivalent if the vertexes they point to (or the vertexes pointing to them) are equivalent. Iterative application of the partitioning yields a hierarchical clustering of graph vertexes. The method is applied to the citation graph of hep-th. The outcome is a two-level classification scheme for the subject field presented in hep-th, and an indexing of the papers from hep-th in this scheme. A number of tests show that the classification obtained is adequate.<|reference_end|> | arxiv | @article{pivovarov2003eqrank:,
title={EqRank: A Self-Consistent Equivalence Relation on Graph Vertexes},
author={Grigorii Pivovarov and Sergei Trunov},
journal={arXiv preprint arXiv:cs/0308044},
year={2003},
archivePrefix={arXiv},
eprint={cs/0308044},
primaryClass={cs.DS cs.DL}
} | pivovarov2003eqrank: |
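The recursive definition -- vertexes are equivalent if the vertexes they point to are equivalent -- is a fixpoint that partition refinement computes. A sketch of the successor-direction case (the initial partition, termination test, and naming are our own choices; the full method also uses the predecessor direction and iterates to build the hierarchy):

    def eqrank_partition(graph):
        # graph: dict mapping each vertex to the set of vertexes it points to
        label = {v: 0 for v in graph}              # start with a single class
        while True:
            # signature: current class plus the set of classes pointed to
            sig = {v: (label[v], frozenset(label[w] for w in graph[v]))
                   for v in graph}
            ids = {s: i for i, s in enumerate(set(sig.values()))}
            new = {v: ids[sig[v]] for v in graph}
            if len(set(new.values())) == len(set(label.values())):
                return new                         # partition is self-consistent
            label = new

Each pass refines the previous partition, so the class count is nondecreasing and bounded by the number of vertexes; equal counts on consecutive passes mean the fixpoint has been reached.
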
arxiv-671390 | cs/0309001 | Media Affordances of a Mobile Push-To-Talk Communication Service | <|reference_start|>Media Affordances of a Mobile Push-To-Talk Communication Service: This paper presents an exploratory study of college-age students using two-way, push-to-talk cellular radios. We describe the observed and reported use of cellular radio by the participants, the activities and purposes for which they adopted it, and their responses. We then examine these empirical results using mediated communication theory. Cellular radios have a unique combination of affordances relative to other media used by this age group, including instant messaging (IM) and mobile phones; the results of our analysis do suggest explanations for some observed phenomena but also highlight the counter-intuitive nature of other phenomena. For example, although the radios have many important dissimilarities with IM from the viewpoint of mediated communication theory, the observed use patterns resembled those of IM to a surprising degree.<|reference_end|> | arxiv | @article{woodruff2003media,
title={Media Affordances of a Mobile Push-To-Talk Communication Service},
author={Allison Woodruff and Paul M. Aoki},
journal={arXiv preprint arXiv:cs/0309001},
year={2003},
archivePrefix={arXiv},
eprint={cs/0309001},
primaryClass={cs.HC}
} | woodruff2003media |
arxiv-671391 | cs/0309002 | El informe NERA analizado | <|reference_start|>El informe NERA analizado: This is a review of the article "Government Preferences for Promoting Open-Source Software: A Solution in Search of A Problem" by David Evans and Bernard J. Reddy. This report was paid for by Microsoft and put together at its request. Now Microsoft is using it as part of their lobbying campaign in Europe against governments' promotion of Open Source Software. As expected, this article is strongly biased and most of the conclusions are based upon false hypotheses and evidence.<|reference_end|> | arxiv | @article{galli2003el,
title={El informe NERA analizado},
author={Ricardo Galli},
journal={arXiv preprint arXiv:cs/0309002},
year={2003},
archivePrefix={arXiv},
eprint={cs/0309002},
primaryClass={cs.CY}
} | galli2003el |
arxiv-671392 | cs/0309003 | Model Checking Linear Logic Specifications | <|reference_start|>Model Checking Linear Logic Specifications: The overall goal of this paper is to investigate the theoretical foundations of algorithmic verification techniques for first order linear logic specifications. The fragment of linear logic we consider in this paper is based on the linear logic programming language called LO enriched with universally quantified goal formulas. Although LO was originally introduced as a theoretical foundation for extensions of logic programming languages, it can also be viewed as a very general language to specify a wide range of infinite-state concurrent systems. Our approach is based on the relation between backward reachability and provability highlighted in our previous work on propositional LO programs. Following this line of research, we define here a general framework for the bottom-up evaluation of first order linear logic specifications. The evaluation procedure is based on an effective fixpoint operator working on a symbolic representation of infinite collections of first order linear logic formulas. The theory of well quasi-orderings can be used to provide sufficient conditions for the termination of the evaluation of non trivial fragments of first order linear logic.<|reference_end|> | arxiv | @article{bozzano2003model,
title={Model Checking Linear Logic Specifications},
author={M. Bozzano, G. Delzanno and M. Martelli},
journal={arXiv preprint arXiv:cs/0309003},
year={2003},
archivePrefix={arXiv},
eprint={cs/0309003},
primaryClass={cs.PL cs.SC}
} | bozzano2003model |
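The evaluation procedure is a backward-reachability fixpoint. Over an explicit finite transition relation its shape is simply the loop below; the paper's contribution is making this loop effective on a symbolic representation of infinite sets of first-order LO formulas, which the sketch does not attempt.

    def backward_reachable(bad_states, predecessors):
        # predecessors: dict mapping a state to the states with an edge into it
        reached = set(bad_states)
        frontier = set(bad_states)
        while frontier:                  # fixpoint: stop when nothing is new
            new = {p for s in frontier
                   for p in predecessors.get(s, ())} - reached
            reached |= new
            frontier = new
        return reached
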
arxiv-671393 | cs/0309004 | The Structure of Information | <|reference_start|>The Structure of Information: A formal model of the structure of information is presented in five axioms which define identity, containment, and joins of infons. Joins are shown to be commutative and associative, to provide inverses of infons, and, potentially, to have many identity elements, two of which are multiplicative and additive. Those two types of join are distributive. The other identity elements are for operators on entwined states. Multiplicative joins correspond to adding new bits to a system or removing them, while additive joins correspond to a change of state. The order, or size, of an infon is defined. This groundwork is intended to be used to model continuous and discrete information structures through time, especially in closed systems.<|reference_end|> | arxiv | @article{long2003the,
title={The Structure of Information},
author={Bruce Long},
journal={arXiv preprint arXiv:cs/0309004},
year={2003},
archivePrefix={arXiv},
eprint={cs/0309004},
primaryClass={cs.LO}
} | long2003the |
arxiv-671394 | cs/0309005 | Indexing Schemes for Similarity Search In Datasets of Short Protein Fragments | <|reference_start|>Indexing Schemes for Similarity Search In Datasets of Short Protein Fragments: We propose a family of very efficient hierarchical indexing schemes for ungapped, score matrix-based similarity search in large datasets of short (4-12 amino acid) protein fragments. This type of similarity search is important both as a building block for more complex algorithms and for possible use in direct biological investigations, where datasets are of the order of 60 million objects. Our scheme is based on the internal geometry of the amino acid alphabet and performs exceptionally well, for example outputting 100 nearest neighbours to any possible fragment of length 10 after scanning on average less than one per cent of the entire dataset.<|reference_end|> | arxiv | @article{stojmirovic2003indexing,
title={Indexing Schemes for Similarity Search In Datasets of Short Protein
Fragments},
author={Aleksandar Stojmirovic and Vladimir Pestov},
journal={Information Systems 32 (2007), 1145-1165},
year={2003},
archivePrefix={arXiv},
eprint={cs/0309005},
primaryClass={cs.DS q-bio.BM}
} | stojmirovic2003indexing |
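The query such an index answers is k-nearest-neighbour search under an ungapped score-matrix similarity. The brute-force baseline below scans 100% of the data -- the paper's hierarchical schemes answer the same query while scanning under one per cent. The two-letter toy matrix is our own; real use would load, e.g., a BLOSUM matrix.

    import heapq

    def fragment_score(a, b, matrix):
        # ungapped similarity: sum of per-position substitution scores
        return sum(matrix[x, y] for x, y in zip(a, b))

    def nearest_fragments(query, dataset, matrix, k=100):
        return heapq.nlargest(k, dataset,
                              key=lambda f: fragment_score(query, f, matrix))

    toy = {("A", "A"): 4, ("A", "G"): 0, ("G", "A"): 0, ("G", "G"): 6}
    print(nearest_fragments("AGGA", ["AAAA", "AGGA", "GGGG"], toy, k=2))
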
arxiv-671395 | cs/0309006 | The KR-Benes Network: A Control-Optimal Rearrangeable Permutation Network | <|reference_start|>The KR-Benes Network: A Control-Optimal Rearrangeable Permutation Network: The Benes network has been used as a rearrangeable network for over 40 years, yet the uniform $N(2 \log N - 1)$ control complexity of the $N \times N$ Benes is not optimal for many permutations. In this paper, we present a novel $O(\log N)$ depth rearrangeable network called KR-Benes that is {\it permutation-specific control-optimal}. The KR-Benes routes {\it every} permutation with the minimal control complexity {\it specific} to that permutation, and its worst-case complexity for arbitrary permutations is bounded by that of the Benes; thus it replaces the Benes when considering control complexity/latency. We design the KR-Benes by first constructing a restricted $2 \log K + 2$ depth rearrangeable network called $K$-Benes for routing $K$-bounded permutations with control $2N \log K$, $0 \leq K \leq N/4$. We then show that the $N \times N$ Benes network itself (with one additional stage) contains every $K$-Benes network as a subgraph and use this property to construct the KR-Benes network. With regard to the control-optimality of the KR-Benes, we show that any optimal network for rearrangeably routing $K$-bounded permutations must have depth $2 \log K + 2$, and therefore the $K$-Benes (and hence the KR-Benes) is optimal.<|reference_end|> | arxiv | @article{kannan2003the,
title={The KR-Benes Network: A Control-Optimal Rearrangeable Permutation
Network},
author={Rajgopal Kannan},
journal={IEEE Transactions on Computers, Vol. 54, No. 5, pp. 534-544, May
2005.},
year={2003},
number={LSU Computer Science Tech Report LSU-CSC-TR03-01},
archivePrefix={arXiv},
eprint={cs/0309006},
primaryClass={cs.NI cs.CC}
} | kannan2003the |
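Plugging in the abstract's formulas shows the trade the KR-Benes exploits: a K-bounded permutation needs only 2N log K control bits instead of the uniform N(2 log N - 1). The helper below merely evaluates those expressions; how the KR-Benes selects the embedded K-Benes is not modeled here.

    import math

    def control_bits(N, K):
        benes = N * (2 * math.log2(N) - 1)        # uniform Benes cost
        if not 1 <= K <= N // 4:
            return benes                          # fall back to the full Benes
        k_benes = 2 * N * math.log2(K) if K > 1 else 0
        return min(benes, k_benes)

    print(control_bits(1024, 4))                  # 4096.0 vs. 19456.0 bits
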
arxiv-671396 | cs/0309007 | ROC Curves Within the Framework of Neural Network Assembly Memory Model: Some Analytic Results | <|reference_start|>ROC Curves Within the Framework of Neural Network Assembly Memory Model: Some Analytic Results: On the basis of the convolutional (Hamming) version of the recent Neural Network Assembly Memory Model (NNAMM), optimal receiver operating characteristics (ROCs) have been derived analytically for an intact two-layer autoassociative Hopfield network. A method of explicitly taking into account the a priori probabilities of alternative hypotheses on the structure of information initiating memory trace retrieval is introduced, together with modified ROCs (mROCs, a posteriori probabilities of correct recall vs. false alarm probability). The comparison of empirical and calculated ROCs (or mROCs) demonstrates that they coincide quantitatively, and in this way the intensities of cues used in appropriate experiments may be estimated. It has been found that basic ROC properties, which are among the experimental findings underpinning dual-process models of recognition memory, can be explained within our one-factor NNAMM.<|reference_end|> | arxiv | @article{gopych2003roc,
title={ROC Curves Within the Framework of Neural Network Assembly Memory Model:
Some Analytic Results},
author={Petro M. Gopych},
journal={International Journal on Information Theories & Applications,
2003, vol. 10, no.2, pp.189-197.},
year={2003},
archivePrefix={arXiv},
eprint={cs/0309007},
primaryClass={cs.AI cs.IR q-bio.NC q-bio.QM}
} | gopych2003roc |
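The paper derives its ROCs analytically from the memory model; the empirical curves they are compared against come from the standard construction -- sweep a decision threshold and plot correct-recall rate against false-alarm rate:

    def roc_points(signal_scores, noise_scores):
        thresholds = sorted(set(signal_scores) | set(noise_scores), reverse=True)
        pts = []
        for t in thresholds:
            hit = sum(s >= t for s in signal_scores) / len(signal_scores)
            fa = sum(s >= t for s in noise_scores) / len(noise_scores)
            pts.append((fa, hit))    # one (false alarm, correct recall) point
        return pts

    print(roc_points([0.9, 0.8, 0.6], [0.7, 0.4, 0.2]))
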
arxiv-671397 | cs/0309008 | Digital Version of Green's Theorem and its Application to The Coverage Problem in Formal Verification | <|reference_start|>Digital Version of Green's Theorem and its Application to The Coverage Problem in Formal Verification: We present a novel scheme for the coverage problem, introducing a quantitative way to estimate the interaction between a block and its environment. This is achieved by establishing a discrete version of Green's theorem, specially adapted for Model Checking based verification of integrated circuits. This method is best suited for the coverage problem since it enables one to quantify the incompleteness or, on the other hand, the redundancy of a set of rules describing the model under verification. Moreover, this can be done continuously throughout the verification process, thus enabling the user to pinpoint the stages at which incompleteness/redundancy occurs. Although the method is presented locally on a small hardware example, we additionally show its potential to provide precise coverage estimation for large-scale systems as well. We compare this method to others by checking it on the same test cases.<|reference_end|> | arxiv | @article{appleboim2003digital,
title={Digital Version of Green's Theorem and its Application to The Coverage
Problem in Formal Verification},
author={Eli Appleboim, Emil Saucan},
journal={arXiv preprint arXiv:cs/0309008},
year={2003},
archivePrefix={arXiv},
eprint={cs/0309008},
primaryClass={cs.SC}
} | appleboim2003digital |
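The discrete Green identity underlying the scheme equates a sum over interior cells with a sum over the boundary alone. A numeric check on a rectangular grid (the edge-value layout and orientation conventions are our own; the paper's verification-specific formulation differs):

    import random

    def green_identity_check(m, n):
        # P[i][j]: value on horizontal edge (i, j) -> (i+1, j)
        # Q[i][j]: value on vertical edge (i, j) -> (i, j+1)
        P = [[random.random() for _ in range(n + 1)] for _ in range(m)]
        Q = [[random.random() for _ in range(n)] for _ in range(m + 1)]
        # the summed counterclockwise circulations of all unit cells...
        cells = sum(P[i][j] + Q[i + 1][j] - P[i][j + 1] - Q[i][j]
                    for i in range(m) for j in range(n))
        # ...equal the circulation around the outer boundary alone
        boundary = (sum(P[i][0] for i in range(m))      # bottom
                    + sum(Q[m][j] for j in range(n))    # right
                    - sum(P[i][n] for i in range(m))    # top
                    - sum(Q[0][j] for j in range(n)))   # left
        return abs(cells - boundary) < 1e-9

    print(green_identity_check(8, 5))   # True: interior edges cancel pairwise
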
arxiv-671398 | cs/0309009 | What Is Working Memory and Mental Imagery? A Robot that Learns to Perform Mental Computations | <|reference_start|>What Is Working Memory and Mental Imagery? A Robot that Learns to Perform Mental Computations: This paper goes back to Turing (1936) and treats his machine as a cognitive model (W,D,B), where W is an "external world" represented by memory device (the tape divided into squares), and (D,B) is a simple robot that consists of the sensory-motor devices, D, and the brain, B. The robot's sensory-motor devices (the "eye", the "hand", and the "organ of speech") allow the robot to simulate the work of any Turing machine. The robot simulates the internal states of a Turing machine by "talking to itself." At the stage of training, the teacher forces the robot (by acting directly on its motor centers) to perform several examples of an algorithm with different input data presented on tape. Two effects are achieved: 1) The robot learns to perform the shown algorithm with any input data using the tape. 2) The robot learns to perform the algorithm "mentally" using an "imaginary tape." The model illustrates the simplest concept of a universal learning neurocomputer, demonstrates universality of associative learning as the mechanism of programming, and provides a simplified, but nontrivial neurobiologically plausible explanation of the phenomena of working memory and mental imagery. The model is implemented as a user-friendly program for Windows called EROBOT. The program is available at www.brain0.com/software.html.<|reference_end|> | arxiv | @article{eliashberg2003what,
title={What Is Working Memory and Mental Imagery? A Robot that Learns to
Perform Mental Computations},
author={Victor Eliashberg},
journal={arXiv preprint arXiv:cs/0309009},
year={2003},
archivePrefix={arXiv},
eprint={cs/0309009},
primaryClass={cs.AI cs.NE}
} | eliashberg2003what |
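Since the robot's claim is precisely that it "simulates the work of any Turing machine", a minimal table-driven interpreter makes the target behaviour concrete. The rule format and the unary-increment example are our own; EROBOT's internal encoding is of course different.

    def run_tm(tape, rules, state="q0", head=0, blank=" ", max_steps=10_000):
        # rules: (state, symbol) -> (next_state, symbol_to_write, "L" or "R")
        cells = dict(enumerate(tape))
        for _ in range(max_steps):
            if state == "halt":
                break
            state, write, move = rules[(state, cells.get(head, blank))]
            cells[head] = write
            head += 1 if move == "R" else -1
        return "".join(cells[i] for i in sorted(cells)).strip()

    # unary increment: scan right over the 1s, append a 1 at the first blank
    inc = {("q0", "1"): ("q0", "1", "R"), ("q0", " "): ("halt", "1", "R")}
    print(run_tm("111", inc))   # -> 1111
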
arxiv-671399 | cs/0309010 | Homomorphic public-key cryptosystems over groups and rings | <|reference_start|>Homomorphic public-key cryptosystems over groups and rings: We propose a new homomorphic public-key cryptosystem over an arbitrary nonidentity finite group, based on the difficulty of the membership problem for groups of integer matrices. In addition, a homomorphic cryptosystem over finite commutative rings is designed for the first time.<|reference_end|> | arxiv | @article{grigoriev2003homomorphic,
title={Homomorphic public-key cryptosystems over groups and rings},
author={Dima Grigoriev, Ilia Ponomarenko},
journal={arXiv preprint arXiv:cs/0309010},
year={2003},
archivePrefix={arXiv},
eprint={cs/0309010},
primaryClass={cs.CR}
} | grigoriev2003homomorphic |
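"Homomorphic" here means the group or ring operation can be carried out directly on ciphertexts. The paper's matrix-group construction is beyond a short sketch, but the property itself is easy to exhibit with textbook RSA, which is homomorphic for multiplication mod n (toy parameters, deliberately insecure):

    def rsa_homomorphism_demo():
        p, q, e = 61, 53, 17
        n = p * q
        d = pow(e, -1, (p - 1) * (q - 1))   # private exponent
        enc = lambda m: pow(m, e, n)
        dec = lambda c: pow(c, d, n)
        a, b = 7, 12
        # multiplying ciphertexts multiplies the hidden plaintexts
        assert dec(enc(a) * enc(b) % n) == (a * b) % n
        return "homomorphic property holds"

    print(rsa_homomorphism_demo())
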
arxiv-671400 | cs/0309011 | Indexing of Tables Referencing Complex Structures | <|reference_start|>Indexing of Tables Referencing Complex Structures: We introduce indexing of tables referencing complex structures, such as digraphs and spatial objects, as they appear in genetics and other data-intensive analyses. The indexing is achieved by extracting dimension schemas from the referenced structures. The schemas and their dimensionality are determined by proper coloring algorithms, and a duality between all such schemas and all possible proper colorings is established. This duality, in turn, provides us with an extensive library of solutions when addressing indexing questions. It is illustrated how to use the schemas, in connection with additional relational database technologies, to optimize queries conditioned on the structural information being referenced. Comparisons using bitmap indexing in the Oracle 9.2i database, on the one hand, and multidimensional clustering in DB2 8.1.2, on the other, are used to illustrate the applicability of the indexing to different technology settings. Finally, we illustrate how the indexing can be used to extract low-dimensional schemas from a binary interval tree in order to efficiently resolve interval and stabbing queries.<|reference_end|> | arxiv | @article{egilsson2003indexing,
title={Indexing of Tables Referencing Complex Structures},
author={Agust S. Egilsson and Hakon Gudbjartsson},
journal={arXiv preprint arXiv:cs/0309011},
year={2003},
archivePrefix={arXiv},
eprint={cs/0309011},
primaryClass={cs.DB}
} | egilsson2003indexing |
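The closing example concerns stabbing queries: which stored intervals contain a query point. A classical O(log n) baseline for the counting version needs only two sorted endpoint arrays; the paper's relational, schema-based route to the same queries is a different mechanism.

    import bisect

    class StabCounter:
        def __init__(self, intervals):      # closed intervals [a, b]
            self.starts = sorted(a for a, b in intervals)
            self.ends = sorted(b for a, b in intervals)

        def stab(self, x):
            # intervals with a <= x, minus those already ended (b < x)
            return (bisect.bisect_right(self.starts, x)
                    - bisect.bisect_left(self.ends, x))

    sc = StabCounter([(1, 4), (2, 6), (5, 9)])
    print(sc.stab(3), sc.stab(5), sc.stab(10))   # 2 2 0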