corpus_id | paper_id | title | abstract | source | bibtex | citation_key
---|---|---|---|---|---|---
arxiv-672101 | cs/0408045 | On computing the fixpoint of a set of boolean equations | <|reference_start|>On computing the fixpoint of a set of boolean equations: This paper presents a method for computing a least fixpoint of a system of equations over booleans. The resulting computation can be significantly shorter than the result of iteratively evaluating the entire system until a fixpoint is reached.<|reference_end|> | arxiv | @article{kuncak2004on,
title={On computing the fixpoint of a set of boolean equations},
author={Viktor Kuncak, K. Rustan M. Leino},
journal={arXiv preprint arXiv:cs/0408045},
year={2004},
number={MSR-TR-2003-08},
archivePrefix={arXiv},
eprint={cs/0408045},
primaryClass={cs.PL cs.LO cs.SE}
} | kuncak2004on |
arxiv-672102 | cs/0408046 | Authenticated tree parity machine key exchange | <|reference_start|>Authenticated tree parity machine key exchange: The synchronisation of Tree Parity Machines (TPMs), has proven to provide a valuable alternative concept for secure symmetric key exchange. Yet, from a cryptographer's point of view, authentication is at least as important as a secure exchange of keys. Adding an authentication via hashing e.g. is straightforward but with no relation to Neural Cryptography. We consequently formulate an authenticated key exchange within this concept. Another alternative, integrating a Zero-Knowledge protocol into the synchronisation, is also presented. A Man-In-The-Middle attack and even all currently known attacks, that are based on using identically structured TPMs and synchronisation as well, can so be averted. This in turn has practical consequences on using the trajectory in weight space. Both suggestions have the advantage of not affecting the previously observed physics of this interacting system at all.<|reference_end|> | arxiv | @article{volkmer2004authenticated,
title={Authenticated tree parity machine key exchange},
author={Markus Volkmer and Andr\'e Schaumburg},
journal={arXiv preprint arXiv:cs/0408046},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408046},
primaryClass={cs.CR cond-mat.dis-nn}
} | volkmer2004authenticated |
arxiv-672103 | cs/0408047 | Pervasive Service Architecture for a Digital Business Ecosystem | <|reference_start|>Pervasive Service Architecture for a Digital Business Ecosystem: In this paper we present ideas and architectural principles upon which we are basing the development of a distributed, open-source infrastructure that, in turn, will support the expression of business models, the dynamic composition of software services, and the optimisation of service chains through automatic self-organising and evolutionary algorithms derived from biology. The target users are small and medium-sized enterprises (SMEs). We call the collection of the infrastructure, the software services, and the SMEs a Digital Business Ecosystem (DBE).<|reference_end|> | arxiv | @article{heistracher2004pervasive,
title={Pervasive Service Architecture for a Digital Business Ecosystem},
author={Thomas Heistracher, Thomas Kurz, Claudius Masuch, Pierfranco
Ferronato, Miguel Vidal, Angelo Corallo, Gerard Briscoe, and Paolo Dini},
journal={arXiv preprint arXiv:cs/0408047},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408047},
primaryClass={cs.CE cs.NI}
} | heistracher2004pervasive |
arxiv-672104 | cs/0408048 | Journal of New Democratic Methods: An Introduction | <|reference_start|>Journal of New Democratic Methods: An Introduction: This paper describes a new breed of academic journals that use statistical machine learning techniques to make them more democratic. In particular, not only can anyone submit an article, but anyone can also become a reviewer. Machine learning is used to decide which reviewers accurately represent the views of the journal's readers and thus deserve to have their opinions carry more weight. The paper concentrates on describing a specific experimental prototype of a democratic journal called the Journal of New Democratic Methods (JNDM). The paper also mentions the wider implications that machine learning and the techniques used in the JNDM may have for representative democracy in general.<|reference_end|> | arxiv | @article{funge2004journal,
title={Journal of New Democratic Methods: An Introduction},
author={John David Funge},
journal={arXiv preprint arXiv:cs/0408048},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408048},
primaryClass={cs.CY cs.LG}
} | funge2004journal |
arxiv-672105 | cs/0408049 | Using Stochastic Encoders to Discover Structure in Data | <|reference_start|>Using Stochastic Encoders to Discover Structure in Data: In this paper a stochastic generalisation of the standard Linde-Buzo-Gray (LBG) approach to vector quantiser (VQ) design is presented, in which the encoder is implemented as the sampling of a vector of code indices from a probability distribution derived from the input vector, and the decoder is implemented as a superposition of reconstruction vectors. This stochastic VQ (SVQ) is optimised using a minimum mean Euclidean reconstruction distortion criterion, as in the LBG case. Numerical simulations are used to demonstrate how this leads to self-organisation of the SVQ, where different stochastically sampled code indices become associated with different input subspaces.<|reference_end|> | arxiv | @article{luttrell2004using,
title={Using Stochastic Encoders to Discover Structure in Data},
author={Stephen Luttrell},
journal={arXiv preprint arXiv:cs/0408049},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408049},
primaryClass={cs.NE cs.CV}
} | luttrell2004using |
arxiv-672106 | cs/0408050 | Invariant Stochastic Encoders | <|reference_start|>Invariant Stochastic Encoders: The theory of stochastic vector quantisers (SVQ) has been extended to allow the quantiser to develop invariances, so that only "large" degrees of freedom in the input vector are represented in the code. This has been applied to the problem of encoding data vectors which are a superposition of a "large" jammer and a "small" signal, so that only the jammer is represented in the code. This allows the jammer to be subtracted from the total input vector (i.e. the jammer is nulled), leaving a residual that contains only the underlying signal. The main advantage of this approach to jammer nulling is that little prior knowledge of the jammer is assumed, because these properties are automatically discovered by the SVQ as it is trained on examples of input vectors.<|reference_end|> | arxiv | @article{luttrell2004invariant,
title={Invariant Stochastic Encoders},
author={Stephen Luttrell},
journal={arXiv preprint arXiv:cs/0408050},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408050},
primaryClass={cs.NE cs.CV}
} | luttrell2004invariant |
arxiv-672107 | cs/0408051 | Scalable XSLT Evaluation | <|reference_start|>Scalable XSLT Evaluation: XSLT is an increasingly popular language for processing XML data. It is widely supported by application platform software. However, little optimization effort has been made inside the current XSLT processing engines. Evaluating a very simple XSLT program on a large XML document with a simple schema may result in extensive usage of memory. In this paper, we present a novel notion of \emph{Streaming Processing Model} (\emph{SPM}) to evaluate a subset of XSLT programs on XML documents, especially large ones. With SPM, an XSLT processor can transform an XML source document to other formats without extra memory buffers required. Therefore, our approach can not only tackle large source documents, but also produce large results. We demonstrate with a performance study the advantages of the SPM approach. Experimental results clearly confirm that SPM improves XSLT evaluation typically 2 to 10 times better than the existing approaches. Moreover, the SPM approach also features high scalability.<|reference_end|> | arxiv | @article{guo2004scalable,
title={Scalable XSLT Evaluation},
author={Zhimao Guo, Min Li, Xiaoling Wang, Aoying Zhou},
journal={In Proc. of APWeb, 2004},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408051},
primaryClass={cs.DB}
} | guo2004scalable |
arxiv-672108 | cs/0408052 | Application of the Double Metaphone Algorithm to Amharic Orthography | <|reference_start|>Application of the Double Metaphone Algorithm to Amharic Orthography: The Metaphone algorithm applies the phonetic encoding of orthographic sequences to simplify words prior to comparison. While Metaphone has been highly successful for the English language, for which it was designed, it may not be applied directly to Ethiopian languages. The paper details how the principles of Metaphone can be applied to Ethiopic script and uses Amharic as a case study. Match results improve as specific considerations are made for Amharic writing practices. Results are shown to improve further when common errors from Amharic input methods are considered.<|reference_end|> | arxiv | @article{yacob2004application,
title={Application of the Double Metaphone Algorithm to Amharic Orthography},
author={Daniel Yacob},
journal={arXiv preprint arXiv:cs/0408052},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408052},
primaryClass={cs.CL}
} | yacob2004application |
arxiv-672109 | cs/0408053 | Weighted average finite difference methods for fractional diffusion equations | <|reference_start|>Weighted average finite difference methods for fractional diffusion equations: Weighted averaged finite difference methods for solving fractional diffusion equations are discussed and different formulae of the discretization of the Riemann-Liouville derivative are considered. The stability analysis of the different numerical schemes is carried out by means of a procedure close to the well-known von Neumann method of ordinary diffusion equations. The stability bounds are easily found and checked in some representative examples.<|reference_end|> | arxiv | @article{yuste2004weighted,
title={Weighted average finite difference methods for fractional diffusion
equations},
author={Santos B. Yuste},
journal={arXiv preprint arXiv:cs/0408053},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408053},
primaryClass={cs.NA cond-mat.stat-mech physics.comp-ph}
} | yuste2004weighted |
arxiv-672110 | cs/0408054 | Providing Authentic Long-term Archival Access to Complex Relational Data | <|reference_start|>Providing Authentic Long-term Archival Access to Complex Relational Data: We discuss long-term preservation of and access to relational databases. The focus is on national archives and science data archives which have to ingest and integrate data from a broad spectrum of vendor-specific relational database management systems (RDBMS). Furthermore, we present our solution SIARD which analyzes and extracts data and data logic from almost any RDBMS. It enables, to a reasonable level of authenticity, complete detachment of databases from their vendor-specific environment. The user can add archival descriptive metadata according to a customizable schema. A SIARD database archive integrates data, data logic, technical metadata, and archival descriptive information in one archival information package, independent of any specific software and hardware, based upon plain text files and the standardized languages SQL and XML. For usage purposes, a SIARD archive can be reloaded into any current or future RDBMS which supports standard SQL. In addition, SIARD contains a client that enables 'on demand' reload of archives into a target RDBMS, and multi-user remote access for querying and browsing the data together with its technical and descriptive metadata in one graphical user interface.<|reference_end|> | arxiv | @article{heuscher2004providing,
title={Providing Authentic Long-term Archival Access to Complex Relational Data},
author={Stephan Heuscher, Stephan Jaermann, Peter Keller-Marxer, Frank Moehle
(Swiss Federal Archives)},
journal={Proceedings PV-2004: Ensuring the Long-Term Preservation and
Adding Value to the Scientific and Technical Data, 5-7 October 2004,
ESA/ESRIN, Frascati, Italy, (ESA WPP-232), Noordwijk: European Space Agency,
2004, pp. 241-261.},
year={2004},
number={ESA WPP-232, pp. 241-261},
archivePrefix={arXiv},
eprint={cs/0408054},
primaryClass={cs.DL cs.DB}
} | heuscher2004providing |
arxiv-672111 | cs/0408055 | Cauchy Annealing Schedule: An Annealing Schedule for Boltzmann Selection Scheme in Evolutionary Algorithms | <|reference_start|>Cauchy Annealing Schedule: An Annealing Schedule for Boltzmann Selection Scheme in Evolutionary Algorithms: Boltzmann selection is an important selection mechanism in evolutionary algorithms as it has theoretical properties which help in theoretical analysis. However, Boltzmann selection is not used in practice because a good annealing schedule for the `inverse temperature' parameter is lacking. In this paper we propose a Cauchy annealing schedule for Boltzmann selection scheme based on a hypothesis that selection-strength should increase as evolutionary process goes on and distance between two selection strengths should decrease for the process to converge. To formalize these aspects, we develop formalism for selection mechanisms using fitness distributions and give an appropriate measure for selection-strength. In this paper, we prove an important result, by which we derive an annealing schedule called Cauchy annealing schedule. We demonstrate the novelty of proposed annealing schedule using simulations in the framework of genetic algorithms.<|reference_end|> | arxiv | @article{dukkipati2004cauchy,
title={Cauchy Annealing Schedule: An Annealing Schedule for Boltzmann Selection
Scheme in Evolutionary Algorithms},
author={Ambedkar Dukkipati, M. Narasimha Murty and Shalabh Bhatnagar},
journal={Dukkipati, A., M. N. Murty, and S. Bhatnagar, 2004, in Proceedings
of the Congress on Evolutionary Computation (CEC'2004), IEEE Press, pp. 55-62},
year={2004},
doi={10.1109/CEC.2004.1330837},
archivePrefix={arXiv},
eprint={cs/0408055},
primaryClass={cs.AI}
} | dukkipati2004cauchy |
arxiv-672112 | cs/0408056 | A CHR-based Implementation of Known Arc-Consistency | <|reference_start|>A CHR-based Implementation of Known Arc-Consistency: In classical CLP(FD) systems, domains of variables are completely known at the beginning of the constraint propagation process. However, in systems interacting with an external environment, acquiring the whole domains of variables before the beginning of constraint propagation may cause waste of computation time, or even obsolescence of the acquired data at the time of use. For such cases, the Interactive Constraint Satisfaction Problem (ICSP) model has been proposed as an extension of the CSP model, to make it possible to start constraint propagation even when domains are not fully known, performing acquisition of domain elements only when necessary, and without the need for restarting the propagation after every acquisition. In this paper, we show how a solver for the two sorted CLP language, defined in previous work, to express ICSPs, has been implemented in the Constraint Handling Rules (CHR) language, a declarative language particularly suitable for high level implementation of constraint solvers.<|reference_end|> | arxiv | @article{alberti2004a,
title={A CHR-based Implementation of Known Arc-Consistency},
author={Marco Alberti, Marco Gavanelli, Evelina Lamma, Paola Mello, Michela
Milano},
journal={arXiv preprint arXiv:cs/0408056},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408056},
primaryClass={cs.LO cs.AI}
} | alberti2004a |
arxiv-672113 | cs/0408057 | The role of robust semantic analysis in spoken language dialogue systems | <|reference_start|>The role of robust semantic analysis in spoken language dialogue systems: In this paper we summarized a framework for designing grammar-based procedure for the automatic extraction of the semantic content from spoken queries. Starting with a case study and following an approach which combines the notions of fuzziness and robustness in sentence parsing, we showed we built practical domain-dependent rules which can be applied whenever it is possible to superimpose a sentence-level semantic structure to a text without relying on a previous deep syntactical analysis. This kind of procedure can be also profitably used as a pre-processing tool in order to cut out part of the sentence which have been recognized to have no relevance in the understanding process. In the case of particular dialogue applications where there is no need to build a complex semantic structure (e.g. word spotting or excerpting) the presented methodology may represent an efficient alternative solution to a sequential composition of deep linguistic analysis modules. Even if the query generation problem may not seem a critical application it should be held in mind that the sentence processing must be done on-line. Having this kind of constraints we cannot design our system without caring for efficiency and thus provide an immediate response. Another critical issue is related to whole robustness of the system. In our case study we tried to make experiences on how it is possible to deal with an unreliable and noisy input without asking the user for any repetition or clarification. This may correspond to a similar problem one may have when processing text coming from informal writing such as e-mails, news and in many cases Web pages where it is often the case to have irrelevant surrounding information.<|reference_end|> | arxiv | @article{ballim2004the,
title={The role of robust semantic analysis in spoken language dialogue systems},
author={Afzal Ballim and Vincenzo Pallotta},
journal={Proceedings of the 3rd International Workshop on Human-Computer
Conversation, July 3-5, 2000, Bellagio, Italy},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408057},
primaryClass={cs.CL cs.AI cs.HC}
} | ballim2004the |
arxiv-672114 | cs/0408058 | Non-negative matrix factorization with sparseness constraints | <|reference_start|>Non-negative matrix factorization with sparseness constraints: Non-negative matrix factorization (NMF) is a recently developed technique for finding parts-based, linear representations of non-negative data. Although it has successfully been applied in several applications, it does not always result in parts-based representations. In this paper, we show how explicitly incorporating the notion of `sparseness' improves the found decompositions. Additionally, we provide complete MATLAB code both for standard NMF and for our extension. Our hope is that this will further the application of these methods to solving novel data-analysis problems.<|reference_end|> | arxiv | @article{hoyer2004non-negative,
title={Non-negative matrix factorization with sparseness constraints},
author={Patrik O. Hoyer},
journal={arXiv preprint arXiv:cs/0408058},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408058},
primaryClass={cs.LG cs.NE}
} | hoyer2004non-negative |
arxiv-672115 | cs/0408059 | Proofing Tools Technology at Neurosoft SA | <|reference_start|>Proofing Tools Technology at Neurosoft SA: The aim of this paper is to present the R&D activities carried out at Neurosoft S.A. regarding the development of proofing tools for Modern Greek. Firstly, we focus on infrastructure issues that we faced during our initial steps. Subsequently, we describe the most important insights of three proofing tools developed by Neurosoft, i.e. the spelling checker, the hyphenator and the thesaurus, outlining their efficiencies and inefficiencies. Finally, we discuss some improvement ideas and give our future directions.<|reference_end|> | arxiv | @article{tsalidis2004proofing,
title={Proofing Tools Technology at Neurosoft S.A.},
author={Ch. Tsalidis (1), G. Orphanos (1), A. Iordanidou (2), A. Vagelatos (3)
((1) Neurosoft S.A. (2) Patras University, (3) RACTI)},
journal={arXiv preprint arXiv:cs/0408059},
year={2004},
number={CTI T.R.: 2004.06.01},
archivePrefix={arXiv},
eprint={cs/0408059},
primaryClass={cs.CL}
} | tsalidis2004proofing |
arxiv-672116 | cs/0408060 | Verbal chunk extraction in French using limited resources | <|reference_start|>Verbal chunk extraction in French using limited resources: A way of extracting French verbal chunks, inflected and infinitive, is explored and tested on effective corpus. Declarative morphological and local grammar rules specifying chunks and some simple contextual structures are used, relying on limited lexical information and some simple heuristic/statistic properties obtained from restricted corpora. The specific goals, the architecture and the formalism of the system, the linguistic information on which it relies and the obtained results on effective corpus are presented.<|reference_end|> | arxiv | @article{bes2004verbal,
title={Verbal chunk extraction in French using limited resources},
author={Gabriel G. Bes (GRIL), Lionel Lamadon (GRIL), Francois Trouilleux
(GRIL)},
journal={arXiv preprint arXiv:cs/0408060},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408060},
primaryClass={cs.CL}
} | bes2004verbal |
arxiv-672117 | cs/0408061 | An electronic dictionary as a basis for NLP tools: The Greek case | <|reference_start|>An electronic dictionary as a basis for NLP tools: The Greek case: The existence of a Dictionary in electronic form for Modern Greek (MG) is mandatory if one is to process MG at the morphological and syntactic levels since MG is a highly inflectional language with marked stress and a spelling system with many characteristics carried over from Ancient Greek. Moreover, such a tool becomes necessary if one is to create efficient and sophisticated NLP applications with substantial linguistic backing and coverage. The present paper will focus on the deployment of such an electronic dictionary for Modern Greek, which was built in two phases: first it was constructed to be the basis for a spelling correction schema and then it was reconstructed in order to become the platform for the deployment of a wider spectrum of NLP tools.<|reference_end|> | arxiv | @article{tsalidis2004an,
title={An electronic dictionary as a basis for NLP tools: The Greek case},
author={Ch. Tsalidis (1), A. Vagelatos (2) and G. Orphanos (1) ((1) Neurosoft
S.A. (2) RACTI)},
journal={arXiv preprint arXiv:cs/0408061},
year={2004},
number={CTI T.R.: 2004.04.03},
archivePrefix={arXiv},
eprint={cs/0408061},
primaryClass={cs.CL}
} | tsalidis2004an |
arxiv-672118 | cs/0408062 | Source Coding With Distortion Side Information At The Encoder | <|reference_start|>Source Coding With Distortion Side Information At The Encoder: We consider lossy source coding when side information affecting the distortion measure may be available at the encoder, decoder, both, or neither. For example, such distortion side information can model reliabilities for noisy measurements, sensor calibration information, or perceptual effects like masking and sensitivity to context. When the distortion side information is statistically independent of the source, we show that in many cases (e.g, for additive or multiplicative distortion side information) there is no penalty for knowing the side information only at the encoder, and there is no advantage to knowing it at the decoder. Furthermore, for quadratic distortion measures scaled by the distortion side information, we evaluate the penalty for lack of encoder knowledge and show that it can be arbitrarily large. In this scenario, we also sketch transform based quantizers constructions which efficiently exploit encoder side information in the high-resolution limit.<|reference_end|> | arxiv | @article{martinian2004source,
title={Source Coding With Distortion Side Information At The Encoder},
author={Emin Martinian, Gregory W. Wornell, and Ram Zamir},
journal={Proceedings of the 2004 Data Compression Conference in Snowbird,
UT},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408062},
primaryClass={cs.IT math.IT}
} | martinian2004source |
arxiv-672119 | cs/0408063 | Analysis and Visualization of Index Words from Audio Transcripts of Instructional Videos | <|reference_start|>Analysis and Visualization of Index Words from Audio Transcripts of Instructional Videos: We introduce new techniques for extracting, analyzing, and visualizing textual contents from instructional videos of low production quality. Using Automatic Speech Recognition, approximate transcripts (~75% Word Error Rate) are obtained from the originally highly compressed videos of university courses, each comprising between 10 to 30 lectures. Text material in the form of books or papers that accompany the course are then used to filter meaningful phrases from the seemingly incoherent transcripts. The resulting index into the transcripts is tied together and visualized in 3 experimental graphs that help in understanding the overall course structure and provide a tool for localizing certain topics for indexing. We specifically discuss a Transcript Index Map, which graphically lays out key phrases for a course, a Textbook Chapter to Transcript Match, and finally a Lecture Transcript Similarity graph, which clusters semantically similar lectures. We test our methods and tools on 7 full courses with 230 hours of video and 273 transcripts. We are able to extract up to 98 unique key terms for a given transcript and up to 347 unique key terms for an entire course. The accuracy of the Textbook Chapter to Transcript Match exceeds 70% on average. The methods used can be applied to genres of video in which there are recurrent thematic words (news, sports, meetings,...)<|reference_end|> | arxiv | @article{haubold2004analysis,
title={Analysis and Visualization of Index Words from Audio Transcripts of
Instructional Videos},
author={Alexander Haubold, John R. Kender},
journal={arXiv preprint arXiv:cs/0408063},
year={2004},
doi={10.1109/MMSE.2004.27},
archivePrefix={arXiv},
eprint={cs/0408063},
primaryClass={cs.IR cs.MM}
} | haubold2004analysis |
arxiv-672120 | cs/0408064 | Proportional Conflict Redistribution Rules for Information Fusion | <|reference_start|>Proportional Conflict Redistribution Rules for Information Fusion: In this paper we propose five versions of a Proportional Conflict Redistribution rule (PCR) for information fusion together with several examples. From PCR1 to PCR2, PCR3, PCR4, PCR5 one increases the complexity of the rules and also the exactitude of the redistribution of conflicting masses. PCR1 restricted from the hyper-power set to the power set and without degenerate cases gives the same result as the Weighted Average Operator (WAO) proposed recently by J{\o}sang, Daniel and Vannoorenberghe but does not satisfy the neutrality property of vacuous belief assignment. That's why improved PCR rules are proposed in this paper. PCR4 is an improvement of minC and Dempster's rules. The PCR rules redistribute the conflicting mass, after the conjunctive rule has been applied, proportionally with some functions depending on the masses assigned to their corresponding columns in the mass matrix. There are infinitely many ways these functions (weighting factors) can be chosen depending on the complexity one wants to deal with in specific applications and fusion systems. Any fusion combination rule is at some degree ad-hoc.<|reference_end|> | arxiv | @article{smarandache2004proportional,
title={Proportional Conflict Redistribution Rules for Information Fusion},
author={Florentin Smarandache, Jean Dezert},
journal={Proceedings of the 8th International Conference on Information
Fusion, Philadelphia, 25-29 July, 2005; IEEE Catalog Number: 05EX1120C, ISBN:
0-7803-9287-6.},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408064},
primaryClass={cs.AI}
} | smarandache2004proportional |
arxiv-672121 | cs/0408065 | The Core of Directed Network Problems with Quotas | <|reference_start|>The Core of Directed Network Problems with Quotas: This paper proves the existence of non-empty cores for directed network problems with quotas and for those combinatorial allocation problems which permit only exclusive allocations.<|reference_end|> | arxiv | @article{lahiri2004the,
title={The Core of Directed Network Problems with Quotas},
author={Somdeb Lahiri},
journal={arXiv preprint arXiv:cs/0408065},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408065},
primaryClass={cs.GT}
} | lahiri2004the |
arxiv-672122 | cs/0408066 | Robust Locally Testable Codes and Products of Codes | <|reference_start|>Robust Locally Testable Codes and Products of Codes: We continue the investigation of locally testable codes, i.e., error-correcting codes for whom membership of a given word in the code can be tested probabilistically by examining it in very few locations. We give two general results on local testability: First, motivated by the recently proposed notion of {\em robust} probabilistically checkable proofs, we introduce the notion of {\em robust} local testability of codes. We relate this notion to a product of codes introduced by Tanner, and show a very simple composition lemma for this notion. Next, we show that codes built by tensor products can be tested robustly and somewhat locally, by applying a variant of a test and proof technique introduced by Raz and Safra in the context of testing low-degree multivariate polynomials (which are a special case of tensor codes). Combining these two results gives us a generic construction of codes of inverse polynomial rate, that are testable with poly-logarithmically many queries. We note these locally testable tensor codes can be obtained from {\em any} linear error correcting code with good distance. Previous results on local testability, albeit much stronger quantitatively, rely heavily on algebraic properties of the underlying codes.<|reference_end|> | arxiv | @article{ben-sasson2004robust,
title={Robust Locally Testable Codes and Products of Codes},
author={Eli Ben-Sasson and Madhu Sudan},
journal={arXiv preprint arXiv:cs/0408066},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408066},
primaryClass={cs.IT cs.CC math.IT}
} | ben-sasson2004robust |
arxiv-672123 | cs/0408067 | The Expected Size of the Rule k Dominating Set | <|reference_start|>The Expected Size of the Rule k Dominating Set: Rule k is a localized approximation algorithm that finds a small connected dominating set in a graph. We estimate the expected size of the Rule k dominating set for the model of random unit disk graphs constructed from n random points in an s_n by s_n square region of the plane.<|reference_end|> | arxiv | @article{hansen2004the,
title={The Expected Size of the Rule k Dominating Set},
author={Jennie C. Hansen, Eric Schmutz},
journal={arXiv preprint arXiv:cs/0408067},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408067},
primaryClass={cs.DM}
} | hansen2004the |
arxiv-672124 | cs/0408068 | Probabilistic Analysis of Rule 2 | <|reference_start|>Probabilistic Analysis of Rule 2: Li and Wu proposed Rule 2, a localized approximation algorithm that attempts to find a small connected dominating set in a graph. Here we study the asymptotic performance of Rule 2 on random unit disk graphs formed from n random points in an s_n by s_n square region of the plane. If s_n is below the threshold for connectivity, then Rule 2 produces a dominating set whose expected size is O(n/(loglog n)^{3/2}). We conjecture that this bound is not optimal.<|reference_end|> | arxiv | @article{hansen2004probabilistic,
title={Probabilistic Analysis of Rule 2},
author={Jennie C. Hansen, Eric Schmutz, Li Sheng},
journal={arXiv preprint arXiv:cs/0408068},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408068},
primaryClass={cs.DM}
} | hansen2004probabilistic |
arxiv-672125 | cs/0408069 | The Integration of Connectionism and First-Order Knowledge Representation and Reasoning as a Challenge for Artificial Intelligence | <|reference_start|>The Integration of Connectionism and First-Order Knowledge Representation and Reasoning as a Challenge for Artificial Intelligence: Intelligent systems based on first-order logic on the one hand, and on artificial neural networks (also called connectionist systems) on the other, differ substantially. It would be very desirable to combine the robust neural networking machinery with symbolic knowledge representation and reasoning paradigms like logic programming in such a way that the strengths of either paradigm will be retained. Current state-of-the-art research, however, fails by far to achieve this ultimate goal. As one of the main obstacles to be overcome we perceive the question how symbolic knowledge can be encoded by means of connectionist systems: Satisfactory answers to this will naturally lead the way to knowledge extraction algorithms and to integrated neural-symbolic systems.<|reference_end|> | arxiv | @article{bader2004the,
title={The Integration of Connectionism and First-Order Knowledge
Representation and Reasoning as a Challenge for Artificial Intelligence},
author={Sebastian Bader, Pascal Hitzler, Steffen Hoelldobler},
journal={arXiv preprint arXiv:cs/0408069},
year={2004},
archivePrefix={arXiv},
eprint={cs/0408069},
primaryClass={cs.AI cs.LO cs.NE}
} | bader2004the |
arxiv-672126 | cs/0409001 | Probabilistic heuristics for disseminating information in networks | <|reference_start|>Probabilistic heuristics for disseminating information in networks: We study the problem of disseminating a piece of information through all the nodes of a network, given that it is known originally only to a single node. In the absence of any structural knowledge on the network other than the nodes' neighborhoods, this problem is traditionally solved by flooding all the network's edges. We analyze a recently introduced probabilistic algorithm for flooding and give an alternative probabilistic heuristic that can lead to some cost-effective improvements, like better trade-offs between the message and time complexities involved. We analyze the two algorithms both mathematically and by means of simulations, always within a random-graph framework and considering relevant node-degree distributions.<|reference_end|> | arxiv | @article{stauffer2004probabilistic,
title={Probabilistic heuristics for disseminating information in networks},
author={A. O. Stauffer, V. C. Barbosa},
journal={IEEE/ACM Transactions on Networking 15 (2007), 425-435},
year={2004},
doi={10.1109/TNET.2007.892877},
number={ES-660/04},
archivePrefix={arXiv},
eprint={cs/0409001},
primaryClass={cs.NI cs.DC}
} | stauffer2004probabilistic |
arxiv-672127 | cs/0409002 | Default reasoning over domains and concept hierarchies | <|reference_start|>Default reasoning over domains and concept hierarchies: W.C. Rounds and G.-Q. Zhang (2001) have proposed to study a form of disjunctive logic programming generalized to algebraic domains. This system allows reasoning with information which is hierarchically structured and forms a (suitable) domain. We extend this framework to include reasoning with default negation, giving rise to a new nonmonotonic reasoning framework on hierarchical knowledge which encompasses answer set programming with extended disjunctive logic programs. We also show that the hierarchically structured knowledge on which programming in this paradigm can be done, arises very naturally from formal concept analysis. Together, we obtain a default reasoning paradigm for conceptual knowledge which is in accordance with mainstream developments in nonmonotonic reasoning.<|reference_end|> | arxiv | @article{hitzler2004default,
title={Default reasoning over domains and concept hierarchies},
author={Pascal Hitzler},
journal={arXiv preprint arXiv:cs/0409002},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409002},
primaryClass={cs.AI cs.LO}
} | hitzler2004default |
arxiv-672128 | cs/0409003 | ScheduleNanny: Using GPS to Learn the User's Significant Locations, Travel Times and Schedule | <|reference_start|>ScheduleNanny: Using GPS to Learn the User's Significant Locations, Travel Times and Schedule: As computing technology becomes more pervasive, personal devices such as the PDA, cell-phone, and notebook should use context to determine how to act. Location is one form of context that can be used in many ways. We present a multiple-device system that collects and clusters GPS data into significant locations. These locations are then used to determine travel times and a probabilistic model of the user's schedule, which is used to intelligently alert the user. We evaluate our system and suggest how it should be integrated with a variety of applications.<|reference_end|> | arxiv | @article{bhawalkar2004schedulenanny:,
title={ScheduleNanny: Using GPS to Learn the User's Significant Locations,
Travel Times and Schedule},
author={Parth Bhawalkar, Victor Bigio, Adam Davis, Karthik Narayanaswami, Femi
Olumoko},
journal={arXiv preprint arXiv:cs/0409003},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409003},
primaryClass={cs.AI cs.CV cs.HC}
} | bhawalkar2004schedulenanny: |
arxiv-672129 | cs/0409004 | The Password Change Phase is Still Insecure | <|reference_start|>The Password Change Phase is Still Insecure: In 2004, W. C. Ku and S. M. Chen proposed an efficient remote user authentication scheme using smart cards to solve the security problems of Chien et al.'s scheme. Recently, Hsu and Yoon et al. pointed out the security weaknesses of the Ku and Chen's scheme. Furthermore, Yoon et al. also proposed a new efficient remote user authentication scheme using smart cards. Yoon et al. also modified the password change phase of Ku and Chen's scheme. This paper analyzes that password change phase of Yoon et al's modified scheme is still insecure.<|reference_end|> | arxiv | @article{kumar2004the,
title={The Password Change Phase is Still Insecure},
author={Manoj Kumar},
journal={arXiv preprint arXiv:cs/0409004},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409004},
primaryClass={cs.CR}
} | kumar2004the |
arxiv-672130 | cs/0409005 | Sharing Computer Network Logs for Security and Privacy: A Motivation for New Methodologies of Anonymization | <|reference_start|>Sharing Computer Network Logs for Security and Privacy: A Motivation for New Methodologies of Anonymization: Logs are one of the most fundamental resources to any security professional. It is widely recognized by the government and industry that it is both beneficial and desirable to share logs for the purpose of security research. However, the sharing is not happening or not to the degree or magnitude that is desired. Organizations are reluctant to share logs because of the risk of exposing sensitive information to potential attackers. We believe this reluctance remains high because current anonymization techniques are weak and one-size-fits-all--or better put, one size tries to fit all. We must develop standards and make anonymization available at varying levels, striking a balance between privacy and utility. Organizations have different needs and trust other organizations to different degrees. They must be able to map multiple anonymization levels with defined risks to the trust levels they share with (would-be) receivers. It is not until there are industry standards for multiple levels of anonymization that we will be able to move forward and achieve the goal of widespread sharing of logs for security researchers.<|reference_end|> | arxiv | @article{slagell2004sharing,
title={Sharing Computer Network Logs for Security and Privacy: A Motivation for
New Methodologies of Anonymization},
author={Adam J. Slagell and William Yurcik},
journal={arXiv preprint arXiv:cs/0409005},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409005},
primaryClass={cs.CR}
} | slagell2004sharing |
arxiv-672131 | cs/0409006 | Maple+GrTensorII libraries for cosmology | <|reference_start|>Maple+GrTensorII libraries for cosmology: The article mainly presents some results in using MAPLE platform for computer algebra and GrTensorII package in doing calculations for theoretical and numerical cosmology<|reference_end|> | arxiv | @article{vulcanov2004maple+grtensorii,
title={Maple+GrTensorII libraries for cosmology},
author={Dumitru N. Vulcanov, Valentina D. Vulcanov (The West University of
Timisoara, Romania)},
journal={arXiv preprint arXiv:cs/0409006},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409006},
primaryClass={cs.SC gr-qc}
} | vulcanov2004maple+grtensorii |
arxiv-672132 | cs/0409007 | The Generalized Pignistic Transformation | <|reference_start|>The Generalized Pignistic Transformation: This paper presents in detail the generalized pignistic transformation (GPT) succinctly developed in the Dezert-Smarandache Theory (DSmT) framework as a tool for decision process. The GPT allows to provide a subjective probability measure from any generalized basic belief assignment given by any corpus of evidence. We mainly focus our presentation on the 3D case and provide the complete result obtained by the GPT and its validation drawn from the probability theory.<|reference_end|> | arxiv | @article{dezert2004the,
title={The Generalized Pignistic Transformation},
author={Jean Dezert, Florentin Smarandache, Milan Daniel},
journal={Proceedings of the Seventh International Conference on Information
Fusion, International Society for Information Fusion, Stockholm, Sweden,
384-391, 2004},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409007},
primaryClass={cs.AI}
} | dezert2004the |
arxiv-672133 | cs/0409008 | A Model for Fine-Grained Alignment of Multilingual Texts | <|reference_start|>A Model for Fine-Grained Alignment of Multilingual Texts: While alignment of texts on the sentential level is often seen as being too coarse, and word alignment as being too fine-grained, bi- or multilingual texts which are aligned on a level in-between are a useful resource for many purposes. Starting from a number of examples of non-literal translations, which tend to make alignment difficult, we describe an alignment model which copes with these cases by explicitly coding them. The model is based on predicate-argument structures and thus covers the middle ground between sentence and word alignment. The model is currently used in a recently initiated project of a parallel English-German treebank (FuSe), which can in principle be extended with additional languages.<|reference_end|> | arxiv | @article{cyrus2004a,
title={A Model for Fine-Grained Alignment of Multilingual Texts},
author={Lea Cyrus and Hendrik Feddes},
journal={Proc. COLING 2004 Workshop on Multilingual Linguistic Resources
(MLR2004), Geneva, August 28, 2004, pp. 15-22},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409008},
primaryClass={cs.CL}
} | cyrus2004a |
arxiv-672134 | cs/0409009 | CrocoPat 2.1 Introduction and Reference Manual | <|reference_start|>CrocoPat 2.1 Introduction and Reference Manual: CrocoPat is an efficient, powerful and easy-to-use tool for manipulating relations of arbitrary arity, including directed graphs. This manual provides an introduction to and a reference for CrocoPat and its programming language RML. It includes several application examples, in particular from the analysis of structural models of software systems.<|reference_end|> | arxiv | @article{beyer2004crocopat,
title={CrocoPat 2.1 Introduction and Reference Manual},
author={Dirk Beyer (University of California, Berkeley), Andreas Noack
(Brandenburg University of Technology)},
journal={arXiv preprint arXiv:cs/0409009},
year={2004},
number={UCB//CSD-04-1338},
archivePrefix={arXiv},
eprint={cs/0409009},
primaryClass={cs.PL cs.DM cs.DS cs.SE}
} | beyer2004crocopat |
arxiv-672135 | cs/0409010 | Distance properties of expander codes | <|reference_start|>Distance properties of expander codes: We study the minimum distance of codes defined on bipartite graphs. Weight spectrum and the minimum distance of a random ensemble of such codes are computed. It is shown that if the vertex codes have minimum distance $\ge 3$, the overall code is asymptotically good, and sometimes meets the Gilbert-Varshamov bound. Constructive families of expander codes are presented whose minimum distance asymptotically exceeds the product bound for all code rates between 0 and 1.<|reference_end|> | arxiv | @article{barg2004distance,
title={Distance properties of expander codes},
author={Alexander Barg and Gilles Zemor},
journal={arXiv preprint arXiv:cs/0409010},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409010},
primaryClass={cs.IT cs.DM math.IT}
} | barg2004distance |
arxiv-672136 | cs/0409011 | Shannon meets Wiener II: On MMSE estimation in successive decoding schemes | <|reference_start|>Shannon meets Wiener II: On MMSE estimation in successive decoding schemes: We continue to discuss why MMSE estimation arises in coding schemes that approach the capacity of linear Gaussian channels. Here we consider schemes that involve successive decoding, such as decision-feedback equalization or successive cancellation.<|reference_end|> | arxiv | @article{forney2004shannon,
title={Shannon meets Wiener II: On MMSE estimation in successive decoding
schemes},
author={G. David Forney Jr},
journal={arXiv preprint arXiv:cs/0409011},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409011},
primaryClass={cs.IT math.IT}
} | forney2004shannon |
arxiv-672137 | cs/0409012 | A New Look at Survey Propagation and its Generalizations | <|reference_start|>A New Look at Survey Propagation and its Generalizations: This paper provides a new conceptual perspective on survey propagation, which is an iterative algorithm recently introduced by the statistical physics community that is very effective in solving random k-SAT problems even with densities close to the satisfiability threshold. We first describe how any SAT formula can be associated with a novel family of Markov random fields (MRFs), parameterized by a real number \rho \in [0,1]. We then show that applying belief propagation--a well-known ``message-passing'' technique for estimating marginal probabilities--to this family of MRFs recovers a known family of algorithms, ranging from pure survey propagation at one extreme (\rho = 1) to standard belief propagation on the uniform distribution over SAT assignments at the other extreme (\rho = 0). Configurations in these MRFs have a natural interpretation as partial satisfiability assignments, on which a partial order can be defined. We isolate cores as minimal elements in this partial ordering, which are also fixed points of survey propagation and the only assignments with positive probability in the MRF for \rho=1. Our experimental results for k=3 suggest that solutions of random formulas typically do not possess non-trivial cores. This makes it necessary to study the structure of the space of partial assignments for \rho<1 and investigate the role of assignments that are very close to being cores. To that end, we investigate the associated lattice structure, and prove a weight-preserving identity that shows how any MRF with \rho>0 can be viewed as a ``smoothed'' version of the uniform distribution over satisfying assignments (\rho=0). Finally, we isolate properties of Gibbs sampling and message-passing algorithms that are typical for an ensemble of k-SAT problems.<|reference_end|> | arxiv | @article{maneva2004a,
title={A New Look at Survey Propagation and its Generalizations},
author={Eliza N. Maneva and Elchanan Mossel and Martin J. Wainwright},
journal={arXiv preprint arXiv:cs/0409012},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409012},
primaryClass={cs.CC}
} | maneva2004a |
arxiv-672138 | cs/0409013 | Locally connected spanning trees on graphs | <|reference_start|>Locally connected spanning trees on graphs: A locally connected spanning tree of a graph $G$ is a spanning tree $T$ of $G$ such that the set of all neighbors of $v$ in $T$ induces a connected subgraph of $G$ for every $v\in V(G)$. The purpose of this paper is to give linear-time algorithms for finding locally connected spanning trees on strongly chordal graphs and proper circular-arc graphs, respectively.<|reference_end|> | arxiv | @article{lin2004locally,
title={Locally connected spanning trees on graphs},
author={Ching-Chi Lin, Gerard J. Chang, Gen-Huey Chen},
journal={arXiv preprint arXiv:cs/0409013},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409013},
primaryClass={cs.DS cs.DM}
} | lin2004locally |
arxiv-672139 | cs/0409014 | A Digital Signature with Threshold Generation and Verification | <|reference_start|>A Digital Signature with Threshold Generation and Verification: This paper proposes a signature scheme where the signatures are generated by the cooperation of a number of people from a given group of senders and the signatures are verified by a certain number of people from the group of recipients. Shamir's threshold scheme and Schnorr's signature scheme are used to realize the proposed scheme.<|reference_end|> | arxiv | @article{lal2004a,
title={A Digital Signature with Threshold Generation and Verification},
author={Sunder lal and Manoj Kumar},
journal={arXiv preprint arXiv:cs/0409014},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409014},
primaryClass={cs.CR}
} | lal2004a |
arxiv-672140 | cs/0409015 | The strength of replacement in weak arithmetic | <|reference_start|>The strength of replacement in weak arithmetic: The replacement (or collection or choice) axiom scheme asserts bounded quantifier exchange. We prove the independence of this scheme from various weak theories of arithmetic, sometimes under a complexity assumption.<|reference_end|> | arxiv | @article{cook2004the,
title={The strength of replacement in weak arithmetic},
author={Stephen Cook and Neil Thapen},
journal={arXiv preprint arXiv:cs/0409015},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409015},
primaryClass={cs.LO cs.CC}
} | cook2004the |
arxiv-672141 | cs/0409016 | Using a hierarchy of Domain Specific Languages in complex software systems design | <|reference_start|>Using a hierarchy of Domain Specific Languages in complex software systems design: A new design methodology is introduced, with some examples on building Domain Specific Languages hierarchy on top of Scheme.<|reference_end|> | arxiv | @article{lugovsky2004using,
title={Using a hierarchy of Domain Specific Languages in complex software
systems design},
author={V. S. Lugovsky},
journal={arXiv preprint arXiv:cs/0409016},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409016},
primaryClass={cs.PL cs.DS cs.SE}
} | lugovsky2004using |
arxiv-672142 | cs/0409017 | Near Optimal Routing for Small-World Networks with Augmented Local Awareness | <|reference_start|>Near Optimal Routing for Small-World Networks with Augmented Local Awareness: In order to investigate the routing aspects of small-world networks, Kleinberg proposes a network model based on a $d$-dimensional lattice with long-range links chosen at random according to the $d$-harmonic distribution. Kleinberg shows that the greedy routing algorithm by using only local information performs in $O(\log^2 n)$ expected number of hops, where $n$ denotes the number of nodes in the network. Martel and Nguyen have found that the expected diameter of Kleinberg's small-world networks is $\Theta(\log n)$. Thus a question arises naturally: Can we improve the routing algorithms to match the diameter of the networks while keeping the amount of information stored on each node as small as possible? We extend Kleinberg's model and add three augmented local links for each node: two of which are connected to nodes chosen randomly and uniformly within $\log^2 n$ Manhattan distance, and the third one is connected to a node chosen randomly and uniformly within $\log n$ Manhattan distance. We show that if each node is aware of $O(\log n)$ number of neighbors via the augmented local links, there exist both non-oblivious and oblivious algorithms that can route messages between any pair of nodes in $O(\log n \log \log n)$ expected number of hops, which is a near optimal routing complexity and outperforms the other related results for routing in Kleinberg's small-world networks. Our schemes keep only $O(\log^2 n)$ bits of routing information on each node, thus they are scalable with the network size. Besides adding new light to the studies of social networks, our results may also find applications in the design of large-scale distributed networks, such as peer-to-peer systems, in the same spirit of Symphony.<|reference_end|> | arxiv | @article{zeng2004near,
title={Near Optimal Routing for Small-World Networks with Augmented Local
Awareness},
author={Jianyang Zeng, Wen-Jing Hsu and Jiangdian Wang},
journal={arXiv preprint arXiv:cs/0409017},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409017},
primaryClass={cs.DM cs.DC cs.DS}
} | zeng2004near |
arxiv-672143 | cs/0409018 | PKI Scalability Issues | <|reference_start|>PKI Scalability Issues: This report surveys different PKI technologies such as PKIX and SPKI and the issues of PKI that affect scalability. Much focus is spent on certificate revocation methodologies and status verification systems such as CRLs, Delta-CRLs, CRS, Certificate Revocation Trees, Windowed Certificate Revocation, OCSP, SCVP and DVCS.<|reference_end|> | arxiv | @article{slagell2004pki,
title={PKI Scalability Issues},
author={Adam J Slagell and Rafael Bonilla},
journal={arXiv preprint arXiv:cs/0409018},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409018},
primaryClass={cs.CR}
} | slagell2004pki |
arxiv-672144 | cs/0409019 | Outlier Detection by Logic Programming | <|reference_start|>Outlier Detection by Logic Programming: The development of effective knowledge discovery techniques has become in the recent few years a very active research area due to the important impact it has in several relevant application areas. One interesting task thereof is that of singling out anomalous individuals from a given population, e.g., to detect rare events in time-series analysis settings, or to identify objects whose behavior is deviant w.r.t. a codified standard set of "social" rules. Such exceptional individuals are usually referred to as outliers in the literature. Recently, outlier detection has also emerged as a relevant KR&R problem. In this paper, we formally state the concept of outliers by generalizing in several respects an approach recently proposed in the context of default logic, for instance, by having outliers not being restricted to single individuals but, rather, in the more general case, to correspond to entire (sub)theories. We do that within the context of logic programming and, mainly through examples, we discuss its potential practical impact in applications. The formalization we propose is a novel one and helps in shedding some light on the real nature of outliers. Moreover, as a major contribution of this work, we illustrate the exploitation of minimality criteria in outlier detection. The computational complexity of outlier detection problems arising in this novel setting is thoroughly investigated and accounted for in the paper as well. Finally, we also propose a rewriting algorithm that transforms any outlier detection problem into an equivalent inference problem under the stable model semantics, thereby making outlier computation effective and realizable on top of any stable model solver.<|reference_end|> | arxiv | @article{angiulli2004outlier,
title={Outlier Detection by Logic Programming},
author={Fabrizio Angiulli, Gianluigi Greco and Luigi Palopoli},
journal={arXiv preprint arXiv:cs/0409019},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409019},
primaryClass={cs.AI cs.LO}
} | angiulli2004outlier |
arxiv-672145 | cs/0409020 | A Generalized Disjunctive Paraconsistent Data Model for Negative and Disjunctive Information | <|reference_start|>A Generalized Disjunctive Paraconsistent Data Model for Negative and Disjunctive Information: This paper presents a generalization of the disjunctive paraconsistent relational data model in which disjunctive positive and negative information can be represented explicitly and manipulated. There are situations where the closed world assumption to infer negative facts is not valid or undesirable and there is a need to represent and reason with negation explicitly. We consider explicit disjunctive negation in the context of disjunctive databases as there is an interesting interplay between these two types of information. Generalized disjunctive paraconsistent relation is introduced as the main structure in this model. The relational algebra is appropriately generalized to work on generalized disjunctive paraconsistent relations and their correctness is established.<|reference_end|> | arxiv | @article{wang2004a,
title={A Generalized Disjunctive Paraconsistent Data Model for Negative and
Disjunctive Information},
author={Haibin Wang, Yuanchun He, Rajshekhar Sunderraman},
journal={arXiv preprint arXiv:cs/0409020},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409020},
primaryClass={cs.DB}
} | wang2004a |
arxiv-672146 | cs/0409021 | Should Cyberspace Chat Rooms be closed to protect Children? | <|reference_start|>Should Cyberspace Chat Rooms be closed to protect Children?: The explosion of people networking in cyberspace, disseminating terabytes of information, is being promoted through the use of broadband, Bluetooth technology, and wireless mobile computing facilities. New communities within such venues as virtual chat rooms, discussion groups, newsgroups, etc., are being created daily and even hourly. This is raising issues of cyberethics concerning privacy, security, crime, human needs, e-business, e-healthcare, e-government and intellectual property, among others, that need to be evaluated and reflected upon. With this new freedom come new moral and ethical responsibilities, which raise questions as to whether anything can be published or whether there should be restrictions. This paper addresses one specific area that has come into the public eye: the closure by Microsoft of all of its free chat rooms.<|reference_end|> | arxiv | @article{hinze-hoare2004should,
title={Should Cyberspace Chat Rooms be closed to protect Children?},
author={Vita Hinze-Hoare},
journal={arXiv preprint arXiv:cs/0409021},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409021},
primaryClass={cs.CY}
} | hinze-hoare2004should |
arxiv-672147 | cs/0409022 | Two Models for the Study of Congested Internet Connections | <|reference_start|>Two Models for the Study of Congested Internet Connections: In this paper, we introduce two deterministic models aimed at capturing the dynamics of congested Internet connections. The first model is a continuous-time model that combines a system of differential equations with a sudden change in one of the state variables. The second model is a discrete-time model with a time step that arises naturally from the system. Results from these models show good agreement with the well-known ns network simulator, better than the results of a previous, similar model. This is due in large part to the use of the sudden change to reflect the impact of lost data packets. We also discuss the potential use of this model in network traffic state estimation.<|reference_end|> | arxiv | @article{frommer2004two,
title={Two Models for the Study of Congested Internet Connections},
author={Ian Frommer, Eric Harder, Brian Hunt, Ryan Lance, Edward Ott and James
Yorke},
journal={arXiv preprint arXiv:cs/0409022},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409022},
primaryClass={cs.NI}
} | frommer2004two |
arxiv-672148 | cs/0409023 | Proximity Inversion Functions on the Non-Negative Integers | <|reference_start|>Proximity Inversion Functions on the Non-Negative Integers: We consider functions mapping non-negative integers to non-negative real numbers such that a and a+n are mapped to values at least 1/n apart. In this paper we use a novel method to construct such a function. We conjecture that the supremum of the generated function is optimal and pose some unsolved problems.<|reference_end|> | arxiv | @article{lucier2004proximity,
title={Proximity Inversion Functions on the Non-Negative Integers},
author={Brendan Lucier},
journal={arXiv preprint arXiv:cs/0409023},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409023},
primaryClass={cs.DM}
} | lucier2004proximity |
arxiv-672149 | cs/0409024 | Aperiodic Tilings: Breaking Translational Symmetry | <|reference_start|>Aperiodic Tilings: Breaking Translational Symmetry: Classical results on aperiodic tilings are rather complicated and not widely understood. Below, an alternative approach is discussed in the hope of providing additional intuition not apparent in classical works.<|reference_end|> | arxiv | @article{levin2004aperiodic,
title={Aperiodic Tilings: Breaking Translational Symmetry},
author={Leonid A. Levin},
journal={The Computer Journal, 48(6):642-645, 2005},
year={2004},
doi={10.1093/comjnl/bxh124},
archivePrefix={arXiv},
eprint={cs/0409024},
primaryClass={cs.DM cs.DC}
} | levin2004aperiodic |
arxiv-672150 | cs/0409025 | Topics in asynchronous systems | <|reference_start|>Topics in asynchronous systems: In the paper we define and characterize asynchronous systems from the point of view of their autonomy, determinism, order, non-anticipation, time invariance, symmetry, stability and other important properties. The study is inspired by models of asynchronous circuits.<|reference_end|> | arxiv | @article{vlad2004topics,
title={Topics in asynchronous systems},
author={Serban E. Vlad},
journal={Analele Universitatii din Oradea, Fascicola Matematica, TOM X,
2003},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409025},
primaryClass={cs.AR}
} | vlad2004topics |
arxiv-672151 | cs/0409026 | Capacity-achieving ensembles for the binary erasure channel with bounded complexity | <|reference_start|>Capacity-achieving ensembles for the binary erasure channel with bounded complexity: We present two sequences of ensembles of non-systematic irregular repeat-accumulate codes which asymptotically (as their block length tends to infinity) achieve capacity on the binary erasure channel (BEC) with bounded complexity per information bit. This is in contrast to all previous constructions of capacity-achieving sequences of ensembles whose complexity grows at least like the log of the inverse of the gap (in rate) to capacity. The new bounded complexity result is achieved by puncturing bits, and allowing in this way a sufficient number of state nodes in the Tanner graph representing the codes. We also derive an information-theoretic lower bound on the decoding complexity of randomly punctured codes on graphs. The bound holds for every memoryless binary-input output-symmetric channel and is refined for the BEC.<|reference_end|> | arxiv | @article{pfister2004capacity-achieving,
title={Capacity-achieving ensembles for the binary erasure channel with bounded
complexity},
author={H. Pfister, I. Sason and R. Urbanke},
journal={IEEE Transactions on Information Theory, Vol. 51 (7), pp.
2352-2379, July 2005},
year={2004},
doi={10.1109/TIT.2005.850079},
archivePrefix={arXiv},
eprint={cs/0409026},
primaryClass={cs.IT math.IT}
} | pfister2004capacity-achieving |
arxiv-672152 | cs/0409027 | Bounds on the decoding complexity of punctured codes on graphs | <|reference_start|>Bounds on the decoding complexity of punctured codes on graphs: We present two sequences of ensembles of non-systematic irregular repeat-accumulate codes which asymptotically (as their block length tends to infinity) achieve capacity on the binary erasure channel (BEC) with bounded complexity per information bit. This is in contrast to all previous constructions of capacity-achieving sequences of ensembles whose complexity grows at least like the log of the inverse of the gap (in rate) to capacity. The new bounded complexity result is achieved by puncturing bits, and allowing in this way a sufficient number of state nodes in the Tanner graph representing the codes. We also derive an information-theoretic lower bound on the decoding complexity of randomly punctured codes on graphs. The bound holds for every memoryless binary-input output-symmetric channel, and is refined for the BEC.<|reference_end|> | arxiv | @article{pfister2004bounds,
title={Bounds on the decoding complexity of punctured codes on graphs},
author={H. Pfister, I. Sason and R. Urbanke},
journal={arXiv preprint arXiv:cs/0409027},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409027},
primaryClass={cs.IT math.IT}
} | pfister2004bounds |
arxiv-672153 | cs/0409028 | Incentive Systems in Multi-Level Markets for Virtual Goods | <|reference_start|>Incentive Systems in Multi-Level Markets for Virtual Goods: As an alternative to rigid DRM measures, ways of marketing virtual goods through multi-level or networked marketing have raised some interest. This report is a first approach to multi-level markets for virtual goods from the viewpoint of theoretical economics. A generic, kinematic model for the monetary flow in multi-level markets, which quantitatively describes the incentives that buyers receive through resale revenues, is devised. Building on it, the competition of goods is examined in a dynamical, utility-theoretic model enabling, in particular, a treatment of the free-rider problem. The most important implications for the design of multi-level market mechanisms for virtual goods, or multi-level incentive management systems, are outlined.<|reference_end|> | arxiv | @article{schmidt2004incentive,
title={Incentive Systems in Multi-Level Markets for Virtual Goods},
author={Andreas U. Schmidt},
journal={arXiv preprint arXiv:cs/0409028},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409028},
primaryClass={cs.GT cs.CY}
} | schmidt2004incentive |
arxiv-672154 | cs/0409029 | Efficient polynomial time algorithms computing industrial-strength primitive roots | <|reference_start|>Efficient polynomial time algorithms computing industrial-strength primitive roots: E. Bach, following an idea of T. Itoh, has shown how to build a small set of numbers modulo a prime p such that at least one element of this set is a generator of $\mathbb{F}_p^*$ (Bach 1997; Itoh 2001). E. Bach also suggests that at least half of his set should be generators. We show here that a slight variant of this set can indeed be made to contain a ratio of primitive roots as close to 1 as necessary. We thus derive several algorithms computing primitive roots correct with very high probability in polynomial time. In particular we present an asymptotically $O^{\sim}(\sqrt{\frac{1}{\epsilon}}\log^{1.5}(p) + \log^2(p))$ algorithm providing primitive roots of $p$ with probability of correctness greater than $1-\epsilon$ and several $O(\log^\alpha(p))$, $\alpha \leq 5.23$, algorithms computing "industrial-strength" primitive roots with probabilities e.g. greater than the probability of "hardware malfunctions".<|reference_end|> | arxiv | @article{dubrois2004efficient,
title={Efficient polynomial time algorithms computing industrial-strength
primitive roots},
author={Jacques Dubrois (Axalto), Jean-Guillaume Dumas (LJK)},
journal={Information Processing Letters 97, 2 (2006) 41-45},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409029},
primaryClass={cs.SC math.NT}
} | dubrois2004efficient |
arxiv-672155 | cs/0409030 | Automatic Generation of CHR Constraint Solvers | <|reference_start|>Automatic Generation of CHR Constraint Solvers: In this paper, we present a framework for automatic generation of CHR solvers given the logical specification of the constraints. This approach takes advantage of the power of tabled resolution for constraint logic programming, in order to check the validity of the rules. Compared to previous works where different methods for automatic generation of constraint solvers have been proposed, our approach enables the generation of more expressive rules (even recursive and splitting rules) that can be used directly as CHR solvers.<|reference_end|> | arxiv | @article{abdennadher2004automatic,
title={Automatic Generation of CHR Constraint Solvers},
author={Slim Abdennadher and Christophe Rigotti},
journal={arXiv preprint arXiv:cs/0409030},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409030},
primaryClass={cs.LO cs.PL}
} | abdennadher2004automatic |
arxiv-672156 | cs/0409031 | Field Geology with a Wearable Computer: First Results of the Cyborg Astrobiologist System | <|reference_start|>Field Geology with a Wearable Computer: First Results of the Cyborg Astrobiologist System: We present results from the first geological field tests of the 'Cyborg Astrobiologist', which is a wearable computer and video camcorder system that we are using to test and train a computer-vision system towards having some of the autonomous decision-making capabilities of a field geologist. The Cyborg Astrobiologist platform has thus far been used for testing and development of these algorithms and systems: robotic acquisition of quasi-mosaics of images, real-time image segmentation, and real-time determination of interesting points in the image mosaics. The hardware and software systems function reliably, and the computer-vision algorithms are adequate for the first field tests. In addition to the proof-of-concept aspect of these field tests, the main result of these field tests is the enumeration of the issues that we can improve in the future, including dealing with structural shadow and microtexture, and controlling the camera's zoom lens in an intelligent manner. Nonetheless, despite these and other technical inadequacies, this Cyborg Astrobiologist system, consisting of a camera-equipped wearable computer and its computer-vision algorithms, has demonstrated its ability to find genuinely interesting points in the geological scenery in real time, and then to gather more information about these interest points in an automated manner.<|reference_end|> | arxiv | @article{mcguire2004field,
title={Field Geology with a Wearable Computer: First Results of the Cyborg
Astrobiologist System},
author={Patrick C. McGuire, Javier Gomez-Elvira, Jose Antonio
Rodriguez-Manfredi, Eduardo Sebastian-Martinez, Jens Ormo, Enrique
Diaz-Martinez, Helge Ritter, Markus Oesker, Robert Haschke and Joerg Ontrup},
journal={arXiv preprint arXiv:cs/0409031},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409031},
primaryClass={cs.CV astro-ph cs.RO}
} | mcguire2004field |
arxiv-672157 | cs/0409032 | Desynchronization and Speedup in an Asynchronous Conservative Parallel Update Protocol | <|reference_start|>Desynchronization and Speedup in an Asynchronous Conservative Parallel Update Protocol: In a state-update protocol for a system of $L$ asynchronous parallel processes that communicate only with nearest neighbors, global desynchronization in operation times can be deduced from kinetic roughening of the corresponding virtual-time horizon (VTH). The utilization of the parallel processing environment can be deduced by analyzing the microscopic structure of the VTH. We give an overview of how the methods of non-equilibrium surface growth (physics of complex systems) can be applied to uncover some properties of state update algorithms used in distributed parallel discrete-event simulations (PDES). In particular, we focus on the asynchronous conservative PDES algorithm in a ring communication topology. The time evolution of its VTH is simulated numerically as asynchronous cellular automaton whose update rule corresponds to the update rule followed by this algorithm. We give theoretical estimates of the performance as a function of $L$ and the load per processor, i.e., approximate formulas for the mean speedup and for the desynchronization. It is established that, for a given simulation size, there is a theoretical upper bound for the desynchronization and a theoretical non-zero lower bound for the utilization. The new approach to performance studies, outlined in this chapter, is particularly useful in the search for the design of a new-generation of algorithms that would efficiently carry out an autonomous or tunable synchronization.<|reference_end|> | arxiv | @article{kolakowska2004desynchronization,
title={Desynchronization and Speedup in an Asynchronous Conservative Parallel
Update Protocol},
author={A. Kolakowska and M. A. Novotny},
journal={Ch. 6 in "Artificial Intelligence and Computer Science", ed. by S.
Shannon, pp. 151-176 (2005 Nova Science Publishers, Inc., New York) ISBN
1-59454-411-5},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409032},
primaryClass={cs.DC cond-mat.mtrl-sci physics.comp-ph}
} | kolakowska2004desynchronization |
arxiv-672158 | cs/0409033 | Mean and Variance Estimation by Kriging | <|reference_start|>Mean and Variance Estimation by Kriging: The aim of the paper is to derive the numerical least-squares estimator for the mean and variance of a random variable. In order to do so, the following questions have to be answered: (i) what is the statistical model for the estimation procedure? (ii) what are the properties of the estimator, like optimality (in which class) or asymptotic properties? (iii) how does the estimator work in practice, and how does it compare to competing estimators?<|reference_end|> | arxiv | @article{suslo2004mean,
title={Mean and Variance Estimation by Kriging},
author={Tomasz Suslo},
journal={arXiv preprint arXiv:cs/0409033},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409033},
primaryClass={cs.NA cs.MS}
} | suslo2004mean |
arxiv-672159 | cs/0409034 | Securing Data in Storage: A Review of Current Research | <|reference_start|>Securing Data in Storage: A Review of Current Research: Protecting data from malicious computer users continues to grow in importance. Whether preventing unauthorized access to personal photographs, ensuring compliance with federal regulations, or ensuring the integrity of corporate secrets, all applications require increased security to protect data from talented intruders. Specifically, as more and more files are preserved on disk the requirement to provide secure storage has increased in importance. This paper presents a survey of techniques for securely storing data, including theoretical approaches, prototype systems, and existing systems currently available. Due to the wide variety of potential solutions available and the variety of techniques to arrive at a particular solution, it is important to review the entire field prior to selecting an implementation that satisfies particular requirements. This paper provides an overview of the prominent characteristics of several systems to provide a foundation for making an informed decision. Initially, the paper establishes a set of criteria for evaluating a storage solution based on confidentiality, integrity, availability, and performance. Then, using these criteria, the paper explains the relevant characteristics of select storage systems and provides a comparison of the major differences.<|reference_end|> | arxiv | @article{stanton2004securing,
title={Securing Data in Storage: A Review of Current Research},
author={Paul Stanton},
journal={arXiv preprint arXiv:cs/0409034},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409034},
primaryClass={cs.OS cs.CR}
} | stanton2004securing |
arxiv-672160 | cs/0409035 | Parallel Computing Environments and Methods for Power Distribution System Simulation | <|reference_start|>Parallel Computing Environments and Methods for Power Distribution System Simulation: The development of cost-effective high-performance parallel computing on multi-processor supercomputers makes it attractive to port excessively time-consuming simulation software from personal computers (PCs) to supercomputers. The power distribution system simulator (PDSS) takes a bottom-up approach and simulates load at the appliance level, where detailed thermal models for appliances are used. This approach works well for a small power distribution system consisting of a few thousand appliances. When the number of appliances increases, the simulation uses up the PC memory and its runtime increases to a point where the approach is no longer feasible for modeling a practical large power distribution system. This paper presents an effort made to port a PC-based power distribution system simulator to a 128-processor shared-memory supercomputer. The paper offers an overview of the parallel computing environment and a description of the modifications made to the PDSS model. The performance of the PDSS running on a standalone PC and on the supercomputer is compared. Future research directions for utilizing parallel computing in power distribution system simulation are also addressed.<|reference_end|> | arxiv | @article{lu2004parallel,
title={Parallel Computing Environments and Methods for Power Distribution
System Simulation},
author={Ning Lu, Z. Todd Taylor, David P. Chassin, Ross T. Guttromson, R.
Scott Studham},
journal={arXiv preprint arXiv:cs/0409035},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409035},
primaryClass={cs.DC cs.CE cs.MA cs.PF}
} | lu2004parallel |
arxiv-672161 | cs/0409036 | A Directed Signature Scheme and its Applications | <|reference_start|>A Directed Signature Scheme and its Applications: This paper presents a directed signature scheme with the property that the signature can be verified only with the help of the signer or the signature receiver. We also propose its applications to shared verification of signatures and to threshold cryptosystems.<|reference_end|> | arxiv | @article{lal2004a,
title={A Directed Signature Scheme and its Applications},
author={Sunder Lal and Manoj Kumar},
journal={arXiv preprint arXiv:cs/0409036},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409036},
primaryClass={cs.CR}
} | lal2004a |
arxiv-672162 | cs/0409037 | Business Processes: The Theoretical Impact of Process Thinking on Information Systems Development | <|reference_start|>Business Processes: The Theoretical Impact of Process Thinking on Information Systems Development: This paper investigates two aspects of process thinking that affect the success rate of IT projects. These two aspects are the changes in the structure of organizations and the epistemology of Information Systems Development. Firstly, the conception of business processes within the management of organizations increases the structural complexity of Information Systems, because existing systems have to be integrated into a coherent cross-functional architecture. Secondly, process thinking leads to a particular view of organizations that ultimately has a negative effect on the support of Information Systems. As an illustration of process thinking, the Business Process Reengineering movement adheres to a technocratic management perspective of organizations. In particular, this conception of organization views people as mechanisms to realize certain organizational goals. As a result of this view, stakeholders are confronted with the implemented systems, rather than consulted about the scope and functionality of those systems. Therefore, both aspects of process thinking have a negative impact on the success of IT projects. The problem of structural complexity is an area that is addressed by Enterprise Application Integration, and mainly requires technical solutions. However, the problems associated with the conception of organization require a different, markedly non-technical, perspective. Several directions are discussed to overcome some limitations of process thinking, but these directions are merely small pointers. If truly effective and useful Information Systems are to be acquired, IT practitioners and scientists require a completely different mindset.<|reference_end|> | arxiv | @article{dumay2004business,
title={Business Processes: The Theoretical Impact of Process Thinking on
Information Systems Development},
author={Mark Dumay},
journal={arXiv preprint arXiv:cs/0409037},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409037},
primaryClass={cs.OH}
} | dumay2004business |
arxiv-672163 | cs/0409038 | Checking modes of HAL programs | <|reference_start|>Checking modes of HAL programs: Recent constraint logic programming (CLP) languages, such as HAL and Mercury, require type, mode and determinism declarations for predicates. This information allows the generation of efficient target code and the detection of many errors at compile-time. Unfortunately, mode checking in such languages is difficult. One of the main reasons is that, for each predicate mode declaration, the compiler is required to appropriately re-order literals in the predicate's definition. The task is further complicated by the need to handle complex instantiations (which interact with type declarations and higher-order predicates) and automatic initialization of solver variables. Here we define mode checking for strongly typed CLP languages which require reordering of clause body literals. In addition, we show how to handle a simple case of polymorphic modes by using the corresponding polymorphic types.<|reference_end|> | arxiv | @article{de la banda2004checking,
title={Checking modes of HAL programs},
author={Maria Garcia de la Banda, Warwick Harvey, Kim Marriott, Peter J.
Stuckey, Bart Demoen},
journal={Theory and Practice of Logic Programming: 5(6):623-668, 2005},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409038},
primaryClass={cs.PL}
} | de la banda2004checking |
arxiv-672164 | cs/0409039 | NP - P is not empty | <|reference_start|>NP - P is not empty: We present the MEoP problem, which decides the existence of solutions to certain modular equations over prime numbers, and show how this separates the complexity class NP from its subclass P.<|reference_end|> | arxiv | @article{ionescu2004np,
title={NP - P is not empty},
author={Marius Constantin Ionescu},
journal={arXiv preprint arXiv:cs/0409039},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409039},
primaryClass={cs.CC cs.CR}
} | ionescu2004np |
arxiv-672165 | cs/0409040 | Unification of Fusion Theories | <|reference_start|>Unification of Fusion Theories: Since no fusion theory nor rule fully satisfies all needed applications, the author proposes a Unification of Fusion Theories and a combination of fusion rules for solving problems/applications. For each particular application, one selects the most appropriate model, rule(s), and algorithm of implementation. We are working on the unification of fusion theories and rules, which looks like a cooking recipe, or better, like a logical chart for a computer programmer, but we do not see another method to unify all these elements. The unification scenario presented herein, which is now in an incipient form, should periodically be updated to incorporate new discoveries from fusion and engineering research.<|reference_end|> | arxiv | @article{smarandache2004unification,
title={Unification of Fusion Theories},
author={Florentin Smarandache},
journal={Presented at NATO Advanced Study Institute, Albena, Bulgaria,
16-27 May 2005. International Journal of Applied Mathematics & Statistics,
Vol. 2, 1-14, 2004.},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409040},
primaryClass={cs.AI}
} | smarandache2004unification |
arxiv-672166 | cs/0409041 | Four Principles Fundamental to Design Practice for Human Centred Systems | <|reference_start|>Four Principles Fundamental to Design Practice for Human Centred Systems: A survey of the principal literature on Human Centred Design reveals the four most referenced principles. These are discussed with reference to their application to a particular website, and a user survey is constructed based upon the four principles.<|reference_end|> | arxiv | @article{hinze-hoare2004four,
title={Four Principles Fundamental to Design Practice for Human Centred Systems},
author={Vita Hinze-Hoare},
journal={arXiv preprint arXiv:cs/0409041},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409041},
primaryClass={cs.HC}
} | hinze-hoare2004four |
arxiv-672167 | cs/0409042 | A new architecture for making highly scalable applications | <|reference_start|>A new architecture for making highly scalable applications: An application is a logical image of the world on a computer. A scalable application is an application that allows one to update that logical image at run time. To put it in operational terms: an application is scalable if a client can change, between time T1 and time T2, (i) the logic of the application as expressed by language L; (ii) the structure and volume of the stored knowledge; (iii) the user interface of the application; while clients working with the application at time T1 will work with the changed application at time T2 without performing any special action between T1 and T2. In order to realize such a scalable application a new architecture has been developed that fully orbits around language. In order to verify the soundness of that architecture a program has been built. Both architecture and program are called CommunSENS. The main purpose of this paper is: (i) to list the relevant elements of the architecture; (ii) to give a visual presentation of what the program and its image of the world look like; (iii) to give a visual presentation of how the image can be updated. Some relevant philosophical and practical backgrounds are included in the appendixes.<|reference_end|> | arxiv | @article{fitié2004a,
title={A new architecture for making highly scalable applications},
author={Harry Fiti'e},
journal={arXiv preprint arXiv:cs/0409042},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409042},
primaryClass={cs.HC cs.CL}
} | fitié2004a |
arxiv-672168 | cs/0409043 | Inapproximability of Combinatorial Optimization Problems | <|reference_start|>Inapproximability of Combinatorial Optimization Problems: We survey results on the hardness of approximating combinatorial optimization problems.<|reference_end|> | arxiv | @article{trevisan2004inapproximability,
title={Inapproximability of Combinatorial Optimization Problems},
author={Luca Trevisan},
journal={arXiv preprint arXiv:cs/0409043},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409043},
primaryClass={cs.CC}
} | trevisan2004inapproximability |
arxiv-672169 | cs/0409044 | Some Applications of Coding Theory in Computational Complexity | <|reference_start|>Some Applications of Coding Theory in Computational Complexity: Error-correcting codes and related combinatorial constructs play an important role in several recent (and old) results in computational complexity theory. In this paper we survey results on locally-testable and locally-decodable error-correcting codes, and their applications to complexity theory and to cryptography. Locally decodable codes are error-correcting codes with sub-linear time error-correcting algorithms. They are related to private information retrieval (a type of cryptographic protocol), and they are used in average-case complexity and to construct ``hard-core predicates'' for one-way permutations. Locally testable codes are error-correcting codes with sub-linear time error-detection algorithms, and they are the combinatorial core of probabilistically checkable proofs.<|reference_end|> | arxiv | @article{trevisan2004some,
title={Some Applications of Coding Theory in Computational Complexity},
author={Luca Trevisan},
journal={arXiv preprint arXiv:cs/0409044},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409044},
primaryClass={cs.CC cs.IT math.IT}
} | trevisan2004some |
arxiv-672170 | cs/0409045 | Augmenting ALC(D) (atemporal) roles and (aspatial) concrete domain with temporal roles and a spatial concrete domain -first results | <|reference_start|>Augmenting ALC(D) (atemporal) roles and (aspatial) concrete domain with temporal roles and a spatial concrete domain -first results: We consider the well-known family ALC(D) of description logics with a concrete domain, and provide first results on a framework obtained by augmenting ALC(D)'s atemporal roles and aspatial concrete domain with temporal roles and a spatial concrete domain.<|reference_end|> | arxiv | @article{isli2004augmenting,
title={Augmenting ALC(D) (atemporal) roles and (aspatial) concrete domain with
temporal roles and a spatial concrete domain -first results},
author={Amar Isli},
journal={arXiv preprint arXiv:cs/0409045},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409045},
primaryClass={cs.AI cs.LO}
} | isli2004augmenting |
arxiv-672171 | cs/0409046 | A TCSP-like decidable constraint language generalising existing cardinal direction relations | <|reference_start|>A TCSP-like decidable constraint language generalising existing cardinal direction relations: We define a quantitative constraint language subsuming two calculi well-known in QSR (Qualitative Spatial Reasoning): Frank's cone-shaped and projection-based calculi of cardinal direction relations. We show how to solve a CSP (Constraint Satisfaction Problem) expressed in the language.<|reference_end|> | arxiv | @article{isli2004a,
title={A TCSP-like decidable constraint language generalising existing cardinal
direction relations},
author={Amar Isli},
journal={arXiv preprint arXiv:cs/0409046},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409046},
primaryClass={cs.AI cs.LO}
} | isli2004a |
arxiv-672172 | cs/0409047 | An ALC(D)-based combination of temporal constraints and spatial constraints suitable for continuous (spatial) change | <|reference_start|>An ALC(D)-based combination of temporal constraints and spatial constraints suitable for continuous (spatial) change: We present a family of spatio-temporal theories suitable for continuous spatial change in general, and for continuous motion of spatial scenes in particular. The family is obtained by spatio-temporalising the well-known ALC(D) family of Description Logics (DLs) with a concrete domain D, as follows, where TCSPs denotes "Temporal Constraint Satisfaction Problems", a well-known constraint-based framework: (1) temporalisation of the roles, so that they consist of TCSP constraints (specifically, of an adaptation of TCSP constraints to interval variables); and (2) spatialisation of the concrete domain D: the concrete domain is now $D_x$, and is generated by a spatial Relation Algebra (RA) $x$, in the style of the Region-Connection Calculus RCC8. We assume durative truth (i.e., holding during a durative interval). We also assume the homogeneity property (if a truth holds during a given interval, it holds during all of its subintervals). Among other things, these assumptions raise the "conflicting" problem of overlapping truths, which the work solves with the use of a specific partition of the 13 atomic relations of Allen's interval algebra.<|reference_end|> | arxiv | @article{isli2004an,
title={An ALC(D)-based combination of temporal constraints and spatial
constraints suitable for continuous (spatial) change},
author={Amar Isli},
journal={arXiv preprint arXiv:cs/0409047},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409047},
primaryClass={cs.AI cs.LO}
} | isli2004an |
arxiv-672173 | cs/0409048 | FORM Matters: Fast Symbolic Computation under UNIX | <|reference_start|>FORM Matters: Fast Symbolic Computation under UNIX: We give a brief introduction to FORM, a symbolic programming language for massive batch operations, designed by J.A.M. Vermaseren. In particular, we stress various methods to efficiently use FORM under the UNIX operating system. Several scripts and examples are given, along with suggestions on how to use the vim editor as a development platform.<|reference_end|> | arxiv | @article{tung2004form,
title={FORM Matters: Fast Symbolic Computation under UNIX},
author={Michael M. Tung (Universidad Politecnica de Valencia)},
journal={Comp. Math. Appl. 49 (2005) 1127-1137},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409048},
primaryClass={cs.SC}
} | tung2004form |
arxiv-672174 | cs/0409049 | A Directed-Threshold Multi-Signature Scheme | <|reference_start|>A Directed-Threshold Multi-Signature Scheme: In this paper, we propose a Directed Threshold Multi-Signature Scheme. In this threshold signature scheme, any malicious set of signers cannot impersonate any other set of signers to forge the signatures. In case of forgery, it is possible to trace the signing set. This threshold signature scheme is applicable when the message is sensitive to the signature receiver, and the signatures are generated by the cooperation of a number of people from a given group of senders.<|reference_end|> | arxiv | @article{lal2004a,
title={A Directed-Threshold Multi-Signature Scheme},
author={Sunder Lal and Manoj Kumar},
journal={arXiv preprint arXiv:cs/0409049},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409049},
primaryClass={cs.CR}
} | lal2004a |
arxiv-672175 | cs/0409050 | Some Applications of Directed Signature Scheme | <|reference_start|>Some Applications of Directed Signature Scheme: A directed signature scheme is applicable when the signed message contains information sensitive to the receiver, because only the receiver can directly verify the signature, and he/she can prove its validity to any third party whenever necessary. This paper presents two applications of the directed signature scheme. (i) Directed-Delegated Signature Scheme. This scheme combines the idea of proxy signatures with the directed signature scheme. (ii) Allocation of registration numbers. This application proposes a registration scheme in which the registration number cannot be forged or misused.<|reference_end|> | arxiv | @article{lal2004some,
title={Some Applications of Directed Signature Scheme},
author={Sunder Lal and Manoj Kumar},
journal={arXiv preprint arXiv:cs/0409050},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409050},
primaryClass={cs.CR}
} | lal2004some |
arxiv-672176 | cs/0409051 | Quantum Complexity Classes | <|reference_start|>Quantum Complexity Classes: In our thesis, we try to shed more light on quantum complexity classes by refining the related part of the hierarchy. First, we review the basic concepts of quantum computing in general. Then, inspired by BQP, we define new complexity classes. They are placed between BPP and PSPACE. We show that they incorporate the current important quantum algorithms. Furthermore, the importance of the unitarity constraint given by quantum mechanics is revealed. Without this requirement, we naturally arrive at the class AWPP, which was up to now thought to be just an artificially defined class. We hope that some of our newly defined classes could find use in proving results about BQP.<|reference_end|> | arxiv | @article{tusarova2004quantum,
title={Quantum Complexity Classes},
author={Tereza Tusarova},
journal={arXiv preprint arXiv:cs/0409051},
year={2004},
number={IR-TI-001},
archivePrefix={arXiv},
eprint={cs/0409051},
primaryClass={cs.CC quant-ph}
} | tusarova2004quantum |
arxiv-672177 | cs/0409052 | Better Quasi-Ordered Transition Systems | <|reference_start|>Better Quasi-Ordered Transition Systems: Many existing algorithms for model checking of infinite-state systems operate on constraints which are used to represent (potentially infinite) sets of states. A general powerful technique which can be employed for proving termination of these algorithms is that of well quasi-orderings. Several methodologies have been proposed for derivation of new well quasi-ordered constraint systems. However, many of these constraint systems suffer from a "constraint explosion problem", as the number of the generated constraints grows exponentially with the size of the problem. In this paper, we demonstrate that a refinement of the theory of well quasi-orderings, called the theory of better quasi-orderings, is more appropriate for symbolic model checking, since it allows inventing constraint systems which are both well quasi-ordered and compact. As a main application, we introduce existential zones, a constraint system for verification of systems with unboundedly many clocks and use our methodology to prove that existential zones are better quasi-ordered. We show how to use existential zones in verification of timed Petri nets and present some experimental results. Also, we apply our methodology to derive new constraint systems for verification of broadcast protocols, lossy channel systems, and integral relational automata. The new constraint systems are exponentially more succinct than existing ones, and their well quasi-ordering cannot be shown by previous methods in the literature.<|reference_end|> | arxiv | @article{abdulla2004better,
title={Better Quasi-Ordered Transition Systems},
author={Parosh Aziz Abdulla and Aletta Nylen},
journal={arXiv preprint arXiv:cs/0409052},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409052},
primaryClass={cs.LO}
} | abdulla2004better |
arxiv-672178 | cs/0409053 | On the role of MMSE estimation in approaching the information-theoretic limits of linear Gaussian channels: Shannon meets Wiener | <|reference_start|>On the role of MMSE estimation in approaching the information-theoretic limits of linear Gaussian channels: Shannon meets Wiener: We discuss why MMSE estimation arises in lattice-based schemes for approaching the capacity of linear Gaussian channels, and comment on its properties.<|reference_end|> | arxiv | @article{forney2004on,
title={On the role of MMSE estimation in approaching the information-theoretic
limits of linear Gaussian channels: Shannon meets Wiener},
author={G. David Forney Jr},
journal={Proc. 2003 Allerton Conf. (Monticello, IL), pp. 430-439, Oct. 2003},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409053},
primaryClass={cs.IT math.IT}
} | forney2004on |
arxiv-672179 | cs/0409054 | An Approximation Algorithm for Stackelberg Network Pricing | <|reference_start|>An Approximation Algorithm for Stackelberg Network Pricing: We consider the problem of maximizing the revenue raised from tolls set on the arcs of a transportation network, under the constraint that users are assigned to toll-compatible shortest paths. We first prove that this problem is strongly NP-hard. We then provide a polynomial time algorithm with a worst-case precision guarantee of ${1/2}\log_2 m_T+1$, where $m_T$ denotes the number of toll arcs. Finally we show that the approximation is tight with respect to a natural relaxation by constructing a family of instances for which the relaxation gap is reached.<|reference_end|> | arxiv | @article{roch2004an,
title={An Approximation Algorithm for Stackelberg Network Pricing},
author={S. Roch, P. Marcotte, G. Savard},
journal={arXiv preprint arXiv:cs/0409054},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409054},
primaryClass={cs.GT math.OC}
} | roch2004an |
arxiv-672180 | cs/0409055 | The Building of Online Communities: An approach for learning organizations, with a particular focus on the museum sector | <|reference_start|>The Building of Online Communities: An approach for learning organizations, with a particular focus on the museum sector: This paper considers the move toward and potential of building online communities, with a particular focus on the museum sector. For instance, the increasing use of 'personalized' toolkits, which are becoming an integral part of the online presence of learning organizations like museums, can provide a basis for creating and sustaining communities. A set of case studies further illustrates working examples of the ways in which personalization and specific tools are developing collaborative spaces, community channels and group interactions.<|reference_end|> | arxiv | @article{beler2004the,
title={The Building of Online Communities: An approach for learning
organizations, with a particular focus on the museum sector},
author={Alpay Beler, Ann Borda, Jonathan P. Bowen, Silvia Filippini-Fantoni},
journal={In James Hemsley, Vito Cappellini and Gerd Stanke (eds.), EVA 2004
London Conference Proceedings, University College London, The Institute of
Archaeology, UK, 26-30 July 2004, pages 2.1-2.15},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409055},
primaryClass={cs.CY cs.DL}
} | beler2004the |
arxiv-672181 | cs/0409056 | Using sparse matrices and splines-based interpolation in computational fluid dynamics simulations | <|reference_start|>Using sparse matrices and splines-based interpolation in computational fluid dynamics simulations: In this report I present a technique for the construction and fast evaluation of a family of cubic polynomials for analytic smoothing and graphical rendering of particle trajectories for flows in a generic geometry. The principal result of the work was the implementation and testing of a method for interpolating 3D points by regular parametric curves and their fast and efficient evaluation at a good rendering resolution. For this purpose, a parallel environment using a multiprocessor cluster architecture has been used. This work has been developed for the Research and Development Department of my company for planning advanced customized models of industrial burners.<|reference_end|> | arxiv | @article{argentini2004using,
title={Using sparse matrices and splines-based interpolation in computational
fluid dynamics simulations},
author={Gianluca Argentini},
journal={arXiv preprint arXiv:cs/0409056},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409056},
primaryClass={cs.NA cs.CE physics.comp-ph}
} | argentini2004using |
arxiv-672182 | cs/0409057 | Fast Construction of Nets in Low Dimensional Metrics, and Their Applications | <|reference_start|>Fast Construction of Nets in Low Dimensional Metrics, and Their Applications: We present a near linear time algorithm for constructing hierarchical nets in finite metric spaces with constant doubling dimension. This data-structure is then applied to obtain improved algorithms for the following problems: Approximate nearest neighbor search, well-separated pair decomposition, compact representation scheme, doubling measure, and computation of the (approximate) Lipschitz constant of a function. In all cases, the running (preprocessing) time is near-linear and the space being used is linear.<|reference_end|> | arxiv | @article{har-peled2004fast,
title={Fast Construction of Nets in Low Dimensional Metrics, and Their
Applications},
author={Sariel Har-Peled, Manor Mendel},
journal={SIAM J. Comput. 35(5):1148-1184, 2006},
year={2004},
doi={10.1137/S0097539704446281},
archivePrefix={arXiv},
eprint={cs/0409057},
primaryClass={cs.DS cs.CG}
} | har-peled2004fast |
arxiv-672183 | cs/0409058 | A Sentimental Education: Sentiment Analysis Using Subjectivity Summarization Based on Minimum Cuts | <|reference_start|>A Sentimental Education: Sentiment Analysis Using Subjectivity Summarization Based on Minimum Cuts: Sentiment analysis seeks to identify the viewpoint(s) underlying a text span; an example application is classifying a movie review as "thumbs up" or "thumbs down". To determine this sentiment polarity, we propose a novel machine-learning method that applies text-categorization techniques to just the subjective portions of the document. Extracting these portions can be implemented using efficient techniques for finding minimum cuts in graphs; this greatly facilitates incorporation of cross-sentence contextual constraints.<|reference_end|> | arxiv | @article{pang2004a,
title={A Sentimental Education: Sentiment Analysis Using Subjectivity
Summarization Based on Minimum Cuts},
author={Bo Pang and Lillian Lee},
journal={Proceedings of the 42nd ACL, pp. 271--278, 2004},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409058},
primaryClass={cs.CL}
} | pang2004a |
arxiv-672184 | cs/0409059 | From Digital Television to Internet? | <|reference_start|>From Digital Television to Internet?: This paper provides a general technical overview of the Multimedia Home Platform (MHP) specifications. MHP is a generic interface between digital applications and user machines, whether they happen to be set-top boxes, digital TV sets or multimedia PCs. MHP extends the DVB open standards. The MHP architecture, system core and MHP profiles are addressed.<|reference_end|> | arxiv | @article{hinze-hoare2004from,
title={From Digital Television to Internet?},
author={Vita Hinze-Hoare},
journal={arXiv preprint arXiv:cs/0409059},
year={2004},
archivePrefix={arXiv},
eprint={cs/0409059},
primaryClass={cs.MM cs.CY}
} | hinze-hoare2004from |
arxiv-672185 | cs/0410001 | The Infati Data | <|reference_start|>The Infati Data: The ability to perform meaningful empirical studies is of essence in research in spatio-temporal query processing. Such studies are often necessary to gain detailed insight into the functional and performance characteristics of proposals for new query processing techniques. We present a collection of spatio-temporal data, collected during an intelligent speed adaptation project, termed INFATI, in which some two dozen cars equipped with GPS receivers and logging equipment took part. We describe how the data was collected and how it was "modified" to afford the drivers some degree of anonymity. We also present the road network in which the cars were moving during data collection. The GPS data is publicly available for non-commercial purposes. It is our hope that this resource will help the spatio-temporal research community in its efforts to develop new and better query processing techniques.<|reference_end|> | arxiv | @article{jensen2004the,
title={The Infati Data},
author={C. S. Jensen, H. Lahrmann, S. Pakalnis, and J. Runge},
journal={arXiv preprint arXiv:cs/0410001},
year={2004},
number={TR-79},
archivePrefix={arXiv},
eprint={cs/0410001},
primaryClass={cs.DB}
} | jensen2004the |
arxiv-672186 | cs/0410002 | Shannon Information and Kolmogorov Complexity | <|reference_start|>Shannon Information and Kolmogorov Complexity: We compare the elementary theories of Shannon information and Kolmogorov complexity, the extent to which they have a common purpose, and where they are fundamentally different. We discuss and relate the basic notions of both theories: Shannon entropy versus Kolmogorov complexity, the relation of both to universal coding, Shannon mutual information versus Kolmogorov (`algorithmic') mutual information, probabilistic sufficient statistic versus algorithmic sufficient statistic (related to lossy compression in the Shannon theory versus meaningful information in the Kolmogorov theory), and rate distortion theory versus Kolmogorov's structure function. Part of the material has appeared in print before, scattered through various publications, but this is the first comprehensive systematic comparison. The last mentioned relations are new.<|reference_end|> | arxiv | @article{grunwald2004shannon,
title={Shannon Information and Kolmogorov Complexity},
author={Peter Grunwald (CWI) and Paul Vitanyi (CWI and University of
Amsterdam)},
journal={There are some errors in this paper draft; when in doubt see the
textbook Li, Vitanyi, An Introduction to Kolmogorov Complexity and Its
Applications, Springer, 1993, 1997, 2008, 2019},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410002},
primaryClass={cs.IT math.IT}
} | grunwald2004shannon |
arxiv-672187 | cs/0410003 | Capacity and Random-Coding Exponents for Channel Coding with Side Information | <|reference_start|>Capacity and Random-Coding Exponents for Channel Coding with Side Information: Capacity formulas and random-coding exponents are derived for a generalized family of Gel'fand-Pinsker coding problems. These exponents yield asymptotic upper bounds on the achievable log probability of error. In our model, information is to be reliably transmitted through a noisy channel with finite input and output alphabets and random state sequence, and the channel is selected by a hypothetical adversary. Partial information about the state sequence is available to the encoder, adversary, and decoder. The design of the transmitter is subject to a cost constraint. Two families of channels are considered: 1) compound discrete memoryless channels (CDMC), and 2) channels with arbitrary memory, subject to an additive cost constraint, or more generally to a hard constraint on the conditional type of the channel output given the input. Both problems are closely connected. The random-coding exponent is achieved using a stacked binning scheme and a maximum penalized mutual information decoder, which may be thought of as an empirical generalized Maximum a Posteriori decoder. For channels with arbitrary memory, the random-coding exponents are larger than their CDMC counterparts. Applications of this study include watermarking, data hiding, communication in presence of partially known interferers, and problems such as broadcast channels, all of which involve the fundamental idea of binning.<|reference_end|> | arxiv | @article{moulin2004capacity,
title={Capacity and Random-Coding Exponents for Channel Coding with Side
Information},
author={Pierre Moulin and Ying Wang},
journal={arXiv preprint arXiv:cs/0410003},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410003},
primaryClass={cs.IT math.IT}
} | moulin2004capacity |
arxiv-672188 | cs/0410004 | Applying Policy Iteration for Training Recurrent Neural Networks | <|reference_start|>Applying Policy Iteration for Training Recurrent Neural Networks: Recurrent neural networks are often used for learning time-series data. Based on a few assumptions we model this learning task as a minimization problem of a nonlinear least-squares cost function. The special structure of the cost function allows us to build a connection to reinforcement learning. We exploit this connection and derive a convergent, policy iteration-based algorithm. Furthermore, we argue that RNN training can be fit naturally into the reinforcement learning framework.<|reference_end|> | arxiv | @article{szita2004applying,
title={Applying Policy Iteration for Training Recurrent Neural Networks},
author={I. Szita and A. Lorincz},
journal={arXiv preprint arXiv:cs/0410004},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410004},
primaryClass={cs.AI cs.LG cs.NE}
} | szita2004applying |
arxiv-672189 | cs/0410005 | A dynamical model of a GRID market | <|reference_start|>A dynamical model of a GRID market: We discuss potential market mechanisms for the GRID. A complete dynamical model of a GRID market is defined with three types of agents. Providers, middlemen and users exchange universal GRID computing units (GCUs) at varying prices. Providers and middlemen have strategies aimed at maximizing profit while users are 'satisficing' agents, and only change their behavior if the service they receive is sufficiently poor or overpriced. Preliminary results from a multi-agent numerical simulation of the market model show that the distribution of price changes has a power-law tail.<|reference_end|> | arxiv | @article{harder2004a,
title={A dynamical model of a GRID market},
author={Uli Harder, Peter Harrison, Maya Paczuski and Tejas Shah},
journal={arXiv preprint arXiv:cs/0410005},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410005},
primaryClass={cs.MA cond-mat.other cs.CE}
} | harder2004a |
arxiv-672190 | cs/0410006 | Demo or Practice: Critical Analysis of the Language/Action Perspective | <|reference_start|>Demo or Practice: Critical Analysis of the Language/Action Perspective: Despite offering several promising concepts, the Language/Action Perspective (LAP) is still not in the mainstream of Information Systems Development (ISD). Since at present there is only a limited understanding of LAP theory and practice, it remains unclear whether the lack of LAP's impact is due to shortcomings in LAP theory itself. One classic problem within ISD is the dichotomy between social perspectives and technical perspectives. LAP claims it offers a solution to this problem. This paper investigates this claim as a means to review LAP theory. To provide structure to a critical analysis of DEMO, an example methodology that belongs to the LAP research community, this paper utilizes a paradigmatic framework. This framework is augmented with the opinions of several DEMO practitioners by means of an expert discussion. Using a comparative evaluation of LAP theory and DEMO theory, the implications of the DEMO analysis for LAP are determined. The paper concludes by outlining an agenda for further research if LAP is to improve its footprint in the field.<|reference_end|> | arxiv | @article{dumay2004demo,
title={Demo or Practice: Critical Analysis of the Language/Action Perspective},
author={Mark Dumay},
journal={arXiv preprint arXiv:cs/0410006},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410006},
primaryClass={cs.OH}
} | dumay2004demo |
arxiv-672191 | cs/0410007 | A Shared Write-protected Root Filesystem for a Group of Networked Clients | <|reference_start|>A Shared Write-protected Root Filesystem for a Group of Networked Clients: A method to boot a cluster of diskless network clients from a single write-protected NFS root file system is shown. The problems encountered when first implementing the setup and their solution are discussed. Finally, the setup is briefly compared to using a kernel-embedded root file system.<|reference_end|> | arxiv | @article{souvatzis2004a,
title={A Shared Write-protected Root Filesystem for a Group of Networked
Clients},
author={Ignatios Souvatzis},
journal={Proceedings of the 2nd European BSD Conference, 2002, Amsterdam,
The Netherlands},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410007},
primaryClass={cs.OS cs.DC}
} | souvatzis2004a |
arxiv-672192 | cs/0410008 | Source Coding with Fixed Lag Side Information | <|reference_start|>Source Coding with Fixed Lag Side Information: We consider source coding with fixed lag side information at the decoder. We focus on the special case of perfect side information with unit lag corresponding to source coding with feedforward (the dual of channel coding with feedback) introduced by Pradhan. We use this duality to develop a linear complexity algorithm which achieves the rate-distortion bound for any memoryless finite alphabet source and distortion measure.<|reference_end|> | arxiv | @article{martinian2004source,
title={Source Coding with Fixed Lag Side Information},
author={Emin Martinian and Gregory W. Wornell},
journal={Allerton Conference on Communication, Control, and Computing;
October 2004},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410008},
primaryClass={cs.IT math.IT}
} | martinian2004source |
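For reference, the bound the abstract claims the scheme achieves is the classical rate-distortion function of a memoryless source. The definition below is the standard one from rate-distortion theory, not an equation quoted from the paper:

```latex
% Rate-distortion function of a memoryless source X ~ p(x) under
% distortion measure d; the paper's linear-complexity feedforward
% scheme is claimed to achieve this bound.
R(D) = \min_{p(\hat{x} \mid x) \,:\, \mathbb{E}[d(X,\hat{X})] \le D} I(X; \hat{X})
```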
arxiv-672193 | cs/0410009 | Diffusive Load Balancing of Loosely-Synchronous Parallel Programs over Peer-to-Peer Networks | <|reference_start|>Diffusive Load Balancing of Loosely-Synchronous Parallel Programs over Peer-to-Peer Networks: The use of under-utilized Internet resources is widely recognized as a viable form of high performance computing. Sustained processing power of roughly 40 TFLOPS using 4 million volunteered Internet hosts has been reported for embarrassingly parallel problems. At the same time, peer-to-peer (P2P) file sharing networks, with more than 50 million participants, have demonstrated the capacity for scale in distributed systems. This paper contributes a study of load balancing techniques for a general class of loosely-synchronous parallel algorithms when executed over a P2P network. We show that decentralized, diffusive load balancing can be effective at balancing load and is facilitated by the dynamic properties of P2P. While a moderate degree of dynamicity can benefit load balancing, significant dynamicity hinders parallel program performance due to the need for increased load migration. To the best of our knowledge, this study provides new insight into the performance of loosely-synchronous parallel programs over the Internet.<|reference_end|> | arxiv | @article{douglas2004diffusive,
title={Diffusive Load Balancing of Loosely-Synchronous Parallel Programs over
Peer-to-Peer Networks},
author={Scott Douglas and Aaron Harwood},
journal={arXiv preprint arXiv:cs/0410009},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410009},
primaryClass={cs.DC}
} | douglas2004diffusive |
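The following minimal Python sketch shows the textbook first-order diffusion step that "diffusive load balancing" refers to: each node repeatedly moves a fraction alpha of the load difference toward each neighbour. The five-node ring topology, alpha value, and initial loads are illustrative assumptions, not values from the paper.

```python
# First-order diffusive load balancing: every round, each node gains
# alpha * (neighbour's load - own load) from each neighbour, so total
# load is conserved and loads relax toward the uniform average.
def diffuse(load, neighbours, alpha=0.25, rounds=50):
    for _ in range(rounds):
        new = load.copy()
        for i, load_i in enumerate(load):
            for j in neighbours[i]:
                new[i] += alpha * (load[j] - load_i)
        load = new
    return load

# Hypothetical 5-node P2P overlay (a ring) with one initial hotspot.
ring = {0: [1, 4], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [3, 0]}
print(diffuse([100.0, 0.0, 0.0, 0.0, 0.0], ring))  # -> approaches 20.0 each
```

Convergence requires alpha times the maximum node degree to stay below one; P2P churn, as studied in the paper, amounts to this graph changing between rounds.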
arxiv-672194 | cs/0410010 | A New Proxy Ring Signature Scheme | <|reference_start|>A New Proxy Ring Signature Scheme: The concept of a ring signature was introduced by Rivest, Shamir and Tauman. A ring signature can be viewed as a simplified group signature in which the identity of the signer is ambiguous, although the verifier knows the group to which the signer belongs. In this paper we introduce a new proxy ring signature scheme.<|reference_end|> | arxiv | @article{awasthi2004a,
title={A New Proxy Ring Signature Scheme},
author={Amit K. Awasthi and Sunder Lal},
journal={Proceedings of RMS 2004, Page 29},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410010},
primaryClass={cs.CR math.RA}
} | awasthi2004a |
arxiv-672195 | cs/0410011 | Comment on A dynamic ID-based Remote User Authentication Scheme | <|reference_start|>Comment on A dynamic ID-based Remote User Authentication Scheme: Since 1981, when Lamport introduced the remote user authentication scheme using a verification table, many schemes have been proposed, both with and without such tables. Recently Das et al. proposed a dynamic ID-based remote user authentication scheme. They claimed that their scheme is secure against ID theft and can resist replay attacks, forgery attacks, insider attacks, and so on. In this paper we show that Das et al.'s scheme is completely insecure and that using this scheme amounts to open server access without a password.<|reference_end|> | arxiv | @article{awasthi2004comment,
title={Comment on A dynamic ID-based Remote User Authentication Scheme},
author={Amit K Awasthi},
journal={Transactions on Cryptology, Vol. 01, Issue 02, Page 15-17, Sep 2004},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410011},
primaryClass={cs.CR}
} | awasthi2004comment |
arxiv-672196 | cs/0410012 | DiPerF: an automated DIstributed PERformance testing Framework | <|reference_start|>DiPerF: an automated DIstributed PERformance testing Framework: We present DiPerF, a distributed performance testing framework aimed at simplifying and automating service performance evaluation. DiPerF coordinates a pool of machines that test a target service, collects and aggregates performance metrics, and generates performance statistics. The aggregate data collected provide information on service throughput, on service "fairness" when serving multiple clients concurrently, and on the impact of network latency on service performance. Furthermore, using these data, it is possible to build predictive models that estimate a service's performance at a given load. We have tested DiPerF on 100+ machines on two testbeds, Grid3 and PlanetLab, and explored the performance of job submission services (pre-WS GRAM and WS GRAM) included with Globus Toolkit 3.2.<|reference_end|> | arxiv | @article{dumitrescu2004diperf:,
title={DiPerF: an automated DIstributed PERformance testing Framework},
author={Catalin Dumitrescu, Ioan Raicu, Matei Ripeanu, Ian Foster},
journal={arXiv preprint arXiv:cs/0410012},
year={2004},
doi={10.1109/GRID.2004.21},
archivePrefix={arXiv},
eprint={cs/0410012},
primaryClass={cs.PF cs.DC}
} | dumitrescu2004diperf: |
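The abstract's aggregation step can be pictured with a small sketch: combine per-client response-time samples into throughput, latency, and a fairness number. The sample data and the use of Jain's fairness index are assumptions for illustration; the abstract does not specify DiPerF's internal metrics or interfaces.

```python
import statistics

# Hypothetical (client_id, second, response_time_s) samples, as if
# gathered from distributed tester machines.
samples = [(c, t, 0.2 + 0.01 * c + 0.05 * (t % 3))
           for c in range(5) for t in range(60)]

duration = max(t for _, t, _ in samples) + 1
throughput = len(samples) / duration               # completed calls per second
mean_latency = statistics.mean(r for _, _, r in samples)

# Fairness across clients via Jain's index (1.0 = perfectly fair).
per_client = [statistics.mean(r for cid, _, r in samples if cid == c)
              for c in range(5)]
jain = sum(per_client) ** 2 / (len(per_client) * sum(x * x for x in per_client))

print(f"throughput {throughput:.1f}/s, mean latency {mean_latency:.3f}s, "
      f"fairness {jain:.3f}")
```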
arxiv-672197 | cs/0410013 | Fibonacci connection between Huffman codes and Wythoff array | <|reference_start|>Fibonacci connection between Huffman codes and Wythoff array: A Fibonacci connection between the non-decreasing sequences of positive integers that produce maximum-height Huffman trees and the Wythoff array is proved.<|reference_end|> | arxiv | @article{vinokur2004fibonacci,
title={Fibonacci connection between Huffman codes and Wythoff array},
author={Alex Vinokur},
journal={arXiv preprint arXiv:cs/0410013},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410013},
primaryClass={cs.DM cs.DS math.CO math.NT}
} | vinokur2004fibonacci |
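The maximum-height side of the claim is easy to check numerically: Fibonacci-like weights force every Huffman merge to chain onto the previous one, yielding a tree of height n-1, the maximum possible for n symbols. A small sketch (the weight sequence is the standard illustration, not code from the paper):

```python
import heapq

def huffman_height(weights):
    # Standard Huffman construction with a min-heap, tracking only the
    # height of each subtree instead of the full tree.
    heap = [(w, 0) for w in weights]            # (weight, subtree height)
    heapq.heapify(heap)
    while len(heap) > 1:
        w1, h1 = heapq.heappop(heap)
        w2, h2 = heapq.heappop(heap)
        heapq.heappush(heap, (w1 + w2, max(h1, h2) + 1))
    return heap[0][1]

fib = [1, 1]
while len(fib) < 10:
    fib.append(fib[-1] + fib[-2])               # 1, 1, 2, 3, 5, 8, ...
print(huffman_height(fib), "==", len(fib) - 1)  # height n-1: maximally skewed
```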
arxiv-672198 | cs/0410014 | Normal forms for Answer Sets Programming | <|reference_start|>Normal forms for Answer Sets Programming: Normal forms for logic programs under stable/answer set semantics are introduced. We argue that these forms can simplify the study of program properties, mainly consistency. The first normal form, called the {\em kernel} of the program, is useful for studying the existence and number of answer sets. A kernel program is composed of the atoms which are undefined in the Well-founded semantics, which are those that directly affect the existence of answer sets. The bodies of rules are composed of negative literals only. Thus, the kernel form tends to be significantly more compact than other formulations. Also, it is possible to check consistency of kernel programs in terms of colorings of the Extended Dependency Graph program representation, which we previously developed. The second normal form is called {\em 3-kernel.} A 3-kernel program is composed of the atoms which are undefined in the Well-founded semantics. Rules in 3-kernel programs have at most two conditions, and each rule either belongs to a cycle or defines a connection between cycles. 3-kernel programs may have positive conditions. The 3-kernel normal form is very useful for the static analysis of program consistency, i.e., the syntactic characterization of the existence of answer sets. This result can be obtained thanks to a novel graph-like representation of programs, called the Cycle Graph, which is presented in the companion article \cite{Cos04b}.<|reference_end|> | arxiv | @article{costantini2004normal,
title={Normal forms for Answer Sets Programming},
author={Stefania Costantini and Alessandro Provetti},
journal={arXiv preprint arXiv:cs/0410014},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410014},
primaryClass={cs.AI}
} | costantini2004normal |
arxiv-672199 | cs/0410015 | L1 regularization is better than L2 for learning and predicting chaotic systems | <|reference_start|>L1 regularization is better than L2 for learning and predicting chaotic systems: Emergent behaviors are a focus of recent research interest. It is therefore of considerable importance to investigate which optimizations suit the learning and prediction of chaotic systems, the putative candidates for emergence. We have compared L1 and L2 regularization for predicting chaotic time series using linear recurrent neural networks. The internal representation and the weights of the networks were optimized in a unifying framework. Computational tests on different problems indicate clear advantages for L1 regularization: it had considerably better learning time and better interpolating capabilities. We argue that viewing optimization as maximum likelihood estimation justifies our results, because L1 regularization better fits heavy-tailed distributions, an apparently general feature of emergent systems.<|reference_end|> | arxiv | @article{szabo2004l1,
title={L1 regularization is better than L2 for learning and predicting chaotic
systems},
author={Z. Szabo and A. Lorincz},
journal={arXiv preprint arXiv:cs/0410015},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410015},
primaryClass={cs.LG cs.AI}
} | szabo2004l1 |
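A minimal reproduction of the experimental idea, with assumed ingredients: a logistic-map series standing in for the chaotic data, and off-the-shelf scikit-learn penalized linear regressors standing in for the authors' linear recurrent networks. Since the logistic map is nonlinear, a linear predictor fits it only approximately; the point is the L1-versus-L2 comparison of test error and weight sparsity, not the paper's numbers.

```python
import numpy as np
from sklearn.linear_model import Lasso, Ridge

x = np.empty(2000)
x[0] = 0.3
for t in range(1999):                 # logistic map: a standard chaotic series
    x[t + 1] = 4.0 * x[t] * (1.0 - x[t])

LAGS = 8                              # predict x[t+LAGS] from the last LAGS values
X = np.column_stack([x[i:len(x) - LAGS + i] for i in range(LAGS)])
y = x[LAGS:]
Xtr, Xte, ytr, yte = X[:1500], X[1500:], y[:1500], y[1500:]

for name, model in [("L1 (Lasso)", Lasso(alpha=1e-4, max_iter=50000)),
                    ("L2 (Ridge)", Ridge(alpha=1e-4))]:
    model.fit(Xtr, ytr)
    mse = np.mean((model.predict(Xte) - yte) ** 2)
    nonzero = int(np.sum(np.abs(model.coef_) > 1e-6))
    print(f"{name}: test MSE {mse:.4g}, nonzero weights {nonzero}/{LAGS}")
```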
arxiv-672200 | cs/0410016 | HEP@Home - A distributed computing system based on BOINC | <|reference_start|>HEP@Home - A distributed computing system based on BOINC: Project SETI@HOME has proven to be one of the biggest successes of distributed computing in recent years. With a quite simple approach SETI manages to process large volumes of data using a vast amount of distributed computer power. To extend the generic usage of this kind of distributed computing tool, BOINC is being developed. In this paper we propose HEP@HOME, a BOINC version tailored to the specific requirements of the High Energy Physics (HEP) community. HEP@HOME will be able to process large amounts of data using virtually unlimited computing power, as BOINC does, and it should be able to work according to HEP specifications. In HEP the amounts of data to be analyzed or reconstructed are of central importance. Therefore, one of the design principles of this tool is to avoid data transfer. This will allow scientists to run their analysis applications while taking advantage of a large number of CPUs. This tool also satisfies other important requirements in HEP, namely security, fault tolerance and monitoring.<|reference_end|> | arxiv | @article{amorim2004hep@home,
title={HEP@Home - A distributed computing system based on BOINC},
author={Antonio Amorim, Jaime Villate and Pedro Andrade},
journal={arXiv preprint arXiv:cs/0410016},
year={2004},
archivePrefix={arXiv},
eprint={cs/0410016},
primaryClass={cs.DC}
} | amorim2004hep@home |