corpus_id | paper_id | title | abstract | source | bibtex | citation_key
---|---|---|---|---|---|---|
arxiv-676101 | cs/9809082 | Congestion Control with Explicit Rate Indication | <|reference_start|>Congestion Control with Explicit Rate Indication: As the speed and the dynamic range of computer networks evolve, the issue of efficient traffic management becomes increasingly important. This work describes an approach to traffic management using explicit rate information provided to the source by the network. We present an asynchronous distributed algorithm for optimal rate calculation across the network, where optimality is understood in the maxmin sense. The algorithm quickly converges to the optimal rates and is shown to be well-behaved in transience.<|reference_end|> | arxiv | @article{charny1998congestion,
title={Congestion Control with Explicit Rate Indication},
author={A. Charny and D. Clark and R. Jain},
journal={arXiv preprint arXiv:cs/9809082},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809082},
primaryClass={cs.NI}
} | charny1998congestion |
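The charny1998congestion entry above defines optimality in the max-min sense. As a reading aid, here is a minimal, centralized sketch of max-min fair rate allocation via progressive filling; it illustrates the criterion only, not the paper's asynchronous distributed algorithm, and the function name and two-flow topology are illustrative assumptions.

```python
# Sketch only: centralized progressive filling for max-min fairness.
# The paper's contribution is a distributed, asynchronous computation
# of these rates; this toy version just defines the target allocation.

def max_min_rates(links, flows):
    """links: {link: capacity}; flows: {flow: list of links it traverses}.
    Returns a max-min fair {flow: rate} allocation."""
    rates, remaining, active = {}, dict(links), dict(flows)
    while active:
        # Fair share of each still-loaded link among its active flows.
        share = {l: cap / sum(1 for p in active.values() if l in p)
                 for l, cap in remaining.items()
                 if any(l in p for p in active.values())}
        bottleneck = min(share, key=share.get)   # saturates first
        rate = share[bottleneck]
        for f, path in list(active.items()):
            if bottleneck in path:
                rates[f] = rate                  # freeze flows at the bottleneck
                for l in path:
                    remaining[l] -= rate
                del active[f]
    return rates

# Two flows share link "a"; flow 2 also crosses the roomy link "b".
print(max_min_rates({"a": 1.0, "b": 10.0}, {1: ["a"], 2: ["a", "b"]}))
# -> {1: 0.5, 2: 0.5}
```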
arxiv-676102 | cs/9809083 | A Brief Overview of ATM: Protocol Layers, LAN Emulation, and Traffic Management | <|reference_start|>A Brief Overview of ATM: Protocol Layers, LAN Emulation, and Traffic Management: Asynchronous Transfer Mode (ATM) has emerged as the most promising technology for supporting future broadband multimedia communication services. To accelerate the deployment of ATM technology, the ATM Forum, a consortium of service providers and equipment vendors in the communication industries, has been created to develop implementation and specification agreements. In this article, we present a brief overview of ATM protocol layers and current progress on LAN Emulation and Traffic Management in the ATM Forum.<|reference_end|> | arxiv | @article{siu1998a,
title={A Brief Overview of ATM: Protocol Layers, LAN Emulation, and Traffic
Management},
author={K. Siu and R. Jain},
journal={Computer Communications Review (ACM SIGCOMM), vol 25, no 2, April
1995, pp. 6-28},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809083},
primaryClass={cs.NI}
} | siu1998a |
arxiv-676103 | cs/9809084 | ATM Networks: Issues and Challenges Ahead | <|reference_start|>ATM Networks: Issues and Challenges Ahead: The paper begins with a discussion of current trends in networking and a historical review of past networking technologies, some of which failed. This leads to a discussion of what it takes for a new technology to succeed and what challenges we face in making the current dream of a seamless world-wide high-speed ATM network a reality. Issues in using ATM cells for very high speed applications are presented. Ensuring that the users benefit from ATM networks involves several other related disciplines. These are reviewed.<|reference_end|> | arxiv | @article{jain1998atm,
title={ATM Networks: Issues and Challenges Ahead},
author={R. Jain},
journal={arXiv preprint arXiv:cs/9809084},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809084},
primaryClass={cs.NI}
} | jain1998atm |
arxiv-676104 | cs/9809085 | Congestion Control and Traffic Management in ATM Networks: Recent Advances and A Survey | <|reference_start|>Congestion Control and Traffic Management in ATM Networks: Recent Advances and A Survey: Congestion control mechanisms for ATM networks as selected by the ATM Forum traffic management group are described. Reasons behind these selections are explained. In particular, the criteria for choosing between the rate-based and credit-based approaches, and the key points of the debate between the two approaches, are presented. The approach that was finally selected and several other schemes that were considered are described.<|reference_end|> | arxiv | @article{jain1998congestion,
title={Congestion Control and Traffic Management in ATM Networks: Recent
Advances and A Survey},
author={R. Jain},
journal={Computer Networks and ISDN Systems, vol 28, no 13, February 1995,
pp. 1723-1738},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809085},
primaryClass={cs.NI}
} | jain1998congestion |
arxiv-676105 | cs/9809086 | FDDI: Current Issues and Future Trends | <|reference_start|>FDDI: Current Issues and Future Trends: Key issues in upcoming FDDI standards, including low-cost fiber, twisted-pair, SONET mapping, and the FDDI follow-on LAN, are discussed after a brief introduction to FDDI and FDDI-II.<|reference_end|> | arxiv | @article{jain1998fddi:,
title={FDDI: Current Issues and Future Trends},
author={R. Jain},
journal={IEEE Communications Magazine, September 1993, pp. 98-105},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809086},
primaryClass={cs.NI}
} | jain1998fddi: |
arxiv-676106 | cs/9809087 | A Comparison of Hashing Schemes for Address Lookup in Computer Networks | <|reference_start|>A Comparison of Hashing Schemes for Address Lookup in Computer Networks: Using a trace of address references, we compared the efficiency of several different hashing functions, such as cyclic redundancy checking (CRC) polynomials, Fletcher checksum, folding of address octets using the exclusive-or operation and bit extraction from the address. Guidelines are provided for determining the size of the hashmark required to achieve a specified level of performance.<|reference_end|> | arxiv | @article{jain1998a,
title={A Comparison of Hashing Schemes for Address Lookup in Computer Networks},
author={R. Jain},
journal={IEEE Transactions on Communications, Vol. 40, No. 3, October 1992,
pp. 1570-1573},
year={1998},
doi={10.1109/26.168785},
archivePrefix={arXiv},
eprint={cs/9809087},
primaryClass={cs.NI}
} | jain1998a |
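The schemes compared in the jain1998a entry above are all cheap many-to-few mappings from an address to a table index. Below is a minimal sketch of two of them, XOR folding of address octets and plain bit extraction; the 48-bit address and the table size are illustrative assumptions, not values from the paper.

```python
# Sketch only: two of the compared address-hashing schemes.

def xor_fold(addr: bytes, table_bits: int = 8) -> int:
    """XOR all address octets together, then keep the low table_bits bits."""
    h = 0
    for octet in addr:
        h ^= octet
    return h & ((1 << table_bits) - 1)

def bit_extract(addr: bytes, table_bits: int = 8) -> int:
    """Take the low-order bits of the last octet as the table index."""
    return addr[-1] & ((1 << table_bits) - 1)

mac = bytes.fromhex("08002b123456")   # illustrative 48-bit MAC address
print(xor_fold(mac), bit_extract(mac))
```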
arxiv-676107 | cs/9809088 | Myths about Congestion Management in High Speed Networks | <|reference_start|>Myths about Congestion Management in High Speed Networks: Weaknesses in several recently proposed ideas about congestion control and avoidance in high-speed networks are identified. Both sides of the debate concerning prior reservation of resources versus walk-in service, open-loop control versus feedback control, rate control versus window control, and router-based control versus source-based control are presented. The circumstances under which backpressure is useful or not are discussed, and it is argued that a single congestion scheme is not sufficient, but that a combination of several schemes is required for complete congestion management in a network.<|reference_end|> | arxiv | @article{jain1998myths,
title={Myths about Congestion Management in High Speed Networks},
author={R. Jain},
journal={Internetworking: Research and Experience, Volume 3, 1992, pp.
101-113},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809088},
primaryClass={cs.NI}
} | jain1998myths |
arxiv-676108 | cs/9809089 | Performance Analysis of FDDI Token Ring Networks: Effect of Parameters and Guidelines for Setting TTRT | <|reference_start|>Performance Analysis of FDDI Token Ring Networks: Effect of Parameters and Guidelines for Setting TTRT: The performance of the Fiber Distributed Data Interface (FDDI) depends upon several workload parameters, for example, the arrival pattern and frame size, and on configuration parameters, such as the number of stations on the ring, the extent of the ring, and the number of stations that are waiting to transmit. In addition, the performance is affected by a parameter called the Target Token Rotation Time (TTRT), which can be controlled by the network manager. We considered the effect of TTRT on various performance metrics for different ring configurations and concluded that a TTRT value of 8 ms provides good performance over a wide range of configurations and workloads.<|reference_end|> | arxiv | @article{jain1998performance,
title={Performance Analysis of FDDI Token Ring Networks: Effect of Parameters
and Guidelines for Setting TTRT},
author={R. Jain},
journal={IEEE Lightwave Telecommunication Systems, vol 20, no 2, May 1991,
pp. 16-22},
year={1998},
doi={10.1109/80.93284},
archivePrefix={arXiv},
eprint={cs/9809089},
primaryClass={cs.NI}
} | jain1998performance |
arxiv-676109 | cs/9809090 | Error Characteristics of Fiber Distributed Data Interface (FDDI) | <|reference_start|>Error Characteristics of Fiber Distributed Data Interface (FDDI): Fiber Distributed Data Interface (FDDI) is a 100 megabits per second fiber optic local area network (LAN) standard being developed by the American National Standard Institute (ANSI). We analyze the impact of various design decisions on the error detection capability of the protocol. In particular, we quantify frame error rate, token loss rate, and undetected error rate. Several characteristics of the 32-bit frame check sequence (FCS) polynomial, which is also used in IEEE 802 LAN protocols, are discussed.<|reference_end|> | arxiv | @article{jain1998error,
title={Error Characteristics of Fiber Distributed Data Interface (FDDI)},
author={R. Jain},
journal={IEEE Transactions on Communications, Vol. 38, No. 8, August 1990,
pp. 1224-1252},
year={1998},
doi={10.1109/26.58757},
archivePrefix={arXiv},
eprint={cs/9809090},
primaryClass={cs.NI}
} | jain1998error |
arxiv-676110 | cs/9809091 | Congestion Control in Computer Networks: Trends and Issues | <|reference_start|>Congestion Control in Computer Networks: Trends and Issues: Popular myths that cheaper memory, high-speed links and high-speed processors will solve the problem of congestion in computer networks are shown to be false. A simple definition for congestion based on supply and demand of resources is proposed and is then used to classify various congestion schemes. The issues that make the congestion problem a difficult one are discussed, and then the architectural decisions that affect the design of a congestion scheme are presented. It is argued that long-, medium- and short-term congestion problems require different solutions. Some of the recent schemes are briefly surveyed, and areas for further research are discussed.<|reference_end|> | arxiv | @article{jain1998congestion,
title={Congestion Control in Computer Networks: Trends and Issues},
author={R. Jain},
journal={IEEE Network, May 1990, pp. 24-30},
year={1998},
doi={10.1109/65.56532},
archivePrefix={arXiv},
eprint={cs/9809091},
primaryClass={cs.NI}
} | jain1998congestion |
arxiv-676111 | cs/9809092 | Characteristics of Destination Address Locality in Computer Networks: A Comparison of Caching Schemes | <|reference_start|>Characteristics of Destination Address Locality in Computer Networks: A Comparison of Caching Schemes: The size of computer networks, along with their bandwidths, is growing exponentially. To support these large, high-speed networks, it is necessary to be able to forward packets in a few microseconds. One part of the forwarding operation consists of searching through a large address database. This problem is encountered in the design of bridges, routers, gateways and name servers. Caching can reduce the lookup time if there is a locality in the address reference pattern. Using a destination reference trace measured on an extended local area network, we attempt to see if the destination references do have a significant locality. We compared the performance of MIN, LRU, FIFO, and random cache replacement algorithms. We found that the interactive (terminal) traffic in our sample had quite different locality behavior than that of the noninteractive traffic. The interactive traffic did not follow the LRU stack model while the noninteractive traffic did. Examples are shown of the environments in which caching can help as well as those in which caching can hurt, unless the cache size is large.<|reference_end|> | arxiv | @article{jain1998characteristics,
title={Characteristics of Destination Address Locality in Computer Networks: A
Comparison of Caching Schemes},
author={R. Jain},
journal={Journal of Computer Networks and ISDN, Vol. 18, 1989/90, pp.
243-254},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809092},
primaryClass={cs.NI}
} | jain1998characteristics |
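The trace-driven comparison described in the jain1998characteristics abstract can be reproduced in miniature: run the same address stream through caches that differ only in their replacement rule and compare hit ratios. The sketch below is hedged accordingly; the toy trace stands in for the measured destination trace, and only LRU and FIFO of the four studied policies are shown.

```python
# Sketch only: hit-ratio comparison of LRU vs FIFO replacement
# over a stream of destination addresses.

from collections import OrderedDict, deque

def hit_ratio_lru(trace, size):
    cache, hits = OrderedDict(), 0
    for addr in trace:
        if addr in cache:
            hits += 1
            cache.move_to_end(addr)        # refresh recency on a hit
        else:
            if len(cache) >= size:
                cache.popitem(last=False)  # evict least recently used
            cache[addr] = True
    return hits / len(trace)

def hit_ratio_fifo(trace, size):
    cache, order, hits = set(), deque(), 0
    for addr in trace:
        if addr in cache:
            hits += 1                      # FIFO ignores hits for ordering
        else:
            if len(cache) >= size:
                cache.discard(order.popleft())
            cache.add(addr)
            order.append(addr)
    return hits / len(trace)

trace = ["A", "B", "A", "C", "A", "B", "D", "A", "B", "C"]
print(hit_ratio_lru(trace, 2), hit_ratio_fifo(trace, 2))  # 0.2 vs 0.1
```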
arxiv-676112 | cs/9809093 | A Delay Based Approach for Congestion Avoidance in Interconnected Heterogeneous Computer Networks | <|reference_start|>A Delay Based Approach for Congestion Avoidance in Interconnected Heterogeneous Computer Networks: In heterogeneous networks, achieving congestion avoidance is difficult because the congestion feedback from one subnetwork may have no meaning to sources on other subnetworks. We propose using changes in round-trip delay as an implicit feedback. Using a black-box model of the network, we derive an expression for the optimal window as a function of the gradient of the delay-window curve. The problems of selfish optimum and social optimum are also addressed. It is shown that without a careful design, it is possible to get into a race condition during heavy congestion, where each user wants more resources than others, thereby leading to a diverging congestion. It is shown that congestion avoidance using round-trip delay is a promising approach. The approach has the advantage that there is absolutely no overhead for the network itself. It is exemplified by a simple scheme. The performance of the scheme is analyzed using a simulation model. The scheme is shown to be efficient, fair, convergent and adaptive to changes in network configuration. The scheme as described works only for networks that can be modelled with queueing servers with constant service times. Further research is required to extend it for implementation in practical networks. Several directions for future research have been suggested.<|reference_end|> | arxiv | @article{jain1998a,
title={A Delay Based Approach for Congestion Avoidance in Interconnected
Heterogeneous Computer Networks},
author={R. Jain},
journal={Computer Communications Review, ACM SIGCOMM, October 1989, pp.
56-71},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809093},
primaryClass={cs.NI}
} | jain1998a |
arxiv-676113 | cs/9809094 | Congestion Avoidance in Computer Networks with a Connectionless Network Layer | <|reference_start|>Congestion Avoidance in Computer Networks with a Connectionless Network Layer: Widespread use of computer networks and the use of varied technology for the interconnection of computers has made congestion a significant problem. In this report, we summarize our research on congestion avoidance. We compare the concept of congestion avoidance with that of congestion control. Briefly, congestion control is a recovery mechanism, while congestion avoidance is a prevention mechanism. A congestion control scheme helps the network to recover from the congestion state while a congestion avoidance scheme allows a network to operate in the region of low delay and high throughput with minimal queuing, thereby preventing it from entering the congested state in which packets are lost due to buffer shortage. A number of possible alternatives for congestion avoidance were identified. From these alternatives we selected one called the binary feedback scheme in which the network uses a single bit in the network layer header to feed back the congestion information to its users, which then increase or decrease their load to make optimal use of the resources. The concept of global optimality in a distributed system is defined in terms of efficiency and fairness such that they can be independently quantified and apply to any number of resources and users. The proposed scheme has been simulated and shown to be globally efficient, fair, responsive, convergent, robust, distributed, and configuration-independent.<|reference_end|> | arxiv | @article{jain1998congestion,
title={Congestion Avoidance in Computer Networks with a Connectionless Network
Layer},
author={R. Jain and K. Ramakrishnan and D. Chiu},
journal={arXiv preprint arXiv:cs/9809094},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809094},
primaryClass={cs.NI}
} | jain1998congestion |
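The binary feedback scheme in the jain1998congestion (cs/9809094) entry has users increase or decrease their load in response to a single congestion bit. A minimal sketch of that decision rule follows; the 50% threshold and the increase/decrease constants match values commonly cited for the DECbit line of work but should be read here as illustrative assumptions.

```python
# Sketch only: window adjustment from single-bit congestion feedback.

def adjust_window(window: float, congestion_bits) -> float:
    """congestion_bits: feedback bits carried by the last window of acks."""
    if sum(congestion_bits) >= 0.5 * len(congestion_bits):
        return max(1.0, window * 0.875)   # multiplicative decrease
    return window + 1.0                   # additive increase

w = 8.0
w = adjust_window(w, [False, False, True, False])  # mostly clear -> 9.0
w = adjust_window(w, [True, True, True, False])    # mostly set   -> 7.875
print(w)
```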
arxiv-676114 | cs/9809095 | Congestion Avoidance in Computer Networks with a Connectionless Network Layer, Part I: Concepts, Goals and Methodology | <|reference_start|>Congestion Avoidance in Computer Networks with a Connectionless Network Layer, Part I: Concepts, Goals and Methodology: Congestion is said to occur in the network when the resource demands exceed the capacity and packets are lost due to too much queuing in the network. During congestion, the network throughput may drop to zero and the path delay may become very high. A congestion control scheme helps the network to recover from the congestion state. A congestion avoidance scheme allows a network to operate in the region of low delay and high throughput. Such schemes prevent a network from entering the congested state. Congestion avoidance is a prevention mechanism while congestion control is a recovery mechanism. We compare the concept of congestion avoidance with that of flow control and congestion control. A number of possible alternatives for congestion avoidance have been identified. From these a few were selected for study. The criteria for selection and goals for these schemes have been described. In particular, we wanted the scheme to be globally efficient, fair, dynamic, convergent, robust, distributed, configuration independent, etc. These goals and the test cases used to verify whether a particular scheme has met the goals have been described. We model the network and the user policies for congestion avoidance as a feedback control system. The key components of a generic congestion avoidance scheme are: congestion detection, congestion feedback, feedback selector, signal filter, decision function, and increase/decrease algorithms. These components have been explained.<|reference_end|> | arxiv | @article{jain1998congestion,
title={Congestion Avoidance in Computer Networks with a Connectionless Network
Layer, Part I: Concepts, Goals and Methodology},
author={R. Jain and K. Ramakrishnan},
journal={arXiv preprint arXiv:cs/9809095},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809095},
primaryClass={cs.NI}
} | jain1998congestion |
arxiv-676115 | cs/9809096 | A Timeout Based Congestion Control Scheme for Window Flow-Controlled Networks | <|reference_start|>A Timeout Based Congestion Control Scheme for Window Flow-Controlled Networks: During overload, most networks drop packets due to buffer unavailability. The resulting timeouts at the source provide an implicit mechanism to convey congestion signals from the network to the source. On a timeout, a source should not only retransmit the lost packet, but it should also reduce its load on the network. Based on this realization, we have developed a simple congestion control scheme using the acknowledgment timeouts as indications of packet loss and congestion. This scheme does not require any new message formats; therefore, it can be used in any network with window flow control, e.g., ARPAnet or ISO.<|reference_end|> | arxiv | @article{jain1998a,
title={A Timeout Based Congestion Control Scheme for Window Flow-Controlled
Networks},
author={R. Jain},
journal={IEEE Journal on Selected Areas in Communications, Vol. SAC-4, No.
7, October 1986, pp. 1162-1167},
year={1998},
doi={10.1109/JSAC.1986.1146431},
archivePrefix={arXiv},
eprint={cs/9809096},
primaryClass={cs.NI}
} | jain1998a |
arxiv-676116 | cs/9809097 | Divergence of Timeout Algorithms for Packet Retransmissions | <|reference_start|>Divergence of Timeout Algorithms for Packet Retransmissions: The problem of adaptively setting the timeout interval for retransmitting a packet has been discussed. A layered view of the algorithms has been presented. It is shown that a timeout algorithm consists of essentially five layers or procedures which can be independently chosen and modified. A number of timeout algorithms proposed in the literature have been decomposed into these five layers. One of the key layers not discussed in the literature is that of determining the sample round trip delay for packets that have been transmitted more than once. It is shown that this layer has a significant impact on the network performance. Under repeated packet loss, most timeout algorithms either diverge or converge to a wrong value. A number of alternative schemes have been presented. It is argued that divergence is preferable to false convergence. It is a feature that is helpful in reducing network traffic congestion.<|reference_end|> | arxiv | @article{jain1998divergence,
title={Divergence of Timeout Algorithms for Packet Retransmissions},
author={R. Jain},
journal={arXiv preprint arXiv:cs/9809097},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809097},
primaryClass={cs.NI}
} | jain1998divergence |
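Two of the "layers" the jain1998divergence abstract refers to, smoothing round-trip-time samples into a timeout interval and reacting to repeated loss, can be made concrete with the classic exponential-average estimator. The sketch below uses RFC 793 style constants as illustrative assumptions; the layer the paper actually analyzes, which RTT sample to feed in for packets transmitted more than once, is deliberately left to the caller.

```python
# Sketch only: a layered timeout estimator. Which RTT sample to use for
# retransmitted packets (the layer the paper analyzes) is left open.

class TimeoutEstimator:
    def __init__(self, alpha=0.875, beta=2.0, rto=1.0):
        self.alpha, self.beta = alpha, beta
        self.srtt = None   # smoothed round-trip time
        self.rto = rto     # current retransmission timeout

    def on_rtt_sample(self, sample: float) -> None:
        """Fold one round-trip-time measurement into the estimate."""
        self.srtt = sample if self.srtt is None else \
            self.alpha * self.srtt + (1 - self.alpha) * sample
        self.rto = self.beta * self.srtt

    def on_timeout(self) -> float:
        """Back off exponentially under repeated loss; the paper argues this
        divergence is preferable to converging to a wrong value."""
        self.rto *= 2
        return self.rto
```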
arxiv-676117 | cs/9809098 | On Caching out-of-order packets in window flow controlled networks | <|reference_start|>On Caching out-of-order packets in window flow controlled networks: In window flow controlled networks, if a packet is lost the destination has to decide whether to save (cache) subsequent out-of-order packets. Also, the source has to decide whether to send just one packet or to send all packets following it. This leads to four different types of caching schemes. Simulations show, against our immediate intuition, that regardless of whether the destination is caching or not, the source should retransmit only one packet. This paper describes the alternatives to, and provides justification for, schemes used in Digital Network Architecture and ARPAnet TCP.<|reference_end|> | arxiv | @article{jain1998on,
title={On Caching out-of-order packets in window flow controlled networks},
author={R. Jain},
journal={arXiv preprint arXiv:cs/9809098},
year={1998},
number={DEC-TR-342},
archivePrefix={arXiv},
eprint={cs/9809098},
primaryClass={cs.NI}
} | jain1998on |
arxiv-676118 | cs/9809099 | A Quantitative Measure Of Fairness And Discrimination For Resource Allocation In Shared Computer Systems | <|reference_start|>A Quantitative Measure Of Fairness And Discrimination For Resource Allocation In Shared Computer Systems: Fairness is an important performance criterion in all resource allocation schemes, including those in distributed computer systems. However, it is often specified only qualitatively. The quantitative measures proposed in the literature are either too specific to a particular application, or suffer from some undesirable characteristics. In this paper, we have introduced a quantitative measure called the Index of Fairness. The index is applicable to any resource sharing or allocation problem. It is independent of the amount of the resource. The fairness index always lies between 0 and 1. This boundedness aids intuitive understanding of the fairness index. For example, a distribution algorithm with a fairness of 0.10 means that it is unfair to 90% of the users. Also, the discrimination index can be defined as 1 - fairness index.<|reference_end|> | arxiv | @article{jain1998a,
title={A Quantitative Measure Of Fairness And Discrimination For Resource
Allocation In Shared Computer Systems},
author={R. Jain and D. Chiu and W. Hawe},
journal={arXiv preprint arXiv:cs/9809099},
year={1998},
number={TR-301},
archivePrefix={arXiv},
eprint={cs/9809099},
primaryClass={cs.NI}
} | jain1998a |
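The measure introduced in the jain1998a (TR-301) entry is now widely known as Jain's fairness index, f(x) = (sum x_i)^2 / (n * sum x_i^2). A small self-contained sketch:

```python
# Jain's fairness index: f(x) = (sum x_i)^2 / (n * sum x_i^2).
# It is independent of the scale of the allocations and lies in (0, 1];
# the discrimination index is 1 - f(x), as defined in the abstract.

def fairness_index(allocations):
    n, total = len(allocations), sum(allocations)
    return total * total / (n * sum(x * x for x in allocations))

print(fairness_index([1, 1, 1, 1]))       # 1.0  : perfectly fair
print(fairness_index([1, 0, 0, 0]))       # 0.25 : fair to 25% of the users
print(fairness_index([4, 3, 3, 2]))       # ~0.947
print(1 - fairness_index([1, 0, 0, 0]))   # discrimination index
```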
arxiv-676119 | cs/9809100 | Data Flies Standby with ABR Service | <|reference_start|>Data Flies Standby with ABR Service: Explanation of ABR service in plain language.<|reference_end|> | arxiv | @article{jain1998data,
title={Data Flies Standby with ABR Service},
author={R. Jain},
journal={Network World, June 12, 1995, page 43},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809100},
primaryClass={cs.NI}
} | jain1998data |
arxiv-676120 | cs/9809101 | Flood Routing Technique for Data Networks | <|reference_start|>Flood Routing Technique for Data Networks: In this paper, a new routing algorithm based on a flooding method is introduced. Flooding techniques have been used previously, e.g. for broadcasting the routing table in the ARPAnet [1] and other special purpose networks [3][4][5]. However, sending data using flooding can often saturate the network [2] and it is usually regarded as an inefficient broadcast mechanism. Our approach is to flood a very short packet to explore an optimal route without relying on a pre-established routing table, together with an efficient flood control algorithm to reduce the signalling traffic overhead. This is an inherently robust mechanism in the face of network configuration changes, achieves automatic load sharing across alternative routes, and has the potential to solve many contemporary routing problems. An earlier version of this mechanism was originally developed for virtual circuit establishment in the experimental Caroline ATM LAN [6][7] at Monash University.<|reference_end|> | arxiv | @article{cho1998flood,
title={Flood Routing Technique for Data Networks},
author={Jaihyung Cho, James Breen},
journal={ICICS '97; First International Conference on Information,
Communications and Signal Processing, IEEE Singapore, vol. 3, Sep 1997, pp.
1418-1422},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809101},
primaryClass={cs.NI}
} | cho1998flood |
arxiv-676121 | cs/9809102 | Analysis of Performance of Dynamic Multicast Routing Algorithms | <|reference_start|>Analysis of Performance of Dynamic Multicast Routing Algorithms: In this paper, three new dynamic multicast routing algorithms based on the greedy tree technique are proposed: Source Optimised Tree, Topology Based Tree and Minimum Diameter Tree. A simulation analysis is presented showing various performance aspects of the algorithms, in which a comparison is made with the greedy and core based tree techniques. The effects of the tree source location on dynamic membership change are also examined. The simulations demonstrate that the Source Optimised Tree algorithm achieves a significant improvement in terms of delay and link usage when compared to the Core Based Tree and the greedy algorithm.<|reference_end|> | arxiv | @article{cho1998analysis,
title={Analysis of Performance of Dynamic Multicast Routing Algorithms},
author={Jaihyung Cho, James Breen},
journal={ICCCN '98; IEEE 7th International Conference on Computer
Communications and Networks, Lafayette, Louisiana, U.S.A., Oct. 1998},
year={1998},
doi={10.1016/S0140-3664(99)00009-2},
archivePrefix={arXiv},
eprint={cs/9809102},
primaryClass={cs.NI}
} | cho1998analysis |
arxiv-676122 | cs/9809103 | Bicriteria Network Design Problems | <|reference_start|>Bicriteria Network Design Problems: We study a general class of bicriteria network design problems. A generic problem in this class is as follows: Given an undirected graph and two minimization objectives (under different cost functions), with a budget specified on the first, find a subgraph from a given subgraph-class that minimizes the second objective subject to the budget on the first. We consider three different criteria - the total edge cost, the diameter and the maximum degree of the network. Here, we present the first polynomial-time approximation algorithms for a large class of bicriteria network design problems for the above mentioned criteria. The following general types of results are presented. First, we develop a framework for bicriteria problems and their approximations. Second, when the two criteria are the same (note that the cost functions continue to be different), we present a ``black box'' parametric search technique. This black box takes in as input an (approximation) algorithm for the unicriterion situation and generates an approximation algorithm for the bicriteria case with only a constant factor loss in the performance guarantee. Third, when the two criteria are the diameter and the total edge costs, we use a cluster-based approach to devise approximation algorithms --- the solutions output violate both criteria by a logarithmic factor. Finally, for the class of treewidth-bounded graphs, we provide pseudopolynomial-time algorithms for a number of bicriteria problems using dynamic programming. We show how these pseudopolynomial-time algorithms can be converted to fully polynomial-time approximation schemes using a scaling technique.<|reference_end|> | arxiv | @article{marathe1998bicriteria,
title={Bicriteria Network Design Problems},
author={Madhav V. Marathe, R. Ravi, Ravi Sundaram, S. S. Ravi, Daniel J.
Rosenkrantz, Harry B. Hunt III},
journal={J. Algorithms, 28, 142-171, (1998)},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809103},
primaryClass={cs.CC cs.DS}
} | marathe1998bicriteria |
arxiv-676123 | cs/9809104 | Adaptive Multicast of Multi-Layered Video: Rate-Based and Credit-Based Approaches | <|reference_start|>Adaptive Multicast of Multi-Layered Video: Rate-Based and Credit-Based Approaches: Network architectures that can efficiently transport high-quality multicast video are rapidly becoming a basic requirement of emerging multimedia applications. The main problem complicating multicast video transport is variation in network bandwidth constraints. An attractive solution to this problem is to use an adaptive, multi-layered video encoding mechanism. In this paper, we consider two such mechanisms for the support of video multicast; one is a rate-based mechanism that relies on explicit rate congestion feedback from the network, and the other is a credit-based mechanism that relies on hop-by-hop congestion feedback. The responsiveness, bandwidth utilization, scalability and fairness of the two mechanisms are evaluated through simulations. Results suggest that while the two mechanisms exhibit performance trade-offs, both are capable of providing a high quality video service in the presence of varying bandwidth constraints.<|reference_end|> | arxiv | @article{vickers1998adaptive,
title={Adaptive Multicast of Multi-Layered Video: Rate-Based and Credit-Based
Approaches},
author={Brett J. Vickers, Celio Albuquerque and Tatsuya Suda},
journal={Proceedings of IEEE Infocom '98, pp. 1073-1083, April 1998},
year={1998},
doi={10.1109/INFCOM.1998.662917},
archivePrefix={arXiv},
eprint={cs/9809104},
primaryClass={cs.NI cs.MM}
} | vickers1998adaptive |
arxiv-676124 | cs/9809105 | Hyper-Systolic Matrix Multiplication | <|reference_start|>Hyper-Systolic Matrix Multiplication: A novel parallel algorithm for matrix multiplication is presented. The hyper-systolic algorithm makes use of a one-dimensional processor abstraction. The procedure can be implemented on all types of parallel systems. It can handle matrix-vector multiplications as well as transposed matrix products.<|reference_end|> | arxiv | @article{lippert1998hyper-systolic,
title={Hyper-Systolic Matrix Multiplication},
author={Thomas Lippert, Nikolay Petkov, Paolo Palazzari, and Klaus Schilling},
journal={arXiv preprint arXiv:cs/9809105},
year={1998},
number={HLRZ1998-59},
archivePrefix={arXiv},
eprint={cs/9809105},
primaryClass={cs.MS}
} | lippert1998hyper-systolic |
arxiv-676125 | cs/9809106 | Processing Unknown Words in HPSG | <|reference_start|>Processing Unknown Words in HPSG: The lexical acquisition system presented in this paper incrementally updates linguistic properties of unknown words inferred from their surrounding context by parsing sentences with an HPSG grammar for German. We employ a gradual, information-based concept of ``unknownness'' providing a uniform treatment for the range of completely known to maximally unknown lexical entries. ``Unknown'' information is viewed as revisable information, which is either generalizable or specializable. Updating takes place after parsing, which only requires a modified lexical lookup. Revisable pieces of information are identified by grammar-specified declarations which provide access paths into the parse feature structure. The updating mechanism revises the corresponding places in the lexical feature structures iff the context actually provides new information. For revising generalizable information, type union is required. A worked-out example demonstrates the inferential capacity of our implemented system.<|reference_end|> | arxiv | @article{barg1998processing,
title={Processing Unknown Words in HPSG},
author={Petra Barg and Markus Walther (University of Duesseldorf)},
journal={Proceedings COLING-ACL'98, vol.I, 91-95},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809106},
primaryClass={cs.CL}
} | barg1998processing |
arxiv-676126 | cs/9809107 | Computing Declarative Prosodic Morphology | <|reference_start|>Computing Declarative Prosodic Morphology: This paper describes a computational, declarative approach to prosodic morphology that uses inviolable constraints to denote small finite candidate sets which are filtered by a restrictive incremental optimization mechanism. The new approach is illustrated with an implemented fragment of Modern Hebrew verbs couched in MicroCUF, an expressive constraint logic formalism. For generation and parsing of word forms, I propose a novel off-line technique to eliminate run-time optimization. It produces a finite-state oracle that efficiently restricts the constraint interpreter's search space. As a byproduct, unknown words can be analyzed without special mechanisms. Unlike pure finite-state transducer approaches, this hybrid setup allows for more expressivity in constraints to specify e.g. token identity for reduplication or arithmetic constraints for phonetics.<|reference_end|> | arxiv | @article{walther1998computing,
title={Computing Declarative Prosodic Morphology},
author={Markus Walther (University of Marburg)},
journal={Proceedings of SIGPHON'98, pp. 11-20 (COLING-ACL'98
Post-Conference Workshop on The Computation of Phonological Constraints)},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809107},
primaryClass={cs.CL}
} | walther1998computing |
arxiv-676127 | cs/9809108 | Learning Nested Agent Models in an Information Economy | <|reference_start|>Learning Nested Agent Models in an Information Economy: We present our approach to the problem of how an agent, within an economic Multi-Agent System, can determine when it should behave strategically (i.e. learn and use models of other agents), and when it should act as a simple price-taker. We provide a framework for the incremental implementation of modeling capabilities in agents, and a description of the forms of knowledge required. The agents were implemented and different populations simulated in order to learn more about their behavior and the merits of using and learning agent models. Our results show, among other lessons, how savvy buyers can avoid being ``cheated'' by sellers, how price volatility can be used to quantitatively predict the benefits of deeper models, and how specific types of agent populations influence system behavior.<|reference_end|> | arxiv | @article{vidal1998learning,
title={Learning Nested Agent Models in an Information Economy},
author={Jose M. Vidal and Edmund H. Durfee},
journal={Journal of Experimental and Theoretical Artificial Intelligence.
10 (1998) 291-308},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809108},
primaryClass={cs.MA cs.AI}
} | vidal1998learning |
arxiv-676128 | cs/9809109 | Linear Complexity Hexahedral Mesh Generation | <|reference_start|>Linear Complexity Hexahedral Mesh Generation: We show that any polyhedron forming a topological ball with an even number of quadrilateral sides can be partitioned into O(n) topological cubes, meeting face to face. The result generalizes to non-simply-connected polyhedra satisfying an additional bipartiteness condition. The same techniques can also be used to reduce the geometric version of the hexahedral mesh generation problem to a finite case analysis amenable to machine solution.<|reference_end|> | arxiv | @article{eppstein1998linear,
title={Linear Complexity Hexahedral Mesh Generation},
author={David Eppstein},
journal={Comp. Geom. Theory & Appl. 12 (1999) 3-16},
year={1998},
doi={10.1016/S0925-7721(98)00032-7},
archivePrefix={arXiv},
eprint={cs/9809109},
primaryClass={cs.CG}
} | eppstein1998linear |
arxiv-676129 | cs/9809110 | Similarity-Based Models of Word Cooccurrence Probabilities | <|reference_start|>Similarity-Based Models of Word Cooccurrence Probabilities: In many applications of natural language processing (NLP) it is necessary to determine the likelihood of a given word combination. For example, a speech recognizer may need to determine which of the two word combinations ``eat a peach'' and ``eat a beach'' is more likely. Statistical NLP methods determine the likelihood of a word combination from its frequency in a training corpus. However, the nature of language is such that many word combinations are infrequent and do not occur in any given corpus. In this work we propose a method for estimating the probability of such previously unseen word combinations using available information on ``most similar'' words. We describe probabilistic word association models based on distributional word similarity, and apply them to two tasks, language modeling and pseudo-word disambiguation. In the language modeling task, a similarity-based model is used to improve probability estimates for unseen bigrams in a back-off language model. The similarity-based method yields a 20% perplexity improvement in the prediction of unseen bigrams and statistically significant reductions in speech-recognition error. We also compare four similarity-based estimation methods against back-off and maximum-likelihood estimation methods on a pseudo-word sense disambiguation task in which we controlled for both unigram and bigram frequency to avoid giving too much weight to easy-to-disambiguate high-frequency configurations. The similarity-based methods perform up to 40% better on this particular task.<|reference_end|> | arxiv | @article{dagan1998similarity-based,
title={Similarity-Based Models of Word Cooccurrence Probabilities},
author={Ido Dagan, Lillian Lee and Fernando C. N. Pereira},
journal={Machine Learning, 34, 43-69 (1999)},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809110},
primaryClass={cs.CL cs.AI cs.LG}
} | dagan1998similarity-based |
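The estimation scheme in the dagan1998similarity-based abstract can be summarized as replacing an unseen conditional bigram probability with a similarity-weighted average over distributionally similar words. The sketch below is a simplified reading of that idea; the toy counts, the neighbor weights, and the omission of the paper's back-off combination are all assumptions made for brevity.

```python
# Sketch only: similarity-weighted estimate for an unseen bigram (w1, w2):
# average P(w2 | w1') over words w1' distributionally similar to w1.

def conditional(counts, w1, w2):
    """Maximum-likelihood P(w2 | w1) from bigram counts."""
    total = sum(c for (a, _), c in counts.items() if a == w1)
    return counts.get((w1, w2), 0) / total if total else 0.0

def similarity_estimate(counts, neighbors, w1, w2):
    """neighbors: {word: {similar_word: weight}} from a distributional model."""
    sims = neighbors[w1]
    norm = sum(sims.values())
    return sum(w * conditional(counts, v, w2) for v, w in sims.items()) / norm

counts = {("eat", "a"): 8, ("eat", "the"): 2,
          ("devour", "a"): 1, ("devour", "the"): 3}
neighbors = {"consume": {"eat": 0.7, "devour": 0.3}}   # hypothetical weights
print(similarity_estimate(counts, neighbors, "consume", "a"))   # 0.635
```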
arxiv-676130 | cs/9809111 | Evolution of Neural Networks to Play the Game of Dots-and-Boxes | <|reference_start|>Evolution of Neural Networks to Play the Game of Dots-and-Boxes: Dots-and-Boxes is a child's game which remains analytically unsolved. We implement and evolve artificial neural networks to play this game, evaluating them against simple heuristic players. Our networks do not evaluate or predict the final outcome of the game, but rather recommend moves at each stage. Superior generalisation of play by co-evolved populations is found, and a comparison made with networks trained by back-propagation using simple heuristics as an oracle.<|reference_end|> | arxiv | @article{weaver1998evolution,
title={Evolution of Neural Networks to Play the Game of Dots-and-Boxes},
author={Lex Weaver and Terry Bossomaier},
journal={Alife V: Poster Presentations, May 16-18 1996, pages 43-50},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809111},
primaryClass={cs.NE cs.LG}
} | weaver1998evolution |
arxiv-676131 | cs/9809112 | On the Evaluation and Comparison of Taggers: The Effect of Noise in Testing Corpora | <|reference_start|>On the Evaluation and Comparison of Taggers: The Effect of Noise in Testing Corpora: This paper addresses the issue of POS tagger evaluation. Such evaluation is usually performed by comparing the tagger output with a reference test corpus, which is assumed to be error-free. Currently used corpora contain noise which causes the obtained performance to be a distortion of the real value. We analyze to what extent this distortion may invalidate the comparison between taggers or the measure of the improvement given by a new system. The main conclusion is that a more rigorous experimental design is needed to reliably evaluate and compare tagger accuracies.<|reference_end|> | arxiv | @article{padro1998on,
title={On the Evaluation and Comparison of Taggers: The Effect of Noise in
Testing Corpora},
author={L. Padro and L. Marquez (Universitat Politecnica de Catalunya)},
journal={arXiv preprint arXiv:cs/9809112},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809112},
primaryClass={cs.CL}
} | padro1998on |
arxiv-676132 | cs/9809113 | Improving Tagging Performance by Using Voting Taggers | <|reference_start|>Improving Tagging Performance by Using Voting Taggers: We present a bootstrapping method to develop an annotated corpus, which is especially useful for languages with few available resources. The method is being applied to develop a corpus of Spanish of over 5Mw. The method consists of taking advantage of the collaboration of two different POS taggers. The cases in which both taggers agree present a higher accuracy and are used to retrain the taggers.<|reference_end|> | arxiv | @article{marquez1998improving,
title={Improving Tagging Performance by Using Voting Taggers},
author={L. Marquez and L. Padro and H. Rodriguez (Universitat Politecnica de
Catalunya)},
journal={arXiv preprint arXiv:cs/9809113},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809113},
primaryClass={cs.CL}
} | marquez1998improving |
arxiv-676133 | cs/9809114 | The descriptive complexity approach to LOGCFL | <|reference_start|>The descriptive complexity approach to LOGCFL: Building upon the known generalized-quantifier-based first-order characterization of LOGCFL, we lay the groundwork for a deeper investigation. Specifically, we examine subclasses of LOGCFL arising from varying the arity and nesting of groupoidal quantifiers. Our work extends the elaborate theory relating monoidal quantifiers to NC1 and its subclasses. In the absence of the BIT predicate, we resolve the main issues: we show in particular that no single outermost unary groupoidal quantifier with FO can capture all the context-free languages, and we obtain the surprising result that a variant of Greibach's ``hardest context-free language'' is LOGCFL-complete under quantifier-free BIT-free projections. We then prove that FO with unary groupoidal quantifiers is strictly more expressive with the BIT predicate than without. Considering a particular groupoidal quantifier, we prove that first-order logic with majority of pairs is strictly more expressive than first-order with majority of individuals. As a technical tool of independent interest, we define the notion of an aperiodic nondeterministic finite automaton and prove that FO translations are precisely the mappings computed by single-valued aperiodic nondeterministic finite transducers.<|reference_end|> | arxiv | @article{lautemann1998the,
title={The descriptive complexity approach to LOGCFL},
author={Clemens Lautemann and Pierre McKenzie and Thomas Schwentick and
Heribert Vollmer},
journal={arXiv preprint arXiv:cs/9809114},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809114},
primaryClass={cs.CC}
} | lautemann1998the |
arxiv-676134 | cs/9809115 | A Generalized Quantifier Concept in Computational Complexity Theory | <|reference_start|>A Generalized Quantifier Concept in Computational Complexity Theory: A notion of generalized quantifier in computational complexity theory is explored and used to give a unified treatment of leaf language definability, oracle separations, type 2 operators, and circuits with monoidal gates. Relations to Lindstroem quantifiers are pointed out.<|reference_end|> | arxiv | @article{vollmer1998a,
title={A Generalized Quantifier Concept in Computational Complexity Theory},
author={Heribert Vollmer},
journal={arXiv preprint arXiv:cs/9809115},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809115},
primaryClass={cs.CC}
} | vollmer1998a |
arxiv-676135 | cs/9809116 | The Complexity of Computing Optimal Assignments of Generalized Propositional Formulae | <|reference_start|>The Complexity of Computing Optimal Assignments of Generalized Propositional Formulae: We consider the problems of finding the lexicographically minimal (or maximal) satisfying assignment of propositional formulae for different restricted formula classes. It turns out that for each class from our framework, the above problem is either polynomial time solvable or complete for OptP. We also consider the problem of deciding if in the optimal assignment the largest variable gets value 1. We show that this problem is either in P or P^NP complete.<|reference_end|> | arxiv | @article{reith1998the,
title={The Complexity of Computing Optimal Assignments of Generalized
Propositional Formulae},
author={Steffen Reith and Heribert Vollmer},
journal={arXiv preprint arXiv:cs/9809116},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809116},
primaryClass={cs.CC}
} | reith1998the |
arxiv-676136 | cs/9809117 | Hard instance generation for SAT | <|reference_start|>Hard instance generation for SAT: We propose an algorithm for generating hard instances of the Satisfying Assignment Search Problem (in short, SAT). The algorithm transforms instances of the integer factorization problem into SAT instances efficiently by using the Chinese Remainder Theorem. For example, it is possible to construct SAT instances with about 5,600 variables that are as hard as factorizing 100-bit integers.<|reference_end|> | arxiv | @article{horie1998hard,
title={Hard instance generation for SAT},
author={Satoshi Horie and Osamu Watanabe},
journal={In the Proc. of ISAAC'97, Lecture Notes in CS, Vol.1350, 22-31,
1997},
year={1998},
number={TR98-0007},
archivePrefix={arXiv},
eprint={cs/9809117},
primaryClass={cs.CC}
} | horie1998hard |
arxiv-676137 | cs/9809118 | The Boolean Hierarchy over Level 1/2 of the Straubing-Therien Hierarchy | <|reference_start|>The Boolean Hierarchy over Level 1/2 of the Straubing-Therien Hierarchy: For some fixed alphabet A, a language L of A* is in the class L(1/2) of the Straubing-Therien hierarchy if and only if it can be expressed as a finite union of languages A*aA*bA*...A*cA*, where a,b,...,c are letters. The class L(1) is defined as the boolean closure of L(1/2). It is known that the classes L(1/2) and L(1) are decidable. We give a membership criterion for the single classes of the boolean hierarchy over L(1/2). From this criterion we can conclude that this boolean hierarchy is proper and that its classes are decidable. In finite model theory the latter implies the decidability of the classes of the boolean hierarchy over the class Sigma(1) of the FO(<)-logic. Moreover we prove a ``forbidden-pattern'' characterization of L(1) of the type: L is in L(1) if and only if a certain pattern does not appear in the transition graph of a deterministic finite automaton accepting L. We discuss complexity theoretical consequences of our results.<|reference_end|> | arxiv | @article{schmitz1998the,
title={The Boolean Hierarchy over Level 1/2 of the Straubing-Therien Hierarchy},
author={Heinz Schmitz and Klaus W. Wagner},
journal={arXiv preprint arXiv:cs/9809118},
year={1998},
number={201},
archivePrefix={arXiv},
eprint={cs/9809118},
primaryClass={cs.CC cs.FL}
} | schmitz1998the |
arxiv-676138 | cs/9809119 | Droems: experimental mathematics, informatics and infinite dimensional geometry | <|reference_start|>Droems: experimental mathematics, informatics and infinite dimensional geometry: The article is devoted to the problem of elaborating real-time interactive videosystems for accelerated nonverbal cognitive computer and telecommunications. The proposed approach is based on the use of droems (dynamically reconstructed objects of experimental mathematics) and interpretational figures as pointers to them. The four paragraphs of the article are devoted to (1) an exposition of the basic notions of interpretational geometry, (2) operator methods in the theory of interactive dynamical videosystems, (3) the general concept of organization of integrated interactive real-time videocognitive systems, and (4) droems and the processes of their dynamical reconstruction, where the general notions are illustrated by a concrete example related to infinite dimensional geometry. The exposition is predominantly heuristic and conceptual (the first and third paragraphs), though some particular aspects, such as the content of the second and fourth paragraphs, which allow deeper formalization and detailing at present, are exposed at the mathematical level of rigor.<|reference_end|> | arxiv | @article{juriev1998droems:,
title={Droems: experimental mathematics, informatics and infinite dimensional
geometry},
author={Denis V. Juriev},
journal={arXiv preprint arXiv:cs/9809119},
year={1998},
number={RCMPI/96-05+},
archivePrefix={arXiv},
eprint={cs/9809119},
primaryClass={cs.HC cs.GR math.RT}
} | juriev1998droems: |
arxiv-676139 | cs/9809120 | A Natural Deduction style proof system for propositional $\mu$-calculus and its formalization in inductive type theories | <|reference_start|>A Natural Deduction style proof system for propositional $\mu$-calculus and its formalization in inductive type theories: In this paper, we present a formalization of Kozen's propositional modal $\mu$-calculus, in the Calculus of Inductive Constructions. We address several problematic issues, such as the use of higher-order abstract syntax in inductive sets in the presence of recursive constructors, the encoding of modal (``proof'') rules and of context sensitive grammars. The encoding can be used in the Coq system, providing an experimental computer-aided proof environment for the interactive development of error-free proofs in the $\mu$-calculus. The techniques we adopted can be readily ported to other languages and proof systems featuring similar problematic issues.<|reference_end|> | arxiv | @article{miculan1998a,
title={A Natural Deduction style proof system for propositional $\mu$-calculus
and its formalization in inductive type theories},
author={Marino Miculan},
journal={arXiv preprint arXiv:cs/9809120},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809120},
primaryClass={cs.LO}
} | miculan1998a |
arxiv-676140 | cs/9809121 | Using Local Optimality Criteria for Efficient Information Retrieval with Redundant Information Filters | <|reference_start|>Using Local Optimality Criteria for Efficient Information Retrieval with Redundant Information Filters: We consider information retrieval when the data, for instance multimedia, is computationally expensive to fetch. Our approach uses "information filters" to considerably narrow the universe of possibilities before retrieval. We are especially interested in redundant information filters that save time over more general but more costly filters. Efficient retrieval requires that decisions be made about the necessity, order, and concurrent processing of proposed filters (an "execution plan"). We develop simple polynomial-time local criteria for optimal execution plans, and show that most forms of concurrency are suboptimal with information filters. Although the general problem of finding an optimal execution plan is likely exponential in the number of filters, we show experimentally that our local optimality criteria, used in a polynomial-time algorithm, nearly always find the global optimum with 15 filters or less, a sufficient number of filters for most applications. Our methods do not require special hardware and avoid the high processor idleness that is characteristic of massive parallelism solutions to this problem. We apply our ideas to an important application, information retrieval of captioned data using natural-language understanding, a problem for which the natural-language processing can be the bottleneck if not implemented well.<|reference_end|> | arxiv | @article{rowe1998using,
title={Using Local Optimality Criteria for Efficient Information Retrieval with
Redundant Information Filters},
author={Neil C. Rowe},
journal={ACM Transactions on Information Systems, 14, 2 (April 1996),
138-174},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809121},
primaryClass={cs.IR cs.AI}
} | rowe1998using |
arxiv-676141 | cs/9809122 | Practical algorithms for on-line sampling | <|reference_start|>Practical algorithms for on-line sampling: One of the core applications of machine learning to knowledge discovery consists of building a function (a hypothesis) from a given amount of data (for instance a decision tree or a neural network) such that we can use it afterwards to predict new instances of the data. In this paper, we focus on a particular situation where we assume that the hypothesis we want to use for prediction is very simple, and thus, the hypothesis class is of feasible size. We study the problem of how to determine which of the hypotheses in the class is almost the best one. We present two on-line sampling algorithms for selecting hypotheses, give theoretical bounds on the number of necessary examples, and analyze them experimentally. We compare them with the simple batch sampling approach commonly used and show that in most situations our algorithms use far fewer examples.<|reference_end|> | arxiv | @article{domingo1998practical,
title={Practical algorithms for on-line sampling},
author={Carlos Domingo, Ricard Gavalda, and Osamu Watanabe},
journal={arXiv preprint arXiv:cs/9809122},
year={1998},
number={C-123},
archivePrefix={arXiv},
eprint={cs/9809122},
primaryClass={cs.LG cs.DS}
} | domingo1998practical |
arxiv-676142 | cs/9809123 | A role of constraint in self-organization | <|reference_start|>A role of constraint in self-organization: In this paper we introduce a neural network model of self-organization. This model uses a variation of the Hebb rule for updating its synaptic weights, and is guaranteed to converge to an equilibrium state. The key point of the convergence is the update rule, which constrains the total synaptic weight and thereby seems to make the model stable. We investigate the role of the constraint and show that it is the constraint that makes the model stable. For analyzing this setting, we propose a simple probabilistic game that models the neural network and the self-organization process. We then investigate the characteristics of this game, namely, the probability that the game becomes stable and the number of steps it takes.<|reference_end|> | arxiv | @article{domingo1998a,
title={A role of constraint in self-organization},
author={Carlos Domingo, Osamu Watanabe, and Tadashi Yamazaki},
journal={arXiv preprint arXiv:cs/9809123},
year={1998},
number={C-124},
archivePrefix={arXiv},
eprint={cs/9809123},
primaryClass={cs.NE cs.CG}
} | domingo1998a |
arxiv-676143 | cs/9809124 | Security Policy Specification Using a Graphical Approach | <|reference_start|>Security Policy Specification Using a Graphical Approach: A security policy states the acceptable actions of an information system, as the actions bear on security. There is a pressing need for organizations to declare their security policies; even informal statements would be better than the current practice. But formal policy statements are preferable to support (1) reasoning about policies, e.g., for consistency and completeness, (2) automated enforcement of the policy, e.g., using wrappers around legacy systems or after the fact with an intrusion detection system, and (3) other formal manipulation of policies, e.g., the composition of policies. We present LaSCO, the Language for Security Constraints on Objects, in which a policy consists of two parts: the domain (assumptions about the system) and the requirement (what is allowed assuming the domain is satisfied). Thus policies defined in LaSCO have the appearance of conditional access control statements. LaSCO policies are specified as expressions in logic and as directed graphs, giving a visual view of policy. LaSCO has a simple semantics in first order logic (which we provide), thus permitting the policies we write, even complex ones, to be very perspicuous. LaSCO has syntax to express many of the situations we have found to be useful in policies, and, more interestingly, in the composition of policies. LaSCO has an object-oriented structure, permitting it to be useful for describing policies on the objects and methods of an application written in an object-oriented language, in addition to the traditional policies on operating system objects. A LaSCO specification can be automatically translated into executable code that checks an invocation of a program with respect to a policy. The implementation of LaSCO is in Java, and generates wrappers to check Java programs with respect to a policy.<|reference_end|> | arxiv | @article{hoagland1998security,
title={Security Policy Specification Using a Graphical Approach},
author={James A. Hoagland, Raju Pandey, Karl N. Levitt},
journal={arXiv preprint arXiv:cs/9809124},
year={1998},
number={CSE-98-3},
archivePrefix={arXiv},
eprint={cs/9809124},
primaryClass={cs.CR}
} | hoagland1998security |
arxiv-676144 | cs/9809125 | Distributed Computation, the Twisted Isomorphism, and Auto-Poiesis | <|reference_start|>Distributed Computation, the Twisted Isomorphism, and Auto-Poiesis: This paper presents a synchronization-based, multi-process computational model of anticipatory systems called the Phase Web. It describes a self-organizing paradigm that explicitly recognizes and exploits the existence of a boundary between inside and outside, accepts and exploits intentionality, and uses explicit self-reference to describe, e.g., auto-poiesis. The model explicitly connects computation to a discrete Clifford algebraic formalization that is in turn extended into homology and co-homology, wherein the recursive nature of objects and boundaries becomes apparent and itself subject to hierarchical recursion. Topsy, a computer program embodying the Phase Web, is available at www.cs.auc.dk/topsy.<|reference_end|> | arxiv | @article{manthey1998distributed,
title={Distributed Computation, the Twisted Isomorphism, and Auto-Poiesis},
author={Michael Manthey},
journal={CASYS'97 First International Conference on Computing Anticipatory
Systems, Liege (Belgium), August 11-15, 1997. D. Dubois, Ed. (Dept. of
Mathematics, University of Liege)},
year={1998},
archivePrefix={arXiv},
eprint={cs/9809125},
primaryClass={cs.DC}
} | manthey1998distributed |
arxiv-676145 | cs/9810001 | On Dart-Zobel Algorithm for Testing Regular Type Inclusion | <|reference_start|>This paper answers open questions about the correctness and the completeness of the Dart-Zobel algorithm for testing the inclusion relation between two regular types. We show that the algorithm is incorrect for regular types. We also prove that the algorithm is complete for regular types as well as correct for tuple distributive regular types. Also presented is a simplified version of the Dart-Zobel algorithm for tuple distributive regular types.<|reference_end|> | arxiv | @article{lu1998on,
title={On Dart-Zobel Algorithm for Testing Regular Type Inclusion},
author={Lunjin Lu and John G. Cleary},
journal={arXiv preprint arXiv:cs/9810001},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810001},
primaryClass={cs.LO cs.PL}
} | lu1998on |
arxiv-676146 | cs/9810002 | Pre-fetching tree-structured data in distributed memory | <|reference_start|>A distributed heap storage manager has been implemented on the Fujitsu AP1000 multicomputer. The performance of various pre-fetching strategies is experimentally compared. Subjective programming benefits and objective performance benefits of up to 10% in pre-fetching are found for certain applications, but not for all. The performance benefits of pre-fetching depend on the specific data structure and access patterns. We suggest that the choice of pre-fetching strategy be placed dynamically under the control of the application.<|reference_end|> | arxiv | @article{weaver1998pre-fetching,
title={Pre-fetching tree-structured data in distributed memory},
author={Lex Weaver and Chris Johnson},
journal={Proceedings of the Third Fujitsu Parallel Computing Workshop,
pages P1-J-1 to P1-J-8, Kawasaki, Japan, November 1994. Fujitsu Laboratories
Ltd},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810002},
primaryClass={cs.DC cs.DB}
} | weaver1998pre-fetching |
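The data-structure and access-pattern dependence reported in weaver1998pre-fetching can be made concrete with a small simulation. The Python sketch below is a minimal illustration, not the AP1000 implementation: TreeStore, its fetch counter, and the "pull a node's children along with the node" strategy are hypothetical stand-ins for the paper's distributed heap.

    class TreeStore:
        # Simulated distributed heap (hypothetical): counts remote fetches;
        # the pre-fetching strategy speculatively pulls a node's children
        # into the local cache whenever the node itself must be fetched.
        def __init__(self, children, prefetch=True):
            self.children = children          # node -> list of child nodes
            self.cache, self.fetches = set(), 0
            self.prefetch = prefetch

        def get(self, node):
            if node not in self.cache:
                self.fetches += 1             # one remote round trip
                self.cache.add(node)
                if self.prefetch:             # speculate: children come next
                    self.cache.update(self.children.get(node, []))
            return self.children.get(node, [])

    tree = {0: [1, 2], 1: [3, 4], 2: [5, 6], 3: [], 4: [], 5: [], 6: []}
    for strategy in (True, False):
        store = TreeStore(tree, prefetch=strategy)
        stack = [0]
        while stack:                          # depth-first walk touches every node
            stack.extend(store.get(stack.pop()))
        print("prefetch" if strategy else "no prefetch", store.fetches)

A parent-to-child traversal saves fetches here (5 versus 7), while an access pattern that does not follow the tree's edges would gain nothing, which is why the paper argues the strategy should be under application control.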
arxiv-676147 | cs/9810003 | A Linear Shift Invariant Multiscale Transform | <|reference_start|>A Linear Shift Invariant Multiscale Transform: This paper presents a multiscale decomposition algorithm. Unlike standard wavelet transforms, the proposed operator is both linear and shift invariant. The central idea is to obtain shift invariance by averaging the aligned wavelet transform projections over all circular shifts of the signal. It is shown how the same transform can be obtained by a linear filter bank.<|reference_end|> | arxiv | @article{siebert1998a,
title={A Linear Shift Invariant Multiscale Transform},
author={Andreas Siebert},
journal={Proceedings 1998 International Conference on Image Processing,
Chicago, 4-7 October 1998},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810003},
primaryClass={cs.CV}
} | siebert1998a |
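The central construction in siebert1998a — averaging a shift-variant wavelet operator over all circular shifts of the signal — can be sketched directly. A minimal Python sketch, assuming a one-level decimated Haar smoother as the inner operator (the paper's actual filter bank may differ):

    import numpy as np

    def haar_smooth(x):
        # One level of decimated Haar analysis + synthesis. On its own this
        # is not shift invariant: the result depends on which samples the
        # decimation grid pairs up.
        a = (x[0::2] + x[1::2]) / 2.0         # analysis: average sample pairs
        return np.repeat(a, 2)                # synthesis: piecewise constant

    def shift_averaged(x, op=haar_smooth):
        # Average op over all circular shifts of x, re-aligning each result.
        # The averaged operator is linear (a mean of linear maps) and is
        # exactly shift invariant by construction.
        n = len(x)
        acc = np.zeros(n)
        for s in range(n):
            acc += np.roll(op(np.roll(x, s)), -s)
        return acc / n

    x = np.random.rand(16)
    assert np.allclose(np.roll(shift_averaged(x), 5),
                       shift_averaged(np.roll(x, 5)))

For this inner operator only the parity of the shift matters, so the average collapses to two aligned terms — consistent with the paper's point that the same transform can be realized by an ordinary linear filter bank.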
arxiv-676148 | cs/9810004 | The Design of EzWindows: A Graphics API for an Introductory Programming Course | <|reference_start|>The Design of EzWindows: A Graphics API for an Introductory Programming Course: Teaching object-oriented programming in an introductory programming course poses considerable challenges to the instructor. An often advocated approach to meeting this challenge is the use of a simple, object-oriented graphics library. We have developed a simple, portable graphics library for teaching object-oriented programming using C++. The library, EzWindows, allows beginning programmers to design and write programs that use the graphical display found on all modern desktop computers. In addition to providing simple graphical objects such as windows, geometric shapes, and bitmaps, EzWindows provides facilities for introducing event-based programming using the mouse and timers. EzWindows has proven to be extremely popular; it is currently in use at over 200 universities, colleges, and high schools. This paper describes the rationale for EzWindows and its high-level design.<|reference_end|> | arxiv | @article{childers1998the,
title={The Design of EzWindows: A Graphics API for an Introductory Programming
Course},
author={Bruce R. Childers, James P. Cohoon, Jack W. Davidson, Peter Valle},
journal={arXiv preprint arXiv:cs/9810004},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810004},
primaryClass={cs.CY cs.GR}
} | childers1998the |
arxiv-676149 | cs/9810005 | Anytime Coalition Structure Generation with Worst Case Guarantees | <|reference_start|>Anytime Coalition Structure Generation with Worst Case Guarantees: Coalition formation is a key topic in multiagent systems. One would prefer a coalition structure that maximizes the sum of the values of the coalitions, but often the number of coalition structures is too large to allow exhaustive search for the optimal one. But then, can the coalition structure found via a partial search be guaranteed to be within a bound from optimum? We show that none of the previous coalition structure generation algorithms can establish any bound because they search fewer nodes than a threshold that we show necessary for establishing a bound. We present an algorithm that establishes a tight bound within this minimal amount of search, and show that any other algorithm would have to search strictly more. The fraction of nodes needed to be searched approaches zero as the number of agents grows. If additional time remains, our anytime algorithm searches further, and establishes a progressively lower tight bound. Surprisingly, just searching one more node drops the bound in half. As desired, our algorithm lowers the bound rapidly early on, and exhibits diminishing returns to computation. It also drastically outperforms its obvious contenders. Finally, we show how to distribute the desired search across self-interested manipulative agents.<|reference_end|> | arxiv | @article{sandholm1998anytime,
title={Anytime Coalition Structure Generation with Worst Case Guarantees},
author={Tuomas Sandholm, Kate Larson, Martin Andersson, Onn Shehory, Fernando
Tohme},
journal={Proceedings of the National Conference on Artificial Intelligence,
pp 46-53, Madison, WI, July 1998},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810005},
primaryClass={cs.MA cs.AI}
} | sandholm1998anytime |
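The minimal search that sandholm1998anytime proves sufficient for a worst-case guarantee — the grand coalition plus every two-coalition structure, about 2^(n-1) nodes — is easy to state as code. A Python sketch; the characteristic function v is a hypothetical toy, and the real algorithm continues searching anytime-style to tighten the bound:

    from itertools import combinations

    def bottom_two_levels(agents, v):
        # Search the grand coalition and all structures with exactly two
        # coalitions. Per the paper, this is the minimal search that
        # guarantees a value within a factor n = len(agents) of optimum.
        # v maps a frozenset of agents to that coalition's value.
        agents = list(agents)
        grand = frozenset(agents)
        best_val, best_cs = v(grand), [grand]
        for r in range(1, len(agents) // 2 + 1):
            for part in combinations(agents, r):
                left = frozenset(part)
                right = grand - left
                if not right:
                    continue
                val = v(left) + v(right)
                if val > best_val:
                    best_val, best_cs = val, [left, right]
        return best_cs, best_val

    v = lambda c: len(c) ** 1.5               # hypothetical characteristic function
    print(bottom_two_levels(range(4), v))     # ([frozenset({0, 1, 2, 3})], 8.0)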
arxiv-676150 | cs/9810006 | The Tiny Tera: A Packet Switch Core | <|reference_start|>The Tiny Tera: A Packet Switch Core: The objective is to design and build a small, high-bandwidth switch.<|reference_end|> | arxiv | @article{mckeown1998the,
title={The Tiny Tera: A Packet Switch Core},
author={Nick McKeown (Stanford University), Martin Izzard (Texas Instruments),
Adisak Mekkittikul (Stanford University), Bill Ellersick (Stanford
University) and Mark Horowitz (Stanford University)},
journal={Hot Interconnects V, Stanford University, August 1996; IEEE Micro
Jan/Feb 1997, pp 26-33},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810006},
primaryClass={cs.NI}
} | mckeown1998the |
arxiv-676151 | cs/9810007 | Randomization yields simple O(n log star n) algorithms for difficult Omega(n) problems | <|reference_start|>Here we adapt the results on the influence graph of Boissonnat et al. to particular cases where additional information is available. In some cases, it is possible to improve the expected randomized complexity of algorithms from O(n log n) to O(n log star n). The technique applies to the following problems: triangulation of a simple polygon, skeleton of a simple polygon, and Delaunay triangulation of points knowing the EMST (Euclidean minimum spanning tree).<|reference_end|> | arxiv | @article{devillers1998randomization,
title={Randomization yields simple O(n log star n) algorithms for difficult
Omega(n) problems},
author={Olivier Devillers},
journal={Internat. J. Comput. Geom. Appl., 2(1):621--635, 1992},
year={1998},
number={Rapport de recherche 1412, INRIA, 1991},
archivePrefix={arXiv},
eprint={cs/9810007},
primaryClass={cs.CG}
} | devillers1998randomization |
arxiv-676152 | cs/9810008 | Axiomatizing Flat Iteration | <|reference_start|>Flat iteration is a variation on the original binary version of the Kleene star operation P*Q, obtained by restricting the first argument to be a sum of atomic actions. It generalizes prefix iteration, in which the first argument is a single action. Complete finite equational axiomatizations are given for five notions of bisimulation congruence over basic CCS with flat iteration, viz. strong congruence, branching congruence, eta-congruence, delay congruence and weak congruence. Such axiomatizations were already known for prefix iteration and are known not to exist for general iteration. The use of flat iteration has two main advantages over prefix iteration: 1. The current axiomatizations generalize to full CCS, whereas the prefix iteration approach does not allow an elimination theorem for an asynchronous parallel composition operator. 2. The greater expressiveness of flat iteration allows for much shorter completeness proofs. In the setting of prefix iteration, the most convenient way to obtain the completeness theorems for eta-, delay, and weak congruence was by reduction to the completeness theorem for branching congruence. In the case of weak congruence this turned out to be much simpler than the only direct proof found. In the setting of flat iteration on the other hand, the completeness theorems for delay and weak (but not eta-) congruence can equally well be obtained by reduction to the one for strong congruence, without using branching congruence as an intermediate step. Moreover, the completeness results for prefix iteration can be retrieved from those for flat iteration, thus obtaining a second indirect approach for proving completeness for delay and weak congruence in the setting of prefix iteration.<|reference_end|> | arxiv | @article{van glabbeek1998axiomatizing,
title={Axiomatizing Flat Iteration},
author={R. J. van Glabbeek (Stanford)},
journal={Proc. CONCUR '97, Warsaw, Poland, July 1997 (A. Mazurkiewicz and
J. Winkowski, eds.), LNCS 1243, Springer-Verlag, 1997, pp. 228-242},
year={1998},
number={STAN-CS-TN-97-57},
archivePrefix={arXiv},
eprint={cs/9810008},
primaryClass={cs.LO}
} | van glabbeek1998axiomatizing |
arxiv-676153 | cs/9810009 | Object-Oriented Design of Graph Oriented Data Structures | <|reference_start|>Object-Oriented Design of Graph Oriented Data Structures: Applied research in graph algorithms and combinatorial structures needs comprehensive and versatile software libraries. However, the design and the implementation of flexible libraries are challenging activities. Among the other problems involved in such a difficult field, a very special role is played by graph classification issues. We propose new techniques devised to help the designer and the programmer in the development activities. Such techniques are especially suited for dealing with graph classification problems and rely on an extension of the usual object-oriented paradigm. In order to support the usage of our approach, we devised an extension of the C++ programming language and implemented the corresponding pre-compiler.<|reference_end|> | arxiv | @article{pizzonia1998object-oriented,
title={Object-Oriented Design of Graph Oriented Data Structures},
author={Maurizio Pizzonia, Giuseppe Di Battista},
journal={arXiv preprint arXiv:cs/9810009},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810009},
primaryClass={cs.SE cs.CG cs.DS}
} | pizzonia1998object-oriented |
arxiv-676154 | cs/9810010 | C++ Templates as Partial Evaluation | <|reference_start|>C++ Templates as Partial Evaluation: This paper explores the relationship between C++ templates and partial evaluation. Templates were designed to support generic programming, but unintentionally provided the ability to perform compile-time computations and code generation. These features are completely accidental, and as a result their syntax is awkward. By recasting these features in terms of partial evaluation, a much simpler syntax can be achieved. C++ may be regarded as a two-level language in which types are first-class values. Template instantiation resembles an offline partial evaluator. This paper describes preliminary work toward a single mechanism based on Partial Evaluation which unifies generic programming, compile-time computation and code generation. The language Catat is introduced to illustrate these ideas.<|reference_end|> | arxiv | @article{veldhuizen1998c++,
title={C++ Templates as Partial Evaluation},
author={Todd L. Veldhuizen},
journal={arXiv preprint arXiv:cs/9810010},
year={1998},
number={IUCS 519},
archivePrefix={arXiv},
eprint={cs/9810010},
primaryClass={cs.PL cs.PF}
} | veldhuizen1998c++ |
arxiv-676155 | cs/9810011 | Flysig: Dataflow Oriented Delay-Insensitive Processor for Rapid Prototyping of Signal Processing | <|reference_start|>As the one-chip integration of HW-modules designed by different companies becomes more and more popular, reliability of a HW-design and evaluation of the timing behavior during the prototype stage are absolutely necessary. One way to guarantee reliability is the use of robust design styles, e.g., delay-insensitivity. For early timing evaluation, two aspects must be considered: a) The timing needs to be proportional to technology variations and b) the implemented architecture should be identical for prototype and target. The first can also be met by a delay-insensitive implementation. The latter one is the key point. A unified architecture is needed for prototyping as well as implementation. Our new approach to rapid prototyping of signal processing tasks is based on a configurable, delay-insensitively implemented processor called Flysig. In essence, the Flysig processor can be understood as a complex FPGA where the CLBs are substituted by bit-serial operators. In this paper, the general concept is detailed and first experimental results are given to demonstrate the main advantages: delay-insensitive design style, direct correspondence between prototyping and target architecture, high performance and reasonable shortening of the design cycle.<|reference_end|> | arxiv | @article{hardt1998flysig,
title={Flysig: Dataflow Oriented Delay-Insensitive Processor for Rapid
Prototyping of Signal Processing},
author={Wolfram Hardt, Bernd Kleinjohann},
journal={Nineth IEEE International Workshop on Rapid System Prototyping
1998, Belgium, IEEE Computer Society Press},
year={1998},
doi={10.1109/IWRSP.1998.676682},
archivePrefix={arXiv},
eprint={cs/9810011},
primaryClass={cs.AR}
} | hardt1998flysig: |
arxiv-676156 | cs/9810012 | Ultrametric Distance in Syntax | <|reference_start|>Phrase structure trees have a hierarchical structure. In many subjects, most notably in taxonomy, such tree structures have been studied using ultrametrics. Here syntactical hierarchical phrase trees are subject to a similar analysis, which is much simpler as the branching structure is more readily discernible and switched. The occurrence of hierarchical structure elsewhere in linguistics is mentioned. The phrase tree can be represented by a matrix and the elements of the matrix can be represented by triangles. The height at which branching occurs is not prescribed in previous syntactic models, but it is by using the ultrametric matrix. The ambiguity of which branching height to choose is resolved by postulating that branching occurs at the lowest height available. An ultrametric produces a measure of the complexity of sentences: presumably the complexity of sentences increases as a language is acquired, so that this can be tested. All ultrametric triangles are equilateral or isosceles; here it is shown that X-bar structure implies that there are no equilateral triangles. Restricting attention to simple syntax, a minimum ultrametric distance between lexical categories is calculated. This ultrametric distance is shown to be different from the matrix obtained from features. It is shown that the definition of c-command can be replaced by an equivalent ultrametric definition. The new definition invokes a minimum distance between nodes and this is more aesthetically satisfying than previous varieties of definitions. From the new definition of c-command follows a new definition of government.<|reference_end|> | arxiv | @article{roberts1998ultrametric,
title={Ultrametric Distance in Syntax},
author={Mark D. Roberts},
journal={Prague Bulletin of Mathematical Linguistics 103 (2015) 111-130},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810012},
primaryClass={cs.CL q-bio.NC}
} | roberts1998ultrametric |
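The strong triangle inequality underlying roberts1998ultrametric forces every triangle in an ultrametric space to be equilateral or isosceles with the two longest sides equal, which is mechanical to check. A small Python sketch over a hypothetical distance matrix between four lexical categories:

    from itertools import combinations

    def is_ultrametric(d):
        # Strong triangle inequality: d[a][c] <= max(d[a][b], d[b][c]) for
        # all triples; d is symmetric with a zero diagonal.
        n = len(d)
        return all(d[a][c] <= max(d[a][b], d[b][c])
                   for a in range(n) for b in range(n) for c in range(n))

    def triangle_type(d, i, j, k):
        s = sorted([d[i][j], d[j][k], d[i][k]])
        if s[0] == s[2]:
            return "equilateral"
        return "isosceles" if s[1] == s[2] else "scalene"

    # hypothetical ultrametric: categories clustered as {0, 2} and {1, 3}
    # at height 1, with the two clusters joined at height 2
    d = [[0, 2, 1, 2],
         [2, 0, 2, 1],
         [1, 2, 0, 2],
         [2, 1, 2, 0]]
    assert is_ultrametric(d)
    print({t: triangle_type(d, *t) for t in combinations(range(4), 3)})
    # every triangle comes out isosceles; none is scalene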
arxiv-676157 | cs/9810013 | Early Experience with ASDL in lcc | <|reference_start|>Early Experience with ASDL in lcc: The Abstract Syntax Description Language (ASDL) is a language for specifying the tree data structures often found in compiler intermediate representations. The ASDL generator reads an ASDL specification and generates code to construct, read, and write instances of the trees specified. Using ASDL permits a compiler to be decomposed into semi-independent components that communicate by reading and writing trees. Each component can be written in a different language, because the ASDL generator can emit code in several languages, and the files written by ASDL-generated code are machine- and language-independent. ASDL is part of the National Compiler Infrastructure project, which seeks to reduce dramatically the overhead of computer systems research by making it much easier to build high-quality compilers. This paper describes dividing lcc, a widely used retargetable C compiler, into two components that communicate via trees defined in ASDL. As the first use of ASDL in a `real' compiler, this experience reveals much about the effort required to retrofit an existing compiler to use ASDL, the overheads involved, and the strengths and weaknesses of ASDL itself and, secondarily, of lcc.<|reference_end|> | arxiv | @article{hanson1998early,
title={Early Experience with ASDL in lcc},
author={David R. Hanson},
journal={Software--Practice & Experience, vol. 29, no. 5, 417-435, Apr.
1999},
year={1998},
number={Microsoft Research MSR-TR-98-50},
archivePrefix={arXiv},
eprint={cs/9810013},
primaryClass={cs.PL cs.SE}
} | hanson1998early |
arxiv-676158 | cs/9810014 | Resources for Evaluation of Summarization Techniques | <|reference_start|>Resources for Evaluation of Summarization Techniques: We report on two corpora to be used in the evaluation of component systems for the tasks of (1) linear segmentation of text and (2) summary-directed sentence extraction. We present characteristics of the corpora, methods used in the collection of user judgments, and an overview of the application of the corpora to evaluating the component system. Finally, we discuss the problems and issues with construction of the test set which apply broadly to the construction of evaluation resources for language technologies.<|reference_end|> | arxiv | @article{klavans1998resources,
title={Resources for Evaluation of Summarization Techniques},
author={Judith L. Klavans (Columbia University), Kathleen R. McKeown (Columbia
University), Min-Yen Kan (Columbia University) and Susan Lee (University of
California at Berkeley)},
journal={in Proc. of First International Conference on Language Resources
and Evaluation, Rubio, Gallardo, Castro, and Tejada (eds.), Granada, Spain,
1998},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810014},
primaryClass={cs.CL}
} | klavans1998resources |
arxiv-676159 | cs/9810015 | Restrictions on Tree Adjoining Languages | <|reference_start|>Restrictions on Tree Adjoining Languages: Several methods are known for parsing languages generated by Tree Adjoining Grammars (TAGs) in O(n^6) worst case running time. In this paper we investigate which restrictions on TAGs and TAG derivations are needed in order to lower this O(n^6) time complexity, without introducing large runtime constants, and without losing any of the generative power needed to capture the syntactic constructions in natural language that can be handled by unrestricted TAGs. In particular, we describe an algorithm for parsing a strict subclass of TAG in O(n^5), and attempt to show that this subclass retains enough generative power to make it useful in the general case.<|reference_end|> | arxiv | @article{satta1998restrictions,
title={Restrictions on Tree Adjoining Languages},
author={Giorgio Satta (Universita di Padova) and William Schuler (University
of Pennsylvania)},
journal={Proceedings of COLING-ACL'98},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810015},
primaryClass={cs.CL}
} | satta1998restrictions |
arxiv-676160 | cs/9810016 | SYNERGY: A Linear Planner Based on Genetic Programming | <|reference_start|>SYNERGY: A Linear Planner Based on Genetic Programming: In this paper we describe SYNERGY, which is a highly parallelizable, linear planning system that is based on the genetic programming paradigm. Rather than reasoning about the world it is planning for, SYNERGY uses artificial selection, recombination and fitness measure to generate linear plans that solve conjunctive goals. We ran SYNERGY on several domains (e.g., the briefcase problem and a few variants of the robot navigation problem), and the experimental results show that our planner is capable of handling problem instances that are one to two orders of magnitude larger than the ones solved by UCPOP. In order to facilitate the search reduction and to enhance the expressive power of SYNERGY, we also propose two major extensions to our planning system: a formalism for using hierarchical planning operators, and a framework for planning in dynamic environments.<|reference_end|> | arxiv | @article{muslea1998synergy:,
title={SYNERGY: A Linear Planner Based on Genetic Programming},
author={Ion Muslea},
journal={"Recent Advances in AI Planning" (Sam Steel & Rachid Alami eds.),
p. 312-325, Springer 1997 (LNAI 1348)},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810016},
primaryClass={cs.AI}
} | muslea1998synergy: |
arxiv-676161 | cs/9810017 | General Theory of Image Normalization | <|reference_start|>General Theory of Image Normalization: We give a systematic, abstract formulation of the image normalization method as applied to a general group of image transformations, and then illustrate the abstract analysis by applying it to the hierarchy of viewing transformations of a planar object.<|reference_end|> | arxiv | @article{adler1998general,
title={General Theory of Image Normalization},
author={Stephen L. Adler},
journal={arXiv preprint arXiv:cs/9810017},
year={1998},
number={IASSNS-HEP-95/89},
archivePrefix={arXiv},
eprint={cs/9810017},
primaryClass={cs.CV}
} | adler1998general |
arxiv-676162 | cs/9810018 | A Proof Theoretic View of Constraint Programming | <|reference_start|>A Proof Theoretic View of Constraint Programming: We provide here a proof theoretic account of constraint programming that attempts to capture the essential ingredients of this programming style. We exemplify it by presenting proof rules for linear constraints over interval domains, and illustrate their use by analyzing the constraint propagation process for the {\tt SEND + MORE = MONEY} puzzle. We also show how this approach allows one to build new constraint solvers.<|reference_end|> | arxiv | @article{apt1998a,
title={A Proof Theoretic View of Constraint Programming},
author={Krzysztof R. Apt},
journal={Fundamenta Informaticae 34(1998), pp. 295-321},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810018},
primaryClass={cs.AI cs.PL}
} | apt1998a |
arxiv-676163 | cs/9810019 | Gryphon: An Information Flow Based Approach to Message Brokering | <|reference_start|>Gryphon: An Information Flow Based Approach to Message Brokering: Gryphon is a distributed computing paradigm for message brokering, which is the transferring of information in the form of streams of events from information providers to information consumers. This extended abstract outlines the major problems in message brokering and Gryphon's approach to solving them.<|reference_end|> | arxiv | @article{strom1998gryphon:,
title={Gryphon: An Information Flow Based Approach to Message Brokering},
author={Robert Strom, Guruduth Banavar, Tushar Chandra, Marc Kaplan, Kevan
Miller, Bodhi Mukherjee, Daniel Sturman, and Michael Ward},
journal={arXiv preprint arXiv:cs/9810019},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810019},
primaryClass={cs.DC}
} | strom1998gryphon: |
arxiv-676164 | cs/9810020 | Computational Geometry Column 33 | <|reference_start|>Computational Geometry Column 33: Several recent SIGGRAPH papers on surface simplification are described.<|reference_end|> | arxiv | @article{o'rourke1998computational,
title={Computational Geometry Column 33},
author={Joseph O'Rourke},
journal={Internat. J. Comput. Geom. Appl., 8(3) 381-384, 1998. Also in
SIGACT News, 29(2) (Issue 107) 14-16, 1998},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810020},
primaryClass={cs.CG cs.AI cs.GR}
} | o'rourke1998computational |
arxiv-676165 | cs/9810021 | Computational Geometry Column 32 | <|reference_start|>Computational Geometry Column 32: The proof of Dey's new k-set bound is illustrated.<|reference_end|> | arxiv | @article{o'rourke1998computational,
title={Computational Geometry Column 32},
author={Joseph O'Rourke},
journal={Internat. J. Comput. Geom. Appl., 7(5) 509-513, 1997. Also in
SIGACT News, 29(2) (Issue 107) 14-16, 1998},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810021},
primaryClass={cs.CG cs.GR}
} | o'rourke1998computational |
arxiv-676166 | cs/9810022 | Broy-Lamport Specification Problem: A Gurevich Abstract State Machine Solution | <|reference_start|>Broy-Lamport Specification Problem: A Gurevich Abstract State Machine Solution: We apply the Gurevich Abstract State Machine methodology to a benchmark specification problem of Broy and Lamport.<|reference_end|> | arxiv | @article{huggins1998broy-lamport,
title={Broy-Lamport Specification Problem: A Gurevich Abstract State Machine
Solution},
author={James K. Huggins},
journal={arXiv preprint arXiv:cs/9810022},
year={1998},
number={University of Michigan EECS Department Technical Report
CSE-TR-320-96},
archivePrefix={arXiv},
eprint={cs/9810022},
primaryClass={cs.SE}
} | huggins1998broy-lamport |
arxiv-676167 | cs/9810023 | Equivalence is in the Eye of the Beholder | <|reference_start|>Equivalence is in the Eye of the Beholder: In a recent provocative paper, Lamport points out "the insubstantiality of processes" by proving the equivalence of two different decompositions of the same intuitive algorithm by means of temporal formulas. We point out that the correct equivalence of algorithms is itself in the eye of the beholder. We discuss a number of related issues and, in particular, whether algorithms can be proved equivalent directly.<|reference_end|> | arxiv | @article{gurevich1998equivalence,
title={Equivalence is in the Eye of the Beholder},
author={Yuri Gurevich and James K. Huggins},
journal={Theoretical Computer Science (179) 1-2 (1997), 353-380},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810023},
primaryClass={cs.SE}
} | gurevich1998equivalence |
arxiv-676168 | cs/9810024 | Evolving Algebras and Partial Evaluation | <|reference_start|>Evolving Algebras and Partial Evaluation: We describe an automated partial evaluator for evolving algebras implemented at the University of Michigan.<|reference_end|> | arxiv | @article{gurevich1998evolving,
title={Evolving Algebras and Partial Evaluation},
author={Yuri Gurevich and James K. Huggins},
journal={In IFIP 13th World Computer Congress 1994, Volume I: Technology
and Foundations, eds. B. Pehrson and I. Simon, North-Holland, Amsterdam,
587-592},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810024},
primaryClass={cs.SE}
} | gurevich1998evolving |
arxiv-676169 | cs/9810025 | An Offline Partial Evaluator for Evolving Algebras | <|reference_start|>An Offline Partial Evaluator for Evolving Algebras: We describe the architecture of an evolving algebra partial evaluator, a program which specializes an evolving algebra with respect to a portion of its input. We discuss the particular analysis, specialization, and optimization techniques used and show an example of its use.<|reference_end|> | arxiv | @article{huggins1998an,
title={An Offline Partial Evaluator for Evolving Algebras},
author={James K. Huggins},
journal={arXiv preprint arXiv:cs/9810025},
year={1998},
number={University of Michigan EECS Department Technical Report
CSE-TR-229-95},
archivePrefix={arXiv},
eprint={cs/9810025},
primaryClass={cs.SE}
} | huggins1998an |
arxiv-676170 | cs/9810026 | The Railroad Crossing Problem: An Experiment with Instantaneous Actions and Immediate Reactions | <|reference_start|>The Railroad Crossing Problem: An Experiment with Instantaneous Actions and Immediate Reactions: We give an evolving algebra solution for the well-known railroad crossing problem and use the occasion to experiment with agents that perform instantaneous actions in continuous time and in particular with agents that fire at the moment they are enabled.<|reference_end|> | arxiv | @article{gurevich1998the,
title={The Railroad Crossing Problem: An Experiment with Instantaneous Actions
and Immediate Reactions},
author={Yuri Gurevich and James K. Huggins},
journal={Selected papers from CSL'95, ed. H.K. Buening, Springer Lecture
Notes in Computer Science 1092, 1996, 266--290},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810026},
primaryClass={cs.SE}
} | gurevich1998the |
arxiv-676171 | cs/9810027 | Linguistic Reflection in Java | <|reference_start|>Linguistic Reflection in Java: Reflective systems allow their own structures to be altered from within. Here we are concerned with a style of reflection, called linguistic reflection, which is the ability of a running program to generate new program fragments and to integrate these into its own execution. In particular we describe how this kind of reflection may be provided in the compiler-based, strongly typed object-oriented programming language Java. The advantages of the programming technique include attaining high levels of genericity and accommodating system evolution. These advantages are illustrated by an example taken from persistent programming which shows how linguistic reflection allows functionality (program code) to be generated on demand (Just-In-Time) from a generic specification and integrated into the evolving running program. The technique is evaluated against alternative implementation approaches with respect to efficiency, safety and ease of use.<|reference_end|> | arxiv | @article{kirby1998linguistic,
title={Linguistic Reflection in Java},
author={G. N. C. Kirby, R. Morrison, D. W. Stemple},
journal={Software - Practice & Experience 28, 10 (1998) pp 1045-1077},
year={1998},
archivePrefix={arXiv},
eprint={cs/9810027},
primaryClass={cs.PL}
} | kirby1998linguistic |
arxiv-676172 | cs/9811001 | A Polymorphic Groundness Analysis of Logic Programs | <|reference_start|>A Polymorphic Groundness Analysis of Logic Programs: A polymorphic analysis is an analysis whose input and output contain parameters which serve as placeholders for information that is unknown before analysis but provided after analysis. In this paper, we present a polymorphic groundness analysis that infers parameterised groundness descriptions of the variables of interest at a program point. The polymorphic groundness analysis is designed by replacing two primitive operators used in a monomorphic groundness analysis and is shown to be as precise as the monomorphic groundness analysis for any possible values for mode parameters. Experimental results of a prototype implementation of the polymorphic groundness analysis are given.<|reference_end|> | arxiv | @article{lu1998a,
title={A Polymorphic Groundness Analysis of Logic Programs},
author={Lunjin Lu (University of Waikato)},
journal={arXiv preprint arXiv:cs/9811001},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811001},
primaryClass={cs.PL}
} | lu1998a |
arxiv-676173 | cs/9811002 | Factorization of linear partial differential operators and Darboux integrability of nonlinear PDEs | <|reference_start|>Using a new definition of generalized divisors, we prove that the lattice of such divisors for a given linear partial differential operator is modular and obtain analogues of the well-known theorems of the Loewy-Ore theory of factorization of linear ordinary differential operators. Possible applications to factorized Groebner bases computations in the commutative and non-commutative cases are discussed, and an application to finding criteria of Darboux integrability of nonlinear PDEs is given.<|reference_end|> | arxiv | @article{tsarev1998factorization,
title={Factorization of linear partial differential operators and Darboux
integrability of nonlinear PDEs},
author={Serguei P. Tsarev},
journal={arXiv preprint arXiv:cs/9811002},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811002},
primaryClass={cs.SC nlin.SI solv-int}
} | tsarev1998factorization |
arxiv-676174 | cs/9811003 | A Winnow-Based Approach to Context-Sensitive Spelling Correction | <|reference_start|>A Winnow-Based Approach to Context-Sensitive Spelling Correction: A large class of machine-learning problems in natural language require the characterization of linguistic context. Two characteristic properties of such problems are that their feature space is of very high dimensionality, and their target concepts refer to only a small subset of the features in the space. Under such conditions, multiplicative weight-update algorithms such as Winnow have been shown to have exceptionally good theoretical properties. We present an algorithm combining variants of Winnow and weighted-majority voting, and apply it to a problem in the aforementioned class: context-sensitive spelling correction. This is the task of fixing spelling errors that happen to result in valid words, such as substituting "to" for "too", "casual" for "causal", etc. We evaluate our algorithm, WinSpell, by comparing it against BaySpell, a statistics-based method representing the state of the art for this task. We find: (1) When run with a full (unpruned) set of features, WinSpell achieves accuracies significantly higher than BaySpell was able to achieve in either the pruned or unpruned condition; (2) When compared with other systems in the literature, WinSpell exhibits the highest performance; (3) The primary reason that WinSpell outperforms BaySpell is that WinSpell learns a better linear separator; (4) When run on a test set drawn from a different corpus than the training set was drawn from, WinSpell is better able than BaySpell to adapt, using a strategy we will present that combines supervised learning on the training set with unsupervised learning on the (noisy) test set.<|reference_end|> | arxiv | @article{golding1998a,
title={A Winnow-Based Approach to Context-Sensitive Spelling Correction},
author={Andrew R. Golding and Dan Roth},
journal={arXiv preprint arXiv:cs/9811003},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811003},
primaryClass={cs.LG cs.CL}
} | golding1998a |
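The multiplicative update at the heart of golding1998a can be stated in a few lines. The following Python sketch is a generic positive Winnow over sparse boolean features; the promotion/demotion parameters and the toy features are assumptions, not WinSpell's tuned configuration (which additionally combines Winnow variants by weighted-majority voting):

    class Winnow:
        # Positive Winnow over sparse boolean features: alpha > 1 promotes,
        # 0 < beta < 1 demotes, and only the features active in a mistaken
        # example are re-weighted -- the source of attribute efficiency.
        def __init__(self, alpha=1.5, beta=0.5, threshold=1.0):
            self.alpha, self.beta, self.theta = alpha, beta, threshold
            self.w = {}                       # unseen features start at 1.0

        def score(self, features):
            return sum(self.w.setdefault(f, 1.0) for f in features)

        def update(self, features, label):
            # label is 1 or 0; multiplicative update happens only on mistakes
            if (self.score(features) >= self.theta) != bool(label):
                factor = self.alpha if label else self.beta
                for f in features:
                    self.w[f] *= factor

    clf = Winnow()
    data = [({"cloudy", "rainy"}, 1), ({"evening", "gown"}, 0)]  # toy contexts
    for _ in range(20):
        for feats, y in data:
            clf.update(feats, y)
    print(clf.score({"cloudy", "rainy"}) >= clf.theta)   # True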
arxiv-676175 | cs/9811004 | Does Meaning Evolve? | <|reference_start|>Does Meaning Evolve?: A common method of making a theory more understandable, is by comparing it to another theory which has been better developed. Radical interpretation is a theory which attempts to explain how communication has meaning. Radical interpretation is treated as another time-dependent theory and compared to the time dependent theory of biological evolution. The main reason for doing this is to find the nature of the time dependence; producing analogs between the two theories is a necessary prerequisite to this and brings up many problems. Once the nature of the time dependence is better known it might allow the underlying mechanism to be uncovered. Several similarities and differences are uncovered, there appear to be more differences than similarities.<|reference_end|> | arxiv | @article{roberts1998does,
title={Does Meaning Evolve?},
author={Mark D. Roberts},
journal={arXiv preprint arXiv:cs/9811004},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811004},
primaryClass={cs.CL q-bio.PE}
} | roberts1998does |
arxiv-676176 | cs/9811005 | Writing and Editing Complexity Theory: Tales and Tools | <|reference_start|>Writing and Editing Complexity Theory: Tales and Tools: Each researcher should have a full shelf---physical or virtual---of books on writing and editing prose. Though we make no claim to any special degree of expertise, we recently edited a book of complexity theory surveys (Complexity Theory Retrospective II, Springer-Verlag, 1997), and in doing so we were brought into particularly close contact with the subject of this article, and with a number of the excellent resources available to writers and editors. In this article, we list some of these resources, and we also relate some of the adventures we had as our book moved from concept to reality.<|reference_end|> | arxiv | @article{hemaspaandra1998writing,
title={Writing and Editing Complexity Theory: Tales and Tools},
author={Lane A. Hemaspaandra and Alan L. Selman},
journal={arXiv preprint arXiv:cs/9811005},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811005},
primaryClass={cs.GL cs.CC}
} | hemaspaandra1998writing |
arxiv-676177 | cs/9811006 | Machine Learning of Generic and User-Focused Summarization | <|reference_start|>Machine Learning of Generic and User-Focused Summarization: A key problem in text summarization is finding a salience function which determines what information in the source should be included in the summary. This paper describes the use of machine learning on a training corpus of documents and their abstracts to discover salience functions which describe what combination of features is optimal for a given summarization task. The method addresses both "generic" and user-focused summaries.<|reference_end|> | arxiv | @article{mani1998machine,
title={Machine Learning of Generic and User-Focused Summarization},
author={Inderjeet Mani and Eric Bloedorn},
journal={arXiv preprint arXiv:cs/9811006},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811006},
primaryClass={cs.CL cs.LG}
} | mani1998machine |
arxiv-676178 | cs/9811007 | Second Product Line Practice Workshop Report | <|reference_start|>Second Product Line Practice Workshop Report: The second Software Engineering Institute Product Line Practice Workshop was a hands-on meeting held in November 1997 to share industry practices in software product lines and to explore the technical and non-technical issues involved. This report synthesizes the workshop presentations and discussions, which identified factors involved in product line practices and analyzed issues in the areas of software engineering, technical management, and enterprise management.<|reference_end|> | arxiv | @article{bass1998second,
title={Second Product Line Practice Workshop Report},
author={L. Bass, G. Chastek, P. Clements, L. Northrop, D. Smith, J. Withey},
journal={arXiv preprint arXiv:cs/9811007},
year={1998},
number={CMU/SEI-98-TR-015},
archivePrefix={arXiv},
eprint={cs/9811007},
primaryClass={cs.SE}
} | bass1998second |
arxiv-676179 | cs/9811008 | Translating near-synonyms: Possibilities and preferences in the interlingua | <|reference_start|>Translating near-synonyms: Possibilities and preferences in the interlingua: This paper argues that an interlingual representation must explicitly represent some parts of the meaning of a situation as possibilities (or preferences), not as necessary or definite components of meaning (or constraints). Possibilities enable the analysis and generation of nuance, something required for faithful translation. Furthermore, the representation of the meaning of words, especially of near-synonyms, is crucial, because it specifies which nuances words can convey in which contexts.<|reference_end|> | arxiv | @article{edmonds1998translating,
title={Translating near-synonyms: Possibilities and preferences in the
interlingua},
author={Philip Edmonds (University of Toronto)},
journal={Proceedings of the AMTA/SIG-IL Second Workshop on Interlinguas,
October 1998},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811008},
primaryClass={cs.CL}
} | edmonds1998translating |
arxiv-676180 | cs/9811009 | Choosing the Word Most Typical in Context Using a Lexical Co-occurrence Network | <|reference_start|>Choosing the Word Most Typical in Context Using a Lexical Co-occurrence Network: This paper presents a partial solution to a component of the problem of lexical choice: choosing the synonym most typical, or expected, in context. We apply a new statistical approach to representing the context of a word through lexical co-occurrence networks. The implementation was trained and evaluated on a large corpus, and results show that the inclusion of second-order co-occurrence relations improves the performance of our implemented lexical choice program.<|reference_end|> | arxiv | @article{edmonds1998choosing,
title={Choosing the Word Most Typical in Context Using a Lexical Co-occurrence
Network},
author={Philip Edmonds (University of Toronto)},
journal={Proceedings of ACL-EACL '97, student session},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811009},
primaryClass={cs.CL}
} | edmonds1998choosing |
arxiv-676181 | cs/9811010 | Learning to Resolve Natural Language Ambiguities: A Unified Approach | <|reference_start|>We analyze a few of the commonly used statistics-based and machine learning algorithms for natural language disambiguation tasks and observe that they can be re-cast as learning linear separators in the feature space. Each of the methods makes a priori assumptions, which it employs, given the data, when searching for its hypothesis. Nevertheless, as we show, it searches a space that is as rich as the space of all linear separators. We use this to build an argument for a data-driven approach which merely searches for a good linear separator in the feature space, without further assumptions on the domain or a specific problem. We present such an approach - a sparse network of linear separators, utilizing the Winnow learning algorithm - and show how to use it in a variety of ambiguity resolution problems. The learning approach presented is attribute-efficient and, therefore, appropriate for domains having a very large number of attributes. In particular, we present an extensive experimental comparison of our approach with other methods on several well-studied lexical disambiguation tasks such as context-sensitive spelling correction, prepositional phrase attachment and part of speech tagging. In all cases we show that our approach either outperforms other methods tried for these tasks or performs comparably to the best.<|reference_end|> | arxiv | @article{roth1998learning,
title={Learning to Resolve Natural Language Ambiguities: A Unified Approach},
author={Dan Roth},
journal={Proceedings of AAAI'98, pp. 806--813},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811010},
primaryClass={cs.CL cs.LG}
} | roth1998learning |
arxiv-676182 | cs/9811011 | Case Study in Survivable Network System Analysis | <|reference_start|>Case Study in Survivable Network System Analysis: This paper presents a method for analyzing the survivability of distributed network systems and an example of its application.<|reference_end|> | arxiv | @article{ellison1998case,
title={Case Study in Survivable Network System Analysis},
author={Robert Ellison, Rick Linger, Thomas Longstaff, Nancy Mead},
journal={arXiv preprint arXiv:cs/9811011},
year={1998},
number={CMU/SEI-98-TR-014},
archivePrefix={arXiv},
eprint={cs/9811011},
primaryClass={cs.SE}
} | ellison1998case |
arxiv-676183 | cs/9811012 | Deriving Abstract Semantics for Forward Analysis of Normal Logic Programs | <|reference_start|>The problem of forward abstract interpretation of {\em normal} logic programs has not been formally addressed in the literature although negation as failure is dealt with through the built-in predicate ! in the way it is implemented in Prolog. This paper proposes a solution to this problem by deriving two generic fixed-point abstract semantics $F^b$ and $F^\diamond$ for forward abstract interpretation of {\em normal} logic programs. $F^b$ is intended for inferring data descriptions for edges in the program graph where an edge denotes the possibility that the control of execution transfers from its source program point to its destination program point. $F^\diamond$ is derived from $F^b$ and is intended for inferring data descriptions for textual program points.<|reference_end|> | arxiv | @article{lu1998deriving,
title={Deriving Abstract Semantics for Forward Analysis of Normal Logic
Programs},
author={Lunjin Lu (University of Waikato)},
journal={arXiv preprint arXiv:cs/9811012},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811012},
primaryClass={cs.PL cs.LO}
} | lu1998deriving |
arxiv-676184 | cs/9811013 | The Asilomar Report on Database Research | <|reference_start|>The Asilomar Report on Database Research: The database research community is rightly proud of success in basic research, and its remarkable record of technology transfer. Now the field needs to radically broaden its research focus to attack the issues of capturing, storing, analyzing, and presenting the vast array of online data. The database research community should embrace a broader research agenda -- broadening the definition of database management to embrace all the content of the Web and other online data stores, and rethinking our fundamental assumptions in light of technology shifts. To accelerate this transition, we recommend changing the way research results are evaluated and presented. In particular, we advocate encouraging more speculative and long-range work, moving conferences to a poster format, and publishing all research literature on the Web.<|reference_end|> | arxiv | @article{bernstein1998the,
title={The Asilomar Report on Database Research},
author={Phil Bernstein, Michael Brodie, Stefano Ceri, David DeWitt, Mike
Franklin, Hector Garcia-Molina, Jim Gray, Jerry Held, Joe Hellerstein, H. V.
Jagadish, Michael Lesk, Dave Maier, Jeff Naughton, Hamid Pirahesh, Mike
Stonebraker, Jeff Ullman},
journal={ACM SIGMOD Record, December 1998},
year={1998},
number={MSR TR 98 57},
archivePrefix={arXiv},
eprint={cs/9811013},
primaryClass={cs.DB cs.DL}
} | bernstein1998the |
arxiv-676185 | cs/9811014 | Abstract State Machines 1988-1998: Commented ASM Bibliography | <|reference_start|>Abstract State Machines 1988-1998: Commented ASM Bibliography: An annotated bibliography of papers which deal with or use Abstract State Machines (ASMs), as of January 1998.<|reference_end|> | arxiv | @article{boerger1998abstract,
title={Abstract State Machines 1988-1998: Commented ASM Bibliography},
author={Egon Boerger and James K. Huggins},
journal={Formal Specification Column (H. Ehrig, ed.), EATCS Bulletin 64,
February 1998, 105--127},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811014},
primaryClass={cs.SE}
} | boerger1998abstract |
arxiv-676186 | cs/9811015 | An Emptiness Algorithm for Regular Types with Set Operators | <|reference_start|>An Emptiness Algorithm for Regular Types with Set Operators: An algorithm to decide the emptiness of a regular type expression with set operators given a set of parameterised type definitions is presented. The algorithm can also be used to decide the equivalence of two regular type expressions and the inclusion of one regular type expression in another. The algorithm strictly generalises previous work in that tuple distributivity is not assumed and set operators are permitted in type expressions.<|reference_end|> | arxiv | @article{lu1998an,
title={An Emptiness Algorithm for Regular Types with Set Operators},
author={Lunjin Lu and John G. Cleary (University of Waikato)},
journal={arXiv preprint arXiv:cs/9811015},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811015},
primaryClass={cs.LO cs.PL}
} | lu1998an |
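For the core of the problem treated in lu1998an — emptiness of regular types without set operators — the standard least-fixpoint computation over type definitions is short. A Python sketch; the grammar encoding is assumed for illustration, and the paper's algorithm additionally handles intersection, union, complement, and parameterised definitions:

    def nonempty_types(defs):
        # defs: type name -> list of alternatives; each alternative is a
        # pair (functor, [argument type names]). A type is nonempty iff
        # some alternative has all arguments nonempty; iterate to the
        # least fixpoint, which exists because the set only grows.
        nonempty, changed = set(), True
        while changed:
            changed = False
            for t, alts in defs.items():
                if t not in nonempty and any(
                        all(a in nonempty for a in args) for _, args in alts):
                    nonempty.add(t)
                    changed = True
        return nonempty

    defs = {
        "nat":  [("zero", []), ("succ", ["nat"])],
        "list": [("nil", []), ("cons", ["nat", "list"])],
        "loop": [("f", ["loop"])],            # no finite term: empty type
    }
    print(sorted(nonempty_types(defs)))       # ['list', 'nat']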
arxiv-676187 | cs/9811016 | Comparing a statistical and a rule-based tagger for German | <|reference_start|>In this paper we present the results of comparing a statistical tagger for German based on decision trees and a rule-based Brill-Tagger for German. We used the same training corpus (and therefore the same tag-set) to train both taggers. We then applied the taggers to the same test corpus and compared their respective behavior and in particular their error rates. Both taggers perform similarly with an error rate of around 5%. From the detailed error analysis it can be seen that the rule-based tagger has more problems with unknown words than the statistical tagger. But the results are the opposite for tokens that are many-ways ambiguous. If the unknown words are fed into the taggers with the help of an external lexicon (such as the Gertwol system), the error rate of the rule-based tagger drops to 4.7%, and the respective rate of the statistical tagger drops to around 3.7%. Combining the taggers by using the output of one tagger to help the other did not lead to any further improvement.<|reference_end|> | arxiv | @article{volk1998comparing,
title={Comparing a statistical and a rule-based tagger for German},
author={Martin Volk and Gerold Schneider (University of Zurich)},
journal={arXiv preprint arXiv:cs/9811016},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811016},
primaryClass={cs.CL}
} | volk1998comparing |
arxiv-676188 | cs/9811017 | Formulas as Programs | <|reference_start|>We provide here a computational interpretation of first-order logic based on a constructive interpretation of satisfiability w.r.t. a fixed but arbitrary interpretation. In this approach the formulas themselves are programs. This contrasts with the so-called formulas as types approach in which the proofs of the formulas are typed terms that can be taken as programs. This view of computing is inspired by logic programming and constraint logic programming but differs from them in a number of crucial aspects. Formulas as programs is argued to yield a realistic approach to programming that has been realized in the implemented programming language ALMA-0 (Apt et al.), which combines the advantages of imperative and logic programming. The work reported here can also be used to reason about the correctness of non-recursive ALMA-0 programs that do not include destructive assignment.<|reference_end|> | arxiv | @article{apt1998formulas,
title={Formulas as Programs},
author={Krzysztof R. Apt and Marc Bezem},
journal={arXiv preprint arXiv:cs/9811017},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811017},
primaryClass={cs.LO cs.SC}
} | apt1998formulas |
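The reading in apt1998formulas of formulas themselves as programs can be illustrated for a small fragment: evaluating a first-order formula against a fixed finite interpretation, where an existential quantifier executes as a search. The AST encoding in this Python sketch is an assumption for illustration, and it omits the paper's constructive treatment of unbound variables:

    def run(formula, env, domain):
        # Evaluate a formula as a program over a finite interpretation.
        # AST (assumed encoding): ('atom', test), ('and', f, g),
        # ('or', f, g), ('exists', var, f). Atoms are Python predicates
        # over the environment, so evaluation doubles as computation.
        tag = formula[0]
        if tag == "atom":
            return formula[1](env)
        if tag == "and":
            return run(formula[1], env, domain) and run(formula[2], env, domain)
        if tag == "or":
            return run(formula[1], env, domain) or run(formula[2], env, domain)
        if tag == "exists":                   # the quantifier runs as a search
            _, var, body = formula
            return any(run(body, {**env, var: d}, domain) for d in domain)
        raise ValueError(tag)

    # exists y. x = y + y  --  "x is even", computed by running the formula
    is_even = ("exists", "y", ("atom", lambda e: e["x"] == e["y"] + e["y"]))
    print([x for x in range(8) if run(is_even, {"x": x}, range(8))])
    # [0, 2, 4, 6]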
arxiv-676189 | cs/9811018 | P-model Alternative to the T-model | <|reference_start|>Standard linguistic analysis of syntax uses the T-model. This model requires the ordering: D-structure $>$ S-structure $>$ LF. Between each of these representations there is movement which alters the order of the constituent words; movement is achieved using the principles and parameters of syntactic theory. Psychological serial models do not accommodate the T-model immediately, so a new model, called the P-model, is introduced here. It is argued that the LF representation should be replaced by a variant of Frege's three qualities. In the F-representation, the order of elements is not necessarily the same as that in LF, and it is suggested that the correct ordering is: F-representation $>$ D-structure $>$ S-structure. Within this framework movement originates as the outcome of emphasis applied to the sentence.<|reference_end|> | arxiv | @article{roberts1998p-model,
title={P-model Alternative to the T-model},
author={Mark D. Roberts},
journal={arXiv preprint arXiv:cs/9811018},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811018},
primaryClass={cs.CL q-bio.NC}
} | roberts1998p-model |
arxiv-676190 | cs/9811019 | Locked and Unlocked Polygonal Chains in 3D | <|reference_start|>Locked and Unlocked Polygonal Chains in 3D: In this paper, we study movements of simple polygonal chains in 3D. We say that an open, simple polygonal chain can be straightened if it can be continuously reconfigured to a straight sequence of segments in such a manner that both the length of each link and the simplicity of the chain are maintained throughout the movement. The analogous concept for closed chains is convexification: reconfiguration to a planar convex polygon. Chains that cannot be straightened or convexified are called locked. While there are open chains in 3D that are locked, we show that if an open chain has a simple orthogonal projection onto some plane, it can be straightened. For closed chains, we show that there are unknotted but locked closed chains, and we provide an algorithm for convexifying a planar simple polygon in 3D with a polynomial number of moves.<|reference_end|> | arxiv | @article{biedl1998locked,
title={Locked and Unlocked Polygonal Chains in 3D},
author={T. Biedl, E. Demaine, M. Demaine, S. Lazard, A. Lubiw, J. O'Rourke, M.
Overmars, S. Robbins, I. Streinu, G. Toussaint, S. Whitesides},
journal={Proc. 10th ACM-SIAM Sympos. Discrete Algorithms, Jan. 1999, pp.
S866-7.},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811019},
primaryClass={cs.CG cs.DS cs.RO}
} | biedl1998locked |
arxiv-676191 | cs/9811020 | Digitizing Legacy Documents: A Knowledge-Base Preservation Project | <|reference_start|>Digitizing Legacy Documents: A Knowledge-Base Preservation Project: This paper addresses the issue of making legacy information (that material held in paper format only) electronically searchable and retrievable. We used proprietary software and commercial hardware to create a process for scanning, cataloging, archiving and electronically disseminating full-text documents. This process is relatively easy to implement and reasonably affordable.<|reference_end|> | arxiv | @article{anderson1998digitizing,
title={Digitizing Legacy Documents: A Knowledge-Base Preservation Project},
author={Elizabeth Anderson, Robert Atkinson, Cynthia Crego, Jean Slisz and
Sara Tompson},
journal={Illinois Libraries 80:211-219, 1998},
year={1998},
number={Fermilab-TM-2056},
archivePrefix={arXiv},
eprint={cs/9811020},
primaryClass={cs.DL}
} | anderson1998digitizing |
arxiv-676192 | cs/9811021 | Automatic Hardware Synthesis for a Hybrid Reconfigurable CPU Featuring Philips CPLDs | <|reference_start|>A high-level architecture of a Hybrid Reconfigurable CPU, based on a Philips-supported core processor, is introduced. It features the Philips XPLA2 CPLD as a reconfigurable functional unit. A compilation chain is presented, in which automatic implementation of time-critical program segments in custom hardware is performed. The entire process is transparent from the programmer's point of view. The hardware synthesis module of the chain, which translates segments of assembly code into a hardware netlist, is discussed in detail. Application examples are also presented.<|reference_end|> | arxiv | @article{kastrup1998automatic,
title={Automatic Hardware Synthesis for a Hybrid Reconfigurable CPU Featuring
Philips CPLDs},
author={Bernardo Kastrup},
journal={arXiv preprint arXiv:cs/9811021},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811021},
primaryClass={cs.PL cs.AR}
} | kastrup1998automatic |
arxiv-676193 | cs/9811022 | Exploiting Syntactic Structure for Language Modeling | <|reference_start|>The paper presents a language model that develops syntactic structure and uses it to extract meaningful information from the word history, thus enabling the use of long distance dependencies. The model assigns probability to every joint sequence of words--binary-parse-structure with headword annotation and operates in a left-to-right manner --- therefore usable for automatic speech recognition. The model, its probabilistic parameterization, and a set of experiments meant to evaluate its predictive power are presented; an improvement over standard trigram modeling is achieved.<|reference_end|> | arxiv | @article{chelba1998expoiting,
title={Exploiting Syntactic Structure for Language Modeling},
author={Ciprian Chelba, Frederick Jelinek (CLSP The Johns Hopkins University)},
journal={Proceedings of ACL'98, Montreal, Canada},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811022},
primaryClass={cs.CL}
} | chelba1998expoiting |
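For context on the baseline the paper measures against, here is a minimal interpolated trigram model. The smoothing weights are arbitrary assumptions, and the structured model itself, which conditions on headwords exposed by partial parses rather than on the two preceding words, is not reproduced here.

```python
# Interpolated trigram baseline: P(w | w2, w1) as a weighted mix of trigram,
# bigram, and unigram relative frequencies. Weights below are arbitrary.
from collections import defaultdict

class TrigramLM:
    def __init__(self, lambdas=(0.6, 0.3, 0.1)):
        self.c3 = defaultdict(int)  # (w2, w1, w) counts
        self.c2 = defaultdict(int)  # (w1, w) counts
        self.c1 = defaultdict(int)  # w counts
        self.n = 0                  # total predicted tokens
        self.l3, self.l2, self.l1 = lambdas

    def train(self, sentences):
        for sent in sentences:
            w = ["<s>", "<s>"] + sent + ["</s>"]
            for i in range(2, len(w)):
                self.c3[(w[i - 2], w[i - 1], w[i])] += 1
                self.c2[(w[i - 1], w[i])] += 1
                self.c1[w[i]] += 1
                self.n += 1

    def prob(self, w2, w1, w):
        # History counts are recovered by summing continuations; this is
        # O(vocabulary) per query, acceptable only for a sketch.
        h3 = sum(v for k, v in self.c3.items() if k[:2] == (w2, w1))
        h2 = sum(v for k, v in self.c2.items() if k[0] == w1)
        p3 = self.c3[(w2, w1, w)] / h3 if h3 else 0.0
        p2 = self.c2[(w1, w)] / h2 if h2 else 0.0
        p1 = self.c1[w] / self.n if self.n else 0.0
        return self.l3 * p3 + self.l2 * p2 + self.l1 * p1

lm = TrigramLM()
lm.train([["the", "dog", "barks"], ["the", "cat", "sleeps"]])
print(lm.prob("<s>", "the", "dog"))  # 0.4625 with the weights above
```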
arxiv-676194 | cs/9811023 | Complexity limitations on quantum computation | <|reference_start|>Complexity limitations on quantum computation: We use the powerful tools of counting complexity and generic oracles to help understand the limitations of the complexity of quantum computation. We show several results for the probabilistic quantum class BQP. 1. BQP is low for PP, i.e., PP^BQP=PP. 2. There exists a relativized world where P=BQP and the polynomial-time hierarchy is infinite. 3. There exists a relativized world where BQP does not have complete sets. 4. There exists a relativized world where P=BQP but P is not equal to UP intersect coUP and one-way functions exist. This gives a relativized answer to an open question of Simon.<|reference_end|> | arxiv | @article{fortnow1998complexity,
title={Complexity limitations on quantum computation},
author={Lance Fortnow and John D. Rogers},
journal={arXiv preprint arXiv:cs/9811023},
year={1998},
number={CTI-TR-97003},
archivePrefix={arXiv},
eprint={cs/9811023},
primaryClass={cs.CC quant-ph}
} | fortnow1998complexity |
arxiv-676195 | cs/9811024 | The Essence of Constraint Propagation | <|reference_start|>The Essence of Constraint Propagation: We show that several constraint propagation algorithms (also called (local) consistency, consistency enforcing, Waltz, filtering or narrowing algorithms) are instances of algorithms that deal with chaotic iteration. To this end we propose a simple abstract framework that allows us to classify and compare these algorithms and to establish in a uniform way their basic properties.<|reference_end|> | arxiv | @article{apt1998the,
title={The Essence of Constraint Propagation},
author={Krzysztof R. Apt},
journal={arXiv preprint arXiv:cs/9811024},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811024},
primaryClass={cs.AI}
} | apt1998the |
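A minimal sketch of the chaotic-iteration scheme the paper uses as its unifying frame: keep applying domain-reduction functions, in an arbitrary order, until none of them changes anything. The worklist policy and the example `x < y` propagator are illustrative choices of this sketch, not the paper's formulation verbatim.

```python
# Generic chaotic iteration: apply reduction functions until a common
# fixpoint. Domains only ever shrink (the functions are inflationary) and
# are finite, so the loop terminates.

def chaotic_iteration(domains, propagators):
    pending = set(range(len(propagators)))
    while pending:
        i = pending.pop()
        reduced = propagators[i](domains)
        if reduced != domains:       # some domain shrank:
            domains = reduced        # conservatively reschedule everything
            pending = set(range(len(propagators)))
    return domains

def lt_propagator(x, y):
    """Arc-consistency reduction function for the constraint x < y."""
    def f(d):
        d = {v: set(vals) for v, vals in d.items()}
        d[x] = {a for a in d[x] if any(a < b for b in d[y])}
        d[y] = {b for b in d[y] if any(a < b for a in d[x])}
        return d
    return f

doms = {"x": {1, 2, 3}, "y": {1, 2, 3}}
print(chaotic_iteration(doms, [lt_propagator("x", "y")]))
# -> {'x': {1, 2}, 'y': {2, 3}}
```

Real solvers reschedule only the functions that depend on a changed domain; rescheduling everything, as above, keeps the sketch short without affecting the fixpoint reached.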
arxiv-676196 | cs/9811025 | A Structured Language Model | <|reference_start|>A Structured Language Model: The paper presents a language model that develops syntactic structure and uses it to extract meaningful information from the word history, thus enabling the use of long distance dependencies. The model assigns probability to every joint sequence of words and binary parse structure with headword annotation. The model, its probabilistic parameterization, and a set of experiments meant to evaluate its predictive power are presented.<|reference_end|> | arxiv | @article{chelba1998a,
title={A Structured Language Model},
author={Ciprian Chelba (CLSP, The Johns Hopkins University, USA)},
journal={arXiv preprint arXiv:cs/9811025},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811025},
primaryClass={cs.CL}
} | chelba1998a |
arxiv-676197 | cs/9811026 | Designing an interface to optimize reading with small display windows | <|reference_start|>Designing an interface to optimize reading with small display windows: The electronic presentation of text in small display windows is mushrooming. In the present paper, four ways of presenting text in a small display window were examined and compared to a Normal Page condition: rapid serial visual presentation (RSVP), RSVP with a Completion Meter, Sentence-by-Sentence presentation, and Sentence-by-Sentence presentation with a Completion Meter. Dependent measures were reading efficiency - speed and comprehension - and preference. For designers of hardware or software with small display windows, the results suggest the following: (1) Though RSVP is disliked by readers, the present methods of allowing self-pacing and regressions in RSVP, unlike earlier tested methods, are efficient and feasible. (2) Slower reading in RSVP should be achieved by increasing pauses between sentences or by repeating sentences, not by decreasing the within-sentence rate. (3) Completion meters do not interfere with performance, and are usually preferred. (4) The space-saving Sentence-by-Sentence format is as efficient and as preferred as the Normal Page format.<|reference_end|> | arxiv | @article{rahman1998designing,
title={Designing an interface to optimize reading with small display windows},
author={Tarjin Rahman and Paul Muter},
journal={Human Factors 41 (1999) 106-117},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811026},
primaryClass={cs.HC}
} | rahman1998designing |
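A toy terminal version of the RSVP condition studied above, reflecting the paper's pacing recommendation that slower effective rates should come from longer between-sentence pauses rather than a slower within-sentence rate; all timings and the window width are illustrative assumptions.

```python
# Terminal RSVP: one word at a time in a fixed small "window".
import time

def rsvp(text, wpm=300, sentence_pause=0.5):
    delay = 60.0 / wpm                              # seconds per word
    for word in text.split():
        print(f"\r{word:^24}", end="", flush=True)  # fixed 24-char display window
        time.sleep(delay)
        if word.endswith((".", "!", "?")):
            time.sleep(sentence_pause)              # pause between, not within, sentences
    print()

rsvp("This is rapid serial visual presentation. Each word appears alone.")
```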
arxiv-676198 | cs/9811027 | Push vs Pull in Web-Based Network Management | <|reference_start|>Push vs Pull in Web-Based Network Management: In this paper, we show how Web technologies can be used effectively to (i) address some of the deficiencies of traditional IP network management platforms, and (ii) render these expensive platforms redundant. We build on the concept of embedded management application, proposed by Wellens and Auerbach, and present two models of network management application designs that rely on Web technologies. First, the pull model is based on the request/response paradigm. It is typically used to perform data polling. Several commercial management platforms already use Web technologies that rely on this model to provide for ad hoc management; we demonstrate how to extend this to regular management. Second, the push model is a novel approach which relies on the publish/subscribe/distribute paradigm. It is better suited to regular management than the pull model, and allows administrators to conserve network bandwidth as well as CPU time on the management station. It can be seen as a generalization of the paradigm commonly used for notification delivery. Finally, we introduce the concept of the collapsed network management platform, where these two models coexist.<|reference_end|> | arxiv | @article{martin-flatin1998push,
title={Push vs. Pull in Web-Based Network Management},
author={J.P. Martin-Flatin},
journal={arXiv preprint arXiv:cs/9811027},
year={1998},
number={SSC/1998/022},
archivePrefix={arXiv},
eprint={cs/9811027},
primaryClass={cs.NI}
} | martin-flatin1998push |
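A sketch of the pull model as described: the management station drives a request/response polling loop against an HTTP server embedded in the managed device. The URL, polling period, and payload handling are assumptions of this sketch; a real agent would expose device-specific management data at its own paths.

```python
# Pull model: periodic request/response polling of an embedded HTTP agent.
import time
import urllib.request

def poll(url, period_s=60, cycles=3):
    for _ in range(cycles):
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                handle(resp.read())        # hand the snapshot to the manager
        except OSError as exc:             # URLError subclasses OSError
            print("poll failed:", exc)
        time.sleep(period_s)               # polling period chosen by the manager

def handle(payload):
    print(payload.decode(errors="replace"))

# poll("http://device.example.com/status")  # hypothetical embedded agent URL
```

The push model inverts this control flow: the agent publishes on its own schedule and the station merely subscribes, which is what saves the polling bandwidth and management-station CPU the abstract refers to.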
arxiv-676199 | cs/9811028 | TCP Trunking | <|reference_start|>TCP Trunking: A TCP trunk is an IP tunnel under TCP control, capable of carrying packets from any number of user flows. By exploiting properties of TCP, a TCP trunk provides elastic and reliable transmission over a network, and automatically shares the network fairly with other competing trunks. Moreover, by aggregating user flows into a single trunk flow, TCP trunking can significantly reduce the number of flows that the network needs to manage, thereby allowing use of simplified management to achieve improved performance. For example, when dealing with only a small number of TCP trunk flows, a router with a simple FIFO buffer can experience low packet loss rates. A TCP trunk is a "soft" circuit in the sense that it requires no flow states to be maintained inside the network. Setting up a TCP trunk involves only configuring the two end nodes. This is in contrast with traditional methods of configuring circuits via signaling of network nodes. A simple packet-dropping mechanism based on packet accounting at the transmitter of a TCP trunk assures that, when the trunk reduces its bandwidth in response to network congestion, user TCP flows carried by the trunk will reduce their bandwidths by the same proportion. Simulation results have demonstrated that TCP trunks can provide improved network performance to users, while achieving high network utilization.<|reference_end|> | arxiv | @article{kung1998tcp,
title={TCP Trunking},
author={H.T. Kung and S.Y. Wang},
journal={arXiv preprint arXiv:cs/9811028},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811028},
primaryClass={cs.NI}
} | kung1998tcp |
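A hedged sketch of the transmitter-side packet-accounting idea: admit user packets only up to a backlog allowance coupled to the trunk TCP's congestion window, so that when the trunk slows down, user flows see drops and back off in proportion. The class shape and thresholds are my reconstruction for illustration, not the paper's mechanism verbatim.

```python
# Packet accounting at the trunk transmitter: drops surface congestion of
# the trunk TCP to the user TCP flows it carries.
from collections import deque

class TrunkTransmitter:
    def __init__(self, allowance_pkts=32):
        self.queue = deque()
        self.allowance = allowance_pkts   # max user packets awaiting the trunk

    def on_trunk_cwnd_change(self, cwnd_pkts):
        # Couple admission to the trunk's window: halving the trunk's rate
        # roughly halves what user flows may keep queued.
        self.allowance = max(1, cwnd_pkts)

    def admit(self, pkt):
        if len(self.queue) >= self.allowance:
            return False                  # drop; the user TCP detects the loss
        self.queue.append(pkt)
        return True

    def dequeue_for_trunk(self):
        return self.queue.popleft() if self.queue else None
```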
arxiv-676200 | cs/9811029 | A Human-machine interface for teleoperation of arm manipulators in a complex environment | <|reference_start|>A Human-machine interface for teleoperation of arm manipulators in a complex environment: This paper discusses the feasibility of using configuration space (C-space) as a means of visualization and control in operator-guided real-time motion of a robot arm manipulator. The motivation is to improve performance of the human operator in tasks involving manipulator motion in an environment with obstacles. Unlike some other motion planning tasks, operators are known to make expensive mistakes in such tasks, even in the simpler two-dimensional case. They have difficulty learning better procedures, and their performance improves very little with practice. Using an example of a two-dimensional arm manipulator, we show that translating the problem into C-space improves operator performance remarkably, by an order of magnitude compared with the usual workspace control. An interface that makes the transfer possible is described, and an example of its use in a virtual environment is shown.<|reference_end|> | arxiv | @article{ivanisevic1998a,
title={A Human-machine interface for teleoperation of arm manipulators in a
complex environment},
author={I. Ivanisevic and V. Lumelsky},
journal={arXiv preprint arXiv:cs/9811029},
year={1998},
number={RL-97006},
archivePrefix={arXiv},
eprint={cs/9811029},
primaryClass={cs.RO cs.AI}
} | ivanisevic1998a |
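A minimal sketch of the C-space computation behind such an interface for the two-dimensional case: sample the two joint angles of a planar 2-link arm on a grid and mark colliding configurations, yielding the map an operator would steer through. Obstacles are simplified to points with a clearance radius, and every helper here is an assumption of this sketch.

```python
# Build a configuration-space obstacle map for a planar 2-link arm by
# sampling joint angles and collision-checking the links as segments.
import math

def fk(theta1, theta2, l1=1.0, l2=1.0):
    """Forward kinematics: base, elbow, and tip positions of the arm."""
    p1 = (l1 * math.cos(theta1), l1 * math.sin(theta1))
    p2 = (p1[0] + l2 * math.cos(theta1 + theta2),
          p1[1] + l2 * math.sin(theta1 + theta2))
    return (0.0, 0.0), p1, p2

def seg_point_dist(a, b, p):
    """Distance from point p to segment ab."""
    ax, ay = a; bx, by = b; px, py = p
    dx, dy = bx - ax, by - ay
    t = 0.0 if dx == dy == 0 else max(
        0.0, min(1.0, ((px - ax) * dx + (py - ay) * dy) / (dx * dx + dy * dy)))
    return math.hypot(px - (ax + t * dx), py - (ay + t * dy))

def cspace_map(obstacles, radius=0.2, n=90):
    """n x n boolean grid over (theta1, theta2): True = colliding configuration."""
    grid = []
    for i in range(n):
        t1 = 2 * math.pi * i / n
        row = []
        for j in range(n):
            t2 = 2 * math.pi * j / n
            base, elbow, tip = fk(t1, t2)
            row.append(any(seg_point_dist(a, b, o) < radius
                           for a, b in ((base, elbow), (elbow, tip))
                           for o in obstacles))
        grid.append(row)
    return grid

grid = cspace_map(obstacles=[(1.5, 0.0)])
print(sum(map(sum, grid)), "of", 90 * 90, "configurations collide")
```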