Column statistics (from the dataset viewer): corpus_id 7–12 chars; paper_id 9–16 chars; title 1–261 chars; abstract 70–4.02k chars; source 1 distinct value; bibtex 208–20.9k chars; citation_key 6–100 chars.

corpus_id | paper_id | title | abstract | source | bibtex | citation_key |
---|---|---|---|---|---|---|
arxiv-1501 | 0710.4656 | A Memory Hierarchical Layer Assigning and Prefetching Technique to Overcome the Memory Performance/Energy Bottleneck | <|reference_start|>A Memory Hierarchical Layer Assigning and Prefetching Technique to Overcome the Memory Performance/Energy Bottleneck: The memory subsystem has always been a performance bottleneck as well as a significant power contributor in memory-intensive applications. Many researchers have presented multi-layered memory hierarchies as a means to design energy- and performance-efficient systems. However, most of the previous work does not explore trade-offs systematically. We fill this gap by proposing a formalized technique that takes into consideration data reuse, the limited lifetime of the arrays of an application and application-specific prefetching opportunities, and performs a thorough trade-off exploration for different memory layer sizes. This technique has been implemented in a prototype tool, which was tested successfully using nine real-life applications of industrial relevance. Following this approach we have been able to reduce execution time by up to 60%, and energy consumption by up to 70%.<|reference_end|> | arxiv | @article{dasygenis2007a,
title={A Memory Hierarchical Layer Assigning and Prefetching Technique to
Overcome the Memory Performance/Energy Bottleneck},
author={Minas Dasygenis, Erik Brockmeyer, Bart Durinck, Francky Catthoor,
Dimitrios Soudris, Antonios Thanailakis},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4656},
primaryClass={cs.AR}
} | dasygenis2007a |
arxiv-1502 | 0710.4657 | New Schemes for Self-Testing RAM | <|reference_start|>New Schemes for Self-Testing RAM: This paper gives an overview of a new technique, named pseudo-ring testing (PRT). PRT can be applied to testing a wide range of random access memories (RAM): bit- or word-oriented and single- or dual-port RAMs. An essential particularity of the proposed methodology is the emulation of a linear automaton over a Galois field by the memory's own components.<|reference_end|> | arxiv | @article{bodean2007new,
title={New Schemes for Self-Testing RAM},
author={Gh. Bodean, D. Bodean, A. Labunetz},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4657},
primaryClass={cs.AR}
} | bodean2007new |
arxiv-1503 | 0710.4658 | Compositional Memory Systems for Multimedia Communicating Tasks | <|reference_start|>Compositional Memory Systems for Multimedia Communicating Tasks: Conventional cache models are not suited for real-time parallel processing because tasks may flush each other's data out of the cache in an unpredictable manner. In this way the system is not compositional, so the overall performance is difficult to predict and the integration of new tasks is expensive. This paper proposes a new method that imposes compositionality on the system's performance and makes different memory hierarchy optimizations possible for multimedia communicating tasks when running on embedded multiprocessor architectures. The method is based on a cache allocation strategy that assigns sets of the unified cache exclusively to tasks and to the communication buffers. We also analytically formulate the problem and describe a method to compute the cache partitioning ratio for optimizing the throughput and the consumed power. When applied to a multiprocessor with a memory hierarchy, our technique also delivers a performance gain. Compared to the shared cache case, an application consisting of two JPEG decoders and one edge-detection algorithm experiences 5 times fewer misses, and an MPEG-2 decoder experiences 6.5 times fewer misses.<|reference_end|> | arxiv | @article{molnos2007compositional,
title={Compositional Memory Systems for Multimedia Communicating Tasks},
author={A. M. Molnos, M. J. M. Heijligers, S. D. Cotofana, J. T. J. Van
Eijndhoven},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4658},
primaryClass={cs.AR cs.MM}
} | molnos2007compositional |
arxiv-1504 | 0710.4659 | Synchronization Processor Synthesis for Latency Insensitive Systems | <|reference_start|>Synchronization Processor Synthesis for Latency Insensitive Systems: In this paper we present our contribution in terms of a synchronization processor for a SoC design methodology based on the theory of latency insensitive systems (LIS) of Carloni et al. Our contribution consists in encapsulating IPs into a new wrapper model whose speed and area are optimized and whose synthesizability is guaranteed. The main benefit of our approach is that it preserves the local IP performance when encapsulating the IPs and reduces the SoC silicon area.<|reference_end|> | arxiv | @article{bomel2007synchronization,
title={Synchronization Processor Synthesis for Latency Insensitive Systems},
author={Pierre Bomel (LESTER), Eric Martin (LESTER), Emmanuel Boutillon
(LESTER)},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4659},
primaryClass={cs.AR}
} | bomel2007synchronization |
arxiv-1505 | 0710.4660 | Thermal-Aware Task Allocation and Scheduling for Embedded Systems | <|reference_start|>Thermal-Aware Task Allocation and Scheduling for Embedded Systems: Temperature affects not only the reliability but also the performance, power, and cost of the embedded system. This paper proposes a thermal-aware task allocation and scheduling algorithm for embedded systems. The algorithm is used as a sub-routine for hardware/software co-synthesis to reduce the peak temperature and achieve a thermally even distribution while meeting real time constraints. The paper investigates both power-aware and thermal-aware approaches to task allocation and scheduling. The experimental results show that the thermal-aware approach outperforms the power-aware schemes in terms of maximal and average temperature reductions. To the best of our knowledge, this is the first task allocation and scheduling algorithm that takes temperature into consideration.<|reference_end|> | arxiv | @article{hung2007thermal-aware,
title={Thermal-Aware Task Allocation and Scheduling for Embedded Systems},
author={W.-L. Hung, Y. Xie, N. Vijaykrishnan, M. Kandemir, M. J. Irwin},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4660},
primaryClass={cs.AR}
} | hung2007thermal-aware |
arxiv-1506 | 0710.4661 | Bright-Field AAPSM Conflict Detection and Correction | <|reference_start|>Bright-Field AAPSM Conflict Detection and Correction: As feature sizes shrink, it will be necessary to use AAPSM (Alternating-Aperture Phase Shift Masking) to image critical features, especially on the polysilicon layer. This imposes additional constraints on the layouts beyond traditional design rules. Of particular note is the requirement that all critical features be flanked by opposite-phase shifters, while the shifters obey minimum width and spacing requirements. A layout is called phase-assignable if it satisfies this requirement. If a layout is not phase-assignable, the phase conflicts have to be removed to enable the use of AAPSM for the layout. Previous work has sought to detect a suitable set of phase conflicts to be removed, as well as to correct them. The contributions of this paper are the following: (1) a new approach to detect a minimal set of phase conflicts (also referred to as AAPSM conflicts), which when corrected will produce a phase-assignable layout; (2) a novel layout modification scheme for correcting these AAPSM conflicts. The proposed approach for conflict detection shows significant improvements in the quality of results and runtime for real industrial circuits, when compared to previous methods. To the best of our knowledge, this is the first time layout modification results are presented for bright-field AAPSM. Our experiments show that the percentage area increase for making a layout phase-assignable ranges from 0.7% to 11.8%.<|reference_end|> | arxiv | @article{chiang2007bright-field,
title={Bright-Field AAPSM Conflict Detection and Correction},
author={C. Chiang, A. Kahng, S. Sinha, X. Xu, A. Zelikovsky},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4661},
primaryClass={cs.AR}
} | chiang2007bright-field |
arxiv-1507 | 0710.4663 | Statistical Modeling of Pipeline Delay and Design of Pipeline under Process Variation to Enhance Yield in sub-100nm Technologies | <|reference_start|>Statistical Modeling of Pipeline Delay and Design of Pipeline under Process Variation to Enhance Yield in sub-100nm Technologies: The operating frequency of a pipelined circuit is determined by the delay of the slowest pipeline stage. However, under statistical delay variation in the sub-100nm technology regime, the slowest stage is not readily identifiable and the estimation of the pipeline yield with respect to a target delay is a challenging problem. We have proposed analytical models to estimate yield for a pipelined design based on the delay distributions of individual pipe stages. Using the proposed models, we have shown that changes in logic depth and imbalance between the stage delays can improve the yield of a pipeline. A statistical methodology has been developed to optimally design a pipeline circuit for enhancing yield. Optimization results show that proper imbalance among the stage delays in a pipeline improves design yield by 9% for the same area and performance (and reduces area by about 8.4% under a yield constraint) over a balanced design.<|reference_end|> | arxiv | @article{datta2007statistical,
title={Statistical Modeling of Pipeline Delay and Design of Pipeline under
Process Variation to Enhance Yield in sub-100nm Technologies},
author={Animesh Datta, Swarup Bhunia, Saibal Mukhopadhyay, Nilanjan Banerjee,
Kaushik Roy},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4663},
primaryClass={cs.AR}
} | datta2007statistical |
arxiv-1508 | 0710.4665 | New Perspectives and Opportunities From the Wild West of Microelectronic Biochips | <|reference_start|>New Perspectives and Opportunities From the Wild West of Microelectronic Biochips: The application of microelectronics to bioanalysis is an emerging field which holds great promise. From the standpoint of electronic and system design, biochips imply a radical change of perspective, since new, completely different constraints emerge while other usual constraints can be relaxed. While the electronic parts of the system can rely on the usual established design flow, fluidic and packaging design calls for a new approach which relies significantly on experiments. We hereby make some general considerations based on our experience in the development of biochips for cell analysis.<|reference_end|> | arxiv | @article{manaresi2007new,
title={New Perspectives and Opportunities From the Wild West of Microelectronic
Biochips},
author={Nicolo Manaresi, Gianni Medoro, Melanie Abonnenc, Vincent Auger, Paul
Vulto, Aldo Romani, Luigi Altomare, Marco Tartagni, Roberto Guerrieri},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4665},
primaryClass={cs.AR}
} | manaresi2007new |
arxiv-1509 | 0710.4666 | Verification of Embedded Memory Systems using Efficient Memory Modeling | <|reference_start|>Verification of Embedded Memory Systems using Efficient Memory Modeling: We describe verification techniques for embedded memory systems using efficient memory modeling (EMM), without explicitly modeling each memory bit. We extend our previously proposed approach of EMM in Bounded Model Checking (BMC) for a single read/write port single memory system, to more commonly occurring systems with multiple memories, having multiple read and write ports. More importantly, we augment such EMM to providing correctness proofs, in addition to finding real bugs as before. The novelties of our verification approach are in a) combining EMM with proof-based abstraction that preserves the correctness of a property up to a certain analysis depth of SAT-based BMC, and b) modeling arbitrary initial memory state precisely and thereby, providing inductive proofs using SAT-based BMC for embedded memory systems. Similar to the previous approach, we construct a verification model by eliminating memory arrays, but retaining the memory interface signals with their control logic and adding constraints on those signals at every analysis depth to preserve the data forwarding semantics. The size of these EMM constraints depends quadratically on the number of memory accesses and the number of read and write ports; and linearly on the address and data widths and the number of memories. We show the effectiveness of our approach on several industry designs and software programs.<|reference_end|> | arxiv | @article{ganai2007verification,
title={Verification of Embedded Memory Systems using Efficient Memory Modeling},
author={Malay K. Ganai, Aarti Gupta, Pranav Ashar},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4666},
primaryClass={cs.LO}
} | ganai2007verification |
arxiv-1510 | 0710.4667 | Integration, Verification and Layout of a Complex Multimedia SOC | <|reference_start|>Integration, Verification and Layout of a Complex Multimedia SOC: We present our experience of designing a single-chip controller for an advanced digital still camera, from specification all the way to mass production. The process involves collaboration with the camera system designer, IP vendors, EDA vendors, the silicon wafer foundry, package and testing houses, and the camera maker. We also work with academic research groups to develop a JPEG codec IP as well as a memory BIST and SOC testing methodology. In this presentation, we cover the problems encountered, our solutions, and lessons learned.<|reference_end|> | arxiv | @article{chen2007integration,
title={Integration, Verification and Layout of a Complex Multimedia SOC},
author={Chien-Liang Chen, Jiing-Yuan Lin, Youn-Long Lin},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4667},
primaryClass={cs.AR cs.MM}
} | chen2007integration |
arxiv-1511 | 0710.4669 | SOC Testing Methodology and Practice | <|reference_start|>SOC Testing Methodology and Practice: On a commercial digital still camera (DSC) controller chip we practice a novel SOC test integration platform, solving real problems in test scheduling, test IO reduction, timing of functional test, scan IO sharing, embedded memory built-in self-test (BIST), etc. The chip has been fabricated and tested successfully using our approach. Test results show that low test integration cost, short test time, and small area overhead can be achieved. To support SOC testing, a memory BIST compiler and an SOC testing integration system have been developed.<|reference_end|> | arxiv | @article{wu2007soc,
title={SOC Testing Methodology and Practice},
author={Cheng-Wen Wu},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4669},
primaryClass={cs.AR}
} | wu2007soc |
arxiv-1512 | 0710.4670 | Evolutionary Optimization in Code-Based Test Compression | <|reference_start|>Evolutionary Optimization in Code-Based Test Compression: We provide a general formulation for the code-based test compression problem with fixed-length input blocks and propose a solution approach based on Evolutionary Algorithms. In contrast to existing code-based methods, we allow unspecified values in matching vectors, which allows encoding of arbitrary test sets using a relatively small number of code-words. Experimental results for both stuck-at and path delay fault test sets for ISCAS circuits demonstrate an improvement compared to existing techniques.<|reference_end|> | arxiv | @article{polian2007evolutionary,
title={Evolutionary Optimization in Code-Based Test Compression},
author={Ilia Polian, Alejandro Czutro, Bernd Becker},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4670},
primaryClass={cs.AR}
} | polian2007evolutionary |
arxiv-1513 | 0710.4671 | An Application-Specific Design Methodology for STbus Crossbar Generation | <|reference_start|>An Application-Specific Design Methodology for STbus Crossbar Generation: As the communication requirements of current and future Multiprocessor Systems on Chips (MPSoCs) continue to increase, scalable communication architectures are needed to support the heavy communication demands of the system. This is reflected in the recent trend that many of the standard bus products such as STbus, have now introduced the capability of designing a crossbar with multiple buses operating in parallel. The crossbar configuration should be designed to closely match the application traffic characteristics and performance requirements. In this work we address this issue of application-specific design of optimal crossbar (using STbus crossbar architecture), satisfying the performance requirements of the application and optimal binding of cores onto the crossbar resources. We present a simulation based design approach that is based on analysis of actual traffic trace of the application, considering local variations in traffic rates, temporal overlap among traffic streams and criticality of traffic streams. Our methodology is applied to several MPSoC designs and the resulting crossbar platforms are validated for performance by cycle-accurate SystemC simulation of the designs. The experimental case studies show large reduction in packet latencies (up to 7x) and large crossbar component savings (up to 3.5x) compared to traditional design approaches.<|reference_end|> | arxiv | @article{murali2007an,
title={An Application-Specific Design Methodology for STbus Crossbar Generation},
author={Srinivasan Murali, Giovanni De Micheli},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4671},
primaryClass={cs.AR}
} | murali2007an |
arxiv-1514 | 0710.4672 | Yield Enhancement of Digital Microfluidics-Based Biochips Using Space Redundancy and Local Reconfiguration | <|reference_start|>Yield Enhancement of Digital Microfluidics-Based Biochips Using Space Redundancy and Local Reconfiguration: As microfluidics-based biochips become more complex, manufacturing yield will have significant influence on production volume and product cost. We propose an interstitial redundancy approach to enhance the yield of biochips that are based on droplet-based microfluidics. In this design method, spare cells are placed in the interstitial sites within the microfluidic array, and they replace neighboring faulty cells via local reconfiguration. The proposed design method is evaluated using a set of concurrent real-life bioassays.<|reference_end|> | arxiv | @article{su2007yield,
title={Yield Enhancement of Digital Microfluidics-Based Biochips Using Space
Redundancy and Local Reconfiguration},
author={Fei Su, Krishnendu Chakrabarty, Vamsee K. Pamula},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4672},
primaryClass={cs.AR}
} | su2007yield |
arxiv-1515 | 0710.4673 | Design of Fault-Tolerant and Dynamically-Reconfigurable Microfluidic Biochips | <|reference_start|>Design of Fault-Tolerant and Dynamically-Reconfigurable Microfluidic Biochips: Microfluidics-based biochips are soon expected to revolutionize clinical diagnosis, DNA sequencing, and other laboratory procedures involving molecular biology. Most microfluidic biochips are based on the principle of continuous fluid flow and they rely on permanently-etched microchannels, micropumps, and microvalves. We focus here on the automated design of "digital" droplet-based microfluidic biochips. In contrast to continuous-flow systems, digital microfluidics offers dynamic reconfigurability; groups of cells in a microfluidics array can be reconfigured to change their functionality during the concurrent execution of a set of bioassays. We present a simulated annealing-based technique for module placement in such biochips. The placement procedure not only addresses chip area, but it also considers fault tolerance, which allows a microfluidic module to be relocated elsewhere in the system when a single cell is detected to be faulty. Simulation results are presented for a case study involving the polymerase chain reaction.<|reference_end|> | arxiv | @article{su2007design,
title={Design of Fault-Tolerant and Dynamically-Reconfigurable Microfluidic
Biochips},
author={Fei Su, Krishnendu Chakrabarty},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4673},
primaryClass={cs.AR}
} | su2007design |
arxiv-1516 | 0710.4678 | CMOS-Based Biosensor Arrays | <|reference_start|>CMOS-Based Biosensor Arrays: CMOS-based sensor array chips provide new and attractive features as compared to today's standard tools for medical, diagnostic, and biotechnical applications. Examples for molecule- and cell-based approaches and related circuit design issues are discussed.<|reference_end|> | arxiv | @article{thewes2007cmos-based,
title={CMOS-Based Biosensor Arrays},
author={R. Thewes, C. Paulus, M. Schienle, F. Hofmann, A. Frey, R. Brederlow,
M. Augustyniak, M. Jenkner, B. Eversmann, P. Schindler-Bauer, M. Atzesberger,
B. Holzapfl, G. Beer, T. Haneder, H.-C. Hanke},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4678},
primaryClass={cs.AR}
} | thewes2007cmos-based |
arxiv-1517 | 0710.4679 | DVS for On-Chip Bus Designs Based on Timing Error Correction | <|reference_start|>DVS for On-Chip Bus Designs Based on Timing Error Correction: On-chip buses are typically designed to meet performance constraints at worst-case conditions, including process corner, temperature, IR-drop, and neighboring net switching pattern. This can result in significant performance slack at more typical operating conditions. In this paper, we propose a dynamic voltage scaling (DVS) technique for buses, based on a double sampling latch which can detect and correct for delay errors without the need for retransmission. The proposed approach recovers the available slack at non-worst-case operating points through more aggressive voltage scaling and tracks changing conditions by monitoring the error recovery rate. Voltage margins needed in traditional designs to accommodate worst-case performance conditions are therefore eliminated, resulting in a significant improvement in energy efficiency. The approach was implemented for a 6mm memory read bus operating at 1.5GHz (0.13 $\mu$m technology node) and was simulated for a number of benchmark programs. Even at the worst-case process and environment conditions, energy gains of up to 17% are achieved, with error recovery rates under 2.3%. At more typical process and environment conditions, energy gains range from 35% to 45%, with a performance degradation under 2%. An analysis of optimum interconnect architectures for maximizing energy gains with this approach shows that the proposed approach performs well with technology scaling.<|reference_end|> | arxiv | @article{kaul2007dvs,
title={DVS for On-Chip Bus Designs Based on Timing Error Correction},
author={Himanshu Kaul, Dennis Sylvester, David Blaauw, Trevor Mudge, Todd
Austin},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4679},
primaryClass={cs.AR}
} | kaul2007dvs |
arxiv-1518 | 0710.4680 | Energy Bounds for Fault-Tolerant Nanoscale Designs | <|reference_start|>Energy Bounds for Fault-Tolerant Nanoscale Designs: The problem of determining lower bounds for the energy cost of a given nanoscale design is addressed via a complexity theory-based approach. This paper provides a theoretical framework that is able to assess the trade-offs existing in nanoscale designs between the amount of redundancy needed for a given level of resilience to errors and the associated energy cost. Circuit size, logic depth and error resilience are analyzed and brought together in a theoretical framework that can be seamlessly integrated with automated synthesis tools and can guide the design process of nanoscale systems comprised of failure prone devices. The impact of redundancy addition on the switching energy and its relationship with leakage energy is modeled in detail. Results show that 99% error resilience is possible for fault-tolerant designs, but at the expense of at least 40% more energy if individual gates fail independently with probability of 1%.<|reference_end|> | arxiv | @article{marculescu2007energy,
title={Energy Bounds for Fault-Tolerant Nanoscale Designs},
author={Diana Marculescu},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4680},
primaryClass={cs.CC cs.IT math.IT}
} | marculescu2007energy |
arxiv-1519 | 0710.4681 | A Quality-of-Service Mechanism for Interconnection Networks in System-on-Chips | <|reference_start|>A Quality-of-Service Mechanism for Interconnection Networks in System-on-Chips: As Moore's Law continues to fuel the ability to build ever increasingly complex system-on-chips (SoCs), achieving performance goals is rising as a critical challenge to completing designs. In particular, the system interconnect must efficiently service a diverse set of data flows with widely ranging quality-of-service (QoS) requirements. However, the known solutions for off-chip interconnects such as large-scale networks are not necessarily applicable to the on-chip environment. Latency and memory constraints for on-chip interconnects are quite different from larger-scale interconnects. This paper introduces a novel on-chip interconnect arbitration scheme. We show how this scheme can be distributed across a chip for high-speed implementation. We compare the performance of the arbitration scheme with other known interconnect arbitration schemes. Existing schemes typically focus heavily on either low latency of service for some initiators, or alternatively on guaranteed bandwidth delivery for other initiators. Our scheme allows service latency on some initiators to be traded off smoothly against jitter bounds on other initiators, while still delivering bandwidth guarantees. This scheme is a subset of the QoS controls that are available in the SonicsMX (SMX) product.<|reference_end|> | arxiv | @article{weber2007a,
title={A Quality-of-Service Mechanism for Interconnection Networks in
System-on-Chips},
author={Wolf-Dietrich Weber, Joe Chou, Ian Swarbrick, Drew Wingard},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4681},
primaryClass={cs.AR}
} | weber2007a |
arxiv-1520 | 0710.4682 | Applying UML and MDA to Real Systems Design | <|reference_start|>Applying UML and MDA to Real Systems Design: Traditionally, system design has been done from a black-box/functionality-only perspective, which forces the developer to concentrate on how the functionality can be decomposed and recomposed into so-called components. While this technique is well established and well known, it does suffer from some drawbacks; namely that the systems produced can often be forced into certain, incompatible architectures, be difficult to maintain or reuse, and the code itself be difficult to debug. Now that ideas such as the OMG's Model Driven Architecture (MDA) or Model Based Engineering (MBE) and the ubiquitous modelling language UML are being used (allegedly) and desired, we face a number of challenges to existing techniques.<|reference_end|> | arxiv | @article{oliver2007applying,
title={Applying UML and MDA to Real Systems Design},
author={Ian Oliver},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4682},
primaryClass={cs.SE}
} | oliver2007applying |
arxiv-1521 | 0710.4683 | The Challenges of Hardware Synthesis from C-Like Languages | <|reference_start|>The Challenges of Hardware Synthesis from C-Like Languages: Many techniques for synthesizing digital hardware from C-like languages have been proposed, but none have emerged as successful as Verilog or VHDL for register-transfer-level design. This paper looks at two of the fundamental challenges: concurrency and timing control.<|reference_end|> | arxiv | @article{edwards2007the,
title={The Challenges of Hardware Synthesis from C-Like Languages},
author={Stephen A. Edwards},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4683},
primaryClass={cs.PL}
} | edwards2007the |
arxiv-1522 | 0710.4684 | Reliability-Centric High-Level Synthesis | <|reference_start|>Reliability-Centric High-Level Synthesis: Importance of addressing soft errors in both safety critical applications and commercial consumer products is increasing, mainly due to ever shrinking geometries, higher-density circuits, and employment of power-saving techniques such as voltage scaling and component shut-down. As a result, it is becoming necessary to treat reliability as a first-class citizen in system design. In particular, reliability decisions taken early in system design can have significant benefits in terms of design quality. Motivated by this observation, this paper presents a reliability-centric high-level synthesis approach that addresses the soft error problem. The proposed approach tries to maximize reliability of the design while observing the bounds on area and performance, and makes use of our reliability characterization of hardware components such as adders and multipliers. We implemented the proposed approach, performed experiments with several designs, and compared the results with those obtained by a prior proposal.<|reference_end|> | arxiv | @article{tosun2007reliability-centric,
title={Reliability-Centric High-Level Synthesis},
author={S. Tosun, N. Mansouri, E. Arvas, M. Kandemir, Yuan Xie},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4684},
primaryClass={cs.AR}
} | tosun2007reliability-centric |
arxiv-1523 | 0710.4685 | Reliable System Specification for Self-Checking Data-Paths | <|reference_start|>Reliable System Specification for Self-Checking Data-Paths: The design of reliable circuits has received a lot of attention in the past, leading to the definition of several design techniques introducing fault detection and fault tolerance properties in systems for critical applications/environments. Such design methodologies tackled the problem at different abstraction levels, from switch-level to logic, RT level, and more recently to system level. The aim of this paper is to introduce a novel system-level technique based on the redefinition of the operators' functionality in the system specification. This technique provides reliability properties to the system data path, transparently with respect to the designer. Feasibility, fault coverage, performance degradation and overheads are investigated on a FIR circuit.<|reference_end|> | arxiv | @article{bolchini2007reliable,
title={Reliable System Specification for Self-Checking Data-Paths},
author={C. Bolchini, F. Salice, D. Sciuto, L. Pomante},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4685},
primaryClass={cs.AR}
} | bolchini2007reliable |
arxiv-1524 | 0710.4686 | Test Planning for Mixed-Signal SOCs with Wrapped Analog Cores | <|reference_start|>Test Planning for Mixed-Signal SOCs with Wrapped Analog Cores: Many SOCs today contain both digital and analog embedded cores. Even though the test cost for such mixed-signal SOCs is significantly higher than that for digital SOCs, most prior research in this area has focused exclusively on digital cores. We propose a low-cost test development methodology for mixed-signal SOCs that allows the analog and digital cores to be tested in a unified manner, thereby minimizing the overall test cost. The analog cores in the SOC are wrapped such that they can be accessed using a digital test access mechanism (TAM). We evaluate the impact of the use of analog test wrappers on area overhead and test time. To reduce area overhead, we present an analog test wrapper optimization technique, which is then combined with TAM optimization in a cost-oriented heuristic approach for test scheduling. We also demonstrate the feasibility of using analog wrappers by presenting transistor-level simulations for an analog wrapper and a representative core. We present experimental results on test scheduling for an ITC'02 benchmark SOC that has been augmented with five analog cores.<|reference_end|> | arxiv | @article{sehgal2007test,
title={Test Planning for Mixed-Signal SOCs with Wrapped Analog Cores},
author={Anuja Sehgal, Fang Liu, Sule Ozev, Krishnendu Chakrabarty},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4686},
primaryClass={cs.AR}
} | sehgal2007test |
arxiv-1525 | 0710.4687 | On-Chip Test Infrastructure Design for Optimal Multi-Site Testing of System Chips | <|reference_start|>On-Chip Test Infrastructure Design for Optimal Multi-Site Testing of System Chips: Multi-site testing is a popular and effective way to increase test throughput and reduce test costs. We present a test throughput model, in which we focus on wafer testing, and consider parameters like test time, index time, abort-on-fail, and contact yield. Conventional multi-site testing requires sufficient ATE resources, such as ATE channels, to allow multiple SOCs to be tested in parallel. In this paper, we design and optimize on-chip DfT, in order to maximize the test throughput for a given SOC and ATE. The on-chip DfT consists of an E-RPCT wrapper, and, for modular SOCs, module wrappers and TAMs. We present experimental results for a Philips SOC and several ITC'02 SOC Test Benchmarks.<|reference_end|> | arxiv | @article{goel2007on-chip,
title={On-Chip Test Infrastructure Design for Optimal Multi-Site Testing of
System Chips},
author={Sandeep Kumar Goel, Erik Jan Marinissen},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4687},
primaryClass={cs.AR}
} | goel2007on-chip |
arxiv-1526 | 0710.4688 | On the Optimal Design of Triple Modular Redundancy Logic for SRAM-based FPGAs | <|reference_start|>On the Optimal Design of Triple Modular Redundancy Logic for SRAM-based FPGAs: Triple Modular Redundancy (TMR) is a suitable fault tolerance technique for SRAM-based FPGAs. However, one of the main challenges in achieving 100% robustness in designs protected by TMR running on programmable platforms is to prevent upsets in the routing from provoking undesirable connections between signals from distinct redundant logic parts, which can generate an error in the output. This paper investigates the optimal design of the TMR logic (e.g., by cleverly inserting voters) to ensure robustness. Four different versions of a TMR digital filter were analyzed by fault injection. Faults were randomly inserted straight into the bitstream of the FPGA. The experimental results presented in this paper demonstrate that the number and placement of voters in the TMR design can directly affect the fault tolerance, with the fraction of routing upsets able to cause an error in the TMR circuit ranging from 4.03% down to 0.98%.<|reference_end|> | arxiv | @article{kastensmidt2007on,
title={On the Optimal Design of Triple Modular Redundancy Logic for SRAM-based
FPGAs},
author={F. Lima Kastensmidt, L. Sterpone, L. Carro, M. Sonza Reorda},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4688},
primaryClass={cs.AR}
} | kastensmidt2007on |
arxiv-1527 | 0710.4689 | Functional Equivalence Checking for Verification of Algebraic Transformations on Array-Intensive Source Code | <|reference_start|>Functional Equivalence Checking for Verification of Algebraic Transformations on Array-Intensive Source Code: Development of energy and performance-efficient embedded software is increasingly relying on application of complex transformations on the critical parts of the source code. Designers applying such nontrivial source code transformations are often faced with the problem of ensuring functional equivalence of the original and transformed programs. Currently they have to rely on incomplete and time-consuming simulation. Formal automatic verification of the transformed program against the original is instead desirable. This calls for equivalence checking tools similar to the ones available for comparing digital circuits. We present such a tool to compare array-intensive programs related through a combination of important global transformations like expression propagations, loop and algebraic transformations. When the transformed program fails to pass the equivalence check, the tool provides specific feedback on the possible locations of errors.<|reference_end|> | arxiv | @article{shashidhar2007functional,
title={Functional Equivalence Checking for Verification of Algebraic
Transformations on Array-Intensive Source Code},
author={K. C. Shashidhar, Maurice Bruynooghe, Francky Catthoor, Gerda Janssens},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4689},
primaryClass={cs.LO}
} | shashidhar2007functional |
arxiv-1528 | 0710.4690 | RIP: An Efficient Hybrid Repeater Insertion Scheme for Low Power | <|reference_start|>RIP: An Efficient Hybrid Repeater Insertion Scheme for Low Power: This paper presents a novel repeater insertion algorithm for interconnect power minimization. The novelty of our approach is in the judicious integration of an analytical solver and a dynamic programming based method. Specifically, the analytical solver chooses a concise repeater library and a small set of repeater location candidates such that the dynamic programming algorithm can be performed fast with little degradation of the solution quality. In comparison with previously reported repeater insertion schemes, within comparable runtimes, our approach achieves up to 37% higher power savings. Moreover, for the same design quality, our scheme attains a speedup of two orders of magnitude.<|reference_end|> | arxiv | @article{liu2007rip:,
title={RIP: An Efficient Hybrid Repeater Insertion Scheme for Low Power},
author={Xun Liu, Yuantao Peng, Marios C. Papaefthymiou},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4690},
primaryClass={cs.OH}
} | liu2007rip: |
arxiv-1529 | 0710.4691 | An O(bn^2) Time Algorithm for Optimal Buffer Insertion with b Buffer Types | <|reference_start|>An O(bn^2) Time Algorithm for Optimal Buffer Insertion with b Buffer Types: Buffer insertion is a popular technique to reduce the interconnect delay. The classic buffer insertion algorithm of van Ginneken has time complexity O(n^2), where n is the number of buffer positions. Lillis, Cheng and Lin extended van Ginneken's algorithm to allow b buffer types in time O (b^2 n^2). For modern design libraries that contain hundreds of buffers, it is a serious challenge to balance the speed and performance of the buffer insertion algorithm. In this paper, we present a new algorithm that computes the optimal buffer insertion in O (bn^2) time. The reduction is achieved by the observation that the (Q, C) pairs of the candidates that generate the new candidates must form a convex hull. On industrial test cases, the new algorithm is faster than the previous best buffer insertion algorithms by orders of magnitude.<|reference_end|> | arxiv | @article{li2007an,
title={An O(bn^2) Time Algorithm for Optimal Buffer Insertion with b Buffer
Types},
author={Zhuo Li, Weiping Shi},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4691},
primaryClass={cs.AR}
} | li2007an |
arxiv-1530 | 0710.4692 | Cantilever-Based Biosensors in CMOS Technology | <|reference_start|>Cantilever-Based Biosensors in CMOS Technology: Single-chip CMOS-based biosensors that feature microcantilevers as transducer elements are presented. The cantilevers are functionalized for the capturing of specific analytes, e.g., proteins or DNA. The binding of the analyte changes the mechanical properties of the cantilevers such as surface stress and resonant frequency, which can be detected by an integrated Wheatstone bridge. The monolithic integrated readout allows for a high signal-to-noise ratio, lowers the sensitivity to external interference and enables autonomous device operation.<|reference_end|> | arxiv | @article{kirstein2007cantilever-based,
title={Cantilever-Based Biosensors in CMOS Technology},
author={K.-U. Kirstein, Y. Li, M. Zimmermann, C. Vancura, T. Volden, W. H.
Song, J. Lichtenberg, A. Hierlemannn},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4692},
primaryClass={cs.AR}
} | kirstein2007cantilever-based |
arxiv-1531 | 0710.4693 | Memory Testing Under Different Stress Conditions: An Industrial Evaluation | <|reference_start|>Memory Testing Under Different Stress Conditions: An Industrial Evaluation: This paper presents the effectiveness of various stress conditions (mainly voltage and frequency) on detecting the resistive shorts and open defects in deep sub-micron embedded memories in an industrial environment. Simulation studies on very-low voltage, high voltage and at-speed testing show the need of the stress conditions for high quality products; i.e., low defect-per-million (DPM) level, which is driving the semiconductor market today. The above test conditions have been validated to screen out bad devices on real silicon (a test-chip) built on CMOS 0.18 um technology. IFA (inductive fault analysis) based simulation technique leads to an efficient fault coverage and DPM estimator, which helps the customers upfront to make decisions on test algorithm implementations under different stress conditions in order to reduce the number of test escapes.<|reference_end|> | arxiv | @article{majhi2007memory,
title={Memory Testing Under Different Stress Conditions: An Industrial
Evaluation},
author={Ananta K. Majhi, Mohamed Azimane, Guido Gronthoud, Maurice Lousberg,
Stefan Eichenberger, Fred Bowen},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4693},
primaryClass={cs.AR}
} | majhi2007memory |
arxiv-1532 | 0710.4694 | Exact Synthesis of 3-Qubit Quantum Circuits from Non-Binary Quantum Gates Using Multiple-Valued Logic and Group Theory | <|reference_start|>Exact Synthesis of 3-Qubit Quantum Circuits from Non-Binary Quantum Gates Using Multiple-Valued Logic and Group Theory: We propose an approach to optimally synthesize quantum circuits from non-permutative quantum gates such as Controlled-Square-Root-of-Not (i.e. Controlled-V). Our approach reduces the synthesis problem to multiple-valued optimization and uses group theory. We devise a novel technique that transforms the quantum logic synthesis problem from a multi-valued constrained optimization problem to a group permutation problem. The transformation enables us to utilize group theory to exploit the properties of the synthesis problem. Assuming a cost of one for each two-qubit gate, we found all reversible circuits with quantum costs of 4, 5, 6, etc, and give another algorithm to realize these reversible circuits with quantum gates.<|reference_end|> | arxiv | @article{yang2007exact,
title={Exact Synthesis of 3-Qubit Quantum Circuits from Non-Binary Quantum
Gates Using Multiple-Valued Logic and Group Theory},
author={Guowu Yang, William N. N. Hung, Xiaoyu Song, Marek Perkowski},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4694},
primaryClass={cs.LO}
} | yang2007exact |
arxiv-1533 | 0710.4695 | SAT-Based Complete Don't-Care Computation for Network Optimization | <|reference_start|>SAT-Based Complete Don't-Care Computation for Network Optimization: This paper describes an improved approach to Boolean network optimization using internal don't-cares. The improvements concern the type of don't-cares computed, their scope, and the computation method. Instead of the traditionally used compatible observability don't-cares (CODCs), we introduce and justify the use of complete don't-cares (CDCs). To ensure the robustness of the don't-care computation for very large industrial networks, an optional windowing scheme is implemented that computes substantial subsets of the CDCs in reasonable time. Finally, we give a SAT-based don't-care computation algorithm that is more efficient than BDD-based algorithms. Experimental results confirm that these improvements work well in practice. Complete don't-cares allow for a reduction in the number of literals compared to the CODCs. Windowing guarantees robustness, even for very large benchmarks on which previous methods could not be applied. SAT reduces the runtime and enhances robustness, making don't-cares affordable for a variety of other Boolean methods applied to the network.<|reference_end|> | arxiv | @article{mishchenko2007sat-based,
title={SAT-Based Complete Don't-Care Computation for Network Optimization},
author={Alan Mishchenko, Robert K. Brayton},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4695},
primaryClass={cs.LO}
} | mishchenko2007sat-based |
arxiv-1534 | 0710.4697 | Statistical Timing Based Optimization using Gate Sizing | <|reference_start|>Statistical Timing Based Optimization using Gate Sizing: The increased dominance of intra-die process variations has motivated the field of Statistical Static Timing Analysis (SSTA) and has raised the need for SSTA-based circuit optimization. In this paper, we propose a new sensitivity based, statistical gate sizing method. Since brute-force computation of the change in circuit delay distribution to gate size change is computationally expensive, we propose an efficient and exact pruning algorithm. The pruning algorithm is based on a novel theory of perturbation bounds which are shown to decrease as they propagate through the circuit. This allows pruning of gate sensitivities without complete propagation of their perturbations. We apply our proposed optimization algorithm to ISCAS benchmark circuits and demonstrate the accuracy and efficiency of the proposed method. Our results show an improvement of up to 10.5% in the 99-percentile circuit delay for the same circuit area, using the proposed statistical optimizer and a run time improvement of up to 56x compared to the brute-force approach.<|reference_end|> | arxiv | @article{agarwal2007statistical,
title={Statistical Timing Based Optimization using Gate Sizing},
author={Aseem Agarwal, Kaviraj Chopra, David Blaauw},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4697},
primaryClass={cs.AR}
} | agarwal2007statistical |
arxiv-1535 | 0710.4698 | Automated Synthesis of Assertion Monitors using Visual Specifications | <|reference_start|>Automated Synthesis of Assertion Monitors using Visual Specifications: Automated synthesis of monitors from high-level properties plays a significant role in assertion-based verification. We present here a methodology to synthesize assertion monitors from visual specifications given in CESC (Clocked Event Sequence Chart). CESC is a visual language designed for specifying system level interactions involving single and multiple clock domains. It has well-defined graphical and textual syntax and formal semantics based on synchronous language paradigm enabling formal analysis of specifications. In this paper we provide an overview of CESC language with few illustrative examples. The algorithm for automated synthesis of assertion monitors from CESC specifications is described. A few examples from standard bus protocols (OCP-IP and AMBA) are presented to demonstrate the application of monitor synthesis algorithm.<|reference_end|> | arxiv | @article{gadkari2007automated,
title={Automated Synthesis of Assertion Monitors using Visual Specifications},
author={Ambar A. Gadkari, S. Ramesh},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4698},
primaryClass={cs.LO}
} | gadkari2007automated |
arxiv-1536 | 0710.4700 | A Decompilation Approach to Partitioning Software for Microprocessor/FPGA Platforms | <|reference_start|>A Decompilation Approach to Partitioning Software for Microprocessor/FPGA Platforms: In this paper, we present a software compilation approach for microprocessor/FPGA platforms that partitions a software binary onto custom hardware implemented in the FPGA. Our approach imposes less restrictions on software tool flow than previous compiler approaches, allowing software designers to use any software language and compiler. Our approach uses a back-end partitioning tool that utilizes decompilation techniques to recover important high-level information, resulting in performance comparable to high-level compiler-based approaches.<|reference_end|> | arxiv | @article{stitt2007a,
title={A Decompilation Approach to Partitioning Software for
Microprocessor/FPGA Platforms},
author={Greg Stitt, Frank Vahid},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4700},
primaryClass={cs.SE}
} | stitt2007a |
arxiv-1537 | 0710.4701 | A Prediction Packetizing Scheme for Reducing Channel Traffic in Transaction-Level Hardware/Software Co-Emulation | <|reference_start|>A Prediction Packetizing Scheme for Reducing Channel Traffic in Transaction-Level Hardware/Software Co-Emulation: This paper presents a scheme for efficient channel usage between simulator and accelerator where the accelerator models some RTL sub-blocks in the accelerator-based hardware/software co-simulation while the simulator runs transaction-level model of the remaining part of the whole chip being verified. With conventional simulation accelerator, evaluations of simulator and accelerator alternate at every valid simulation time, which results in poor simulation performance due to startup overhead of simulator-accelerator channel access. The startup overhead can be reduced by merging multiple transactions on the channel into a single burst traffic. We propose a predictive packetizing scheme for reducing channel traffic by merging as many transactions into a burst traffic as possible based on 'prediction and rollback.' Under ideal condition with 100% prediction accuracy, the proposed method shows a performance gain of 1500% compared to the conventional one.<|reference_end|> | arxiv | @article{lee2007a,
title={A Prediction Packetizing Scheme for Reducing Channel Traffic in
Transaction-Level Hardware/Software Co-Emulation},
author={Jae-Gon Lee, Moo-Kyoung Chung, Ki-Yong Ahn, Sang-Heon Lee, Chong-Min
Kyung},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4701},
primaryClass={cs.PF}
} | lee2007a |
arxiv-1538 | 0710.4702 | A Register Allocation Algorithm in the Presence of Scalar Replacement for Fine-Grain Configurable Architectures | <|reference_start|>A Register Allocation Algorithm in the Presence of Scalar Replacement for Fine-Grain Configurable Architectures: The aggressive application of scalar replacement to array references substantially reduces the number of memory operations at the expense of a possibly very large number of registers. In this paper we describe a register allocation algorithm that assigns registers to scalar-replaced array references along the critical paths of a computation, in many cases exploiting the opportunity for concurrent memory accesses. Experimental results, for a set of image/signal processing code kernels, reveal that the proposed algorithm leads to a substantial reduction of the number of execution cycles for the corresponding hardware implementation on a contemporary Field-Programmable Gate Array (FPGA) when compared to other greedy allocation algorithms, in some cases using even fewer registers.<|reference_end|> | arxiv | @article{baradaran2007a,
title={A Register Allocation Algorithm in the Presence of Scalar Replacement
for Fine-Grain Configurable Architectures},
author={Nastaran Baradaran, Pedro C. Diniz},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4702},
primaryClass={cs.PL}
} | baradaran2007a |
arxiv-1539 | 0710.4703 | A Way Memoization Technique for Reducing Power Consumption of Caches in Application Specific Integrated Processors | <|reference_start|>A Way Memoization Technique for Reducing Power Consumption of Caches in Application Specific Integrated Processors: This paper presents a technique for eliminating redundant cache-tag and cache-way accesses to reduce power consumption. The basic idea is to keep a small number of Most Recently Used (MRU) addresses in a Memory Address Buffer (MAB) and to omit redundant tag and way accesses when there is a MAB-hit. Since the approach keeps only tag and set-index values in the MAB, the energy and area overheads are relatively small even for a MAB with a large number of entries. Furthermore, the approach does not sacrifice the performance. In other words, neither the cycle time nor the number of executed cycles increases. The proposed technique has been applied to Fujitsu VLIW processor (FR-V) and its power saving has been estimated using NanoSim. Experiments for 32kB 2-way set associative caches show the power consumption of I-cache and D-cache can be reduced by 40% and 50%, respectively.<|reference_end|> | arxiv | @article{ishihara2007a,
title={A Way Memoization Technique for Reducing Power Consumption of Caches in
Application Specific Integrated Processors},
author={Tohru Ishihara, Farzan Fallah},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4703},
primaryClass={cs.AR}
} | ishihara2007a |
arxiv-1540 | 0710.4704 | Resource Sharing and Pipelining in Coarse-Grained Reconfigurable Architecture for Domain-Specific Optimization | <|reference_start|>Resource Sharing and Pipelining in Coarse-Grained Reconfigurable Architecture for Domain-Specific Optimization: Coarse-grained reconfigurable architectures aim to achieve both goals of high performance and flexibility. However, existing reconfigurable array architectures require many resources without considering the specific application domain. Functional resources that take long latency and/or large area can be pipelined and/or shared among the processing elements. Therefore the hardware cost and the delay can be effectively reduced without any performance degradation for some application domains. We suggest such reconfigurable array architecture template and design space exploration flow for domain-specific optimization. Experimental results show that our approach is much more efficient both in performance and area compared to existing reconfigurable architectures.<|reference_end|> | arxiv | @article{kim2007resource,
title={Resource Sharing and Pipelining in Coarse-Grained Reconfigurable
Architecture for Domain-Specific Optimization},
author={Yoonjin Kim, Mary Kiemb, Chulsoo Park, Jinyong Jung, Kiyoung Choi},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4704},
primaryClass={cs.AR}
} | kim2007resource |
arxiv-1541 | 0710.4705 | A Study of the Speedups and Competitiveness of FPGA Soft Processor Cores using Dynamic Hardware/Software Partitioning | <|reference_start|>A Study of the Speedups and Competitiveness of FPGA Soft Processor Cores using Dynamic Hardware/Software Partitioning: Field programmable gate arrays (FPGAs) provide designers with the ability to quickly create hardware circuits. Increases in FPGA configurable logic capacity and decreasing FPGA costs have enabled designers to more readily incorporate FPGAs in their designs. FPGA vendors have begun providing configurable soft processor cores that can be synthesized onto their FPGA products. While FPGAs with soft processor cores provide designers with increased flexibility, such processors typically have degraded performance and energy consumption compared to hard-core processors. Previously, we proposed warp processing, a technique capable of optimizing a software application by dynamically and transparently re-implementing critical software kernels as custom circuits in on-chip configurable logic. In this paper, we study the potential of a MicroBlaze soft-core based warp processing system to eliminate the performance and energy overhead of a soft-core processor compared to a hard-core processor. We demonstrate that the soft-core based warp processor achieves average speedups of 5.8 and energy reductions of 57% compared to the soft core alone. Our data shows that a soft-core based warp processor yields performance and energy consumption competitive with existing hard-core processors, thus expanding the usefulness of soft processor cores on FPGAs to a broader range of applications.<|reference_end|> | arxiv | @article{lysecky2007a,
title={A Study of the Speedups and Competitiveness of FPGA Soft Processor Cores
using Dynamic Hardware/Software Partitioning},
author={Roman Lysecky, Frank Vahid},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4705},
primaryClass={cs.AR}
} | lysecky2007a |
arxiv-1542 | 0710.4706 | An Infrastructure to Functionally Test Designs Generated by Compilers Targeting FPGAs | <|reference_start|>An Infrastructure to Functionally Test Designs Generated by Compilers Targeting FPGAs: This paper presents an infrastructure to test the functionality of the specific architectures output by a high-level compiler targeting dynamically reconfigurable hardware. It results in a suitable scheme to verify the architectures generated by the compiler, each time new optimization techniques are included or changes in the compiler are performed. We believe this kind of infrastructure is important to verify, by functional simulation, further research techniques, as far as compilation to Field-Programmable Gate Array (FPGA) platforms is concerned.<|reference_end|> | arxiv | @article{rodrigues2007an,
title={An Infrastructure to Functionally Test Designs Generated by Compilers
Targeting FPGAs},
author={Rui Rodrigues, Joao M. P. Cardoso},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4706},
primaryClass={cs.AR}
} | rodrigues2007an |
arxiv-1543 | 0710.4707 | Energy- and Performance-Driven NoC Communication Architecture Synthesis Using a Decomposition Approach | <|reference_start|>Energy- and Performance-Driven NoC Communication Architecture Synthesis Using a Decomposition Approach: In this paper, we present a methodology for customized communication architecture synthesis that matches the communication requirements of the target application. This is an important problem, particularly for network-based implementations of complex applications. Our approach is based on using frequently encountered generic communication primitives as an alphabet capable of characterizing any given communication pattern. The proposed algorithm searches through the entire design space for a solution that minimizes the system total energy consumption, while satisfying the other design constraints. Compared to the standard mesh architecture, the customized architecture generated by the newly proposed approach shows about 36% throughput increase and 51% reduction in the energy required to encrypt 128 bits of data with a standard encryption algorithm.<|reference_end|> | arxiv | @article{ogras2007energy-,
title={Energy- and Performance-Driven NoC Communication Architecture Synthesis
Using a Decomposition Approach},
author={Umit Y. Ogras, Radu Marculescu},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4707},
primaryClass={cs.AR}
} | ogras2007energy- |
arxiv-1544 | 0710.4709 | Analog and Digital Circuit Design in 65 nm CMOS: End of the Road? | <|reference_start|>Analog and Digital Circuit Design in 65 nm CMOS: End of the Road?: This special session addresses the problems that designers face when implementing analog and digital circuits in nanometer technologies. An introductory embedded tutorial will give an overview of the design problems at hand: the leakage power and process variability and their implications for digital circuits and memories, and the reducing supply voltages, design productivity and signal integrity problems for embedded analog blocks. Next, a panel of experts from both industrial semiconductor houses and design companies, EDA vendors and research institutes will present and discuss with the audience their opinions on whether the design road ends at marker "65nm" or not.<|reference_end|> | arxiv | @article{gielen2007analog,
title={Analog and Digital Circuit Design in 65 nm CMOS: End of the Road?},
author={Georges Gielen, Wim Dehaene, Phillip Christie, Dieter Draxelmayr,
Edmond Janssens, Karen Maex, Ted Vucurevich},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4709},
primaryClass={cs.AR}
} | gielen2007analog |
arxiv-1545 | 0710.4710 | HEBS: Histogram Equalization for Backlight Scaling | <|reference_start|>HEBS: Histogram Equalization for Backlight Scaling: In this paper, a method is proposed for finding a pixel transformation function that maximizes backlight dimming while maintaining a pre-specified image distortion level for a liquid crystal display. This is achieved by finding a pixel transformation function which maps the original image histogram to a new histogram with a lower dynamic range. Next, the contrast of the transformed image is enhanced so as to compensate for the brightness loss that would arise from backlight dimming. The proposed approach relies on an accurate definition of the image distortion which takes into account both the pixel value differences and a model of the human visual system, and it is amenable to highly efficient hardware realization. Experimental results show that the histogram equalization for backlight scaling method results in about 45% power saving with an effective distortion rate of 5% and 65% power saving for a 20% distortion rate. These power savings are significantly higher than those of previously reported backlight dimming approaches.<|reference_end|> | arxiv | @article{iranli2007hebs:,
title={HEBS: Histogram Equalization for Backlight Scaling},
author={Ali Iranli, Hanif Fatemi, Massoud Pedram},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4710},
primaryClass={cs.OH}
} | iranli2007hebs: |
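To make the idea in the record above concrete, here is a minimal, hedged sketch of backlight scaling with pixel compensation. It is not the HEBS method itself: instead of histogram equalization under an HVS-aware distortion metric, it uses a simple percentile cutoff as the "distortion budget", and the function name, parameters, and linear-luminance assumption are illustrative only.

```python
# Illustrative sketch only (assumes linear luminance and a percentile-based cutoff).
import numpy as np

def backlight_scale(img, distortion_budget=0.05):
    """img: 2-D uint8 grayscale array.
    Returns (compensated image, backlight duty factor in (0, 1])."""
    hist, _ = np.histogram(img, bins=256, range=(0, 256))
    cdf = np.cumsum(hist) / img.size
    # Smallest grey level that already covers (1 - budget) of all pixels;
    # pixels above it will saturate after compensation (the allowed distortion).
    cutoff = int(np.clip(np.searchsorted(cdf, 1.0 - distortion_budget), 1, 255))
    b = cutoff / 255.0                                   # dim backlight to this factor
    # Boost pixel values so perceived luminance (backlight * transmittance) is kept.
    boosted = np.clip(img.astype(np.float32) / b, 0, 255).astype(np.uint8)
    return boosted, b

img = (np.random.rand(64, 64) * 160).astype(np.uint8)    # synthetic dark image
out, b = backlight_scale(img)
print("backlight duty factor:", round(b, 3))
```

Since backlight power scales roughly with brightness, dimming to a fraction b of full brightness saves roughly the complementary fraction of backlight power, which is why the achievable saving grows with the distortion one is willing to accept, as in the 5%/45% and 20%/65% figures quoted in the abstract.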
arxiv-1546 | 0710.4711 | FPGA Architecture for Multi-Style Asynchronous Logic | <|reference_start|>FPGA Architecture for Multi-Style Asynchronous Logic: This paper presents a novel FPGA architecture for implementing various styles of asynchronous logic. The main objective is to break the dependency between the FPGA architecture dedicated to asynchronous logic and the logic style. The innovative aspects of the architecture are described. Moreover, thanks to its genericity, the architecture is well suited to being rebuilt and adapted to future evolutions of asynchronous logic. A full-adder was implemented in different styles of logic to show the flexibility of the architecture.<|reference_end|> | arxiv | @article{huot2007fpga,
title={FPGA Architecture for Multi-Style Asynchronous Logic},
author={N. Huot (TIMA), H. Dubreuil (TIMA), L. Fesquet (TIMA), M. Renaudin
(TIMA)},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4711},
primaryClass={cs.AR}
} | huot2007fpga |
arxiv-1547 | 0710.4712 | An Accurate SER Estimation Method Based on Propagation Probability | <|reference_start|>An Accurate SER Estimation Method Based on Propagation Probability: In this paper, we present an accurate but very fast soft error rate (SER) estimation technique for digital circuits based on error propagation probability (EPP) computation. Experimental results and comparison with the random simulation technique show that our proposed method is on average within 6% of the random simulation method and four to five orders of magnitude faster.<|reference_end|> | arxiv | @article{asadi2007an,
title={An Accurate SER Estimation Method Based on Propagation Probability},
author={Ghazanfar Asadi, Mehdi B. Tahoori},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4712},
primaryClass={cs.AR}
} | asadi2007an |
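As background for the record above, the sketch below shows the basic error-propagation-probability idea on a toy gate-level netlist: an error passes through a gate only when its side inputs carry non-controlling values, and those probabilities are multiplied along the path. It is a generic, textbook-style illustration, not the authors' EPP algorithm; the netlist, signal probabilities, and independence/reconvergence-free assumptions are ours.

```python
# Illustrative sketch only: assumes independent signals and reconvergence-free fanout.
NETLIST = {                    # net -> (gate type, input nets); primary inputs absent
    "n1": ("AND", ["a", "b"]),
    "n2": ("OR",  ["n1", "c"]),
    "out": ("AND", ["n2", "d"]),
}
P_ONE = {"a": 0.5, "b": 0.5, "c": 0.5, "d": 0.5}   # probability each PI is logic 1

def p_one(net):
    """Signal probability (probability of logic 1) of a net."""
    if net in P_ONE:
        return P_ONE[net]
    kind, ins = NETLIST[net]
    ps = [p_one(i) for i in ins]
    if kind == "AND":
        out = 1.0
        for p in ps:
            out *= p
        return out
    if kind == "OR":
        out = 1.0
        for p in ps:
            out *= 1.0 - p
        return 1.0 - out
    return 1.0 - ps[0]                              # NOT

def epp(err_net, target):
    """Probability that an error on err_net propagates to net `target`."""
    if target == err_net:
        return 1.0
    if target in P_ONE:
        return 0.0
    kind, ins = NETLIST[target]
    total = 0.0
    for i in ins:
        through = epp(err_net, i)
        if through == 0.0:
            continue
        others = [p_one(o) for o in ins if o != i]
        if kind == "AND":                           # side inputs must be 1
            gate_pass = 1.0
            for p in others:
                gate_pass *= p
        elif kind == "OR":                          # side inputs must be 0
            gate_pass = 1.0
            for p in others:
                gate_pass *= 1.0 - p
        else:                                       # NOT always propagates
            gate_pass = 1.0
        total += through * gate_pass
    return min(total, 1.0)

print(epp("n1", "out"))   # 0.25 for this toy netlist
```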
arxiv-1548 | 0710.4713 | Improving the Process-Variation Tolerance of Digital Circuits Using Gate Sizing and Statistical Techniques | <|reference_start|>Improving the Process-Variation Tolerance of Digital Circuits Using Gate Sizing and Statistical Techniques: A new approach for enhancing the process-variation tolerance of digital circuits is described. We extend recent advances in statistical timing analysis into an optimization framework. Our objective is to reduce the performance variance of a technology-mapped circuit where delays across elements are represented by random variables which capture the manufacturing variations. We introduce the notion of statistical critical paths, which account for both means and variances of performance variation. An optimization engine is used to size gates with a goal of reducing the timing variance along the statistical critical paths. We apply a pair of nested statistical analysis methods, deploying a slower, more accurate approach for tracking statistical critical paths and a fast engine for evaluating gate size assignments. We derive a new approximation for the max operation on random variables which is deployed for the faster inner engine. Circuit optimization is carried out using a gain-based algorithm that terminates when constraints are satisfied or no further improvements can be made. We show optimization results that demonstrate an average of 72% reduction in performance variation at the expense of an average 20% increase in design area.<|reference_end|> | arxiv | @article{neiroukh2007improving,
title={Improving the Process-Variation Tolerance of Digital Circuits Using Gate
Sizing and Statistical Techniques},
author={Osama Neiroukh, Xiaoyu Song},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4713},
primaryClass={cs.AR}
} | neiroukh2007improving |
arxiv-1549 | 0710.4714 | Assertion-Based Design Exploration of DVS in Network Processor Architectures | <|reference_start|>Assertion-Based Design Exploration of DVS in Network Processor Architectures: With the scaling of technology and higher requirements on performance and functionality, power dissipation is becoming one of the major design considerations in the development of network processors. In this paper, we use an assertion-based methodology for system-level power/performance analysis to study two dynamic voltage scaling (DVS) techniques, traffic-based DVS and execution-based DVS, in a network processor model. Using the automatically generated distribution analyzers, we analyze the power and performance distributions and study their trade-offs for the two DVS policies with different parameter settings such as threshold values and window sizes. We discuss the optimal configurations of the two DVS policies under different design requirements. By a set of experiments, we show that the assertion-based trace analysis methodology is an efficient tool that can help a designer easily compare and study optimal architectural configurations in a large design space.<|reference_end|> | arxiv | @article{yu2007assertion-based,
title={Assertion-Based Design Exploration of DVS in Network Processor
Architectures},
author={Jia Yu, Wei Wu, Xi Chen, Harry Hsieh, Jun Yang, Felice Balarin},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4714},
primaryClass={cs.AR}
} | yu2007assertion-based |
arxiv-1550 | 0710.4715 | Circuit-Level Modeling for Concurrent Testing of Operational Defects due to Gate Oxide Breakdown | <|reference_start|>Circuit-Level Modeling for Concurrent Testing of Operational Defects due to Gate Oxide Breakdown: As device sizes shrink and current densities increase, the probability of device failures due to gate oxide breakdown (OBD) also increases. To provide designs that are tolerant to such failures, we must investigate and understand the manifestations of this physical phenomenon at the circuit and system level. In this paper, we develop a model for operational OBD defects, and we explore how to test for faults due to OBD. For a NAND gate, we derive the necessary input conditions that excite and detect errors due to OBD defects at the gate level. We show that traditional pattern generators fail to exercise all of these defects. Finally, we show that these test patterns can be propagated and justified for a combinational circuit in a manner similar to traditional ATPG.<|reference_end|> | arxiv | @article{carter2007circuit-level,
title={Circuit-Level Modeling for Concurrent Testing of Operational Defects due
to Gate Oxide Breakdown},
author={Jonathan R. Carter, Sule Ozev, Daniel J. Sorin},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4715},
primaryClass={cs.AR}
} | carter2007circuit-level |
arxiv-1551 | 0710.4716 | Optimized Generation of Data-Path from C Codes for FPGAs | <|reference_start|>Optimized Generation of Data-Path from C Codes for FPGAs: FPGAs, as computing devices, offer significant speedup over microprocessors. Furthermore, their configurability offers an advantage over traditional ASICs. However, they do not yet enjoy high-level language programmability, as microprocessors do. This has become the main obstacle for their wider acceptance by application designers. ROCCC is a compiler designed to generate circuits from C source code to execute on FPGAs, more specifically on CSoCs. It generates RTL-level HDLs from frequently executing kernels in an application. In this paper, we describe ROCCC's system overview and focus on its data path generation. We compare the performance of ROCCC-generated VHDL code with that of Xilinx IPs. The synthesis results show that the ROCCC-generated circuits take around 2x-3x the area and run at a comparable clock rate.<|reference_end|> | arxiv | @article{guo2007optimized,
title={Optimized Generation of Data-Path from C Codes for FPGAs},
author={Zhi Guo, Betul Buyukkurt, Walid Najjar, Kees Vissers},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4716},
primaryClass={cs.AR}
} | guo2007optimized |
arxiv-1552 | 0710.4717 | Multi-Placement Structures for Fast and Optimized Placement in Analog Circuit Synthesis | <|reference_start|>Multi-Placement Structures for Fast and Optimized Placement in Analog Circuit Synthesis: This paper presents the novel idea of multi-placement structures for fast and optimized placement instantiation in analog circuit synthesis. These structures need to be generated only once for a specific circuit topology. When used in synthesis, these pre-generated structures instantiate various layout floorplans for various sizes and parameters of a circuit. Unlike procedural layout generators, they enable fast placement of circuits while keeping the quality of the placements at a high level during a synthesis process. The fast placement is a result of high-speed instantiation resulting from the efficiency of the multi-placement structure. The good quality of the placements derives from the extensive and intelligent search process that is used to build the multi-placement structure. The target benchmarks of these structures are analog circuits with around 25 modules. An algorithm for the generation of such multi-placement structures is presented. Experimental results show placement execution times averaging a few milliseconds, making them usable during layout-aware synthesis for optimized placements.<|reference_end|> | arxiv | @article{badaoui2007multi-placement,
title={Multi-Placement Structures for Fast and Optimized Placement in Analog
Circuit Synthesis},
author={Raoul F. Badaoui, Ranga Vemuri},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4717},
primaryClass={cs.AR}
} | badaoui2007multi-placement |
arxiv-1553 | 0710.4718 | Noise Figure Evaluation Using Low Cost BIST | <|reference_start|>Noise Figure Evaluation Using Low Cost BIST: A technique for evaluating noise figure suitable for BIST implementation is described. It is based on a low cost single-bit digitizer, which allows the simultaneous evaluation of noise figure in several test points of the analog circuit. The method is also able to benefit from SoC resources, like memory and processing power. Theoretical background and experimental results are presented in order to demonstrate the feasibility of the approach.<|reference_end|> | arxiv | @article{negreiros2007noise,
title={Noise Figure Evaluation Using Low Cost BIST},
author={Marcelo Negreiros, Luigi Carro, Altamiro A. Susin},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4718},
primaryClass={cs.OH}
} | negreiros2007noise |
arxiv-1554 | 0710.4719 | Specification Test Compaction for Analog Circuits and MEMS | <|reference_start|>Specification Test Compaction for Analog Circuits and MEMS: Testing a non-digital integrated system against all of its specifications can be quite expensive due to the elaborate test application and measurement setup required. We propose to eliminate redundant tests by employing e-SVM based statistical learning. Application of the proposed methodology to an operational amplifier and a MEMS accelerometer reveals that redundant tests can be statistically identified from a complete set of specification-based tests with negligible error. Specifically, after eliminating five of eleven specification-based tests for an operational amplifier, the defect escape and yield loss are small at 0.6% and 0.9%, respectively. For the accelerometer, a defect escape of 0.2% and a yield loss of 0.1% occur when the hot and cold tests are eliminated. For the accelerometer, this level of compaction would reduce test cost by more than half.<|reference_end|> | arxiv | @article{biswas2007specification,
title={Specification Test Compaction for Analog Circuits and MEMS},
author={Sounil Biswas, Peng Li, R. D. (shawn) Blanton, Larry T. Pileggi},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4719},
primaryClass={cs.AR}
} | biswas2007specification |
arxiv-1555 | 0710.4720 | Soft-Error Tolerance Analysis and Optimization of Nanometer Circuits | <|reference_start|>Soft-Error Tolerance Analysis and Optimization of Nanometer Circuits: Nanometer circuits are becoming increasingly susceptible to soft-errors due to alpha-particle and atmospheric neutron strikes as device scaling reduces node capacitances and supply/threshold voltage scaling reduces noise margins. It is becoming crucial to add soft-error tolerance estimation and optimization to the design flow to handle the increasing susceptibility. The first part of this paper presents a tool for accurate soft-error tolerance analysis of nanometer circuits (ASERTA) that can be used to estimate the soft-error tolerance of nanometer circuits consisting of millions of gates. The tolerance estimates generated by the tool match SPICE generated estimates closely while taking orders of magnitude less computation time. The second part of the paper presents a tool for soft-error tolerance optimization of nanometer circuits (SERTOPT) using the tolerance estimates generated by ASERTA. The tool finds optimal sizes, channel lengths, supply voltages and threshold voltages to be assigned to gates in a combinational circuit such that the soft-error tolerance is increased while meeting the timing constraint. Experiments on ISCAS'85 benchmark circuits showed that soft-error rate of the optimized circuit decreased by as much as 47% with marginal increase in circuit delay.<|reference_end|> | arxiv | @article{dhillon2007soft-error,
title={Soft-Error Tolerance Analysis and Optimization of Nanometer Circuits},
author={Yuvraj Singh Dhillon, Abdulkadir Utku Diril, Abhijit Chatterjee},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4720},
primaryClass={cs.AR}
} | dhillon2007soft-error |
arxiv-1556 | 0710.4721 | IEEE 1149.4 Compatible ABMs for Basic RF Measurements | <|reference_start|>IEEE 1149.4 Compatible ABMs for Basic RF Measurements: The analogue testing standard IEEE 1149.4 is mainly targeted at low-frequency testing. The problem studied in this paper is extending the standard to radio frequency testing as well. The IEEE 1149.4 compatible measurement structures (ABMs) developed in this study extract the information one is measuring from the radio frequency signal and represent the result as a DC voltage level. The ABMs presented in this paper are targeted at power and frequency measurements at frequencies from 1 GHz to 2 GHz. The power measurement error caused by temperature, supply voltage and process variations is roughly 2 dB, and the frequency measurement error is 0.1 GHz.<|reference_end|> | arxiv | @article{syri2007ieee,
title={IEEE 1149.4 Compatible ABMs for Basic RF Measurements},
author={Pekka Syri, Juha Hakkinen, Markku Moilanen},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4721},
primaryClass={cs.AR}
} | syri2007ieee |
arxiv-1557 | 0710.4722 | Designer-Driven Topology Optimization for Pipelined Analog to Digital Converters | <|reference_start|>Designer-Driven Topology Optimization for Pipelined Analog to Digital Converters: This paper suggests a practical "hybrid" synthesis methodology which integrates designer-derived analytical models for system-level description with simulation-based models at the circuit level. We show how to optimize stage-resolution to minimize the power in a pipelined ADC. Exploration (via detailed synthesis) of several ADC configurations is used to show that a 4-3-2... resolution distribution uses the least power for a 13-bit 40 MSPS converter in a 0.25 $\mu$m CMOS process.<|reference_end|> | arxiv | @article{chien2007designer-driven,
title={Designer-Driven Topology Optimization for Pipelined Analog to Digital
Converters},
author={Yu-Tsun Chien, Dong Chen, Jea-Hong Lou, Gin-Kou Ma, Rob A. Rutenbar,
Tamal Mukherjee},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4722},
primaryClass={cs.AR}
} | chien2007designer-driven |
arxiv-1558 | 0710.4723 | Simulation Methodology for Analysis of Substrate Noise Impact on Analog / RF Circuits Including Interconnect Resistance | <|reference_start|>Simulation Methodology for Analysis of Substrate Noise Impact on Analog / RF Circuits Including Interconnect Resistance: This paper reports a novel simulation methodology for analysis and prediction of substrate noise impact on analog / RF circuits, taking into account the role of the parasitic resistance of the on-chip interconnect in the impact mechanism. This methodology allows investigation of the role of the separate devices (also parasitic devices) in the analog / RF circuit in the overall impact. In this way, it is revealed which devices have to be taken care of (shielding, topology change) to protect the circuit against substrate noise. The developed methodology is used to analyze the impact of substrate noise on a 3 GHz LC-tank Voltage Controlled Oscillator (VCO) designed in a high-ohmic 0.18 $\mu$m 1PM6 CMOS technology. For this VCO (in the investigated frequency range from DC to 15 MHz), impact is mainly caused by resistive coupling of noise from the substrate to the non-ideal on-chip ground interconnect, resulting in analog ground bounce and frequency modulation. Hence, the presented test-case reveals the important role of the on-chip interconnect in the phenomenon of substrate noise impact.<|reference_end|> | arxiv | @article{soens2007simulation,
title={Simulation Methodology for Analysis of Substrate Noise Impact on Analog
/ RF Circuits Including Interconnect Resistance},
author={C. Soens, G. Van Der Plas, P. Wambacq, S. Donnay},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4723},
primaryClass={cs.PF}
} | soens2007simulation |
arxiv-1559 | 0710.4724 | Systematic Figure of Merit Computation for the Design of Pipeline ADC | <|reference_start|>Systematic Figure of Merit Computation for the Design of Pipeline ADC: The emerging concept of SoC-AMS calls for research into new top-down methodologies to aid system designers in sizing analog and mixed-signal devices. This work applies this idea to the high-level optimization of pipeline ADCs. Considering a given technology, it consists of comparing different configurations according to their imperfections and their architectures, without FFT computation or time-consuming simulations. The final selection is based on a figure of merit.<|reference_end|> | arxiv | @article{barrandon2007systematic,
title={Systematic Figure of Merit Computation for the Design of Pipeline ADC},
author={L. Barrandon (IETR), S. Crand (IETR), D. Houzet (IETR)},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4724},
primaryClass={cs.AR}
} | barrandon2007systematic |
arxiv-1560 | 0710.4725 | Fault-Trajectory Approach for Fault Diagnosis on Analog Circuits | <|reference_start|>Fault-Trajectory Approach for Fault Diagnosis on Analog Circuits: This paper discusses the suitability of the fault-trajectory approach for fault diagnosis on analog networks. Recent work has shown promising results for an ATPG method based on this concept for diagnosing faults on analog networks. The method relies on evolutionary techniques, where a genetic algorithm (GA) is coded to generate a set of optimum frequencies capable of disclosing faults.<|reference_end|> | arxiv | @article{savioli2007fault-trajectory,
title={Fault-Trajectory Approach for Fault Diagnosis on Analog Circuits},
author={Carlos Eduardo Savioli, Claudio C. Czendrodi, Jose Vicente Calvano,
Antonio Carneiro De Mesquita Filho},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4725},
primaryClass={cs.NE}
} | savioli2007fault-trajectory |
arxiv-1561 | 0710.4727 | Top-Down Design of a Low-Power Multi-Channel 2.5-Gbit/s/Channel Gated Oscillator Clock-Recovery Circuit | <|reference_start|>Top-Down Design of a Low-Power Multi-Channel 2.5-Gbit/s/Channel Gated Oscillator Clock-Recovery Circuit: We present a complete top-down design of a low-power multi-channel clock recovery circuit based on gated current-controlled oscillators. The flow includes several tools and methods used to specify block constraints and to design and verify the topology down to the transistor level, as well as to achieve a power consumption as low as 5 mW/Gbit/s. Statistical simulation is used to estimate the achievable bit error rate in the presence of phase and frequency errors and to prove the feasibility of the concept. VHDL modeling provides extensive verification of the topology. Thermal noise modeling based on well-known concepts delivers design parameters for the device sizing and biasing. We present two practical examples of possible design improvements analyzed and implemented with this methodology.<|reference_end|> | arxiv | @article{muller2007top-down,
title={Top-Down Design of a Low-Power Multi-Channel 2.5-Gbit/s/Channel Gated
Oscillator Clock-Recovery Circuit},
author={Paul Muller, Armin Tajalli, Mojtaba Atarodi, Yusuf Leblebici},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4727},
primaryClass={cs.AR}
} | muller2007top-down |
arxiv-1562 | 0710.4728 | Energy-Aware Routing for E-Textile Applications | <|reference_start|>Energy-Aware Routing for E-Textile Applications: As the scale of electronic devices shrinks, "electronic textiles" (e-textiles) will make possible a wide variety of novel applications which are currently unfeasible. Due to the wearability concerns, low-power techniques are critical for e-textile applications. In this paper, we address the issue of the energy-aware routing for e-textile platforms and propose an efficient algorithm to solve it. The platform we consider consists of dedicated components for e-textiles, including computational modules, dedicated transmission lines and thin-film batteries on fiber substrates. Furthermore, we derive an analytical upper bound for the achievable number of jobs completed over all possible routing strategies. From a practical standpoint, for the Advanced Encryption Standard (AES) cipher, the routing technique we propose achieves about fifty percent of this analytical upper bound. Moreover, compared to the non-energy-aware counterpart, our routing technique increases the number of encryption jobs completed by one order of magnitude.<|reference_end|> | arxiv | @article{kao2007energy-aware,
title={Energy-Aware Routing for E-Textile Applications},
author={Jung-Chun Kao, Radu Marculescu},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4728},
primaryClass={cs.AR}
} | kao2007energy-aware |
arxiv-1563 | 0710.4729 | Modeling and Analysis of Loading Effect in Leakage of Nano-Scaled Bulk-CMOS Logic Circuits | <|reference_start|>Modeling and Analysis of Loading Effect in Leakage of Nano-Scaled Bulk-CMOS Logic Circuits: In nanometer-scaled CMOS devices, the significant increase in subthreshold, gate and reverse-biased junction band-to-band-tunneling (BTBT) leakage results in a large increase of the total leakage power of a logic circuit. Leakage components interact with each other at the device level (through device geometry and doping profile) and also at the circuit level (through node voltages). Due to the circuit-level interaction of the different leakage components, the leakage of a logic gate strongly depends on the circuit topology, i.e., the number and nature of the other logic gates connected to its input and output. In this paper, for the first time, we analyze the loading effect on leakage and propose a method to accurately estimate the total leakage of a logic circuit from its logic-level description, considering the impact of loading and transistor stacking.<|reference_end|> | arxiv | @article{mukhopadhyay2007modeling,
title={Modeling and Analysis of Loading Effect in Leakage of Nano-Scaled
Bulk-CMOS Logic Circuits},
author={Saibal Mukhopadhyay, Swarup Bhunia, Kaushik Roy},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4729},
primaryClass={cs.AR}
} | mukhopadhyay2007modeling |
arxiv-1564 | 0710.4731 | Leakage-Aware Interconnect for On-Chip Network | <|reference_start|>Leakage-Aware Interconnect for On-Chip Network: On-chip networks have been proposed as the interconnect fabric for future systems-on-chip and multi-processors on chip. Power is one of the main constraints of these systems, and the interconnect consumes a significant portion of the power budget. In this paper, we propose four leakage-aware interconnect schemes. Across the schemes, we achieve 10.13%-63.57% active leakage savings and 12.35%-95.96% standby leakage savings, while the delay penalty ranges from 0% to 4.69%.<|reference_end|> | arxiv | @article{tsai2007leakage-aware,
title={Leakage-Aware Interconnect for On-Chip Network},
author={Yuh-Fang Tsai, Vijaykrishnan Narayaynan, Yuan Xie, Mary Jane Irwin},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4731},
primaryClass={cs.AR}
} | tsai2007leakage-aware |
arxiv-1565 | 0710.4732 | Energy Efficiency of the IEEE 802.15.4 Standard in Dense Wireless Microsensor Networks: Modeling and Improvement Perspectives | <|reference_start|>Energy Efficiency of the IEEE 802.15.4 Standard in Dense Wireless Microsensor Networks: Modeling and Improvement Perspectives: Wireless microsensor networks, which have been the topic of intensive research in recent years, are now emerging in industrial applications. An important milestone in this transition has been the release of the IEEE 802.15.4 standard that specifies interoperable wireless physical and medium access control layers targeted to sensor node radios. In this paper, we evaluate the potential of an 802.15.4 radio for use in an ultra low power sensor node operating in a dense network. Starting from measurements carried out on the off-the-shelf radio, effective radio activation and link adaptation policies are derived. It is shown that, in a typical sensor network scenario, the average power per node can be reduced down to 211 $\mu$W. Next, the energy consumption breakdown between the different phases of a packet transmission is presented, indicating which part of the transceiver architecture can most effectively be optimized in order to further reduce the radio power, enabling self-powered wireless microsensor networks.<|reference_end|> | arxiv | @article{bougard2007energy,
title={Energy Efficiency of the IEEE 802.15.4 Standard in Dense Wireless
Microsensor Networks: Modeling and Improvement Perspectives},
author={Bruno Bougard, Francky Catthoor, Denis C. Daly, Anantha Chandrakasan,
Wim Dehaene},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4732},
primaryClass={cs.NI}
} | bougard2007energy |
arxiv-1566 | 0710.4733 | Smart Temperature Sensor for Thermal Testing of Cell-Based ICs | <|reference_start|>Smart Temperature Sensor for Thermal Testing of Cell-Based ICs: In this paper we present a simple and efficient built-in temperature sensor for thermal monitoring of standard-cell based VLSI circuits. The proposed smart temperature sensor uses a ring-oscillator composed of complex gates instead of inverters to optimize its linearity. Simulation results from a 0.18$\mu$m CMOS technology show that the non-linearity error of the sensor can be reduced when an adequate set of standard logic gates is selected.<|reference_end|> | arxiv | @article{bota2007smart,
title={Smart Temperature Sensor for Thermal Testing of Cell-Based ICs},
author={S. A. Bota, M. Rosales, J. L. Rossello, J. Segura},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4733},
primaryClass={cs.AR}
} | bota2007smart |
arxiv-1567 | 0710.4734 | Computational Intelligence Characterization Method of Semiconductor Device | <|reference_start|>Computational Intelligence Characterization Method of Semiconductor Device: Characterization of semiconductor devices is used to gather as much data about the device as possible to determine weaknesses in design or trends in the manufacturing process. In this paper, we propose a novel multiple trip point characterization concept to overcome the constraint of the single trip point concept in the device characterization phase. In addition, we use computational intelligence techniques (e.g., neural networks, fuzzy logic and genetic algorithms) to further manipulate these sets of multiple trip point values and tests based on semiconductor test equipment. Our experimental results demonstrate an excellent design parameter variation analysis in the device characterization phase, as well as detection of a set of worst-case tests that can provoke the worst-case variation, while the traditional approach was not capable of detecting them.<|reference_end|> | arxiv | @article{liau2007computational,
title={Computational Intelligence Characterization Method of Semiconductor
Device},
author={Eric Liau, Doris Schmitt-Landsiedel},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4734},
primaryClass={cs.AI cs.NE}
} | liau2007computational |
arxiv-1568 | 0710.4735 | Worst-Case and Average-Case Analysis of n-Detection Test Sets | <|reference_start|>Worst-Case and Average-Case Analysis of n-Detection Test Sets: Test sets that detect each target fault n times (n-detection test sets) are typically generated for restricted values of n due to the increase in test set size with n. We perform both a worst-case analysis and an average-case analysis to check the effect of restricting n on the unmodeled fault coverage of an (arbitrary) n-detection test set. Our analysis is independent of any particular test set or test generation approach. It is based on a specific set of target faults and a specific set of untargeted faults. It shows that, depending on the circuit, very large values of n may be needed to guarantee the detection of all the untargeted faults. We discuss the implications of these results.<|reference_end|> | arxiv | @article{pomeranz2007worst-case,
title={Worst-Case and Average-Case Analysis of n-Detection Test Sets},
author={Irith Pomeranz, Sudhakar M. Reddy},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4735},
primaryClass={cs.AR}
} | pomeranz2007worst-case |
arxiv-1569 | 0710.4736 | A New Embedded Measurement Structure for eDRAM Capacitor | <|reference_start|>A New Embedded Measurement Structure for eDRAM Capacitor: Embedded DRAM (eDRAM) is increasingly used in Systems on Chip (SoC). Integrating the DRAM capacitor process into a logic process while achieving satisfactory yields is challenging. The specific DRAM capacitor process and the low capacitance value (~30 fF) of this device induce problems for process monitoring and failure analysis. We propose a new test structure to measure the capacitance value of each DRAM cell capacitor in a DRAM array. This concept has been validated by simulation on a 0.18$\mu$m eDRAM technology.<|reference_end|> | arxiv | @article{lopez2007a,
title={A New Embedded Measurement Structure for eDRAM Capacitor},
author={L. Lopez (L2MP), J. M. Portal (L2MP), D. Nee (ST-Rousset)},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4736},
primaryClass={cs.AR}
} | lopez2007a |
arxiv-1570 | 0710.4737 | Efficient Feasibility Analysis for Real-Time Systems with EDF Scheduling | <|reference_start|>Efficient Feasibility Analysis for Real-Time Systems with EDF Scheduling: This paper presents new fast exact feasibility tests for uniprocessor real-time systems using preemptive EDF scheduling. Task sets that are accepted by previously described sufficient tests are evaluated by the new algorithms in nearly the same time as with the old tests. Many task sets are not accepted by the earlier tests despite being feasible; these task sets are evaluated by the new algorithms much faster than with known exact feasibility tests. Therefore it is possible to use them for many applications for which previously only sufficient tests were suitable. Additionally, this paper shows that the best previously known sufficient test, the best known feasibility bound and the best known approximation algorithm can be derived from these new tests. As a result, this leads to an integrated schedulability theory for EDF.<|reference_end|> | arxiv | @article{albers2007efficient,
title={Efficient Feasibility Analysis for Real-Time Systems with EDF Scheduling},
author={Karsten Albers, Frank Slomka},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4737},
primaryClass={cs.OH}
} | albers2007efficient |
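For orientation, the sketch below implements the classical processor-demand feasibility test for preemptive uniprocessor EDF: each task i with WCET C_i, period T_i and relative deadline D_i contributes dbf_i(t) = max(0, floor((t - D_i)/T_i) + 1) * C_i, and a synchronous task set is feasible iff the summed demand never exceeds t. This is the well-known baseline that exact tests like the one above improve upon, not the paper's faster algorithm; the hyperperiod horizon used here is the simple but expensive choice whose reduction is exactly what efficient exact tests target.

```python
# Illustrative baseline only: synchronous periodic tasks (C, T, D), utilization <= 1.
# Requires Python 3.9+ for math.lcm.
from math import floor, lcm

def dbf(task, t):
    C, T, D = task
    return max(0, floor((t - D) / T) + 1) * C

def edf_feasible(tasks):
    if sum(C / T for C, T, D in tasks) > 1.0:
        return False
    horizon = lcm(*(T for _, T, _ in tasks))        # hyperperiod: sufficient but large
    deadlines = sorted({D + k * T for C, T, D in tasks
                        for k in range(horizon // T + 1) if D + k * T <= horizon})
    return all(sum(dbf(task, t) for task in tasks) <= t for t in deadlines)

print(edf_feasible([(2, 5, 5), (3, 10, 8), (5, 20, 20)]))   # True, utilization 0.95
```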
arxiv-1571 | 0710.4738 | Exploring NoC Mapping Strategies: An Energy and Timing Aware Technique | <|reference_start|>Exploring NoC Mapping Strategies: An Energy and Timing Aware Technique: Complex applications implemented as Systems on Chip (SoCs) demand extensive use of system level modeling and validation. Their implementation brings together a large number of complex IP cores and advanced interconnection schemes, such as hierarchical bus architectures or networks on chip (NoCs). Modeling an application involves capturing its computation and communication characteristics. Previously proposed communication weighted models (CWM) consider only the application communication aspects. This work proposes a communication dependence and computation model (CDCM) that can simultaneously consider both aspects of an application. It presents a solution to the problem of mapping applications on regular NoCs while considering execution time and energy consumption. The use of CDCM is shown to provide estimated average reductions of 40% in execution time, and 20% in energy consumption, for current technologies.<|reference_end|> | arxiv | @article{marcon2007exploring,
title={Exploring NoC Mapping Strategies: An Energy and Timing Aware Technique},
author={Cesar Marcon, Ney Calazans, Fernando Moraes, Altamiro Susin, Igor
Reis, Fabiano Hessel},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4738},
primaryClass={cs.AR}
} | marcon2007exploring |
arxiv-1572 | 0710.4739 | Q-DPM: An Efficient Model-Free Dynamic Power Management Technique | <|reference_start|>Q-DPM: An Efficient Model-Free Dynamic Power Management Technique: When applying Dynamic Power Management (DPM) to pervasively deployed embedded systems, the technique needs to be very efficient so that it is feasible to implement it on a low-end processor with a tight memory budget. Furthermore, it should be able to track time-varying behavior rapidly, because such variation is an inherent characteristic of real-world systems. Existing methods, which are usually model-based, may not satisfy the aforementioned requirements. In this paper, we propose a model-free DPM technique based on Q-Learning. Q-DPM is much more efficient because it removes the overhead of the parameter estimator and the mode-switch controller. Furthermore, its policy optimization is performed via consecutive online trials, which also leads to a very rapid response to time-varying behavior.<|reference_end|> | arxiv | @article{li2007q-dpm:,
title={Q-DPM: An Efficient Model-Free Dynamic Power Management Technique},
author={Min Li, Xiaobo Wu, Richard Yao, Xiaolang Yan},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4739},
primaryClass={cs.OH}
} | li2007q-dpm: |
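Since the abstract names Q-Learning but gives no formulation, the fragment below shows what a model-free, table-based DPM learner of this general kind can look like. The state encoding (a bucketed idle-time observation), action set, reward shape and constants are hypothetical placeholders chosen for illustration, not the paper's design.

```python
# Illustrative sketch only: tabular Q-learning for picking a power mode per idle period.
import random
from collections import defaultdict

ACTIONS = ["active", "standby", "sleep"]
ALPHA, GAMMA, EPSILON = 0.1, 0.9, 0.1
Q = defaultdict(float)                               # Q[(state, action)] -> value

def choose(state):
    if random.random() < EPSILON:                    # occasional exploration
        return random.choice(ACTIONS)
    return max(ACTIONS, key=lambda a: Q[(state, a)]) # greedy exploitation

def update(state, action, reward, next_state):
    """One online 'trial': reward should penalize both energy and wake-up latency."""
    best_next = max(Q[(next_state, a)] for a in ACTIONS)
    Q[(state, action)] += ALPHA * (reward + GAMMA * best_next - Q[(state, action)])

# Example step: a short idle period where sleeping cost more (wake-up) than it saved.
s = "idle_short"
a = choose(s)
update(s, a, reward=-1.0 if a == "sleep" else -0.2, next_state="idle_short")
```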
arxiv-1573 | 0710.4740 | A New Approach to Component Testing | <|reference_start|>A New Approach to Component Testing: Carefully tested electric/electronic components are a requirement for effective hardware-in-the-loop tests and vehicle tests in the automotive industry. A new method for the definition and execution of component tests is described. The most important advantage of this method is independence from the test stand. It therefore offers the opportunity to build up knowledge over a long period of time and the ability to share this knowledge with different partners.<|reference_end|> | arxiv | @article{brinkmeyer2007a,
title={A New Approach to Component Testing},
author={Horst Brinkmeyer},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4740},
primaryClass={cs.OH}
} | brinkmeyer2007a |
arxiv-1574 | 0710.4742 | Hardware Accelerated Power Estimation | <|reference_start|>Hardware Accelerated Power Estimation: In this paper, we present power emulation, a novel design paradigm that utilizes hardware acceleration for the purpose of fast power estimation. Power emulation is based on the observation that the functions necessary for power estimation (power model evaluation, aggregation, etc.) can be implemented as hardware circuits. Therefore, we can enhance any given design with "power estimation hardware", map it to a prototyping platform, and exercise it with any given test stimuli to obtain power consumption estimates. Our empirical studies with industrial designs reveal that power emulation can achieve significant speedups (10X to 500X) over state-of-the-art commercial register-transfer level (RTL) power estimation tools.<|reference_end|> | arxiv | @article{coburn2007hardware,
title={Hardware Accelerated Power Estimation},
author={Joel Coburn, Srivaths Ravi, Anand Raghunathan},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4742},
primaryClass={cs.AR}
} | coburn2007hardware |
arxiv-1575 | 0710.4743 | Efficient Solution of Language Equations Using Partitioned Representations | <|reference_start|>Efficient Solution of Language Equations Using Partitioned Representations: A class of discrete event synthesis problems can be reduced to solving language equations F · X ⊆ S, where F is the fixed component and S the specification. Sequential synthesis deals with FSMs when the automata for F and S are prefix closed, and are naturally represented by multi-level networks with latches. For this special case, we present an efficient computation, using partitioned representations, of the most general prefix-closed solution of the above class of language equations. The transition and the output relations of the FSMs for F and S in their partitioned form are represented by the sets of output and next state functions of the corresponding networks. Experimentally, we show that using partitioned representations is much faster than using monolithic representations, as well as applicable to larger problem instances.<|reference_end|> | arxiv | @article{mishchenko2007efficient,
title={Efficient Solution of Language Equations Using Partitioned
Representations},
author={Alan Mishchenko, Robert Brayton, Roland Jiang, Tiziano Villa, Nina
Yevtushenko},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4743},
primaryClass={cs.LO}
} | mishchenko2007efficient |
arxiv-1576 | 0710.4745 | Embedded Automotive System Development Process | <|reference_start|>Embedded Automotive System Development Process: Model based design enables the automatic generation of final-build software from models for high-volume automotive embedded systems. This paper presents a framework of processes, methods and tools for the design of automotive embedded systems. A steer-by-wire system serves as an example.<|reference_end|> | arxiv | @article{langenwalter2007embedded,
title={Embedded Automotive System Development Process},
author={Joachim Langenwalter},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4745},
primaryClass={cs.OH}
} | langenwalter2007embedded |
arxiv-1577 | 0710.4746 | RTK-Spec TRON: A Simulation Model of an ITRON Based RTOS Kernel in SystemC | <|reference_start|>RTK-Spec TRON: A Simulation Model of an ITRON Based RTOS Kernel in SystemC: This paper presents the methodology and the modeling constructs we have developed to capture the real time aspects of RTOS simulation models in a System Level Design Language (SLDL) like SystemC. We describe these constructs and show how they are used to build a simulation model of an RTOS kernel targeting the $\mu$-ITRON OS specification standard.<|reference_end|> | arxiv | @article{hassan2007rtk-spec,
title={RTK-Spec TRON: A Simulation Model of an ITRON Based RTOS Kernel in
SystemC},
author={M. Abdelsalam Hassan, Keishi Sakanushi, Yoshinori Takeuchi, Masaharu
Imai},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4746},
primaryClass={cs.OS}
} | hassan2007rtk-spec |
arxiv-1578 | 0710.4747 | An Efficient Transparent Test Scheme for Embedded Word-Oriented Memories | <|reference_start|>An Efficient Transparent Test Scheme for Embedded Word-Oriented Memories: Memory cores are usually the densest portion with the smallest feature size in system-on-chip (SOC) designs. The reliability of memory cores thus has a heavy impact on the reliability of SOCs. Transparent testing is a useful technique for improving the reliability of memories during their lifetime. This paper presents a systematic algorithm for transforming a bit-oriented march test into a transparent word-oriented march test. The transformed transparent march test has lower test complexity than those proposed in previous works [Theory of transparent BIST for RAMs, A transparent online memory test for simultaneous detection of functional faults and soft errors in memories]. For example, if a memory with 32-bit words is tested with March C-, the time complexity of the transparent word-oriented test produced by the proposed scheme is only about 56% or 19% of the time complexity of the transparent word-oriented test converted by the scheme reported in [Theory of transparent BIST for RAMs] or [A transparent online memory test for simultaneous detection of functional faults and soft errors in memories], respectively.<|reference_end|> | arxiv | @article{li2007an,
title={An Efficient Transparent Test Scheme for Embedded Word-Oriented Memories},
author={Jin-Fu Li, Tsu-Wei Tseng, Chin-Long Wey},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4747},
primaryClass={cs.AR}
} | li2007an |
arxiv-1579 | 0710.4748 | Systematic Transaction Level Modeling of Embedded Systems with SystemC | <|reference_start|>Systematic Transaction Level Modeling of Embedded Systems with SystemC: This paper gives an overview of a transaction level modeling (TLM) design flow for straightforward embedded system design with SystemC. The goal is to systematically develop both application-specific HW and SW components of an embedded system using the TLM approach, thus allowing for fast communication architecture exploration, rapid prototyping and early embedded SW development. To this end, we specify the lightweight transaction-based communication protocol SHIP and present a methodology for automatic mapping of the communication part of a system to a given architecture, including HW/SW interfaces.<|reference_end|> | arxiv | @article{klingauf2007systematic,
title={Systematic Transaction Level Modeling of Embedded Systems with SystemC},
author={Wolfgang Klingauf},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4748},
primaryClass={cs.AR}
} | klingauf2007systematic |
arxiv-1580 | 0710.4750 | On the Analysis of Reed Solomon Coding for Resilience to Transient/Permanent Faults in Highly Reliable Memories | <|reference_start|>On the Analysis of Reed Solomon Coding for Resilience to Transient/Permanent Faults in Highly Reliable Memories: Single Event Upsets (SEU) as well as permanent faults can significantly affect the correct on-line operation of digital systems, such as memories and microprocessors; a memory can be made resilient to permanent and transient faults by using modular redundancy and coding. In this paper, different memory systems are compared: these systems utilize simplex and duplex arrangements with a combination of Reed Solomon coding and scrubbing. The memory systems and their operations are analyzed by novel Markov chains to characterize performance for dynamic reconfiguration as well as error detection and correction under the occurrence of permanent and transient faults. For a specific Reed Solomon code, the duplex arrangement makes it possible to cope efficiently with the occurrence of permanent faults, while the use of scrubbing copes with transient faults.<|reference_end|> | arxiv | @article{schiano2007on,
title={On the Analysis of Reed Solomon Coding for Resilience to
Transient/Permanent Faults in Highly Reliable Memories},
author={L. Schiano, M. Ottavi, F. Lombardi, S. Pontarelli, A. Salsano},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4750},
primaryClass={cs.IT math.IT}
} | schiano2007on |
arxiv-1581 | 0710.4751 | Influence of Memory Hierarchies on Predictability for Time Constrained Embedded Software | <|reference_start|>Influence of Memory Hierarchies on Predictability for Time Constrained Embedded Software: Safety-critical embedded systems having to meet real-time constraints are expected to be highly predictable in order to guarantee at design time that certain timing deadlines will always be met. This requirement usually prevents designers from utilizing caches due to their highly dynamic, thus hardly predictable behavior. The integration of scratchpad memories represents an alternative approach which allows the system to benefit from a performance gain comparable to that of caches while at the same time maintaining predictability. In this work, we compare the impact of scratchpad memories and caches on worst case execution time (WCET) analysis results. We show that caches, despite requiring complex techniques, can have a negative impact on the predicted WCET, while the estimated WCET for scratchpad memories scales with the achieved performance gain at no extra analysis cost.<|reference_end|> | arxiv | @article{wehmeyer2007influence,
title={Influence of Memory Hierarchies on Predictability for Time Constrained
Embedded Software},
author={Lars Wehmeyer, Peter Marwedel},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4751},
primaryClass={cs.AR}
} | wehmeyer2007influence |
arxiv-1582 | 0710.4752 | An Iterative Algorithm for Battery-Aware Task Scheduling on Portable Computing Platforms | <|reference_start|>An Iterative Algorithm for Battery-Aware Task Scheduling on Portable Computing Platforms: In this work we consider battery-powered portable systems that have either Field Programmable Gate Arrays (FPGAs) or voltage- and frequency-scalable processors as their main processing element. An application is modeled in the form of a precedence task graph at a coarse level of granularity. We assume that for each task in the task graph several unique design-points are available, which correspond to different hardware implementations for FPGAs and different voltage-frequency combinations for processors. It is assumed that performance and total power consumption estimates for each design-point are available for any given portable platform, including the power usage of peripheral components such as memory and display. We present an iterative heuristic algorithm which finds a sequence of tasks along with an appropriate design-point for each task, such that a deadline is met and the amount of battery energy used is as small as possible. A detailed illustrative example, along with a case study of a real-world robotic arm controller application that demonstrates the usefulness of our algorithm, is also presented.<|reference_end|> | arxiv | @article{khan2007an,
title={An Iterative Algorithm for Battery-Aware Task Scheduling on Portable
Computing Platforms},
author={Jawad Khan, Ranga Vemuri},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4752},
primaryClass={cs.OH}
} | khan2007an |
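
As a rough illustration of the design-point selection problem described in the abstract above (and not the paper's iterative heuristic), the sketch below starts every task at its fastest design point and then greedily trades slack for energy until no further switch fits within the deadline. Task names, design points and the deadline are invented.

```python
# A minimal greedy sketch of deadline-constrained design-point selection.
# Each task has several design points (execution time, energy); tasks form a
# simple chain here, and all numbers are made up for illustration.
DEADLINE = 100.0

# design points per task: (execution time, energy); faster points cost more energy
tasks = {
    "t1": [(10.0, 50.0), (15.0, 32.0), (22.0, 25.0)],
    "t2": [(20.0, 90.0), (28.0, 61.0), (35.0, 50.0)],
    "t3": [(12.0, 40.0), (18.0, 27.0)],
}

choice = {name: 0 for name in tasks}          # start with the fastest point
def total(metric):                            # metric 0 = time, 1 = energy
    return sum(tasks[n][choice[n]][metric] for n in tasks)

while True:
    slack = DEADLINE - total(0)
    best = None
    for n in tasks:                            # try slowing one task down a notch
        if choice[n] + 1 < len(tasks[n]):
            t0, e0 = tasks[n][choice[n]]
            t1, e1 = tasks[n][choice[n] + 1]
            if t1 - t0 <= slack and e0 > e1:
                gain = (e0 - e1) / (t1 - t0)   # energy saved per unit of extra time
                if best is None or gain > best[0]:
                    best = (gain, n)
    if best is None:
        break
    choice[best[1]] += 1

print("chosen design points:", choice)
print(f"schedule length = {total(0):.1f} (deadline {DEADLINE}), energy = {total(1):.1f}")
```

The paper additionally handles precedence constraints and battery-specific (rather than purely energy-based) cost, which this toy loop does not attempt to model.
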
arxiv-1583 | 0710.4753 | Verifying Safety-Critical Timing and Memory-Usage Properties of Embedded Software by Abstract Interpretation | <|reference_start|>Verifying Safety-Critical Timing and Memory-Usage Properties of Embedded Software by Abstract Interpretation: Static program analysis by abstract interpretation is an efficient method to determine properties of embedded software. One example is value analysis, which determines the values stored in the processor registers. Its results are used as input to more advanced analyses, which ultimately yield information about the stack usage and the timing behavior of embedded software.<|reference_end|> | arxiv | @article{heckmann2007verifying,
title={Verifying Safety-Critical Timing and Memory-Usage Properties of Embedded
Software by Abstract Interpretation},
author={Reinhold Heckmann, Christian Ferdinand},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4753},
primaryClass={cs.LO}
} | heckmann2007verifying |
arxiv-1584 | 0710.4754 | Design of a Virtual Component Neutral Network-on-Chip Transaction Layer | <|reference_start|>Design of a Virtual Component Neutral Network-on-Chip Transaction Layer: Research studies have demonstrated the feasibility and advantages of Network-on-Chip (NoC) over traditional bus-based architectures, but have not focused on compatibility with communication standards. This paper describes a number of issues faced when designing a VC-neutral NoC, i.e. compatible with standards such as AHB 2.0, AXI, VCI, OCP, and various other proprietary protocols, and how a layered approach to communication helps solve these issues.<|reference_end|> | arxiv | @article{martin2007design,
title={Design of a Virtual Component Neutral Network-on-Chip Transaction Layer},
author={Philippe Martin},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4754},
primaryClass={cs.AR}
} | martin2007design |
arxiv-1585 | 0710.4755 | Model Reuse through Hardware Design Patterns | <|reference_start|>Model Reuse through Hardware Design Patterns: Increasing reuse opportunities is a well-known problem for software designers as well as for hardware designers. Nonetheless, current software and hardware engineering practices have embraced different approaches to this problem. Software designs are usually modelled after a set of proven solutions to recurrent problems called design patterns. This approach differs from the component-based reuse usually found in hardware designs: design patterns do not specify unnecessary implementation details. Several authors have already proposed translating structural design pattern concepts to hardware design. In this paper we extend the discussion to behavioural design patterns. Specifically, we describe how the hardware version of the Iterator can be used to enhance model reuse.<|reference_end|> | arxiv | @article{rincon2007model,
title={Model Reuse through Hardware Design Patterns},
author={Fernando Rincon, Francisco Moya, Jesus Barba, Juan Carlos Lopez},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4755},
primaryClass={cs.SE}
} | rincon2007model |
arxiv-1586 | 0710.4756 | Design Method for Constant Power Consumption of Differential Logic Circuits | <|reference_start|>Design Method for Constant Power Consumption of Differential Logic Circuits: Side channel attacks are a major security concern for smart cards and other embedded devices. They analyze variations in the power consumption to find the secret key of the encryption algorithm implemented within the security IC. To address this issue, logic gates that have a constant power dissipation independent of the input signals are used in security ICs. This paper presents a design methodology to create fully connected differential pull down networks. Fully connected differential pull down networks are transistor networks that, for any complementary input combination, connect all the internal nodes of the network to one of the external nodes of the network. They are memoryless and for that reason have a constant load capacitance and power consumption. This type of network is used in specialized logic gates to guarantee a constant contribution of the internal nodes to the total power consumption of the logic gate.<|reference_end|> | arxiv | @article{tiri2007design,
title={Design Method for Constant Power Consumption of Differential Logic
Circuits},
author={Kris Tiri, Ingrid Verbauwhede},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4756},
primaryClass={cs.CR}
} | tiri2007design |
arxiv-1587 | 0710.4757 | Techniques for Fast Transient Fault Grading Based on Autonomous Emulation | <|reference_start|>Techniques for Fast Transient Fault Grading Based on Autonomous Emulation: Very deep submicron and nanometer technologies have notably increased integrated circuit (IC) sensitivity to radiation. Soft errors now appear in ICs operating at the earth's surface. Hardened circuits are currently required in many applications where Fault Tolerance (FT) was not a requirement until very recently. The use of platform FPGAs for the emulation of single-event upset (SEU) effects is gaining attention in order to speed up the FT evaluation. In this work, a new emulation system for FT evaluation with respect to SEU effects is proposed, providing shorter evaluation times by performing the entire evaluation process in the FPGA and avoiding emulator-host communication bottlenecks.<|reference_end|> | arxiv | @article{lopez-ongil2007techniques,
title={Techniques for Fast Transient Fault Grading Based on Autonomous
Emulation},
author={Celia Lopez-Ongil, Mario Garcia-Valderas, Marta Portela-Garcia, Luis
Entrena-Arrontes},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4757},
primaryClass={cs.AR}
} | lopez-ongil2007techniques |
arxiv-1588 | 0710.4758 | Exploiting Dynamic Workload Variation in Low Energy Preemptive Task Scheduling | <|reference_start|>Exploiting Dynamic Workload Variation in Low Energy Preemptive Task Scheduling: A novel energy reduction strategy to maximally exploit the dynamic workload variation is proposed for the offline voltage scheduling of preemptive systems. The idea is to construct a fully-preemptive schedule that leads to minimum energy consumption when the tasks take on approximately the average execution cycles yet still guarantees no deadline violation in the worst-case scenario. The end time of each task sub-instance obtained from the schedule is used for the on-line dynamic voltage scaling (DVS) of the tasks. For tasks that normally require a small number of cycles but occasionally a large number of cycles to complete, such a schedule provides more opportunities for slack utilization and hence results in larger energy savings. The concept is realized by formulating the problem as a Non-Linear Programming (NLP) optimization problem. Experimental results show that, by using the proposed scheme, the total energy consumption at runtime is reduced by as much as 60% for randomly generated task sets compared with the static scheduling approach that uses only the worst-case workload.<|reference_end|> | arxiv | @article{leung2007exploiting,
title={Exploiting Dynamic Workload Variation in Low Energy Preemptive Task
Scheduling},
author={Lap-Fai Leung, Chi-Ying Tsui, Xiaobo Sharon Hu},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4758},
primaryClass={cs.OH}
} | leung2007exploiting |
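
The abstract above formulates offline voltage scheduling as a non-linear program but does not reproduce the formulation itself. The fragment below is therefore only a toy continuous-frequency stand-in, solved with SciPy: one clock frequency is chosen per task sub-instance so that energy (taken as proportional to f^2 per cycle) is minimized while the schedule still meets the deadline. Cycle counts, the deadline and the frequency bounds are invented numbers in normalized units.

```python
# Toy stand-in for an NLP-based voltage/frequency assignment (not the paper's
# actual formulation).  Cycles are in Mcycles, frequencies in MHz, so
# Mcycles / MHz gives execution time in seconds.
from scipy.optimize import minimize

cycles   = [2.0, 1.0, 3.0]        # Mcycles per sub-instance      -- assumed
DEADLINE = 0.05                   # seconds                        -- assumed
F_MIN, F_MAX = 50.0, 400.0        # MHz                            -- assumed

def energy(f):                    # ~ Vdd^2 per cycle with Vdd ~ f (normalized units)
    return sum(n * fi ** 2 for n, fi in zip(cycles, f))

def slack(f):                     # remaining time before the deadline
    return DEADLINE - sum(n / fi for n, fi in zip(cycles, f))

res = minimize(energy, x0=[F_MAX] * len(cycles),
               bounds=[(F_MIN, F_MAX)] * len(cycles),
               constraints=[{"type": "ineq", "fun": slack}])

print("frequencies (MHz):", [round(fi, 1) for fi in res.x])
print(f"normalized energy = {energy(res.x):.1f}, remaining slack = {slack(res.x):.4f} s")
```

The paper's scheme additionally distinguishes average-case from worst-case cycle counts per sub-instance; this sketch only shows the shape of the optimization step.
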
arxiv-1589 | 0710.4759 | A Fast Concurrent Power-Thermal Model for Sub-100nm Digital ICs | <|reference_start|>A Fast Concurrent Power-Thermal Model for Sub-100nm Digital ICs: As technology scales down, the static power is expected to become a significant fraction of the total power. The exponential dependence of static power on the operating temperature makes thermal profile estimation of high-performance ICs a key issue in computing the total power dissipated in next-generation designs. In this paper we present accurate and compact analytical models to estimate the static power dissipation and the operating temperature of CMOS gates. The models form the core of a performance estimation tool in which numerical procedures are avoided for all computations, enabling faster estimation and optimization. The models developed are compared to measurements and SPICE simulations for a 0.12$\mu$m technology, showing excellent results.<|reference_end|> | arxiv | @article{rossello2007a,
title={A Fast Concurrent Power-Thermal Model for Sub-100nm Digital ICs},
author={J. L. Rossello, V. Canals, S. A. Bota, A. Keshavarzi, J. Segura},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4759},
primaryClass={cs.AR}
} | rossello2007a |
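
Because leakage depends exponentially on temperature and temperature in turn depends on the total dissipated power, the two quantities modeled in the abstract above have to be solved together. The sketch below illustrates that coupling with a simple fixed-point iteration; the thermal resistance, leakage coefficients and power numbers are invented and are not the paper's closed-form models (which deliberately avoid this kind of numerical loop).

```python
# Self-consistent power/temperature estimate with invented coefficients.
import math

T_AMB   = 45.0      # ambient / package temperature, Celsius        -- assumed
R_TH    = 2.0       # junction-to-ambient thermal resistance, C/W   -- assumed
P_DYN   = 5.0       # dynamic power, W                               -- assumed
P_LEAK0 = 1.0       # leakage at reference temperature T0, W         -- assumed
T0, K   = 25.0, 0.03   # exponential temperature coefficient, 1/C    -- assumed

def leakage(temp_c):
    return P_LEAK0 * math.exp(K * (temp_c - T0))

temp = T_AMB
for _ in range(100):                       # simple fixed-point iteration
    new_temp = T_AMB + R_TH * (P_DYN + leakage(temp))
    if abs(new_temp - temp) < 1e-6:
        break
    temp = new_temp

print(f"operating temperature ~= {temp:.2f} C, leakage ~= {leakage(temp):.2f} W")
```
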
arxiv-1590 | 0710.4760 | Low Power Oriented CMOS Circuit Optimization Protocol | <|reference_start|>Low Power Oriented CMOS Circuit Optimization Protocol: Low power oriented circuit optimization consists of selecting the best alternative among gate sizing, buffer insertion and logic structure transformation for satisfying a delay constraint at minimum area cost. In this paper we use a closed-form model of delay in CMOS structures to define metrics for a deterministic selection of the optimization alternative. The target is delay constraint satisfaction with minimum area cost. We validate the design space exploration method, defining maximum and minimum delay bounds on logical paths. Then we adapt this method to a "constant sensitivity method" which allows a circuit to be sized at minimum area under a delay constraint. An optimization protocol is finally defined to manage the trade-off between the performance constraint and the circuit structure. These methods are implemented in an optimization tool (POPS) and validated by comparing, on a 0.25$\mu$m process, the optimization efficiency obtained on various benchmarks (ISCAS'85) to that resulting from an industrial tool.<|reference_end|> | arxiv | @article{verle2007low,
title={Low Power Oriented CMOS Circuit Optimization Protocol},
author={A. Verle (LIRMM), X. Michel (LIRMM), N. Azemard (LIRMM), P. Maurine
(LIRMM), D. Auvergne (LIRMM)},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4760},
primaryClass={cs.AR}
} | verle2007low |
arxiv-1591 | 0710.4761 | Low-Cost Multi-Gigahertz Test Systems Using CMOS FPGAs and PECL | <|reference_start|>Low-Cost Multi-Gigahertz Test Systems Using CMOS FPGAs and PECL: This paper describes two research projects that develop new low-cost techniques for testing devices with multiple high-speed (2 to 5 Gbps) signals. Each project uses commercially available components to keep costs low, yet achieves performance characteristics comparable to (and in some ways exceeding) more expensive ATE. A common CMOS FPGA-based logic core provides flexibility, adaptability, and communication with controlling computers while customized positive emitter-coupled logic (PECL) achieves multi-gigahertz data rates with about $\pm$25ps timing accuracy.<|reference_end|> | arxiv | @article{keezer2007low-cost,
title={Low-Cost Multi-Gigahertz Test Systems Using CMOS FPGAs and PECL},
author={D. C. Keezer, C. Gray, A. Majid, N. Taher},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4761},
primaryClass={cs.AR}
} | keezer2007low-cost |
arxiv-1592 | 0710.4762 | Area-Efficient Selective Multi-Threshold CMOS Design Methodology for Standby Leakage Power Reduction | <|reference_start|>Area-Efficient Selective Multi-Threshold CMOS Design Methodology for Standby Leakage Power Reduction: This paper presents a design flow for an improved selective multi-threshold (Selective-MT) circuit. The Selective-MT circuit is improved so that multiple MT cells can share one switch transistor. We propose a design methodology from RTL (Register Transfer Level) to final layout, together with optimization of the switch transistor structure.<|reference_end|> | arxiv | @article{kitahara2007area-efficient,
title={Area-Efficient Selective Multi-Threshold CMOS Design Methodology for
Standby Leakage Power Reduction},
author={Takeshi Kitahara, Naoyuki Kawabe, Fimihiro Minami, Katsuhiro Seta,
Toshiyuki Furusawa},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4762},
primaryClass={cs.AR}
} | kitahara2007area-efficient |
arxiv-1593 | 0710.4763 | Logic Design for On-Chip Test Clock Generation - Implementation Details and Impact on Delay Test Quality | <|reference_start|>Logic Design for On-Chip Test Clock Generation - Implementation Details and Impact on Delay Test Quality: This paper addresses delay test for SOC devices with high frequency clock domains. A logic design for on-chip high-speed clock generation, implemented to avoid expensive test equipment, is described in detail. Techniques for on-chip clock generation, meant to reduce test vector count and to increase test quality, are discussed. ATPG results for the proposed techniques are given.<|reference_end|> | arxiv | @article{beck2007logic,
title={Logic Design for On-Chip Test Clock Generation - Implementation Details
and Impact on Delay Test Quality},
author={Matthias Beck, Olivier Barondeau, Martin Kaibel, Frank Poehl, Xijiang
Lin, Ron Press},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4763},
primaryClass={cs.AR}
} | beck2007logic |
arxiv-1594 | 0710.4764 | Hotspot Prevention Through Runtime Reconfiguration in Network-On-Chip | <|reference_start|>Hotspot Prevention Through Runtime Reconfiguration in Network-On-Chip: Many existing thermal management techniques focus on reducing the overall power consumption of the chip, and do not address location-specific temperature problems referred to as hotspots. We propose the use of dynamic runtime reconfiguration to shift the hotspot-inducing computation periodically and make the thermal profile more uniform. Our analysis shows that dynamic reconfiguration is an effective technique in reducing hotspots for NoCs.<|reference_end|> | arxiv | @article{link2007hotspot,
title={Hotspot Prevention Through Runtime Reconfiguration in Network-On-Chip},
author={G. M. Link, N. Vijaykrishnan},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4764},
primaryClass={cs.AR}
} | link2007hotspot |
arxiv-1595 | 0710.4780 | Querying XML Documents in Logic Programming | <|reference_start|>Querying XML Documents in Logic Programming: Extensible Markup Language (XML) is a simple, very flexible text format derived from SGML. Originally designed to meet the challenges of large-scale electronic publishing, XML is also playing an increasingly important role in the exchange of a wide variety of data on the Web and elsewhere. The XPath language is the result of an effort to provide a means of addressing parts of an XML document. In support of this primary purpose, it also serves as a query language against an XML document. In this paper we present a proposal for the implementation of the XPath language in logic programming. With this aim we will describe the representation of XML documents by means of a logic program. Rules and facts can be used for representing the document schema and the XML document itself. In particular, we will present how to index XML documents in logic programs: rules are supposed to be stored in main memory, whereas facts are stored in secondary memory by using two kinds of indexes: one for each XML tag, and another for each group of terminal items. In addition, we will study how to query by means of the XPath language against a logic program representing an XML document. This involves the specialization of the logic program with respect to the XPath expression. Finally, we will also explain how to combine the indexing and the top-down evaluation of the logic program. To appear in Theory and Practice of Logic Programming (TPLP).<|reference_end|> | arxiv | @article{almendros-jiménez2007querying,
title={Querying XML Documents in Logic Programming},
  author={J. M. Almendros-Jiménez and A. Becerra-Terón and F. J.
Enciso-Baños},
journal={arXiv preprint arXiv:0710.4780},
year={2007},
archivePrefix={arXiv},
eprint={0710.4780},
primaryClass={cs.PL cs.DB}
} | almendros-jiménez2007querying |
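
To make the fact-based document representation described in the abstract above concrete, the sketch below mimics it in Python rather than in a logic language: each element becomes a (node id, parent id) fact indexed by its tag, and a child-axis path such as /bib/book/title is answered by joining over parent links. The example document, the helper names and the restriction to the child axis are assumptions made here for illustration; the paper itself works with rules, facts on secondary storage and XPath specialization.

```python
# Tag-indexed "facts" for an XML document and a simple child-axis query.
import xml.etree.ElementTree as ET
from collections import defaultdict
from itertools import count

doc = "<bib><book><title>TPLP</title></book><book><title>XPath</title></book></bib>"

facts_by_tag = defaultdict(list)   # tag -> [(node_id, parent_id)] facts
texts = {}                         # node_id -> element text
_ids = count()

def load(elem, parent_id):
    """Turn an element tree into tag-indexed node facts."""
    nid = next(_ids)
    facts_by_tag[elem.tag].append((nid, parent_id))
    if elem.text and elem.text.strip():
        texts[nid] = elem.text.strip()
    for child in elem:
        load(child, nid)

load(ET.fromstring(doc), parent_id=None)

def child_path(tags):
    """Evaluate a pure child-axis path such as ['bib', 'book', 'title']."""
    frontier = {None}                              # parents matched so far
    for tag in tags:
        frontier = {nid for nid, par in facts_by_tag[tag] if par in frontier}
    return frontier

for nid in sorted(child_path(["bib", "book", "title"])):
    print(texts.get(nid))          # -> TPLP, XPath
```
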
arxiv-1596 | 0710.4793 | Unified Modeling of Complex Real-Time Control Systems | <|reference_start|>Unified Modeling of Complex Real-Time Control Systems: A complex real-time control system is a software-dense and algorithm-dense system that requires modern software engineering techniques to design. UML is an object-oriented industry-standard modeling language that is used more and more in the real-time domain. This paper first analyses the advantages and problems of using UML for real-time control system design. Then, it proposes an extension of UML-RT to support the modeling of time-continuous subsystems. In this way, the modeling of complex real-time control systems can be unified on the UML-RT platform, from requirements analysis, model design and simulation through to code generation.<|reference_end|> | arxiv | @article{hai2007unified,
title={Unified Modeling of Complex Real-Time Control Systems},
author={He Hai, Zhong Yi-Fang, Cai Chi-Lan},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4793},
primaryClass={cs.SE}
} | hai2007unified |
arxiv-1597 | 0710.4794 | Power-Performance Trade-Offs in Nanometer-Scale Multi-Level Caches Considering Total Leakage | <|reference_start|>Power-Performance Trade-Offs in Nanometer-Scale Multi-Level Caches Considering Total Leakage: In this paper, we investigate the impact of T_{ox} and Vth on power-performance trade-offs for on-chip caches. We start by examining the optimization of the various components of a single-level cache and then extend this to two-level cache systems. In addition to leakage, our studies also account for the dynamic power expended as a result of cache misses. Our results show that one can often reduce overall power by increasing the size of the L2 cache if we only allow one pair of Vth/T_{ox} in L2. However, if we allow the memory cells and the peripherals to have their own Vth's and T_{ox}'s, we show that a two-level cache system with smaller L2's will yield less total leakage. We further show that two Vth's and two T_{ox}'s are sufficient to get close to an optimal solution, and that Vth is generally a better design knob than T_{ox} for leakage optimization; thus it is better to restrict the number of T_{ox}'s rather than Vth's if cost is a concern.<|reference_end|> | arxiv | @article{bai2007power-performance,
title={Power-Performance Trade-Offs in Nanometer-Scale Multi-Level Caches
Considering Total Leakage},
author={Robert Bai, Nam-Sung Kim, Tae Ho Kgil, Dennis Sylvester, Trevor Mudge},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4794},
primaryClass={cs.AR}
} | bai2007power-performance |
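
A tiny brute-force exploration in the spirit of the study above: assign one (Vth, Tox) pair to the SRAM cells and another to the peripheral circuitry, and keep the assignment with the lowest leakage that still meets an access-time budget. The leakage and delay expressions are generic textbook-style stand-ins with invented coefficients, not the models or technology parameters used in the paper.

```python
# Exhaustive (Vth, Tox) assignment for cells vs. periphery, toy models only.
import itertools, math

VTH_CHOICES = [0.20, 0.30, 0.40]        # volts                      -- assumed menu
TOX_CHOICES = [1.2e-9, 1.6e-9]          # metres                     -- assumed menu
T_ACCESS_MAX = 1.30                      # normalized access-time budget -- assumed

def leakage(vth, tox, n_devices):
    sub  = math.exp(-vth / 0.040)                    # subthreshold component
    gate = math.exp(-(tox - 1.2e-9) / 0.2e-9)        # gate-oxide component
    return n_devices * (sub + 0.3 * gate)

def delay(vth, tox):
    # slower with higher Vth (less overdrive) and thicker oxide (less current)
    return 1.0 + 1.5 * (vth - 0.20) + 0.2e9 * (tox - 1.2e-9)

best = None
pairs = itertools.product(VTH_CHOICES, TOX_CHOICES)
for cell, peri in itertools.product(pairs, repeat=2):
    t = 0.6 * delay(*cell) + 0.4 * delay(*peri)      # assumed delay split
    if t > T_ACCESS_MAX:
        continue
    p = leakage(*cell, n_devices=1e6) + leakage(*peri, n_devices=1e5)
    if best is None or p < best[0]:
        best = (p, cell, peri, t)

p, cell, peri, t = best
print(f"cells (Vth, Tox) = {cell}, periphery = {peri}, "
      f"access time = {t:.2f}, leakage = {p:.3e} (arbitrary units)")
```

The paper's conclusion that two Vth's and two Tox's suffice corresponds, in this toy setting, to the search menu being small without losing much optimality.
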
arxiv-1598 | 0710.4795 | Test Time Reduction Reusing Multiple Processors in a Network-on-Chip Based Architecture | <|reference_start|>Test Time Reduction Reusing Multiple Processors in a Network-on-Chip Based Architecture: The increasing complexity and the short life cycles of embedded systems are pushing current system-on-chip designs towards a rapid increase in the number of programmable processing units, while the gate count for custom logic decreases. Considering this trend, this work proposes a test planning method capable of reusing available processors as test sources and sinks, and the on-chip network as the test access mechanism. Experimental results are based on the ITC'02 benchmarks and on two open core processors compliant with the MIPS and SPARC instruction sets. The results show that the cooperative use of both the on-chip network and the embedded processors can increase test parallelism and reduce test time without additional cost in area and pins.<|reference_end|> | arxiv | @article{amory2007test,
title={Test Time Reduction Reusing Multiple Processors in a Network-on-Chip
Based Architecture},
author={Alexandre M. Amory, Marcelo Lubaszewski, Fernando G. Moraes, Edson I.
Moreno},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4795},
primaryClass={cs.AR}
} | amory2007test |
arxiv-1599 | 0710.4796 | A Hybrid Prefetch Scheduling Heuristic to Minimize at Run-Time the Reconfiguration Overhead of Dynamically Reconfigurable Hardware | <|reference_start|>A Hybrid Prefetch Scheduling Heuristic to Minimize at Run-Time the Reconfiguration Overhead of Dynamically Reconfigurable Hardware: Due to the emergence of highly dynamic multimedia applications there is a need for flexible platforms and run-time scheduling support for embedded systems. Dynamically Reconfigurable Hardware (DRHW) is a promising candidate to provide this flexibility, but sufficient run-time scheduling support to deal with the run-time reconfigurations does not currently exist. Moreover, executing a complex scheduling heuristic at run-time to provide this support may generate an excessive run-time penalty. Hence, we have developed a hybrid design/run-time prefetch heuristic that schedules the reconfigurations at run-time, but carries out the scheduling computations at design-time by carefully identifying a set of near-optimal schedules that can be selected at run-time. This approach provides run-time flexibility with a negligible penalty.<|reference_end|> | arxiv | @article{resano2007a,
title={A Hybrid Prefetch Scheduling Heuristic to Minimize at Run-Time the
Reconfiguration Overhead of Dynamically Reconfigurable Hardware},
author={Javier Resano, Daniel Mozos, Francky Catthoor},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4796},
primaryClass={cs.AR}
} | resano2007a |
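
The hybrid idea in the abstract above, computing schedules at design time and only selecting among them at run time, can be caricatured as a lookup table plus a cheap run-time selector, as in the sketch below. Scenario names, task names and the fallback policy are invented for illustration and are not the paper's heuristic.

```python
# Design-time part: a precomputed table of near-optimal prefetch schedules.
# Run-time part: a table lookup plus issuing the reconfiguration prefetches.
PRECOMPUTED = {
    # scenario identified at run time  ->  prefetch (reconfiguration) order
    ("video", "high_motion"): ["DCT", "ME", "VLC"],
    ("video", "low_motion"):  ["DCT", "VLC", "ME"],
    ("audio", None):          ["FFT", "MDCT"],
}

def runtime_prefetch(scenario, issue):
    """Cheap run-time step: select a stored schedule and issue its prefetches."""
    schedule = PRECOMPUTED.get(scenario)
    if schedule is None:                       # unseen scenario: crude fallback
        schedule = next(iter(PRECOMPUTED.values()))
    for core in schedule:
        issue(core)
    return schedule

chosen = runtime_prefetch(("video", "high_motion"),
                          issue=lambda core: print(f"prefetch bitstream for {core}"))
print("schedule used:", chosen)
```
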
arxiv-1600 | 0710.4797 | Rapid Generation of Thermal-Safe Test Schedules | <|reference_start|>Rapid Generation of Thermal-Safe Test Schedules: Overheating has been acknowledged as a major issue in testing complex SOCs. Several power constrained system-level DFT solutions (power constrained test scheduling) have recently been proposed to tackle this problem. However, as it will be shown in this paper, imposing a chip-level maximum power constraint doesn't necessarily avoid local overheating due to the non-uniform distribution of power across the chip. This paper proposes a new approach for dealing with overheating during test, by embedding thermal awareness into test scheduling. The proposed approach facilitates rapid generation of thermal-safer test schedules without requiring time-consuming thermal simulations. This is achieved by employing a low-complexity test session thermal model used to guide the test schedule generation algorithm. This approach reduces the chances of a design re-spin due to potential overheating during test.<|reference_end|> | arxiv | @article{rosinger2007rapid,
title={Rapid Generation of Thermal-Safe Test Schedules},
author={Paul Rosinger, Bashir Al-Hashimi, Krishnendu Chakrabarty},
journal={Dans Design, Automation and Test in Europe - DATE'05, Munich :
Allemagne (2005)},
year={2007},
archivePrefix={arXiv},
eprint={0710.4797},
primaryClass={cs.OH}
} | rosinger2007rapid |
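
As a loose illustration of power/thermal-constrained session formation (not the paper's algorithm, and with a crude per-session power budget standing in for its session thermal model), the sketch below packs core tests into sessions with a first-fit-decreasing heuristic. Core test powers and the budget are invented.

```python
# Greedy packing of core tests into sessions under a per-session power budget.
core_test_power = {"c1": 3.0, "c2": 2.5, "c3": 1.8, "c4": 1.2, "c5": 0.9}
SESSION_BUDGET = 4.0        # crude stand-in for a thermal-safety limit -- assumed

sessions = []
for core, power in sorted(core_test_power.items(), key=lambda kv: -kv[1]):
    for s in sessions:                       # first-fit decreasing packing
        if sum(core_test_power[c] for c in s) + power <= SESSION_BUDGET:
            s.append(core)
            break
    else:
        sessions.append([core])

for i, s in enumerate(sessions, 1):
    print(f"session {i}: {s}  (power {sum(core_test_power[c] for c in s):.1f})")
```

A thermally aware version would additionally consider where the cores sit on the die, so that two hot neighbours are not tested in the same session even if their summed power fits the budget.
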