//
// EditingViewController.swift
// BlockDataSource
//
// Created by Adam Cumiskey on 11/10/16.
// Copyright © 2016 CocoaPods. All rights reserved.
//
import Foundation
import BlockDataSource
struct Item: Equatable {
    var title: String
}

extension Item {
    func configureCell(cell: Cell) -> Void {
        cell.textLabel?.text = title
    }
}

func ==(lhs: Item, rhs: Item) -> Bool {
    return lhs.title == rhs.title
}

class EditingViewController: BlockTableViewController {
    var data: [Item]?

    override func viewDidLoad() {
        super.viewDidLoad()
        navigationItem.rightBarButtonItem = editButtonItem
        data = (0..<5).map { Item(title: "\($0)") }
    }

    override func configureDataSource(dataSource: BlockTableDataSource) {
        guard let data = data else { return }
        dataSource.sections = [
            TableSection(
                rows: data.map { item in
                    return TableRow(
                        configure: item.configureCell,
                        onDelete: { [unowned self] indexPath in
                            if let index = self.data!.index(of: item) {
                                self.data?.remove(at: index)
                            }
                        }
                    )
                }
            )
        ]
        dataSource.onReorder = { [unowned self] (firstIndex, secondIndex) in
            self.data!.moveObjectAtIndex(firstIndex.row, toIndex: secondIndex.row)
            self.reloadUI()
        }
    }
}

extension Array {
    mutating func moveObjectAtIndex(_ index: Int, toIndex: Int) {
        let element = self[index]
        remove(at: index)
        insert(element, at: toIndex)
    }
}
|
Board Thread:Game Discussion/@comment-10287686-20130615050156/@comment-10287686-20130615165924
Kiwi999 wrote: I found 7 in mission and asked for help.
Updated: met 11 this morning, 3 escaped. Do you have any space for an ally by chance? :)
|
//
// MapViewPlus
//
// Created by Okhan Okbay on 10/03/2018
//
// The MIT License (MIT)
//
// Copyright (c) 2018 okhanokbay
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
import Foundation
import MapKit
open class AnnotationViewPlus: MKAnnotationView {}
open class MapViewPlus: MKMapView {

    fileprivate weak var generalDelegate: MapViewPlusDelegate?
    public weak var calloutViewCustomizerDelegate: CalloutViewCustomizerDelegate?
    public weak var anchorViewCustomizerDelegate: AnchorViewCustomizerDelegate?

    fileprivate let pinID = "_MapViewPlusPinID"
    fileprivate var showingCalloutView: UIView?

    // These properties can be set via customizerDelegate.
    public fileprivate(set) var defaultHeightForAnchors: CGFloat = 16
    public fileprivate(set) var defaultInsetForCalloutView: CGFloat = 0
    public fileprivate(set) var defaultFillColorForAnchors: UIColor = UIColor.white

    public var calloutViewHorizontalInset: CGFloat = 8
    public fileprivate(set) var animator = MapViewPlusAnimator()

    open override var delegate: MKMapViewDelegate? {
        get {
            return super.delegate
        }
        set(delegate) {
            guard let delegate = delegate else { return }
            generalDelegate = delegate as? MapViewPlusDelegate
            super.delegate = self
        }
    }

    open func setup(withAnnotations annotations: [AnnotationPlus]) {
        DispatchQueue.main.async {
            for annotation in annotations {
                self.addAnnotation(annotation)
            }
            self.generalDelegate?.mapView(self, didAddAnnotations: annotations)
        }
    }
}
// MARK: Protocol helper that forwards MKMapViewDelegate methods, if not handled by MapViewPlus, to the original delegate.
extension MapViewPlus {

    private func verifyProtocol(_ aProtocol: Protocol, contains aSelector: Selector) -> Bool {
        return protocol_getMethodDescription(aProtocol, aSelector, true, true).name != nil || protocol_getMethodDescription(aProtocol, aSelector, false, true).name != nil
    }

    open override func responds(to aSelector: Selector!) -> Bool {
        if verifyProtocol(MKMapViewDelegate.self, contains: aSelector) {
            return (super.responds(to: aSelector)) || (generalDelegate?.responds(to: aSelector) ?? false)
        }
        return super.responds(to: aSelector)
    }

    open override func forwardingTarget(for aSelector: Selector!) -> Any? {
        if verifyProtocol(MKMapViewDelegate.self, contains: aSelector) {
            return generalDelegate
        }
        // Calling forwardingTarget(for:) on self here would recurse forever; defer to the superclass instead.
        return super.forwardingTarget(for: aSelector)
    }
}
extension MapViewPlus: MKMapViewDelegate {

    open func mapView(_ mapView: MKMapView, viewFor annotation: MKAnnotation) -> MKAnnotationView? {
        if annotation is MKUserLocation { return nil }

        var annotationView = self.dequeueReusableAnnotationView(withIdentifier: pinID)
        if annotationView == nil {
            annotationView = AnnotationViewPlus(annotation: annotation, reuseIdentifier: pinID)
            annotationView?.canShowCallout = false
        } else {
            annotationView?.annotation = annotation
        }
        annotationView?.image = generalDelegate?.mapView(self, imageFor: annotation as! AnnotationPlus)
        annotationView?.centerOffset = CGPoint(x: 0, y: -annotationView!.bounds.size.height * 0.5)
        return annotationView
    }

    open func mapView(_ mapView: MKMapView, didSelect view: MKAnnotationView) {
        if view.annotation is MKUserLocation { return }

        let annotation = view.annotation as! AnnotationPlus
        let annotationView = view as! AnnotationViewPlus

        let calloutView = generalDelegate!.mapView(self, calloutViewFor: annotationView)
        calloutView.configureCallout(annotation.viewModel)

        let anchorView = getAnchorView(of: calloutView, relatedToAnnotationView: annotationView)
        customize(calloutView, and: anchorView, relatedTo: annotationView)

        let calloutAsView = calloutView as! UIView
        let combinedView = combine(calloutView, and: anchorView, relatedTo: annotationView)
        view.addSubview(combinedView)

        place(calloutView: calloutView, relatedToAnnotation: annotation, andAnnotationView: annotationView, anchorView)

        let animationType = calloutViewCustomizerDelegate?.mapView(self, animationTypeForShowingCalloutViewOf: annotationView) ?? self.animator.defaultShowingAnimationType
        self.animator.show(calloutAsView, andAnchorView: anchorView, combinedView: combinedView, withType: animationType)

        generalDelegate?.mapView?(self, didSelect: view)
    }

    private func place(calloutView: CalloutViewPlus, relatedToAnnotation annotation: AnnotationPlus, andAnnotationView annotationView: AnnotationViewPlus, _ anchorView: AnchorView) {
        let inset = calloutViewCustomizerDelegate?.mapView(self, insetFor: calloutView) ?? defaultInsetForCalloutView
        let calloutAsView = calloutView as! UIView

        let bottomOfAnnotation = self.convert(annotation.coordinate, toPointTo: self)
        let bottomOfAnnotationView = CGPoint(x: bottomOfAnnotation.x, y: bottomOfAnnotation.y)

        let padding: CGFloat = 8
        var xAddition: CGFloat = 0
        var yAddition: CGFloat = 0

        let conditionalWidth = calloutAsView.bounds.width / 2 + padding
        let conditionalHeight = calloutAsView.bounds.height + annotationView.bounds.height + anchorView.bounds.height + inset + padding

        if bottomOfAnnotationView.x - conditionalWidth < 0 {
            xAddition = bottomOfAnnotationView.x - conditionalWidth
        } else if bottomOfAnnotationView.x + conditionalWidth > bounds.width {
            xAddition = conditionalWidth - (bounds.width - bottomOfAnnotationView.x)
        }

        if bottomOfAnnotationView.y - conditionalHeight < 0 {
            yAddition = -(conditionalHeight - bottomOfAnnotationView.y)
        }

        let coordinateForNewCenterOfMapView = self.convert(CGPoint(x: self.bounds.width / 2 + xAddition, y: self.bounds.height / 2 + yAddition), toCoordinateFrom: self)
        self.setCenter(coordinateForNewCenterOfMapView, animated: true)
    }

    open func mapView(_ mapView: MKMapView, didDeselect view: MKAnnotationView) {
        if view.annotation is MKUserLocation { return }

        for subview in view.subviews {
            guard subview is CalloutAndAnchorView else { return }
            let annotationView = view as! AnnotationViewPlus
            let animationType = calloutViewCustomizerDelegate?.mapView(self, animationTypeForHidingCalloutViewOf: annotationView) ?? self.animator.defaultHidingAnimationType
            self.animator.hide(subview, withAnimationType: animationType, completion: { [weak self] in
                self?.showingCalloutView = nil
            })
        }
        generalDelegate?.mapView?(self, didDeselect: view)
    }

    open func removeAllAnnotations() {
        let annotations = self.annotations.filter { [weak self] in
            return ($0 !== self?.userLocation)
        }
        self.removeAnnotations(annotations)
    }
}
extension MapViewPlus: UIGestureRecognizerDelegate {
    public func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldReceive touch: UITouch) -> Bool {
        return !(touch.view is UIControl)
    }
}

extension MapViewPlus {
    open override func hitTest(_ point: CGPoint, with event: UIEvent?) -> UIView? {
        guard let showingCalloutView = showingCalloutView else {
            return super.hitTest(point, with: event)
        }
        return showingCalloutView.hitTest(showingCalloutView.convert(point, from: self), with: event) ?? super.hitTest(point, with: event)
    }
}

// MARK: AnchorView Generator
extension MapViewPlus {
    fileprivate func getAnchorView(of calloutView: CalloutViewPlus, relatedToAnnotationView annotationView: AnnotationViewPlus) -> AnchorView {
        let anchorHeight = anchorViewCustomizerDelegate?.mapView(self, heightForAnchorOf: calloutView) ?? defaultHeightForAnchors
        let anchorFillColor = anchorViewCustomizerDelegate?.mapView(self, fillColorForAnchorOf: calloutView)
        return AnchorViewHelper().getDefaultAnchor(withHeight: anchorHeight, fillColor: anchorFillColor)
    }
}

// MARK: CalloutView Generator
extension MapViewPlus {
    fileprivate func customize(_ calloutView: CalloutViewPlus, and anchorView: AnchorView, relatedTo annotationView: AnnotationViewPlus) {
        let centerForCalloutView = calloutViewCustomizerDelegate?.mapView(self, centerForCalloutViewOf: annotationView) ?? .defaultCenter
        let boundsForCalloutView = calloutViewCustomizerDelegate?.mapView(self, boundsForCalloutViewOf: annotationView) ?? .defaultBounds
        let inset = calloutViewCustomizerDelegate?.mapView(self, insetFor: calloutView) ?? defaultInsetForCalloutView

        let calloutAsView = calloutView as! UIView
        showingCalloutView = calloutAsView

        switch centerForCalloutView {
        case CalloutViewPlusCenter.defaultCenter:
            calloutAsView.center = CGPoint(x: annotationView.bounds.size.width / 2,
                                           y: (-calloutAsView.bounds.height / 2) + (-anchorView.bounds.size.height) + (-inset))
        case CalloutViewPlusCenter.customCenter(let center):
            calloutAsView.center = center
        }

        switch boundsForCalloutView {
        case CalloutViewPlusBound.defaultBounds:
            break
        case CalloutViewPlusBound.customBounds(let bounds):
            calloutAsView.bounds = bounds
        }
    }
}

extension MapViewPlus {
    fileprivate func combine(_ calloutView: CalloutViewPlus, and anchorView: AnchorView, relatedTo annotationView: AnnotationViewPlus) -> CalloutAndAnchorView {
        let calloutAsView = calloutView as! UIView
        let calloutAndAnchorView = CalloutAndAnchorView(frame: CGRect(x: calloutAsView.frame.origin.x,
                                                                      y: calloutAsView.frame.origin.y,
                                                                      width: calloutAsView.frame.size.width,
                                                                      height: calloutAsView.frame.size.height + anchorView.bounds.size.height),
                                                        calloutView: calloutView,
                                                        anchorView: anchorView)
        return calloutAndAnchorView
    }
}
|
swb: AddressOf, FieldWithBarrier, build Full
swb: WriteBarrierPtr: swap "&" and AddressOf
Overriding "&" operator keeps running into new trouble. This change
removes "&" operator override and switches "AddressOf" to do that.
swb: add FieldWithBarrier macro
And remove FORCE_USE_WRITE_BARRIER-based macro redefinitions. We've run
into confusing effects of macro redefinition several times.
Included one local inline slot annotation point fix.
swb: RecyclerJITTypeHolder rename
Rename JITTypeHolderObject to RecyclerJITTypeHolder (previous CR feedback)
swb: misc fixes to build chakra full
@dotnet-bot test Linux tests please
:shipit:
|
# frozen_string_literal: true

module Stripe
  class AlipayAccount < APIResource
    include Stripe::APIOperations::Save
    include Stripe::APIOperations::Delete

    OBJECT_NAME = "alipay_account"

    def resource_url
      if !respond_to?(:customer) || customer.nil?
        raise NotImplementedError,
              "Alipay accounts cannot be accessed without a customer ID."
      end

      "#{Customer.resource_url}/#{CGI.escape(customer)}/sources" \
        "/#{CGI.escape(id)}"
    end

    def self.update(_id, _params = nil, _opts = nil)
      raise NotImplementedError,
            "Alipay accounts cannot be updated without a customer ID. " \
            "Update an Alipay account using `Customer.update_source(" \
            "'customer_id', 'alipay_account_id', update_params)`"
    end

    def self.retrieve(_id, _opts = nil)
      raise NotImplementedError,
            "Alipay accounts cannot be retrieved without a customer ID. " \
            "Retrieve an Alipay account using `Customer.retrieve_source(" \
            "'customer_id', 'alipay_account_id')`"
    end
  end
end
|
Memory Alpha:Pages for deletion/Level 1 Entertainment
* If you want to discuss this suggestion, add comments to the section "Discussion".
Deletion rationale
Discussion
Admin resolution
Nomination withdrawn. --From Andoria with Love 00:19, 24 November 2008 (UTC)
|
Ear proximity detection
ABSTRACT
Embodiments of the disclosure include methods, apparatus and computer programs for detecting the proximity of an ear to an audio device. In one embodiment, the disclosure provides a system for detecting the presence of an ear in proximity to an audio device. The system comprises: an input for obtaining a data signal from the environment of the audio device; and an ear biometric authentication module configured to: compare one or more ear biometric features, extracted from the data signal, to an ear biometric template; and generate a first output indicative of the presence or absence of any ear in proximity to the audio device based on the comparison of the one or more extracted ear biometric features to the ear biometric template.
TECHNICAL FIELD
Embodiments of the present disclosure relate to apparatus, systems, methods and computer programs for ear proximity detection. In particular, embodiments of the present disclosure relate to apparatus, systems, methods and computer programs for detecting the presence or absence of an ear in proximity to an audio device.
BACKGROUND
It is known that the acoustic properties of a user's ear, whether the outer parts (known as the pinna or auricle), the ear canal or both, differ substantially between individuals and can therefore be used as a biometric to identify the user. One method for achieving this is for one or more loudspeakers (or similar transducers) positioned close to or within the ear to generate an acoustic stimulus, and one or more microphones similarly positioned close to or within the ear to detect the acoustic response of the ear to the acoustic stimulus. One or more features may be extracted from the response signal, and used to characterize the individual.
For example, the ear canal is a resonant system, and therefore one feature which may be extracted from the response signal is the resonant frequency of the ear canal. If the measured resonant frequency (i.e. in the response signal) differs from a stored resonant frequency for the user, a biometric algorithm coupled to receive and analyse the response signal may return a negative result. Other features of the response signal may be similarly extracted and used to characterize the individual. For example, the features may comprise one or more mel frequency cepstrum coefficients. More generally, the transfer function between the acoustic stimulus and the measured response signal (or features of the transfer function) may be determined, and compared to a stored transfer function (or stored features of the transfer function) which is characteristic of the user.
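By way of illustration only (this sketch is not part of the disclosure), the following Swift fragment shows one simple way such a comparison could be realised: a magnitude transfer function is estimated from the stimulus and response spectra and scored against a stored template using a Euclidean distance. The function names, the epsilon guard and the choice of distance measure are assumptions made for the example rather than features of the disclosure.
import Foundation

/// Per-frequency magnitude transfer function |H(f)| = |response(f)| / |stimulus(f)|.
/// Both inputs are magnitude spectra of equal length; a small epsilon avoids division by zero.
func magnitudeTransferFunction(stimulus: [Double], response: [Double]) -> [Double] {
    precondition(stimulus.count == response.count, "Spectra must have the same length")
    return zip(response, stimulus).map { r, s in r / max(s, 1e-12) }
}

/// Euclidean distance between the measured transfer function and a stored template;
/// a small distance indicates a likely match with the enrolled ear.
func distance(from measured: [Double], to template: [Double]) -> Double {
    precondition(measured.count == template.count)
    return sqrt(zip(measured, template).map { ($0 - $1) * ($0 - $1) }.reduce(0, +))
}

// Toy usage with made-up four-bin spectra and a hypothetical stored "ear print".
let measuredH = magnitudeTransferFunction(stimulus: [1.0, 1.0, 1.0, 1.0],
                                          response: [0.8, 1.4, 0.9, 0.3])
let storedTemplate = [0.7, 1.5, 1.0, 0.4]
print("distance to template:", distance(from: measuredH, to: storedTemplate))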
The acoustic stimulus may be generated and the response measured using a personal audio device, such as earphones, headphones or a mobile phone. Power consumption is of critical importance in such personal audio devices, as space is limited and thus battery size is also limited. The battery lifetime between consecutive charges is a key performance indicator for users when selecting a device.
In order to reduce power consumption, many personal audio devices have a dedicated “in-ear detect” function, operable to detect the presence or absence of an ear in proximity to the device. If no ear is detected, the device may be placed in a low-power state in order to conserve power; if an ear is detected, the device may be placed in a relatively high-power state.
In-ear detect functions may also be used for other purposes. For example, a mobile phone may utilize an in-ear detect function to lock a touchscreen when the phone is placed close to the user's ear, in order to prevent inadvertent touch input while on a call. For example, a personal audio device may pause audio playback responsive to detection of the personal audio device being removed from the user's ears, or un-pause audio upon detection of the personal audio device being applied to the user's ears.
Various mechanisms for in-ear detect are known in the art. For example, infra-red sensors have been used in mobile phones to detect the proximity of an ear. Light sensors have been proposed to detect the insertion of earphones and headphones into or on to a user's ears. However, all of these mechanisms suffer from the drawback that they require additional hardware in the device for the purposes of in-ear detect. Additional sensors may be required, and/or additional processing circuitry for processing of the sensor output signals.
SUMMARY
Apparatus, systems, methods and computer programs for ear proximity detection are proposed, which attempt to alleviate or mitigate one or more of the problems set out above.
In a first aspect, there is provided a system for detecting the presence of an ear in proximity to an audio device. The system comprises: an input for obtaining a data signal from the environment of the audio device; and an ear biometric authentication module configured to: compare one or more ear biometric features, extracted from the data signal, to an ear biometric template; and generate a first output indicative of the presence or absence of any ear in proximity to the audio device based on the comparison of the one or more extracted ear biometric features to the ear biometric template.
A further aspect provides an electronic device, comprising the system recited above.
Another aspect of the disclosure provides a method of detecting the presence of an ear in proximity to an audio device. The method comprises: obtaining a data signal from the environment of the audio device; extracting one or more ear biometric features from the data signal; comparing the one or more extracted ear biometric features to an ear biometric template for an authorised user of the audio device; and generating a first output indicative of the presence or absence of any ear in proximity to the audio device based on the comparison of the one or more extracted ear biometric features to the ear biometric template for the authorised user.
A further aspect provides an electronic apparatus comprising processing circuitry and a non-transitory machine-readable medium storing instructions which, when executed by the processing circuitry, cause the electronic apparatus to implement a method as recited above.
Another aspect provides a non-transitory machine-readable medium storing instructions which, when executed by processing circuitry, cause an electronic apparatus to implement a method as recited above.
A further aspect of the disclosure provides a system for detecting the presence of an ear in proximity to an audio device. The system comprises: an input for obtaining a data signal from the environment of the audio device; and an ear biometric authentication module configured to: extract one or more first ear biometric features from a first number of data frames of the data signal; calculate a first score being indicative of a distance between the one or more extracted ear biometric features and an ear biometric template of an authorised user; compare the first score to a first threshold to determine the presence or absence of any ear in proximity to the audio device; responsive to a determination of the presence of any ear in proximity to the audio device, extract one or more second ear biometric features from a second number of data frames of the data signal, the second number of data frames being greater than the first number of data frames; calculate a second score being indicative of a distance between the one or more second extracted ear biometric features and the ear biometric template of the authorised user; and compare the second score to a second threshold, different from the first threshold, to determine the presence or absence of an ear of the authorised user in proximity to the audio device.
Another aspect of the disclosure provides a system for detecting the presence of an ear in proximity to an audio device. The system comprises: an input for obtaining a data signal from the environment of the audio device; and an ear biometric authentication module configured to: compare one or more ear biometric features, extracted from the data signal, to an ear biometric template of an authorised user; calculate one or more scores based on the comparison, the one or more scores being indicative of a distance between the one or more extracted ear biometric features and the ear biometric template; compare the one or more scores to first and second thresholds, wherein the first and second thresholds are different to each other; and generate a first output indicative of the presence or absence of any ear in proximity to the audio device based on the comparison of the one or more scores to the first threshold, and generate a second output indicative of the presence or absence of an ear of the authorised user in proximity to the audio device based on the comparison of the one or more scores to the second threshold.
Another aspect of the disclosure provides the use of a biometric processor for detecting the presence of any ear in proximity to an audio device.
BRIEF DESCRIPTION OF THE DRAWINGS
For a better understanding of examples of the present disclosure, and to show more clearly how the examples may be carried into effect, reference will now be made, by way of example only, to the following drawings in which:
FIGS. 1a to 1e show examples of personal audio devices;
FIG. 2 shows an arrangement according to embodiments of the disclosure;
FIG. 3 is a schematic graph of biometric scores according to embodiments of the disclosure;
FIG. 4 shows a system according to embodiments of the disclosure;
FIG. 5 shows acquisition of an audio signal according to embodiments of the disclosure; and
FIG. 6 is a flowchart of a method according to embodiments of the disclosure.
DETAILED DESCRIPTION
Embodiments of the present disclosure provide apparatus, systems, methods and computer programs for ear proximity detection. In particular, the embodiments utilize a biometric process, based on one or more ear biometric features, to detect the presence or absence of an ear from the vicinity of a personal audio device.
As used herein, the term “personal audio device” is any electronic device which is suitable for, or configurable to, provide audio playback substantially to only a single user. Some examples of suitable personal audio devices are shown in FIGS. 1a to 1e.
FIG. 1a shows a schematic diagram of a user's ear, comprising the (external) pinna or auricle 12 a, and the (internal) ear canal 12 b. A personal audio device 20 comprising a circum-aural headphone is worn by the user over the ear. The headphone comprises a shell which substantially surrounds and encloses the auricle 12 a, so as to provide a physical barrier between the user's ear and the external environment. Cushioning or padding may be provided at an edge of the shell, so as to increase the comfort of the user, and also the acoustic coupling between the headphone and the user's skin (i.e. to provide a more effective barrier between the external environment and the user's ear).
The headphone comprises one or more loudspeakers 22 positioned on an internal surface of the headphone, and arranged to generate acoustic signals towards the user's ear and particularly the ear canal 12 b. The headphone further comprises one or more microphones 24, also positioned on the internal surface of the headphone, arranged to detect acoustic signals within the internal volume defined by the headphone, the auricle 12 a and the ear canal 12 b.
The headphone may be able to perform active noise cancellation, to reduce the amount of noise experienced by the user of the headphone. Active noise cancellation operates by detecting a noise (i.e. with a microphone), and generating a signal (i.e. with a loudspeaker) that has the same amplitude as the noise signal but is opposite in phase. The generated signal thus interferes destructively with the noise and so lessens the noise experienced by the user. Active noise cancellation may operate on the basis of feedback signals, feedforward signals, or a combination of both. Feedforward active noise cancellation utilizes one or more microphones on an external surface of the headphone, operative to detect the environmental noise before it reaches the user's ear. The detected noise is processed quickly, and the cancellation signal generated so as to match the incoming noise as it arrives at the user's ear. Feedback active noise cancellation utilizes one or more error microphones positioned on the internal surface of the headphone, operative to detect the combination of the noise and the audio playback signal generated by the one or more loudspeakers. This combination is used in a feedback loop, together with knowledge of the audio playback signal, to adjust the cancelling signal generated by the loudspeaker and so reduce the noise. The microphone 24 shown in FIG. 1a may therefore form part of an active noise cancellation system, for example, as an error microphone.
FIG. 1b shows an alternative personal audio device 30, comprising a supra-aural headphone. The supra-aural headphone does not surround or enclose the user's ear, but rather sits on the auricle 12 a. The headphone may comprise a cushion or padding to lessen the impact of environmental noise. As with the circum-aural headphone shown in FIG. 1a , the supra-aural headphone comprises one or more loudspeakers 32 and one or more microphones 34. The loudspeaker(s) 32 and the microphone(s) 34 may form part of an active noise cancellation system, with the microphone 34 serving as an error microphone.
FIG. 1c shows a further alternative personal audio device 40, comprising an intra-concha headphone (or earphone). In use, the intra-concha headphone sits inside the user's concha cavity. The intra-concha headphone may fit loosely within the cavity, allowing the flow of air into and out of the user's ear canal 12 b.
As with the devices shown in FIGS. 1a and 1b , the intra-concha headphone comprises one or more loudspeakers 42 and one or more microphones 44, which may form part of an active noise cancellation system.
FIG. 1d shows a further alternative personal audio device 50, comprising an in-ear headphone (or earphone), insert headphone, or ear bud. This headphone is configured to be partially or totally inserted within the ear canal 12 b, and may provide a relatively tight seal between the ear canal 12 b and the external environment (i.e. it may be acoustically closed or sealed). The headphone may comprise one or more loudspeakers 52 and one or more microphones 54, as with the other devices described above, and these components may form part of an active noise cancellation system.
As the in-ear headphone may provide a relatively tight acoustic seal around the ear canal 12 b, external noise (i.e. coming from the environment outside) detected by the microphone 54 is likely to be low.
FIG. 1e shows a further alternative personal audio device 60, which is a mobile or cellular phone or handset. The handset 60 comprises one or more loudspeakers 62 for audio playback to the user, and one or more microphones 64 which are similarly positioned.
In use, the handset 60 is held close to the user's ear so as to provide audio playback (e.g. during a call). While a tight acoustic seal is not achieved between the handset 60 and the user's ear, the handset 60 is typically held close enough that an acoustic stimulus applied to the ear via the one or more loudspeakers 62 generates a response from the ear which can be detected by the one or more microphones 64. As with the other devices, the loudspeaker(s) 62 and microphone(s) 64 may form part of an active noise cancellation system.
All of the personal audio devices described above thus provide audio playback to substantially a single user in use. Each device comprises one or more loudspeakers and one or more microphones, which may be utilized to generate biometric data related to the user's ear. The loudspeaker is operable to generate an acoustic stimulus, or acoustic probing wave, towards the user's ear, and the microphone is operable to detect and measure a response of the user's ear to the acoustic stimulus, e.g. to measure acoustic waves reflected from the ear canal or the pinna, and/or to acquire other ear biometric data. The acoustic stimulus may be sonic (for example in the audio frequency range of say 20 Hz to 20 kHz) or ultra-sonic (for example greater than 20 kHz or in the range 20 kHz to 50 kHz) or near-ultrasonic (for example in the range 15 kHz to 25 kHz) in frequency. In some examples the microphone signal may be processed to measure received signals of the same frequency as that transmitted.
Another biometric marker may comprise otoacoustic noises emitted by the cochlea in response to the acoustic stimulus waveform. The otoacoustic response may comprise a mix of the frequencies in the input waveform. For example, if the input acoustic stimulus consists of two tones at frequencies f1 and f2, the otoacoustic emission may include a component at frequency 2*f1−f2. The relative power of frequency components of the emitted waveform has been shown to be a useful biometric indicator. In some examples, therefore, the acoustic stimulus may comprise tones of two or more frequencies and the amplitude of mixing products at sums or differences of integer-multiple frequencies generated by otoacoustic emissions from the cochlea may be measured. Alternatively, otoacoustic emissions may be stimulated and measured by using stimulus waveforms comprising fast transients, e.g. clicks.
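As an informal illustration (not taken from the disclosure), the Swift sketch below reads the amplitude of the 2*f1 - f2 distortion product from a single-sided magnitude spectrum. The bin-indexing convention, the sample rate, the FFT size and the probe-tone frequencies are assumptions made for the example.
import Foundation

/// Reads the spectral amplitude at the distortion-product frequency 2*f1 - f2
/// from a single-sided magnitude spectrum. `spectrum[k]` is assumed to hold the
/// magnitude at frequency k * sampleRate / fftSize.
func distortionProductAmplitude(spectrum: [Double],
                                sampleRate: Double,
                                fftSize: Int,
                                f1: Double,
                                f2: Double) -> Double? {
    let targetFrequency = 2 * f1 - f2
    guard targetFrequency > 0 else { return nil }
    let bin = Int((targetFrequency * Double(fftSize) / sampleRate).rounded())
    guard spectrum.indices.contains(bin) else { return nil }
    return spectrum[bin]
}

// Toy usage: probe tones at 1.0 kHz and 1.2 kHz give an expected
// distortion product at 2*1000 - 1200 = 800 Hz.
let fakeSpectrum = [Double](repeating: 0.01, count: 512)
let amplitude = distortionProductAmplitude(spectrum: fakeSpectrum,
                                           sampleRate: 16_000,
                                           fftSize: 1024,
                                           f1: 1_000,
                                           f2: 1_200)
print("amplitude at 2*f1 - f2:", amplitude ?? "out of range")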
Depending on the construction and usage of the personal audio device, the measured response may comprise user-specific components, i.e. biometric data, relating to the auricle 12 a, the ear canal 12 b, or a combination of both the auricle 12 a and the ear canal 12 b. For example, the circum-aural headphones shown in FIG. 1a will generally acquire data relating to the auricle 12 a and potentially also the ear canal 12 b. The insert headphones shown in FIG. 1d will generally acquire data relating only to the ear canal 12 b.
One or more of the personal audio devices described above (or rather, the microphones within those devices) may be operable to detect bone-conducted voice signals from the user. That is, as the user speaks, sound is projected away from the user's mouth through the air. However, acoustic vibrations will also be carried through part of the user's skeleton or skull, such as the jaw bone. These acoustic vibrations may be coupled to the ear canal 12 b through the jaw or some other part of the user's skeleton or skull, and detected by the microphone. Lower frequency sounds tend to experience a stronger coupling than higher frequency sounds, and voiced speech (i.e. that speech or those phonemes generated while the vocal cords are vibrating) is coupled more strongly via bone conduction than unvoiced speech (i.e. that speech or those phonemes generated while the vocal cords are not vibrating). The in-ear headphone 50 may be particularly suited to detecting bone-conducted speech owing to the tight acoustic coupling around the ear canal 12 b.
A further ear biometric feature which may be extracted from audio signals acquired from a user's ear relates to cardiac sounds. That is, phonocardiograms have been shown to be useful in distinguishing between individuals. See, for example, “Biometric Identification Based on Frequency Analysis of Cardiac Sounds”, by Beritelli and Serrano, IEEE Transactions on Information Forensics and Security (Volume 2, Issue 3, pages 596-604, 2007). One particular feature which may be useful as a biometric is the variability of the R-R interval (i.e. the period between successive R peaks, where R is a point corresponding to the peak of the QRS complex of the electrocardiogram wave).
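A minimal Swift sketch of the R-R variability idea follows, purely for illustration: given a list of R-peak times it computes the standard deviation of the successive intervals. Using the standard deviation as the variability measure, and taking peak times as already detected, are assumptions of the example rather than requirements of the disclosure.
import Foundation

/// Computes successive R-R intervals from R-peak timestamps (in seconds) and
/// returns their standard deviation, a simple measure of heart-rate variability.
func rrIntervalVariability(rPeakTimes: [Double]) -> Double? {
    guard rPeakTimes.count >= 3 else { return nil }   // need at least two intervals
    let intervals = zip(rPeakTimes.dropFirst(), rPeakTimes).map { $0 - $1 }
    let mean = intervals.reduce(0, +) / Double(intervals.count)
    let variance = intervals.map { ($0 - mean) * ($0 - mean) }.reduce(0, +) / Double(intervals.count)
    return sqrt(variance)
}

// Toy usage with peaks roughly 0.8 s apart.
print(rrIntervalVariability(rPeakTimes: [0.00, 0.82, 1.61, 2.44, 3.22]) ?? "not enough peaks")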
All of the devices shown in FIGS. 1a to 1e and described above may be used to implement aspects of the disclosure.
FIG. 2 shows an arrangement 200 according to embodiments of the disclosure. The arrangement 200 comprises a personal audio device 202, a biometric authentication system 204 and a host electronic device 206.
The personal audio device 202 may be any device which is suitable for, or configurable to provide audio playback to substantially a single user. The personal audio device 202 generally comprises one or more loudspeakers, and one or more microphones which, in use, are positioned adjacent to or within a user's ear. The personal audio device may be wearable, and comprise headphones for each of the user's ears. Alternatively, the personal audio device may be operable to be carried by the user, and held adjacent to the user's ear or ears during use. The personal audio device may comprise headphones or a mobile phone handset, as described above with respect to any of FIGS. 1a to 1 e.
The host electronic device 206 may comprise any suitable audio playback device, configurable to generate audio playback signals to be played to the user via the personal audio device 202. It will be understood that, where the personal audio device 202 comprises a cellular phone or similar device for example, the host device 206 and the personal audio device 202 may be the same.
The biometric system 204 is coupled to both the personal audio device 202 and the host electronic device 206. In some embodiments, the biometric system 204 is provided in the personal audio device 202 itself. In other embodiments, the biometric system 204 is provided in the host electronic device 206. In still further embodiments, operations of the biometric device 204 are distributed between the personal audio device 202 and the host electronic device 206.
The biometric system 204 is coupled to the personal audio device 202 and operative to control the personal audio device 202 to acquire biometric data which is indicative of the individual using the personal audio device 202.
The personal audio device 202 may thus generate an acoustic stimulus for application to the user's ear, and detect or measure the response of the ear to the acoustic stimulus in order to acquire ear biometric data. For example, the acoustic stimulus may be in the sonic range, or ultra-sonic. In some embodiments, the acoustic stimulus may have a flat frequency spectrum over a relevant frequency range, or be preprocessed in such a way that those frequencies that allow for a good discrimination between individuals are emphasized (i.e. have a higher amplitude than other frequencies). The measured response corresponds to the reflected signal received at the one or more microphones, with certain frequencies being reflected at higher amplitudes than other frequencies owing to the particular response of the user's ear. Other forms of ear biometric data (such as heart rate variability and bone-conducted voice signals, for example) may require only the detection of an audio signal without a preceding acoustic stimulus.
The biometric system 204 may send suitable control signals to the personal audio device 202, so as to initiate the acquisition of biometric data, and receive data from the personal audio device 202 corresponding to the measured response. The biometric system 204 is operable to extract one or more features from the measured response and utilize those features as part of a biometric process.
Some examples of suitable biometric processes include biometric enrolment and biometric authentication. Enrolment comprises the acquisition and storage of biometric data which is characteristic of an individual. In the present context, such stored data may be known as an “ear print”. Authentication (alternatively referred to as verification or identification) comprises the acquisition of biometric data from an individual, and the comparison of that data to the stored ear prints of one or more enrolled or authorised users. A positive comparison (i.e. a determination that the acquired data matches or is sufficiently close to a stored ear print) results in the individual being authenticated. For example, the individual may be permitted to carry out a restricted action, or granted access to a restricted area or device. A negative comparison (i.e. a determination that the acquired data does not match or is not sufficiently close to a stored ear print) results in the individual not being authenticated. For example, the individual may not be permitted to carry out the restricted action, or granted access to the restricted area or device.
Thus the biometric system 204 may provide an authentication result to the host electronic device 206 which, if the biometric result is positive and identifies the user as an authorised user, is then configured to permit or to perform one or more restricted actions.
According to embodiments of the disclosure, however, the authentication system 204 is further utilized to perform an in-ear detect function, i.e. to detect the presence or absence of an ear in proximity to the personal audio device. A positive indication that an ear is in proximity to the personal audio device may be used in a number of ways. For example, the indication may be provided to the personal audio device and used to alter the operative state of the personal audio device. The operative state may be changed from a relatively low-power state (e.g. a sleep state or unpowered state) to a relatively high-power state (e.g. activating a digital connection between the personal audio device 202 and the host device 206, activating audio playback in the personal audio device, etc). The indication may be provided to the host electronic device 206 for substantially the same purpose (e.g. to alter the operative state of the host electronic device 206, or to prompt the host electronic device to alter the operative state of the personal audio device 202), or for a different purpose (e.g. to lock a touchscreen to input, etc).
The authentication system 204 may perform a biometric authentication algorithm in order to detect the presence or absence of an ear in proximity to the personal audio device 202. This concept is explained more fully below.
FIG. 3 is a schematic graph showing the distribution of biometric authentication scores.
As described above, biometric authentication in general involves the comparison of a biometric input signal (in particular, one or more features extracted from that input signal) to a stored template for an authorised user. The stored template is typically acquired during an “enrolment” process, as described above. Some biometric authentication processes may also involve the comparison of the biometric input signal (or features extracted therefrom) to a “universal model” descriptive of the biometrics of the population at large, as opposed to the specific authorised user. Some suitable examples of comparison techniques include probabilistic linear discriminant analysis (PLDA) and calculations of cosine similarity.
An output of the biometric authentication process is a score, indicative of the likelihood that the biometric input signals are those of the authorised user. For example, a relatively high score may be indicative of a relatively high likelihood that the biometric input signals match the authorised user; a relatively low score may be indicative of a relatively low likelihood that the biometric input signals match the authorised user. Biometric processors may make a decision on whether to authenticate a particular user as an authorised user or not by comparing the biometric score to a threshold value. For example, if the biometric score exceeds the threshold, the user may be authenticated; if the biometric score falls below the threshold, the user may not be authenticated. The value of the threshold may be constant, or may vary (e.g. as a function of the required level of security).
The inventors have realised that the distribution of biometric scores may be used to make further distinctions than merely whether or not a biometric input signal corresponds to an authorised user; the biometric score may be used to determine whether or not an ear is present at all. Further, as the biometric features of input signals indicative of any ear are substantially different from the biometric features of input signals indicative of no ear, a decision on the proximity of an ear to the personal audio device 202 can be reached quickly without consuming significant power.
Thus the distribution of biometric scores may fall into three categories 300, 302, 304. Relatively high biometric scores in category 304 may be indicative of a biometric input signal originating from an authorised user (i.e. a match). Relatively lower biometric scores 302 may be indicative of a biometric input signal originating from an unauthorised user (i.e. no match). The lowest biometric scores 300 may be indicative of a biometric input signal which does not correspond to an ear at all.
First and second thresholds may be set in order to distinguish between the three categories. For example, a first threshold T₁ may be set at a value which discriminates between biometric scores 300 indicative of no ear, and scores which are indicative of any ear 302, 304 (whether that ear belongs to an authorised user or not). A second threshold T₂ may be set at a value which discriminates between biometric scores 302 indicative of an unauthorised ear, and scores which are indicative of an authorised ear 304.
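For illustration only, the Swift sketch below classifies a single biometric score into the three categories 300, 302 and 304 using two thresholds T1 and T2. The numeric threshold values and the convention that higher scores indicate a closer match to the enrolled ear print are assumptions of the example.
import Foundation

/// The three categories a biometric score may fall into, mirroring regions 300, 302 and 304.
enum EarDetectionResult {
    case noEar           // score below T1: no ear in proximity
    case unauthorisedEar // score between T1 and T2: some ear, but not the authorised user's
    case authorisedEar   // score above T2: the authorised user's ear
}

/// Classifies a biometric score using the two thresholds described above.
func classify(score: Double, t1: Double, t2: Double) -> EarDetectionResult {
    precondition(t1 < t2, "T1 must lie below T2")
    if score < t1 { return .noEar }
    if score < t2 { return .unauthorisedEar }
    return .authorisedEar
}

// Toy usage with illustrative threshold values.
print(classify(score: 0.15, t1: 0.3, t2: 0.7))  // noEar
print(classify(score: 0.55, t1: 0.3, t2: 0.7))  // unauthorisedEar
print(classify(score: 0.90, t1: 0.3, t2: 0.7))  // authorisedEar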
The values of the thresholds may be set using a machine learning algorithm, such as a neural network. Such machine learning algorithms may be subject to a training phase, in which training data is input to the algorithm. Training data may comprise biometric scores plus corresponding categorization of those scores as belonging to one of the three categories identified above (e.g. as determined by human input). One method of achieving this is for the machine learning algorithm to attempt to categorize training biometric scores into one of the categories, and then be provided with feedback (which may be positive or negative) according to whether the categorization is correct or not. This feedback may then be used to adjust the threshold values which are applied. Once the threshold values are correctly trained, they may be put into practice.
Various different ear biometric features have been discussed above, including ear resonances and anti-resonances, oto-acoustic emissions, bone-conducted voice signals and heart rate variability. Biometric scores and corresponding categorizations or thresholds may be generated based on any one or more of these features.
In the latter case, where more than one ear biometric feature is used to categorize audio signals, different techniques may be utilized in order to fuse the different biometrics into a single process. Embodiments of the disclosure are not limited to any particular fusion technique.
In score-level fusion, separate biometric algorithms are applied to each ear biometric in order to generate multiple separate biometric scores. These scores are then consolidated. One method of achieving this is to generate a single scalar score which is subsequently compared to a scalar threshold (e.g., as shown in FIG. 3). For example, the cosine similarity may be calculated between a biometric measurement and a registered or pre-defined biometric template. Another method may take a vector of the multiple biometric scores, with the threshold then comprising a hyperplane which discriminates between the different categories in a multi-dimensional space.
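The following Swift sketch illustrates one possible score-level fusion, under the assumption that each biometric yields a score (here a cosine similarity against its own template) and that the scores are consolidated by a weighted average. The weights and example values are invented for the illustration.
import Foundation

/// Cosine similarity between a measured feature vector and a stored template,
/// as one way of producing a per-biometric score.
func cosineSimilarity(_ a: [Double], _ b: [Double]) -> Double {
    precondition(a.count == b.count && !a.isEmpty)
    let dot = zip(a, b).map(*).reduce(0, +)
    let normA = sqrt(a.map { $0 * $0 }.reduce(0, +))
    let normB = sqrt(b.map { $0 * $0 }.reduce(0, +))
    guard normA > 0, normB > 0 else { return 0 }
    return dot / (normA * normB)
}

/// Score-level fusion: consolidate several per-biometric scores into one scalar
/// (here a weighted average) which can then be compared against a scalar threshold.
func fuseScores(_ scores: [Double], weights: [Double]) -> Double {
    precondition(scores.count == weights.count && !scores.isEmpty)
    let weightSum = weights.reduce(0, +)
    precondition(weightSum > 0)
    return zip(scores, weights).map(*).reduce(0, +) / weightSum
}

// Toy usage: fuse a resonance-based score with a hypothetical heart-rate-variability score.
let resonanceScore = cosineSimilarity([0.8, 1.4, 0.9], [0.7, 1.5, 1.0])
let hrvScore = 0.62
let fused = fuseScores([resonanceScore, hrvScore], weights: [0.7, 0.3])
print("fused score:", fused)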
In contrast, decision-level fusion consolidates multiple separate decisions from each biometric (i.e. based on separate biometric scores and separate thresholds for each biometric). Different rules may be determined for combining the multiple decisions in order to arrive at a single, overall decision as to the category of the input biometric signal.
FIG. 4 shows a system 400 according to embodiments of the disclosure.
The system 400 comprises processing circuitry 422, which may comprise one or more processors, such as a central processing unit or an applications processor (AP), or a digital signal processor (DSP). The one or more processors may perform methods as described herein on the basis of data and program instructions stored in memory 424. Memory 424 may be provided as a single component or as multiple components or co-integrated with at least some of processing circuitry 422. Specifically, the methods described herein can be performed in processing circuitry 422 by executing instructions that are stored in non-transient form in the memory 424, with the program instructions being stored either during manufacture of the system 400 or personal audio device 202 or by upload while the system or device is in use.
The processing circuitry 422 comprises a stimulus generator module 403 which is coupled directly or indirectly to an amplifier 404, which in turn is coupled to a loudspeaker 406.
The stimulus generator module 403 generates an electrical excitation signal and provides the electrical excitation signal to the amplifier 404, which amplifies it and provides the amplified signal to the loudspeaker 406. The loudspeaker 406 generates a corresponding acoustic signal which is output to the user's ear (or ears). The acoustic signal may be sonic or ultra-sonic, for example. The acoustic signal may have a flat frequency spectrum, or be preprocessed in such a way that those frequencies that allow for a good discrimination between individuals are emphasized (i.e. have a higher amplitude than other frequencies).
As noted above, the acoustic signal may be output to all or a part of the user's ear (i.e. the auricle 12 a or the ear canal 12 b). The acoustic signal is reflected off the ear, and the reflected signal (or echo signal) is detected and received by a microphone 408. The reflected signal thus comprises data which is characteristic of the individual's ear, and suitable for use as a biometric.
The reflected data signal is passed from the microphone 408 to an analogue-to-digital converter (ADC) 410, where it is converted from the analogue domain to the digital domain. Of course, in alternative embodiments the microphone may be a digital microphone and produce a digital data signal (which does not therefore require conversion to the digital domain).
The signal is detected by the microphone 408 in the time domain. However, the features extracted for the purposes of the biometric process may be in the frequency domain (in that it is the frequency response of the user's ear which is characteristic). The system 400 therefore comprises a Fourier transform module 412, which converts the reflected signal to the frequency domain. For example, the Fourier transform module 412 may implement a fast Fourier transform (FFT).
The transformed signal is then passed to a feature extract module 414, which extracts one or more features of the transformed signal for use in a biometric process (e.g. biometric enrolment, biometric authentication, etc). For example, the feature extract module 414 may extract the resonant frequency of the user's ear. For example, the feature extract module 414 may extract one or more mel frequency cepstrum coefficients. Alternatively, the feature extract module may determine the frequency response of the user's ear at one or more predetermined frequencies, or across one or more ranges of frequencies. The extracted features may correspond to data for a model of the ear.
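Purely as an illustration of this stage (and of the preceding Fourier-transform stage), the Swift sketch below computes a naive DFT magnitude spectrum and takes the dominant peak as an estimate of a resonant frequency. A real implementation would use an optimised FFT; the naive transform and the simple peak-picking are assumptions made to keep the example self-contained.
import Foundation

/// Naive DFT magnitude spectrum (O(n^2)); an optimised FFT would normally be used,
/// but this keeps the sketch dependency-free.
func magnitudeSpectrum(of samples: [Double]) -> [Double] {
    let n = samples.count
    return (0..<n / 2).map { k in
        var re = 0.0, im = 0.0
        for (i, x) in samples.enumerated() {
            let phase = -2.0 * Double.pi * Double(k) * Double(i) / Double(n)
            re += x * cos(phase)
            im += x * sin(phase)
        }
        return sqrt(re * re + im * im)
    }
}

/// Picks the dominant spectral peak (ignoring the DC bin) and converts its bin index
/// to a frequency in Hz, as a simple stand-in for "the resonant frequency of the ear canal".
func resonantFrequency(of samples: [Double], sampleRate: Double) -> Double? {
    let spectrum = magnitudeSpectrum(of: samples)
    guard let peakBin = spectrum.indices.dropFirst().max(by: { spectrum[$0] < spectrum[$1] }) else { return nil }
    return Double(peakBin) * sampleRate / Double(samples.count)
}

// Toy usage: a 2.5 kHz sinusoid sampled at 16 kHz should yield a peak near 2.5 kHz.
let sampleRate = 16_000.0
let samples = (0..<512).map { sin(2.0 * Double.pi * 2_500.0 * Double($0) / sampleRate) }
print("estimated resonant frequency:", resonantFrequency(of: samples, sampleRate: sampleRate) ?? 0)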
The extracted feature(s) are passed to a biometric module 416, which performs a biometric process on them. For example, the biometric module 416 may perform a biometric enrolment, in which the extracted features (or parameters derived therefrom) are stored as part of biometric data 418 which is characteristic of the individual. The biometric data 418 may be stored within the system or remote from the system (and accessible securely by the biometric module 416). Such stored data 418 may be known as an “ear print”. In another example, the biometric module 416 may perform a biometric authentication, and compare the one or more extracted features to corresponding features in the stored ear print 418 (or multiple stored ear prints) for authorised users.
In some embodiments the stimulus waveforms may be tones of predetermined frequency and amplitude. In other embodiments the stimulus generator may be configurable to apply music to the loudspeaker, e.g. normal playback operation, and the feature extract module may be configurable to extract the response or transfer function from whatever signal components the stimulus waveform contains.
Thus in some embodiments the feature extract module 414 may be designed with foreknowledge of the nature of the stimulus, for example knowing the spectrum of the applied stimulus signal, so that the response or transfer function may be appropriately normalised. In other embodiments the feature extract module 414 may comprise a second input to monitor the stimulus (e.g. playback music) and hence provide the feature extract module with information about the stimulus signal or its spectrum so that the feature extract module 414 may calculate the transfer function from the stimulus waveform stimulus to received acoustic waveform from which it may derive the desired feature parameters. In the latter case, the stimulus signal may also pass to the feature extract module 414 via the FFT module 412.
As noted above, the microphone 408 may be operable to detect bone-conducted voice signals. In this case, the biometric algorithm performed by the biometric module 416 may comprise a check that the bone-conducted voice signal (i.e. detected in the microphone 408) and an air-conducted voice signal (i.e. detected in a voice microphone) match to an acceptable degree, i.e. correspond. This will provide an indication that the personal audio device (i.e. that which comprises the microphone 408) is being worn by the same user as is speaking into the voice microphone.
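One simple way such a correspondence check could be sketched is shown below in Swift: the bone-conducted and air-conducted signals are compared by Pearson correlation against a fixed threshold. The correlation measure, the threshold value and the assumption that the two signals are already time-aligned and of equal length are all assumptions of the example, not features recited in the disclosure.
import Foundation

/// Pearson correlation between a bone-conducted signal (from the in-ear microphone)
/// and an air-conducted signal (from the voice microphone), compared to a threshold.
/// A high correlation is taken as evidence that both signals come from the same talker.
func signalsCorrespond(bone: [Double], air: [Double], threshold: Double = 0.7) -> Bool {
    precondition(bone.count == air.count && bone.count > 1)
    let n = Double(bone.count)
    let meanBone = bone.reduce(0, +) / n
    let meanAir = air.reduce(0, +) / n
    let covariance = zip(bone, air).map { ($0 - meanBone) * ($1 - meanAir) }.reduce(0, +)
    let stdBone = sqrt(bone.map { ($0 - meanBone) * ($0 - meanBone) }.reduce(0, +))
    let stdAir = sqrt(air.map { ($0 - meanAir) * ($0 - meanAir) }.reduce(0, +))
    guard stdBone > 0, stdAir > 0 else { return false }
    return covariance / (stdBone * stdAir) >= threshold
}

// Toy usage: the "air" signal is a scaled, slightly noisy copy of the "bone" signal.
let boneSignal = (0..<64).map { sin(Double($0) * 0.3) }
let airSignal = boneSignal.enumerated().map { 0.5 * $1 + ($0 % 2 == 0 ? 0.01 : -0.01) }
print("same talker:", signalsCorrespond(bone: boneSignal, air: airSignal))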
The biometric module 416 thus generates a biometric result 428 (which may be the successful or unsuccessful generation of an ear print, as well as successful or unsuccessful authentication) and outputs the result to the control module 402.
It will be apparent from the discussion above that the biometric module 416 according to embodiments of the disclosure further performs an in-ear detect function. Thus, according to embodiments of the disclosure, the biometric module 416 further generates an output 426 indicative of the presence or absence of an ear (any ear) in proximity to the loudspeaker 406. The in-ear output 426 may also be provided to the control module 402.
FIG. 5 is a schematic diagram showing the acquisition and use of an audio signal 500 for the purposes of in-ear detection and ear biometric authentication according to embodiments of the disclosure.
Audio signals acquired by personal audio devices described herein may have inherently low signal-to-noise ratios, owing to the relatively low amplitude of ear biometric features. In order to distinguish reliably between an ear of an authorised user and an ear of an unauthorised user, a biometric algorithm may require a relatively large amount of data. This is because the ear biometric features have relatively low amplitude, but also because ear biometrics vary only slightly between different individuals. Therefore, in order to have the necessary confidence that a particular biometric input signal originates from an authorised user, a relatively large amount of data may be required (e.g. averaged over a relatively long time).
In contrast, the differences are more significant between biometric input signals which are indicative of the presence of any ear and biometric input signals which are indicative of the absence of any ear. For example, audio signals acquired in the absence of any ear may have no heartbeat, no resonant frequencies or anti-resonant frequencies, no oto-acoustic emissions, etc. Thus, systems and methods according to embodiments of the disclosure may be able to discriminate reliably between the presence and absence of any ear based on relatively little data. In other words, in-ear detection according to embodiments of the disclosure can be performed quickly while consuming relatively little power. In a practical system, it is envisaged that a decision on the presence or absence of any ear may be taken reliably based on 5-10 data frames, whereas a decision on the presence of a particular ear (e.g., that of an authorised user) may be taken reliably based on approximately 100 data frames. In a system having a sample rate of 100 Hz, this would equate to approximately one second of data. Thus, determining the presence of any ear may require only approximately 5-10% of the calculations ordinarily required to determine the presence of a particular ear.
This concept is illustrated in FIG. 5, where an input audio signal 500 comprises a train of data frames 502-n (where n is an integer). Each data frame may comprise one or more data samples.
Three different scenarios are illustrated. In each case, a biometric algorithm is performed based on the audio signal, involving the comparison of biometric features extracted from the audio signal 500 to a template or ear print for an authorised user, and the generation of a biometric score indicating the likelihood that the ear of an authorised user is present. The biometric score may be based on the accumulated data in the audio signal 500, and thus may evolve and converge over time towards a “true” value. The biometric algorithm may comprise one or more different types of ear biometric features, in the latter case fusing the ear biometric scores or decisions as described above.
In the illustrated embodiment, the biometric module first determines whether the audio signal 500 comprises ear biometric features which are indicative of the presence of any ear. The determination may be based on relatively little data. In the illustrated example, the biometric module 416 makes the determination based on a single data frame; however, any number of data frames may be used to make the determination. The determination may involve the comparison of the current biometric score to a threshold T₁.
In scenario 1, the biometric module 416 determines that no ear is present, and thus the biometric algorithm ends without further calculations after data frame 502-1. In particular, the biometric module 416 does not go on to determine whether the audio signal 500 comprises ear biometric features which correspond to those of an authorised user. Of course, the algorithm may be repeated in future, e.g., periodically or in response to detection of some event.
In scenario 2, the biometric module 416 determines after data frame 502-1 that an ear is present and, responsive to that determination, goes on to perform a "full" biometric algorithm in order to determine whether the ear belongs to an authorised user or not. This process may require relatively more data, and thus in the illustrated embodiment an authentication decision can only be reliably taken after data frame 502-5. In scenario 2, this determination is negative (i.e. the user is not authorised). Scenario 3 corresponds substantially to scenario 2, but the authentication decision is positive (i.e. the user is authorised). In either case, the data on which the authentication decision is taken may comprise more data frames than the data on which the in-ear detect decision is taken. For example, the data may be averaged across all data frames. The determination may involve the comparison of the current biometric score to a threshold T₂.
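The three scenarios can be illustrated with a short Swift sketch of the two-stage decision: a quick check of a few frame scores against T1, followed, only if an ear appears to be present, by a longer accumulation of scores compared against T2. The frame counts, threshold values and the use of a running average are assumptions made for the example, not values taken from the disclosure.
import Foundation

/// Two-stage decision sketched in FIG. 5: average the first few per-frame scores and
/// compare against T1 (any ear present?); only if that passes, keep accumulating towards
/// a T2 decision (authorised ear?). Returns (earPresent, authorised), where authorised is
/// nil if no authentication decision could yet be taken.
func detectEar(frameScores: [Double],
               earDetectFrames: Int = 5,
               authenticationFrames: Int = 100,
               t1: Double = 0.3,
               t2: Double = 0.7) -> (earPresent: Bool, authorised: Bool?) {
    // Stage 1: quick in-ear detect on a small number of frames.
    guard frameScores.count >= earDetectFrames else { return (false, nil) }
    let quickScore = frameScores.prefix(earDetectFrames).reduce(0, +) / Double(earDetectFrames)
    guard quickScore >= t1 else { return (false, nil) }   // scenario 1: stop early, save power

    // Stage 2: full authentication over many more frames (scenarios 2 and 3).
    guard frameScores.count >= authenticationFrames else { return (true, nil) }  // still gathering data
    let fullScore = frameScores.prefix(authenticationFrames).reduce(0, +) / Double(authenticationFrames)
    return (true, fullScore >= t2)
}

// Toy usage with synthetic per-frame scores hovering around 0.75.
let scores = (0..<100).map { _ in 0.75 + Double.random(in: -0.05...0.05) }
print(detectEar(frameScores: scores))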
FIG. 6 is a flowchart of a method according to embodiments of the disclosure. The method may be performed by the system 300 described above, for example.
In step 600, the system obtains an audio signal. The audio signal may be acquired by a microphone 408 in a personal audio device, as described above. The audio signal may be obtained in conjunction with generation of an acoustic stimulus (e.g. in order to detect resonant/anti-resonant frequencies, oto-acoustic emissions, etc) or not (e.g. when detecting bone-conducted speech, heart rate variability, etc).
In step 602, one or more ear biometric features are extracted from the audio signal. This step may be performed by feature extract module 414, for example. Features in the frequency domain may be extracted following application of a Fourier transform to the audio signal. The ear biometric features may comprise one or more of the following: one or more resonant frequencies; one or more anti resonant frequencies; oto-acoustic emissions; heart-rate variability; and bone-conducted voice signals.
In step 604, a biometric algorithm is performed based on the audio signal, involving the comparison of the biometric features extracted in step 602 to a template or ear print for an authorised user, and the generation of a biometric score indicating the likelihood that the ear of an authorised user is present. In the event that more than one type of ear biometric feature is employed, a biometric fusion technique may be used to fuse the ear biometric scores or decisions as described above.
In step 606, the biometric score generated in step 604 is compared to a threshold T₁ which is discriminative between scores which are indicative of no ear and scores which are indicative of any ear. If the comparison is negative, the method proceeds to step 608, in which the method ends. Alternatively, a negative in-ear output signal may be generated, indicating the absence of any ear from the proximity of the personal audio device.
If the comparison in step 606 is positive (i.e. an ear is present), a positive in-ear output signal is generated. The system may respond to such an output signal in a number of different ways, and therefore in some embodiments the method may end at that point. That is, the biometric module 416 detects the application of the personal audio device to a user's ear, and the personal audio device or host electronic device responds to such a detection in its usual way. In the illustrated embodiment, the method proceeds to step 610 in which the personal audio device and/or the host electronic device is “woken” from a low-power state (e.g. a sleep or OFF state). However, in other embodiments the personal audio device may react, for example, by locking a touchscreen to further input, by un-pausing audio playback, or in any other way.
Thus embodiments of the disclosure provide methods, apparatus and systems in which a biometric processor or module is used to perform an in-ear detect function.
In further embodiments of the disclosure, the method goes on to perform biometric authentication of the user responsive to detection of the proximity of an ear in step 606. As noted above, biometric authentication may require more data than ear-proximity detection, and thus in step 612 further audio signal data is obtained. For example, one or more additional data frames of the audio signal may be acquired.
In step 614, one or more ear biometric features are extracted from the audio signal data (i.e. the audio signal data acquired in step 600 and/or step 612) and, in step 616, a biometric score is generated indicative of the likelihood that the extracted features match those of a stored template or ear print for an authorised user. Steps 614 and 616 may correspond substantially to steps 602 and 604 described above. The features used to generate the biometric score in step 616 may include the features extracted in step 602 as well as the features extracted in step 614.
In step 618, the score is compared to a threshold T₂, which is discriminative between ears of the population at large, and the ear of the authorised user. The threshold T₂ is different to the threshold T₁ applied in step 606 and, in embodiments where the biometric score is configured to increase with increasing likelihood of a match between input and stored template, is higher than the threshold T₁.
If the outcome of the comparison in step 618 is positive, the method proceeds to step 620 in which the user is authenticated as an authorised user; if the outcome of the comparison in step 618 is negative, the method proceeds to step 622 in which the user is not authenticated as an authorised user. Again, the system may respond to positive/negative authentication of the user in any manner. For example, a restricted action may be performed or prevented from being performed; settings which are specific to the authorised user may be applied or not. There are many different possibilities and the present disclosure is not limited in that respect.
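By way of illustration only, the staged comparison against the two thresholds T₁ and T₂ described above might be sketched in code as follows. This sketch is not taken from the disclosure: the scoring function, the threshold values and the frame counts are placeholder assumptions, chosen purely to show how a failed comparison against T₁ halts processing after a few data frames, while a successful one triggers the costlier comparison against T₂ over many more frames.

``` java
import java.util.List;

// Illustrative sketch only; all numeric values and the scoring helper are hypothetical.
public class EarDetectSketch {

    enum Result { NO_EAR, EAR_NOT_AUTHORISED, EAR_AUTHORISED }

    // Hypothetical helper: compares per-frame feature vectors to the stored ear print
    // and returns an average similarity score (higher means a closer match).
    // Assumes each frame vector has at least template.length entries.
    static double scoreAgainstTemplate(List<double[]> frames, double[] template) {
        double total = 0.0;
        for (double[] frame : frames) {
            double dist = 0.0;
            for (int i = 0; i < template.length; i++) {
                double diff = frame[i] - template[i];
                dist += diff * diff;
            }
            total += 1.0 / (1.0 + Math.sqrt(dist)); // crude per-frame similarity
        }
        return frames.isEmpty() ? 0.0 : total / frames.size();
    }

    // Stage 1 uses a few frames and threshold T1 ("any ear present?"); stage 2 uses
    // many more frames and a higher threshold T2 ("the authorised user's ear?").
    static Result detectAndAuthenticate(List<double[]> frames, double[] template) {
        final double T1 = 0.2;          // illustrative value only
        final double T2 = 0.8;          // illustrative value only, T2 > T1
        final int earDetectFrames = 5;  // in-ear detect needs relatively little data
        final int authFrames = 100;     // authentication needs relatively more data

        double s1 = scoreAgainstTemplate(
                frames.subList(0, Math.min(earDetectFrames, frames.size())), template);
        if (s1 < T1) {
            return Result.NO_EAR;       // end early: no further calculation
        }

        double s2 = scoreAgainstTemplate(
                frames.subList(0, Math.min(authFrames, frames.size())), template);
        return s2 >= T2 ? Result.EAR_AUTHORISED : Result.EAR_NOT_AUTHORISED;
    }
}
```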
Thus the present disclosure provides methods, apparatus and systems for performing in-ear detection using a biometric processor or module. By re-using the biometric processor in this way, dedicated circuitry that would otherwise be required for in-ear detect can be left out of the personal audio device or host electronic device altogether.
Embodiments of the disclosure may be implemented in an electronic, portable and/or battery powered host device such as a smartphone, an audio player, a mobile or cellular phone, or a handset. Embodiments may be implemented on one or more integrated circuits provided within such a host device. Embodiments may be implemented in a personal audio device configurable to provide audio playback to a single person, such as a smartphone, a mobile or cellular phone, headphones, earphones, etc. See FIGS. 1a to 1e. Again, embodiments may be implemented on one or more integrated circuits provided within such a personal audio device. In yet further alternatives, embodiments may be implemented in a combination of a host device and a personal audio device. For example, embodiments may be implemented in one or more integrated circuits provided within the personal audio device, and one or more integrated circuits provided within the host device.
It should be understood—especially by those having ordinary skill in the art with the benefit of this disclosure—that the various operations described herein, particularly in connection with the figures, may be implemented by other circuitry or other hardware components. The order in which each operation of a given method is performed may be changed, and various elements of the systems illustrated herein may be added, reordered, combined, omitted, modified, etc. It is intended that this disclosure embrace all such modifications and changes and, accordingly, the above description should be regarded in an illustrative rather than a restrictive sense.
Similarly, although this disclosure makes reference to specific embodiments, certain modifications and changes can be made to those embodiments without departing from the scope and coverage of this disclosure. Moreover, any benefits, advantages, or solutions to problems that are described herein with regard to specific embodiments are not intended to be construed as a critical, required, or essential feature or element.
Further embodiments and implementations likewise, with the benefit of this disclosure, will be apparent to those having ordinary skill in the art, and such embodiments should be deemed as being encompassed herein. Further, those having ordinary skill in the art will recognize that various equivalent techniques may be applied in lieu of, or in conjunction with, the discussed embodiments, and all such equivalents should be deemed as being encompassed by the present disclosure.
The skilled person will recognise that some aspects of the above-described apparatus and methods, for example the discovery and configuration methods may be embodied as processor control code, for example on a non-volatile carrier medium such as a disk, CD- or DVD-ROM, programmed memory such as read only memory (Firmware), or on a data carrier such as an optical or electrical signal carrier. For many applications embodiments of the invention will be implemented on a DSP (Digital Signal Processor), ASIC (Application Specific Integrated Circuit) or FPGA (Field Programmable Gate Array). Thus the code may comprise conventional program code or microcode or, for example code for setting up or controlling an ASIC or FPGA. The code may also comprise code for dynamically configuring re-configurable apparatus such as re-programmable logic gate arrays. Similarly the code may comprise code for a hardware description language such as Verilog™ or VHDL (Very high speed integrated circuit Hardware Description Language). As the skilled person will appreciate, the code may be distributed between a plurality of coupled components in communication with one another. Where appropriate, the embodiments may also be implemented using code running on a field-(re)programmable analogue array or similar device in order to configure analogue hardware.
Note that as used herein the term module shall be used to refer to a functional unit or block which may be implemented at least partly by dedicated hardware components such as custom defined circuitry and/or at least partly be implemented by one or more software processors or appropriate code running on a suitable general purpose processor or the like. A module may itself comprise other modules or functional units. A module may be provided by multiple components or sub-modules which need not be co-located and could be provided on different integrated circuits and/or running on different processors.
It should be noted that the above-mentioned embodiments illustrate rather than limit the invention, and that those skilled in the art will be able to design many alternative embodiments without departing from the scope of the appended claims or embodiments. The word “comprising” does not exclude the presence of elements or steps other than those listed in a claim or embodiment, “a” or “an” does not exclude a plurality, and a single feature or other unit may fulfil the functions of several units recited in the claims or embodiments. Any reference numerals or labels in the claims or embodiments shall not be construed so as to limit their scope.
1. A system for detecting the presence of an ear in proximity to an audio device, the system comprising: an input for obtaining a data signal from the environment of the audio device; and an ear biometric authentication module configured to: compare one or more ear biometric features, extracted from the data signal, to an ear biometric template; and generate a first output indicative of the presence or absence of any ear in proximity to the audio device based on the comparison of the one or more extracted ear biometric features to the ear biometric template.
2. The system according to claim 1, wherein the ear biometric authentication module is configured to: generate a score, based on the comparison between the one or more extracted ear biometric features and the ear biometric template, indicative of a distance between the one or more extracted ear biometric features and the ear biometric template; and wherein the first output is generated based on the score.
3. The system according to claim 2, wherein the ear biometric authentication module is configured to compare the score to a first threshold to generate the first output.
4. The system according to claim 1, wherein the ear biometric template is for an authorised user of the audio device, and wherein the ear biometric authentication module is further configured to generate a second output indicative of the presence of an ear of the authorised user in proximity to the audio device based on the comparison of the one or more extracted ear biometric features to the ear biometric template for the authorised user.
5. The system according to claim 4, wherein the ear biometric authentication module is configured to generate the second output responsive to a determination that any ear is in proximity to the audio device.
6. The system according to claim 4, wherein the ear biometric authentication module is configured to: generate a score, based on the comparison between the one or more extracted ear biometric features and the ear biometric template, indicative of a distance between the one or more extracted ear biometric features and the ear biometric template; compare the score to a first threshold to generate the first output; and compare the score to a second threshold to generate the second output, wherein the second threshold is different from the first threshold.
7. The system according to claim 1, wherein the data signal comprises a plurality of data frames, each data frame comprising a plurality of data samples.
8. The system according to claim 7, wherein the one or more ear biometric features are averaged over multiple data frames.
9. The system according to claim 7, wherein the first output is based on one or more extracted ear biometric features calculated from a first number of data frames, wherein the ear biometric template is for an authorised user of the audio device, wherein the ear biometric authentication module is further configured to generate a second output indicative of the presence of an ear of the authorised user in proximity to the audio device based on the comparison of one or more ear biometric features extracted from a second number of data frames, greater than the first number, to the ear biometric template for the authorised user.
10. The system according to claim 1, wherein the one or more ear biometric features comprise one or more of: one or more resonant frequencies; one or more anti resonant frequencies; oto-acoustic emissions; heart-rate variability; bone-conducted voice signals.
11. The system according to claim 1, wherein the extracted ear biometric features comprise a plurality of different types of ear biometric feature, and wherein the ear biometric authentication module is operative to apply a biometric fusion technique to generate the first output.
12. The system according to claim 1, wherein the data signal is an audio signal.
13. An electronic device, comprising: the system as claimed in claim 1.
14. The electronic device according to claim 13, wherein the electronic device is the audio device.
15. The electronic device according to claim 13, wherein the electronic device is a host device coupled to the audio device.
16. A method of detecting the presence of an ear in proximity to an audio device, the method comprising: obtaining a data signal from the environment of the audio device; extracting one or more ear biometric features from the data signal; comparing the one or more extracted ear biometric features to an ear biometric template for an authorised user of the audio device; and generating a first output indicative of the presence or absence of any ear in proximity to the audio device based on the comparison of the one or more extracted ear biometric features to the ear biometric template for the authorised user.
17. The method according to claim 16, further comprising: generating a score, based on the comparison between the one or more extracted ear biometric features and the ear biometric template for the authorised user, indicative of a distance between the one or more extracted ear biometric features and the ear biometric template for the authorised user; and wherein the first output is generated based on the score.
18. The method according to claim 17, wherein the score is compared to a first threshold to generate the first output.
19. The method according to claim 16, further comprising generating a second output indicative of the presence of an ear of the authorised user in proximity to the audio device based on the comparison of the one or more extracted ear biometric features to the ear biometric template for the authorised user.
20. The method according to claim 19, wherein the second output is generated responsive to a determination that any ear is in proximity to the audio device.
21. A system for detecting the presence of an ear in proximity to an audio device, the system comprising: an input for obtaining a data signal from the environment of the audio device; and an ear biometric authentication module configured to: extract one or more first ear biometric features from a first number of data frames of the data signal; calculate a first score being indicative of a distance between the one or more extracted ear biometric features and an ear biometric template of an authorised user; compare the first score to a first threshold to determine the presence or absence of any ear in proximity to the audio device; responsive to a determination of the presence of any ear in proximity to the audio device, extract one or more second ear biometric features from a second number of data frames of the data signal, the second number of data frames being greater than the first number of data frames; calculate a second score being indicative of a distance between the one or more second extracted ear biometric features and the ear biometric template of the authorised user; and compare the second score to a second threshold, different from the first threshold, to determine the presence or absence of an ear of the authorised user in proximity to the audio device.
22. The system according to claim 21, wherein the second number of data frames include the first number of data frames and one or more additional data frames.
23. The system according to claim 22, wherein the one or more additional data frames are obtained after the determination of the presence of any ear in proximity to the audio device.
24. The system according to claim 21, wherein the one or more second extracted ear biometric features include the one or more first extracted ear biometric features.
|
import React, { Component } from 'react';
import validateInput from '../../server/shared/validation/login';
import Input from '../../components/form/input';
import PropTypes from 'prop-types';
import './loginPage.css';
import {connect} from 'react-redux';
class LoginPage extends React.Component {
constructor(props, context) {
super(props, context);
this.state = {
identifier: '',
password: '',
errors: {},
isLoading: false
}
this.handleSubmit = this.handleSubmit.bind(this);
this.handleChange = this.handleChange.bind(this);
}
isValid() {
const { errors, isValid } = validateInput(this.state);
this.setState({
errors
});
return isValid;
}
handleSubmit(e) {
if( this.isValid() ) {
this.setState({
errors: {},
isLoading: true
});
this.props.login(this.state);
}
e.preventDefault();
}
handleChange(e) {
this.setState({
[e.target.name]: e.target.value
})
}
componentWillReceiveProps(nextProps) {
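// When a new auth promise arrives via props, redirect on success or surface server-side errors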
if( nextProps.auth !== this.props.auth ) {
nextProps.auth.then(
(response)=> {
console.log(response);
this.context.router.history.push('/react-app/');
},
(err)=> {
try {
this.setState({
errors: err.data.errors,
isLoading: false
})
}catch(e) {
console.warn(err);
}
}
)
}
}
render() {
const { errors, identifier, password, isLoading } = this.state;
return(
<div className="login">
<h1>Login page</h1>
<form action="#" className="login-form" onSubmit={this.handleSubmit}>
<div className="login-form__row">
<Input
classParent="default-input"
value={identifier}
name="identifier"
type="text"
className="input login-form__input"
handleChange={this.handleChange}
placeholder="Yaroslav"
errors={errors.identifier}
/>
</div>
<div className="login-form__row">
<Input
value={password}
name="password"
type="password"
className="input login-form__input"
handleChange={this.handleChange}
placeholder="123"
errors={errors.password}
/>
</div>
<div className="login-form__row">
<button type="submit" className="login-form__btn btn" >Submit</button>
</div>
</form>
</div>
)
}
}
LoginPage.contextTypes = {
router: PropTypes.object.isRequired
}
export default LoginPage;
|
Talk:Eye of Ra/GA1
GA Review
The edit link for this section can be used to add comments to the review.
Reviewer: Redtigerxyz (talk · contribs) 10:26, 5 February 2012 (UTC)
Response by A. Parrot
I have made several adjustments to this article to address the 1a concerns. I have also made a separate article on Mehet-Weret, who is not synonymous with Hathor, and adjusted Hathor's article accordingly. I have looked at all uses of "eye" and have capitalized it in several cases, but I kept it in lowercase when the word refers to an eye, in general, and not a specific Eye of Ra or Horus or Atum. (In the case where Ra "grows a new eye" I left it in lowercase, because I'm not sure if that's supposed to be a second Eye of Ra or just an eyeball.) Regarding maat, books on Egyptian religion often write maat when referring to it as a word or a concept and Maat when referring to the goddess. I prefer to maintain that distinction.
As for original research, I have been very careful about that. The connections between the various Eye goddesses have been staring me in the face for a long time, but I didn't write anything about them until I had sources that explicitly describe those connections.
I don't know how you might go about checking all of my sources, but I'll address your example about the clay cobras, where the text that verifies the claim verifies a lot of the other facts in the article. When speaking about the spell inscribed on O. Gardiner 363, the spell that involves the clay uraei, Ritner says:
"In all such cases, the function of the uraeus hearkens back to its well-known origin as the 'fiery eye' of the sun god sent forth against the god's enemies, whether human, divine, or as in O. Gardiner 363, demonic."
A few pages later, he says:
"Concomitant with the developing ritualized use of four uraei is an increasingly elaborate theological interpretation and identification of the serpents themselves as hypostases of the solar eye… The four 'persons' represented by the clay uraei of the Gardiner ostracon comprise… an appropriation of the defense of the solar bark for a private bedroom."
Szpakowska does not specifically mention the Eye of Ra in relation to the clay cobras (although she does say that the cobra represents "the fiery power of the sun"), but her footnotes point to Ritner's study for details about the beliefs underlying their use. I used Szpakowska's study only to support the statement, which she makes, that the cobras may never have been used to burn anything.
As for the question-mark signs for criteria 1b and 6b, I would like to see the specifics of your concerns so I can address them. A. Parrot (talk) 02:34, 6 February 2012 (UTC)
I give this article 1 more week. The nominator needs to go back and check if all sources explicitly relate the things said to the Eye of Ra. Szpakowska does not relate the cobras to the Eye, but the article sentence referenced to the book does. "Whether literal or metaphorical, the flames in the cobras' mouths, like the fiery venom spat by the Eye of Ra, were meant to dispel the nocturnal darkness". Another example of OR could be "The characteristics of the Eye of Ra were an important part of the Egyptian conception of female divinity in general.[21] Therefore, the Eye was equated with many goddesses". I searched Google Books; most associations are there, so the article has some OR currently, but not much. Some books/sites show the right eye as an image for the Eye of Re. Not sure if it should be included. -- Redtigerxyz Talk 17:47, 12 February 2012 (UTC)
* I have gone over the sources again and removed from the article any statement that is not present in them. There are cases where I stated things that the sources do not state but strongly imply; unfortunately, it's easy to do that when one is immersed in the subject matter, even when one is watching out for it. Everything remaining in the article is directly connected to the Eye of Ra, except the passages by Lesko talking about the sun disk. I left those parts in because the sun disk may not always be the Eye of Ra, but unquestionably it very often is the Eye of Ra (Troy practically treats the two terms synonymously), and I think it gives a better idea of what the Eye is if the article states that the Disk-Thing That Is Sometimes the Eye of Ra may actually be a sphere.
* I would have liked to include an image of an actual eye, but part of the problem is that I don't think the Egyptians applied the right eye/left eye distinction very strictly. For example, this famous amulet from Tutankhamun's tomb is always labeled as an Eye of Horus, even though it's a right eye. There is one image of an eye that, according to an Egyptological book I have, specifically represents the Eye of Ra: a vignette from the Book of the Dead of Neferrenpet in which Thoth gives the Eye to Ra. Unfortunately, I can't find an image of it except in rather low-quality black and white. I could add that image, but considering its quality, I'd rather not. A. Parrot (talk) 00:28, 14 February 2012 (UTC)
* GA Pass. -- Redtigerxyz Talk 17:45, 17 February 2012 (UTC)
Note: In my zeal to demonstrate the article's accuracy, I used long quotations from Ritner's paper in my post on February 6. Not wanting to violate copyright law or Wikipedia rules about non-free content, I am now greatly shortening these quotations. A. Parrot (talk) 01:36, 28 March 2012 (UTC)
|
* In order to avoid further repetitions, I may mention once more that the muscles the function of which is to open or separate the valves have recently been termed divaricators and accessory divaricators by Mr. Hancock ; and those the function of which is to act in the closing of the valves have been termed anterior and posterior occlusors by the same distinguished zoologist. The divaricators are those usually termed " cardinal muscles" (" adductor brevis " of Owen ; " muscles
|
// Generated automatically from java.lang.Iterable for testing purposes
package java.lang;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.function.Consumer;
public interface Iterable<T>
{
default void forEach(java.util.function.Consumer<? super T> p0){} // manual summary
java.util.Iterator<T> iterator(); // manual summary
}
|
import * as $ from "jquery";
import "./style.scss";
interface Options {
token: string;
login: string;
teams: string[];
lastChecked: number;
}
interface User {
login: string;
}
interface Team {
slug: string;
organization: Organization;
}
interface Organization {
login: string;
}
class GitHubMentionHighlighter implements Options {
token = "";
login = "";
teams: string[] = [];
lastChecked = 0;
checkInterval = 86400000; //1000 * 60 * 60 * 24
handles(): string[] {
return this.teams.concat([this.login]);
}
mentions(): HTMLElement[] {
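// Collect all user/member/team mention elements on the page, keeping only those that match one of our handles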
const classes = ".user-mention, .member-mention, .team-mention";
const handles = this.handles();
const mentions = $(classes).toArray();
return mentions.filter((mention) => {
const text = mention.innerText.toLowerCase();
return text[0] === "@" && handles.includes(text);
});
}
highlight() {
const classes = ".timeline-comment, .timeline-entry";
for (const mention of this.mentions()) {
const $mention = $(mention);
$mention.addClass("highlight");
$mention.parents(classes).addClass("highlight");
}
}
private getUser(successCallback: (user: User) => void) {
return $.ajax({
dataType: "json",
url: "https://api.github.com/user",
headers: {
Authorization: `token ${this.token}`,
},
success: (data: User) => {
successCallback(data);
}
});
}
private getTeams(successCallback: (teams: string[]) => void) {
return $.ajax({
dataType: "json",
url: "https://api.github.com/user/teams?per_page=100",
headers: {
Authorization: `token ${this.token}`,
},
success: (data) => {
const teams: string[] = data.map((team: Team) => {
const org = team["organization"]["login"].toLowerCase();
const slug = team.slug.toLowerCase();
return `@${org}/${slug}`;
});
successCallback(teams);
},
});
}
private getOptions(callback: (options: Options) => void) {
return chrome.storage.sync.get(this.options(), (options) => {
this.token = options.token;
this.login = options.login;
this.teams = options.teams;
this.lastChecked = options.lastChecked;
callback(<Options>options);
});
}
private options(): Options {
return {
token: this.token,
login: this.login,
teams: this.teams,
lastChecked: this.lastChecked,
};
}
private setOptions() {
chrome.storage.sync.set(this.options());
}
update() {
this.getUser((user) => {
this.login = `@${user["login"].toLowerCase()}`;
this.getTeams((teams: string[]) => {
this.teams = teams;
this.lastChecked = Date.now();
this.setOptions();
this.highlight();
});
});
}
shouldUpdate(): boolean {
return Date.now() > this.lastChecked + this.checkInterval;
}
constructor() {
this.getOptions(() => {
if (this.token === "") {
return console.warn(
"GitHub Mention Highlighter: Please specify a personal access token via the options page."
);
}
if (this.shouldUpdate()) {
this.update();
} else {
this.highlight();
}
});
}
}
new GitHubMentionHighlighter();
|
plant another kind. The result is a hodge-podge. However much more desirable mixed and natural planting may be on the private lawn, the street planting should be formal and uniform. Every street that is noted for the beauty of its shade trees has only one species of tree planted on it. When the planting and care of street trees is left to individuals, kinds are often selected that do not thrive, or some trees may be neglected, leaving gaps here and there, which spoil the avenue effect.
There are several ways of overcoming this difficulty. Some cities and towns prefer to let the individuals retain authority over the shade trees, but seek to guide them in the selection of kinds and in their care. A few cities furnish trees free, with advice about planting and caring for them. There are a number of tree-planting associations, also, in large cities, which provide trees, send out circulars, etc., without cost to the tree planters, and do much good in this way. But in most cases it has seemed best for the city itself to assume the management of its street trees, in the interest of uniformity.
The duty is sometimes delegated by the city to the park commission. Such, for example, is the policy in the city of Lowell, Mass., by special act of the Massachusetts legislature. The Lowell Park Board enforces rules to the effect that no street tree shall be cut, broken, or otherwise disturbed, nor shall any guy rope, cross bar, placard, or other contrivance be fastened to it, nor shall a tree be used to fasten horses or other animals, without permission of the board. Requests for the removal or trimming of trees are made by the property owners to the board. The 80,000 trees of the city of Washington are under the care of a special commission, which also maintains nurseries. In 1893 New Jersey provided for the appointment of commissions in each municipality to take charge of the planting and care of shade trees.
In the commission method of caring for street trees, the individual property owners really have jurisdiction over the trees, but they are guided and advised by the commission. It is but a step from this policy to the appointment of a tree warden or tree forester, who shall have complete and sole charge of all street trees. This is the prevailing policy in many of the eastern states. The legislature of Massachusetts has provided for the appointment of tree wardens in every town, large or small. The warden has care of all trees in the town, except those under the care of the park commission. He may prescribe regulations for their care, enforced by suitable fines. Anyone who desires to remove or prune a tree makes application to the tree warden, who then announces a public hearing on the proposition.
Thus it is seen that the trend is toward municipal ownership of street trees. Most of the trees in a city are in a narrow strip of land between the lots and the curb. There has been a dual sovereignty over this strip of land, which has not been conducive to the best care of the trees upon it. The idea that the street tree should be communal, not private property, is gaining ground; inasmuch as the tree is a highly useful article of street furnishing, like a piece of fine statuary. In any case, it is preposterous to leave the matter entirely with the individual. No uniformity can result in that way. There should at least be an ordinance providing that a hearing be given before street trees are cut down. Ultimately, every city, town and village will have its tree warden. Let us hope that politics will never enter the office of the tree warden, as it sometimes has the office of park commissioner, to the confusion of park development.
|
The Government will not allow trees to be cut for fence making, and newly fallen trees are needed for firewood. Persons who need fences must, therefore, buy slats at lumber mills in the area — something few Araucanians can afford.
DOMESTICATED ANIMALS
In post-Columbian and pre-Argentine days each family owned a herd of domesticated horses to which tamed wild ones were added occasionally. Tamed wild cattle were also added to domesticated herds. Other domesticated animals were sheep, pigs, dogs, cats, and chickens, and, in recent years, turkeys and geese. Today an occasional family has a few goats.
Animals were not branded. "Every owner knew his own, and every one else knew to whom the animals belonged. The animals, too, knew where they belonged for they always came home no matter how far away they strayed. They do that today." The interpreter, born and reared in Patagonia, knew this to be true.
Oxen are given names when being trained to haul. Names of pairs owned by informants were Valiente (valiant) and Parece (similar) ; Navegando (navigating) and Marenero (sailor) ; Coronel (colonel) and Cuidado (solicitude) ; Principio (beginning) and Recuerdo (remembrance).
A pre-Argentine breed of chicken (pio) known to non-Araucanians as the "Araucanian chicken" was seen about many homes. Three characteristics distinguish them : body feathers are of several colors ; the head is tufted with feathers; and the shells of their eggs are pastel shades of blue, greenish blue, green, and yellowish pink. "That speckled gray-white hen over there with the tuft on her head lays bluish eggs; that one with the mixture of yellow, black, and red feathers lays yellowish-pink eggs." Chickens are fattened on the berries of michai. "Whenever I want to fatten one, I tell her [small daughter] to take it to where the berries of michai hang low. Chickens certainly know how to feed on those berries."
TRADE, EXCHANGE, CASH INCOME
The oldest informants recalled the days when the trade of the Araucanians extended from the Atlantic to the Pacific. Horses were traded with Pampa Indians of the Argentine plains and with Whites as far as Buenos Aires; cattle were traded with Whites as far as Valdivia in Chile; none remembered the days of trade with peoples to the north, the Guarani, Quechua, and Aymara. Non-Araucanian informants,
|
error with an alias in .zshrc file
I have a problem with an alias in my .zshrc file on macOS.
Here is the alias :
alias update='sudo softwareupdate -i -a; brew update; for i in $(brew cask outdated --quiet); do brew cask reinstall $i; done; brew cleanup -r; gem update --system; gem update; upgrade_oh_my_zsh; npm update -g; for x in $(pip3 list -o --format=columns | sed -n '3,$p' | cut -d' ' -f1); do pip3 install $x --upgrade; done';
When I call it, the shell answers:
-for cmdsubst>
I can not find the origin of the problem.
Why would you use an alias for this? A function will avoid all the problems.
The crux of the issue looks to be that you're using single quotes to define the alias, but you're also trying to use single quotes within it. This breaks the continuous string zsh needs to see when you define alias foo=string. You can use a single quote within a single quoted string by ending it, escaping the quote, and starting anew. For example:
alias foo='command '\''$1'\'' another-arg'
Or to include a quote at the end, just end the quoted string and escape just a single quote:
echo 'this is a single quote: '\'
If I understand correctly, if I put double quotes at the beginning and at the end of my alias, would that normally solve the problem?
alias update="sudo softwareupdate -i -a; brew update; for i in $(brew cask outdated --quiet); do brew cask reinstall $i; done; brew cleanup -r; gem update --system; gem update; upgrade_oh_my_zsh; npm update -g;for x in $(pip3 list -o --format=columns | sed -n '3,$p' | cut -d' ' -f1); do pip3 install $x --upgrade; done"
Yeah, it's ok. I've understood.
alias update='sudo softwareupdate -i -a; brew update; for i in $(brew cask outdated --quiet); do brew cask reinstall $i; done; brew cleanup -r; gem update --system; gem update; upgrade_oh_my_zsh; npm update -g;for x in $(pip3 list -o --format=columns | sed -n '''3,$p''' | cut -d''' ''' -f1); do pip3 install $x --upgrade; done'
Thanks a lot
As piojo wrote, the issue is that it is not possible to use single quotes within single quotes.
In this case, instead of working around the quoting, I would recommend to create a function instead of an alias.
update () {
sudo softwareupdate -i -a
brew update
for i in $(brew cask outdated --quiet); do
brew cask reinstall $i
done
brew cleanup -r
gem update --system
gem update
upgrade_oh_my_zsh
npm update -g
for x in $(pip3 list -o --format=columns |
sed -n '3,$p' | cut -d' ' -f1); do
pip3 install $x --upgrade
done
}
That way you need only the quotes you would need on the command line anyway. I think it also improves readability and makes edits easier. (Granted, it is also possible to have multi-line alias definitions)
I would point one towards Don't Read Lines With for. Granted, zsh eliminates some of the bad decisions enshrined in POSIX, but habits folks learn in zsh tend to make their way elsewhere.
@CharlesDuffy Yes, good point. Although it should not be an issue here: Each returned line only contains a single package name, the package names are highly unlikely to contain any characters that may cause issues (white spaces, globs, etc.) and we are not interested in preserving possible empty lines. Also, I only converted the given alias without any modification to the called commands.
One piece of advice relating to aliases is that if the code does not fit on one line it should probably be a function, or at least, if it has this many pipes, make it a function.
|
import { Param } from '@nestjs/common';
import { Mutation, Query, Args, ResolveField, Parent} from '@nestjs/graphql';
import { Resolver } from '@nestjs/graphql';
import { StudentService } from 'src/student/student.service';
import { AssignStudentsToLessonInput } from './assign-students-to-lesson.input'
import { Lesson } from './lesson.entity';
import { CreateLessonInput } from './lesson.input';
import { LessonService } from './lesson.service';
import { LessonType } from './lesson.type';
@Resolver(of => LessonType)
export class LessonResolver {
constructor(
private studentService: StudentService,
private lessonService: LessonService
){}
@Query(returns => LessonType)
lesson(
@Args('id') id: string,
){
return this.lessonService.getLesson(id)
}
@Mutation(returns => LessonType)
createLesson(
@Args('createLessonInput') createLessonInput: CreateLessonInput,
){
return this.lessonService.createLesson(createLessonInput)
}
@Query(returns => [LessonType])
lessons(){
return this.lessonService.getLessons()
}
@Mutation(returns => LessonType)
assignStudentsToLesson(
@Args('assignStudentsToLessonInput') assignStudentsToLessonInput: AssignStudentsToLessonInput
){
const { lessonId, studentIds } = assignStudentsToLessonInput
return this.lessonService.assignStudentsToLesson(lessonId, studentIds)
}
@ResolveField()
async students(@Parent() lesson: Lesson){
return this.studentService.getManyStudents(lesson.students);
}
}
|
Progress Wheel
=============
This is a custom component for Android intended for use instead of a progress bar.




A complete walkthrough of how to use this component in your app
-------------
**XML:**
To implement the view in your xml layout do the following:
1. Add the following to your attrs.xml file (in res/values):
``` xml
<declare-styleable name="ProgressWheel">
<attr name="text" format="string" />
<attr name="textColor" format="color" />
<attr name="textSize" format="dimension" />
<attr name="barColor" format="color" />
<attr name="rimColor" format="color" />
<attr name="rimWidth" format="dimension" />
<attr name="spinSpeed" format="integer" />
<attr name="circleColor" format="color" />
<attr name="radius" format="dimension" />
<attr name="barWidth" format="dimension" />
<attr name="barLength" format="dimension" />
<attr name="delayMillis" format="dimension"/>
<attr name="contourColor" format="color"/>
<attr name="contourSize" format="float"/>
</declare-styleable>
```
2. Add the following code to the root view of your layout:
`xmlns:ProgressWheel="http://schemas.android.com/apk/res/com.visualdenim.schooltraq"`
3. Add the widget code in the appropriate place in your xml file. Here's a sample implementation:
``` xml
<com.todddavies.components.progressbar.ProgressWheel
android:id="@+id/pw_spinner"
android:layout_width="200dp"
android:layout_height="200dp"
android:layout_centerInParent="true"
ProgressWheel:text="Authenticating..."
ProgressWheel:textColor="#222"
ProgressWheel:textSize="14sp"
ProgressWheel:rimColor="#330097D6"
ProgressWheel:barLength="60dp"
ProgressWheel:barColor="#0097D6"
ProgressWheel:barWidth="5dp"
ProgressWheel:rimWidth="2dp" />
```
**Java:**
First you need to either get a ProgressWheel from a layout file, or initialise one. Do this by:
- `ProgressWheel pw = new ProgressWheel(myContext, myAttributes);`
- `ProgressWheel pw = (ProgressWheel) findViewById(R.id.pw_spinner);`
To spin the progress wheel, you just call .`spin()` and to stop it spinning, you call `.stopSpinning()`
Incrementing the progress wheel is slightly more tricky: you call `.incrementProgress()`. However, this is out of 360 (because a circle has 360 degrees), and it will automatically reset once you get past 360. A percentage display is shown automatically, as in the usage sketch below.
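For example, here is a minimal usage sketch. It uses only the methods named in this README and the `pw_spinner` view from the sample layout above; the loop count is arbitrary and purely illustrative.

``` java
// Minimal sketch: grab the wheel declared in the sample layout above.
ProgressWheel pw = (ProgressWheel) findViewById(R.id.pw_spinner);

// Indeterminate mode: spin while work is in progress, then stop.
pw.spin();
// ... perform the task ...
pw.stopSpinning();

// Determinate mode: each call advances the progress (measured out of 360),
// wrapping automatically past 360; the percentage text updates itself.
for (int i = 0; i < 90; i++) { // fill roughly a quarter of the wheel
    pw.incrementProgress();
}
```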
Using as a library project
--------------------------
To use it as a library in Android Studio, please edit build.gradle.
Modify:
apply plugin: 'android'
Into:
apply plugin: 'android-library'
Since Android SDK Tools revision 17 (released March 2012), this component can
be used as a library project. In this case, you do *not* need to copy anything
into your project's attrs.xml, and you must use the following namespace URI,
instead of the above:
`xmlns:ProgressWheel="http://schemas.android.com/apk/res-auto"`
Otherwise, usage should be the same.
Todd Davies - 2012
|
paper-menu with multiple selections, howto deselect all
I use paper-menu with multiple selections (multi). Everything works fine so far, but a deselect-all method seems more complicated.
With html
<paper-menu multi selected-values="{{selectedValues}}">...
Dart
@property
List<String> selectedValues = [];...
Things got binded, and every iron-select/iron-deselect event results in a correct update of the selectedValues list in dart.
With clear('selectedValues') the list empties and the logic behaves like no selection is done, but in Dartium the items that were previously selected remain marked as selected.
I have also tried with the selectedItems list, and deselecting each item via the select method on PaperMenu, but still no successful update in Dartium.
Anyone with ideas how to implement this?
Select with tap from Dartium and deselect with 'select' method, seems not to work together. Wrote an issue on this https://github.com/dart-lang/polymer-dart/issues/691
Found a workaround for the issue with the select method. The menu with selected values can be replaced with a new similar element created with the Dom api. One drawback is the bindings can't be set up, so these need to be hacked with get and set methods at the element. Otherwise this seems to work ok. The calls to the Dom api are shown below.
ParticipantMenu oldPm = $$('#id_filterselection') as ParticipantMenu;
ParticipantMenu newPm = document.createElement('participant-menu');
Polymer.dom(parentNode).insertBefore(newPm, oldPm);
Polymer.dom(parentNode).removeChild(oldPm);
PolymerDom.flush();
|
Pocket Edition v0.9.0 alpha
Gameplay
* New interact button
* PC 1.8 feature : 512-block view distance (certain supported devices only)
* Advanced World Options:
* Option to select the world's type
* Old
* 256×256
* Superflat (Flat in-game on create world screen)
* Infinite
* Not recommended for low-supported devices (iOS 5.x and below).
* Reliable playing size is approximately 40,000×40,000 (+20,000 to -20,000)
* Game Mechanics:
* Taming
* Zombie sieges
Mobs
* Passive:
* Villagers
* Cannot open doors or trade
* Mooshrooms
* Neutral:
* Wolves
* Endermen
* Do not drop Ender pearls
* Hostile:
* Slimes
* Silverfish
World Generation
* Generated structures:
* Villages
* Version exclusive : Generate with wood plank bridges
* Version exclusive : Houses have carpet tables, instead of pressure plate tables
* Caves
* Lava pools
* Rivers
* Abandoned mine shafts
* Includes spider spawners rather than cave spider spawners due to lack of poison effects
* Huge Mushrooms
* Version exclusive : Can be up to 13 blocks tall
* Moss stone boulders
* Strongholds
* Hard to find as Eyes of Ender have not yet been implemented. "Rarely found under villages".
* Ice patch
* Ice spike
* Dungeons
* Desert well
* Trees:
* Jungle trees (small and large)
* Dark oak trees
* Acacia trees
* Version exclusive : Fallen trees
* 2×2 spruce trees
* Oak trees (swamp variant with vines and large oak variants)
* Naturally Generated Blocks (blocks that already existed in the game, but now spawn naturally):
* Tall Grass
* Dead Bushes
* Ferns
* Moss Stone
* Jungle Wood
* Jungle Leaves
* Melon (block)
* Pumpkins
* Structure specific
* Stone Bricks
* Cracked Stone Bricks
* Mossy Stone Bricks
* Stone Bricks Stairs
* Stone Slabs
* Iron Bars
* Torches
* Rails
* Powered Rails
* Oak Wood Planks
* Fences
* Chests
* Cobblestone
* Glass Panes
* Cobblestone Stairs
* Oak Wood Stairs
* Bookshelves
* Black Wool
* Brown Carpet
* Farmland
* Carrot Crops
* Wheat Crops
* Potato Crops
* Furnaces
* Crafting Tables
* Cobweb
* Biomes:
* Jungles
* Mesa
* Plains
* Roofed Forests
* Savannas
* Taiga
* Extreme Hills
* Mushroom Islands
* Flower Forest
* Mega Taiga
* Mega Spruce Taiga
* Swampland
* Desert
* Forests
* Deep ocean
Blocks & Items
* Monster Spawners
* Silverfish
* Spider
* Zombie
* Skeleton
* PC 1.8 feature : The type of mob spawner can be changed using a spawn egg.
* Monster Eggs
* End Portal (block frame only)
* Emerald Ore
* Emerald
* Block of Emerald
* Hardened Clay
* Stained Clay
* Available in all 16 colors
* Saddles (Do not function as of yet)
* End Stone
* Red Sand
* Mycelium
* Huge Mushrooms
* Podzol
* Packed Ice
* Version exclusive : Grassless Dirt
* Granite
* Polished Granite
* Andesite
* Polished Andesite
* Diorite
* Polished Diorite
* Wood Planks
* Acacia Wood Planks
* Dark Oak Wood Planks
* Wood
* Acacia Wood
* Dark Oak Wood
* Slabs
* Acacia Wooden Slabs
* Dark Oak Wooden Slabs
* Stairs
* Acacia Wooden Stairs
* Dark Oak Wooden Stairs
* Leaves
* Acacia Leaves
* Dark Oak Leaves
* Plants:
* Saplings
* Jungle Sapling
* Acacia Sapling
* Dark Oak Sapling
* Flowers:
* Poppy
* Replaces the Cyan Flower
* Blue Orchid
* Allium
* Azure Bluet
* Tulip
* Available in all 4 colors
* Oxeye Daisy
* Sunflower
* Lilac
* Rose Bush
* Peony
* Others
* Vines
* Lily Pads
* Cocoa Pods
* Double Tallgrass
* Large Fern
* Foods
* Cookies
Creative Inventory Additions
* Includes all new blocks and items added in 0.9.0
* Raw Beef
* Bone
* Bowl
* Spawn Eggs
* Villager
* Wolf
* Mooshroom
* Creeper
* Enderman
* Silverfish
* Skeleton
* Slime
* Spider
* Zombie
* Zombie Pigman
* Chiseled Stone Brick
Other
* Particles:
* Water Dripping Particles
* Lava Dripping Particles
World Generation
* Cold-en oak trees will not generate in maps with the new terrain.
* Oak trees now generate with different heights and structures
* Spruce trees now generate with different heights and structures
Blocks & Items
* Beds no longer restore health in normal difficulty
* Snow (cover) is now craftable
* Correct name for stairs, slabs and spawn eggs.
* Gravel no longer falls from spawn
* All carpets can be obtained in Survival
* New insta ticking of water and lava.
* Smooth lighting added for:
* Beds
* Fences
* Cobblestone Walls
* Cobweb
* Chests
* Leaves
* Smooth lighting removed for:
* Glowstone
* Glass
* Jack o'Lanterns
* Lit Redstone Ore
* Improved performance of TNT.
General
* Prettier clouds.
* New culling of clouds.
* The fog is darker underground.
* Textures:
* Separate textures for the top of Birch, Spruce, and Jungle Wood
* Jungle leaves will be permanently opaque
* New leaves rendering
* Biome-tinted blocks
* Inventory/GUI:
* World list now displays the storage size of each world
* A brand new organised layout for the crafting screen
* Only shows craftable recipes
* New layout for the survival inventory
* Each device now has its own limit for view distance
Mobs
* "New" mob spawning algorithm
* Lighter color when the mobs are hurt
* Passive mobs now spawn in little groups
* Zombies
* Zombie AI and mechanics changed, made much more horde-like.
* When damaged, zombies have a low chance to spawn more zombies.
* Spiders
* Now spawn from leaves, creating a higher amount of spiders in tree-rich biomes at night
* Animation is now twice as fast
* Improved AI
* Creepers
* Now walk faster
* Improved AI
* No longer float two pixels off the ground
Other
* The options.txt file now reads the render distance as raw blocks
* Support for immersive mode (hides the on-screen navigation bar) for devices running Android 4.4
* New Leveldb based saving format.
* New entity handling.
* Remade network code (mostly the same packages but new system).
* Tile entities are saved differently.
* New culling algorithm.
* Removed the Camera entity
* Updated light generation
* No more client side messages when a player joins
Fixes
4 bugs fixed
From released versions before 0.9.0
* Mipmapping makes fire look like a big orange blob
* Minecart in multiplayer problems
* Glitched name and description for baked potatoes
* Crafting signs only giving one unit
|
How to do the modulo operation in ARM assembly?
I'm trying to add the values in two registers, and modulo them by 8.
So, in C code, it would be like this
a = a + b;
c = a % 8;
how to do this above operation in ARM assembly.
Not all ARM processors have a direct instruction for division or modulo, so in most cases, a call to the modulo operation would end up as a function call to e.g. ___modsi3.
In this particular case, when doing modulo for 8, if the values can be assumed to be nonnegative, you can do the % 8 part as & 7. In that case, the assembly for your case would be:
add rA, rA, rB
and rC, rA, #7
What case is this assuming since it fails for certain values; e.g., 71 mod 15 = 6 test=11. Unless I'm missing something.
modulo 8 is an and with 7, because 8 is a power of 2. but modulo 15 is a completely different thing you cant use base 2 and operations you have to actually divide.
@old_timer, yeah I assumed that may have been the case. I have used a subtraction based mod function; however, it's much slower than a div (where available) based mod function due to the loops.
|
British Barclays bank seeks to bring bitcoin into service
PanARMENIAN.Net - British multinational bank Barclays has been in discussions with regulators about bringing digital currencies like bitcoin into service, CNBC reported citing the bank's UK chief executive Ashok Vaswani, RT said.
"We have been talking to a couple of fintechs [financial technology firms – Ed.] and have actually gone with the fintechs to the FCA [Britain’s Financial Conduct Authority – Ed.] to talk about how we could bring, the equivalent of bitcoin, not necessarily bitcoin, but cryptocurrencies into play," Vaswani told CNBC.
According to him, it’s an "obviously new area we've got to be careful with...”
"(We're) working on it, (it's) not ready for prime time, we'll get there soon," he added without giving any other details.
Barclays has been experimenting with bitcoin and working with digital currency start-ups. The bank wants customers to be able to receive bitcoin payments directly in their accounts.
Last year Barclays partnered with social payments app Circle which allows users to send money in messages and supports bitcoin. The start-up received a license from the FCA in 2016. Barclays provided Circle with an account to store Sterling, as well as the payments network to transfer money.
Two years ago the bank signed a deal with bitcoin company Safello to explore financial applications of the blockchain technology that powers the cryptocurrency.
Other big banks have also become interested in blockchain technology. They include UBS, Deutsche Bank, Santander and BNY Mellon. Last August they partnered to create a new digital currency to facilitate intra-bank settlements.
Banks have typically been very cautious in dealing with cryptocurrencies. However, bitcoin’s recent rocketing rise in value, making it the world's biggest cryptocurrency by market cap, has seen growing retail investor interest.
The FCA is still cautious about bitcoin, warning institutions which deal with the virtual currencies.
"We don't prohibit regulated firms from engaging in digital currency trading, nor do we prohibit banks from offering banking services to deal with currency firms that use [blockchain],” said FCA’s UK executive director of strategy Chris Woolard.
“I am not saying that we view digital currencies as an inherently bad thing… but we do have to exercise a degree of caution," he added.
|
The Long- Dairy Institute was held at Hudson. Bad roads lessened the attendance, but we learned afterwards, what we did not know before, that the dairy interest in the vicinity of Hudson was not the leading interest. As a consequence there was not the intense enthusiasm evoked by the dairy meeting that there was by the Fruit Institute.
AFTERNOON.
1:30— "The farmer's side of factory butter making," Prof. Clinton D. Smith. 2:30 — A general discussion on dairy subjects, including feeding and care of the cows, handling the milk, and making butter and cheese. Papers to be 30 minutes lung, and followed by discussion in every case.
|
Python testing: Simulate ImportError
I have the following code
try:
from foo_fast import bar
except ImportError:
from foo import bar
def some_function(a, b):
return bar(a, b)
I now want to test the two cases where foo_fast could be imported and where it couldn't.
Using pytest and pytest-mock, I naturally want to encapsulate the two situations in a pytest fixture, so I thought I would use
@pytest.fixture(params=(True, False))
def use_fast(request, mock):
if not request.param:
mock.patch("foo_fast.bar", side_effect=ImportError)
return request.param
def test_foo(use_fast):
assert some_function(1, 2)
However it seems the import statement is only run once before the test starts so I cannot mock the ImportError.
How does one mock these ImportError cases?
You can use monkeypatch.setitem() to set sys.modules['foo_fast'] = None
@pytest.fixture(params=(True, False))
def use_fast(request, monkeypatch):
if not request.param:
monkeypatch.setitem(sys.modules, 'foo_fast', None)
return request.param
def test_foo(use_fast):
assert some_function(1, 2)
Note that in Python 2
import foo_fast
will then raise an ImportError, while in Python 3 it raises a ModuleNotFoundError (which is a subclass of ImportError, so the try...except blocks can remain unchanged)
It is possible with the mock library:
def test_import_error(self):
with mock.patch.dict('sys.modules', {'foo_fast.bar': None}):
# your tests with foo.bar
In this case from foo_fast import bar will raise an ImportError.
|
#!/usr/bin/env php
<?php
if (file_exists(__DIR__ . '/../../../autoload.php')) {
require_once __DIR__ . '/../../../autoload.php';
} else {
require_once __DIR__.'/../vendor/autoload.php';
}
set_time_limit(0);
try {
$opts = \Cauditor\Utils::getopts(array(
'a' => 'all',
'h' => 'help',
'p:' => 'path:',
'r:' => 'repo:',
'b:' => 'branch:',
'c:' => 'commits:',
't:' => 'target:',
));
if (isset($opts['help'])) {
$runner = new \Cauditor\Runners\Help();
$runner->execute();
exit(0);
}
$target = isset($opts['target']) ? $opts['target'] : 'https://www.cauditor.org';
$repo = isset($opts['repo']) ? $opts['repo'] : false;
$branch = isset($opts['branch']) ? $opts['branch'] : false;
$path = isset($opts['path']) ? $opts['path'] : false;
$path = $path ?: getcwd();
if ($repo !== false) {
if (!isset($opts['all']) && !isset($opts['commits'])) {
// need a depth of 2 to get hash of previous commit
exec("git clone --depth=2 $repo $path");
} else {
exec("git clone $repo $path");
}
}
chdir("$path");
// bootstrap
$config = new Cauditor\Config($path, $path.DIRECTORY_SEPARATOR.'.cauditor.yml');
$analyzer = new Cauditor\Analyzers\PDepend\Analyzer($config);
$api = new \Cauditor\Api($target);
if (isset($opts['all'])) {
$runner = new \Cauditor\Runners\All($api, $analyzer);
} elseif (isset($opts['commits'])) {
$commits = explode(',', $opts['commits']);
$runner = new \Cauditor\Runners\Commits($api, $analyzer, $commits);
} else {
$runner = new \Cauditor\Runners\Current($api, $analyzer);
}
if (isset($opts['branch'])) {
$runner->setBranch($opts['branch']);
}
$runner->execute();
} catch (Exception $e) {
    // any failure: swallow the exception and signal the error via a non-zero exit code
    exit(1);
}
|
Definition:Measure (Measure Theory)
Let $$\mathcal A$$ be a $$\sigma$$-algebra.
Then $$\mu$$ is called a measure on $$\mathcal A$$ iff it has the following properties:
$$(1)$$: For every $$S \in \mathcal A$$:
* $$\mu \left({S}\right) \ge 0$$
$$(2)$$: For every sequence $$\left({S_n}\right)_{n \ge 1}$$ of pairwise disjoint sets in $$\mathcal A$$:
* $$\mu \left({\bigcup_{n \mathop = 1}^{\infty} S_n}\right) = \sum_{n \mathop = 1}^{\infty} \mu \left({S_n}\right)$$
(that is, $$\mu$$ is a countably additive function).
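Example (an illustration only, not part of the source definition): the Dirac (point-mass) measure at a fixed point $$x$$, defined for every $$S \in \mathcal A$$ by $$\delta_x \left({S}\right) = 1$$ if $$x \in S$$ and $$\delta_x \left({S}\right) = 0$$ otherwise, satisfies both properties: it is clearly non-negative, and for pairwise disjoint $$S_1, S_2, \ldots$$ the point $$x$$ belongs to at most one $$S_n$$, so $$\delta_x \left({\bigcup_{n \mathop = 1}^{\infty} S_n}\right) = \sum_{n \mathop = 1}^{\infty} \delta_x \left({S_n}\right)$$.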
Elementary Consequences
It follows from Measure of Null Set is Zero that $$\mu \left({\varnothing}\right) = 0$$.
Note
The definition of a measure is usually given with the further requirement that:
* The measure of the empty set is defined as being zero.
|
How do you filter appsync queries on the server-side?
I'm working with AWS Amplify on a social media platform and I have a graphql API for users Timelines and Posts.
The web app subscribes to the creation of Timelines and I'm trying to filter by the deleted field on Post. I know how to do it client-side, but I'm having trouble filtering in AppSync itself (since I don't want users to be able to access deleted posts). Would I use a resolver? If so, where would that go in Amplify?
Schema:
type Post
@model(subscriptions: { level: public })
@key(name: "SortByTimestamp", fields:[ "type", "timestamp"], queryField: "listPostsSortedByTimestamp")
@key(name: "BySpecificOwner", fields:[ "owner", "timestamp"], queryField: "listPostsBySpecificOwner")
@auth(rules: [
# {allow: owner, ownerField: "owner", provider: userPools, operations:[read, create, delete]},
{allow: private, provider: userPools, operations:[read]},
{allow: private, provider: iam ,operations:[create]},
])
{
type: String! # Always `post`
id: ID
deleted: Boolean
title: String!
content: String!
owner: String
likes: Int!
downloads: Int!
timestamp: AWSTimestamp!
}
type Timeline
@model(subscriptions: { level: public })
@auth(rules: [
{allow: owner, ownerField: "userId", provider: userPools, operations:[read, create]},
{allow: private, provider: iam, operations:[create]},
])
@key(fields: [ "userId", "timestamp"])
{
userId: ID!
timestamp: AWSTimestamp!
postId: ID!
post: Post @connection(fields: [ "postId"])
}
Did you find a solution or a workaround for this already? I have a similar issue here: https://stackoverflow.com/questions/68169815/how-to-add-server-side-business-logic-for-a-aws-amplify-graphql-api with unfortunately very few views like yours.
@Philipp Unfortunately not, I eventually ended up switching to Firebase, which made things much more streamlined and has better documentation for stuff like this.
John Naylor thank you very much for your feedback. See also the "Complex Logic" section of this article: https://www.ie.com.au/blog/aws-amplify. I will probably switch to Firebase too.
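Neither reply above gives a working fix. One workaround that is often suggested for this kind of soft-delete filtering (not something confirmed in this thread) is to route the query through a Lambda resolver via the @function directive and drop deleted posts server-side. The sketch below is a hypothetical Python handler; the table name, index name and environment variable are assumptions that would need to match whatever Amplify actually generated for the Post model.
import os

import boto3
from boto3.dynamodb.conditions import Attr, Key

# Hypothetical names: adjust POST_TABLE and the index to what Amplify generated.
TABLE_NAME = os.environ.get("POST_TABLE", "Post-dev")

dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(TABLE_NAME)


def handler(event, context):
    """AppSync Lambda resolver: list a user's posts, hiding soft-deleted ones."""
    owner = event["arguments"]["owner"]
    response = table.query(
        IndexName="BySpecificOwner",  # assumed to match the @key name in the schema
        KeyConditionExpression=Key("owner").eq(owner),
        # Keep items where `deleted` is absent or not true.
        FilterExpression=Attr("deleted").not_exists() | Attr("deleted").ne(True),
    )
    return response["Items"]
The same filtering could also be expressed in a VTL response mapping template attached to the generated resolver; the Lambda route is simply easier to maintain from within an Amplify project.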
|
The Goodbye People
The Goodbye People is a play by Herb Gardner. The play had a brief run on Broadway in 1968 and was made into a film which was released in 1986.
Plot
The dramedy focuses on elderly Max Silverman, who is determined to reopen the Coney Island Boardwalk hot dog stand he closed twenty-two years earlier for renovation, despite the fact he's recovering from a severe heart attack and it's the middle of February. He demands assistance from his daughter Nancy, who abandoned her husband, changed her name from Shirley, and had a nose job in an effort to assume a new and more exciting identity but has come to realize it takes more than a $4,000 rhinoplasty to erase the past. Into their lives arrives neurotic Arthur Korman, who comes to the beach to watch the sunrise and forget he despises his career choice and inability to quit a job he hates. With the help of each other, the trio manages to jump start their individual dreams before tragedy intercedes.
Productions
The play was first produced at the Berkshire Theatre in Stockbridge, Massachusetts. It was directed by Elaine May and starred Gene Saks, Zohra Lampert and Gabriel Dell. As May relates: "...it is not a special play about New York Jews. It is a quintessential play about America, about discounting the odds, about having hope with no evidence..."
The play premiered on Broadway at the Ethel Barrymore Theatre on December 3, 1968, and closed on December 7, 1968, after seven performances and 16 previews. Directed by Gardner, the cast included Milton Berle as Max Silverman, Brenda Vaccaro as Nancy Scott, Bob Dishy as Arthur Korman and Tony Lo Bianco as Max's attorney son Michael. Vaccaro was nominated for the 1969 Tony Award for Best Actress in a Play.
The play was revived on Broadway, produced by Fritz Holt and directed by Jeff Bleckner. It opened at the Belasco Theatre on April 30, 1979, and closed the same date, after one performance and sixteen previews. The cast included Herschel Bernardi as Max, Melanie Mayron as Nancy, Ron Rifkin as Arthur and Michael Tucker as Michael.
The play was presented at the Solari Theatre, Los Angeles, California, starting on January 2, 1979. Directed by Jeff Bleckner, the cast starred Peter Bonerz (Arthur), Herschel Bernardi (Max) and Patty Duke Astin (Nancy).
Film
Gardner adapted his play for a feature film he also directed. The cast included Martin Balsam as Max, Pamela Reed as Nancy, and Judd Hirsch as Arthur, with Tucker reprising his stage role as Michael. The film was finished in 1984, but because of a change in the distribution company, the film was not released until 1986. The Goodbye People has parallels to Gardner's life. "He grew up near Coney Island, where the play and film are set, and his Uncle Max had a frankfurter stand on the boardwalk called Max's Busy Bee but offered a Hawaiian motif."
|
std::vector::operator[] doesn't test type
I have a struct like this:
struct VrtxPros{
long idx;
std::vector<std::string> pros;
VrtxPros(const long& _idx=-1, const std::string& val="") : idx(_idx)
{
if ( !val.empty() && val!="" )
pros.push_back(val);
}
};
and later in the code I use it like that:
long idx = 1234;
VrtxPros vp( 2134, std::string("-1") );
if ( margin ) vp.pros[0] = idx;
The compiler has no problem with that, which surprises me, because operator[] should return a reference to a std::string.
I could not find an operator= in std::string which would accept a long as the source.
Why does the code compile?
basic_string& operator=( CharT ch ); 4) Replaces the contents with character ch. Your long is probably being converted to a character. Other things to note are that your != "" check is redundant if I didn't miss something, and that passing "-1" is fine because std::string has an implicit conversion constructor for const char *.
@mistapink, A long is implicitly convertible to a char. http://ideone.com/wNd4u
@mistapink, As for CharT, it's the first template argument to std::basic_string<>, which is char for std::string. To compare, CharT would be wchar_t for std::wstring.
g++ implicitly casts int to char.
@Hrishi, g++ shouldn't be the only one: int x = 999; ...; char c1 = x; // OK, though it might narrow (in this case, it does narrow) Taken from the C++11 standard, § 8.5.4
A char can be assigned to a std::string, and a long can be implicitly converted to a char, so a long can be assigned to a std::string. Your compiler will probably give a warning about this kind of implicit conversion (turn up the warning level and you'll see it, if you don't already).
See the #4 operator= listed here. Notice no constructor overload takes just a char, so this sort of thing can only be done for assignment.
For that matter, you can do this too:
std::string wow;
wow = 7ull; // implicit unsigned long long to char conversion
wow = 1.3f; // implicit float to char conversion
I get no warnings from my linked sample. It's only operator= that does this, not a constructor, so it's pretty deliberate most of the time.
@chris in VS2010 with warning level 4 I get a warning from your code and mine.
@chris: Yes, it is deliberate in the standard, and no, I don't really understand why, but there is an overload of operator= in std::basic_string<> that takes a single CharT object. Why you would want such an operation is something I have wondered about in the past.
Use -Wconversion for g++ to get a warning for the implicit conversion from long to char.
Ah, surprised I never had that one included.
|
A global model of carbon, nitrogen and phosphorus cycles for the terrestrial biosphere
Carbon storage by many terrestrial ecosystems can be limited by nutrients, predominantly nitrogen (N) and phosphorus (P), in addition to other environmental constraints: water, light and temperature. However, the spatial distribution and the extent of both N and P limitation at the global scale have not been quantified. Here we have developed a global model of carbon (C), nitrogen (N) and phosphorus (P) cycles for the terrestrial biosphere. Model estimates of steady state C and N pool sizes and major fluxes between plant, litter and soil pools, under present climate conditions, agree well with various independent estimates. The total amount of C in the terrestrial biosphere is 2767 Gt C, and the C fractions in plant, litter and soil organic matter are 19%, 4% and 77%. The total amount of N is 135 Gt N, with about 94% stored in the soil, 5% in the plant live biomass, and 1% in litter. We found that the estimates of total soil P and its partitioning into different pools in soil are quite sensitive to biochemical P mineralization. The total amount of P (plant biomass, litter and soil) excluding occluded P in soil is 17 Gt P in the terrestrial biosphere, 33% of which is stored in the soil organic matter if biochemical P mineralization is modelled, or 31 Gt P with 67% in soil organic matter otherwise.
Introduction
Simulations using global climate models with a fully coupled carbon cycle showed that warming could reduce the net carbon storage in the terrestrial biosphere globally, resulting in an increase in atmospheric CO2 concentration and further warming of 0.1 to 1.5 °C by 2100 (Friedlingstein et al., 2006). However, there are considerable uncertainties in those predictions. For example, none of those models explicitly included nutrient limitations and their responses to climate and higher (CO2). Both field measurements and theoretical studies have shown that nitrogen limitation can have a significant influence on how the carbon cycle will respond to increasing (CO2) (Luo et al., 2004) and warming (Medlyn et al., 2000). This is also supported by recent studies (Sokolov et al., 2008; Churkina et al., 2009; Thornton et al., 2009; Wang and Houlton, 2009; Zaehle et al., 2010).
Globally N and P are the most common nutrients limiting plant growth and soil carbon storage (Vitousek and Howarth, 1991;Aerts and Chapin, 2000).A number of global biogeochemical models have been developed to account for N limitation on the productivity of and C uptake by the terrestrial biosphere (Parton et al., 1987;McGuire et al., 1995;Thornton et al., 2009;Xu-Ri and Prentice, 2008;Zaehle et al., 2010;Gerber et al., 2010), but only the CENTURY model (Parton et al., 1987) simulates biogeochemical cycles of C, N and phosphorus (P) and its P cycle submodel has yet to be applied globally.There are some strong reasons why the P cycle should be included in global models for studying the interactions between climate and biogeochemical cycles: (1) both theory and experiments suggest that much tropical forest and savannah are phosphorus limited (Aerts and Chapin, 2000), and tropical forests and savannahs account for about 40% of global vegetation biomass (Saugier et al., 2001) and 45% of global terrestrial net primary productivity (Field et al., 1998) ; (2) a recent study by Houlton et al. (2008) showed that biological N fixation, the largest N input to the un-managed terrestrial ecosystems at present is closely related to phosphatase production in the tropics; (3) responses of N and P cycles to climate, increasing atmospheric (CO 2 ) and human activities can be quite different because of the different biogeochemical controls on N and P cycles in the terrestrial biosphere (Vitousek et al., 1997).For example, the external input to the unmanaged ecosystems is dominated by N fixation for N, but by weathering and dust deposition for P for most unmanaged lands.Loss from the unmanaged ecosystems is dominated by gaseous fluxes via denitrification or leaching for N and by phosphate leaching for P. Misrepresenting nutrient limitation in the tropics may lead to incorrect predictions under future climate conditions.An early study showed that the relative response of leaf photosynthesis to elevated (CO 2 ) is smaller when plant growth is P limited (Conroy et al., 1990) as compared to the response under N-limited conditions and (4) some terrestrial ecosystems may shift from N limitation to P limitation under high N input (Perring et al., 2008) or future climate and higher (CO 2 ) conditions (Menge and Field, 2007;Matear et al., 2010).
The objectives of this study are (1) to develop a global biogeochemical model of C, N and P cycles for the terrestrial biosphere for use in a global climate model or earth system model; (2) to construct steady state C, N and P budgets for the terrestrial biosphere for the 1990's using available information of plant biomass, litter fall rate and soil C and N and estimates of P for different soil orders; and (3) to provide a quantitative estimate of the extent of N and P limitations and their uncertainties on plant productivity globally under the present conditions.Our model calibration strategy assumes that all fluxes are in steady state in the 1990s and the limitation of this strategy is discussed later (see Sect. 6).
While a reasonable amount of information is available for the pools and fluxes of the terrestrial carbon and nitrogen cycles (Post et al., 1982, 1985, Field et al., 1998 for example), global datasets are scarce for the P cycle and highly uncertain.Spatially explicit estimates of soil P amount are not yet available globally.Even the estimates of global total amount of soil P vary widely from, for example 200 Gt P (Jahnke, 1992) to 40-50 Gt P (Smil, 2000), due to different assumed mean P content (0.1 or 0.05%) and soil thickness (60 or 50 cm).To overcome the data limitation, we will use a well calibrated carbon cycle model (CASA') as our carbon cy-cle submodel and modeling framework and make use of observed coupling among all three cycles to constrain the nutrient (N and P) pools and fluxes.
The model CASA' was developed from the CASA model (Randerson et al., 1997) and has been used in studying the carbon-climate feedback globally (Fung et al., 2005).CASA' uses NPP from a coupled land surface model whereas CASA is an offline model using satellite derived NPP (Randerson et al., 2009).We have added N and P cycles to CASA' by adapting the N and P cycle model developed by Wang et al. (2007) and Houlton et al. (2008) from single litter and soil organic matter (SOM) pools to the multiple litter and SOM pools used by CASA'.This is important because a recent study showed that multiple-pool representation is required for studying the response of soil respiration at decadal or century time scale (Knorr et al., 2005).
Because the carbon cycle of CASA' model has been well calibrated (Randerson et al., 1997) and has been used in several previous studies (Fung et al., 1997;Randerson et al., 2002 for example), we address the question here of what sizes the nutrient pools and fluxes should be for the global C cycle as represented in the CASA' model for the 1990's.To estimate the pool sizes and fluxes of N and P, we drive the model using the spatially explicit estimates of monthly nutrient-unlimited NPP for the 1990's as input to our model, and calculate nutrient-limited NPP, and nutrient limitation factors relative to that in the 1990's for each land point at steady state.The steady state assumption is used to reduce the dependence of the estimates of N and P pools and fluxes on their initial estimates for which we currently have little global-scale spatially explicit information.The couplings of the three cycles, as represented by our model, are calibrated using independent estimates globally, such as leaf N:P ratio and fraction of P in different soil pools.The modelled pools and fluxes are then compared with estimates from other studies.
In Sects.2-4, we describe the model, model calibration, and model evaluation under present climate conditions against independent estimates of various pool sizes and biogeochemical fluxes at global scales.Section 5 describes the predicted nutrient limitation globally under the present conditions.Section 6 discusses limitations of the present study, and future studies to address those limitations.
Model description
The pools used to represent the C, N and P cycling through the terrestrial ecosystem in plants, litter and soil are shown in Fig. 1. Plants are divided into leaf, wood and root pools, litter into metabolic litter, structural litter and coarse woody debris pools, and soil into microbial biomass, slow and passive pools. The turnover rate depends on soil temperature, moisture and texture for litter and soil pools (Randerson et al., 1997) or on biome for plant pools. There is one additional pool for N (inorganic N (NO3− + NH4+) in the soil) and three additional P pools (labile, sorbed and strongly sorbed P) in our model. Change in a pool size with time is governed by a differential equation that is numerically integrated daily. We shall present an overview of each of the three cycles and their interactions in the following sections. A detailed description including key equations and parameter values is given in the appendices. A full list of symbols and their definitions is provided in Appendix A.
(Fig. 1 caption: plant pools (leaf, wood, root), litter pools (metabolic, structural, coarse woody debris) and soil pools (yellow brown: microbial biomass, slow and passive). One inorganic soil mineral N pool and three other P pools are also represented. Arrows between pools show the direction of C, N and P flow. For N and P, external inputs are deposition (N and P), weathering (P), fixation (N) and fertilizer addition (N and P); output, in red, is loss by leaching or gaseous loss from the ecosystem. Plants take N from the inorganic N pool and P from the labile P pool in soil.)
Carbon cycle
The carbon cycle is based on CASA' model (Fung et al., 2005).We reduced the number of carbon pools by combining surface litter with soil litter, and surface microbial biomass with soil microbial biomass.This gives three discrete pools in the litter: structural, metabolic and coarse woody debris pools and three organic pools in the soil: microbial biomass, slow and passive pools.The fluxes between different pools are modeled as in CASA'.Details are given in Appendix B.
Transfer coefficients from plant pool i to litter pool j (b_j,i) and from litter pool j to soil pool k (c_k,j) are calculated as in the CASA' model (Fung et al., 2005). Turnover rates of litter carbon (μ_j) and soil carbon (μ_k) are a function of substrate quality (lignin:N ratio), soil temperature, moisture and soil texture (Randerson et al., 1997). The turnover rate of leaves is calculated as a function of leaf age (Arora and Boer, 2005), and the turnover rates of woody tissue and fine roots are constant within each biome but vary with biome type (see Table 1).
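Appendix B is not reproduced in this excerpt. As a sketch only, inferred from the description above rather than quoted from the paper, the donor-controlled pool structure implied here can be written for a litter pool $j$ as

$$\frac{\mathrm{d}C_j}{\mathrm{d}t} \;=\; \sum_i b_{j,i}\,\mu_i\,C_i \;-\; \mu_j\,C_j ,$$

where the first term is the carbon transferred from the plant pools $i$ at their turnover rates $\mu_i$ and the second term is the loss of litter carbon by decomposition at rate $\mu_j$; the soil pools follow the same pattern with the transfer coefficients $c_{k,j}$.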
Because the N:C ratios of litter pools are much lower than those of soil, decomposition of litter carbon can be limited by available soil mineral N. When litter decomposition is not N limited, decomposition of litter and soil is limited by the amount of substrate, not its quality.When litter or soil carbon is decomposed, some of the decomposed carbon is respired as CO 2 .Heterotrophic soil respiration is calculated as the sum of the respired CO 2 from the decomposition of all litter and soil organic C pools.We assumed that the storage change of gaseous CO 2 in the soil is negligible, therefore the surface CO 2 flux is equal to CO 2 production in the soil.The difference between NPP and soil respiration is net ecosystem C exchange (NEE) between the land surface and atmosphere.
Input to the carbon cycle includes nutrient unlimited NPP, and initial carbon pool size, output are nutrient limited NPP, soil respiration, NEE and model pool sizes.Nutrient unlimited NPP can also be provided by a global land surface model when CASACNP is coupled to a global climate model.In this study, we used the scaled NPP from CASA simulation as the nutrient unlimited NPP (see Sect. 2.5).
Nitrogen cycle
The nitrogen cycle is based on the model developed by Parton et al. (1987) and Wang et al. (2007).Similar to the C cycle, the change in N in each pool is governed by a differential equation (see Appendix C).An additional mineral N pool in soil is also represented as only mineral N is assumed to be taken up by plants.We do not include uptake of organic N in soil by roots (Schimel and Bennett, 2004).Ammonia volatilization is not modeled as it usually occurs when soil pH is above 8 (Freney et al., 1983), and the fraction of land with pH>8 is very small globally (Batjes, 1996).
We do not explicitly model the processes of nitrification and denitrification.Therefore our model will need further improvement in the future.In our model gaseous N loss is assumed to be proportional to net N mineralization based on the "holes-in-the-pipe" idea (Firestone and Davidson, 1989) and the rate of leaching loss is proportional to the soil inorganic N pool size.Leaching loss of soil organic matter is not included in our model.
The nitrogen cycle is closely coupled to the carbon cycle; carbon decomposition and gross N mineralization is coupled by the N:C ratios of the substrates (compare equations B2 and B3 with equations C2 and C5).Net N mineralization rate (F n,net ) is the difference between gross N mineralization (F n,gr ) and N immobilization (F n,im ).When net mineralization rate is negative (gross N mineralization < N immobilization), and the additional amount of mineral N required by N immobilization can not be met by the amount of mineral N available, the litter carbon decomposition rate is reduced (see Appendix C Eq. C12 for m n ).
Nitrogen uptake by plants is modeled as a function of soil mineral N pool size and the demand by plant growth (Eq. C7), similar to the TEM model (Melillo et al., 1993). The nitrogen demand is a product of the maximal N:C ratio and the NPP allocated to each plant pool, minus the amount of resorbed N from that pool. When the uptake is greater than the minimal demand, the amount of uptake nitrogen allocated to each pool is in proportion to the demand. During senescence, some fraction of plant tissue nitrogen is resorbed to live tissue, and the remainder goes to the litter pool. Leaf and root litter are partitioned into metabolic litter and structural litter. The N:C ratio is fixed for structural litter (=1/125) but variable for metabolic litter. Woody litter goes to the coarse woody debris pool directly. Only the N:C ratios of the soil organic matter and structural litter pools are fixed. N:C ratios of all plant pools are allowed to vary within prescribed ranges (see Table 1).
(Table 1 caption: Wetland, urban land and land ice in the IGBP biome classification are not included in our simulations. The mean N:C ratio of leaves is based on estimates from the Glopnet dataset for each biome (Wright et al., 2004), and the mean P:C ratio for each biome is calculated from the mean leaf N:C ratio from the Glopnet dataset and the leaf N:P ratio estimated in this study for each biome. Minimal and maximal leaf N:C or P:C ratios are assumed to be 0.8 and 1.2 times the mean leaf N:C or P:C ratios for each biome. NPP allocation coefficients during steady leaf growth (a_leaf, a_wood, a_root) and mean residence times of plant tissue (1/μ_i) are based on the CASA model. Estimates of N:C and P:C ratios are based on Weedon et al. (2009) for woody tissues and on Gordon and Jackson (2000) for roots. Parameters leaf N:P ratio, x_npmax, v_pmax and soil C:N are estimated for each biome during model calibration. The numbers in brackets for leaf N:P ratio are the one standard error of the mean from our calibration. Leaf N:P ratio is not fixed in model simulations.)
Input of N to the model includes atmospheric N deposition (both wet and dry), N fertilizer application, N fixation (both symbiotic and asymbiotic) and output includes N leaching and gaseous loss.
The phosphorus cycle
The phosphorus cycle is based on the model of Wang et al. (2007) and Houlton et al. (2008). The differential equations used to describe the rate of change of each pool are presented in Appendix D. Three differential equations are used to represent the dynamics of labile, sorbed and strongly sorbed phosphorus in soil. The P:C ratios of the three different plant pools can vary within a given range for each biome; therefore the N:P ratios of plant and litter pools can vary.
The N:P ratios of the newly formed soil organic pools are fixed.However the N:P ratios of the slow and passive pools will change as P in these two pools can be mineralized both biologically and biochemically.The biological P mineralization is the same pathway as N mineralization by microbial activities, and the rate of gross biological P mineralization is calculated as the carbon decomposition rate divided by the P:C ratio of the substrate.P immobilization rate is calculated as the N immobilization rate divided by the N:P ratio of different soil pools.The N:P ratio of the newly formed soil organic pool is 4 g N (g P) −1 for microbial biomass (Cleveland and Liptzin, 2007) and 7 g N (g P) −1 for the slow and passive pools for highly weathered soil orders and 5 g N (g P) −1 for the other soil orders (Crews et al., 1995).
Phosphorus in the slow and passive soil pools can also be mineralized biochemically (McGill and Cole, 1981).Therefore the N:P ratios of the slow and passive pools will increase until a steady state is reached when the P fluxes into those two pools through P immobilization (biologically only) are equal to the rates of P being mineralized (both biologically and biochemically) from those pools.Biochemical mineralization is modeled as a function of soil organic P, the N costs of P uptake and phosphatase production, and maximal specific biochemical P mineralization rate (see Wang et al., 2007;Houlton et al., 2008).
We do not model the biochemical P mineralization of litter P, as turnover rates of the litter pool are much faster than those of the slow and passive soil pools, and all P in the litter will be mineralized biologically if they are not mineralized biochemically within a few years.We do not distinguish the phosphatase production by roots from that by soil microbes in our model.
We assumed that the labile P pool is equilibrated with the sorbed P within days.The relationship between the amount of labile P and sorbed P is described using the Langmuir equation (Barrow, 1978;Lloyd et al., 2001;Wang et al., 2007).Inputs to the labile P pool are net biological P mineralization and biochemical P mineralization, P weathering, dust deposition and P fertilizer addition.Only labile P can be taken up by plants.
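The Langmuir relationship itself is not written out in this excerpt. A standard form, consistent with the two soil-order parameters $s_{\mathrm{pmax}}$ and $k_{\mathrm{plab}}$ introduced later for each soil order (Table 2), would be (this is an inferred reconstruction, not a quotation from the paper)

$$P_{\mathrm{sorb}} \;=\; \frac{s_{\mathrm{pmax}}\,P_{\mathrm{lab}}}{k_{\mathrm{plab}} + P_{\mathrm{lab}}},$$

in which $s_{\mathrm{pmax}}$ acts as the maximum sorption capacity and $k_{\mathrm{plab}}$ sets how quickly sorption saturates as labile P increases.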
Some of the sorbed P can enter the strongly sorbed P pool that is not exchanged readily with the labile P; the rate of sorbed P to strongly sorbed P is assumed to be proportional to the amount of sorbed P in the soil.The flux from the strongly sorbed P pool to occluded P pool that is not available to plant or soil microbes at a time scale of decades to a century is not represented in our model.Including the dynamics of occluded P pool will significantly increase the computation with little impact on the simulated processes we are interested in here at decade or century scales.
Because of the biochemical P mineralization, the P cycle in the soil can become quite decoupled from C and N cycles in the soil (McGill and Cole 1981).However a recent study by Houlton et al. (2008) showed that the N cycle may be significantly coupled to the P cycle in some tropical soils, as the N fixation is dependent on the rate of biochemical P mineralization and N and P cycles in the N-limited tropical soils can be strongly coupled.Since we do not simulate N fixation explicitly, this coupling between N and P cycle has not yet been included in our present model.
Inputs of P to the ecosystem are weathering, deposition and fertilizer application.Outputs are the leaching loss of labile P and loss of strongly sorbed P to the occluded P.
Nutrient limitation on net primary productivity
We model NPP as a function of two nutrient limitation factors (Eq. 1), where F_cmax is the nutrient-unlimited NPP (g C m−2 day−1), x_np,leaf is the nutrient concentration limiting factor and x_np,up is the nutrient uptake limiting factor. x_np,leaf is calculated from the leaf nutrient concentrations (Eqs. 2 and 3), where n_leaf and p_leaf are the N:C (g N/g C) and P:C (g P/g C) ratios of the leaf biomass and k_n and k_p are two empirical constants.
The nutrient uptake limiting factor x_np,up is calculated from the available soil nutrient pools (Eqs. 4 and 5), where N_min is the amount of mineral N in soil (g N m−2), P_lab is the amount of labile P in soil (g P m−2), Δt is the time step of model integration (=1 d), and F_n,upmin and F_p,upmin are the minimal N and P uptake required to sustain a given NPP. The nutrient uptake limiting factor therefore falls below 1 when the available amount of N or P is less than the minimal amount required by plants for a given NPP. Equation (2) states that NPP is limited by N or P: when NPP is N limited (x_n,leaf < x_p,leaf), increasing p_leaf will not reduce the N limitation, and vice versa. This is supported by results from fertilizing experiments (Vitousek, 2004). Equation (3) is used because both photosynthesis and plant respiration increase with n_leaf, and the increase in photosynthetic rate per unit leaf N is slower than that of respiration per unit leaf N at higher leaf N (Kattge et al., 2009; Reich et al., 2005). A similar model is commonly used in estimating the response of NPP to nitrogen limitation (Melillo et al., 1993; McMurtrie, 1991).
Very few measurements are available on the responses of photosynthesis or respiration to p leaf .Some earlier measurements, summarized by Lloyd et al. (2001), suggest that the response curve of leaf photosynthesis to p leaf has a similar shape to that for n leaf , which is also consistent with a more recent study on tropical grasses (Ghannoum et al. 2008).A study by Kattge et al. (2009) also found that the estimated maximum carboxylation rate per unit leaf nitrogen of the leaves of tropical forests on the phosphorus-poor oxisol soils is lower than those on other soils.Meir et al. (2000) found that the respiration of leaves of tropical trees was better correlated with p leaf than with n leaf .Observations from longterm fertilization experiments also show that both N-limited and P-limited forests responded to applications of the limiting fertilizer by increasing canopy leaf area, radiation use efficiency and foliar nutrient concentration (Harrington et al., 2001).For these reasons, we used the same function with different model parameters for estimating x n,leaf and x p,leaf .
In this study, we assume that k_n = 0.01 g N (g C)−1, based on the results of Linder and Rook (1984), and k_p = 0.0006 g P (g C)−1. It has been suggested that NPP is N limited when leaf N:P (on a mass basis) is <14 and is P limited when leaf N:P is >16, based on broad-scale geographic variations of leaf N:P ratios (Koerselman and Meuleman, 1996). The value of k_p is chosen so that NPP is limited by N (x_n,leaf < x_p,leaf) when n_leaf/p_leaf < 16 (g N/g P) and otherwise NPP is limited by P. As discussed in Aerts and Chapin (2000), this is a first approximation for studying nutrient limitation at broad scales. Globally, the variation of leaf N:P ratio is found to be consistent with the expected nutrient limitation on NPP (Reich and Oleksyn, 2004). However, species composition and other factors also likely affect the nutrient limitation within an ecosystem (e.g. Townsend et al., 2007).
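The display equations for Eqs. (1)-(5) are not reproduced in this excerpt. The forms below are inferred from the definitions above and from the requirement that $k_n/k_p$ reproduces the stated N-versus-P limitation threshold; they are a reconstruction, not a quotation of the paper:

$$F_c = F_{c\max}\, x_{np,\mathrm{leaf}}\, x_{np,\mathrm{up}}, \qquad x_{np,\mathrm{leaf}} = \min\!\left(x_{n,\mathrm{leaf}},\, x_{p,\mathrm{leaf}}\right),$$
$$x_{n,\mathrm{leaf}} = \frac{n_{\mathrm{leaf}}}{n_{\mathrm{leaf}} + k_n}, \qquad x_{p,\mathrm{leaf}} = \frac{p_{\mathrm{leaf}}}{p_{\mathrm{leaf}} + k_p},$$
$$x_{np,\mathrm{up}} = \min\!\left(x_{n,\mathrm{up}},\, x_{p,\mathrm{up}}\right), \quad x_{n,\mathrm{up}} = \min\!\left(1,\, \frac{N_{\min}}{F_{n,\mathrm{upmin}}\,\Delta t}\right), \quad x_{p,\mathrm{up}} = \min\!\left(1,\, \frac{P_{\mathrm{lab}}}{F_{p,\mathrm{upmin}}\,\Delta t}\right).$$

With these saturating forms, $x_{n,\mathrm{leaf}} < x_{p,\mathrm{leaf}}$ exactly when $n_{\mathrm{leaf}}/p_{\mathrm{leaf}} < k_n/k_p = 0.01/0.0006 \approx 16.7$, which is consistent with the stated threshold of about 16 g N/g P for the switch from N to P limitation.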
Our model, as described so far, calculates nutrient-limited NPP by accounting for nutrient feedback on NPP when leaf N:C or P:C changes or soil nutrient supply cannot meet plant demand.One of the objectives of this study is to estimate the nutrient limiting factors under present conditions.Our approach here is to estimate the monthly nutrient unlimited NPP, F cmax , by multiplying the monthly NPP estimates of Randerson et al. (1997) at 1 • by 1 • spatial resolution for the 1990's by a biome-specific parameter x npmax (see 2.5).We run the model to steady state using the monthly F cmax at 1 • by 1 • as an input.At steady state, the nutrient limiting factor is equal to x npleaf , because x npup =1.However, when the external environment is changed, such as through an increase in atmospheric (CO 2 ), the nutrient uptake may limit NPP, and progressive nutrient limitation can occur (Luo et al., 2004).
Because we allow both N:C and P:C ratios of leaf biomass to vary within their prescribed ranges for each biome (see Table 1), the modelled leaf N:P ratio, ecosystem NPP will vary depending on the nutrient supply and demand.
Values of model parameters
The model has a total of 31 pools: 9 C pools, 10 N pools and 12 P pools. The C cycle of the model was calibrated using global data of (CO2) and 14CO2 (Randerson et al., 1997, 2002) and has been used for global studies (Fung et al., 1997, 2005). We used the same turnover rates and transfer coefficients for all litter and soil pools as Randerson et al. (1996).
For each biome, we prescribed the ranges (minimal and maximal) of C:N ratios of leaves based on data compiled in Glopnet (Wright et al., 2004), the ranges of C:N:P ratio of wood based on the results of Weedon et al. (2009), and the ranges of C:N:P ratio of roots based on Gordon and Jackson (2000).We calculated the range of C:P ratio of leaves from the C:N ratio and the estimated N:P ratio of leaves for each biome (see Table 1).We estimated the leaf N:P ratio for each biome from calibration using the empirical relationships between leaf N:P ratio and latitude by Hedin (2004).The actual C:N:P ratios of all plant pools during model integration will vary from point to point, depending on the available soil nutrients (N and P) for plant uptake.In this study we allow the C:N and C:P ratios of all plant pools to vary within their prescribed ranges.When the minimal nutrient demand can not be met by the available nutrients in soil, nutrient uptake limi-tation will reduce the nutrient-unlimited NPP, therefore both N:C and P:C ratios of all plant pools will not fall below their respective prescribed minima.
C:N:P ratio of the structural litter pool was fixed at 3750 (g C):25 (g N):1 (gP) for all biomes, and the C:N:P ratio of the metabolic litter pool was allowed to vary, depending on the quality of litter input.C:N ratios of soil organic pools are fixed for each biome.C:N:P of soil microbial biomass were fixed at 32 (g C):4(gN):1 (gP) for all biomes based on the estimate of Cleveland and Liptzin (2007).C:N ratio of slow and passive soil organic matter was estimated for each biome by calibrating the soil N estimate for each 2.5 • latitudinal bands against the estimate of Post et al. (1985).N:P ratios of newly formed slow and passive soil organic matter are assumed to vary with soil order, being 7 for the highly weathered soils and 5 for the rest (see Table 2) based on results of Crews et al. (1995).As soil ages, the N:P ratio of slow and passive pools will vary, depending on soil P biochemical mineralization.
To use Eq. (1) in our simulations, the value of x_npmax is required for each biome and is estimated as follows. Using the prescribed monthly NPP of Randerson et al. (1997) as F_c (F_c = F_c,1990) in our model, we ran the model to steady state to determine the pool sizes. Using these estimates of all pool sizes at steady state as the initial pool sizes, we ran the model again with F_c = F_c,1990 and calculated x_npmax from the area-weighted biome mean of the nutrient limitation, where N_c is the number of cells for each biome and A is cell area. That is equivalent to assuming that the mean biome x_np is equal to 1 at steady state in the 1990's. The value of x_np can be considered as the nutrient limitation relative to the present conditions: values greater than 1 indicate that the nutrient limitation is less than that under present conditions, and vice versa. Values of x_npmax are listed in Table 1 for each biome. Because of variation of leaf N:C and P:C ratios within a biome, the nutrient limitation (x_np) can differ from 1 for some grid cells. Two parameters, k_plab and s_pmax, affect the partitioning between labile P and sorbed P at equilibrium, and vary with soil order. Based on the estimates of the different fractions of labile P, sorbed P and strongly sorbed P for different soil orders by Cross and Schlesinger (1995), we tuned these two parameters for each soil order using a nonlinear parameter estimation technique (Wang et al., 2009). Because of the strong correlation between s_pmax and k_plab, we restricted the value of s_pmax to be between 50 and 100% of the total inorganic soil P in the optimization. The value of k_plab is consequently dependent on the range of s_pmax as well as on the values of the other parameters.
The biochemical P mineralization rate, F p,tase affects the model estimate of the fraction of organic P in soil, and is modelled as a function of the maximal specific biochemical P mineralization rate (v pmax ), the N cost of P uptake (λ pup ) and N cost of phosphatase production (λ ptase =15 gN/gP) (Treseder and Vitousek, 2001).Parameter v pmax is tuned to match the fraction of organic P in soil for each soil order.λ pup is 25 g N (g P) −1 for tropical evergreen broadleaf forests and savannahs, and is equal to 40 g N (g P) −1 for all other biomes, based on the simulation results of Houlton et al. (2008).For values of other parameters, see Tables 1 and 2.
Datasets
Three different kinds of datasets are used in this study: input data, data for model calibration and data for model evaluation.
Input dataset
We calculated the nutrient unlimited NPP for each grid cell by multiplying the monthly NPP from Randerson et al. (1997) for the 1990's by a biome-specific constant (x_npmax), and use them as input to the model. N input includes deposition, fertilizer application and fixation. We used the spatially explicit estimates of N deposition for the 1990's by Dentener (2006) and N fixation by Wang and Houlton (2009) for the present climate conditions for N input. Global N input is 0.142 Gt N year−1 from fixation and 0.069 Gt N year−1 from deposition in the 1990's; both are spatially explicit at 2° by 2° globally. Global N input from fertilizer application is taken as 0.086 Gt N year−1 (Galloway et al., 2004) and is distributed uniformly within the cropland biome. P inputs include fertilizer application, dust deposition and weathering. Global P input from fertilizer application is 0.014 Gt P year−1 in the 1990's (Smil, 2000) and is also distributed uniformly within the cropland biome. Spatially explicit P input from dust deposition is from the model output by Mahowald et al. (2008), and is 0.0007 Gt P year−1 globally in the 1990's. No spatially explicit estimates of global P weathering rates are available, and the few estimates of P weathering from different sites are highly variable (Newman, 1995). Estimates of P weathering rates from sites along a soil-age gradient have been used to estimate soil phosphorus content for relatively wet regions globally (Porder and Hilley, 2010). In this study, we estimated the P weathering rate by dividing all 12 soil orders (Fig. 2) into four groups according to their weathering status. Soil orders within each weathering status were assigned a constant P weathering rate (Table 2). Based on the range of P weathering rates from soils along an age gradient in Hawaii (Chadwick et al., 1999), we assign the rate of 0.05 g P m−2 year−1 to the least weathered soils, such as Entisol, and low values to the most weathered soils, such as Ultisol (0.005 g P m−2 year−1) and Oxisol (0.003 g P m−2 year−1). These values are consistent with the estimated P weathering rates varying from 0.005 to 0.05 g P m−2 year−1 by Newman (1995) for different soils worldwide, and 0.007 g P m−2 year−1 for the highly weathered residual soils (saprolites) in the Amazon basin by Gardner (1990). The P weathering rate for the intermediately weathered soil orders was varied to give a global total P weathering rate of about 0.002 Gt P year−1 (Filippelli, 2002).
(Fig. 2 caption fragment: ... and USDA soil order map (lower panel). Numbers 1 to 12 in the lower panel correspond to the soil orders Alfisol, Andisol, Aridisol, Entisol, Gelisol, Histosol, Inceptisol, Mollisol, Oxisol, Spodosol, Ultisol and Vertisol, respectively.)
Datasets for model calibration
We did not carry out a rigorous calibration of all model parameters because insufficient global data are available, particularly for the P cycle.However components of the model that are important for nutrient limitation are calibrated using the estimates of soil N (Post et al., 1985), leaf N:P ratio (Hedin, 2004) and the estimates of the fractions of soil P in different pools (Cross and Schlesinger, 1995).The range of the leaf C:N ratios (dn leaf ) (prescribed for each biome) and the uncertainties of leaf P (dp leaf ) that are calculated as the product of leaf N:C ratio and P:N ratio (from calibration) are
Datasets for model evaluation
We used a number of datasets for evaluating the modeled pool sizes and fluxes.These datasets are: global vegetation biomass data (Olson et al., 1985), soil carbon pool size (Post et al., 1982), estimates of litter production (Matthews, 1997), global leaching and gaseous N losses, P leaching (Seitzinger et al., 2006) for N and P fluxes.These datasets are chosen because they are derived either directly from field observations or based on empirical relationships that are estimated from the field observations.Some of the datasets, such as vegetation biomass (Olson et al., 1985) and litter C production (Matthews, 1997) have spatially explicit information.However, as the biome classifications used by those authors are different from the IGBP biome classification we used in this study, spatially explicit comparisons could be misleading.Instead, we aggregated the spatially explicit estimates by the IGBP biome type, or by latitude for comparing with our estimates.Outputs from some other process-based models are also compared with our estimates.
Model integration
The model integration time step is one day. Meteorological inputs required for the model include daily surface air temperature, soil temperature and moisture. The daily meteorological forcing was generated using the CSIRO Conformal Cubic Atmosphere Model (CCAM; McGregor and Dix, 2008) with the CSIRO Atmosphere and Biosphere Land Exchange (CABLE) land surface scheme (Wang and Leuning, 1998; Kowalczyk et al., 2006) at a spatial resolution of approximately 220 km globally. CCAM was run using six-hourly NCEP reanalysis for 1990 to 1997 (Kalnay et al., 1996) to produce daily mean air temperature, soil temperature and soil moisture in the rooting zone.
By reusing the daily forcings from 1990 to 1997, we ran the model to steady state.Steady state is considered to have been reached when the relative changes in total pool sizes of C, N or P per land point are less than 0.001% per year.All results reported here are for steady state in the 1990's only.Mass balances of all three cycles are achieved at every time step during the model integration.
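The spin-up procedure just described can be sketched in code. The toy example below is illustrative only and is not CASACNP code: the one-pool model and all names are invented, but the recycled-forcing loop and the convergence test (relative pool change below 0.001 % per year, i.e. tol = 1e-5) follow the rule stated above.
"""Illustrative spin-up loop (not CASACNP): run a toy one-pool model to steady state
by recycling the same forcing block until pools change by less than 0.001 % per year."""

import numpy as np


def step_one_year(pools, input_flux, turnover=0.1):
    # Toy stand-in for one model year: first-order pool with constant input.
    return pools + input_flux - turnover * pools


def spin_up(pools, forcing_years, tol=1e-5):
    while True:
        previous = pools.copy()
        for input_flux in forcing_years:      # recycle the same forcing block
            pools = step_one_year(pools, input_flux)
        rel_change = np.abs(pools - previous) / (np.abs(previous) * len(forcing_years))
        if np.all(rel_change < tol):          # < 0.001 % change per year everywhere
            return pools


if __name__ == "__main__":
    initial = np.array([1.0, 10.0, 100.0])    # e.g. three "land points"
    forcing = [5.0] * 8                       # stand-in for the 1990-1997 forcing years
    print(spin_up(initial, forcing))          # approaches input/turnover = 50 everywhere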
Calibration of the biome -specific leaf N:P ratio, soil C:N ratio, and soil phosphorus fractions
Three datasets are used to calibrate leaf N:P ratio, C:N ratio of slow and passive soil pools, and the three soil parameters that affect the partitioning of soil P into different pools.
We calibrated the leaf N:P ratios of different biomes using the empirical relationship between leaf N:P ratio and latitude derived from field observations by Hedin (2004) (Fig. 3).Mean leaf N:P ratio by the calibrated model is above 16 g N/gP within the tropical region (about 15 • north or south of the equator), and less than 14 g N/g P in the region about 30 • away from the equator (see Table 1).The estimates of leaf N:P ratio by two other studies (Reich and Oleksyn, 2004;Kerkhoff et al., 2005) also fall within the range of the variation of leaf N:P ratio with latitude by our calibrated model.We then used the leaf N:P ratio as estimated here to calculate the range of C:P ratios for each plant biomass pools from the estimated C:N ratios from Glopnet dataset (see Table 1).In all CASACNP simulations presented here, we allow C:N and C:P ratios of each plant biomass pool to vary within their prescribed ranges, so N:P ratio of each plant biomass pool is not fixed.
(Fig. 3 caption fragment: ... (yellow green). The error bars represent the one standard error of the mean leaf N:P estimate by CASACNP within each latitudinal band. The biome mean leaf N:P ratio was calibrated against the relationship of Hedin (2004).)
Assuming that the C:N ratios of slow and passive soil organic matter are the same, we estimated that ratio for each biome by minimizing the squared difference between the C:N ratio of the modelled soil organic pools and the C:N ratio calculated from the estimates of soil C and N pools of Post et al. (1982, 1985) for each 2.5° latitudinal band (Fig. 4). Results are presented in Table 1. The modelled soil C and N pool sizes for the high northern latitudes (65° N to 75° N) are much smaller than the estimates by Post et al. (1982, 1985), because our model does not include wetland, which has a very high content of soil organic matter (see Post et al., 1985). Overall, our estimated C:N ratios of soil organic matter with latitude after calibration are consistent with those by Post et al. (1985). Our estimated mean C:N ratio of soil organic matter is highest for the deciduous needle leaf forest (C:N = 30 g C/g N), close to the mean soil C:N ratio of boreal rain forest by Post et al. (1985), and much higher than those of other boreal forests by Post et al. (1985).
Assuming that the fractions of different soil P pools within the rooting zone as represented in our model are the same as those estimated by Cross and Schlesinger (1995), we estimated three parameters: v_pmax for each biome (Table 1), and k_plab and s_pmax for each soil order (Table 2). We assumed that v_pmax is a biome-dependent model parameter, because both plant roots and soil microbes can produce phosphatases, and the growth of soil microbes depends on the supply of soluble carbon from root exudates (Treseder and Vitousek, 2001). Partitioning of soil P among labile, sorbed and strongly sorbed pools depends on soil chemical and physical properties (Barrow, 1978) and soil pedogenesis (Walker and Syers, 1976); therefore we assume that k_plab and s_pmax vary with soil order.
(Fig. 4 caption fragment: ... soil organic matter by CASACNP (red) and Post et al. (1982, 1985) (black). We calibrated the model using the latitudinal variation of C:N ratio of soil organic matter from Post et al. (1985) (Fig. 4c).)
Using nonlinear parameter optimization and the spatial distribution of IGBP biomes and soil orders, we found that the estimates of the fractions of soil organic P for different soil orders by Cross and Schelsinger (1995) do not constrain well the estimates of v pmax for all biomes.Based on the correlation of the estimates of v pmax among different biomes, we grouped the IGBP biomes into 4 groups and estimated the mean v pmax for each group (Table 1).Using the estimates of the fraction of labile, sorbed and strongly sorbed P from Cross and Schelsinger (1995) and soil order spatial distribution, we estimated p plab and s pmax for each soil order using a nonlinear optimization technique (Table 2).
Our calibrated model, with biochemical P mineralization, estimates global total soil P fractions of 34%, 9%, 10% and 47% in the soil organic matter, labile, sorbed and the strongly sorbed pools, compared to 28%, 9%, 13% and 50% from Cross and Schlesinger (1995) for the top 15 cm soil and the USDA soil order maps excluding the occluded P (Fig. 5).The difference between the two estimates is largest for Oxisol, our estimated fraction of organic P is too high and the fraction of strongly sorbed P is too low.More field measurements of biochemical P mineralization are needed, particularly for under-sampled soil orders and deeper soils (>15 cm). Figure 5 also compares the modelled P fractions of different soil pools with or without biochemical P mineralization in the soil.Without biochemical P mineralization, the modelled fraction of P in soil organic matter accounts for over 50%, and the fractions of labile and sorbed P together are <10% for most soil orders.While the fraction of labile P in soil can vary during the growing season (Townsend et al., 2007), the fraction of P in soil organic matter is usually less than half of total P excluding occluded P for most soils (Cross and Schlesinger, 1995) except some highly weathered soil in the tropics.Consequently including biochemical P mineralization is very important for correctly representing soil P dynamics.
Steady-state pool sizes and fluxes for 1990's
Our carbon cycle is based on CASA' model (Fung et al., 2005) with some significant differences.For example, we derived the leaf phenology from the estimates of remote sensing observations (Zhang et al., 2006).Although our modeled soil C pool sizes are quite similar to those by CASA model, our modeled plant and litter pool sizes would be quite different from those by CASA'.
Estimates of carbon pool sizes at equilibrium by our model are 520 Gt C in plant biomass, 122 Gt C in litter and 2124 Gt C in soil.Overall our estimate of plant live biomass carbon (Fig. 6) shows two large peaks, one being in the tropics (15 • S to 15 • N) and the other being in the temperate and boreal region (50 • N to 65 • N).These regions account for 38% and 20% of total plant live biomass carbon.Here we compared the estimates of vegetation C pools with those by Olson et al. (1985).Because Olson et al. (1985) used different biome classification, we calculated land area weighted means of the median, minimum and maximum plant live biomass C for each 2 • latitudinal band from their spatially explicit (0.5 • by 0.5 • global) estimates.Figure 6a shows that our model vegetation biomass C agrees quite well with the mean of the median value by Olson et al. (1985) at different latitudes except two regions: the tropical region (15 • S to 15 • N) and southern temperate region (37 • S to 45 • S).
In the tropical region, where tropical forest and tropical savanna dominate, our estimated mean biomass C is 13 063 g C m−2 for tropical evergreen broadleaf forest and 6220 g C m−2 for woody savanna, much higher than the mean median values of 7467 g C m−2 and 4029 g C m−2 by Olson et al. (1985), respectively. However, our estimated plant live biomass carbon compares well with the estimates of 12 100 g C m−2 by Dixon et al. (1994) and 19 428 g C m−2 by Saugier et al. (2001) for tropical evergreen forest. After accounting for the difference in the area of tropical evergreen broadleaf forest used in different studies, the total plant live biomass carbon as estimated by CASACNP is 211 Gt C, similar to the estimates of 212 Gt C by Dixon et al. (1994) and 244 Gt C by Ajtay et al. (1979), but much lower than the 340 Gt C by Saugier et al. (2001) for tropical evergreen broadleaf forest using the area from the IGBP vegetation map (Fig. 6b).
(Fig. 7 caption: Zonal mean for land grid points of fine litter production (red) and coarse woody litter production (blue) estimated by CASACNP as compared with those by Matthews (1997) (black for fine litter and grey for coarse woody debris); the horizontal axis is latitude (degrees).)
In the other region between 37 • S and 45 • S, our model estimates are closer to the maximal value by Olson et al. (1985).The mean plant live biomass carbon density as estimated by CASACNP is 4605 g C m −2 , much higher than the mean median estimate of 2401 g C m −2 for this region by Olson et al. (1985).A relatively small area of land and few field measurements available may contribute to the difference between the two estimates.The region is dominated by perennial grasslands (51%) in New Zealand and Argentina (Fig. 6b) where there are few estimates of plant live biomass carbon density.
We also compared our estimates of litter productions and coarse woody debris pool sizes for different biomes with other estimates (Fig. 7); such a comparison was not previously done for the simulations by CASA or CASA'.Matthews (1997) estimated fine and woody litter production for each of 30 biome types.Using her estimates of litter production and the 1 • by 1 • biome type map of Matthews (1983), we derived the estimates of fine and woody litter productions for each 2 • latitudinal band between 60 • S to 75 • N.For CASACNP, fine litter production is calculated as the sum of litter fall from leaves and roots.
Our estimates of global fine litter production per year and the total fine litter pool size (metabolic and structural litter) are 45 Gt C year −1 and 61 Gt C, in good agreement with Matthew's (1997) estimates of 45 to 55 Gt C year −1 and 80 Gt C respectively.Our estimate of fine litter production is more variable with latitude than that of Matthews (1997), particularly in the southern hemisphere (Fig. 7).The larger fluctuation of the predicted fine litter production by CASACNP in the southern hemisphere is associated with the change in the proportion of forested land area (Fig. 6b).This regional change in biome type and the impact on fine litter production may not be estimated correctly using the empirical relationship by Matthews (1997); more field studies are needed to verify our estimates.
Estimates of woody litter production by CASACNP agree quite well with those by Matthews (1997) (Fig. 7).Our estimate of CWD flux is 6.3 Gt C year −1 and total CWD pool size is 60 Gt C globally, compared with 6.0 Gt C year −1 and 75 Gt C by Matthews (1997).Direct measurements of CWD flux are rare, as it requires successive inventories of the same plots over more than several decades, particularly in oldgrowth forests (Harmon et al., 1993).Most studies estimate the CWD production using the woody biomass and mortality rate.These estimates can be quite sensitive to infrequent disturbance, such as insect attack and extreme weather conditions.
Measurements of total CWD pool sizes are relatively straightforward and more measurements are available.Our estimates of CWD pool sizes for all forest biomes fall within the range of previous estimates.The biome mean CWD pool size we estimated is 2437 g C m −2 for evergreen needle forests, 3000 g C m −2 for deciduous needle forests, 3762 g C m −2 for the temperate and boreal mixed forests, and less than 1000 g C m −2 for tropical forests (due to rapid decomposition of woody litter in the tropics).Our estimates are comparable with the estimates compiled by Tang et al. (2003) for various forests from field measurements.The estimates they compiled vary from 1400 to 5800 g C m −2 in coniferous forests and 1380 to 2040 g C m −2 in the mixed forest in North America, and 190 to 385 g C m −2 for dry tropical forests in Venezuela, and 650 to 8500 g C m −2 in tropical rainforests in Chile, Australia and China.
Our estimate of equilibrium soil carbon of 2124 Gt C is for the entire rooting zone, within which the vertical root biomass distribution is modelled using the model developed by Jackson et al. (1996), and is therefore much higher than the estimate of 1500 Gt C by Post et al. (1982) for the top 1 m of soil, but quite close to the estimate of 2300 Gt C for the top 3 m of soil by Batjes (1996).
The equilibrium nitrogen pool sizes are 6.6 Gt N for plant, 1.1 Gt N for litter and 126 Gt N for the soil organic matter, and 0.5 Gt N in the soil mineral N pool for the global terrestrial biosphere under the present climate and CASA NPP input.
Few estimates of total N in pools are available for the global terrestrial biosphere. Our estimate of total N in plant biomass is similar to the estimate of 5.6 Gt N by Xu-ri and Prentice (2008), but is much higher than the estimates of 3.1 Gt N by Gerber et al. (2010) and 3.8 Gt N by Zaehle et al. (2010). Our estimate of soil organic N is between the 100 Gt N by Post et al. (1985) for the top 1 m of soil and the 156 Gt N by Batjes (1996) for the top 3 m of soil globally, and quite similar to other model estimates (Zaehle et al., 2010; Gerber et al., 2010) for the rooting zones as specified in their respective models, but much higher than the estimate of 67 Gt N by Xu-ri and Prentice (2008). Our estimate of total soil mineral N is lower than the estimate of 0.9 Gt N by Xu-ri and Prentice (2008).
There is no global estimate of total soil P for different biomes. The total amount of soil P is closely related to the properties of the parent material and soil age, and the fraction of soil P available for plant uptake is closely related to the soil sorption capacity (Barrow, 1978). To estimate the amount of soil P, we used soil order to distinguish different soil mineralogy and age. Unlike the C and N cycles, most P on land is present in rocks, predominantly in apatite. During pedogenesis, the phosphorus in the soil parent material is released into the soil by weathering and uplift (Porder et al., 2007). Walker and Syers (1976) postulated that the fraction of soil P in the occluded pools unavailable to plants or soil microbes increases as soil ages. This hypothesis is supported by measurements of soil P from sites along chronosequences in Hawaii (Crews et al., 1995) and New Zealand (Porder et al., 2007).
Estimates of global total amount of P in the terrestrial biosphere are few, and quite variable, ranging from 40 Gt P (Smil, 2000) to 200 Gt P (Jahnke, 1992).Most measurements of soil P were made on available P that only accounts for 3 to 10% of total soil P in agricultural soils, and measurements on forest soil are relatively scarce (Johnson et al., 2003).Our model estimates that the total P in soil excluding occluded P is 16.5 Gt P if biochemical P mineralization is neglected or 30.5 Gt P otherwise.Biochemical P mineralization lowers the estimate because it increases the flux from soil organic P to labile P that can be lost by leaching.Estimates of P pool sizes at equilibrium by our model are 0.40 Gt P in plant biomass, 0.04 Gt P in litter and 5.7 Gt P in soil organic matter, and 1.5 Gt P, 1.7 Gt P and 7.6 Gt P in labile, sorbed and strongly sorbed P pools in the soil.Smil (2000) pointed out that the early estimate of total soil P by Jahnke (1992) was too high, and she estimated the amount of P in soil to be 5 to 10 Gt P in organic forms and 35 to 40 Gt P in inorganic forms.Mackenzie et al. (2002) estimated that the total organic P is only about 5 Gt P globally, similar to our estimate of 5.7 Gt P in soil organic matter.Assuming that the average amount of occluded P is 35% of total soil P globally (Cross and Schlesinger, 1995), we estimate that the total amount of occluded P is 9 Gt P, and total soil P including occluded P will be 26.5 Gt P, similar to the lower estimate by Smil (2000).
Previous studies estimated that the total amount of P in terrestrial plants varies between 0.5 and 3 Gt P (Jahnke, 1992; Smil, 2000). Given that the amount of N in total terrestrial plant live biomass is 6.6 Gt N, and the N:P ratio can vary from 10 to 20 g N (g P)−1 in plants (Vitousek, 1984, 2004), we conclude that the estimate of 3 Gt P in plant live biomass is too high unless we underestimate the total amount of N in plant live biomass by an order of magnitude. On the contrary, we may have overestimated the amount of N in plants, as the C:N ratios we used are relatively low compared with some other estimates (e.g. Vitousek, 1984, 2004). Figure 8 summarizes the pool sizes and fluxes of C, N and P at steady state for the 1990's for the global terrestrial biosphere with the NPP estimates of Randerson et al. (1997) as input to our model. At steady state, the total carbon flux from plant to litter is equal to NPP, and is equal to soil respiration. The global mean NPP is 51 Gt C year−1. The total N loss rate from soil is 0.295 Gt N year−1, and is equal to the total N input at steady state. Total plant N uptake is equal to net N mineralization, and is 1.1 Gt N year−1, which is very close to the estimated total plant N uptake rate of 1.08 Gt N year−1 by Xu-ri and Prentice (2008). We also estimated that the annual N loss from the terrestrial biosphere is 0.06 Gt N year−1, which is quite similar to the estimated total export of N from land to rivers and coastal oceans of 0.07 Gt N year−1 (Seitzinger et al., 2004). Our estimate of total N gaseous loss to the atmosphere is 0.24 Gt N year−1, and is twice as much as the global soil denitrification rate of 0.12 Gt N year−1 as estimated by Seitzinger et al. (2006). Some of the difference between the two estimates may result from N gaseous loss from nitrification and asymbiotic N fixation that is not accounted for by Seitzinger et al. (2006).
The total input of P to the terrestrial biosphere is 0.016 Gt P year −1 ; P weathering, inorganic P fertilizer addition and dust P deposition account for 12%, 84% and 4% of the total input, respectively.The rate of P loss by leaching is estimated to be 0.014 Gt P year −1 , and about 0.002 Gt P year −1 is transferred to the occluded P pools with a residence time >100 years.Using nutrient data from major rivers and coastal regions and water fluxes, Seitzinger et al. (2006) estimated the total P lost to the river and coastal ocean is 0.01 Gt P year −1 .
The mean residence time at steady state can be calculated as the ratio of pool size to influx for C, N and P in plant, litter and soil. The total mean residence time in the terrestrial biosphere is 54 years for C, 124 years for N and 437 years for P (Fig. 8). For the nutrients N and P, the exchange fluxes between plant, litter and soil within the terrestrial biosphere are much larger than the external flux into the terrestrial biosphere; therefore internal cycling dominates the cycling of N and P, as compared with the C cycle. The mean residence times of N and P in plants or litter are quite similar, but much shorter than the respective mean residence time of C, as a result of nutrient resorption by plants.
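As a back-of-the-envelope check using only the global numbers quoted in this section, the 54-year residence time for carbon together with the NPP influx of 51 Gt C year−1 implies a total terrestrial carbon stock of roughly

\[
\text{total C pool} \approx \tau_C \times \text{NPP} = 54\ \mathrm{yr} \times 51\ \mathrm{Gt\,C\,yr^{-1}} \approx 2750\ \mathrm{Gt\,C},
\]

which is consistent with the 2124 Gt C of soil carbon estimated above plus the plant and litter carbon pools.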
Global nutrient limitation to net primary productivity and its uncertainty
Figure 9 shows the variation of leaf N:C and P:C ratios and the nutrient limitation factor for all land points not covered by permanent snow and ice. Leaf N:C ratios of tropical forests, savannah and crop land vary between 0.04 and 0.06 g N (g C)−1, and are significantly higher than those of other biomes. The N:C ratio is lowest in the deciduous needle leaf forests in the boreal region, varying between 0.02 and 0.03 g N (g C)−1. The leaf P:C ratio varies between 0.001 and 0.003 g P (g C)−1 for unmanaged biomes, and is about 0.004 g P (g C)−1 for crop land.
Figure 9 also shows that the NPP of tropical evergreen forest and savannah and some crop land in the USA, Asia and Australia is limited by P. Most other biomes are limited by N. The deciduous needleleaf forests and high latitude shrub lands (or tundra) are most strongly limited by N.
Using Eq. (9) with the estimated uncertainty of leaf N:P ratio and the assumed range of C:N ratio for different biomes, we calculated the uncertainty of the nutrient limitation factor (x np) for each cell. Figure 10a shows that the uncertainty of the nutrient limiting factor is quite high (>0.15, shown in red) for some shrublands, grassland and woody savannah. Because of the large uncertainty of the leaf N:P ratio for grassland (Table 1), we cannot distinguish N limitation from P limitation. For some shrubland and woody savannahs, the leaf N:P ratio can be quite close to 16, and therefore they are likely to be co-limited by N and P. In Fig. 10b, we show which regions are N-limited (blue-green color), P-limited (pink region) or N and P co-limited (golden color). When NPP is co-limited, the N-limiting factor is not statistically significantly different from the P-limiting factor (at a 95% significance level).
Our results agree broadly with results from a recent synthesis by LeBauer and Treseder (2008), who showed that nitrogen limitation is widespread and that the relative increase in NPP in response to N fertilizer application varies from 11% for desert ecosystems to 35% in the tundra, with a global mean response of 29%. For the N-limited biomes, we estimate that N limitation reduces NPP by 10% to 40% under the present climate and (CO2). LeBauer and Treseder (2008) also showed that N fertilizer addition would increase the NPP of tropical forests by about 20%, whereas our results show that nearly all tropical forests are P limited and will therefore not respond to N fertilizer addition. This discrepancy can be explained by two factors: the first is that our model only captures the broad variations of nutrient limitation because of the relatively coarse resolution (2°) of our simulations, so that finer-scale variations of leaf N:P ratio and limiting nutrients are not well captured by our model simulation. For example, it has been observed that leaf N:P ratios and available soil N or P are quite variable in space and time in the tropical forests of South America (Townsend et al., 2007). The second factor is that N addition may increase biochemical P mineralization and therefore will increase NPP even when NPP is P-limited (Houlton et al., 2008). To represent this connection between the N and P cycles in soil, we need to model the N cost of P uptake and N fixation explicitly.
Limitations and future studies
In this study, we developed and implemented nutrient (N and P) cycles into a well-calibrated carbon cycle model.The model was used to estimate the pool sizes and fluxes of N and P cycles, nutrient limitation and its uncertainty under the present climate condition.The estimated variation of nutrient limitation globally is also consistent with other ecological studies (Vitousek and Howarth, 1991;Le Bauer and Treseder, 2008).However, many parameters or parameterization of some processes in this model are poorly constrained, particularly those relating to nutrient cycles, because global estimates of nutrient pools and fluxes based on field measurements are very limited.To partially overcome this limitation, we assumed that all land points are at steady state under the present climate and (CO 2 ) conditions.This assumption eliminates the dependence of the modelled pools and fluxes on their initial values, and allows us to compare our model results with other estimates of pools and fluxes based on field measurements taken under present climate conditions.However the terrestrial biosphere has rarely been at steady state, particularly over the last 150 years as a result of changes in climate, atmospheric (CO 2 ), land use, and disturbance.In the following we will discuss how much the various pools have changed over the last 150 years, and what impact disturbance, such as fires and land use change, may have on the modelled nutrient limitations.
Most studies of the global carbon cycle on land assume that all pools are at steady state around 1850 (e.g. Friedlingstein et al., 2006), and integrate the model forward using prescribed inputs and simulated climate. Simulations by 11 global climate models with fully coupled carbon cycle models showed that the simulated plant biomass carbon increased by 9% and soil carbon by 4% on average as a result of changing climate and increasing atmospheric (CO2) by 2000 (Friedlingstein et al., 2006). Land use change can also affect the pool sizes and fluxes of all three cycles. It is estimated that about 148 Gt C has been released into the atmosphere globally from land use change from 1850 to 2000 (Houghton, 2008), equivalent to a reduction of about 6% in the total terrestrial carbon pool we estimated at steady state. Overall, the total amount of terrestrial biospheric carbon in 2000 is likely within ±10% of the estimate for 1850 globally.
Changes in the global total size of N pools from 1850 to the 1990's will likely be small, as only 9% of the anthropogenic N input is accumulated in terrestrial pools with a residence time longer than decades (Schlesinger, 2009). However, the spatial pattern of changes in carbon or nutrient pools and nutrient limitation since 1850 can be much larger. Most increases in carbon pool sizes in the terrestrial biosphere are from unmanaged forests where the only N input is deposition. As N deposition has increased since 1850, particularly in the USA and Europe, we may have underestimated the extent of nutrient limitation, particularly nitrogen limitation, for unmanaged forests. This is also consistent with the positive response of net C uptake to N deposition observed for some temperate forests (Thomas et al., 2010), since the net C uptake would be zero at steady state. For managed ecosystems, our steady state assumption will likely lead to more significant biases in the estimated nutrient limiting factors, and the biases depend on when the managed land was converted from native vegetation and how the land was managed. For example, soil tillage may make some of the occluded P available for plant uptake, while liming can help restore the soil ion balance and increase available P in acidified soil. These effects have not been accounted for in our model.
Disturbance, such as fires can also have a significant effect on nutrient cycles and nutrient limitation (Certini, 2005), with the impact depending strongly on the intensity and frequency of fires (Hart et al., 2005).As Herbert et al. (2003) showed, disturbed tropical forests can become N limited, because of the disproportionately larger amount of N than P lost from timber extraction and slash burn.It is also well known that N fixers can invade after fire or during the early succession of forests in temperate regions (Vitousek et al., 2002).Therefore a steady state assumption may lead to underestimation of N limitation in those systems.In many tropical ecosystems, such as savannahs, fires can also release the phosphorus locked up in woody tissue through ash, and dispersion of ash can fertilize the vegetation regrowth after fires (Escudey et al., 2009), therefore accelerating the phosphorus cycling through different pools.Overall our steady state assumption may have resulted in overestimating phosphorus limitation in these systems; further studies are needed.
This study represents the first step in the studies of the interactions between global biogeochemical cycles and climate change.By matching the nutrient-limited NPP with the estimates of NPP by Randerson et al. (1997), we derived the estimate of nutrient limitation globally.This study has addressed the question as to what the nutrient limitation should be for the given carbon cycle at present; the question of how the carbon cycle and nutrients will interact in the future still remains unanswered.In the future, we will implement the biogeochemical model into a global land surface model that calculates the nutrient unlimited NPP as a function of a number of environmental drivers and disturbance.The combined model will then be used to study the effects of increasing (CO 2 ), land use and land use change in the past and future on pools and fluxes of all three cycles and the feedback to climate in the future.Some of the model parameters are poorly constrained, such as v pmax and values of other model parameters are arbitrarily chosen for this study.Sensitivity studies (not shown here) showed that varying v pmax by ± 20% from its mean estimate (see Table 1) only has a small influence on the estimate of nutrient limiting factors for most land points (relative change <5%).Some model parameters are assumed to vary with biome only, and will unlikely capture variations within a biome, such as the observed variation of leaf N:P ratio with species composition or seasons (Townsend et al., 2007).In the future, we will use measurements collected from field studies of ecosystem response to elevated (CO 2 ), increasing N deposition and soil warming to assess the modelled responses of different ecosystems, and therefore improve the representation of some key processes at ecosystem scale.This is important for improving our confidence in the model predictions under future climate and (CO 2 ) conditions.
Conclusions
We developed a global model of C, N and P cycles for the terrestrial biosphere.Estimates of C, N and P pool sizes and major fluxes between plant, litter and soil agree well with various independent estimates.
Including biochemical P mineralization is important for modeling the P cycle in the terrestrial ecosystem. If biochemical P mineralization is not accounted for, the model will overestimate the fraction of soil organic P, underestimate the fractions of P in the labile, sorbed and strongly sorbed pools, and represent the dynamics of soil P incorrectly.
Using our model for present climate conditions, we derived a spatially explicit estimate of nutrient (N and P) limitation globally that is consistent with limited evidence from field measurements. Our result shows that most tropical forests and savannahs are P-limited, and their net primary productivities are reduced by 20% due to P limitation. Most of the remaining vegetation is N-limited, and N limitation is strongest in the deciduous needle leaf forest at high northern latitudes, where N limitation reduces its NPP by about 40%.

Parameters

r_n,i: nitrogen resorption coefficient of plant pool i (=0.5 for leaf, =0.9 for wood and root)
r_p,i: phosphorus resorption coefficient of plant pool i (=0.5 for leaf, =0.9 for wood and root)
v_pmax: biome-specific maximal specific rate of biochemical P mineralization (d−1)
x_np: nutrient limiting factor (dimensionless)
x_npmax: a biome-dependent empirical parameter representing the ratio of nutrient-unlimited NPP to nutrient-limited NPP under the present conditions (dimensionless)
k_n,up: an empirical parameter relating plant nitrogen uptake rate to soil mineral N amount (=2 g N m−2)
k_p,up: an empirical parameter relating plant P uptake rate to labile P pool size in the soil (=0.5 g P m−2)
k_plab: an empirical parameter describing the equilibrium between labile P and sorbed P (g P m−2)
k_ptase: an empirical parameter for phosphatase production (=150 g N/g P)
s_pmax: maximum amount of sorbed P (g P m−2)
μ_i: turnover rate of plant pool i (d−1)
μ_j: turnover rate of litter pool j (d−1)
μ_k: turnover rate of soil pool k (d−1)
μ_sorb: rate constant for sorbed P (d−1)
μ_ssb: rate constant for strongly sorbed P (d−1)
λ_pup: N cost of P uptake (=40 g N/g P for tropical biomes and 25 g N/g P for other biomes)
λ_ptase: biome-specific N cost of phosphatase production (=15 g N/g P)
Fluxes
F_c: net primary productivity (g C m−2 d−1)
F_c,1990: net primary productivity for the 1990's
The carbon cycle
The carbon cycle model is based on the CASA' model (Fung et al., 2005) except that we combined the above-ground with the below-ground metabolic or structural litter pools, so that there are only nine carbon pools in our model. The equations governing the change of the C pools are the same as those described by Randerson et al. (1996). In these equations, C denotes pool size in g C m−2 and μ the turnover rate in d−1, both with one subscript: i for plant, j for litter or k for soil. a_c,i is the fraction of NPP (F_c) allocated to plant pool i, b_j,i is the fraction of litter fall from plant pool i allocated to litter pool j, c_k,j is the fraction of litter carbon that enters soil pool k, and d_k,kk is the fraction of decomposed C from soil pool kk to soil pool k; m_n is the N limitation on litter decomposition (Eq. C12), and varies from 0 to 1. The coefficient a_c,i (the fraction of NPP allocated to leaf, wood or root) depends on leaf phenology. Global leaf phenology for all biomes is derived from the estimates of remote sensing observations (Zhang et al., 2006). Leaf growth is divided into four phases. Phase 1 is from leaf budburst to the start of steady leaf growth, phase 2 is from the start of steady leaf growth to the beginning of leaf senescence, phase 3 represents the period of leaf senescence, and phase 4 is from the end of leaf senescence to the start of leaf budburst. During phase 1, a_c,leaf is set to 0.8, and a_c,wood and a_c,root are set to 0.1 for woody biomes, and to 0 and 0.2 respectively for non-woody biomes. During steady leaf growth (phase 2), the allocation coefficients are constant but vary from biome to biome, taking their values from Fung et al. (2005). During phases 3 and 4, the leaf allocation is zero and its phase-2 allocation is divided between a_c,wood and a_c,root in proportion to their allocation coefficients. For evergreen biomes, leaf phenology remains at phase 2 throughout the year.
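A sketch of the generic form of these pool equations, inferred from the coefficient definitions just given (the exact grouping of terms should be checked against Randerson et al., 1996), is:

\[
\frac{dC_i}{dt} = a_{c,i} F_c - \mu_i C_i \quad \text{(plant pools } i\text{)}, \qquad
\frac{dC_j}{dt} = \sum_i b_{j,i}\,\mu_i C_i - m_n\,\mu_j C_j \quad \text{(litter pools } j\text{)},
\]
\[
\frac{dC_k}{dt} = \sum_j c_{k,j}\,m_n\,\mu_j C_j + \sum_{kk \neq k} d_{k,kk}\,\mu_{kk} C_{kk} - \mu_k C_k \quad \text{(soil pools } k\text{)}.
\]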
Leaf turnover rate increases with cold and drought stress, and is modelled following the approach of Arora and Boer (2005). The partitioning coefficients b_j,i, c_k,j and d_k,kk and the turnover rates μ_j and μ_k use the same values as in the CASA model (Randerson et al., 1996). Uptake of N by plants from soil, F_n,up, is calculated as

\[
F_{n,up} = \sum_i \left[ a_{c,i} F_c \left( n_{max,i} - n_{min,i} \right) - r_{n,i}\,\mu_i N_i \right] \frac{N_{min}}{N_{min} + k_{n,up}} + F_{n,upmin} \qquad (C7)
\]

where n_min,i and n_max,i are the minimal and maximal N:C ratios of plant pool i, and k_n,up is an empirical constant (=2 g N m−2) (Melillo et al., 1993). Here we assumed that N uptake above the minimal uptake (F_n,upmin) is proportional to the maximal amount of N required by plant growth (a_c,i F_c n_max,i). When the rate of plant N uptake as calculated by Eq. (C7) is greater than N_min/Δt (where Δt is the time step of model integration and equals 1 day), F_n,up is set to N_min/Δt. The minimal N uptake for a given NPP, F_n,upmin, is calculated as

\[
F_{n,upmin} = \sum_i \left( a_{c,i} F_c\, n_{min,i} - r_{n,i}\,\mu_i N_i \right) \qquad (C8)
\]

where the first and second terms on the right-hand side of Eq. (C8) represent the minimal N required for growth and resorption, respectively. Allocation of the N uptake among the different plant pools, a_n,i, is calculated to be proportional to the demand for N by pool i, that is

\[
a_{n,i} = \frac{ \left[ a_{c,i} F_c \left( n_{max,i} - n_{min,i} \right) - r_{n,i}\,\mu_i N_i \right] \dfrac{N_{min}}{N_{min} + k_{n,up}} + \left( a_{c,i} F_c\, n_{min,i} - r_{n,i}\,\mu_i N_i \right) }{ F_{n,up} } \qquad (C9)
\]

Following Wang et al. (2007), the net N mineralization rate, F_n,net, is calculated as the difference between the gross N mineralization rate (F_n,gr) and the N immobilization rate (F_n,im). Gross N mineralization, F_n,gr, is calculated from Eq. (C10), in which the first and second terms on the right-hand side represent gross N mineralization from litter and from SOM decomposition. Similarly, the N immobilization rate, F_n,im, is calculated as

\[
F_{n,im} = \sum_k \left( \sum_j c_{k,j}\, m_n\, \mu_j N_j + \sum_{kk \neq k} d_{k,kk}\, \mu_{kk} N_{kk} \right) \qquad (C11)
\]

where m_n, the N-limiting factor of decomposition, is calculated from Eq. (C12), in which F*_n,net is the N-unlimited net mineralization calculated using Eqs. (C10) and (C11) with m_n = 1. When the N-unlimited gross mineralization is less than the N-unlimited immobilization, F*_n,net < 0 and the decomposition rate is reduced by m_n. A similar method was used by Comins and McMurtrie (1993) for their model. Two pathways of N loss are modelled: one is gaseous loss and the other is leaching. Gaseous N loss is proportional to net N mineralization (Firestone and Davidson, 1989) and leaching loss is proportional to the mineral N pool. That is

\[
F_{n,loss} = f_{ngas}\, \max\left(0,\, F_{n,net}\right) + f_{nleach}\, N_{min} \qquad (C13)
\]

where f_ngas is equal to 0.05 (Parton et al., 1987) and f_nleach is equal to 0.5 year−1 (Hedin et al., 1995).
Appendix D Phosphorus cycle
The phosphorus cycle is based on the model of Wang et al. (2007), with \(\sum_i a_{p,i} = 1\), where a_p,i is the allocation of P uptake to the different plant pools, F_p,up is the plant P uptake (g P m−2 d−1), and r_p,i is the P resorption coefficient. μ_sorb and μ_ssb are rate constants for the sorbed and strongly sorbed P pools, respectively; both are equal to 0.0067 year−1. s_pmax and k_plab are the maximum amount of sorbed P (g P m−2) and the constant for adsorption (g P m−2); both parameters vary with soil order (Table 2). F_p,net, F_p,dep, F_p,fert, F_p,wea, F_p,up and F_p,loss are the net biological P mineralization, dust P deposition, fertilizer P addition, P weathering, plant P uptake and P loss rates, respectively; all are in g P m−2 d−1. F_p,tase is the biochemical P mineralization rate in g P m−2 d−1, and is calculated (see Wang et al., 2007) as

\[
F_{p,tase} = v_{pmax}\, \frac{\lambda_{pup} - \lambda_{ptase}}{\lambda_{pup} - \lambda_{ptase} + k_{ptase}} \left( \mu_{slow} P_{slow} + \mu_{pass} P_{pass} \right) \qquad (D11)
\]

where v_pmax is the maximum specific biochemical P mineralization rate (d−1), and λ_pup and λ_ptase are the N costs of P uptake and of phosphatase production (g N (g P)−1), respectively. k_ptase is an empirical constant; λ_ptase = 15 g N (g P)−1 and k_ptase = 150 g N (g P)−1 (Wang et al., 2007). Here we assumed that the contribution to biochemical P mineralization from the slow or passive SOM pool is proportional to the turnover flux of that pool. Parameters λ_pup and v_pmax are biome-dependent, and their values are listed in Table 1.
The dynamics of P in the plant, litter and soil microbial biomass pools are similar to those for N (compare Eqs. D1 to D5 with C1 to C5). The treatment of the soil P pools (Eqs. D8 to D10) follows that of Wang et al. (2007) except that we represent soil organic P as three pools. Unlike the soil N pools, the organic P in the slow and passive pools can be biochemically mineralized, and the contribution to biochemical P mineralization is assumed to be proportional to their respective decay rates. As an approximation, we assumed that the flux from the sorbed to the strongly sorbed pool is proportional to the size of the sorbed pool. The dynamics of the labile soil P pool are modelled using the approach developed by Lloyd et al. (2001), also used by Wang et al. (2007).
Similar to N uptake in our model, the plant P uptake rate, F_p,up, is calculated as

\[
F_{p,up} = \sum_i \left[ a_{c,i} F_c \left( p_{max,i} - p_{min,i} \right) - r_{p,i}\,\mu_i P_i \right] \frac{P_{lab}}{P_{lab} + k_{p,up}} + F_{p,upmin} \qquad (D12)
\]

where p_min,i and p_max,i are the minimal and maximal P:C ratios of plant pool i (leaf, wood or root) in g P (g C)−1, which vary with biome type, and k_p,up is an empirical constant (=0.5 g P m−2) (Wang et al., 2007). When the P uptake by plants calculated using Eq. (D12) is greater than P_lab/Δt, F_p,up is set to P_lab/Δt. F_p,upmin is the minimal P uptake for a given NPP and is calculated as

\[
F_{p,upmin} = \sum_i \left( a_{c,i} F_c\, p_{min,i} - r_{p,i}\,\mu_i P_i \right) \qquad (D13)
\]

Allocation of plant P uptake to leaf, wood and root is calculated similarly to plant N uptake. Soil P can be lost by leaching, and F_p,loss is calculated as

\[
F_{p,loss} = f_P\, P_{lab} \qquad (D14)
\]

In this study we assumed that f_P = 0.04 year−1 (Hedin et al., 2003).
Fig. 3. Comparison of leaf N:P (g N/g P) as estimated by CASACNP (black curve) with the empirical relationships derived from different sets of field measurements by Reich and Oleksyn (2004) (dark brown), Kerkhoff et al. (2005) (orange) and Hedin (2004) (yellow green). The error bars represent one standard error of the mean leaf N:P estimate by CASACNP within each latitudinal band. The biome mean leaf N:P ratio was calibrated against the relationship of Hedin (2004).
Zonal mean of all land points of total C (a), N (b) and C:N ratio (c) of soil organic matter by CASACNP (red) and Post
Fig. 5 .
Fig. 5. Fraction of organic P (a), labile P (b), sorbed P (c) and strongly sorbed P (d) for each soil order excluding the occluded P for the top 15 cm soil field measurements from Cross and Schlesinger (1995) (dark red) as compared with the estimates by CASACNP with (orange) or without biochemical P mineralization (dark green).
Fig. 6 .
Fig. 6.(a) Comparison of the vegetation biomass carbon as estimated by CASACNP model (red) with those by Olson et al. (1985) (black).The grey region represents the land area weighted mean of the maximal and minimal estimates of vegetation biomass carbon, and the black curve represents the land area weighted-mean median vegetation biomass carbon as estimated by Olson et al. (1985); (b) the areas of forests, shrub land, crop land and grassland, and land ice at different latitudes.
Fig. 7 .
Fig. 7. Zonal mean for land grid points of fine litter production (red) and coarse woody litter production (blue) estimated by CASACNP as compared with those by Matthews (1997) (black for fine litter and grey for coarse woody debris).
Fig. 8 .
Fig.8.Fluxes (blue), mean residence time (red) and pool sizes (black) of the C, N and P cycles in the terrestrial biosphere at steady state under present climate conditions.The external fluxes into the terrestrial biosphere or influx are indicated on the left-hand side and the fluxes out of the terrestrial biosphere or efflux are indicated on the right-hand side.The dotted squares represent the global terrestrial biosphere with three major compartments, plant biomass (B), litter (L) and soil (S).The units of pool size are Gt C, N or P, the mean residence time is in years and the flux units are Gt C, N or P per year.Here we included occluded P in soil (see Sect. 5.2 for further details).
Fig. 9 .
Fig. 9. Spatial variation of leaf N:C ratio (g N/g C) (a), leaf P:C ratio (g P/g C) (b) and the nutrient limitation factor on NPP (c).The land type of permanent snow and ice (white) are not modeled.In order to show both N and P limitation variation spatially in the lower panel, we plotted the value of x np -1 if x n < x p , or 1-x np if x n > x p , where x n is the N limiting factor on NPP, and x p is the P limiting factor on NPP, x np = min (x n , x p ). Therefore regions with a negative value are limited by N and regions with a positive value are limited by P. A value of -0.2 corresponds to x np = x n =0.8, therefore addition of N fertilizer can increase NPP by 20%, similar for a P-limited region with a value of 0.2.
Fig. 10 .
Fig. 10.Estimates of the uncertainty (one standard deviation) of nutrient (N and P) limitation factor (x np ) (upper panel) and the regions (lower panel) where net primary productivity is N limited, P is limited or N and P co-limited.The N and P co-limited region represents the estimate of N-limitation factor (x n,leaf ) being not statistically significantly different from P limitation (x p,leaf ).
\[
\frac{dC_k}{dt} = \sum_j c_{k,j}\, m_n\, \mu_j C_j + \sum_{kk \neq k} d_{k,kk}\, \mu_{kk} C_{kk} - \mu_k C_k
\]
Table 2 .
Soil order specific model parameters. Parameters k_plab and s_pmax are estimated in this study; the P weathering rate is prescribed in this study. See Sect. 3.1 for further explanation.

Assuming that dn_leaf is not correlated with dp_leaf, we calculate the uncertainty of the nutrient limiting factor (σ_xnp) accordingly.

There are 12 P pools. The equations governing the dynamics of the P pools are:

\[
\frac{dP_i}{dt} = a_{p,i} F_{p,up} - \mu_i \left(1 - r_{p,i}\right) P_i \qquad (D1)
\]
\[
\frac{dP_{str}}{dt} = \left(\mu_{leaf} C_{leaf} + \mu_{root} C_{root}\right) p_{str} - m_n\, \mu_{str} P_{str} \qquad (D2)
\]
\[
\frac{dP_{met}}{dt} = \mu_{leaf}\left(1 - r_{p,leaf}\right) P_{leaf} + \mu_{root}\left(1 - r_{p,root}\right) P_{root} - \left(\mu_{leaf} C_{leaf} + \mu_{root} C_{root}\right) p_{str} - m_n\, \mu_{met} P_{met}
\]
\[
\frac{dP_{mic}}{dt} = \sum_j c_{mic,j}\, m_n\, \mu_j P_j + \sum_{kk \neq mic} d_{mic,kk}\, \mu_{kk} P_{kk} - \mu_{mic} P_{mic} \qquad (D5)
\]
\[
\frac{dP_{slow}}{dt} = \sum_j c_{slow,j}\, m_n\, \mu_j P_j + \sum_{kk \neq slow} d_{slow,kk}\, \mu_{kk} P_{kk} - \mu_{slow} P_{slow} - F_{p,tase}\, \frac{\mu_{slow} P_{slow}}{\mu_{slow} P_{slow} + \mu_{pass} P_{pass}} \qquad (D6)
\]
\[
\frac{dP_{pass}}{dt} = \sum_j c_{pass,j}\, m_n\, \mu_j P_j + \sum_{kk \neq pass} d_{pass,kk}\, \mu_{kk} P_{kk} - \mu_{pass} P_{pass} - F_{p,tase}\, \frac{\mu_{pass} P_{pass}}{\mu_{slow} P_{slow} + \mu_{pass} P_{pass}}
\]
\[
\frac{dP_{lab}}{dt} = F_{p,net} + F_{p,dep} + F_{p,fert} + F_{p,wea} + F_{p,tase} - F_{p,up} - F_{p,loss} - \mu_{sorb}\,(\ldots)
\]
|
package modern.challenge;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.logging.Logger;
public class ReadWriteWithLock {
private static final Logger logger = Logger.getLogger(ReadWriteWithLock.class.getName());
private static final Random rnd = new Random();
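// Fair read-write lock: multiple readers may hold the read lock concurrently, while the write lock is exclusive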
private static final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
private static final Reader reader = new Reader();
private static final Writer writer = new Writer();
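// Shared state; reads and writes to it are guarded by the read-write lock above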
private static int amount;
private static class Reader implements Runnable {
@Override
public void run() {
if (lock.isWriteLocked()) {
logger.warning(() -> Thread.currentThread().getName()
+ " reports that the lock is hold by a writer ...");
}
lock.readLock().lock();
try {
logger.info(() -> "Read amount: " + amount
+ " by "+ Thread.currentThread().getName());
} finally {
lock.readLock().unlock();
}
}
}
private static class Writer implements Runnable {
@Override
public void run() {
lock.writeLock().lock();
try {
Thread.sleep(rnd.nextInt(2000));
logger.info(() -> "Increase amount with 10 by " + Thread.currentThread().getName());
amount += 10;
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
logger.severe(() -> "Exception: " + ex);
} finally {
lock.writeLock().unlock();
}
}
}
public static void main(String[] args) throws InterruptedException {
System.setProperty("java.util.logging.SimpleFormatter.format",
"[%1$tT] [%4$-7s] %5$s %n");
// perform 10 reads and 10 writes using a pool of 2 reader threads and a pool of 4 writer threads
ExecutorService readerService = Executors.newFixedThreadPool(2);
ExecutorService writerService = Executors.newFixedThreadPool(4);
for (int i = 0; i < 10; i++) {
readerService.execute(reader);
writerService.execute(writer);
}
readerService.shutdown();
writerService.shutdown();
readerService.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS);
writerService.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS);
}
}
|
Remove an array using its key from a nested associative array
I have this array:
$array = array(
"foo" => "bar",
"barw" => "21",
"bara" => "22",
"barq" => "25",
"multix" => array(
"1" => array(
"ar1" => "food",
"ar2" => "dr",
"ar3" => "ch",
"ar4" => "ju"
),
"2" => array(
"ar1" => "food",
"ar2" => "dr",
"ar3" => "ch",
"ar4" => "ju"
),
"893" => "bar",
"563" => "bar",
"hd8" => "bar",
"multiv" => array(
"1" => array(
"ar1" => "food",
"ar2" => "dr",
"ar3" => "ch",
"ar4" => "ju"
),
"2" => array(
"ar1" => "food",
"ar2" => "dr",
"ar3" => "ch",
"ar4" => "ju"
),
"tw" => "bar",
"qa" => "bar",
"op" => "bar"
)
)
);
which I am reading from and writing to like this:
echo '<pre>';
print_r($array);
echo '</pre>';
echo "<br/>";
echo $array['multix']['1']['ar1'].'<br/>';
echo "<br/>";
echo $array['multix']['1']['ar2'].'<br/>';
echo "<br/>";
echo $array['multix']['1']['ar3'].'<br/>';
echo "<br/>";
echo $array['multix']['1']['ar4'].'<br/>';
$array['multix']['1']['ar4'] = "lego";
However, I am unable to delete an array of my choice like:
unset($array['multiv']['1']);
echo '<pre>';
print_r($array);
echo '</pre>';
What should I do to delete the array using its key?
Your multiv array is inside the multix array, so you need to prepend the multix key:
unset($array['multix']['multiv']['1']);
You may have built your array incorrectly if multiv was supposed to be one level up in the index hierarchy.
To expand on this: after ['multix']['2'] ends, you've reduced the indent on ['893'] but the brackets for ['multix'] are still open. Then, at the bottom, you close two sets of brackets at the same level of indent; the second is for ['multix']. Move one of those closing brackets up before ['893'] and you should be good.
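For illustration only (assuming multiv really was meant to be a top-level key, which only you can confirm), moving that closing bracket gives a structure like this, abbreviated here, where your original unset() call then works unchanged:

$array = array(
    "foo" => "bar",
    "barw" => "21",
    "bara" => "22",
    "barq" => "25",
    "multix" => array(
        "1" => array("ar1" => "food", "ar2" => "dr", "ar3" => "ch", "ar4" => "ju"),
        "2" => array("ar1" => "food", "ar2" => "dr", "ar3" => "ch", "ar4" => "ju")
    ), // multix now closes here
    "893" => "bar",
    "563" => "bar",
    "hd8" => "bar",
    "multiv" => array(
        "1" => array("ar1" => "food", "ar2" => "dr", "ar3" => "ch", "ar4" => "ju"),
        "2" => array("ar1" => "food", "ar2" => "dr", "ar3" => "ch", "ar4" => "ju"),
        "tw" => "bar",
        "qa" => "bar",
        "op" => "bar"
    )
);

unset($array['multiv']['1']); // removes the intended sub-array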
|
Board Thread:Off Topic/@comment-11276487-20130805204403/@comment-11296535-20130820221506
But my wife, Senna... she would never do such a thing!
|
Select2 usage issue
my code
miku = [
{ id: 1, text: "Blue" },
{ id: 12, text: "David" },
{ id: 3, text: "Judy" },
{ id: 4, text: "Kate" },
{ id: 5, text: "John" },
];
$(".search39").select2({
language: 'zh-TW',
placeholder: "第三人",
containerCssClass: "search",
maximumInputLength: 10,
width: '100%',
minimumInputLength: 0,
tags: true,
data: miku,
matcher:
function (params, data) {
console.log(data)
if ($.trim(params) === '') {
return data;
}
if (data.includes(params)) {
return data;
}
return null;
}
});
but it return me:
Blue
David
Judy
Kate
John
It only outputs the "text" property and cannot output other properties (like "id").
I want to search using the id.
<!DOCTYPE html>
<html lang="en">
<head>
<script src="https://cdnjs.cloudflare.com/ajax/libs/select2/3.5.0/select2.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.0/jquery.min.js"></script>
<link href="https://cdnjs.cloudflare.com/ajax/libs/select2/4.0.13/css/select2.min.css" rel="stylesheet" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/select2/4.0.13/js/select2.min.js"></script>
</head>
<body>
<div>
<select class="user"></select>
</div>
<script>
$(function () {
miku = [
{ id: 1, text: "Blue", note: "123" },
{ id: 12, text: "David" },
{ id: 3, text: "Judy" },
{ id: 4, text: "Kate" },
{ id: 5, text: "John" },
];
$(".user").select2({
language: 'zh-TW',
width: '100%',
maximumInputLength: 10,
minimumInputLength: 0,
tags: true,
data: miku,
matcher:
function (params, data) {
var term = params.term;
console.log(data)
if ($.trim(params.term) === '') {
return data;
}
if (data.id.includes(term)) {
console.log(data.id);
return data;
}
return null;
}
});
})
</script>
</body>
</html>
This is the page I tested; it works fine. However, when added to the project, the output is different.
It can output other properties.
I also don't understand why the same code results in different outcomes.
You said it worked fine, but I wonder how, since you included jQuery after select2, and more importantly you have 2 different versions of select2! You should just remove the v3.5 script, which is the very first line. In general you should look at the errors in the console if you wish to find clues on what's going on and how to solve it.
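For example, a cleaned-up head (keeping only the 4.0.13 build already referenced in the question, and loading jQuery first) would look something like this:

<head>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.0/jquery.min.js"></script>
<link href="https://cdnjs.cloudflare.com/ajax/libs/select2/4.0.13/css/select2.min.css" rel="stylesheet" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/select2/4.0.13/js/select2.min.js"></script>
</head>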
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Text;
namespace ILCompiler.Reflection.ReadyToRun
{
/// <summary>
/// based on <a href="https://github.com/dotnet/runtime/blob/main/src/coreclr/vm/nativeformatreader.h">NativeFormat::NativeArray</a>
/// </summary>
public class NativeArray
{
// TODO (refactoring) - all these Native* class should be private
private const int _blockSize = 16;
private uint _baseOffset;
private uint _nElements;
private byte _entryIndexSize;
private byte[] _image;
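// Header encoding: the low two bits of the decoded value select the entry index size
// (0 = byte, 1 = ushort, otherwise uint), the remaining bits give the element count,
// and _baseOffset points just past the header.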
public NativeArray(byte[] image, uint offset)
{
uint val = 0;
_baseOffset = NativeReader.DecodeUnsigned(image, offset, ref val);
_nElements = (val >> 2);
_entryIndexSize = (byte)(val & 3);
_image = image;
}
public uint GetCount()
{
return _nElements;
}
public override string ToString()
{
StringBuilder sb = new StringBuilder();
sb.AppendLine($"NativeArray Size: {_nElements}");
sb.AppendLine($"EntryIndexSize: {_entryIndexSize}");
for (uint i = 0; i < _nElements; i++)
{
int val = 0;
if (TryGetAt(_image, i, ref val))
{
sb.AppendLine($"{i}: {val}");
}
}
return sb.ToString();
}
public bool TryGetAt(byte[] image, uint index, ref int pOffset)
{
if (index >= _nElements)
return false;
uint offset = 0;
if (_entryIndexSize == 0)
{
int i = (int)(_baseOffset + (index / _blockSize));
offset = NativeReader.ReadByte(image, ref i);
}
else if (_entryIndexSize == 1)
{
int i = (int)(_baseOffset + 2 * (index / _blockSize));
offset = NativeReader.ReadUInt16(image, ref i);
}
else
{
int i = (int)(_baseOffset + 4 * (index / _blockSize));
offset = NativeReader.ReadUInt32(image, ref i);
}
offset += _baseOffset;
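// Walk the block's binary search tree: the current bit of the index selects the left or right child.
// Each node's low two bits flag whether the left (bit 0) or right (bit 1) child exists; the remaining
// bits give the right child's offset delta, or the matching entry index at a special leaf node.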
for (uint bit = _blockSize >> 1; bit > 0; bit >>= 1)
{
uint val = 0;
uint offset2 = NativeReader.DecodeUnsigned(image, offset, ref val);
if ((index & bit) != 0)
{
if ((val & 2) != 0)
{
offset = offset + (val >> 2);
continue;
}
}
else
{
if ((val & 1) != 0)
{
offset = offset2;
continue;
}
}
// Not found
if ((val & 3) == 0)
{
// Matching special leaf node?
if ((val >> 2) == (index & (_blockSize - 1)))
{
offset = offset2;
break;
}
}
return false;
}
pOffset = (int)offset;
return true;
}
}
}
|
About return in C++
Sorry for this newbie question, but I can't find on google what I need to know.
I understand return, but don't understand this... What does it mean this?
return (tail+1)%N == head%N;
Thanks a lot for patience.
It returns true or false, depending on whether the expression is true or not.
It's the same as:
if ( (tail+1)%N == head%N )
return true;
else
return false;
Except that it's more idiomatic, and more readable to someone who knows C++.
IMO it needs the whole expression bracketed (i.e. return ((tail+1)%N == head%N); as in the if) to be more readable. But I just like excess brackets to remove any ambiguity.
this
(tail+1)%N == head%N
returns a boolean value, either true or false. It is true when the remainder of (tail + 1) divided by N equals the remainder of head divided by N. The % operator performs division with remainder.
The % symbol is the modulo operator; modulo is the operation that gives the remainder of a division of two values.
Check this link for c++ operators : http://www.cplusplus.com/doc/tutorial/operators/
It returns true if the remainders of dividing tail + 1 and head by N are the same.
for example if tail is 2, head is 1 and N is 2
(tail + 1) % N is 1
head % N is 1 too
so whole expression returns true
It evaluates the expression, and return the result. In this case it's two modulo operations that are compared, and the result is either true or false which will be returned.
Short Answer:
Because of the == operator your function will return a bool, meaning it can only be trueor false. An equivalent would be something like:
return 5 == 4;
which would return false since 5 is not equal to 4.
Long Answer:
Instead of writing this in a single line you could split it up into more lines of code. Let's just assume that tail, head and N are integer values, then you could write it like this:
int x, y;
x = (tail+1)%N;
y = head%N;
if ( x == y )
{
return true;
}
else
{
return false;
}
Now in this code the % may also confuse you a bit. The % is called the modulus operator and gives you the remainder of a division. In a simple example this would mean:
10 % 3 = 1 because 10/3 is 3 with a remainder of 1. So to make it more clear let's just make another example with your specific problem:
Let's just assume that tail=10, head=6 and N=2. Then you would get something like this:
x = (10+1)%2
x = 11 % 2
x = 1
y = 6 % 2
y = 0
y != x
This would return false because x and y are not equal (if you run your code with the given example values).
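A minimal, self-contained check of that arithmetic (using the same made-up example values) could look like this:

#include <iostream>

int main() {
    int tail = 10, head = 6, N = 2;
    // (10 + 1) % 2 == 6 % 2  ->  1 == 0  ->  false
    bool result = (tail + 1) % N == head % N;
    std::cout << std::boolalpha << result << '\n'; // prints "false"
    return 0;
}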
To learn more about Modulus you can look here, or just on any other basic C++ Tutorial.
You're returning a boolean value. The value represents whether or not the remainder of (tail+1) divided by N is the same as that of head.
|
Effect of Hypotension and Dobutamine on Gastrointestinal Microcirculations of Healthy, Anesthetized Horses
Simple Summary
When horses undergo abdominal surgery, there is a risk that tissues may not receive enough blood. One way to decide if this is happening is to measure blood pressure. The assumption is that if blood pressure is normal, then tissue blood flow is fine. A technique called dark-field microscopy has been used to look at the blood flow to tissues. We measured blood flow at three different places (the gums, the rectum, and the outside of the colon) of six healthy horses undergoing general anesthesia. Additionally, we measured how blood flow to these tissues changed when the patient’s blood pressure was lowered by giving an increased amount of gas anesthesia, which was subsequently raised with a drug called dobutamine. We found that blood flow to these tissues was present when the patients had normal blood pressure. When the blood pressure decreased, the blood flow to the tissues was unaltered. Finally, when blood pressure returned to normal, the blood flow to the tissues remained unaltered. Because blood flow to the tissue was unchanged despite clinically relevant changes in blood pressure, we concluded that using blood pressure as the only method to assess tissue blood flow may be inaccurate.
Abstract
Horses undergoing abdominal exploratory surgery are at risk of hypotension and hypoperfusion. Normal mean arterial pressure is used as a surrogate for adequate tissue perfusion. However, measures of systemic circulation may not be reflective of microcirculation. This study measured the mean arterial pressure, cardiac index, lactate, and four microcirculatory indices in six healthy, anesthetized adult horses undergoing elective laparotomies. The microcirculatory parameters were measured at three different sites along the gastrointestinal tract (oral mucosa, colonic serosa, and rectal mucosa) with dark-field microscopy. All macro- and microcirculatory parameters were obtained when the horses were normotensive, hypotensive, and when normotension returned following treatment with dobutamine. Hypotension was induced with increases in inhaled isoflurane. The horses successfully induced into hypotension did not demonstrate consistent, expected changes in systemic perfusion or microvascular perfusion parameters at any of the three measured gastrointestinal sites. Normotension was successfully restored with the use of dobutamine, while the systemic perfusion and microvascular perfusion parameters remained relatively unchanged. These findings suggest that the use of mean arterial pressure to make clinical decisions regarding perfusion may or may not be accurate.
Introduction
Horses undergoing abdominal exploratory surgery for colic are at risk of global and local tissue hypoperfusion. In cases with gastrointestinal disease, the hypoperfusion of the gastrointestinal microcirculation and local ischemia can contribute to gastrointestinal dysfunction and tissue death. Changes in cardiovascular function in these horses can be the result of a variety of circumstances, including, but not limited to, anesthesia or endotoxemia [1-3]. These changes can lead to generalized poor perfusion throughout the body. In horses with gastrointestinal disease, this issue can be compounded by alterations in the local tissue environment (endothelial dysfunction and inflammation) associated with the primary disease [4].
The early, intra-operative identification of hypoperfusion is necessary to allow intervention and prevent complications.However, traditional monitoring does not directly assess perfusion in general, let alone within the microcirculation.Blood flow or perfusion is the result of a balance between blood pressure and vascular resistance.Systemically, blood flow is equal to cardiac output, but the routine measurement of cardiac output is uncommon due to limitations in the availability and costs of monitoring techniques.In practice, arterial blood pressure can be measured easily and accurately, making it one of the most commonly used techniques for evaluating cardiovascular function [5].However, hypoperfusion can occur in cases of normotension, and normal perfusion can occur in cases of hypotension depending on the relative change in vascular resistance.Additionally, as noted, perfusion to specific organ beds or even at different locations within a given organ bed may not be predicted using global perfusion/cardiovascular parameters.Thus, the monitoring of local microvascular circulation could improve the identification of abnormalities in tissue perfusion.
Side stream dark field microscopy has been developed for use as a clinical tool to assess microcirculatory perfusion.It has been evaluated in human and veterinary patients, specifically dogs and horses [6][7][8][9].This device emits a green light (530 nm) that is absorbed by the hemoglobin of erythrocytes while depolarized reflected light from the surrounding tissues is projected back to the device [10].These illuminated red blood cells can be seen flowing through the reflective microcirculation when viewed through the device, such that red blood cells are of a dark density on a white background [10].As a result, a real-time video image of red blood cells flowing through the microcirculation with 326× magnification is obtained [10].These images provide immediate, subjective information about tissue perfusion, and videos can be analyzed to determine several objective microvascular perfusion indices (MPIs), including the total vessel density (TVD), the perfused vessel density (PVD), the proportion of perfused vessels (PPV), and the microvascular flow index (MFI).The sublingual microcirculation has been the most popular region for study in humans and small animals [8,9,[11][12][13][14].In horses, oral and rectal mucosa and colonic serosa have been evaluated in healthy awake and sick anesthetized animals, respectively [6,7].Microvascular perfusion indices of the oral mucosa, rectal mucosa, and colonic serosa were evaluated concurrently in healthy normotensive anesthetized horses, showing no significant differences between the sites [15].
When hypotension occurs, a variety of treatments are employed to improve blood pressure with the goal of improving perfusion.Such treatments include but are not limited to, intravenous fluid therapy, decreasing the amount of the inhalant administered, and the administration of a cardiovascular stimulant and/or vasoactive drugs.Dobutamine, a beta-1 adrenergic receptor agonist acting on the myocardium in improving contractility, is one of the most commonly used medications to improve blood pressure in horses under general anesthesia [16].Despite the typical success of dobutamine to improve systemic low blood pressure, hypoperfusion may be ongoing and undiagnosed.An evaluation of the microcirculation with dark field microscopy has the potential to more accurately describe the effects of systemic hypotension and dobutamine treatment on tissue perfusion.Therefore, the objectives of this study were to evaluate (1) the effects of systemic hypotension on dark field microscopy MPIs in three microvascular beds and (2) the effects of the correction of hypotension with dobutamine on dark field microscopy MPIs.Our hypotheses were that in healthy horses undergoing general anesthesia, hypotension would decrease the MPIs within each site, and the correction of hypotension with dobutamine would return the MPIs to values recorded during normotension without cardiovascular support drugs.
Animals, Anesthesia and Monitoring
This study was approved by the University of Georgia Institutional Animal Care and Use Committee (Approval Code: A2014 05-022-Y3-A0). Six healthy adult (average age 17; range 10-29 years) horses (3 mares and 3 geldings) without known gastrointestinal disease or cardiovascular compromise were included in this study (average weight 468 kg; range 418-575 kg). Each horse underwent a physical examination and was deemed systemically healthy before inclusion in the study. Horses had an intravenous catheter placed in the right jugular vein and were pre-medicated with intravenous xylazine hydrochloride (1.1 mg/kg); anesthesia was induced with intravenous diazepam (0.05 mg/kg) and ketamine hydrochloride (2.2 mg/kg). Once anesthetized, the horses were intubated orotracheally and placed into dorsal recumbency. Isoflurane was delivered in 100% oxygen, and mechanical ventilation was used to maintain end-tidal CO2 between 35 and 45 mmHg. Intravenous polyionic fluids were delivered initially at 10 mL/kg/h. Direct cannulation of the facial artery for direct blood pressure monitoring was performed and maintained throughout the study period. In addition to blood pressure, vital parameters, electrocardiography, end-tidal CO2, oxygen flow rate, inspired oxygen concentration, tidal volume, exhaled isoflurane concentration, and peak inspiratory pressure were recorded every 5 min. Electrolyte concentration, blood gas analysis, and cardiac output (taken in duplicate via lithium dilution) [17,18] were obtained at the time of collection of the microcirculatory variables. Cardiac output was measured with a LiDCO computer (LiDCO plus, cardiac computer, LiDCO Group PLC, London, UK) [17]. The cardiac index was calculated by dividing the cardiac output by the weight (kg) of the horse. The rate of fluid delivery and the percentage of inhaled isoflurane were adjusted at the discretion of the anesthesiologist to maintain normotension (MAP 70-90 mmHg) for the first time point. Hypotension (MAP < 60 mmHg) was then induced by increasing the administered isoflurane for the second time point. Dobutamine was administered via a continuous rate infusion at a rate (0.5 mcg/kg/min) that was titrated to effect until normotension (MAP 70-90 mmHg) was achieved. Measurements were repeated once normotension was achieved, for the third and final time point.
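As a worked illustration of that calculation (with a purely hypothetical cardiac output value, since individual measurements are not reported here), a horse at the mean body weight of 468 kg with a cardiac output of 35 L/min would have a cardiac index of

CI = cardiac output / body weight = (35,000 mL/min) / (468 kg) ≈ 75 mL/kg/min.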
Dark Field Microscopy Image Acquisition
Microcirculation videos (Microscan; MicroVision Medical, Amsterdam, The Netherlands) were obtained from the oral mucosa, rectal mucosa, and the serosa of the pelvic flexure region of the large colon.Videos were collected according to a previously described protocol [15,19].The videos were obtained when the patients were in a normotensive state without the administration of any cardiovascular support drugs (normotension), induced hypotension (hypotension), and dobutamine-induced normotension (dobutamine).For all measurement periods, the respective blood pressures were maintained for at least 10 min prior to and then throughout image collection.
To access the pelvic flexure, a ventral midline celiotomy was performed.The abdomen was clipped and aseptically prepared, and a sterile drape was placed.A 20 cm long ventral midline incision was made with a scalpel.The pelvic flexure was exteriorized onto an enterotomy (colon) tray to allow for image collection.The pelvic flexure was returned to the abdomen between image collection time points.The tray was positioned downward at a gentle angle from the incision that was considered clinically appropriate to the horse's body size and conformation, allowing the colon to rest without tension.The angle was subjectively based on the clinical experience of PK, KE, and JW.
For videos obtained from the colon, the colon was placed on the enterotomy tray, and the tip of the probe was gently placed against the colonic serosa at the pelvic flexure.The probe was stabilized for collection using sandbags to support the unit.Once the unit was placed, the operator was able to remove their hands from the unit during collection in most cases.
For videos obtained from the rectum, the probe was manipulated and inserted at least half the distance of the disposable protective cover (approximately 1-2 cm in length).Gross fecal material was digitally removed from the rectum if necessary and lavaged away.The operator then utilized the table as a reference to angulate the probe on the mucosa as ideally as possible for image collection.For videos obtained from the oral mucosa, the probe was placed on the gingival mucosa and stabilized using sandbags.For all locations, warmed sterile isotonic fluids (0.9% NaCl) were applied as needed to the tissue and abdomen at the discretion of the investigators to maintain tissue integrity and image quality.Video loops were collected around the mechanical ventilation of the horse to minimize motion.
A minimum of 3 videos, each 20 s in length, were taken from each location at each sample time. All videos were obtained with the same unit. The videos for each site were obtained sequentially, and the order of site acquisition was randomized. The probe was moved between each video to adjacent but similar sites for subsequent image collection. Following the completion of data collection for this study, additional data/samples were collected from the subjects for other approved projects, and the subjects were then humanely euthanized while under general anesthesia.
Measurement of Microvascular Perfusion Indices
Analysis was performed in accordance with the De Backer et al. roundtable [19]. Three video clips of at least 50 frames were produced from the full videos for each site. Clip selection was based on clarity, stability, and acceptable image quality. The videos were then blinded by one investigator (JMW). Analysis of the blinded videos was performed by one investigator (PJK) to determine the microcirculatory variables (as previously described based on consensus criteria [8,19]) using manufacturer-provided software (Automated Vascular Analysis, Version 3.2; MicroVision Medical, Amsterdam, The Netherlands). Briefly, these included the total vessel density (TVD), the proportion of perfused vessels (PPV), the perfused vessel density (PVD), and the microvascular flow index (MFI). Detailed descriptions of video analysis and how these values are determined can be found in numerous sources [7,8,19].
Data Analysis
The normality of the data was assessed based on examination of histograms, normal Q-Q plots of the residuals, and the Shapiro-Wilk test. The variance of the data was assessed by plotting residuals against predicted values and using Levene's test. The effect of the blood pressure state (normotensive, hypotensive, dobutamine) on MAP, CI, lactate, and the microvascular variables was assessed using linear marginal models with compound symmetry, heterogeneous compound symmetry, or unstructured covariance structures. To assess the repeatability of the measurement of the microvascular variables, the coefficient of variation (CV) of the triplicate measurements was calculated for each horse, site (colon, oral, or rectal), and microvascular variable. The effects of the site, the microvascular variable, and the interaction between site and microvascular variable on the CV were evaluated using a linear mixed-effects model with the horse modeled as a random effect and the site and microvascular variable modeled as fixed nominal effects. When indicated, multiple pairwise comparisons were performed using the Sidak method. The associations between colonic, oral, and rectal microvascular variables, and between the microvascular variables and MAP or CI, were investigated by calculating correlation coefficients using the method described by Bland and Altman to account for repeated observations from individual animals [20]. For all analyses, p < 0.05 was considered significant. All statistical analyses were performed using SPSS (SPSS version 23; IBM Corp., Armonk, NY, USA).
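As a point of reference, the coefficient of variation referred to above is conventionally defined as CV (%) = (standard deviation of the triplicate measurements / mean of the triplicate measurements) x 100, computed separately for each combination of horse, site, and microvascular variable; this standard definition is assumed here rather than stated explicitly in the text.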
Results
The duration of time between the induction of anesthesia and the start of video collection averaged 37 min. The duration of time required for the collection of all the videos averaged 102 min (range 84-125 min). This time included device movement between sites, the exteriorization of the colon prior to obtaining videos, and the subsequent return of the colon to the abdomen following video collection. While the specific time to record the video triplicates at each site during each collection period was not measured, subjectively, these three videos were obtained within five minutes each time. The duration of dobutamine administration averaged 27.17 min (range 20-35 min). The average rate of dobutamine administered was 0.76 mcg/kg/min (range 0.27-2 mcg/kg/min). Efforts were made to maintain a consistent end-tidal isoflurane concentration; however, the isoflurane dose was modified to some degree during data collection in order to ensure that the blood pressure and the amount of dobutamine administered remained within the desired ranges. The average intravenous fluid rate was 3.7 L per hour (range 2-10 L/h), with an average total volume of 9.5 L per horse (range 7-17 L/horse). The average hematocrit (HCT) was 31.2% (range 26-34%), with an average change in HCT of 3.3% (range 1-6%).
All data for mean arterial pressure, cardiac index, and lactate were normally distributed. Significant differences existed in MAP between the normotensive (mean: 81 ± 2 mmHg) and hypotensive (mean: 49 ± 2 mmHg) time points (p < 0.000001) and between the dobutamine (mean: 81 ± 1 mmHg) and hypotensive time points (p < 0.000001). No significant difference existed between the normotensive and dobutamine time points. No significant difference in the cardiac index was present between the normotensive (mean: 101.39 mL/kg/min) and hypotensive (mean: 94.26 mL/kg/min) time points, but a significant difference was detected between the dobutamine time point (mean: 115.79 mL/kg/min) and both the normotensive and hypotensive time points (p = 0.017 and p = 0.0016, respectively). The mean lactate concentrations for the normotensive, hypotensive, and dobutamine time points were 0.97, 1.08, and 1.25 mmol/L, respectively. A statistically significant difference (p = 0.017) was present in lactate levels between the dobutamine and normotensive time points, although levels remained within the clinically normal reference range (<2 mmol/L). No significant differences were present in lactate levels between the dobutamine and hypotensive time points or between the hypotensive and normotensive time points.
All data for the microvascular perfusion indices were normally distributed. The mean values for the MPIs (with standard error) are provided in Table 1. No significant differences in TVD, PVD, or MFI were present between blood pressure states in the colon serosa, oral mucosa, or rectal mucosa (Figures 1-3, respectively). A significant difference was present in the proportion of perfused vessels of the oral mucosa between the hypotensive and dobutamine time points (p = 0.045) but not between other pressure states at this site (Figure 4). Additionally, no significant differences in PPV were present between the pressure states in the colon serosa and rectal mucosa (Figure 4). There were no correlations for any MPI between the oral mucosa, colon serosa, and rectal mucosa. Oral TVD and colon PVD were significantly (p < 0.05) correlated with the cardiac index (0.503 and 0.407, respectively). No other MPI at any site correlated with the cardiac index, and no microvascular perfusion index at any site correlated with mean arterial pressure. Complete correlation data are presented in Table 2.
Table 1. Microvascular perfusion indices (mean with standard error) of the oral mucosa, rectal mucosa, and colonic serosa under normotension, hypotension, and dobutamine-induced normotension (values not reproduced here).
Table 2. Correlation data: correlations between microvascular parameters (total vessel density (TVD), perfused vessel density (PVD), proportion of perfused vessels (PPV), and microvascular flow index (MFI)) of different sites (oral mucosa, rectal mucosa, and colon serosa) and between microvascular parameters, the MAP, and the cardiac index (CI) (values not reproduced here).
Discussion
We found that hypotension did not decrease the microvascular perfusion indices at any gastrointestinal location, thereby disproving our first hypothesis and making it impossible to test the second hypothesis, in which dobutamine would return the MPIs to values similar to those recorded during normotension. In anesthetized, healthy horses, there was minimal to no correlation between macrovascular parameters and microvascular parameters at three gastrointestinal sites during normotension, hypotension, or dobutamine-induced normotension. There were also no correlations between the microvascular parameters among the three gastrointestinal sites evaluated.
In this study, neither the cardiac index nor the microvascular blood flow was altered during periods of systemic hypotension. The lack of change in microperfusion in the regions evaluated was most likely a result of consistent cardiac output, adequate macrovascular flow, and maintained tissue perfusion. Organ tissue perfusion is influenced by the difference between arterial and venous pressure across the organs, and this pressure can vary between organs and peripheral tissues throughout the body. In this study, two peripheral sites (oral and rectal mucosa) and one organ site (colon) were evaluated, and capillary flow was unaltered throughout the varying pressure states. However, it is important to recognize that vascular beds of other organ and peripheral tissue sites may have undergone vasoconstriction in order to maintain cardiac output and adequate blood flow to the sites evaluated.
The lack of correlation between sites is similar to previous work [15]. These results should be interpreted with respect to the small sample size. However, other factors could also account for the results. The three sites investigated, while all part of the gastrointestinal tract, vary in their accessibility and anatomy. The collection of data with the microscopy unit involves a subjective amount of pressure applied to an area. The oral mucosa, with underlying bone, could experience greater compression of the microvasculature than areas with softer underlying tissue, such as the colon and rectum. Additionally, the rectal mucosa has more folds and dimensions than the oral mucosa or colonic serosa. These anatomic variations can create scenarios in which image capture may be inconsistent, regardless of the flow to the sites. The microcirculation changes dynamically and continually, and changes in blood flow to one site do not imply identical changes at another; the lack of correlation observed in this study could merely reflect this. Regional and local vasoconstriction and vasodilation must occur in order to accommodate regional and local demands and to allow for rapid and significant changes in systemic vascular resistance; this is a key feature of compensation for shock.
In this study, hypotension was effectively induced by increasing the percentage of inhaled isoflurane [21]. Isoflurane inhalation alters cardiovascular function via vasodilation and, to a lesser extent, decreased contractility [21]. Vasodilation affects the arterial and venous sides of the circulation, but the hypotension associated with isoflurane anesthesia is primarily the result of arterial dilation. The overall impact of vascular tone on cardiac output is, however, difficult to determine due to the contrasting effects of arterial and venous dilation on stroke volume. The decrease in contractility caused by isoflurane is relatively minor and may also have been minimized by compensatory increases in contractility as a physiologic response to the increasing stroke volume.
The use of intravenous fluids to support the preload may have decreased the negative effect of venous dilation on cardiac output. Intravenous fluids can also have a dilutional effect on hemoglobin; however, despite the use of intravenous fluids in this study, the hematocrit remained within the normal limits for all horses at all time points and changed only three percent, on average, throughout the study; the largest change was 6% in one horse. Therefore, despite the potential dilutional effect of intravenous fluids on hemoglobin and perfusion, we do not believe it was a factor in the results of this study.
The isoflurane model of hypotension was chosen due to its reliability, ease of administration, and clinical relevance. Isoflurane is a commonly used inhalant anesthetic for equine general anesthesia, and hypotension is considered a common, clinically significant side effect of its use. Despite achieving mean arterial pressures that fulfill the definition of hypotension in our test subjects, systemic lactate, the cardiac index, and the microcirculatory perfusion indices remained clinically, and, with the exception of lactate, statistically unchanged. These findings show that normal, healthy equine patients undergoing elective procedures in dorsal recumbency are likely able to maintain adequate local perfusion of the gastrointestinal tract despite the degree of systemic hypotension induced in this study. One limitation of this study is the health status of our test subjects; compared to horses with gastrointestinal disease, clinically healthy horses may respond very differently to isoflurane anesthesia and maintain very different relationships between their systemic cardiovascular and microvascular status. For this reason, repeating this study with different models of induced hypotension, such as blood loss or endotoxin, in clinically ill patients is necessary to determine the effects of hypotension on microcirculatory perfusion indices. Dobutamine, a positive inotrope that increases stroke volume and cardiac output, effectively reversed hypotension in our study population and significantly increased the cardiac index. Although the cardiac index was increased, the microvascular perfusion indices and lactate values were not significantly altered at the dobutamine time point. This lack of change might be due to a continuation of adequate systemic perfusion; alternatively, local control of microvascular flow may have prevented these indices from increasing beyond normal.
One study evaluated the effects of dobutamine at three doses (0.5, 1, and 3 mcg/kg/min) on systemic hemodynamics and intestinal perfusion (jejunum and colon) in healthy horses undergoing anesthesia with isoflurane. Compared to baseline, the higher doses of dobutamine significantly increased CO, HR, MAP, and blood flow to the jejunum and colon, whereas blood flow to the colon and jejunum was not significantly increased at 0.5 mcg/kg/min of dobutamine [22]. Our results differ from those observed at the higher doses in that study, though they are similar to those observed at the low dose. In our study, the average dose of dobutamine administered was 0.76 mcg/kg/min, with titration creating a dose range from 0.27 mcg/kg/min to 2 mcg/kg/min. We suspect that the similarities observed between the two studies are largely attributable to the dobutamine dosage. Additionally, the two studies evaluated the microcirculation differently. As microvascular beds are difficult to assess, a gold standard method has not been established. We evaluated blood flow to the colon with sidestream dark field microscopy, while the other study used a micro-lightguide spectrophotometer [22]. It is reasonable to suspect that differences in technique could account for some of the difference in results, at least until the two techniques are evaluated against each other.
It is worth mentioning that the effects of the other medications administered on vascular tone are known and unavoidable [23-25]. For this reason, the anesthetic protocol was standardized to minimize any effects of the various injectable drugs on the microcirculation. Additionally, as noted, although efforts were made to minimize adjustments to the percentage of isoflurane, adjustment was sometimes required to maintain a consistent MAP and/or dobutamine infusion. It is possible that these adjustments could have affected some measurements.
Microcirculatory function is critical to tissue perfusion, but the ability to assess the vascular beds of interest is challenging. Systemic parameters (MAP, CO, HR) can be more easily obtained in clinical patients; thus, using systemic parameters to accurately assess the microcirculation would be convenient. This study, like many studies before it, did not find a relationship between the macro- and microcirculation; this has been especially true for disease states [7,8,26-31]. The lack of an observable correlation between the systemic parameters and the microvascular perfusion indices reported in the current study may be due to the use of apparently healthy horses. These patients likely maintained their local autoregulation. An evaluation of these same tissue beds in disease states would be of interest and may yield different results.
Treatments such as dobutamine and intravenous fluids have the potential to alter blood volume and hematocrit. If blood viscosity is increased by hemoconcentration or decreased by hemodilution, perfusion can be affected. In this study, the hematocrit was within normal limits throughout. As such, the results of this study are unlikely to be related to alterations in viscosity.
There are several limitations to this study. The use of sidestream dark field microscopy is limited to mucosal or serosal surfaces, negatively affecting its utility elsewhere, such as the skin or within organs. Peristalsis within the colon and patient ventilation made image capture difficult when accounting for motion artifacts. For this reason, we recorded the recommended 20 s for assessment but spent a considerable amount of time in image analysis identifying at least 50 continuous frames of quality imaging for analysis. Additionally, the delay in image analysis makes this technology difficult to use in a real-time clinical setting. Finally, a larger sample size could have demonstrated different results; a sample size of six was chosen based on financial and ethical concerns.
Conclusions
In summary, in healthy, anesthetized adult horses, changes in blood pressure did not result in consistent, expected changes in systemic perfusion or in the microvascular perfusion parameters at the three sites. Based on these results, caution is recommended when interpreting the mean arterial pressure as a reflection of tissue perfusion. However, the most conservative and safest clinical approach when treating equine patients may be to assume decreased microvascular perfusion in the event of systemic hypotension. Additionally, the use of dobutamine was justified, based on the observable impact of that treatment on CI and MAP, without negative impacts on microperfusion. The lack of associations between the microvascular sites suggests that using one microvascular bed to predict the behavior or condition of another may lead to inaccuracies.
Figure 1. Total vessel density (TVD) (mm/mm2): a comparison of the mean and standard error (error bars) of the total vessel density of the oral mucosa, rectal mucosa, and colonic serosa under normotension, hypotension, and dobutamine-induced normotension.
Figure 2. Perfused vessel density (PVD) (mm/mm2): a comparison of the mean and standard error (error bars) of the perfused vessel density of the oral mucosa, rectal mucosa, and colonic serosa under normotension, hypotension, and dobutamine-induced normotension.
Figure 3. Microvascular flow index (MFI): a comparison of the mean and standard error (error bars) of the microvascular flow index of the oral mucosa, rectal mucosa, and colonic serosa under normotension, hypotension, and dobutamine-induced normotension.
Figure 4. The proportion of perfused vessels (PPV) (%): a comparison of the mean and standard error (error bars) of the proportion of perfused vessels of the oral mucosa, rectal mucosa, and colonic serosa under normotension, hypotension, and dobutamine-induced normotension. The (*) denotes statistically significant differences in the PPV of the oral mucosa between hypotension and dobutamine-induced normotension.
|
Air conditioning control system
ABSTRACT
A control system for an air conditioning or refrigeration apparatus includes a constant pressure regulating valve (automatic expansion valve) between the condenser and the evaporator and a temperature responsive valve between the evaporator and the compressor. Both valves are capable of adjustment to establish: (1) a predetermined pressure (and corresponding temperature) in the evaporator; and (2) a predetermined temperature of the refrigerant in the suction line leading to the compressor. Means are provided for coordinating the setting for both such valves to maintain a certain minimum superheat in the suction gas.
United States Patent, Newton, Sept. 5, 1972
[72] Inventor: Alwin B. Newton, York, Pa.
[73] Assignee: Borg-Warner Corporation, Chicago, Ill.
[21] Appl. No.: 99,807
[52] U.S. Cl.: 62/205, 62/217, 62/225
[58] Field of Search: 62/205, 210, 217, 222, 225
[56] References Cited, United States Patents: 3,296,816 (1/1967, Welled, 62/210); 2,116,801 (1938, Shivers, 62/217)
Primary Examiner: Meyer Perlin
Attorney: Donald W. Banner, John W. Butcher and William S. McCurry
4 Claims, 1 Drawing Figure
BACKGROUND OF THE INVENTION
1. Field of the Invention
Automatic control of vapor cycle refrigeration or air conditioning systems in which an automatic expansion valve and a temperature responsive suction throttling valve are independently adjustable, with means for maintaining a minimum superheat to prevent carryover of liquid in the suction line.
2. Description of Prior Art
U.S. Pat. No. 3,260,064, A. B. Newton, issued July 12, 1966, shows a system utilizing a constant pressure expansion device and a suction throttling valve sensing the temperature of refrigerant upstream from the throttling valve.
U.S. Pat. No. 3,119,559, J. W. Heidorn, issued Jan. 28, 1964, describes an automotive air conditioning system provided with an evaporator pressure regulator downstream from the evaporator and a thermal expansion valve controlling flow of refrigerant from the high side to the low side of the system.
SUMMARY OF THE INVENTION
This invention relates generally to a control for refrigeration systems and more particularly to air conditioning apparatus.
In U.S. Pat. No. 3,119,559, the evaporator pressure regulating valve effectively prevents the evaporator coil pressure from dropping below a predetermined, non-icing pressure condition, for example, 32 psig. Under low loads, the evaporator becomes partially flooded, sending slugs of liquid through to the compressor. This condition sometimes results in broken compressor valves and other damage.
In U.S. Pat. No. 3,260,064 (Newton), an improved system is described which includes an adjustable automatic expansion valve and means to control the capacity of the compressor by various techniques, for example, by throttling the suction gas or by unloading one or more compressor cylinders. In the embodiment using suction gas throttling, the refrigerant temperature is sensed as the gas leaves the evaporator on the upstream side of the valve.
In the Newton system, if the setting on the automatic expansion valve is adjusted, it is necessary to reset the temperature responsive suction line valve to assure some minimum superheat in the gas entering the compressor. For example, assume that a system is using R-12 refrigerant and that the evaporator pressure is set, by adjustment of the automatic expansion valve, to 30.07 psig (32°F). If 6°F of superheat is desired, the setting on the suction line valve would control the flow to the compressor so as to maintain 38°F, because the temperature responsive valve only reads the temperature component of superheat. If the automatic expansion valve is then adjusted to a higher pressure, say 40.7 psig, to produce an evaporator coil temperature of 44°F, the evaporator coil would be completely flooded and liquid would flow through the suction line into the compressor.
In order to avoid this problem, the present invention provides a mechanical coordination between the setting of the automatic expansion valve and the temperature responsive, suction line valve. Such means may take many forms, but in the preferred embodiment, it may simply be a pair of cables interconnecting an adjusting screw on each valve with a control dial. The two dials then are provided with interengaging stop means so that the respective settings on the automatic expansion valve and the suction line valve cannot be changed to positions which result in less than the desired minimum superheat.
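Stated as a simple rule (a paraphrase of the relationship the interlock enforces, not wording from the patent): the temperature setting of the suction line valve must always be at least the saturation temperature corresponding to the pressure set on the automatic expansion valve plus the desired minimum superheat, i.e., T_suction >= T_sat(P_evaporator) + delta_T_min, where delta_T_min is 6°F in the example above.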
DESCRIPTION OF THE DRAWING
The FIGURE shows a refrigeration or air conditioning system, partly in schematic form, constructed in accordance with the principles of this invention.
DETAILED DESCRIPTION OF THE INVENTION
Referring now to the FIGURE, there is shown a conventional vapor cycle refrigeration system including a compressor 10, condenser 12, an expansion device 14, evaporator 16, and a temperature responsive, suction line throttling valve 18.
Refrigerant gas is compressed in the compressor 10 and flows through line 20 to the condenser 12. Liquid refrigerant then flows through line 22 to the automatic expansion device 14, which is of a pressure responsive type, to be described in detail below, maintaining a relatively constant pressure in the evaporator. The low pressure liquid refrigerant then flows through line 24 to the evaporator, over which air is circulated by means of a blower 26. The refrigerant is evaporated from the inside of the coil, and vaporized refrigerant gas flows through line 28 to the throttling valve 18, and then through line 29 to compressor 10.
The pressure responsive expansion device 14 is of the type that senses pressure on the downstream side of the valve to maintain a substantially constant pressure in the evaporator. On sensing a drop in pressure, the valve opens to permit more refrigerant to flow and will close upon a rise in pressure. Valve 14 comprises a housing 32 enclosing a diaphragm 34 attached to valve member 30, a first spring 36 biasing valve member 30 closed, and a second spring 38 biasing it open. The load on spring 38 is adjusted by a threaded plug 40 which determines the amount of force necessary to open the valve. Pressure on the downstream side of the valve is applied to diaphragm 34 through line 42, it being understood that the pressure could also be transmitted internally of the valve.
The suction line valve 18 comprises a housing 44 having an inlet chamber 46 communicating with the evaporator through line 28 and an outlet chamber 48 communicating with the suction line of the compressor through line 29. The housing is provided with a partition 50 separating chambers 46 and 48, and having a valve seat 52 which cooperates with valve 54. At one end of the housing 44 is a valve operating mechanism comprising an operating bellows 56 connected with a remote thermal responsive bulb 58. The operating mechanism is sealed from chamber 48 by means of a bellows-type seal 60 which has a much smaller area than bellows 56. The stem 62 of valve 54 is attached to the operating bellows, which expands and contracts in response to temperature sensed by bulb 58. Remote bulb 58 may be located either on the suction line 29 downstream from the valve, or in line 28 connecting the evaporator and the valve. In the FIGURE, the bulb is shown as being located in contact with line 28. The force required to close the valve 54, i.e., the control point, is adjusted by the compression on spring 64, said spring being interposed between the operating bellows and an adjusting screw 66.
In accordance with the invention, the settings of the automatic expansion device 14 and the suction line valve 18 are mechanically coordinated to maintain some minimum degree of superheat. This may be accomplished in a relatively simple manner by interconnecting the adjustment screw 40 on valve 14 with an operating lever 68 via a torque transmitting cable 70; and adjusting screw 66 is connected to lever 72 by cable 74. For convenience, a common dial 71 may be used to correlate the position of levers 68 and 72 with the evaporator temperature and superheat setting, respectively.
In order to assure that some minimum degree of superheat is present regardless of the settings on the respective levers, a lug 76 may be attached to one or the other of such levers, so that this provides interengaging stop means for preventing lever 72 from being pushed beyond the setting of lever 68. For example, any attempt to move lever 72 counterclockwise (reducing superheat) will result in engagement of lever 68 to reposition it to a lower setting. Conversely, movement of lever 68 in a clockwise direction to obtain a higher evaporator pressure will automatically move the suction line valve to a corresponding higher superheat setting.
While the invention has been described in connection with a certain specific embodiment thereof, it is to be understood that this is by way of illustration and not by way of limitation; and the scope of the appended claims should be construed as broadly as the prior art will permit.
What is claimed is:
1. Refrigeration apparatus comprising: a compressor, a condenser, and an evaporator connected in a closed circuit, vapor cycle system through which a refrigerant is circulated; a constant pressure expansion device operatively connected between said condenser and said evaporator to maintain a predetermined pressure (and corresponding temperature) of refrigerant in said evaporator; temperature responsive valve means operatively connected between said evaporator and said compressor to control the flow of refrigerant therebetween, and maintain a predetermined superheat of the refrigerant leaving said evaporator; first adjusting means for adjusting the control point of said constant pressure expansion device; second adjusting means for independently adjusting the control point of said valve means; and means for mechanically interconnecting said first and second adjusting means to maintain a predetermined minimum temperature differential between the control point of said expansion device and the control point of said temperature responsive valve means.
2. Apparatus as defined in claim 1 wherein said first adjusting means includes a remotely located lever for changing the control point of said constant pressure expansion device, and said second adjusting means includes an adjacent lever, and interengaging stop means on one of said levers adapted to reset the control point of said temperature responsive valve when said expansion device is adjusted to a setting which would result in a temperature differential lower than said predetermined minimum.
3. In a refrigeration apparatus of the type including a compressor, a condenser, and an evaporator connected in a closed circuit, vapor cycle system through which a refrigerant is circulated; a constant pressure expansion device operatively connected between said condenser and said evaporator to maintain a predetermined pressure (and corresponding temperature) of refrigerant in said evaporator; temperature responsive valve means operatively connected between said evaporator and said compressor to control the flow of refrigerant therebetween and maintain a predetermined superheat of the refrigerant leaving said evaporator; the improvement comprising first adjusting means for adjusting the control point of said constant pressure expansion device; second adjusting means for independently adjusting the control point of said valve means; and means for mechanically interconnecting said first and second adjusting means to maintain a predetermined minimum temperature differential between the control point of said expansion device and said temperature responsive valve means.
4. Refrigeration apparatus comprising: a compressor, a condenser, and an evaporator connected in a closed circuit, vapor cycle system through which a refrigerant is circulated; a constant pressure expansion device operatively connected between said condenser and said evaporator to maintain a predetermined pressure (and corresponding temperature) of refrigerant in said evaporator, said expansion device including a pressure responsive element actuating a valve, said valve closing upon a rise in evaporator pressure; temperature responsive valve means operatively connected between said evaporator and said compressor to control the flow of refrigerant therebetween and maintain a predetermined superheat of the refrigerant entering said compressor, said temperature responsive valve means having a temperature responsive element opening said valve means upon a temperature rise; first resilient means opposing the valve opening force of said temperature responsive element, first adjusting means for adjusting the compressive force on said first resilient means; second resilient means opposing the closing force of said pressure responsive element, and second adjusting means for adjusting the compressive force on said second resilient means; first and second remotely located levers operatively connected respectively to said first and second adjusting means; a lug attached to one of said levers adapted to engage the other of said levers after a predetermined rotation in a direction toward the same, the interengaging of said lug with said lever being adapted to prevent the setting of the constant pressure expansion device to produce a temperature lower than a predetermined temperature difference between the refrigerant in said evaporator and the refrigerant passing through said valve means.
|
Exporting the results of a server services comparison to a CSV file in PowerShell
I am trying to compare the services running on 9 servers, which are all supposed to have the same setup but currently do not.
I decided to create a script that exports all services from PowerShell to a CSV file so I can compare server A to B, A to C, A to D, etc.
Each CSV file contains the following columns: Name, Caption, State, Startmode
I would like to be able to compare the differences and came up with the small PowerShell script shown below:
$IWB06 = import-csv C:\PS_Temp\PIN-VUHOIWB06_Services.csv
$IWB09 = import-csv C:\PS_Temp\PIN-VUHOIWB09_Services.csv
Compare-Object -ReferenceObject $IWB06 -DifferenceObject $IWB09 -Property 'Unique ID', 'name', 'caption', 'state', 'startmode', 'comparrison' -PassThru -IncludeEqual |
Format-Table -AutoSize
My only issue is that this displays in the PowerShell console, but I am not able to export the results to a CSV file.
Any suggestions on how to export what is displayed in the console to a CSV file?
does export-csv not work? https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.utility/export-csv?view=powershell-6
Instead of Format-Table, just pipe to Export-Csv. If you need both, store the comparison in a variable and run it against both: $var = compare ..; $var | Format-Table -AutoSize; $var | Export-Csv -Path ..
@TheIncorrigible1 Might as well mention Tee-Object at this point then since it is very related to that concept.
@Matt Useful yes, but unfortunately does not send output to two different commands (unless you count Add-Content / New-Variable)
Replace your last line with the following:
$Results = Compare-Object -ReferenceObject $IWB06 -DifferenceObject $IWB09 -Property 'Unique ID', 'name', 'caption', 'state', 'startmode', 'comparrison' -PassThru -IncludeEqual
$Results | Export-Csv -NoTypeInformation -Encoding Unicode -Path C:\temp\MyResult.csv #Replace with your output csv destination
Comparing two files at a time sounds very inefficient. Why not combine all your CSVs into one object with an additional column denoting the source? You could then use Group-Object to summarise. Something like this:
$path = 'C:\PS_Temp\{0}'
# Build one object per (file, Unique ID) pair from every CSV export
(ls ($path -f '*.csv')).Name | % {
    $file = $_
    Write-Host $_
    (Import-Csv ($path -f $_)).'Unique ID' | % {
        New-Object psobject -Property @{
            file = $file
            id   = $_
        }
    }
} | Group-Object id | % {
    # Record which files each ID appears in, joined with '|'
    $_ | Add-Member -MemberType NoteProperty -Name Files -Value ($_.Group.file -Join '|') -PassThru
} | Select Name, Files, Count
This is a simple example using the unique ID only, but you could expand it to include other columns.
Output would be something like this:
Name Files Count
---- ----- -----
3 PIN-VUHOIWB06.csv|PIN-VUHOIWB09.csv 2
4 PIN-VUHOIWB06.csv|PIN-VUHOIWB09.csv 2
5 PIN-VUHOIWB06.csv|PIN-VUHOIWB09.csv 2
6 PIN-VUHOIWB06.csv|PIN-VUHOIWB09.csv 2
7 PIN-VUHOIWB06.csv 1
8 PIN-VUHOIWB06.csv 1
9 PIN-VUHOIWB06.csv 1
1 PIN-VUHOIWB09.csv 1
2 PIN-VUHOIWB09.csv 1
Add this to your code:
$comparison = Compare-Object -ReferenceObject $IWB06 -DifferenceObject $IWB09 -Property 'Unique ID', 'name', 'caption', 'state', 'startmode', 'comparrison' -PassThru -IncludeEqual
$comparison | Export-CSV -path $someFullPath
|
//
// BDCModule.swift
// BDCKit
//
// Created by Jean-Baptiste Dominguez on 2019/04/08.
// Copyright © 2019 Bitcoin.com. All rights reserved.
//
import UIKit
public enum PushType {
case modal
case nav
}
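/// Delegate through which a module reports navigation: it exposes the view
/// controller currently on screen and handles committing a previewed view
/// controller, either modally or onto the navigation stack.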
public protocol BDCModuleDelegate {
var currentViewController: UIViewController? { get }
func didPushPreview(_ viewControllerToCommit: UIViewController, type: PushType)
}
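/// Builds the root view of a module, wired to the given module delegate.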
public protocol BDCModuleBuilder {
func provide(_ moduleDelegate: BDCModuleDelegate) -> UIView
}
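/// Builds the root view controller of a module.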
public protocol BDCBuilder {
func provide() -> UIViewController
}
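// Illustrative sketch (not part of the original BDCKit sources): a concrete
// builder conforming to BDCBuilder might look like the following, where
// SettingsViewController is a hypothetical view controller type.
//
// final class SettingsBuilder: BDCBuilder {
//     public func provide() -> UIViewController {
//         return SettingsViewController()
//     }
// }
//
// let settings = SettingsBuilder().provide()
// navigationController?.pushViewController(settings, animated: true)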
|
Pellet and fiber length for polyester fiber reinforced polypropylene composites
ABSTRACT
The present disclosure is directed generally to polyester fiber reinforced polypropylene resin pellets and methods for producing therein. The polyester fiber reinforced polypropylene resin pellets include at least 25 wt % polypropylene based polymer; from 10 to 40 wt % polyester fiber; from 0 to 60 wt % inorganic filler; and from 0 to 0.2 wt % lubricant. The polyester fiber is incorporated into the resin pellets by feeding chopped fiber or continuous fiber unwound from one or more spools. Articles molded from the polyester fiber reinforced polypropylene resin pellets exhibit a drop dart impact resistance that is dependent on the pellet length and whether the PET fiber is incorporated as chopped fiber or continuous fiber during the extrusion compounding process. Articles molded from the polyester fiber reinforced polypropylene resin pellets find application as automotive parts, household appliance parts, or boat hulls.
This application claims the benefit of U.S. Provisional Application 60/906,041 filed Mar. 9, 2007 and is a Continuation-in-Part of U.S. application Ser. No. 11/301,533 filed Dec. 13, 2005.
FIELD
The present disclosure is directed generally to fiber reinforced polypropylene compositions and articles made from such compositions having a flexural modulus of at least 300,000 psi and exhibiting ductility during instrumented impact testing. It more particularly relates to polyester fiber reinforced polypropylene resin pellets yielding articles with improved drop dart impact strength, and methods of producing such pellets.
BACKGROUND
Polyolefins have limited use in engineering applications due to the tradeoff between toughness and stiffness. For example, polyethylene is widely regarded as being relatively tough, but low in stiffness. Polypropylene generally displays the opposite trend, i.e., is relatively stiff, but low in toughness.
Several well known polypropylene compositions have been introduced which address toughness. For example, it is known to increase the toughness of polypropylene by adding rubber particles, either in-reactor resulting in impact copolymers, or through post-reactor blending. However, while toughness is improved, the stiffness is considerably reduced using this approach.
Glass reinforced polypropylene compositions have been introduced to improve stiffness. However, the glass fibers have a tendency to break in typical injection molding equipment, resulting in reduced toughness and stiffness. In addition, glass reinforced products have a tendency to warp after injection molding.
Another known method of improving physical properties of polyolefins is organic fiber reinforcement. For example, EP Patent Application 0397881, the entire disclosure of which is hereby incorporated herein by reference, discloses a composition produced by melt-mixing 100 parts by weight of a polypropylene resin and 10 to 100 parts by weight of polyester fibers having a fiber diameter of 1 to 10 deniers, a fiber length of 0.5 to 50 mm and a fiber strength of 5 to 13 g/d, and then molding the resulting mixture. Also, U.S. Pat. No. 3,639,424 to Gray, Jr. et al., the entire disclosure of which is hereby incorporated herein by reference, discloses a composition including a polymer, such as polypropylene, and uniformly dispersed therein at least about 10% by weight of the composition staple length fiber, the fiber being of man-made polymers, such as poly(ethylene terephthalate) or poly(1,4-cyclohexylenedimethylene terephthalate).
Fiber reinforced polypropylene compositions are also disclosed in PCT Publication WO02/053629, the entire disclosure of which is hereby incorporated herein by reference. More specifically, WO02/053629 discloses a polymeric compound, comprising a thermoplastic matrix having a high flow during melt processing and polymeric fibers having lengths of from 0.1 mm to 50 mm. The polymeric compound comprises between 0.5 wt % and 10 wt % of a lubricant.
U.S. Pat. No. 3,304,282 to Cadus et al. discloses a process for the production of glass fiber reinforced high molecular weight thermoplastics in which the plastic resin is supplied to an extruder or continuous kneader, endless glass fibers are introduced into the melt and broken up therein, and the mixture is homogenized and discharged through a die. The glass fibers are supplied in the form of endless rovings to an injection or degassing port downstream of the feed hopper of the extruder.
U.S. Pat. No. 5,401,154 to Sargent discloses an apparatus for making a fiber reinforced thermoplastic material and forming parts therefrom. The apparatus includes an extruder having a first material inlet, a second material inlet positioned downstream of the first material inlet, and an outlet. A thermoplastic resin material is supplied at the first material inlet and a first fiber reinforcing material is supplied at the second material inlet of the compounding extruder, which discharges a molten random fiber reinforced thermoplastic material at the extruder outlet. The fiber reinforcing material may include a bundle of continuous fibers formed from a plurality of monofilament fibers. Fiber types disclosed include glass, carbon, graphite and Kevlar.
U.S. Pat. No. 5,595,696 to Schlarb et al. discloses a fiber composite plastic and a process for the preparation thereof and more particularly to a composite material comprising continuous fibers and a plastic matrix. The fiber types include glass, carbon and natural fibers, and can be fed to the extruder in the form of chopped or continuous fibers. The continuous fiber is fed to the extruder downstream of the resin feed hopper.
U.S. Pat. No. 6,395,342 to Kadowaki et al. discloses an impregnation process for preparing pellets of a synthetic organic fiber reinforced polyolefin. The process comprises the steps of heating a polyolefin at the temperature which is higher than the melting point thereof by 40 degree C. or more to lower than the melting point of a synthetic organic fiber to form a molten polyolefin; passing a reinforcing fiber comprising the synthetic organic fiber continuously through the molten polyolefin within six seconds to form a polyolefin impregnated fiber; and cutting the polyolefin impregnated fiber into the pellets. Organic fiber types include polyethylene terephthalate, polybutylene terephthalate, polyamide 6, and polyamide 66.
U.S. Pat. No. 6,419,864 to Scheuring et al. discloses a method of preparing filled, modified and fiber reinforced thermoplastics by mixing polymers, additives, fillers and fibers in a twin screw extruder. Continuous fiber rovings are fed to the twin screw extruder at a fiber feed zone located downstream of the feed hopper for the polymer resin. Fiber types disclosed include glass and carbon.
The above referenced patent publications and applications have not investigated the relationship between fiber reinforced resin pellet length, cut fiber length and the resultant impact properties of articles molded from such pellets. Because pellet length may impact the average fiber length and fiber length distribution, a need exists to determine if such a relationship exists and if so, to determine the resultant effect on impact resistance of parts molded from the fiber reinforced polypropylene composite resin.
SUMMARY
It has surprisingly been found that the pellet length of PET fiber reinforced polypropylene resins has a substantial effect on the impact resistance of articles molded from such resins. In addition, the input fiber length of the chopped PET fiber used to produce PET fiber reinforced polypropylene resins also has a substantial effect on the impact resistance of articles molded from such resins. It has also been surprisingly found that the advantageous pellet length for such composites varies as a function of whether chopped PET fiber, or PET in the form of spools of fiber continuously unwound into the hopper of the compounding extruder, is utilized to produce such resin pellets. The PET fiber reinforced polypropylene resin pellets of the present disclosure are particularly suitable for making molded articles including, but not limited to, household appliances, automotive parts, and boat hulls.
One aspect of the present disclosure provides PET fiber reinforced polypropylene resin pellets comprising, based on the total weight of the composition, at least 25 wt % polypropylene based polymer; from 20 to 40 wt % polyester fiber; from 0 to 60 wt % inorganic filler; and from 0 to 0.2 wt % lubricant; wherein the resin pellets range from 3.2 to 12.7 mm in length, wherein the polyester fiber is incorporated into the resin pellets by continuously feeding PET fiber from one or more spools into the extruder hopper of a compounding extruder, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 5.3 newton meter.
Another aspect of the present disclosure provides PET fiber reinforced polypropylene resin pellets comprising, based on the total weight of the composition, at least 25 wt % polypropylene based polymer; from 20 to 40 wt % polyester fiber; from 0 to 60 wt % inorganic filler; and from 0 to 0.2 wt % lubricant; wherein the resin pellets range from 3.2 to 19.1 mm in length, wherein the polyester fiber is incorporated into the resin pellets by feeding chopped PET fiber into a compounding extruder, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 6.1 newton meter.
Another aspect of the present disclosure provides a method of making PET fiber reinforced polypropylene resin pellets comprising, based on the total weight of the composition, at least 25 wt % polypropylene based polymer; from 20 to 40 wt % polyester fiber; from 0 to 60 wt % inorganic filler; and from 0 to 0.2 wt % lubricant; wherein the resin pellets range from 3.2 to 12.7 mm in length, wherein the polyester fiber is incorporated into the resin pellets by continuously feeding PET fiber from one or more spools into the extruder hopper of a compounding extruder, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 5.3 newton meter; wherein the method comprises feeding into the extruder the polypropylene based resin, the polyester fiber, the inorganic filler, and the lubricant; extruding the polypropylene based resin, the PET fiber, the inorganic filler and the lubricant through the extruder to form a PET fiber reinforced polypropylene composite melt; cooling and pelletizing the PET fiber reinforced polypropylene composite melt to form the PET fiber reinforced polypropylene resin pellets.
Still another aspect of the present disclosure provides a method of making PET fiber reinforced polypropylene resin pellets comprising, based on the total weight of the composition, at least 25 wt % polypropylene based polymer; from 20 to 40 wt % polyester fiber; from 0 to 60 wt % inorganic filler; and from 0 to 0.2 wt % lubricant; wherein the resin pellets range from 3.2 to 19.1 mm in length, wherein the polyester fiber is incorporated into the resin pellets by feeding chopped PET fiber into a compounding extruder, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 6.1 newton meter; wherein the method comprises feeding into the extruder the polypropylene based resin, the polyester fiber, the inorganic filler, and the lubricant; extruding the polypropylene based resin, the PET fiber, the inorganic filler and the lubricant through the extruder to form a PET fiber reinforced polypropylene composite melt; cooling and pelletizing the PET fiber reinforced polypropylene composite melt to form the PET fiber reinforced polypropylene resin pellets.
Still another aspect of the present disclosure provides PET fiber reinforced polypropylene resin pellets comprising, based on the total weight of the composition, at least 25 wt % polypropylene based polymer; from 20 to 40 wt % polyester fiber, wherein the polyester fiber denier is less than 5; from 0 to 60 wt % inorganic filler; and from 0 to 0.2 wt % lubricant; wherein the resin pellets range from 3.2 to 25.4 mm in length, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 5.0 newton meter.
Still yet another aspect of the present disclosure provides a method of making PET fiber reinforced polypropylene resin pellets comprising, based on the total weight of the composition, at least 25 wt % polypropylene based polymer; from 20 to 40 wt % polyester fiber, wherein the polyester fiber denier is less than 5; from 0 to 60 wt % inorganic filler; and from 0 to 0.2 wt % lubricant; wherein the resin pellets range from 3.2 to 25.4 mm in length, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 5.0 newton meter; wherein the method comprises: feeding into the extruder the polypropylene based resin, the polyester fiber, the inorganic filler, and the lubricant; extruding the polypropylene based resin, the PET fiber, the inorganic filler and the lubricant through the extruder to form a PET fiber reinforced polypropylene composite melt; cooling and pelletizing the PET fiber reinforced polypropylene composite melt to form the PET fiber reinforced polypropylene resin pellets.
These and other features and attributes of the disclosed PET fiber reinforced polypropylene resin pellets and methods of making the same, and their advantageous applications and/or uses, will be apparent from the detailed description which follows, particularly when read in conjunction with the figures appended hereto.
BRIEF DESCRIPTION OF THE DRAWINGS
To assist those of ordinary skill in the relevant art in making and using the subject matter hereof, reference is made to the appended drawings, wherein:
FIG. 1 depicts one form of the process for making PET fiber reinforced polypropylene composite resin pellets of the present disclosure using continuous PET fiber fed from spools.
FIG. 2 depicts another form of a twin screw extruder with a downstream feed port for making PET fiber reinforced polypropylene composite resin pellets of the present disclosure using chopped PET fiber fed into the downstream feed port.
FIG. 3 depicts an exemplary schematic of a twin screw extruder screw configuration for making PET fiber reinforced polypropylene composite resin pellets of the present disclosure.
DETAILED DESCRIPTION
The present disclosure relates to improved PET fiber reinforced polypropylene composite resin pellets and methods of making the same. The PET fiber reinforced polypropylene resin pellets of the present disclosure are distinguishable over the prior art in comprising a combination of a polypropylene based matrix with PET fiber and inorganic filler in a resin pellet length that is advantageous as to the impact resistance of articles molded from said resin pellets. The PET reinforced polypropylene resin pellets yield molded articles with a drop dart impact resistance of at least 5.0 newton meter that do not splinter upon impact testing. The PET fiber reinforced polypropylene composite resin pellets of the present disclosure are also distinguishable over the prior art in comprising a polypropylene based matrix polymer with an advantageously high melt flow rate without sacrificing impact resistance. In addition, PET fiber reinforced polypropylene composite resin pellets of the present disclosure do not splinter during instrumented impact testing. The process of making PET fiber reinforced polypropylene compositions of the present disclosure is also distinguishable over the prior art in providing a process that controls and optimizes the resin pellet length and the input cut PET fiber length and thickness for impact resistance. All numerical values within the detailed description and the claims herein are understood as modified by “about.”
U.S. patent application Ser. No. 11/301,533 filed on Dec. 13, 2005, herein incorporated by reference in its entirety, discloses advantageous fiber reinforced polypropylene compositions. The fiber reinforced polypropylene compositions include at least 25 wt % polypropylene based polymer, from 5 to 60 wt % organic fiber, and from 0 to 60 wt % inorganic filler. Articles molded from these fiber reinforced polypropylene compositions have a flexural modulus of at least 300,000 psi, and exhibit ductility during instrumented impact testing.
U.S. patent application Ser. No. 11/318,363 filed on Dec. 23, 2005, herein incorporated by reference in its entirety, discloses advantageous processes for making fiber reinforced polypropylene resins including at least 25 wt % polypropylene based polymer, from 5 to 60 wt % organic fiber, and from 0 to 60 wt % inorganic filler. The process includes extrusion compounding the polypropylene based polymer, the organic fiber, and the inorganic filler to form fiber reinforced polypropylene resin pellets, which are subsequently molded to form an article with a flexural modulus of at least 300,000 psi that exhibits ductility during instrumented impact testing.
U.S. patent application Ser. No. 11/395,493 filed on Mar. 31, 2006, herein incorporated by reference in its entirety, discloses cloth-like fiber reinforced polypropylene compositions, and the beneficial mechanical and aesthetic properties imparted by such compositions. The cloth-like fiber reinforced polypropylene compositions include at least 25 wt % polypropylene based polymer, from 5 to 60 wt % organic reinforcing fiber, from 0 to 60 wt % inorganic filler, and from 0.1 to 2.5 wt % colorant fiber. Articles molded from these fiber reinforced polypropylene compositions have a flexural modulus of at least 300,000 psi, exhibit ductility during instrumented impact testing, and exhibit a cloth-like appearance.
The PET fiber reinforced polypropylene composite resin pellets of the present disclosure simultaneously have desirable stiffness, as measured by a flexural modulus of at least 300,000 psi, and toughness, as measured by ductility during instrumented impact testing. In addition, PET fiber reinforced polypropylene composite resin pellets of the present disclosure with particular pellet lengths yield articles with drop dart impact resistance values exceeding 5.0, or 6.0, or 7.0, or 8.0, or 9.0, or 10.0, or 11.0, or 12.0, or 13.0 newton meter. In a particular embodiment, the PET reinforced polypropylene resin pellets after molding yield an article having a flexural modulus of at least 350,000 psi, or at least 370,000 psi, or at least 390,000 psi, or at least 400,000 psi, or at least 450,000 psi. Still more particularly, the PET reinforced polypropylene resin pellets after molding yield an article having a flexural modulus of at least 600,000 psi, or at least 800,000 psi. It is also believed that having a weak interface between the polypropylene matrix and the PET fiber contributes to fiber pullout and, therefore, may enhance toughness. Thus, there is no need to add modified polypropylenes to enhance bonding between the PET fiber and the polypropylene matrix, although the use of modified polypropylene may be advantageous to enhance the bonding between a filler, such as talc or wollastonite, and the matrix polymer. In addition, in one embodiment, there is no need to add lubricant to weaken the interface between the polypropylene and the fiber to further enhance fiber pullout. Some embodiments also display no splintering during instrumented dart impact testing, which yields the further advantage of not subjecting a person in close proximity to the impact to potentially harmful splintered fragments. This characteristic is advantageous in automotive applications.
Compositions of the present disclosure generally include at least 25 wt %, based on the total weight of the composition, of polypropylene based polymer as the matrix resin. In a particular embodiment, the polypropylene is present in an amount of at least 30 wt %, or at least 35 wt %, or at least 40 wt %, or at least 45 wt %, or at least 50 wt %, or in an amount within the range having a lower limit of 30 wt %, or 35 wt %, or 40 wt %, or 45 wt %, or 50 wt %, and an upper limit of 60 wt %, or 75 wt %, or 80 wt %, based on the total weight of the composition. In another embodiment, the polypropylene is present in an amount of at least 25 wt %.
The polypropylene based resin used as the matrix resin is not particularly restricted and is generally selected from the group consisting of propylene homopolymers, propylene-ethylene random copolymers, propylene-butene-1 random copolymers, propylene-hexene-1 random copolymers, propylene-octene-1 random copolymers, other propylene-α-olefin random copolymers, propylene block copolymers, propylene impact copolymers, ethylene-propylene-butene-1 terpolymers, and combinations thereof. In a particular embodiment, the polypropylene is a propylene homopolymer. In another particular embodiment, the polypropylene is a propylene impact copolymer comprising from 78 to 95 wt % homopolypropylene and from 5 to 22 wt % ethylene-propylene rubber, based on the total weight of the impact copolymer. In a particular aspect of this embodiment, the propylene impact copolymer comprises from 90 to 95 wt % homopolypropylene and from 5 to 10 wt % ethylene-propylene rubber, based on the total weight of the impact copolymer.
The polypropylene based resin used as the matrix may have a melt flow rate of from 20 to 1500 g/10 min. In a particular embodiment, the melt flow rate of the polypropylene matrix resin is greater than 100 g/10 min, and still more particularly greater than or equal to 400 g/10 min. In yet another embodiment, the melt flow rate of the polypropylene matrix resin is 1500 g/10 min. The higher melt flow rate allows for improvements in processability and throughput rates, and permits higher loading levels of organic fiber and inorganic filler without negatively impacting flexural modulus and impact resistance.
In a particular embodiment, the matrix polypropylene may contain less than 0.1 wt % of a modifier, based on the total weight of the polypropylene. Typical modifiers include, for example, unsaturated carboxylic acids, such as acrylic acid, methacrylic acid, maleic acid, itaconic acid, fumaric acid or esters thereof, maleic anhydride, itaconic anhydride, and derivatives thereof. In another particular embodiment, the matrix polypropylene does not contain a modifier. In still yet another particular embodiment, the polypropylene based polymer further includes from 0.1 wt % to less than 10 wt % of a polypropylene based polymer modified with a grafting agent. The grafting agent includes, but is not limited to, acrylic acid, methacrylic acid, maleic acid, itaconic acid, fumaric acid or esters thereof, maleic anhydride, itaconic anhydride, and combinations thereof.
The polypropylene may further contain additives commonly known in the art, such as dispersant, lubricant, flame-retardant, antioxidant, antistatic agent, light stabilizer, ultraviolet light absorber, carbon black, nucleating agent, plasticizer, and coloring agent such as dye or pigment. The amount of additive, if present, in the polypropylene matrix is generally from 0.5 wt % or 2.5 wt % to 7.5 wt % or 10 wt %, based on the total weight of the matrix. Diffusion of additive(s) during processing may cause a portion of the additive(s) to be present in the fiber.
The polypropylene matrix resin of the present disclosure is not limited by any particular polymerization method for producing the matrix polypropylene, and the polymerization processes described herein are not limited by any particular type of reaction vessel. For example, the matrix polypropylene can be produced using any of the well known processes of solution polymerization, slurry polymerization, bulk polymerization, gas phase polymerization, and combinations thereof. Furthermore, the disclosure is not limited to any particular catalyst for making the polypropylene, and may, for example, include Ziegler-Natta or metallocene catalysts.
The fiber reinforced polypropylene resin pellets disclosed herein generally include at least 5 wt %, based on the total weight of the composition, of an organic fiber. In a particular embodiment, the fiber is present in an amount of at least 10 wt %, or at least 15 wt %, or at least 20 wt %, or in an amount within the range having a lower limit of 10 wt %, or 15 wt %, or 20 wt %, or 25 wt %, and an upper limit of 25 wt %, or 30 wt %, or 40 wt %, or 50 wt %, or 55 wt %, or 60 wt %, or 70 wt %, based on the total weight of the composition. In another embodiment, the PET fiber is present in an amount of at least 10 wt % and up to 40 wt %. In yet another embodiment, PET fiber is present in an amount of at least 20 wt % and up to 40 wt %. The organic fibers, also referred to as reinforcing fibers, are incorporated into the polypropylene based polymer matrix in order to improve impact resistance.
The polymer used as the fiber is not particularly restricted and is generally selected from the group consisting of polyalkylene terephthalates, polyalkylene naphthalates, polyamides, polyolefins, polyacrylonitrile, and combinations thereof. In a particular embodiment, the fiber comprises a polymer selected from the group consisting of polyethylene terephthalate (PET), polybutylene terephthalate, polyamide and acrylic. In another embodiment, PET fiber is advantageous in yielding PET fiber reinforced polypropylene resin pellets with drop dart impact resistance values of at least 5.0 newton meter and exhibiting no splintering upon drop dart impact testing over a range of pellet lengths of 3.2 to 25.4 mm. In another particular embodiment, PET fiber is advantageous in yielding PET fiber reinforced polypropylene resin pellets with drop dart impact resistance values of at least 5.3 newton meter over a range of pellet lengths ranging from 3.2 to 12.7 mm. In yet another particular embodiment, PET fiber in the form of continuous fiber fed to the compounding extruder is advantageous in yielding PET fiber reinforced polypropylene resin pellets with drop dart impact resistance values of at least 7.9 newton meter over a range of pellet lengths ranging from 6.4 to 9.5 mm. In still yet another particular embodiment, PET fiber in the form of 6.4 mm long chopped fiber is advantageous in yielding PET fiber reinforced polypropylene resin pellets with drop dart impact resistance values of at least 6.4 newton meter over a range of pellet lengths ranging from 9.5 to 12.7 mm.
In one embodiment, the fiber is a single component fiber. In another embodiment, the fiber is a multicomponent fiber wherein the fiber is formed from a process wherein at least two polymers are extruded from separate extruders and melt blown or spun together to form one fiber. In a particular aspect of this embodiment, the polymers used in the multicomponent fiber are substantially the same. In another particular aspect of this embodiment, the polymers used in the multicomponent fiber are different from each other. The configuration of the multicomponent fiber can be, for example, a sheath/core arrangement, a side-by-side arrangement, a pie arrangement, an islands-in-the-sea arrangement, or a variation thereof. The fiber may also be drawn to enhance mechanical properties via orientation, and subsequently annealed at elevated temperatures, but below the crystalline melting point to reduce shrinkage and improve dimensional stability at elevated temperature.
The length and diameter of the organic fibers of the present disclosure are not particularly restricted. The length of the PET cut fiber within the meaning of this disclosure is with respect to the input length of the cut or chopped PET fiber being fed to the compounding extruder or other mixing apparatus. This is also referred to within the detailed description and the claims of the present disclosure as the “input chopped polyester fiber length” or the “input cut polyester fiber length.” The length of the cut or chopped PET fiber referred to within the detailed description and the claims is not with respect to the length of the PET fiber within the pellets after compounding. It is understood that during the extrusion compounding process, the input chopped or cut PET fiber may undergo further length reduction through the process. In a particular embodiment, the input PET cut or chopped fibers may have a length of 6.4 mm, or a length within the range of 3.2 to 25.4 mm, or more particularly a length within the range of 4.8 to 12.7 mm. In another embodiment, the input PET cut fibers may have a length of 3.2 to 19.1 mm, or 6.4 to 12.7 mm. In another embodiment, the input PET cut fiber length may be 3.2 mm, or 6.4 mm, or 12.7 mm, or 19.1 mm, or 25.4 mm.
The diameter or denier of the organic fiber within the meaning of this disclosure is also with respect to the input diameter or denier of the organic fiber being fed to the compounding extruder or other mixing apparatus. Denier is defined as grams of fiber per 9000 meters of fiber length. This is also referred to within the detailed description and the claims of the present disclosure as the “input polyester fiber denier” or the “input polyester fiber diameter.” The diameter or denier of the PET fiber referred to within the detailed description and the claims is not with respect to the diameter of the PET fiber within the pellets after compounding. It is understood that during the extrusion compounding process, the input PET fiber may undergo a change in denier or diameter due to shrinkage or expansion through the process. Denier may be related to fiber diameter for a given fiber type (fiber density).
The diameter of the organic fiber may be within the range having a lower limit of 5 μm and an upper limit of 100 μm. In a particular embodiment, the PET fibers have a diameter of from 25 to 35 μm (6 to 12 denier), or more particularly a diameter of from 25 to 30 μm (6 to 9 denier). In another embodiment, the input PET fiber may range from 5 to 15 denier. In another embodiment, the input PET fiber diameter ranges from 15 to 35 μm. In yet another embodiment, the PET fiber diameter is less than 15 microns. In another embodiment, the PET fiber denier is less than 5, or less than 4, or less than 3.2, or less than 2. In still yet another embodiment, the PET fiber denier is 3.1 (also referred to herein as low denier PET fiber). As the PET fiber denier or diameter decreases, generally increased loadings are needed in the PP/PET composite to maintain impact resistance constant.
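The denier-to-diameter relationship referenced above can be illustrated with a short worked calculation. The sketch below is illustrative only; it assumes a solid, round filament and a PET density of 1.38 g/cm³ (the specific gravity cited in the examples later in this disclosure), and the function name and chosen denier values are not part of the disclosure.

import math

def fiber_diameter_um(denier: float, density_g_cm3: float = 1.38) -> float:
    """Estimate the diameter, in micrometers, of a solid round filament.

    Denier is grams of fiber per 9000 m, so the linear density in g/cm is
    denier / 900,000.  Dividing by the material density gives the
    cross-sectional area in cm^2, from which the diameter follows.
    """
    linear_density_g_per_cm = denier / 900_000.0
    area_cm2 = linear_density_g_per_cm / density_g_cm3
    diameter_cm = 2.0 * math.sqrt(area_cm2 / math.pi)
    return diameter_cm * 1.0e4  # convert cm to micrometers

if __name__ == "__main__":
    for d in (3.1, 6.0, 7.1, 9.0, 12.0):
        print(f"{d:4.1f} denier -> {fiber_diameter_um(d):4.1f} micrometers")

Under these assumptions, 6 to 12 denier corresponds to roughly 25 to 35 micrometers, and 7.1 denier to roughly 27 micrometers, consistent with the ranges and the filament diameter quoted in this disclosure.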
The organic fiber may further contain additives commonly known in the art. For example, PET fiber may include additives, such as dispersant, lubricant, flame-retardant, antioxidant, antistatic agent, light stabilizer, ultraviolet light absorber, carbon black, nucleating agent, plasticizer, and coloring agent such as dye or pigment.
The organic fiber used to make the compositions of the present disclosure is not limited by any particular fiber form. For example, the PET fiber may be in the form of continuous filament yarn, partially oriented yarn, or staple fiber. In another embodiment, the fiber may be a continuous multifilament fiber or a continuous monofilament fiber.
In another embodiment of the PET fiber reinforced polypropylene resin pellets disclosed herein, the PET fiber reinforced polypropylene compositions further include from 0.01 to 0.2 wt %, or more particularly from 0.05 to 0.1 wt % lubricant, based on the total weight of the composition. Suitable lubricants include, but are not limited to, silicon oil, silicon gum, fatty amide, paraffin oil, paraffin wax, ester oil, and combinations thereof. Lubricant incorporation may assist with the pull-out of organic fiber from the polypropylene based matrix polymer to further improve impact resistance.
In another exemplary embodiment of the present disclosure, the PET fiber reinforced polypropylene resin pellets may be made cloth-like in terms of appearance, feel, or a combination thereof. Cloth-like appearance or look is defined as having a uniform short fiber type of surface appearance. Cloth-like feel is defined as having a textured surface or fabric type feel. The incorporation of the colorant fiber into the fiber reinforced polypropylene composition results in a cloth-like appearance. When the fiber reinforced polypropylene composition is processed through a mold with a textured surface, a cloth-like feel is also imparted to the surface of the resulting molded part.
Cloth-like PET fiber reinforced polypropylene resin pellets of the present disclosure generally include from 0.1 to 2.5 wt %, based on the total weight of the composition, of a colorant fiber. Still more advantageously, the colorant fiber is present from 0.5 to 1.5 wt %, based on the total weight of the composition. Even still more advantageously, the colorant fiber is present at less than 1.0 wt %, based on the total weight of the composition.
The colorant fiber type is not particularly restricted and is generally selected from the group consisting of cellulosic fiber, acrylic fiber, nylon fiber, and polyester type fiber. Polyester type fibers include, but are not limited to, polyethylene terephthalate, polybutylene terephthalate, and polyethylene naphthalate. Polyamide type fibers include, but are not limited to, nylon 6, nylon 6,6, nylon 4,6 and nylon 6,12. In a particular embodiment, the colorant fiber is cellulosic fiber, also commonly referred to as rayon. In another particular embodiment, the colorant fiber is a nylon type fiber.
The colorant fiber used to make the PET fiber reinforced polypropylene resin pellets disclosed herein is not limited by any particular fiber form prior to being chopped for incorporation into the fiber reinforced polypropylene composition. For example, the colorant fiber may be in the form of continuous filament yarn, partially oriented yarn, or staple fiber. In another embodiment, the colorant fiber may be a continuous multifilament fiber or a continuous monofilament fiber.
The length and diameter of the colorant fiber may be varied to alter the cloth-like appearance in the molded article. The length and diameter of the colorant fiber of the present disclosure is not particularly restricted. In a particular embodiment, the input colorant fibers to the compounding process have a length of less than 6.4 mm, or advantageously a length of between 0.8 to 3.2 mm. In another particular embodiment, the diameter of the input colorant fibers to the compounding process is within the range having a lower limit of 10 μm and an upper limit of 100 μm.
The colorant fiber is colored with a coloring agent, which comprises either inorganic pigments, organic dyes, or a combination thereof. U.S. Pat. Nos. 5,894,048; 4,894,264; 4,536,184; 5,683,805; 5,328,743; and 4,681,803 disclose the use of coloring agents, the disclosures of which are incorporated herein by reference in their entirety. Exemplary pigments and dyes incorporated into the colorant fiber include, but are not limited to, phthalocyanine, azo, condensed azo, azo lake, anthraquinone, perylene/perinone, indigo/thioindigo, isoindolinone, azomethine azo, dioxazine, quinacridone, aniline black, triphenylmethane, carbon black, titanium oxide, iron oxide, iron hydroxide, chrome oxide, spinel-form calcination type, chromic acid, talc, chrome vermilion, iron blue, aluminum powder and bronze powder pigments. These pigments may be provided in any form or may be subjected in advance to various dispersion treatments in a manner known per se in the art. Depending on the material to be colored, the coloring agent can be added with one or more of various additives such as organic solvents, resins, flame retardants, antioxidants, ultraviolet absorbers, plasticizers and surfactants.
The base fiber reinforced polypropylene composite material into which the colorant fiber is incorporated may also be colored using the inorganic pigments, organic dyes, or combinations thereof. Exemplary pigments and dyes for the base fiber reinforced polypropylene composite material may be of the same types as indicated in the preceding paragraph for the colorant fiber. Typically the base fiber reinforced polypropylene composite material is made of a different color, or a different shade of color, than the colorant fiber, so as to create a cloth-like appearance upon uniformly dispersing the short colorant fibers in the colored base fiber reinforced polypropylene composite material. In one particular exemplary embodiment, the base fiber reinforced polypropylene composite material is light grey in color and the colorant fiber is dark grey or blue in color to create a cloth-like look from the addition of the short colorant fiber uniformly dispersed through the base fiber reinforced polypropylene composite material.
The colorant fiber in the form of chopped fiber may be incorporated directly into the base fiber reinforced polypropylene composite material via the twin screw or single screw extrusion compounding process, or may be incorporated as part of a masterbatch resin to further facilitate the dispersion of the colorant fiber within the fiber reinforced polypropylene composite base material. When the colorant fiber is incorporated as part of a masterbatch resin, exemplary carrier resins include, but are not limited to, polypropylene homopolymer, ethylene-propylene copolymer, ethylene-propylene-butene-1 terpolymer, propylene-butene-1 copolymer, low density polyethylene, high density polyethylene, and linear low density polyethylene. In one exemplary embodiment, the colorant fiber is incorporated into the carrier resin at less than 25 wt %. The colorant fiber masterbatch is then incorporated into the fiber reinforced polypropylene composite base material at a loading of from 1 wt % to 10 wt %, or from 2 to 6 wt %. In a particularly advantageous embodiment, the colorant fiber masterbatch is added at 4 wt % based on the total weight of the composition. In another exemplary embodiment, a masterbatch of either black rayon or black nylon type fibers in linear low density polyethylene carrier resin is incorporated at a loading of 4 wt % in the fiber reinforced polypropylene composite base material.
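As a rough check on how the masterbatch route delivers the colorant fiber levels discussed above, the following sketch multiplies an assumed colorant fiber content of the masterbatch by the masterbatch letdown; the specific values are illustrative rather than limiting.

def colorant_fiber_in_composite(fiber_in_masterbatch_wt_pct: float,
                                masterbatch_letdown_wt_pct: float) -> float:
    """Weight percent of colorant fiber ending up in the final composite."""
    return fiber_in_masterbatch_wt_pct * masterbatch_letdown_wt_pct / 100.0

# A masterbatch containing 25 wt % colorant fiber, let down at 4 wt %,
# contributes 1.0 wt % colorant fiber to the composite, which falls within
# the 0.1 to 2.5 wt % colorant fiber range discussed above.
print(colorant_fiber_in_composite(25.0, 4.0))  # -> 1.0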
The colorant fiber or colorant fiber masterbatch may be fed to the twin screw or single screw extrusion compounding process with a gravimetric feeder at either the feed hopper or at a downstream feed port in the barrel of the twin screw or single screw extruder. Kneading and mixing elements are incorporated into the twin screw or single screw extruder screw design downstream of the colorant fiber or colorant fiber masterbatch injection point, such as to uniformly disperse the colorant fiber within the cloth-like fiber reinforced polypropylene composite material.
Compositions of the present disclosure optionally include inorganic filler in an amount of at least 1 wt %, or at least 5 wt %, or at least 10 wt %, or in an amount within the range having a lower limit of 0 wt %, or 1 wt %, or 5 wt %, or 10 wt %, or 15 wt %, and an upper limit of 25 wt %, or 30 wt %, or 35 wt %, or 40 wt %, or 50 wt %, or 60 wt %, based on the total weight of the composition. In yet another embodiment, the inorganic filler may be included in the polypropylene fiber composite in the range of from 10 wt % to 60 wt %. In a particular embodiment, the inorganic filler is selected from the group consisting of talc, calcium carbonate, calcium hydroxide, barium sulfate, mica, calcium silicate, clay, kaolin, silica, alumina, wollastonite, magnesium carbonate, magnesium hydroxide, titanium oxide, zinc oxide, zinc sulfate, and combinations thereof. The talc may have a size of from 1 to 100 microns. In one particular embodiment, at a high talc loading of up to 60 wt %, the polypropylene fiber composite exhibited a flexural modulus of at least 750,000 psi and no splintering during instrumented impact testing (15 mph, −29° C., 25 lbs). In another particular embodiment, at a low talc loading of as low as 10 wt %, the polypropylene fiber composite exhibited a flexural modulus of at least 325,000 psi and no splintering during instrumented impact testing (15 mph, −29° C., 25 lbs). In addition, wollastonite loadings of from 10 wt % to 60 wt % in the polypropylene fiber composite yielded an outstanding combination of impact resistance and stiffness.
In another particular embodiment, fiber reinforced polypropylene composite resin pellets including a polypropylene based resin with a melt flow rate of 80 to 1500 g/10 min, 10 to 15 wt % of polyester fiber, and 50 to 60 wt % of inorganic filler displayed a flexural modulus of 850,000 to 1,200,000 psi and did not shatter during instrumented impact testing at −29 degrees centigrade, tested at 25 pounds and 15 miles per hour. The inorganic filler includes, but is not limited to, talc and wollastonite. This combination of stiffness and toughness is difficult to achieve in a polymeric based material. In addition, the fiber reinforced polypropylene composition has a heat distortion temperature at 66 psi of 140 degrees centigrade, and flow and cross flow coefficients of linear thermal expansion of 2.2×10⁻⁵ and 3.3×10⁻⁵ per degree centigrade, respectively. In comparison, rubber toughened polypropylene has a heat distortion temperature of 94.6 degrees centigrade, and flow and cross flow thermal expansion coefficients of 10×10⁻⁵ and 18.6×10⁻⁵ per degree centigrade, respectively.
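To put the coefficients of linear thermal expansion quoted above in practical terms, the sketch below estimates the length change of a molded part over a temperature swing; the one meter part length and 100 degree swing are arbitrary illustration values, not part of the disclosure.

def thermal_growth_mm(clte_per_degC: float, length_mm: float, delta_T_degC: float) -> float:
    """Approximate linear growth of a part: delta_L = CLTE * L * delta_T."""
    return clte_per_degC * length_mm * delta_T_degC

length_mm, delta_T = 1000.0, 100.0  # a 1 m part subjected to a 100 degree C swing
for label, clte in [("fiber reinforced PP, flow", 2.2e-5),
                    ("fiber reinforced PP, cross flow", 3.3e-5),
                    ("rubber toughened PP, flow", 10.0e-5),
                    ("rubber toughened PP, cross flow", 18.6e-5)]:
    print(f"{label}: {thermal_growth_mm(clte, length_mm, delta_T):.1f} mm")

Under these assumptions the fiber reinforced composition grows roughly 2 to 3 mm, versus roughly 10 to 19 mm for the rubber toughened polypropylene, which matters for parts that must hold tight gaps over a wide temperature range.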
In one exemplary embodiment where PET fiber is continuously unwound from spools into a twin screw or single screw extrusion compounding extruder hopper, the PET reinforced polypropylene resin pellets may range from 3.2 to 12.7 mm in length and more advantageously from 6.4 to 9.5 mm in length to yield drop dart impact resistance values of at least 7.9 newton meter. In another exemplary embodiment where ¼″ (6.4 mm in length) chopped PET fiber is fed via a feeder into a twin screw or single screw extrusion compounding extruder hopper, the PET reinforced polypropylene resin pellets may range from 3.2 to 12.7 mm in length, and more advantageously from 9.5 to 12.7 mm in length to yield drop dart impact resistance values of at least 6.4 newton meter. The drop dart impact resistance values are generally higher when chopped PET fiber is fed to the compounding extruder as opposed to continuous PET fiber from one or more spools.
The PET fiber reinforced polypropylene resin pellets disclosed herein are not limited by any particular method for forming the compositions. For example, the compositions can be formed by contacting polypropylene, organic fiber, and optional inorganic filler in any of the well known processes of pultrusion or extrusion compounding. In a particular embodiment, the compositions are formed in an extrusion compounding process (single screw or twin screw) or a batch type mixer when using input chopped organic fiber. In one advantageous process, PET fiber reinforced resin pellets are formed via a twin screw extrusion compounding process. In another advantageous process, PET fiber reinforced resin pellets are formed via a single screw extrusion compounding process. In a particular aspect of this embodiment, the PET fibers are cut (i.e. fed as chopped PET fiber or staple PET fiber) prior to being fed into the extruder hopper or fed to the extruder via a downstream feed port. In another particular aspect of this embodiment, the PET fibers are fed directly from one or more spools into the extruder hopper. The extrusion compounding process or pultrusion process form one or more strands of PET fiber reinforced polypropylene composites that are then cut through a pelletizing process into resin pellets of the desired length.
FIG. 1 depicts an exemplary schematic of one form of the process for making PET fiber reinforced polypropylene resin pellets of the present disclosure. Polypropylene based resin 10, inorganic filler 12, and PET fiber 14 continuously unwound from one or more spools 16 are fed into the extruder hopper 18 of a twin screw compounding extruder 20. The extruder hopper 18 is positioned above the feed throat 19 of the twin screw compounding extruder 20. The extruder hopper 18 may alternatively be provided with an auger (not shown) for mixing the polypropylene based resin 10 and the inorganic filler 12 prior to entering the feed throat 19 of the twin screw compounding extruder 20. In an alternative embodiment, as depicted in FIG. 2, the inorganic filler and/or chopped PET fiber 12 may be fed to the twin screw compounding extruder 20 at a downstream feed port 27 in the extruder barrel 26 positioned downstream of the extruder hopper 18 while the remaining components 10, 14 are metered into the extruder hopper 18.
Referring again to FIG. 1, the polypropylene based resin 10 is metered to the extruder hopper 18 via a feed system 30 for accurately controlling the feed rate. Similarly, the inorganic filler 12 is metered to the extruder hopper 18 via a feed system 32 for accurately controlling the feed rate. The feed systems 30, 32 may be, but are not limited to, gravimetric feed systems or volumetric feed systems. Gravimetric feed systems are advantageous for accurately controlling the weight percentage of polypropylene based resin 10 and inorganic filler 12 being fed to the extruder hopper 18. The feed rate of PET fiber 14 to the extruder hopper 18 is controlled by a combination of the extruder screw speed, the number of fiber filaments and the thickness of each filament in a given fiber spool, and the number of fiber spools 16 being unwound simultaneously to the extruder hopper 18. The higher the extruder screw speed, measured in revolutions per minute (rpm), the greater the rate at which PET fiber 14 is fed to the twin screw compounding extruder 20. The rate at which PET fiber 14 is fed to the extruder hopper also increases with the number of filaments within the PET fiber 14 being unwound from a single fiber spool 16, the thickness of each filament, and the number of fiber spools 16 being unwound simultaneously. With regard to downstream feeding of the inorganic filler and/or chopped organic fiber depicted in FIG. 2, one or more feed systems (not shown) are used for accurately controlling the feed rate of the inorganic filler and/or chopped PET fiber 12 fed to the twin screw compounding extruder 20 at the downstream feed port 27. Again, the feed systems (not shown) may be, but are not limited to, gravimetric feed systems or volumetric feed systems.
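The dependence of the fiber feed rate on tow construction can be expressed as a simple mass balance: the mass of fiber drawn in per unit time equals the total tow denier, converted to grams per meter, times the linear speed at which the screws pull the tow. The sketch below is illustrative only; the line speed is treated as a given input rather than derived from screw speed, and the example numbers merely echo the tow used in the examples later in this disclosure.

def fiber_feed_rate_lb_per_hr(filaments_per_tow: int,
                              filament_denier: float,
                              n_spools: int,
                              line_speed_m_per_min: float) -> float:
    """Mass of fiber pulled into the extruder per hour.

    Total denier = filaments * denier per filament * spools (grams per 9000 m),
    so the mass rate is total_denier / 9000 [g/m] times the line speed.
    """
    total_denier = filaments_per_tow * filament_denier * n_spools
    grams_per_meter = total_denier / 9000.0
    grams_per_hour = grams_per_meter * line_speed_m_per_min * 60.0
    return grams_per_hour / 453.6  # grams to pounds

# A single spool of 3100 filaments at 7.1 denier pulled at about 37 m/min
# corresponds to roughly 12 lb/hr of fiber.
print(round(fiber_feed_rate_lb_per_hr(3100, 7.1, 1, 37.0), 1))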
Referring again to FIG. 1, the twin screw compounding extruder 20 includes a drive motor 22, a gear box 24, an extruder barrel 26 for holding two screws (not shown), and a strand die 28. The extruder barrel 26 is segmented into a number of heated, temperature controlled zones 28. As depicted in FIG. 1, the extruder barrel 26 includes a total of ten temperature control zones 28. The two screws within the extruder barrel 26 of the twin screw compounding extruder 20 may be intermeshing or non-intermeshing, and may rotate in the same direction (co-rotating) or in opposite directions (counter-rotating). From a processing perspective, the melt temperature should be maintained above the melting point of the polypropylene based resin 10, and below the melting temperature of the PET fiber 14, such that the mechanical properties imparted by the PET fiber are maintained when it is mixed into the polypropylene based resin 10. In one exemplary embodiment, the barrel temperature of the extruder zones did not exceed 154° C. when extruding PP homopolymer and PET fiber, which yielded a melt temperature above the melting point of the PP homopolymer, but significantly below the melting point of the PET fiber. In another exemplary embodiment, the barrel temperatures of the extruder zones are set at 185° C. or lower.
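A minimal sanity check on a barrel set point profile is sketched below. The melting points used are approximate literature values (roughly 165° C. for polypropylene homopolymer and roughly 250° C. for PET), not values taken from this disclosure, and shear heating, which raises the actual melt temperature above the set points as noted above, is not modeled.

PP_MELT_C = 165.0   # assumed approximate melting point of PP homopolymer
PET_MELT_C = 250.0  # assumed approximate melting point of PET fiber

def check_barrel_profile(zone_temps_c, margin_c=30.0):
    """Flag any barrel set point that approaches the PET melting point.

    Set points below the PP melting point can still melt the matrix because
    of shear heating, so only the upper bound is checked here.
    """
    too_hot = [t for t in zone_temps_c if t >= PET_MELT_C - margin_c]
    if too_hot:
        print(f"Warning: set points {too_hot} risk softening or degrading the PET fiber")
    else:
        print("All set points are well below the PET melting point")

# An illustrative ten-zone profile similar to the one used in Example 27 below.
check_barrel_profile([144, 144, 144, 133, 154, 135, 123, 123, 123, 134])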
An exemplary schematic of a twin screw compounding extruder 20 screw configuration for making PET fiber reinforced polypropylene resin pellets is depicted in FIG. 3. The feed throat 19 allows for the introduction of polypropylene based resin, PET fiber, and inorganic filler into a feed zone of the twin screw compounding extruder 20. The inorganic filler and/or chopped PET fiber may optionally be fed to the extruder 20 at the downstream feed port 27 of FIG. 2. The twin screws 30 of FIG. 3 include an arrangement of interconnected screw sections, including conveying elements 32 and kneading elements 34. The kneading elements 34 function to melt the polypropylene based resin, cut the PET fiber lengthwise (in particular when fed continuously from one or more spools into the extruder hopper), and mix the polypropylene based melt, cut PET fiber and inorganic filler to form a uniform blend. More particularly, the kneading elements function to break up the PET fiber, when fed in continuous form, into fiber lengths ranging from 0.2 to 30 mm, or from 0.5 to 25 mm, or from 3 to 19 mm, or from 6 to 14 mm. A series of interconnected kneading elements 34 is also referred to as a kneading block. U.S. Pat. No. 4,824,256 to Haring, et al., herein incorporated by reference in its entirety, discloses co-rotating twin screw extruders with kneading elements. The first section of kneading elements 34 located downstream from the feed throat is also referred to as the melting zone of the twin screw compounding extruder 20. The conveying elements 32 function to convey the solid components, melt the polypropylene based resin, and convey the melt mixture of polypropylene based polymer, inorganic filler and PET fiber downstream toward the strand die 28 (see FIG. 1) at a positive pressure.
The position of each of the screw sections, expressed as the number of diameters (D) from the start 36 of the extruder screws 30, is also depicted in FIG. 3. The extruder screws in FIG. 3 have a length to diameter ratio of 40/1, and at a position 32D from the start 36 of screws 30, there is positioned a kneading element 34. The particular arrangement of kneading and conveying sections is not limited to that depicted in FIG. 3; however, one or more kneading blocks consisting of an arrangement of interconnected kneading elements 34 may be positioned in the twin screws 30 at a point downstream of where organic fiber and inorganic filler are introduced to the extruder barrel. The twin screws 30 may be of equal screw length or unequal screw length. Other types of mixing sections may also be included in the twin screws 30, including, but not limited to, Maddock mixers and pin mixers.
Referring once again to FIG. 1, the uniformly mixed PET fiber reinforced polypropylene composite melt comprising polypropylene based polymer 10, inorganic filler 12, and PET fiber 14 is metered by the extruder screws to a strand die 28 for forming one or more continuous strands 40 of fiber reinforced polypropylene composite melt. The one or more continuous strands 40 are then passed into a water bath 42 for cooling below the melting point of the fiber reinforced polypropylene composite melt to form solid fiber reinforced polypropylene composite strands 44. The water bath 42 is typically cooled and controlled to a constant temperature well below the melting point of the polypropylene based polymer. The solid fiber reinforced polypropylene composite strands 44 are then passed into a pelletizer or pelletizing unit 46, which cuts them into PET fiber reinforced polypropylene composite resin pellets 48. Non-limiting exemplary pelletizers include underwater type pelletizers and strand type pelletizers. In one advantageous process form, a strand pelletizer is used to cut the PET fiber reinforced polypropylene composite into longer resin pellets than may be formed with an underwater type pelletizer. Generally, the number of cutting blades and the speed of the cutting blades in the pelletizer 46 may be used to control the resulting resin pellet length. The PET fiber reinforced polypropylene composite resin pellets 48 may then be accumulated in boxes 50 or barrels, or alternatively conveyed to silos for storage.
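The statement that blade count and cutter speed control the resin pellet length can be reduced to a simple cut-length relation: each pellet is the distance the strand advances between successive cuts. The sketch below is an illustrative calculation only and does not describe any particular pelletizer; the strand speed, rotor speed, and blade count are assumed values.

def pellet_length_mm(strand_speed_m_per_min: float,
                     cutter_rpm: float,
                     n_blades: int) -> float:
    """Pellet length equals strand advance per cut.

    Cuts per minute = cutter_rpm * n_blades, so the pellet length is the
    strand speed divided by the cutting frequency.
    """
    cuts_per_min = cutter_rpm * n_blades
    return (strand_speed_m_per_min * 1000.0) / cuts_per_min

# A strand moving at 20 m/min cut by a 4-blade rotor at 525 rpm gives
# pellets of roughly 9.5 mm, within the pellet length ranges discussed below.
print(round(pellet_length_mm(20.0, 525.0, 4), 1))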
PET fiber reinforced polypropylene composites disclosed herein may be formed into resin pellets using the extrusion compounding and pelletizing processes exemplified in FIGS. 1, 2 and 3. The resin pellets produced in the pelletizer 46 of FIG. 1 may have a length of from 1.0 mm to 25.4 mm. Pellet length is measured using a ruler or other linear measuring device. The lower limit of the resin pellet length may be 1.0 mm, or 2.0 mm, or 3.2 mm, or 4.8 mm, or 6.4 mm, or 8.0 mm, or 9.5 mm. The upper limit of the resin pellet length may be 8.0 mm, or 9.5 mm, or 11.1 mm, or 12.7 mm, or 13.9 mm, or 15.0 mm, or 17.0 mm, or 19.1 mm, or 21.0 mm, or 23.0 mm, or 25.4 mm. In one particular embodiment, PET reinforced polypropylene resin pellets may have a pellet length of from 3.2 to 12.7 mm, or 6.4 to 9.5 mm. In another particular embodiment, PET reinforced polypropylene resin pellets may have a pellet length of from 3.2 to 12.7 mm, or 3.2 to 19.1 mm, or 3.2 to 9.5 mm, or 6.4 to 9.5 mm, or 9.5 to 19.1 mm, or 9.5 to 12.7 mm. The optimum resin pellet length range for impact resistance may depend on such exemplary factors as the organic fiber type, the input organic fiber diameter, the organic fiber loading level, the organic fiber length within the fiber reinforced polypropylene melt, and the method of feeding the organic fiber into the extrusion compounding process (as chopped or staple fiber, or as continuous strands being unwound from spools). In particular, the method of feeding the PET fiber into the extrusion compounding process, as chopped/staple fiber or as continuous strands being unwound from spools, may affect the resultant impact resistance of articles molded from the resin pellets. Impact resistance as described herein is measured by the total energy in newton meter required to shatter an article molded from the PET fiber reinforced polypropylene resin pellets. Drop dart impact resistance was measured via ASTM test method D3763 and was used to establish the relationship between pellet length and impact resistance for both chopped PET fiber and continuous PET fiber feeds. The higher the total energy required to shatter the article, the greater the impact resistance. Generally, the feeding of continuous PET fiber from spools into the twin screw compounding process results in poorer impact resistance than the feeding of ¼″ (6.4 mm long) chopped PET fiber into the extrusion compounding process.
In another embodiment, the PET fiber reinforced polypropylene resin pellets disclosed herein are molded into articles. Articles made from the PET fiber-reinforced polypropylene composite resin pellets described herein include, but are not limited to, automotive parts, household appliances, and boat hulls. Automotive parts include both interior and exterior automobile parts. Cloth-like fiber reinforced polypropylene articles are particularly suitable for interior automotive parts because of their unique combination of toughness, stiffness and aesthetics. More particularly, the non-splintering failure mode during instrumented impact testing and the cloth-like look make the cloth-like fiber reinforced polypropylene composites disclosed herein well suited for interior automotive parts, and in particular for interior trim cover panels. Exemplary, but not limiting, interior trim cover panels include steering wheel covers, head liner panels, dashboard panels, interior door trim panels, pillar trim cover panels, and under-dashboard panels. Pillar trim cover panels include a front pillar trim cover panel, a center pillar trim cover panel, and a quarter pillar trim cover panel. Other interior automotive parts include package trays and seat backs. Articles made from the polypropylene compositions described herein are also suitable for exterior automotive parts, including, but not limited to, bumpers, front end modules, aesthetic trim parts, body panels, under body parts, under hood parts, door cores, and other structural parts of the automobile.
The PET fiber reinforced polypropylene composite resin pellets disclosed herein include, but are not limited to, one or more of the following advantages: an advantageous combination of toughness, stiffness, and aesthetics, improved instrumented impact resistance, improved flexural modulus, improved splinter or shatter resistance during instrumented impact testing, fiber pull out during instrumented impact testing without the need for lubricant additives, ductile (non-splintering) failure mode during instrumented impact testing as opposed to brittle (splintering), a higher heat distortion temperature compared to rubber modified polypropylene, improved part surface appearance from lower inorganic filler loadings, lower part density from lower inorganic filler loadings, a lower flow and cross flow coefficient of linear thermal expansion compared to rubber modified polypropylene, the ability to continuously and accurately feed organic reinforcing fiber into a compounding extruder, reduced production costs and reduced raw material costs, improved part surface appearance, the ability to produce polypropylene fiber composites exhibiting a cloth-like look and/or feel, uniform dispersion of the organic reinforcing fiber and colorant fiber in the composite pellets, improved drop dart impact resistance through tight control of PET fiber reinforced polypropylene resin pellet length, improved impact resistance through the feeding of chopped PET fiber as opposed to continuous PET fiber via spools into the extrusion compounding process, and retention of impact resistance, ductile failure mode and stiffness after the incorporation of colorant with colorant fiber.
The following examples illustrate the present disclosure and the advantages thereto without limiting the scope thereof.
Test Methods
Fiber reinforced polypropylene compositions described herein were injection molded at 2300 psi pressure, with all heating zones and the nozzle at 401° F. (205° C.), and with a mold temperature of 60° C.
Flexural modulus data were generated for injection molded samples produced from the fiber reinforced polypropylene compositions described herein using the ISO 178 standard procedure.
Instrumented impact test data were generated for injection molded samples produced from the fiber reinforced polypropylene compositions described herein using ASTM D3763. Ductility during instrumented impact testing (test conditions of 15 mph, −29° C., 25 lbs) is defined as no splintering of the sample.
Drop dart impact test data were generated for injection molded samples produced from the PET fiber reinforced polypropylene resin pellets described herein using ASTM test method D3763, and are reported as drop dart impact energy values in newton meters.
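Because the summary of the disclosure states drop dart impact resistance in newton meters while the example tables below report total energy in ft-lbf, a simple conversion helper is sketched here. The factor 1 ft-lbf = 1.3558 newton meter is the standard conversion; the example value is taken from Table 1 below.

FT_LBF_TO_NEWTON_METER = 1.3558  # standard unit conversion factor

def ft_lbf_to_newton_meter(energy_ft_lbf: float) -> float:
    """Convert an impact energy from ft-lbf to newton meters."""
    return energy_ft_lbf * FT_LBF_TO_NEWTON_METER

# The 5.1 ft-lbf total energy of Example 4 in Table 1 is about 6.9 newton meters.
print(round(ft_lbf_to_newton_meter(5.1), 1))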
EXAMPLES
PP3505G is a propylene homopolymer commercially available from ExxonMobil Chemical Company of Baytown, Tex. The MFR (2.16 kg, 230° C.) of PP3505G was measured according to ASTM D1238 to be 400 g/10 min.
PP7805 is an 80 MFR propylene impact copolymer commercially available from ExxonMobil Chemical Company of Baytown, Tex.
PP8114 is a 22 MFR propylene impact copolymer containing ethylene-propylene rubber and a plastomer, and is commercially available from ExxonMobil Chemical Company of Baytown, Tex.
PP8224 is a 25 MFR propylene impact copolymer containing ethylene-propylene rubber and a plastomer, and is commercially available from ExxonMobil Chemical Company of Baytown, Tex.
PO1020 is a 430 MFR maleic anhydride functionalized polypropylene homopolymer containing 0.5-1.0 weight percent maleic anhydride.
Cimpact CB7 is a surface modified talc, V3837 is a high aspect ratio talc, and Jetfine 700 C is a high surface area talc, all available from Luzenac America Inc. of Englewood, Colo.
Illustrative Examples 1-8
Varying amounts of PP3505G and 0.25″ (6.4 mm) long polyester fibers were mixed in a Haake single screw extruder at 175° C. The strand that exited the extruder was cut into 0.5″ lengths and injection molded using a Boy 50M ton injection molder at 205° C. into a mold held at 60° C. Injection pressures and nozzle pressures were maintained at 2300 psi. Samples were molded in accordance with the geometry of ASTM D3763 and tested for instrumented impact under standard automotive conditions for interior parts (25 lbs, at 15 MPH, at −29° C.). The total energy absorbed and impact results are given in Table 1.
TABLE 1

Example #   wt % PP3505G   wt % Fiber   Total Energy (ft-lbf)   Instrumented Impact Test Results
1           65             35           8.6 ± 1.1               ductile*
2           70             30           9.3 ± 0.6               ductile*
3           75             25           6.2 ± 1.2               ductile*
4           80             20           5.1 ± 1.2               ductile*
5           85             15           3.0 ± 0.3               ductile*
6           90             10           2.1 ± 0.2               ductile*
7           95             5            0.4 ± 0.1               brittle**
8           100            0            <0.1                    brittle***

*Examples 1-6: samples did not shatter or split as a result of impact, with no pieces coming off of the specimen.
**Example 7: pieces broke off of the sample as a result of the impact.
***Example 8: samples completely shattered as a result of impact.
Illustrative Examples 9-14
In Examples 9-11, 35 wt % PP7805, 20 wt % Cimpact CB7 talc, and 45 wt % 0.25″ (6.4 mm) long polyester fibers were mixed in a Haake twin screw extruder at 175° C. The strand that exited the extruder was cut into 0.5″ lengths and injection molded using a Boy 50M ton injection molder at 205° C. into a mold held at 60° C. Injection pressures and nozzle pressures were maintained at 2300 psi. Samples were molded in accordance with the geometry of ASTM D3763 and tested for instrumented impact. The total energy absorbed and impact results are given in Table 2.
In Examples 12-14, PP8114 was extruded and injection molded under the same conditions as those for Examples 9-11. The total energy absorbed and impact results are given in Table 2.
TABLE 2

Example #   Impact Conditions / Applied Energy       Total Energy (ft-lbf)   Instrumented Impact Test Results

35 wt % PP7805 (70 MFR), 20 wt % talc, 45 wt % fiber:
9           −29° C., 15 MPH, 25 lbs / 192 ft-lbf     16.5                    ductile*
10          −29° C., 28 MPH, 25 lbs / 653 ft-lbf     14.2                    ductile*
11          −29° C., 21 MPH, 58 lbs / 780 ft-lbf     15.6                    ductile*

100 wt % PP8114 (22 MFR):
12          −29° C., 15 MPH, 25 lbs / 192 ft-lbf     32.2                    ductile*
13          −29° C., 28 MPH, 25 lbs / 653 ft-lbf     2.0                     brittle**
14          −29° C., 21 MPH, 58 lbs / 780 ft-lbf     1.7                     brittle**

*Examples 9-12: samples did not shatter or split as a result of impact, with no pieces coming off of the specimen.
**Examples 13-14: samples shattered as a result of impact.
Illustrative Examples 15-16
A Leistritz ZSE27 HP-60D 27 mm twin screw extruder with a length to diameter ratio of 40:1 was fitted with six pairs of kneading elements 12″ from the die exit to form a kneading block. The die was ¼″ in diameter. Strands of continuous 27,300 denier PET fibers were fed directly from spools into the hopper of the extruder, along with PP7805 and talc. The kneading elements in the kneading block in the extruder broke up the fiber in situ. The extruder speed was 400 revolutions per minute, and the temperatures across the extruder were held at 190° C. Injection molding was done under conditions similar to those described for Examples 1-14. The mechanical and physical properties of the sample were measured and are compared in Table 3 with the mechanical and physical properties of PP8224.
The instrumented impact test showed that in both examples there was no evidence of splitting or shattering, with no pieces coming off the specimen. In the notched charpy test, the PET fiber-reinforced PP7805 specimen was only partially broken, and the PP8224 specimen broke completely.
TABLE 3

Test (Method)                                                                              Example 15: PET fiber-reinforced PP7805 with talc   Example 16: PP8224
Flexural Modulus, Chord (ISO 178)                                                          525,190 psi                                          159,645 psi
Instrumented Impact at −30° C., Energy to maximum load, 100 lbs at 5 MPH (ASTM D3763)      6.8 J                                                27.5 J
Notched Charpy Impact at −40° C. (ISO 179/1eA)                                             52.4 kJ/m²                                           5.0 kJ/m²
Heat Deflection Temperature at 0.45 MPa, edgewise (ISO 75)                                 116.5° C.                                            97.6° C.
Coefficient of Linear Thermal Expansion, −30° C. to 100° C., Flow/Crossflow (ASTM E831)    2.2/12.8 (E-5/° C.)                                  10.0/18.6 (E-5/° C.)
Illustrative Examples 17-18
In Examples 17-18, 30 wt % of either PP3505G or PP8224, 15 wt % 0.25″ (6.4 mm) long polyester fibers, and 45 wt % V3837 talc were mixed in a Haake twin screw extruder at 175° C. The strand that exited the extruder was cut into 0.5″ lengths and injection molded using a Boy 50M ton injection molder at 205° C. into a mold held at 60° C. Injection pressures and nozzle pressures were maintained at 2300 psi. Samples were molded in accordance with the geometry of ASTM D3763 and tested for flexural modulus. The flexural modulus results are given in Table 4.
TABLE 4

Example   Polypropylene   Flexural Modulus, Chord, psi (ISO 178)   Instrumented Impact at −30° C., Energy to maximum load, 25 lbs at 15 MPH (ASTM D3763), ft-lb
17        PP8224          433,840                                  2
18        PP3505          622,195                                  2.9
The rubber toughened PP8224 matrix with PET fibers and talc displayed lower impact values than the PP3505 homopolymer. This result is surprising, because the rubber toughened matrix alone is far tougher than the low molecular weight PP3505 homopolymer alone at all temperatures under any conditions of impact. In both examples above, the materials displayed no splintering.
Illustrative Examples 19-24
In Examples 19-24, 25-75 wt % PP3505G, 15 wt % 0.25″ (6.4 mm) long polyester fibers, and 10-60 wt % V3837 talc were mixed in a Haake twin screw extruder at 175° C. The strand that exited the extruder was cut into 0.5″ lengths and injection molded using a Boy 50M ton injection molder at 205° C. into a mold held at 60° C. Injection pressures and nozzle pressures were maintained at 2300 psi. Samples were molded in accordance with the geometry of ASTM D3763 and tested for flexural modulus. The flexural modulus results are given in Table 5.
TABLE 5

Example   Talc Composition   Flexural Modulus, Chord, psi (ISO 178)
19        10%                273,024
20        20%                413,471
21        30%                583,963
22        40%                715,005
23        50%                1,024,394
24        60%                1,117,249
In Examples 19-24, the samples displayed no splintering in drop weight testing at −29° C., 15 miles per hour, and 25 pounds.
Illustrative Examples 25-26
Two materials were compounded in a Haake twin screw extruder at 175° C.: one containing 10% ¼ inch (6.4 mm) polyester fibers, 35% PP3505 polypropylene, and 60% V3837 talc (Example 25), and the other containing 10% ¼ inch (6.4 mm) polyester fibers, 25% PP3505 polypropylene homopolymer, and 10% PO1020 modified polypropylene (Example 26). They were injection molded into standard ASTM A370 ½ inch wide sheet type tensile specimens. The specimens were tested in fatigue in tension, with a ratio of minimum to maximum load of 0.1, at stresses of 70 and 80% of the maximum stress.
TABLE 6

Percentage of Maximum Stress to Yield Point   Example 25, Cycles to failure   Example 26, Cycles to failure
70                                            327                             9,848
80                                            30                              63
The addition of the modified polypropylene is shown to increase the fatigue life of these materials.
Illustrative Examples 27-29
A Leistritz 27 mm co-rotating twin screw extruder with a length to diameter ratio of 40:1 was used in these experiments. The process configuration utilized was as depicted in FIG. 1. The screw configuration used is depicted in FIG. 3, and includes an arrangement of conveying and kneading elements. Talc, polypropylene and PET fiber were all fed into the extruder feed hopper located approximately two diameters from the beginning of the extruder screws (19 in FIG. 3). The PET fiber was fed into the extruder hopper by continuously feeding, from multiple spools, a fiber tow of 3100 filaments with each filament having a denier of approximately 7.1. Each filament was 27 microns in diameter, with a specific gravity of 1.38.
The twin screw extruder ran at 603 rotations per minute. Using two gravimetric feeders, PP7805 polypropylene was fed into the extruder hopper at a rate of 20 pounds per hour, while CB7 talc was fed into the extruder hopper at a rate of 15 pounds per hour. The PET fiber was fed into the extruder at 12 pounds per hour, a rate dictated by the screw speed and the tow thickness. The extruder temperature profile for the ten zones was 144° C. for zones 1-3, 133° C. for zone 4, 154° C. for zone 5, 135° C. for zone 6, 123° C. for zones 7-9, and 134° C. for zone 10. The strand die diameter at the extruder exit was ¼ inch.
The extrudate was quenched in an 8 foot long water trough and pelletized to ½ inch length to form PET/PP composite pellets. The extrudate displayed uniform diameter and could easily be pulled through the quenching bath with no breaks in the water bath or during instrumented impact testing. The composition of the PET/PP composite pellets produced was 42.5 wt % PP, 25.5 wt % PET, and 32 wt % talc.
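The pellet composition reported above follows directly from the three feed rates. A minimal check, with the component names kept as labels only:

feed_rates_lb_per_hr = {"PP7805": 20.0, "Cimpact CB7 talc": 15.0, "PET fiber": 12.0}
total_rate = sum(feed_rates_lb_per_hr.values())

# Each component's weight fraction is its feed rate divided by the total feed rate.
for name, rate in feed_rates_lb_per_hr.items():
    print(f"{name}: {100.0 * rate / total_rate:.1f} wt %")
# Prints roughly 42.6, 31.9, and 25.5 wt %, matching the 42.5 wt % PP,
# 32 wt % talc, and 25.5 wt % PET composition reported for the pellets.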
The PET/PP composite resin pellets produced were injection molded and displayed the following properties:
TABLE 7

Property                            Example 27
Specific Gravity                    1.3
Tensile Modulus, Chord @ 23° C.     541,865 psi
Tensile Modulus, Chord @ 85° C.     257,810 psi
Flexural Modulus, Chord @ 23° C.    505,035 psi
Flexural Modulus, Chord @ 85° C.    228,375 psi
HDT @ 0.45 MPa                      116.1° C.
HDT @ 1.80 MPa                      76.6° C.
Instrumented impact @ 23° C.        11.8 J D**
Instrumented impact @ −30° C.       12.9 J D**

**Ductile failure with radial cracks
In example 28, the same materials, composition, and process set-up were utilized, except that extruder temperatures were increased to 175° C. for all extruder barrel zones. This material showed complete breaks in the instrumented impact test both at 23° C. and −30° C. Hence, at a barrel temperature profile of 175° C., the mechanical properties of the PET fiber were negatively impacted during extrusion compounding such that the PET/PP composite resin had poor instrumented impact test properties.
In example 29, the fiber was fed into a hopper placed 14 diameters down the extruder (27 in FIG. 3). In this case, the extrudate produced was irregular in diameter and broke on average once every minute as it was pulled through the quenching water bath. When the PET fiber tow was continuously fed downstream of the extruder hopper, the dispersion of the PET in the PP matrix was negatively impacted such that a uniform extrudate could not be produced, resulting in the irregular diameter and extrudate breaking.
Illustrative Example 30
An extruder with the same size and screw design as examples 27-29 was used. All zones of the extruder were initially heated to 180° C. PP 3505 dry mixed with Jet fine 700 C and PO 1020 was then fed at 50 pounds per hour using a gravimetric feeder into the extruder hopper located approximately two diameters from the beginning of the extruder screws. Polyester fiber with a denier of 7.1 and a thickness of 3100 filaments was fed through the same hopper. The screw speed of the extruder was then set to 596 revolutions per minute, resulting in a feed rate of 12.1 pounds of fiber per hour. After a uniform extrudate was attained, all temperature zones were lowered to 120° C., and the extrudate was pelletized after steady state temperatures were reached. The final composition of the blend was 48% PP 3505, 29.1% Jet fine 700 C, 8.6% PO 1020 and 14.3% polyester fiber.
The PP composite resin pellets produced while all temperature zones of the extruder were set to 120° C. were injection molded and displayed the following properties:
TABLE 8
                                        Example 30
Flexural Modulus, Chord @ 23° C.        467,932 psi
Instrumented impact @ 23° C.            8.0 J D**
Instrumented impact @ −30° C.           10.4 J D**
**Ductile failure with radial cracks
Illustrative Examples 31-34
4% Granite Fleck, which is a masterbatch of dark polymer fiber in a low density polyethylene carrier resin, was extrusion compounded with a twin screw extruder into both polypropylene based impact copolymer (PP 8114) (control sample) and also into a blend of PP homopolymer/PET fiber/talc (40% PP3505G polypropylene, 15% PET reinforcing fiber (¼″ (6.4 mm) length), and 41% Luzenac Jet fine 3CA talc). Corresponding resin samples without the incorporation of the colorant fiber masterbatch (no Granite Fleck) were also produced to assess the impact of the colorant fiber on impact properties for the prior art PP impact copolymer and the PP-PET fiber reinforced composite disclosed herein. The fiber reinforced polypropylene composite without the colorant fiber included 40% PP3505G polypropylene, 15% PET reinforcing fiber (¼″ (6.4 mm) length), and 45% Luzenac Jet fine 3CA talc.
These four resin samples were molded in accordance with the geometry of ASTM D3763 and tested for instrumented impact resistance and failure mode upon impact failure. The instrumented impact test results are given in Table 9.
TABLE 9
Example   Material Composition                                        Instrumented       Failure mode during    Flexural
                                                                      impact (ft-lbs)    instrumented impact    modulus (psi)
31        Impact copolymer (PP 8114)                                  32.2               Ductile                No data
          (prior art control w/o colorant fiber)
32        Impact copolymer + colorant fiber                           4.1                Brittle                No data
          (PP 8114 + 4% Granite Fleck)
          (prior art control w/ colorant fiber)
33        PP/PET fiber/talc composite                                 11.9               Ductile                609,000
          (40% PP 3505G/15% PET fiber/45% talc)
          (present disclosure w/o colorant fiber)
34        PP/PET fiber/talc/colorant fiber composite                  12.6               Ductile                606,000
          (40% PP 3505G/15% PET fiber/41% talc/4% Granite Fleck)
          (present disclosure + colorant fiber)
From Table 9, it is important to note that upon the incorporation of the colorant fiber into the impact copolymer (Example 32) of the prior art, there is approximately an 88% decrease in instrumented impact resistance, and the failure mode also goes from ductile (no splintering) to brittle (splintering). In contrast, when colorant fiber is added to the PP/PET fiber/talc composition material (Example 34) of the present disclosure, there is no decrease in instrumented impact resistance, the failure mode remains ductile in nature, and the reduction in flexural modulus is negligible. The PP/PET fiber/talc/colorant fiber composite material after molding also has a cloth-like look to it from the incorporation of the dark colorant fiber uniformly dispersed through the molded object. Surprisingly, the PP/PET fiber/talc/colorant fiber composite material (Example 34) retains its outstanding impact resistance, unlike the prior art rubber modified PP impact copolymer/colorant fiber sample (Example 32).
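The approximately 88% figure cited above follows from the Table 9 values for Examples 31 and 32. The snippet below is a simple editorial check of that arithmetic and is not part of the original disclosure.

control_without_colorant = 32.2  # Example 31, instrumented impact, ft-lbs
control_with_colorant = 4.1      # Example 32, instrumented impact, ft-lbs

decrease_pct = 100.0 * (control_without_colorant - control_with_colorant) / control_without_colorant
print(f"decrease in impact resistance: {decrease_pct:.1f}%")  # about 87%, i.e. roughly 88%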
Illustrative Examples 35 and 36
Two processes have been developed to extrude pellets of polyester fiber reinforced polypropylene. One process involves introducing cut PET fiber into the twin screw extruder, while the second involves introducing continuous PET fiber, unwound from spools, into the extruder hopper of the twin screw extruder, where it is then cut in situ by the twin screws. The following examples illustrate the effect of PET fiber reinforced polypropylene resin pellet length and of the process type for introducing PET fiber on the impact properties of the resulting composite. In the case of cut fiber, 15% by weight of 7.1 denier ¼ inch (6.4 mm) long polyester fiber was mixed with 40% high aspect ratio talc (Luzenac Inc.), 40% of an impact copolymer, PP 7905 from ExxonMobil Chemical Company, which has a melt flow rate (mfr) of 90 in accordance with ASTM D1238, and 5% of PO1020 from ExxonMobil Chemical Company, a maleic anhydride grafted polypropylene with an mfr of 430 in accordance with ASTM D1238. All these materials were dry mixed in a bag and fed through the hopper of a laboratory model Haake extruder run at 200 revolutions per minute, with temperatures at 175° C. across all temperature zones. In the case of continuous fiber cut in situ within the extruder, the extruder was run in accordance with the description of examples 27-29, but with the same composition as described here, and with all zones across the extruder held at 120° C.
The PET reinforced polypropylene composite resins were made into pellet sizes ranging from 3.2 mm to 12.7 mm by varying the pelletizing conditions. For Examples 35 and 36, resin samples were collected for pellet lengths of 3.2, 6.4, 9.5 and 12.7 mm. All samples were subsequently injection molded in a 50 ton Boy machine. All heating zones were held at 401° F. (205° C.), with the mold at 60° C. Total cycle time was 5.1 seconds. For each example and pellet size, dart drop impact energy was measured by determining the total energy to break (in newton meter) via ASTM Test Method D3763. For each process type and pellet length, ten samples were tested. Table 10 below summarizes the total energy to break as a function of PET fiber feed type and pellet length.
The results in Table 10 indicate that the optimum pellet length to attain the highest value of drop dart impact resistance varies depending upon whether continuous PET fiber (Example 35) or chopped PET fiber (Example 36) was fed into the twin screw compounding extruder prior to pelletizing. In the case of input continuous PET fiber, the optimum pellet length was from about 6.4 to 9.5 mm in length and drop dart impact values of 7.9 to 9.6 newton meter were achieved. In the case of input cut PET fiber, the optimum pellet length was from 9.5 mm to 12.7 mm in length and drop dart impact values of 12.9 to 13.4 newton meter were achieved. However, in both cases (continuous and chopped PET fiber input), even PET reinforced polypropylene pellets as small as 3.2 mm in length were sufficient to pass the impact test, defined as no splinters or shards breaking off the sample after impact (ductile failure with radial cracks and no splintering). In addition, the impact data in Table 10 indicates that feeding chopped PET fiber consistently results in higher impact resistance than feeding continuous PET rovings at any given pellet length.
TABLE 10
                              Pellet Length    Total Energy - Run 1           Std. Dev.      Total Energy - Run 2           Std. Dev.
                                               (ft-lb_(f) (newton meter))                    (ft-lb_(f) (newton meter))
Example 35:                   ⅛″ (3.2 mm)      5.2 (7.1)                      2.65 (3.59)    4.4 (6.0)                      1.8 (2.4)
Continuous fiber feed         ¼″ (6.4 mm)      7.1 (9.6)                      1.8 (2.4)      6.1 (8.3)                      1.3 (1.8)
                              ⅜″ (9.5 mm)      6.4 (8.7)                      1.5 (2.0)      5.8 (7.9)                      1.8 (2.4)
                              ½″ (12.7 mm)     5.3 (7.2)                      2.2 (3.0)      3.9 (5.3)                      0.6 (0.8)
Example 36:                   ⅛″ (3.2 mm)      6.9 (9.4)                      2.1 (2.8)      7.2 (9.8)                      1.7 (2.3)
¼″ (6.4 mm) cut fiber feed    ¼″ (6.4 mm)      8.6 (11.7)                     2 (2.7)        7.6 (10.3)                     1.9 (2.6)
                              ⅜″ (9.5 mm)      9.9 (13.4)                     1 (1.4)        9.8 (13.3)                     0.8 (1.1)
                              ½″ (12.7 mm)     9.8 (13.3)                     1.1 (1.5)      9.5 (12.9)                     2.5 (3.4)
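Tables 10 and 11 report impact energies in both foot-pounds force and newton meters, and pellet and fiber lengths in both inches and millimeters. The helper functions below are a generic conversion sketch using standard factors (1 ft-lbf ≈ 1.3558 N·m, 1 inch = 25.4 mm); they are provided for convenience and are not part of the original data.

FT_LBF_TO_NM = 1.3558  # 1 foot-pound force in newton meters (standard factor)
INCH_TO_MM = 25.4      # 1 inch in millimeters

def ft_lbf_to_newton_meters(energy_ft_lbf: float) -> float:
    return energy_ft_lbf * FT_LBF_TO_NM

def inches_to_mm(length_inches: float) -> float:
    return length_inches * INCH_TO_MM

# Spot checks against Table 10: 7.1 ft-lbf is about 9.6 N*m; 3/8 inch is about 9.5 mm.
print(round(ft_lbf_to_newton_meters(7.1), 1))  # 9.6
print(round(inches_to_mm(0.375), 1))           # 9.5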
Illustrative Examples 37-55
85% by weight of PP7905 and 15% by weight of 6 denier high tenacity polyester fiber of various pre-cut input fiber lengths (⅛″, ¼″, ½″, and ¾″) were mixed in a Haake twin screw extruder at 175° C. The strand that exited the extruder was cut into various pellet lengths (⅛″, ¼″, ⅜″, ½″, and ¾″) and injection molded using a Boy 50M ton injection molder at 205° C. into a mold held at 60° C. For each input cut fiber length, a sample of the unpelletized extrudate was produced and injection molded as well. Injection pressures and nozzle pressures were maintained at 2300 psi. Samples were molded in accordance with the geometry of ASTM D3763 and tested for instrumented impact at room temperature under the following conditions for interior parts: 25 lbs, at 15 MPH, at 23° C. The total energy absorbed upon impact, in foot-pounds force and newton meters, as a function of pellet length and cut-fiber length is given in Table 11 below.
TABLE 11
Total energy absorbed (Units: ft-lb_(f) (N-m))
                      Pellet Length
Cut-fiber length      ⅛″ (3.2 mm)   ¼″ (6.4 mm)   ⅜″ (9.5 mm)   ½″ (12.7 mm)   ¾″ (19.1 mm)   Unpelletized
⅛″ (3.2 mm)           5.6 (7.6)     4.5 (6.1)     5.5 (7.5)     5.4 (7.3)      5.8 (7.9)      4.7 (6.4)
¼″ (6.4 mm)           6.6 (8.9)     7.7 (10.4)    4.7 (6.4)     7.1 (9.6)      5.6 (7.6)      6.4 (8.7)
½″ (12.7 mm)          7 (9.5)       7.4 (10.0)    9.4 (12.7)    8.7 (11.8)     8.3 (11.3)     9 (12.2)
¾″ (19.1 mm)          9 (12.2)
In addition, all the samples (37 to 55) tested for drop dart impact exhibited ductile failures with radial cracks. In other words, none of the samples displayed splintering upon failure. The results also indicate that increased input chopped fiber length results in higher impact resistance, as 12.7 mm long chopped fiber yielded the highest impact values. When 12.7 mm long chopped fiber is used, the results also indicate that resin pellet lengths of 9.5 to 12.7 mm yielded the highest impact resistance values.
Illustrative Examples 56 and 57
85% by weight of PP7905 and 15% by weight of 3.1 denier staple tenacity polyester fiber of ¼ inch (6.4 mm) cut fiber length were mixed in a Haake twin screw extruder at 175° C. The strand that exited the extruder was cut into ¼ inch (6.4 mm) pellet lengths and injection molded using a Boy 50M ton injection molder at 205° C. into a mold held at 60° C. A sample of the unpelletized extrudate was also produced and injection molded. Injection pressures and nozzle pressures were maintained at 2300 psi. Samples were molded in accordance with the geometry of ASTM D3763 and tested for instrumented impact at room temperature under the following conditions for interior parts: 25 lbs, at 15 MPH, at 23° C. In addition, the number of samples that did not splinter (# pass) and the number of samples that did splinter (# fail) upon instrumented impact testing were noted and are shown below in Table 12. The total energy absorbed during impact testing in foot-pounds force and newton meters is also given in Table 12 below.
TABLE 12
Sample                               Impact energy absorbed (Units: ft-lb_(f) (N-m))    # pass/# fail
¼″ (6.4 mm) pellet length            3.3 (4.5)                                          4/1
Unpelletized continuous extrudate    3.5 (4.7)                                          1/4
The results indicate that the low denier (3.1) PET fiber yields poorer impact results compared to higher denier PET fiber (See Examples 35-55). In particular, the impact energy absorbed for the low denier (3.1) PET fiber is lower than the higher denier (6.0 and 7.1 denier) PET fiber of Examples 35-55. In addition, the low denier PET samples exhibited failure (splintering) in one or more samples upon impact testing as compared to no splintering for the higher denier PET fiber samples of Examples 35-55. Hence, a higher loading of PET fiber (20% or more) in the PP matrix polymer is needed to achieve acceptable impact test results when using an input lower denier PET fiber (3.1 denier) as compared to an input higher denier PET fiber (6 and 7.1 denier). It is predicted that a fiber loading of 20% or more of low denier (3.1) PET fiber in a PP matrix polymer should yield impact energies of at least 5.0 newton meter, and possibly at least 5.5 newton meter when smaller pellet sizes are produced for subsequent molding and impact testing.
Applicants have attempted to disclose all embodiments and applications of the disclosed subject matter that could be reasonably foreseen. However, there may be unforeseeable, insubstantial modifications that remain as equivalents. While the present disclosure has been described in conjunction with specific, exemplary embodiments thereof, it is evident that many alterations, modifications, and variations will be apparent to those skilled in the art in light of the foregoing description without departing from the spirit or scope of the present disclosure. Accordingly, the present disclosure is intended to embrace all such alterations, modifications, and variations of the above detailed description.
All patents, test procedures, and other documents cited herein, including priority documents, are fully incorporated by reference to the extent such disclosure is not inconsistent with this invention and for all jurisdictions in which such incorporation is permitted.
When numerical lower limits and numerical upper limits are listed herein, ranges from any lower limit to any upper limit are contemplated.
1. Polyester fiber reinforced polypropylene resin pellets comprising: (a) at least 25 wt %, based on the total weight of the composition, polypropylene based polymer; (b) from 10 to 40 wt %, based on the total weight of the composition, polyester fiber; (c) from 0 to 60 wt %, based on the total weight of the composition, inorganic filler; and (d) from 0 to 0.2 wt %, based on the total weight of the composition, lubricant; wherein the resin pellets range from 3.2 to 12.7 mm in length, wherein the polyester fiber is incorporated into the resin pellets by continuously feeding PET fiber from one or more spools into the extruder hopper of a compounding extruder, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 5.3 newton meter.
2. The polyester fiber reinforced resin pellets of claim 1, wherein the resin pellets range from 6.4 to 9.5 mm in length, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 7.9 newton meter.
3. The polyester fiber reinforced resin pellets of claim 1, wherein the polypropylene based polymer is chosen from polypropylene homopolymers, propylene-ethylene random copolymers, propylene-butene-1 random copolymers, propylene-hexene-1 random copolymers, propylene-octene-1 random copolymers, propylene-α-olefin random copolymers, propylene impact copolymers, ethylene-propylene-butene-1 ter polymers, and combinations thereof.
4. The polyester fiber reinforced resin pellets of claim 1, wherein the polypropylene based polymer has a melt flow rate of from 20 to 1500 g/10 minutes.
5. The polyester fiber reinforced resin pellets of claim 1, wherein the inorganic filler is chosen from talc, calcium carbonate, calcium hydroxide, barium sulfate, mica, calcium silicate, clay, kaolin, silica, alumina, wollastonite, magnesium carbonate, magnesium hydroxide, titanium oxide, zinc oxide, zinc sulfate, and combinations thereof.
6. The polyester fiber reinforced resin pellets of claim 5, wherein the inorganic filler is talc or wollastonite at a loading from 20 to 60 wt %.
7. The polyester fiber reinforced resin pellets of claim 1, wherein the lubricant is chosen from silicon oil, silicon gum, fatty amide, paraffin oil, paraffin wax, ester oil, and combinations thereof.
8. The polyester fiber reinforced resin pellets of claim 1, wherein the input polyester fiber denier is from 5 to 15.
9. The polyester fiber reinforced resin pellets of claim 1, wherein the polypropylene based polymer further comprises from about 0.1 wt % to less than about 10 wt % of a polypropylene based polymer modified with a grafting agent, wherein the grafting agent is chosen from acrylic acid, methacrylic acid, maleic acid, itaconic acid, fumaric acid or esters thereof, maleic anhydride, itaconic anhydride, and combinations thereof.
10. The polyester fiber reinforced resin pellets of claim 1 further comprising from 0.1 to 2.5 wt %, based on the total weight of the composition, colorant fiber, wherein the article molded from the resin pellets exhibits a cloth-like appearance.
11. The polyester fiber reinforced resin pellets of claim 10, wherein the colorant fiber includes an inorganic pigment, an organic dye, or a combination thereof.
12. The polyester fiber reinforced resin pellets of claim 10, wherein the colorant fiber is chosen from cellulosic fiber, acrylic fiber, nylon type fiber, polyester type fiber, and combinations thereof.
13. The polyester fiber reinforced resin pellets of claim 10, wherein the input colorant fiber is from 0.8 mm to 6.4 mm in length.
14. The polyester fiber reinforced resin pellets of claim 10, wherein the polypropylene based polymer further comprises an inorganic pigment, an organic dye, or a combination thereof.
15. The polyester fiber reinforced resin pellets of claim 1, wherein the article is an automotive part, a household appliance part, or a boat hull.
16. The polyester fiber reinforced resin pellets of claim 15, wherein the automotive part is chosen from bumpers, front end modules, aesthetic trim parts, body panels, under body parts, under hood parts, door cores, steering wheel covers, head liner panels, dashboard panels, interior door trim panels, package trays, seat backs, pillar trim cover panels, and under-dashboard panels.
17. Polyester fiber reinforced polypropylene resin pellets comprising: (a) at least 25 wt %, based on the total weight of the composition, polypropylene based polymer; (b) from 10 to 40 wt %, based on the total weight of the composition, polyester fiber; (c) from 0 to 60 wt %, based on the total weight of the composition, inorganic filler; and (d) from 0 to 0.2 wt %, based on the total weight of the composition, lubricant; wherein the resin pellets range from 3.2 to 19.1 mm in length, wherein the polyester fiber is incorporated into the resin pellets by feeding chopped PET fiber into a compounding extruder, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 6.1 newton meter.
18. The polyester fiber reinforced resin pellets of claim 1, wherein the resin pellets range from 9.5 to 19.1 mm in length, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 6.4 newton meter.
19. The polyester fiber reinforced resin pellets of claim 17, wherein the input chopped polyester fiber is from 3.2 to 25.4 mm in length.
20. The polyester fiber reinforced resin pellets of claim 19, wherein the input chopped polyester fiber is from 3.2 to 19.1 mm in length.
21. The polyester fiber reinforced resin pellets of claim 20, wherein the input chopped polyester fiber is from 6.4 to 12.7 mm in length.
22. The polyester fiber reinforced resin pellets of claim 17, wherein the polypropylene based polymer is chosen from polypropylene homopolymers, propylene-ethylene random copolymers, propylene-butene-1 random copolymers, propylene-hexene-1 random copolymers, propylene-octene-1 random copolymers, propylene-α-olefin random copolymers, propylene impact copolymers, ethylene-propylene-butene-1 ter polymers, and combinations thereof.
23. The polyester fiber reinforced resin pellets of claim 17, wherein the polypropylene based polymer has a melt flow rate of from 20 to 1500 g/10 minutes.
24. The polyester fiber reinforced resin pellets of claim 17, wherein the inorganic filler is chosen from talc, calcium carbonate, calcium hydroxide, barium sulfate, mica, calcium silicate, clay, kaolin, silica, alumina, wollastonite, magnesium carbonate, magnesium hydroxide, titanium oxide, zinc oxide, zinc sulfate, and combinations thereof.
25. The polyester fiber reinforced resin pellets of claim 24, wherein the inorganic filler is talc or wollastonite at a loading from 20 to 60 wt %.
26. The polyester fiber reinforced resin pellets of claim 17, wherein the lubricant is chosen from silicon oil, silicon gum, fatty amide, paraffin oil, paraffin wax, ester oil, and combinations thereof.
27. The polyester fiber reinforced resin pellets of claim 17, wherein the input polyester fiber denier is from 5 to 15.
28. The polyester fiber reinforced resin pellets of claim 17, wherein the polypropylene based polymer further comprises from about 0.1 wt % to less than about 10 wt % of a polypropylene based polymer modified with a grafting agent, wherein the grafting agent is chosen from acrylic acid, methacrylic acid, maleic acid, itaconic acid, fumaric acid or esters thereof, maleic anhydride, itaconic anhydride, and combinations thereof.
29. The polyester fiber reinforced resin pellets of claim 17 further comprising from 0.1 to 2.5 wt %, based on the total weight of the composition, colorant fiber, and wherein the article molded from the resin pellets exhibits a cloth-like appearance.
30. The polyester fiber reinforced resin pellets of claim 29, wherein the colorant fiber includes an inorganic pigment, an organic dye, or a combination thereof.
31. The polyester fiber reinforced resin pellets of claim 29, wherein the colorant fiber is chosen from cellulosic fiber, acrylic fiber, nylon type fiber, polyester type fiber, and combinations thereof.
32. The polyester fiber reinforced resin pellets of claim 29, wherein the input colorant fiber is from 0.8 mm to 6.4 mm in length.
33. The polyester fiber reinforced resin pellets of claim 29, wherein the polypropylene based polymer further comprises an inorganic pigment, an organic dye, or a combination thereof.
34. The polyester fiber reinforced resin pellets of claim 17, wherein the article is an automotive part, a household appliance part, or a boat hull.
35. The polyester fiber reinforced resin pellets of claim 34, wherein the automotive part is chosen from bumpers, front end modules, aesthetic trim parts, body panels, under body parts, under hood parts, door cores, steering wheel covers, head liner panels, dashboard panels, interior door trim panels, package trays, seat backs, pillar trim cover panels, and under-dashboard panels.
36. A method of making polyester fiber reinforced polypropylene resin pellets comprising: (a) at least 25 wt %, based on the total weight of the composition, polypropylene based polymer; (b) from 10 to 40 wt %, based on the total weight of the composition, polyester fiber; (c) from 0 to 60 wt %, based on the total weight of the composition, inorganic filler; and (d) from 0 to 0.2 wt %, based on the total weight of the composition, lubricant; wherein the resin pellets range from 3.2 to 12.7 mm in length, wherein the polyester fiber is incorporated into the resin pellets by continuously feeding PET fiber from one or more spools into the extruder hopper of a compounding extruder, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 5.3 newton meter; wherein the method comprises: feeding into the extruder the polypropylene based resin, the polyester fiber, the inorganic filler, and the lubricant; extruding the polypropylene based resin, the PET fiber, the inorganic filler and the lubricant through the extruder to form a PET fiber reinforced polypropylene composite melt; cooling and pelletizing the PET fiber reinforced polypropylene composite melt to form the PET fiber reinforced polypropylene resin pellets.
37. The method of claim 36, wherein the resin pellets range from 6.4 to 9.5 mm in length, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 7.9 newton meter.
38. The method of claim 36, wherein the polypropylene based polymer is chosen from polypropylene homopolymers, propylene-ethylene random copolymers, propylene-butene-1 random copolymers, propylene-hexene-1 random copolymers, propylene-octene-1 random copolymers, propylene-α-olefin random copolymers, propylene impact copolymers, ethylene-propylene-butene-1 ter polymers, and combinations thereof.
39. The method of claim 36, wherein the polypropylene based polymer has a melt flow rate of from 20 to 1500 g/10 minutes.
40. The method of claim 36, wherein the inorganic filler is chosen from talc, calcium carbonate, calcium hydroxide, barium sulfate, mica, calcium silicate, clay, kaolin, silica, alumina, wollastonite, magnesium carbonate, magnesium hydroxide, titanium oxide, zinc oxide, zinc sulfate, and combinations thereof.
41. The method of claim 36, wherein the inorganic filler is talc or wollastonite at a loading from 20 to 60 wt %.
42. The method of claim 36, wherein the lubricant is chosen from silicon oil, silicon gum, fatty amide, paraffin oil, paraffin wax, ester oil, and combinations thereof.
43. The method of claim 36, wherein the input polyester fiber denier is from 5 to 15.
44. The method of claim 36, wherein the polypropylene based polymer further comprises from about 0.1 wt % to less than about 10 wt % of a polypropylene based polymer modified with a grafting agent, wherein the grafting agent is chosen from acrylic acid, methacrylic acid, maleic acid, itaconic acid, fumaric acid or esters thereof, maleic anhydride, itaconic anhydride, and combinations thereof.
45. The method of claim 36 further comprising from 0.1 to 2.5 wt %, based on the total weight of the composition, colorant fiber, and wherein the article molded from the resin pellets exhibits a cloth-like appearance.
46. The method of claim 36, wherein the extruder comprises barrel temperature control set points of less than or equal to 185° C.
47. The method of claim 46, wherein the extruder comprises barrel temperature control set points of less than or equal to 165° C.
48. The method of claim 36, wherein the article is an automotive part, a household appliance part, or a boat hull.
49. The method of claim 48, wherein the automotive part is chosen from bumpers, front end modules, aesthetic trim parts, body panels, under body parts, under hood parts, door cores, steering wheel covers, head liner panels, dashboard panels, interior door trim panels, package trays, seat backs, pillar trim cover panels, and under-dashboard panels.
50. A method of making polyester fiber reinforced polypropylene resin pellets comprising: (a) at least 25 wt %, based on the total weight of the composition, polypropylene based polymer; (b) from 10 to 40 wt %, based on the total weight of the composition, polyester fiber; (c) from 0 to 60 wt %, based on the total weight of the composition, inorganic filler; and (d) from 0 to 0.2 wt %, based on the total weight of the composition, lubricant; wherein the resin pellets range from 3.2 to 19.1 mm in length, wherein the polyester fiber is incorporated into the resin pellets by feeding chopped PET fiber into a compounding extruder, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 6.1 newton meter; wherein the method comprises: feeding into the extruder the polypropylene based resin, the polyester fiber, the inorganic filler, and the lubricant; extruding the polypropylene based resin, the PET fiber, the inorganic filler and the lubricant through the extruder to form a PET fiber reinforced polypropylene composite melt; cooling and pelletizing the PET fiber reinforced polypropylene composite melt to form the PET fiber reinforced polypropylene resin pellets.
51. The method of claim 50, wherein the resin pellets range from 9.5 to 19.1 mm in length, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 6.4 newton meter.
52. The method of claim 50, wherein the input chopped polyester fiber is from 3.2 to 25.4 mm in length.
53. The method of claim 52, wherein the input chopped polyester fiber is from 3.2 to 19.1 mm in length.
54. The method of claim 53, wherein the input chopped polyester fiber is from 6.4 to 12.7 mm in length.
55. The method of claim 50, wherein the polypropylene based polymer is chosen from polypropylene homopolymers, propylene-ethylene random copolymers, propylene-butene-1 random copolymers, propylene-hexene-1 random copolymers, propylene-octene-1 random copolymers, propylene-α-olefin random copolymers, propylene impact copolymers, ethylene-propylene-butene-1 ter polymers, and combinations thereof.
56. The method of claim 50, wherein the polypropylene based polymer has a melt flow rate of from 20 to 1500 g/10 minutes.
57. The method of claim 50, wherein the inorganic filler is chosen from talc, calcium carbonate, calcium hydroxide, barium sulfate, mica, calcium silicate, clay, kaolin, silica, alumina, wollastonite, magnesium carbonate, magnesium hydroxide, titanium oxide, zinc oxide, zinc sulfate, and combinations thereof.
58. The method of claim 57, wherein the inorganic filler is talc or wollastonite at a loading from 20 to 60 wt %.
59. The method of claim 50, wherein the lubricant is chosen from silicon oil, silicon gum, fatty amide, paraffin oil, paraffin wax, ester oil, and combinations thereof.
60. The method of claim 50, wherein the input polyester fiber denier is from 5 to 15.
61. The method of claim 50, wherein the polypropylene based polymer further comprises from about 0.1 wt % to less than about 10 wt % of a polypropylene based polymer modified with a grafting agent, wherein the grafting agent is chosen from acrylic acid, methacrylic acid, maleic acid, itaconic acid, fumaric acid or esters thereof, maleic anhydride, itaconic anhydride, and combinations thereof.
62. The method of claim 50 further comprising from 0.1 to 2.5 wt %, based on the total weight of the composition, colorant fiber, and wherein the article molded from the resin pellets exhibits a cloth-like appearance.
63. The method of claim 50, wherein the extruder comprises barrel temperature control set points of less than or equal to 185° C.
64. The method of claim 63, wherein the extruder comprises barrel temperature control set points of less than or equal to 165° C.
65. The method of claim 50, wherein the article is an automotive part, a household appliance part, or a boat hull.
66. The method of claim 65, wherein the automotive part is chosen from bumpers, front end modules, aesthetic trim parts, body panels, under body parts, under hood parts, door cores, steering wheel covers, head liner panels, dashboard panels, interior door trim panels, package trays, seat backs, pillar trim cover panels, and under-dashboard panels.
67. Polyester fiber reinforced polypropylene resin pellets comprising: (a) at least 25 wt %, based on the total weight of the composition, polypropylene based polymer; (b) from 20 to 40 wt %, based on the total weight of the composition, polyester fiber, wherein the input polyester fiber denier is less than 5; (c) from 0 to 60 wt %, based on the total weight of the composition, inorganic filler; and (d) from 0 to 0.2 wt %, based on the total weight of the composition, lubricant; wherein the resin pellets range from 3.2 to 25.4 mm in length, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 5.0 newton meter.
68. The polyester fiber reinforced resin pellets of claim 67, wherein the resin pellets range from 3.2 mm to 9.5 mm in length, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 5.5 newton meter.
69. The polyester fiber reinforced resin pellets of claim 67, wherein the polyester fiber is incorporated into the resin pellets by continuously feeding PET fiber from one or more spools into the extruder hopper of a compounding extruder.
70. The polyester fiber reinforced resin pellets of claim 67, wherein the polyester fiber is incorporated into the resin pellets by feeding chopped PET fiber into a compounding extruder.
71. The polyester fiber reinforced resin pellets of claim 69, wherein the input chopped polyester fiber is from 3.2 to 25.4 mm in length.
72. The polyester fiber reinforced resin pellets of claim 70, wherein the input chopped polyester fiber is from 3.2 to 12.7 mm in length.
73. The polyester fiber reinforced resin pellets of claim 67, wherein the polypropylene based polymer is chosen from polypropylene homopolymers, propylene-ethylene random copolymers, propylene-butene-1 random copolymers, propylene-hexene-1 random copolymers, propylene-octene-1 random copolymers, propylene-α-olefin random copolymers, propylene impact copolymers, ethylene-propylene-butene-1 ter polymers, and combinations thereof.
74. The polyester fiber reinforced resin pellets of claim 67, wherein the polypropylene based polymer has a melt flow rate of from 20 to 1500 g/10 minutes.
75. The polyester fiber reinforced resin pellets of claim 67, wherein the inorganic filler is chosen from talc, calcium carbonate, calcium hydroxide, barium sulfate, mica, calcium silicate, clay, kaolin, silica, alumina, wollastonite, magnesium carbonate, magnesium hydroxide, titanium oxide, zinc oxide, zinc sulfate, and combinations thereof.
76. The polyester fiber reinforced resin pellets of claim 74, wherein the inorganic filler is talc or wollastonite at a loading from 20 to 60 wt %.
77. The polyester fiber reinforced resin pellets of claim 67, wherein the lubricant is chosen from silicon oil, silicon gum, fatty amide, paraffin oil, paraffin wax, ester oil, and combinations thereof.
78. The polyester fiber reinforced resin pellets of claim 67, wherein the polypropylene based polymer further comprises from about 0.1 wt % to less than about 10 wt % of a polypropylene based polymer modified with a grafting agent, wherein the grafting agent is chosen from acrylic acid, methacrylic acid, maleic acid, itaconic acid, fumaric acid or esters thereof, maleic anhydride, itaconic anhydride, and combinations thereof.
79. The polyester fiber reinforced resin pellets of claim 67 further comprising from 0.1 to 2.5 wt %, based on the total weight of the composition, colorant fiber, and wherein the article molded from the resin pellets exhibits a cloth-like appearance.
80. The polyester fiber reinforced resin pellets of claim 67, wherein the article is an automotive part, a household appliance part, or a boat hull.
81. The polyester fiber reinforced resin pellets of claim 79, wherein the automotive part is chosen from bumpers, front end modules, aesthetic trim parts, body panels, under body parts, under hood parts, door cores, steering wheel covers, head liner panels, dashboard panels, interior door trim panels, package trays, seat backs, pillar trim cover panels, and under-dashboard panels.
82. A method of making polyester fiber reinforced polypropylene resin pellets comprising: (a) at least 25 wt %, based on the total weight of the composition, polypropylene based polymer; (b) from 20 to 40 wt %, based on the total weight of the composition, polyester fiber, wherein the input polyester fiber denier is less than 5; (c) from 0 to 60 wt %, based on the total weight of the composition, inorganic filler; and (d) from 0 to 0.2 wt %, based on the total weight of the composition, lubricant; wherein the resin pellets range from 3.2 to 25.4 mm in length, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 5.0 newton meter; wherein the method comprises: feeding into the extruder the polypropylene based resin, the polyester fiber, the inorganic filler, and the lubricant; extruding the polypropylene based resin, the PET fiber, the inorganic filler and the lubricant through the extruder to form a PET fiber reinforced polypropylene composite melt; cooling and pelletizing the PET fiber reinforced polypropylene composite melt to form the PET fiber reinforced polypropylene resin pellets.
83. The method of claim 81, wherein the resin pellets range from 3.2 mm to 9.5 mm in length, and wherein an article molded from the resin pellets exhibits a drop dart impact resistance of at least 5.5 newton meter.
84. The method of claim 81, wherein the polyester fiber is incorporated into the resin pellets by continuously feeding PET fiber from one or more spools into the extruder hopper of a compounding extruder.
85. The method of claim 81, wherein the polyester fiber is incorporated into the resin pellets by feeding chopped PET fiber into a compounding extruder.
86. The method of claim 84, wherein the input chopped polyester fiber is from 3.2 to 25.4 mm in length.
87. The method of claim 85, wherein the input chopped polyester fiber is from 3.2 to 12.7 mm in length.
88. The method of claim 81, wherein the polypropylene based polymer is chosen from polypropylene homopolymers, propylene-ethylene random copolymers, propylene-butene-1 random copolymers, propylene-hexene-1 random copolymers, propylene-octene-1 random copolymers, propylene-α-olefin random copolymers, propylene impact copolymers, ethylene-propylene-butene-1 ter polymers, and combinations thereof.
89. The method of claim 81, wherein the polypropylene based polymer has a melt flow rate of from 20 to 1500 g/10 minutes.
90. The method of claim 81, wherein the inorganic filler is chosen from talc, calcium carbonate, calcium hydroxide, barium sulfate, mica, calcium silicate, clay, kaolin, silica, alumina, wollastonite, magnesium carbonate, magnesium hydroxide, titanium oxide, zinc oxide, zinc sulfate, and combinations thereof.
91. The method of claim 89, wherein the inorganic filler is talc or wollastonite at a loading from 20 to 60 wt %.
92. The method of claim 81, wherein the lubricant is chosen from silicon oil, silicon gum, fatty amide, paraffin oil, paraffin wax, ester oil, and combinations thereof.
93. The method of claim 81, wherein the polypropylene based polymer further comprises from about 0.1 wt % to less than about 10 wt % of a polypropylene based polymer modified with a grafting agent, wherein the grafting agent is chosen from acrylic acid, methacrylic acid, maleic acid, itaconic acid, fumaric acid or esters thereof, maleic anhydride, itaconic anhydride, and combinations thereof.
94. The method of claim 81 further comprising from 0.1 to 2.5 wt %, based on the total weight of the composition, colorant fiber, and wherein the article molded from the resin pellets exhibits a cloth-like appearance.
95. The method of claim 81, wherein the extruder comprises barrel temperature control set points of less than or equal to 185° C.
96. The method of claim 94, wherein the extruder comprises barrel temperature control set points of less than or equal to 165° C.
97. The method of claim 81, wherein the article is an automotive part, a household appliance part, or a boat hull.
98. The method of claim 96, wherein the automotive part is chosen from bumpers, front end modules, aesthetic trim parts, body panels, under body parts, under hood parts, door cores, steering wheel covers, head liner panels, dashboard panels, interior door trim panels, package trays, seat backs, pillar trim cover panels, and under-dashboard panels.
|
/*
* Copyright 2008-2009 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.test.hasor.core._04_scope;
import com.alibaba.fastjson.JSON;
import net.hasor.core.*;
import net.test.hasor.core._01_bean.pojo.PojoBean;
import net.test.hasor.core._01_bean.pojo.PojoInfo;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This example demonstrates how to use Hasor's Scope to isolate beans.
* @version : 2013-8-11
* @author 赵永春 ([email protected])
*/
public class ScopeTest {
protected Logger logger = LoggerFactory.getLogger(getClass());
@Test
public void threadScopeTest() {
System.out.println("--->>threadScopeTest<<--");
AppContext appContext = Hasor.createAppContext(new Module() {
public void loadModule(ApiBinder apiBinder) throws Throwable {
MyScope threadScope = new MyScope();
apiBinder.bindType(PojoBean.class).toScope(threadScope);
apiBinder.bindType(MyScope.class).toInstance(threadScope);
}
});
logger.debug("---------------------------------------------");
// Two lookups from the same thread should return the same instance,
// because PojoBean is bound to the custom scope registered above.
PojoInfo objectA = appContext.getInstance(PojoBean.class);
PojoInfo objectB = appContext.getInstance(PojoBean.class);
//
logger.debug("objectBody :" + JSON.toJSONString(objectA));
logger.debug("objectA eq objectB = " + (objectA == objectB));
assert objectA == objectB;
// Ask the scope directly for the provider associated with the PojoBean binding.
BindInfo<?> info = appContext.getBindInfo(PojoBean.class);
MyScope scope = appContext.getInstance(MyScope.class);
Provider<Object> provider = scope.scope(info, null);
assert provider != null;
}
}
|
Emergence of Localized Serogroup W Meningococcal Disease in the United States — Georgia, 2006–2016
Several countries in Europe and Australia are reporting an increasing incidence of Neisseria meningitidis serogroup W (NmW) as a consequence of the rapid expansion of a single NmW clone belonging to clonal complex 11 (1–5). Because this clone is reported to be associated with more severe disease, unusual clinical presentations, and a high case fatality ratio (CFR), it is considered a hypervirulent strain (1,6). In the United States, NmW accounts for approximately 5% of meningococcal disease reported each year, and this proportion has remained stable for several years (7). However, localized increases in NmW have been reported, most notably in Florida during 2008–2009 (8). In Georgia, NmW accounted for only 3% of meningococcal disease cases reported during 2006–2013; however, between January 2014 and December 2016, 42% of all reported cases were NmW. Surveillance data from Georgia were analyzed to describe the epidemiology and clinical characteristics of NmW cases, and whole-genome sequencing of NmW isolates was performed for comparison with NmW strains circulating in the United States and worldwide. These data indicate that the U.S. NmW strains might have evolved from the same ancestor as the hypervirulent strain that is circulating globally. Genetic analysis demonstrates that these strains are closely related, which would suggest that genetic variation led to the rise of different strains from the same ancestor. Given the recent global expansion of this potentially hypervirulent NmW lineage, clinicians and public health officials need to remain vigilant in obtaining isolates to monitor changes in circulating strains.
A case of meningococcal disease was defined as laboratory-confirmed N. meningitidis isolated from a normally sterile body site, reported to the Georgia Department of Public Health (DPH) during 2006-2016. A comprehensive case report form, developed for the Emerging Infections Program's Active Bacterial Core surveillance (9), was used to abstract case medical record data, including demographic and clinical information. Clinical syndromes (e.g., bacteremia, meningitis, pneumonia) were not mutually exclusive; a patient could have multiple syndromes simultaneously. For statistical comparisons, Fisher's exact and Student's t-test statistics were calculated; p-values <0.05 were considered statistically significant.
All N. meningitidis isolates were requested for serogroup typing at the Georgia Public Health Laboratory as part of Active Bacterial Core surveillance. The isolates were then forwarded to CDC for serogroup confirmation and further molecular characterization using whole genome sequence analysis. The phylogenetic analysis included 18 NmW isolates collected in Georgia during 2012-2016, isolates from other states collected through routine surveillance, and the genome sequences of the global strains, obtained from the Bacterial Isolate Genome Sequence Database of PubMLST, a public database for molecular typing and microbial genome diversity.
During 2006-2016, a total of 178 meningococcal disease cases were reported to DPH, including 158 (89%) with isolates available for serogroup typing. The 20 patients without an isolate available for serogroup typing were excluded from the analysis; these patients did not differ significantly by race, age, or sex from those with a known serogroup.
Overall, 21 (13%) NmW cases and 137 (87%) N. meningitidis non-serogroup W (non-NmW) cases were identified; the proportion of NmW cases increased from 0% in 2013 to 47% in 2016 (Figure 1). No epidemiologic links were identified among the patients with NmW disease, although 70% of NmW cases reported since 2006 were concentrated geographically in northern Georgia.
Among 21 patients with NmW disease, 14 (68%) were male compared with 74 (54%) patients with non-NmW disease; however, this difference was not statistically significant (Table). The median age of patients with NmW disease (34 years) was significantly higher than that of patients with non-NmW disease (26 years) (p = 0.005); 90% of patients with NmW disease were aged ≥18 years compared with 61% of patients with non-NmW disease. Data on admission to an intensive care unit (ICU) have been collected for all meningococcal disease cases since 2010; during 2010-2016, a similar percentage of patients with NmW disease and non-NmW disease were admitted to an ICU (56% and 54%, respectively). The CFR was higher for patients with NmW disease (24%) than for patients with non-NmW disease (15%); however, the numbers are small and the difference was not statistically significant.
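A comparison of case fatality ratios of the kind described above can be carried out with Fisher's exact test. The sketch below is illustrative only: the death counts are back-calculated from the reported case totals and CFRs (roughly 24% of 21 and 15% of 137) and are therefore approximations, not figures taken directly from the surveillance data.

from scipy.stats import fisher_exact

# Approximate 2x2 table reconstructed from the reported totals and CFRs.
nmw_deaths, nmw_survivors = 5, 16            # ~24% of 21 NmW cases
non_nmw_deaths, non_nmw_survivors = 21, 116  # ~15% of 137 non-NmW cases

odds_ratio, p_value = fisher_exact([[nmw_deaths, nmw_survivors],
                                    [non_nmw_deaths, non_nmw_survivors]])
print(f"odds ratio = {odds_ratio:.2f}, p = {p_value:.2f}")
# With counts of this size the p-value is well above 0.05, consistent with the
# report's finding that the CFR difference was not statistically significant.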
Bacteremia was reported in 50% of NmW and 35% of non-NmW cases, and meningitis accounted for less than 40% of infections in both groups. Although not collected systematically for all meningococcal disease cases in Georgia, it was noted in medical records that nine (41%) NmW patients during 2014-2016 reported gastrointestinal (GI) symptoms, such as diarrhea and vomiting, to their providers.
Eighteen (86%) NmW isolates belonged to clonal complex 11 (CC11); 17 of these were sequence type 11 (ST-11), and one, ST-10826, was a new sequence type. Pairwise comparison, a process of comparing any two sequences for genetic differences, indicated that the difference between each pair of the 18 Georgia isolates ranged from 0 to 63 single nucleotide polymorphisms. The 17 ST-11 isolates from Georgia were more similar to each other than to isolates tested from other states (California, Florida, Ohio, and Texas) (Figure 2). Overall, the U.S. NmW CC11 isolates were more similar to strains from South America and Europe (six from the United Kingdom) than to those from Africa (Figure 2).
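Pairwise comparison, as used above, simply counts the positions at which two aligned genome sequences differ. The sketch below illustrates the idea with short made-up sequences and hypothetical isolate labels; it does not use the actual isolate data.

from itertools import combinations

def snp_distance(seq_a: str, seq_b: str) -> int:
    # Count single-nucleotide differences between two aligned sequences of equal length.
    assert len(seq_a) == len(seq_b), "sequences must be aligned to the same length"
    return sum(1 for a, b in zip(seq_a, seq_b) if a != b)

# Toy aligned sequences standing in for the isolates' genomes.
isolates = {
    "GA-1": "ACGTACGTAC",
    "GA-2": "ACGTACGAAC",
    "GA-3": "ACGAACGTAT",
}

for (name_a, seq_a), (name_b, seq_b) in combinations(isolates.items(), 2):
    print(name_a, name_b, snp_distance(seq_a, seq_b))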
Discussion
Georgia experienced an increase in NmW disease during 2014-2016, compared with 2006-2013, which was associated with the emergence of a CC11 NmW strain that is different from the CC11 NmW strains from other U.S. states. Phylogenetic comparison of the Georgia and other U.S. CC11 NmW strains with global isolates indicates that these U.S. strains might have evolved from a clone previously observed in South America, which is also an ancestor of the hypervirulent United Kingdom strain that has emerged in Europe and Australia (5).
In contrast to other published reports, this analysis did not identify significant differences in CFR or clinical presentation of patients with NmW disease compared with those with non-NmW disease. However, there was a slightly higher frequency of ICU admission and higher CFR in patients with NmW disease, which are consistent with a report from the United Kingdom that found that older children and adults with NmW disease were more likely to be admitted to the ICU (1). In addition, many NmW patients in the United Kingdom had predominantly GI symptoms, diarrhea in particular, which reportedly led to initial misdiagnoses and delays in provision of appropriate care (6). Although 41% of the Georgia NmW patients did report GI symptoms, information on these symptoms was not systematically collected on all meningococcal cases for comparison. In the United Kingdom, the emergence of cases caused by the hypervirulent ST-11 strain initially began in adults but quickly extended to other age groups; during 2013-2014, this ST-11 strain accounted for nearly all NmW cases in persons aged 5-64 years and a high proportion of NmW cases in other age groups (1). This is of interest because in this analysis 90% of NmW cases occurred in persons aged ≥18 years; therefore, surveillance data will need to be monitored closely for future shifts in the age distribution of NmW cases.
The findings in this report are subject to at least three limitations. First, cases of N. meningitidis are rare, and thus performing sufficiently powered statistical tests of significance on the data is difficult. Second, serogroup W cases make up only 5% of reported meningococcal cases each year in the United States, and as a result, the comparison group for isolates within the United States is limited. Finally, clinical presentation and symptoms were not collected systematically for all N. meningitidis cases, which precluded direct analysis of Georgia data and comparison with data from other countries.
Although the numbers in this study are small, this report provides a description of the NmW clone that has emerged in Georgia and its associated cases. The DPH will continue to monitor and follow up on all patients with meningococcal disease to collect clinical information and isolates to determine whether the trend of an increasing proportion of NmW cases continues. Clinicians and public health officials need to remain vigilant in obtaining isolates from all cases of meningococcal disease to monitor changes in circulating strains over time, and also remain aware of the potential for atypical clinical presentations that might not be indicative of meningococcal disease to prevent delays in treatment that could result in unnecessary morbidity and mortality.
(Figure 2 note: Branch length is related to the number of nucleotide substitutions. The more substitutions an isolate has, the longer its branch will be. More evolved strains will be further from their ancestor.)
Summary
What is already known about this topic?
The incidence of meningococcal disease has been declining in the United States for decades, but Neisseria meningitidis serogroup W incidence has been increasing in countries around the world.
What is added by this report?
The incidence of Neisseria meningitidis serogroup W is increasing in Georgia. Although not associated with an outbreak, molecular testing indicated that the Georgia serogroup W isolates all belong to the same clonal complex, CC11. This strain is associated with increased morbidity and mortality, which could have severe implications.
What are the implications for public health practice?
The collection and testing of meningococcal isolates for serogroup and strain information is important to monitor changes and emergence of previously underrepresented serogroups.
|
Neutron detection device and method of manufacture
ABSTRACT
A neutron detection device includes a neutron conversion layer in close proximity to an active semiconductor layer. The device is preferably based on the modification of existing conventional semiconductor memory devices. The device employs a conventional SRAM memory device that includes an SOI substrate. The SOI substrate includes an active semiconductor device layer, a base substrate and an insulating layer between the active semiconductor device layer and the base substrate. The base substrate layer is removed from the memory device by lapping, grinding and/or etching to expose the insulating layer. A neutron conversion layer is then formed on the insulating layer. The close proximity of the neutron conversion layer to the active semiconductor device layer yields substantial improvements in device sensitivity.
BACKGROUND OF THE INVENTION
The present invention is directed in general to a neutron detection device and a method of manufacturing a neutron detection device. The present invention is specifically directed to a semiconductor device for the detection of neutrons that utilizes a neutron conversion layer in close proximity to a conventional memory cell structure.
The development of nuclear weapons gave rise to several urgent applications for highly sensitive neutron detectors. The applications included safeguarding nuclear materials and weapons, treaty verification, anti-proliferation, and the recovery of lost military payloads. More recently, however, the need to guard against nuclear smuggling, the potential of a radiological weapon (so-called "dirty" bombs), and terrorist acts has given rise to an urgent need to perform neutron surveillance at border and port facilities, transportation systems, and other places where large amounts of cargo or people pass by or through on a regular basis. Such neutron surveillance must be accomplished without undue restriction or disruption of traffic flow and events.
One class of conventional neutron detectors has been based on the phenomenon of scintillation, which is a result of photon-emitting transitions that occur in the wake of energetic charged nuclei released from reactions between incident neutrons and atomic nuclei. Scintillation devices include a light-transmissive neutron sensitive material (either a gas or a liquid) that generates light upon receipt of incident neutrons. The scintillation devices are typically coupled to a photomultiplier tube to generate an analog electrical signal based on the production of the light within the scintillation material. The analog signal is a representation of the incident neutron irradiation. Another class of conventional neutron detector is the gas filled counter, typically based on gaseous helium-3 contained in high pressure tubes. The helium-3 filled tubes in particular are delicate, require careful handling, and can indicate false positives when abruptly moved or struck. These types of conventional neutron detectors are effective in many types of field operations, but they are not suitable for operations requiring compact and highly sensitive devices capable of functioning for long periods of time with low power consumption.
With the advent of solid state electronics, it was realized that silicon-based semiconductor devices could be used to sense alpha particles emitted from a neutron converter material in which an (n,alpha) reaction had taken place. The role of the converter material is to convert incident neutrons into emitted charged particles which are more readily sensed. When the emitted charged particle transits a semiconductor device, it liberates charges in its wake, and these charges may be collected and used to sense the event stimulated by the initial neutron reaction. Such devices therefore serve as neutron detectors. Initial demonstrations of such a concept used free standing converter foils placed near a silicon detector such as a PIN diode. It is more common now to utilize films of converter material placed in contact with or deposited directly upon semiconductor detectors. Lithium metal has been used for this purpose, although the chemical reactivity of the lithium metal leads to shorter detector life. Greater life has been obtained with compounds of lithium such as LiF, a hard crystalline material. Boron metal has also been applied directly to silicon devices. See "Recent Results From Thin-Film-Coated Semiconductor Neutron Detectors", D. S. McGregor et al., X-Ray and Gamma-Ray Detectors and Applications IV, Proceedings of SPIE, Vol. 4784 (2002), the contents of which are incorporated herein by reference.
The use of diode structures in neutron detectors, however, has its own set of drawbacks and limitations. The internal noise level of an uncooled diode is appreciable, and consequently it is difficult, if not impossible, to measure low background levels of ambient thermal neutrons in the surrounding area or to detect single neutron events. A typical diode also has a thick semiconductor layer in which charges are collected. Charges liberated by gamma rays are also collected in the thick semiconductor layer and these charges contribute to the non-neutron noise signal of the detector.
More recently, it has been proposed that a previously considered disadvantage of semiconductor memory cells be turned into an advantage with respect to neutron detection. Memory cells can be "hardened" against radiation to prevent errors induced by radiation. In fact, the importance of such memory integrity has been readily appreciated for many years in the fields of computers, aviation and space flight. A radiation-induced bit error is known as a soft error if the affected memory cell subsequently responds to write commands. In contrast, the induced bit error is known as a hard error if subsequent attempts to change the state of the memory cell are ineffective. Both hard and soft errors are known as single event upsets (SEUs) or single event errors (SEEs) provided that a single incoming particle induces the error in the memory cell. The error events, which are detrimental when trying to maintain data integrity, can be used in a positive manner to detect radiation events by simply monitoring the radiation-induced changes in the states of the memory cells.
Attempts have been made to utilize commercial memory circuits with a neutron converter in order to use the SEU associated with the memory circuits for neutron detection. For example, boron has been used in the semiconductor industry as a dopant and in boron containing glass as a passivation layer that is used to cover the circuit-defining structures and to encapsulate a finished semiconductor chip. It has been demonstrated that ¹⁰B in the dopant or borophosphosilicate glass (BSPG) passivation layer is responsible for sensitizing a circuit to neutron radiation. See "Experimental Investigation of Thermal Neutron-Induced Single Event Upset in Static Random Access Memories", Y. Arita et al., Jpn. J. Appl. Phys. 40 (2001) pp. L151-153, the contents of which are incorporated herein by reference. Accordingly, proposals have been made to coat boron on a conventional semiconductor memory chip containing a passivation layer or to first remove the passivation layer and then coat the chip with a boron converter material. U.S. Pat. No. 6,075,261 issued to Houssain et al. and entitled "Neutron Detecting Semiconductor Device", the contents of which are incorporated herein by reference, discloses one such attempt at utilizing a conventional semiconductor memory structure as a neutron detector, wherein a neutron-reactant material (converter) is coated over a conventional flash memory device. Alpha particles emitted by the boron typically must pass through the structural layers which define the circuit before they reach the active semiconductor. These efforts to date, however, have resulted in insensitive detectors, primarily because the boron conversion material is not located close enough to the active semiconductor layer. Thus, alpha particles generated by the boron conversion material dissipate their energy in the intervening material and cannot generate a sufficient charge in the active semiconductor layer to cause an SEU.
In view of the above, it would be desirable to provide a neutron detection device that does not require the use of high pressure tubes or high voltages, is not sensitive to gamma radiation, is not sensitive to thermal noise, and operates with low power consumption, but yet is sensitive enough to permit the counting of single neutron events.
It would further be desirable to provide a neutron detection device of inexpensive design and manufacture.
Still further, it would be desirable to provide a method of manufacturing a neutron detection device that involves the modification of conventional memory devices, thereby permitting conventional memory devices to be converted to neutron detection devices.
SUMMARY OF THE INVENTION
The invention provides a neutron detection device which does not require the use of high pressure tubes or high voltages for its operation, is not sensitive to gamma radiation, is not sensitive to thermal noise, and operates at low power consumption, but yet is sensitive enough to permit the counting of single neutron events. The invention further provides a neutron detection device of inexpensive design and manufacture. The device is based on a novel architecture for fabricating charge-sensitive semiconductor circuit elements in close proximity to a neutron conversion layer, thereby enabling the circuit elements to sense the charges produced in the semiconductor by transiting particles emitted from the reaction of a neutron with an atom of the conversion layer. One embodiment of the device may be fabricated by modification of existing conventional semiconductor memory devices, thereby enabling existing devices to be modified for use as neutron detectors.
The neutron detection device includes an active semiconductor layer including a plurality of charge-sensitive elements such as conventional memory cells, and a neutron conversion layer located in close proximity to the charge-sensitive elements. The neutron conversion layer produces particles which are detectable by the charge-sensitive elements when neutrons enter the conversion layer. The location of the neutron conversion layer in close proximity to the memory cells increases the sensitivity of the neutron detection device.
The neutron conversion layer may include boron or lithium. When an electrically conductive form of boron or a boron containing composition is utilized, it is preferable to include an insulating layer located between the active semiconductor layer and the neutron conversion layer. Further, a barrier layer may be located between the neutron conversion layer and the insulating layer. The barrier layer preferably comprises silicon nitride. Additionally, more than one neutron conversion layer may be employed to improve sensitivity.
A preferred embodiment of the device employs a static random access memory (SRAM) circuit that is fabricated as a semiconductor-on-insulator (SOI) device. The SOI device includes a circuit structure layer comprising the structures by which the circuit is defined in an active semiconductor layer, the active semiconductor layer, and an insulating layer, the layers being arranged in the order just given. The insulating layer of SOI devices is typically approximately 200 nanometers thick. In this preferred embodiment, beneath the insulator of the SOI device is a neutron conversion layer in intimate contact with the insulating layer. The close proximity of the neutron conversion layer to the active semiconductor layer yields substantial improvements in device detection sensitivity. A barrier layer can also be incorporated in intimate contact between the neutron conversion layer and the active semiconductor layer to prevent diffusion of the neutron conversion material into the active semiconductor layer. Sensitivity can be further improved by adding a second neutron conversion layer in intimate contact with the first neutron conversion layer. It is also possible to provide an insulating neutron conversion layer in direct contact with the active semiconductor layer. It is further possible to provide a neutron conversion layer separated from the active semiconductor layer by a barrier layer provided between them, or by an insulating barrier layer in the case of a conducting conversion layer. Thin layers may be applied to surfaces to aid in maintaining the aforementioned intimate contacts.
In a preferred embodiment of manufacture, the neutron detection device is constructed from a conventional SRAM memory device that includes an SOI substrate. The SOI substrate includes an active semiconductor layer, a base substrate and an insulating layer between the active semiconductor layer and the base substrate. The base substrate layer is removed from the memory device by lapping, grinding and/or etching to expose the insulating layer. A neutron conversion layer is then formed on the insulating layer. The close proximity of the neutron conversion layer to the active semiconductor layer yields substantial improvements in device sensitivity.
Additional details and advantages of the invention will become apparent to those skilled in the art in view of the following detailed description of the preferred embodiments of the invention.
BRIEF DESCRIPTION OF THE DRAWINGS
The invention will be described with reference to certain preferred embodiments thereof and the accompanying drawings, wherein:
FIG. 1 illustrates a conventional semiconductor memory device that includes a SOI substrate;
FIG. 1A illustrates a preferred embodiment of the neutron device structure;
FIG. 2 illustrates the application of a bonding layer to the conventional memory device of FIG. 1 and the removal of the base substrate of the conventional memory device of FIG. 1;
FIG. 3 illustrates the formation of a neutron conversion layer on the exposed insulating layer of FIG. 2;
FIG. 4 illustrates the direct application of a neutron conversion layer to the active semiconductor layer of the conventional memory device;
FIG. 5 illustrates the addition of a second neutron conversion layer to the device illustrated in FIG. 3;
FIG. 6 illustrates the addition of a second neutron conversion layer to the device illustrated in FIG. 4;
FIG. 7 is a graph illustrating the Qcrit for unhardened silicon memory cells based on feature size;
FIG. 8 is a plot of the Linear Energy Transfer (LET) of an alpha particle from the isotope boron-10 traversing silicon; and
FIG. 9 is a graph including limiting values for liberating charge in the active semiconductor layer.
DETAILED DESCRIPTION OF THE PREFERRED EMBODIMENTS
This application hereby incorporates by reference the application entitled "Semiconductor Substrate Incorporating a Neutron Conversion Layer", assigned NC 84,785, filed on even date herewith. The present invention is directed to a neutron detection device that utilizes a neutron conversion layer in close proximity to charge-sensitive elements such as conventional memory cells. Specifically, the device provides a neutron conversion layer in close proximity to the active semiconductor layer of a charge-sensitive electronic semiconductor device such as a semiconductor memory cell. In particular, the invention will be described with reference to an SRAM memory device formed on an SOI substrate. It will be understood, however, that the invention is not limited to the specifically disclosed embodiment described with reference to silicon devices but may also be realized with other semiconductor materials, and that alpha-emitting neutron converters based on boron and lithium may be utilized, as may other alpha emitters, proton emitters, or electron emitters, and may also be utilized with other charge-sensitive device structures such as dynamic random access memories (DRAMs), other types of random access memories, non-random access memories, charge coupled devices, charge injection devices, or other memory device structures and substrates.
FIG. 1 illustrates a conventional SRAM memory device formed on an SOI substrate. The SOI substrate 10 includes an active semiconductor layer 12, an insulating layer 14 (referred to as a buried oxide, "BOX") and a base substrate 16. As will be readily understood by those skilled in the art, active charge-sensitive circuit elements such as individual memory cells 15 are formed in part by modifications made within the active semiconductor layer 12 of the SOI substrate 10. Additional structural layers are then formed over the active semiconductor layer 12 to form the working circuitry and circuit elements of the charge-sensitive device. The additional structural layers, for example, may include interconnect layers, insulating layers and/or additional circuit elements. In FIG. 1, these additional structural layers are not illustrated in detail for the sake of simplicity of illustration, but will simply be shown as a single circuit structure layer 18. It is noted, however, that the thickness of the additional structural layers that form the circuit structure layer 18 is generally much greater than that of the active semiconductor layer 12 or the insulating layer 14. It is also common to include a passivation layer 20 on top of the circuit structure layer 18.
Previous attempts at utilizing conventional memory devices have concentrated on coating a neutron conversion layer on top of the passivation layer 20 or on removing the passivation layer 20 and coating the neutron conversion layer on top of the circuit structure layer 18. However, the range of alpha particles emitted from a reaction between neutrons and a neutron conversion material (for example the isotope boron-10) is limited. The conventional attempts essentially placed the neutron conversion layer too far from the active semiconductor layer 12, i.e., beyond the range of the alpha particles, resulting in poor sensitivity. Instead, the present invention places a neutron conversion layer in close proximity (either directly in contact with or effectively adjacent to, as will be described) to the active semiconductor layer 12 without disrupting or damaging the additional structural layers provided in the circuit structure layer 18, as will be described below.
Referring now to FIG. 2, a bonding layer 22 is first applied to a wafer containing at least one conventional semiconductor memory device of the type illustrated in FIG. 1. The bonding layer 22 may be a thick epoxy (as just one example) that is used to provide a mechanical connection to the wafer for processing purposes. As also shown in FIG. 2, the back of the wafer is processed to remove the base substrate 16. Lapping or similar mechanical removal processes are suitable for removing an initial portion of the base substrate 16 while leaving a sufficient remaining thickness to protect the insulating layer 14 from mechanical damage. The remaining thickness of the base substrate 16 is then removed by a chemical removal process such as etching with etchants such as hydrazine, which stops at the insulating layer 14, or by a timed etching removal process with etchants such as TMAH.
Once the base substrate 16 has been substantially removed, a neutron conversion layer 24 is applied to the exposed insulating layer 14 as shown in FIG. 3. Sputter coating will produce lower thermal stresses in the circuit structure layer 18 during the deposition process and, for fragile circuits, is therefore preferred over, for example, high temperature processing. Prior to the application of the neutron conversion layer 24, a barrier layer 26 (for example, silicon nitride) may be deposited to prevent diffusion of the neutron conversion material into the active semiconductor layer 12. This process ensures that the neutron conversion layer 24 is located in close proximity to the active semiconductor layer 12. If desired, an additional stability layer (not shown) such as epoxy may be applied to an outer layer if needed for additional mechanical stability.
The composition of the neutron conversion layer 24 may be a boron metal or a composition enriched with boron-10. A metal layer requires that an insulating layer 14 be present. For example, a neutron conversion layer 24 having a thickness of 1.8 microns and an insulating layer 14 having a thickness of 200 nm may be employed.
Boron containing layers, however, have also been placed directly on silicon diodes. McGregor et al. (cited above) have shown that mechanically stable films of the required thickness can be achieved if provision for stress relief is included. It is well known that borosilicate glass (BSPG) is compatible with application on silicon devices, is an insulator, and is commonly used for passivation layers. A BSPG with 5% boron to serve as the insulating layer 14 and also the neutron conversion layer 24 may also be applied directly to the active semiconductor layer 12 as shown in FIG. 4. Other boron compounds or compositions may also be used.
The neutron conversion layer may also incorporate lithium. Lithium metal is highly reactive and has been used to sensitize diodes, but has generally shortened sensor life. Preferably, a stable material or composition such as ⁶LiF may be employed as the neutron conversion layer 24 or alternatively as a second neutron conversion layer 28 formed over the first neutron conversion layer 24 as shown in FIGS. 5 and 6. The alpha particles emitted by lithium have a longer range than those emitted by boron. For this reason, the use of two neutron conversion layers provides additional device sensitivity.
The susceptibility of memory devices to SEU in general has been extensively studied for many years, and has revealed an important quantity called the critical charge (Qcrit). The Qcrit is the amount of charge a memory cell must accumulate in order to produce a bit error. It has long been known that finer lithographic line widths lead to smaller cells, to smaller cell charge holding capacity, and thus to smaller Qcrit for higher density memory devices. A graph illustrating the Qcrit for unhardened silicon memory cells based on feature size is shown in FIG. 7. By locating the neutron conversion layer 24 in close proximity to the active semiconductor layer 12 in which memory cell elements are formed, sufficient charge can be generated by the alpha particles produced by the interaction of the neutrons with the boron-10.
In the case of the device illustrated in FIG. 3, the typical 200 nm thickness of the active semiconductor layer 12 is much less than the range of the alpha particles generated in the neutron conversion layer 24. While the alpha will now reach the active semiconductor layer as required, only a fraction of the alpha energy will therefore be deposited in the active semiconductor layer 12 as it passes through that layer. The relevant quantity then becomes the amount of energy deposited along the track of the alpha particles, i.e., the Linear Energy Transfer (LET). The LET of an alpha particle from boron-10 traversing silicon is plotted in FIG. 8. (The initial energy of an alpha particle emitted by a boron-10 atom is approximately one and a half MeV.) It can be seen that the LET varies from about 1 to 1.5 MeV/(mg/cm²) over essentially the entire useful energy range of the alpha particle. Applying these limits to a 200 nm active semiconductor layer thickness gives a range of energy deposited in the active semiconductor layer 12 for normal incidence (the charge will increase for non-normal incidence with greater path lengths through the active silicon layer 12). The amount of alpha energy required (in MeV) per liberated charge (in pC) can be calculated. See "Calculation of Cosmic-Ray Induced Soft Upsets and Scaling in VLSI Devices", E. L. Peterson et al., IEEE Transactions on Nuclear Science, NS-29/6, December 1982, 2055-63, the contents of which are incorporated herein by reference. For the illustrated example, the energy is 22.5 MeV/pC, giving a value of about 2 to 3 femtocoulombs deposited in the active layer at normal incidence.
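The arithmetic behind this 2 to 3 femtocoulomb figure can be reproduced with a short back-of-the-envelope script. The sketch below is illustrative only and is not part of the original disclosure; it uses the values quoted above together with an assumed silicon density of 2.33 g/cm³.
# Illustrative estimate of the charge an alpha particle deposits in a thin
# active silicon layer, using the values quoted in the text. Assumed inputs:
# LET of 1.0-1.5 MeV/(mg/cm^2), a 200 nm layer, silicon density 2.33 g/cm^3,
# and 22.5 MeV of deposited energy per picocoulomb of liberated charge.
silicon_density_mg_cm3 = 2330.0      # 2.33 g/cm^3 expressed in mg/cm^3
layer_thickness_cm = 200e-7          # 200 nm in cm
mev_per_pc = 22.5                    # energy needed to liberate 1 pC in silicon
for let in (1.0, 1.5):               # MeV/(mg/cm^2), the range read from FIG. 8
    deposited_mev = let * silicon_density_mg_cm3 * layer_thickness_cm
    charge_fc = deposited_mev / mev_per_pc * 1000.0   # pC converted to fC
    print(f"LET {let} MeV/(mg/cm^2): ~{deposited_mev*1000:.0f} keV deposited, ~{charge_fc:.1f} fC")
Running this gives roughly 47 to 70 keV deposited and about 2.1 to 3.1 fC, consistent with the range stated above.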
FIG. 7 can now be plotted as shown in FIG. 9 to include these limiting values for liberating charge in the active semiconductor layer. As shown in FIG. 9, the alpha particle produced will, at almost any point in its trajectory in silicon, supply an amount of charge comparable to Qcrit for a 0.35 micron line width SOI RAM cell. In other words, the proximally placed neutron conversion layer 24 will produce alpha particles sufficient to cause SEU in conventional SOI RAM structures. The resulting structure will be referred to as a neutron sensitive random access memory (NRAM).
A neutron detector in accordance with the present invention can be utilized in a variety of applications. Just one notable application is in the area of monitoring the transportation of cargo. The low standby current draw of a device utilizing SRAM technology allows integrations of any desired duration to be performed without difficulty, as battery life can be on the order of years.
The invention has been described with reference to certain preferred embodiments thereof. It will be understood, however, that modifications and variations are possible within the scope of the appended claims. For example, an additional SRAM circuit can be applied to a single-sided neutron sensitive SRAM to produce a "sandwich" sensor, wherein the center layer is the neutron converter and SRAM circuits are provided on either side of the converter. Other stacking geometries may also be used. The technology for multiple stacked layers of silicon microcircuits has already been demonstrated. See "Electrical Integrity of State-of-the-Art 0.13 μm SOI CMOS Devices and Circuits Transferred for Three-Dimensional (3D) Integrated Circuit (IC) Fabrication", K. W. Guarini et al., IEDM Technical Digest, IEEE, (2002), the contents of which are incorporated herein by reference. Further, the invention has been described with reference to silicon memory SOI circuits; however, other semiconductors may be used to fabricate semiconductor-on-insulator RAM circuits, which can also be neutron-sensitized with one or more proximal neutron conversion materials in accordance with the invention to make neutron detectors. Still further, the invention is not limited to static RAM type memory devices, but can also be incorporated in other types of charge-sensitive devices.
1. A neutron detection device comprising: an active semiconductor layer including a plurality of charge-sensitive cells; and a neutron conversion layer located in close proximity to the cells.
2. A neutron detection device as claimed in claim 1, further comprising an insulating layer located between the active semiconductor layer and the neutron conversion layer.
3. A neutron detection device as claimed in claim 1, further comprising a barrier layer located between the neutron conversion layer and the active semiconductor layer.
4. A neutron detection device as claimed in claim 3, wherein the barrier layer comprises silicon nitride.
5. A neutron detection device as claimed in claim 1, wherein the neutron conversion layer comprises boron.
6. A neutron detection device as claimed in claim 1, wherein the neutron conversion layer comprises borosilicate glass.
7. A neutron detection device as claimed in claim 6, wherein the borosilicate glass includes 5% boron.
8. A neutron detection device as claimed in claim 1, wherein the neutron conversion layer includes lithium.
9. A neutron detection device as claimed in claim 1, further comprising a second neutron conversion layer formed in proximity to the active semiconductor layer.
10. A neutron detection device as claimed in claim 9, wherein one of the neutron conversion layers comprises boron and the other of the neutron conversion layers comprises lithium.
11. A method of manufacturing a neutron detector from a memory device, wherein the memory device includes an active semiconductor layer, a base substrate and an insulating layer between the active semiconductor layer and the base substrate, the method comprising: removing the base substrate layer from the memory device to expose the insulating layer; and forming a neutron conversion layer on the insulating layer.
12. A method of manufacturing a neutron detector from a memory device as claimed in claim 11, further comprising forming a barrier layer on the insulating layer prior to forming the neutron conversion layer.
13. A method of manufacturing a neutron detector from a memory device as claimed in claim 11, further comprising forming a second neutron conversion layer on the neutron conversion layer.
14. A method of manufacturing a neutron detector from a memory device as claimed in claim 11, wherein the neutron conversion layer comprises boron.
15. A method of manufacturing a neutron detector from a memory device as claimed in claim 13, wherein the neutron conversion layer formed on the insulating layer comprises boron and the second neutron conversion layer comprises lithium.
16. A method of manufacturing a neutron detector from a memory device, wherein the memory device includes an active semiconductor layer, a base substrate and an insulating layer between the active semiconductor layer and the base substrate, the method comprising: removing the base substrate layer and the insulating layer from the memory device; and forming a neutron conversion layer on the active semiconductor layer.
17. A method of manufacturing a neutron detector from a memory device as claimed in claim 16, further comprising forming a barrier layer on the active semiconductor layer prior to forming the neutron conversion layer.
18. A method of manufacturing a neutron detector from a memory device as claimed in claim 16, further comprising forming a second neutron conversion layer on the neutron conversion layer formed on the active semiconductor layer.
19. A method of manufacturing a neutron detector from a memory device as claimed in claim 16, wherein the neutron conversion layer comprises boron.
20. A method of manufacturing a neutron detector from a memory device as claimed in claim 18, wherein the neutron conversion layer formed on the insulating layer comprises boron and the second neutron conversion layer comprises lithium.
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot5/blob/main/LICENSE
import os
import numpy as np
import pytest
import uproot
import uproot.writing
ROOT = pytest.importorskip("ROOT")
def test_recreate(tmp_path):
filename = os.path.join(tmp_path, "whatever.root")
f1 = ROOT.TFile(filename, "recreate")
mat = ROOT.TMatrixD(3, 3)
mat[0, 1] = 4
mat[1, 0] = 8
mat[2, 2] = 3
mat.Write("mat")
f1.Close()
with uproot.open(filename) as f2:
assert f2["mat"].member("fNrows") == 3
assert f2["mat"].member("fNcols") == 3
assert np.array_equal(
f2["mat"].member("fElements"), [0, 4, 0, 8, 0, 0, 0, 0, 3]
)
|
User blog comment:TsarHypnoss/Vandalism/@comment-39677387-20200516142423
a fanon wiki can easily stop this!
https://mcdonalds-fanon.fandom.com/wiki/McDonald%27s_fanon_Wikia
the vandals can post there, and we can fix this wiki
|
using System;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Networking;
using UnityEngine.UI;
public class ExampleListener : CaptainsMessListener
{
public enum NetworkState
{
Init,
Offline,
Connecting,
Connected,
Disrupted
};
[HideInInspector]
public NetworkState networkState = NetworkState.Init;
public GameObject gameSessionPrefab;
public ExampleGameSession gameSession;
public void Start()
{
networkState = NetworkState.Offline;
ClientScene.RegisterPrefab(gameSessionPrefab);
}
public override void OnStartConnecting()
{
networkState = NetworkState.Connecting;
}
public override void OnStopConnecting()
{
networkState = NetworkState.Offline;
}
public override void OnServerCreated()
{
// Create game session
ExampleGameSession oldSession = FindObjectOfType<ExampleGameSession>();
if (oldSession == null)
{
GameObject serverSession = Instantiate(gameSessionPrefab);
NetworkServer.Spawn(serverSession);
}
else
{
Debug.LogError("GameSession already exists");
}
}
public override void OnJoinedLobby()
{
networkState = NetworkState.Connected;
gameSession = FindObjectOfType<ExampleGameSession>();
if (gameSession)
gameSession.OnJoinedLobby();
}
public override void OnLeftLobby()
{
networkState = NetworkState.Offline;
// Mirror the null check used in OnJoinedLobby in case no session was found
if (gameSession)
gameSession.OnLeftLobby();
}
public override void OnCountdownStarted()
{
gameSession.OnCountdownStarted();
}
public override void OnGame()
{
gameSession.OnGame();
}
public override void OnCountdownCancelled()
{
gameSession.OnCountdownCancelled();
}
public override void OnStartGame(List<CaptainsMessPlayer> aStartingPlayers)
{
Debug.Log("GO");
gameSession.OnStartGame(aStartingPlayers);
}
}
|
fix(solc): prefer dapptools style remappings
Motivation
Fix an edge case in remapping resolution that made it nondeterministic for layouts like:
ds-test
├── demo
│ └── demo.sol
└── src
└── test.sol
Solution
Favor dapptools-style lib paths ("lib"/"src") over others ("demo") with an additional check
[ethers-solc/src/remappings.rs:463] Remapping::find_many("git/rust/foundry/integration-tests/testdata/guni-lev/lib") = [
Remapping {
name: "ds-test/",
path: "git/rust/foundry/integration-tests/testdata/guni-lev/lib/ds-test/src/",
},
]
PR Checklist
[x] Added Tests
[x] Added Documentation
[ ] Updated the changelog
This PR broke remappings, resulting in internal libs being unbundled from their packages. Example from Vaults using Solmate:
➜ vaults git:(main) ../foundry/target/debug/forge remappings
tokens/=/Users/gakonst/oss/vaults/lib/solmate/src/tokens/
auth/=/Users/gakonst/oss/vaults/lib/solmate/src/auth/
weird-erc20/=/Users/gakonst/oss/vaults/lib/solmate/lib/weird-erc20/src/
users/=/Users/gakonst/oss/vaults/lib/solmate/src/test/utils/users/
authorities/=/Users/gakonst/oss/vaults/lib/solmate/src/auth/authorities/
utils/=/Users/gakonst/oss/vaults/lib/solmate/src/test/utils/
ds-test/=/Users/gakonst/oss/vaults/lib/solmate/lib/ds-test/src/
demo/=/Users/gakonst/oss/vaults/lib/solmate/lib/ds-test/demo/
test/=/Users/gakonst/oss/vaults/lib/solmate/src/test/
mocks/=/Users/gakonst/oss/vaults/lib/solmate/src/test/utils/mocks/
it unbundled the solmate dep into all its sub-parts
|
Self-Organizing and Scalable Routing Protocol (SOSRP) for Underwater Acoustic Sensor Networks
Underwater Acoustic Sensor Networks (UASN) have two important limitations: a very aggressive (marine) environment, and the use of acoustic signals. This means that the techniques for terrestrial wireless sensor networks (WSN) are not applicable. This paper proposes a routing protocol called "Self-Organizing and Scalable Routing Protocol" (SOSRP), which is decentralized and based on tables residing in each node. A combination of the hop value to the collector node and the distance is used as a criterion to create routes leading to the sink node. The expected functions of the protocol include self-organization of the routes, tolerance to failures and detection of isolated nodes. Through the implementation of SOSRP in Matlab, together with propagation and energy models appropriate for the marine environment, performance results are obtained in different scenarios (varying both nodes and transmission range) that include parameters such as end-to-end packet delay, energy consumption and length of the created routes (with and without failures). The results obtained show a stable, reliable and suitable operation for the deployment and operation of nodes in UASNs.
Introduction
Underwater Wireless Sensor Networks (UWSNs) are collections of many autonomous sensor nodes, networked together through wireless links, which perform collaborative tasks to monitor physical or environmental parameters such as pressure, temperature, sound, etc. These networks were initially developed using the concepts of terrestrial WSN systems, but the fundamental challenges of the two technologies are different. The early implementations of UWSNs with radio frequency (RF) and optical links proved that new solutions and approaches are required for the underwater environment, which confronts different challenges and limitations in terms of signal propagation, low efficiency of radio waves, a transmission range of a few meters, and scattering in the case of optical waves. Therefore, acoustic waves prove to be a promising communication technology, and because of this underwater sensor networks are also referred to as Underwater Acoustic Sensor Networks (UASNs). The use of acoustic communication imposes several constraints; therefore parameters such as carrier frequency, attenuation, noise, fading, propagation delay, and limited bandwidth are important to consider when designing protocols for UASNs. Furthermore, because of water currents and various underwater activities the underwater sensors remain mobile, which makes traditional routing inefficient since the network topology changes over time. Therefore, network topology is also a vital factor in protocol design. Reliability, capacity and energy consumption of the network are affected and determined through topology control techniques. The reliability of the underwater network topology is highly important because of the high cost of sensor nodes. Moreover, the propagation environment also has a substantial effect on energy consumption, which can result in node failure because of rapid energy depletion. For this reason, single-point topologies should be avoided because a failure in a single node of the network could lead to overall network collapse.
With the advancement in the field of wireless communication and sensor technology, researchers have proposed numerous routing techniques for UASNs [1]. In Reference [2], a new clustering algorithm is proposed using a Low-Energy Adaptive Clustering Hierarchy (LEACH) protocol to address the problem of large clusters and nodes at the edge consuming more energy. In the cluster head (CH) election phase, the position of CH is considered to be the cluster center from the points which were uniformly distributed in the network. For nodes to select the CH, weight factor is introduced which considers the energy consumption between node and CH, and every CH and a base station (BS): It balances the cluster size, reducing the total network energy consumption. Energy Aware and Void Avoiding Routing Protocol (EAVARP) operates in two phases: layering and data collection phase [3]. The sensor nodes are distributed in concentric shells built during the layering phase around the sink. The protocol uses opportunistic directional forward strategy (ODFS) for forwarding the data in a data collection phase to avoid the flooding, cyclic transmissions and voids. The protocol extends the network lifetime through balancing the energy in the network compared to other routing protocols.
To address the problem of void nodes and energy-reliability trade off, a Stateless Opportunistic Routing Protocol (SORP) is proposed [4]. The protocol performs a depth-based stateless routing which can avoid the trapped and void areas. It selects the candidate forwarding node through calculating holding time for each node in the forwarding area, using the local information acquired in updating phase from the neighboring nodes. SORP decreases the energy consumption, packet loss and end-to-end delay in all scenarios, sparse or dense. In UWSNs, the efficient data delivering is still a challenge because of limitations of acoustic communication and underwater conditions. To address the packet delivery problem, a hop-based protocol is proposed in Reference [5], known as H n -PERP. The author proposes a centralized model, providing a mechanism for scheduling and data transmission processing. The protocol enhances the energy efficiency and network throughput through power monitoring solutions. In BEEC [6] routing protocol, a circular field is divided into ten sub regions and each region is further divided into eight sectors. The data is collected from the sectors using two mobile sinks, moving in circular patterns where each covers five different sectors in sequence. The protocol increases the performance of network in term of lifetime, energy consumption, throughput and stability. However, the sink nodes follow a fixed circular pattern which leads to packet loss and higher delay because of unawareness of network conditions. Additionally, to collect the data from sensor nodes, Autonomous Underwater Vehicles (AUV) are used to move the sink which requires extra resources to operate the AUVs. A routing algorithm has been proposed through remodeling the Vector-Based Forwarding (VBF) protocol in Reference [7]. It considers the routing pipe radius as a function of node range, number of nodes and dimension of environment. The selection of guiding node is based on the residual energy of the receiving node, increasing or reducing the radius of pipe. The results indicate that the protocol decreases the energy consumption in the network with large number of nodes by changing the routing pipe's width in proportion to network density. In Reference [8], the proposed protocol routes the data based on its priority. The nodes are deployed in a cube considering the underwater scenario. The algorithm distinguishes the data based on two traffic classes: high priority and low priority. For traffic with high priority, the forwarder node is selected based on the minimum distance to the BS and residual energy present in the target cube. This improves the performance in terms of energy, end-to-end delay and packet loss. In Reference [9], the proposed protocol combines a Depth Based Routing (DBR) protocol with clustering approach to minimize the energy consumption and distribute the load among the nodes in the network. The classification of nodes (normal, CH and dead) in the network is performed based on the assignment of a random number between 0 and 1. In the CH detection phase, if the residual energy of the CH is less than threshold, it will be eliminated as CH and a new CH is formed. The approach has improved the energy efficiency through implementing clustering in depth-based routing. In Reference [10], Energy Efficient DBR (EEDBR) is compared with simple DBR and a hop-by-hop dynamic address based (H2-DAB) protocol. 
The protocol selects the next node based on the lowest depth and highest residual energy among the neighboring nodes. The results show that both path loss and packet delivery ratio are almost the same for DBR and EEDBR, whereas H2-DAB has a higher end-to-end delay.
The protocol proposed in this work is oriented to the network layer, so it is assumed that a collision-free Medium Access Control (MAC) protocol has been previously implemented. There are many MAC techniques applicable to UASNs [11][12][13][14], with TDMA [15,16] being one of the most bandwidth-efficient. The proposed protocol achieves stability as the network size increases, providing efficient paths to the sink node and fault tolerance through self-organizing the nodes in the network.
In comparison to Reference [2], SOSRP follows a decentralized mechanism where the failure of a single node does not disturb the communications within the network. However, in a clustering approach, the failure of a master node disconnects an entire cluster from the network. SOSRP is a hop-to-hop communication protocol where nodes relay messages to the sink, whereas in Reference [6] Autonomous Underwater Vehicles (AUVs) are required as mobile sinks to collect the data from the deployed nodes. Therefore, extra resources to control the AUVs are needed. In Reference [5], a hop-to-hop power-efficient protocol is proposed. However, the results shown do not present a clear picture of the effect of multi-hop operation on the energy consumed, only the number of hops in the available path.
The paper is structured as follows: Section 2 includes the system model, describing the 3D network model, mathematical model of the propagation, and the energy consumption for undersea environment. Section 3 explains the working methodology of the proposed protocol. Section 4 describes the performance metrics, analysis, and parameters used for evaluation. After that, Section 5 discusses in detail the obtained results in terms of random topology, fault tolerance and scalability. Finally, conclusions are given in Section 6.
System Model
The system model is a three-dimensional layout including depth, due to its impact on important parameters such as energy. A propagation model is implemented considering the underwater conditions and several works proposed by researchers. Moreover, it is worth mentioning that energy consumption is an important parameter to consider in the design of a protocol for any sensor network. Therefore, both the energy and acoustic propagation models have been implemented in Matlab, including the measurement of the energy dissipated during network operation.
Network Model
The sensor network is a 200 × 200 × 200 m cube: the top of the cube is considered to be the surface of the water and the bottom the seabed. The nodes are deployed one by one in 3D space with random location and depth to address a realistic scenario, including a single sink node on the surface with depth zero. The nodes are randomly placed to assure the flexibility of the proposed routing protocol. Once the nodes are placed, they are considered to remain static and do not drift because of marine currents and waves. Each node is placed at a minimum of 40 meters separation from the surrounding nodes. This is done to prevent nodes from sending packets of similar measured events to the sink. The separation between nodes i and j is calculated through the well-known Euclidean metric, d(i, j) = sqrt((x_i - x_j)^2 + (y_i - y_j)^2 + (z_i - z_j)^2), where (x_i, y_i, z_i) are the location coordinates of node i. Since the nodes are placed randomly, it is assumed that each node uses a power control mechanism to adapt and save transmission power based on the distance between two nodes. Figure 1 presents an example of the network model where the nodes are randomly located in 3D space.
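As an illustration of this deployment rule, the following Python sketch (our own reconstruction, not code from the paper, which was implemented in Matlab) places nodes uniformly at random inside the cube and rejects candidates closer than 40 m to any already placed node; the sink position within the surface is an assumption.
import math
import random

CUBE_SIDE = 200.0      # m, edge of the deployment cube
MIN_SEPARATION = 40.0  # m, minimum distance between any two nodes

def euclidean(a, b):
    # Straight-line distance between two (x, y, z) points.
    return math.sqrt(sum((ai - bi) ** 2 for ai, bi in zip(a, b)))

def deploy_nodes(n_nodes, seed=0):
    random.seed(seed)
    nodes = [(100.0, 100.0, 0.0)]  # sink on the surface (depth zero); position assumed
    while len(nodes) < n_nodes + 1:
        candidate = (random.uniform(0, CUBE_SIDE),
                     random.uniform(0, CUBE_SIDE),
                     random.uniform(0, CUBE_SIDE))  # third coordinate is the depth
        if all(euclidean(candidate, p) >= MIN_SEPARATION for p in nodes):
            nodes.append(candidate)
    return nodes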
Propagation Model
Many characteristics of the underwater environment affect acoustic communication, which makes the propagation channel much more complex compared to the terrestrial communication channel. They include temperature, salinity, multipath fading, path loss, depth and the Doppler effect. The acoustic signal propagation, network performance and energy dissipation are highly affected by these factors. According to these considerations, the propagation speed of sound underwater can be expressed as a function of T, the temperature (in Celsius), S, the salinity of sea water (in parts per thousand, ppt), and H, the depth (m) of the sensor nodes. In UASNs, the signal-to-noise ratio (SNR) is a measure of transmitted signal power to noise power (in dB re 1 µPa [17]), given by the passive sonar equation [18], where SL is the source level (dB re 1 µPa), TL the transmission losses (dB), NL the ambient noise (dB re 1 µPa) and DL the directivity index (dB). Ambient noise in shallow waters is mainly caused by shipping activity or biological noise, and a suitable level adopted is 70 dB re 1 µPa (50 dB re 1 µPa for deep water). The source level is defined as the intensity of the sound radiated by the source at a distance of 1 meter, expressed in dB re 1 µPa [18] in terms of the transmitted power intensity I_t (W/m²).
Hydrophones can be assigned a typical value of 20 dB [18] for SNR, where DL is zero when considering omnidirectional modems. Therefore, using (3), SL can be written for shallow waters (SL_SH) and for deep waters (SL_DP). Transmission Loss (TL) depends on the absorption coefficient (α(f), dB/km) and on distance. It is the collective depletion in acoustic intensity during wave propagation, which significantly affects underwater communication. Another cause of transmission loss is spreading, with a cylindrical shape for depths lower than 100 meters (shallow waters) and spherical at greater depths (deep waters). In this work, nodes are deployed with random depths from zero to 200 meters, so both cylindrical and spherical spreading are considered. In every case, TL (dB) can be estimated using (7)-(8) [18], where r is the distance (in meters) between transmitter and receiver, and TL_CS and TL_SS denote the transmission loss for cylindrical and spherical spreading, respectively. The absorption coefficient α(f) for frequencies ranging from 100 Hz to 10 kHz is expressed through Thorp's propagation model, with the frequency f measured in kHz.
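For readers who want to reproduce the link-budget side of the simulation, the sketch below gives one possible Python rendering of this propagation model. It is our own reconstruction under stated assumptions: the standard passive sonar form SNR = SL - TL - NL + DL, spreading losses of 10 log10(r) (cylindrical) or 20 log10(r) (spherical) plus absorption over the path, and the commonly used Thorp expression for α(f); the paper's equations (3) and (7)-(8) are not reproduced verbatim here.
import math

def thorp_absorption_db_per_km(f_khz):
    # Thorp's empirical absorption coefficient (dB/km), f in kHz; a commonly
    # used form for 100 Hz - 10 kHz, assumed here to match the paper's model.
    f2 = f_khz ** 2
    return 0.11 * f2 / (1 + f2) + 44 * f2 / (4100 + f2) + 2.75e-4 * f2 + 0.003

def transmission_loss_db(r_m, f_khz, shallow=True):
    # Spreading (cylindrical for shallow water, spherical for deep water)
    # plus absorption accumulated over the path length.
    spreading = (10 if shallow else 20) * math.log10(r_m)
    return spreading + thorp_absorption_db_per_km(f_khz) * r_m / 1000.0

def snr_db(source_level_db, r_m, f_khz, shallow=True, noise_db=70.0, di_db=0.0):
    # Passive sonar equation: SNR = SL - TL - NL + DI (all in dB re 1 uPa).
    return source_level_db - transmission_loss_db(r_m, f_khz, shallow) - noise_db + di_db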
Energy Consumption Model
The sensor nodes in WSNs are mostly powered by batteries, and it is inconvenient to replace or recharge them when they are depleted. Considering the underwater environment, the cost and time required for such operations are high, so energy efficiency is one of the major concerns in designing protocols for UASNs. The energy to transmit the data from one node to another over a distance is given by Reference [18] in terms of p_l (bits), the packet length, E_elec (J/bit), the electronics energy consumed, E_amp (J/bit), the amplifier energy dissipation, P_t (W), the power transmitted, and R (bps), the transmission rate. The P_t (W) can be expressed as [18] P_t = A * I_t = 2πr * H * I_t (11). Similarly, the energy consumed (in J) during the reception process is given in [19] in terms of E_DA (J), the energy consumed during the data aggregation process.
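Equations (10) and (12) did not survive extraction, so the snippet below encodes one common first-order radio energy model consistent with the parameters listed above; the exact expressions used by the authors may differ, and the forms chosen here (transmit energy as per-bit electronics plus amplifier terms plus the acoustic power held over the packet duration, receive energy as per-bit electronics plus aggregation) are assumptions, as are the default parameter values.
import math

def acoustic_power_w(intensity_w_m2, r_m, depth_m):
    # Equation (11): P_t = A * I_t = 2*pi*r*H * I_t (cylindrical radiating area).
    return 2 * math.pi * r_m * depth_m * intensity_w_m2

def tx_energy_j(p_l_bits, p_t_w, rate_bps, e_elec=50e-9, e_amp=100e-12):
    # Assumed transmit-energy form: per-bit electronics and amplifier terms plus
    # the acoustic power P_t sustained for the packet transmission time p_l / R.
    # The e_elec and e_amp defaults are illustrative placeholders, not paper values.
    return p_l_bits * (e_elec + e_amp) + p_t_w * p_l_bits / rate_bps

def rx_energy_j(p_l_bits, e_elec=50e-9, e_da=5e-9):
    # Assumed receive-energy form: per-bit electronics plus data aggregation energy.
    return p_l_bits * (e_elec + e_da)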
Proposed Protocol
Considering these challenges and the harsh ocean environment, a self-organizing protocol is proposed to achieve a scalable, robust and fault-tolerant system, known as the Self-Organizing and Scalable Routing Protocol (SOSRP). SOSRP is designed to conserve energy and ensure packet delivery to the sink through power control and hop-count-based techniques, including a fault-tolerance algorithm. The protocol enables a node to find the neighboring nodes, forming a connectivity matrix. The packet routing is based on the smallest distance and hop count between the source and the sink.
Network Initialization
The nodes are deployed one by one at random depths underwater, having random (x, y, z) coordinates, whereas the sink is placed at the surface of the sea with zero depth. Initially, after deployment, nodes do not have any prior information about the address and location of the sink node.
In the network initialization phase, the sink node broadcasts a control packet named the "HELLO" packet within a defined transmission radius, containing the base station ID and a hop count, which denote the address and the total number of wireless links from node to sink, respectively. After receiving the packet, the node increments the hop-count value, stores it if no hop count is already present or if it is smaller than the stored hop count, and relays the message with the updated value. If the hop count is equal to or larger than the current value, the node discards the message. This process continues until the message reaches every node in the network.
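A minimal sketch of this flooding rule, in Python and under our own naming assumptions (the paper does not give pseudocode), could look as follows:
class Node:
    def __init__(self, node_id):
        self.node_id = node_id
        self.hop_count = None          # unknown until a HELLO packet arrives
        self.neighbors = []            # filled during neighbor discovery

def handle_hello(node, received_hop_count):
    # Flooding rule for the HELLO packet: keep the smallest hop count to the sink
    # seen so far and only re-broadcast when the stored value improves.
    candidate = received_hop_count + 1
    if node.hop_count is None or candidate < node.hop_count:
        node.hop_count = candidate
        for neighbor in node.neighbors:
            handle_hello(neighbor, candidate)   # relay with the updated value
    # otherwise the packet is discarded
# The sink starts the flood by calling handle_hello(n, 0) for each neighbor n.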
Neighbor Discovery
After the initialization phase, a neighbor discovery phase begins where each node broadcasts a four-byte request message (see Figure 3) in the defined transmission range to discover the neighboring nodes. The packet encapsulates the sender ID and a timestamp label containing the time when the packet is transmitted. In response, neighboring nodes forward an "INFO" message of 6 bytes, containing the sender/neighbor ID, timestamp, hop count and distance to the sink, with the format shown in Figure 4.
Upon receiving the packet, the node generates the neighbor table storing neighbor ID, hop count and distance from sink. The Time of Arrival technique is considered for calculating the distance between two nodes and it represents the accumulated hop-to-hop distance from that node to the destination.
To conserve energy, the neighbor discovery phase is only initiated when a change in topology is detected, such as node additions or losses.
Path Selection Criteria
The path selection criterion for SOSRP is based on hop count and distance between source and destination. The protocol selects the shortest path between source and sink. On sensing the event, the path formulation begins with the selection of next hop by source node to transmit the data to the sink node.
To diminish the energy consumption during data transmission, the selection criterion for the next node is based on the smallest hop count and distance from the source node to the sink node. When a source node has data to send, it looks up its neighbor table to select the next node. The node with the least hop-count value will be selected as the next hop. If two neighboring nodes have the same hop-count value in the neighbor table, the node with the shortest distance to the sink will be selected as the next one. Since each entry in the table contains an accumulated distance from source to destination, as mentioned in Section 3.2, the selected node represents the overall selected path leading to the sink node as an optimal case.
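A compact way to express this tie-breaking rule (our own Python sketch; the field names are assumptions based on the neighbor-table description above) is:
def select_next_hop(neighbor_table):
    # neighbor_table: list of dicts like
    #   {"id": 7, "hop_count": 2, "distance_to_sink": 153.4}
    # Primary criterion is the smallest hop count; the accumulated distance to
    # the sink breaks ties between neighbors with equal hop counts.
    return min(neighbor_table, key=lambda n: (n["hop_count"], n["distance_to_sink"]))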
Packet Transmission
In the last phase, the packet is transmitted from source node to the sink using multi-hop communication, where intermediate nodes are selected based on smallest hop count and shortest distance between source and base station.
It is important for a source to select an efficient forwarding node to conserve energy and minimize delay. Therefore, on acquiring the data, the source node checks the local routing table for the selection of the next hop. The routing table holds the neighbor ID, hop count and distance from the transmitting node to the destination. The forwarding node is selected using the path selection criteria, where hop count and distance are compared among all the entries stored in the table for the purpose of packet transmission. Upon selection of the forwarding node, the source node adds a header containing the sink address, selected forwarding node ID, source address/ID, timestamp, hop count and distance. The forwarding node ID allows a neighbor to recognize that the packet is meant for it and prevents other neighboring nodes from transmitting it to the sink. As a result, it diminishes the possibility of receiving multiple copies of the same packet. After attaching the header to the data packet, the data is transmitted. Upon reception of the packet, the receiving node (selected node) sends an acknowledgement packet to the source. This process repeats at each node until the packet reaches the sink node. However, the scope of the acknowledgement is limited to hop-to-hop. If a route failure is detected (i.e., no acknowledgement is received by the sending node), the algorithm selects an alternate path to transmit the data based on the routing table information stored in it, repeating the process from the path selection phase.
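The hop-by-hop forwarding step with acknowledgement-based fallback can be sketched as follows (again an illustrative Python sketch under assumed names, building on select_next_hop above; send_and_wait_for_ack is a hypothetical primitive standing in for the MAC-layer exchange):
def forward_packet(node, packet, send_and_wait_for_ack):
    # Try neighbors in the order given by the path selection criteria; if the
    # best choice does not acknowledge, fall back to the next candidate.
    candidates = sorted(node.neighbor_table,
                        key=lambda n: (n["hop_count"], n["distance_to_sink"]))
    for candidate in candidates:
        packet["next_hop_id"] = candidate["id"]   # header field naming the forwarder
        if send_and_wait_for_ack(candidate["id"], packet):
            return candidate["id"]                # handed off successfully
    return None                                   # no reachable neighbor; node is isolated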
Performance Metrics
The selected metrics to evaluate the performance of SOSRP are energy consumption, end-to-end delay, hop count and number of paths (to the sink). In order to test for stable behavior, two parameters are swept: transmission range and network size (number of nodes). A node in a sensor network dissipates energy while performing the operations needed to collect the data required by the application: processing, listening to the channel, transmitting, and receiving data. The energy consumption of a node is the sum of the energy it dissipates while performing these operations, and the accumulation of the energy dissipated by every node defines the total energy consumption of the network.
The end-to-end delay refers to the time taken to transmit the data packet from source to sink, irrespective of the number of intermediate nodes. It is the sum of the transmission, propagation, queuing, and processing delays in the network.
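Written compactly (the notation is ours, not the paper's), these two metrics can be expressed as

$$E_{\text{total}} = \sum_{i=1}^{N}\left(E_{i}^{\text{proc}} + E_{i}^{\text{listen}} + E_{i}^{\text{tx}} + E_{i}^{\text{rx}}\right), \qquad D_{\text{e2e}} = \sum_{h=1}^{H}\left(d_{h}^{\text{tx}} + d_{h}^{\text{prop}} + d_{h}^{\text{queue}} + d_{h}^{\text{proc}}\right),$$

where $N$ is the number of nodes in the network and $H$ is the number of hops between source and sink.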
The hop count is the number of intermediate nodes between source and destination. In a multi-hop approach, the higher the hop count, the higher the energy consumption and end-to-end delay; it is therefore essential to keep the hop count small. Similarly, the hop distance is measured between two neighboring nodes and is one of the key parameters used to select the next node in the path. The energy dissipated depends on the hop distance: the smaller the hop distance, the less energy is consumed in delivering the data.
In WSNs, the transmission range has a significant impact on several network parameters, as it defines the coverage area of a sensor node. If the range of the sensor nodes is large enough to reach the sink node directly (in one hop), they will consume more energy to communicate because of the large distances. To conserve energy it is therefore essential to keep the transmission power to a minimum, for which multi-hop (more than a single hop) communication is the best approach. Based on this, a transmission power control mechanism is implemented that conserves energy by calculating the power required to send each packet hop by hop, considering the changing distance between the nodes and the receiver's sensitivity. Different transmission ranges are tested to evaluate their effect on network performance, as can be seen in Section 5.
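The paper states that the per-hop transmit power is derived from the hop distance and the receiver's sensitivity but does not give the propagation model, so the following sketch is only a plausible reading: it assumes a standard acoustic path-loss model (practical spreading plus Thorp absorption), which may differ from the authors' implementation.

```cpp
#include <algorithm>
#include <cmath>
#include <iostream>

// Thorp absorption coefficient in dB/km for a frequency in kHz (standard formula).
double thorpAbsorptionDbPerKm(double fKHz) {
    double f2 = fKHz * fKHz;
    return 0.11 * f2 / (1 + f2) + 44.0 * f2 / (4100 + f2) + 2.75e-4 * f2 + 0.003;
}

// Source level (dB) required so that the received level meets the receiver
// sensitivity after spreading and absorption losses over one hop.
double requiredTxLevelDb(double distM, double fKHz,
                         double rxSensitivityDb, double spreading = 1.5) {
    double pathLossDb = 10.0 * spreading * std::log10(std::max(distM, 1.0))
                      + thorpAbsorptionDbPerKm(fKHz) * (distM / 1000.0);
    return rxSensitivityDb + pathLossDb;
}

int main() {
    // Example: a 70 m hop at 24 kHz with an assumed -90 dB receiver sensitivity.
    std::cout << requiredTxLevelDb(70.0, 24.0, -90.0) << " dB\n";
}
```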
Analysis
The protocol was tested against variations in several network parameters and scenarios. The simulation parameters are listed in Table 1, with some of them taken from References [19][20][21][22]. Considering the different distances between neighboring nodes, the energy and propagation models were used to calculate the power required to transmit a packet from one node to another in order to conserve energy (power control technique). Two scenarios were considered for the simulation: Optimal behavior and Pragmatic behavior. The Optimal behavior represents flawless path selection and data delivery during the entire simulation period, whereas in the Pragmatic behavior a temporary failure is introduced with a probability of 0.2 to test the fault-tolerance mechanism and its effect on the selected performance metrics.
Each scenario was further implemented for both different and identical topologies. In the different-topology case, new locations were assigned to the nodes as the network size increased, whereas in the same-topology case, new nodes were added to the existing network while keeping the previous node locations intact. In each case, the network size and transmission range were swept to identify (i) the effects of adding new nodes to the network and (ii) the optimal range for multi-hop communication. The simulation runtime was 50 rounds; in each round every node sent a single INFO packet containing the sensed data to the sink node.
Random Topology
The behavior of SOSRP was tested with different network sizes (from 50 to 100 nodes). For each network size, new random locations were set for the nodes, so the ad-hoc topology changed whenever the number of nodes changed. This was done to observe the effect of newly formed paths. Moreover, the transmission range was also swept (from 70 m to 100 m) to evaluate its effect on network connectivity in terms of three parameters: end-to-end delay, number of hops in the generated paths, and total energy consumption of the network. The results obtained for the energy consumption are in consonance with other authors [18,19], and the specifications shown in Table 1 can be met using a range of commercial modems [23,24]. With respect to the packet delay, the times shown in the figures must be added to the time used by the MAC layer, which is not included in the end-to-end delay considered here. This MAC delay depends on the specific MAC technique employed.
The results are shown in Figures 5 and 6 for a network area of 200 × 200 × 200 m, and in Figures 7 and 8 for a network area of 2000 × 2000 × 500 m, where stable operation is maintained in the network with a random topology for each network size. In general, increasing the network size tends to reduce or maintain the end-to-end delay, which means the new routes converge to the sink in an efficient way. Moreover, a routing delay lower than two seconds for the worst case studied (Figure 8) is a reasonable value [18] for such an extensive area of 2000 × 2000 × 500 m.
Another evident result is observed when only the transmission range is increased: the delay decreases. The reason is that the coverage area of the nodes is increased, which in turn decreases the hop count between the source and destination. This effect can be seen in Figure 6, where the delay for the longest path is as high as 1.6 s for a 50-node network and the shortest transmission range (70 meters). Another effect can be witnessed in Figure 8, where in a 70-node network with 700 meters of transmission range the delay increases with respect to larger networks. This is due to the large network area and the random placement of nodes increasing the number of hops, and it indicates that SOSRP is adopting the new paths leading to the sink node efficiently. However, a higher transmission range also increases the number of interferences because of spatial overlapping; the MAC implemented must work according to the new situation, avoiding the new possible interferences.
Another parameter to measure the efficiency in multi-hop routing protocols is the hop count in a route (path always ending in the sink node). A low number of hops in every route is desirable to keep a limited end-to-end delay.
The results of the simulations are shown in Figure 9. This set of figures has a double interest: to know the number of hops in the routes generated by SOSRP, and to see the influence of the transmission range on this value. Generally, increasing the transmission range leads to shorter routes (a lower hop count); this effect is evident in Figure 9b. On the other hand, it must be noted that when the network size changes, new random locations are selected for the nodes, changing the topology. For that reason, the result obtained for one network size does not simply correspond to adding a few nodes to the previous one. Despite this, the results are consistent with a stable behavior of the SOSRP protocol proposed here.
In order to evaluate the results in terms of energy, we must consider two logical effects. One of them is that as the network size grows, the energy consumption must also increase. The reason is obvious: more nodes are participating in sensing and transmitting data. The second logical effect is related to the transmission range: if it increases, the paths to the sink have fewer hops due to the nodes being not too far from each other. This fact leads us to reduce the energy employed in a small network area such as 200 × 200 × 200 m.
Both effects can be seen in the results presented in Figures 10 and 11 for a network area of 200 × 200 × 200 m. Figures 10 and 12 show the total energy consumption for 50 rounds, while Figures 11 and 13 show the energy consumption for the longest path only. The energy consumed for transmitting data through the longest path (Figure 11) has a maximum of approximately 2.5 mJ for a network size of 50 nodes and a transmission range of 70 meters. The effect of choosing a large network area can be witnessed in Figures 12 and 13, where the network area is 2000 × 2000 × 500 m. It can be observed in Figure 12 how the energy tends to decrease when the transmission range changes from 500 to 600 meters. However, due to the random placement of nodes in a large area, the distances among nodes are increased; therefore, the energy consumption also increases because more power is used to communicate with neighboring nodes at farther distances.
Fault Tolerance
Because of the random deployment of nodes, if a node has N neighbors it will have at least N possible paths leading to the sink. Among the available paths, the one with the smallest hop count and shortest distance to the sink is considered the optimal path, while the others are alternates. An alternate path is the best possible route available after the optimal path, selected using the path selection criterion when a fault is detected. To further evaluate the fidelity of SOSRP, a fault probability of 0.2 is applied; e.g., for 5000 optimal paths found, 1000 pragmatic paths will be triggered.
In multi-hop communication, it is desirable to keep the hop count in the path to a minimum to limit the end-to-end delay and the energy consumption. The results compare the total number of paths generated by SOSRP with the number of hops in each path for the optimal and pragmatic behaviors of the protocol, and are shown in Figures 14 and 15: Figure 14 shows the number of hops per route for a network size of 50 nodes, and Figure 15 for 100 nodes. It is noticeable from the results that SOSRP successfully responds to a fault detected in the optimal path by selecting a new route to the sink. However, this increases the number of paths with a higher hop count, thus affecting the end-to-end delay and energy consumption of the network.
This effect can be observed in Figure 14a, where 50 nodes are deployed with a transmission range of 70 meters. The figure shows the influence of the transmission range on the pragmatic behavior of the network, increasing the hops up to 12 in the alternate path. Similar results can be witnessed in Figure 15a.
However, increasing the transmission range to 100 meters removes this problem, as shown in Figures 14b and 15b, where the highest number of hops in the optimal and alternate paths is four and five, respectively, although the pragmatic behavior still produces more routes with a higher hop count.
The results shown in Figure 16 depict the end-to-end delay of the optimal and alternate paths for different network sizes and transmission ranges of 70 and 100 meters. The alternate path shows a behavior similar to the optimal path, as is evident in Figure 16a; however, the alternate path also adds delay to the data transmission compared with the optimal path, with a maximum of approximately 1.7 s. Furthermore, increasing the transmission range mitigates the end-to-end delay because of the reduced number of hops in the newly formed routes. This effect can be observed in Figure 16b for a transmission range of 100 meters, where the delays obtained for the alternate and optimal paths are approximately 1.3 and 0.7 s respectively, much less than the delays observed in Figure 16a.
Figure 16. End-to-end delay for optimal and alternate paths for different transmission ranges: (a) 70 m, (b) 100 m.
In order to examine the performance of SOSRP, the percentage increase in end-to-end delay and energy consumption is calculated by measuring the two parameters for the optimal and alternate paths. The results are shown in Figure 17, which gives the percentage increase in maximum end-to-end delay and total energy consumption obtained for the pragmatic behavior of the protocol. The influence of increasing the transmission range and network size is obvious in both cases (a) and (b), reducing the percentage increase in end-to-end delay and energy consumption. This low increment indicates a proper operation of SOSRP.
Network Scalability
In a real UASN, when the number of nodes needs to be increased, it would be very expensive and impractical to retrieve the nodes already in service from the water and redeploy everything one by one until the total number of sensors is reached. Instead, a more realistic approach is to deploy only the new nodes needed, maintaining the topology of the previous network.
Considering the above discussion, SOSRP was tested for network scalability by first deploying 50 nodes and simulating them for different ranges (70 to 100 meters). Ten new nodes were randomly added to the network in each simulation, keeping the previous locations of the deployed nodes. The process was repeated until the network size reached 100 nodes. This approach is more realistic and allows each newly deployed node to connect with the network using the local information from neighbor nodes. Moreover, the number of operations in the network (e.g., calculating new routes) decreases, since the routes remain stable with minimal changes when a few nodes are added to the existing network. It is essential for the network to perform the necessary functions irrespective of variations in the number of nodes. Figure 18 shows the average (a) and longest-path (b) end-to-end delay. As previously discussed, it is obvious from the figures that with increasing transmission range the end-to-end delay decreases because of the lower number of hops in the selected route. However, with changing network size there is only a slight variation in delay. This stability is obtained by preserving the previous routes (previous topology) and keeping the newly deployed nodes within the transmission range of other nodes, so the maximum number of hops remains the same as before. This effect can be further seen in Figure 19.
The results in Figure 19 show the number of hops in all paths generated for different network sizes (50, 70, and 100 nodes). It is quite evident that by adding new nodes to the network the total number of paths increases, because ten nodes are added in each run. However, the maximum number of hops in any path (six) is the same for the different network sizes, as shown in Figure 19. This proves that the performance metrics of SOSRP remain stable irrespective of the increasing network size.
In order to validate the observed results, multiple simulations were performed. The results of two simulations (Test 1 and Test 2) are shown in Figure 20 for the end-to-end delay of the longest path. Comparing the results, it can be seen in Figure 20a that with increasing network size the delay of the longest path decreases for a transmission range of 70 meters. The opposite effect can be observed in Figure 20b, where the delay increases up to a network size of 80 nodes for the same 70-meter transmission range. This is because of the random placement of the new nodes, which increases the number of hops in the selected route. However, the maximum longest-path delay obtained in Test 2 (approximately 1.45 s) is still less than the maximum delay in Test 1. Stability in the longest-path delay is obtained by increasing the transmission range, as is noticeable in the results for the other transmission ranges (80, 90 and 100 meters).
Figure 20. End-to-end delay for the longest path for two particular simulations: Test 1 (a) and Test 2 (b).
Conclusions
With advancements in the field of wireless communication and sensor technology, new techniques and protocols are being proposed for UASNs. These networks have become popular among researchers because of applications such as disaster prevention, ocean exploration and resource discovery. Different centralized and distributed routing approaches have been proposed to make underwater communication efficient.
In this paper, using the concept of a decentralized network, a Self-Organizing and Scalable Routing Protocol (SOSRP) is proposed in which each node forms local connectivity based on the information acquired from the neighboring nodes and performs the data transmission. The protocol uses multi-hop communication to transmit the sensed data to the sink node. Each node builds its routing table using the control packets broadcast in the initialization and neighbor discovery phases, and path selection is based on the hop count and distance from the transmitting node to the base station (sink node). The Matlab platform was used to simulate the protocol, together with energy and propagation models for acoustic communication that reflect undersea conditions. The performance of the protocol was measured in terms of end-to-end delay, energy consumption and number of hops in the path, varying the network size and transmission range for the optimal and pragmatic behaviors of SOSRP. Through different simulations, it was found that an optimal transmission range and network size can improve the performance of the protocol by decreasing the number of hops in the generated paths. The results show that SOSRP provides stable operation, scalability, fault tolerance, and isolation detection for UASNs.
|
External iliac vein rupture is very rare, but it necessitates emergent treatment (Jiang et al. [@CR5]; Kim IH et al. [@CR6]). In most cases, this lethal condition presents with hypovolemic shock-related symptoms and signs caused by the bleeding, including syncope and hypotension. In the emergency room, it has been mistaken for a traumatic or gynecological surgical emergency because of the accompanying abdominal distension and syncope-related trauma. Findings of leaked contrast medium suggesting vessel rupture are not easily identified on abdominopelvic CT on account of the heavily compressed hematoma (Kim IH et al. [@CR6]). Immediate resuscitation is imperative, but appropriate management has been hindered in some cases (Jiang et al. [@CR5]; Kim IH et al. [@CR6]).
We herein report a case of successful transcatheter embolization for massive bleeding due to spontaneous rupture of the external iliac vein. We also present imaging findings. To the best of our knowledge, this is the first case report to show successful embolization for spontaneous rupture of the external iliac vein.
Case presentation {#Sec2}
An 82-year-old female patient with left knee pain and left leg edema was admitted to the emergency room. She had a history of hypertension and 1 week of constipation, with no other diseases or history of trauma. Shortly thereafter, she developed sudden left lower abdominal and back pain. She lost consciousness and went into shock. Blood pressure was 50/36 mmHg and her pulse rate was 150 beats per minute. The results of a complete blood count showed anemia, with a hemoglobin concentration of 7.1 g/dL and platelet count of 84,000/μL, and no other abnormal data.
Non-contrast and contrast-enhanced CT was performed. CT images of the abdomen revealed a large retroperitoneal hematoma, with leakage of contrast medium within the hematoma extending into the left flank. The left common iliac vein was dilated with a thrombus, and the origin of the bleeding was apparent (Fig. [1](#Fig1){ref-type="fig"}a). These findings suggested left external iliac vein rupture. Fig. 1**a**. Contrast-enhanced CT of the venous phase shows extravasation (arrows) from the left external iliac vein (arrowhead). **b**. DSA shows marked extravasation from the laceration to the adjacent site of the left internal iliac vein (arrows). A thrombus occluded the left common iliac vein. **c**. DSA shows that the left iliac vein was filled and fully embolized with the NBCA/Lipiodol mixture. **d**. Two days after embolization, CT shows left leg swelling similar to deep venous thrombosis. **e**. Six months after embolization, CT shows the resolution of left leg swelling
Open surgery was considered; however, since the patient's condition could have deteriorated further during the time needed to prepare for surgery, including general anesthesia, transcatheter venous embolization of the left iliac vein was selected.
The procedure was performed under local anesthesia. Sheaths (5-Fr) were introduced at the left common femoral vein. Digital subtraction angiography (DSA) from the left external iliac vein by using a 5.2-Fr compliant balloon catheter (nominal diameter of 10 mm) (Selecon MP Catheter II, Terumo Clinical Supply Co., Ltd., Gifu, Japan) inserted from the sheath showed massive extravasation from the laceration to the adjoining site of the left internal iliac vein. The left common iliac vein was occluded by thrombus (Fig. [1](#Fig1){ref-type="fig"}b).
After the 5.2-Fr balloon catheter had been inflated at the distal site of the external iliac vein to reduce the extravasation, N-butyl-2-cyanoacrylate (NBCA) was mixed with Lipiodol (Guerbet, Villepinte, France) at a ratio of 1:2. The left iliac vein was filled and completely embolized with the NBCA/Lipiodol mixture (total injected volume, 5 mL) using a 1.8-Fr microcatheter (Carnelian® PIXIE ER, Tokai Medical Products, Inc., Aichi, Japan) (Fig. [1](#Fig1){ref-type="fig"}c).
After embolization, the patient quickly recovered. One day after embolization, an inferior vena cava filter (FilterWire, Boston Scientific, Massachusetts, USA) was temporarily implanted to prevent pulmonary embolism due to the presence of a large amount of thrombus in the left iliac vein. One week later, the filter was removed. After embolization, the patient developed left leg swelling similar to deep vein thrombosis (Fig. [1](#Fig1){ref-type="fig"}d). We administered low-molecular-weight heparin, which was subsequently replaced with warfarin, and applied compression stockings. The patient was followed up 6 months later, and the leg edema gradually resolved. Due to the development of collateral circulation and the amelioration of the deep vein thrombosis, the administration of warfarin was stopped at 2 months (Fig. [1](#Fig1){ref-type="fig"}e).
Informed consent was obtained from the patient for the publication of this case report and any accompanying images.
Spontaneous rupture of the iliac vein has rarely been reported; only approximately 30 cases have been published to date (Jiang et al. [@CR5]; Kim IH et al. [@CR6]).
Despite several proposed etiologies, including venous hypertension and constipation, the underlying cause remains unknown (Jazayeri et al. [@CR4]; Kwon et al. [@CR7]).
The majority of cases have been complicated by deep venous thrombosis. Local venous hypertension due to deep venous thrombosis and venous wall weakness developing secondary to thrombophlebitis may be of etiological significance (DePass [@CR3]; Plate G et al.[@CR8]). The present case had deep venous thrombosis and constipation.
The associated hypovolemic shock-related symptoms and signs, including syncope and hypotension, have been described in the majority of these cases. Previous studies revealed that the goal of surgical management is to maintain the continuity of the ruptured iliac vein by direct suture or bypass reconstruction (Jiang et al. [@CR5]; Kwon et al. [@CR7]). To the best of our knowledge, effective transcatheter embolization for marked bleeding due to spontaneous rupture of the external iliac vein has not yet been reported.
Two cases of the endovascular management of iliac vein rupture during percutaneous interventions for occlusive lesions were previously reported. In these cases, covered stents were employed (Adams MK et al. [@CR1]). In one case, external iliac vein rupture was repaired with an endovascular stent and open laparotomy for abdominal decompression (Chen YC et al. [@CR2]).
Treatment using a covered stent might have been more favorable than transcatheter venous embolization because covered stents are able to preserve the venous flow in the iliac vein. However, the patient's condition was severe due to active and massive bleeding, and there were no covered stents in the hospital.
Following embolization in the present case, venous flow was impaired due to occlusion of the left iliac vein, leading to leg swelling. During the 6-month follow-up, the leg edema gradually resolved. Postoperative anticoagulation and the development of collateral circulation may have played important roles in maintaining venous flow and recovery.
Spontaneous rupture of the iliac vein is a very rare and lethal condition. Transcatheter venous embolization can control potentially life-threatening bleeding and may be an appropriate alternative to direct open repair. Rapid bleeding control in a critical condition is facilitated by this minimally invasive approach.
CT: Computed tomography
Fr: French gauge
DSA: Digital subtraction angiography
ES and IS performed the treatment. ES and UM performed pre and post procedure interventional follow-ups. ES and UM drafted the manuscript. All authors reviewed and revised the manuscript. All authors read and approved the final manuscript.
This research did not receive any grant from public or commercial funding resources.
The relevant data have been included in the manuscript. The datasets used and analyzed during the current study are available from the corresponding author upon reasonable request.
The Institutional Review Board of Nagasaki University Hospital approved the publication of this case report.
Written informed consent was obtained from the patient for the publication of this case report and any accompanying images.
The authors declare that they have no competing interests.
|
# Arduino Datalogger
A custom data logger based on ATmega32u4 with a custom PCB.
<img src="front.jpg" width="200"> <img src="back.jpg" width="200">
The device features the following main components:
* Atmel ATmega32u4 MCU
* Texas Instruments HDC1080 temperature and humidity sensor
* Maxim DS3231 RTC
* WS2812B RGB LED
* microSD card reader
* battery holder for CR2032 coin cell
The coin cell was initially intended for powering the RTC as well as the MCU, but it turns out the SD card doesn't work with the battery voltage.
As such, the 3-position switch is omitted and the device needs power from the USB port (e.g. from a power bank).
The battery is used for keeping the RTC settings in any case.
The schematics, bill of materials and circuit design can be found [on Open Source Hardware Lab](https://oshwlab.com/krizzli/data-logger-v2).
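For reference, a minimal logging sketch along the lines of what this hardware supports is shown below. It is not the project firmware: the pin numbers, the SD chip-select line and the particular libraries (RTClib, ClosedCube HDC1080, Adafruit NeoPixel) are assumptions, so adjust them to the actual PCB netlist.

```cpp
// Example sketch only; pins and libraries are assumptions, not taken from the project.
#include <SPI.h>
#include <SD.h>
#include <Wire.h>
#include <RTClib.h>                 // DS3231 RTC
#include <ClosedCube_HDC1080.h>     // HDC1080 temperature/humidity sensor
#include <Adafruit_NeoPixel.h>      // WS2812B status LED

const uint8_t SD_CS_PIN = 4;        // assumed SD chip-select pin
const uint8_t LED_PIN   = 5;        // assumed WS2812B data pin

RTC_DS3231 rtc;
ClosedCube_HDC1080 hdc;
Adafruit_NeoPixel led(1, LED_PIN, NEO_GRB + NEO_KHZ800);

void setup() {
  Wire.begin();
  rtc.begin();
  hdc.begin(0x40);                  // default HDC1080 I2C address
  led.begin();
  if (!SD.begin(SD_CS_PIN)) {       // SD needs USB/power-bank supply, see above
    led.setPixelColor(0, led.Color(255, 0, 0));   // red = SD init failed
    led.show();
    while (true) {}
  }
}

void loop() {
  DateTime now = rtc.now();
  File f = SD.open("log.csv", FILE_WRITE);
  if (f) {
    f.print(now.unixtime());
    f.print(',');
    f.print(hdc.readTemperature());
    f.print(',');
    f.println(hdc.readHumidity());
    f.close();
    led.setPixelColor(0, led.Color(0, 32, 0));    // dim green = sample logged
  } else {
    led.setPixelColor(0, led.Color(255, 0, 0));   // red = file open failed
  }
  led.show();
  delay(60000);                     // one sample per minute
}
```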
|
Genesis 35:29 Cross References
Genesis 35:29
And Isaac breathed his last, and he died and was gathered to his people, old and full of days. And his sons Esau and Jacob buried him.
Genesis 15:15
As for yourself, you shall go to your fathers in peace; you shall be buried in a good old age.
Genesis 49:33
When Jacob finished commanding his sons, he drew up his feet into the bed and breathed his last and was gathered to his people.
Genesis 49:31
There they buried Abraham and Sarah his wife. There they buried Isaac and Rebekah his wife, and there I buried Leah—
Ecclesiastes 12:5-7
they are afraid also of what is high, and terrors are in the way; the almond tree blossoms, the grasshopper drags itself along, and desire fails, because man is going to his eternal home, and the mourners go about the streets— before the silver cord is snapped, or the golden bowl is broken, or the pitcher is shattered at the fountain, or the wheel broken at the cistern, and the dust returns to the earth as it was, and the spirit returns to God who gave it.
Job 5:26
You shall come to your grave in ripe old age, like a sheaf gathered up in its season.
Genesis 27:41
Now Esau hated Jacob because of the blessing with which his father had blessed him, and Esau said to himself, “The days of mourning for my father are approaching; then I will kill my brother Jacob.”
Genesis 27:1-2
When Isaac was old and his eyes were dim so that he could not see, he called Esau his older son and said to him, “My son”; and he answered, “Here I am.” He said, “Behold, I am old; I do not know the day of my death.
Genesis 25:17
(These are the years of the life of Ishmael: 137 years. He breathed his last and died, and was gathered to his people.)
Genesis 25:8-9
Abraham breathed his last and died in a good old age, an old man and full of years, and was gathered to his people. Isaac and Ishmael his sons buried him in the cave of Machpelah, in the field of Ephron the son of Zohar the Hittite, east of Mamre,
Genesis 23:19-20
After this, Abraham buried Sarah his wife in the cave of the field of Machpelah east of Mamre (that is, Hebron) in the land of Canaan. The field and the cave that is in it were made over to Abraham as property for a burying place by the Hittites.
|
Vertical channel device having buried source
ABSTRACT
A structure is provided comprising a semiconductor substrate having a first conductivity type, a buried source region having a second opposite conductivity type, and an epitaxial layer of the second conductivity type having a lower dopant concentration than the buried source region. Field oxide regions are formed at outer edges of the epitaxial layer. A well region of first conductivity type is implanted into the central portion of the epitaxial layer to define the active area. Trenches are etched through the well region into the buried source region. A first layer of silicon oxide is grown on the surface and within the trenches. Gate electrodes are formed by depositing a layer of polysilicon and etching back to leave the polysilicon layer only within the trenches. Ions of second conductivity type are implanted into the top portion of the well region to form drain regions. A second layer of silicon oxide is deposited over the top surfaces and planarized. Contact trenches are etched through the second silicon oxide layer and the field oxide regions to connect to the buried source region. A second set of contact trenches are etched through portions of the second silicon oxide layer to the underlying drain regions. A layer of tungsten is deposited and etched back leaving the tungsten within the first and second trenches. Interconnections are made between the source and drain regions to complete the fabrication.
This Application is a divisional of application Ser. No. 08/351,492 filed Dec. 7, 1994, now U.S. Pat. No. 5,455,190.
BACKGROUND OF THE INVENTION
(1) Field of the Invention
The present invention relates to the fabrication of integrated circuit devices, and more particularly, to a method of making a vertical channel device using buried source techniques to reduce the area of the active region in the fabrication of integrated circuits.
(2) Description of the Prior Art
A priority in integrated circuit chip fabrication technology today is in reducing the chip size. Workers in the art have striven to reduce the width of the polysilicon from 2 μm, 1 μm, 0.8 μm, 0.5 μm, to 0.35 μm, etc. Most of the effort at reducing chip size is directed toward reducing the polysilicon size. At some point in the near future, the polysilicon width shrinkage will have reached its maximum effectiveness. Source and drain regions are planned and drawn at the chip surface, which wastes valuable underlying layout areas. The large field oxide areas are used for isolation only. For example, in FIG. 1, there is shown a portion of a partially completed integrated circuit. Field oxide regions 11 have been formed in and on the surface of the semiconductor substrate 10. Source contacts 20 and drain contacts 22 are shown in a finger-type layout on the surface of the integrated circuit chip. In order to further shrink the chip layout, the source and drain regions must be addressed.
U.S. Pat. No. 5,164,325 to Cogan et al shows the formation of a vertical channel device using a buried source/drain structure.
SUMMARY OF THE INVENTION
Accordingly, it is a primary object of the invention to provide a process for fabricating an integrated circuit device with decreased junction area.
It is another object of the present invention to provide a process for fabricating a vertical channel device integrated circuit.
In accordance with the objects of the invention, a new method of manufacturing a vertical channel device integrated circuit is described. A structure is provided comprising a semiconductor substrate having a first conductivity type, a buried source region having a second conductivity type opposite to the first conductivity type and a first dopant concentration formed on top of the semiconductor substrate, and an epitaxial layer of the second conductivity type having a second dopant concentration formed on the surface of the buried source region wherein the second dopant concentration is less than the first dopant concentration. Field oxide regions are formed in and on the surface of the epitaxial layer on the outer edges of the semiconductor substrate. A well region of first conductivity type is implanted into the epitaxial layer within the central portion of the semiconductor substrate wherein the well region defines the active area of the integrated circuit. Trenches are etched through the well region into the underlying buried source region where the well region is not covered by a mask. A first layer of silicon oxide is thermally grown conformally on the surface of the well region and within the trenches. Gate electrodes are formed by depositing a layer of polysilicon over the surface of the well region and within the trenches and etching back the polysilicon layer leaving the polysilicon layer only within the trenches. The trenches are covered with a photoresist mask. Ions of second conductivity type are implanted into the top portion of the well region not covered by the photoresist mask to form drain regions within the well region. A second layer of silicon oxide is deposited over the surface of the well region and the field oxide regions and planarized. Contact trenches are etched through the second silicon oxide layer and the field oxide regions not covered by a mask to connect to the buried source region. A second set of contact trenches are etched through portions of the second silicon oxide layer not covered by a mask to the underlying drain regions. A layer of tungsten is deposited over the surface of the substrate and within the first and second contact trenches and etched back leaving the tungsten only within the trenches. Interconnections are made between the source and drain regions to complete the fabrication of the vertical channel device in the manufacture of an integrated circuit.
According to the objects of the present invention, a new vertical channel device integrated circuit is described. The device comprises a semiconductor substrate of a first conductivity type, a buried source region having a second conductivity type opposite the first conductivity type formed on top of the semiconductor substrate, an epitaxial layer of the second conductivity type formed overlying the buried source region wherein the epitaxial layer has a dopant concentration less than the dopant concentration of the buried source region, a well region of the first conductivity type formed within the central portion of the epitaxial layer wherein the well region defines the active region of the integrated circuit, field oxide regions formed in and on the outer edges of the epitaxial layer, a top planarized layer of silicon oxide overlying the field oxide regions and the well region, polysilicon-filled trenches extending through the well region and contacting buried source region, drain regions of the second conductivity type within the top surface of the well region and between the polysilicon-filled trenches, first tungsten plug-filled trenches through the planarized silicon oxide layer contacting the drain regions, second tungsten plug-filled trenches through the field oxide regions contacting the buried source region, and interconnections between the source and drain regions.
BRIEF DESCRIPTION OF THE DRAWINGS
In the accompanying drawings forming a material part of this description, there is shown:
FIG. 1 is a cross-sectional representation of an integrated circuit of the prior art.
FIGS. 2 through 9 are cross-sectional representations of a preferred embodiment of the present invention.
FIGS. 10A and 10B are cross-sectional representations of a first preferred embodiment of the present invention.
FIG. 10C is a cross-sectional representation of a second preferred embodiment of the present invention.
FIG. 11 is a cross-sectional representation of a preferred embodiment of the present invention.
FIG. 12A is a cross-section representation of a completed integrated circuit according to the first preferred embodiment of the present invention.
FIG. 12B is a cross-section representation of a completed integrated circuit according to the second preferred embodiment of the present invention.
DESCRIPTION OF THE PREFERRED EMBODIMENTS
FIGS. 2 through 12 illustrate the formation of an N channel FET integrated circuit device. However, it is well understood by those skilled in the art that a P channel FET integrated circuit device could also be formed by simply substituting opposite polarities to those given for the N channel embodiment. Also, a CMOS FET could in a similar way be formed by making both N channel and P channel devices upon the same substrate.
Referring now more particularly to FIG. 2, there is shown a semiconductor substrate 10, preferably composed of silicon having a (100) crystallographic orientation. For this NMOS illustration, the substrate is a P-substrate or a P-well. First, the buried source region will be formed. Ions are implanted into the top surface of the semiconductor substrate to form the implanted region 12. The buried source region will be of opposite conductivity type from the substrate. For an N+ region, arsenic ions are implanted at a dosage of between about 1 E 15 to 1 E 16 atoms/cm² at an energy of between about 50 to 100 KeV. For a P+ region (not shown), BF₂ ions are implanted at a dosage of between about 1 E 15 to 1 E 16 atoms/cm² at an energy of between about 50 to 100 KeV.
Referring now to FIG. 3, an epitaxial layer 14 is formed by, for example, subjecting the substrate to a flow of SiH₂ Cl₂ -H₂ -HCl at a temperature of between about 950° to 1080° C. and a pressure of between about 40 to 60 Torr. The epitaxial layer is grown to a thickness of between about 2000 to 4000 Angstroms. The epitaxial layer has the same conductivity type as the buried source layer, but the dopant concentration of the epitaxial layer 14 is between about 1 E 11 to 1 E 12 atoms/cm³, less than the dopant concentration of the buried layer which is between about 1 E 15 to 1 E 16 atoms/cm³.
The substrate is oxidized to form field oxide isolation regions 16 on the periphery of the active area. Ions are implanted into the active area to form the well region 18, as shown in FIG. 4. The well region is of the same conductivity as the semiconductor substrate 10. For the NMOS example illustrated, the well region is a P-well, formed by implanting boron ions at a dosage of between about 5 E 12 to 5 E 13 atoms/cm² and an energy of between about 100 to 200 KeV. An N-well, not shown, would be formed by implanting phosphorus ions at a dosage of between about 1 E 12 to 1 E 13 atoms/cm² and an energy of between about 100 to 200 KeV.
Referring now to FIG. 5, silicon trenches 20 are etched through the well region 18 and into the buried source region 12, using a conventional reactive ion etching ambient.
A gate oxide layer 22 is thermally grown conformally on the surface of the well region 18 and within the trenches 20, as illustrated in FIG. 6. A layer of polysilicon is deposited by chemical vapor deposition (CVD) over the surface of the gate oxide layer 22 and within the trenches. The polysilicon is doped as is conventional to form polysilicon gate electrodes 24. The polysilicon and gate oxide layer are etched back leaving the polysilicon only within the trenches as shown in FIG. 7.
Referring now to FIG. 8, drain regions 28 are formed within the top surface of the well region between the polysilicon gate electrodes. Lithographic masks, not shown, may be required to protect the areas, such as the gate electrodes, not to be subjected to the ion implantation. The formation of the lithographic masks is done by conventional lithography and etching techniques. For the N+ drain implant of the example, arsenic ions 26 are implanted at a dosage of between about 1 E 15 to 1 E 16 atoms/cm² and an energy of between about 50 to 100 KeV. For a P+ drain implantation, not shown, BF₂ ions are implanted at a dosage of between about 1 E 15 to 1 E 16 atoms/cm² and an energy of between about 50 to 100 KeV.
Referring now to FIG. 9, a layer of silicon dioxide 30 is deposited by CVD over the surface of the substrate and planarized. The connection to the buried source region will now be described. Two alternative methods may be used to accomplish the connection. The first method is illustrated by FIGS. 10A and 10B.
Referring now to FIG. 10A, trenches 32 are etched through the field oxide regions 16 to the underlying epitaxial layer 14. Referring to FIG. 10B, ions 34 are implanted through the trenches 32 into the epitaxial layer to form heavily doped region 36 of the same dopant concentration as the underlying buried source region. The implant is then driven in to provide connection to the buried source region. For the NMOS example illustrated, phosphorus ions are implanted with a dosage of between about 5 E 15 to 5 E 16 atoms/cm² and an energy of between about 100 to 200 KeV. For a PMOS integrated circuit device, not shown, BF₂ ions are implanted with a dosage of between about 5 E 15 to 5 E 16 atoms/cm² and an energy of between about 100 to 200 KeV.
Alternatively, as illustrated in FIG. 10C, trenches 32 are etched through the field oxide regions 16 and the underlying epitaxial layer 14 to the buried source region 12. With this alternative, the trench makes a direct connection to the buried source region so that an additional implant is unnecessary. The first alternative is illustrated in the remaining figures, although processing is the same for both alternatives.
Referring now to FIG. 11, connection to the drain regions is now made. The source and drain contact etching must be performed with separate etching masks because of the differing depths of the contacts. Trenches 42 are etched through the silicon dioxide layer 30 to the drain regions 28.
Selective tungsten CVD deposition 44 and etch back is used to fill the trenches 32 and 42 to complete the source and drain contacts, as shown in FIG. 12A in the first alternative and FIG. 12B in the second alternative. Interconnections 46 and 48 are formed between the source and drain regions, respectively, to complete formation of the vertical channel device integrated circuit.
The device formed by the method of the present invention employs vertical channels and a buried source junction. Source contacts are placed at the surface of the field oxide regions so as to reduce the size of the active area. This results in nearly a 30% decrease in layout and chip size.
The vertical channel device integrated circuit of the present invention illustrated in FIGS. 12A and 12B comprises a semiconductor substrate 10 of a first conductivity type, a buried source region 12 having a second conductivity type opposite the first conductivity type formed on top of the semiconductor substrate, an epitaxial layer 14 of the second conductivity type formed overlying the buried source region wherein the epitaxial layer has a dopant concentration less than the dopant concentration of the buried source region, a well region 18 of the first conductivity type formed within the central portion of the epitaxial layer wherein the well region defines the active region of the integrated circuit, field oxide regions 16 formed in and on the outer edges of the epitaxial layer, a top planarized layer 30 of silicon oxide overlying the field oxide regions and the well region, polysilicon-filled trenches 24 extending through the well region and contacting the buried source region 12, drain regions 28 of the second conductivity type within the top surface of the well region and between the polysilicon-filled trenches, first tungsten plug-filled trenches 44 through the planarized silicon oxide layer contacting the drain regions 28, second tungsten plug-filled trenches 44 through the field oxide regions 16 contacting the buried source region 12, and interconnections 46 and 48 between the source and drain regions.
FIG. 12A illustrates the first alternative connection to the buried source region as described with reference to FIGS. 10A and 10B. Tungsten plug-filled trenches 44 extend to the epitaxial layer 14. Heavily-doped region 36 has the same dopant concentration as the underlying buried source region 12 and hence provides connection to the buried source region.
FIG. 12B illustrates the second alternative connection to the buried source region 12 as described with respect to FIG. 10C. The tungsten plug-filled trenches 44 extend through the field oxide regions 16 and the underlying epitaxial layer 14 to the buried source region 12. With this alternative, the tungsten plug 44 makes a direct connection to the buried source region 12.
While the invention has been particularly shown and described with reference to the preferred embodiments thereof, it will be understood by those skilled in the art that various changes in form and details may be made without departing from the spirit and scope of the invention.
What is claimed is:
1. A vertical channel device integrated circuit comprising: a semiconductor substrate of a first conductivity type; a buried source region having a second conductivity type opposite said first conductivity type overlying said semiconductor substrate; an epitaxial layer of said second conductivity type overlying said buried source region wherein said epitaxial layer has a dopant concentration less than the dopant concentration of said buried source region; a well region of said first conductivity type within the central portion of said epitaxial layer wherein said well region defines the active region of said integrated circuit; field oxide regions in and on the outer edges of said epitaxial layer; a top planarized layer of silicon oxide overlying said field oxide regions and said well region; gate oxide lined trenches extending through said well region and contacting said buried source region; polysilicon gate electrodes within said gate oxide lined trenches; drain regions of said second conductivity type within the top surface of said well region and between said gate oxide lined trenches; first tungsten plug-filled trenches through said planarized silicon oxide layer contacting said drain regions; and second tungsten plug-filled trenches through said field oxide regions contacting said buried source region.
2. A device according to claim 1 wherein said vertical channel device is an NMOS device and wherein said first conductivity type is P and said second conductivity type is N.
3. A device according to claim 1 wherein said vertical channel device is a PMOS device and wherein said first conductivity type is N and said second conductivity type is P.
4. A device according to claim 1 wherein said buried source region has a dopant concentration of between about 1 E 15 to 1 E 16 atoms/cm³.
5. A device according to claim 1 wherein said epitaxial layer has a thickness of between about 2000 to 4000 Angstroms and a dopant concentration of between about 1 E 11 to 1 E 12 atoms/cm³.
6. A device according to claim 2 wherein said well region is a P-well having a dopant concentration of between about 5 E 12 to 5 E 13 atoms/cm³.
7. A device according to claim 3 wherein said well region is an N-well having a dopant concentration of between about 5 E 12 to 5 E 13 atoms/cm³.
8. A device according to claim 1 wherein said second tungsten plug-filled trenches extend through said field oxide regions and said underlying epitaxial layer to said buried source region.
9. A device according to claim 1 wherein said second tungsten plug-filled trenches extend through said field oxide regions to said underlying epitaxial layer and wherein implanted regions in said epitaxial layer having the same conductivity type as said buried source region connect said second tungsten plug-filled trenches to said buried source region.
10. A vertical channel integrated circuit comprising: a semiconductor substrate having a first doped layer of a first conductivity type; a buried source/drain region having a second conductivity type opposite said first conductivity type overlying said first doped layer; a second doped layer of said second conductivity type overlying said buried source/drain region; a well region of said first conductivity type within a portion of said second doped layer; a layer of silicon oxide overlying said well region; trenches having walls extending through said well region to said buried source/drain region; a gate insulator layer covering said walls of said trenches; polysilicon gate electrodes within said trenches, said polysilicon gate electrodes separated from said buried source/drain region by said gate insulator layer; source/drain regions of said second conductivity type within said well region; conducting regions extending through said silicon oxide layer and contacting said source/drain regions of said second conductivity type within said well region; and a conductor-filled trench extending to electrically contact said buried source/drain region.
11. The device of claim 10, comprising: a plurality of source/drain regions of said second conductivity type within said well region.
12. A device according to claim 10 wherein said vertical channel device is an NMOS device and wherein said first conductivity type is P and said second conductivity type is N.
13. A device according to claim 12 wherein said well region has a P-type dopant concentration of between about 5 E 12 to 5 E 13 atoms/cm³.
14. A device according to claim 10 wherein said vertical channel device is a PMOS device and wherein said first conductivity type is N and said second conductivity type is P.
15. A device according to claim 14 wherein said well region is an N-well having a dopant concentration of between about 5 E 12 to 5 E 13 atoms/cm³.
16. A device according to claim 10 wherein said buried source region has a dopant concentration of between about 1 E 15 to 1 E 16 atoms/cm³.
17. A device according to claim 10 wherein said layer of said second conductivity type overlying said buried source/drain region is an epitaxial layer.
18. A device according to claim 10, wherein said buried source/drain region is common to said plurality of source/drain regions of said second conductivity type within said well region.
|
<?php
namespace App\Repository;
use App\Entity\PresentationEntity;
use Doctrine\ORM\EntityRepository;
class PresentationEntityRepository extends EntityRepository {
    /**
     * Returns true when the new presentation does not overlap any valid
     * presentation already scheduled in the same hall.
     */
    public function ableToAddPresentation(PresentationEntity $newPres): bool {
        $existingPresentations = $this->findBy([
            'hall' => $newPres->getHall(),
            'valid' => true,
        ]);
        foreach ($existingPresentations as $exiPres) {
            // Two intervals overlap when each one starts no later than the other ends.
            // This also covers the case where the new presentation fully contains an
            // existing one, which a start-or-end-inside check alone would miss.
            $overlaps = $newPres->getStartTime() <= $exiPres->getEndTime()
                && $newPres->getEndTime() >= $exiPres->getStartTime();
            if ($overlaps) {
                return false;
            }
        }
        return true;
    }
}
|
My question: Is the following sentence factually correct?
"Rheumatoid arthritis mainly attacks the joints of the body."
Options:
- yes
- no
Please think step by step:
Rheumatoid arthritis causes painful swelling in the joints.
Thus, the answer is yes.
|
A.N.T. Farm Wiki talk:Folive/@comment-4981082-20130108171050/@comment-6977360-20130109221140
They're so cute together <333
|
ControlNet won't find models from Automatic 1111 server
Describe the bug
Hello, I successfully installed AI-Render to run locally on my Windows 10 workstation. Now I'm trying to enable ControlNet from AI-Render's interface in Blender, but when I hit "Load Models from Automatic1111" I get this error:
"Couldn't get the list of available ControlNet models from the Automatic111 server. Make sure ControlNet is installed and activated."
I do have ControlNet installed with many models on my Automatic1111, but AI-Render somehow is not able to see it. Am I missing something?
Thank you so much for this incredible tool and for your precious time.
Kindest
Andrea
To reproduce
Install Blender 3.5.1
Install Automatic 1111 web-UI to run locally
Install ControlNet + models
Install AI-Render
Within AI-Render Blender's UI, under the ControlNet dropdown section, click on "Load Models from Automatic1111"
Error log
ControlNet models returned from Automatic1111 API:
{'detail': 'Not Found'}
AI Render Error: Couldn't get the list of available ControlNet models from the Automatic1111 server. Make sure ControlNet is installed and activated. Get help
fake_module: addon missing 'bl_info' gives bad performance!: 'C:\Users\intern\AppData\Roaming\Blender Foundation\Blender\3.5\scripts\addons\Animated Render Border v3_0.py'
Environment
Blender version (upper right corner of splash screen): 3.5.1
AI Render version (find in Preferences > Add-ons): 0.7.8
Operating system (Windows/Mac/Linux): Windows 10
Screenshots/video
Additional information
Hello Ben,
thank you for your answer, I think re-launching the web-UI from cmd did the
work, now it's working.
Kindest,
Andrea
On Wed, May 10, 2023 at 5:59 PM Ben Rugg @.***> wrote:
Do you have Automatic1111 running in API mode? That would be my first
guess. See instructions here:
https://github.com/benrugg/AI-Render/wiki/Local-Installation
Ok, great - thanks for the update!
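For anyone who hits the same {'detail': 'Not Found'} response, a quick way to narrow down the cause is to query the two endpoints directly. This is a rough sketch only: the base URL assumes a default local install on port 7860, and the route names follow the sd-webui-controlnet extension's documented API; adjust them if your setup differs.
import requests
BASE_URL = "http://127.0.0.1:7860"  # assumed default local Automatic1111 address
# A 404 here usually means the web UI was not launched in API mode
# (e.g. add --api to COMMANDLINE_ARGS and restart it).
sd_models = requests.get(f"{BASE_URL}/sdapi/v1/sd-models", timeout=10)
print("Base API:", sd_models.status_code)
# If the call above succeeds but this one returns 404, the ControlNet
# extension itself is likely missing or disabled rather than the API.
cn_models = requests.get(f"{BASE_URL}/controlnet/model_list", timeout=10)
print("ControlNet models:", cn_models.status_code, cn_models.json())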
|
import React from 'react';
import { PostHeader, PostTitle, MainContent, PostDescription } from './styles';
export default function AboutMe() {
return (
<>
<PostHeader>
<PostTitle>
Hey guys <span aria-label="Hi" role="img">😎</span>,
</PostTitle>
<PostDescription>
</PostDescription>
</PostHeader>
<MainContent>
        <p>My name is Ângelo, I'm a full-stack developer and UI/UX enthusiast in constant training (seriously, I've been studying every day since I discovered programming <span aria-label="notebook" role="img">💻</span>).<br/> About a year ago (in 2019), I had my first intense contact with programming, and I haven't let go since. <span aria-label="heart" role="img">💜</span></p>
        <p>But I kept switching languages and never focused on any of them. I stayed like that for about seven months, and in that period I tried Java, Python, PHP, and Laravel. <br/>
        <strong>DON'T DO THAT, SERIOUSLY!! <span aria-label="no" role="img">🚫😤</span></strong>
        <br/>
        Pick one and focus on it; after that, the rest gets easier. (Soon I'll write a post explaining how to get started, I promise.)
        </p>
        <p>Anyway, I'm currently studying Computer Science and finishing a distance-learning program in Information Technology Management. I work as a freelancer on the web, but soon I want to join a really cool company/startup.</p>
        <h3>Which technologies do I work with?</h3>
        <p>Look, I love everything that involves JavaScript, but I chose to focus on the <b>ReactJs, NodeJs and React Native</b> stack. It's perfect for building a complete application, covering backend, frontend, and mobile. </p>
        <h3> Main Skills: </h3>
<ul>
<li>User Interface</li>
<li>User Experience</li>
<li>NodeJS</li>
          <li>ReactJS / Redux / Flux Architecture</li>
<li>Javascript (Testes, ES6+)</li>
<li>Gatsby - PWA</li>
<li>MySQL - MongoDB - Postgres</li>
<li>Wordpress</li>
<li>Git</li>
<li>Scrum and Kanban</li>
          <li>HTML and Template Languages</li>
          <li>TDD and Continuous Integration</li>
          <li>Responsive Design (Mobile First)</li>
</ul>
        <p>I've also taken part in some really cool events, such as Campus Party, Startup Weekend, and two hackathons. I recommend these events to anyone who wants to develop intrapersonal skills and give their best. (That subject deserves a post too.)</p>
        <p> If you want to know more, hit me up on any social network over there and let's chat. See you! <span aria-label="bye bye" role="img">👋👋</span> </p>
</MainContent>
</>
);
}
|
77. We know for certain that the globules of the venous blood, when they come in contact with air in the lungs, change their colour, and that this change of colour is accompanied by an absorption of oxygen; and that all those constituents of the blood which possess in any degree the power of combining with oxygen, absorb it in the lungs, and become saturated with it. Although in contact with these other compounds, the globules, when arterialized, retain their florid, red colour in the most minute ramifications of the arteries; and we observe them to change their colour, and to assume the dark red colour which characterizes venous blood, only during their passage through the capillaries. From these facts we must conclude that the constituents of arterial blood are altogether destitute of the power to deprive the arterialized globules of the oxygen which they have absorbed from the air; and we can draw no other conclusion from the change of colour which occurs in the capillaries, than that the arterialized globules, during their passage through the capillaries, return to the condition which characterized them in venous blood; that consequently, they give up the oxygen absorbed in the lungs, and thus acquire the power of combining with that element afresh.
78. We find, therefore, in arterial blood, albumen, which, like all the other constituents of that fluid, has become saturated with oxygen in its passage through the lungs, and oxygen gas, which is conveyed to every particle in the body in chemical combination with the globules of the blood. As far as our observations extend (in the development of the chick during incubation), all the conditions seem to be here united which are necessary to the formation of every kind of tissue; while that portion of oxygen which is not consumed in the growth or reproduction of organs combines with the substance of the living parts, and produces, by its union with their elements, the act of transformation which we have called the change of matter.
79. It is obvious, that all compounds, of whatever kind, which are present in the capillaries, whether separated there, or introduced by endosmosis or imbibition, if not altogether incapable of uniting with oxygen, must, when in contact with the arterialized globules, the carriers of oxygen, be affected exactly in the same way as the solids forming part of living organs. These compounds, or their elements, will enter into combination with oxygen, and in this case there will either be no change of matter, or that change will exhibit itself in another form, yielding products of a different kind.
80. The conception, then, of a change in the two qualities of the blood above alluded to, by means of a foreign body contained in the blood or introduced into the circulation (a medicinal agent) presupposes two kinds of operation.
Assuming that the remedy cannot enter into any such chemical union with the constituents of the blood as puts an end to the vital activity; assuming, further, that it is not in a condition of transformation capable of being communicated to the constituents of the blood or of the organs, and of continuing in them; assuming, lastly, that it is incapable, by its contact with the living parts, of putting a stop to the change of matter, the transformation of their elements; then, in order to discover the modus operandi of this class of medicinal agents, nothing is left but to conclude that their elements take a share in the formation of certain constituents of the living body, or in the production of certain secretions.
81. The vital process of secretion, in so far as it is related to the chemical forces, has been subjected to examination in the preceding pages. In the carnivora we have reason to believe, that without the addition of any foreign matter in the food, the bile and the constituents of the urine are formed in those parts where the change of matter takes place. In other classes of animals, on the other hand, we may suppose that in the organ of secretion itself, the secreted fluid is produced from certain matters conveyed to it; in the herbivora, for example, the bile is formed from the elements of starch along with those of a nitrogenized product of the metamorphosis of the tissues. But this supposition by no means excludes the opinion, that in the carnivora the products of the metamorphosed tissues are resolved into bile, uric acid, or urea, only after reaching the secreting organ; nor the opinion that the elements of the non-azotized food, conveyed directly by the circulation to every part of the body, where change of matter is going on, may there unite with the elements of the metamorphosed tissues, to form the constituents of the bile and of the urine.
82. If we now assume, that certain medicinal agents may become constituents of secretions, this can only occur in two ways. Either they enter the circulation, and take a direct share in the change of matter in so far as their elements enter into the composition of the new products; or they are conveyed to the organs of secretion, where they exert an influence on the formation or on the quality of a secretion by the addition of their elements.
In either case, they must lose in the organism their chemical character; and we know with sufficient certainty, that this class of medicinal bodies disappear in the body without leaving a trace. In fact, if we ascribe to them any effect, they cannot lose their peculiar character by the action of the stomach; their disappearance, therefore, presupposes that they have been applied to certain purposes, which cannot be imagined to occur without a change in their composition.
83. Now, however limited may be our knowledge of the composition of the different secretions, with the exception of the
|
[Federal Register Volume 69, Number 61 (Tuesday, March 30, 2004)]
[Rules and Regulations]
[Pages 16722-16738]
[FR Doc No: 04-6794]
-----------------------------------------------------------------------
DEPARTMENT OF TRANSPORTATION
Federal Motor Carrier Safety Administration
49 CFR Parts 380 and 391
[Docket FMCSA-97-2176]
RIN 2126-AA08
Minimum Training Requirements for Longer Combination Vehicle
(LCV) Operators and LCV Driver-Instructor Requirements
AGENCY: Federal Motor Carrier Safety Administration (FMCSA), DOT.
ACTION: Final rule.
-----------------------------------------------------------------------
SUMMARY: The Federal Motor Carrier Safety Administration (FMCSA)
establishes standards for minimum training requirements for the
operators of longer combination vehicles (LCVs) and requirements for
the instructors who train these operators. This action is in response
to section 4007 of the Intermodal Surface Transportation Efficiency Act
of 1991, which directed that training for the operators of LCVs include
certification of an operator's proficiency by an instructor who has met
the requirements established by the Secretary of Transportation
(Secretary). The purpose of this final rule is to enhance the safety of
commercial motor vehicle (CMV) operations on our Nation's highways.
EFFECTIVE DATE: June 1, 2004.
FOR FURTHER INFORMATION CONTACT: Mr. Robert Redmond, Office of Safety
Programs, (202) 366-9579, Federal Motor Carrier Safety Administration,
U.S. Department of Transportation, 400 Seventh Street, SW., Washington,
DC 20590. Office hours are from 8:30 a.m. to 5 p.m., EST, Monday
through Friday, except Federal holidays.
SUPPLEMENTARY INFORMATION: Sec. 4007(b) of the Motor Carrier Act of
1991 [Title IV of the Intermodal Surface Transportation Efficiency Act
of 1991 (ISTEA), Public Law 102-240, 105 Stat. 1914, 2152; 49 U.S.C.
31307] directs the U.S. Department of Transportation (DOT) to establish
Federal minimum training requirements for drivers of LCVs. The ISTEA
also requires that the certification of these drivers' proficiency be
accomplished by instructors who meet certain Federal minimum
requirements to ensure an acceptable degree of quality control and
uniformity. Sec. 4007(f) of the ISTEA defines an LCV as ``any
combination of a truck tractor and 2 or more trailers or semi-
trailers'' that has a gross vehicle weight (GVW) greater than 80,000
pounds (36,288 kilograms) and is operated on the Interstate Highway
System. This final rule implements the requirements of Sec. 4007.
Background
In the early 1980s, the Federal Highway Administration (FHWA)
determined that a need existed for technical guidance in the area of
truck driver training. FHWA is the predecessor agency to FMCSA within
DOT. Research at that time had shown that many driver-training schools
offered little or no structured curricula or uniform training programs
for any type of CMV.
To help correct this problem, FHWA developed the Model Curriculum
for Training Tractor-Trailer Drivers, issued in 1985 (GPO Stock No.
050-001-00293-1). The Model Curriculum, as it is known in the industry,
incorporated the agency's ``Proposed Minimum Standards for Training
Tractor Trailer Drivers'' (1984). The Model Curriculum is a broad set
of recommendations that incorporates standardized minimum core
curriculum guidelines and training materials, as well as guidelines
pertaining to vehicles, facilities, instructor hiring practices,
graduation requirements, and student placement. Curriculum content
includes the following areas: basic operation, safe operating
practices, advanced operating practices, vehicle maintenance, and
nonvehicle activities.
The Professional Truck Driver Institute (PTDI) was created in 1986
by the motor carrier industry to certify training programs offered by
truck driver training schools. Originally named the Professional Truck
Driver Institute of America, the group changed its name in November
1998 to reflect the addition of Canada to the organization. PTDI
derived its certification criteria from the Model Curriculum, and, in
mid-1988, began certifying truck-driver training programs across the
country. As of February 2003, approximately 64 schools in 27 States and
Canada have received the PTDI certification. Although many schools have
a number of truck driving courses, most have only one course that is
certified by PTDI.
The Commercial Motor Vehicle Safety Act of 1986 (CMVSA) (49 U.S.C.
31301 et seq.), although not directly targeted at driver training, was
intended to improve highway safety. Its goal was to ensure that drivers
of large trucks and buses possess the knowledge and skills necessary to
operate these vehicles safely on public highways. The CMVSA established
the commercial driver's license (CDL) program and directed the agency
to establish minimum Federal standards that States must meet when
licensing CMV drivers. The CMVSA applies to virtually anyone who
operates a commercial motor vehicle in interstate or intrastate
commerce, including employees of Federal, State, and local governments.
As defined by the implementing regulation, a CMV is a motor vehicle or
combination of motor vehicles used in commerce to transport passengers
or property if the vehicle meets one or more of the following criteria:
(a) Has a gross combination weight rating (GCWR) of 11,794 or more
kilograms (26,001 or more pounds) inclusive of a towed unit with a
gross vehicle weight rating (GVWR) of more than 4,536 kilograms (10,000
pounds).
(b) Has a GVWR of 11,794 or more kilograms (26,001 or more pounds).
(c) Is designed to transport 16 or more passengers, including the
driver.
(d) Is of any size and used in the transportation of hazardous
materials as defined in this section [49 CFR 383.5].
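    Read purely as a decision rule, the four criteria above, together with
the LCV definition in section 4007(f) quoted earlier, can be summarized in
the following illustrative sketch. The field names, units, and structure are
assumptions made for illustration only and are not regulatory text.
from dataclasses import dataclass
@dataclass
class Vehicle:
    gcwr_lbs: float             # gross combination weight rating, in pounds
    gvwr_lbs: float             # gross vehicle weight rating, in pounds
    towed_unit_gvwr_lbs: float  # GVWR of any towed unit (0 if none)
    seats: int                  # designed seating capacity, including the driver
    placarded_hazmat: bool      # placarded for hazardous materials
    trailers: int               # trailers/semi-trailers behind a truck tractor
    gvw_lbs: float              # actual gross vehicle weight, in pounds
def is_cmv(v: Vehicle) -> bool:
    # Criteria (a) through (d) of the CMV definition in 49 CFR 383.5.
    return (
        (v.gcwr_lbs >= 26001 and v.towed_unit_gvwr_lbs > 10000)  # (a)
        or v.gvwr_lbs >= 26001                                   # (b)
        or v.seats >= 16                                         # (c)
        or v.placarded_hazmat                                    # (d)
    )
def is_lcv(v: Vehicle) -> bool:
    # Sec. 4007(f) of ISTEA: a truck tractor with 2 or more trailers or
    # semi-trailers and a GVW greater than 80,000 pounds, operated on the
    # Interstate System (the operating location is not modeled here).
    return v.trailers >= 2 and v.gvw_lbs > 80000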
In accordance with the CMVSA, all drivers of commercial motor
vehicles must possess a valid CDL in order to be properly qualified to
operate the vehicle(s) they drive. In addition to passing the CDL
knowledge and skills tests required for the basic vehicle group, all
persons who operate or expect to operate any of the following vehicles,
which have special handling characteristics, must obtain endorsements
under 49 CFR 383.93(b):
(a) Double/triple trailers;
(b) Passenger vehicles;
(c) Tank vehicles;
(d) Vehicles required to be placarded for hazardous materials.
For all endorsements, the driver is required to pass a knowledge
test that gauges the person's familiarity with the special handling
characteristics of the specific vehicle type. To obtain a passenger
endorsement, the driver also must pass a skills test.
The CDL standards do not require the comprehensive driver training
proposed in the Model Curriculum, since the CDL is a licensing standard
as opposed to a training standard. Accordingly, there are no
prerequisite Federal or State training requirements to obtain a CDL.
In 1990, the National Transportation Safety Board (NTSB)
recommended to FHWA (Safety Recommendation H-90-3) that drivers of
specialized vehicles, including multiple-trailer vehicles, receive
training in the special handling characteristics and other variables
that influence the controllability and maneuverability of these
vehicles. On September 12, 1990, NTSB voided this Safety Recommendation
as ``Closed--Reconsidered.'' NTSB determined that the knowledge and
skills necessary for the operation of multiple-trailer combination
vehicles are covered in the CDL requirements under 49 CFR subpart C as
well as in the ``model driver manual and the model knowledge and
skills tests,'' and that the trucking industry provided adequate
training in these requirements.
In February 1991, FHWA awarded a contract to PTDI to develop
voluntary criteria for training drivers in the safe operation of twin
8.534-meter (28-foot) trailer combination vehicles. The resulting
``Twin Trailer Driver Curriculum'' outlines how drivers should be
trained in the safe operation of these vehicles. Subject matter experts
from motor carrier fleets, industry associations, training
institutions, and governmental organizations assisted in developing the
curriculum, which consists of 115 clock hours of direct driver
participation including a minimum of 56 hours of behind-the-wheel
training. The ``Twin Trailer Driver Curriculum'' is available for
review in the public docket for this rulemaking.
The agency awarded two additional contracts to the PTDI to develop
curriculum outlines addressing triple-trailer combination vehicles and
Rocky Mountain/Turnpike Double combination vehicles. Ultimately, the
curriculum outlines for twin trailers, Rocky Mountain/Turnpike Doubles,
and triple-trailer combinations were merged into a single document,
entitled ``Multiple Trailer Combination Vehicle (MTCV) Driver Training
Guide: Suggested Units of Instruction and Curriculum Outline.'' The
PTDI was selected to develop a composite modular training curriculum
outline embracing both the LCV driver and the LCV instructor.
Upon completion of the curricula, the agency coordinated with the
U.S. Department of Education to ensure that the proposed training
requirements were in concert with its accreditation requirements.
Representatives from both agencies agreed that the proposed training
requirements would be eligible for accreditation by any group meeting
the criteria and procedures described in the publication Nationally
Recognized Accrediting Agencies and Associations, Criteria and
Procedures for Listing by the U.S. Secretary of Education and Current
List. This document is available for review in the public docket for
this rulemaking.
During this period, two additional FHWA initiatives--a series of
highway-safety focus groups in December 1994, and FHWA's first National
Truck and Bus Safety Summit, held in March 1995--contributed to an
enhanced understanding of driver training. Although neither project
specifically focused on driver training methods or minimum training
standards, they nevertheless provided perspective on the importance of
driver training and the need for minimum training requirements. The
``Focus Group Report'' on the 1994 initiative and the ``1995 Truck and
Bus Safety Summit, Report of Proceedings'' are available for review in
the rulemaking docket.
On January 15, 1993, FHWA's Office of Motor Carriers published an
advance notice of proposed rulemaking (ANPRM) in the Federal Register
(58 FR 4638) seeking comments and responses to 13 specific questions.
The agency received 24 comments, which were summarized in the notice of
proposed rulemaking (NPRM) discussed below.
Summary of the NPRM
The agency used the results of the projects mentioned above, the
research conducted over the past several years, and the comments to the
1993 ANPRM to develop the proposals in the NPRM, published in the
Federal Register on August 12, 2003 (68 FR 47890).
The NPRM proposed standards for minimum training requirements for
the operators of LCVs and requirements for the instructors who train
these operators. It also outlined procedures for determining compliance
with the proposed rule by operators, instructors, training
institutions, and employers.
As agency research and crash data have not indicated that multiple-
trailer combination vehicle operations pose a significant safety
problem, FMCSA proposed to limit the training requirement to operators
of LCVs, as defined in Sec. 4007(f) of the ISTEA, rather than extend it
to multiple-trailer combinations weighing less than 80,000 pounds.
As for the training, the NPRM proposed general requirements
pertaining to an LCV driver-training test--consisting of both a
knowledge and skills assessment--for all students wishing to obtain an
LCV Driver-Training Certificate. FMCSA believes that specialized
vehicle combinations require somewhat different training requirements
because of differing operating characteristics. Therefore, we proposed
two separate training courses for LCV drivers: LCV Doubles and LCV
Triples. Although the proposed minimum curricula would be identical,
the training entity would tailor each course to the unique operational
and handling characteristics of the specific LCV category. Specialized
commodity training could be addressed as necessary by training
institutions or carriers.
The NPRM also established guidelines as to which drivers must
comply with the proposed rule. The individual seeking LCV training
would have to possess a valid CDL with a double/triple trailer
endorsement, have only one driver's license, have a good driving
record, and provide evidence of experience operating the category of
combination vehicle designated as a prerequisite for the desired LCV
training. Evidence of driving experience would consist of a statement
from one or more employers indicating the type and amount of driving
experience while employed by that motor carrier.
In addition, FMCSA believes that for many current LCV drivers, the
combination of a good driving record
and experience with an LCV double or triple indicates that the
individual has the minimum knowledge and driving skills to operate such
a vehicle. Accordingly, we proposed to allow certain drivers to
substitute a good driving record and experience for the completion of
the LCV driver-training requirements. The driver would have to provide
the employing motor carrier with evidence that he or she had operated
LCVs safely during the 2-year period prior to application. FMCSA
believes that grandfathering such drivers will not diminish public
safety or the overall safe operation of CMVs.
Regarding the training program, each instructor employed by a
training institution offering LCV training would be required to meet
all State requirements for a vocational education instructor. FMCSA
believes that, initially, persons currently conducting double/triple
trailer combination vehicle training would become qualified LCV
instructors under the proposed grandfather requirements. Subsequently,
when the need for new instructors arises, those qualified
(grandfathered) LCV instructors would train new instructors, who would
then be qualified to train drivers.
While the States assume varying degrees of control over education,
institutions of postsecondary education are permitted to operate with
considerable autonomy. As a consequence, educational institutions can
vary widely in the quality and adequacy of their programs. To ensure a
basic level of quality and adequacy, the U.S. Department of Education
has established accreditation requirements. FMCSA therefore proposed
that any entity--whether for-profit or not-for-profit, private or
public--that meets these accreditation requirements would be allowed to
offer the training.
As for employer responsibilities, the proposed rule expressly
prohibits a motor carrier from employing an individual to operate an
LCV unless he or she has first met the requirements under the proposal.
FMCSA or Motor Carrier Safety Assistance Program (MCSAP) State
enforcement officials would verify compliance with the LCV driver
training and driver-instructor requirements at the carrier's place of
business during the compliance review, rather than at the roadside. For
this reason, carriers would be required to maintain proof of
qualification of LCV drivers and LCV driver-instructors in the
qualification files for these individuals. This enforcement approach
emphasizes that the motor carrier and driver each have a responsibility
for the LCV training requirement. The driver would have to obtain the
necessary LCV training, and the carrier would be required to prohibit a
driver from operating an LCV without that training. Although
enforcement officials would not be burdened with trying to determine at
roadside whether a CMV driver is subject to the LCV training
requirement, they could still check the CDL to determine whether the
driver has the required doubles/triples endorsement.
Based on some of the public comments received in response to the
NPRM, the agency made certain changes to the proposal as reflected in
today's final rule. These are included in the discussion of comments
below.
Discussion of Comments to the NPRM
FMCSA received nine comments on the NPRM. Five comments were from
associations, two from individuals, one from a public interest group,
and one from a motor carrier.
General Support
Several commenters praise FMCSA for taking this action. For
example, the United Parcel Service (UPS) ``commends the FMCSA for their
efforts to promote commercial vehicle safety, particularly driver
training standards.'' Advocates for Highway and Auto Safety (AHAS)
supports the main framework of the proposed training regimen. The Motor
Freight Carriers Association (MFCA) also commends FMCSA for closely
following the training guidelines used by unionized less-than-truckload
motor carriers.
General Opposition
Several commenters criticized this action. The American Trucking
Associations (ATA) argues that the proposed mandatory training for LCV
drivers is unlikely to result in safer LCV operations. ATA suggests
that, rather than the proposed regulations, the agency adopt ``a set of
performance-based rules for training LCV drivers and driver-instructors
that could result in enhanced public safety and will not impact the
flow of freight on the nation's highways.'' Nonetheless, ATA provides
recommendations to enhance the proposal. The chairman of the Montana
Logging Association's Professional Log Haulers Committee opposes the
new training rules for operators of LCVs, citing four points of
contention: first, training would be a burden to rural log haulers;
second, the proposed rule would compound the driver shortage; third,
this highly specialized form of truck transportation needs particular
skills; and fourth, a trainee already goes through an extensive
orientation with a trainer until both are satisfied about the new
driver's skills. An individual commenter also expressed criticism of
the proposed rule, explaining that insurance entities will make sure
that motor carriers comply with industry standards.
Finally, the Commercial Vehicle Safety Alliance (CVSA) commented:
[CVSA is] concerned that the limited resources of both States
and the FMCSA may be expended unnecessarily if this proposed
rulemaking becomes regulation. Currently LCV operations have a crash
rate lower than other commercial motor vehicle types and, at a time
when State agencies are struggling financially, [CVSA does] not
support an effort to expend substantive resources in an area that
already operates in an overall safe manner.
FMCSA Response: Under section 4007(b) of the ISTEA, Congress
expressly mandated the development of minimum training standards for
operators of LCVs and requirements for those who instruct these
drivers. Many of those who responded to the proposal, including
dissenters, made useful recommendations for enhancing the proposal.
FMCSA particularly appreciates information about how the industry
currently trains LCV drivers and what entities offer this training. The
agency has considered all comments and revised the final rule to
reflect several recommended improvements.
One way in which the Montana Logging Association might meet the
challenges of complying with this rule, as outlined in its comments, is
for the association to provide the LCV driver-training program to the
drivers of its member carriers.
Exclusion of Non-LCVs From Training Requirement
AHAS discusses at some length a series of studies and reports
dealing with the relative safety of LCV operations. It quotes the
Transportation Research Board's Special Report 211 (1986), which found
that ``[t]wins [i.e., Western Doubles, usually a tractor pulling two
28-foot trailers] probably have slightly more crash involvements per
mile traveled than tractor-semi-trailers operated under identical
conditions at highway speeds.'' AHAS believes this information and
related data compel FMCSA to subject drivers of Western Doubles
weighing less than 80,000 pounds to the training requirements of this
rule. AHAS argues that FMCSA ``has no adequate foundation in the
administrative record of this rulemaking'' for excluding Western
Doubles ``and, therefore, continued reliance upon the arguments
advanced in the notice * * * would
constitute arbitrary and capricious agency action.''
FMCSA Response: Sec. 4007(b) of ISTEA requires training for drivers
of LCVs, which subsection (f) defines as ``any combination of a truck
tractor and 2 or more trailers or semi-trailers which operate on the
National System of Interstate and Defense Highways with a gross vehicle
weight [GVW] greater than 80,000 pounds.'' This rule does not address
drivers of Western Doubles, which normally operate at or below 80,000
pounds GVW, because Sec. 4007(b) is not applicable to them by its
terms.
Definitions
Training institution
Two commenters questioned the definitions used in the proposed
rule.
UPS and ATA remark that the definition of training institution is
unclear. Section 380.105 would define a training institution as any
technical or vocational school accredited by an accrediting institution
recognized by the U.S. Department of Education. Sections 380.301(b) and
380.303(a) would require an LCV driver-instructor to ``meet all State
requirements for a vocational instructor, if employed by a training
institution.'' Specifically, UPS asks for clarification about the
process by which an employer's internal training school [such as the
UPS Driver Training School (DTS) or others] becomes accredited. ATA
urges the agency to publish a clarification stating that training
programs that are managed directly by a motor carrier or provide
exclusive service to a motor carrier are not considered to be training
institutions.
In a related comment, MFCA said it is unaware of any driver
training school that trains instructors for triples, or, for that
matter, triples drivers. Until now, individual motor carriers have
filled that role, with State regulators providing oversight.
FMCSA Response: In the NPRM, the establishment of requirements for
training institutions was not intended to be interpreted as a mandate
to use these training institutions. The rule does not require a motor
carrier to employ a training institution to provide the LCV driver
training described in the appendix to part 380. Conversely, a motor
carrier's internal training school is not a training institution, as
defined in Sec. 380.105, unless it also accepts students from other
motor carriers and charges them for training. However, if a motor
carrier opts to use training institutions, the schools must be
accredited, and the training institute employee must meet State
vocational instructor guidelines.
In today's rule, FMCSA has clarified the definition of training
institution under Sec. 380.105(b) by stating that neither a motor
carrier's training school for its drivers nor an entity that
exclusively offers services to a single motor carrier is a training
institution. Accordingly, in-house trainers who are not affiliated with
a training institution must comply with the standards under subpart C
to part 380, except Sec. Sec. 380.301(a)(2), 380.301(b)(2), or
380.303(b)(4). A motor carrier's in-house training school for its
drivers does not require accreditation.
LCV Double and LCV Double subcategories
ATA and CVSA question the definition of the term longer combination
vehicle (Sec. 380.105(b)). The NPRM defines a longer combination
vehicle as ``any combination of a truck-tractor and two or more
trailers or semi-trailers, which operate on the National System of
Interstate and Defense Highways with a gross vehicle weight (GVW)
greater than 36,288 kilograms (80,000 pounds).'' An LCV double is
defined as a Turnpike Double or a Rocky Mountain Double, and both terms
are defined to include trailer-length specifications. ATA points out
that the trailer-length specifications create the possibility that
doubles weighing more than 80,000 pounds, whose two trailers are of
equal length but less than 45 feet, would qualify as an LCV, but that
the drivers of these vehicles would not have training requirements
because the definition of an LCV double is length-specific. ATA and
CVSA suggest that FMCSA clarify this issue or simply omit the trailer-
length specification.
FMCSA Response: FMCSA agrees that the proposed definition of an LCV
double (and the LCV double subcategories) would inadvertently exclude
certain vehicles that Congress clearly intended be covered under the
LCV training requirements, according to the LCV definition in section
4007(f). For example, the NPRM's definition of Rocky Mountain double
creates an applicability loophole for a vehicle that meets the weight
and configuration thresholds established under section 4007(f) of ISTEA
but exceeds the length specification for one of its components. To
correct this error, the agency has in today's final rule removed the
terms ``Rocky Mountain double,'' ``Turnpike double,'' ``twin
trailers,'' and ``Western double'' from the list of definitions under
Sec. 380.105. The definition of an LCV double has been modified to
eliminate references to LCV subcategories defined by trailer length. An
LCV double is redefined to mean ``an LCV consisting of a truck-tractor
in combination with two trailers and/or semi-trailers.''
Qualified LCV Driver-Instructor
ATA and CVSA suggest that the definition of qualified LCV driver-
instructor should properly refer to Subpart C, not Subpart B.
Additionally, ATA comments that it is assumed that classroom
instructors would not be included under this definition. ATA believes
that classroom instruction activities need no driving prerequisites.
Therefore, they request the rule clearly note that classroom
instruction personnel need not meet any of the requirements of a
``qualified LCV driver-instructor.''
FMCSA Response: FMCSA agrees. Under the definition of Qualified LCV
driver-instructor in the section ``Sec. 380.105 Definitions'' of
today's final rule, we have corrected the erroneous cross-reference to
read ``subpart C of this part.'' See the section ``Driver-Instructor
Requirement'' for a complete discussion of substantive changes to the
definition of classroom instructor.
Driver Training Program
UPS believes the proposed rule would alter the way its training
programs are currently operated. The UPS Driver Training School
initially qualifies UPS drivers for twin trailers only. UPS does not
have a separate school for LCV training but provides this special
training to the driver at his or her work location, depending on the
local State-by-State regulatory conditions that exist for LCV
operations. After the extra training has occurred, a revised DOT road
test form is prepared to indicate that the driver is qualified to drive
LCVs. UPS asserts that the proposed rule would require the carrier to
substantially modify its current training curriculum to include the
requirements contained in the appendix to part 380, or to create a
separate school specifically for LCV training away from the driver's
work location. UPS is not convinced that generic training and testing
will accommodate the unique aspects of different regional operations
(such as mountainous terrain versus turnpikes, or eastern U.S. versus
western U.S.).
UPS also asserts that the terms of the proposed training and
certification program assume that a driver operates a particular type
of LCV combination exclusively, whereas ``[i]n reality, UPS drivers may
operate one or more of the LCV combinations in any given day, week or
month.''
FMCSA Response: This rule establishes an LCV driver-training
program and standards for driver instructors. The LCV driver-training
program need not be offered in a different school from that used for
other CMV training. Neither is the agency requiring UPS or any other
carrier to incur the extra expense of training all drivers in the
operation of an LCV; the rule applies only to those drivers who must
operate an LCV as defined under Sec. 380.105. However, UPS
acknowledges that it already provides separate training for its drivers
who operate LCVs. This training should be modified to include those
requirements described in the appendix to part 380.
FMCSA has removed the definitions of LCV double subcategories in
order to make clear that, while the rule applies to all LCV doubles, it
does not require separate training for every conceivable subcategory of
LCV double. Furthermore, the driver-training certificate will indicate
the general LCV type(s) that a driver is authorized to operate: LCV
doubles, LCV triples, or both.
Requirements To Qualify for Driver Training
Proposed Sec. Sec. 380.203 and 380.205 would require drivers to
have a doubles/triples endorsement on their CDLs for 6 months before
applying for LCV doubles or triples training. ATA argues that it is
likely that the knowledge and/or skills required to obtain the
endorsement will be no more stringent than those required to obtain the
LCV Driver-Training Certificate in this rule. Therefore, ATA believes
that this requirement is duplicative, unnecessary, and not relevant.
ATA explains that job opportunities occur randomly and a driver does
not have the luxury of preparing for job changes ahead of time. The ATA
urges FMCSA to remove the prerequisite to have a doubles/triples
endorsement 6 months before applying for LCV doubles or triples
training.
UPS and MFCA are concerned about the employer's responsibilities to
provide evidence of a driver's experience. For example, MFCA asks if a
verbal (i.e., oral) statement from the employer would suffice as
evidence when requested by an authorized FMCSA, State, or local
official in the course of a compliance review. UPS strongly believes
that further clarification is needed regarding the obligations and
responsibilities of an employer to seek information regarding a
driver's experience or, conversely, to provide such information to
another employer.
FMCSA Response: The doubles/triples CDL endorsement is obtained by
passing a knowledge test without a skills component. The purpose of the
requirement that a driver-candidate possess this endorsement for at
least 6 months prior to taking LCV training is to give the driver
adequate time and opportunity to gain experience in operating
combination vehicles having a GVW of less than 80,000 pounds. The
doubles/triples endorsement qualifies drivers to operate combinations
weighing less than 80,000 pounds (e.g., Western doubles). The
endorsement does not qualify drivers to operate either doubles or
triples above the 80,000-pound threshold, and it cannot be substituted
for the LCV Driver-Training Certificate. Drivers possessing the
doubles/triples endorsement may not operate doubles and triples over
80,000 pounds until they successfully complete the required training
and receive the LCV Driver-Training Certificate.
FMCSA believes it is important that drivers acquire this
operational experience with lighter combination vehicles before being
trained in the operation of LCVs. We concluded that the safety benefit
of progressing from combination-vehicle experience to LCV training
justifies the requirement that drivers hold the endorsement for 6
months. Today's rule clarifies that a written--but not an oral--
statement from a previous employer is sufficient evidence of 6 months
of combination-vehicle driving experience.
Substitute for Driver Training--Grandfathering
General
AHAS disputes the statement in the NPRM that ``the combination of a
good driving record and experience with a representative vehicle of the
specific LCV category is an appropriate indication that the individual
has the minimum knowledge and driving skills to operate such a
vehicle.'' AHAS contends that FMCSA has ``no grounds for grandfathering
the vast majority of LCV drivers'' and is thus ``forswearing
significant crash reduction benefits.'' AHAS also argues that the
agency has no authority under Sec. 4007(b) to grandfather any LCV
drivers.
FMCSA Response: The argument that the agency has no authority to
grandfather drivers is contradicted by the broadly discretionary nature
of the statutory mandate. Sec. 4007(b) simply directed the agency to
``establish minimum training requirements'' [emphasis added], i.e.,
requirements sufficient in the judgment of the agency to improve the
safety of LCV operations. Congress did not specify classroom versus on-
the-road instruction, or the degree of crash reduction to be achieved;
nor did it require universal training. In view of the small population
of LCV drivers subject to the rule, the far smaller number of crashes
involving LCVs, the fact that most current LCV drivers have undergone
LCV training, and the shortcomings of available data on the relative
safety of LCV operations, FMCSA has concluded that grandfathering safe,
experienced LCV drivers is an effective means of reducing the costs of
this rule while retaining its safety benefits. The agency has therefore
placed training requirements on the drivers most in need of
instruction--those with no experience in LCVs and current LCV drivers
with flawed safety records. This is entirely consistent with the
Congressional mandate.
FMCSA indicated the strict preconditions for grandfathering in its
proposal, and these conditions are retained in today's rule. A driver
will be eligible only if, during the last two years immediately before
applying for the exemption, he or she had no suspension, revocation, or
cancellation of his or her CDL; no convictions for a major offense
committed while operating a CMV; no convictions for a railroad-highway
grade crossing violation while operating a CMV; no convictions for
violating an out-of-service order; and, above all, no convictions for
violating a State or local traffic law in connection with a CMV crash
[Sec. 380.111(b)(3), (4), (5), (6), and (8), respectively]. The agency
estimates that 1,750 of the 35,000 current LCV drivers will not qualify
for grandfathering. Involvement in CMV crashes, however, will not
automatically bar a driver from being grandfathered, nor does FMCSA
believe it should. A 1996 report from the National Highway Traffic
Safety Administration (NHTSA) shows that in 71 percent of two-vehicle
fatal crashes involving a large truck and another type of vehicle, the
behavior of the other driver was a causative factor while that of the
truck driver was not (``Traffic Safety Facts 1996: Large Trucks'').
There is no reason to believe that the distribution of fault in LCV
crashes is significantly different. In short, current LCV drivers
convicted of a wide range of reckless or irresponsible behaviors will
be required to take the training set forth in today's rule, leaving
only those drivers eligible for grandfathering who have actual
experience operating LCVs and a driving record clear of the most
serious violations. AHAS presented no evidence that good, experienced
LCV drivers pose
a significant crash risk requiring mitigation through training.
At-Fault Crash Involvement While Operating a CMV
Under the proposed rule, drivers who meet certain criteria may be
grandfathered from the new driver training. One such criterion is that,
during the past 2 years, a driver had ``[n]o accident in which he/she
was found to be at fault, while operating a CMV'' [Sec.
380.111(b)(9)]. MFCA, CVSA, and ATA remark that the term at-fault
accident is never defined, nor does the NPRM state who determines fault
when a crash occurs. ATA urges FMCSA to define an at-fault accident to
mean an accident for which a truck driver has been convicted of an
offense that contributed to the crash. ATA also asks that FMCSA provide
guidance, either in the preamble or in an interpretation, regarding
what types of offenses would generally be considered as contributing to
a crash. MFCA asks whether ``at-fault'' simply means a citation of any
kind relating to a CMV accident. MFCA suggests following the criteria
in Sec. 383.51, Disqualification of drivers. CVSA recommends that
FMCSA define the term and then use it throughout the regulation.
FMCSA Response: FMCSA has eliminated Sec. Sec. 380.111(b)(9),
380.203(a)(10), and 380.205(a)(10), which referred to fault, because
States do not uniformly define or assess fault in crashes. The agency
believes that the requirement under Sec. Sec. 380.111(b)(8),
380.203(a)(9), and 380.205(a)(9) for ``no convictions for a violation
of State or local law relating to motor vehicle traffic control arising
in connection with any traffic crash while operating a CMV''
sufficiently addresses the issue of a CMV driver's crash involvement.
Two-Year Driving Experience for Grandfathering
The NPRM proposed that a driver must have 2 years of driving
experience, immediately preceding the date of application for a
``Certificate of Grandfathering,'' in the type of LCV he or she seeks
to continue operating. ATA comments that the phrase ``immediately
preceding'' would present a major problem for motor carriers operating
LCVs because many drivers who are currently qualified could not meet
the proposed qualification requirement. ATA explains that many drivers
have several years of experience driving doubles and/or triples and are
currently qualified to do so. For one reason or another, however, they
are now driving a tractor-trailer combination or Western Doubles, or
are teaching driver training instead of driving. Disqualifying these
individuals would be a hardship on them, and counterproductive for both
motor carrier and employee. Therefore, ATA strongly recommends that
FMCSA revise Sec. 380.111(c)(2) to allow grandfathering of any driver
who currently holds a CDL with a doubles/triples endorsement and is
authorized by a State to operate LCVs. CVSA agrees, stating that ``any
driver who currently holds a CDL with a double/triples endorsement, has
no CDL disqualifications, has not been involved in a preventable crash,
and is authorized by a State to operate LCVs should be grandfathered.''
FMCSA Response: FMCSA has retained the 2-year driving experience
requirement for grandfathered LCV drivers. However, in today's rule the
driver is not required to have operated an LCV continuously during the
previous 2 years, nor must he or she have operated LCVs exclusively.
The driver is required only to have operated LCVs periodically within
the previous 2 years. Drivers often take the tests for endorsement
classes they have no immediate intention of using. Grandfathering
virtually anyone holding a double/triple trailer endorsement could
exempt from training some individuals who had never driven an LCV at
all, despite their having the requisite endorsement. This would change
grandfathering into a simple exemption. The agency rejects that
approach.
Driver-Instructor Requirements
UPS believes the issue of driver-instructor requirements is perhaps
the most problematic portion of the entire proposed rule. UPS requests
that the agency outline the process for driver-instructors to become
certified. UPS also believes it would be most practical to allow UPS
Driver Training School instructors, in turn, to train other UPS
management personnel.
One requirement of the NPRM is that an LCV driver-instructor have
at least 2 years' driving experience in the type of vehicle (LCV double
or triple) for which he or she will provide training, as well as a
Class A CDL with a doubles/triples endorsement. This requirement
presents a major concern for UPS and ATA, which consider it infeasible.
UPS states that, while it operates one of the Nation's safest
commercial vehicle fleets, some of its LCV training personnel have not
had at least 2 years of LCV driving experience. UPS does not believe
that 2 years' experience operating an LCV is a relevant prerequisite
for becoming a highly skilled LCV driver-trainer. All UPS LCV driver-
trainers have successfully completed the UPS Driver Training School but
have not necessarily driven twin trailers or LCV Triples for 2 years.
UPS's position seems to be that driving experience in an LCV is less
important than skill as an instructor. ATA agrees, and comments that
the rule would make a large number of existing driver-instructors
ineligible to continue their training duties. ATA also explains that
motor carriers currently use driver-instructors who have never driven
an LCV but have had a great deal of success training others to drive
LCVs. ATA and UPS agree that the enactment of this provision would
result in significant financial and administrative hardship to motor
carriers.
Additionally, ATA assumes that classroom instructors would not be
included under the definition of a qualified LCV driver-instructor. ATA
comments that classroom instruction activities need no driving
prerequisites. Therefore, ATA believes the rule should clearly state
that classroom instruction personnel need not meet any of the
requirements of a ``qualified LCV driver-instructor.''
FMCSA Response: Based on the information that motor carriers
routinely use nondrivers to teach training courses, FMCSA has revised
the requirements for a qualified LCV driver-instructor in Sec. Sec.
380.105(b), 380.109(a), 380.301, 380.303, 391.53, and the appendix to
part 380. The definition of a qualified LCV driver-instructor now
includes a distinction between (1) classroom instructors and (2) skills
instructors. Motor carriers may use an individual who does not possess
a CDL, a doubles/triples endorsement, or recent CMV driving experience
to instruct or test LCV drivers in knowledge and skills that do not
require the actual operation of an LCV or one of its components.
However, only a skills instructor may train or test driver-candidates
in those skills requiring the operation of an LCV or one of its
components.
Driver Testing
UPS seeks additional guidance and clarification from FMCSA on
proposed requirements for testing methods, proficiency determinations,
and automatic test failure in order to determine if its Driver Training
School meets the standards contemplated by FMCSA regarding these
driver-testing provisions.
ATA directs its comment to Sec. 380.109(a) in the NPRM, which
discusses the administration of driver-student knowledge and skills
tests. This paragraph would require a qualified LCV driver-instructor
to administer
knowledge and skills tests to driver-students. ATA notes that knowledge
tests could be administered by almost anyone since there is no need for
interaction between the driver-student and the instructor. Skills tests
are generally taken on private property on a ``closed course.''
Therefore, a qualified LCV driver-instructor would not be needed. ATA
strongly suggests that FMCSA remove the requirement that a ``qualified
driver-instructor'' administer the knowledge and skills tests to
driver-students and replace the term ``a qualified driver-instructor''
with the term ``an authorized motor carrier or training institution
employee.''
FMCSA Response: Motor carriers needing guidance for testing methods
and proficiency determinations are referred to the ``Examiner's Manual
for Commercial Driver's License Tests.'' You may obtain a copy of the
document from the American Association of Motor Vehicle Administrators
(AAMVA), 4300 Wilson Boulevard, Suite 400, Arlington, Virginia 22203.
Automatic test failure determinations are made at the sole discretion
of the qualified LCV driver-instructor.
Today's rule retains the requirement that only qualified LCV
driver-instructors administer knowledge and skills tests. We anticipate
that a number of small carriers will conduct in-house training to meet
the rule's provisions. As most such training programs will be small,
allowing test administration by persons other than qualified driver-
instructors could open the door to driver-trainees administering tests
to one another. Under the rule, a qualified LCV classroom instructor
may administer knowledge tests (as well as skills tests not involving
actual operation of an LCV or one of its components), while only a
qualified skills instructor may administer skills tests based on actual
operation of an LCV. These standards protect the integrity of knowledge
and skills testing and increase assurances that only qualified LCV
driver-candidates will receive certification.
Merging the LCV Driver-Training Program With the Commercial Driver's
License Program
The National Private Truck Council, Inc. (NPTC) supports the
additional training requirements for LCV drivers and the general
categories of instruction outlined in Sec. 380.201(a). However, NPTC
advocates incorporating the four general LCV training areas into the
CDL testing program rather than creating separate training requirements
with which a motor carrier must comply. NPTC believes integrating the
LCV training areas into the CDL testing program would assist a motor
carrier in attempting to demonstrate the adequacy of driver training in
court cases for crash-related litigation involving its drivers. In
addition the driver's training certification, like the CDL, would
follow the driver from carrier to carrier.
FMCSA Response: LCVs are allowed to operate in fewer than half the
States, and relatively small numbers of CDL drivers are covered under
the LCV training requirements. FMCSA believes that requiring the State
to administer, and enforce at roadside inspections, the LCV training
requirements would add an unnecessary complication to the CDL program.
FMCSA believes the Driver-Training Certificate is sufficient
documentation that a driver has met the LCV training requirement.
Compliance Enforcement
CVSA believes that if an LCV operator is required to obtain
additional training, this should be reflected on the CDL. CVSA is
concerned about the lack of information provided for the roadside
officer, since an additional endorsement will not be added to the CDL.
The officer at roadside will not have access to any of the information
concerning the LCV training, thus making this requirement unenforceable
during a safety inspection. Therefore, any noncompliance will be
discovered only through auditing the recordkeeping requirements for
drivers and motor carriers, and not during a driver/vehicle safety
inspection.
CVSA also questions why the proposed regulation is located in part
380 rather than parts 383 and 391, where other driver-related
regulations are found. CVSA believes codifying this regulation in
another part adds confusion with regard to compliance, both for the
enforcement community and for the industry. CVSA recommends adding the
proposed regulations to part 383 since they are applicable to CDL
drivers.
FMCSA Response: By placing the LCV driver training and related
requirements in part 380, FMCSA is emphasizing that these requirements
are a training responsibility and that compliance would be checked at
the carrier's place of business during a compliance review. Because the
requirement is not a driver licensing issue to be administered by the
State licensing agency, enforcement officials will not check for
compliance at roadside. (Roadside enforcement officials may, however,
check an LCV driver's CDL to verify the presence of a doubles/triples
endorsement.)
Appendix--Knowledge and Skills Training (Appendix to Part 380)
ATA comments that many of the knowledge and skills requirements are
already required for obtaining a CDL, and would therefore simply be
repeated during LCV training. Like a postgraduate course, the training
should build upon knowledge already acquired, not repeat it.
Additionally, ATA strongly suggests that FMCSA eliminate the
requirements already specified in part 383, subpart G, which would
include units 1.2, 1.3, 1.4, and 2.1.
ATA also remarks that some requirements proposed in the appendix to
part 380 would be imposed on LCV drivers, but not on other CDL holders,
even though the situations addressed are not unique to LCVs. ATA states
that security issues (Unit 3.5) are not unique to LCV drivers and asks
why FMCSA finds it necessary to propose this requirement for LCV
drivers when no other CDL holder is required to have this instruction.
Also, ATA states that the proposed maintenance and trouble-shooting
requirements in Unit 4.3 go beyond those currently required for other
CDL holders. ATA does not understand why FMCSA believes that only LCV
drivers should have these skills. Furthermore, some motor carriers
prohibit LCV driver-employees from performing maintenance or emergency
repairs to their complex and high-technology vehicles. Therefore, ATA
also suggests that units 3.5 and 4.3 be eliminated.
FMCSA Response: Although many of the knowledge and skills topics
covered in the LCV training program may be similar to those in the CDL
Licensing Test, the licensing test measures general knowledge and
familiarity with best practices. The LCV training program is intended
to cover topics much more comprehensively and tailor the instruction to
the unique characteristics of an LCV. The proficiency development unit
will allow the driver to apply what is learned in class and to perfect
skills under the supervision of a qualified instructor.
In response to ATA's request, FMCSA has eliminated Unit 2.1--
Inspection, because these skills are adequately covered under Unit
4.3--Maintenance and Trouble-shooting. Unit 3.5--Security has been
revised to refer to Federal and State security requirements including
those of the Transportation Security Administration and the Research
and Special Programs Administration. The agency has also revised the
Unit 4.3 description to include knowledge of certain maintenance
functions and how to
communicate vehicle malfunctions. The rule does not compel a motor
carrier to allow an LCV driver to perform maintenance, but the agency
believes it would be beneficial for LCV drivers to have basic
maintenance and trouble-shooting skills. In some circumstances, it may
be necessary to make temporary repairs that would allow the driver to
move the vehicle to a safer location before permanent repairs are made.
Comments on the Cost-Benefit Analysis
ATA states that FMCSA inadvertently omitted ``the opportunity cost
to the motor carrier.'' A few ATA members have furnished cost figures
for their LCV operations. Using these figures, ATA estimates that the
annual cost to motor carriers of compliance with the rule ``would be
$4,995,650 while the 10-year cost would be $49,956,500.'' Therefore,
ATA estimates that the 10-year cost ``would exceed the 10-year benefits
by $25,556,500 when you consider a 10 percent crash rate reduction; for
a five percent accident rate reduction, costs would exceed benefits by
$12,778,250.''
In addition, ATA believes that ``because LCVs have such an
exemplary safety record, FMCSA would be hard-pressed to develop a
prescriptive training requirement that would pass a cost-benefit
analysis. ATA, therefore, seriously questions the need for mandatory
LCV training.'' Recognizing, however, that the agency is under
Congressional direction to develop an LCV training requirement, ``ATA
encourages the agency to develop a training requirement that is
performance-based, with at-fault crash rates as the measure of
performance for motor carriers.''
FMCSA Response: ATA is correct that FMCSA should have explicitly
included the opportunity cost to motor carriers of requiring some of
their drivers to undergo training. FMCSA implicitly recognized this
cost by including drivers' wages in its NPRM estimate of the cost of
LCV training, but did not include the profits motor carriers would
forgo. We have added these costs to the regulatory evaluation for
today's rule. However, in the above-quoted calculations based on
figures provided by specific carriers, ATA overestimates the cost of
compliance with the LCV training requirements by including motor
carriers' entire LCV operating costs. Although carriers will forfeit
some revenue as a result of LCV driver training, those losses will be
partly offset by reduced costs: Motor carriers will not have hourly
operating costs (e.g., fuel, wear and tear, tires) for drivers being
trained. See the regulatory evaluation for a detailed comparison of
costs and benefits.
Comments on the Federalism Assessment
ATA asks why FMCSA did not include an implementation date for State
adoption of the proposed rule. According to ATA, 22 States allow the
operation of LCVs within their borders, and many of those States have
driver and vehicle requirements for LCV operations. Because FMCSA
asserts that nothing in the NPRM preempts any State law or regulation,
motor carriers and drivers that operate LCVs could be required to
comply with two sets of training requirements. This would be confusing
to the regulated motor carriers and would be considered
counterproductive. ATA argues that the trucking industry needs a
standardized rule that applies nationwide, and recommends that FMCSA
review its Federalism assessment, revise it, and include an
implementation date for State adoption.
FMCSA Response: Under the MCSAP, States have up to 3 years to adopt
regulations compatible with the Federal Motor Carrier Safety
Regulations [49 CFR 350.331(d)]. In any case, a State with special LCV
requirements must continue to enforce them pursuant to the ISTEA freeze
on the length and weight of LCVs and long doubles and triples [49
U.S.C. 31112(d)(1) and 23 U.S.C. 127(d)(1)(B), implemented by 23 CFR
658.21]. Failure to do so would force FHWA, our sister agency within
DOT, to withhold some of that State's Federal-aid highway funds or to
take injunctive action against the State in Federal court. For both
these reasons, it would be inappropriate to preempt current State
regulations.
Rulemaking Analyses and Notices
Executive Order 12866 (Regulatory Planning and Review) and DOT
Regulatory Policies and Procedures
FMCSA has determined that this action is a significant regulatory
action within the meaning of Executive Order 12866, and is significant
within the meaning of the Department of Transportation's regulatory
policies and procedures (DOT Order 2100.5 dated May 22, 1980; 44 FR
11034, February 26, 1979) because of significant public interest in the
issues relating to CMV safety and training of certain CMV drivers. The
Office of Management and Budget has completed its review of this rule
under Executive Order 12866.
Regulatory Evaluation
Following is a summary of the regulatory evaluation. The complete
evaluation has been placed in the docket.
Approximately 35,000 drivers currently operate LCVs; most are
expected to be grandfathered. Approximately 1,200 LCV drivers are
estimated to require training annually. ANPRM docket comments and
conversations with industry representatives and analysts suggest that
LCV drivers currently obtain about half the amount of training we
estimate is needed to cover the topics outlined in this rule, leaving
an additional requirement of approximately 50 hours of training per
driver. The net cost of training (including drivers' wages) is $45.50
an hour. This results in a 10-year cost of approximately $29 million.
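As an illustrative check only (not the full calculation in the
regulatory evaluation, which also reflects first-year training of
non-grandfathered drivers, forgone carrier profits, and 7 percent
discounting), the direct annual training cost implied by the figures
above is roughly:

\[
1{,}200\ \text{drivers per year} \times 50\ \text{hours} \times \$45.50\ \text{per hour} \approx \$2.7\ \text{million per year}.
\]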
Precisely quantifying the benefits of this rule is difficult.
Congress clearly assumed that increased training reduces crash rates,
and many analysts agree with this position. However, quantitative data
examining the relationship between training and crash rates are not
plentiful, and the studies we have located have not found a strong
and consistent relationship. Therefore, we performed a sensitivity
analysis, estimating the benefits over a range of reductions in the
crash rates of drivers who have received training. Net benefits ranged
from -$12 million for a 5 percent reduction in the crash rate to +$40
million for a 20 percent reduction.
Table 1 presents the results for a number of possible deterrence
levels.
Table 1.--Benefit Cost Ratio With Different Crash Rate Reductions
------------------------------------------------------------------------
Crash reduction 5% 10% 15% 20%
------------------------------------------------------------------------
B/C Ratio........................... 0.6 1.2 1.8 2.4
------------------------------------------------------------------------
Table 2 shows costs, benefits, and the number of crashes and
drivers that would be affected by these proposals, with an assumed 10
percent reduction in crashes.
Table 2.--Summary Results With 10% Crash Rate Reductions
[millions of dollars]
--------------------------------------------------------------------------------------------------------------------------------------------------------
Trained annually 10-year costs 10-year benefits Net benefits B/C ratio Crashes prevented
--------------------------------------------------------------------------------------------------------------------------------------------------------
1,172............................................... $28.8 $34.4 $5.6 1.2 315
--------------------------------------------------------------------------------------------------------------------------------------------------------
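The ratio and net-benefit figures in Table 2 follow directly from the
tabulated 10-year totals, as a quick check shows:

\[
\frac{\$34.4\ \text{million}}{\$28.8\ \text{million}} \approx 1.2, \qquad \$34.4\ \text{million} - \$28.8\ \text{million} = \$5.6\ \text{million}.
\]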
This analysis assumes that, under the rule, prospective LCV drivers
will obtain an additional 50 hours of training. This is a conservative
estimate, in that it is on the high end of the range of likely training
time. Nonetheless, because of uncertainty over how many hours of
training will be needed, we performed a sensitivity analysis for
different assumed hours of training. As expected, the sensitivity
analysis shows that net benefits decline as the assumed number of
training hours increases.
All costs and benefits are over a 10-year period, and are
discounted at a 7 percent rate.
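For readers unfamiliar with the convention, a standard 10-year
present-value calculation of the kind used in such evaluations (the
evaluation placed in the docket may differ in details such as the
timing of cash flows) takes the form:

\[
PV(\text{costs}) = \sum_{t=1}^{10} \frac{C_t}{(1.07)^t}, \qquad PV(\text{benefits}) = \sum_{t=1}^{10} \frac{B_t}{(1.07)^t}, \qquad \text{Net benefits} = PV(\text{benefits}) - PV(\text{costs}),
\]

where C_t and B_t are the undiscounted costs and benefits accruing in
year t.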
Regulatory Flexibility Act
The Regulatory Flexibility Act of 1980 (5 U.S.C. 601-612), as
amended, requires Federal agencies to ``* * * endeavor, consistent with
the objectives of the rule and of applicable statutes, to fit
regulatory and informational requirements to the scale of the
businesses, organizations, and governmental jurisdictions subject to
regulation.'' Accordingly, DOT policy requires an analysis of the
impact of all regulations (or proposals) on small entities, and
mandates that agencies shall strive to lessen any adverse effects on
these businesses. The following sections contain the FMCSA regulatory
flexibility analysis.
Need and Objective for the Rule
This action is being promulgated in response to Congressional
direction. Specifically, Sec. 4007(b) of ISTEA directed the Secretary
to promulgate regulations requiring training for LCV drivers. Congress
mandated this action because of concern over the number of LCV crashes.
The objective of this rule is to reduce the number of LCV crashes
through better training of LCV drivers.
Significant Issues Raised in Response to IRFA
Commenters to the NPRM docket did not raise any significant issues
concerning the Initial Regulatory Flexibility Analysis. None of the
eight commenters addressed any small business concerns.
Number of Small Entities to Which the Action Will Apply
This action will apply to all small entities regulated by FMCSA
that own or operate LCVs. Using the number of drivers as a proxy for
size, the majority of carriers can reasonably be described as small. As
of April 2002, there were 610,000 motor carriers on the FMCSA Motor
Carrier Management Information System (MCMIS) census file. Of the
500,000 carriers for which we have driver data, 435,000 (87 percent)
have six or fewer drivers. Assuming that 87 percent of the 110,000
carriers with no driver information are also small, the total number of
carriers with six or fewer drivers would exceed half a million.
Reporting, Recordkeeping, and Other Compliance Requirements
This action will impose a very modest burden on small entities,
since it largely regulates the actions of drivers rather than motor
carriers. Nonetheless, this action does impose some recordkeeping
requirements on motor carriers. The primary carrier requirement would
be to verify drivers' eligibility before allowing them to operate an
LCV. In addition, carriers must maintain a copy of the required driver-
training certificate in each driver qualification (DQ) file. Carriers
are currently required to maintain a DQ file for each driver, as
outlined in part 391 of the Federal Motor Carrier Safety Regulations.
No special skills are required to verify eligibility to operate an LCV
or to place a driver-training certificate in a DQ file.
Agency Steps To Minimize Impacts on Small Entities
As discussed above, while this rule will affect a significant
number of small entities, the impact on any individual small carrier
will be minimal. Therefore, FMCSA certifies that this regulation will
not have a significant impact on the small businesses subject to
today's final rule.
Executive Order 13132 (Federalism)
This action has been analyzed in accordance with the principles and
criteria contained in Executive Order 13132. It has been determined
that this rulemaking does not have a substantial direct effect on
States, nor would it limit the policy-making discretion of the States.
Nothing in this document preempts any State law or regulation.
Executive Order 12372 (Intergovernmental Review)
Catalog of Federal Domestic Assistance Program Number 20.217, Motor
Carrier Safety. The regulations implementing Executive Order 12372
regarding intergovernmental consultation on Federal programs and
activities do not apply to this rule.
Paperwork Reduction Act
Under the Paperwork Reduction Act of 1995 (PRA) (44 U.S.C. 3501-
3520), Federal agencies must obtain approval from the Office of
Management and Budget (OMB) for each collection of information they
conduct, sponsor, or require through regulations. FMCSA has determined
that this final rule creates a new collection of information requiring
OMB's approval. This PRA section addresses the information collection
burden for certifying new LCV drivers and current, non-grandfathered
LCV drivers; the burden associated with grandfathering those current
LCV drivers who are eligible for certification; and the burden
associated with certifying that driver-instructors satisfy the
qualification requirements of Sec. 380.301.
FMCSA estimates that 35,000 drivers currently operate LCVs. Ninety-
five percent of these drivers (or 33,250 LCV drivers) are expected to
be eligible to be grandfathered during the first year after the rule
becomes effective. The agency also estimates that approximately 1,200
new LCV drivers would require training each year, with an additional
1,750 non-grandfathered LCV drivers (or 5 percent of LCV drivers
currently operating) requiring training during the first year. In
addition, there would be a burden to the motor carrier or other
training entity to complete, photocopy, and file the training
certification form for LCV operation. FMCSA estimates that 10
minutes would be required for this paperwork activity, resulting in a
first-year information collection burden of 491.7 hours, or rounded to
the nearest whole hour, 492 burden hours [(1,200 new LCV drivers +
1,750 non-grandfathered LCV drivers) x 10 minutes per motor carrier/
training entity, divided by 60 minutes = 491.7 hours] and an annual
information collection burden in subsequent years of 200 hours [1,200
LCV drivers x 10 minutes, divided by 60 minutes = 200 hours].
For grandfathering 33,250 LCV drivers, there would be a one-time,
one-year-only information collection burden of 16,625 hours, since LCV
drivers can be grandfathered only during the first year after the rule
becomes effective. There are two parts to the burden for grandfathered
drivers: (1) the burden for the driver to collect and provide the
information to the motor carrier, and (2) the burden for the motor
carrier to review the documents and to complete, duplicate, and file
the certification form. FMCSA estimates that it would take
approximately 15 minutes for the driver to collect the necessary
information and provide the documentation to the motor carrier, and 15
minutes for the motor carrier to review the information, complete the
certification, and duplicate and file the document. Therefore, the
burden associated with grandfathering the 33,250 LCV drivers would be
16,625 burden hours [(33,250 LCV drivers x 15 minutes per driver,
divided by 60 minutes = 8,312.5 hours) + (33,250 LCV drivers x 15
minutes per motor carrier, divided by 60 minutes = 8,312.5 hours) =
16,625 hours].
FMCSA estimates that the burden associated with driver-instructor
certification would be 70 burden hours during the first year after the
rule becomes effective and 3 annual burden hours thereafter. The agency
based these estimates on the following.
We estimate that during the first year, training 1,200 new LCV
drivers and 1,750 non-grandfathered LCV drivers would require 148
driver-instructors teaching four classes of five students each [2,950
drivers, divided by five students per class, divided by four classes
per year = 147.5 LCV driver-instructors, or rounded to the nearest
whole number, 148 instructors]. Approximately one-third (or 49) of the
instructors would be classroom instructors and two-thirds (or 99) would be
skills instructors. Instructors would provide to the training school
(or to the training entity of the motor carrier) documentation
certifying their qualifications under Sec. 380.301.
FMCSA estimates that a classroom instructor would take 10 minutes
to collect this instructor documentation and provide it to the
certifying training school or motor carrier, while the skills
instructor would require 15 minutes to collect and provide this
documentation. The training school or motor carrier would require an
estimated 15 minutes to review the documentation, complete the
instructor certification, and duplicate and file the document.
Therefore, the first-year burden associated with instructor
certification would be 70 burden hours [(49 classroom instructors x 10
minutes per instructor = 490 minutes, divided by 60 minutes = 8.2
hours, or rounded to the nearest whole hour, 8 burden hours) + (99
skills instructors x 15 minutes per instructor = 1,485 minutes, divided
by 60 minutes = 24.75 hours, or rounded to the nearest whole hour, 25
burden hours) + (148 total instructors x 15 minutes' administrative
burden per instructor certification = 2,220 minutes, divided by 60
minutes = 37 burden hours) = 70 hours].
Because the specialized nature of LCV training tends to keep
instructor turnover low, FMCSA estimates an annual turnover rate of 10
percent. Based on an estimated annual instructor pool of 60 instructors
to train 1,200 new LCV drivers (with each instructor teaching four
classes of five students), six new instructors (two classroom
instructors and four skills instructors) would need to be certified
each year after the first year. Therefore, the estimated subsequent-
year annual burden associated with instructor certification would be
2.8 burden hours, or rounded to the nearest whole hour, 3 burden hours
[(two classroom instructors x 10 minutes = 20 minutes) + (four skills
instructors x 15 minutes = 60 minutes) + (six new instructors x 15
minutes' administrative burden per instructor certification = 90
minutes) = 170 minutes, divided by 60 minutes = 2.8 hours].
Thus, the total first-year burden associated with this rule, when
promulgated, is estimated to be 17,187 burden hours [492 + 16,625 + 70
= 17,187 hours]. The information collection burden for subsequent
years would drop to 203 burden hours [200 + 3 = 203 hours].
------------------------------------------------------------------------
 Activity--Burden to complete and process                   Burden hours
 the annual Certification form for LCV        First-year   for subsequent
 drivers and to certify driver-instructors  burden hours       years
------------------------------------------------------------------------
First-year training of 1,200 new LCV
 drivers + 1,750 non-grandfathered LCV
 drivers................................          492      ..............
First-year instructor certification for
 1,200 new LCV drivers + 1,750 non-
 grandfathered LCV drivers..............           70      ..............
Training & instructor certification in
 subsequent years--1,200 new LCV drivers
 annually...............................  ..............          203
Grandfathering 33,250 LCV drivers
 currently operating in the first year..       16,625      ..............
                                          ------------------------------
    Total...............................       17,187             203
------------------------------------------------------------------------
OMB Control Number: 2126-(new).
Title: Training Certification for Drivers of Longer Combination
Vehicles.
Respondents: 36,348 during the first year; 1,260 in subsequent
years.
Estimated Annual Hour Burden for the Information Collection: Year 1
= 17,187 hours; subsequent years = 203 hours.
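The respondent totals are not broken out above; a plausible
reconstruction, assuming they simply sum the driver and instructor
populations identified earlier in this PRA section, is:

\[
33{,}250 + 1{,}750 + 1{,}200 + 148 = 36{,}348\ \text{first-year respondents}, \qquad 1{,}200 + 60 = 1{,}260\ \text{respondents in subsequent years}.
\]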
Interested parties are invited to send comments regarding any
aspect of these information collection requirements, including but not
limited to: (1) Whether the collection of information is necessary for
the performance of the functions of FMCSA, including whether the
information has practical utility; (2) the accuracy of the estimated
burden; (3) ways to enhance the quality, utility, and clarity of the
collected information; and (4) ways to minimize the collection burden
without reducing the quality of the information collected.
You may submit any additional comments on the information
collection burden addressed by this final rule to the Office of
Management and Budget. The OMB must receive your comments by April 29,
2004. You must mail or hand deliver your comments to: Attention: Desk
Officer for the Department of Transportation, Docket Library, Office of
Information and Regulatory Affairs, Office of Management and Budget,
Room 10102, 725 17th Street, NW., Washington, DC 20503.
National Environmental Policy Act
The agency analyzed this final rule for the purpose of the National
Environmental Policy Act of 1969
(NEPA) (42 U.S.C. 4321 et seq.) and determined under our environmental
procedures Order 5610.1 (published in the March 1, 2004 Federal
Register at 69 FR 9680 with an effective date of March 30, 2004), that
this action is categorically excluded (CE) under Appendix 2, paragraph
6.d of the Order from further environmental documentation. That CE
relates to establishing regulations and actions taken pursuant to these
regulations that concern the training, qualifying, licensing,
certifying, and managing of personnel. In addition, the agency believes
that the action includes no extraordinary circumstances that will have
any effect on the quality of the environment. Thus, the action does not
require an environmental assessment or an environmental impact
statement.
We have also analyzed this rule under the Clean Air Act, as amended
(CAA) section 176(c), (42 U.S.C. 7401 et seq.) and implementing
regulations promulgated by the Environmental Protection Agency.
Approval of this action is exempt from the CAA's General Conformity
requirement since it involves policy development and civil enforcement
activities, such as investigations, inspections, examinations, and the
training of law enforcement personnel. See 40 CFR 93.153(c)(2). It will
not result in any emissions increase nor will it have any potential to
result in emissions that are above the general conformity rule's de
minimis emission threshold levels. Moreover, it is reasonably
foreseeable that the rule change will not increase total CMV mileage
or change the routing of CMVs, the way CMVs operate, or the CMV fleet-
mix of motor carriers. This action merely establishes standards for minimum
training requirements for operators of LCVs and requirements for the
instructors who train them.
Executive Order 13211 (Energy Supply, Distribution, or Use)
The agency has analyzed this action under Executive Order 13211,
Actions Concerning Regulations That Significantly Affect Energy Supply,
Distribution, or Use. This action is not a significant energy action
within the meaning of Section 4(b) of the Executive Order because it is
not likely to have a significant adverse effect on the supply,
distribution, or use of energy. This rule establishes training
requirements for operators of LCVs and sets forth requirements for
trainers of such operators. This action has no effect on the supply or
use of energy, nor do we believe it will cause a shortage of drivers
qualified to distribute energy, such as gasoline, fuel oil, or other
fuels.
Unfunded Mandates Reform Act of 1995
This rule does not impose a Federal mandate resulting in the
expenditure by State, local, or tribal governments, in the aggregate,
or by the private sector, of $100 million or more in any one year (2
U.S.C. 1531 et seq.). Under this rule, there are no costs to States,
and costs to the private sector should be minimal. This action
establishes minimum training standards for operators of LCVs.
Although not required to do so under the FMCSRs, motor carriers
routinely provide similar training to their drivers who operate LCVs.
The rule does not stipulate that motor carriers must provide such
training, but requires them to use only those drivers and driver-
instructors who have met the standards established by the rule.
Executive Order 12630 (Taking of Private Property)
This rule will not effect a taking of private property or otherwise
have taking implications under Executive Order 12630, Governmental
Actions and Interference with Constitutionally Protected Property Rights.
Executive Order 12988 (Civil Justice Reform)
This action meets applicable standards in sections 3(a) and 3(b)(2)
of E.O. 12988, Civil Justice Reform, to minimize litigation, eliminate
ambiguity, and reduce burden.
Executive Order 13045 (Protection of Children)
The agency has analyzed this action under Executive Order 13045,
Protection of Children from Environmental Health Risks and Safety
Risks. This rule sets forth training requirements for LCV drivers and
sets standards for instructors of such drivers. Therefore, FMCSA
certifies that this action is not an economically significant rule and
does not concern an environmental risk to health or safety that may
disproportionately affect children.
List of Subjects
49 CFR Part 380
Driver training, instructor requirements.
49 CFR Part 391
Highways and roads, Motor vehicle safety.
For the reasons stated in the preamble, the Federal Motor Carrier
Safety Administration amends 49 CFR chapter III as set forth below:
1. Chapter III is amended by adding part 380 to read as follows:
PART 380--SPECIAL TRAINING REQUIREMENTS
Subpart A--Longer Combination Vehicle (LCV) Driver-Training and Driver-
Instructor Requirements--General
Sec.
380.101 Purpose and scope.
380.103 Applicability.
380.105 Definitions.
380.107 General requirements.
380.109 Driver testing.
380.111 Substitute for driver training.
380.113 Employer responsibilities.
Subpart B--LCV Driver-Training Program
380.201 General requirements.
380.203 LCV Doubles.
380.205 LCV Triples.
Subpart C--LCV Driver-Instructor Requirements
380.301 General requirements.
380.303 Substitute for instructor requirements.
380.305 Employer responsibilities.
Subpart D--Driver-Training Certification
380.401 Certification document.
Appendix to Part 380--LCV Driver Training Programs, Required
Knowledge and Skills
Authority: 49 U.S.C. 31136, 31307, and 31502; Sec. 4007(b) of
Pub. L. 102-240 (105 Stat. 2152); 49 CFR 1.73.
Subpart A--Longer Combination Vehicle (LCV) Driver-Training and
Driver-Instructor Requirements--General
Sec. 380.101 Purpose and scope.
(a) Purpose. The purpose of this part is to establish minimum
requirements for operators of longer combination vehicles (LCVs) and
LCV driver-instructors.
(b) Scope. This part establishes:
(1) Minimum training requirements for operators of LCVs;
(2) Minimum qualification requirements for LCV driver-instructors;
and
(3) Procedures for determining compliance with this part by
operators, instructors, training institutions, and employers.
Sec. 380.103 Applicability.
The rules in this part apply to all operators of LCVs in interstate
commerce, employers of such persons, and LCV driver-instructors.
Sec. 380.105 Definitions.
(a) The definitions in part 383 of this subchapter apply to this
part, except where otherwise specifically noted.
(b) As used in this part:
Classroom instructor means a qualified LCV driver-instructor who
provides knowledge instruction that
does not involve the actual operation of a longer combination vehicle
or its components. Instruction may take place in a parking lot, garage,
or any other facility suitable for instruction.
Longer combination vehicle (LCV) means any combination of a truck-
tractor and two or more trailers or semi-trailers, which operate on the
National System of Interstate and Defense Highways with a gross vehicle
weight (GVW) greater than 36,288 kilograms (80,000 pounds).
LCV Double means an LCV consisting of a truck-tractor in
combination with two trailers and/or semi-trailers.
LCV Triple means an LCV consisting of a truck-tractor in
combination with three trailers and/or semi-trailers.
Qualified LCV driver-instructor means an instructor meeting the
requirements contained in subpart C of this part. There are two types
of qualified LCV driver-instructors: (1) classroom instructor and (2)
skills instructor.
Skills instructor means a qualified LCV driver-instructor who
provides behind-the-wheel instruction involving the actual operation of
a longer combination vehicle or its components outside a classroom.
Training institution means any technical or vocational school
accredited by an accrediting institution recognized by the U.S.
Department of Education. A motor carrier's training program for its
drivers or an entity that exclusively offers services to a single motor
carrier is not a training institution.
Sec. 380.107 General requirements.
(a) Except as provided in Sec. 380.111, a driver who wishes to
operate an LCV shall first take and successfully complete an LCV
driver-training program that provides the knowledge and skills
necessary to operate an LCV. The specific types of knowledge and skills
that a training program shall include are outlined in the appendix to
this part.
(b) Before a person receives training:
(1) That person shall present evidence to the LCV driver-instructor
showing that he/she meets the general requirements set forth in subpart
B of this part for the specific type of LCV training to be taken.
(2) The LCV driver-instructor shall verify that each trainee
applicant meets the general requirements for the specific type of LCV
training to be taken.
(c) Upon successful completion of the training requirement, the
driver-student shall be issued an LCV Driver Training Certificate by a
certifying official of the training entity in accordance with the
requirements specified in subpart D of this part.
Sec. 380.109 Driver testing.
(a) Testing methods. The driver-student must pass knowledge and
skills tests in accordance with the following requirements, to
determine whether a driver-student has successfully completed an LCV
driver-training program as specified in subpart B of this part. The
written knowledge test may be administered by any qualified driver-
instructor. The skills tests, based on actual operation of an LCV, must
be administered by a qualified LCV skills instructor.
(1) All tests shall be constructed to determine if the driver-
student possesses the required knowledge and skills set forth in the
appendix to this part for the specific type of LCV training program
being taught.
(2) Instructors shall develop their own tests for the specific type
of LCV-training program being taught, but those tests must be at least
as stringent as the requirements set forth in paragraph (b) of this
section.
(3) LCV driver-instructors shall establish specific methods for
scoring the knowledge and skills tests.
(4) Passing scores must meet the requirements of paragraph (b) of
this section.
(5) Knowledge and skills tests shall be based upon the information
taught in the LCV training programs as set forth in the appendix to
this part.
(6) Each knowledge test shall address the training provided during
both theoretical and behind-the-wheel instruction, and include at least
one question from each of the units listed in the table to the appendix
to this part, for the specific type of LCV training program being
taught.
(7) Each skills test shall include all the maneuvers and operations
practiced during the Proficiency Development unit of instruction
(behind-the-wheel instruction), as described in the appendix to this
part, for the specific type of LCV training program being taught.
(b) Proficiency determinations. The driver-student must meet the
following conditions to be certified as an LCV driver:
(1) Answer correctly at least 80 percent of the questions on each
knowledge test; and
(2) Demonstrate that he/she can successfully perform all of the
skills addressed in paragraph (a)(7) of this section.
(c) Automatic test failure. Failure to obey traffic laws or
involvement in a preventable crash during the skills portion of the
test will result in automatic failure. Automatic test failure
determinations are made at the sole discretion of the qualified LCV
driver-instructor.
(d) Guidance for testing methods and proficiency determinations.
Motor carriers should refer to the Examiner's Manual for Commercial
Driver's License Tests for help in developing testing methods and
making proficiency determinations. You may obtain a copy of this
document by contacting the American Association of Motor Vehicle
Administrators (AAMVA), 4300 Wilson Boulevard, Suite 400, Arlington,
Virginia 22203.
Sec. 380.111 Substitute for driver training.
(a) Grandfather clause. The LCV driver-training requirements
specified in subpart B of this part do not apply to an individual who
meets the conditions set forth in paragraphs (b), (c), and (d) of this
section. A motor carrier must ensure that an individual claiming
eligibility to operate an LCV on the basis of this section meets these
conditions before allowing him/her to operate an LCV.
(b) An individual must certify that, during the 2-year period
immediately preceding the date of application for a Certificate of
Grandfathering, he/she had:
(1) A valid Class A CDL with a ``double/triple trailers''
endorsement;
(2) No more than one driver's license;
(3) No suspension, revocation, or cancellation of his/her CDL;
(4) No convictions for a major offense while operating a CMV as
defined in Sec. 383.51(b) of this subchapter;
(5) No convictions for a railroad-highway grade crossing offense
while operating a CMV as defined in Sec. 383.51(d) of this subchapter;
(6) No convictions for violating an out-of-service order as defined
in Sec. 383.51(e) of this subchapter;
(7) No more than one conviction for a serious traffic violation, as
defined in Sec. 383.5 of this subchapter, while operating a CMV; and
(8) No convictions for a violation of State or local law relating
to motor vehicle traffic control arising in connection with any traffic
crash while operating a CMV.
(c) An individual must certify and provide evidence that he/she:
(1) Is regularly employed in a job requiring the operation of a CMV
that requires a CDL with a double/triple trailers endorsement; and
(2) Has operated, during the 2 years immediately preceding the date
of application for a Certificate of Grandfathering, vehicles
representative
of the type of LCV that he/she seeks to continue operating.
(d) A motor carrier must issue a Certificate of Grandfathering to a
person who meets the requirements of this section and must maintain a
copy of the certificate in the individual's Driver Qualification file.
[GRAPHIC] [TIFF OMITTED] TR30MR04.000
(e) An applicant may be grandfathered under this section only
during the year following June 1, 2004.
Sec. 380.113 Employer responsibilities.
(a) No motor carrier shall:
(1) Allow, require, permit or authorize an individual to operate an
LCV unless he/she meets the requirements in Sec. Sec. 380.203 or
380.205 and has been issued the LCV driver-training certificate
described in Sec. 380.401. This provision does not apply to
individuals who are eligible for the substitute for driver training
provision in Sec. 380.111.
(2) Allow, require, permit, or authorize an individual to operate
an LCV which the LCV driver-training certificate, CDL, and CDL
endorsement(s) do not authorize the driver to operate. This provision
applies to individuals employed by or under contract to the motor
carrier.
(b) A motor carrier that employs or has under contract LCV drivers
shall provide evidence of the certifications required by Sec. 380.401
or Sec. 380.111 of this part when requested by an authorized FMCSA,
State, or local official in the course of a compliance review.
Subpart B--LCV Driver-Training Program
Sec. 380.201 General requirements.
(a) The LCV Driver-Training Program that is described in the
appendix to this part requires training using an LCV Double or LCV
Triple and must include the following general categories of
instruction:
(1) Orientation;
(2) Basic operation;
(3) Safe operating practices;
(4) Advanced operations; and
(5) Nondriving activities.
(b) The LCV Driver-Training Program must include the minimum topics
of training set forth in the appendix to this part and behind-the-wheel
instruction that is designed to provide an opportunity to develop the
skills outlined under the Proficiency Development unit of the training
program.
Sec. 380.203 LCV Doubles.
(a) To qualify for the training necessary to operate an LCV Double,
a driver-student shall, during the 6 months immediately preceding
application for training, have:
(1) A valid Class A CDL with a double/triple trailer endorsement;
(2) Driving experience in a Group A vehicle as described in Sec.
383.91 of this subchapter. Evidence of driving experience shall be an
employer's written statement that the driver has, for at least 6 months
immediately preceding application, operated a Group A vehicle while
under his/her employ;
(3) No more than one driver's license;
(4) No suspension, revocation, or cancellation of his/her CDL;
(5) No convictions for a major offense, as defined in Sec.
383.51(b) of this subchapter, while operating a CMV;
(6) No convictions for a railroad-highway grade crossing offense,
as defined in Sec. 383.51(d) of this subchapter, while operating a
CMV;
(7) No convictions for violating an out-of-service order as defined
in Sec. 383.51(e) of this subchapter;
(8) No more than one conviction for a serious traffic violation, as
defined in Sec. 383.5 of this subchapter, while operating a CMV; and
(9) No convictions for a violation of State or local law relating
to motor vehicle traffic control arising in connection with any traffic
crash while operating a CMV.
(b) Driver-students meeting the preliminary requirements in
paragraph (a) of this section shall successfully complete a training
program that meets the minimum unit requirements for LCV Doubles as set
forth in the appendix to this part.
(c) Driver-students who successfully complete the Driver Training
Program for LCV Doubles shall be issued a certificate, in accordance
with subpart D of this part, indicating the driver is qualified to
operate an LCV Double.
Sec. 380.205 LCV Triples.
(a) To qualify for the training necessary to operate an LCV Triple,
a driver-student shall, during the 6 months immediately preceding
application for training, have:
(1) A valid Class A CDL with a double/triple trailer endorsement;
(2) Experience operating the vehicle listed under paragraph
(a)(2)(i) or (a)(2)(ii) of this section. Evidence of driving experience
shall be an employer's written statement that the driver has, during
the 6 months immediately preceding application, operated the applicable
vehicle(s):
(i) Group A truck-tractor/semi-trailer combination as described in
Sec. 383.91 of this subchapter; or
(ii) Group A truck-tractor/semi-trailer/trailer combination that
operates at a gross vehicle weight of 80,000 pounds or less;
(3) No more than one driver's license;
(4) No suspension, revocation, or cancellation of his/her CDL;
(5) No convictions for a major offense, as defined in Sec.
383.51(b) of this subchapter, while operating a CMV;
(6) No convictions for a railroad-highway grade crossing offense,
as defined in Sec. 383.51(d) of this subchapter, while operating a
CMV;
(7) No convictions for violating an out-of-service order, as
defined in Sec. 383.51(e) of this subchapter;
(8) No more than one conviction for a serious traffic violation, as
defined in Sec. 383.5 of this subchapter, while operating a CMV; and
(9) No convictions for a violation of State or local law relating
to motor vehicle traffic control arising in connection with any traffic
crash, while operating a CMV.
(b) Driver-students meeting the preliminary requirements in
paragraph (a) of this section shall successfully complete a training
program that meets the minimum unit requirements for LCV Triples as set
forth in the appendix to this part.
(c) Driver-students who successfully complete the Driver Training
Program for LCV Triples shall be issued a certificate, in accordance
with subpart D of this part, indicating the driver is qualified to
operate an LCV Triple.
Subpart C--LCV Driver-Instructor Requirements
Sec. 380.301 General requirements.
There are two types of LCV driver-instructors: Classroom
instructors and Skills instructors. Except as provided in Sec.
380.303, you must meet the conditions under paragraph (a) or paragraph
(b) of this section to qualify as an LCV driver-instructor.
(a) Classroom instructor. To qualify as an LCV Classroom
instructor, a person shall:
(1) Have audited the driver-training course that he/she intends to
instruct.
(2) If employed by a training institution, meet all State
requirements for a vocational instructor.
(b) Skills instructor. To qualify as an LCV skills instructor, a
person shall:
(1) Provide evidence of successful completion of the Driver-
Training Program requirements, as set forth in subpart B of this part,
when requested by employers and/or an authorized FMCSA, State, or local
official in the course of a compliance review. The Driver-Training
Program must be for the operation of CMVs representative of the subject
matter that he/she will teach.
(2) If employed by a training institution, meet all State
requirements for a vocational instructor;
(3) Possess a valid Class A CDL with all endorsements necessary to
operate the CMVs applicable to the subject matter being taught (LCV
Doubles and/or LCV Triples, including any specialized variation
thereof, such as a tank vehicle, that requires an additional
endorsement); and
(4) Have at least 2 years' CMV driving experience in a vehicle
representative of the type of driver training to be provided (LCV
Doubles or LCV Triples).
Sec. 380.303 Substitute for instructor requirements.
(a) Classroom instructor. The requirements specified under Sec.
380.301(a) of this part for a qualified LCV driver-instructor are
waived for a classroom instructor-candidate who has 2 years of recent
satisfactory experience teaching the classroom portion of a program
similar in content to that set forth in the appendix to this part.
(b) Skills instructor. The requirements specified under Sec.
380.301(b) of this part for a qualified LCV driver-instructor are
waived for a skills instructor-candidate who:
(1) Meets the conditions of Sec. 380.111(b);
(2) Has CMV driving experience during the previous 2 years in a
vehicle representative of the type of LCV that is the subject of the
training course to be provided;
(3) Has experience during the previous 2 years in teaching the
operation of the type of LCV that is the subject of the training course
to be provided; and
(4) If employed by a training institution, meets all State
requirements for a vocational instructor.
Sec. 380.305 Employer responsibilities.
(a) No motor carrier shall: (1) Knowingly allow, require, permit or
authorize a driver-instructor in its employ, or under contract to the
motor carrier, to provide LCV driver training unless such person is a
qualified LCV driver-instructor under the requirements of this subpart;
or
(2) Contract with a training institution to provide LCV driver
training unless the institution:
(i) Uses instructors who are qualified LCV driver-instructors under
the requirements of this subpart;
(ii) Is accredited by an accrediting institution recognized by the
U.S. Department of Education;
(iii) Is in compliance with all applicable State training school
requirements; and
(iv) Identifies drivers certified under Sec. 380.401 of this part,
when requested by employers and/or an authorized FMCSA, State, or local
official in the course of a compliance review.
(b) A motor carrier that employs or has under contract qualified
LCV driver-instructors shall provide evidence of the certifications
required by Sec. 380.301 or Sec. 380.303 of this part, when requested
by an authorized FMCSA, State, or local official in the course of a
compliance review.
Subpart D--Driver-Training Certification
Sec. 380.401 Certification document.
(a) A student who successfully completes LCV driver training shall be
issued a Driver-Training Certificate that is substantially in
accordance with the following form.
[GRAPHIC] [TIFF OMITTED] TR30MR04.001
(b) An LCV driver must provide a copy of the Driver-Training
Certificate to his/her employer to be filed in the Driver Qualification
File.
Appendix to Part 380--LCV Driver Training Programs, Required Knowledge
and Skills
The following table lists topics of instruction required for
drivers of longer combination vehicles pursuant to 49 CFR part 380,
subpart B. The training courses for operators of LCV Doubles and LCV
Triples must be distinct and tailored to address their unique
operating and handling characteristics. Each course must include the
minimum topics of instruction, including behind-the-wheel training
designed to provide an opportunity to develop the skills outlined
under the Proficiency Development unit of the training program. Only
a skills instructor may administer behind-the-wheel training
involving the operation of an LCV or one of its components. A
classroom instructor may administer only instruction that does not
involve the operation of an LCV or one of its components.
Table to the Appendix--Course Topics for LCV Drivers
------------------------------------------------------------------------
------------------------------------------------------------------------
Section 1: Orientation
------------------------------------------------------------------------
1.1............................. LCVs in Trucking
1.2............................. Regulatory Factors
1.3............................. Driver Qualifications
1.4............................. Vehicle Configuration Factors
---------------------------------
Section 2: Basic Operation
------------------------------------------------------------------------
2.1............................. Coupling and Uncoupling
2.2............................. Basic Control and Handling
2.3............................. Basic Maneuvers
2.4............................. Turning, Steering and Tracking
2.5............................. Proficiency Development
---------------------------------
Section 3: Safe Operating Practices
------------------------------------------------------------------------
3.1............................. Interacting with Traffic
3.2............................. Speed and Space Management
3.3............................. Night Operations
3.4............................. Extreme Driving Conditions
3.5............................. Security Issues
3.6............................. Proficiency Development
---------------------------------
Section 4: Advanced Operations
------------------------------------------------------------------------
4.1............................. Hazard Perception
4.2............................. Hazardous Situations
4.3............................. Maintenance and Troubleshooting
---------------------------------
Section 5: Non-Driving Activities
------------------------------------------------------------------------
5.1............................. Routes and Trip Planning
5.2............................. Cargo and Weight Considerations
------------------------------------------------------------------------
Section 1--Orientation
The units in this section must provide an orientation to the
training curriculum and must cover the role LCVs play within the
motor carrier industry, the factors that affect their operations,
and the role that drivers play in the safe operation of LCVs.
Unit 1.1--LCVs in Trucking. This unit must provide an
introduction to the emergence of LCVs in trucking and must serve as
an orientation to the course content. Emphasis must be placed upon
the role the driver plays in transportation.
Unit 1.2--Regulatory factors. This unit must provide instruction
addressing the Federal, State, and local governmental bodies that
propose, enact, and implement the laws, rules, and regulations that
affect the trucking industry. Emphasis must be placed on those
regulatory factors that affect LCVs, including 23 CFR 658.23 and
Appendix C to part 658.
Unit 1.3--Driver qualifications. This unit must provide
classroom instruction addressing the Federal and State laws, rules,
and regulations that define LCV driver qualifications. It also must
include a discussion on medical examinations, drug and alcohol
tests, certification, and basic health and wellness issues. Emphasis
must be placed upon topics essential to physical and mental health
maintenance, including (1) diet, (2) exercise, (3) avoidance of
alcohol and drug abuse, and caution in the use of prescription and
nonprescription drugs, (4) the adverse effects of driver fatigue,
and (5) effective fatigue countermeasures. Driver-trainees who have
successfully completed the Entry-level training segments at Sec.
380.503(a) and (c) are considered to have satisfied the requirements
of Unit 1.3.
Unit 1.4--Vehicle configuration factors. This unit must provide
classroom instruction addressing the key vehicle components used in
the configuration of longer combination vehicles. It also must
familiarize the driver-trainee with various vehicle combinations, as
well as provide instruction about unique characteristics and factors
associated with LCV configurations.
Section 2--Basic Operation
The units in this section must cover the interaction between the
driver and the vehicle. They must teach driver-trainees how to
couple and uncouple LCVs, ensure the vehicles are in proper
operating condition, and control the motion of LCVs under various
road and traffic conditions.
During the driving exercises at off-highway locations required
by this section, the driver-trainee must first familiarize himself/
herself with basic operating characteristics of an LCV. Utilizing an
LCV, students must be able to perform the skills learned in each
unit to a level of proficiency required to permit safe transition to
on-street driving.
Unit 2.1--Coupling and uncoupling. This unit must provide
instruction addressing the procedures for coupling and uncoupling
LCVs. While vehicle coupling and uncoupling procedures are common to
all truck-tractor/semi-trailer operations, some factors are peculiar
to LCVs. Emphasis must be placed upon preplanning and safe operating
procedures.
Unit 2.2--Basic control and handling. This unit must provide an
introduction to basic vehicular control and handling as it applies
to LCVs. This must include instruction addressing brake performance,
handling characteristics and factors affecting LCV stability while
braking, turning, and cornering. Emphasis must be placed upon safe
operating procedures.
Unit 2.3--Basic maneuvers. This unit must provide instruction
addressing the basic vehicular maneuvers that will be encountered by
LCV drivers. This must include instruction relative to backing, lane
positioning and path selection, merging situations, and parking
LCVs. Emphasis must be placed upon safe operating procedures as they
apply to brake performance and directional stability while
accelerating, braking, merging, cornering, turning, and parking.
Unit 2.4--Turning, steering, and tracking. This unit must
provide instruction addressing turning situations, steering
maneuvers, and the tracking of LCV trailers. This must include
instruction related to trailer sway and off-tracking. Emphasis must
be placed on maintaining directional stability.
Unit 2.5--Proficiency development: basic operations. The purpose
of this unit is to enable driver-students to gain the proficiency in
basic operation needed to safely undertake on-street instruction in
the Safe Operations Practices section of the curriculum.
The activities of this unit must consist of driving exercises
that provide practice for the development of basic control skills
and mastery of basic maneuvers. Driver-students practice skills and
maneuvers learned in the Basic Control and Handling; Basic
Maneuvers; and Turning, Steering and Tracking units. A series of
basic exercises is practiced at off-highway locations until students
develop sufficient proficiency for transition to on-street driving.
Once the driver-student's skills have been measured and found
adequate, the driver-student must be allowed to move to on-the-
street driving.
Nearly all activity in this unit will take place on the driving
range or on streets or roads that have low-density traffic
conditions.
Section 3--Safe Operating Practices
The units in this section must cover the interaction between
student drivers, the vehicle, and the traffic environment. They must
teach driver-students how to apply their basic operating skills in a
way that ensures their safety and that of other road users under
various road, weather, and traffic conditions.
Unit 3.1--Interacting with traffic. This unit must provide
instruction addressing the principles of visual search,
communication, and sharing the road with other traffic. Emphasis
must be placed upon visual search, mirror usage, signaling and/or
positioning the vehicle to communicate, and understanding the
special situations encountered by LCV drivers in various traffic
situations.
Unit 3.2--Speed and space management. This unit must provide
instruction addressing the principles of speed and space management.
Emphasis must be placed upon maintaining safe vehicular speed and
appropriate space surrounding the vehicle under various traffic and
road conditions. Particular attention must be placed upon
understanding the special situations encountered by LCVs in various
traffic situations.
Unit 3.3--Night operations. This unit must provide instruction
addressing the principles of Night Operations. Emphasis must be
placed upon the factors affecting operation of LCVs at night. Night
driving presents specific factors that require special attention on
the part of the driver. Changes in vehicle safety inspection,
vision, communications, speed management, and space management are
needed to deal with the special problems night driving presents.
Unit 3.4--Extreme driving conditions. This unit must provide
instruction addressing the driving of LCVs under extreme driving
conditions. Emphasis must be placed upon the factors affecting the
operation of LCVs in cold, hot, and inclement weather and in the
mountains and desert. Changes in basic driving habits are needed to
deal with the specific problems presented by these extreme driving
conditions.
Unit 3.5--Security issues. This unit must include a discussion
of security requirements imposed by the Department of Homeland
Security, Transportation Security Administration; the U.S.
Department of Transportation, Research and Special Programs
Administration; and any other State or Federal agency with
responsibility for highway or motor carrier security.
Unit 3.6--Proficiency development. This unit must provide
driver-students an opportunity to refine, within the on-street
traffic environment, their vehicle handling skills learned in the
first three sections. Driver-student performance progress must be
closely monitored to determine when the level of proficiency
required for carrying out the basic traffic maneuvers of stopping,
turning, merging, straight driving, curves, lane changing, passing,
driving on hills, driving through traffic restrictions, and parking
has been attained. The driver-student must also be assessed for
regulatory compliance with all traffic laws.
Nearly all activity in this unit will take place on public
roadways in a full range of traffic environments applicable to this
vehicle configuration. This must include urban and rural
uncontrolled roadways, expressways or freeways, under light,
moderate, and heavy traffic conditions. There must be a brief
classroom session to familiarize driver-students with the type of
on-street maneuvers they will perform and how their performance will
be rated.
The instructor must assess the level of skill development of the
driver-student and must increase in difficulty, based upon the level
of skill attained, the types of maneuvers, roadways and traffic
conditions to which the driver-student is exposed.
Section 4--Advanced Operations
The units in this section must introduce higher level skills
that can be acquired only after the more fundamental skills and
knowledge taught in sections two and three have been mastered. They
must teach the perceptual skills necessary to recognize potential
hazards, and must demonstrate the procedures needed to handle an LCV
when faced with a hazard.
The Maintenance and Trouble-shooting Unit must provide
instruction that addresses how to keep the vehicle in safe and
efficient operating condition. The purpose of this unit is to teach
the correct way to perform simple maintenance tasks, and how to
troubleshoot and report those vehicle discrepancies or deficiencies
that must be repaired by a qualified mechanic.
Unit 4.1--Hazard perception. This unit must provide instruction
addressing the principles of recognizing hazards in sufficient time
to reduce the severity of the hazard and neutralize a possible
emergency situation. While hazards are present in all motor vehicle
traffic operations, some are peculiar to LCV operations. Emphasis
must be placed upon hazard recognition, visual search, and response
to possible emergency-producing situations encountered by LCV
drivers in various traffic situations.
Unit 4.2--Hazardous situations. This unit must address dealing
with specific procedures appropriate for LCV emergencies. These must
include evasive steering, emergency braking, off-road recovery,
brake failures, tire blowouts, rearward amplification, hydroplaning,
skidding, jackknifing and the rollover phenomenon. The discussion
must include a review of unsafe acts and the role they play in
producing hazardous situations.
Unit 4.3--Maintenance and trouble-shooting. This unit must
introduce driver-students to the basic servicing and checking
procedures for the various vehicle components and provide knowledge
of conducting preventive maintenance functions, making simple
emergency repairs, and diagnosing and reporting vehicle
malfunctions.
Section 5--Non-Driving Activities
The units in this section must cover activities that are not
directly related to the vehicle itself but must be performed by an
LCV driver. The units in this section must ensure these activities
are performed in a manner that ensures the safety of the driver,
vehicle, cargo, and other road users.
Unit 5.1--Routes and trip planning. This unit must address the
importance of and requirements for planning routes and trips. This
must include classroom discussion of Federal and State requirements
for a number of topics including permits, vehicle size and weight
limitations, designated highways, local access, the reasonable
access rule, staging areas, and access zones.
Unit 5.2--Cargo and weight considerations. This unit must
address the importance of proper cargo documentation, loading,
securing and unloading cargo, weight distribution, load sequencing
and trailer placement. Emphasis must be placed on the importance of
axle weight distribution, as well as on trailer placement and its
effect on vehicle handling.
PART 391--QUALIFICATIONS OF DRIVERS AND LONGER COMBINATION VEHICLE
(LCV) DRIVER INSTRUCTORS
2. The authority citation for 49 CFR part 391 is revised to read as
follows:
Authority: 49 U.S.C. 322, 504, 31133, 31136 and 31502; Sec.
4007(b) of Pub. L. 102-240 (105 Stat. 2152); and 49 CFR 1.73.
3. Part 391 is amended by revising the title to read as set forth above
and by adding a new Sec. 391.53 to subpart F to read as follows:
Sec. 391.53 LCV Driver-Instructor qualification files.
(a) Each motor carrier must maintain a qualification file for each
LCV driver-instructor it employs or uses. The LCV driver-instructor
qualification file may be combined with his/her personnel file.
(b) The LCV driver-instructor qualification file must include the
information in paragraphs (b)(1) and (b)(2) of this section for a
skills instructor or the information in paragraph (b)(1) of this
section for a classroom instructor, as follows:
(1) Evidence that the instructor has met the requirements of 49 CFR
380.301 or 380.303;
(2) A photographic copy of the individual's currently valid CDL
with the appropriate endorsements.
Issued on: March 22, 2004.
Annette M. Sandberg,
Administrator.
[FR Doc. 04-6794 Filed 3-29-04; 8:45 am]
BILLING CODE 4910-EX-P
|
Fixes node-sass version range in dependencies
To prevent yarn from emitting warnings when scss-to-json is used as a dependency of a project, change the way the node-sass version range is specified in dependencies.
warning Pattern ["node-sass@^4.0.0"] is trying to unpack in the same destination "/Users/vieira/Library/Caches/Yarn/v2/npm-node-sass-4.9.4-349bd7f1c89422ffe7e1e4b60f2055a69fbc5512" as pattern ["node-sass@…"]. This could result in non-deterministic behavior, skipping.
@ryanbahniuk Can this be merged?
|
from typing import TYPE_CHECKING, no_type_check
import logging
from qtpy.QtCore import QEvent, QPoint, QSize, Qt, Signal
from qtpy.QtGui import QContextMenuEvent, QCursor, QFontMetrics, QIcon, QMouseEvent
from qtpy.QtWidgets import (QBoxLayout, QFrame, QLabel, QMenu, QSizePolicy,
QStyle, QWidget, QPushButton)
from .util import start_drag_distance, set_button_icon
from .enums import DragState, DockFlags, DockWidgetArea, DockWidgetFeature
from .eliding_label import ElidingLabel
if TYPE_CHECKING:
from . import (DockWidget, DockAreaWidget, FloatingDockContainer)
logger = logging.getLogger(__name__)
class DockWidgetTabPrivate:
public: 'DockWidgetTab'
dock_widget: 'DockWidget'
icon_label: QLabel
title_label: QLabel
drag_start_mouse_position: QPoint
is_active_tab: bool
dock_area: 'DockAreaWidget'
drag_state: DragState
floating_widget: 'FloatingDockContainer'
icon: QIcon
close_button: QPushButton
@no_type_check
def __init__(self, public: 'DockWidgetTab'):
'''
Private data constructor
Parameters
----------
public : DockWidgetTab
'''
self.public = public
self.dock_widget = None
self.icon_label = None
self.title_label = None
self.drag_start_mouse_position = None
self.is_active_tab = False
self.dock_area = None
self.drag_state = DragState.inactive
self.floating_widget = None
self.icon = None
self.close_button = None
def create_layout(self):
'''
Creates the complete layout including all controls
'''
self.title_label = ElidingLabel(text=self.dock_widget.windowTitle())
self.title_label.set_elide_mode(Qt.ElideRight)
self.title_label.setObjectName("dockWidgetTabLabel")
self.title_label.setAlignment(Qt.AlignCenter)
self.close_button = QPushButton()
self.close_button.setObjectName("tabCloseButton")
set_button_icon(self.public.style(), self.close_button,
QStyle.SP_TitleBarCloseButton)
self.close_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.close_button.setVisible(False)
self.close_button.setToolTip("Close Tab")
self.close_button.clicked.connect(self.public.close_requested)
fm = QFontMetrics(self.title_label.font())
spacing = round(fm.height()/4.0)
# Fill the layout
layout = QBoxLayout(QBoxLayout.LeftToRight)
layout.setContentsMargins(2*spacing, 0, 0, 0)
layout.setSpacing(0)
self.public.setLayout(layout)
layout.addWidget(self.title_label, 1)
layout.addSpacing(spacing)
layout.addWidget(self.close_button)
layout.addSpacing(round(spacing*4.0/3.0))
layout.setAlignment(Qt.AlignCenter)
self.title_label.setVisible(True)
def move_tab(self, ev: QMouseEvent):
'''
Moves the tab depending on the position in the given mouse event
Parameters
----------
ev : QMouseEvent
'''
ev.accept()
# left, top, right, bottom = self.public.getContentsMargins()
move_to_pos = self.public.mapToParent(ev.pos())-self.drag_start_mouse_position
move_to_pos.setY(0)
self.public.move(move_to_pos)
self.public.raise_()
def is_dragging_state(self, drag_state: DragState) -> bool:
'''
Test function for current drag state
Parameters
----------
drag_state : DragState
Returns
-------
value : bool
'''
return self.drag_state == drag_state
def title_area_geometry_contains(self, global_pos: QPoint) -> bool:
'''
Returns true if the given global point is inside the title area
geometry rectangle. The position is given as global position.
Parameters
----------
global_pos : QPoint
Returns
-------
value : bool
'''
return self.dock_area.title_bar_geometry().contains(self.dock_area.mapFromGlobal(global_pos))
def start_floating(
self,
dragging_state: DragState = DragState.floating_widget
) -> bool:
'''
Starts floating of the dock widget that belongs to this title bar
Returns true, if floating has been started and false if floating is not
possible for any reason
Parameters
----------
dragging_state : DragState
Returns
-------
value : bool
'''
dock_container = self.dock_widget.dock_container()
if dock_container is None:
return
logger.debug('is_floating %s',
dock_container.is_floating())
logger.debug('area_count %s',
dock_container.dock_area_count())
logger.debug('widget_count %s',
self.dock_widget.dock_area_widget().dock_widgets_count())
# if this is the last dock widget inside of this floating widget,
# then it does not make any sense, to make it floating because
# it is already floating
if (dock_container.is_floating()
and (dock_container.visible_dock_area_count() == 1)
and (self.dock_widget.dock_area_widget().dock_widgets_count() == 1)):
return False
logger.debug('startFloating')
self.drag_state = dragging_state
size = self.dock_area.size()
from .floating_dock_container import FloatingDockContainer
if self.dock_area.dock_widgets_count() > 1:
# If section widget has multiple tabs, we take only one tab
self.floating_widget = FloatingDockContainer(dock_widget=self.dock_widget)
else:
# If section widget has only one content widget, we can move the complete
# dock area into floating widget
self.floating_widget = FloatingDockContainer(dock_area=self.dock_area)
if dragging_state == DragState.floating_widget:
self.floating_widget.start_dragging(self.drag_start_mouse_position,
size, self.public)
overlay = self.dock_widget.dock_manager().container_overlay()
overlay.set_allowed_areas(DockWidgetArea.outer_dock_areas)
else:
self.floating_widget.init_floating_geometry(self.drag_start_mouse_position, size)
self.dock_widget.emit_top_level_changed(True)
return True
def test_config_flag(self, flag: DockFlags) -> bool:
'''
Returns true if the given config flag is set
Parameters
----------
flag : DockFlags
Returns
-------
value : bool
'''
return flag in self.dock_area.dock_manager().config_flags()
@property
def floatable(self):
'''
Is the dock widget floatable?
'''
return DockWidgetFeature.floatable in self.dock_widget.features()
class DockWidgetTab(QFrame):
active_tab_changed = Signal()
clicked = Signal()
close_requested = Signal()
close_other_tabs_requested = Signal()
moved = Signal(QPoint)
def __init__(self, dock_widget: 'DockWidget', parent: QWidget):
'''
Parameters
----------
dock_widget : DockWidget
The dock widget this title bar
parent : QWidget
The parent widget of this title bar
'''
super().__init__(parent)
self.d = DockWidgetTabPrivate(self)
self.setAttribute(Qt.WA_NoMousePropagation, True)
self.d.dock_widget = dock_widget
self.d.create_layout()
def on_detach_action_triggered(self):
if self.d.floatable:
self.d.drag_start_mouse_position = self.mapFromGlobal(QCursor.pos())
self.d.start_floating(DragState.inactive)
def mousePressEvent(self, ev: QMouseEvent):
'''
Mouse press event
Parameters
----------
ev : QMouseEvent
'''
if ev.button() == Qt.LeftButton:
ev.accept()
self.d.drag_start_mouse_position = ev.pos()
self.d.drag_state = DragState.mouse_pressed
self.clicked.emit()
return
super().mousePressEvent(ev)
def mouseReleaseEvent(self, ev: QMouseEvent):
'''
Mouse release event
Parameters
----------
ev : QMouseEvent
'''
# End of tab moving, emit signal
if self.d.is_dragging_state(DragState.tab) and self.d.dock_area:
self.moved.emit(ev.globalPos())
self.d.drag_start_mouse_position = QPoint()
self.d.drag_state = DragState.inactive
super().mouseReleaseEvent(ev)
def mouseMoveEvent(self, ev: QMouseEvent):
'''
Mouse move event
Parameters
----------
ev : QMouseEvent
'''
if (not (ev.buttons() & Qt.LeftButton)
or self.d.is_dragging_state(DragState.inactive)):
self.d.drag_state = DragState.inactive
return super().mouseMoveEvent(ev)
# move floating window
if self.d.is_dragging_state(DragState.floating_widget):
self.d.floating_widget.move_floating()
return super().mouseMoveEvent(ev)
# move tab
if self.d.is_dragging_state(DragState.tab):
# Moving the tab is always allowed because it does not mean moving
# the dock widget around
self.d.move_tab(ev)
# Maybe a fixed drag distance is better here ?
drag_distance_y = abs(self.d.drag_start_mouse_position.y()-ev.pos().y())
start_dist = start_drag_distance()
if drag_distance_y >= start_dist:
# If this is the last dock area in a dock container with only
# one single dock widget it does not make sense to move it to a new
# floating widget and leave this one empty
if (self.d.dock_area.dock_container().is_floating()
and self.d.dock_area.open_dock_widgets_count() == 1
and self.d.dock_area.dock_container().visible_dock_area_count() == 1):
return
# Floating is only allowed for widgets that are movable
if self.d.floatable:
self.d.start_floating()
elif (self.d.dock_area.open_dock_widgets_count() > 1
and (ev.pos()-self.d.drag_start_mouse_position).manhattanLength() >= start_dist):
# Wait a few pixels before start moving
self.d.drag_state = DragState.tab
else:
return super().mouseMoveEvent(ev)
def contextMenuEvent(self, ev: QContextMenuEvent):
'''
Context menu event
Parameters
----------
ev : QContextMenuEvent
'''
ev.accept()
self.d.drag_start_mouse_position = ev.pos()
menu = QMenu(self)
detach = menu.addAction("Detach", self.on_detach_action_triggered)
detach.setEnabled(self.d.floatable)
menu.addSeparator()
action = menu.addAction("Close", self.close_requested)
action.setEnabled(self.is_closable())
menu.addAction("Close Others", self.close_other_tabs_requested)
menu.exec(self.mapToGlobal(ev.pos()))
def mouseDoubleClickEvent(self, event: QMouseEvent):
'''
Double clicking the tab widget makes the assigned dock widget floating
Parameters
----------
event : QMouseEvent
'''
# If this is the last dock area in a dock container it does not make
# sense to move it to a new floating widget and leave this one
# empty
if (self.d.floatable and
(not self.d.dock_area.dock_container().is_floating()
or self.d.dock_area.dock_widgets_count() > 1)):
self.d.drag_start_mouse_position = event.pos()
self.d.start_floating(DragState.inactive)
super().mouseDoubleClickEvent(event)
def is_active_tab(self) -> bool:
'''
Returns true, if this is the active tab
Returns
-------
value : bool
'''
return self.d.is_active_tab
def set_active_tab(self, active: bool):
'''
Set this true to make this tab the active tab
Parameters
----------
active : bool
'''
closable = DockWidgetFeature.closable in self.d.dock_widget.features()
tab_has_close_button = self.d.test_config_flag(DockFlags.active_tab_has_close_button)
self.d.close_button.setVisible(active and closable and tab_has_close_button)
if self.d.is_active_tab == active:
return
self.d.is_active_tab = active
self.style().unpolish(self)
self.style().polish(self)
self.d.title_label.style().unpolish(self.d.title_label)
self.d.title_label.style().polish(self.d.title_label)
self.update()
self.active_tab_changed.emit()
def dock_widget(self) -> 'DockWidget':
'''
Returns the dock widget this title widget belongs to
Returns
-------
value : DockWidget
'''
return self.d.dock_widget
def set_dock_area_widget(self, dock_area: 'DockAreaWidget'):
'''
Sets the dock area widget the dockWidget returned by dockWidget() function belongs to.
Parameters
----------
dock_area : DockAreaWidget
'''
self.d.dock_area = dock_area
def dock_area_widget(self) -> 'DockAreaWidget':
'''
Returns the dock area widget this title bar belongs to.
Returns
-------
value : DockAreaWidget
'''
return self.d.dock_area
def set_icon(self, icon: QIcon):
'''
Sets the icon to show in title bar
Parameters
----------
icon : QIcon
'''
layout = self.layout()
if not self.d.icon_label and icon.isNull():
return
if not self.d.icon_label:
self.d.icon_label = QLabel()
self.d.icon_label.setAlignment(Qt.AlignVCenter)
self.d.icon_label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
self.d.icon_label.setToolTip(self.d.title_label.toolTip())
layout.insertWidget(0, self.d.icon_label, Qt.AlignVCenter)
layout.insertSpacing(1, round(1.5*layout.contentsMargins().left()/2.0))
elif icon.isNull():
# Remove icon label and spacer item
layout.removeWidget(self.d.icon_label)
layout.removeItem(layout.itemAt(0))
self.d.icon_label.deleteLater()
self.d.icon_label = None
self.d.icon = icon
if self.d.icon_label:
self.d.icon_label.setPixmap(icon.pixmap(self.windowHandle(), QSize(16, 16)))
self.d.icon_label.setVisible(True)
def icon(self) -> QIcon:
'''
Returns the icon
Returns
-------
value : QIcon
'''
return self.d.icon
def text(self) -> str:
'''
Returns the tab text
Returns
-------
value : str
'''
return self.d.title_label.text()
def set_text(self, title: str):
'''
Sets the tab text
Parameters
----------
title : str
'''
self.d.title_label.setText(title)
def is_closable(self) -> bool:
'''
This function returns true if the assigned dock widget is closeable
Returns
-------
value : bool
'''
return (self.d.dock_widget and
DockWidgetFeature.closable in self.d.dock_widget.features())
def event(self, e: QEvent) -> bool:
'''
Track event ToolTipChange and set child ToolTip
Parameters
----------
e : QEvent
Returns
-------
value : bool
'''
if e.type() == QEvent.ToolTipChange:
text = self.toolTip()
self.d.title_label.setToolTip(text)
return super().event(e)
# def setVisible(self, visible: bool):
# '''
# Set visible
#
# Parameters
# ----------
# visible : bool
# '''
# super().setVisible(visible)
|
<?php
namespace app\modules\page;
use Yii;
/**
* page module definition class
*/
class Module extends \yii\base\Module
{
/**
* @inheritdoc
*/
public $controllerNamespace = 'app\modules\page\controllers';
/**
* @inheritdoc
*/
public function init()
{
parent::init();
if (empty(Yii::$app->i18n->translations['page'])) {
Yii::$app->i18n->translations['page'] = [
'class' => 'yii\i18n\PhpMessageSource',
'basePath' => __DIR__ . '/messages',
];
}
}
}
|
Empty Gahrron's Withering Bottle
The Empty Gahrron's Withering Bottle is provided as an objective for the following quests:
* Gahrron's Withering Cauldron
* Target: Gahrron's Withering
|
Album Maddalena Vinyl-LP General Music ZSLGE55063 - Italy - 1971
Vinyl-LP General Music ZSLGE55063 - Italy - 1971
Ennio Morricone, Composer; Ennio Morricone, Arranger; Bruno Nicolai, Conductor
|
Human Genome Project
History
The Role of Celera Genomics
Goals
Benefits
Whose genome was sequenced?
"Whose genome was sequenced in the public (HGP) and private projects?"
|
New Faction
Background
The Ruler of Conspiracy Arc
Philip believed the rumor of Ainz Ooal Gown's death and had the idea that if he married Albedo he would inherit the Sorcerer Kingdom. At this point it was too late to find another scapegoat for the new noble faction, and it was still necessary to use the faction to gather all the idiots in the Kingdom. Hilma's comrades attempted to console her and urged her to continue, though the former prostitute still demanded a transfer from her two-year assignment.
The Witch of the Falling Kingdom Arc
Strength
Known Members
* Philip (Founder)
* Hilma Cygnaeus (Subordinate/Real Founder)
* Wayne Delvin
* Igthorn Rokerson
Trivia
* Prince Zanac sees the New Faction as power-hungry and a cancer to the Re-Estize Kingdom.
|
SystemJS: Fetch error when not specifying .js when importing
I'm using TypeScript with module: "commonjs" and I can't seem to import exported classes.
Let's say I've got an exported class Train, like so:
export class Train {}
Now I want to create an instance of that Train in another file, like so:
import { Train } from "./Train";
class Main {
    constructor() {
        const train = new Train();
        console.log("Heck it, trainz!", train);
    }
}
I'm getting the following errors:
GET http://localhost:3000/Train 404 (Not Found)
(index):10 Error: Fetch error: 404 Not Found
Instantiating http://localhost:3000/Train
Loading http://localhost:3000/App.js
Loading App.js
at fetch.js:37
at <anonymous>
The head of my index.html looks like this:
<script src="node_modules/systemjs/dist/system.src.js"></script>
<script>
System.config({
"defaultExtension": "js"
});
System.import('dist/App.js').catch(function(err){ console.error(err); });
</script>
If I add .js to the import, the error goes away, but the console.log doesn't get called.
Anyone knows what's up?
Sorry guys, I figured it out. I called my init function using window.onload which wasn't being called anymore since systemjs takes care of everything now. Thanks for reading!
|
Invalid values/overflow when running in github CI
I integrated this project into a machine learning framework to add support for WebGPU. So far it's been good and we can run most of the test suite locally using the library, but when we go to CI we get strange errors on some operations.
We are currently only utilizing the compute portion of the library, with no rendering at all, so I don't believe the issue revolves around a missing package.
We are utilizing the GitHub runner windows-latest as it seemed to be the most stable, but it is still failing on some tests. These tests run fine locally and I have tested on multiple machines; only when utilizing GitHub runners do they present themselves. We have other runtimes that run the same tests in CI that don't get these errors (CLANG, METAL, OPENCL), so I don't think it's the actual values being passed that cause it.
Output from CI runner:
============================== warnings summary ===============================
D:\a\tinygrad\tinygrad\tinygrad\ops.py:100: RuntimeWarning: invalid value encountered in divide
D:\a\tinygrad\tinygrad\tinygrad\ops.py:100: RuntimeWarning: overflow encountered in exp2
D:\a\tinygrad\tinygrad\tinygrad\ops.py:100: RuntimeWarning: invalid value encountered in log2
Running on CI does indeed come with some risk, because of the virtualization. We only run gpu stuff on Linux runners using Lavapipe, see e.g. these docs.
On Windows, wgpu-py still defaults to Vulkan. My advice would be to try setting WGPU_BACKEND_TYPE (valid values are “Vulkan”, “Metal”, “D3D12”, “D3D11”, or “OpenGL”, though on Windows I'd only try Vulkan, D3D11 and D3D12). Try locally to see if the error can perhaps be reproduced, and on CI whether it can fix it :)
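For illustration, a minimal sketch of forcing the backend from the Python side (the backend name below is just an example; you would try the values listed above one by one):

import os

# Pick the backend before the wgpu adapter/device is created.
os.environ["WGPU_BACKEND_TYPE"] = "D3D12"  # or "Vulkan", "D3D11", ...

import wgpu  # noqa: E402  (imported after the env var is set, just to be safe)

In CI the same variable can of course be set in the workflow's env block instead of in code.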
Switching to a macOS runner and forcing the Metal backend worked for our CI needs. I was also able to create a web demo running EfficientNet if you want to check it out here.
Thanks this library was a huge help!
Switching to a macos runner and forcing the metal backend
Out of interest, is that a runner with real hardware, or does it include virtualization for Metal somehow?
was also able to create a web demo running efficientnet if you want to check it out here
Very nice! Great to see wgpu-py also being used for compute tasks! Are you using the wgpu API directly, or do you make use of the compute_with_buffers util?
|
#pragma once
#include <stddef.h>
#include <stdint.h>
#define MIN_POOL_OBJECT_SIZE_BYTES sizeof(int)
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
char* buffer;
unsigned int objCount;
size_t objSize;
void* nextFreeBlock;
} PoolAllocator;
int poolInit(PoolAllocator* alloc, char* buffer, size_t objSize, unsigned int objCount); //places pointer to next free block in every block
void* poolAlloc(PoolAllocator* alloc); //remove pointer to next free block, return free block
void poolFree(PoolAllocator* alloc, void* pointer); // place pointer into block, set next free block to this pointer
void poolReset(PoolAllocator* alloc); //works like init
#ifdef __cplusplus
}
#endif
|
Henderson v. The State.
Indictment for Arson.
1. Arson; what is not a dwelling-house within the meaning of the statute. — A small one room structure with a piazza along one side of it, which, though built for that purpose, had not been occupied as a dwelling for 8 or 10 years, and was dilapidated and wholly unfit for habitation, and had for many years been used only for the storage of cotton, was not a dwelling-house within the meaning of sections 3780 and 3781 of the Code, defining the crime of arson.
2. Same; burning of cotton house containing cotton arson in the second degree, regardless of its value. — The burning of a cotton house containing cotton is, under the provisions of section 3781 of the Code, arson in the second degree, although the house with its contents is of less value than $500; the provision of said section as to value having no reference to cotton houses, but to the buildings named therein just preceding the provision as to cotton houses.
3. Same; averment of value in indictment. — In an indictment for arson in burning a cotton house containing cotton, an averment of the value of the house and its contents is mere surplusage, and should be entirely disregarded in the consideration of the case.
4. Failure to prove venue entitles discharge; when charge does not present the question. — In a criminal prosecution, the failure to prove the venue entitles the defendant to an acquittal, and the court should, upon request, instruct the jury to acquit the defendant if they believe the evidence; but where, on a trial under an indictment for arson in the second degree, there is no evidence of the venue, a charge that "If the jury believe all the evidence in this case, the defendant is not guilty of arson in the second degree," does not present the question of the failure to prove venue, and an exception to the refusal of the trial court to give said charge in such case is unavailing on appeal to raise the point of the want of evidence of venue.
Appeal from the Circuit Court of Covington.
Tried before the Hon. Jesse M. Carmichael.
The defendant was indicted, tried and convicted of arson in the second degree, and sentenced to the penitentiary for two years. The facts of the case are sufficiently stated in the opinion. Upon the introduction of all the evidence, the defendant requested the court to give the following written charges, and separately excepted to the court’s refusal to give each of them as asked : (1.) “If the jury believe all the evidence in this case, the defendant is not guilty of arson in the second degree.” (2.) “If the house alleged to have been burned was not an unoccupied dwelling-house, within the meaning of the statute, the defendant is not guilty of arson in the second degree, unless the house with its contents is shown by the proof to have been worth five hundred dollars or more.” (3.) “Unless the jury believe from the evidence beyond a reasonable doubt, that the defendant set fire to and burned an unoccupied dwelling-house, or set fire to and burned a cotton house, which, with its contents was worth five hundred dollars or more, then the defendant is not guilty, and the jury should acquit him.” (4.) “If the jury believe from the evidence that the house alleged to have been burned was built for and used as a dwelling-house up to 8 or 10 years ago, and then its use as a dwelling-house was abandoned, and it has not since that time been used or occupied as a dwelling-house, or for people to live in, but has been used as a cotton house to store or keep cotton in, then it is a question for the jury to decide whether it was an unoccupied dwelling-house, or a cotton house, and if it was not an unoccupied dwelling-house, but was a cotton house, then the defendant can not be convicted of arson in the second degree, unless the house with its contents is shown by the proof to have been worth five hundred dollars or more.”
Hickman & Riley and Henry Opp, for appellant.
The degree of arson, whether second or third, which the law ascribes to the burning of a cotton house, is determinable by the value of the house and the property therein contained. — James v. State, 104 Ala. 20. The venue in this case was not proven, and the judgment of conviction was necessarily erroneous. This point was raised and presented by the first charge requested by the defendant, and which was refused by the court. — Randolph v. State, 100 Ala. 139; Gawthorn v. State, 63 Ala. 157.
W. C. Fitts, Attorney-General, for the State.
McCLELLAN, J.
This indictment contains three counts. The first two charge arson of an uninhabited dwelling house. The proof was that the house was a small one room structure with a piazza along one side of it, that it had been built, and was at one time occupied, for the purposes of a dwelling, but that it had not been so occupied for eight or ten years last past; was at the time of the fire dilapidated, dismantled and wholly unfit for habitation, and had been used for said period only for the storage of cotton. We are of opinion that on these undisputed facts it had wholly lost its character as a dwelling-house within the meaning of sections 3780 and 3781 of the Code; and that the conviction must be referred to the third count of the indictment, which avers the arson of a "cotton house containing cotton, * * * which cotton house with the cotton therein contained was of the value of five hundred dollars or more."
The evidence shows that a cotton house containing cotton was burned, and that the value of the house and its contents was less than five hundred dollars. Upon this, there being also evidence tending to connect the defendant with the burning, two questions were sought to be raised by instructions requested on the part of the defendant. The first of these is, whether the burning of a cotton house which together with its contents is of less value than five hundred dollars is arson in the second degree, under section 3781 of the Code, or arson in the third degree, under section 3784. The statute itself is clear in the solution of this question: it expressly declares that the burning of a cotton house containing cotton is arson in the second degree; the provision as to value contained in the same section having obviously no reference to such houses, but only to the buildings named therein just before this provision. Ample justification of this legislative discrimination between cotton houses and the like and those previously enumerated is found in the consideration that buildings of the former class are usually more exposed to incendiarism than those of the latter.
The other point raised in this connection is, that there is a fatal variance between the indictment and the proof, in that it is alleged that the value of the house and contents was five hundred dollars or more, and the proof is of a value scarcely more than one hundred dollars. We think this position is ill taken. The averment of value in the indictment is matter of innocuous surplusage which should be entirely disregarded, rather than matter of description which must be proved. The case is strictly analogous in this respect to that presented by an indictment for larceny of property laying value at twenty-five dollars or more, when the proof at the trial is of a value of, say, ten dollars. In such case there would be a conviction of petit larceny — a result which would be impossible if appellant's contention, that the averment of value is descriptive and must be proved, else a variance exists and acquittal follows, be conceded.
It is stated in the bill of exceptions that all the evidence adduced on the trial is set out therein. It does not appear by the bill that there was any evidence tending to show that the offense charged and which the evidence went to prove, was committed in Covington county, the county of the indictment: there is no evidence of venue. On this state of case the trial court should, upon request, have charged the jury to acquit the defendant if they believed the evidence, but unless such charge was asked and refused the point was not saved for the appellant, and can not be availed of here.—Brown v. State, 100 Ala. 92; Randolph v. State, 100 Ala. 139. We do not find that the point was saved on the trial. The request for instruction which is now relied on as presenting the question is the following: "If the jury believe all the evidence in this case, the defendant is not guilty of arson in the second degree," which was refused. It is manifest from the whole record as also upon the words of this request that it was addressed to the question of the degree of the crime, and not to the inquiry whether the crime, confessedly committed, so far as this charge is concerned, in some degree, was committed in Covington county. And so it must have been understood by the court and jury. It does not direct an acquittal merely if the jury believe the evidence, which would have been proper since there was no evidence of venue; but it requires an affirmative finding by the jury that the defendant did not burn the cotton house described in the indictment whether the house was or was not in Covington county. The charge was, in other words, clearly not intended to raise the question of venue, that question is not presented by its terms, and to give it that effect, to say the least, would be to require the giving of a confusing and misleading instruction as applied to this question of venue, and one which with reference to the point to which it was really addressed was affirmatively unsound.
We find no error in the record, and the judgment is affirmed.
|
they had observed in this navigation.
At that time the crown of Portugal was annexed to Spain; and the governor of the captainship, or Maranon, for the sovereign of both kingdoms, was Jacome Raymundo de Norona, who, zealous for the improvement of this discovery, as of the highest importance to his prince, fitted out a fleet of canoes, under the command of captain Texera, to go up the river, and survey the country with greater form and accuracy. This flotilla departed from the neighbourhood of Para, on the 28th of October 1637, with the two religious on board; and after an incessant fatigue in making way against the stream, they arrived at Payamino on the 24th of June 1638. This place belongs to the jurisdiction of the government of Quixos; whence Texera, with the soldiers and the two religious, went to Quito, where he gave an account of the expedition to the audiencia, which transmitted the particulars to the count de Chinchon, viceroy of Peru; and he, agreeably to the zeal he had always manifested for enlarging his majesty's dominions, held a council about making more particular discoveries along the shores of that river.
Among other things, the count de Chinchon gave orders, that the Portuguese flotilla should return to Para, and with it sent some intelligent persons, whose zeal might be depended on, with orders to take an accurate survey of the river and its banks; and after discharging this commission, to proceed to Spain, and make a report of their expedition to the council of the Indies, in order to be laid before his majesty, that measures might in consequence be taken for securing the conquest of these nations. The persons chosen were, the Reverend Fathers Christopher de Accuna and Andrez de Artieda, Jesuits, and persons every way equal to the service. They left Quito on the 16th of February 1639; and having embarked with the ar-
|
fixed build for case-sensitive file system
Updated all the imports that were causing build failures, since some file systems are case-sensitive.
Thanks for contributing!
|
import React, { Component } from 'react';
import { Router, Scene, Actions } from 'react-native-router-flux';
import { createStore, applyMiddleware, combineReducers, compose } from 'redux';
import { connect, Provider } from 'react-redux';
import { AppRegistry, Navigator, AsyncStorage } from 'react-native';
import thunk from 'redux-thunk';
import promiseMiddleware from 'redux-promise';
import { devToolsEnhancer, composeWithDevTools} from 'remote-redux-devtools';
import uuid from 'uuid';
import reducers from '../reducers';
const RouterWithRedux = connect()(Router);
const middleware = [thunk, promiseMiddleware];
const composeEnhancers = composeWithDevTools({realtime: true});
const store = createStore(reducers, composeEnhancers(
applyMiddleware(...middleware),
));
import HomeMenu from './home';
import AnchorScanner from './anchorScanner';
const scenes = Actions.create(
<Scene key="root">
<Scene key="home" component={HomeMenu} initial={true} hideNavBar={true} panHandlers={null}/>
<Scene key="anchorScanner" component={AnchorScanner} hideNavBar={true}></Scene>
</Scene>
);
export default class App extends Component {
async componentWillMount() {
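// Ensure a persistent anonymous user id exists before the app mounts:
// read it from AsyncStorage and generate/store a new UUID if it is missing
// (or if the lookup throws).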
try {
const ownerId = await AsyncStorage.getItem('user_id');
if (!ownerId) {
const newOwnerId = uuid.v4();
await AsyncStorage.setItem('user_id', newOwnerId)
.then((doc) => console.log(doc))
.catch((err) => alert(err.toString()));
}
} catch (err) {
const ownerId = uuid.v4();
await AsyncStorage.setItem('user_id', ownerId)
.then((doc) => console.log(doc))
.catch((err) => alert(err.toString()));
}
}
render() {
return (
<Provider store={store}>
<RouterWithRedux scenes={scenes}/>
</Provider>
);
}
}
|
of the condensed water to the traps. Suitable gratings or runways should be provided, so that the men entering the kiln can do so with safety and without walking on the pipes. The interior iron work and pipes should be protected by a good kiln paint, or the pipes can be painted while hot with a mixture of cylinder oil and graphite, if preferred. The coils should be inspected occasionally to make sure that they are all working, and the traps should be observed every day.
CALIBRATION AND ADJUSTMENT OF INSTRUMENTS.
Success in all except the easiest kind of kiln drying depends upon the accuracy of the instruments and apparatus used in the regulation of the kiln and in the determination of the moisture content. It is therefore essential that the apparatus be maintained in an accurate operating condition. Most important is the matter of temperature indicating, recording, and regulating instruments, since through them both temperature and humidity are determined and controlled.
THERMOMETERS.
The simplest and most satisfactory way to calibrate indicating and recording thermometers is to compare them at different temperatures within their range with a standardized or calibrated thermometer of known accuracy. Each operator should have at least one such standard thermometer. The type recommended is a 12-inch glass chemical thermometer, with graduations in degrees Fahrenheit etched on the stem, and having a range of 30° to 220°, correct for ordinary purposes. Such a thermometer can be purchased for about $3 list price; and a brass protecting sleeve or case, recommended for use in kilns, can be had for about $1.50.
The usual laboratory method of calibration by comparison with a standard thermometer consists in immersing the standard and the thermometers to be calibrated in a vessel of water, which is constantly stirred to keep the temperature uniform throughout. The water is gradually heated, and the thermometers are read at intervals of a few degrees. The difference between the reading of any thermometer and the standard at any temperature is the error of that thermometer, and a correction of this amount must be applied to the reading to give the correct temperature. If the standard reads higher, call the correction plus (+) and add it to the reading of the thermometer, and vice versa. For example, if the standard reads 160° while a kiln thermometer reads 158°, the correction for that thermometer is +2°, and 2° must be added to its readings at about that temperature. This method is applicable to glass stem thermometers and other portable types. Wet-bulb thermometers are calibrated this way also, the wicks being removed during calibration. Once every six months should be sufficient for the calibration of glass thermometers.
Recording thermometers require more attention than other types on account of the comparative ease with which they may become deranged. They should be calibrated in water, as described for glass thermometers, the bulb and about a foot of the tube being immersed. This calibration will give a good idea of how the error, if any, varies throughout the operative range of the instrument. After calibration the instrument should be mounted in place in the kiln and then checked up at several points in its range by comparison with the standard thermometer hung close beside its bulb. These
|
[Bugfix] Update run_batch.py to handle larger numbers of batches
The vLLM engine appears to error out somewhere between 20k and 100k async requests. This refactors run_batch.py to allow at most 10k concurrent requests using an asyncio Semaphore, and also adds a tqdm progress bar. Note that this was created under 0.5.0.post1; I am unable to test integration with some of the newer changes, but it should work.
It would be nice to suppress the per-request output as well and just have the option of showing only the tqdm bar, but I was unable to figure out how to make that work.
Note that this is my first PR for this project so I'm not entirely sure how all of this works.
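For reference, the core of the change follows roughly this pattern (a simplified sketch rather than the actual diff; run_request stands in for the existing per-request coroutine in run_batch.py, and the 10k cap is the value mentioned above):

import asyncio

from tqdm import tqdm

# Hypothetical cap; the engine started erroring out somewhere past ~20k in-flight requests.
MAX_CONCURRENT_REQUESTS = 10_000


async def run_all(requests, run_request):
    """Run every request, but keep at most MAX_CONCURRENT_REQUESTS in flight."""
    semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)
    pbar = tqdm(total=len(requests), desc="batch requests")

    async def bounded(request):
        async with semaphore:
            result = await run_request(request)
        pbar.update(1)
        return result

    try:
        # gather() preserves input order, so outputs still line up with the batch file.
        return await asyncio.gather(*(bounded(r) for r in requests))
    finally:
        pbar.close()

Only up to MAX_CONCURRENT_REQUESTS coroutines are awaiting the engine at any moment; the rest sit on the semaphore until a slot frees up, and the bar ticks as each request completes.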
PR Checklist (Click to Expand)
Thank you for your contribution to vLLM! Before submitting the pull request, please ensure the PR meets the following criteria. This helps vLLM maintain the code quality and improve the efficiency of the review process.
PR Title and Classification
Only specific types of PRs will be reviewed. The PR title is prefixed appropriately to indicate the type of change. Please use one of the following:
[Bugfix] for bug fixes.
[CI/Build] for build or continuous integration improvements.
[Doc] for documentation fixes and improvements.
[Model] for adding a new model or improving an existing model. Model name should appear in the title.
[Frontend] For changes on the vLLM frontend (e.g., OpenAI API server, LLM class, etc.)
[Kernel] for changes affecting CUDA kernels or other compute kernels.
[Core] for changes in the core vLLM logic (e.g., LLMEngine, AsyncLLMEngine, Scheduler, etc.)
[Hardware][Vendor] for hardware-specific changes. Vendor name should appear in the prefix (e.g., [Hardware][AMD]).
[Misc] for PRs that do not fit the above categories. Please use this sparingly.
Note: If the PR spans more than one category, please include all relevant prefixes.
Code Quality
The PR need to meet the following code quality standards:
We adhere to Google Python style guide and Google C++ style guide.
Pass all linter checks. Please use format.sh to format your code.
The code need to be well-documented to ensure future contributors can easily understand the code.
Include sufficient tests to ensure the project stays correct and robust. This includes both unit tests and integration tests.
Please add documentation to docs/source/ if the PR modifies the user-facing behaviors of vLLM. It helps vLLM users understand and utilize the new features or changes.
Notes for Large Changes
Please keep the changes as concise as possible. For major architectural changes (>500 LOC excluding kernel/data/config/test), we would expect a GitHub issue (RFC) discussing the technical design and justification. Otherwise, we will tag it with rfc-required and might not go through the PR.
What to Expect for the Reviews
The goal of the vLLM team is to be a transparent reviewing machine. We would like to make the review process transparent and efficient and make sure no contributor feels confused or frustrated. However, the vLLM team is small, so we need to prioritize some PRs over others. Here is what you can expect from the review process:
After the PR is submitted, the PR will be assigned to a reviewer. Every reviewer will pick up the PRs based on their expertise and availability.
After the PR is assigned, the reviewer will provide a status update every 2-3 days. If the PR is not reviewed within 7 days, please feel free to ping the reviewer or the vLLM team.
After the review, the reviewer will put an action-required label on the PR if there are changes required. The contributor should address the comments and ping the reviewer to re-review the PR.
Please respond to all comments within a reasonable time frame. If a comment isn't clear or you disagree with a suggestion, feel free to ask for clarification or discuss the suggestion.
Thank You
Finally, thank you for taking the time to read these guidelines and for your interest in contributing to vLLM. Your contributions make vLLM a great tool for everyone!
@w013nad do you mind sharing the error you got without this PR?
I think we should definitely support this many requests, but wonder if there's an underlying issue that we should fix.
FYI there are ongoing refactors to move engine/api server/worker into own processes. This should enable much higher QPS.
I'm doing a RAG test running 4500 questions through with k=[0,1,2,3,5,6,7,8,9,10,11,12,13,14,15,17,20,25, 50, 75] chunks. The total file size is 1.2GB and 95k requests. I did another test which also had 95k requests, but the queries were much shorter, and it worked fine. This indicates it's not the number of requests but the amount of text I'm sending in. The other batch file size was around 200 MB.
INFO 06-23 22:52:10 async_llm_engine.py:564] Received request cmpl-f96570de404141e09427685b17c752c4: prompt: '<|im_start', params: SamplingParams(n=1, best_of=1, presence_penalty=0.0, frequency_penalty=0.0, repetition_penalty=1.0, temperature=0.01, top_p=1.0, top_k=-1, min_p=0.0, seed=None, use_beam_search=False, length_penalty=1.0, early_stopping=False, stop=[], stop_token_ids=[], include_stop_str_in_output=False, ignore_eos=False, max_tokens=1000, min_tokens=0, logprobs=None, prompt_logprobs=None, skip_special_tokens=True, spaces_between_special_tokens=True, truncate_prompt_tokens=None), prompt_token_ids: [151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645], lora_request: None.
ERROR 06-23 22:54:02 async_llm_engine.py:535] Engine iteration timed out. This should never happen!
ERROR 06-23 22:54:02 async_llm_engine.py:52] Engine background task failed
ERROR 06-23 22:54:02 async_llm_engine.py:52] Traceback (most recent call last):
ERROR 06-23 22:54:02 async_llm_engine.py:52] File "/usr/local/lib/python3.10/dist-packages/vllm/engine/async_llm_engine.py", line 506, in engine_step
ERROR 06-23 22:54:02 async_llm_engine.py:52] request_outputs = await self.engine.step_async()
ERROR 06-23 22:54:02 async_llm_engine.py:52] File "/usr/local/lib/python3.10/dist-packages/vllm/engine/async_llm_engine.py", line 235, in step_async
ERROR 06-23 22:54:02 async_llm_engine.py:52] output = await self.model_executor.execute_model_async(
ERROR 06-23 22:54:02 async_llm_engine.py:52] File "/usr/local/lib/python3.10/dist-packages/vllm/executor/distributed_gpu_executor.py", line 166, in execute_model_async
ERROR 06-23 22:54:02 async_llm_engine.py:52] return await self._driver_execute_model_async(execute_model_req)
ERROR 06-23 22:54:02 async_llm_engine.py:52] File "/usr/local/lib/python3.10/dist-packages/vllm/executor/multiproc_gpu_executor.py", line 149, in _driver_execute_model_async
ERROR 06-23 22:54:02 async_llm_engine.py:52] return await self.driver_exec_model(execute_model_req)
ERROR 06-23 22:54:02 async_llm_engine.py:52] asyncio.exceptions.CancelledError
ERROR 06-23 22:54:02 async_llm_engine.py:52]
ERROR 06-23 22:54:02 async_llm_engine.py:52] During handling of the above exception, another exception occurred:
ERROR 06-23 22:54:02 async_llm_engine.py:52]
ERROR 06-23 22:54:02 async_llm_engine.py:52] Traceback (most recent call last):
ERROR 06-23 22:54:02 async_llm_engine.py:52] File "/usr/lib/python3.10/asyncio/tasks.py", line 456, in wait_for
ERROR 06-23 22:54:02 async_llm_engine.py:52] return fut.result()
ERROR 06-23 22:54:02 async_llm_engine.py:52] asyncio.exceptions.CancelledError
ERROR 06-23 22:54:02 async_llm_engine.py:52]
ERROR 06-23 22:54:02 async_llm_engine.py:52] The above exception was the direct cause of the following exception:
ERROR 06-23 22:54:02 async_llm_engine.py:52]
ERROR 06-23 22:54:02 async_llm_engine.py:52] Traceback (most recent call last):
ERROR 06-23 22:54:02 async_llm_engine.py:52] File "/usr/local/lib/python3.10/dist-packages/vllm/engine/async_llm_engine.py", line 42, in _log_task_completion
ERROR 06-23 22:54:02 async_llm_engine.py:52] return_value = task.result()
ERROR 06-23 22:54:02 async_llm_engine.py:52] File "/usr/local/lib/python3.10/dist-packages/vllm/engine/async_llm_engine.py", line 532, in run_engine_loop
ERROR 06-23 22:54:02 async_llm_engine.py:52] has_requests_in_progress = await asyncio.wait_for(
ERROR 06-23 22:54:02 async_llm_engine.py:52] File "/usr/lib/python3.10/asyncio/tasks.py", line 458, in wait_for
ERROR 06-23 22:54:02 async_llm_engine.py:52] raise exceptions.TimeoutError() from exc
ERROR 06-23 22:54:02 async_llm_engine.py:52] asyncio.exceptions.TimeoutError
INFO 06-23 22:54:03 async_llm_engine.py:167] Aborted request cmpl-e0d8265bcc46475d944cd1b0be30b316.
After this, I get 95k lines of
INFO 06-23 22:54:06 async_llm_engine.py:167] Aborted request cmpl-f96570de404141e09427685b17c752c4.
and then it just shuts itself down.
gotcha, this makes sense. @w013nad do you mind making the change to the streaming approach and let's just get this merged in the front end?
|
Cyber Dragon Infinity
Example 1:
If I activate a card from my hand, can Cyber Dragon Infinity negate it and "destroy it"?
Example 2:
If I activate a monster effect from my hand and Crystal Wing Synchro Dragon tries to negate it, does the negation happen, and if so, what happens to that monster?
Can you provide the text of the Cyber Dragon Infinity card, so someone doesn't have to look it up before they can start helping you?
Example 1: Yes
The effect of Cyber Dragon Infinity can negate the activation of any card effect, regardless of its location, as it does not specify any area in which the effect need be activated.
Once per turn, when a card or effect is activated (Quick Effect): You can detach 1 material from this card; negate the activation, and if you do, destroy it.
As for the destruction of the activated card: if the card or effect whose activation is negated is in a zone wherein it can be destroyed, it is destroyed. This includes the hand, field, main deck and extra deck. If a card or effect whose activation is negated exists in the GY or is banished, the activation will still be negated, but the card cannot be destroyed in those zones.
Example 2: Yes, the Negation Occurs; Results Depend
As with Cyber Dragon Infinity, since Crystal Wing Synchro Dragon does not specify any area in which the activated effect need exist, it can negate the activation of any monster effect regardless of location. Now, as to the question of what happens: this varies depending on what kind of effect you activate in your hand.
Certain monsters, such as D.D. Crow, activate their effects by discarding themselves from the hand. Supposing that a player activates D.D. Crow's effect, Crystal Wing can negate its activation. However, examine the remaining clause (in bold):
Once per turn, during either player's turn, when another monster's effect is activated: You can negate the activation, and if you do, destroy that monster, and if you do that, this card gains ATK equal to the destroyed monster's original ATK until the end of this turn.
So, the gaining of attack is wholly contingent upon the monster whose effect was negated also being destroyed as a result of Crystal Wing's effect.
As another example scenario, if a monster effect activates in the hand without discarding it, as is the case with the Danger! series of monsters, Crystal Wing can negate the activation and - provided the card is still in the hand when Crystal Wing's effect resolves - destroy that monster, and gain ATK equal to its original ATK.
|
Find max and min of $11\cos^2\theta+3\sin\theta+6\sin\theta\cos\theta+5$
I'm trying to solve the following question from my textbook.
Using the identity $\cos2\theta=2\cos^2\theta-1$ and $a\cos\theta+b\sin\theta=\sqrt{a^2+b^2}\cos(\theta-c)$, where $c$ is a constant, find the maximum and minimum values of $$11\cos^2\theta+3\sin\theta+6\sin\theta\cos\theta+5$$
$$\begin{align}
11\cos^2\theta+3\sin\theta+6\sin\theta\cos\theta+5 &=5.5(\cos2\theta+1)+3\sin\theta+3\sin2\theta+5 \\
&=5.5\cos2\theta+3\sin2\theta+3\sin\theta+10.5 \\
&=\sqrt{39.25}\cos(2\theta+28.610°)+3\sin\theta+10.5
\end{align}$$
And now I'm kind of stuck. Did I approach the question wrongly?
The identity $a\cos(\theta) + b\sin(\theta) = \sqrt{a^2+b^2}\cos(\theta-a)$ is not correct.
Where did you get the value $c=-28.610$? It seems to me $c$ should be $\frac{1}{2}$.
@JustinStevenson Well, we need $\cos c=\frac{11}{\sqrt{157}}$, so $c$ is $0.499346722$ in radians or $28.6104596843^\circ$.
Please double-check the given problem.
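If it helps, a quick numerical sweep can sanity-check whatever closed-form extrema you end up deriving. This is only a brute-force check, not the intended method of the exercise:

```python
import numpy as np

theta = np.linspace(0, 2 * np.pi, 1_000_000)
f = 11 * np.cos(theta) ** 2 + 3 * np.sin(theta) + 6 * np.sin(theta) * np.cos(theta) + 5
print(f.max(), f.min())  # numerical maximum and minimum of the expression
```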
|
Board Thread:Fun and games/@comment-24735666-20151102212558/@comment-24508466-20160131000912
Let's turn this thread into "Your own pinata parties and epic quests!" #4.
|
Give me the complete political debate on the topic of Working Women's Association that ends with: ...ing us from achieving equality.\nI thank them all.
|
---
sidebar: 'jamstack'
prev: '/jamstack/'
next: '/jamstack/how-to-convert/'
---
## Prerequisites
In order to use the Adapter you have to get a Udesly plan: Adapter (monthly/yearly) or All Access, and have at least the [Account *Lite Plan*](https://webflow.com/pricing#account) of Webflow, which is the base plan that allows you to export your projects. You do not need to buy any Site plan in order to use the Adapter.
You also need to install the [Udesly Chrome Extension](https://chrome.google.com/webstore/detail/udesly-template-configura/khhgdnefpkphamogndglabaalbpfidbf?hl=en&authuser=0)
<video autoplay="" muted="" playsinline="true" loop="">
<source src="/assets/video/install-chrome-extension.webm">
</video>
## How It Works
Basically, you create your template in Webflow, and the Adapter converts it into an [11ty](https://www.11ty.dev/)-backed theme. **11ty** is a really simple static site generator that imposes only a few requirements, and it is really well suited to a Webflow CMS based theme, since it has a very similar templating structure.
The adapter not only converts the theme, but also optimizes the Webflow code a little, especially for the *above the fold* scripts: for example, Google Fonts are requested with `display: swap`, and the CSS files and JS scripts are lazy-loaded. The image tags are converted into picture tags, and there is a script, run before the site is deployed, that converts all your images into *webp*. (Come on *Safari*, even *Edge* supports this format... 😟😟😟)
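The conversion step itself is conceptually simple. Here is a rough sketch of the idea in Python with Pillow; the adapter's actual deploy script isn't shown in these docs, so treat the paths and quality setting as placeholders:

```python
from pathlib import Path
from PIL import Image

# Convert every PNG/JPEG under ./images to a WebP file next to the original.
for src in Path("images").glob("**/*"):
    if src.suffix.lower() in {".png", ".jpg", ".jpeg"}:
        Image.open(src).save(src.with_suffix(".webp"), "WEBP", quality=80)
```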
*Sounds like Magic right?* 🧙
Well, it is! Because there is a special extension that adds every custom attribute needed for you! You just need to install it here!
## Requirements
There are a few requirements for the CMS to be converted correctly (well, just a couple):
1) Do not use *tags* as a collection name! It is already used by 11ty. Give your collections another name!
2) Do not use *date* as a field name! It's reserved by 11ty!
|
Alice Bergel
Alice Rose Bergel, née Berger (1911 - 1998) was a German-American literary scholar who taught at the University of California, Irvine. She collaborated with her husband, Kurt Bergel, on several editions and translations of writing by Albert Schweitzer.
Life
Berger was born on 15 June 1911 in Berlin, the daughter of Bruno Martin Berger and Else J. Solon. She studied at the University of Berlin and the University of Freiberg from 1929 to 1933, and gained a PhD in Romance languages and philosophy from the University of Berlin in 1934. She married Kurt Bergel on 28 August 1938. She escaped Nazi Germany to England in 1939, and moved to the United States in 1941.
She and her husband founded the Albert Schweitzer Institute at Chapman University. The couple collaborated on a translation of Schweitzer's memoirs of his childhood.
Works
* (tr. and ed. with Kurt Bergel) Albert Schweitzer and Alice Ehlers: a friendship in letters, 1991
* (ed. with Kurt Bergel) "Liebes Cembalinchen--": Albert Schweitzer, Alice Ehlers: eine Freundschaft in Briefen, 1997
* (tr. with Kurt Bergel) Memoirs of childhood and youth by Albert Schweitzer. Syracuse: Syracuse University Press, 1997
* (tr. with Kurt Bergel) The stone breakers and other novellas by Ferdinand von Saar, 1998
|
Lake Superior and Mississippi Railroad Company v. United States/Opinion of the Court
Congress, in most of the legislative acts by which it has made donations of the public lands to the States in which they lie for the purpose of aiding in the construction of railroads, has stipulated that the railroads so aided shall be public highways for the use of the government, free from all tolls or other charge for transportation of its property or troops. The question has arisen between the railroad companies owning these roads and the officers of the government, whether this reservation includes the free use of the roads alone, or transportation also. The companies claim, that, if they give to the government the free use of their roads, it is all that is required of them. The government claims that it is entitled to have free transportation on the roads, and that it is the duty of the companies to perform it; and Congress has refused compensation for such transportation, giving the companies, however, the right to appeal to the Court of Claims. That court having been applied to, and having decided adversely to the companies, they have appealed to this court, and the cases are now before us for consideration.
The manner in which the question arises is stated with sufficient accuracy by the counsel of one of the appellant companies, as follows:--
'Was the plaintiff, by reason of being a land-grant railroad, bound to transport the troops and property of the United States, free of charge, or had she a right to a reasonable compensation for such services. . ..
'The act of May 5, 1864 (13 Stat. 64), made a grant of land, in the usual form, to the State of Minnesota, to aid in the construction of plaintiff's road. That act contained the following provisions: 'And the said railroad shall be, and remain, a public highway for the use of the government of the United States, free from all toll or other charge for [upon] the transportation of any property or troops of the United States.' Sect. 5, p. 65. The seventh section provides,--
"That the United States mail shall be transported over said road, under the direction of the Post-Office Department, at such price as Congress may, by law, direct: Provided, that, until such price is fixed by law, the Postmaster-General shall have the power to determine the same."By the act of Congress of June 16, 1874 (18 Stat. 74), making appropriations for the army for the fiscal year ending June 30, 1875, it was provided, 'That no part of the money appropriated by this act shall be paid to any railroad company for the transportation of any property or troops of the United States over any railroad which, in whole or in part, was constructed by the aid of a grant of public land, on the condition that such railroad should be a public highway for the use of the government of the United States, free from toll or other charge, or upon any other conditions for the use of such road for such transportation; nor shall any allowance be made out of any money appropriated by this act for the transportation of officers of the army over any such road when on duty, and under orders, as a military officer of the United States. But nothing herein contained shall be construed as preventing any such railroad from bringing a suit in the Court of Claims for the charges for such transportation, and recovering the same, if found entitled thereto by virtue of the laws in force prior to the passage of this act.'. ..
'The case turns upon the construction that should be given to the clause in the act of 1864, which declares that 'the said railroad shall be, and remain, a public highway for the use of the government of the United States, free from all toll or other charge for [upon] the transportation of any property or troops of the United States."
And the counsel for the appellants analyzes this provision as follows:--
'This is a legislative declaration of three things: 1. That the railroad shall be a public highway. 2. That the United States shall have the right to use the same for the transportation of its troops and property. 3. That the United States, in the transportation of its troops and property over such railroad as a public highway, shall not be required to pay toll or other charge.'
It is somewhat singular that a provision apparently so simple in its terms should give rise to such a wide difference of opinion as to its true construction. The difficulty arises from the peculiar character of a railway as a means of public travel and transportation. The case of a turnpike or a canal would have furnished no difficulty whatever. Those thoroughfares are usually constructed and owned by companies who have nothing to do with transportation thereon. They merely furnish the thoroughfare. Had the provision in question related to public works of this kind, it would have been clear that the right reserved to the government would have been merely the right to use the works themselves (the turnpike or the canal) free from toll. The words 'free from all toll or other charge for the transportation of property or troops' would have referred, by necessary implication, to transportation performed by the government itself, either in its own carriages or vessels, or in carriages or vessels procured and employed at its expense. No one would imagine for a moment that the turnpike or canal company would be bound to furnish the means of transportation, much less the propelling power and labor for performing it.
Indeed, Congress has, in several instances, commencing as far back as 1824, made donations of right of way, or grants of land, for canals and turnpikes, and has made almost the exact reservation contained in the railroad grants. The first was that made May 26, 1824, authorizing the State of Indiana to connect the Wabash River with the Miami of Lake Erie; and the reservation was in these words: 'And provided further, that the said canal when completed shall be and for ever remain a public highway for the use of the government of the United States, free from any toll or charge whatever, for any property of the United States, or persons in their service in public business, passing through the same.' 4 Stat. 47.
On the 2d of March; 1827, an act, with precisely the same reservation, was passed, making a grant of land to the State of Illinois, to aid in opening a canal to unite the waters of the Illinois River with those of Lake Michigan. 4 Stat. 234. On the 2d of March, 1833, an amendment to this act was passed, which declared 'that the lands granted to the State of Illinois, by the act to which this is an amendment, may be used and disposed of by said State for the purpose of making a railroad, instead of a canal, as in said act contemplated;. . . Provided, that if a railroad is made in place of a canal, the State of Illinois shall be subject to the same duties and obligations, and the government of the United States shall be entitled to and have the same privileges on said railroad, which they would have had through the canal if it had been opened.' Evidently the only thing reserved in this case was the use of the road.
It will be observed that the last-cited act was passed in 1833, when railroads were about being introduced as means of public communication in this country. It is undoubtedly familiar to most of those whose recollection goes back to that period, that railroads were generally expected to be public highways, on which every man who could procure the proper carriages and apparatus would have the right to travel. This was the understanding in England, where they originated. The Railway Clauses Consolidation Act, passed in 1842, provided in detail for the use of railways by all persons who might choose to put carriages thereon, upon payment of the tolls demandable, subject to the provisions of the statute and the regulations of the company. Acts of 5 & 6 Vict. c. 55. And suits were sustained to compel railway companies to keep up their roads for the use of the public. King v. Severn R. Co., 2 B. & A. 646; Queen v. Grand Junction, 4 Q. B. 18; 2 Redf. sect. 249; Pierce's American Railway Law, 519. Most of the early railroad charters granted in this country were framed upon the same idea. Thus the charter of the Mohawk and Hudson Railroad Company, granted by the legislature of New York in 1826 (which was one of the earliest), after giving the company power to construct the road, provided as follows:--
'And shall have power to regulate the time and manner in which goods and passengers shall be transported, taken, and carried on the same, as well as the manner in which they shall collect tolls and dues on account of transportation and carriage, and shall have power to erect and maintain toll-houses and other buildings for the accommodation of their concerns.' Laws of 1826, p. 289.
In subsequent charters, granted in 1828 and succeeding years, the intent is still more plainly expressed. Thus, in the charter of the Ithaca and Owego Railroad Company, it is provided:--
'Sect. 9. The said corporation shall have power to determine the width and dimensions of the said railroad; to regulate the time and manner in which goods and passengers shall be transported thereon; and the manner of collecting tolls for such transportation; and to erect and maintain toll-houses, &c. Sect. 11. The said corporation may demand and receive from all persons using or travelling upon said rail the following tolls; to wit, for every ton weight of goods, &c., three cents per mile for every mile the same shall pass upon the said road, and a ratable proportion for any greater or less quantity; for every pleasure-carriage, or carriage used for the conveyance of passengers, three cents per mile, in addition to the toll by weight upon the loading. Sect. 12. All persons paying the toll aforesaid may, with suitable and proper carriages, use and travel upon the said railroad, subject to such rules and regulations as the said corporation are authorized to make by the ninth section of this act.' Laws of 1828, p. 17.
Substantially the same provisions were contained in other charters granted in 1828 and 1829. Laws of 1828, pp. 197, 228, 296, 307, 403, 474; Laws of 1829, p. 252. In 1830 and subsequent years, an abbreviated formula was employed, but still apparently recognizing the possible use of the roads by the public; giving, amongst other things, express power to regulate the time and manner in which goods and passengers should be transported thereon, and power to erect toll-houses, &c. So in the early charters granted by the legislature of Massachusetts, it was usual, after granting a toll upon all passengers and property conveyed or transported upon the road, to provide that the transportation of persons and property, the construction of wheels, the form of cars and carriages, the weight of loads, &c., should be in conformity to such rules, regulations, and provisions as the directors should prescribe, and that the road might be used by any persons who should comply with such rules and regulations. This formula was continued down to 1835. See 2 Railroad Laws and Charters, pp. 41, 60, 67, 77, 95, 103, 117, 124, 132, 141, 166, 195, 215. Like provisions were inserted in various charters granted by the legislature of Maine, some as late as the year 1837; and in 1842 a general law was passed, requiring every railroad company whose road should be connected with that of another company to draw over their road the cars of such other company; and, on refusal so to do, the latter company was authorized to run its cars, with its own locomotives over such road, being subject to the general regulations thereof. See 1 id. 8, 22, 60, 63, 77, 310. Similar provisions as to the use of railroads by the public are contained in several early charters granted by the legislature of New Hampshire, coming down to a period as late as 1844. Id. 325, 335, 343, 364, 378, 411. In that year a statute was passed, entitled 'An Act to render railroad corporations public in certain cases,' &c., by one section of which it was provided, that said corporations, whenever thereto required by the legislature, should permit all persons to run locomotives and cars on their road. Id. p. 648.
In New Jersey, not only did the railroad charters contain provisions similar to those above quoted with regard to the authority of the directors to regulate the construction of carriages to be used on their roads, the weight of loads to be carried, the times of starting and the rate of speed, but expressly declared that such roads should be public highways. See Charter of Camden and Amboy Railroad Company, Feb. 4, 1830. The charter of the New Jersey Railroad, passed in 1832, distinguished between tolls for transportation in the cars of the company and those of other persons; and provided that no farmer should be required to pay any toll for the transportation of the produce of his farm to market in his own carriage, weighing not more than one ton, when the load did not exceed one thousand pounds.
The charter of the Philadelphia and Trenton Railroad Company, granted by the legislature of Pennsylvania in 1832, expressly made the road a public highway, and contained various provisions adapted to a road of that character; and no doubt similar provisions were contained in other charters granted in that State.
In the case of Boyle v. Philadelphia and Reading Railroad Company, 54 Penn. 310, decided in 1867, the Supreme Court of Pennsylvania held that the charter of the latter company made the road a public highway, on which all persons might place vehicles of transportation on conforming to the regulations of the company; and that in limiting the amount of 'tolls' demandable for transportation on the road, the legislature had reference to 'tolls' charged to other parties using the road, and not to the freights or charges for transportation which the company itself was authorized to demand when performing transportation.
In Missouri, as late as the year 1847, the legislature, when incorporating the Hannibal and St. Joseph Railroad Company, subjected it to the same restrictions and gave to it the same privileges before imposed and conferred on the Louisiana and Columbia Railroad Company, created in 1837; amongst which was the following: namely, 'that the company should have power to prescribe the kind of carriage to be used on its road, by whom, whether to be propelled by steam or other power, all cars being subject to the discretion of the company, and no person to put any carriage on the road without its permission; and the company was authorized to charge tolls and freight for the transportation of persons, commodities, or carriages on the road; and it was declared that the State and the United States should have the right, in time of war, to use said road in transportation of troops or munitions of war in preference to all other persons.' Missouri Railroad Laws, pp. 8-13. In reference to this railroad (among others), Congress, in 1852, made a grant of land to the State of Missouri, with the same reservation now under consideration, 'that the said railroads shall be and remain public highways for the use of the government of the United States,' &c. 10 Stat. 9. Read in connection with the charter of the railroad, which the rule relating to laws in pari materia requires, it is certain that, in this case at least, the reservation has relation to the use of the railroad alone, and not to the transportation service of the company.
On the other hand, in Maryland, from the first railroad charter granted in 1826,-namely, that of the Baltimore and Ohio Railroad Company,-the legislature has prohibited the use of railroads by any other company or person than the companies owning the same, except with their consent. But even this legislation is a recognition of the distinction between the railroad considered as a structure adapted to general use, and its actual use by placing vehicles and conducting transportation thereon. See Laws of Md. 1826, c. 123, sect. 18, and charters in subsequent years in the Session Laws.
It is undoubtedly true, that, in practice, railroads, as a general thing, are only operated by the companies that own them, or by those with whom they have permanent arrangements for the purpose. These companies have a practical, if not a legal, monopoly of their use. The great expense of constructing and managing cars and motive power fit to be used on railroads as they have actually developed, the difficulty of strict compliance with the regulations adopted, and the diversified ways in which the companies could make the transportation business uncomfortable to those who might attempt to carry it on, are a most effectual security against any interference with their business as carried on by themselves. And in some of the States where railroads were originally declared public highways, the right of the public to use them has been expressly abrogated,-as in Massachusetts, for example, by the act of 1845. See Railroad Laws and Ch. 648.
But the ascertained impracticability of the general and indiscriminate public use of these great thoroughfares does not preclude their use by transportation companies having no interest in the roads themselves. Such companies, in fact, are actually engaged in conducting a vast carrying business on the principal lines of railroad throughout the country. Nor does it preclude the idea, that it may be of great importance to the government, in conducting its various operations in peace and in war, to have the free use of railroads as thoroughfares whenever it chooses to assume the conduct and management of its own transportation thereon.
Be this, however, as it may, the general course of legislation referred to sufficiently demonstrates the fact, that in the early history of railroads it was quite generally supposed that they could be public highways in fact as well as in name. This view pervaded the language of most charters granted at that period, many of which still remain in force; and the railroads constructed under them are, theoretically at least, public highways to this day. This fact affords the only explanation of much of the language used, not only in those early charters, but in many of those which have been granted since, the latter adopting, as was natural, the forms of phraseology found prepared to hand. The language referred to is only consistent with the idea that railroads were to be regarded and used as public highways. The forms of legislative expression thus adopted, and coming down from a period when they had greater practical significance than they now have, bring with them an established sense, which renders them free from all uncertainty and doubt. We know, as well as we know the sense of any phrase in the English language which has a historical meaning and application, what is meant when a railroad is spoken of in a law as a 'public highway.' We know that it refers to the immovable structure stretching across the country, graded and railed for the use of the locomotive and its train of cars.
But it is not alone in charters which contemplate the creation of railroads as public highways that we find evidence of the understood distinction between railroads as mere thoroughfares, and the operations to be carried on upon them by means of locomotives and cars. This is manifest from the fact, amongst other things, that express power is invariably given (if intended to be conferred) to the railroad company to equip its road, and to transport goods and passengers thereon and charge compensation therefor. This practice evidently springs from the conviction that a railroad company is not necessarily a transportation company, and that, to make it such, express authority must be given for that purpose, in compliance with the rule that no power is conferred upon a corporation which is not given expressly or by clear implication.
In view of the legislative history and practice referred to, it seems impossible to resist the conclusion, when we meet with a legislative declaration to the effect that a particular railroad shall be a public highway, that the meaning is, that it shall be open to the use of the public with their own vehicles; and that when Congress, in granting lands in aid of such a road, declared that the same shall be and remain a public highway for the use of the government of the United States, it only means that the government shall have the right to use the road, but not that it shall have the right to require its transportation to be performed by the railroad company. And when this right of the use of the road is granted 'free from all toll or other charge for transportation of any property or troops of the United States,' it only means, that the government shall not be subject to any toll for such use of the road. This, we think, is the natural and most obvious meaning of the language used, when viewed in the light afforded by the history of railroad legislation in this country.
This was also the interpretation put by the Executive Department of the government upon the reservation in question prior to the passage of the acts of 1864. At the breaking out of the late civil war, it became a matter of great practical importance to the railroad companies which had received grants of land subject to this restriction, whether they were or were not to receive any compensation for transporting government property and troops in their cars. It was held that they were, and that a reasonable abatement should be made for the free use of the road, to which the government was entitled. The views of the War Department were set forth in a communication from Mr. Cameron, Secretary of War, to the president of the Illinois Central Railroad Company, dated Aug. 15, 1861, in which he says, 'It has been decided by this department that the clause in your charter (9 Stat. 467, sect. 4) gives a clear right to the government of the United States to the use of your roadway, without compensation, for the transportation of its troops and its property. As a proper compensation for motive power, cars, and all other facilities incident to transportation, two cents per mile will be allowed for passenger travel, subject to a discount of thirty-three and a third per cent as due to government for charter privileges. Payment for transportation of freights, stores, munitions of war, and other public property, will be made at such reasonable rates as may be allowed railroad companies, subject, however, to the abatement of thirty-three and a third per cent, as before specified.' A movement to compel the same company to transport property for the government free of charge was made in 1865; but was reported against adversely by learned committees, after receiving from the War Department a full explanation of the reasons upon which its action had been based. See letter of Q. M. Gen. Meigs to Senator Sherman, dated Feb. 14, 1865, and the action of the Senate and House of Representatives, 2d Sess. 38th Congress, Cong. Globe, vol. lxviii. pp. 890-902, 1045, 1387-1389. The same views were fully expressed by the Attorney-General, when applied to for his opinion, in 1872. 14 Opinions, 591. In accordance with these views, settlements were made with the different companies concerned down to the passage of the act of 1874, suspending payment, as before stated.
It is not without significance, in this connection, that in other grants, when Congress intended to provide for transportation being performed by the railroad company, explicit and proper language is used for that purpose. As in the case of the Union Pacific Railroad Company, chartered by Congress July 1, 1862, where it is enacted that the company shall transmit despatches over its telegraph lines, transport mails, troops, and munitions of war, supplies, and public stores, upon its railroad, for the government, whenever required to do so by any department thereof, and that the government shall at all times have the preference in the use of the same for all the purposes aforesaid, at fair and reasonable rates of compensation, not to exceed the amount paid by private parties for the same kind of service. 12 Stat. 493. In this case compensation was provided for. In other cases the transportation was to be furnished without charge. After the discussion in 1865, before referred to, Congress made several grants of land, with the express reservation that the government property should be transported over the roads concerned at the cost, charge, and expense of the company owning and operating the same, when required by the United States so to do, using language entirely different from that under consideration in the cases now before the court. See acts of 1866 (14 Stat. 95, 237, 241, 290, 338, 549).
But suppose, in the cases under consideration, the States of Kansas and Minnesota, to which the land-grants were directly made, had themselves severally chosen to construct the railroads in question, to be operated and used by any individuals or transportation corporations who might see fit to place rolling-stock thereon upon payment of the proper tolls, would the government have had any further right than that of using the road with its own carriages free of toll? It certainly could not have the right to use the carriages of third persons placed on the road; nor, from any thing contained in the act of Congress, could it require that the State should procure and place rolling-stock on the road. All that the act reserves is the free use of the railroad. Of course this implies, also, the free use of all fixtures and appurtenances forming part of the road, and which are essential to its practical use, such as turntables, switches, depots, and other necessary appendages. Lord Chancellor Cottenham, in the case of Cother v. The Midland Railway Company, 2 Phill. 473, said, 'The term 'railway,' by itself, includes all works authorized to be constructed; and, for the purpose of constructing the railway, the company are authorized to construct such stations and other works as they may think proper.' 1 Redf. on Railw., sect. 105. The 'works' referred to by the Lord Chancellor were those permanent and immovable appendages which constitute parts of the completed structure.
We are of opinion that the reservation in question secures to the government only a free use of the railroads concerned, and that it does not entitle the government to have troops or property transported by the companies over their respective roads free of charge for transporting the same.
In coming to this conclusion, we do not place any great stress upon the use of the word 'toll,' as being a word peculiarly applicable to charges for the use of a highway, as contradistinguished from the charge for transportation, which is more properly denominated 'freight;' for whilst this is undoubtedly true, it must be conceded, that, in the actual language of railroad legislation, the word 'toll' is very often used to express the charge for transportation also. Our opinion is based rather upon that marked distinction which the mind naturally makes, and which is so generally made in railroad legislation between the road as a thoroughfare and the transaction of the carrier business thereon, whether by the railroad company itself or by other persons, and the manifest intent of Congress, in the legislation under review, to reserve only the free use of the road, and not the active service of the company in transportation.
The objection that it would be inconvenient for government to provide locomotives and cars for the performance of its transportation cannot be properly urged. The government can do what it always has done, without experiencing any difficulty, employ the services of the railroad and transportation companies which have provided these accommodations. It might be very convenient for the government to have more rights than it has stipulated for; but we are on a question of construction, and on this question the usus loquendi is a far more valuable aid than the inquiry what might be desirable.
Equally untenable is the idea, that, because railways are not ordinarily used as public highways, therefore the appellation of 'public highways,' when given to them, must mean something different from what it has ever meant before, and must embrace the rolling-stock with which they are operated and used. Such a method of interpretation would set us all at sea, and would invest the courts with the power of making contracts, instead of the parties to them. It is contended by the government, that though it be not entitled to the active services of the company, but only to the use of the 'railroad,' that, at least, this term (railroad) must be regarded as including the equipment of the road as a part thereof, and that the government should be adjudged to have the free use of the locomotives and cars of the company, as well as the track. But, as suggested, we cannot see any good reason for this position. No doubt the word, as used in certain connections and in particular charters and instruments, may properly have a wider latitude of signification, so as to include the equipment and rolling-stock as accessory to the track, constituting together one incorporated mass or corpus of property as the subject-matter of the particular enactment or disposition. It is not our purpose to question the propriety of this view in the cases and for the purposes to which it may be applicable. But where, as in the laws under review, the railroad is referred to throughout in its character as a road, as a permanent structure, and designated and required to be a 'public highway,' it cannot, without doing violence to language, and disregarding the long-established usage of legislative expression, as shown in the previous part of this opinion, be extended to embrace the rolling-stock or other personal property of the railroad company.
The decrees of the Court of Claims in the several cases must be reversed, and a new decree made in favor of the respective petitioners, in conformity with the principles of this opinion; that is to say, awarding to each of them compensation for all transportation performed by them respectively of troops and property of the government (excepting the mails), subject to a fair deduction for the use of their respective railroads.
MR. JUSTICE MILLER, with whom concurred MR. JUSTICE CLIFFORD, MR. JUSTICE SWAYNE, and MR. JUSTICE DAVIS, dissenting.
|
1. Introduction {#sec1-jpm-11-01386}
Neuraxial anesthesia techniques include spinal, epidural, and combined spinal-epidural anesthesia. The anesthesia level needed in a specific surgery is determined by the dermatome level of the skin incision part and the level needed for surgical manipulation, but neuraxial anesthesia is commonly used in surgery of the lower abdomen and lower extremities and provides an alternative to general anesthesia when appropriate \[[@B1-jpm-11-01386]\]. Previous observational and randomized studies have suggested that surgery under neuraxial anesthesia is associated with better results than surgery under general anesthesia for some procedures, although it lacked high-quality evidence \[[@B2-jpm-11-01386]\] and did not have more significant advantages over other specific types of anesthesia \[[@B3-jpm-11-01386],[@B4-jpm-11-01386]\]. In addition, research findings on the association between anesthesia and dementia have been inconsistent \[[@B5-jpm-11-01386],[@B6-jpm-11-01386],[@B7-jpm-11-01386],[@B8-jpm-11-01386]\]. Postoperative cognitive decline is a general anesthesia complication \[[@B9-jpm-11-01386]\], which may develop in up to 80% and 26% of patients who undergo cardiac surgeries and non-cardiac surgeries, respectively \[[@B10-jpm-11-01386]\]. There is a perception that postoperative cognitive decline may increase the risk of dementia and Alzheimer's disease \[[@B9-jpm-11-01386]\]; thus, some patients prefer surgery under neuraxial anesthesia to general anesthesia. However, it is difficult to separate and evaluate the effects of surgery and anesthesia on cognitive function. Neuraxial anesthesia may expose patients to a certain degree of postoperative cognitive decline and dementia. In the present study, we aimed to evaluate the incidence of dementia in patients who underwent surgery under neuraxial anesthesia and investigate the potential of surgery under neuraxial anesthesia as a risk factor for dementia using nationwide, representative cohort sample data.
2. Materials and Methods {#sec2-jpm-11-01386}
2.1. National Sample Cohort {#sec2dot1-jpm-11-01386}
The Korean National Health Insurance Service (KNHIS) has provided mandatory health coverage to the South Korean population since 1989. A unique identification number assigned to each South Korean resident at birth prevents the omission or duplication of healthcare data. With the integration of Medical Aid data into the KNHIS database in 2006, this database comprises the entire population of South Korea. Therefore, usage of the KNHIS database eliminates selection bias. The KNHIS database contains nearly all medical data, including the diagnostic codes according to the Korean Classification of Diseases, which are similar to those of the International Classification of Diseases. The present study utilized a representative sample of 1,025,340 adults from the 2002--2013 KNHIS--National Sample Cohort (NSC) in South Korea (NHIS-2018-2-258). This dataset accounts for approximately 2.2% of the South Korean population in 2002. Stratified random sampling was performed using 1476 strata by age (18 groups), sex (two groups), and income level (41 groups: 40 health insurance groups and one medical aid beneficiary) of the South Korean population of 46 million in 2002. Additionally, the KNHIS--NSC contains data of all health services, including hospital visits (inpatient and outpatient), medical procedures, drug prescriptions, hospital diagnoses, and demographic information (including sex, age, household income, and mortality) during the study period of 2002--2013. To date, numerous previous studies have been published using these data.
2.2. Study Setting and Participants {#sec2dot2-jpm-11-01386}
This study was approved by the Institutional Review Board of Hallym Medical University Chuncheon Sacred Hospital (IRB No. 2021-08-006), and the need for written informed consent was waived because the KNHIS-NSC dataset comprised de-identified secondary data for research purposes. The study cohort comprised patients who underwent surgery under neuraxial anesthesia during the index period (1 January 2003, to 31 December 2004) and were aged over 55 years at enrollment. To remove any potential pre-existing cases of anesthesia, we established a washout period (1 January 2002, to 31 December 2002). Additionally, we excluded the following patients: (1) underwent surgery under neuraxial anesthesia before and after the index period; (2) underwent surgery under other anesthesia besides neuraxial anesthesia from 2002 to 2013; (3) with a history of brain and heart surgery from 2002 to 2013; (4) diagnosed with dementia before and during the index period; and (5) died during the index period. The comparison group (patients who did not receive anesthesia) comprised randomly selected propensity score-matched individuals from the remaining cohort registered in the database (four for each patient who underwent surgery under neuraxial anesthesia) between 2003 and 2004. The study and comparison groups were matched by variables such as sociodemographic factors (age, sex, residential area, and household income), Charlson comorbidity index, and the enrollment date. The schematic description of the cohort design is presented in [Figure 1](#jpm-11-01386-f001){ref-type="fig"}.
2.3. Predictor and Outcome Variables {#sec2dot3-jpm-11-01386}
We collected data for patients diagnosed with dementia (Alzheimer's disease \[F00, G30\], vascular dementia \[F01\], and others \[F02, F03\]) from 2002 to 2013. Detailed patient characteristics, including sex, age, residence, household income, and Charlson comorbidity index, are presented in [Table 1](#jpm-11-01386-t001){ref-type="table"}. The study population was divided into three age groups (55--64, 65--74, and ≥75 years), three income groups (low: ≤30%, middle: 30.1--69.9%, and high: ≥70% of the median), three residential areas (first area: Seoul, the largest metropolitan region in South Korea; second area: other metropolitan cities in South Korea; and third area: small cities and rural areas), and three Charlson comorbidity index groups (0, 1, and ≥2). The Charlson comorbidity index, developed based on medical record data, converted 19 diseases into ICD-10 codes for application to administrative data, and the list of Charlson comorbidity index components and weights is summarized in [Appendix A](#app1-jpm-11-01386){ref-type="app"} [Table A1](#jpm-11-01386-t0A1){ref-type="table"}. The study endpoint was the death of a participant or the incidence of dementia. The characteristics of all patients who had no events and who were alive until 31 December 2013 are shown in [Appendix A](#app1-jpm-11-01386){ref-type="app"} [Table A2](#jpm-11-01386-t0A2){ref-type="table"}. The risks of dementia in the surgery under neuraxial anesthesia and comparison groups were compared using person-years at risk, which was defined as the duration between the date of enrollment and the patient's respective endpoint.
2.4. Statistical Analysis {#sec2dot4-jpm-11-01386}
We employed one-to-four propensity score-matching according to age, sex, residential area, household income, and comorbidities. The incidence rates per 1000 person-years for dementia were obtained by dividing the number of patients with dementia by person-years at risk. The overall disease-free survival rate was determined using Kaplan--Meier survival curves for the entire observation period. To evaluate the risk association between surgery under neuraxial anesthesia and dementia, we used Cox proportional hazard regression to calculate the hazard ratio and 95% confidence intervals (CI), adjusting for other predictor variables. As a subgroup analysis, we evaluated hazard ratios of dementia according to sex, age, Charlson comorbidity index, type of surgery, and dementia type among the sample patient. All statistical analyses were performed using R version 3.3.1 (R Foundation for Statistical Computing, Vienna, Austria) with a significance level of 0.05.
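As a rough illustration of the kind of model described above: the study itself used R 3.3.1, so the following Python/lifelines sketch, with synthetic data and made-up column names, is only meant to show the shape of the analysis, not reproduce it.

```python
import numpy as np
import pandas as pd
from lifelines import CoxPHFitter

rng = np.random.default_rng(0)
n = 500
df = pd.DataFrame({
    "neuraxial": rng.integers(0, 2, n),   # exposure: surgery under neuraxial anesthesia
    "female":    rng.integers(0, 2, n),
    "age_group": rng.integers(1, 4, n),   # 1: 55-64, 2: 65-74, 3: >=75
    "cci":       rng.integers(0, 3, n),   # Charlson comorbidity index group
})
# Synthetic follow-up time (years at risk) and dementia event indicator.
df["years_at_risk"] = rng.exponential(8, n).clip(0.1, 11)
df["dementia"] = rng.integers(0, 2, n)

cph = CoxPHFitter()
cph.fit(df, duration_col="years_at_risk", event_col="dementia")
cph.print_summary()  # exp(coef) gives adjusted hazard ratios with 95% CIs, as in Table 2

# Crude incidence per 1000 person-years by exposure group:
rate = 1000 * df.groupby("neuraxial").apply(
    lambda g: g["dementia"].sum() / g["years_at_risk"].sum()
)
print(rate)
```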
::: {#jpm-11-01386-t001 .table-wrap}
Table 1. Characteristics of the study subjects.

| Variables | | Comparison (*n* = 4488) | Surgery under Neuraxial Anesthesia (*n* = 1122) | *p* Value |
|---|---|---|---|---|
| Sex | Male | 2044 (45.5%) | 511 (45.5%) | 1.000 |
| | Female | 2444 (54.5%) | 611 (54.5%) | |
| Ages | 55--64 | 1892 (42.2%) | 473 (42.2%) | 1.000 |
| | 65--74 | 1596 (35.6%) | 399 (35.6%) | |
| | ≥75 | 1000 (22.3%) | 250 (22.3%) | |
| Residence | Seoul | 908 (20.2%) | 227 (20.2%) | 1.000 |
| | Second area | 940 (20.9%) | 235 (20.9%) | |
| | Third area | 2640 (58.8%) | 660 (58.8%) | |
| Household income | Low (0--30%) | 992 (22.1%) | 248 (22.1%) | 1.000 |
| | Middle (30--70%) | 1424 (31.7%) | 356 (31.7%) | |
| | High (70--100%) | 2072 (46.2%) | 518 (46.2%) | |
| CCI | 0 | 2472 (55.1%) | 618 (55.1%) | 1.000 |
| | 1 | 1024 (22.8%) | 256 (22.8%) | |
| | ≥2 | 992 (22.1%) | 248 (22.1%) | |

Comparison, subjects without anesthesia; Seoul, the largest metropolitan area; second area, other metropolitan cities; third area, other areas; CCI, Charlson comorbidity index.
:::
3. Results {#sec3-jpm-11-01386}
3.1. Effects of the Surgery under Neuraxial Anesthesia on the Incidence of Dementia among Patients Aged over 55 Years {#sec3dot1-jpm-11-01386}
The present study comprised 1122 patients who underwent surgery under neuraxial anesthesia and 4488 comparison participants (patients who did not undergo surgery under anesthesia). The two cohorts (the surgery under neuraxial anesthesia group and the comparison group) had similar distributions of sex, age, residential area, household income, and Charlson comorbidity index, meaning that each variable was matched appropriately between the two groups ([Table 1](#jpm-11-01386-t001){ref-type="table"}, [Figure 2](#jpm-11-01386-f002){ref-type="fig"}). The overall incidence of dementia was higher in the surgery under neuraxial anesthesia group (14.8 per 1000 person-years) than in the comparison group (11.5 per 1000 person-years) ([Table 2](#jpm-11-01386-t002){ref-type="table"}).
3.2. Hazard Ratios of Dementia in Patients Aged over 55 Years and Who Underwent Surgery under Neuraxial Anesthesia {#sec3dot2-jpm-11-01386}
[Figure 3](#jpm-11-01386-f003){ref-type="fig"} presents the Kaplan--Meier survival curves with log-rank tests for the cumulative hazard plot of specific disease-free between comparison and surgery under neuraxial anesthesia groups. The results of the log-rank test indicated that patients who underwent surgery under neuraxial anesthesia developed dementia more frequently than those who did not undergo surgery under anesthesia during the 9-year follow-up period. In the subgroups analysis, patients who underwent surgery under neuraxial anesthesia developed Alzheimer's disease more frequently than those who did not undergo surgery under anesthesia during the 9-year follow-up period ([Figure 4](#jpm-11-01386-f004){ref-type="fig"}).
::: {#jpm-11-01386-f002 .fig}
Figure 2. Balance plot for five variables before and after matching.
:::
::: {#jpm-11-01386-t002 .table-wrap}
Table 2. Incidence per 1000 person-years and HR (95% CIs) of dementia during the follow-up period.

| Variables | N | Case | Incidence | Unadjusted HR (95% CI) | Adjusted HR (95% CI) | *p* Value |
|---|---|---|---|---|---|---|
| Comparison | 4488 | 442 | 11.49 | 1.00 (ref) | 1.00 (ref) | |
| Surgery under neuraxial anesthesia | 1122 | 121 | 14.78 | 1.40 (1.14--1.72) \*\* | 1.44 (1.17--1.76) \*\*\* | \<0.001 |
| Male | 2555 | 192 | 9.33 | 1.00 (ref) | 1.00 (ref) | |
| Female | 3055 | 371 | 14.23 | 1.51 (1.27--1.80) \*\*\* | 1.32 (1.10--1.57) \*\* | 0.002 |
| 55--64 | 2365 | 77 | 3.40 | 1.00 (ref) | 1.00 (ref) | |
| 65--74 | 1995 | 240 | 14.35 | 4.44 (3.43--5.74) \*\*\* | 4.26 (3.29--5.51) \*\*\* | \<0.001 |
| ≥75 | 1250 | 246 | 33.66 | 11.90 (9.20--15.40) \*\*\* | 11.36 (8.77--14.72) \*\*\* | \<0.001 |
| Seoul | 1135 | 99 | 9.99 | 1.00 (ref) | 1.00 (ref) | |
| Second area | 1175 | 126 | 12.92 | 1.31 (1.01--1.70) \* | 1.19 (0.92--1.56) | 0.188 |
| Third area | 3300 | 338 | 12.52 | 1.27 (1.02--1.59) \* | 1.07 (0.85--1.34) | 0.573 |
| Low (0--30%) | 1240 | 132 | 13.12 | 1.00 (ref) | 1.00 (ref) | |
| Middle (30--70%) | 1780 | 151 | 10.09 | 0.76 (0.60--0.96) \* | 0.87 (0.69--1.10) | 0.239 |
| High (70--100%) | 2590 | 280 | 12.94 | 0.98 (0.79--1.20) | 0.99 (0.80--1.21) | 0.894 |
| CCI 0 | 3090 | 283 | 10.54 | 1.00 (ref) | 1.00 (ref) | |
| CCI 1 | 1280 | 163 | 15.24 | 1.47 (1.21--1.78) \*\*\* | 1.37 (1.13--1.66) \*\* | 0.001 |
| CCI ≥2 | 1240 | 117 | 12.82 | 1.25 (1.01--1.55) \* | 1.21 (0.97--1.50) | 0.091 |

Seoul, the largest metropolitan area; second area, other metropolitan cities; third area, other areas; CCI, Charlson comorbidity index; HR, hazard ratio; CI, confidence interval. \* *p* \< 0.05, \*\* *p* \< 0.010, and \*\*\* *p* \< 0.001.
:::
::: {#jpm-11-01386-f003 .fig}
Figure 3. Risk of developing dementia in the neuraxial anesthesia group versus the comparison (non-anesthesia) group.
:::
::: {#jpm-11-01386-f004 .fig}
Figure 4. Cumulative hazard plots of specific dementia types in the neuraxial anesthesia and comparison groups: (**A**) Alzheimer's disease. (**B**) Vascular dementia.
:::
We used simple and multiple Cox regression models to analyze the hazard ratio for the development of dementia ([Table 2](#jpm-11-01386-t002){ref-type="table"}). After adjusting for sociodemographic factors and Charlson comorbidity index, we found that surgery under neuraxial anesthesia was associated with prospective dementia development, with an adjusted hazard ratio of 1.44 (95% CI, 1.17--1.76). In addition, female sex and increasing age were significantly associated with the prospective development of dementia.
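As a rough illustration of this analysis (a minimal sketch, not the authors' code; the dataset layout, column names, and numeric encoding of the covariates are assumptions), the unadjusted and adjusted Cox models could be fit with the lifelines package as follows:

```python
import pandas as pd
from lifelines import CoxPHFitter

# Assumed layout: one row per matched participant, covariates already numerically encoded.
df = pd.read_csv("matched_cohort.csv")

# Simple (unadjusted) model: exposure only.
unadjusted = CoxPHFitter().fit(
    df[["follow_up_years", "dementia", "neuraxial_anesthesia"]],
    duration_col="follow_up_years", event_col="dementia",
)

# Multiple (adjusted) model: exposure plus sociodemographic factors and Charlson comorbidity index.
adjusted = CoxPHFitter().fit(
    df[["follow_up_years", "dementia", "neuraxial_anesthesia",
        "age_group", "sex", "residence", "income", "cci"]],
    duration_col="follow_up_years", event_col="dementia",
)

adjusted.print_summary()  # hazard ratios with 95% confidence intervals
```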
In the subgroup analysis, we observed no significant difference in the hazard ratios of dementia by sex between surgery under neuraxial anesthesia and non-anesthesia groups. Additionally, the adjusted hazard ratio for dementia development among the older (≥75) patients who underwent surgery under neuraxial anesthesia during the 9-year follow-up period was 1.60 (95% CI: 1.19--2.15). The adjusted hazard ratio for dementia development among patients who underwent surgery under neuraxial anesthesia with Charlson comorbidity index of 0 during the 9-year follow-up period was 1.62 (95% CI: 1.23--2.15) ([Table 3](#jpm-11-01386-t003){ref-type="table"}). Moreover, the adjusted hazard ratio of developing Alzheimer's disease in patients aged ≥55 who underwent surgery under neuraxial anesthesia during the 9-year follow-up period was 1.52 (95% CI, 1.20--1.92) compared with the patients who did not undergo surgery under anesthesia; however, we did not find any association between surgery under neuraxial anesthesia and vascular dementia (adjusted hazard ratio of 1.29; 95% CI, 0.82--2.03) in patients ≥55 who underwent surgery under neuraxial anesthesia ([Table 4](#jpm-11-01386-t004){ref-type="table"}). [Table 5](#jpm-11-01386-t005){ref-type="table"} summarizes the incidence per 1000 person-years and risk of dementia, Alzheimer's disease, and vascular dementia according to type of surgery.
4. Discussion {#sec4-jpm-11-01386}
The present study investigated the incidence of dementia in older adults who underwent surgery under neuraxial anesthesia and compared the incidence risk of dementia between those who underwent surgery under neuraxial anesthesia and those who did not undergo surgery under neuraxial anesthesia in Korea. The corresponding incidences of dementia during the 9-year follow-up period were 14.8 and 11.5 cases per 1000 person-years. The incidence risk of dementia in older adults who underwent surgery under neuraxial anesthesia was 1.44-fold higher than that in older adults who did not undergo surgery under neuraxial anesthesia, even after adjusting for several risk factors, including age, sex, residence, household income, and comorbidities. Similarly, the incidence risk of Alzheimer's disease in older adults who underwent surgery under neuraxial anesthesia was 1.52-fold higher than that in older adults who did not undergo surgery under neuraxial anesthesia. Surgery under neuraxial anesthesia increased the risk of dementia in individuals with a Charlson comorbidity index of 0, which suggests that surgery under neuraxial anesthesia can increase the risk of dementia in the absence of comorbidities. Patients who underwent surgery under neuraxial anesthesia had elevated risk of dementia and Alzheimer's disease regardless of the type of surgery, which suggests that neuraxial anesthesia increases the risk of dementia.
Several studies have reported an association between anesthesia and dementia, but the effect of general anesthesia on dementia remains controversial. A systematic review and meta-analysis of 15 case-control studies did not find a significant association between anesthesia and Alzheimer's disease \[[@B11-jpm-11-01386]\]. However, studies investigating the association between dementia and surgery under neuraxial anesthesia are limited. There are some studies on the postoperative cognitive dysfunction or delirium after surgery under neuraxial anesthesia. Ehsani et al. reported that the incidence of postoperative delirium and early cognitive disorder was higher in general anesthesia than in spinal anesthesia \[[@B12-jpm-11-01386]\]. However, Zhang et al. showed that spinal anesthesia + isoflurane was associated with a higher incidence of dementia than spinal anesthesia, but there was no difference between spinal anesthesia and spinal anesthesia + desflurane \[[@B13-jpm-11-01386]\]. Silbert et al. reported no significant difference in the rate of postoperative cognitive dysfunction between general anesthesia and spinal anesthesia \[[@B14-jpm-11-01386]\]. Aiello et al. reported that exposure to general anesthesia or neuraxial anesthesia was not associated with dementia or Alzheimer's disease in community-dwelling members of the Adult Changes in Thought cohort aged 65 years and above and free of dementia at baseline \[[@B9-jpm-11-01386]\]. However, their study had some limitations. First, data were collected through interviews and patient recall. Second, the demographic composition was primarily white, middle class, well-educated patients, which did not reflect the general population. In contrast, we investigated the general population with long-term follow-up, and our results showing high incidence in older age, females, and low-income earners and those residing in rural areas are similar to previous studies \[[@B15-jpm-11-01386],[@B16-jpm-11-01386]\].
It is unclear why older adults with surgery under neuraxial anesthesia have a higher incidence of dementia than those without surgery under neuraxial anesthesia. Our results showed that the effect on dementia was associated more with Alzheimer's disease than with vascular dementia. The direct effect of surgery under neuraxial anesthesia on the pathology of Alzheimer's disease is difficult to explain, but the neurotoxicity of local anesthetics can be considered. In the spinal canal after spinal anesthesia, lidocaine induced apoptosis, and higher concentrations of lidocaine induced necrosis and non-specific apoptosis \[[@B17-jpm-11-01386],[@B18-jpm-11-01386]\]. However, the effects of spinal anesthesia on the brain have not been studied, and the effects of local anesthesia may be limited to the specific organ or tissue into which the anesthetic is injected. Fathy et al. reported that lidocaine and bupivacaine can lead to postoperative cognitive impairment after cataract surgery under local anesthesia, but they did not convincingly demonstrate whether this outcome was a definitive effect of local anesthetics \[[@B19-jpm-11-01386]\].
Postoperative events can be a risk factor for dementia. In animals, postoperative cognitive dysfunction is associated with postoperative cytokine-induced inflammation in the hippocampus \[[@B20-jpm-11-01386],[@B21-jpm-11-01386]\]. Various proinflammatory cytokines, such as tumor necrosis factor alpha, maintain a state of chronic neuroinflammation, resulting in postoperative cognitive impairment and postoperative delirium \[[@B22-jpm-11-01386],[@B23-jpm-11-01386]\]. Surgery may induce astrogliosis, β-amyloid accumulation, and τ phosphorylation in the elderly, which may be associated with the cognitive decline seen in postoperative cognitive dysfunction \[[@B24-jpm-11-01386]\]. The decreased functional connectivity of the executive control network and its anticorrelation with the default mode network may contribute to executive function deficits following surgery \[[@B25-jpm-11-01386]\]. The brain drives the surgical stress response by initiating changes in neuroendocrine balance, but changes in homeostasis can lead to postoperative cognitive impairment (POCD) \[[@B26-jpm-11-01386]\]. Changes in the metabolism before and after surgery can be related to postoperative delirium \[[@B27-jpm-11-01386]\]. Perioperative management may be associated with a risk of dementia. Preoperative fasting, body temperature control, and blood pressure management are associated with the risk of postoperative cognitive disorder \[[@B28-jpm-11-01386]\]. Patients with a high risk of postoperative neurological complications require frailty screening with preoperative cognitive screening for the best perioperative neurological results \[[@B29-jpm-11-01386]\].
Reduction of cerebrospinal fluid pressure due to cerebrospinal fluid loss in the epidural space after spinal anesthesia is not uncommon \[[@B30-jpm-11-01386]\]. In older people, there may be a persistent and significant decrease in cerebrospinal fluid pressure \[[@B31-jpm-11-01386]\]. Patients with low cerebrospinal fluid flow have significantly reduced memory, visual construction, and verbal fluency. Alterations in cerebrospinal fluid flow may contribute to some of the cognitive deficits observed in Alzheimer's disease patients \[[@B32-jpm-11-01386]\]. Injection of the drug into the lumbar epidural space compresses the dural sac, changes the compliance of the spinal subarachnoid space, and moves cerebrospinal fluid upwards towards the skull. Depending on the amount of the drug, intracranial pressure may increase \[[@B33-jpm-11-01386]\]. The injected volume can increase with prolonged anesthesia or with a patient-controlled analgesia catheter placed after neuraxial anesthesia. Increased intracranial pressure may be associated with cognitive impairment \[[@B34-jpm-11-01386]\]. Spinal anesthesia triggers cerebral vasodilation \[[@B35-jpm-11-01386]\] and may raise intracranial pressure \[[@B36-jpm-11-01386]\]. In addition, the ability to control cerebral blood flow decreases in the elderly \[[@B37-jpm-11-01386]\].
The elevated risk of cognitive deficits may be explained by the effect of neuraxial anesthesia on the risk factors for Alzheimer's disease. Cerebrovascular disease is associated with decreasing cognitive function, and it lowers the threshold of clinical dementia in patients with neuropathological diagnosis of Alzheimer's disease \[[@B38-jpm-11-01386],[@B39-jpm-11-01386],[@B40-jpm-11-01386]\]. Many older patients take anticoagulants for atrial fibrillation, coronary artery disease, and thromboembolism. These anticoagulants must be discontinued before surgery under neuraxial anesthesia. However, perioperative discontinuation of anticoagulants for several days can cause acute ischemia, which raises the risk of acute ischemic stroke \[[@B41-jpm-11-01386],[@B42-jpm-11-01386]\]. If only small blood vessels are affected, cerebral ischemia cannot be ruled out even when no symptoms of cerebral ischemia appear after surgery under anesthesia \[[@B43-jpm-11-01386]\]. The effect of surgery on dementia should also be considered. Neuraxial anesthesia blunts the stress response during surgery, reduces intraoperative bleeding, and reduces the frequency of postoperative thromboembolism. In addition, considering the advantages of reducing morbidity and mortality in high-risk patients and controlling post-operative pain, surgery may have a significant impact on dementia \[[@B1-jpm-11-01386]\]. After surgery, patients have limited physical activity, which may increase the risk of dementia compared to active physical activity \[[@B44-jpm-11-01386]\]. In particular, many surgeries of the lower extremities are performed under neuraxial anesthesia, and the patients may have prolonged immobility depending on the postoperative condition.
Approximately 50 million people worldwide have dementia, and an estimated 5--8% of the general population over the age of 60 years develops dementia \[[@B45-jpm-11-01386]\]. The treatment for dementia is expensive, and the earlier the onset, the higher the cost of care. Considering that most types of dementia, such as Alzheimer's disease, have slow progression, the associated risk factors should be investigated and controlled \[[@B46-jpm-11-01386]\]. Therefore, the risk of dementia should be considered when selecting the anesthesia type and surgery. Although surgery under neuraxial anesthesia may be associated with dementia risk factors, it is often thought to have little relevance to dementia because patients remain conscious during anesthesia. However, despite the increase in surgical demand in old age \[[@B47-jpm-11-01386]\], studies on the effect of neuraxial anesthesia on dementia have been limited, in part because dementia research requires long-term follow-up.
The present study has some limitations. First, we could not determine the anesthesia and surgery experience of patients before the index period because we categorized patients according to the presence of neuraxial anesthesia experience during the index period. Considering that dementia progresses slowly, anesthesia and surgery experience before the index period might have affected the development of dementia. However, to overcome this challenge, we only enrolled subjects who were 55 years of age or older and investigated the effect of surgery under neuraxial anesthesia in old age. Additionally, this study had a one-year washout period for surgery under anesthesia and excluded patients with additional surgery under anesthesia after the index period to ensure that only the effect of surgery under neuraxial anesthesia during the index period was evaluated. Moreover, we perfectly matched the surgery under neuraxial anesthesia and non-surgery under neuraxial anesthesia groups using propensity scores with several variables, including age, sex, residence, household income, and comorbidities, and the effect of these variables on dementia was similar to that of previous studies. Second, it was difficult to evaluate the effect of various procedures, including surgeries and local anesthesia, on our findings. We evaluated the effects of anesthesia and surgery on dementia as a series of procedures. However, surgery may affect immediate survival and long-term outcomes \[[@B48-jpm-11-01386]\]. There are many types of surgery, and the corresponding intra- and post-operative effects may be different. The comparison group was non-anesthesia regardless of surgery. We classified the two groups by code of anesthesia, but there was no separate code for local anesthesia in KNHIS-NSC. The comparison group may include simple surgery under local anesthesia. Local anesthesia is usually administered for simple procedures, including skin surgery, open wound repair, abscess drainage, foreign body removal from the skin, vascular access, and dental procedures \[[@B49-jpm-11-01386],[@B50-jpm-11-01386],[@B51-jpm-11-01386]\]. The direct effect on the brain by local anesthesia for simple surgery may be limited. In addition, intensive postoperative care is not needed because complications of simple surgery are rare, and active pain control is usually not required because the surgical wound is small. Although we used the KNHIS--NSC dataset in the present study, it has been established for medical service claims and reimbursement, not for research. This dataset lacked sufficient data on procedures, including surgeries and local anesthesia. Additional studies are needed to evaluate the effects of each surgery and local anesthesia. Third, our results are likely to be further biased due to confounding by indication or postoperative events, that is, patients who received neuraxial anesthesia may have done so because of unmeasured differences between groups, such as frailty, patient or clinician preference, or traumatic brain injury and stroke during the follow-up period. We matched the Charlson comorbidity index between the two groups to reduce the confounding as much as possible. Fourth, we could not validate the diagnosis of dementia, Alzheimer's disease, and vascular dementia with the ICD code. Thus, we may have underestimated cognitive dysfunction or dementia diagnosis. Further studies are needed to analyze hospital-based or registry-based datasets that include clinical cognitive function tests to confirm our findings.
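For illustration only (a sketch under assumed column names, not the authors' matching code), 1:4 propensity score matching on the covariates named above could be implemented along these lines:

```python
import pandas as pd
from sklearn.linear_model import LogisticRegression

def match_1_to_4(df: pd.DataFrame) -> pd.DataFrame:
    """Greedy 1:4 nearest-neighbour matching on the propensity score, without replacement."""
    covariates = ["age_group", "sex", "residence", "income", "cci"]  # assumed names
    X = pd.get_dummies(df[covariates], drop_first=True)
    ps = LogisticRegression(max_iter=1000).fit(X, df["exposed"]).predict_proba(X)[:, 1]
    df = df.assign(ps=ps)
    controls = df[df["exposed"] == 0].copy()
    matched = []
    for _, case in df[df["exposed"] == 1].iterrows():
        nearest = (controls["ps"] - case["ps"]).abs().nsmallest(4).index
        matched.append(pd.concat([case.to_frame().T, controls.loc[nearest]]))
        controls = controls.drop(nearest)  # match without replacement
    return pd.concat(matched, ignore_index=True)
```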
5. Conclusions {#sec5-jpm-11-01386}
The present study showed that older patients who underwent surgery under neuraxial anesthesia had a higher risk of developing dementia. In particular, these patients showed a higher risk of Alzheimer's disease. Because anesthesia is accompanied by a series of processes spanning surgery and postoperative care, and because we evaluated surgery under neuraxial anesthesia as a whole, the separate effects of surgery and of neuraxial anesthesia on dementia could not be determined. However, our findings provide new evidence that surgery under neuraxial anesthesia can be considered a possible risk factor for dementia.
Conception and design of the work, J.-H.S.; acquisition of data for the work, D.-K.K., H.Y. and J.-H.S.; Analysis and interpretation of data for the work, H.Y., D.-K.K. and Y.-S.K.; Writing the manuscript---original draft preparation, Y.-S.K. and J.-H.S., review and editing, S.-H.L., C.K., J.-J.L., D.-K.K. and J.-H.S. All authors have read and agreed to the published version of the manuscript.
This research was supported by a grant of the Korea Health Technology R&D Project through the Korea Health Industry Development Institute (KHIDI), funded by the Ministry of Health & Welfare, Republic of Korea (grant number: HR21C0198). Additionally, this research was supported by 'R&D Program for Forest Science Technology (Project No. FTIS-2021397A00-2123-0107)' provided by the Korea Forest Service (Korea Forestry Promotion Institute).
The study was conducted according to the guidelines of the Declaration of Helsinki and approved by the Clinical Research Ethics Committee of Chuncheon Sacred Heart Hospital, Hallym University (IRB No. 2021-08-006).
Patient consent was waived because this study used only deidentified data.
The data used in this study are owned by the Korean National Health Insurance Service. Data can be used with permission from the Korean National Health Insurance Service.
The authors declare no conflict of interest.
::: {#jpm-11-01386-t0A1 .table-wrap}
List of Charlson's comorbidities and their weights.
Comorbidities International Classification of Disease, 10th Revision Code Original Weight Updated Weight
------------------------------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ----------------- ----------------
Myocardial infarction I21.x, I22.x, I25.2 1 0
Congestive heart failure I09.9, I11.0, I13.0, I13.2, I25.5, I42.0, I42.5--I42.9, I43.x, I50.x, P29.0 1 2
Peripheral vascular disease I70.x, I71.x, I73.1, I73.8, I73.9, I77.1, I79.0, I79.2, K55.1, K55.8, K55.9, Z95.8, Z95.9 1 0
Cerebrovascular disease G45.x, G46.x, H34.0, I60.x--I69.x 1 0
Chronic pulmonary disease I27.8, I27.9, J40.x--J47.x, J60.x--J67.x, J68.4, J70.1, J70.3 1 1
Rheumatologic disease M05.x, M06.x, M31.5, M32.x--M34.x, M35.1, M35.3, M36.0 1 1
Peptic ulcer disease K25.x--K28.x 1 0
Mild liver disease B18.x, K70.0--K70.3, K70.9, K71.3--K71.5, K71.7, K73.x, K74.x, K76.0, K76.2--K76.4, K76.8, K76.9, Z94.4 1 2
Diabetes without chronic complication E10.0, E10.1, E10.6, E10.8, E10.9, E11.0, E11.1, E11.6, E11.8, E11.9, E12.0, E12.1, E12.6, E12.8, E12.9, E13.0, E13.1, E13.6, E13.8, E13.9, E14.0, E14.1, E14.6, E14.8, E14.9 1 0
Diabetes with chronic complication E10.2--E10.5, E10.7, E11.2--E11.5, E11.7, E12.2--E12.5, E12.7, E13.2--E13.5, E13.7, E14.2--E14.5, E14.7 2 1
Hemiplegia or paraplegia G04.1, G11.4, G80.1, G80.2, G81.x, G82.x, G83.0--G83.4, G83.9 2 2
Renal disease I12.0, I13.1, N03.2--N03.7, N05.2--N05.7, N18.x, N19.x, N25.0, Z49.0--Z49.2, Z94.0, Z99.2 2 1
Any malignancy including leukemia and lymphoma C00.x--C26.x, C30.x--C34.x, C37.x--C41.x, C43.x, C45.x--C58.x, C60.x--C76.x, C81.x--C85.x, C88.x, C90.x--C97.x 2 2
Moderate or severe liver disease I85.0, I85.9, I86.4, I98.2, K70.4, K71.1, K72.1, K72.9, K76.5, K76.6, K76.7 3 4
Metastatic solid tumor C77.x--C80.x 6 6
Acquired immune deficiency syndrome/human immunodeficiency virus B20.x--B22.x, B24.x 6 4
Resource from \[[@B52-jpm-11-01386]\].
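A minimal sketch (not the authors' code) of how the appendix table can be turned into a score: each comorbidity group contributes its weight once if any of the patient's ICD-10 codes falls in the listed ranges. Only a few groups are shown here; a full implementation would cover every row of the table.

```python
# Abbreviated map from comorbidity group to (ICD-10 code prefixes, original weight).
ORIGINAL_WEIGHTS = {
    "myocardial_infarction": (("I21", "I22", "I25.2"), 1),
    "congestive_heart_failure": (("I50",), 1),
    "diabetes_with_chronic_complication": (("E10.2", "E11.2", "E14.7"), 2),
    "metastatic_solid_tumor": (("C77", "C78", "C79", "C80"), 6),
}

def charlson_index(icd10_codes: list[str]) -> int:
    """Sum the weight of every comorbidity group matched by at least one code."""
    score = 0
    for prefixes, weight in ORIGINAL_WEIGHTS.values():
        if any(code.startswith(p) for code in icd10_codes for p in prefixes):
            score += weight
    return score

print(charlson_index(["I21.0", "E11.2"]))  # 1 + 2 = 3
```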
::: {#jpm-11-01386-t0A2 .table-wrap}
Description of time to event and censored data.
                               Total   Neuraxial anesthesia
Dementia event                         121
Total censored (no event)      5047    1001
Termination of study           3679    737
Loss to follow-up/drop-out     1368    264
::: {#jpm-11-01386-f001 .fig}
Schematic description of study design.
::: {#jpm-11-01386-t003 .table-wrap}
Hazard ratios of dementia by sex, age, and comorbidity score between surgery under neuraxial anesthesia and comparison (non-anesthesia).
Sex Male Female
------------------------ ------------------------------------ -------------------------- ------------------------------------ ---------------------- ------------------------------------ ------------------------
Unadjusted HR (95% CI) 1.00 (ref) 1.54 (1.09--2.17) \* 1.00 (ref) 1.33 (1.03--1.71) \*
Adjusted HR (95% CI) 1.00 (ref) 1.60 (1.13--2.25) \*\* 1.00 (ref) 1.36 (1.05--1.75) \*
Ages 55--64 65--74 ≥75
Comparison Surgery under neuraxial anesthesia Comparison Surgery under neuraxial anesthesia Comparison Surgery under neuraxial anesthesia
Unadjusted HR (95% CI) 1.00 (ref) 1.32 (0.74--2.35) 1.00 (ref) 1.30 (0.94--1.80) 1.00 (ref) 1.60 (1.19--2.16) \*\*
Adjusted HR (95% CI) 1.00 (ref) 1.33 (0.75--2.37) 1.00 (ref) 1.31 (0.95--1.81) 1.00 (ref) 1.60 (1.19--2.15) \*\*
CCI 0 1 ≥2
Comparison Surgery under neuraxial anesthesia Comparison Surgery under neuraxial anesthesia Comparison Surgery under neuraxial anesthesia
Unadjusted HR (95% CI) 1.00 (ref) 1.55 (1.17--2.05) \*\* 1.00 (ref) 1.33 (0.90--1.96) 1.00 (ref) 1.18 (0.73--1.88)
Adjusted HR (95% CI) 1.00 (ref) 1.62 (1.23--2.15) \*\*\* 1.00 (ref) 1.39 (0.94--2.05) 1.00 (ref) 1.11 (0.69--1.77)
CCI, Charlson comorbidity index; HR, hazard ratio; CI, confidence interval. \* *p* \< 0.05, \*\* *p* \< 0.010, and \*\*\* *p* \< 0.001.
::: {#jpm-11-01386-t004 .table-wrap}
Incidence per 1000 person-years and HR (95% CI) of specific diseases (Alzheimer's disease and vascular dementia).
Variables N Case Incidence Unadjusted HR\ Adjusted HR\ *p* Value
(95% CI) (95% CI)
------------------------------------ ------ ------ ----------- ------------------------ -------------------------- -----------
Alzheimer's disease
Comparison                            4488   328   8.45    1.00 (ref)                1.00 (ref)
Surgery under neuraxial anesthesia    1122   92    11.13   1.48 (1.17--1.87) \*\*    1.52 (1.20--1.92) \*\*\*   \<0.001
Vascular dementia
Comparison                            4488   95    2.40    1.00 (ref)                1.00 (ref)
Surgery under neuraxial anesthesia    1122   24    2.84    1.27 (0.81--2.01)         1.29 (0.82--2.03)          0.273
HR, hazard ratio; CI, confidence interval. \*\* *p* \< 0.010, and \*\*\* *p* \< 0.001.
::: {#jpm-11-01386-t005 .table-wrap}
Incidence per 1000 person-years and HR (95% CI) of dementia, Alzheimer's disease and vascular dementia according to surgery type.
Variables N Case Incidence Unadjusted HR\ Adjusted HR\ *p* Value
(95% CI) (95% CI)
--------------------- ------ ------ ----------- -------------------------- ------------------------ -----------
Dementia
Comparison    4488   442   11.49   1.00 (ref)                 1.00 (ref)
Minor         1032   107   13.95   1.32 (1.06--1.63) \*       1.40 (1.13--1.74) \*\*   0.002
Major         90     14    27.06   2.76 (1.62--4.71) \*\*\*   1.78 (1.04--3.04) \*     0.036
Alzheimer's disease
Comparison    4488   328   8.45    1.00 (ref)                 1.00 (ref)
Minor         1032   81    10.47   1.38 (1.08--1.77) \*       1.48 (1.16--1.90) \*\*   0.002
Major         90     11    20.88   3.00 (1.64--5.48) \*\*\*   1.87 (1.02--3.44) \*     0.043
Vascular dementia
Comparison    4488   95    2.40    1.00 (ref)                 1.00 (ref)
Minor         1032   23    2.91    1.30 (0.82--2.06)          1.36 (0.86--2.17)        0.188
Major         90     1     1.80    0.88 (0.12--6.31)          0.57 (0.08--4.11)        0.577
HR, hazard ratio; CI, confidence interval. \* *p* \< 0.05, \*\* *p* \< 0.010, and \*\*\* *p* \< 0.001.
Both corresponding authors contributed equally to this work.
|
How to do URL masking?
How do I make:
http://ecorustic.com/model.php?category=Destinations&name=destindere&id=138
appear as:
http://ecorustic.com/Destinations/destindere
to the user but the page to still point at:
http://ecorustic.com/model.php?category=Destinations&name=destindere&id=138
?
I tried the following code, but the link remained unchanged:
RewriteEngine On
RewriteRule ^[A-Za-z-]+/[A-Za-z-]+/[A-Za-z-]+/([A-Za-z0-9-]+)/?$ model.php?category=$1&name=$2&id=$3
By what magic are we supposed to infer the ID?
Can 138 be derived from "destindere" in a database? If so, then yes this can be done - the first step is to remove the id query string requirement in the model.php URL, and do the lookup internally.
the id is given when the page is parsed, and it's the id of the page in the database
Have a look at mod_rewrite if you are using the Apache HTTP server.
i did but it gives the exact oposite of what i want, it transforms the second link into the first :( , in all examples i found
try it
RewriteEngine on
RewriteCond %{REQUEST_URI} ^/(.*)/(.*)/(.*)$
RewriteRule ^ model.php?category=%1&name=%2&id=%3 [L]
sample
http://ecorustic.com/Destinations/destindere/138
Rewrite to
http://ecorustic.com/model.php?category=Destinations&name=destindere&id=138
i need :
http://ecorustic.com/model.php?category=Destinations&name=destindere&id=138 to be transformed into http://ecorustic.com/Destinations/destindere/138 not the other way around
You should take a look at the HTTP URL rewriting guide on Apache. You said:
i did but it gives the exact oposite of what i want, it transforms the
second link into the first :( , in all examples i found
It's true and that's exactly what you want: make the fancy url appear to the user while in the background it is converted to the bad url to be read by the server.
There are thousands of tutorials and guides for any need on that. Here's some:
Guide 1
Guide 2
Yes, but how do I make the fancy URL appear? That's what I don't get, because I already have the "bad" URL. Those tutorials explain how to get the "bad" URL based on the fancy URL, BUT I need to get the fancy URL based on the "bad" URL (the URL the visitors need to see is the fancy one, but they see the "bad" one);
@Johny, you don't get the URL; it's the fancy URL that is translated into the bad URL by mod_rewrite. So when you write host.com/post/1/ it will be rewritten internally to host.com?page=post&id=1. To do this you have to learn how to use Apache's mod_rewrite by following the URLs I've posted.
|
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\Schema;
class CreateBerkasTable extends Migration
{
/**
* Run the migrations.
*
* @return void
*/
public function up()
{
Schema::create('berkas', function (Blueprint $table) {
$table->id();
$table->foreignId('wisuda_id')->constrained('wisudas', 'id')->onDelete('cascade');
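// Uploaded graduation documents (approximate English glosses of the Indonesian column names):
// pasfoto = passport photo, scanktp = ID card (KTP) scan, bebasperpustakaan = library clearance,
// toeflcept = TOEFL/CEPT certificate, buktiskripsi = thesis submission proof,
// pengesahanskripsi = thesis approval sheet, pembayaranpendaftaran = registration payment proof.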
$table->string('pasfoto');
$table->string('scanktp');
$table->string('bebasperpustakaan');
$table->string('toeflcept');
$table->string('buktiskripsi');
$table->string('pengesahanskripsi');
$table->string('pembayaranpendaftaran');
$table->string('status_pasfoto')->default('pending');
$table->string('status_scanktp')->default('pending');
$table->string('status_bebasperpustakaan')->default('pending');
$table->string('status_toeflcept')->default('pending');
$table->string('status_buktiskripsi')->default('pending');
$table->string('status_pengesahanskripsi')->default('pending');
$table->string('status_pembayaranpendaftaran')->default('pending');
$table->timestamps();
});
}
/**
* Reverse the migrations.
*
* @return void
*/
public function down()
{
Schema::dropIfExists('berkas');
}
}
|
import React, {useContext, useState} from 'react';
import scss from './header.module.scss';
import {DataContext} from '../../data/DataProvider';
export function AppHeader() {
const dataContext = useContext(DataContext);
const [description, setDescription] = useState('');
const inputChange = (event) => {
setDescription(event.target.value);
}
const buttonClick = (event) => {
event.preventDefault();
dataContext.sendRequest({description: description});
setDescription('');
}
return (
<header className={scss.appHeader}>
<h1 className={scss.appTitle}>Github <span>Jobs</span></h1>
<form className={setFormClassName(dataContext.showForm)}>
<div className={scss.inputWrapper}>
<span className={scss.icon}>work_outline</span>
<input className={scss.searchInput} type="text"
placeholder="Title, companies, expertise or benefits"
onChange={inputChange} value={description}
/>
<button className={scss.searchButton} onClick={buttonClick}>Search</button>
</div>
</form>
</header>
);
}
function setFormClassName(show) {
if (show) {
return scss.headerSearch;
}
return `${scss.headerSearch} ${scss.hide}`;
}
|
How to work around a dictionary so duplicate keys will work?
I am making a column called month which reads from an imported Excel file that has a column with certain strings that represent a month number. The problem I'm currently having is that this file has different strings that represent the same month (one with letters that represent months, and another with zero-padded numbers that represent months). This is leading my code to have duplicate keys.
I created a function called get_value to get the values needed and then I created the dictionary
Here is my code
def get_value(value, dictionary):
print(dictionary.values())
if value in dictionary.values():
answer = [k for k, v in dictionary.items() if v == value][0]
# answer = answer[1]
else:
answer = "No data"
return answer
More code
#Dictionary to assign month letters into numbers
month_codes = {
"1": "F",
"2": "G",
"3": "H",
"4": "J",
"5": "K",
"6": "M",
"7": "N",
"8": "Q",
"9": "U",
"10": "V",
"11": "X",
"12": "Z"
}
#Modify month column to get values from month_codes dictionary
df['Month'] = df['Month'].apply(lambda x: get_value(x,month_codes))
#New dictionary to turn 00 months in to a one digit string number
month_codes_2 = {
"1": "01",
"2": "02",
"3": "03",
"4": "04",
"5": "05",
"6": "06",
"7": "07",
"8": "08",
"9": "09",
"10": "10",
"11": "11",
"12": "12",
}
How can get both dictionaries to work for one column?
Why is your dictionary backward? Why are you trying to use a dict to look up keys given values?
Your title suggests you want duplicate keys to somehow work in dictionaries (not possible), but your post seems to suggest the opposite. Please clarify by providing a minimal reproducible example.
Is there any reason why you can't simply use lstrip? print('01'.lstrip('0'))
I am assuming your excel column can have 2 kinds of values: a string value ("Jan", "Feb", ...) and a numerical value ("01", "02", ...). What you are trying to do here is to convert them the same way (e.g. Both "Jan" and "01" should convert to 1).
If my assumption is correct, this solution may work for you. You only need one dictionary for string values ("Jan", "Feb", ...).
month_codes = {
"Jan": 1,
"Feb": 2,
"Mar": 3,
...
}
def get_value(value, dictionary):
try:
return int(value)
except Exception as e:
return dictionary[value]
df['Month'] = df['Month'].apply(lambda x: get_value(x, month_codes))
Hope this is what you are looking for!
My Excel column can only have the number value (1, 2, 3, ...). That's why I ran into the duplicate key problem.
Oh so you mean your column values are (1, 2, 3, ...) and want to convert them to ("Jan", "Feb", "Mar", ...) and ("01", "02", "03", ...)? Based on a condition?
for 1 -> "01", 2 -> "02", ... , you can look at this answer: https://stackoverflow.com/questions/134934/display-number-with-leading-zeros
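If I understand the last comments correctly, keeping the dictionary keyed the "right" way around avoids the duplicate-key issue entirely: use month numbers as keys and derive both representations from the same value. A sketch (the DataFrame and column names are assumed from the question):
month_letter_codes = {
    1: "F", 2: "G", 3: "H", 4: "J", 5: "K", 6: "M",
    7: "N", 8: "Q", 9: "U", 10: "V", 11: "X", 12: "Z",
}

def month_outputs(value):
    """Return (letter_code, zero_padded) for a month given as 1-12, '1'-'12' or '01'-'12'."""
    try:
        month = int(value)
    except (TypeError, ValueError):
        return "No data", "No data"
    if month not in month_letter_codes:
        return "No data", "No data"
    return month_letter_codes[month], f"{month:02d}"

# df["MonthCode"], df["MonthPadded"] = zip(*df["Month"].map(month_outputs))
print(month_outputs("1"))   # ('F', '01')
print(month_outputs("07"))  # ('N', '07')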
|
from __future__ import annotations
import logging
from prettyqt import constants, core, custom_widgets, widgets
logger = logging.getLogger(__name__)
BOOL_ITEMS = {
None: "Show all",
constants.CheckState.Checked: "Show True",
constants.CheckState.Unchecked: "Show False",
}
class FilterHeader(widgets.HeaderView):
"""A HeaderView subclass which includes widgets with filter possibilities.
When setting the header view on an ItemView, a proxy model will be created which is
linked to the filter widgets.
The correct filter widget is automatically inferred from the content of the columns.
So basically everything that needs to be done is the following:
```py
model = MyModel()
widget = widgets.TableView()
widget.set_model(model)
widget.h_header = custom_widgets.FilterHeader() # same as setHorizontalHeader()
```
and you will get filter capabilities for your table.
!!! note
Since the FilterHeader will infer the column content type based on the first few
rows, it will only work correctly for tables with homogeneous data.
<figure markdown>

<figcaption>FilterHeader widget</figcaption>
</figure>
"""
def __init__(self, parent: widgets.TableView):
self._editors_visible = False
self._editors = []
self._proxy = parent.proxifier.get_proxy(
"multi_column_filter",
recursive_filtering_enabled=True,
)
self._padding = 6
super().__init__(constants.HORIZONTAL, parent)
self.setStretchLastSection(True)
# self.setResizeMode(QHeaderView.Stretch)
self.setDefaultAlignment(constants.ALIGN_CENTER_LEFT)
# self.setSortIndicatorShown(False)
self.sectionResized.connect(self._adjust_positions)
parent.h_scrollbar.valueChanged.connect(self._adjust_positions)
parent.model_changed.connect(self._update_filter_boxes)
self.sectionResized.connect(self._adjust_positions)
self._update_filter_boxes()
self.update_geometries()
@classmethod
def setup_example(cls):
w = widgets.TableView()
widget = cls(parent=w)
return widget
def are_editors_visible(self) -> bool:
return self._editors_visible
def set_editors_visible(self, visible: bool):
self._editors_visible = visible
for editor in self._editors:
editor.setVisible(visible)
self.updateGeometries()
def _update_filter_boxes(self):
# TODO: deal with column changes by connecting to Model signals.
# That way we wouldnt have to update all editors on change.
while self._editors:
editor = self._editors.pop()
editor.deleteLater()
self.create_editors()
self._adjust_positions()
def create_editors(self):
# using parent model here bc we cant guarantee that we are already set to view.
parent = self.parent()
model = parent.model()
self._proxy.clear_filters()
for i in range(model.columnCount()):
typ = model.get_column_type(i)
if typ is bool:
def set_filter(val, i=i):
self._proxy.set_filter_value(i, val, constants.CHECKSTATE_ROLE)
name = f"filter_combo_{i}"
widget = widgets.ComboBox(margin=0, object_name=name, parent=self)
widget.add_items(BOOL_ITEMS)
widget.value_changed.connect(set_filter)
elif typ in [int, float]:
def set_filter(val, i=i):
self._proxy.set_filter_value(i, val)
name = f"filter_numwidget_{i}"
widget = custom_widgets.NumFilterWidget(
margin=0, object_name=name, parent=self
)
widget.filter_changed.connect(set_filter)
title = model.headerData(i, constants.HORIZONTAL, constants.DISPLAY_ROLE)
widget.lineedit.setPlaceholderText(f"Filter {title}...")
elif typ is str:
def set_filter(val, i=i):
self._proxy.set_filter_value(i, val)
name = f"filter_lineedit_{i}"
widget = widgets.LineEdit(margin=0, object_name=name, parent=self)
widget.value_changed.connect(set_filter)
title = model.headerData(i, constants.HORIZONTAL, constants.DISPLAY_ROLE)
widget.setPlaceholderText(f"Filter {title}...")
else:
widget = widgets.Widget()
widget.show()
self._editors.append(widget)
def sizeHint(self) -> core.QSize:
size = super().sizeHint()
if self._editors:
height = self._editors[0].sizeHint().height()
size.setHeight(size.height() + height + self._padding)
return size
def updateGeometries(self):
if self._editors:
height = self._editors[0].sizeHint().height()
self.setViewportMargins(0, 0, 0, height + self._padding)
else:
self.setViewportMargins(0, 0, 0, 0)
super().updateGeometries()
self._adjust_positions()
def _adjust_positions(self):
for index, editor in enumerate(self._editors):
height = editor.sizeHint().height()
compensate_y = 0
compensate_x = 0
match editor:
case widgets.QComboBox():
compensate_y = +2
case widgets.QPushButton():
compensate_y = -1
case widgets.QCheckBox():
compensate_y = 4
compensate_x = 4
case widgets.QWidget():
compensate_y = -1
editor.move(
self.sectionPosition(index) - self.offset() + 1 + compensate_x,
height + (self._padding // 2) + compensate_y,
)
editor.resize(self.sectionSize(index), height)
def set_filter_case_sensitive(self, value: bool):
self._proxy.set_filter_case_sensitive(value)
def clear_filters(self):
for editor in self._editors:
editor.clear()
editors_visible = core.Property(
bool,
are_editors_visible,
set_editors_visible,
doc="Whether the filter widgets are visible",
)
if __name__ == "__main__":
from prettyqt import itemmodels
app = widgets.app()
with app.debug_mode():
view = widgets.TableView()
model = itemmodels.QObjectPropertiesModel(view, parent=view)
# model = model.proxifier[:, 0:3]
view.set_selection_behavior("rows")
view.setEditTriggers(view.EditTrigger.AllEditTriggers)
view.set_delegate("editor", column=1)
view.setModel(model)
view.resize(640, 480)
view.set_selection_behavior("rows")
view.adapt_sizes()
header = FilterHeader(parent=view)
view.setHorizontalHeader(header)
# view.h_header._update_filter_boxes()
view.h_header.editors_visible = True
view.show()
with app.debug_mode():
app.exec()
print(view.h_header._proxy._filters)
|
Timeline
Pre-Council Times (37,000,000 BCE - 500 BCE)
37,000,000 BCE
298,000 BCE
125,000 BCE
48,000 BCE: Fall of the Protheans
6000 BCE
1900 BCE
1800 BCE
580 BCE
520 BCE
* The salarians discover the Citadel and open diplomatic relations with the asari.
Council Era - Formation (500 BCE - 1 CE)
500 BCE: Founding of the Citadel Council
200 BCE - 1 CE
* First contact is made with the batarians. They are granted an embassy a century later.
* First contact is made with the hanar and the quarians. Both races are later granted embassies.
Council Era - War and Rebellion (1 CE - 900 CE)
1 CE: The Rachni Wars
80 CE
300 CE
300 - 700 CE
700 CE: The Krogan Rebellions
* The turians accept the volus as a client race within the Turian Hierarchy.
800 CE
* The Citadel Conventions are drawn up in the wake of the conflict.
Council Era - Expansion (900 CE - 2157 CE)
900 CE
1400 CE
1600 CE
1755 CE
1880 CE
1895 CE: The Geth War
1921 CE
1961 CE
1969 CE
1980 CE
2000 CE
2069 CE
2070 CE
2075 CE
2077 CE
* Liara T'Soni is born.
2103 CE
2125 CE
2137 CE
* David Anderson is born.
2139 CE
* Saren Arterius is born.
* Kahlee Sanders is born.
2143 CE
* Construction of Gagarin Station (Jump Zero) begins beyond the orbit of Pluto.
2146 CE
* Thane Krios is born.
2147 CE
* Trace amounts of element zero are discovered on Mars.
2148 CE: Humanity Discovers Mass Effect Physics
2149 CE
2150 CE
* Miranda Lawson is born.
2151 CE
2152 CE
* Thane Krios is submitted for training as an assassin under the hanar.
2154 CE
* April 11: Shepard is born.
* There is a second publicized accident involving the exposure of humans to element zero.
2155 CE
* Saren Arterius is promoted to active service in the turian military at the age of 16.
* Jeff "Joker" Moreau is born.
2156 CE
* Arcturus Station is formally inaugurated.
* A small number of human children exposed to element zero exhibit minor telekinetic abilities.
2157 CE
* Pluto's orbit becomes circularized as a result of mass relay operations.
* Jacob Taylor is born.
* David Anderson graduates from Officer Candidate School with the rank of Second Lieutenant.
Council Era - Advent of Humanity (2157 CE - 2183 CE)
2157 CE: The First Contact War
2158 CE
* April 14: Ashley Williams is born.
2159 CE
* Saren Arterius becomes the youngest turian to be inducted into the Spectres.
2160 CE
* The Systems Alliance Parliament is formed.
* The biotic drug red sand is first used.
* Kasumi Goto is born.
2161 CE
* Tali'Zorah nar Rayya is born.
* Jack is born.
2162 CE
* Construction of Arcturus Station is completed.
2163 CE
* The first experimental L1 biotic implants are used in humans.
2164 CE
2165 CE
2166 CE
2167 CE
* The magazine Fornax is launched.
2168 CE
* Shepard receives secondary exposure to element zero. Permanent biotic inclination manifests.
2170 CE
* L3 biotic implants are developed after L2 implants prove to be dangerous.
* The first A-61 Mantis Gunships are produced.
2171 CE
* Shepard is officially detected as a biotic and fitted with L3 implants.
2172 CE
* April 11: Shepard enlists in the Alliance military.
* On Gagarin Station, "Eliza" becomes the first sapient AI created in the Systems Alliance.
2173 CE
* Kaidan Alenko enlists in the Alliance military.
2174 CE
* Ashley Williams enlists in the Alliance military.
2175 CE
2176 CE
2177 CE
2178 CE
2182 CE
* Admiral Kahoku of the Systems Alliance begins investigating suspected Cerberus activities.
2183 CE
Mass Effect (2183 CE - 2186 CE)
2183 CE: The Eden Prime War
* L4 biotic implants are developed.
2184 CE
* L5 biotic implants are developed.
2185 CE
2186 CE
|
Board Thread:Fun and Games/@comment-39133133-20191224230359/@comment-35614398-20191225081232
Communist China please. Also merry Christmas
(Are the communists the pink or green one?)
|
THE YOUNG NATURALIST.
London sparrows, no doubt, keeping it well in check ; the male moths may be observed in the parks from November to January, flying at dusk, or at rest on trees and palings. The apterous female can only be obtained by careful searching in the cracks of the bark.
We now come to the "Pugs," of which large family I have at present only observed two species, namely, E. centaureata and E. vulgata; the first appears during July and August, while the latter is found from May to September, both being fairly abundant. E. assimilata may perhaps also occur in some districts, but I have no definite record of it.
Of the "Carpet" moths we have only one species M> fluctuata (Garden Carpet), but this is exceedingly abundant everywhere throughout the summer. The larvae are found in the antumn on various garden plants, being very fond of Indian cress. They rest with the interior segments coiled up, and drop from the food plant when disturbed.
This concludes the list of Geometrae, and we now turn our attention to those lovers of darkness, the Noctuae. Of Noctuae, the town collector will be able to make a very good show, there being a fair number on the list. The first is the well-known Acronycta psi (Dagger Moth), which is commonly met with on tree trunks during June and July, or it may be easily decoyed with the entomologist's "sugar." The larva, which makes a very good object for preserving, is found during the winter months, feeding on various trees, the lime and elm being favourites. When full fed it forms a cocoon of bits of bark, or rubbish of any kind. This species varies considerably in the colour of the anterior wings, some specimens being a beautiful silvery grey, while others are smoky drab; the dagger-like markings, however, are very constant.
Our next species is Acronycta megacephala (Poplar Grey Moth). This is a regular inhabitant of Hackney. During the months of May and June it is observed in considerable numbers on the trunks of poplar and other trees, sitting with the wings folded up, and the anterior legs extended; when thus at rest it forms a pretty object. The larvae feed upon poplar, and occasionally on willow trees; they are fond of resting upon the underside of the leaves, and on the trunks, always having their heads bent round so as to resemble a loop. When disturbed they hold very tightly to the bark, so much so that it requires some little force and skill to detach them. When full fed they form a compact cocoon in some cleft of the bark, covering the outside with little fragments of wood, etc., so that it is very difficult to distinguish. Occasionally they go underground, but even then they form a very strong case. The pupae are very long, and of a bright red colour.
|
$(function () {
let topmenu = $('.topmenu'),
generalItem = $('.topmenu__general_item'),
submenuItem = $('.topmenu__submenu_item');
generalItem.hover(function () {
let self = $(this);
//if (self.find('.topmenu__submenu').length && !self.hasClass('topmenu__general_item_active')) {
self.toggleClass('topmenu__general_item_hover');
self.closest('.topmenu__general').find('.topmenu__submenu').hide();
self.find('.topmenu__submenu').removeAttr('style');
//}
if (self.closest('.topmenu').find('.topmenu__general_item_active') && !self.hasClass('topmenu__general_item_active')) {
self.closest('.topmenu').find('.topmenu__general_item_active')
.addClass('topmenu__general_item_hide-border');
}
}, function () {
let self = $(this);
//if (self.find('.topmenu__submenu').length && !self.hasClass('topmenu__general_item_active')) {
self.toggleClass('topmenu__general_item_hover');
self.closest('.topmenu__general').find('.topmenu__submenu').removeAttr('style');
//}
if (self.closest('.topmenu').find('.topmenu__general_item_active') && !self.hasClass('topmenu__general_item_active')) {
self.closest('.topmenu').find('.topmenu__general_item_active')
.removeClass('topmenu__general_item_hide-border');
}
});
submenuItem.hover(function () {
let self = $(this);
if (self.find('.topmenu__submenu-two-level').length) {
self.find('.topmenu__submenu_link').addClass('topmenu__submenu_link_active')
.next('.topmenu__submenu-two-level').addClass('topmenu__submenu-two-level_active');
}
}, function () {
let self = $(this);
if (self.find('.topmenu__submenu-two-level').length) {
self.find('.topmenu__submenu_link').removeClass('topmenu__submenu_link_active')
.next('.topmenu__submenu-two-level').removeClass('topmenu__submenu-two-level_active');
}
});
});
|
In the Night Garden The Movie 3: The Mystery of the World (2019 film)/Credits
Opening Credits (North America)
Twentieth Century Fox,
TriStar Pictures
and
Alliance Films
presents
in association with
BaronNation Entertainment
and
Pathé
A
BBC Films
production
In The Night Garden: The Movie 3
Opening Credits (International)
TriStar Pictures,
Twentieth Century Fox
and
Alliance Films
Presents
in association with
BaronNation Entertainment
and
Pathé
A
BBC Films
production
In The Night Garden: The Movie 3
Opening Credits (Australia and New Zealand)
Alliance Films,
TriStar Pictures
and
Twentieth Century Fox
present
in association with
BaronNation Entertainment
and
Pathé
a
BBC Films
production
In The Night Garden: The Movie 3
CAST
Garrett Hedlund - Makka Pakka
Andrew Garfield - Iggle-Piggle
Elle Fanning - Upsy Daisy
George Ezra - Tombliboo Unn
Tom Hiddleston - Tombliboo Ooo
Zara Larsson - Tombliboo Eee
Brie Larson - The Pontipines
David Jason - The Wottingers
Nicholas Lyndhurst - Evil Bill
Mel Gibson - Puss n’ Boots
Ariana Grande - Narrator/Adult Upsy Daisy
Nathan Sykes - Adult Iggle-Piggle
Robert Downey Jr. - Dad
SPECIAL THANKS
With The Participation Of
TELEFILM CANADA
QUEBEC FILM AND TELEVISION TAX CREDIT GESTION SODEC
THE CANADIAN FILM OR VIDEO PRODUCTION TAX CREDIT
Closing Logos
|
Transitive sets and the Mostowski collapse
I was wondering if every set can be "transitized" - that is, made into a transitive version of itself. Is this basically what the Mostowski collapse says?
$\{\{\{\{\}\}\},\{\}\}$.
If $y$ is transitive and has $x$ as an element, we can define the Mostowski collapse of $y$. By uniqueness, the image of $x$ does not depend on which $y$ we pick, and it is $\pi(x)=\pi[x]=\{\pi(z)\mid z\in x\}$, where $\pi$ is the collapsing map. But this set need not be transitive.
That is not the Mostowski collapse. The collapse says a partially ordered set with certain properties is isomorphic to a transitive set with $\in$ as the order.
You seem to ask about transitive closure, which is the smallest transitive set which includes a particular set.
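For reference, the transitive closure mentioned above can be written explicitly (a standard construction; notation may differ between textbooks):
$$\operatorname{TC}(x)=\bigcup_{n<\omega}x_n,\qquad x_0=x,\quad x_{n+1}=\bigcup x_n,$$
so that $\operatorname{TC}(x)=x\cup\bigcup x\cup\bigcup\bigcup x\cup\cdots$ is the smallest transitive set having $x$ as a subset.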
Alright, thanks. Yes, I suppose that that's probably what I was thinking about.
|
package main
import "fmt"
var _ Structer = &FakeStructer{}
func main() {
structHookCalled := false
namedStructHookCalled := false
s := struct {
a string
b string
}{"e", "f"}
t := struct {
a string
b string
}{"g", "h"}
u := struct {
c string
d string
}{"i", "j"}
v := struct {
c string
d string
}{"k", "l"}
f := &FakeStructer{
StructHook: func(a struct {
a string
b string
}) struct {
c string
d string
} {
structHookCalled = true
return v
},
NamedStructHook: func(z struct {
a string
b string
}) (a struct {
c string
d string
}) {
namedStructHookCalled = true
if z == s {
return u
}
return v
},
}
f.Struct(s)
if len(f.StructCalls) != 1 {
panic(fmt.Sprintf("StructCalls: %d", len(f.StructCalls)))
}
if !structHookCalled {
panic("StructHook not called")
}
if !f.StructCalled() {
panic("StructCalled: Struct not called")
}
if !f.StructCalledOnce() {
panic("StructCalledOnce: Struct not called once")
}
if f.StructNotCalled() {
panic("StructNotCalled: Struct not called")
}
if !f.StructCalledN(1) {
panic("StructCalledN: Struct not called once")
}
if !f.StructCalledWith(s) {
panic(fmt.Sprintf("StructCalledWith: Struct not called with %s", s))
}
if !f.StructCalledOnceWith(s) {
panic(fmt.Sprintf("StructCalledOnceWith: Struct not called once with %s", s))
}
f.NamedStruct(s)
if len(f.NamedStructCalls) != 1 {
panic(fmt.Sprintf("NamedStructCalls: %d", len(f.NamedStructCalls)))
}
if !namedStructHookCalled {
panic("NamedStructHook not called")
}
if !f.NamedStructCalled() {
panic("NamedStructCalled: NamedStruct not called")
}
if !f.NamedStructCalledOnce() {
panic("NamedStructCalledOnce: NamedStruct not called once")
}
if f.NamedStructNotCalled() {
panic("NamedStructNotCalled: NamedStruct not called")
}
if !f.NamedStructCalledN(1) {
panic("NamedStructCalledN: NamedStruct not called once")
}
if !f.NamedStructCalledWith(s) {
panic(fmt.Sprintf("NamedStructCalledWith: NamedStruct not called once with %s", s))
}
if !f.NamedStructCalledOnceWith(s) {
panic(fmt.Sprintf("NamedStructCalledOnceWith: NamedStruct not called once with %s", s))
}
res, found := f.NamedStructResultsForCall(s)
if res != u || found != true {
panic(fmt.Sprintf("NamedStructResultsForCall: NamedStruct results for %s not %s, found: %s", s, u, found))
}
res, found = f.NamedStructResultsForCall(t)
if found != false {
panic(fmt.Sprintf("NamedStructResultsForCall: NamedStruct results for %s found", t))
}
f.NamedStruct(t)
if len(f.NamedStructCalls) != 2 {
panic(fmt.Sprintf("NamedStructCalls: %d", len(f.NamedStructCalls)))
}
if !f.NamedStructCalledN(2) {
panic("NamedStructCalledN: NamedStruct not called twice")
}
res, found = f.NamedStructResultsForCall(t)
if res != v || found != true {
panic(fmt.Sprintf("NamedStructResultsForCall: NamedStruct results for %s not %s, found: %s", t, v, found))
}
}
|
<?php
/**
* api
* @author zsx<[email protected]>
* @package api/route/error
* @php >= 5.3
*/
namespace AppChecker;
use AppChecker\Log;
use Symfony\Component\Console\Application;
use Symfony\Component\Console\Command\Command;
use Symfony\Component\Console\Input\InputArgument;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Input\InputOption;
use Symfony\Component\Console\Output\OutputInterface;
class Bootstrap extends Command {
protected function configure() {
$this
->setName('run')
->setDescription('To run checker')
->addArgument(
'appid',
InputArgument::REQUIRED,
'AppID'
)
->addOption(
'bloghost',
null,
InputOption::VALUE_OPTIONAL,
"Your Z-BlogPHP Url that can use webbrowser to access."
)
;
}
protected function execute(InputInterface $input, OutputInterface $output) {
global $scope;
global $zbp;
global $app;
global $bloghost;
Log::SetOutputInterface($output);
$bloghost = $input->getOption('bloghost');
if ($bloghost == "") {
$bloghost = "http://localhost/";
}
//$zbp->option['ZC_PERMANENT_DOMAIN_ENABLE'] = false;
//$zbp->option['ZC_ORIGINAL_BLOG_HOST'] = $zbp->option['ZC_BLOG_HOST'];
$zbp->option['ZC_BLOG_HOST'] = $bloghost;
Log::Log('Detected $bloghost = ' . $bloghost);
Log::Info('Completed!');
Log::Log('Getting App...');
$appId = $input->getArgument('appid');
if ($zbp->CheckApp($appId)) {
Log::Error('You should disable ' . $appId . ' in Z-BlogPHP first.');
}
$app = $zbp->LoadApp('plugin', $appId);
if ($app->id !== null) {
Log::Info('Detected Plugin.');
} else {
$app = $zbp->LoadApp('theme', $appId);
if ($app->id !== null) {
Log::Info('Detected Theme.');
} else {
Log::Error('App not Found!');
}
}
Log::Title("System Information");
Log::Info("Z-BlogPHP: " . $zbp->version);
Log::Info("System: " . \GetEnvironment());
Scanner::Run();
Log::Info('OK!');
}
}
foreach (['utils', 'log', 'scanner'] as $index => $item) {
require 'lib/' . $item . '.php';
}
$path = getenv('ZBP_PATH');
if (!is_dir($path) || !chdir($path)) {
echo 'Cannot open your Z-BlogPHP index.php: ' . $path;
exit;
}
require 'zb_system/function/c_system_base.php';
Log::Log('Loading Z-BlogPHP...');
$zbp->Load();
\ZBlogException::ClearErrorHook();
$application = new Application();
$application->add(new Bootstrap());
$application->run();
|
SAP programming without ABAP Workbench
First post here so excuse any mistakes.
A little background: I work in a company that uses SAP for daily work, I use it for daily work. I have some programming skills, I know some C, Java an VBA.
What I need: I want to make a program that interacts with SAP in order to automate some of the tasks that me and the ppl on my department do.
I can't use the SAP macro tool because I need info from outside SAP, like e-mail and Excel. It's of no use to ask for help from the SAP admins and developers in the company because it is a massively huge company and I doubt that I will get any attention.
I need to know with which program (NetBeans, Code::Blocks, and that kind of stuff) I can begin, because it's impossible to get the ABAP Workbench, and in which language. I can learn any programming language that is needed; I just need some guidance.
I know that there is a SAP plugin for eclipse that allows java development but that is also paid. I need an open, free solution here. I can't get nor i need development access on SAP, all i want to do is a program that will automate several actions and roles, like a macro, but that can interact with other programs like Excel and e-mail.
Thanks in advance, and ask away if i did not made my self clear
You need access to ABAP Workbench in order to define the Interface you will need in SAP. That is usually an RFC or BAPI/BADI that you define and can be accessed via an RFC Destination or something made available on the SAP Java Stack. Once you have that all setup correctly then you could use Eclipse or whatever to build out your Java code
I see, although I understood only half of it. There is no way I can get access to the ABAP Workbench. What I intend to do is a program that would send the same commands that I would; for example, instead of me copying and pasting a number from an Excel sheet and entering it in SAP, the program will do it for me. Like a big integrated macro. The way I planned it, I don't think I need Workbench access, but I have no idea where to begin to execute it.
if your company is a big one you may have access to SAP GUIs scripting extension GuiXT. Maybe some of your requirements can be met using that extension. You can't control a SAP transaction from the outside, you can only use SAP BAPIs (function modules within SAP) to execute certain actions. But those are usually encapsulated transactions, for instance creating a new sales order. You would need to program an external application that accesses the BAPIs in the SAP system using the SAP Connector (both java and c# connectors are available for free for SAP customers).
If your problem can be solved with the standard BAPIs your could use JCO. But you need access to the service marketplace to download it...
You may develop some web services with SAP integration team, that will implement required logic. It isn't hard task from technical point of view. All you need then is to call exposed web serivce from any application using VBA.
Since the SAP macro tool runs with client-side VBA scripts, it will work for what you want. External data will be no problem if you are good with VBA.
Since you are a business user, you shouldn't be given access to the ABAP workbench. Unfortunately this means you can't develop RFC or HTTP interfaces, or anything on the server. Fortunately, everything you need can be done via the client GUI, which is why a macro will work.
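Building on the client-side suggestion above, here is a minimal sketch of driving the SAP GUI scripting interface from Python via COM instead of VBA. It assumes SAP GUI for Windows with scripting enabled on both client and server; the transaction code and field IDs are placeholders, not values from your system:
import win32com.client  # pip install pywin32

sap_gui = win32com.client.GetObject("SAPGUI")
application = sap_gui.GetScriptingEngine
connection = application.Children(0)   # first open connection
session = connection.Children(0)       # first session on that connection

# Start a transaction (placeholder) and fill a field, e.g. with a value read from Excel.
session.findById("wnd[0]/tbar[0]/okcd").text = "/nVA01"
session.findById("wnd[0]").sendVKey(0)  # press Enter
# session.findById("wnd[0]/usr/ctxtVBAK-AUART").text = value_from_excel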
There is no way to do what you are asking without permission from SAP administration. I suspect if they wont give you access to SAP workbench you are not authorized to be doing what you ask. There are reasons for the system being secure, otherwise anyone could do what you ask and access data such as HR, personnel etc. with no controls. Sorry this is an enterprise system.
Later....
|
<?php
namespace App\Payment\Advertiser\Actions;
use App\Payment\Advertiser\Responder\ShowHyperPayTransactionStatusResponder;
use App\Payment\Advertiser\Domain\Services\ShowHyperPayTransactionStatusService;
use App\Payment\Advertiser\Domain\Requests\ShowHyperPayTransactionStatusFormRequest;
class ShowHyperPayTransactionStatusAction
{
protected $responder;
protected $services;

public function __construct(ShowHyperPayTransactionStatusResponder $responder, ShowHyperPayTransactionStatusService $services)
{
$this->responder = $responder;
$this->services = $services;
}
public function __invoke(ShowHyperPayTransactionStatusFormRequest $request)
{
return $this->responder->withResponse(
$this->services->handle($request->validated())
)->respond();
}
}
|
Sitecore License for DR servers
I am planning to have a scaled-down replica of our production Sitecore infrastructure as Disaster Recovery servers so that the same can be used in case of a disaster. This will mean I will have to procure a Sitecore license for the DR servers as well, even though there might never be an incident requiring a switch to DR servers. Is there any possible solution where the same license can be used in prod and switched to DR when required?
Maybe. Whatever I suggest here, you are going to have to verify with your Sitecore Sales Representative.
If the DR environment is "cold", then I have experienced Sitecore will allow this without extra license purchase. "Cold" meaning, the DR environment spins up and takes over, when disaster strikes.
Reasoning being, you actually do not "overuse" your license in this setup. Only one environment is ever active.
For a "warm" DR setup, I have experienced in the past that this can be arranged with Sitecore, possibly at a reduced license cost. Possibly. But this is between you and Sitecore.
I cannot stress this enough however, validate your setup and license requirements with Sitecore before going ahead with either setup.
I concur with the venerable @MarkCassidy. Once upon a time a “hot” server license for Sitecore came with an allowance for a “cold” spare. There is some speculation over the definition of these terms, and licensing for Sitecore is often altered per customer, so you must contact the Account Manager for a definitive position.
To add to what Mark said, do you have access to your contract with Sitecore? Do make sure you understand the definitions therein. The definition of "server" and the definition of "Cold Failover Server Installation" deserve extra attention.
In our case, we were entitled to "an equal number of Cold Failover Server Installation(s) as the number of Server Installation(s)".
Now, "server" is defined very precisely as well, with a specific number of cores (or allocated cores in case of VMs). So do make sure you have that covered as well!
Another option to consider is going with a consumption based license see here In that case the cost depends on volume of visits rather than the number of servers. This can bring other benefits as well like scaling up to meet peak load and you will need this if you want to use JSS. For sure check with Sitecore.
|
added Profile Photo manager page !with menu fix!
added Profile Photo manager page !with menu fix!; unified Profile Photo link to Profile Photo manager for Cover and Profile Photo block on Profile page
Thank you @apachler we've included this fix.
|