<?php
use yii\helpers\Html;
use yii\widgets\ActiveForm;
/* @var $this yii\web\View */
/* @var $user app\models\User */
/* @var $profile app\models\Profile */
/* @var $form yii\widgets\ActiveForm */
?>
<div class="user-form">
<div class="row">
<hr>
<?php $form = ActiveForm::begin(); ?>
<div class="col-md-4">
<?php
if (isset($profile->avatar)) {
echo Html::img($profile->avatar, ['class' => 'center-block', 'id' => 'user-avatar', 'style' => 'width:50%; height:20%']);
}
?>
<?= $form->field($profile, 'imageFile')->widget(\kartik\file\FileInput::className(), [
'options' => [
'accept' => 'image/*',
],
]) ?>
<?= Html::a('Set default image', ['user/reset-image', 'id' => $profile->id], ['class' => 'btn btn-danger']) ?>
</div>
<div class="col-md-8">
<?php if (Yii::$app->authManager->checkAccess(Yii::$app->user->identity->getId(), 'admin')): ?>
<?= $form->field($user, 'role')->dropDownList($user->getRolesArray()) ?>
<hr>
<?php endif; ?>
<?php if (Yii::$app->authManager->checkAccess(Yii::$app->user->identity->getId(), 'moderator')): ?>
<?= $form->field($user, 'status')->dropDownList($user->getStatuses()) ?>
<hr>
<?php endif; ?>
<?= $form->field($user, 'email')->textInput(['maxlength' => true]) ?>
<?= $form->field($user, 'username')->textInput(['maxlength' => true]) ?>
<?= $form->field($profile, 'first_name')->textInput(['maxlength' => true]) ?>
<?= $form->field($profile, 'last_name')->textInput(['maxlength' => true]) ?>
<?= $form->field($profile, 'birth_date')->widget(\kartik\date\DatePicker::className(), [
'options' => ['placeholder' => 'Enter birth date ...'],
'pluginOptions' => [
'autoclose' => true
]
]) ?>
<?= $form->field($profile, 'details')->textarea(['rows' => 6]) ?>
<div class="form-group">
<?= Html::submitButton('Save', ['class' => 'btn btn-success']) ?>
</div>
</div>
<?php ActiveForm::end(); ?>
</div>
</div>
|
//
// PagerContainter.swift
// PagingMenuView
//
// Created by Андрей Чернопрудов on 02/03/2017.
// Copyright © 2017 Naumen. All rights reserved.
//
import UIKit
protocol PageContainer: class {
func setup(with items: [PageItem])
func setActive(page index: Int)
}
class PageContainterScrollView: UIScrollView, PageContainer {
// MARK: - Instance variables
weak var coordinator: PageCoordinator?
var pages: [UIView] = []
// MARK: - Public
convenience init(coordinator: PageCoordinator) {
self.init()
self.coordinator = coordinator
translatesAutoresizingMaskIntoConstraints = false
isPagingEnabled = true
bounces = false
showsHorizontalScrollIndicator = false
delegate = self
}
func setActive(page index: Int) {
let view = pages[index]
scrollRectToVisible(view.frame, animated: true)
}
func setup(with items: [PageItem]) {
subviews.forEach { $0.removeFromSuperview() }
pages.removeAll()
let views = items.map { $0.view }
add(viewsAsPages: views)
}
// MARK: - Private
private func add(viewsAsPages views: [UIView]) {
var trailing: NSLayoutXAxisAnchor = leadingAnchor
for view in views {
view.setContentCompressionResistancePriority(UILayoutPriority(rawValue: 751), for: .horizontal)
view.translatesAutoresizingMaskIntoConstraints = false
addSubview(view)
view.widthAnchor.constraint(equalTo: widthAnchor).isActive = true
view.heightAnchor.constraint(equalTo: heightAnchor).isActive = true
view.topAnchor.constraint(equalTo: topAnchor).isActive = true
view.bottomAnchor.constraint(equalTo: bottomAnchor).isActive = true
view.leadingAnchor.constraint(equalTo: trailing).isActive = true
trailing = view.trailingAnchor
}
trailing.constraint(equalTo: trailingAnchor).isActive = true
self.pages = views
}
}
extension PageContainterScrollView: UIScrollViewDelegate {
public func scrollViewDidEndDecelerating(_ scrollView: UIScrollView) {
let indexOfPage = scrollView.contentOffset.x / scrollView.frame.size.width
coordinator?.select(page: Int(indexOfPage))
}
}
|
Although substance use, such as alcohol consumption, is known to be
associated with cognitive decline during ageing, its direct influence on the
central nervous system remains unclear. In this study, we aim to investigate
the potential influence of alcohol intake frequency on accelerated brain ageing
by estimating the mean potential brain-age gap (BAG) index, the difference
between brain age and actual age, under different alcohol intake frequencies in
a large UK Biobank (UKB) cohort with extensive phenomic data reflecting a
comprehensive life-style profile. We face two major challenges: (1) a large
number of phenomic variables as potential confounders and (2) a small
proportion of participants with complete phenomic data. To address these
challenges, we first develop a new ensemble learning framework to establish
robust estimation of the mean potential outcome in the presence of many
confounders. We then construct a data integration step to borrow information
from UKB participants with incomplete phenomic data to improve efficiency. Our
analysis reveals that daily alcohol intake, or even intake a few times a week, may
significantly accelerate brain ageing. Moreover, extensive numerical
studies demonstrate the superiority of our method over competing methods, in
terms of smaller estimation bias and variability.
|
Brachinus efflans
Brachinus efflans is a species of ground beetle in the Brachininae subfamily that can be found in Bulgaria, Germany, Italy, Portugal, Spain, and Gibraltar, as well as on islands such as Sicily. It can also be found in North African countries such as Algeria, Morocco, and Tunisia, and is common in Syria as well. The species is black with a red head and legs, and is similar to Brachinus crepitans.
|
Talk:Calcite Chamber/@comment-31168983-20170416094833/@comment-30763995-20170421130708
took 5 scythers with focus sash and one scizor
all with FALSE SWIPE
|
502 Bad Gateway with Laravel 5.7
I've been using Laravel Homestead for quite some time. However, the last time I created a project I got this error with the latest Vagrant and Laravel Homestead.
I've tried changing the PHP version, as suggested in most forums covering the same problem, but to no avail.
502 Bad Gateway
nginx/1.15.6
Can you add your Homestead.yaml to your question?
I know you have said you've tried changing the PHP version. I'm unable to comment because of reputation so I'll answer with the typical fix for this issue below.
Had the same issue with the latest version of Homestead.
There's an issue with xdebug that the Homestead developers are waiting for a fix for. The solution is to disable xdebug or use PHP 7.2. I opted for the latter. In that case, make the following change in your Homestead.yaml, and then running vagrant reload --provision will fix this.
sites:
- map: homestead.test
to: /home/vagrant/code/public
php: "7.2"
Thanks, let me guess: a PHP 7.3 issue?
An issue with xdebug and PHP 7.3, yes. The Homestead developers are waiting for an upstream fix.
I'm getting quite annoyed with the people behind Homestead. Pretty much every release over the past year has been broken because the developers want to be on the bleeding edge of all software. I don't mind things breaking every once in a while, but this is just stuff that should never pass any QA test. Clean install? Broken. Anyway, thanks for the solution, it worked flawlessly!
I had a similar problem. I solved it as follows:
vagrant ssh
vagrant@homestead:~$ cd /var/log/nginx
vagrant@homestead:~$ ls
access.log error.log {your-site-map-name}-error.log
vagrant@homestead:~$ cat {your-site-map-name}-error.log
When you see this error:
connect() to unix:/var/run/php/php7.3-fpm.sock failed
then you can fix it as follows:
vagrant@homestead:~$ php -v
or
vagrant@homestead:~$ cd /var/run/php/
vagrant@homestead:~$ ls
You will get your PHP-FPM version; mine is 8.1:
php8.1-fpm.pid php8.1-fpm.sock php-fpm.sock
Then you need to go to:
vagrant@homestead:~$ cd /etc/nginx/sites-enabled
vagrant@homestead:~$ sudo nano {your-site-map-name}.test
Change this line in the file {your-site-map-name}.test:
fastcgi_pass unix:/run/php/php8.1-fpm.sock;
or to whichever version you found.
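For reference, the PHP location block in that file usually looks roughly like the sketch below (the exact directives vary by Homestead version, so treat this only as an illustration); the one line that needs to change is fastcgi_pass:
location ~ \.php$ {
    fastcgi_split_path_info ^(.+\.php)(/.+)$;
    # point this at the PHP-FPM socket you found (/var/run/php and /run/php are the same directory)
    fastcgi_pass unix:/run/php/php8.1-fpm.sock;
    fastcgi_index index.php;
    include fastcgi_params;
}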
Then you only need to restart nginx:
vagrant@homestead:~$ sudo service nginx restart
and it should be working.
|
// Maps the distance between `comparedDate` and `baseDate` (defaults to now) to an
// i18n key plus interpolation values, comparing calendar years, then months, then days.
export function meaningfulTime (comparedDate, baseDate) {
  const now = baseDate ? new Date(baseDate) : new Date()
  const then = new Date(comparedDate)
  const values = { then: comparedDate, now: baseDate, diff: undefined }
  const retVal = { key: '', values }
  let diff
  // Different calendar year: "last year" or "N years ago".
  diff = now.getFullYear() - then.getFullYear()
  if (diff > 0) {
    values.diff = diff
    retVal.key = diff === 1 ? 'time.lastYear' : 'time.yearsAgo'
    return retVal
  }
  // Same year, different calendar month: "last month" or "N months ago".
  diff = now.getMonth() - then.getMonth()
  if (diff > 0) {
    values.diff = diff
    retVal.key = diff === 1 ? 'time.lastMonth' : 'time.monthsAgo'
    return retVal
  }
  // Same year and month: "today", "yesterday" or "N days ago".
  diff = now.getDate() - then.getDate()
  if (diff >= 0) {
    values.diff = diff
    retVal.key = diff === 0 ? 'time.today' : (diff === 1 ? 'time.yesterday' : 'time.daysAgo')
    return retVal
  }
  // Compared date is later than the base date.
  values.diff = -1
  retVal.key = 'time.future'
  return retVal
}
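A minimal usage sketch (the import path and the t() translator below are hypothetical, standing in for whatever i18n layer consumes the returned key and values):
import { meaningfulTime } from './meaningfulTime' // hypothetical module path
const { key, values } = meaningfulTime('2023-05-01', '2023-05-03')
// key === 'time.daysAgo', values.diff === 2
console.log(t(key, values)) // e.g. "2 days ago"; t() is a hypothetical i18n translate function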
|
Delta
She sports purple eyes. She has a slender build with an ample bosom.
Her attire comprises the customary slime bodysuit with black and bronze coloring.
Personality
Powers & Abilities
Physical prowess
Beastkin of Senses
Combatant
Swordswoman
In the game, she uses large gauntlets with claws.
Hunter's Frenzy
Magic
In exchange, almost all of her slime suit temporarily disappears (leaving only her private parts covered), having been redirected into the giant sword.
Trivia
* Delta's character is based on a dog the author once raised.
|
Mammography system and method employing offset compression paddles, automatic collimation, and retractable anti-scatter grid
ABSTRACT
A mammographic imaging system is optimized for use with a single fixed size flat panel digital image receptor. It accommodates compression devices (paddles) of varying sizes, and positions them properly in a field of view of the image receptor. When a compression paddle with size smaller than the field of view of the image receptor is used, the compression paddle can be shifted laterally in the direction parallel to the chest wall, so as to facilitate different views of different size breasts, and permit the image receptor to image as much of the desired tissue as possible. An automatic X-ray collimator restricts the X-ray illumination of the breast in accordance with the compression paddle size and location in the field of view. An anti-scatter grid, mounted inside the image receptor enclosure, just below the top cover of the enclosure, can be retracted out of the field of view of the image receptor for use in magnification imaging.
RELATED APPLICATIONS
This application is a continuation of U.S. patent application No. 14/595,826, now abandoned, filed Jan. 13, 2015, which is a continuation of U.S. patent application No. 14/052,825, now U.S. Pat. No. 8,948,340, filed Oct. 14, 2013, which is a continuation of and claims priority under 35 U.S.C. § 120 to U.S. patent application Ser. No. 13/190,989, now U.S. Pat. No. 8,559,595, filed Jul. 26, 2011, which is a continuation of U.S. Pat. No. 7,986,765, filed on Feb. 22, 2010, which is a continuation of U.S. Pat. No. 7,688,940, filed on May 8, 2009, which is a continuation of U.S. Pat. No. 7,609,806, filed Jan. 15, 2008, which is a continuation of U.S. Pat. No. 7,319,735, filed Nov. 30, 2006, which is a continuation of U.S. Pat. No. 7,443,949, filed Oct. 18, 2004, which is a Section 371 national stage of International Application No. PCT/US02/33058, filed Oct. 17, 2002, claiming the benefit of U.S. Provisional Application No. 60/350,213, filed Oct. 19, 2001. Each of the above applications is hereby incorporated by reference.
BACKGROUND
X-ray mammography machines typically use an x-ray source mounted at one end of a rotatable c-arm assembly and an image receptor at the other. Between the x-ray source and the image receptor is a device for compressing and immobilizing a breast. Until recently, the image receptor was typically a screen-film (s/f) cassette, which generated an image related to the detected transmission of x-rays through the breast. These s/f cassettes typically come in standard sizes, e.g., 18 cm×24 cm (small) and 24 cm×30 cm (large), with the large cassette used when the breast is too large to be uniformly compressed by the small cassette. The cassettes are easily attachable and removable from a breast support tray of a conventional mammography system. The device for compressing the breast is often called a paddle, and comes in a variety of sizes to match both the cassette size and the breast size. Such matching is desirable because the use of a small paddle on a large breast can result in uneven and inadequate breast compression and may not allow full-breast imaging, while using a large paddle on a small breast can impede access to the breast, which is important during the compression cycle in order to optimize the amount of breast tissue brought into the field of view of the image receptor.
New mammography systems are now being developed to use digital image receptors as replacements for the screen-film cassettes. These digital image receptors, sometimes called flat panel receptors or flat panel digital x-ray receptors, are different in many ways from s/f cassettes. They have many advantages, but also tend to be heavier and somewhat thicker. Typically, they are not designed to be removed in normal use, so a system normally will employ only one size of image receptor. These characteristics can present challenges for some imaging procedures and breast sizes, particularly for the medio lateral oblique (MLO) view taken as a part of typical breast x-ray imaging. As with screen-film systems, it is still advantageous to use a compression paddle that matches the breast size. This typically means that the compression paddles will be removable, and there will be a selection of paddle sizes available with the system.
A number of x-ray protocols have been used for breast imaging. One common view is the cranio-caudal (CC) view, illustrated in FIG. 5, which images the breast of a standing or sitting patient from above. Another is the medio lateral oblique (MLO) view, taken from an oblique or angled view, and also illustrated in FIG. 5. In screen-film mammography systems, the compression paddle typically is centered relative to the proximal edge of the screen-film cassette. In some views, such as the MLO view, and particularly for smaller breasts, this may present some difficulty as the cassette may have to be pressed against the armpit in order to approximately center the breast relative to the proximal edge of the film (the edge closest to and parallel to the chest wall). In such cases, the smaller size cassette can be used. This, plus the relative thinness of the cassette, generally allows for adequate centering. However, when a digital x-ray receptor is used, usually only one size is available, and it may be the size comparable to the larger screen-film cassette. Also, the digital receptor tends to be thicker than a screen-film cassette. Thus, centering the breast can be difficult or impossible in some cases, particularly for the MLO view and patients with smaller breasts, with the result that optimal positioning of the breast may not be possible for some views and patients.
To applicants' knowledge, these and other issues regarding compression paddle use with flat panel digital receptors in mammography have not been solved and perhaps have not even been addressed. In a different setting, it has been proposed to move a compression paddle laterally, relative to the proximal edge of the screen-film cassette, but for the different purpose of aligning a cutout in the paddle with a particular portion of the breast. See U.S. Pat. No. 5,199,056. This is believed to require a paddle larger than would normally be used for the breast size so as to maintain even compression when the cutout is off-center relative to the breast. Other earlier proposals are known for features such as collimation that adjusts to film cassette size, source-to-image distance and/or cross-sectional area to be imaged (U.S. Pat. Nos. 3,502,878, 3,863,073, 5,627,869, and 6,149,301), moving a paddle (U.S. Pat. No. 3,971,950), moving a cassette (U.S. Pat. No. 4,989,227), and retracting a cassette holder (U.S. Pat. No. 4,559,641). The cited patents are hereby incorporated by reference in this patent specification.
SUMMARY
An object of the disclosed system and method is to provide mammography that overcomes known disadvantages of proposals involving the otherwise desirable use of flat panel, digital x-ray receptors.
Another object is to employ compression paddles that match both the size and position of the patient's breast relative to the proximal edge of a digital x-ray image receptor so as to improve image quality, patient comfort and the ability of the health professional to position the breast optimally for imaging.
Another is to provide automated collimation control that changes x-ray beam collimation in accordance with one or more of the size and position of the compression paddle and of the breast, and the position of a breast platform relative to the receptor, preferably in response to information that is automatically sensed.
Another is to provide x-ray exposure control that is responsive to at least one of the size and position of the compression paddle, the position of the breast, and a pre-exposure x-ray measurement, preferably in response to information that is automatically sensed.
Another is to provide a scatter-suppression grid that is retracted for image magnification protocols, preferably automatically in response to sensing a breast position for magnification imaging.
These and other objects are met in a non-limiting example comprising a mammography system having a flat panel digital x-ray receptor, an x-ray source selectively emitting a collimated x-ray beam toward the receptor, and a compression paddle of a selectable size mounted for selective movement at least along a proximal edge of the x-ray receptor as well as along the x-ray beam. At least for selected breast x-ray protocols, a patient's breast is positioned off-center relative to the proximal edge of the x-ray receptor, and a paddle of an appropriate size also is positioned off-center relative to the same proximal edge to compress the breast for x-ray imaging.
In addition, the system includes one or more of a number of other features. An exposure control can be responsive to information regarding breast thickness along the beam direction to control x-ray exposure for imaging. This information can come from a conventional auto-exposure sensor (AES) resulting from a pre-exposure, low-dose firing of the x-ray source, from an output of the digital x-ray receptor during such pre-exposure firing, and/or from sensors for the relative positions of the x-ray source, the x-ray receptor, the compression paddle and/or the breast tray. The system can include a collimation control responsive to information regarding one or more of the size of the paddle, its location along the beam, its location relative to the proximal edge of the receptor, a desired field of view, magnification parameters, and the like. This information can come from appropriate sensors and/or can be input by the health professional carrying out imaging. The system can include a scatter-suppressing grid selectively movable between a position in the path of the imaging beam and a position outside the path (for magnification imaging). Again, information for controlling grid position can come from one or more different sources. And, the system can include a built-in or a separate viewing station receiving x-ray image information from the x-ray receptor and possibly from some or all of the sensors, processing it, and displaying the results as an image and/or in other forms.
BRIEF DESCRIPTION OF THE DRAWING
FIG. 1 illustrates a partial side view of a mammography system imaging a patient's breast.
FIG. 2 illustrates the system also in side view but in more detail and in a magnification mode.
FIG. 3 illustrates a lateral displacement of a small compression paddle along the proximal edge of the image receptor.
FIGS. 4A, 4B, and 4C show three common positions of a small compression paddle relative to the image receptor.
FIG. 5 illustrates two common x-ray protocols for breast imaging.
DETAILED DESCRIPTION OF PREFERRED EMBODIMENTS
Referring to FIG. 1, an x-ray source 1 is at one end of a generally C-shaped frame 7 and a flat panel digital x-ray imaging receptor 5 is at the other end. X-ray source 1 includes a collimator schematically illustrated at 40 to confine an x-ray beam 30 emitted from source 1 to a desired footprint at receptor 5, typically no larger than the area of receptor 5 and preferably just enough to image a patient's breast 3 or at least a selected part thereof, as compressed toward receptor 5 by a compression paddle 2 mounted on an arm 6 that in turn mounts to frame 7. A lower platform 11, often called a breast tray, is immediately below the breast, and a scatter-reducing grid 4 is between breast tray 11 and x-ray receptor 5 and is housed in the same enclosure 12 with the receptor. As is known in the art, frame 7 can rotate between horizontal and vertical directions of x-ray beam 30.
In use for a CC view, paddle 2 and its supporting arm 6 are moved up, breast 3 is positioned on tray 11 and compressed by bringing paddle 2 down as needed. With suitable collimation by collimators 40 (which typically collimate in two directions, of which only one is illustrated in FIG. 1), beam 30 from source 1 images the breast onto receptor 5 and the resulting electronic image information is transmitted to a viewing station 22 (FIG. 2). The image typically is rectangular. Preferably, the collimation is such that beam 30 illuminates an area of receptor 5 just large enough to show the image of breast 3, or at least a selected part thereof. Importantly, different sizes and shapes of paddles 2 can be mounted to arm 6, and the paddle can be selectively positioned off-center relative to proximal edge 5a of receptor 5 (the left edge in FIG. 1).
Referring to FIG. 2, the system can operate in a magnification mode in which the relative positions along x-ray beam 30 of source 1, breast tray 11, and/or receptor 5 are adjusted to provide the desired image magnification. In this example, source 1 and receptor 5 stay in place but tray 11 slides up support 7 to a position spaced up from receptor 5, and the collimation of beam 30 is adjusted as needed. Note that for magnification imaging scatter-reducing grid 4 is withdrawn from the portion of receptor 5 that receives the desired breast image, because the angles of the grid septa typically are not suitable for a magnification view. If these angles can be changed to match the selected magnification, the grid can remain in place. Alternatively and if desired, a different grid that is suitable for the selected magnified view can be introduced in place of grid 4 in FIG. 1. Auto-controls 1a can include (a) an auto-exposure control coupled with an AEC sensor 24 and/or receptor 5 to receive exposure information in a pre-imaging firing of source 1, (b) an auto-collimation control to adjust the collimation of beam 30, (c) an auto-grid control to selectively withdraw grid 4, and (d) an auto-magnification control to adjust parameters for magnification imaging. AEC sensor 24 can be a conventional separate sensor that helps determine imaging exposure parameters in a pre-imaging exposure of the immobilized breast at a low x-ray dosage. Alternatively, receptor 5 can be used for that purpose, eliminating the need for a separate AEC sensor, because the output of receptor 5 resulting from a low-dose pre-imaging exposure can provide the information for auto-exposure control. In addition, the output of receptor 5 in response to the pre-imaging exposure can reveal the position of the breast relative to the receptor, and thus provide information for auto-collimation to confine beam 30 to a footprint that matches the breast even when the breast is off-center relative to proximal edge 5a. The auto-collimation control can be an arrangement sensing the size and/or the position of one or more of breast 3, paddle 2, and tray 11, using respective sensors and automatically adjusting collimators 40 to confine beam 30 to the required cross-section and position. The auto-grid control can respond to a signal indicating that magnification imaging will be carried out to withdraw grid 4, for example to the position shown in FIG. 2, using a motor 4a. This signal can come from information provided by respective sensors or it can be input by the health professional using the system. The auto-magnification control can be an arrangement responding to data entered by a health professional through viewing station 22, or in some other way, e.g., based on information from sensors, to adjust the system elements involved in magnification. Information for the auto-controls can be provided in various ways. One is from sensors S that keep track of the size and position of paddle 2 along beam 30 and relative to proximal edge 5a of x-ray receptor 5, of the position of breast tray 11 along beam 30, of the position of grid 4, and of the setting of collimators 40. Another is inputs from an auto-exposure sensor and/or x-ray receptor 5 resulting from a pre-exposure firing of beam 30 at low dose, with breast 3 in place for imaging. As is known in the art, the output of receptor 5 can be used to detect the position of breast 3 relative to receptor 5, or at least the approximate position of the breast relative to proximal edge 5a.
Yet another possible source of information for the auto-controls is inputs from the health professional using the system, through a keyboard or other input devices in viewing station 22 or elsewhere. Information is exchanged between auto-controls 1a, sensors S, and viewing station 22 over appropriate links, shown schematically. Suitable arrangements, including encoders, motors (of which only motor M retracting and restoring grid 4 is expressly illustrated), and other control elements are included in mammography system 10 but, for clarity of the drawings, are not expressly illustrated.
FIG. 3 illustrates an example of an arrangement for positioning paddle 2 off-center relative to proximal edge 5a of receptor 5. While such off-center positioning can be used for other views as well, it is most important for views such as the MLO view. As seen in FIG. 3, paddle 2 includes a rib 20 that has a channel slot 20a and is secured to arm 6 with a removable and adjustable lock or detent 21 that passes through channel 20a. In operation, the health professional selects a paddle 2 that is suitable in size and perhaps in shape to the breast to be imaged, removes any existing paddle 2 from arm 6 by pulling out or unscrewing detent 21, and installs the selected paddle 2 by securing it to arm 6 with detent 21 in a position relative to proximal edge 5a that matches the patient's breast's position. Any desired further lateral adjustment can be made by sliding paddle 2 along the direction of the proximal edge 5a, before or during compressing the breast for taking an image.
FIGS. 4A, 4B, and 4C illustrate an alternate arrangement for lateral adjustment of paddle 2. Here a paddle 2 of a selected size and possibly shape is removably secured to arm 6, and arm 6 is in turn slidably secured to frame 7 to slide laterally, along the direction of proximal edge 5a of receptor 5. The term “lateral” is used here to designate movement parallel to, or at least generally along, the proximal edge 5a, even when the imaging plane of receptor 5 is oriented for an MLO view or is vertical. For example, FIG. 4A can illustrate a position of paddle 2 for an MLO view of the left breast, FIG. 4B can illustrate a position for a CC view, and FIG. 4C can illustrate a position for an MLO view of the right breast.
It should be clear that many other arrangements and variations will be apparent to persons skilled in the technology based on the disclosure in this patent specification and that the above embodiments are only some examples embodying inventions whose scope is defined by the appended claims.
The invention claimed is:
1. A mammography system comprising: a flat panel digital x-ray receptor having a proximal edge relative to a patient's breast to be imaged; and a compression paddle operably coupled such that the compression paddle moves generally along the proximal edge of the receptor, wherein the compression paddle is configured to be positioned in a first position substantially centered along a length of the proximal edge of the x-ray receptor, and a second position off-center relative to the x-ray receptor.
2. The system of claim 1, wherein the compression paddle is configured to slide between the first position and the second position.
3. The system of claim 1, wherein the compression paddle is secured in at least one of the first position and the second position with an adjustable lock.
4. The system of claim 1, wherein the compression paddle is secured in at least one of the first position and the second position with a detent.
5. The system of claim 4, wherein the compression paddle includes a channel to facilitate movement of the compression paddle relative to the x-ray receptor.
6. The system of claim 1, wherein the second position that is off-center relative to the x-ray receptor is for an MLO view of the patient's breast.
7. The system of claim 1 further comprising a scatter-reducing grid removably positioned between the compression paddle and the x-ray receptor.
8. The system of claim 7, wherein the scatter-reducing grid is in a first position for a first imaging mode and in a second position for a second imaging mode.
9. The system of claim 8, wherein the scatter-reducing grid is not positioned between the compression paddle and the x-ray receptor for the second imaging mode.
10. The system of claim 9 further comprising a motor coupled with the scatter-reducing grid for moving the grid between the first position and the second position.
11. The system of claim 1 further comprising an exposure control responsive to information regarding at least one of the one or more positions of the compression paddle.
12. A method of imaging a patient's breast with a mammography system comprising: providing a flat panel digital x-ray receptor having a proximal edge relative to a patient's breast to be imaged; securely positioning a compression paddle along a length of the proximal edge of the x-ray receptor, the compression paddle operably coupled such that the compression paddle moves generally along the proximal edge of the receptor, wherein the compression paddle is configured to be positioned in a first position substantially centered along a length of the proximal edge of the x-ray receptor, and a second position off-center relative to the x-ray receptor; compressing the patient's breast between the x-ray receptor and the compression paddle when the compression paddle is in the first position; and imaging the patient's breast with x-rays from an x-ray source.
13. The method of claim 12, wherein securely positioning the compression paddle includes sliding the compression paddle between the first position and the second position.
14. The method of claim 13, wherein the compression paddle is secured in at least one of the first position and the second position with an adjustable lock.
15. The method of claim 13, wherein the compression paddle is secured in at least one of the first position and the second position with a detent.
16. The method of claim 12 further comprising removably positioning a scatter-reducing grid between the compression paddle and the x-ray receptor.
17. The method of claim 16, wherein the scatter-reducing grid is in a first position for a first imaging mode and in a second position for a second imaging mode.
18. The method of claim 17, wherein the scatter-reducing grid is not positioned between the compression paddle and the x-ray receptor for the second imaging mode.
19. The method of claim 17, wherein a motor coupled with the scatter-reducing grid moves the grid between the first position and the second position.
20. The system of claim 1, wherein when in the second position, an edge of the compression paddle is substantially aligned with an edge of a breast support platform.
|
Page:The Adventures of David Simple (1904).djvu/181
away for his own profit, that he might be the happier man, at last entirely pacified me: we heard from him once a week, and I then lived in a situation, I think, the most desirable in the world; I am sure I have often esteemed it so since, and wished to live it over again. This life continued till I was twelve years old, when all my tranquillity was interrupted by a fatal accident, which has never been out of my thoughts twenty-four hours since it happened, and which I can never mention without the most piercing grief.
"One morning, as my mother and I were walking in the fields (as was our custom an hour before breakfast) a thorn ran into her foot, which put her into the most violent pain; insomuch, that she was unable to stir. As we were alone, I knew not what to do to help her; I saw her turn as pale as death, and look ready to faint away; this threw me into intolerable agonies and I fell a-screaming so loud, that I was heard by some labouring men, who were at plough in a ground not far from the place where we were. They immediately came to our assistance; I desired them to take one of their horses, and contrive, if they could, to carry my mother home; we were not above a quarter of a mile distant, so that one of the men made a shift, as she was a little woman, to carry her before him. It would be in vain to attempt to describe what my father (who loved her very affectionately) felt at this sight. "We rubbed her foot with some spirits, and in a little time she seemed to be easy, and went about the house only a little limping, without any great complaint, for four days; at the end of which she began to be very uneasy. We presently looked at her foot, the point of the thorn was just visible; all around it was very much swelled, and in the middle was a great black spot; we neither of us
|
S04A0703.
MOORE v. THE STATE.
(603 SE2d 228)
BENHAM, Justice.
Allen Christopher Moore appeals his convictions for murder and possession of a firearm during commission of a felony. The evidence at trial established that immediately prior to his death, Eric Kemp was speaking on the telephone with his girlfriend and told her he had to hang up because “Little Al,” which was Moore’s nickname, was at the door. She described the killer as a short, dark-skinned male with braids in his hair and a scar on his forehead, with “dirty, vampire teeth,” wearing a blue shirt and cap and jeans. Police officers found shell casings from two weapons and a scrap of paper with Moore’s name on it at the scene of the shooting. One neighbor saw a short man wearing a sweatshirt run from Kemp’s house and get into the backseat of a green Toyota Camry which was then driven away, while another neighbor saw two men run and get into the Camry. One of two women who had accompanied Moore’s cousin from Pittsburgh, Pennsylvania to Atlanta testified Moore and his cousin entered into a drug deal with Kemp and were cheated; Moore had a handgun and his cousin obtained a handgun; and on the evening of the shooting, the two men left in a green Camry, wearing dark clothes. When Moore and his cousin returned, they and the two women left hurriedly for Pittsburgh, where the witness later saw Moore and his cousin washing bloody clothes. Moore’s cousin told the witness they had killed Kemp and a little girl was in the house at the time of the killing. Testimony established Moore’s cousin was driving his girlfriend’s green Toyota Camry around the time of the shooting.
1. The evidence adduced at trial and summarized above was sufficient to authorize a rational trier of fact to find Moore guilty beyond a reasonable doubt of murder and possession of a firearm during commission of a felony. Jackson v. Virginia, 443 U. S. 307 (99 SC 2781, 61 LE2d 560) (1979).
2. In six enumerations of error, Moore contends he received ineffective assistance of counsel from the attorney who represented him at trial.
In order to prevail on a claim of ineffective assistance, appellant “must show that counsel’s performance was deficient and that the deficient performance so prejudiced the client that there is a reasonable likelihood that, but for counsel’s errors, the outcome of the trial would have been different. [Cits.]” [Cit.] Appellant “ ‘must overcome the strong presumption that counsel’s conduct falls within the broad range of reasonable professional conduct.’ ” [Cit.] In reviewing a lower court’s determination of a claim of ineffective assistance of counsel, an appellate court gives deference to the lower court’s factual findings, which are upheld unless clearly erroneous; the lower court’s legal conclusions are reviewed de novo. [Cit.]
Bales v. State, 277 Ga. 713, 715 (2) (594 SE2d 644) (2004).
(a) Moore first argues trial counsel rendered ineffective representation by failing to object when the State elicited testimony highlighting Moore’s pretrial silence. Specifically, he complains the State elicited testimony that Moore, although he was well acquainted with Kemp’s family, failed to make a statement of condolence to the family, to assist in raising funds, or to attend Kemp’s funeral, and that the police gave Moore an opportunity during interrogation to explain where he was on the occasion of the shooting and why he had not come forward when he knew the police were seeking him.
Contrary to Moore’s argument, the questions relating to Moore’s lack of interaction with Kemp’s family after the shooting did not bear in any way on Moore’s pre-arrest silence, but on the fact he left town immediately after the killing and was not present to interact with Kemp’s family. A defendant’s flight is a proper subject for questioning and for argument. Renner v. State, 260 Ga. 515 (3) (b) (397 SE2d 683) (1990).
The State argues the questions regarding Moore’s pre-arrest silence were not improper because they were intended to elicit testimony explaining interrogation techniques used to question Moore. The State does not explain, however, and we do not perceive the relevance of the topic of interrogation methods to the issue of Moore’s guilt or innocence. Moreover, given the holding in Mallory v. State, 261 Ga. 625 (5) (409 SE2d 839) (1991) (overruled on other grounds by Chapel v. State, 270 Ga. 151 (4) (510 SE2d 802) (1998), see Clark v. State, 271 Ga. 6 (5) (515 SE2d 155) (1999)), that evidence concerning pre-trial silence, including a failure to come forward, is more prejudicial than it is probative of any relevant fact, we agree the questioning was improper.
Assuming trial counsel’s failure to object to the improper questioning by the prosecuting attorney constituted deficient performance, there remains for consideration whether the deficiency so prejudiced Moore that there is a reasonable likelihood that absent the deficiency, the outcome of the trial would have been different. Bales v. State, supra. Considering the State did not pursue the improper questioning and did not elicit testimony concerning Moore’s response to the interrogation, and the strength of the circumstantial evidence of Moore’s guilt, we are not persuaded the timely interposition of an objection by trial counsel would have produced a different result at trial. Accordingly, we conclude Moore has not shown the prejudice necessary to establish ineffective assistance of counsel. Id.
(b) Moore asserts trial counsel was ineffective in failing to request a jury charge concerning the credibility of witnesses who testified in hope of leniency regarding pending criminal charges. However, since the two witnesses regarding whose credibility Moore asserts a charge would be appropriate testified without contradiction they were not testifying in hope of leniency, a charge on that subject would not have been authorized by the evidence (Monsalve v. State, 271 Ga. 523 (3) (519 SE2d 915) (1999)), and trial counsel cannot be faulted for not requesting a jury charge which was not authorized. Callendar v. State, 275 Ga. 115 (3) (e) (561 SE2d 113) (2002).
(c) As to three of Moore’s assertions of ineffectiveness, trial counsel testified at the initial hearing on Moore’s motion for new trial that there were strategic reasons for her actions. (1) A detective from Pittsburgh testified at trial that a woman who came to Atlanta to visit Moore’s cousin and who returned to Pittsburgh with Moore and his cousin after the shooting told the detective someone had shot at her and there were rumors she would not live to testify in Atlanta. That woman herself testified to the same effect, adding she had not been threatened by anyone in Atlanta. Trial counsel testified on motion for new trial that her decision not to object to the testimony was strategic in that she wished to turn the attention of the jury to Moore’s cousin as the shooter and wished to allow the testimony to give the impression it was Moore’s cousin who threatened the witness. (2) During her testimony on motion for new trial, trial counsel was asked why she did not object to the prosecuting attorney’s comments during closing argument that Moore had kept his mouth closed during trial so the jury could not see that his teeth matched the description by Kemp’s daughter of the shooter’s teeth, which she described as “dirty vampire teeth.” Trial counsel responded she did not object because she did not want to call the jury’s attention to Moore’s teeth which, she said, were in fact as described. (3) With regard to her failure to request a jury charge on alibi, trial counsel testified she did not request a charge on alibi because the only evidence of alibi was a statement attributed to Moore which was not, in her opinion, sufficient to support an alibi defense, and which was contradicted by other things Moore told the police.
“As a general rule, matters of reasonable trial strategy and tactics do not amount to ineffective assistance of counsel. [Cit.] We agree with the trial court’s implicit determination that trial counsel’s choice of trial strategy was not unreasonable.” Bales v. State, supra at 715 (2).
Decided September 27, 2004.
Brian Steel, for appellant.
Paul L. Howard, Jr., District Attorney, Bettieanne C. Hart, Peggy R. Katz, Assistant District Attorneys, Thurbert E. Baker, Attorney General, Frank M. Gaither, Jr., Assistant Attorney General, for appellee.
(d) At a second hearing on Moore’s amended motion for new trial, appellate counsel produced a witness who testified she saw Moore in Pittsburgh on the date of the shooting. Moore contends trial counsel was ineffective for failing to find that witness, but stipulated at the hearing that trial counsel would have testified, had she been present, that Moore had not been able to provide trial counsel with any specifics regarding where he was or who he was with while he was in Pittsburgh, and that trial counsel had been unable to locate anyone to support the alibi defense because the information given her was inadequate. “Trial counsel cannot be held ineffective for failing to track down a witness whose whereabouts are unknown. [Cit.]” Morris v. State, 257 Ga. App. 169, 172 (2) (570 SE2d 619) (2002). Here, not only was the witness’s location unknown, but the witness’s identity was unknown to trial counsel. Under those circumstances, we find no error in the trial court’s rejection of this claim of ineffectiveness.
(e) Finally, Moore complains of trial counsel’s failure to object to the prosecuting attorney’s vouching for the truthfulness of prosecution witnesses. “The longstanding rule is that counsel may not state to the jury his or her personal belief about the veracity of a witness. [Cit.]” Bolden v. State, 272 Ga. 1, 2 (525 SE2d 690) (2000). Contrary to Moore’s argument on appeal, none of the instances he specifies involved the prosecuting attorney stating a personal belief about the veracity of a witness. One instance involved a reference to the fact that some of the evidence the State presented undercut the credibility of its own witness and the other instances were either statements of the State’s theory of the case or were statements of the conclusions the State wished the jury to draw from the evidence, which is permissible. Fulton v. State, 278 Ga. 58 (8) (597 SE2d 396) (2004). Since the prosecuting attorney’s remarks were not objectionable, objection to those remarks would have lacked merit. “Failure to make a meritless objection cannot be evidence of ineffective assistance.” Hayes v. State, 262 Ga. 881, 884 (3) (c) (426 SE2d 886) (1993).
Judgment affirmed.
All the Justices concur.
The crimes occurred on June 7, 2001, and Moore was indicted on July 27, 2001, for malice murder, felony murder, aggravated assault, and possession of a firearm during commission of a felony. Following a jury verdict of guilty on all counts reached on February 8, 2002, Moore was sentenced to life imprisonment for murder and a consecutive term of five years for possession of a firearm during commission of a crime. The other charges merged into the murder conviction. Moore’s motion for new trial, filed February 22, 2002, and amended April 25, July 9, and August 20, 2003, was denied on September 17, 2003. Pursuant to a notice of appeal filed September 23, 2003, the appeal was docketed in this Court on December 31, 2003, and was submitted for decision after oral argument on July 20, 2004.
|
/*
* Copyright © 2019 Dominokit
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dominokit.domino.ui.style;
import elemental2.dom.Element;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
/**
 * A composite css property that groups several {@link CssProperty} instances and applies or
 * removes them on an element as a single unit.
 *
 * @author vegegoku
 * @version $Id: $Id
 */
public class CompositeCssProperty implements IsCssProperty {
private Set<CssProperty> cssProperties = new HashSet<>();
/**
 * Creates a CompositeCssProperty from a collection of css properties.
 *
 * @param cssProperties a {@link java.util.Collection} of {@link CssProperty} to be grouped.
 * @return a new {@link org.dominokit.domino.ui.style.CompositeCssProperty} wrapping the given properties.
 */
public static CompositeCssProperty of(Collection<CssProperty> cssProperties) {
return new CompositeCssProperty(cssProperties);
}
/**
 * Creates a CompositeCssProperty from one or more css properties.
 *
 * @param cssProperties the {@link org.dominokit.domino.ui.style.CssProperty} instances to be grouped.
 * @return a new {@link org.dominokit.domino.ui.style.CompositeCssProperty} wrapping the given properties.
 */
public static CompositeCssProperty of(CssProperty... cssProperties) {
return new CompositeCssProperty(cssProperties);
}
/**
* Constructor for CompositeCssProperty.
*
* @param cssProperties a {@link java.util.Collection} object.
*/
public CompositeCssProperty(Collection<CssProperty> cssProperties) {
this.cssProperties.addAll(cssProperties);
}
/**
* Constructor for CompositeCssProperty.
*
* @param cssProperties a {@link org.dominokit.domino.ui.style.CssProperty} object.
*/
public CompositeCssProperty(CssProperty... cssProperties) {
this(Arrays.asList(cssProperties));
}
/** {@inheritDoc} */
@Override
public void apply(Element element) {
cssProperties.forEach(cssProperty -> cssProperty.apply(element));
}
/** {@inheritDoc} */
@Override
public void remove(Element element) {
cssProperties.forEach(cssProperty -> cssProperty.remove(element));
}
}
|
Thread:ClarentBloodArthur/@comment-26381576-20200111120840/@comment-26381576-20200115122431
@Taka: Now, I know this is actually a guy but...what a cutie. Awesome stand, too.
@Lu: Intense!
|
import * as exec from './exec'
import * as invite from './invite'
import * as install from './install'
export const command = '%{botNickname} <subcommand> [args..]'
export const desc = 'commands'
export async function handler({ args, context }) { /*nop*/ }
export const subcommands = {
exec, invite, install
}
|
Thread:PopkornHUPIKO2/@comment-32769624-20180704070722
Hi, welcome to ! Thanks for your edit to the Sakura Hagiwara page.
Make sure to check out our rules page.
Please leave me a message if I can help with anything!
|
User talk:<IP_ADDRESS>
Welcome
Welcome to Central Wikia, <IP_ADDRESS>!
Finally, please keep an eye on the recent changes to see what the community is up to here.
Enjoy! -- Richard1990 (Talk) 22:21, September 19, 2009
|
Community as Story and the Dynamic Nature of Community: Perceptions, Place, and Narratives about Change
We present a theoretical discussion conceptualizing “community as story”—narratives that create and recreate one’s definition of and relationship to their community. We use a variety of disciplinary sources and representative quotes to help develop the theory. In so doing, we discuss the importance of subjective perception, narrative and place to the creation of a community story. Community stories take place in time and place, and as changes to the place occur, residents are compelled to adjust their stories and definitions. These changes are reflected in narratives that reminisce about what the community was and what it is becoming. The narratives then become part of a new community story. Above and beyond our theoretical conceptualization of “community as story”, to help illustrate our arguments in an empirical setting, we present a historical narrative from interviews with residents of Vance, Alabama, home of the Mercedes-Benz plant, which discuss the changing nature of and relationship to their community after the arrival of the plant in the 1990s.
Introduction
"Trying to study community is like trying to scoop jello [sic] up with your fingers.You can get hold of some, but there's always more slipping away from you" (Pelly-Effrat 1974, p. 1).In this paper, we dip into the proverbial "community" Jell-O to offer a theoretical exploration of "community as story".Community as story refers to the narratives and meta-narratives residents create about their community and changes in them.Our approach to community as story is interdisciplinary, since we cite the works not only of sociologists, but also of artists, theologians, anthropologists, political scientists, historians, novelists, philosophers, and geographers.
We begin by reviewing the dynamic processes through which residents-individually and collectively-come to envision their community.Next, we address how shared, idealized conceptualizations of community influence the interpretation and navigation of the residents' world.To better understand how stories establish community identity, we then focus on scholarship attuned to understanding the relationship between emotional attachments and the place where community happens.We further examine how social impacts often necessitate a subjective redefining of community.
In the final section, we provide an example of community as story by highlighting a historical case study of residents' narratives of Vance, Alabama, home to a Mercedes-Benz factory.Building the factory in Vance influenced shifts in the individual community stories and an emerging meta-community narrative.We intentionally call upon a historical case study because, as was articulated by Chapman (1979, p. 46), "the past is at its best when it takes us to places that counsel and instruct, that show us who we are by showing us where we have been, that remind us of our connections to what happened here" (original emphasis).Furthermore, by studying community stories, we do not discount other theories of community or the more objectifiable aspects of it.Instead, our purpose is to draw attention to some aspects of community that often "slip through the fingers" of community scholarship by expounding on the theory of community as story.
Community as Story: A Nexus of Individual and Collective Narratives
Community as story includes both a personal and a collective nexus of local economic, social/cultural, historical and individual factors. Dynamic, synergistic communities (Kenyon 2000; Crow and Allen 1994) must be conceptualized using more than economic and demographic characteristics. Communities are places where, and when, community happens for people (Brown et al. 1998), and community happens in large part through people's narratives, that is, their stories (Flynn 1991; see also Hinchman and Hinchman 1997). As Flynn says, "Personal stories blend into (and) are chapters within community stories" (Flynn 1991, p. 25). It is "in large part through shared stories that communities create themselves and bind themselves together" (Card 1990, p. 273).
Community Story Shapes an Individual's Stories
All stories are composed by human actors and their ability to conceptualize and author their life story is limited by the setting (or community) of which they are a part. MacIntyre (1997, pp. 251, 253) maintains: What the agent is able to do and say intelligibly as an actor is deeply affected by the fact that we are never more (and sometimes less) than the co-authors of our own narratives. Only in fantasy do we live what story we please . . . We enter upon a stage which we did not design and we find ourselves part of an action that was not of our making . . . it is always the case that there are constraints on how the story can continue and that within those constraints there are indefinitely many ways that it can continue.
Community as story, then, is portrayed through the observed or imagined relations between specific actors who occupy a particular place in time and the inherited stories of that place (Phillips 2002; Calhoun 1991). Zerubavel (1999, p. 7) asserts, "I experience the world not only personally, through my own senses, but also impersonally, through my mental membership in various social communities" (emphasis in original). Thus, we as individuals are in some ways constrained or conditioned by our community's narrative. Nevertheless, we have the power, along with our fellow actors, to reshape and redefine that narrative.
Individual stories and community stories can be intertwined, play off each other, or be at odds with each other. Individuals constantly shape, adjust and reconcile discordant elements of their personal community stories with an emergent community story. "A community's stories offer members a set of canonical symbols, plots, and characters through which they can interpret reality and negotiate-or even create-their world" (Hinchman and Hinchman 1997, p. 235). Thus, community is always to some degree "imposed" because each individual must reconcile her story of community with all others in that place and time to create the community's story. Dewey (1916, p. 24) believed that community comes into being through a "process of sharing experience until it becomes a common possession. It modifies the disposition of both parties who partake in it". Consequently, our individual community stories, as Hummon (1990) explains, will be further modified by the "type" of community we live in: rural, urban or suburban. For this reason, making "sense of reality and [our] place in the everyday world" (p. 6) is largely predicated on our type and ideals of community, the sense of place it engenders, and our attachments to those places that embody that ideal (see Beckley 2003). Flynn (1991, p. 25) captures this idea well: "To the extent that communities lack story, they lack a sense of community, of solidarity . . . If people do not perceive that they are helping to move the story along, then there is less community, less community-as-story". The constraints of those ideas will be played out in our community stories, which are shaped by our history: "The story of my life is always embedded in the story of those communities from which I derive my identity. I am born with a past" (MacIntyre 1997, p. 159). Thus, "while we certainly think both as individuals and as human beings, what goes on inside our heads is also affected by the particular thought communities to which we happen to belong" (Zerubavel 1999, p. 9; emphasis in original). This does not mean that every individual must "buy into" the larger story. Even if an individual's community story remains discordant with the larger story, (s)he still knows the larger story: what members of the community say it is (or should be).
Creation and Maintenance of Community Boundaries
Individuals who compose community impose criteria on those who belong in it. They define the criteria through the creation of boundaries that are physical, political, linguistic, ethnic or racial. It is the individuals who assert the boundaries of the imagined community, although powerful outsiders may assign a different boundary (Cornell and Hartmann 2007). Such boundaries inevitably include individuals who may or may not share a common imagination of these boundaries. The interaction of these collective community identity processes varies over time and place, and results in changing community boundaries.
As the community story boundaries develop through the individual's collective stories, those individuals, in turn, must redefine their personal stories of community to accommodate the larger perceived group view of the larger perceived group-their community.Consequently, Hummon (1990, p. 9) asserts that "people's attitudes toward community are seldom unique, nor are they universally known.Rather, their views tend to be patterned, shared with others with whom they live and communicate; different from others who lie outside the boundaries of common culture and experience".Similarly, Card (1990) says that: No human community could ever exist if we had no mechanism to enable us to feel safe in trusting other people's behavior to follow certain predicable patterns.And these predictable patterns can't arise solely from personal experience-we must know, with some certainty, before we have observed another member of the community for any length of time, what he or she is likely to do in most situations . . .Each community has its own epic: a complex of stories about what it means to be a member of that community . . .All storytelling contains elements of the particular, the epic, and the mythic (pp. 273-274).
MacIntyre's (1997) conception similarly emphasizes that our shared views or meta-story of community are always a compromise with the potential multiplicity of views of other individuals who also seek to envision community: It is because we all live out narratives in our lives, and because we understand our own lives in terms of narratives that we live out, that the form of narrative is appropriate for understanding the actions of others. Stories are lived before they are told (p. 249).
Ideals Shape Stories
Many individuals and communities share common components of their stories because their interpretations of the world lead them to feel that communities must meet certain standards.As such, as was observed by Hill (1991), the "quest for community is a quest for a particular kind of community to include particular people.The worth of a particular quest for community cannot be separated from the consequences of sustaining one community at the expense of another."That is, community stories promote common ideals about "the good life" (Bell and Newby 1972) or the way things should be.As Hummon (1990, p. 6) says, "Community beliefs, I propose, are best understood as interpretive, socially-shared perspectives, learned from community ideology and socially-structured experience".For example, a common element in most contemporary community stories is an ideal of "the good life", but this is a modern twist to the community as story.
The belief that stable and tightly-knit communities have existed in the past and still survive in distant lands is an important myth for industrial and highly mobile societies.It is therefore no coincidence that it was in the turmoil of late nineteenth-century industrialization that the idea of "community" as opposed to modern "society" was developed extensively, particularly in the work of Tönnies (Macfarlane et al. 2008, p. 1).
Contemporary Americans, in particular, draw heavily on a highly idealized interpretation of what community has been, by formulating an idea of what it should be (Rutman and Rutman 1984).Thus, today, community often represents, as Abrams (1971, pp. 59-60) suggests: . . .that mythical state of social wholeness in which each member has his place and in which life is regulated by cooperation rather than by competition.It . . .always seems to be in decline at any given historical present.Thus, community is that which each generation feels it must rediscover and re-create.
Because we seek to blend our individual stories into the story of our communities, our concept of community acts as a guidepost for our relations with others and for how we conceptualize others in the community, which contributes to the overall characteristics of the community story.
Community as Story: A Mythic and Imagined Structure
Community as both a personal and shared ideal, or community as story, might best be represented as a "mythic structure", a concept described by Peterson (1990, p. 9): Myths express the collective mentality of any given age and provide patterns for human action . . .Since the human relationship with myth is based on use rather than truth or falsity, myth provides the most appropriate instrument for the necessary inversion of ideological contradictions . . .Myth's flexibility allows its users to correct dysfunctional orientations without worrying about contradictions, logical or otherwise.Myths that lose the flexibility become dysfunctional.Card (1990) similarly says that the "truth" of stories is not predicated on their close association to empirical facts, but on how effectively they reveal truths about humanity in general.The dynamic and highly flexible mythic structure applies to community stories, which describe overarching "truths" of the community while withstanding contradictions.This understanding of community is corroborated by Macfarlane et al. (2008) who argue that community is a "powerful myth" in industrial societies.
Although our stories of community are adaptable and accommodating with regard to the ambiguity within the 'factual' empirical world, they may become dysfunctional when their flexibility is challenged to such an extent that they can no longer account for overt inconsistencies.In other words, in such cases, a severe cognitive dissonance is created, which we strive to resolve by redefining who and what constitutes our community and our relationship to it.Essentially, we must revise our story.On an individual level, this can occur as the result of a life-altering epiphany, the end of a long absence from a place or a group, a life-cycle change, and so forth.On a larger level, it is often prompted by a singular event-a natural disaster, for example (see Erikson 1976), by a short-term, yet large-scale "mega-event", such as the Olympics (see Cope et al. 2015), by a large economic development that dwarfs all other entities in the community (see Brown et al. 1989), or by processes of industrialization and suburbanization (see Salamon 2003).Under these circumstances, what ultimately remains firmly in our control is our ability to define and redefine our community, our story.
Community as Story: The Place and the Imagined Community
To fully understand how community stories or myths develop and change, we must understand the significance of place in their creation.As Flynn (1991, p. 24) argues, "Stories take place somewhere . . .and community must occur some-place".Because our ideal of who and what constitutes our community is generally tied to a specific place, our emotional attachments to the place-our sense of place-become part of our story.It is where our community happens.Alterations in the place, therefore, can force us to redefine how our community happens there.As Gieryn (2000, p. 465) maintains: In spite of its relatively enduring and imposing materiality, the meaning or value of the same place is labile-flexible in the hands of different people or cultures, malleable over time, and inevitably constructed . . .Place is not space-which is more properly conceived as abstract geometries . . .Space is what place becomes when the unique gatherings of things, meanings, and values are sucked out . . .Put positively, place is space filled up by people, practices, objects, and representations.This view echoes Gussow's earlier assertion (Gussow 1971, p. 27) that place is "a piece of the whole environment which has been claimed by feelings".The emotion-laden sense of place experienced by individuals in a place contributes to the unique construction of the community narrative there.As Lewis (1979, p. 41; emphasis in original) asserts: "All these places, no matter what else they have, have a sense of shared experience.And, very often, that experience is NOT shared by other folk who do not inhabit that particular place".In other words, "This is where, for me, community happens".
All imagined community occupies place-even if the place is "virtual"-and community is what gives place its meaning.Place and community become common components of each other, not because community must physically occupy some place in time, but because the individuals who imagine community must do so.According to Tuan (1977, p. 12), place and community-which happens in it-become mutual objects of value: "What begins as undifferentiated space becomes place as we get to know it better and endow it with value . . .What can be known is a reality that is a construct of experience, a creation of feeling and thought" (Tuan 1977, pp. 6, 9).
Places take on value as community when they are "personed", becoming imbued with the "sense of the presence of those who are not physically there" (Bell 1997, p. 113).When we imagine who belongs to our community, we also imagine who belongs to the place where our community occurs.Bell (1997, p. 113) argues that, "The meaning of a place, its genius loci, depends upon the geniuses we locate there".So, my imagined community is closely tied to my perception of place and to those who do and do not fit in it.Indeed, the value of my place, my community, can be altered if I feel others who "do not fit" encroach upon it.
Together with having a conception of the people who make up and belong in a place, the name of our place, which grasps what a community represents for us, is of equal importance.Tuan (1977, p. 29) maintains that: Things are not quite real until they acquire names and can be classified in some way.Curiosity about places is part of a general curiosity about things, part of the need to label experiences so that they have a greater degree of permanence and fit into some conceptual scheme.
Place names become part of our individual stories."Our place" or "my place" has a name if only at an individual level, where "my place" is associated with certain events or experiences.More generally, if many people share a common name for a place, they may also share much of a common story about that place.
The common stories that develop through the "personing" and "naming" of places are sometimes referred to as "heritage narratives", which are selective understandings of the history and character of a place, largely shaped through social interaction (Alkon 2004;Carmichael and McDonough 2019).According to Gieryn (2000, p. 467), "Place saturates social life: it is one medium (along with historical time) through which social life happens . . .Places are made through human practices and institutions even as they help make those practices and institutions".Place, thus, mediates social life.
Place and community, then-made up of a name, people, and values-are tied up with community identity and community story.Residents "have a sense of place, shaped by a shared history and a shared culture derived from continuity of generations" (Salamon 2003, p. 3).Mealor (1979, p. 189) describes the process of creating community identity as follows: We are all essentially terrestrial creatures identified with a particular town or area.No matter where we travel or what we do, there is in the back of our minds a place we call home.We may have several "homes", each identified in time and space with other human beings and with important events.Unlike our forefathers, our spatial identity can change more easily as a result of education, travel, and occupation.Even though the mobility that is ours in the last one-third of the twentieth century enables us to develop identity with new places, we can and do identify through memories with those previous places that were "home".Our mind usually reflects upon those places through eyes that recall landscapes and people as they were, not as they are today.
Basso similarly comments on the construction of place and especially the influence our socially-attuned imaginations have on personal and community identities.
Place-making . . . is a common response to common curiosities-what happened here? . . .What people make of their places is closely connected to what they make of themselves as members of society and inhabitants of the earth . . .If place-making is a way of constructing the past, a venerable means of doing human history, it is also a way of constructing social traditions and, in the process, personal and social identities.We are, in a sense, the place-worlds we imagine (Basso 1996, pp. 5, 7;emphasis in original).
Even if we no longer affiliate ourselves with a certain place, we still look for something to which we can attach our identity of self and of others.Thus, Salman Rushdie (1991, pp. 124-125), speaking of the highly mobile nature of people in modern society, asserted: The effect of mass migrations has been the creation of radically new types of human being-"mass migrants"-who root themselves in ideas rather than places, in memories as much as in material things; people who have been obliged to define themselves-because they are so defined by others-by their otherness; people in whose deepest selves strange fusions occur, unprecedented unions between what they were and where they find themselves . . .Migrants must, of necessity, make a new imaginative relationship with the world, because of the loss of familiar habitats.
Although many of us are not migrants-at least in the normative sense-sometimes we must still "make a new imaginative relationship with the world because of the loss of familiar habitats" when our community or our perception of it changes dramatically.As the place to which we are connected changes, we adjust our community story accordingly.This usually occurs through small, sometimes imperceptible shifts.For many, the places of reference will never change dramatically enough to challenge the fundamental definition of community associated with it.Few of us are compelled to rewrite our community story entirely.Some may feel so compelled in cases where they experience singular life-changing events in their communities, but when this occurs, how can they reconceptualize the story of the place they once knew as non-traditional "migrants"?How do they reconcile their view of community in light of another's?Hirschman (1970) argues that we always have three options for dealing with imposed conditions: "exit", "voice", and "loyalty"."Exit is the act of simply leaving . . .[It] is essentially a private and also typically a silent decision and activity" (Hirschman 1995, pp. 12, 34).A person may choose to "exit" the conditions if she finds she is unable to reconcile them with her own views.This can be done physically-by literally leaving or moving, or symbolically by removing oneself from the procedures that legitimate the conditions (e.g., refusing to vote).In essence, she has attempted to withdraw part of her story from the larger community's.A second option is "voice", "the act of complaining, or of organizing to complain, or of protesting, with the intention of achieving a direct recuperation of the quality that has been impaired . . .[It] is typically a public activity" (Hirschman 1995, pp. 12, 34).People can try to express their opinions to the larger community in an attempt to make their voice the voice of the community.Voice is the act of trying to redefine the community's story.Lastly, a person can simply be "loyal" to the existing conditions.
When new conditions arise in a community that compel people to redefine their community stories, individuals will seek to redefine the story with a method that is most satisfying for them. Because people share commonalities in their definitions of community (Hummon 1990), individual satisfaction for some (if not the majority) of the community members will likely come through some type of group solution rather than an individualistic one. Consequently, people will likely respond to new conditions through voice or loyalty rather than through exit, seeking to revise the community's story as they revise their own. Regardless of the approach they take-exit, voice, or loyalty-each member of the affected community will have to redefine his position from prior conditions to accommodate the new conditions. Understanding how community is redefined through its members-the rewriting of the community story-is the basis of community impacts.
Social Impacts: The Rewriting of the Community Story
Community impacts precipitated by a singular event can be viewed as the consequences of people transitioning from one perceived community to another in the same location.Yet in this transition, the definitions of both the place and the community change to accommodate the event creating the impact, for it is now part of a "new" community that occupies the same place as the old one.From this point on, community-this community-can only be understood when the event, or thing, is considered a part of it.It is now part of the local context and the narratives associated with it.The place has changed to accommodate it; so too have the definitions of community which happen in that place.Finsterbusch (1980, p. 23) states that "generally, impacts on individuals are best monitored within a quality-of-life framework which includes both descriptions of measurable changes in a person's objective conditions and subjective responses to these changes".The changes in objective conditions may even be imposed externally, but the subjective responses to them are not.What lies within the control of community members is their subjective redefining of their community.Abrams (1982, p. 8) notes that, " . . .what people do in the present [is] a struggle to create a future out of the past, of seeing that the past is not just the womb of the present but the only material out of which the present can be constructed".Regardless of the reason for the community's new conditions, these conditions become the "raw material" for people to redefine their stories.As people are compelled to reconcile new conditions with their imagined communities, they are also compelled to imagine their community in new ways, to rewrite the community's story as they rewrite their own.Thus, the impacts take shape in, and are indicated by, the modification of the narratives of the residents about their community.We will illustrate how these ideas play out in communities by introducing a historical case study.This historical case study elucidates how the narratives of the residents of Vance, Alabama, changed with the building of the Mercedes-Benz plant in their community.
Shifts in Individual Community Stories: Vance, Alabama, and the Mercedes Benz Plant
On 30 September 1993, Mercedes-Benz announced the site for its first U.S. factory: Vance, Alabama, population 300. The $540 million, 1,500-employee, high-tech facility began producing over 60,000 sport utility vehicles per year in February 1997.[1] See Hudspeth (1995) for particulars about the "winning" of the Mercedes plant in Vance. News articles published in The Tuscaloosa News (TTN) between 10 October 1993 and 7 December 1994 report the following details of the Mercedes-Benz deal: The State of Alabama offered a lucrative incentive package to Mercedes-Benz that was estimated at four times the amount that other states offered. The original agreement included a pledge by the Tuscaloosa City Council to spend $30 million to buy and develop the plant site. Once the 966-acre tract was cleared, leveled and prepared for construction, the entire 966 acres was to be sold to Mercedes for $100.00. The Alabama legislature also set up a plan to allow Mercedes to keep five percent of its workers' wages to pay off construction debts (the workers would get a matching tax break) and approved a twenty-five-year corporate tax holiday for the company. The state also was to pay the workers while they trained. The Alabama Department of Economic and Community Affairs approved a grant application dated December 1993 for estimated state-funded improvements to the site and surrounding area totaling $426.3 million, which breaks down as follows: site acquisition, $5.3 million; site preparation, $12.4 million; site improvement, $10.0 million; water and sewer, $11.0 million; railroad extension, $4.0 million; job training facility, $30.0 million; service center building, $5.0 million; fire station, $0.6 million; interstate interchange and access roads, $50.0 million; plant facility, equipment and other expenses, $300 million.
Although construction began on schedule, a key event transpired early in the construction schedule when the City of Tuscaloosa annexed a previously unincorporated 14-mile corridor of highway to the plant and placed a "City of Tuscaloosa" water tower at the plant site facing the town of Vance.Regarding this event, the mayor of Vance said: "That's something the citizens of Vance resent-having a company come in and all of a sudden rename your community . . .Emotions run deep when you start doing that" (TTN, 8 August 1995).Many Vance residents were also upset when the Mercedes-Benz plant chose to use a Tuscaloosa postal address instead of one from Vance.Less than two years into the project, many Vance residents felt that creating national (if not international) recognition for their community, ironically, had precipitated its loss of identity.Newspaper articles and editorials began to reflect these new narratives about Vance.
How did the residents' narratives about Vance change because of the Mercedes-Benz plant? How did they redefine their community story? To address these questions, we used information from twenty-eight Vance resident interviews originally collected between January 1994 and November 1996. The interviewees were 17 male and 11 female Vance residents, representing a cross-section of the community (town leaders, people in the coffee shop, at public meetings, and those being relocated). Participants were identified through preexisting contacts with local community members and through additional snowball sampling procedures. Specifically, maximum variation purposive sampling (Guba and Lincoln 1989; Kuzel 1992) techniques were used in an effort to assure a broad range of perspectives. Accordingly, efforts were taken to identify a wide variety of people-e.g., those individuals who would be in the available labor market for employment in the factory, those who expected to reap economic benefits from construction efforts, those who expected positive/negative impacts to themselves and/or the community, those who had played a key role in locating the plant in Vance-for inclusion in the study. To wit, participants were selected from residents of the Vance community including, for example, (1) those who may or may not have been seeking employment with Mercedes-Benz, (2) residents who had land holdings in the community and/or adjacent to the proposed plant site, (3) residents identified by other community members as "key actors"-as documented through newspapers, news broadcasts, and city positions, and/or (4) residents identified through other interviewees who could provide a unique perspective. In addition to the formal interviewing procedures, numerous other informal interviews and observations were made over the same period in town meetings and on the street.
While, "there is not one single way to analyze qualitative data", (Creswell 2002, p. 258), efforts were taken to maintain the methodological rigor and analytical defensibility (see Anfara et al. 2002) of our research.For example, in addition to prolonged engagement and observation in the field, interviews were recorded when informants agreed.In a few cases only, field notes were taken.Recorded interviews were transcribed verbatim for analysis.In an effort to assure data quality, when possible, transcripts and cursory analysis were subjected to member checks (Creswell and Miller 2000).Throughout the analytic process, transcripts and field notes were subjected to multiple readings to identify common themes.These were identified through words, sentences, or other units of information that constitute recurrent patterns in interviews with most, if not all, subjects.We viewed such themes as storylines in the emerging community narratives.Though themes are intrinsic to the information provided by the subjects, the researchers identified and then coded them according to concepts or easily remembered words that conveyed similar meanings.In other words, we reconciled our understanding of an emerging larger story with each subject's individual story.Each interview provided information that either enhanced or cast doubt on emerging storylines.Depending on the degree of doubt, coded themes were modified or even discarded as new evidence emerged across interviews (see Miles et al. 2014).As the analytic process progressed, our research team continued coding data to help to ensure that interview procedures and field observations reflected the concepts and themes found in the interview data.Ultimately, through a triangulation of prolonged engagement in the field, member checks, and insights gleaned from a broad range of perspectives, we determined the salience of the coded themes by the attention and importance given to them in the interviews.In an effort to ensure the validity and reliability of coding, multiple team members would compare coding decisions and work together to continually refine our analytic process (Berg 2008).To further safeguard the integrity of emergent patterns, we use representative-often lengthy-quotes to exemplify the themes we identified in the residents' narratives, focusing on how residents perceived changes in their community and, in turn, how they modified their community stories to reconcile these changes.
Residents' Stories about Their Community
The following are excerpts from the residents' narratives that describe their perceptions of what their community was before the announcement and building of the Mercedes-Benz plant.
Sense of Belonging, Identity and Security
Interviewees felt strongly that they belonged to an identifiable community in which they had a secure place and where they knew the other members who belonged to it.One resident commented: "I could ride up the road and wave at all the people I knew . . .wave at friends.It was just a friendly community, period".Another said: "Everybody knows everybody, they're friends".There is a strong sense of longevity to the community in these narratives.People have been "rooted" to the place for generations and still identify specific places with long-time family names and histories: "Vance is a community where everyone knows one another.Most of the people of Vance have lived here all of their lives.Their parents lived here.Their grandparents have lived here.It's that kind of community".One long-time community resident described family properties that link multiple family members and generations to place and community: Here's the place my mother and father moved back to when he quit saw milling in Birmingham.They moved back here and farmed it with my grandparents-another uncle lived here and another in that house up there.The next house here on the right, my mother's uncle and his family lived there.My first cousin and her husband own this place here.So, due to those large families and bean farmers, they had a lot of property through here that was in the family.This place right here, I'm told, was bought for $1000 an acre-that's before Mercedes-there's 359 acres of it.
Significantly, interviewees also felt that they could call on their "community" generically if help was needed and that word would get out quickly through informal channels of communication: Well, it's like this-if I broke a leg I can make one phone call to somebody and they'd be feeding my animals, they'd never miss a meal-I could make another phone call to someone else if my truck broke or my car broke and it would be fixed.We've always had a certain amount of dependence on the other one's ability of what they could do and would do if we got in a bind.
Another interviewee also commented on the access to local resources and help: "If I needed help wiring up a plug in my house, I didn't have to call in an electrician, I just kind of mentioned that I had to do this down at the store and somebody was there".
The community was often characterized as "family".It was seen as close-knit and trustworthy.One resident described trust for his neighbors and residents who lived close by: There ain't nothin' no stronger than a family or community from my point of view.People knowing that there's people out there they can trust that's your neighbor, not necessarily your neighbor but the man down the street.He might be a mile away, but if you need him you know he's there.He might not come to see you every week.You might not see him for a month but you know if you need that fella he's there.
One resident described the closeness of neighbors who, like family, came to help without being asked: Even though we all live in our own houses, homes, it was like a big family.I know people who live two or three miles up the road and they know me.Vance is the type of place where if anyone had a problem, needed help, they wouldn't have to ask . . .Vance is the type of place that when you would go into the town of Vance, you threw you hand up and said "Hi" to everybody.It was like one big family.This narrative about people pitching in to help was tied to the small size and intimacy of the community by other interviewees.As mournfully expressed by one long-time resident, this sense of closeness is accompanied by a sense of communal loss associated with the recent changes: Here in this community, we help each other when needed-young and old alike.We won't have that anymore.You don't have it in Birmingham.You don't have that in Tuscaloosa, except maybe in secluded neighborhoods.Everybody here knows everybody and if you get in trouble, you don't have to go far to find somebody that'll help you when you need it.Everybody knows whose kids is whose, so we take care of and look out for each other's kids, we help each other out when we have trouble.It's been that way ever since I've been here.We moved here in [1930s]-I was five years old.It's always been a close-knit community.I care as much about these kids that run around here as I do my own.I just think that everybody needs to help raise our kids-we won't have that again.
A sense of security was mentioned frequently.For example, one resident said: "I left my key in my car".Another said simply, "We were secure".In both cases, the sense of security was associated with the community's size and the fellowship of residents-although one resident explained that this sense had shifted with recent changes in the community: "Until very recent history, I didn't lock the doors to my house".
Community Change
When asked how their community had changed since the arrival of Mercedes-Benz, the narrative became more complicated, indicating nuanced themes.Analysis of the interviews identified five primary sub-themes: Displacement, Insecurity, Loss of autonomy, Strain on existing friendships, and Loss of a small-town feeling.
Displacement.People juxtaposed their feelings of belonging before the building of Mercedes-Benz and their sense of displacement afterward.Their narratives often reveal the loss of personal identity as identification with their changed community: "My feeling down there is not good.I don't feel like I belong here.I exist.You've read the story in school called 'Man Without a Country?' Okay, that's what you feel like; you feel like your country has stepped on you".This feeling was particularly acute if people had lived in Vance all of their lives: You don't belong.I mean, where we were, I had been there all my life.My whole life was within a two-mile radius, which a lot of people may think this is stupid, but you had your life in a two-mile radius.The church was a mile from the house.I worked at the school that was two miles from my house.And so everything, my whole life, was right there in that two-mile radius.And when that's always been, you know it's hard to readjust.
Insecurity.After the plant was operating, instead of the factory providing a general sense of security, personal safety and safeguarding property became an obsession; the sense of communal trust had evaporated: My whole attitude of life has changed.My attitude of life before this happened was that you come to my house and you said I want to buy this or I want to buy that or can I sell you this . . .I didn't need a piece of paper.I didn't need to know your background.I didn't need to know who you were.We shook hands on it and that was fine with me.I don't trust nobody no more. . . .My faith in people has went from here to way down.
Another resident similarly described the new sense of distrust: "I just don't have any faith and that's a terrible thing not to have".
Familiar routines were displaced by new ones that residents have yet to reconcile. The following illustrates this new sense of insecurity: They're not as secure as they used to be. I mean you take a road that had twenty cars a day on it and you knew every one of them to a hundred twenty cars a day and you don't know but about ten of 'em. You don't feel as secure. I wouldn't. I don't feel as secure . . . here. I lock my door. I take my key out of the car now. I don't leave my lawn mower sittin' outside. My boat's locked up. And there's nothing wrong with this community, not a thing in the world; it's just my faith in people has went to nothin'. It's terrible, but that's the truth.
One respondent who lost his house to the state so that the latter could build an off-ramp for the new plant said this about insecurity: Well, I just don't think the ones that are left feel secure anymore.They don't know when somebody's gonna come in and, you know, like the highway department's gonna come through and widen the roads there and they're gonna have to move.I don't think anybody is secure anymore, and you talk to people now and you tell them how we were done and they say, 'Why, I can believe they can do that, that they can just come in and make you move and you not want to move.'People just really do not realize the state has that power.
Loss of Autonomy.Associated with an increased sense of insecurity is a feeling of losing control over one's future.The Mercedes-Benz plant physically dislocated many households.One resident commented, "I thought that I had enough power that what was mine was mine, but now I've found out it's only if somebody else doesn't want it.I don't have any".Another stated, "It wasn't that you'd lost your home.That wasn't no consideration to them whatsoever.The only thing people had to say was, 'Did you get a fair price?'That was all they cared about".
Strain on Friendships.The previous quote shows that even old friendships were strained as the new plant began to take shape.Some people wondered why their friends did not care about their displacement, only whether the change turned out well financially and how the new industry could be economically beneficial.
Well, as soon as people heard, they decided they wouldn't sell.If you were sitting somewhere and a big plant was going in, what would you do?Are you going to sell it for $500 an acre when next year it may be worth $20,000?I understood both sides because I was a landowner, and I also understood the people, my very good friends, that were being displaced.I understood their problems in not being able to stay in the community in which they have lived all their life like I have.I realized that we need industry, and I'm all for that.The changes also affected people's relationships in informal organizations, such as church.Old friends moved out of the area served by the local churches: It has torn the churches up because so many of the staunch back-bone Christians had to move.Most of us around here are Baptist.They couldn't find anything around here-two or three of the couples ended up in Brookwood and had to join that church; some ended up in Coaling, some in Tuscaloosa-they were scattered everywhere.
Loss of a Small-Town Feeling.Perhaps the most frequently cited change in the emerging community narrative post Mercedes-Benz was the loss of a small-town feeling.Residents expressed contradictory feelings about the economic benefits the community had and their emotions about losing their community: even when residents said they knew the town needed more industry, they yearned for the small and intimate feeling they fear will be lost.
Oh, I'm glad.We are all glad.We have lost quite a number of large industries and we are glad for [Mercedes Benz] to come anywhere in Alabama.And, of course, we were most glad that it came to Tuscaloosa County.We realized when they told us that it was coming here that we were going to have to sacrifice; we knew this.We knew that our little town as we knew it was gone.I've known everybody all my life, and 35 people were displaced and found that they could not buy land anywhere, they had to move out of the area-I mean good people.
Another resident described the loss this way: "Vance is slipping away.Vance is not as it once was at this point, it's not the same since Mercedes Benz".
People felt that, as friends left the community, even if they moved to the next county, the sense of community was diminished: We've had too many friends and family go.They haven't gone far, but I cannot get in my truck and spend one minute to get to some of my friends' house, my very close friends.I can't do that anymore.Long distance to call, fifteen or so minutes to get there.Which it don't sound like far, but it's a long cry from what it was.Some also were concerned that Vance would simply become an industrial park: We were talking about spin-off industries a while ago.If Mercedes-Benz grows and does what they have projected it to do, there is going to be more of an invasion, more land taking, more families that will have to go, more roads that will come through and I think they're thinking more about that end of it.In other words, Vance will not be Vance anymore.It will be almost non-existent.You're looking at a giant industrial park.
Where opposition was expressed toward the Mercedes-Benz plant, it tended to be about losing the small-town atmosphere: The opposition of Mercedes is on that basis.Folks don't want to lose the small-town flavor and the convenience of the big city next door.We fear the fact that Tuscaloosa will come out, and already is, and imposing these new regulations and we're losing the right and the freedom to do with our property as we see fit.
The loss of the small-town feeling is also related to spatial changes.As one resident described: There're other factors that need to be looked at.I've lived here sixteen years.I can go out my door and look any direction and I don't see a neighbor.That's the way I chose to live.That's the way I choose to live now.I have been denied that right if I have to live in a subdivision.That's not the way I want to live.I want to live in a private atmosphere like I live now.Not because I'm antisocial, it's what I like.
Others expressed similar views: "When we first got here we tried town living, but when you're out on your curb and somebody's looking, you know, it's uncomfortable if you're not used to it.We weren't in town very long".Another resident said, "When somebody's up against you, you can't walk out in your yard without your shirt on and somebody seeing and being offended by it, you know.That's the kind of atmosphere I don't want to live in".There was, in addition, the ever-present fear of becoming a Tuscaloosa suburb: The first remark I made when they announced it was you can kiss Vance goodbye.In the end the economic factors will force Vance probably to be absorbed into the city of Tuscaloosa or Birmingham or some other large municipality-most likely Tuscaloosa because Tuscaloosa will be able to provide the people that will be here the services they need and desire.Vance probably will not have enough tax-base to support what the people here will want in services-especially as it grows.You'll have then people who will not be spending their money in Vance but will be demanding services from Vance where they live.It's like my piggy bank-it can't sustain me without my putting something back in it for so long.So, the purse strings will tighten-Vance won't be able to do it because they won't have the income and the people will say if you can't do it, we'll get Tuscaloosa to do it.And it has happened in other communities in other areas.
The relationship to Tuscaloosa has been for Vance residents to maintain a separate identity but to benefit from the larger town's proximity.The convenience will remain, but the identity will erode: My personal opposition to it is basically this is home, this is where I intended to raise my children because I like the community.I like the convenience of everything.I like the small-town atmosphere and the convenience of being able to shop not 20 miles away and run back.I can, in 25 min, be in three different Walmart stores-there's not many places in the country that you can be in three big super stores or Walmart stores in 25 min.When people start moving in here along with business that might come, all this will disappear.It won't be no small town that convenient to the big city anymore.
Conclusions
This study offers a theoretical discussion for conceptualizing "community as story"-narratives that create and recreate a definition of and relationship to community. Community narratives are typically reflected in stories that reminisce about what the community was and speculate about what it will be, and become part of a new community story. We used a variety of disciplinary sources and quotes to show how community as story can be seen as a dynamic process in which individual and collective narratives are combined to form a mythic structure that shapes the residents' perceptions. Additionally, we discussed how community stories occur in time and place, and how, as changes to the place occur, residents are compelled to adjust their stories, their identities and definitions. Using Vance, Alabama, as a historical empirical illustration of the community as story approach, we explored the changing nature of this community after the construction of a manufacturing plant. In this case, individual subjective experiences of change in Vance contributed to rewriting the story of the community as residents dealt with a sense of loss, feelings of displacement, insecurity, loss of autonomy, relationship strain, and the erosion of a small-town feeling. While our use of this case study was aimed solely at providing an empirical illustration of our broader theoretical discussion for conceptualizing "community as story", it nevertheless points to future ways in which researchers can refine understanding of how community stories are rewritten over time. For example, our data do not allow a clear understanding of community narratives that existed prior to the installation of the Mercedes-Benz plant; rather, we focus on a "contemporary" rewriting of community transpiring in the face of social change. Furthermore, this historical case study was attuned to specific actors who occupied and inherited stories at a particular place and time; what stories are being told in the same place today?
One final dip into the proverbial Jell-O community: "All communities . . . are imagined" (Anderson 2006, p. 6). Envisioning community as story highlights the dynamic and subjective relationship residents have with their imagined communities and the places where those relationships happen. A continuing narrative allows people to reconcile changes in their community, to imagine a new community as they perceive a shift from a former one. Their story may not match the emergent meta-narrative, but they will be familiar with it and, indirectly, contribute to it. These narratives
|
### pytorch-lightning-conference-seed
Use this seed to refactor your PyTorch research code for:
- a paper submission
- a new research project.
[Read the usage instructions here](https://github.com/williamFalcon/pytorch-lightning-conference-seed/blob/master/HOWTO.md)
#### Goals
The goal of this seed is to give ML paper code a common structure so that work can easily be extended and replicated.
###### DELETE EVERYTHING ABOVE FOR YOUR PROJECT
---
<div align="center">
# Your Project Name
[](https://www.nature.com/articles/nature14539)
[](https://papers.nips.cc/book/advances-in-neural-information-processing-systems-31-2018)
[](https://papers.nips.cc/book/advances-in-neural-information-processing-systems-31-2018)
[](https://papers.nips.cc/book/advances-in-neural-information-processing-systems-31-2018)
<!--
ARXIV
[](https://www.nature.com/articles/nature14539)
-->
<!--
Conference
-->
</div>
## Description
What it does
## How to run
First, install dependencies
```bash
# clone project
git clone https://github.com/YourGithubName/Your-project-name
# install project
cd Your-project-name
pip install -e .
pip install -r requirements.txt
```
Next, navigate to [Your Main Contribution (MNIST here)] and run it.
```bash
# module folder
cd src/
# run module (example: mnist as your main contribution)
python simplest_mnist.py
```
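If it helps to see what such a module might look like, below is a minimal, hypothetical sketch of a `simplest_mnist.py`-style script. It is not this seed's actual code: the class name, layer sizes, and the inline `Trainer` call are assumptions for illustration only.

```python
# Hypothetical sketch of a minimal Lightning module; names and hyperparameters
# are illustrative and not taken from this repository.
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import pytorch_lightning as pl


class SimplestMNIST(pl.LightningModule):
    def __init__(self, hidden_dim: int = 128, lr: float = 1e-3):
        super().__init__()
        self.save_hyperparameters()
        # a tiny fully-connected classifier for 28x28 MNIST digits
        self.net = nn.Sequential(
            nn.Flatten(),
            nn.Linear(28 * 28, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 10),
        )

    def forward(self, x):
        return self.net(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = F.cross_entropy(self(x), y)
        self.log("train_loss", loss)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)


if __name__ == "__main__":
    train_ds = datasets.MNIST(".", train=True, download=True,
                              transform=transforms.ToTensor())
    trainer = pl.Trainer(max_epochs=1)
    trainer.fit(SimplestMNIST(), DataLoader(train_ds, batch_size=64))
```

Swap the dataset and the `LightningModule` internals for your own contribution; the surrounding project structure stays the same.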
## Main Contribution
List your modules here. Each module contains all the code for a full system, including instructions on how to run it.
- [Production MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/production_mnist)
- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/research_mnist)
## Baselines
List your baselines here.
- [Research MNIST](https://github.com/PyTorchLightning/pytorch-lightning-conference-seed/tree/master/src/research_mnist)
### Citation
```
@article{YourName,
title={Your Title},
author={Your team},
journal={Location},
year={Year}
}
```
|
Tadaiķi Parish
Tadaiķi Parish (Tadaiķu pagasts) is an administrative unit of South Kurzeme Municipality, Latvia. The parish has a population of 987 (as of 1/07/2010) and covers an area of 77.6 km2.
Villages of Tadaiķi parish
* Aistere
* Brenči
* Lieģi (Jaunlieģi)
* Šukteri
* Vārve
|
[Federal Register Volume 75, Number 38 (Friday, February 26, 2010)]
[Page 8976]
[FR Doc No: 2010-3918]
-----------------------------------------------------------------------
DEPARTMENT OF HEALTH AND HUMAN SERVICES
National Institutes of Health
National Institute of Dental & Craniofacial Research; Notice of
Closed Meeting
Pursuant to section 10(d) of the Federal Advisory Committee Act, as
amended (5 U.S.C. App.), notice is hereby given of the following
meeting.
The meeting will be closed to the public in accordance with the
provisions set forth in sections 552b(c)(4) and 552b(c)(6), Title 5
U.S.C., as amended. The grant applications and the discussions could
disclose confidential trade secrets or commercial property such as
patentable material, and personal information concerning individuals
associated with the grant applications, the disclosure of which would
constitute a clearly unwarranted invasion of personal privacy.
Name of Committee: National Institute of Dental and Craniofacial
Research Special Emphasis Panel; Review of R13 Application for NIH
Support of Conferences and Scientific Meetings.
Date: March 26, 2010.
Time: 1 p.m. to 5 p.m.
Agenda: To review and evaluate grant applications.
Place: National Institutes of Health, One Democracy Plaza, 6701
Democracy Boulevard, Bethesda, MD 20892 (Virtual Meeting).
Contact Person: Victor Henriquez, PhD, Scientific Review
Officer, DEA/SRB/NIDCR, 6701 Democracy Blvd., Room 668, Bethesda, MD
20892-4878, 301-451-2405, [email protected].
(Catalogue of Federal Domestic Assistance Program Nos. 93.121, Oral
Diseases and Disorders Research, National Institutes of Health, HHS)
Dated: February 19, 2010.
Jennifer Spaeth,
Director, Office of Federal Advisory Committee Policy.
[FR Doc. 2010-3918 Filed 2-25-10; 8:45 am]
BILLING CODE 4140-01-P
|
'''
streamlit-exp-v2.py
'''
import numpy as np
import pandas as pd
import streamlit as st
from pathlib import Path
import seaborn as sns
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, CategoricalColorMapper
def gen_random_data():
    '''
    Generates random data to represent a high-dimensional data set.
    dimensions: (1000, 769)
    '''
    np.random.seed(8)
    # generate column names, data and label names
    column_names = ['col_' + str(x) for x in range(1, 769)]
    data = np.random.uniform(-10, 10, size=(1000, 768))
    label_names = ['label_' + str(x) for x in range(1, 21)]
    # insert numpy array, return pandas dataframe
    df = pd.DataFrame(data, columns=column_names)
    # generate list of labels and randomly insert into pandas dataframe
    labels = np.random.choice(label_names, size=len(df))
    df.insert(loc=0, column='label', value=labels)
    return df
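# NOTE: gen_random_data() above is not called in main(); per its docstring it
# produces a synthetic high-dimensional frame, presumably kept for testing.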
@st.cache
def gather_umap_data():
    '''
    Gather UMAP data from pseudo-embeddings, filename1 and filename2
    will be updated in near future to reflect name in config file
    (which will most likely have to do with some sort of database
    because of the size of the embeddings and their UMAP version)
    '''
    cwd = Path.cwd()
    filename1 = 'all_umap_pseudo_embeddings.json'
    filename2 = 'centers_umap_pseudo_embeddings.json'
    path_data = cwd/'data'/'current_centroid_data'
    df_all = pd.read_json(
        path_data/filename1,
        orient='columns'
    )
    df_centers = pd.read_json(
        path_data/filename2,
        orient='columns'
    )
    return df_all, df_centers
def bokeh_plot(df_all):
    label_count = len(df_all['label'].unique())
    color_scheme = sns.color_palette(
        'Paired',
        label_count
    ).as_hex()
    datasource_all = ColumnDataSource(df_all)
    # datasource_center = ColumnDataSource(df_centers)
    color_mapping = CategoricalColorMapper(
        factors=[
            str(x) for x in df_all['label'].unique()
        ],
        palette=color_scheme
    )
    TOOLTIPS = [
        ('X', '@x'),
        ('Y', '@y'),
        ('Label', '@label')
    ]
    p = figure(
        plot_width=800,
        plot_height=800,
        tools="pan,wheel_zoom,reset,save",
        tooltips=TOOLTIPS
    )
    p.circle(
        'x',
        'y',
        source=datasource_all,
        color=dict(field='label', transform=color_mapping),
        size=8,
        line_color='grey',
        line_alpha=0.5
    )
    st.bokeh_chart(p, use_container_width=True)
def main():
    df_all, df_centers = gather_umap_data()
    st.title('Test Visualization App')
    option_all = st.sidebar.selectbox(
        'See all data?',
        ('True', 'False')
    )
    option_center = st.sidebar.selectbox(
        'See centroid data?',
        ('True', 'False')
    )
    bokeh_plot(df_all)
    if option_all == 'True':
        st.dataframe(df_all)
    if option_center == 'True':
        st.dataframe(df_centers)
if __name__ == '__main__':
    main()
|
[Federal Register Volume 72, Number 62 (Monday, April 2, 2007)]
[Page 15767]
[FR Doc No: E7-6030]
-----------------------------------------------------------------------
DEPARTMENT OF VETERANS AFFAIRS
[OMB Control No. 2900-0099]
Proposed Information Collection Activity: Proposed Collection;
Comment Request
AGENCY: Veterans Benefits Administration, Department of Veterans
Affairs.
ACTION: Notice.
-----------------------------------------------------------------------
SUMMARY: The Veterans Benefits Administration (VBA), Department of
Veterans Affairs (VA), is announcing an opportunity for public comment
on the proposed collection of certain information by the agency. Under
the Paperwork Reduction Act (PRA) of 1995, Federal agencies are
required to publish notice in the Federal Register concerning each
proposed collection of information, including each proposed extension
of a currently approved collection, and allow 60 days for public
comment in response to the notice. This notice solicits comments on the
information needed to request a change of education program or place of
training.
DATES: Written comments and recommendations on the proposed collection
of information should be received on or before June 1, 2007.
ADDRESSES: Submit written comments on the collection of information
through http://www.Regulations.gov or to Nancy J. Kessinger, Veterans
Benefits Administration (20M35), Department of Veterans Affairs, 810
Vermont Avenue, NW., Washington, DC 20420 or e-mail to
[email protected]. Please refer to ``OMB Control No. 2900-0099''
in any correspondence. During the comment period, comments may be
viewed online through the Federal Docket Management System (FDMS) at
www.Regulations.gov.
FOR FURTHER INFORMATION CONTACT: Nancy J. Kessinger at (202) 273-7079
or FAX (202) 275-5947.
SUPPLEMENTARY INFORMATION: Under the PRA of 1995 (Public Law 104-13; 44
U.S.C. 3501-3521), Federal agencies must obtain approval from the
Office of Management and Budget (OMB) for each collection of
information they conduct or sponsor. This request for comment is being
made pursuant to Section 3506(c)(2)(A) of the PRA.
With respect to the following collection of information, VBA
invites comments on: (1) Whether the proposed collection of information
is necessary for the proper performance of VBA's functions, including
whether the information will have practical utility; (2) the accuracy
of VBA's estimate of the burden of the proposed collection of
information; (3) ways to enhance the quality, utility, and clarity of
the information to be collected; and (4) ways to minimize the burden of
the collection of information on respondents, including through the use
of automated collection techniques or the use of other forms of
information technology.
Title: Request for Change of Program or Place of Training--
Survivors' and Dependents' Educational Assistance, (Under Provisions of
Chapter 35, Title 38, U.S.C., VA Form 22-5495).
OMB Control Number: 2900-0099.
Type of Review: Extension of a currently approved collection.
Abstract: Spouses, surviving spouses, or children of veterans who
are eligible for Dependent's Educational Assistance, complete VA Form
22-5495 to change their program of education and/or place of training.
VA uses the information collected to determine if the new program
selected is suitable to their abilities, aptitudes, and interests and
to verify that the new place of training is approved for benefits.
Affected Public: Individuals or households.
Estimated Annual Burden: 12,646 hours.
Estimated Average Burden per Respondent: 20 minutes.
Frequency of Response: On occasion.
Estimated Number of Respondents: 38,418.
Dated: March 23, 2007.
By direction of the Secretary.
Denise McLamb,
Program Analyst, Records Management Service.
[FR Doc. E7-6030 Filed 3-30-07; 8:45 am]
BILLING CODE 8320-01-P
|
plugins/catppuccin!: update integrations
This PR updates catppuccin integrations and sorts them into alphabetical order.
I think we need a type which would allow both `integrations.mini = enable` and `integrations.mini.enabled = true`. Catppuccin does this at compile time.
|
[Congressional Record Volume 163, Number 57 (Monday, April 3, 2017)]
[House]
[Page H2629]
By Mr. AMASH:
H.R. 1850.
Congress has the power to enact this legislation pursuant
to the following:
Article I, Section 8, Clause 7 states: ``The Congress shall
have Power . . . To establish Post Offices and post Roads.''
|
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class UpDown : MonoBehaviour
{
    public float vertical = 1;
    public bool stop;
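    // NOTE: 'stop' is declared but never referenced in Update(); presumably
    // intended as a flag to pause movement (assumption, not used below).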
    public float speed;
    // Start is called before the first frame update
    void Start()
    {
    }
    // Update is called once per frame
    void Update()
    {
        transform.Translate(Vector3.up * speed * Time.deltaTime * vertical);
        if (transform.position.y <= -3.2f)
        {
            vertical = 1;
        }
        else if (transform.position.y >= 3.2f)
        {
            vertical = -1;
        }
    }
}
|
East of Kadiak Island, Alaska. South of Queen Charlotte Islands. Off Gray's Harbor, Washington.
below under that species.
The species to which borealis (and especially the Californian form) shows the greatest resemblance is Perrier's Crossaster australis from the Straits of Magellan. The differences are evidently not great and it is not at all improbable that the ranges of the two will some day be found to run together. The west coast of South America is not as yet well worked in deep water. Crossaster nephini Bell may be a link in this chain. Solaster australis differs in having a single papula, not several, in each mesh of the skeleton, especially on disk; in having a series of suboral spines (as in hypothrissus); and in having less prominent marginal plates with shorter spines. There seem to be differences in the paxillae, judging by Perrier's figure, but it is perhaps not safe to make comparisons without specimens. Australis has nine or ten rays which are about as slender as in Californian specimens of borealis though longer (R = 3.25 r).
|
windows 7 64 bit installation on ssd crucial 300c can't install
I have a small problem.
Today my Crucial C300 arrived. Now I want to do a clean install of Windows 7 64-bit. After firing up the DVD, I get a screen which says that it needs drivers for my hardware. Why can't I install?
I have the following specs
Biostar i55 motherboard
SATA 1 - Crucial C300
SATA 2 - CD/DVD drive
Processor Intel i860
I tried the following (where shall I begin):
BIOS settings:
SATA -> AHCI
SATA -> IDE
SATA -> RAID0
Downloaded the AHCI/RAID drivers from Biostar's website for my motherboard, copied them to a USB device, and pointed the Windows 7 installer's driver browser to a compatible driver; no luck.
The copy of Windows 7 I used was badly burned. I burned a new one at 22 KBPS and could install Windows 7 64-bit without a problem. Same ISO.
just a thought - you might want to consider deleting the question...
Maybe this answer could be relevant to other people, so I think it's best to let it stay. I spent 3 hours of trial and error until I read something about a badly burned disc. I tried the same ISO again, this time using WinISO to burn a new copy. No problems with installing.
It seems that your BIOS settings might be wrong. If there is a RAID0 option, I would usually just set the SATA mode to AHCI or RAID, depending on your motherboard. However, there might be a compatibility problem between the motherboard and the new disk you just bought. I would suggest you upgrade the motherboard firmware and BIOS version too.
|
Video Games as experiential learning tools
Please add to our database of video games that you think you could use in your classroom.
__Curricular Links__
Language Arts
Social
Science
Math
|
[Congressional Record (Bound Edition), Volume 153 (2007), Part 9]
[House]
[Page 13135]
PERMISSION TO REDUCE TIME FOR ELECTRONIC VOTING DURING CONSIDERATION OF
H.R. 1427, FEDERAL HOUSING FINANCE REFORM ACT OF 2007
Mr. SPRATT. Mr. Speaker, I ask unanimous consent that, during
consideration of H.R. 1427, pursuant to House Resolution 404, the Chair
may reduce to 2 minutes the minimum time for electronic voting under
clause 6 of rule XVIII and clauses 8 and 9 of rule XX.
The SPEAKER pro tempore. Is there objection to the request of the
gentleman from South Carolina?
There was no objection.
____________________
|
Ranger Sequoia
- NCR Veteran Ranger
The Ranger Sequoia is a weapon in Fallout: New Vegas.
Characteristics
Variants
* Hunting revolver, a less powerful, yet more valuable scoped variant of the Ranger Sequoia.
Locations
* White Legs in Zion Canyon sometimes carry this weapon.
Bugs
Behind the scenes
Gallery
"Секвойя" рейнджера
|
How to create an object from XML
How do you create an object from an XElement? I can create a list, but I want a single object, not a list.
Here is my code:
XElement elem = XElement.Load(path);
var myList = from n in elem.Descendants("NodeNumber1")
select new
{
Name = n.Attribute("Name").Value,
MyObj = from o in n.Descendants("NodeChild")
select new
{
var1 = o.Descendants("var1").FirstOrDefault().Value,
var2 = o.Descendants("var2").FirstOrDefault().Value,
}
};
NodeChild appears in NodeNumber1 only once, so I want it as an object and not as a list. var1 and var2 are also defined only once in NodeChild, but they are not a problem because I use FirstOrDefault().
How can I create it as a single object and not as a list?
Well you've got two "levels" of output here - one with Name/MyObj, and one with var1/var2. Which do you want to make singular? (Sample XML and expected output would be useful.)
The Name is already singular; MyObj is a list which has var1/var2, and I want this list to become a single object.
So you want myList to still be a list? Can you see how your question is confusing, and could have been much more clearly described, with sample XML and expected output? Please bear this in mind for next time.
var axe = elem.Descendants("NodeNumber1")
              .Select(n => new
              {
                  Name = n.Attribute("Name").Value,
                  // Take the single NodeChild as one object instead of a sequence.
                  MyObj = (from o in n.Descendants("NodeChild")
                           select new
                           {
                               var1 = o.Descendants("var1").FirstOrDefault().Value,
                               var2 = o.Descendants("var2").FirstOrDefault().Value,
                           }).FirstOrDefault()
              })
              .First();
Or, using your existing query:
var axe = myList.First();
Your suggestion worked. I edited your answer because I wanted the inner list to become a single object, not the outer one.
|
// Copyright 2021 Rustamov Azer
#ifndef MODULES_TASK_4_RUSTAMOV_A_HISTOGRAM_STRETCH_HISTOGRAM_STRETCH_H_
#define MODULES_TASK_4_RUSTAMOV_A_HISTOGRAM_STRETCH_HISTOGRAM_STRETCH_H_
#include <vector>
using Matrix = std::vector<int>;
// Generates a random grayscale image of size w x h with intensities in [min_y, max_y].
Matrix generate_random_image(int w, int h, int min_y = 30, int max_y = 192);
// Splits data_size elements across threads, filling per-thread limits and counts.
void data_distribution(const int data_size, std::vector<int>* limits,
    int* num_threads, int* count);
// Builds an intensity histogram of the image.
Matrix make_histogram(const Matrix& image, int w, int h);
// Lowest and highest intensities present in the histogram.
int get_min_y(const Matrix& histogram);
int get_max_y(const Matrix& histogram);
// Finds the minimum and maximum intensities of the image (parallel "std" version).
void get_min_max_y_std(const Matrix& image, const int& h, const int& w,
    int* min_y, int* max_y);
// Stretches the histogram so that [min_y, max_y] covers the full intensity range.
Matrix stretch_histogram(const Matrix& histogram,
    const int& min_y, const int& max_y);
// Applies the contrast stretch to the image (sequential version).
Matrix increase_contrast(const Matrix& image, int w, int h,
    const int& min_y, const int& max_y);
// Processes one contiguous part of the image; used by the parallel version.
void increase_contrast_part(const Matrix* image, Matrix* result,
    int start, int size, int min_y, int max_y);
// Applies the contrast stretch to the image (parallel "std" version).
Matrix increase_contrast_std(const Matrix& image, int w, int h,
    const int& min_y, const int& max_y);
// Full histogram-stretch pipeline: sequential and parallel variants.
Matrix histogram_stretch_algorithm(const Matrix& image, const int w, const int h);
Matrix histogram_stretch_algorithm_std(const Matrix& image, const int w, const int h);
#endif // MODULES_TASK_4_RUSTAMOV_A_HISTOGRAM_STRETCH_HISTOGRAM_STRETCH_H_
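For reference, a minimal Python sketch of the linear contrast stretch that these declarations suggest; the formula and the meaning of the parameters are assumptions based on the names above, not taken from the implementation file.

def stretch_pixel(y, min_y, max_y, out_min=0, out_max=255):
    # Linear histogram stretch: map [min_y, max_y] onto [out_min, out_max].
    if max_y == min_y:
        return y  # degenerate histogram: nothing to stretch
    return (y - min_y) * (out_max - out_min) // (max_y - min_y) + out_min

# With min_y = 30 and max_y = 192 (the defaults of generate_random_image above),
# an intensity of 111 maps to 127, i.e. roughly the middle of the output range.
print(stretch_pixel(111, 30, 192))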
|
Behaviour of Glass Fibre Based High Performance Concrete External Beam-Column Joints under Monotonic Loading using ANSYS Analysis
The strength and ductility of structures depend primarily on appropriate detailing of the reinforcement; beam-column joints play a vital role in the structural reliability of a structure, provided they have appropriate stiffness and ultimate strength to sustain the loads transmitted from the beams and columns. A beam-column joint is the portion of a reinforced concrete building where a beam and a column intersect. When forces greater than the design forces act during earthquakes, joints are severely damaged, so research on beam-column joints is essential for earthquake resistance. In HPC, the constituent materials and admixtures are carefully selected and proportioned to produce very high early and ultimate strengths and durability beyond those of conventional concrete. Admixtures such as fly ash, silica fume and ground granulated blast furnace slag (GGBFS) contribute to strength and durability and improve marketability as an environmentally friendly product. The main purpose of the present study is to investigate the performance of high performance reinforced beam-column joints in which cement is partially replaced with GGBFS. GGBFS is used as a partial replacement of cement together with glass fibre, and a superplasticizer is applied to achieve the required workability. In this study, a control specimen is compared with a beam-column joint specimen with 7.5% GGBFS and 0.3% glass fibre replacement, designed as per IS 456:2000 and IS 13920:2016. The study also examines the performance of these beam-column joints subjected to monotonic loading for high performance concrete made with GGBFS and glass fibre.
Introduction
The importance of concrete structures with high ductility has been demonstrated again by earthquakes in various parts of the world. Ductility is the ability of reinforced concrete sections, elements and structures to absorb the large energy released during tremors without losing their strength under large and reversible deformations. The strength and ductility of structures depend mostly on appropriate detailing of the reinforcement in beam-column joints. Beam-column joints play a critical role in the structural integrity of a structure, provided they have sufficient stiffness and strength to withstand the loads transferred from the beams and columns.
Beam-column joints are critical zones for the transfer of loads. When forces larger than the design forces are applied during earthquakes, joints are severely damaged. HPC is produced using superplasticizers, micro fillers and various types of fibre. Admixtures such as fly ash, silica fume and GGBFS enhance both strength and durability and improve marketability as an environmentally friendly material.
The proportions in which the essential components are mixed, and the admixtures used, constitute the major difference between conventional concrete and HPC. The required low water-cement ratio of 0.30 calls for a high range water reducing admixture.
GGBFS is a non-metallic product consisting of silicates and alumino-silicates of calcium and other bases, developed in a molten condition simultaneously with iron in a blast furnace. Compared with conventional concrete, GGBFS replacement gives lower heat of hydration, greater stability and improved resistance to sulfate and chloride attack from a structural point of view. It reduces the amount of cement used in concrete production, which contributes to environmental protection. The addition of GGBFS to concrete results in a small increase in elastic modulus for a given compressive strength, although the differences are not large enough to be of importance in design.
Material properties and Investigations
Cement of grade 53 used for the investigation was tested for its properties according to IS 4031:1988, giving a specific gravity of 3.15; the other physical properties are within the limits specified by the Indian Standard code.
GGBFS is a latent hydraulic material: with the lime provided by cement, a reaction forming calcium alumino-silicate compounds sets in. As a result, the secondary C-S-H gel formed in addition to the primary binding material contributes to the strength and durability of the structure. Cem-FIL AR glass fibres, a reinforcement developed for concrete, have nearly the same specific gravity as the aggregates, which makes fibre dispersion easier to achieve than with other fibres. According to ASTM standards, the specific gravity is 2.68 g/cm3, the filament diameter is 14 µm and the elastic modulus is 72 GPa.
Locally available river sand passing through a 4.75 mm sieve and retained on a 0.075 mm sieve is used as fine aggregate. The river sand belongs to Zone II as per IS 383-1970. The coarse aggregate is crushed angular granite aggregate passing through a 20 mm sieve and retained on a 4.75 mm sieve. The coarse aggregate conforms to IS 383-1970 and was tested as per IS 2386-1963 for its physical properties: specific gravity of 2.71 and bulk densities of 1465 kg/m3 in the loose state and 1519 kg/m3 in the compacted state.
The superplasticizer CONPLAST SP430, based on sulphonated naphthalene polymers and conforming to IS 9103:1999 and ASTM C 494, is used as the water reducing admixture in this study. The M75 mix is proportioned by weight following the ACI 211.4R-93 mix design. The mix proportioning details are given in Table 1 (Table 1: Mix proportioning of M75 grade).
Numeric Analysis using ANSYS
ANSYS provides the engineering simulation capability that the design process requires. The specimens were tested in a loading frame of 1000 kN capacity, with a constant 150 kN load, about 20% of the axial capacity of the column, applied to the column to hold the specimens in position. A hydraulic jack of 500 kN capacity was used to apply the load at the beam, and a 50 kN load cell was used to measure the applied load accurately, with the load at the end of the beam increased continuously. The deflection of the beam was measured using LVDTs. Incorporating these parameters, the HPC with GGBFS-blended cement and glass fibre was studied with the beam-column joint subjected to monotonic loading; four specimens were cast and tested, and the ultimate load versus deflection at the ultimate crack criterion is tabulated in Table 4. Stiffness, the rigidity of an object, is the extent to which it resists deformation in response to an applied force. Structural stiffness governs the natural period and the seismic forces. Here it is the load required to produce unit deflection of the beam-column joint; stiffness increases from S0 to S1 and then drops for S2, as shown in Fig. 6.
Fig 6. Stiffness behavior of controlled specimens
Displacement ductility is a measure of the imposed post-elastic deformation of a member; it is essential that an earthquake-resistant structure is capable of deforming in a ductile manner when subjected to lateral loads over several cycles beyond the elastic range. In this study the ductility factor is defined as the ratio of the maximum deflection to the yield deflection. For the beam-column joints, displacement ductility increases from S0 to S1, drops for S2 and increases again for S3, as shown in Fig. 7.
Fig 7. Displacement ductility of controlled specimens
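Written out, the ductility factor used above is simply \( \mu = \Delta_{max} / \Delta_{y} \), where \( \Delta_{max} \) is the maximum deflection and \( \Delta_{y} \) the yield deflection (the symbols here are ours, not the paper's).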
Discussions
The deflection values in the ANSYS analysis of the controlled specimens were recorded at the free end of the beam. The load versus deflection behaviour for specimens S0, S1, S2 and S3 shows consistent agreement between the ANSYS and experimental values from the initial crack load to the ultimate load, with the variation falling in the range of 5-10%, as shown in Table 5. Comparison of the load versus deflection results obtained from the ANSYS analysis and from the experimental analysis shows that the ANSYS results are slightly stiffer than the experimental results. The initial crack load achieved in the ANSYS analysis is about 10% lower than the first crack load of 15 kN found in the experimental analysis. The ultimate load obtained in ANSYS is 30 kN, which is 18.88% lower than the ultimate load achieved in the experimental analysis, as shown in Table 5.
Conclusion
In the present study, the M75 concrete mix has been designed with proportions 1:1.04:1.99:0.27. Concrete mixes with various percentage replacement levels of GGBFS in cement were prepared and tested. From the investigation, the following conclusions were drawn:
1. The first crack load of specimen S3 is 34% higher than that of the control specimen S0. This is due to the presence of GGBFS and glass fibre and the ductile detailing provided in the column as per IS 13920. From the experimental results, it can be concluded that improved strength characteristics are obtained with 7.5% replacement of GGBFS and 0.3% of glass fibre.
2. The high performance concrete joints with GGBFS and glass fibre sustain large displacements without developing wide cracks compared with the plain HPC joints, indicating that these materials impart greater ductility to the HPC joints, an essential property of beam-column joints.
3. As per IS 13920:1993, the ultimate load carrying capacity is higher for S3 than for the control specimen S0, owing to the presence of GGBFS and glass fibre and the ductile detailing specified in the column.
4. The ultimate load capacity is much higher for S3 than for the control specimen, and the ultimate load bearing capacity of the joints also improved in the specimen with GGBFS and glass fibre compared with the controlled specimen.
5. When micro-cracks develop in the matrix, the fibres intercept them and prevent them from propagating in the same direction. The cracks then take a diverted path, which requires additional energy for further propagation, resulting in a higher load carrying capacity.
6. Ductile detailing gives better strength, and the load versus deflection results obtained for the controlled and GGBFS specimens show that the yield and ultimate loads have substantially improved for specimen S3. The yield load of 35 kN for specimen S3 represents an increase of 18.63% over the yield load of 30 kN for the control specimen, comparing the IS 456:2000 and IS 13920:1993 specimens.
7. Comparison of the load versus deflection results obtained from the ANSYS analysis and the experimental work confirms that the ANSYS results are slightly stiffer than the experimental results.
8. The load versus deflection results obtained from the ANSYS analysis for the control and GGBFS beam specimens show that the yield and ultimate loads have significantly increased for the IS 13920:1993 GGBFS specimen, with the first crack load, yield load and ultimate load found in the ANSYS analysis being lower than the values obtained experimentally.
|
/**
* Copyright (C) 2009 GIP RECIA http://www.recia.fr
* @Author (C) 2009 GIP RECIA <[email protected]>
* @Contributor (C) 2009 SOPRA http://www.sopragroup.com/
* @Contributor (C) 2011 Pierre Legay <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* ESUP-Portail Commons - Copyright (c) 2006-2009 ESUP-Portail consortium.
*/
package org.esupportail.commons.services.ldap;
import java.util.List;
import org.esupportail.commons.exceptions.GroupNotFoundException;
/**
* The interface of LDAP group services.
*/
public interface LdapGroupService extends BasicLdapService {
/**
* Search a group in the LDAP directory from a unique identifier.
* @param id the identifier
* @return the LdapGroup that corresponds to the given id.
* @throws LdapException
* @throws GroupNotFoundException
*/
LdapGroup getLdapGroup(String id) throws LdapException, GroupNotFoundException;
/**
* Tell if a group matches a filter.
* @param id the group's unique identifier in the LDAP directory
* @param filter the filter
* @return true if the group matches the filter.
* @throws LdapException
*/
boolean groupMatchesFilter(String id, String filter) throws LdapException;
/**
* @param token
* @return The list of LdapGroup that corresponds to a token.
* @throws LdapException
*/
List<LdapGroup> getLdapGroupsFromToken(String token) throws LdapException;
/**
* @param filterExpr
* @return The list of LdapGroup that corresponds to a filter.
* @throws LdapException
*/
List<LdapGroup> getLdapGroupsFromFilter(String filterExpr) throws LdapException;
/**
* @return the attributes to display when searching for groups.
*/
List<String> getSearchDisplayedAttributes();
}
|
Client communications in multi-tenant data center networks
ABSTRACT
The present subject matter relates to client communication in multi-tenant data center networks. In an example implementation, a unicast packet comprises a status indicative of one of a forwarding table-match and a forwarding table-miss at a source tunnel end-point (TEP) in a multi-tenant data center network.
BACKGROUND
A multi-tenant data center network generally involves a virtual internet protocol (IP) sub-network overlaid on an L3 fabric network. The L3 fabric network has multiple tunnel end-points (TEPs) connected in a meshed topology. The virtual IP sub-network has virtual network devices (VNETs) connected to the TEPs of the L3 fabric network. Clients, such as, personal computers, smartphones, tablets, servers, and the like, can connect to the multi-tenant data center network through any of the VNETs, and can communicate with each other through the respective VNETs and TEPs.
BRIEF DESCRIPTION OF DRAWINGS
The following detailed description references the drawings, wherein:
FIG. 1 illustrates a data center network environment, according to an example implementation of the present subject matter;
FIG. 2 illustrates a multi-tenant data center network environment for communication between a source client and a target client, according to an example implementation of the present subject matter;
FIG. 3 illustrates the multi-tenant data center network environment for communication between a source client and a target client, according to an example implementation of the present subject matter;
FIG. 4 illustrates the multi-tenant data center network environment for communication between a source client and a target client, according to an example implementation of the present subject matter;
FIG. 5 illustrates a method of client communication in a multi-tenant data center network, according to an example implementation of the present subject matter;
FIG. 6 illustrates a method of client communication in a multi-tenant data center network, according to an example implementation of the present subject matter;
FIG. 7 illustrates a method of client communication in a multi-tenant data center network, according to an example implementation of the present subject matter;
FIG. 8 illustrates a method of client communication in a multi-tenant data center network, according to an example implementation of the present subject matter; and
FIG. 9 illustrates a system environment, according to an example implementation of the present subject matter.
DETAILED DESCRIPTION
The present subject matter describes communication between clients in a multi-tenant data center network having a virtual IP sub-network overlaid on an L3 fabric network. Clients may be understood as devices, such as personal computers, smartphones, tablets, servers, and the like, that function as tenants to the multi-tenant data center network. Clients can connect to the multi-tenant data center network to communicate with each other.
A multi-tenant data center network, hereinafter referred to as the data center network, may deploy a tunneling scheme for communication across the L3 fabric network. The tunneling scheme may, for example, be based on a virtual extensible local area network (Vx LAN) protocol, a network virtualization using generic routing encapsulation (NVGRE) protocol, a generic network virtualization encapsulation (GENEVE) protocol, or a stateless transport tunneling (STT) protocol. The tunneling scheme allows for the clients to roam across the data center network while retaining their IP address irrespective of the point of connection on the data center network.
The L3 fabric network and the virtual IP sub-network of the data center network are referred to as an underlay network and an overlay network, respectively. The underlay L3 fabric network includes tunnel end-points (TEPs) connected in a meshed topology, such that each TEP is one hop away from any other TEP in the mesh. The overlay virtual IP sub-network includes virtual network devices (VNETs), with one VNET generally connected to one TEP of the underlay network. Clients roaming in the data center network can connect to any of the VNETs, and can communicate with each other through the connected VNETs and TEPs.
The connection between any two TEPs in the mesh is referred to as a tunneling path. According to the tunneling scheme, each TEP in the underlay L3 fabric network maintains a forwarding table with forwarding table (FT) entries for the clients. An FT entry for a client is indicative of a tunneling path applicable for the client, depending on the TEP associated with the client. A TEP is said to be associated with a client, when the client is locally connected to the TEP. A TEP learns about the location of a client and adds an FT entry for the client after receiving data from the client.
When data is to be sent from a source client to a target client, the TEP associated with the source client looks up its forwarding table for the FT entry for the target client. The TEP associated with the source client may hereinafter be referred to as a source TEP. If the FT entry for the target client is present, then the source TEP transmits the data to the TEP associated with the target client based on the tunneling path indicated by the FT entry. The TEP associated with the target client may hereinafter be referred to as a destination TEP. The destination TEP then forwards the data to the target client. If the FT entry for the target client is not present, then the source TEP transmits the data to all other TEPs in the mesh. The transmission of data to all the other TEPs is referred to as “data flooding”. The TEPs that receive the flooded data look up their respective forwarding tables. The TEP having the target client, as a local client, forwards the data to the target client. For the purpose of description herein, the presence of an FT entry for a client in a forwarding table during a forwarding table lookup is referred to as a “forwarding table-match”, and the absence of an FT entry for a client in a forwarding table during a forwarding table lookup is referred to as a “forwarding table-miss”.
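As an illustration of the lookup-and-forward behavior described above, the following minimal Python sketch models a TEP, its forwarding table, and the match/miss decision. The class, method, and field names are assumptions chosen for illustration; they are not taken from the present subject matter or from any tunneling protocol specification.

class Tep:
    def __init__(self, name):
        self.name = name
        self.peers = []               # the other TEPs in the mesh
        self.forwarding_table = {}    # client id -> TEP name, or "local"
        self.inbox = []               # unicast packets received from peers

    def handle_client_data(self, target_client, payload):
        entry = self.forwarding_table.get(target_client)
        if entry == "local":
            return                    # target is attached to this TEP; deliver directly
        if entry is not None:
            # Forwarding table-match: unicast only to the destination TEP.
            dest = next(p for p in self.peers if p.name == entry)
            dest.inbox.append({"src": self.name, "match": True, "payload": payload})
        else:
            # Forwarding table-miss: flood replicated unicast packets to all peers.
            for peer in self.peers:
                peer.inbox.append({"src": self.name, "match": False, "payload": payload})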
The data, whether sent to a destination TEP due to a forwarding table-match or transmitted to all the TEPs due to a forwarding table-miss, can be transmitted by the source TEP in a unicast packet. The unicast packet herein may be understood as a packet configured for sending to a specific receiving TEP. The unicast packet includes the data of the source client for the target client. A TEP receiving the unicast packet cannot distinguish whether the packet is sent due to a forwarding table-match at the source TEP or is flooded due to a forwarding table-miss at the source TEP. In other words, with the unicast packet, the receiving TEP cannot determine whether the source TEP has an FT entry for the target client or not.
Further, when target client is roaming and has moved to a new TEP, the source TEP may not learn about the new TEP, i.e., the TEP currently associated with the target client, until the source TEP receives some data from the target client via the currently associated TEP. Till the source TEP learns about the currently associated TEP, the source TEP may either have a stale FT entry for the target client or have no FT entry at all for the target client. Further, the TEP previously associated with the target client may either have deleted the FT entry indicating the target client as a local client or have an updated FT entry indicating the tunneling path to the TEP currently associated with the target client. In a scenario where the source TEP has a stale FT entry and the previously associated TEP has deleted the FT entry for the target client, the source TEP sends a unicast packet to the previously associated TEP, but the data does not reach the target client. In a scenario where the source TEP has a stale FT entry and the previously associated TEP has the new FT entry for the target client, the source TEP sends a unicast packet to the previously associated TEP, but the previously associated TEP cannot forward the unicast packet to the currently associated TEP for further sending the data to the target client. In both the above scenarios, the data cannot reach the target client, which results in a “traffic black hole” from the source client to the target client. This traffic black hole continues till the source TEP learns about the TEP currently associated with the target client based on data sent by the target client via the currently associated TEP.
Further, in a scenario where the source TEP does not have the FT entry for the target client, the source TEP generates replicated unicast packets and floods the replicated unicast packets to all the other TEPs in the mesh. Each of the TEPs, on receiving a replicated unicast packet, looks up its forwarding table for the FT entry for the target client as a local client. The TEP currently associated with the target client then forwards the data to the target client. In this scenario, the source TEP continues to flood replicated unicast packets to all the other TEPs, till the source TEP learns about the TEP currently associated with the target client based on data sent by the target client via the currently associated TEP. This results in an increase in network utilization in the underlay network.
The present subject matter describes methods and systems for communication between clients in a multi-tenant data center network. The methods and the systems of the present subject matter facilitate reducing the time duration of traffic black hole from a source client to a target client and reducing the network utilization in the underlay network when the target client is roaming and has moved to a new TEP.
In accordance with an example implementation of the present subject matter, a source TEP transmits unicast packets with a status indicative of a forwarding table-match or a forwarding table-miss at the source TEP. With this status in the unicast packet, any receiving TEP can determine whether the source TEP has an FT entry for the target client in its forwarding table or not. With this determination, the receiving TEP can initiate actions, in accordance with the present subject matter, for reducing the time duration of traffic black hole and for reducing the network utilization of the underlay network when the target client has moved to a new TEP.
In a scenario where the source TEP has a stale FT entry and the previously associated TEP has deleted the FT entry for the target client, the source TEP transmits a unicast packet to the previously associated TEP with a status indicative of the forwarding table-match. Since the previously associated TEP does not have the FT entry for the target client, the previously associated TEP determines that the source TEP has a stale FT entry for the target client. At this, the previously associated TEP may send a response message to the source TEP with an indication to delete the stale FT entry for the target client. On receiving the response message, the source TEP may delete the stale FT entry and transmit subsequent unicast packets for the target client to all other TEPs with a status indicative of the forwarding table-miss. With this, the TEP currently associated with the target client, on receiving a unicast packet, sends the data to the target client. The actions initiated by the previously associated TEP and the actions performed by the source TEP, as described above, enable a reduction in the time duration of traffic black hole from the source client to the target client.
In a scenario where the source TEP has a stale FT entry and the previously associated TEP has the new FT entry for the target client indicating the currently associated TEP, the source TEP transmits a unicast packet to the previously associated TEP with a status indicative of the forwarding table-match. The previously associated TEP may forward the unicast packet to the currently associated TEP with a status indicating relaying of the packet by the previously associated TEP. The relaying of the unicast packet to the currently associated TEP helps in substantially eliminating the traffic black hole for the target client. Also, with the status indicating the relaying of the packet, the currently associated TEP determines that the previously associated TEP is not the original source of the unicast packet, but a packet forwarder for data from the source client.
Further, since the previously associated TEP has the new FT entry indicating the currently associated TEP, the previously associated TEP determines that the source TEP has a stale FT entry for the target client. At this, the previously associated TEP may send a response message to the source TEP with an indication to update the stale FT entry for the target client. On receiving the response message, the source TEP may update the stale FT entry and transmit subsequent unicast packets for the target client to the currently associated TEP with a status indicative of the forwarding table-match. The actions initiated by the previously associated TEP and the actions performed by the source TEP, as described above, enable a reduction in the time duration of transmission of packets from the source client to the target client.
Further, in a scenario where the source TEP does not have the FT entry for the target client, the source TEP transmits replicated unicast packets to all the other TEPs with a status indicative of the forwarding table-miss. The TEP currently associated with the target client accordingly then sends the data to the target client. Since the unicast packet has the status of the forwarding table-miss, the currently associated TEP determines that the source TEP does not have an FT entry for the target client. At this, the currently associated TEP may send a response message to the source TEP with an indication to add the FT entry for the target client. On receiving the response message, the source TEP may add the FT entry and transmit subsequent unicast packets for the target client to the currently associated TEP with a status indicative of the forwarding table-match. The actions initiated by the currently associated TEP and the actions performed by the source TEP, as described above, enable a reduction in the network utilization of the underlay network.
The following detailed description refers to the accompanying drawings. Wherever possible, the same reference numbers are used in the drawings and the following description to refer to the same or similar parts. While several examples are described in the description, modifications, adaptations, and other implementations are possible. Accordingly, the following detailed description does not limit the disclosed examples. Instead, the proper scope of the disclosed examples may be defined by the appended claims.
FIG. 1 illustrates a data center network environment 100, according to an example implementation of the present subject matter. The data center network environment 100 may be a multi-tenant network environment having a virtual IP sub-network overlaid on an L3 fabric network for client communication. A tunneling scheme based on, for example, a Vx LAN protocol, an NVGRE protocol, a GENEVE protocol, or an STT protocol, may be deployed for communication across the L3 fabric network. Clients, such as personal computers, smartphones, tablets, servers, and the like, can roam across the data center network while retaining their IP address irrespective of the point of connection on the data center network.
The L3 fabric network of the data center network environment 100 includes a source TEP 102 and a receiving TEP 104, as shown. The source TEP 102 is a TEP to which a source client 106 is connected locally. The receiving TEP 104 can be any TEP of the underlay network, connected to the source TEP 102 for receiving packets from and transmitting packets to the source TEP 102. Each of the source TEP 102 and the receiving TEP 104 maintains a forwarding table with FT entries for the clients depending on the TEP associated with the respective client. It may be noted that, for the sake of simplicity, only two TEPs, the source TEP 102 and the receiving TEP 104 are shown in the data center network environment 100; however, the data center network environment 100 may include more than two TEPs.
When the source client 106 wishes to send data to a target client, the source client 106 sends data in the form of a data packet 108 to the source TEP 102. The source TEP 102 on receiving the data packet 108 looks up its forwarding table and determines whether the forwarding table has the FT entry for the target client. The source TEP 102 generates and transmits a unicast packet 110 with the data and a status of a forwarding table-match when the FT entry for the target client is present in the forwarding table. The source TEP 102 generates and transmits the unicast packet 110 with the data and a status of a forwarding table-miss when the FT entry for the target client is not present in the forwarding table.
In an example implementation, the source TEP 102 may set a reserved bit in a packet header of the unicast packet 110 to ‘0’ to indicate the forwarding table-match, and set the reserved bit in the packet header to ‘1’ to indicate the forwarding table-miss. The receiving TEP 104, on receiving the unicast packet 110, decodes the packet header to determine the status of the reserved bit, and accordingly determine whether the source TEP 102 has the FT entry for the target client or not. The determination of whether the source TEP has the FT entry for the target client or not, based on the status in the unicast packet, enables initiation of actions to reduce the time duration of traffic black hole and to reduce utilization of the underlay network. The actions initiated based on the determination are described in detail through the description with reference to FIG. 2, FIG. 3, and FIG. 4.
In an example implementation, a TEP of the underlay network may include one or more processors. The processor(s) may be implemented as microprocessors, microcomputers, microcontrollers, digital signal processors, central processing units, state machines, logic circuitries, and/or any devices that manipulate signals based on operational instructions. Among other capabilities, the processor(s) may fetch and execute computer-readable instructions stored in a memory coupled to the processor(s). The memory can be internal or external to the TEP. The memory may include any non-transitory computer-readable storage medium including, for example, volatile memory (e.g., RAM), and/or non-volatile memory (e.g., EPROM, flash memory, NVRAM, memristor, etc.). The functions of the various elements shown in FIG. 1, including any functional blocks referring to a TEP, may be provided through the use of dedicated hardware as well as hardware capable of executing computer-readable instructions.
FIG. 2, FIG. 3, and FIG. 4 illustrate a multi-tenant data center network environment 200 for communication between a source client and a target client, according to an example implementation of the present subject matter. As shown, the multi-tenant data center network environment 200 includes an underlay network 202 having four TEPs, namely TEP 1 204-1, TEP 2 204-2, TEP 3 204-3, and TEP 4 204-4. The TEPs as shown are connected in a meshed topology, such that each TEP is just one hop away from any other TEP in the mesh. The multi-tenant data center network environment 200 also includes an overlay network 206 having VNETs 206-1, 206-2, 206-3, 206-4, with one VNET connected to one TEP. It may be noted that the VNETs 206-1, . . . , 206-4 may share the same subnet. Further, as shown, four clients, namely client 1 210-1, client 2 210-2, client 3 210-3, and client 4 210-4, are connected to the VNETs of the overlay network 206. Client 2 is roaming and has moved from TEP 2 to TEP 3.
Each of the TEPs 204-1, . . . , 204-4 maintains a forwarding table with FT entries indicative of tunneling paths for the clients 210-1, . . . , 210-4. Table 1 shows a forwarding table that may be maintained by TEP 1 before client 2 has moved from TEP 2 to TEP 3. Since client 1 is local to TEP 1, the FT entry for client 1 in the forwarding table of TEP 1 does not indicate a tunneling path. Since client 2 is connected to TEP 2, the FT entry for client 2 indicates a tunneling path TEP 1 to TEP 2. Since client 3 is connected to TEP 3, the FT entry for client 3 indicates a tunneling path TEP 1 to TEP 3. Similarly, since client 4 is connected to TEP 4, the FT entry for client 4 indicates a tunneling path TEP 1 to TEP 4.
TABLE 1: Forwarding table at TEP 1
Client 1    Local
Client 2    Tunneling path TEP 1 to TEP 2
Client 3    Tunneling path TEP 1 to TEP 3
Client 4    Tunneling path TEP 1 to TEP 4
For the purpose of description herein, consider a case where client 1 wishes to send data to client 2, where client 2 is roaming and has moved from TEP 2 to TEP 3 as shown in FIG. 2 to FIG. 4. Client 1 may be referred to as a source client, and client 2 may be referred to as a target client. Further, TEP 1 may be referred to as a source TEP, and TEP 3 may be referred to as a currently associated TEP of client 2.
FIG. 2 illustrates a scenario where client 2 has moved from TEP 2 to TEP 3, TEP 1 has a stale FT entry for client 2, and TEP 2 has deleted the FT entry for client 2. As shown, the forwarding table 212 of TEP 1 has the FT entry for client 2 indicating a tunneling path TEP 1 to TEP 2, and the forwarding table 214 of TEP 2 has no FT entry for client 2. On receiving the data in the form of a data packet 216 from client 1, TEP 1 looks up its forwarding table 212 and determines whether the forwarding table 212 has the FT entry for client 2. Since TEP 1 has the stale FT entry for client 2, TEP 1 transmits a unicast packet 218 to TEP 2 with the data and a status indicative of a forwarding table-match. In an example, a reserved bit in a packet header of the unicast packet 218 may be set as ‘0’ to indicate the forwarding table-match. TEP 2 on receiving the unicast packet 218 decodes the packet header and determines based on the status that TEP 1 has the FT entry for client 2. Since TEP 2 does not have the FT entry for client 2, TEP 2 determines that TEP 1 has a stale FT entry for client 2. After determining this, TEP 2 sends a response message 220 to TEP 1 with an indication that the forwarding table 214 of TEP 2 does not include the FT entry for client 2 and TEP 1 can delete the stale FT entry for client 2. In an example implementation, the response message 220 may include the following information: client #=2 and TEP ID=Null. On receiving the response message 220, TEP 1 deletes the stale FT entry from its forwarding table 212. After deleting the stale FT entry for client 2, TEP 1 transmits subsequent unicast packets for client 2 to all the TEPs with a status indicative of a forwarding table-miss. In an example implementation, the reserved bit in the packet header of the unicast packet may be set to ‘1’ to indicate the forwarding table-miss. TEP 3 on receiving the unicast packet from TEP 1 sends the data to client 2.
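A minimal sketch of this stale-entry case, reusing the Tep model sketched earlier: the previously associated TEP, finding no entry for the target client, answers with a response carrying the client id and a null TEP ID, and the source TEP deletes the stale entry. Function and field names are illustrative assumptions, not taken from the present subject matter.

def on_match_packet_without_entry(receiving_tep, target_client):
    # The sender signalled a forwarding table-match, but we have no entry:
    # tell it the entry is stale so it can be deleted.
    if target_client not in receiving_tep.forwarding_table:
        return {"client": target_client, "tep_id": None}
    return None

def on_delete_response(source_tep, response):
    if response is not None and response["tep_id"] is None:
        # Remove the stale entry; subsequent packets for this client are flooded
        # with the forwarding table-miss status until the client is re-learnt.
        source_tep.forwarding_table.pop(response["client"], None)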
FIG. 3 illustrates a scenario where client 2 has moved from TEP 2 to TEP 3 and TEP 1 does not have the FT entry for client 2. As shown, the forwarding table 302 of TEP 1 has no FT entry for client 2. Since client 2 has moved to TEP 3, the forwarding table 304 of TEP 3 has an FT entry for client 2 indicating that client 2 is local to TEP 3. On receiving the data in the form of a data packet 306 from client 1, TEP 1 looks up its forwarding table 302 and determines whether the forwarding table 302 has the FT entry for client 2. Since TEP 1 does not have the FT entry for client 2, TEP 1 transmits a replicated unicast packet 308-1, 308-2, 308-3 to each of TEP 2, TEP 3, and TEP 4 with the data and a status indicative of a forwarding table-miss. In an example, a reserved bit in a packet header of each replicated unicast packet may be set as '1' to indicate the forwarding table-miss. Each of TEP 2, TEP 3, and TEP 4, on receiving the unicast packet from TEP 1, looks up its forwarding table and determines whether the respective forwarding table has the FT entry for client 2. Since TEP 3 determines from its forwarding table 304 that client 2 is local to it, TEP 3 sends the data to client 2.
Further, since the unicast packet 308-2 has the status of forwarding table-miss, TEP 3 determines based on the status that TEP 1 does not have the FT entry for client 2. After determining this, TEP 3 sends a response message 310 to TEP 1 with an indication that TEP 1 can add an FT entry for client 2. On receiving the response message 310, TEP 1 adds an FT entry for client 2 in its forwarding table 302. After adding the FT entry for client 2, TEP 1 transmits subsequent unicast packets for client 2 to TEP 3 with a status indicative of a forwarding table-match.
In an example implementation, the response message 310 may include the following information: client #=2 and TEP ID=TEP 3. In an example implementation, the response message 310 may also include a timestamp information indicative of a time at which TEP 3 learnt about client 2. The timestamp information allows TEP 1 to add the FT entry for client 2 based on the latest location of client 2.
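The add-entry response with a timestamp can be handled as in the following sketch, again reusing the Tep model sketched earlier; the learn_times bookkeeping and field names are assumptions for illustration.

def on_add_response(source_tep, response, learn_times):
    client = response["client"]
    # Install the entry only if this response is newer than anything already known,
    # so the forwarding table always reflects the latest location of the client.
    if client not in learn_times or response["timestamp"] > learn_times[client]:
        source_tep.forwarding_table[client] = response["tep_id"]
        learn_times[client] = response["timestamp"]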
FIG. 4 illustrates a scenario where client 2 has moved from TEP 2 to TEP 3, TEP 1 has a stale FT entry for client 2, and TEP 2 has an FT entry for client 2 that indicates a tunneling path TEP 2 to TEP 3. As shown, the forwarding table 402 of TEP 1 has the FT entry for client 2 indicating a tunneling path TEP 1 to TEP 2, and the forwarding table 404 of TEP 2 has the FT entry for client 2 indicating a tunneling path TEP 2 to TEP 3. On receiving the data in the form of a data packet 406 from client 1, TEP 1 looks up its forwarding table 402 and determines whether the forwarding table 402 has the FT entry for client 2. Since TEP 1 has the stale FT entry for client 2, TEP 1 transmits a unicast packet 408 to TEP 2 with the data and a status indicative of a forwarding table-match. In an example, a reserved bit in a packet header of the unicast packet 408 may be set as ‘0’ to indicate the forwarding table-match. Since TEP 2 has the FT entry for client 2, TEP 2 forwards a unicast packet 410 to TEP 3 with a status indicating relaying of the unicast packet 410 by TEP 2. In an example implementation, a reserved bit in the packet header of the unicast packet may be set as ‘1’ to indicate that the unicast packet is relayed by TEP 2, and may be set as ‘0’ to indicate that the unicast packet is originally transmitted by the TEP 2. It may be noted that the reserved bit used for indicating the relaying of packets is different from the reserved bit used for indicating the forwarding table-match and the forwarding table-miss. TEP 3 on receiving the unicast packet 410 from TEP 2 sends the data to client 2.
Further, TEP 2 on receiving the unicast packet 408 decodes the packet header and determines based on the status that TEP 1 has the FT entry for client 2. Since TEP 2 has the FT entry for client 2 that indicates the client 2 is local to TEP 3, TEP 2 determines that TEP 1 has a stale FT entry for client 2. After determining this, TEP 2 sends a response message 412 to TEP 1 with an indication that TEP 1 can update the stale FT entry for client 2. On receiving the response message 412, TEP 1 updates the stale FT entry in its forwarding table 402. After updating the stale FT entry for client 2, TEP 1 transmits subsequent unicast packets for client 2 to TEP 3 with a status indicative of a forwarding table-match.
In an example implementation, the response message 412 may include the following information: client #=2 and TEP ID=TEP 3. In an example implementation, the response message 412 may also include a timestamp information indicative of a time at which TEP 2 learnt about TEP 3 having client 2 as a local client. The timestamp information allows TEP 1 to update the FT entry for client 2 based on the latest location of client 2.
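The relay case can be sketched as follows, continuing with the Tep model sketched earlier: the previously associated TEP forwards the packet to the TEP it now believes hosts the client, marks the packet as relayed, and prepares a response telling the source TEP to update its stale entry. The relay flag and field names are illustrative assumptions.

def relay_and_correct(prev_tep, source_name, packet, target_client):
    new_tep_name = prev_tep.forwarding_table.get(target_client)
    responses = []
    if new_tep_name and new_tep_name != "local":
        dest = next(p for p in prev_tep.peers if p.name == new_tep_name)
        relayed = dict(packet, relayed=True)   # mark as relayed, not originated, by this TEP
        dest.inbox.append(relayed)
        # Ask the source TEP to point its entry for this client at the new TEP.
        responses.append({"to": source_name, "client": target_client, "tep_id": new_tep_name})
    return responses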
FIG. 5 illustrates a method 500 of client communication in a multi-tenant data center network, according to an example implementation of the present subject matter. The order in which the method 500 is described is not intended to be construed as a limitation, and any number of the described method steps can be combined in any order to implement the method 500. Furthermore, the method 500 can be implemented by processor(s) or computing device(s) through any suitable hardware, a non-transitory machine readable medium, or combination thereof. Further, although the method 500 is described in context of a source TEP in a multi-tenant data center network, other suitable computing devices or systems may be used for execution of at least one step of method 500. It may be understood that steps of method 500 can be executed based on instructions stored in a non-transitory computer readable medium, as will be readily understood. The non-transitory computer readable medium may include, for example, digital memories, magnetic storage media, such as a magnetic disks and magnetic tapes, hard drives, or optically readable digital data storage media.
Referring to FIG. 5, at block 502, a data packet of a source client for a target client is received at a source TEP. At block 504, it is determined whether a forwarding table of the source TEP includes an FT entry for the target client. Based on the determination, if the FT entry for the target client is present in the forwarding table of the source TEP, then a unicast packet is transmitted to a destination TEP with a status indicative of a forwarding table-match, at block 506. Based on the determination, if the FT entry for the target client is not present in the forwarding table of the source TEP, a unicast packet is transmitted to all other TEPs with a status indicative of a forwarding table-miss, at block 508.
In an example implementation, a reserved bit in a packet header of the unicast packet may be set to a first status, for example ‘0’, to indicate the forwarding table-match, and may be set to a second status, for example ‘1’, to indicate the forwarding table-miss.
FIG. 6 illustrates a method 600 of client communication in a multi-tenant data center network, according to an example implementation of the present subject matter. The order in which the method 600 is described is not intended to be construed as a limitation, and any number of the described method steps can be combined in any order to implement the method 600. Furthermore, the method 600 can be implemented by processor(s) or computing device(s) through any suitable hardware, a non-transitory machine readable medium, or combination thereof. Further, although the method 600 is described in context of a source TEP in a multi-tenant data center network, other suitable computing devices or systems may be used for execution of at least one step of method 600. It may be understood that steps of method 600 can be executed based on instructions stored in a non-transitory computer readable medium, as will be readily understood. The non-transitory computer readable medium may include, for example, digital memories, magnetic storage media, such as magnetic disks and magnetic tapes, hard drives, or optically readable digital data storage media.
Referring to FIG. 6, at block 602, a unicast packet is transmitted to a destination TEP with a status indicative of a forwarding table-match when an FT entry for a target client is present in a forwarding table of a source TEP. At block 604, a response message is received from the destination TEP indicating that a forwarding table of the destination TEP does not include an FT entry for the target client. Based on the response message, the FT entry for the target client is deleted from the forwarding table of the source TEP, at block 606. In an example implementation, the response message may include the information of the target client and a TEP ID as “Null”.
FIG. 7 illustrates a method 700 of client communication in a multi-tenant data center network, according to an example implementation of the present subject matter. The order in which the method 700 is described is not intended to be construed as a limitation, and any number of the described method steps can be combined in any order to implement the method 700. Furthermore, the method 700 can be implemented by processor(s) or computing device(s) through any suitable hardware, a non-transitory machine readable medium, or combination thereof. Further, although the method 700 is described in context of a source TEP in a multi-tenant data center network, other suitable computing devices or systems may be used for execution of at least one step of method 700. It may be understood that steps of method 700 can be executed based on instructions stored in a non-transitory computer readable medium, as will be readily understood. The non-transitory computer readable medium may include, for example, digital memories, magnetic storage media, such as a magnetic disks and magnetic tapes, hard drives, or optically readable digital data storage media.
Referring to FIG. 7, at block 702, a unicast packet is transmitted to all other TEPs with a status indicative of a forwarding table-miss when an FT entry for a target client is not present in a forwarding table of a source TEP. At block 704, a response message is received from a TEP currently associated with the target client indicating that a forwarding table of the currently associated TEP includes an FT entry for the target client. The currently associated TEP is from the other TEPs that received the unicast packet with the status indicative of the forwarding table-miss. Based on the response message, the FT entry for the target client is added in the forwarding table of the source TEP, at block 706. In an example implementation, the response message may include the information of the target client and the ID of the currently associated TEP. In an example implementation, the response message may also include timestamp information indicative of a time at which the currently associated TEP learnt about the target client. The timestamp information allows the source TEP to add the FT entry for the target client based on the latest location of the target client.
FIG. 8 illustrates a method 800 of client communication in a multi-tenant data center network, according to an example implementation of the present subject matter. The order in which the method 800 is described is not intended to be construed as a limitation, and any number of the described method steps can be combined in any order to implement the method 800. Furthermore, the method 800 can be implemented by processor(s) or computing device(s) through any suitable hardware, a non-transitory machine readable medium, or combination thereof. Further, although the method 800 is described in context of a source TEP in a multi-tenant data center network, other suitable computing devices or systems may be used for execution of at least one step of method 800. It may be understood that steps of method 800 can be executed based on instructions stored in a non-transitory computer readable medium, as will be readily understood. The non-transitory computer readable medium may include, for example, digital memories, magnetic storage media, such as a magnetic disks and magnetic tapes, hard drives, or optically readable digital data storage media.
Referring to FIG. 8, at block 802, a unicast packet is transmitted to a destination TEP with a status indicative of a forwarding table-match when an FT entry for a target client is present in a forwarding table of a source TEP. At block 804, a response message is received from the destination TEP comprising information of a TEP currently associated with the target client. Based on the information of the currently associated TEP in the response message, the FT entry for the target client is updated in the forwarding table of the source TEP, at block 806. In an example implementation, the response message may include the information of the target client and the ID of the currently associated TEP. In an example implementation, the response message may also include timestamp information indicative of a time at which the destination TEP learnt about the currently associated TEP having the target client as a local client. The timestamp information allows the source TEP to update the FT entry for the target client based on the latest location of the target client.
Further, in an example implementation, the destination TEP may forward the unicast packet to the currently associated TEP with a status indicative of relaying of the unicast packet by the destination TEP to the currently associated TEP. In an example implementation, a reserved bit in a packet header of the unicast packet may be set to a first status, for example ‘1’, to indicate the relaying of the unicast packet, and may be set to a second status, for example ‘0’, to indicate the original transmission of the unicast packet.
FIG. 9 illustrates a system environment 900, according to an example implementation of the present subject matter. In an example implementation, the system environment 900 includes a processor 902 communicatively coupled to a non-transitory computer readable medium 904 through a communication link 906. The processor 902 functions to fetch and execute computer-readable instructions from the non-transitory computer readable medium 904. The processor 902 may be a processing resource of a TEP in an underlay network of a multi-tenant data center network, in accordance with the present subject matter. The processor 902 and the non-transitory computer readable medium 904 are also communicatively coupled to other TEP(s) 908.
The non-transitory computer readable medium 904 can be, for example, an internal memory device or an external memory device. In an example implementation, the communication link 906 may be a direct communication link, such as any memory read/write interface. In another example implementation, the communication link 906 may be an indirect communication link, such as a network interface. In such a case, the processor 902 can access the non-transitory computer readable medium 904 through a network (not shown). The network may be a single network or a combination of multiple networks and may use a variety of different communication protocols.
In an example implementation, the non-transitory computer readable medium 904 includes a set of computer readable instructions that facilitate reducing the duration of a traffic black hole and the utilization of the underlay network when a target client is roaming and has moved from one TEP to another TEP. The set of computer readable instructions can be accessed by the processor 902 through the communication link 906 and subsequently executed to perform acts for communication between clients in a multi-tenant data center network.
Referring to FIG. 9, in an example, the non-transitory computer readable medium 904 includes instructions 910 that cause the processor 902 to receive at a TEP a data packet of a source client for a target client. The non-transitory computer readable medium 904 includes instructions 912 that cause the processor 902 to transmit a unicast packet to a destination TEP with a status indicative of a forwarding table-match when an FT entry for the target client is present in a forwarding table of the TEP. The non-transitory computer readable medium 904 includes instructions 914 that cause the processor 902 to transmit a unicast packet to all other TEPs with a status indicative of a forwarding table-miss when the FT entry for the target client is not present in the forwarding table of the TEP.
In an example implementation, the non-transitory computer readable medium 904 may further include instructions that cause the processor 902 to: receive from the destination TEP a response message indicating to delete the FT entry for the target client from the forwarding table of the TEP; and delete the FT entry of the target client from the forwarding table of the TEP.
In an example implementation, the non-transitory computer readable medium 904 may further include instructions that cause the processor 902 to: receive from a TEP currently associated with the target client, a response message indicating that a forwarding table of the currently associated TEP includes an FT entry for the target client, wherein the currently associated TEP is from the other TEPs that received the unicast packet with the status indicative of the forwarding table-miss; and add the FT entry for the target client in the forwarding table of the TEP.
In an example implementation, the non-transitory computer readable medium 904 may further include instructions that cause the processor 902 to: receive from the destination TEP a response message comprising information of a TEP currently associated with the target client; and update the FT entry for the target client in the forwarding table of the TEP based on the currently associated TEP.
Although implementations for client communication in multi-tenant data center networks have been described in language specific to structural features and/or methods, it is to be understood that the present subject matter is not limited to the specific features or methods described. Rather, the specific features and methods are disclosed and explained as example implementations for client communication in multi-tenant data center networks.
We claim:
1. A method of communication between clients in a multi-tenant data center network, the method comprising: receiving at a source tunnel end-point (TEP) a data packet of a source client for a target client; determining whether a forwarding table of the source TEP includes a forwarding table (FT) entry for the target client; transmitting a unicast packet with a status indicative of a forwarding table-match to a destination TEP when the FT entry for the target client is present in the forwarding table of the source TEP; and transmitting a unicast packet with a status indicative of a forwarding table-miss to all other TEPs when the FT entry for the target client is not present in the forwarding table of the source TEP.
2. The method as claimed in claim 1, comprising: receiving from the destination TEP a response message indicating that a forwarding table of the destination TEP does not include an FT entry for the target client; and deleting the FT entry for the target client from the forwarding table of the source TEP.
3. The method as claimed in claim 1, comprising: receiving from a TEP currently associated with the target client, a response message indicating that a forwarding table of the currently associated TEP includes an FT entry for the target client, wherein the currently associated TEP is from the other TEPs that received the unicast packet with the status indicative of the forwarding table-miss; and adding the FT entry for the target client in the forwarding table of the source TEP.
4. The method as claimed in claim 1, comprising: receiving from the destination TEP a response message comprising information of a TEP currently associated with the target client; and updating the FT entry for the target client in the forwarding table of the source TEP based on the currently associated TEP.
5. The method as claimed in claim 1, comprising: setting a reserved bit in a packet header of the unicast packet to a first status to indicate the forwarding table-match; and setting the reserved bit in the packet header of the unicast packet to a second status to indicate the forwarding table-miss.
6. The method as claimed in claim 1, comprising: forwarding the unicast packet by the destination TEP to a TEP currently associated with the target client, wherein the unicast packet comprises a status indicative of relaying of the unicast packet by the destination TEP to the currently associated TEP.
7. A system for communication between clients in a multi-tenant data center network, the system comprising a tunnel end-point (TEP) to: receive, from a source TEP, a unicast packet with data of a source client for a target client, wherein the unicast packet comprises a status indicative of one of a forwarding table-match and a forwarding table-miss at the source TEP; and determine, based on the status, whether the unicast packet is for the forwarding table-match or the forwarding table-miss.
8. The system as claimed in claim 7, wherein, when the unicast packet is determined to be for the forwarding table-match, the TEP is to: determine whether a forwarding table of the TEP includes a forwarding table (FT) entry for the target client; and when the FT entry for the target client is not present in the forwarding table of the TEP, send a response message to the source TEP indicating to delete an FT entry for the target client from a forwarding table of the source TEP.
9. The system as claimed in claim 7, wherein, when the unicast packet is determined to be for the forwarding table-match, the TEP is to: determine whether a forwarding table of the TEP includes a forwarding table (FT) entry indicating a TEP currently associated with the target client; forward the unicast packet to the currently associated TEP with a status indicative of relaying of the unicast packet to the currently associated TEP; and send a response message to the source TEP indicating to update an FT entry for the target client in a forwarding table of the source TEP based on the currently associated TEP.
10. The system as claimed in claim 9, wherein the TEP is to set a reserved bit in a packet header of the unicast packet to a first status to indicate the relaying of the unicast packet.
11. The system as claimed in claim 9, wherein the TEP is to add in the response message a timestamp information indicative of a time at which the TEP learnt about the currently associated TEP.
12. The system as claimed in claim 7, wherein, when the unicast packet is determined to be for the forwarding table-miss, the TEP is to: determine whether a forwarding table of the TEP includes a forwarding table (FT) entry for the target client; and when the FT entry for the target client is present in the forwarding table of the TEP, send a response message to the source TEP indicating to add an FT entry for the target client in a forwarding table of the source TEP.
13. The system as claimed in claim 12, wherein the TEP is to add in the response message a timestamp information indicative of a time at which the TEP learnt about the target client.
14. A non-transitory computer-readable medium comprising computer-readable instructions for communication between clients in a multi-tenant data center network, the computer-readable instructions being executable by a processor to: receive at a tunnel end-point (TEP) a data packet of a source client for a target client; transmit a unicast packet to a destination TEP with a status indicative of a forwarding table-match when a forwarding table (FT) entry for the target client is present in a forwarding table of the TEP; and transmit a unicast packet to all other TEPs with a status indicative of a forwarding table-miss when the FT entry for the target client is not present in the forwarding table of the TEP.
15. The non-transitory computer-readable medium as claimed in claim 14, comprising computer-readable instructions executable by the processor to: receive from the destination TEP a response message indicating to delete the FT entry for the target client from the forwarding table of the TEP; and delete the FT entry of the target client from the forwarding table of the TEP.
|
package com.podcopic.animationlib.library.quint;
import com.podcopic.animationlib.library.BaseEasingMethod;
public class QuintEaseInOut extends BaseEasingMethod {

    public QuintEaseInOut(float duration) {
        super(duration);
    }

    @Override
    public Float calculate(float t, float b, float c, float d) {
        // Quintic ease-in-out: accelerate through the first half, decelerate through the second.
        // t = elapsed time, b = start value, c = total change in value, d = total duration.
        if ((t /= d / 2) < 1) return c / 2 * t * t * t * t * t + b;
        return c / 2 * ((t -= 2) * t * t * t * t + 2) + b;
    }
}
|
RuntimeError: cannot schedule new futures after shutdown
Got an error while using your pySmartDL:
2022-02-09T15:19:28.538821+00:00 app[web.1]: Exception in thread Thread-5:
2022-02-09T15:19:28.538840+00:00 app[web.1]: Traceback (most recent call last):
2022-02-09T15:19:28.538858+00:00 app[web.1]: File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner
2022-02-09T15:19:28.539230+00:00 app[web.1]: self.run()
2022-02-09T15:19:28.539247+00:00 app[web.1]: File "/usr/lib/python3.8/threading.py", line 870, in run
2022-02-09T15:19:28.539522+00:00 app[web.1]: self._target(*self._args, **self._kwargs)
2022-02-09T15:19:28.539540+00:00 app[web.1]: File "/usr/local/lib/python3.8/dist-packages/pySmartDL/pySmartDL.py", line 646, in post_threadpool_actions
2022-02-09T15:19:28.539776+00:00 app[web.1]: SmartDLObj.retry(str(pool.get_exception()))
2022-02-09T15:19:28.539793+00:00 app[web.1]: File "/usr/local/lib/python3.8/dist-packages/pySmartDL/pySmartDL.py", line 341, in retry
2022-02-09T15:19:28.539941+00:00 app[web.1]: self.start()
2022-02-09T15:19:28.539957+00:00 app[web.1]: File "/usr/local/lib/python3.8/dist-packages/pySmartDL/pySmartDL.py", line 300, in start
2022-02-09T15:19:28.540098+00:00 app[web.1]: req = self.pool.submit(
2022-02-09T15:19:28.540114+00:00 app[web.1]: File "/usr/local/lib/python3.8/dist-packages/pySmartDL/utils.py", line 343, in submit
2022-02-09T15:19:28.540293+00:00 app[web.1]: future = super().submit(fn, *args, **kwargs)
2022-02-09T15:19:28.540308+00:00 app[web.1]: File "/usr/lib/python3.8/concurrent/futures/thread.py", line 179, in submit
2022-02-09T15:19:28.540443+00:00 app[web.1]: raise RuntimeError('cannot schedule new futures after shutdown')
2022-02-09T15:19:28.540498+00:00 app[web.1]: RuntimeError: cannot schedule new futures after shutdown
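For reference, the final error is standard `concurrent.futures` behaviour rather than anything specific to this project; a minimal stand-alone reproduction (this is not pySmartDL's internal code, just the stdlib pattern the traceback points at) looks like this:

```python
# Reproduces "cannot schedule new futures after shutdown" with only the standard library.
from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor(max_workers=2)
pool.submit(print, "first task")       # fine while the pool is running
pool.shutdown(wait=True)               # after shutdown the pool refuses new work

try:
    pool.submit(print, "retry task")   # the same kind of call retry()/start() ends up making
except RuntimeError as err:
    print(err)                         # -> cannot schedule new futures after shutdown

# A retry path has to use a fresh executor instead of re-using the old one:
pool = ThreadPoolExecutor(max_workers=2)
pool.submit(print, "retry task on a new pool")
pool.shutdown(wait=True)
```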
@AbirHasan2005 were you able to find any solution?
|
Comment cannot be submitted on mobile
Comment submit button is fully obscured by edit toolbar (found using Samsung Internet on Samsung Galaxy S8)
Posts, on the other hand, can be submitted, perhaps because their submit button is higher on the page.
Workaround: when you're ready to submit, switch the phone to landscape mode. The editor is even less usable in landscape (the text area can be fully obscured), but the Submit button is reachable.
User AndHisHorse reports a possibly more convenient workaround of tapping above the editor to dismiss it, granting access to the submit button.
Should be a bit better now, but should in the long-term be covered by #226
|
---
name: Typography
route: /typography
---
import {Playground} from 'docz';
import CodeBlock from 'src/components/CodeBlock';
import Heading from 'src/components/Heading';
import List from 'src/components/List';
import Text from 'src/components/Text';
import typography from 'src/typography';
# Typography
Typography styles are created using [Kyle A. Matthews](https://kyleamathews.github.io/typography.js/)' awesome `typography` library.
The following configuration is used:
<CodeBlock
language="javascript"
value={JSON.stringify(typography.options, null, 2)}
/>
## Usage
The generated typography styles are passed to the `Text` and `Heading` components. They are also injected in the `Provider` component. This allows consistent application of typography styles across UI components and native DOM elements.
<Playground>
<List direction="vertical">
<Heading level={1}>Heading 1</Heading>
<Heading level={2}>Heading 2</Heading>
<Heading level={3}>Heading 3</Heading>
<Heading level={4}>Heading 4</Heading>
<Heading level={5}>Heading 5</Heading>
<Heading level={6}>Heading 6</Heading>
<Text>Regular Text</Text>
<Text bold>Bold Text</Text>
<Text variant="code">Code Text</Text>
</List>
</Playground>
|
Blackberry - Setting an alarm programmatically?
Is there an API for the Blackberry on changing or setting the Clock Alarm? Also, is it possible?
+1. Pls tell if you found anything
I don't think you can do it with the built-in alarm application, but would adding an event to the calendar work? Try using the PIM APIs to do that.
There was another question along these lines, and I'm still curious: Why would someone want an outside application to reset the built-in alarm?
|
Board Thread:Fun And Games/@comment-28890853-20181230030434/@comment-35230176-20190115191529
"Find out where we're going... and see what our future holds!"
Yells Rex as he battles Jin. Daniel said.
|
Yield and physical characterization of Passiflora cincinnata in the Brazilian Savanna1
1 Received: Sep. 23, 2020. Accepted: Jan. 15, 2021. Published: Feb. 11, 2021. DOI: 10.1590/1983-40632021v5165795. 2 Universidade de Brasília, Brasília, DF, Brasil. 3 Empresa Brasileira de Pesquisa Agropecuária (Embrapa Cerrados), Planaltina, DF, Brasil. 4 Empresa Brasileira de Pesquisa Agropecuária (Embrapa Semiárido), Petrolina, PE, Brasil.
ABSTRACT
For the Caatinga passion fruit (Passiflora cincinnata Mast.), there are no indications of varieties specific for the Brazilian Savanna conditions, as well as conduction techniques that make it possible to express a greater yield potential. This study aimed to evaluate the yield and quality of CPEF2220 and CBAF2334 populations in espalier and trellis conduction systems. A completely randomized design was used, in a 2 x 2 factorial arrangement, with three replications of four plants per plot. The fruit yield and physical characteristics were evaluated throughout the production cycle. The average yield was 8.0 kg plant-1 (3.5-14.9 kg plant-1) and the average number of fruits per plant was 139.1 (55.8-283.5), with average pulp yield of 29 %. There was a tendency to increase the number and yield of fruits for the CPEF2220 population conducted in the espalier system. The plant survival rate was 41 % (CPEF2220) and 87.5 % (CBAF2334), with a higher adaptation to the Brazilian Savanna conditions, while the CPEF2220 population presented a higher yield potential. Reductions in mass, longitudinal and equatorial diameter and fruit shape were observed throughout the harvest, with some exceptions for the espalier system, which showed more elongated fruits. The trellis system showed a greater yield potential for the parent populations of the passion fruit BRS Sertão Forte, for the study conditions.
KEYWORDS: Caatinga passion fruit, tropical fruit, espalier and trellis conduction systems.
INTRODUCTION
The Passiflora cincinnata Mast. species produces fruits of green color and is popularly known as Caatinga passion fruit, or bush passion fruit. It is native to the Caatinga and Cerrado (Brazilian Savanna) biomes and to the transition areas between them.
The BRS Sertão Forte (BRS SF) variety is available on the market, resulting from the crossing of two populations (CBAF2334 and CPEF2220) of plants originated and selected in the Brazilian semi-arid region. Under conditions of the Pernambuco state and in the Brazilian Savanna of the midwest plateau (highland), the variety may produce 18-30 t ha-1 year-1, depending on the crop management conditions (Embrapa 2016).
Due to the climatic differences between the semi-arid region (Moura et al. 2007) and the Brazilian Savanna (Coutinho 2018), it is necessary to understand the productive performance of the parents under specific conditions of the Brazilian Savanna for different conduction systems (espalier and trellis), which may contribute to understanding the variety and to establishing strategies for cultivation recommendations.
The conduction of plants in a trellis system results in a higher fruit yield for the P. edulis species than the espalier one, whereas the P. setacea species yields about 13,754 kg ha-1 in the trellis and 10,492 kg ha-1 in the espalier system (Komuro 2008, Costa et al. 2014). Although the trellis conduction system yields 30-40 % more than the wire espalier conduction system, it must be considered that the cost of implementing it is higher (Rigden 2011), since more support brackets are required to accommodate the main wire, as well as more time and labor. For P. cincinnata, there is a lack of information regarding the conduction system, which is important for decision-making, due to the higher cost of implementation and maintenance when carried out in trellises, compared to espaliers (Carvalho et al. 2015).
Thus, this study aimed to evaluate the yield and quality of CBAF2334 and CPEF2220 populations, which have the parental genotypes of the P. cincinnata cv. BRS SF, under cultivation conditions in the Brazilian Savanna, using the espalier and trellis conduction systems.
MATERIAL AND METHODS
Parental populations of the Passiflora cincinnata cv. BRS SF, CBAF2334 and CPEF2220, which are part of the Embrapa Semiárido germplasm bank, were evaluated. The populations were cultivated between April 2015 and July 2016, at the Embrapa Cerrados, in Planaltina, DF, Brazil (15º36'13.02"S, 47º43'17.34"W and average altitude of 1,050 m).
Pits with dimensions of 60 cm in diameter and 60 cm in depth were made with the aid of a motorized drill bit. Planting fertilization was carried out based on the soil analysis, using as a reference the fertilization for sour passion fruit: dolomitic limestone to raise the base saturation (V) to 50 % (Brasil & Nascimento 2010); 250 g pit-1 of P2O5 (source: simple superphosphate); 100 g pit-1 of N (source: ammonium sulphate); 100 g pit-1 of K2O (source: potassium chloride); 100 g pit-1 of FTE BR12; and 10 L pit-1 of organic matter (source: chicken litter). The first cover fertilization was carried out at 60 days after planting, followed by others at 45-day intervals, using 100 g plant-1 (1:2 of potassium chloride and ammonium sulphate).
The crops were implemented in the field as shown in Figures 1A and 1B, with seedlings at the age of three months, on April 09, 2015, in the spacing between plants and between rows (2.5 m × 2.5 m for the espalier and 2.5 m × 5.0 m for the trellis conduction systems). The experimental design for the parents CPEF2220 and CBAF2334 was completely randomized, with three replications of four plants for each conduction system (trellis and espalier) ( Figure 1A).
For the study of fruit yield and physical characteristics carried out between November 2015 and July 2016, each population and its respective conduction system with three replications were considered, with 10 fruits per plot and all fruits of the plot being analyzed in the harvest of July 2016.
For the physical evaluations of fruits carried out between May and June 2016, a factorial arrangement (2 x 2) was considered, being two populations (CBAF2334 and CPEF2220) and two conduction systems (espalier and trellis), with three replications each and eight fruits analyzed per plot.
The climatic conditions of the cultivation period (April 2015 to July 2016) were expressed by an average temperature of 22.1 ºC, average relative humidity of 64.2 % and precipitation of 1,090 mm. The fruits of the plants from each plot were collected weekly from November 2015, the beginning of the first harvest. The collection took place after the abscission (after detaching from the plant, reaching physiological maturation), until the end of the material's life cycle (July 2016), with the last harvest comprising the fruits that fell on the soil and those that were trapped in the plant.
The analyzed data were: total number of fruits, obtained by counting the fruits per plant and per plot; longitudinal and equatorial length and diameter of the fruit, determined with a digital caliper (StainlessHardened™); and fruit mass, measured by a centesimal semi-analytical scale (OhausAdventurer™). The values for total mass and number of fruits were determined per plant. The mass and respective longitudinal (length) and equatorial (width) diameters were determined by analyzing 10 fruits from each plot, according to availability and collected weekly.
During the months of May and June 2016, of the ten fruits per plot, eight were evaluated for peel/skin thickness with a digital caliper (StainlessHardened™); peel and pulp mass (with and without seeds) obtained on a semi-analytical centesimal scale (OhausAdventurer™); pulp volume with and without seeds determined in a 100 mL graduated cylinder; and fruit shape obtained by the ratio between longitudinal (length) and equatorial diameter.
The seeds were detached from the pulp using a food blender with a blunt blade, and soon after they were separated from the pulp in a sieve. The seed fresh weight was determined by the difference in the pulp mass with and without seeds. The pulp yield was calculated in percentage values from the mass/mass ratio of the pulp without and with seeds per fruit, and the mass/mass ratio and volume/volume of the pulp without seeds per pulp with seeds.
Regression analyses were performed for each population and its respective conduction system to evaluate the monthly yield of fruits harvested on the ground between November 2015 and July 2016. For the physical characteristics of the fruits collected between May and June 2016, a 2 x 2 factorial scheme (populations x conduction systems) was used in a completely randomized design. The significance of the treatments was evaluated by analysis of variance (Anova), the normality of the residuals was verified by the Shapiro-Wilk test (Miot 2017), and the homogeneity of variance by the Levene test (Levene 1960). For the comparison of means, the Tukey test was used, at 5 % probability. All analyses were performed with the R statistical software, version 3.5.0 (R Core Team 2018).
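For readers who want to reproduce a comparable analysis pipeline, the sequence of checks described above can be sketched as follows; this is only an illustrative translation of the workflow into Python with synthetic placeholder numbers, not the study's data or its original R code:

```python
# Illustrative sketch of the statistical workflow (normality, variance homogeneity, ANOVA);
# the numbers below are synthetic placeholders, not the study's measurements.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
# hypothetical yields (kg per plant) for four population x conduction-system combinations
groups = [rng.normal(loc=m, scale=1.5, size=12) for m in (6.0, 9.0, 5.0, 7.5)]

for g in groups:
    print("Shapiro-Wilk p =", stats.shapiro(g).pvalue)   # normality check per group
print("Levene p =", stats.levene(*groups).pvalue)        # homogeneity of variance
print("ANOVA p  =", stats.f_oneway(*groups).pvalue)      # overall treatment effect
# A pairwise comparison of means at 5 % (Tukey HSD) could follow, e.g. with
# statsmodels.stats.multicomp.pairwise_tukeyhsd.
```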
RESULTS AND DISCUSSION
The total fruit yield of P. cincinnata (CPEF2220 and CBAF2334) conducted in the espalier and trellis systems ranged from 3.5 to 14.9 kg plant-1, with mean value of 8.0 kg plant-1. The linear regression analysis showed the existence of a different behavior in yield for the two populations in the two conduction systems (Figures 2A and 2B). There was a trend of linear increase in yield only for the CPEF2220 population conducted in the espalier system (Figure 2A).
A higher yield for the CPEF2220 population was observed in the trellis system (Figure 2A), which represented an average yield potential (125.7 %) higher than that for the espalier system. A similar behavior was observed in relation to the average number of fruits per plant, whose values varied from 55.8 to 283.5 fruits plant-1, with an average of 139.1 fruits plant-1. The CPEF2220 population showed a fruit yield 153.7 % higher than the CBAF2334 one.
Similarly to yield, a tendency to increase the number of fruits per plant (Figure 2C) was observed only in the espalier system of the CPEF2220 population; in the trellis system of this population and in both systems for the CBAF2334 population the same did not occur, indicating a random number of fruits per plant undergoing abscission along the production period and harvest. The obtained result was close to the average yield of 32 accessions of P. cincinnata cultivated in Petrolina (Pernambuco state, Brazil) in the espalier conduction system (Araújo et al. 2008), which was 15.88 kg plant-1. Costa et al. (2014) also observed a higher yield for P. setacea (BRS PC) in the trellis conduction system than in the espalier one, possibly due to the better distribution of the plant branches in the trellis and a consequent greater exposure to the sun and pollinators (Guimarães et al. 2013, Costa et al. 2014). The CPEF2220 and CBAF2334 parents showed a different behavior concerning the ability to adapt to environmental conditions, regardless of the conduction system. The highest percentage of plant survival was found within the CBAF2334 population (87.5 %), in relation to the CPEF2220 one (41 %).
Individuals that increased the yield by up to three times with the density decrease, as well as others that did not respond to the decrease, were also observed, particularly within the CPEF2220 population.
There was a tendency for a significant decrease in the average fruit mass during the harvest (Figure 3), except for the CPEF2220 population, when carried out in the espalier system, which showed stability for fruit size from the beginning to the end of cultivation.
A trend towards a decrease in fruit mass with an increase in the number of fruits was observed for melon and sour passion fruit (Queiroga et al. 2008, Nogueira Filho et al. 2011), with the decrease being attributed to competition for plant reserves. The average fruit mass differed significantly only among the plant populations, with the CBAF2334 population presenting fruits of higher mass (Table 1). There was no significant interaction between conduction factors and plant populations.
The mean fruit longitudinal diameter of the two populations was 53 mm, while the equatorial diameter was 51 mm, varying throughout the fruiting period (Figure 4).
There was a significant trend towards a decrease in the longitudinal and equatorial diameters of the populations conducted in the trellis system ( Figure 4). This trend was not observed for the populations when conducted in the espalier system, except for the longitudinal diameter of the CBAF2334 population. Queiroga et al. (2008) also verified a reduction in the length and width of melon fruits throughout the crop cycle, which was attributed to a higher yield in the number of fruits, a fact that may also have occurred with the P. cincinnata fruits (Figures 2C and 2D).
The relation between the longitudinal and equatorial diameters indicates the shape of the fruit, and, the closer the value to 1, the more rounded the fruit will be (Jesus et al. 2015). The fruits of the whole trial showed values that varied from 0.95 to 1.14, indicating a shape close to the rounded one. However, the fruits of the CBAF2334 population proved to be significantly more elongated (Table 1).
Significant differences in the fruit shape were observed between the espalier and trellis conduction (Table 1), with little evident practical effect. There was a tendency for a significant reduction in the fruit shape, except for the CPEF2220 population conducted in the espalier system, which tended towards a constant shape (Figure 5). Komuro (2008), evaluating P. edulis, and Silva et al. (2004), evaluating P. alata, also observed differences in fruit size when using vertical espalier and T-espalier conduction systems (the latter resembles the trellis because it is positioned horizontally). Variations in the size and shape of P. edulis fruits, as a function of the growing environment, were also observed by Scorza et al. (2017). The fruit shape and dimensions are regulated by genes belonging to the MADS-box complex and play an important role during the reproductive development (Cutri & Dornelas 2012). Scorza et al. (2017) studied two genes of this complex in P. edulis and found that PeFUL, a FRUITFULL (FULL) homologue, presented a broad pattern of expression in vegetative and reproductive tissues, suggesting that these genes may also be related to the development of P. cincinnata fruits.
There was no interaction between the conduction systems and plant population, nor any significant variation, in relation to the seed mass (Table 2). Significant differences, however, were observed in the peel mass and thickness and in the seedless pulp mass and volume between the CPEF2220 and CBAF2334 plant populations (Table 2). The CPEF2220 population plants showed peel with lower mass and thickness, while fruits of the CBAF2334 population had a greater mass and volume of seedless pulp, indicating that the two genotypes have beneficial characteristics for consumption in retail and industry. The volume of pulp with seeds for the CBAF2334 population (Table 2) presented a value close to that described by Magalhães (2010) (47.1 mL). The peel thickness of the CPEF2220 population recorded values that can be classified as very thin, and CBAF2334 as thin (Jesus et al. 2015). The significant differences observed in the mass and volume of the seedless pulp (Table 2) between the plant populations were correlated with the greater fruit mass of the CBAF2334 population (Table 1).
There was no interaction or significant differences between the P. cincinnata populations and the conduction systems, in relation to pulp yield. The yield of the seedless pulp mass, in relation to the fruit mass, was 26.63-31.37 %, with an average value of 29 %, while the mass of pulp with seeds, in relation to the fruit mass, was 63.04-66.58 %, with an average value of 64.81 %. The mass yield of pulp without seeds, in relation to the pulp with seeds, was 40.18-47.29 %, with an average value of 43.73 %, while the volume yield of pulp without seeds, in relation to the pulp with seeds, was 48.88-49.10 %, with an average of 44.99 %. Lessa (2011) observed, for P. cincinnata and the BRS SF cultivar, a pulp yield of 31.88 % and 28.70 %, respectively, values close to those found in the present study. These values are also close to those found for the P. edulis BRS Sol do Cerrado cultivar (Tupinambá et al. 2012), which presented an average pulp yield with seeds of 31.45 %, in relation to the fruit.
CONCLUSIONS
1. The trellis conduction system enabled the greatest yield potential of the Passiflora cincinnata populations in the conditions of the Brazilian Savanna, with a greater yield potential for the CPEF2220 population and a higher survival rate for the CBAF2334 population;
2. The P. cincinnata fruits showed characteristics of very thin skin/peel and pulp yield compatible with the fruits of P. edulis. The trellis conduction system reduced the average value of mass, shape, longitudinal (length) and equatorial (width) diameter of the fruits throughout the fruiting period;
3. The espalier conduction system influenced the fruit shape, which was more elongated, regardless of the evaluated population.
|
PyQt - QComboBox connection confirmed but function is not being called
I am having a bit of trouble with the signal/slot issues using PyQt. My code is below, but it probably deserves a bit of explanation. The first two QObject.connect() calls return True, so I know the connection is established. However, when changing the selection in the comboBox, the function getParameters is not called as expected. The 5 connects below that were for debugging and testing the other signals associated with the ComboBox. Those do not print to the log as expected either.
From what I've read elsewhere, there are newer ways to specify a connection; could this be the issue? And if so, can someone give me an example of that format? Thanks!
#interactive GUI connections:
resultCombo = QObject.connect(self.dlg.ui.comboBox, SIGNAL("currentIndexChanged(const QString & text)"), self.getParameters)
resultSpin = QObject.connect(self.dlg.ui.spinBox_bands, SIGNAL("valueChanged(int i)"), self.getParameters)
QMessageBox.information( self.iface.mainWindow(),"Info", "connections: ComboBox = %s SpinBox = %s"%(str(resultCombo), str(resultSpin)) )
QObject.connect(self.dlg.ui.comboBox, SIGNAL("currentIndexChanged(const QString & text)"), self.log1)
QObject.connect(self.dlg.ui.comboBox, SIGNAL("currentIndexChanged(int index)"), self.log2)
QObject.connect(self.dlg.ui.comboBox, SIGNAL("currentTextChanged(const QString & text)"), self.log3)
QObject.connect(self.dlg.ui.comboBox, SIGNAL("highlighted(const QString & text)"), self.log4)
QObject.connect(self.dlg.ui.comboBox, SIGNAL("activated(const QString & text)"), self.log5)
def log1(self, input):
QgsMessageLog.logMessage("currentIndexChanged string. input = " + str(input), "Debug", 0)
def log2(self, input):
QgsMessageLog.logMessage("currentIndexChanged int. input = " + str(input), "Debug", 0)
def log3(self, input):
QgsMessageLog.logMessage("currentTextChanged string. input = " + str(input), "Debug", 0)
def log4(self, input):
QgsMessageLog.logMessage("highlighted string. input = " + str(input), "Debug", 0)
def log5(self, input):
QgsMessageLog.logMessage("cactivated string. input = " + str(input), "Debug", 0)
I solved it. As I guessed, it did have to do with the "new style" connection syntax. I'm not entirely sure why the old style was connecting, but not calling the connected function, but it is now working with the following code:
self.dlg.ui.comboBox.currentIndexChanged['QString'].connect(self.getParameters)
self.dlg.ui.spinBox_bands.valueChanged.connect(self.getParameters)
For those that don't know (I didn't and couldn't find good documentation - link?), the ['QString'] argument allows you to select the type of result for overloaded signals. This was important for me as I'm using the type to distinguish between the senders. However, I suppose I should be more explicit and use
sender = self.sender()
in my getParameters function, but this is working.
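For completeness, a hypothetical version of that getParameters slot could look like the sketch below; the widget names come from the code above, but the body is illustrative rather than my actual function:

```python
def getParameters(self, value):
    sender = self.sender()                       # the widget that emitted the signal
    if sender is self.dlg.ui.comboBox:           # 'QString' overload -> value is the new text
        QgsMessageLog.logMessage("combo changed to %s" % value, "Debug", 0)
    elif sender is self.dlg.ui.spinBox_bands:    # valueChanged(int) -> value is the new int
        QgsMessageLog.logMessage("bands set to %d" % value, "Debug", 0)
```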
|
#include <iostream>
#include <cstring>
#include <windows.h>
#include <cctype>
#include <cstdlib>
#include <iomanip>
using namespace std;
#define CSI const signed int
//simpletron codes
CSI READ=10; // read a word from kboard to spec. location in memory
CSI WRITE=11; // write a word from memory to the screen
CSI LOAD=20; // load a word from spec. location in memory into accumulator
CSI STORE=21; // store a word from acc. into spec. location in memory
CSI ADD=30; // add a word from spec. location in memory to word in accumulator.leaves result in acc.
CSI SUBTRACT=31; // subtract as for add
CSI DIVIDE=32; // divide as for add
CSI MULTIPLY=33; // multiply as for add
CSI MODULUS=34; // modulus as for add
CSI BRANCH=40; // unconditional branch (goto)
CSI BRANCHNEG=41; // branch if acc is negative
CSI BRANCHZERO=42; // branch if acc is zero
CSI HALT=43; // end program
CSI CLEARSCREEN=99; // clear the screen
//function prototypes
void begin();
void clrscr();
void entercode(signed int[]);
void execute(signed int[]);
void read(signed int[],int);
void write(signed int[],int);
void dump(signed int[],signed int,signed int,signed int,int,int);
void add (signed int[],signed int*,int);
void subtract(signed int[],signed int*,int);
void multiply(signed int[],signed int*,int);
void divide(signed int[],signed int*,int);
void modulus(signed int[],signed int*,int);
int main()
{
signed int Memory [100]={0}; //simpletron memory
begin();
entercode(Memory);
system("PAUSE");
clrscr();
execute(Memory);
return 0;
}
void begin()
{
clrscr();
cout<<"*** Welcome to Simpletron. ***"<<endl<<endl<<
"*** Please enter your program one instruction ***"<<endl
<<"*** (or data word) at a time. I will type the ***"<<endl<<
"*** location number and a question mark (?). ***"<<endl<<
"*** You then type the word for that location. ***"<<endl<<
"*** Type the sentinel -99999 to stop entering ***"<<endl<<
"*** your program. v.1.0 (c) Russ 2001 ***"<<endl;
}
void clrscr()
{
COORD coordScreen = { 0, 0 };
DWORD cCharsWritten;
CONSOLE_SCREEN_BUFFER_INFO csbi;
DWORD dwConSize;
HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
GetConsoleScreenBufferInfo(hConsole, &csbi);
dwConSize = csbi.dwSize.X * csbi.dwSize.Y;
FillConsoleOutputCharacter(hConsole, TEXT(' '), dwConSize, coordScreen,
&cCharsWritten);
GetConsoleScreenBufferInfo(hConsole, &csbi);
FillConsoleOutputAttribute(hConsole, csbi.wAttributes, dwConSize, coordScreen,
&cCharsWritten);
SetConsoleCursorPosition(hConsole, coordScreen);
}
void entercode(signed int array[])
{
char buffer[81];// for input
for(int i=0;i<100;i++)
{
entry: cout<<endl<<(i<10 ? "0":"")<<i<<" ? ";// print 1 as 01 etc.
cin.getline(buffer,80);
if (buffer[0] != '+' && buffer[0] != '-')// expect + or - as first char
{
cout<<endl<<"Not a valid simpletron code"<<endl;
goto entry;
}
int bufflen=strlen(buffer);
if(bufflen !=5 && (strcmp(buffer,"-99999")))// simpletron codes are SDDDD or -99999
{
cout<<endl<<"Not a valid simpletron code"<<endl;
goto entry;
}
for(int j=1;j<bufflen;j++)
{
if (!isdigit(buffer[j]))
{
cout<<endl<<"Not a valid simpletron code"<<endl;
goto entry;
}
}
if (!strcmp(buffer,"-99999"))// find sentinel
{
cout<<endl<<"*** Program loading finished. ***"<<endl<<
"*** Program execution begins. ***"<<endl;
return;
}
signed int a=atoi(buffer);
array[i]=a;// fill memory with program code
}
}
void execute(signed int array[])
{
signed int Accumulator=0,InstructionRegister=0,OpCode=0;
int Counter=0,Operand=0;
bool Flag=TRUE;
while(Flag)
{
InstructionRegister=array[Counter];
OpCode=InstructionRegister /100;
Operand=InstructionRegister %100;
Counter++;
switch(OpCode)
{
case READ: {
read(array,Operand);
break;
}
case WRITE:{
write(array,Operand);
break;
}
case CLEARSCREEN :{
clrscr();
break;
}
case HALT: {
cout<<endl<<"*** Program execution terminated. ***"<<endl;
Flag=FALSE;
dump(array,Accumulator,InstructionRegister,OpCode,Operand,(Counter-1));
break;
}
case BRANCH: {
Counter=Operand;
break;
}
case BRANCHZERO:{
if (Accumulator==0) Counter=Operand;
break;
}
case BRANCHNEG: {
if (Accumulator <0) Counter=Operand;
break;
}
case LOAD: {
Accumulator=array[Operand];
break;
}
case STORE: {
array[Operand]=Accumulator;
break;
}
case ADD: {
add(array,&Accumulator,Operand);
break;
}
case SUBTRACT: {
subtract(array,&Accumulator,Operand);
break;
}
case MULTIPLY: {
multiply(array,&Accumulator,Operand);
break;
}
case DIVIDE: {
divide(array,&Accumulator,Operand);
break;
}
case MODULUS: {
modulus(array,&Accumulator,Operand);
break;
}
default: {
cerr<<endl<<"*** Fatal runtime error - undefined code ***"<<endl;
dump(array,Accumulator,InstructionRegister,OpCode,Operand,(Counter-1));
exit(0);
}
}
}
}
void read(signed int array[],int Operand)
{
cout<<endl<<"Enter word for location :- "<<Operand<<" ? ";
char buffer[81];
bool ok=TRUE;
while (ok)
{
entry: cin.getline(buffer,80);
if (buffer[0] != '+' && buffer[0] != '-')// expect + or - as first char
{
cout<<endl<<"First character has to be a + or - sign."<<endl<<"Try again:- ? ";
goto entry;
}
signed int a=atoi(buffer);
if (a<-9999 || a>9999)
{
cout<<endl<<"Number entered out of range (-9999 to +9999) "<<endl<<"Try again :- ? ";
goto entry;
}
array[Operand]=a;
ok=FALSE;
}
}
void write(signed int array[],int Operand)
{
cout<<endl<<"Word at memory location :- "<<Operand<<" is "<<showpos<<array[Operand];
}
void add (signed int array[],signed int* Acc,int Operand)
{
if ((*Acc+array[Operand])>9999)
{
cout<<endl<<"*** Fatal error :- Accumulator overflow ***"<<endl<<"*** Program terminated ***"<<endl;
exit(0);
}
else
*Acc += array[Operand];
}
void subtract (signed int array[],signed int* Acc,int Operand)
{
if ((*Acc-array[Operand])<-9999)
{
cout<<endl<<"*** Fatal error :- Accumulator overflow ***"<<endl<<"*** Program terminated ***"<<endl;
exit(0);
}
else
*Acc -= array[Operand];
}
void multiply(signed int array[],signed int* Acc,int Operand)
{
if (((*Acc * array[Operand]) < -9999) || ((*Acc * array[Operand]) > 9999))
{
cout<<endl<<"*** Fatal error :- Accumulator overflow ***"<<endl<<"*** Program terminated ***"<<endl;
exit(0);
}
else
*Acc *= array[Operand];
}
void divide (signed int array[],signed int* Acc,int Operand)
{
if (array[Operand]==0)
{
cout<<endl<<"*** Fatal error :- Division by zero ***"<<endl;
exit(0);
}
if (((*Acc / array[Operand]) < -9999) || ((*Acc / array[Operand]) > 9999))
{
cout<<endl<<"*** Fatal error :- Accumulator overflow ***"<<endl<<"*** Program terminated ***"<<endl;
exit(0);
}
else
*Acc /= array[Operand];
}
void modulus (signed int array[],signed int* Acc,int Operand)
{
if (array[Operand]==0)
{
cout<<endl<<"*** Fatal error :- Division by zero ***"<<endl;
exit(0);
}
if (((*Acc / array[Operand]) < -9999) || ((*Acc / array[Operand]) > 9999))
{
cout<<endl<<"*** Fatal error :- Accumulator overflow ***"<<endl<<"*** Program terminated ***"<<endl;
exit(0);
}
else
*Acc %= array[Operand];
}
void dump(signed int array[],signed int Acc,signed int IR,signed int OpCode,int
operand,int counter)
{
system("PAUSE");
clrscr();
cout<<"REGISTERS :-"<<endl<<"Accumulator "<<showpos<<setfill('0')<<left<<setw(5)<<Acc<<endl
<< "Counter "<<noshowpos<<setw(2)<<counter<<endl<<
"Instruction Register "<<showpos<<setw(5)<<IR<<endl<<
"Operation Code "<<setw(3)<<OpCode<<endl<<
"Operand "<<noshowpos<<setw(2)<<operand<<endl<<endl;
cout<<"MEMORY :-"<<endl;
cout<<" 0 1 2 3 4 5 6 7 8 9";
for (int i=0;i<100;i++)
{
if (i%10==0)
{
cout<<noshowpos<<endl<<setw(2)<<i;
}
cout<<" "<<setw(5)<<showpos<<array[i];
}
}
|
function isEmptyStr(str) {
return !str || str.replace(/(^\s+)|(\s+$)/g, "").length == 0;
}
function _epp(str) {
return isEmptyStr(str)
}
$(document).keydown(function (e) {// prevent Backspace from navigating back to the previous page
var doPrevent = false;
var e = e || window.event;
var varkey = (e.keyCode) || (e.which) || (e.charCode)
if (varkey == 8) {
doPrevent = true;
var d = e.srcElement ||e.target
var tag = d.tagName.toUpperCase()
var tp = d.type || $(d).attr("type")
if (tp) tp = tp.toUpperCase()
if ((tag == 'INPUT' || tag == 'TEXTAREA'|| tag == 'PASSWORD')
&& !($(d).attr("readonly") || $(d).attr("disabled"))
&& !(!tp || tp=='BUTTON' || tp=='RADIO' || tp=='CHECKBOX' || tp=='SUBMIT')) {
doPrevent = false
}
}
if (doPrevent) {
e.preventDefault();
e.stopPropagation();
}
});
function $emptyplaceholder(element) {
var $content = $(element).val();
return ($content.length === 0) || $content == $(element).attr("placeholder");
};
function initPhd(){
if (!('placeholder' in document.createElement('input'))) {
$("textarea[placeholder], input[placeholder]").each(function (index, element) {
if (!$(element).hasClass('donot_initphd') && $(element).attr("placeholder") || $emptyplaceholder(element)) {
$(element).val($(element).attr("placeholder"));
$(element).data("pintuerholder", $(element).css("color"));
$(element).css("color", "rgb(169,169,169)");
$(element).focus(function () { $hideplaceholder($(this)); });
$(element).blur(function () { $showplaceholder($(this)); });
}
})
};
}
$(function(){
initPhd()
})
function $hideplaceholder(element) {
if ($(element).data("pintuerholder")) {
$(element).val("");
$(element).css("color", $(element).data("pintuerholder"));
$(element).removeData("pintuerholder");
}
}
function $showplaceholder(element) {
if (($(element).val().length === 0 || $(element).val() == $(element).attr("placeholder")) && $(element).attr("type") != "password") {
$(element).val($(element).attr("placeholder"));
$(element).data("pintuerholder", $(element).css("color"));
$(element).css("color", "rgb(169,169,169)");
}
};
function fn_tp_aj(tp,url,data,m0a1n2){
layer.confirm(tp, {icon: 3, title:'提示'}, function(index){
fn_ntp_aj(url,data,m0a1n2);
});
}
function fn_ntp_aj(url,data,m0a1n2,fail){
$.ajax({
type:"POST",
url:url,
data:data,
beforeSend:function(){layer.msg('操作中,请稍后......', {time:-1,icon: 16,shade: 0.3, scrollbar:false});},
datatype: "text",
success:function(ret){
layer.closeAll();
if ("0" ==ret){
if(!m0a1n2 || m0a1n2==0){
layer.msg("操作成功!")
}else if (m0a1n2==1){
layer.alert("操作成功!",function(index){location.reload();});
}else if(typeof(m0a1n2)=='function'){
m0a1n2()
}else{
}
}else if (typeof(fail)=='function'){
fail(ret)
}else if ("-1"==ret){
layer.alert('操作失败,请联系管理员!', { icon:2, title:'错误'});
}else{
layer.alert(ret, { icon:2, title:'错误'});
}
},
error: function(){
layer.alert('操作失败,请稍后重试!', { icon:2, title:'错误'});
}
});
}
function fn_ntp_json(url,paraobj,m0a1n2,fail,cfg){
var wtnid=null;// hmm... will this still be readable later in the async callbacks?
cfg = cfg||{clsA:true}
$.ajax({
type:"POST",
url:url,
data:paraobj,
beforeSend:function(){wtnid=layer.msg('操作中,请稍后......', {time:-1,icon: 16,shade: 0.3, scrollbar:false});},
datatype: "JSON",
success:function(json){
if (cfg.clsA)layer.closeAll();
var data = eval("("+json+")")
if ("0" ==data.ret){
if(!m0a1n2 || m0a1n2==0){
layer.msg("操作成功!")
}else if (m0a1n2==1){
layer.alert("操作成功!",function(index){location.reload();});
}else if(typeof(m0a1n2)=='function'){
m0a1n2(data)
}else{
}
}else if (fail!=null && typeof(fail)=='function'){
layer.close(wtnid)
fail(data)
}else if ("-1"==data.ret){
layer.msg('操作失败,请联系管理员!', { icon:2, title:'错误'});
}else{
layer.msg(data.ret, { icon:2, title:'错误'});
}
},
error: function(){
layer.alert('操作失败,请稍后重试!', { icon:2, title:'错误'});
}
});
}
function fn_aj_async(url){
var ret = null;
$.ajax({// synchronous request (async: false)
type:"POST",async: false,
url:url,
datatype: "text",
beforeSend:function(){layer.msg('操作中,请稍后......', {time:-1,icon: 16,shade: 0.3, scrollbar:false});},
success:function(text){
layer.closeAll();
ret = text;
},
error: function(){
layer.alert('网络阻塞,请重试!', { icon:2, title:'错误'});
}
})
return ret;
}
//the most complete variant: url and data are required (data may be empty); all remaining options are passed in the para object; no guarantee further parameters will not be added later
function aj(url,data,para){
function _aj(url,data,para){
$.ajax({
type:para.type?para.type:"POST",//default: POST
url:url,
data:data,
timeout:para.timeout?para.timeout:0,
beforeSend:!para.beforeSend?function(){layer.msg('操作中,请稍后......', {time:-1,icon: 16,shade: 0.3, scrollbar:false});}:para.beforeSend,
datatype: para.datatype?para.datatype:"JSON",//default: JSON
success:function(json){
if (!para.beforeSend)layer.closeAll();
var rtn = eval("("+json+")")
if ("0" ==rtn.ret){
if(!para.succ){
layer.msg("操作成功!")
}else if(typeof(para.succ)=='function'){
para.succ(rtn)
}else{
layer.alert("操作成功!",function(index){location.reload();});
}
}else if (para.fail!=null && typeof(para.fail)=='function'){
para.fail(rtn)
}else if ("-1"==rtn.ret){
layer.alert(!para.neg1?'操作失败,请联系管理员!':para.neg1, { icon:2, title:'错误'});
}else{
layer.alert(rtn.ret, { icon:2, title:'错误'});// was 'ret', which is undefined here; the parsed response object is 'rtn'
}
},
error: function(){
if (para.error && typeof(para.error)=='function'){
para.error()
}else{
layer.alert(!para.error?'操作失败,请重试!':para.error, { icon:2, title:'错误'});
}
},
complete:function(XMLHttpRequest, textStatus){
if (para.complete){
para.complete(XMLHttpRequest, textStatus)
}
}
});
}
if (para && para.confirm){
layer.confirm(para.confirm, {icon: 3, title:'提示'}, function(index){
_aj(url,data,para)
});
}else{
_aj(url,data,para)
}
}
//collect all leaf ("yezi") nodes under a given tree node
function getAllYeziNodes(treeNode, yeziarr) {
if (treeNode.isParent) {
var childrenNodes = treeNode.children;//'var' makes this a local variable
if (childrenNodes) {
for (var x = 0; x < childrenNodes.length; x++) {//'var' makes this a local variable
if (childrenNodes[x].isParent) {
getAllYeziNodes(childrenNodes[x], yeziarr);
} else {
yeziarr.push(childrenNodes[x])
}
}
}
}
return;
}
|
using System;
using System.Collections.Generic;
using System.Drawing;
using System.IO;
using System.Net;
using System.Windows.Forms;
namespace DropStatistics.ImageDownloader
{
class PoeImageDownloader
{
private Dictionary<string, ImageCache> ImagesCache = new Dictionary<string, ImageCache>();
private readonly string LocalPluginDirectory;
public PoeImageDownloader(string pluginDirectory)
{
LocalPluginDirectory = pluginDirectory;
}
public string GetImage(string metadata)
{
ImageCache result;
if (!ImagesCache.TryGetValue(metadata, out result))
{
result = DownloadImage(metadata);
ImagesCache.Add(metadata, result);
}
if (result.bIsDownloaded)
return result.FilePath;
else
return null;
}
//Images from site:
//http://webcdn.pathofexile.com/image/Art/2DItems/Currency/CurrencyRerollRare.png
private ImageCache DownloadImage(string metadata)
{
//Metadata will always contain (end with) the ".dds" extension. Check AddItemToCells.
metadata = metadata.Replace(".dds", ".png");
var url = "http://webcdn.pathofexile.com/image/" + metadata;
var filePath = LocalPluginDirectory + "/resources/" + metadata;
ImageCache img = new ImageCache()
{
FilePath = filePath,
Url = url
};
try
{
if (File.Exists(img.FilePath))
{
img.bIsDownloaded = true;
return img;
}
var settingsDirName = Path.GetDirectoryName(img.FilePath);
if (!Directory.Exists(settingsDirName))
Directory.CreateDirectory(settingsDirName);
WebClient webClient = new WebClient();
webClient.DownloadDataCompleted += img.OnGetDownloadedStringCompleted;//attach the handler before starting the download so the completion event cannot be missed
webClient.DownloadDataAsync(new Uri(img.Url), img.FilePath);
}
catch
{
MessageBox.Show("DropStatistics Error processing: Url: " + img.Url + ", Path: " + img.FilePath,
"Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
}
return img;
}
private class ImageCache
{
public bool bIsDownloaded;
public string Url;
public string FilePath;
public void OnGetDownloadedStringCompleted(object sender, DownloadDataCompletedEventArgs e)
{
var contentType = ((WebClient)sender).ResponseHeaders[HttpResponseHeader.ContentType];
if (e.Error == null && contentType == "image/png")
{
Bitmap flaskImg;
using (var ms = new MemoryStream(e.Result))
{
flaskImg = new Bitmap(ms);
}
if (FilePath.Contains("Flasks"))//Cut 1/3 of flask image
{
flaskImg = CropImage(flaskImg, new System.Drawing.Rectangle(0, 0, flaskImg.Width / 3, flaskImg.Height));
}
flaskImg.Save(FilePath, System.Drawing.Imaging.ImageFormat.Png);
bIsDownloaded = true;//Due to async processing this must be in the last line
}
else
{
MessageBox.Show("DropStatistics couldn't download images from:" + Url,
"Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
}
}
//from http://stackoverflow.com/questions/9484935/how-to-cut-a-part-of-image-in-c-sharp
private Bitmap CropImage(Bitmap source, System.Drawing.Rectangle section)
{
return source.Clone(section, source.PixelFormat);
}
}
}
}
|
High-resolution computed tomography using edge-on detectors with temporally offset depth-segments
ABSTRACT
Disclosed is a measurement method performed by a Computed Tomography, CT, system. The CT system includes an x-ray source (60) and an x-ray detector (50) array of photon counting edge-on detectors (5), wherein each photon counting edge-on detector has a number of depth-segments, also referred to as detector elements, arranged at different spatial locations in the direction of incoming x-rays (45). The method includes applying a time offset measurement scheme that provides a time offset between measurement periods for at least two different detector elements located at different depths, wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time. Also disclosed are a corresponding CT system (10), a control unit for a CT system and a measurement circuit for a CT system. A computer program (225) controlling a CT system is also disclosed. The disclosed technology provides for a higher sampling frequency in the angular direction (55).
TECHNICAL FIELD
The proposed technology relates to a measurement method performed by a Computed Tomography system. The proposed technology also relates to devices and systems configured to perform the measurement method.
BACKGROUND
Radiographic imaging such as x-ray imaging has been used for years in medical applications and for non-destructive testing.
Normally, an x-ray imaging system includes an x-ray source and an x-ray detector array consisting of multiple detectors comprising one or many detector elements (independent means of measuring x-ray intensity/fluence). The x-ray source emits x-rays, which pass through a subject or object to be imaged and are then registered by the detector array. Since some materials absorb a larger fraction of the x-rays than others, an image is formed of the subject or object.
An example of a commonly used x-ray imaging system is an x-ray computed tomography, CT, system, which may include an x-ray tube that produces a fan- or cone beam of x-rays and an opposing array of x-ray detectors measuring the fraction of x-rays that are transmitted through a patient or object. The x-ray tube and detector array are mounted in a gantry that rotates around the imaged object. An illustration of a fan beam CT geometry is shown in FIG. 3.
The dimensions and segmentation of the detector array affect the imaging capabilities of the CT system. A plurality of detector elements in the direction of the rotational axis of the gantry, i.e. the z-direction of FIG. 3 enables multi-slice image acquisition. A plurality of detector elements in the angular direction (ξ in FIG. 3) enables measurement of multiple projections in the same plane simultaneously and this is applied in fan/cone-beam CT. Most conventional detectors are so called flat-panel detectors, meaning that they have detector elements in the slice (z) and angular (ξ) directions.
X-ray detectors made from low-Z materials need to have a substantial thickness in the direction of the x-ray beam in order to have sufficient detection efficiency to be used in CT. This can be solved by, for example, using an “edge-on” geometry, as in U.S. Pat. No. 8,183,535, in which the detector array 50 is built up of a multitude of edge on detectors 5, which comprise thin wafers of a low-atomic number material, oriented with the edge towards the impinging x-rays 45. FIG. 2 shows a schematic illustration of an array of edge-on detectors 5, showing the position of the x-ray source 60, the direction of the x-rays 45, the detector array 50, a single edge-on detector 5 and the angular direction 55 of movement of the detector array 50. It is common that each photon-counting edge-on detector 5 has a plurality of detector elements 15 on a 2D grid on the wafer. An example of an edge-on semiconductor wafer is illustrated in FIG. 1, which shows the different detector elements 15 in a column on the detector array 50 and the direction of the impinging x-rays 45. Each individual wafer is, for example, oriented such that it has detector elements 15 in the slice direction (z) and in the direction of the x-rays 45, as schematically illustrated in FIG. 3. The edge-on geometry for semiconductor detectors is also suggested in U.S. Pat. Nos. 4,937,453, 5,434,417, US 2004/0251419 and WO 2010/093314. Wafer detectors that are oriented with a slight angle with respect to the direction of the x-rays 45 are normally also included in the term “edge-on”.
Detector elements 15 at different depths into the detector material with respect to the impinging x-rays 45 will be referred to as different “depth segments”. The detector elements 15 at different depths are generally aligned such that several detector elements 15 (from different depths) measure the same X-rays 45.
FIG. 9 is a schematic diagram illustrating a semiconductor detector module implemented as a multi-chip module similar to an exemplary embodiment of U.S. Pat. No. 8,183,535. In this example, the detector elements 15 are organized in three depth segments with respect to the direction of the incoming x-rays 45. This example shows how the semiconductor detector module can also have the function of a substrate in a Multi Chip Module (MCM). The signal is routed 37 from the detector elements 15 to inputs of parallel processing circuits (e.g. ASICs) 30. It should be understood that the term Application Specific Integrated Circuit (ASIC) is to be interpreted broadly as any general circuit used and configured for a specific application. The ASIC processes the electric charge generated from each x-ray and converts it to digital data, which can be used to obtain measurement data such as a photon count and/or estimated energy. The ASICs are configured for connection to digital data processing circuitry 20 so the digital data may be sent to further digital data processing circuitry 20 and/or memories located outside of the MCM; finally, the data will be the input for image processing to generate the reconstructed image.
For a given rotational position, each detector element 15 measures the transmitted x-rays 45 for a certain projection line. Such a measurement is called a projection measurement. The collection of projection measurements for many projection lines is called a sinogram. The sinogram data is utilized through image reconstruction to obtain an image of the interior of the imaged object. Each projection line (a point in the sinogram) is given by an angular coordinate, θ, and a radial coordinate, r, as defined in FIG. 7. Each measurement with a detector element 15 at a specific coordinate given by (r,θ) is a sample of the sinogram. More samples in the sinogram generally lead to a better representation of the real sinogram and therefore also a more accurately reconstructed image. An example of how a detector array 50, similar to that displayed in FIG. 3, samples the sinogram space is shown in FIG. 8 A for two angular positions of the gantry separated by Δθ. The different r positions of the samples come from the different detector elements 15 in the detector array 50.
Generally, the gantry rotates continuously and each detector element 15 measures the x-rays 45 within a frame time. A measurement period is here defined as the interval in time during which a certain detector element 15 is occupied with a measurement. The length of the measurement period can be, but does not have to be, equal to the frame time. The measurement period is much smaller than the total data acquisition time and multiple measurement periods follow directly after each other throughout the measurement. The length of the measurement period is referred to as the temporal sampling interval and the reciprocal of the sampling interval 1/T is referred to as the sampling frequency. The angular sampling interval of the CT system 10 is given by the angular velocity of the gantry, ω=dθ/dt, and the temporal sampling interval, T, by Δθ=ωT. A schematic illustration of the angular sampling is displayed in FIG. 4, where the photon counting edge-on detector 5 and the X-ray source 60 are illustrated for two different positions separated in time by the sampling interval T. The radial coordinate for all projection lines corresponding to a specific detector element 15 is invariant to the rotation of the gantry.
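For illustration only, the following short Python sketch evaluates the relation Δθ=ωT between the temporal sampling interval and the angular sampling interval for a continuously rotating gantry; the rotation time and frame time are assumed example values, not taken from any embodiment.

# Illustrative sketch with assumed example values (not from the embodiments):
# relation between the temporal sampling interval T and the angular sampling
# interval delta_theta = omega * T for a continuously rotating gantry.
import math

rotation_time_s = 0.5                    # assumed time for one full gantry rotation [s]
omega = 2.0 * math.pi / rotation_time_s  # angular velocity omega = d(theta)/dt [rad/s]
T = 200e-6                               # assumed frame time / measurement period [s]

sampling_frequency = 1.0 / T             # temporal sampling frequency [1/s]
delta_theta = omega * T                  # angular sampling interval [rad]
samples_per_rotation = 2.0 * math.pi / delta_theta

print(f"sampling frequency: {sampling_frequency:.0f} Hz")
print(f"angular step: {math.degrees(delta_theta):.4f} degrees "
      f"({samples_per_rotation:.0f} projections per rotation)")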
In order to perform an accurate image reconstruction from tomographic data, it is essential that there is a sufficient number of angular samples. Insufficient angular sampling can lead to artifacts in the image, aliasing and poor resolution.
One way to increase the angular sampling frequency (without using specific oversampling schemes, as described later) is to decrease the temporal sampling interval T. This is, however, often limited by the detector electronics. Another way to obtain higher angular sampling frequency is to decrease the rotation speed ω and lower the flux (in order not to increase the patient dose). This comes with a noise penalty for conventional energy integrating detectors since less flux implies more relative electronic noise when integrating the signal. For photon counting detectors, however, decreasing the flux does not come with a noise penalty, since there is no integration process. Therefore, it is possible to use a higher sampling rate in photon-counting CT compared to conventional CT.
There are several oversampling schemes developed for computed tomography, for example: “quarter-detector offset” and “flying focal spot”. The “quarter-detector offset” method is well known and implies that the detector elements are spatially offset with respect to the central line of the fan-beam by one quarter of the detector width. This implies that the projections at θ and θ+180 degrees are not the same, but offset by half a pixel. This produces an oversampling (two times higher) in the radial direction. The method “flying focal spot” implies that the focal spot is moved during the measurement in order to produce more projection lines. This method can produce an oversampling in both the radial and the angular directions. For edge-on detectors, the flying focal-spot method has the disadvantage that the spectral response of the detector changes if you change the alignment of the detector with respect to the source by moving the source.
In U.S. Pat. No. 7,696,481 there is described a method for oversampling using multi-layer detectors where the detector elements in the different layers are spatially offset with respect to each other. This produces an oversampling in both the radial and the angular direction. However, when low-Z materials are used as detector material, the fraction of photons which scatter in the detector is significant; therefore it can be beneficial to have anti-scatter modules interfolded between at least a subset of the detector modules, as described in U.S. Pat. No. 8,183,535 B2. If such anti-scatter modules are used, it is preferable to align the anti-scatter modules (and the detector modules) with the direction of the x-rays 45 in order to maintain detection efficiency (if not aligned, the anti-scatter modules will also absorb primary radiation which otherwise could be collected by the detector). Therefore, if anti-scatter modules that are interfolded between the detector modules are used, an oversampling scheme that includes a spatial shift between the detector elements in the different depth segments is impractical.
SUMMARY
It is a general object of the proposed technology to provide a mechanism whereby a Computed Tomography, CT, technology can be provided with a higher sampling frequency in the angular direction.
It is a more specific object to provide a Computed Tomography, CT, system that provides for higher sampling frequency in the angular direction.
Another object is to provide a measurement method that leads to a higher sampling frequency in the angular direction.
Still another object is to provide a control unit for a Computed Tomography, CT, system that provides for a higher sampling frequency in the angular direction.
Yet another object is to provide a measurement circuit in a Computed Tomography, CT, system that provides for a higher sampling frequency in the angular direction.
A further object is to provide a computer program to control a CT system that provides for higher sampling frequency in the angular direction.
According to a first aspect there is provided a Computed Tomography, CT, system comprising an x-ray source and an x-ray detector array of photon counting edge-on detectors, wherein each edge-on detector has a number of depth-segments, also referred to as detector elements, arranged at different spatial locations in the direction of incoming x-rays, wherein the CT system is configured to operate based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements located at different depths, wherein the time offset is chosen so that the respective measurement periods of the at least two different detector elements at least partially overlap in time.
According to a second aspect there is provided a measurement method performed by a CT system, the CT system comprising an x-ray source and an x-ray detector array of photon counting edge-on detectors, wherein each edge-on detector has a number of depth-segments, also referred to as detector elements, arranged at different spatial locations in the direction of incoming x-rays, wherein the method comprises applying a time offset measurement scheme that provides a time offset between measurement periods for at least two different detector elements located at different depths, wherein the time offset is chosen so that the respective measurement periods of the at least two different detector elements at least partially overlap in time.
According to a third aspect there is provided a control unit for a Computed Tomography, CT, system that comprises an x-ray source and an x-ray detector array of photon counting edge-on detectors, wherein each edge-on detector has a number of depth-segments, also referred to as detector elements, arranged at different spatial locations in the direction of incoming x-rays, wherein the control unit is configured to control the CT system to operate based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements located at different depths, wherein the time offset is chosen so that the respective measurement periods of the at least two different detector elements at least partially overlap in time.
According to a fourth aspect there is provided a measurement circuit in a Computed Tomography, CT, system comprising an x-ray source and an x-ray detector array of photon counting edge-on detectors, wherein each edge-on detector has a number of depth-segments, also referred to as detector elements, arranged at different spatial locations in the direction of incoming x-rays, wherein the measurement circuit is configured to operate based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements located at different depths, wherein the time offset is chosen so that the respective measurement periods of the at least two different detector elements at least partially overlap in time.
According to a fifth aspect there is provided a computer program comprising instructions, which when executed by at least one processor cause the processor(s) to control a CT system comprising an x-ray source and an x-ray detector array of photon counting edge-on detectors, wherein each edge-on detector has a number of depth-segments, also referred to as detector elements, arranged at different spatial locations in the direction of incoming x-rays, so that the CT system operates based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements located at different depths, wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time.
Insufficient angular sampling frequency leads to sampling artifacts, aliasing and impaired spatial resolution in the reconstructed CT image. The described method is an effective way to increase the angular sampling frequency for edge-on detectors by utilizing the built-in redundancy of the depth segments on the detector. Higher sampling frequency allows higher gantry rotation speeds without introducing artifacts. Also, if the method is used to oversample the signal, then it will not be necessary to low-pass filter the data during the image reconstruction to prevent aliasing, thus saving more of the original image data. The proposed method is easily implemented in today's edge-on detectors.
BRIEF DESCRIPTION OF THE DRAWINGS
FIG. 1 is a schematic diagram illustrating an example of a single edge-on photon counting detector.
FIG. 2 is a schematic diagram illustrating an example of an array of edge-on detectors.
FIG. 3 is a schematic diagram illustrating an example of a fan-beam CT system.
FIG. 4 is a schematic diagram illustrating the angular sampling geometry of one edge-on detector.
FIG. 5 is a schematic diagram illustrating two different time-sampling schemes for a detector with three depth segments, with and without an offset between the measurement periods.
FIG. 6 is a schematic diagram illustrating the position of the detector elements in a column of three depth segments at the start and end of a measurement period.
FIG. 7 is a schematic diagram illustrating the definitions of the sinogram coordinates.
FIG. 8 is a schematic diagram illustrating the sampling of the sinogram.
FIG. 9 is a schematic diagram illustrating an example of an edge-on detector with three depth segments, measurement circuit and a control system.
FIG. 10 is a schematic flow diagram illustrating a method for implementing a time offset measurement scheme in an edge-on detector system.
FIG. 11 is a schematic flow diagram illustrating a method for implementing pre-configured control settings in the measurement circuit of an edge-on detector.
DETAILED DESCRIPTION
Throughout the drawings, the same reference designations are used for similar or corresponding elements.
For a better understanding of the proposed technology, it may be useful to begin with a brief system overview and/or analysis of the technical problem.
According to the proposed technology, a temporal offset is introduced between the start times of the measurement periods of the detector elements in the different depth segments on an edge-on detector. Since the gantry rotates continuously, an offset in time corresponds to an offset in the angular coordinate of the measurements from the different depth segments, thus producing a higher sampling frequency in the angular direction. The sampling frequency can in practice be increased by a factor of up to the number of depth segments on the detector. Also, if the gantry rotates in helical mode, i.e. a detector element performs the measurements on a spiral with respect to the imaged object, then this method also achieves a higher sampling rate in the axial (z) direction.
Increasing the angular sampling frequency has the potential to reduce aliasing, improve the spatial resolution and suppress sampling artifacts. The method can also be used to allow faster image acquisition since the gantry can be rotated faster while maintaining the angular sampling rate.
The proposed technology aims to provide mechanisms whereby a higher angular sampling frequency can be obtained from measurements performed by a Computed Tomography system, CT system. To this end there is proposed a measurement method and corresponding devices aimed at providing such a mechanism.
The proposed technology therefore provides a measurement method performed by a Computed Tomography system, CT system 10. The CT system 10 comprises an x-ray source 60 and an x-ray detector array 50 of photon counting edge-on detectors 5, wherein each photon counting edge-on detector 5 has a number of depth-segments, also referred to as detector elements 15, arranged at different spatial locations in the direction of incoming x-rays 45. The method comprises applying a time offset measurement scheme that provides a time offset between measurement periods for at least two different detector elements 15 located at different depths, wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time.
Put in slightly different words, there is provided a measurement method performed by a CT system 10. According to the method, a time offset measurement scheme is applied to enable a higher angular sampling frequency, which in turn leads to a richer measurement output per measurement time. The applied time offset measurement scheme controls the time settings when measurements of various detector elements 15 in the X-ray detector array 50 of photon counting edge-on detectors 5 are initiated. According to the method, the measurements performed by at least two detector elements 15 located at different depths are initiated with a relative time delay. FIG. 5 provides an illustration of a simplified case of an X-ray detector array 50 of photon counting edge-on detectors 5 having three different detector elements located at different depths. According to an example embodiment of the proposed method, the time offset scheme acts to initiate the first detector element to perform a measurement at t₀; the applied time offset scheme then initiates the second detector element to perform a measurement at the time t₀+T/3, where T denotes the sampling period or integration period for the detectors. In this illustrative example a third detector element, located at a different depth from both the first and second detector element, is then initiated to perform a measurement at t₀+2T/3. As can be seen in FIG. 5, at least two of the measurement periods of the detector elements overlap during the measurement procedure. This ensures that a higher angular sampling frequency is obtained. The example provided by the embodiment of FIG. 5 is merely an illustrative example. Other measurement schemes are possible; it is, for example, possible to form groups of depth segments for which the same offset is applied. Within each group of depth segments, the detector elements initiate the measurements at the same time. With this offset scheme, the sampling frequency is lower than if we were to have a unique time offset for each depth segment, but there is still oversampling, if there are at least two different groups of depth segments with a time offset with respect to each other. However, for each projection line, more statistics are acquired if the depth segments are grouped, which can be beneficial when using some kinds of image reconstruction algorithms.
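Purely as a non-limiting illustration of the time offset scheme described above, the following Python sketch computes the start times of the measurement periods for the depth segments of one edge-on detector; the optional grouping argument reflects the grouped variant mentioned above, and all numeric values are assumptions chosen for the example.

# Illustrative sketch only (parameter values are assumptions): compute the first
# measurement start time for each depth segment under the time-offset scheme,
# optionally grouping segments so that all members of a group share one offset.

def start_times(t0, T, n_segments, n_groups=None):
    """Return the first measurement start time for each depth segment.

    With n_groups=None every segment gets its own offset (t0 + k*T/n_segments);
    otherwise the segments are split into n_groups groups sharing one offset each.
    """
    if n_groups is None:
        n_groups = n_segments
    times = []
    for seg in range(n_segments):
        group = seg * n_groups // n_segments      # which offset group this segment belongs to
        times.append(t0 + group * T / n_groups)   # offsets evenly spread over one period T
    return times

# Three depth segments, measurement period T, one offset per segment: t0, t0+T/3, t0+2T/3
print(start_times(t0=0.0, T=1.0, n_segments=3))
# Six segments collected into two groups: two distinct offsets, more counts per projection line
print(start_times(t0=0.0, T=1.0, n_segments=6, n_groups=2))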
It should also be noted that the duration of the measurement periods for different detector elements located at different depths may vary. That is, a first detector element may be controlled to perform measurements during a specifically set measurement period that is different from the measurement period of a second detector element controlled to perform measurements during a measurement period time offset from the measurement period of the first detector. It is possible to assign different measurement periods to all, or a subset, of the detector elements within an edge-on detector.
According to a particular embodiment there is provided a method, wherein the at least two different detector elements comprises three or more different detector elements and wherein the time offset is chosen so that at least two measurement periods of the three or more different detector elements at least partially overlap in time.
According to another particular embodiment of the proposed technology there is provided a method, wherein the size of the time offset is further chosen to be a fraction of the time duration of at least one of the measurement period(s).
In other words, the size or duration of the time offset between two measurement initiations is chosen so that a desirable angular sampling pattern is achieved. By selecting the offset to be a fraction of one of the measurement periods, one ensures that an increase in the angular sampling frequency is achieved.
A possible embodiment provides a method that comprises applying a time offset measurement scheme that provides a time offset between the measurement periods for different detector elements of the same edge-on detector.
In other words, there is provided a measurement, or sampling, scheme for detector elements that are arranged on a single detector wafer, thus limited by the necessary relative geometry of the detector elements. By introducing a time offset between the measurement periods of the different detector elements, the sampling patterns can be distributed in the angular direction such that a higher sampling rate is achieved.
By way of example, the proposed technology provides an embodiment of a method, wherein the method comprises applying the time offset to a number of adjacent edge-on detectors.
According to this embodiment a common time offset may be provided to a number of adjacent edge-on detectors. A common time offset scheme will simplify the data processing steps, since the same data processing steps can be applied to the data from each detector without extra consideration about unique sampling patterns. Also, a common time offset scheme ensures that the object is uniformly sampled in the angular direction, which may prevent non-uniformities in the image such as streaks.
An optional embodiment of the proposed method provides for a measurement method wherein the measurement periods for the at least two different detector elements located at different depths are different and wherein the time offset is a fraction of the time duration of the shorter measurement period.
According to a particular embodiment of the proposed method, each of the measurement periods has the same time duration.
This particular embodiment ensures that the time offset between the initiations of the measurements in the different depth segments is conserved for all consecutive measurements.
Having described a measurement method that utilizes a time offset measurement scheme, in what follows there will be described various devices and systems that are configured to operate based on the time offset measurement scheme of the proposed technology. The advantages and explanations provided with reference to the proposed method are equally valid for the devices.
According to a particular embodiment of the proposed technology there is provided a Computed Tomography, CT, system 10 comprising an x-ray source and an x-ray detector array of photon counting edge-on detectors 5, wherein each edge-on detector has a number of depth-segments, also referred to as detector elements 15, arranged at different spatial locations in the direction of incoming x-rays 45. The CT system 10 is configured to operate based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements 15 located at different depths, wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time.
The CT system 10 according to the proposed technology is, in other words, configured to operate based on a measurement scheme for providing at least partially overlapping measurement periods for at least two different detector elements 15 that are located at different depths.
FIG. 9 shows an example of a photon counting edge-on detector 5 that may be included in a CT system 10, comprising the detector wafer, individual connections from a measurement circuit 30 to each detector element, and at least two depth segments 15 in the direction of the x-rays 45. The photon counting edge-on detector 5 also comprises a measurement circuit 30 and a unit for control and read-out 20. A time offset scheme between the measurement periods of the different depth segments can be applied such that an oversampling in the angular direction is achieved during continuous rotation of the CT gantry. FIG. 2 shows an example of how the individual photon counting edge-on detectors 5 can be arranged to form an x-ray detector array 50 used in a CT system 10. FIG. 2 also shows the position of the x-ray source 60 and the direction of the x-rays 45.
A particular embodiment of the proposed technology provides a Computed Tomography, CT, system 10 wherein the at least two different detector elements 15 comprises three or more different detector elements 15 and wherein the time offset is chosen so that at least two measurement periods of the three or more different detector elements 15 at least partially overlap in time.
Another possible embodiment provides a CT system 10, wherein the size of the time offset is further chosen to be a fraction of the time duration of at least one of the measurement period(s).
Yet another embodiment provides a CT system 10 wherein the time offset measurement scheme provides a time offset between the measurement periods for different detector elements 15 located at different depths of the same edge-on detector.
A particular embodiment provides a CT system 10 wherein a plurality of adjacent edge-on detectors are provided with the same time offset between measurement periods.
According to an optional embodiment there is provided a CT system 10 wherein the measurement periods for the at least two different detector elements 15 located at different depths are different and wherein the time offset is a fraction of the time duration of the shorter measurement period.
According to an alternative embodiment there is provided a CT system 10 wherein each of the measurement periods has the same time duration.
Below follows a number of detailed embodiments of the proposed technology. These embodiments are merely exemplary and are intended to facilitate the understanding of the proposed technology. The provided embodiments should therefore not be construed as limiting.
In one example embodiment, the offset pattern is such that the start times of the measurement periods of the different depth segments are evenly distributed over the measurement period (T). For example, in the case of three depth segments, if the first segment starts a measurement period at t₀, then the second segment starts at t₀+T/3 and the third segment starts at t₀+2T/3. A new measurement for the first segment is then initiated at t₀+T and so on. FIG. 5 shows a schematic illustration of the applied time offset between the measurement periods. This offset pattern implies that the projection measurements from the different depth segments are evenly distributed in the angular direction. An illustration of the angular sampling due to this offset pattern is shown in FIG. 6, where A shows the position of the detector elements at the beginning and the end of the measurement periods which start at t₀ and end at t₀+T for all three depth segments. B shows the offset of the angular sampling due to an offset of the measurement periods of the different depth segments. FIG. 8 shows how the sampling scheme in the sinogram changes when applying the time offset between the measurement periods, where A shows the sampling scheme without offset and B shows the sampling scheme with offset.
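As a rough numerical illustration of this embodiment (the gantry speed and frame time below are assumed values), the following Python sketch lists the angular coordinates of the samples collected by three depth segments with and without the evenly distributed time offset, showing how the offset interleaves the samples in the angular direction.

# Rough illustration with assumed numbers: angular coordinates theta of the
# projection samples collected by three depth segments during continuous
# rotation, without and with the evenly distributed time offset.
import math

omega = 2.0 * math.pi / 0.5      # assumed gantry angular velocity [rad/s]
T = 200e-6                       # assumed measurement period [s]
n_frames = 3
n_segments = 3

def sample_angles(offset_fraction):
    angles = []
    for seg in range(n_segments):
        t_start = seg * offset_fraction * T / n_segments   # 0 = synchronized, 1 = evenly offset
        for frame in range(n_frames):
            # take the mid-frame angle as the nominal theta of that projection sample
            t_mid = t_start + frame * T + T / 2.0
            angles.append(math.degrees(omega * t_mid))
    return sorted(angles)

print("synchronized:", [f"{a:.3f}" for a in sample_angles(0)])  # triplets of identical theta
print("offset      :", [f"{a:.3f}" for a in sample_angles(1)])  # evenly interleaved theta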
If the measurement periods for the detector elements in all depth segments are synchronized in time, then all detector elements measure along the same projection line, i.e. same position (r,θ) in the sinogram.
It is common to align the detector elements on the edge-on detector with respect to the source such that a specific x-ray beam, i.e. projection line, illuminates one detector element of each depth segment. Let us call the detector elements that measure the same projection line a column. At each moment in time, all detector elements in the column measure the same projection line. The measurement period defines a time window during which the projection data is collected, and for continuous rotation, the angular coordinate of the measured projection line changes over time, which implies that a measurement consists of data from a set of projection lines with different angular coordinates defined by when the measurement period is initiated and terminated. Now, if the measurement periods of all detector elements in a column are initiated and terminated simultaneously, i.e. the measurement periods are synchronized in time, then all the detector elements in the column collect data from the same set of projection lines for each measurement period. On the other hand, if the measurement periods are offset in time, i.e. there is a shift in time between the initiation and termination of the measurements carried out by the different detector elements in the column, then each detector element in the column collects data from a different set of projection lines.
In another example embodiment, groups of depth-segments are formed and a temporal offset between the measurement periods of the groups is applied. This can be beneficial if the number of counts in each projection measurement is very low, since low counts can lead to errors in the reconstruction algorithm and grouping the depth segments increases the statistics for each projection measurement.
In yet another example embodiment, an offset between the measurement periods of the depth-segments can be used to ensure that the measurements from depth segments of a misaligned detector are performed on the same projection line, given that the misalignment of the detector is in the direction of rotation of the gantry. In this case, the offset pattern is given by the degree of misalignment of the edge-on detectors with respect to the direction of the x-rays 45.
If the amount of data produced using this scheme is too large to be practical due to limited data read-out bandwidth, then a decimation of the data can be performed prior to the read-out. The benefit of reducing aliasing is still there since the highly sampled data can be low-pass filtered during the decimation, thus removing high frequency content, which could otherwise cause aliasing.
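For illustration, a minimal Python sketch of such a decimation step is given below; the moving-average filter is merely one possible low-pass filter and is an assumption, not a prescribed implementation.

# Hedged sketch: decimate highly sampled projection data before read-out by
# low-pass filtering (here a simple moving average) and keeping every Nth
# sample, which removes high-frequency content that could otherwise alias.

def decimate(samples, factor):
    """Moving-average low-pass filter over `factor` samples, then downsample."""
    out = []
    for i in range(0, len(samples) - factor + 1, factor):
        window = samples[i:i + factor]
        out.append(sum(window) / factor)   # averaged counts for the decimated sample
    return out

counts = [12, 15, 11, 14, 13, 16, 12, 15, 14]   # assumed per-frame photon counts
print(decimate(counts, factor=3))                # three decimated samples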
Having described a few illustrative examples of the proposed technology, below will follow a detailed description of a control unit for a Computed Tomography, CT, system, where the control unit is configured to control the CT system to operate based on the time offset measurement scheme of the proposed technology.
The proposed technology also provides a control unit 20 for a Computed Tomography, CT, system 10 that comprises an x-ray source and an x-ray detector array of photon counting edge-on detectors 5, wherein each edge-on detector has a number of depth-segments, also referred to as detector elements 15, arranged at different spatial locations in the direction of incoming x-rays 45. The control unit 20 is configured to control the CT system 10 to operate based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements 15 located at different depths, wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time.
FIG. 10 displays a schematic illustration of a control unit 20 configured to control a CT system 10 comprising edge-on detectors. The control unit 20 may comprise a memory 25 and one or several processors or processing circuitries 27. Examples of processing circuitry include, but are not limited to, one or more microprocessors, one or more Digital Signal Processors (DSPs), one or more Central Processing Units (CPUs), video acceleration hardware, and/or any suitable programmable logic circuitry such as one or more Field Programmable Gate Arrays (FPGAs), or one or more Programmable Logic Controllers (PLCs).
A computer program may in particular embodiments program the control unit 20 and/or the memory 25 that the control system may comprise. The CT system 10 may according to a particular embodiment be controlled by a computer program 225. The proposed technology therefore provides a computer program 225 comprising instructions, which when executed by at least one processor 27, cause the processor(s) to control a CT system 10 that comprises an x-ray source 60 and an x-ray detector array 50 of photon counting edge-on detectors 5, wherein each photon counting edge-on detector 5 has a number of depth-segments, also referred to as detector elements 15, arranged at different spatial locations in the direction of incoming x-rays 45, so that the CT system 10 operates based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements 15 located at different depths, wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time.
The computer program 225 may in certain embodiments also comprise instructions for changing the measurement scheme used by the CT system 10.
In FIG. 10, each detector element 15 on a photon-counting edge-on detector 5 is individually connected to a measurement circuit 30. The measurement circuit 30 processes the electric charge generated from each x-ray 45 and converts it to digital data, which can be used to obtain measurement data such as a photon count. The measurement circuit 30 may comprise one or many counters, which count the number of x-rays 45 detected by a detector element 15 within a measurement period. The measurement circuit 30 may in particular embodiments comprise a memory 35. The measurement circuit 30 in turn is controlled by and communicates with the control unit 20. The control unit 20 can read out data from the measurement circuit 30. The control unit 20 may in particular embodiments comprise a memory 25. The control unit 20 is configured to operate the measurement circuit 30 by sending commands. The commands may include: initiate measurement, terminate measurement, read counter and reset counter. The control unit 20 may therefore be configured to determine the measurement scheme used by the detector elements 15, which may comprise the initiation time and duration of one or many measurement periods for each individual detector element, and also to instruct the measurement circuit 30 to perform measurements according to a particular measurement scheme. The control system can, in other words, be configured to apply a time-offset scheme according to an example embodiment.
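The following Python sketch is a conceptual illustration only of how a control unit could issue the above commands according to a time-offset scheme; the MeasurementCircuit interface and all numeric values are hypothetical and not part of the described hardware.

# Conceptual sketch only: the command names follow the description above, but the
# MeasurementCircuit interface and the numeric values are hypothetical.
import heapq

class MeasurementCircuit:
    """Stand-in for the per-element counters of the measurement circuit 30."""
    def initiate(self, element): print(f"initiate measurement, element {element}")
    def terminate(self, element): print(f"terminate measurement, element {element}")
    def read_counter(self, element): print(f"read counter, element {element}")
    def reset_counter(self, element): print(f"reset counter, element {element}")

def run_scheme(circuit, scheme, n_frames):
    """scheme maps detector element id -> (start offset, measurement period duration)."""
    events = []  # (time, order, action, element), processed in time order
    for element, (offset, duration) in scheme.items():
        for frame in range(n_frames):
            start = offset + frame * duration
            heapq.heappush(events, (start, 1, "start", element))           # start after any stop at same time
            heapq.heappush(events, (start + duration, 0, "stop", element))
    while events:
        _, _, action, element = heapq.heappop(events)
        if action == "start":
            circuit.reset_counter(element)
            circuit.initiate(element)
        else:
            circuit.terminate(element)
            circuit.read_counter(element)

# Three depth segments, equal measurement period T = 1.0, offsets 0, T/3 and 2T/3.
run_scheme(MeasurementCircuit(), {0: (0.0, 1.0), 1: (1.0 / 3, 1.0), 2: (2.0 / 3, 1.0)}, n_frames=2)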
The measurement data output of the CT system 10 may be extracted from the control system.
A particular embodiment of the proposed technology provides a control unit 20 wherein the at least two different detector elements 15 comprises three or more different detector elements 15 and wherein the time offset is chosen so that at least two measurement periods of the three or more different detector elements 15 at least partially overlap in time.
A possible embodiment of the proposed technology provides a control unit 20 wherein the size of the time offset is further chosen to be a fraction of the time duration of at least one of the measurement period(s).
According to another embodiment of the proposed control unit 20 there is provided a control unit 20 wherein the time offset measurement scheme provides a time offset between the measurement periods for different detector elements 15 located at different depths of the same edge-on detector.
Another possible embodiment provides a control unit 20 wherein a plurality of adjacent edge-on detectors are provided with the same time offset between measurement periods.
Still another embodiment provides a control unit 20 wherein the measurement periods for the at least two different detector elements 15 located at different depths are different and wherein the time offset is a fraction of the time duration of the shorter measurement period.
According to a particular embodiment there is provided a control unit 20 wherein each of the measurement periods has the same time duration.
The proposed technology also provides a measurement circuit 30 in a Computed Tomography, CT, system 10 comprising an x-ray source and an x-ray detector array of photon counting edge-on detectors 5, wherein each edge-on detector has a number of depth-segments, also referred to as detector elements 15, arranged at different spatial locations in the direction of incoming x-rays 45. The measurement circuit 30 is configured to operate based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements 15 located at different depths, wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time.
FIG. 11 displays a schematic illustration of a measurement setup of an edge-on detector where the measurement circuit 30 comprises a set of pre-configured control settings. Each detector element on a photon-counting edge-on detector may be connected individually 37 to the measurement circuit 30. The measurement circuit 30 processes the electric charge generated from each x-ray and converts it to digital data, which can be used to obtain measurement data such as a photon count. The measurement circuit 30 may comprise one or many counters, which count the number of x-rays 45 detected by a detector element within a measurement period. The measurement circuit 30 may in particular embodiments comprise a memory 35. The measurement circuit 30 may comprise one or several processors or processing circuitries. Examples of processing circuitry include, but are not limited to, one or more microprocessors, one or more Digital Signal Processors (DSPs), one or more Central Processing Units (CPUs), video acceleration hardware, and/or any suitable programmable logic circuitry such as one or more Field Programmable Gate Arrays (FPGAs), or one or more Programmable Logic Controllers (PLCs). The measurement circuit 30 comprises, in the example embodiment illustrated in FIG. 11, a set of pre-configured control settings, which may comprise the initiation time and duration of one or many measurement periods for one or many individual detector elements 15. The pre-configured control settings may, in other words, comprise a time-offset scheme for the measurement periods of the detector elements 15.
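By way of a hypothetical illustration of such pre-configured control settings (the field names and values are assumptions, not part of the described circuit), one possible layout is sketched below in Python.

# Hypothetical sketch: one possible layout for pre-configured control settings,
# giving each detector element its measurement start offset and period duration.
from dataclasses import dataclass

@dataclass(frozen=True)
class ElementSetting:
    start_offset: float  # delay of the first measurement period, relative to a common t0
    period: float        # duration of each measurement period

T = 1.0  # assumed common measurement period
PRECONFIGURED_SETTINGS = {
    element: ElementSetting(start_offset=element * T / 3, period=T)
    for element in range(3)  # three depth segments with offsets 0, T/3 and 2T/3
}

for element, setting in PRECONFIGURED_SETTINGS.items():
    print(element, setting)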
A possible embodiment of the proposed technology provides a measurement circuit 30 where the at least two different detector elements 15 comprises three or more different detector elements 15 and wherein the time offset is chosen so that at least two measurement periods of the three or more different detector elements 15 at least partially overlap in time.
Another possible embodiment provides a measurement circuit 30 wherein the size of the time offset is further chosen to be a fraction of the time duration of at least one of the measurement period(s).
By way of example, the proposed technology provides a measurement circuit 30 wherein the time offset measurement scheme provides a time offset between the measurement periods for different detector elements 15 located at different depths of the same edge-on detector.
Another possible embodiment provides a measurement circuit 30 wherein a plurality of adjacent edge-on detectors are provided with the same time offset between measurement periods.
An alternative embodiment of the proposed technology provides a measurement circuit 30 wherein the measurement periods for the at least two different detector elements 15 located at different depths are different and wherein the time offset is a fraction of the time duration of the shorter measurement period.
According to a particular embodiment of the proposed technology there is provided a measurement circuit 30 wherein each of said measurement periods has the same time duration.
It will be appreciated that the methods and arrangements described herein can be implemented, combined and re-arranged in a variety of ways.
For example, embodiments may be implemented in hardware, or in software for execution by suitable processing circuitry, or a combination thereof.
The steps, functions, procedures, and/or blocks described herein may be implemented in hardware using any conventional technology, such as discrete circuit or integrated circuit technology, including both general-purpose electronic circuitry and application-specific circuitry.
Alternatively, or as a complement, at least some of the steps, functions, procedures, and/or blocks described herein may be implemented in software such as a computer program for execution by suitable processing circuitry such as one or more processors or processing units.
It should also be understood that it may be possible to re-use the general processing capabilities of any conventional device or unit in which the proposed technology is implemented. It may also be possible to re-use existing software, e.g. by reprogramming of the existing software or by adding new software components.
The invention claimed is:
1. A computed tomography (CT) system comprising: an x-ray source; an x-ray detector array of photon-counting edge-on detectors, wherein each photon-counting edge-on detector has a number of detector elements, arranged at different spatial locations in the direction of incoming x-rays; and a circuit configured to operate the computed tomography (CT) system based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements located at different depths, wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time.
2. The computed tomography (CT) system according to claim 1, wherein said at least two different detector elements comprise three or more different detector elements and wherein the time offset is chosen so that at least two measurement periods of said three or more different detector elements at least partially overlap in time.
3. The computed tomography (CT) system according to claim 1, wherein the size of said time offset is further chosen to be a fraction of the time duration of at least one of the measurement periods.
4. The computed tomography (CT) system according to claim 1, wherein the time offset measurement scheme provides a time offset between the measurement periods for different detector elements located at different depths of each photon counting edge-on detector.
5. The computed tomography (CT) system according to claim 4, wherein a plurality of adjacent photon counting edge-on detectors are provided with a time offset between measurement periods.
6. The computed tomography (CT) system according to claim 1, wherein the measurement periods for said at least two different detector elements located at different depths are different and wherein the time offset is a fraction of a time duration of a shorter measurement period.
7. The computed tomography (CT) system according to claim 1, wherein each of said measurement periods has the same time duration.
8. A computed tomography (CT) system comprising: a circuit configured to control an x-ray detector array of photon-counting edge-on detectors, each detector having a number of elements, to operate based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements located at different depths, wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time.
9. The computed tomography (CT) system according to claim 8, wherein said at least two different detector elements comprise three or more different detector elements, and wherein the time offset is chosen so that at least two measurement periods of said three or more different detector elements at least partially overlap in time.
10. The computed tomography (CT) system according to claim 8, wherein the size of said time offset is further chosen to be a fraction of a time duration of at least one of the measurement periods.
11. The computed tomography (CT) system according to claim 8, wherein the time offset measurement scheme provides a time offset between the measurement periods for different detector elements located at different depths of each photon counting edge-on detector.
12. The computed tomography (CT) system according to claim 11, wherein a plurality of adjacent photon counting edge-on detectors are provided with the same time offset between measurement periods.
13. The computed tomography (CT) system according to claim 8, wherein the measurement periods for said at least two different detector elements located at different depths are different, and wherein the time offset is a fraction of a time duration of a shorter measurement period.
14. The computed tomography (CT) system according to claim 8, wherein each of said measurement periods has the same time duration.
15. A computed tomography (CT) system comprising: a circuit configured to operate based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements of a photon-counting edge-on detector, the detector elements located at different depths, and wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time.
16. The computed tomography (CT) system according to claim 15, wherein said at least two different detector elements comprise three or more different detector elements, and wherein the time offset is chosen so that at least two measurement periods of said three or more different detector elements at least partially overlap in time.
17. The computed tomography (CT) system according to claim 15, wherein the size of said time offset is further chosen to be a fraction of a time duration of at least one of the measurement periods.
18. The computed tomography (CT) system according to claim 15, wherein the time offset measurement scheme provides a time offset between the measurement periods for different detector elements located at different depths of each photon counting edge-on detector.
19. The computed tomography (CT) system according to claim 18, wherein a plurality of adjacent photon counting edge-on detectors are provided with the same time offset between measurement periods.
20. The computed tomography (CT) system according to claim 15, wherein the measurement periods for said at least two different detector elements located at different depths are different, and wherein the time offset is a fraction of a time duration of a shorter measurement period.
21. The computed tomography (CT) system according to claim 15, wherein each of said measurement periods has the same time duration.
22. A measurement method performed by a computed tomography (CT) system, said computed tomography (CT) system comprising an x-ray source, a circuit configured for executing the measurement method, and an x-ray detector array of photon-counting edge-on detectors, wherein each photon-counting edge-on detector has a number of detector elements, arranged at different spatial locations in the direction of incoming x-rays, wherein the measurement method comprises: applying a time offset measurement scheme that provides a time offset between measurement periods for at least two different detector elements located at different depths, wherein the time offset is chosen so that at least two measurement periods at least partially overlap in time.
23. The measurement method according to claim 22, wherein said at least two different detector elements comprise three or more different detector elements, and wherein the measurement method further comprises choosing the time offset so that at least two measurement periods of said three or more different detector elements at least partially overlap in time.
24. The measurement method according to claim 23, wherein choosing the time offset comprises choosing the size of said time offset to be a fraction of a time duration of at least one of the measurement periods.
25. The measurement method according to claim 22, further comprising applying a time offset measurement scheme that provides a time offset between the measurement periods for different detector elements of each photon counting edge-on detector.
26. The measurement method according to claim 25, wherein applying the time offset measurement scheme comprises applying said time offset to a number of adjacent photon counting edge-on detectors.
27. The measurement method according to claim 22, wherein the measurement periods for said at least two different detector elements located at different depths are different, and wherein the time offset is a fraction of a time duration of a shorter measurement period.
28. The measurement method according to claim 22, wherein each of said measurement periods has the same time duration.
29. A non-transitory computer-readable medium having stored thereon a computer program comprising instructions, which when executed by at least one processor, cause the at least one processor to control a computed tomography (CT) system comprising an x-ray source and an x-ray detector array of photon-counting edge-on detectors, wherein each photon-counting edge-on detector has a number of detector elements, arranged at different spatial locations in the direction of incoming x-rays, wherein the at least one processor controls the computed tomography (CT) system so that the computed tomography (CT) system operates based on a time offset measurement scheme for providing a time offset between measurement periods for at least two different detector elements located at different depths, wherein the instructions comprise a requirement that the time offset is chosen so that at least two measurement periods at least partially overlap in time.
|
48 species from that small island. There is no reason to expect that the other islands, some of them moreover being larger than St. Martin, should be in a much worse condition for molluscan life than this island. It is only the want of carefully exploring the islands, that accounts for the small number of molluscs recorded from them. It is much to be regretted that the Leyden Museum possesses only 9 species of the land- and freshwater-molluscs of the Dutch West-Indian islands, 7 of them from Curacao (4 however new for that island, so far as I know), 1 from Bonaire, and 1 from St. Martin.

In order to get a survey of the terrestrial and fluviatile molluscs, at present recorded from the Dutch islands in West-India, I have compiled the following lists, giving for each island the molluscs hitherto known. Well stated localities on the islands, at which the various species have been collected, are wanting in most cases. St. Martin only is making a favorable exception in this respect. I have added a list of the papers, in which I found mentioned the molluscs, as inhabiting the different islands. Many references in this Bibliography are second-hand, owing to the fact that the library of the Leyden Museum possesses only very few periodicals and papers dealing with Malacology; moreover many important malacological periodicals are not present in any public library in the Netherlands. This fact will account for its probable incompleteness, and will also, I hope, excuse the errors it might contain.

Lately Mr. M. M. Schepman wrote to me, that he had composed, already in the year 1911, a list of the Mollusca of the Dutch West-Indies for an Encyclopedia of Dutch West-India, but that, owing to some contributors not sending in their manuscripts, his paper has not yet been published at this moment. I am sorry not to have been able to consult this paper, that, no doubt, will contain valuable matter as regards our knowledge of this subject. In the following lists, the species represented in the Leyden Museum by specimens from the Dutch West-Indian islands, are marked with an asterisk.
|
<?php
/**
* HttpMethod
*
* PHP version 5
*
* @category Class
* @package Glue\SpApi\OpenAPI\Clients\ProductPricingV0
* @author OpenAPI Generator team
* @link https://openapi-generator.tech
*/
/**
* Selling Partner API for Pricing
*
* The Selling Partner API for Pricing helps you programmatically retrieve product pricing and offer information for Amazon Marketplace products.
*
* OpenAPI spec version: v0
*
* Generated by: https://openapi-generator.tech
* OpenAPI Generator version: 3.3.4
*/
/**
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
namespace Glue\SpApi\OpenAPI\Clients\ProductPricingV0\Model;
use \Glue\SpApi\OpenAPI\Clients\ProductPricingV0\ObjectSerializer;
/**
* HttpMethod Class Doc Comment
*
* @category Class
* @description The HTTP method associated with the individual APIs being called as part of the batch request.
* @package Glue\SpApi\OpenAPI\Clients\ProductPricingV0
* @author OpenAPI Generator team
* @link https://openapi-generator.tech
*/
class HttpMethod
{
/**
* Possible values of this enum
*/
const GET = 'GET';
const PUT = 'PUT';
const PATCH = 'PATCH';
const DELETE = 'DELETE';
const POST = 'POST';
/**
* Gets allowable values of the enum
* @return string[]
*/
public static function getAllowableEnumValues()
{
return [
self::GET,
self::PUT,
self::PATCH,
self::DELETE,
self::POST,
];
}
}
|
Register the URI of an EPackage to the EMF Runtime in the Eclipse IDE
I have created an ecore metamodel and I want to register it with the EMF Runtime. As I'm working with the model within the IDE, I want to do this in my currently running Eclipse instance, i.e. not programmatically.
Put differently, I want to be able to find the metamodel's URI at Navigate->Open EPackage.
I'm using Eclipse Indigo, Modeling edition.
EMF works with elements deployed in plugins. If you want to work with a metamodel in your workspace, you can:
create a dynamic instance instead of registering the metamodel (and creating a "real" instance).
create a small Eclipse plugin that would provide such an action to register your metamodel.
Otherwise, you will have to launch a new Eclipse instance. Since EMF works with metamodels deployed in plugins, most of the EMF based tools assume that the metamodel is in a plugin and working with a metamodel from the workspace is always more tricky than working with the same metamodel from a plugin.
Stephane Begaudeau
Disclaimer: I am the leader of an EMF based project of the Eclipse Foundation.
|
# Build an algorithm to compute the average of a set of positive integer values
# entered by the user on the keyboard. The value -1 ends the input and must not
# be included in the average.
soma = 0   # running sum of the values entered
cont = 0   # count of valid values entered

while True:
    n = int(input("Digite um número: "))
    if n == -1:        # sentinel value: stop reading input
        break
    soma += n
    cont += 1

if cont > 0:
    media = soma / cont
    print(f"Média = {media}")
else:
    print("No numbers were entered.")  # avoid division by zero when -1 is the first input
|
Bug: Failed import on sphinx 7.3
Sphinx 7.3 stopped exporting py_sig_re
Extension error:
Could not import extension sphinxcontrib.autodoc_pydantic (exception: cannot import name 'py_sig_re' from 'sphinx.domains.python'
Addressed in https://github.com/sphinx-doc/sphinx/issues/12303.
Commit where it disappeared https://github.com/sphinx-doc/sphinx/pull/12297/files
Hi! We have the same issue / same error here. Our build runs fine on sphinx 7.3.5, so I believe the 7.3.6 release triggered this issue in autodoc_pydantic! Many thanks for all of the work you all are doing on this extension 🙌
@all-contributors please add @lwasser for issue
@all-contributors please add @lwasser for bug
Closed by @AA-Turner via https://github.com/sphinx-doc/sphinx/issues/12303
Thank you @mansenfranzen !
|
Without loot boxes, what does the future of mobile gaming look like?
Did you know EA’s Star Wars mobile gacha game went untouched during the Battlefront II controversy? Did you know EA has a Star Wars mobile gacha game?
Despite my rant several months ago about my dislike of Dragalia Lost’s overly grindy events, I recently hooked myself on it again. Even in that piece, I admit there’s a lot I like about Dragalia in particular, and I still maintain there are many things I enjoy that are innate to typical gacha game design. Gacha games have an extremely low barrier to entry, I’m excited by multiplayer games where my friends and I are dealt different tools to play with, and I’ve always been a sucker for a perpetually growing grind. I’m not ashamed to admit I enjoy gacha games as they are.
But Dragalia’s future doesn’t look so bright, nor does that of the entire gacha “genre.” Fire Emblem Heroes and Animal Crossing Pocket Camp have just been shut down in Belgium, much like a plethora of prolific gacha games before them, such as Final Fantasy Brave Exvius. United States senator Josh Hawley recently proposed “The Protecting Children from Abusive Games Act” to regulate games that practice this sort of model. I’m unsure whether this legislation will actually pass, but even if it doesn’t, it’s amplifying the outcry against loot boxes so much that it looks like only a matter of time until something like it passes.
My enjoyment of Dragalia Lost is threatened by this push for regulating loot boxes. I could potentially lose hours of progress and so much time I spent on what I consider to be a pretty good game if this bill advances far enough to ban loot boxes entirely. And even more strongly than I wrote two years ago, I believe this legislation is exactly what the mobile market needs.
Before we dive too deep into how it would affect the mobile market, let’s recap what The Protecting Children from Abusive Games Act actually is, assuming it isn’t watered down in the legislative process. As its name suggests, it aims to block games played by minors from selling addictive microtransactions. Hawley clarified in an interview with Kotaku that this bill is targeting “both loot boxes and pay-to-win,” which would also regulate microtransactions like buying extra turns in Candy Crush and so on. Technically this spares adult-only games, but I’m pretty sure minors can legally play every game published by EA, Nintendo, or any other company a typical mobile player might recognize. The only exceptions the bill outlines are cosmetic items, difficulty levels, and single-purchase add-ons.
Should this bill pass, the gacha game market probably won’t go extinct. The power of waifu-starved adults will make sure it won’t. But it will undergo an exodus leaving a Grand Canyon-sized hole in the mobile market. If this bill is successfully enforced, that leaves three foreseeable options for… almost every mainstream mobile game, really. Is Super Mario Run still mainstream?
Option one is to simply shut down, at least in North America. Most obscure, low-performing gachas in the endless sea of mobile shovelware would probably have no other choice. Massive companies like DeNA and GungHo, on the other hand, have too much of a stake in the mobile market to let that happen so easily. Belgium is a small player in the mobile market, but the US is one of that market’s biggest fish. In the event these companies fail to successfully lobby against the bill, their biggest games will probably fall back on the other options.
The second option is to lock these games behind an Adults Only barrier or something equivalent. Yeahhh… no.
That leaves the most complicated option, which I’m hoping for most — to redesign these games with some other form of monetization in lieu of gacha pulls. Remember, this law covers more than just loot boxes. Given that its goal is to prevent monetization that promotes addiction, we can assume that any repeatable purchase with gameplay repercussions would fall under “abusive.” That means no time skippers, no grind boosters, no last-chance revives, or anything else loathed in pay-to-win games.
Even patching out gacha payments could open a big can of worms. When loot boxes were removed from Belgium’s version of Overwatch without any other concessions, international players half-jokingly demanded the same treatment, joking only in the sense that they knew it wouldn’t happen. A free-to-play game would not make that concession without adding a new monetization method in its place.
Any backlash to those additions would be much more serious unless these additions were simultaneously added to non-US versions. We probably wouldn’t see any reworks comparable to Final Fantasy XIV: A Realm Reborn. Piling up more monetization schemes on top of gacha pulls is still problematic for those other countries, but I expect that to be the most realistic strategy, especially since Japan’s hyper-popular Granblue Fantasy already does that by selling character skins.
These limitations herd games taking this option towards using only cosmetic or single-time convenience microtransactions. A select few big shakers in the F2P market, like Path of Exile, demonstrate they can find continual success by sticking with that kind of business model. Unfortunately, no such equivalents are popular on mobile, so it may be much more challenging for mobile games to convert to a safer model. But to be blunt, the only games that have any reason to take this option are games that mean something more to their creators than only revenue.
That still leaves a metaphorical Grand Canyon-sized hole that future games will try to fill. Given the mobile market’s track record for lazily copying trends, we’d see most of those games fall back towards examples that previous mobile games attempted to set. Since this bill is cracking down on so many tools that “games as a service” abuse, I expect many more one-time payment games like Super Mario Run.
Actually, Super Mario Run is technically an example of how expansion packs are exempt from the bill since you can play the first world for free and pay to access everything else. Mobile games have almost always had a massive stigma, and even after introducing this bill, many consumers would be hesitant to drop $10 on a game they expect to be shallow and cheaply made. That’s even a part of why Super Mario Run had a (relatively) rocky reception. Mobile players expect they never have to commit their cash to get into a game’s meat and potatoes. But should the US become a country where most freemium “services” are not viable products and most competitors charge up front, this free-to-start model would suddenly be a tool for dealing with that stigma. Larger and more ambitious mobile games will likely take a similar approach, but with episodic releases like the decently received Final Fantasy Dimensions.
Aside from following whatever current mobile leaders survive the transition to fair monetization, I also expect service-styled games to try one other idea. Since most successful mobile RPGs add content in bulk updates, we might see new ones start charging for major content updates as if they’re micro-MMO expansions. It’s obviously a long shot to start charging money for things that used to be taken for granted as free, but that’s kinda why gacha games are so reviled in the first place. If the premium MMO market is any indication, only a select few mobile games could thrive with this model (if any), but they would become very notable contenders on that fact alone much like how very few MMOs survive on subscription models.
We’ll also probably see even more idle clickers stuffed with ads than we currently do and we will hate them. Actually, we might see a lot more games use ads in the place of current payment systems. Many, many more ads. That’s…quite scary actually, but it’s easier to multitask while ignoring ads than open loot boxes with one hand while using the other to work a part-time job, so… it’s the lesser of two evils? I hope?
You’ll have to forgive me for using so many maybes. We’ve never seen an entire “genre” of games supported by the industry’s biggest publishers get outlawed. We can only estimate what the most successful mobile games in a post-pay-to-win world would be because so many games that would have been that successful were starved out by the games this law would ban. It doesn’t help that this law remains somewhat vague, so people are worried about collateral damage towards games that are actually worth supporting. Heck, I don’t want so many F2P games to be banned. F2P and mobile markets are extremely important to people who lack the time or money to play other games.
But by the same token, I would argue people who can’t afford to play premium games are those who have the most to lose should they fall prey to the loot box’s indiscriminate temptations. Many fewer F2P games will thrive if this bill passes, but those that do will have to do so by leaning more on good game design. We’ll still see bad eggs invoke dubious business practices (methinks the exemption for buying “difficulty levels” is prone to abuse), but with a law that defines abusive microtransactions so broadly, that would be a lot more difficult than it currently is.
Watching the mobile market devolve into further and further complacency with gambling-based monetization has made me a lot more bitter now than when I wrote that blog proclaiming my favor of gacha games two years ago. Right now, this bill tastes like three scoops of cotton candy ice cream. And it comes in a waffle cone. We should always be careful of our calories, but right now I just want my ice cream.
About The Author
Christopher Hovermale
I'm a former Contributor who goes by the screen name Cedi or CediFonei on most corners of the internet! Not quite obligatory disclosure; I backed Chris Niosi's TOME RPG on Kickstarter. I really wish that wasn't the first Kickstarter game I ever backed...
More Stories by Christopher Hovermale
|
//REV: header file for searcher, which provides various search functions
//User will call one of these, passing the appropriate inputs,
//and it will automatically construct everything and run the sweep I guess....
//REV: TODO, need to make it so that RESTART will automatically find/choose the correct algo. In other words, don't need to tell it the type for it to load...
//So, we need to store the algo in the state file (duh..).
//takes user options and runs corresponding search funct?
//"Format" is required...for each different search, what kind of info it needs to parameterize it. I guess just parameterize it with a global varlist.
//REV: OK how to parameterize it with some global varlist?
//Do like PARAMNAME, PARAMMIN, PARAMMAX? How to name it? In individual varlists?
//Use varlists NAMES to find it, inside what? A hierarchical? No...just use a varlist structure... I.e. named list of varlists...Ugh.
//This seems "best", but then user needs to be careful to construct his scripts/etc. as max/min/etc. Or give a better way to read varlists,
//like VAL MIN MAX STEP etc.? So, basically ways of taking 2-d values? Need to take "column" headers, etc. Need it for simulation anyway (read mii-sans
//data?). OK, I guess I can do this. So, now we have some kind of 2d var list structure, where we tell it the variable (row#?) and the column name
// (e.g. var name vs val vs etc.?)
//Effectively we have a 2-column thing now. Make it even more general? We want to eventually have more "abstract" i.e. jagged ones, as we talked about
//before...i.e. nested information.
//I.e. varlists are arbitrary? but then how do we hold arrays? In varlist, it's just an arbitrary list until some endthing? How do I know where it
//ends in read-in? That is the problem?
//Need a way to have arbitrary length array stored somewhere? Only up to 3D array? Either 1D of array type (i.e. all guys are arrays), or 2D of single
//value types...? Need a way to arbitrarily "flatten" matrices of arbitrary length. Ways of storing in memory. Of course, we can but reading/writing
//will not be compressed/efficient. If it includes strings, that will make it only more complex.
//But in the eventuality that I want to run experiments/store it on NSIM side, need a way to do it, right? I don't want to have to repeat every single
//variable every single time. E.g., better to have a way to compress it uniquely. But representation will change if I change number of guys.
//E.g.
//We can rearrange this in some way to make it "hierarchical" based on most efficient graph/tree search... i.e. might be more efficient to "list" it
//by timepoints (var4) if there are more (fewer?) of those. But then how to arbitrarily do it without actually listing out every single one, e.g.
//tell it what each "location" is and then store it that way. I.e. don't write "neuron#" or "time point" each time., just do it? HDF5 might be the
//most elegant way of doing this? But how do I go through and analyse that? E.g. electrode # etc. Doing e.g. EL1 and EL2, or some arbitrary number of
//electrodes (and their positions). How to list those out then? Like, how to specify "experiment" to do most efficiently?
//I.e. TIMING, LOCATION, STRENGTH of each stimulation or something like that. Need a way to parameterize that in an N dimensional space. Timing of
//each is a dimension? Similar to eye movement/visual stimulation analysis. Too many dimensions, and not clear how to orient them to make it most
//informative. Like, I want to basically "list" parameters in some hierarchical way, and finally have e.g. TIMEPOINT and NEURONNUMBER known without
//listing them. Need a "organization" file (how to interpret data file), and then "data file" itself. Same file is obviously best. HDF5?
//Anyway, in this case, do named varlists obviously, easiest. Wasteful. Want to have 2d table things to read too though. Use HDF5...exists, and
//efficient. Problem is that stored information might have arbitrary lengths (i.e. strings), or might be doubles or whatever. But to process what
//user is doing, he will make it via some CMD program? Some GUI?
// VAR1 VAR2 VAR3 VAR4 THING
// 1 1 1 1 2.5
// 1 1 1 2 3.5
// 1 1 1 3 95.9
// 1 1 2 1 2.5
// 1 1 2 2 3.5
//For now just pass MIN MAX etc.. Yea, way to handle 2d after all. It contains..? How about going from HDF5 to?
//Hm, this is kind of nasty...because when I "extracted" each column, I would need to appropriately have the return function return
//the kind, or cast it manually each time -_-
#include <searcher.h>
//REV: Where to set parameters?
//We can do it "above" to handle options, but that makes it a pain. Assume all variables are already set and I just "check" for them?
//Inside the algorithm itself?
//Another option is to have a set "config" file for running the sweep. For example "load" etc.? I don't want to specify (on command line?)
//The arguments each time I start a run? Or do I? I want it to be "stored". It is stored, in the file itself of course.
//Make an error if user "doubly defines" things. E.g. allow him to set a "config" file for the search algorithm, or a "default", but then also add
//cmd line arguments. Give a warning (error/exit?) if cmd line overrides config file, or config overrides config, etc.
//REV: This is the thing that is compiled into the library! I.e. globally accessible is this!
//I need to add everything to CPP files so that I can separately compile them...and do it faster...
//Let's do it now I guess? They are all including each other though, which causes some problems.
searcher::searcher( )
{
//REV: Nothing to do
}
void searcher::register_funct( const std::string& name, const fake_system_funct_t& funct)
{
fakesys.register_funct( name, funct );
}
void searcher::run_search( optlist& opts )
{
//parse to required guys that I want... ONLY ROOT RANK SHOULD EXECUTE THIS, CRAP.
preparseopts( opts );
filesender* fs = filesender::Create( _runtag , fakesys, _wrkperrank, _writefiles );
opts.enumerateparsed();
opts.enumerateextras();
parseopts( opts ); //Could just get individual things like GETSEARCHTYPE, etc. To reduce "fake" internal members we don't need...
//REV: Moved this up here to save user from doing it...
//But this way at least we are kind of "explicit" about what we consume..?
run_search( _searchtype, _scriptfname, _mydir, opts, *fs, _writefiles );
}
//REV: Faster to specify some struct to handle all options, this way it can easily know how many args it wants, and what are usage things so that
//they can be printed...
void searcher::preparseopts( optlist& opts )
{
//set internal variables with parseopts
auto a = opts.get_opt_args( "WRITEFILES" );
if( a.size() == 0 )
{
//fprintf(stdout, "SEARHER: Parseopts, -WRITEFILES **NOT** defined. Will *not* write files to filesystem (I.e. will use memfsys)\n");
_writefiles = false;
}
else
{
//fprintf(stdout, "SEARHER: Parseopts, -WRITEFILES defined, *will* write files to filesystem (I.e. will not use memfsys)\n");
_writefiles = true;
}
//set internal variables with parseopts
a = opts.get_opt_args( "WORKERSPERRANK" );
if( a.size() == 0 )
{
//defaulting to 1.
_wrkperrank=1;
fprintf(stdout, "REV: found no cmd line arg, so SETTING WORKERS PER RANK TO [%ld]\n", _wrkperrank);
}
else
{
if( a[0].size() > 0 )
{
_wrkperrank = std::stol(a[0][0]);
fprintf(stdout, "REV: SETTING WORKERS PER RANK TO [%ld]\n", _wrkperrank);
}
else
{
fprintf(stderr, "ERROR. SEARCHER in option WORKERSPERRANK: Requires at least 1 argument (*#WORKERS PER RANK*)\n");
exit(1);
}
}
//set internal variables with parseopts
a = opts.get_opt_args( "TAG" );
if( a.size() == 0 )
{
//We have no tag. We use the default.
//fprintf(stdout, "SEARHER: Parseopts, -WRITEFILES **NOT** defined. Will *not* write files to filesystem (I.e. will use memfsys)\n");
//_writefiles = false;
}
else
{
if( a[0].size() > 0 )
{
_runtag = a[0][0];
}
else
{
fprintf(stderr, "ERROR. SEARCHER in option TAG: Requires at least 1 argument (*TAG* of this run, for scratch naming purposes)\n");
exit(1);
}
//fprintf(stdout, "SEARHER: Parseopts, -WRITEFILES defined, *will* write files to filesystem (I.e. will not use memfsys)\n");
//_writefiles = true;
}
}
void searcher::parseopts( optlist& opts )
{
//set internal variables with parseopts
auto a = opts.get_opt_args( "WRITEFILES" );
if( a.size() == 0 )
{
fprintf(stdout, "SEARCHER: Parseopts, -WRITEFILES **NOT** defined. Will *not* write files to filesystem (I.e. will use memfsys)\n");
_writefiles = false;
}
else
{
fprintf(stdout, "SEARCHER: Parseopts, -WRITEFILES defined, *will* write files to filesystem (I.e. will not use memfsys)\n");
_writefiles = true;
}
auto b = opts.get_opt_args( "DIR" );
if( b.size() == 0)
{
fprintf(stderr, "ERROR SEARCHER: Parseopts, did *not* specify a -DIR for running, please specify and run again\n");
exit(1);
}
else
{
if(b[0].size() == 0 || b[0].size() > 1)
{
fprintf(stderr, "ERROR SEARCHER: Parseopts, -DIR option had [%ld] arguments, expects only 1 (name of dir)\n", b[0].size() );
exit(1);
}
else
{
_mydir = b[0][0];
}
fprintf(stdout, "SEARCHER: Using specified DIR [%s] as DIR for running search\n", _mydir.c_str());
}
auto c = opts.get_opt_args( "SEARCHTYPE" );
if( c.size() == 0 )
{
fprintf(stderr, "ERROR SEARCHER: Parseopts, did *not* specify a -SEARCHTYPE for running, please specify and run again\n");
exit(1);
}
else
{
if(c[0].size() == 0 || c[0].size() > 1)
{
fprintf(stderr, "ERROR SEARCHER: Parseopts, -SEARCHTYPE option had [%ld] arguments, expects only 1 (searchtype)\n", c[0].size() );
exit(1);
}
else
{
_searchtype = c[0][0];
}
fprintf(stdout, "SEARCHER: Using specified SEARCHTYPE [%s] for running search\n", _searchtype.c_str() );
}
auto d = opts.get_opt_args( "WORKSCRIPT" );
if( d.size() == 0 )
{
fprintf(stderr, "ERROR SEARCHER: Parseopts, did *not* specify a -WORKSCRIPT for running, please specify and run again\n");
exit(1);
}
else
{
if(d[0].size() == 0 || d[0].size() > 1)
{
fprintf(stderr, "ERROR SEARCHER: Parseopts, -WORKSCRIPT option had [%ld] arguments, expects only 1 (work script filename)\n", d[0].size() );
exit(1);
}
else
{
_scriptfname = d[0][0];
}
fprintf(stdout, "SEARCHER: Using specified WORKSCRIPT [%s] for running search\n", _scriptfname.c_str() );
}
}
//varlist will contain required um, data files I guess?
void searcher::run_search( const std::string& searchtype, const std::string& scriptfname,
const std::string& mydir, optlist& opts,
filesender& fs,
const bool& writefiles )
{
fprintf( stdout, "RUNNING SEARCH WITH: searchtype [%s], scriptfname [%s], mydir [%s]\n", searchtype.c_str(), scriptfname.c_str(), mydir.c_str() );
std::vector<std::string> registeredstypes = { "GRID",
"DREAM-ABC",
"DREAM-ABCz" };
//"MT-DREAMz" };
auto locs = find_string_in_vect( searchtype, registeredstypes );
if(locs.size() != 1)
{
fprintf(stderr, "ERROR, requested search type [%s] is not implemented/not available. Valid types:\n", searchtype.c_str());
//print1d_str_vec_row( registeredstypes );
for(size_t x=0; x<registeredstypes.size(); ++x)
{
fprintf(stderr, "[%s] ", registeredstypes[x].c_str());
}
fprintf(stderr, "\n");
exit(1);
}
pg = parampoint_generator(scriptfname, mydir);
//fprintf(stdout, "REV: Finished making parampoint generator, now will create FILESENDER (this will cause MPI ranks to initialize!!!!)\n");
if( searchtype.compare( "GRID" ) == 0 )
{
//pass as options...
search_grid( opts, pg, fs );
}
else if( searchtype.compare( "DREAM-ABC") == 0 )
{
//pass as options...
search_dream_abc( opts, pg, fs );
}
else if( searchtype.compare( "DREAM-ABCz") == 0 )
{
search_dream_abc_z( opts, pg, fs );
}
else
{
fprintf(stderr, "ERROR: searcher, unrecognized search type [%s] (Or I made a typo in the if/else! Sorry!)\n", searchtype.c_str());
doexit( &fs );
exit(1);
}
doexit( &fs );
}
void searcher::doexit( filesender* myfs )
{
fprintf(stderr, "ROOT FINISHED! Broadcasting EXIT (in SEARCHER.CPP)\n");
//std::string contents="EXIT";
//boost::mpi::broadcast(myfs->world, contents, 0);
//This won't work with worker threads. Need to iter through WORKERS.
myfs->signal_exit_to_workers();
delete(myfs);
}
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////// OLD RUN SEARCH
//varlist will contain required um, data files I guess?
void searcher::run_search( const std::string& searchtype, const std::string& scriptfname, const std::string& mydir, /*const*/ varlist<std::string>& params, const bool& writefiles )
{
pg = parampoint_generator(scriptfname, mydir);
fprintf(stdout, "REV: Finished making parampoint generator, now will create FILESENDER\n");
//REV: PG contains the "results" of each... parampoint_results, of type parampoint_result.
//That is: list of pset results, each of which has list of pitem results (specifically, varlist).
//OK, I can access those however I wish, e.g. I know last is the only one I care about etc.
//REV: User must have created his FAKESYSTEM calls before this point. In other words, in user program, he makes his main, he has his funct,
//he registers his funct, then when he calls this, he calls it with his list of his FAKE_SYSTEM stuff. OK.
//Can a static funct take an argument...? I guess so.
filesender* fs = filesender::Create( _runtag, fakesys, writefiles, 1 );
//REV: Oh crap, on this side, it might need to read them in the first place...hm.
//If we want master but not slaves to be different than writefiles, do it here...
//Calls to e.g. SEARCH_GRID etc. will call it. All calls to FS that master will make, I need to make sure to handle them properly...
fprintf(stdout, "RUNNING SEARCH ALGO: [%s]\n", searchtype.c_str() );
if( searchtype.compare( "grid" ) == 0 )
{
std::string varname = "GRID_MIN_MAX_STEP_FILE";
std::string minmaxfname = params.getTvar( varname );
bool hascolnames = true;
data_table dtable( minmaxfname, hascolnames );
fprintf(stdout, "Trying to get VARNAMEs\n");
std::vector<std::string> varnames = dtable.get_col( "NAME" );
fprintf(stdout, "Got varnames\n");
std::vector<double> mins = data_table::to_float64( dtable.get_col( "MIN" ) );
fprintf(stdout, "Got mins\n");
std::vector<double> maxes = data_table::to_float64( dtable.get_col( "MAX" ) );
std::vector<double> steps = data_table::to_float64( dtable.get_col( "STEP" ) );
fprintf(stdout, "Got STEP\n");
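//NOTE (illustrative): a hypothetical example of what a GRID_MIN_MAX_STEP_FILE could look like,
//inferred only from the column names read above; the actual delimiters/parsing are whatever
//data_table implements, and the variable names below are made up for illustration:
//  NAME    MIN    MAX    STEP
//  w_exc   0.0    1.0    0.1
//  tau_m   5.0    50.0   5.0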
//Construct required stuff from PARAMS. I.e. min and max of each param? Need N varlists? Have them named? Specific name? Have array type of
//name PARAMS, etc.? Probably got from a file at beginning... Some special way of reading that...it should know varnames?
grid_state gs;
gs.search_grid( varnames, mins, maxes, steps, pg, *fs);
}
else if( searchtype.compare( "DREAM-ABC" ) == 0 )
{
std::string varname = "ABC_TEST_MIN_MAX_FILE";
std::string minmaxfname = params.getTvar( varname );
std::string obsdatafname = "ABC_TEST_OBSERV_DATA_FILE";
std::string observfname = params.getTvar( obsdatafname );
bool hascolnames = true;
data_table dtable( minmaxfname, hascolnames );
data_table obsvdtable( observfname, hascolnames );
fprintf(stdout, "Trying to get VARNAMEs\n");
std::vector<std::string> varnames = dtable.get_col( "NAME" );
fprintf(stdout, "Got varnames\n");
std::vector<double> mins = data_table::to_float64( dtable.get_col( "MIN" ) );
fprintf(stdout, "Got mins\n");
std::vector<double> maxes = data_table::to_float64( dtable.get_col( "MAX" ) );
fprintf(stdout, "Got maxes\n");
std::string statefname = "dreamsearch_state.state";
//Make a random "problem"
size_t ndims = varnames.size();
fprintf(stdout, "Getting observ data from [%s]\n", observfname.c_str() );
std::vector<std::string> obsv_varnames = obsvdtable.get_col( "NAME" );
std::vector<double> obsv_vals = data_table::to_float64( obsvdtable.get_col( "VAL" ) ); //REV: this will just be ERROR and 0 for me... heh.
//Load or run? Do it here or elsewhere? Do it in ABC?
search_dream_abc( statefname,
varnames,
mins,
maxes,
obsv_varnames,
obsv_vals,
pg,
*fs
);
}
else if( searchtype.compare( "DREAM-ABCz" ) == 0 )
{
//DREAM-ABCz expects:
//1) ABC TEST MIN/MAX file which has var names and values
// e.g. list of #d: name min max values.
// Has headers: NAME MIN MAX
//2) ABC TEST OBSERV DATA FILE, which is filename of file
// containing observations from data (Y vector)
// Has headers: NAME
//
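//NOTE (illustrative): hypothetical examples of both files, inferred from the columns read
//below (NAME/MIN/MAX and NAME/VAL); the variable names are made up, and the observation row
//follows the "ERROR and 0" convention mentioned further down:
//  ABC_TEST_MIN_MAX_FILE:        ABC_TEST_OBSERV_DATA_FILE:
//    NAME    MIN     MAX           NAME    VAL
//    g_syn   0.0     10.0          ERROR   0.0
//    v_rest  -80.0   -60.0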
std::string varname = "ABC_TEST_MIN_MAX_FILE";
std::string minmaxfname = params.getTvar( varname );
std::string obsdatafname = "ABC_TEST_OBSERV_DATA_FILE";
std::string observfname = params.getTvar( obsdatafname );
bool hascolnames = true;
data_table dtable( minmaxfname, hascolnames );
data_table obsvdtable( observfname, hascolnames );
fprintf(stdout, "Trying to get VARNAMEs\n");
std::vector<std::string> varnames = dtable.get_col( "NAME" );
fprintf(stdout, "Got varnames\n");
std::vector<double> mins = data_table::to_float64( dtable.get_col( "MIN" ) );
fprintf(stdout, "Got mins\n");
std::vector<double> maxes = data_table::to_float64( dtable.get_col( "MAX" ) );
fprintf(stdout, "Got maxes\n");
std::string statefname = "dreamsearch_state.state";
//Make a random "problem"
size_t ndims = varnames.size();
fprintf(stdout, "Getting observ data from [%s]\n", observfname.c_str() );
std::vector<std::string> obsv_varnames = obsvdtable.get_col( "NAME" );
std::vector<double> obsv_vals = data_table::to_float64( obsvdtable.get_col( "VAL" ) ); //REV: this will just be ERROR and 0 for me... heh.
//Using that, I search.
//I need a way to pass more variables further in, by passing
//a "named" varlist for example, which DREAM_ABC_Z struct
//knows how to handle natively.
//This "search" funct thing calls with no arguments
//(kind of a problem). However, I'd like to be able to change
//things like observations part way through? No...
//Or only copy certain aspects like current positions to
//a new sweep?
//Whatever, just add everything to a "command like" processor,
//which then goes to a "named" varlist.
//The "named" varlist is not hierarchical? We want a "named"
//hierarchical varlist, which has variables for model, under
//a general one with e.g. numthreads, etc. Workers need to
//know # of GPU it's working on etc. Worker only reports when
//GPU is ready? Does psweep2 *know* about GPUs?
//I can query GPUs on machine and only grab those of certain
//type I want (?). Ideal situation is to keep most "code"
//loaded on GPU, and only modulate state variables...
search_dream_abc_z( statefname,
varnames,
mins,
maxes,
obsv_varnames,
obsv_vals,
pg,
*fs
);
}
else if( searchtype.compare( "MT-DREAMz" ) == 0 )
{
fprintf(stderr, "REV: Error, requested search algo MT-DREAMz is not implemented yet!\n");
exit(1);
}
else
{
fprintf(stderr, "REV: ERROR, search algorithm type [%s] not found\n", searchtype.c_str() );
}
fprintf(stderr, "ROOT FINISHED! Broadcasting EXIT\n");
//std::string contents="EXIT";
//boost::mpi::broadcast(fs->world, contents, 0);
fs->signal_exit_to_workers();
delete(fs);
}
|
How to debug Greasemonkey scripts in Firebug?
I can't debug any Greasemonkey scripts in Firebug because they don't appear in the list under the Script tab (anymore).
The answers at "How to debug Greasemonkey script with the Firebug extension?" apparently don't work with the latest versions of Firefox + Firebug?
I've tried:
Creating a new Firefox profile
about:config setting extensions.firebug.filterSystemURLs to false
Updating to Firebug 2.0.2
I had this problem before and solved it by creating a new Firefox profile but that hasn't worked this time.
Please does anyone know how to get this working or if there is an alternative to Firebug that works with Greasemonkey?
I'm on Firefox 31.0.
FF 31 greatly improved debugging, allegedly, but I haven't played around with that yet. Last I checked, GM debugging with Firebug was busted again on FF30. Try FF's native debugger.
New Debugging in FF31 mostly refers to bootstrapped addons. I use console for debugging GM scripts.
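For illustration, here is a minimal sketch of that console-based approach (the metadata values below are placeholders, not from any real script):
// ==UserScript==
// @name     console-debug-example
// @include  https://example.com/*
// @grant    none
// ==/UserScript==
// With @grant none the script runs in the page's own scope, so console.log output
// shows up in Firebug's Console panel as well as the native Web Console.
console.log('[GM] running on', location.href);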
What if you put debugger keyword in your script and run it with debugger active? Will it stop there and show you the source?
@MadBender, that doesn't work for a Greasemonkey script.
You can use the "Browser Toolbox" to debug GM scripts. See this answer for how to enable it. I do not recommend this though, because it lays bare the whole browser and you can really frak things up if you're not very careful.
Issue 7513: 2.0 cannot debug userscripts in Greasemonkey
Firebug 1.x worked well with Greasemonkey 1.x: I could debug userscripts installed in Greasemonkey by setting breakpoints in Firebug. But after I upgraded my Firefox to 30, Firebug was upgraded to 2.0, and it no longer seems to work.
Project Member simon.lindholm10
Not that surprising; the method of script discovery changed pretty fundamentally in 2.0... Previously we were iterating over all scripts everywhere and trying to tie them somewhere; now we start from the web page and hook script creation for it, which is much more sane.
The way to get this fixed (presuming it doesn't work in the built-in debugger) is to file it against Devtools at https://bugzilla.mozilla.org, or against Greasemonkey; there's very little we can do on the Firebug side I believe.
|
/**
* @file StepperMotor.h
* @brief Header file of the StepperMotor class
*
* The StepperMotor class is implemented and tested for the 28BYJ-48 Stepper Motor
* Usage example (basic):
*
#include "StepperMotor.h"
#define StepperMotor_Pin_1 0
#define StepperMotor_Pin_2 1
#define StepperMotor_Pin_3 2
#define StepperMotor_Pin_4 3
#define STEP_ANGLE_FULL_STEP 0.176
#define STEP_ANGLE_HALF_STEP 0.0879
int main(void) {
// instantiate the StepperMotor object
component::StepperMotor myStepperMotor(component::mode::halfStep,
io::Pin(StepperMotor_Pin_1,io::PortB),
io::Pin(StepperMotor_Pin_2,io::PortB),
io::Pin(StepperMotor_Pin_3,io::PortB),
io::Pin(StepperMotor_Pin_4,io::PortB));
int16_t l_angle = -180;
int16_t l_step = static_cast<int16_t>(l_angle / STEP_ANGLE_HALF_STEP);
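// note: with the half-step angle of 0.0879°/step, l_angle = -180° works out to about -2048 steps (-180 / 0.0879 ≈ -2048)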
// set speed [steps/s]
// full step: max speed: 500p/s
// half step: max speed: 1000p/s
// set speed [°/s]
// full step: max speed: 40°/s
// half step: max speed: 80°/s
// Mainloop
while (1) {
while (!myStepperMotor.goalReached())
{
// set motor steps
myStepperMotor.step(l_step,1000);
//myStepperMotor.step(l_step,80,STEP_ANGLE_HALF_STEP);
}
}
return 0;
}
*
*
* class to control a stepper motor
* @author Farid Oubbati (https://github.com/faroub)
* @date March 2020
*/
#ifndef STEPPERMOTOR_H
#define STEPPERMOTOR_H
#include "ha_base.h"
#include "Pin.h"
namespace component
{
enum class mode : uint8_t {
fullStep=0, /**< full-step drive mode */
halfStep, /**< half-step drive mode */
};
class StepperMotor
{
public:
StepperMotor(const mode &ar_mode,
const io::Pin &ar_pinCoil1,
const io::Pin &ar_pinCoil2,
const io::Pin &ar_pinCoil3,
const io::Pin &ar_pinCoil4);
~StepperMotor();
/** Step the motor by a given number of steps at the given speed [steps/s].
*/
void step(const int16_t a_step,
const uint16_t a_speed);
void step(const int16_t a_step,
const uint16_t a_speed,
const float a_stepAngle);
void step(const int16_t a_step,
const uint16_t a_speed,
const uint16_t a_accel,
const uint16_t a_decel);
void stepPulse(const uint8_t a_stepPulse);
void stepDelay(uint8_t a_stepDelay);
uint8_t goalReached();
void setCurrentPos(uint16_t a_currentPos);
uint16_t currentPos();
uint8_t computeStepDelay(uint16_t a_step,
const uint16_t a_speed,
const uint16_t a_accel,
const uint16_t a_decel);
uint16_t m_accelTime; /**< duration of the acceleration phase */
uint16_t m_decelTime; /**< duration of the deceleration phase */
uint16_t m_constSpeedTime; /**< duration of the constant-speed phase */
private:
// uint8_t computeStepDelay(int16_t a_step,
// const uint16_t a_speed,
// const uint16_t a_accel,
// const uint16_t a_decel);
io::Pin m_pinCoil1; /**< pin object */
io::Pin m_pinCoil2; /**< pin object */
io::Pin m_pinCoil3; /**< pin object */
io::Pin m_pinCoil4; /**< pin object */
mode stepMode; /**< stepping mode (full or half step) */
uint8_t m_goalReached;
uint16_t m_currentPos; /**< current position [steps] */
};
}
#endif // STEPPERMOTOR_H
|
Rogue Legacy
Developer(s): Cellar Door Games
Publisher(s): Cellar Door Games
Distributor(s): Steam
Release date(s): June 27, 2013 (Windows, Steam)
Genre(s): Platform, Roguelike
System(s): Linux, Mac OS, Windows
Modes: Single player
Rating(s): N/A
Website: http://www.roguelegacy.com/
Rogue Legacy (RL) is an indie, 2D platform, single player roguelike released on June 27, 2013 for Windows. RL features procedurally generated rooms and a layout similar to Super Metroid and Castlevania: Symphony of the Night.
Title screen.
Unlike other roguelikes, Rogue Legacy allows players to build off of their previous character. Players take control of a knight. Upon dying, that character is lost and a new "heir" must be chosen. The game then gives a few different characters to choose from. Each character has randomized traits to give the feel of a genetically different child. Traits change a player's size, environment, stats, and ability (e.g. magic or a ranged weapon). In addition, any money acquired will be available for the heir to spend on the "manor" (a tree of upgrades), which is used to give permanent upgrades to future heirs.
Setting[edit]
For more details on the story, see Rogue Legacy/Walkthrough.
The game takes place in a medieval fantasy setting. The entire game takes place on the outskirts of and inside a randomized castle. The story revolves around the lineage of one noble family. You are the heir to the throne.
Table of Contents
Rooms
Castle Hamson
Forest Abkhazia
The Maya
The Darkness
|
how about mac software
Hi team,
I moved this from here: https://github.com/MicrosoftDocs/azure-docs.nl-nl/issues/579
@WillemVerwijs commented 9 hours ago — with docs.microsoft.com
We have some of those non-professionals who use Eppul Mek (or however you write that). Is there a client for those lo...s, and could you also make a link available to that software? prio:... well. low? or is that too high?
Document Details
⚠ Do not edit this section. It is required for docs.microsoft.com ➟ GitHub issue linking.
ID: 524e47b0-1295-7d07-0cbc-86e6daaa1358
Version Independent ID: 1cb3a5d8-4f16-583c-ad85-325f7638a873
Content: Connect to Windows Virtual Desktop Windows 10 or 7 - Azure
Content Source: articles/virtual-desktop/connect-windows-7-and-10.md
Service: virtual-desktop
GitHub Login: @Heidilohr
Microsoft Alias: helohr
@WillemVerwijs You can access Windows Virtual Desktop resources from your macOS devices with our downloadable client. This guide will tell you how to set up the client.
https://docs.microsoft.com/en-us/azure/virtual-desktop/connect-macos
https://docs.microsoft.com/nl-nl/azure/virtual-desktop/connect-macos
We will now close this issue. If there are further questions regarding this matter, please tag me in a comment. I will reopen it and we will gladly continue the discussion.
|
Thread:<IP_ADDRESS>/@comment-22439-20130122132826
Welcome, <IP_ADDRESS>!
* Please read our Manual of Style and other policies for guidelines on contributing.
* Internal pages:
* Things to cleanup
* Things to edit
* League of Legends Wiki's forum
* Forum:General Discussion
* Special Pages
* External Wikipedia pages:
* How to edit a page
* Contributing
* Editing, policy, conduct, and structure tutorial
* How to write a great article
|
import { Body, Controller, Get, Headers, Post } from "@nestjs/common";
import { CreateSliderDto } from "./dto/create-slider.dto";
import { SliderService } from "./slider.service";
@Controller('slider')
export class SliderController {
constructor(private readonly sliderService: SliderService) { }
// GET /slider/list - return all sliders
@Get('list')
async getSlider() {
const list = await this.sliderService.list();
return list;
}
// POST /slider - create a slider; the Authorization header is forwarded to the service
@Post()
async addSlider(@Body() body: CreateSliderDto, @Headers('Authorization') token) {
const slider = await this.sliderService.create({ ...body, token: token });
return slider;
}
// POST /slider/remove - remove a slider matching the criteria in the body
@Post('remove')
async removeSlider(@Body() body, @Headers('Authorization') token) {
const slider = await this.sliderService.remove({ ...body, token: token });
return slider;
}
}
|
/**
* Copyright 2017 Miroslav Pokorný
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using Hermes.Protoc.Messages;
namespace Hermes.Communication
{
/// <summary>
/// Interface which represent Communication session
/// </summary>
public interface ICommunication
{
/// <summary>
/// Action which may be invoked on success in method AddConnection
/// </summary>
Action<ICommunication, string> AddConnectionCallback { get; set; }
/// <summary>
/// Action which may be invoked after removing connection from ICommunication
/// </summary>
Action<ICommunication, string> RemoveConnectionCallback { get; set; }
/// <summary>
/// Master connection is the first connection in the communication (the connection responsible for creating the communication session)
/// </summary>
string MasterConnection { get; }
/// <summary>
/// Communication constraints for master connection
/// </summary>
CommunicationConstraint MasterConstraint { get; }
/// <summary>
/// Communication constraints for slave connections
/// </summary>
CommunicationConstraint SlaveConstraint { get; }
/// <summary>
/// Maximal number of parallel connections per one communication session
/// </summary>
uint MaxConnections { get; }
/// <summary>
/// Communication Session ID
/// </summary>
string SessionId { get; set; }
/// <summary>
/// Current status of communication eg. Ready, Waiting, Disconnected
/// </summary>
string Status { get; }
/// <summary>
/// Add new connection to communication process
/// Return true on success, false if connection was not added to communication
/// </summary>
/// <param name="connectionId">ID of connection which will be added to communication</param>
/// <returns>Return true on success, false if connection was not added to communication</returns>
bool AddConnection(string connectionId);
/// <summary>
/// Disable sending data with specific mimeType to connection specified by connectionId
/// </summary>
/// <param name="connectionId">End point which will not receive specified mimeType</param>
/// <param name="mimeType">MimeType which will not being send to client (for example not supported video format.)</param>
void DisableSendingData(string connectionId, string mimeType);
/// <summary>
/// Get all connection IDs participating in this communication
/// Return array of string with all connection IDs participating in this communication
/// </summary>
/// <returns>Return array of string with all connection IDs participating in this communication</returns>
string[] GetConnectionIds();
/// <summary>
/// Get all connections IDs which has enabled specified mimeType
/// Return array of string with connection IDs which support specified mimeType
/// </summary>
/// <param name="mimeType">Mime type of data</param>
/// <returns>Return array of string with connection IDs which support specified mimeType</returns>
string[] GetConnectionIdsSupportMimeType(string mimeType);
/// <summary>
/// Get all generated stream IDs associated with one specified connection
/// Return array of IDs associated with specified connection. If there is no generated ID associated with connection, then return empty array
/// </summary>
/// <param name="connectionId">Connection ID</param>
/// <returns>Return array of IDs associated with specified connection. If there is no generated ID associated with connection, then return empty array</returns>
uint[] GetConnectionStreamIds(string connectionId);
/// <summary>
/// Get media header (first bytes of video/audio, eg. Initialization segment) for specified streamId
/// Return First bytes of stream or empty byte array if header is not stored
/// </summary>
/// <param name="streamId">ID of stream</param>
/// <returns>Return First bytes of stream or empty byte array if header is not stored</returns>
byte[] GetMediaStreamHeader(uint streamId);
/// <summary>
/// Return unique id for new stream. Range of ids is > 50000.
/// </summary>
/// <param name="applicantConnectionId">Id of connection, which require new id.</param>
/// <returns>Return new unique id (unique in ICommunication object) or return 0 if there is some error (connectionId not participating in communication).</returns>
uint GetNewUniqueStreamId(string applicantConnectionId);
/// <summary>
/// Check constraints to see whether communication is allowed (the check is done based on the Master and Slave constraints)
/// Return true if communication is allowed, otherwise return false
/// </summary>
/// <param name="connectionId"></param>
/// <param name="dataType"></param>
/// <returns>Return true if communication is allowed, otherwise return false</returns>
bool IsCommunicationAllowed(string connectionId, Message.Types.DataType dataType);
/// <summary>
/// Check if the communication is full
/// Return true if the communication is full, otherwise return false
/// </summary>
/// <returns>Return true if the communication is full, otherwise return false</returns>
bool IsCommunicationFull();
/// <summary>
/// Check if master is still connected to server
/// Return true if master is connected to server, otherwise return false
/// </summary>
/// <returns>Return true if master is connected to server, otherwise return false</returns>
bool IsMasterConnected();
/// <summary>
/// Check if media stream header exist
/// Return true if exist, otherwise false
/// </summary>
/// <param name="streamId">ID of stream</param>
/// <returns>Return true if exist, otherwise false</returns>
bool IsMediaStreamHeaderExist(uint streamId);
/// <summary>
/// Check if specified mimeType is enabled for specified connection (by default all mimeTypes should be enabled)
/// Return true if mime type is enabled, otherwise return false
/// </summary>
/// <param name="connectionId">End point connection ID (receiver)</param>
/// <param name="mimeType">Mime type of data</param>
/// <returns>Return true if mime type is enabled, otherwise return false</returns>
bool IsMimeTypeEnabled(string connectionId, string mimeType);
/// <summary>
/// Check if requestStreamId action is allowed by specific connection
/// Return true if requestStreamId is allowed, otherwise return false
/// </summary>
/// <param name="connectionId">Connection ID</param>
/// <returns>Return true if requestStreamId is allowed, otherwise return false</returns>
bool IsRequestStreamIdAllowed(string connectionId);
/// <summary>
/// Remove connection from communication process
/// </summary>
/// <param name="connectionId">ID of connection which will be removed from communication</param>
void RemoveConnection(string connectionId);
/// <summary>
/// Set constraints for master connection (has higher priority than slave constraints, [for example if master has disabled video then slaves also have disabled video])
/// </summary>
/// <param name="constraint"></param>
void SetMasterConstraint(CommunicationConstraint constraint);
/// <summary>
/// Set media header for specific streamId
/// </summary>
/// <param name="streamId">ID of stream</param>
/// <param name="headerBytes">First bytes of media (Initialization segment)</param>
void SetMediaStreamHeader(uint streamId, byte[] headerBytes);
/// <summary>
/// Set constraints for slave connection (has lower priority than master constraints, [for example if slave has disabled video, master could have video enabled])
/// </summary>
/// <param name="constraint"></param>
void SetSlaveConstraint(CommunicationConstraint constraint);
/// <summary>
/// Stop stream. Check if stream is associated with specific connection. On success release resources associated with stream.
/// Return true on success, otherwise return false.
/// </summary>
/// <param name="streamId">Stream ID to stop</param>
/// <param name="connectionId">Connection which want to stop stream</param>
/// <returns>Return true on success, otherwise return false.</returns>
bool StopStream(uint streamId, string connectionId);
}
}
|
using System;
using Vanara.PInvoke;
using static Vanara.PInvoke.Kernel32;
using static Vanara.PInvoke.User32_Gdi;
namespace Vanara.Windows.Shell
{
/// <summary>Wraps the indirect resource string ("@module,resource" reference) used by some Shell classes.</summary>
public class IndirectString
{
/// <summary>Initializes a new instance of the <see cref="IndirectString"/> class.</summary>
public IndirectString() { }
/// <summary>Initializes a new instance of the <see cref="IndirectString"/> class.</summary>
/// <param name="module">The module file name.</param>
/// <param name="resourceIdOrIndex">
/// If this number is positive, this is the index of the resource in the module file. If negative, the absolute value of the number
/// is the resource ID of the icon in the module file.
/// </param>
public IndirectString(string module, int resourceIdOrIndex)
{
ModuleFileName = module;
ResourceId = resourceIdOrIndex;
}
/// <summary>Returns true if this location is valid.</summary>
/// <value><c>true</c> if this location is valid; otherwise, <c>false</c>.</value>
public bool IsValid => System.IO.File.Exists(ModuleFileName) && ResourceId != 0;
/// <summary>Gets or sets the module file name.</summary>
/// <value>The module file name.</value>
public string ModuleFileName { get; set; }
/// <summary>Gets or sets the resource index or resource ID.</summary>
/// <value>
/// If this number is positive, this is the index of the resource in the module file. If negative, the absolute value of the number
/// is the resource ID of the icon in the module file.
/// </value>
public int ResourceId { get; set; }
/// <summary>Gets the string resource referred to by this instance.</summary>
/// <value>The resolved string, or <c>null</c> if this instance is not valid.</value>
public string Value
{
get
{
if (!IsValid) return null;
using (var lib = LoadLibraryEx(ModuleFileName, Kernel32.LoadLibraryExFlags.LOAD_LIBRARY_AS_IMAGE_RESOURCE))
{
if (ResourceId >= 0) throw new NotSupportedException();
const int sz = 2048;
var sb = new System.Text.StringBuilder(sz, sz);
LoadString(lib, -ResourceId, sb, sz);
return sb.ToString();
}
}
}
/// <summary>Tries to parse the specified string to create a <see cref="IndirectString"/> instance.</summary>
/// <param name="value">The string representation in the format of either "ModuleFileName,ResourceIndex" or "ModuleFileName,-ResourceID".</param>
/// <param name="loc">The resulting <see cref="IndirectString"/> instance on success.</param>
/// <returns><c>true</c> if successfully parsed.</returns>
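/// <example>Hypothetical usage (the module path is illustrative only): parsing <c>"@C:\MyLib.dll,-101"</c> yields <c>ModuleFileName == "C:\MyLib.dll"</c> and <c>ResourceId == -101</c>.</example>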
public static bool TryParse(string value, out IndirectString loc)
{
var parts = value?.Split(',');
if (parts != null && parts.Length == 2 && int.TryParse(parts[1], out var i) && parts[0].StartsWith("@"))
{
loc = new IndirectString(parts[0].TrimStart('@'), i);
return true;
}
loc = new IndirectString();
return false;
}
/// <summary>Returns a <see cref="System.String"/> that represents this instance.</summary>
/// <returns>A <see cref="System.String"/> that represents this instance.</returns>
public override string ToString() => IsValid ? $"@{ModuleFileName},{ResourceId}" : string.Empty;
}
}
|
Board Thread:Fun and games/@comment-4024597-20160125102420/@comment-25301846-20160222184350
2-5 Puff Shroom's left (or right) horn is grey at the end.
|
Breastfeeding in the first hour of life in Brazilian private hospitals participating in a quality-of-care improvement project
Background: The Baby-Friendly Hospital Initiative's Step 4 recommends: "support mothers to start breastfeeding as soon as possible after birth", thus contributing to the reduction of neonatal mortality. The objective of this study is to estimate the prevalence of breastfeeding in the first hour of life in private maternity hospitals participating in the "Adequate Childbirth Project", a quality-of-care improvement project, and to analyze determinants of this outcome.
Methods: Secondary analysis of data collected by the cross-sectional evaluative "Healthy Birth Study", conducted in 2017 in 12 maternity hospitals participating in the Adequate Childbirth Project, where 4800 mothers were interviewed, and hospital records were observed. Conditions that prevented breastfeeding at birth, such as maternal HIV infection and newborns' severe malformations, were excluded. Multiple logistic regression was performed according to a hierarchical theoretical model.
Results: The prevalence of breastfeeding in the first hour of life was 58% (CI 95% 56.6–59.5%). Lower maternal education (aOR 0.643; CI 95% 0.528–0.782), lower economic status (aOR 0.687; CI 95% 0.504–0.935), cesarean section delivery (aOR 0.649; CI 95% 0.529–0.797), preterm birth (aOR 0.660; CI 95% 0.460–0.948) and non-rooming-in at birth (aOR 0.669; CI 95% 0.559–0.800) were negatively associated with the outcome. Receiving information during prenatal care about the importance of breastfeeding at birth (aOR 2.585; CI 95% 2.102–3.179), being target of the quality-of-care improvement project (aOR 1.273; CI 95% 1.065–1.522), skin-to-skin contact at birth (aOR 2.127; CI 95% 1.791–2.525) and female newborn (aOR 1.194; CI 95% 1.008–1.415) were factors positively associated with the outcome.
Conclusions: The private maternities participating in the Healthy Birth Study showed a good prevalence of breastfeeding in the first hour of life, according to WHO parameters. Prenatal guidance on breastfeeding at birth, being target of the quality-of-care improvement project and skin-to-skin contact at birth contributed to breastfeeding in the first hour of life.
Background
Breastfeeding reduces deaths of children under 5 years of age by 13% [1] and prevents child morbidity due to diarrhea and respiratory infections [2]. Even in high-income populations, its practice is important, as it reduces mortality from necrotizing enterocolitis and sudden infant death syndrome [3].
Breastfeeding at birth prevents the colonization of the child's gastrointestinal tract by gram-negative bacteria in the hospital environment. Colostrum contains immunological factors that protect the newborn and stimulate an active immune response [4]. A study carried out with data from 67 countries found an inverse correlation between breastfeeding in the first hour of life and neonatal mortality [5]. A survey conducted in Ghana with 10,947 children showed a 22% reduction in neonatal mortality associated with breastfeeding in the first hour of life, compared to those who started breastfeeding after 24 h [6]. A study of 37,350 children, carried out using data from the II Human Development Survey of India, showed an almost 3 times higher risk of mortality in children not breastfed in the first hour of life [7].
The Baby-Friendly Hospital Initiative motivates facilities providing maternity and newborn services worldwide to implement the Ten Steps to Successful Breastfeeding.
Step 4 recommends: "Facilitate immediate and uninterrupted skin-to-skin contact and support mothers to initiate breastfeeding as soon as possible after birth". The skin-to-skin contact should remain after birth for at least one hour, mothers being encouraged to identify whether the baby shows signs of wanting to be breastfed, and help should be offered, if necessary [8]. This was the theme of the World Breastfeeding Week in 2007: "Breastfeeding: the 1st Hour - Save ONE million babies!" [9]. In Brazil, the National Demography and Health Survey, carried out in 2006, found that 43% of children started breastfeeding in the first hour of life [10], while in 2008, in the Brazilian capitals, this proportion was 67.7% [11]. In the "Birth in Brazil" survey, conducted between 2011 and 2012, 56% of children born in hospitals with more than 500 births/year (corresponding to 78.6% of hospital births) were breastfed in the first hour of life [12]. Systematic reviews [13,14] point to cesarean section delivery as the most important risk factor for not breastfeeding in the first hour of life. This is worrying, since in 2008, Brazil contributed 15% of the total unnecessary cesarean sections performed in the world [15]. Birth in a private maternity hospital also proved to be a risk factor for delayed initiation of breastfeeding, while delivery in a Baby-Friendly Hospital was a protective factor [13].
The high contribution of the Brazilian private sector to the performance of cesarean sections and consequent neonatal outcomes stimulated the creation of the "Adequate Childbirth Project", aiming at prenatal and childbirth care improvement and reduction of the number of cesarean sections and hospitalizations in neonatal ICU [16]. The project was structured into four components: governance, women's empowerment, reorganization of care and monitoring [17]. The Healthy Birth Study assessed the degree of implementation and the effects of this project.
In Brazil, 30% of mothers give birth in the private sector, but private maternity hospitals' breastfeeding practices, especially at birth, as well as the factors associated with these practices, are seldom studied. The present study innovates by investigating the prevalence of breastfeeding in the first hour of life in private maternity hospitals participating in a quality-of-care improvement project and by analyzing the determinants of this outcome.
Methods
This study is a secondary analysis of data collected by the Healthy Birth Study, a cross-sectional evaluative investigation carried out in 2017, 18 months after the beginning of the implementation of the Adequate Childbirth Project (ACP).
The Healthy Birth Study selected a convenience sample of 12 hospitals from the 23 private hospitals that joined the project to improve prenatal and childbirth care. For the selection of these hospitals, three criteria were considered: location of the hospital by geographic macro-region (at least one hospital from the northeast, southeast and south regions), type of hospital (owned or not by a health insurance company) and performance of the hospital in the prenatal and childbirth care improvement project (hospitals that reported good and bad results in achieving the ACP goals, according to administrative data provided by the project coordination board, were selected) [18]. Among the hospitals participating in the study, one was situated in the northeast region, nine in the southeast region and two in the south of Brazil.
The Healthy Birth Study's sample was calculated to detect a 10% reduction in the proportion of cesarean sections, using a 50% cesarean rate as a reference, with 80% power and a 5% significance level. Overall, the sample size of 4800 women (12 hospitals times 400 women) had an accuracy of 80% in detecting a 2.5% reduction in the prevalence of cesarean sections. Losses and refusals accounted for about 5% of women and were replenished to complete 400 women in each hospital.
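The exact formula is not stated; for reference, a standard two-group comparison of proportions with these inputs (interpreting the 10% reduction as absolute, i.e. 50% versus 40%, with 80% power and two-sided 5% significance) gives roughly

n \approx \frac{(z_{1-\alpha/2} + z_{1-\beta})^{2}\,[p_1(1-p_1) + p_2(1-p_2)]}{(p_1 - p_2)^{2}} = \frac{(1.96 + 0.8416)^{2}\,(0.25 + 0.24)}{(0.10)^{2}} \approx 385

per group, which is of the same order as the 400 women recruited per hospital.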
Electronic forms using the REDCap [19] application were developed. A pilot study was carried out in one of the maternity hospitals participating in the prenatal and childbirth care improvement project not included in this evaluative research. The questionnaires were tested and the logistical aspects of the fieldwork were refined.
Trained interviewers, external from the hospitals, conducted data collection, addressing all women admitted to the hospital who met the eligibility criteria to participate in the study, until 400 participants were included in each hospital. Women who did not speak Portuguese, with hearing loss, whose delivery occurred outside the hospital and who were hospitalized for judicial termination of pregnancy were not eligible for the study. The women were interviewed face to face at least 6 h after vaginal delivery and 12 h after cesarean section, after reading and signing an informed consent form.
Three instruments were used: 1. questionnaire applied to the puerperal woman with questions related to maternal characteristics, pregnancy and prenatal care, childbirth care, newborn and infant feeding; 2. form for extracting data from the medical record of the puerperal woman and the newborn about the type of pregnancy, type of delivery, condition of the newborn and feeding of the newborn during hospitalization; 3. form for extracting data from the prenatal card [18].
In prolonged hospitalizations, data were collected from medical records until the 28th day of infant hospitalization and on the woman's 42nd day of hospitalization. In the case of hospital transfer, data were obtained from the medical records of the hospital from which the puerperal woman and/or the newborn were discharged. Data collection was conducted from March to August 2017. Due to the variation in the size of hospitals, the time required for data collection ranged from 1 to 4 months, depending on the total number of births per month in each participating hospital [18].
The inclusion criteria for the present study were live newborns with gestational age ≥ 34 weeks (stillborns excluded: n = 19). Newborns or women who met one or more of the following criteria were excluded: newborns with malformations that could disturb or prevent breastfeeding (n = 47), infants of HIV-infected mothers (n = 6), postpartum women with severe maternal morbidity or transferred to the Intensive Care Unit (ICU) due to complications during delivery (n = 244), and babies transferred to the ICU at birth (n = 527).
A total of 785 women were excluded, and data from 4,093 mother-newborn dyads were analyzed. Post-hoc calculations showed that, considering a prevalence of exclusive breastfeeding of 60% and a significance level of 5%, the sample remaining after these exclusions had 90% power to detect 5% differences in the proportion of this outcome.
The outcome was breastfeeding in the first hour of life (yes/no), obtained from two questions to the puerperal woman: "After birth, did you breastfeed in the delivery room?" and "How long, more or less, did it take you to breastfeed for the first time?" Breastfeeding in the first hour of life was considered present when the answer to the first question was "yes" and/or the answer to the second was up to 1 h. A hierarchical theoretical model was adopted, with maternal and household characteristics as distal exposure variables; pregnancy and prenatal care characteristics as intermediate exposure variables; and hospital, delivery, and newborn characteristics as proximal exposure variables (Fig. 1).
The selected maternal characteristics were age (< 20; 20-34; ≥ 35), self-reported skin color (white; black; brown/yellow/indigenous), having a partner (single/separated/widowed; married/living with a partner), and education (up to 14 years; ≥ 15 years). Household characteristics were analyzed based on economic conditions (class A, above average; B, average; C+D+E, below average) [20]. Characteristics of pregnancy and prenatal care were parity (primiparous; multiparous), being a target of the Adequate Childbirth Project (yes; no), maternal disease such as hypertension, diabetes, or other chronic diseases (yes; no), number of prenatal consultations (≤ 7; 8 or more), beginning of prenatal care (up to 12 weeks; ≥ 13 weeks), and guidance on breastfeeding during prenatal care (yes; no). The characteristics of childbirth care selected were presence of a companion during delivery (yes; no/did not want), type of delivery (vaginal; cesarean section), skin-to-skin contact at birth (yes; no) and rooming-in (measured by the question "did the baby go to the room with the mother?": yes; no). The newborn characteristics selected were birth weight (< 2500 g; ≥ 2500 g), gestational age (34-36 weeks; ≥ 37 weeks) and sex (male; female).
Different groups of women were targets of the Adequate Childbirth Project, as defined by the hospital manager, such as: 1. primiparous women; 2. pregnant women belonging to Robson Groups 1 to 4 (nulliparous or multiparous without previous cesarean section, single fetus, term, cephalic, according to WHO [20]), because these women had greater chances of vaginal delivery; 3. women served by the on-call team; 4. pregnant women without previous uterine scarring whose delivery was performed by the on-call team [18].
All statistical analyses were performed using SPSS software version 17 (https://www.ibm.com/), using data weighting and incorporating the design effect, considering the complex sampling process [18]. Considering the sample weight, the final number analyzed was 4060 dyads.
Initially, univariate analysis was conducted to identify the distribution of the exposure variables and the outcome under investigation. Then, bivariate analysis was applied between each exposure variable and the outcome, using Pearson's chi-square test, and crude odds ratios (OR) were obtained with their respective 95% confidence intervals (95% CI). The variables with p ≤ 0.20 in the bivariate analysis were included in the statistical modeling. Finally, multiple logistic regression was conducted, following the hierarchical conceptual theoretical model, according to the temporal proximity of each variable to the outcome [21].
Results
The prevalence of breastfeeding in the first hour of life was 58.0% (CI 56.6-59.5%). There was a wide variation between hospitals: five maternity hospitals had prevalence rates above 80% and three had rates below 30%. There was also discrepancy within regions, as both the highest (93.0%) and the lowest prevalence (6.3%) were found in the Southeast Region.
Most of the women were aged 20 to 34 years, were white, lived with a partner, had a high level of education and belonged to economic class B. The distal factors associated with the outcome (p < 0.20) in the bivariate analysis were age, skin color, marital status, education level and socioeconomic class (Table 1).
Concerning the intermediate characteristics, more than half of the mothers were targets of the Adequate Childbirth Project, almost 60% of the women were primiparous, just over a tenth had some pathology (hypertension, diabetes, or other chronic disease), and the majority started prenatal care in the first trimester and attended 8 or more prenatal visits. About three quarters of the women received prenatal guidance on breastfeeding at birth. Being a target of the Adequate Childbirth Project, maternal pathology, number of prenatal visits, beginning of prenatal care and receiving prenatal guidance on breastfeeding in the first hour of life were associated with the outcome in the bivariate analysis (Table 2). Most women had a companion during delivery and more than three quarters underwent cesarean section. Most newborns were born at term, the sex ratio was similar, about 3% had low birth weight and almost 60% had skin-to-skin contact with the mother at birth. The majority (64.2%) of the newborns did not go to rooming-in directly after birth, staying for a while in a nursery/heated crib/incubator. In the bivariate analysis, among the proximal characteristics, type of delivery, sex of the baby, preterm birth, low birth weight, skin-to-skin contact with the mother and rooming-in were associated with breastfeeding in the first hour of life (Table 3).
In the multivariate analysis, women with lower education, economic status below average, who underwent cesarean section, whose newborn had gestational age between 34 and 36 weeks and whose newborn did not go directly to rooming-in had lower rates of breastfeeding in the first hour of life. Being a target of the Adequate Childbirth Project, receiving prenatal guidance about breastfeeding at birth, a female newborn and skin-to-skin contact at birth increased the chances of breastfeeding in the first hour of life (Table 4).
Discussion
In the private maternity hospitals participating in the Healthy Birth Study, 58% of newborns were breastfed in the first hour of life, a percentage classified as "good" (50-89%) according to WHO parameters [22]. This prevalence stands between that found by the National Demography and Health Survey in 2006 (42.9%) [11] and by the survey carried out in 2008 in the capitals and the Federal District (67.7%) [10]. It is similar to the prevalence of 57.8% verified in the WHO Global Maternal and Perinatal Health Survey covering 24 countries in Africa, Asia and Latin America [23] and well above that found by other investigations in private maternity hospitals. In the study "Birth in Brazil", carried out between 2011 and 2012, 25.3% of newborns in private maternity hospitals were breastfed in the first hour of life [24], while in 1999/2001, in 25 private hospitals in the city of Rio de Janeiro, this prevalence was only 1.6% [25].
Despite the unfavorable scenario of private maternity hospitals, where the practice of elective cesarean section is very common [26], the quality-of-care improvement project increased the prevalence of breastfeeding in the first hour of life in the target group by almost 30%, reflecting advances in the reorganization of prenatal and childbirth care. The prevalence of breastfeeding at birth in private maternity hospitals before the Adequate Childbirth Project was much lower, around 26% [24]. Nevertheless, not all hospitals surveyed seemed to respond in the same way. A quarter of hospitals had a low prevalence (< 30%), according to WHO parameters [22], while in half of them more than 60% of babies suckled in the first hour of life. Other factors were also associated with the outcome. The prevalence of breastfeeding in the first hour of life in women with fewer than 15 years of schooling was about 35% lower than in those with higher schooling. Higher educational levels probably allow greater access to information on the benefits of breastfeeding at birth [27,28]. In a systematic review that brought together studies from Asia, Africa and South America, low maternal education was also a risk factor for not breastfeeding in the first hour of life [13]. In the same direction, a dose-response effect was found between economic class and the prevalence of breastfeeding at birth: women from classes C, D and E breastfed about 30% less than women from class A, and women from class B 15% less. The more unfavorable the socioeconomic stratum, the less frequent this practice [13].
Receiving prenatal guidance on breastfeeding in the first hour of life more than doubled the prevalence of the outcome. Guidance on breastfeeding during prenatal care proved to be an important factor in improving breastfeeding at birth in several studies [12, 13, 28-31], because during pregnancy women need guidance and support to breastfeed [13, 29].
Cesarean section was one of the factors most strongly associated with the outcome, in the present study and in other settings, decreasing the prevalence of breastfeeding in the first hour of life by almost 40%. Cesarean section appears as a risk factor [13] related to postpartum pain [14,32,33], less skin-to-skin contact with the mother [14,34], maternal anesthesia [25,28,29,35,36], difficulty in holding the child soon after birth [29] or postpartum surgical procedures, which delay and often interrupt mother-baby contact [12,25,37]. Mothers with vaginal delivery may have a more active participation in the breastfeeding process at birth, facilitating skin-to-skin contact, recognizing signs that the newborn is able to be breastfed and releasing oxytocin [24,34,38]. The Adequate Childbirth Project was effective in reducing the percentage of cesarean sections in the maternity hospitals studied, with an overall reduction of 10% compared to the same maternity hospitals at the beginning of the quality-of-care improvement project [39], contributing to the increase in the prevalence of breastfeeding in the first hour of life in that context.
Female babies were almost 20% more likely to be breastfed in the first hour of life than male babies. Boccolini et al. [40] found a similar association in a Brazilian study, which may be attributed to a higher probability of adverse events in pregnancy among male children, or to cultural beliefs, such as the expectation that female babies suckle the breast more gently than male ones.
Late preterm newborns (gestational age 34-36 weeks) had a 35% lower chance of being breastfed at birth than those born at term. Vieira et al. [29] argue that preterm newborns are less likely to be breastfed in the first hour of life because they are sleepier, do not coordinate sucking, breathing and swallowing well, and have a weaker sucking reflex. Even early term newborns seem to have some difficulty in establishing exclusive breastfeeding compared to those born from 38 weeks of gestational age, as they have a greater chance of adverse events, such as respiratory diseases and longer hospital stay [41]. Newborns delivered after spontaneous labor tend to be born at a higher gestational age, since elective cesarean sections may contribute to the reduction of gestational age [42]. Skin-to-skin contact between mother and child at birth doubled the prevalence of breastfeeding in the first hour of life. In Rio de Janeiro, a similar result was observed, with hospital routines being one of the reasons that prevent this early contact with the mother [25]. Lau [33], in a study in Singapore, found that skin-to-skin contact in the first 30 min of life had a positive association with breastfeeding in the first hour of life, both in cesarean sections and in vaginal deliveries. Skin-to-skin contact between mother and child is also important for the maintenance of the baby's body temperature, for cardiorespiratory stability and for mother-child bonding, facilitating the early initiation of breastfeeding [33].
More than half of the newborns did not stay in rooming-in from birth, and not because of the babies' clinical severity, since only 3.2% of them went to the semi-intensive neonatal unit; about 80.7% were placed in a nursery/heated crib/incubator after birth. In a Brazilian study, the prevalence of rooming-in in private maternity hospitals was also low (51.3%) [24]. Rooming-in has been an international recommendation since the 1950s [43] and was adopted in Brazil as a law in 1993 [44]. Among the benefits of rooming-in is greater contact between mother and baby, which enhances the mother's autonomy to understand and care for the child, in addition to more opportunities for interaction with the health team [40,45].
The present study has limitations. Cross-sectional studies do not always allow the establishment of a causal relationship between exposure variables and the outcome; however, most of the variables studied have a clear temporal relationship with breastfeeding at birth. A convenience sample of private maternity hospitals was intentionally adopted, located in three Brazilian regions and not covering the North and Midwest regions, which limits its representativeness in relation to the private network.
Conclusions
We conclude that the private maternity hospitals participating in the Healthy Birth Study had a good prevalence of breastfeeding in the first hour of life, according to WHO parameters [22]. Prenatal guidance on breastfeeding at birth, being a target of a project for improving the quality of care, and immediate skin-to-skin contact contributed to the outcome, showing the importance of implementing these actions to promote breastfeeding in the first hour of life. Other factors related to the women's conditions and to delivery were associated with the outcome in the multivariate model, indicating that they should also be objects of care to improve this practice.
We recommend greater investment in expanding the practice of breastfeeding in the first hour of life, which is vital for children's health and wellbeing. Quality-of-care improvement models should be encouraged, so that the private sector can be supported in reorganizing prenatal and childbirth care from a humanization perspective. Good hospital care practices for childbirth are necessary, with the creation of mechanisms that stimulate vaginal delivery, skin-to-skin contact and immediate rooming-in, to favor breastfeeding at birth.
|
Add "-I" flag to string of paths in bash
Having a string of space separated paths, relative or absolute, example:
/aaaa/bbbb/ccc /ddas/sdsa/dasd ./dasd/dsd dasd/dsda/dsd dsd/dsad/erer/rerer ../dasd/dsad ../../sdasd/sdsd
How can I process this in bash in order to prepend every one of these paths with -I? Example output should be:
-I/aaaa/bbbb/ccc -I/ddas/sdsa/dasd -I./dasd/dsd -Idasd/dsda/dsd -Idsd/dsad/erer/rerer -I../dasd/dsad -I../../sdasd/sdsd
Thanks
Edit for context:
As some of you may already have guessed, the purpose of this is to prepend folder paths with the -I flag for gcc commands.
I'm using this in a makefile. The following (slightly modified from anubhava's suggestion) works perfectly:
#to include subdirectories in source
TEMP := $(shell find $(SOURCE_PATH)* -type d)
TEMP := $(shell echo $(TEMP) | awk 1 ORS=' ')
TEMP := $(shell printf -- "-I%s " ${TEMP} )
ifdef TEMP
INC_PATHS += $(TEMP)
endif
In general, it's not safe to assume that none of the paths will themselves contain whitespace.
You're right. But this just means that I need to figure out a way to first get these paths in between quotation marks. Shouldn't be that hard
You can use printf:
s='/aaaa/bbbb/ccc /ddas/sdsa/dasd ./dasd/dsd dasd/dsda/dsd dsd/dsad/erer/rerer ../dasd/dsad ../../sdasd/sdsd'
printf -v output -- "-I%s " $s
echo "$output"
-I/aaaa/bbbb/ccc -I/ddas/sdsa/dasd -I./dasd/dsd -Idasd/dsda/dsd -Idsd/dsad/erer/rerer -I../dasd/dsad -I../../sdasd/sdsd
Or if using an array:
arr=(/aaaa/bbbb/ccc /ddas/sdsa/dasd ./dasd/dsd dasd/dsda/dsd dsd/dsad/erer/rerer ../dasd/dsad ../../sdasd/sdsd)
printf -v output -- "-I%s " "${arr[@]}"
If you have the paths in an array:
paths=(/aaaa/bbbb/ccc /ddas/sdsa/dasd ./dasd/dsd dasd/dsda/dsd dsd/dsad/erer/rerer ../dasd/dsad ../../sdasd/sdsd)
then you can use bash's find-and-replace syntax:
includes=("${paths[@]/#/-I}")
You can provide an array as a series of arguments to a command (or function):
compile $the_file "${includes[@]}"
You can do a similar transform on $@ (in quotes) in a bash function
with_includes() {
# If you need to do something with the first few arguments,
# collect them here and then call shift:
# the_file=$1; shift
# But you need to check $# to make sure the arguments exist :)
local includes=("${@/#/-I}")
compile $the_file "${includes[@]}"
}
#!/usr/local/bin/bash
echo "/aaaa/bbbb/ccc /ddas/sdsa/dasd ./dasd/dsd dasd/dsda/dsd dsd/dsad/erer/rerer ../dasd/dsad ../../sdasd/sdsd" | \
sed 's/ / -I/g; s/^/-I/'
I agree with the others that this is bad form, however. Your paths should ideally each be on a separate line.
#!/usr/local/bin/bash
echo "/aaaa/bbbb/ccc /ddas/sdsa/dasd ./dasd/dsd dasd/dsda/dsd \
dsd/dsad/erer/rerer ../dasd/dsad ../../sdasd/sdsd" | \
tr ' ' '\n' > tempfile
let "lines=$(wc -l tempfile | cut -d' ' -f1)"
let "lines=lines+1"
let "counter=1"
function loop() {
[ "${counter}" -lt "${lines}" ] && \
echo "Current loop iteration: ${counter}." && \
sed -n "${counter}p" tempfile | sed s'@^@ -I@'g >> tempfile2 && \
let "counter=counter+1" && \
loop
}
loop
[ -f ./tempfile ] && \
echo "File exists: ./tempfile." && \
rm ./tempfile
[ ! -f ./tempfile ] && \
echo "File deleted: ./tempfile."
Then you can just edit the beginning of each line in tempfile2 to do whatever, and run it as a shell script.
Inb4 "Oh God no, he's using test! That could accidentally create an infinite loop! WE'RE ALL GOING TO DIE!"
Infinite loops aren't the end of the world. The worst it will cause is a segfault. Occasionally they can even be useful. OP, you will need to make sure you include "&& \" (without quotes) at the end of every command within a test block though; except for the last one of course.
I learned to do it this way because I've spent time in Sparta OpenBSD, where rm and most other utilities don't have -v flags, and we also aren't afraid of infinite loops. ;)
|
Power supply monitoring for an implantable device
ABSTRACT
A method and an apparatus for projecting an end of service (EOS) and/or an elective replacement indication (ERI) of a component in an implantable device and for determining an impedance experienced by a lead associated with the implantable device. An active charge depletion of an implantable device is determined. An inactive charge depletion of the implantable device is determined. A time period until an end of service (EOS) and/or elective replacement indication (ERI) of a power supply associated with the IMD based upon the active charge depletion, the inactive charge depletion, and the initial and final (EOS) battery charges, is determined. Furthermore, to determine the impedance described above, a substantially constant current signal is provided through a first terminal and a second terminal of the lead. A voltage across the first and second terminals is measured. An impedance across the first and second terminals is determined based upon the constant current signal and the measured voltage.
BACKGROUND OF THE INVENTION
1. Field of the Invention
This invention relates generally to implantable medical devices, and more particularly to methods, apparatus, and systems for monitoring power consumption and impedance characteristics relating to implantable medical devices.
2. Description of the Related Art
There have been many improvements over the last several decades in medical treatments for disorders of the nervous system, such as epilepsy and other motor disorders, and abnormal neural discharge disorders. One of the more recently available treatments involves the application of an electrical signal to reduce various symptoms or effects caused by such neural disorders. For example, electrical signals have been successfully applied at strategic locations in the human body to provide various benefits, including reducing occurrences of seizures and/or improving or ameliorating other conditions. A particular example of such a treatment regimen involves applying an electrical signal to the vagus nerve of the human body to reduce or eliminate epileptic seizures, as described in U.S. Pat. No. 4,702,254 to Dr. Jacob Zabara, which is hereby incorporated by reference in its entirety in this specification. Electrical stimulation of the vagus nerve may be provided by implanting an electrical device underneath the skin of a patient and performing a detection and electrical stimulation process. Alternatively, the system may operate without a detection system if the patient has been diagnosed with epilepsy, and may periodically apply a series of electrical pulses to the vagus (or other cranial) nerve intermittently throughout the day, or over another predetermined time interval.
Many types of implantable medical devices, such as pacemakers and drug infusion pumps, typically include custom integrated circuits that are complex, expensive, and specific to the intended use. These systems also typically employ proprietary communications techniques to transfer information between the implant and an external programmer. The custom circuitry is developed because of the need to keep power consumption at a minimum, to conform to the allowable size for implantable devices, and to support the complexity of the detection and communication techniques, while still supplying the particular intended therapy.
Typically, implantable medical devices (IMDs) involving the delivery of electrical pulses to body tissues, such as pacemakers (heart tissue) and vagus nerve stimulators (nerve tissue), comprise a pulse generator for generating the electrical pulses and a lead assembly coupled at its proximal end to the pulse generator terminals and at its distal end to one or more electrodes in contact with the body tissue to be stimulated. One of the key components of such IMDs is the power supply, ordinarily a battery, which may or may not be rechargeable. In many cases surgery is required to replace an exhausted battery. To provide adequate warning of impending depletion of the battery and subsequent degradation of the operation of the IMD, various signals may be established and monitored. One such signal is an elective replacement indicator (ERI) that may indicate that an electrical device component, such as a battery, has reached a point where replacement or recharging is recommended. Another indicator may be an end of service (EOS) signal, which may provide an indication that the operation of the implanted device is at, or near, termination and delivery of the intended therapy can no longer be guaranteed. ERI and EOS are commonly used indicators of the present status of an IMD battery. ERI is intended to be a warning signal of an impending EOS indication, providing sufficient time (e.g., several weeks or months) in typical applications to schedule and perform the replacement or recharging.
Generally, battery-powered IMDs base the EOS and the ERI signals on battery voltage and/or battery impedance measurements. One problem associated with these methodologies is that, for many battery chemistries, these measured battery characteristics do not have monotonically-changing values with respect to remaining charge. For example, lithium/carbon monofluoride (Li/CFx) cells commonly used in neurostimulators and other IMDs have a relatively flat voltage discharge curve for the majority of their charge life, and the present status of the battery cannot be accurately and unambiguously determined from a measured battery characteristic.
Another problem associated with this methodology is the variability of current consumption for a specific device's programmed therapy or circuitry. This variability, combined with the uncertainty of the battery's present status prior to ERI or EOS, hinders reliable estimation of the anticipated time until reaching ERI or EOS. For scheduling purposes, it is desirable to have a constantly available and reliable estimate over all therapeutic parameter ranges and operation settings of the time until the device will reach EOS, and provide an indication, similar in purpose to ERI, when that time reaches a specific value or range.
Impedance measurements are used to assess the integrity of the electrical leads that deliver the stimulation provided by a pulse generator. A change in the impedance across the leads that deliver the electrical pulses may be indicative either of changes in a patient's body or in the electrical leads themselves. For example, damage in the lead, which may be induced by a break in one or more filaments in a multifilament lead wire, or changes in the body tissue where stimulation is delivered, may affect the efficacy of the stimulation therapy. Therefore, it is desirable for changes in the lead impedance, which may be indicative of various changes or malfunctions, to be accurately detected.
For instance, the integrity of the leads that deliver stimulation is of interest to ensure that the proper therapy dosage is delivered to the patient. Some IMDs, most notably pacemakers, provide a voltage-controlled output that is delivered to one or more body locations (such as the heart). Other IMDs, such as a vagus nerve stimulator device developed by Cyberonics, Inc., provide a current-controlled output. Generally, however, state-of-the-art measurements of lead impedance involve an analysis of the delivery of a voltage signal from a capacitive (C) energy storage component through the resistive (R) lead impedance and an examination of the decay of that signal based upon a time constant proportional to the product of the resistance and capacitance (RC). The total equivalent impedance present at the leads and the known energy source total equivalent capacitance cause a time-constant discharge curve. As the voltage on the capacitance is discharged through the resistance, the exponential decay of this voltage may be monitored to determine the decay time constant RC. From that time constant and an estimate of the known equivalent capacitance C, the equivalent resistance R presented by the leads may be mathematically estimated. However, this type of measurement may lead to inaccuracies for a number of reasons, including the fact that the discharging of the voltage signal may be affected by other resistances and capacitances in the system, the accuracy of the capacitor, the time, voltage, and algorithmic accuracies of the measurement system, and the like. It would be desirable to have a more efficient and accurate method, apparatus, and/or system to measure or assess the impedance present at the leads that deliver an electrical stimulation or therapy.
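To make the prior-art RC-decay approach described above concrete, the following Python sketch (not taken from the patent; the capacitance, voltage samples, and timing are hypothetical values chosen for illustration) estimates the lead resistance R from the exponential decay V(t) = V0 * exp(-t / (R * C)) of a capacitor voltage.

import math

def estimate_lead_resistance_rc(v0, v_t, t_seconds, c_farads):
    """Estimate lead resistance from the exponential decay of a capacitor
    voltage: V(t) = V0 * exp(-t / (R * C)), so R = t / (C * ln(V0 / V(t)))."""
    return t_seconds / (c_farads * math.log(v0 / v_t))

# Hypothetical example: a 10 uF output capacitor discharging from 5.0 V to
# 3.0 V in 25 ms implies an equivalent lead resistance of roughly 4.9 kOhm.
print(estimate_lead_resistance_rc(5.0, 3.0, 25e-3, 10e-6))

The same arithmetic also illustrates the sensitivity the passage mentions: any error in the assumed capacitance C or in the sampled voltages propagates directly into the estimate of R.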
The present invention is directed to overcoming, or at least reducing, the effects of one or more of the problems set forth above.
SUMMARY OF THE INVENTION
In one aspect of the present invention, a method is provided for projecting an end of service date and/or elective replacement indication of a power supply in an implantable medical device, the power supply having an initial electrical charge and a final electrical charge. According to a preferred embodiment, the method comprises determining an active charge depletion of an IMD, determining an inactive charge depletion of the implantable device, and determining a time period until an end of service (EOS) and/or elective replacement indication (ERI) of a power supply associated with the IMD based upon the active charge depletion, the inactive charge depletion, and the initial and final (EOS) battery charges.
In another embodiment, a method for projecting an end of service and/or an elective replacement indication of an IMD having a power supply with an initial electrical charge and a final electrical charge comprises determining a current usage rate for at least one future idle period, and determining charge depleted during at least one previous idle period. The method also comprises determining a current usage rate for at least one future stimulation period, and determining charge depleted during at least one previous stimulation period. A total charge depleted by the IMD is determined based upon the charges depleted during the at least one previous idle period and the at least one previous stimulation period, respectively. A total future charge depletion is determined based upon the current usage rate during the at least one future stimulation period and the current usage rate during the at least one future idle period. A time period until an end of service (EOS) and/or ERI of a power supply (e.g., a battery) of the IMD is determined based upon the total charge depleted and the total future charge depletion, as well as the initial and final (EOS) battery charges.
In a further embodiment of the present invention, a method is provided for projecting an end of service date and/or elective replacement indication of a power supply in an implantable medical device, the power supply having an initial electrical charge and a final electrical charge. According to a preferred embodiment, the method comprises determining a charge depletion of an IMD and determining a time period until an end of service (EOS) and/or elective replacement indication (ERI) of a power supply associated with the IMD based upon the charge depletion and the initial and final (EOS) battery charges.
In a further embodiment of the present invention, a method for projecting an end of service and/or elective replacement indication of an IMD having a power supply with an initial electrical charge and a final electrical charge comprises determining a previous active depleted charge of an IMD and determining a future or potential active current usage rate of the IMD. The method also comprises determining a previous inactive depleted charge of the IMD and determining a future or potential inactive current usage rate of the IMD. A time period until an EOS and/or ERI of a power supply associated with the implantable device is determined based upon the previous active depleted charge, the potential active current usage rate, the previous inactive depleted charge, the potential inactive current usage rate, and the initial and final (EOS) battery charges.
In another aspect of the present invention, an implantable medical device is provided for projecting an end of service and/or an elective replacement indication of a power supply in the IMD. The IMD comprises a battery with an initial electrical charge and a final electrical charge to provide power for at least one operation performed by the implantable device. The device further comprises a stimulation unit operatively coupled to the battery, the stimulation unit providing a stimulation signal to at least one body location. The stimulation unit preferably comprises an electrical pulse generator, but may alternatively comprise a drug pump, a magnetic field generator, a mechanical vibrator element, or other device for stimulating body tissue. The IMD also preferably comprises a controller operatively coupled to the stimulation unit and the battery. The controller is adapted to determine an active current usage rate and an inactive current usage rate of the IMD, as well as an active electrical charge depleted by the battery during stimulation and an inactive electrical charge depleted by the battery during inactive periods in which no electrical stimulation is provided to the patient. The controller is further adapted to determine a time period until an end of service of a power supply associated with the IMD based upon the active and inactive current usage rates, the active and inactive electrical charges depleted, and the initial and final electrical charges of the battery.
In still another aspect, the present invention comprises an IMD for projecting an EOS and/or an ERI of a battery. The IMD comprises a battery with an initial and a final (EOS) electrical charge, a stimulation unit providing an electrical stimulation signal, and a controller. The controller is adapted to determine first and second active current usage rates for current usage in a first stimulation therapy and a second stimulation therapy, respectively. The controller is also adapted to determine first and second inactive (i.e., non-stimulating) current usage rates in a first inactive mode and a second inactive mode, respectively. In addition, the controller is adapted to determine an active electrical charge depleted by the battery during stimulation and an inactive electrical charge depleted during inactive periods. The controller also determines a time period until an EOS and/or an ERI of the battery, based upon the first and second active current usage rates, the first and second inactive current usage rates, the active and inactive electrical charges depleted, and the initial and final electrical battery charges.
In another aspect of the present invention, a system is provided for projecting an EOS and/or an ERI of a power supply of an IMD. The system comprises an external device (i.e., a device outside the body of the patient) for performing remote communications with the IMD, and the IMD is also capable of communicating with the external device as well as delivering a stimulation signal to the patient. The IMD comprises a battery to provide power for delivering the stimulation signal, a communications unit to provide communications between the external device and the IMD, and a stimulation unit operatively coupled to the battery for providing a stimulation signal. The system also comprises a controller operatively coupled to the stimulation unit and to the battery. The controller comprises a charge depletion circuit for determining both an active charge depletion and an inactive charge depletion of the IMD. The controller further comprises an EOS/ERI circuit for determining a time period until an end of service and/or an elective replacement indication of a power supply associated with the implantable device, based upon the active charge depletion, the inactive charge depletion, and the original and EOS battery charges.
In yet another aspect of the present invention, a computer readable program storage device encoded with instructions is provided for projecting an end of service and/or an elective replacement indication of a power supply in an IMD. The computer readable program storage device is encoded with instructions that, when executed by a computer, determine an active charge depletion and an inactive charge depletion of the IMD, and also determine a time period until an end of service and/or an elective replacement indication of a power supply associated with the IMD based upon the determined active charge depletion, the determined inactive charge depletion, and the initial and final battery charges.
In another aspect of the present invention, a method is provided for determining an impedance presented by a lead associated with an IMD. In the method, a substantially constant current signal is provided through a first terminal and a second terminal of the lead. A voltage across the first and second terminals is measured, and an impedance across the first and second terminals is determined based upon the constant current signal provided and the measured voltage.
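As a minimal illustration of the constant-current approach just described (a sketch only; the current and voltage values are hypothetical, and a real device performs the measurement with its analog hardware rather than in software like this), the impedance follows directly from Ohm's law once the forced current and the measured voltage are known:

def lead_impedance_constant_current(i_source_amps, v_measured_volts):
    """With a known constant current forced through the lead, the impedance
    follows directly from Ohm's law: Z = V / I."""
    return v_measured_volts / i_source_amps

# Hypothetical example: a 1 mA test current producing a 2.1 V drop across the
# electrode terminals indicates a lead impedance of about 2100 ohms.
print(lead_impedance_constant_current(1e-3, 2.1))

Because the source current is held constant, the result does not depend on capacitor tolerances or decay timing, which is the advantage claimed over the RC-decay method discussed in the background section.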
In another aspect of the present invention, an IMD is provided that comprises circuitry for determining an impedance presented by a lead associated with the IMD. The IMD comprises an amplifier circuit for providing a substantially constant current signal through a first terminal and a second terminal of a lead. The IMD further comprises a voltage measurement unit to measure a voltage across the first and second terminals. The implantable device additionally comprises an impedance determination unit to determine an impedance between the first and second terminals based upon the constant current signal and the voltage.
In another aspect of the present invention, a system is provided for determining an impedance experienced by a lead associated with an IMD. The system comprises an external device communicating with the IMD, and the IMD is in turn adapted to communicate with the external device and to deliver a stimulation signal to a lead coupled to the IMD. The IMD comprises an amplifier circuit for providing a substantially constant current signal through a first terminal and a second terminal of the lead. The IMD also includes a voltage measurement unit to measure a voltage across the first and second terminals, and an impedance determination unit to determine an impedance between the first and second terminals based upon the constant current signal and the measured voltage. The IMD may also include a communications unit for communicating data relating to the impedance determination to the external device.
In yet another aspect of the present invention, a computer readable program storage device encoded with instructions is provided for determining an impedance experienced by a lead coupled to an IMD. The computer readable program storage device is encoded with instructions that, when executed by a computer, preferably within the IMD, provide a substantially constant current signal through a first terminal and a second terminal of the lead, measure a voltage across the first and second terminals, and determine an impedance across the first and second terminals based upon the constant current signal and the voltage.
BRIEF DESCRIPTION OF THE DRAWINGS
The invention may be understood by reference to the following description taken in conjunction with the accompanying drawings, in which like reference numerals identify like elements, and in which:
FIG. 1A is a stylized diagram of an implantable medical device suitable for use in the present invention implanted into a patient's body;
FIG. 1B is a stylized diagram of another embodiment of an implantable medical device suitable for use in the present invention implanted into a patient's body;
FIG. 1C illustrates an implantable medical device suitable for use in the present invention, showing the header and electrical connectors for coupling the device to a lead/electrode assembly;
FIG. 1D shows a lead and electrodes suitable for use in the present invention attached to a vagus nerve of a patient;
FIG. 2 is a block diagram of an implantable medical device and an external unit that communicates with the implantable medical device, in accordance with one illustrative embodiment of the present invention;
FIG. 3 is a stylized diagram of an output current signal provided by the implantable medical device of FIGS. 1 and 2, provided to illustrate certain stimulation parameters in accordance with one illustrative embodiment of the present invention;
FIG. 4 is a flowchart representation of a method of providing a warning signal relating to a power supply of the implantable medical device, in accordance with one illustrative embodiment of the present invention;
FIG. 5 is a flowchart representation of a method of performing a calibration of a charge depletion tabulation, in accordance with one illustrative embodiment of the present invention;
FIG. 6 is a more detailed flowchart illustrating a method of performing the charge depletion calculation indicated in FIG. 4, in accordance with one illustrative embodiment of the present invention;
FIG. 7 is a more detailed flowchart illustrating a method of performing an end-of-service (EOS) and/or an elective replacement indication (ERI) determination, as indicated in FIG. 4, in accordance with one illustrative embodiment of the present invention;
FIG. 8 is a block diagram of the stimulation unit shown in FIG. 2, in accordance with one illustrative embodiment of the present invention;
FIG. 9 is a block diagram of the impedance measurement unit shown in FIG. 2, in accordance with one illustrative embodiment of the present invention;
FIG. 10 is a flowchart of a method of performing an impedance measurement, in accordance with one illustrative embodiment of the present invention; and
FIG. 11 is a flowchart of a method of performing a calibration of an A/D converter used for impedance measurement, in accordance with one illustrative embodiment of the present invention.
While the invention is susceptible to various modifications and alternative forms, specific embodiments thereof have been shown by way of example in the drawings and are herein described in detail. It should be understood, however, that the description herein of specific embodiments is not intended to limit the invention to the particular forms disclosed, but on the contrary, the intention is to cover all modifications, equivalents, and alternatives falling within the spirit and scope of the invention as defined by the appended claims.
DETAILED DESCRIPTION OF SPECIFIC EMBODIMENTS
Illustrative embodiments of the invention are described herein. In the interest of clarity, not all features of an actual implementation are described in this specification. In the development of any such actual embodiment, numerous implementation-specific decisions must be made to achieve the design-specific goals, which will vary from one implementation to another. It will be appreciated that such a development effort, while possibly complex and time-consuming, would nevertheless be a routine undertaking for persons of ordinary skill in the art having the benefit of this disclosure.
Embodiments of the present invention provide methods and apparatus for monitoring and/or estimating the electrical charge depletion of an implantable medical device (IMD). Estimating battery life may be based upon estimated future charge depletion and actual past charge depletion. Embodiments of the present invention provide for an elective replacement indicator (ERI) signal to provide a warning for performing an electrical diagnostic operation upon the IMD. This electrical diagnostic operation may include replacing an electrical component in the IMD, performing additional evaluation(s) of the operation of the IMD, replacing or recharging a power source of the IMD, and the like. A more detailed description of an IMD suitable for use in the present invention is provided in various figures and the accompanying description below.
Generally, IMDs contain power storage devices or battery units to provide power for the operations of the IMD. Embodiments of the present invention determine an estimated usable life remaining in the battery unit based upon determining initial and final battery charges, charge depleted by operations of the IMD, and a future depletion rate. Embodiments of the present invention may be performed in a standalone manner within the IMD itself, or in conjunction with an external device in communication with the IMD. Utilizing embodiments of the present invention, an end of service (EOS) signal or an ERI signal may be provided, indicating that the IMD is at or near termination of operations and/or the battery power has reached a level at which replacement should be considered to avoid interruption or loss of therapy to the patient.
Other embodiments of the present invention provide for determining the lead impedance. This process involves determining the voltage across a lead associated with the IMD, based upon the delivery of a constant current signal. The impedance may be measured on demand or at predetermined periodic intervals to detect significant changes in impedance across the leads of the IMD. Changes in the impedance may be logged, time-stamped, and saved in a memory in the IMD for diagnostic considerations. Voltage and current measurements associated with the IMD may be calibrated using various impedance measurements in order to enhance the accuracy of lead impedance measurements.
FIGS. 1A-1D illustrate a generator 110 having a main body 112 comprising a case or shell 121 (FIG. 1A) with a connector 116 (FIG. 1C) for connecting to leads 122. The generator 110 is implanted in the patient's chest in a pocket or cavity formed by the implanting surgeon just below the skin (indicated by a dotted line 145), similar to the implantation procedure for a pacemaker pulse generator. A stimulating nerve electrode assembly 125, preferably comprising an electrode pair, is conductively connected to the distal end of an insulated electrically conductive lead assembly 122, which preferably comprises a pair of lead wires (one wire for each electrode of an electrode pair). Lead assembly 122 is attached at its proximal end to the connector 116 on case 121. The electrode assembly is surgically coupled to a vagus nerve 127 in the patient's neck. The electrode assembly 125 preferably comprises a bipolar stimulating electrode pair (FIG. 1D), such as the electrode pair described in U.S. Pat. No. 4,573,481 issued Mar. 4, 1986 to Bullara. Persons of skill in the art will appreciate that many electrode designs could be used in the present invention. The two electrodes are preferably wrapped about the vagus nerve, and the electrode assembly 125 is preferably secured to the nerve 127 by a spiral anchoring tether 128 (FIG. 1D) such as that disclosed in U.S. Pat. No. 4,979,511 issued Dec. 25, 1990 to Reese S. Terry, Jr. and assigned to the same assignee as the instant application. Lead assembly 122 is secured, while retaining the ability to flex with movement of the chest and neck, by a suture connection 130 to nearby tissue.
In one embodiment, the open helical design of the electrode assembly 125 (described in detail in the above-cited Bullara patent), which is self-sizing and flexible, minimizes mechanical trauma to the nerve and allows body fluid interchange with the nerve. The electrode assembly 125 preferably conforms to the shape of the nerve, providing a low stimulation threshold by allowing a large stimulation contact area with the nerve. Structurally, the electrode assembly 125 comprises two electrode ribbons (not shown), of a conductive material such as platinum, iridium, platinum-iridium alloys, and/or oxides of the foregoing. The electrode ribbons are individually bonded to an inside surface of an elastomeric body portion of the two spiral electrodes 125-1 and 125-2 (FIG. 1D), which may comprise two spiral loops of a three-loop helical assembly. The lead assembly 122 may comprise two distinct lead wires or a coaxial cable whose two conductive elements are respectively coupled to one of the conductive electrode ribbons 125-1 and 125-2. One suitable method of coupling the lead wires or cable to the electrodes comprises a spacer assembly such as that disclosed in U.S. Pat. No. 5,531,778, although other known coupling techniques may be used. The elastomeric body portion of each loop is preferably composed of silicone rubber, and the third loop 128 (which typically has no electrode) acts as the anchoring tether 128 for the electrode assembly 125.
In certain embodiments of the invention, eye movement sensing electrodes 133 (FIG. 1B) may be implanted at or near an outer periphery of each eye socket in a suitable location to sense muscle movement or actual eye movement. The electrodes 133 may be electrically connected to leads 134 implanted via a catheter or other suitable means (not shown) and extending along the jawline through the neck and chest tissue to the stimulus generator 110. When included in systems of the present invention, the sensing electrodes 133 may be utilized for detecting rapid eye movement (REM) in a pattern indicative of a disorder to be treated, as described in greater detail below.
Alternatively or additionally, EEG sensing electrodes 136 may optionally be implanted in spaced apart relation through the skull, and connected to leads 137 implanted and extending along the scalp and temple and then along the same path and in the same manner as described above for the eye movement electrode leads. Electrodes 133 and 137, or other types of sensors, may be used in some embodiments of the invention to trigger administration of the electrical stimulation therapy to the vagus nerve 127 via electrode assembly 125. Use of such sensed body signals to trigger or initiate stimulation therapy is hereinafter referred to as a feedback loop mode of administration. Other embodiments of the present invention utilize a continuous, periodic or intermittent stimulus signal applied to the vagus nerve (each of which constitutes a form of continual application of the signal) according to a programmed on/off duty cycle without the use of sensors to trigger therapy delivery. This type of delivery may be referred to as a prophylactic therapy mode. Both prophylactic and feedback loop administration may be combined or delivered by a single IMD according to the present invention. Either or both modes may be appropriate to treat the particular disorder diagnosed in the case of a specific patient under observation.
The pulse generator 110 may be programmed with an external computer 150 using programming software of the type copyrighted by the assignee of the instant application with the Register of Copyrights, Library of Congress, or other suitable software based on the description herein, and a programming wand 155 to facilitate radio frequency (RF) communication between the computer 150 (FIG. 1A) and the pulse generator 110. The wand 155 and software permit noninvasive communication with the generator 110 after the latter is implanted. The wand 155 is preferably powered by internal batteries, and provided with a “power on” light to indicate sufficient power for communication. Another indicator light may be provided to show that data transmission is occurring between the wand and the generator.
FIG. 2 illustrates one embodiment of an IMD 200 (which may comprise pulse generator 110) for performing neurostimulation in accordance with embodiments of the present invention. In one embodiment, the implantable medical device 200 comprises a battery unit 210, a power-source controller 220, a stimulation controller 230, a power regulation unit 240, a stimulation unit 250, an impedance measurement unit 265, a memory unit 280 and a communication unit 260. It will be recognized that one or more of the blocks 210-280 (which may also be referred to as modules) may comprise hardware, firmware, software, or any combination of the three. The memory unit 280 may be used for storing various program codes, starting data, and the like. The battery unit 210 comprises a power-source battery that may be rechargeable or non-rechargeable. The battery unit 210 provides power for the operation of the IMD 200, including electronic operations and the stimulation function. The battery unit 210, in one embodiment, may be a lithium/thionyl chloride cell or, more preferably, a lithium/carbon monofluoride (Li/CFx) cell. The terminals of the battery unit 210 are preferably electrically connected to an input side of the power-source controller 220 and the power regulation unit 240.
The power-source controller 220 preferably comprises circuitry for controlling and monitoring the flow of electrical power to various electronic and stimulation-delivery portions of the IMD 200 (such as the modules 230-265 and 280 illustrated in FIG. 2). More particularly, the power-source controller 220 is capable of monitoring the power consumption or charge depletion of the implantable medical device 200 and is capable of generating the ERI and the EOS signals. The power-source controller 220 comprises an active charge-depletion unit 222, an inactive charge-depletion unit 224, and an ERI/EOS calculation unit 226. The active charge-depletion unit 222 is capable of calculating the charge depletion rate of the implantable medical device 200 during active states, and may comprise sub-units to calculate the charge depletion rates of a plurality of active states having different charge depletion rates. The active state of the implantable medical device 200 may refer to a period of time during which a stimulation is delivered by the implantable medical device 200 to body tissue of the patient according to a first set of stimulation parameters. Other active states may include states in which other activities are occurring, such as status checks and/or updates, or stimulation periods according to a second set of stimulation parameters different from the first set of stimulation parameters. The inactive charge-depletion unit 224 is capable of calculating the charge depletion rate of the implantable medical device 200 during inactive states. Inactive states may also comprise various states of inactivity, such as sleep modes, wait modes, and the like. The ERI/EOS calculation unit 226 is capable of performing calculations to generate an ERI signal and/or an EOS signal. One or more of the active charge-depletion unit 222, the inactive charge-depletion unit 224, and/or the ERI/EOS calculation unit 226 may be hardware, software, firmware, and/or any combination thereof.
The power regulation unit 240 is capable of regulating the power delivered by the battery unit 210 to particular modules of the IMD 200 according to their needs and functions. The power regulation unit 240 may perform a voltage conversion to provide appropriate voltages and/or currents for the operation of the modules. The power regulation unit 240 may comprise hardware, software, firmware, and/or any combination thereof.
The communication unit 260 is capable of providing transmission and reception of electronic signals to and from an external unit 270. The external unit 270 may be a device that is capable of programming various modules and stimulation parameters of the IMD 200. In one embodiment, the external unit 270 is a computer system that is capable of executing a data-acquisition program. The external unit 270 is preferably controlled by a healthcare provider such as a physician, at a base station in, for example, a doctor's office. The external unit 270 may be a computer, preferably a handheld computer or PDA, but may alternatively comprise any other device that is capable of electronic communications and programming. The external unit 270 may be used to download various parameters and program software into the IMD 200 for programming the operation of the implantable device. The external unit 270 may also receive and upload various status conditions and other data from the IMD 200. The communication unit 260 may comprise hardware, software, firmware, and/or any combination thereof. Communications between the external unit 270 and the communication unit 260 may occur via a wireless or other type of communication, illustrated generally by line 275 in FIG. 2.
The stimulation controller 230 defines the stimulation pulses to be delivered to the nerve tissue according to parameters and waveforms that may be programmed into the IMD 200 using the external unit 270. The stimulation controller 230 controls the operation of the stimulation unit 250, which generates the stimulation pulses according to the parameters defined by the controller 230 and, in one embodiment, provides these pulses to the connector 116 for delivery to the patient via lead assembly 122 and electrode assembly 125 (see FIG. 1A). Various stimulation signals provided by the implantable medical device 200 may vary widely across a range of parameters. The stimulation controller 230 may be hardware, software, firmware, and/or any combination thereof.
FIG. 3 illustrates the general nature, in idealized representation, of an output signal waveform delivered by the output section of a pulse generator 110 (such as stimulation unit 250 shown in FIG. 2) to lead assembly 122 and electrode assembly 125 in an embodiment of the present invention. This illustration is presented principally for the sake of clarifying terminology, including the parameters of signal on-time, off-time, frequency, pulse width, and current. In the treatment of a neuropsychiatric disorder in an exemplary implementation, the stimulation unit 250 of the IMD 200 delivers pulses having a desired output signal current and frequency, with each pulse having a desired output signal pulse width. The pulses are delivered for the duration of the output signal on-time (stimulation period), and are followed by the output signal off-time during which no output signal is delivered (idle period). This periodic stimulation reduces the symptoms of the neuropsychiatric disorder. Stimulation parameters suitable for treatment of a variety of medical conditions can be found in the following patents: U.S. Pat. No. 4,702,254, U.S. Pat. No. 5,025,807, U.S. Pat. No. 4,867,164, and U.S. Pat. No. 6,622,088 (epilepsy); U.S. Pat. No. 5,188,104 and U.S. Pat. No. 5,263,480 (eating disorders); U.S. Pat. No. 5,215,086 (migraine headaches); U.S. Pat. No. 5,231,988 (endocrine disorders); U.S. Pat. No. 5,269,303 (dementia); U.S. Pat. No. 5,299,569 and U.S. Pat. No. 6,622,047 (neuropsychiatric disorders); U.S. Pat. No. 5,330,513 and U.S. Pat. No. 6,721,603 (pain); U.S. Pat. No. 5,335,657 (sleep disorders); U.S. Pat. No. 5,540,730 (motility disorders); U.S. Pat. No. 5,571,150 (coma); U.S. Pat. No. 5,707,400 (refractory hypertension); U.S. Pat. No. 6,587,719 and U.S. Pat. No. 6,609,025 (obesity); U.S. Pat. No. 6,622,041 (congestive heart failure). Each of the foregoing patents is hereby incorporated by reference herein in its entirety.
In one embodiment of the invention, the IMD 200 determines EOS and ERI values by using a known initial battery charge (Q₀) and a predetermined EOS battery charge (Q_(EOS)) indicative of the end of useful battery service, together with the charge actually depleted (Q_(d)) by the IMD (calculated from the current usage rates for idle and stimulation periods (r_(i) and r_(s)), and the length of the respective idle and stimulation periods), to calculate for a desired time point how much useful charge remains on the battery (Q_(r)) until the EOS charge is reached, and how long at projected current usage rates the device can operate until EOS or ERI. Once the charge actually depleted by operation of the device (Q_(d)) is known, the current usage rates are then applied to the remaining useful charge Q_(r) to determine the time remaining until EOS and/or ERI.
The present invention allows EOS and ERI determinations to be made without measurements or calculations of internal battery impedance or other battery parameters. Instead, the device maintains a precise record of the current used during idle and stimulation periods, and subtracts the charge represented by the current used from the total available battery charge to determine the charge remaining on the battery. Because the relative durations of stimulation and idle periods are determined by the stimulation programming parameters of the IMD, a determination of EOS and ERI can be calculated in a straightforward manner based upon the current usage rates associated with the programming parameters.
Consistent with the foregoing, FIG. 4 provides a flowchart depiction of a method for determining the remaining time to EOS and/or ERI based on known or determined IMD characteristics such as battery charge and current usage rates. In one embodiment, the current usage of the IMD 200 is calibrated during manufacture (step 410). Current drawn by the IMD from the battery is defined as electrical charge per unit time. The total charge depleted from the battery as a result of the operations of the IMD may be determined by multiplying each distinct current rate used by the IMD by its respective time used. In one embodiment, as part of the calibration, during manufacturing, a power supply capable of generating known currents and voltages may be used to characterize the power consumption or current depletion of the implantable medical device 200 during its stimulation and idle modes. The power consumption data thus obtained is preferably stored in a memory of the IMD.
Once the charge usage characteristics of the IMD are known, the battery may be subsequently installed into the implantable medical device 200 for operation and thereafter a record of power consumed by the implantable medical device 200 is maintained. In a particular embodiment, the calibration step 410 involves calibration of current usage for idle periods (r_(i)) and stimulation periods (r_(s)) of the device. Current may thus be used as a proxy value for electrical charge depletion, and the calibration step allows a precise determination of the amount of electrical charge used by the device after implantation. As used herein, the terms “depletion rate,” “consumption rate,” and “usage rate” may be used interchangeably and refer to the rate at which electrical charge is depleted from the battery. However, as noted above, current may be used as a proxy for electrical charge, and where this is the case, current rates r_(i) and r_(s) may also be referred to as “current usage,” “current rate,” “current consumption,” “charge depletion,” “depletion rate” or similar terms.
As previously noted, the IMD 200 has a number of settings and parameters (e.g., current, pulse width, frequency, and on-time/off-time) that can be changed to alter the stimulation delivered to the patient. These changes result in different current usage rates by the IMD 200. In addition, circuit variations from device to device may also result in different current usage rates for the same operation. Calculations and estimations are preferably performed during the manufacturing process in order to calibrate accurately and precisely the current usage rates of the IMD 200 under a variety of stimulation parameters and operating conditions. A calibration of the current usage rates and a determination of the charge present on the battery at the time of implant allow a more accurate assessment of actual and predicted charge depletion after the IMD 200 is implanted. The initial charge on the battery may include a safety factor, i.e., the charge may be a “minimum charge” that all batteries are certain to possess, even though many individual batteries may have a significantly greater charge. Nothing herein precludes a determination of a unique initial charge for each individual battery. However, it will be recognized that such individual determinations may not be economically feasible. A more detailed illustration and description of the step (410) of calibrating current usage and initializing the battery charge for the implantable medical device 200 is provided in FIG. 5 and the accompanying description below.
After calibrating the current usage characteristics of the IMD 200, the IMD may be implanted and subsequently a charge depletion calculation is performed (step 420). This calculation may be performed by the IMD itself, the external unit 270, or by both, and includes determining the actual electrical charge depleted from the battery 210 and estimating future current usage (i.e., depletion rates), which are then used to calculate an elective replacement indication (ERI) and/or an end of service (EOS) signal (step 430). A more detailed illustration and description of the step 420 of calculating the electrical charge depleted is provided in FIG. 6 and the accompanying description below. In step 430 an estimated time until an elective replacement indication will be generated and/or the estimated time until the end of service are calculated utilizing the initial battery charge, the actual charge consumed and the estimated future charge depletion calculated in light of the calibration performed during manufacture. A more detailed description and illustration of the step 430 of calculating the time to ERI and/or EOS is provided in FIG. 7 and the accompanying description below.
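The three-step flow of FIG. 4 can be summarized in a short sketch. This is an illustrative outline only; the function names, units (microamps and microamp-seconds), and example values below are assumptions and do not appear in the patent.

    # Illustrative outline of steps 410, 420 and 430 of FIG. 4.
    def calibrate_rates():
        """Step 410: characterize idle and stimulation current rates at manufacture."""
        return {"r_idle_uA": 10.0, "r_stim_uA": 100.0}   # hypothetical example values

    def total_charge_depleted(rates, idle_seconds, stim_seconds):
        """Step 420: charge actually depleted from the battery, in microamp-seconds."""
        return rates["r_idle_uA"] * idle_seconds + rates["r_stim_uA"] * stim_seconds

    def time_until_eos(q0_uAs, q_eos_uAs, q_depleted_uAs, projected_rate_uA):
        """Step 430: remaining operating time (seconds) until the EOS charge is reached."""
        q_remaining = (q0_uAs - q_eos_uAs) - q_depleted_uAs
        return q_remaining / projected_rate_uA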
Referring now to FIG. 5, a flowchart diagram is provided depicting in greater detail the step 410 (FIG. 4) of calibrating and initializing the IMD 200 during manufacturing. In one embodiment, the current rates for the IMD 200 during stimulation are calibrated (block 510). During manufacturing, several different combinations of measurements may be calibrated. More specifically, measurements of charge depletion relating to different types of pulses (i.e., pulses having different stimulation parameters) are calibrated to ensure that current usage measurements for the IMD are accurate over a wide range of stimulation parameters. In other words, various pulses having a range of current amplitudes, pulse widths, frequencies, duty cycles and/or lead impedances into which the pulses are delivered are used to calibrate the measurement of current usage during stimulation to establish a baseline of the measurement of charge depletion for various types of pulses. All operational variables relating to or affecting the current usage rates of the IMD may be considered.
More particularly, during manufacture of the IMD 200, several combinations of data points relating to various current rates resulting from various combinations of pulse parameters are used in one embodiment to generate a linear equation that relates various pulse parameters to current rate, which may then be used to determine charge depletion. For example, for a first stimulation, pulses of a certain frequency are provided and for a second stimulation, the frequency of the pulses used may be doubled. Therefore, the current usage rate for the second stimulation may be estimated to be approximately double that of the power consumption or charge depleted due to the first stimulation. As another example, a first stimulation may be of a first pulse width and a second stimulation may be of a pulse width that is double that of the width of the first pulse. Therefore, based on the relationship between pulse width and current consumption, the current usage rate of the second pulse may be estimated to be approximately double that of the first pulse. In one embodiment, a graph may be generated using the various types of stimulation versus the current consumption associated with that stimulation.
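As a rough illustration of the interpolation described above, the sketch below fits a straight line of stimulation current rate against a single pulse parameter from a few calibration points and evaluates it at an unmeasured setting. The data points, names, and single-parameter model are hypothetical; an actual calibration would cover several parameters (amplitude, pulse width, frequency, duty cycle, lead impedance).

    # Hypothetical calibration points: pulse frequency (Hz) vs. measured
    # stimulation current rate (microamps). Values are for illustration only.
    calibration_points = [(10.0, 50.0), (20.0, 100.0), (30.0, 150.0)]

    def fit_linear(points):
        """Least-squares fit of rate = a * parameter + b from calibration data."""
        n = len(points)
        sx = sum(x for x, _ in points)
        sy = sum(y for _, y in points)
        sxx = sum(x * x for x, _ in points)
        sxy = sum(x * y for x, y in points)
        a = (n * sxy - sx * sy) / (n * sxx - sx * sx)
        b = (sy - a * sx) / n
        return a, b

    def interpolate_rate(a, b, parameter_value):
        """Estimate the current usage rate for a setting that was not measured directly."""
        return a * parameter_value + b

    a, b = fit_linear(calibration_points)
    r_s_estimate = interpolate_rate(a, b, 25.0)   # ~125 uA under this toy model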
As yet another example, a first stimulation pulse may have a first current amplitude and a second stimulation may have a current amplitude that is double that of the first stimulation pulse. Therefore, the current consumption of the second stimulation pulse may be estimated to be approximately double that of the current consumption of the first stimulation pulse. The power consumption is directly proportional to the current consumption. Therefore, a relationship of a pulse parameter to current usage rate may be estimated or measured such that an interpolation may be performed at a later time based upon the linear relationship developed during the calibration of the power consumption during stimulation. It may be appreciated that the relationships of some pulse parameters to current usage rate may not be a simple linear relationship, depending upon such pulse characteristics as the type of pulse decay (i.e., square wave, exponential decay), for example. Nevertheless, calibration of current usage rate for various pulse parameters may be performed by routine calculation or experiment for persons of skill in the art having the benefit of the present disclosure.
Referring again to FIG. 5, current usage during an idle (i.e., non-stimulating) period is calibrated in step 520. From the idle current consumption and the stimulation current consumption calibration, the overall current consumption may be modeled based upon programmed settings. It should be noted that while the invention as shown in the drawings describes a device having two current usage patterns associated with an idle period and a stimulation period, such a two-state embodiment is described solely for clarity, and more complex embodiments are possible involving a third state such as, by way of nonlimiting example, a current usage rate associated with electrical sensing of the lead electrodes, which may be defined by a third current rate r₃. Four-state or even higher state embodiments are possible, although where the differences in current usage rates are small, or where a particular current usage rate comprises only a tiny fraction of the overall time of the device, the complexity required to implement and monitor the time such current rates are actually used by the device may render the device impractical. These multi-state embodiments may be implemented if desired, however, and remain within the scope and spirit of the present invention.
Using the calibration of current usage during stimulation periods (step 510) and idle periods (step 520), a calculation may optionally be made to initialize the charge depleted, if any, during manufacturing operations, such as the charge depleted during testing of the device after assembly (block 530). In a preferred embodiment, all of the calibrations are performed with a calibrated current source device, and not a battery, and in this case there is no charge depletion during manufacturing operations. In another embodiment, the amount of charge depleted during manufacturing may be small, in which case the initialization procedure may also be omitted. The calibration and/or initialization steps of FIG. 5 allow the IMD 200, via power-source controller 220, to maintain a running tally of how much charge has been depleted from the device. When the battery unit 210 is first inserted into the implantable medical device 200, the charge depleted is generally initialized to zero so that a running tabulation may begin from zero for maintaining a running tally of the charge depleted from the battery over the life of the implantable medical device 200. In one embodiment, the charge depleted tally is incremented throughout the operating life of the device and at any point the running tally may be subtracted from the known initial charge of the battery to determine the remaining charge. In an alternative embodiment, the charge depleted tally could be initialized to the value of the battery initial charge and the tally decremented throughout the device operation and directly used as the remaining charge. In either implementation, information relating to the baseline charge remaining on the battery at the end of manufacturing may be retained to calculate the estimated time to EOS or ERI.
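The two bookkeeping schemes described above (a tally incremented from zero, or a tally initialized to the initial charge and decremented) can be sketched as follows; the class and attribute names are illustrative only.

    class IncrementingTally:
        """Charge-depleted counter initialized to zero when the battery is inserted."""
        def __init__(self):
            self.q_depleted_uAs = 0.0
        def record(self, rate_uA, seconds):
            self.q_depleted_uAs += rate_uA * seconds
        def remaining(self, q_initial_uAs):
            return q_initial_uAs - self.q_depleted_uAs

    class DecrementingTally:
        """Counter initialized to the initial battery charge and decremented in use."""
        def __init__(self, q_initial_uAs):
            self.q_remaining_uAs = q_initial_uAs
        def record(self, rate_uA, seconds):
            self.q_remaining_uAs -= rate_uA * seconds

Either representation yields the same remaining-charge figure; the choice only affects how the baseline from manufacturing is carried forward.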
Turning now to FIG. 6, a flowchart depiction of the step 420 of calculating charge depleted by the device is provided in greater detail. For simplicity, only the two-current state of a single idle period and a single stimulation period is shown. Embodiments having additional current usage rates are included in the present invention. The IMD 200 may determine a current depletion rate r_(i) for idle periods (block 610). The rate is preferably stored in memory. In one embodiment, the determination is made by the IMD 200 after implantation. In a preferred embodiment, the idle current depletion rate may be a rate determined during manufacturing (i.e., a rate calibrated in step 520) and stored in the memory 280. An idle period is defined as a time period when the implantable medical device 200 is not performing active stimulation, i.e., is not delivering a stimulation pulse to the electrodes. Various electronic functions, such as tabulation and calculation of numbers or execution of various software algorithms within the IMD 200 may take place during the idle period.
As noted, the current rate r_(i) during idle periods 610 may be predetermined during the manufacturing process (step 520) and may include various considerations, such as the power consumption of the operation of various electronics in the implantable medical device 200, even though no active stimulation may be taking place during that time period. However, since the implantable medical device 200 may be occasionally reprogrammed while still implanted inside a patient's body, the number and duration of idle periods may vary according to the duty cycle and frequency of the stimulation pulses. Therefore, the IMD 200 (e.g., via the power source controller 220 in the device) may maintain a running tabulation of the idle periods, and for each idle period a certain amount of charge depleted during the idle period (i.e., off time) is tabulated and stored in memory 280 (step 620).
It will be appreciated that the depleted charge may be obtained in a number of different ways, each within the scope of the present invention. Specifically, the total time of all idle periods since implantation, initialization, or since a previous idle power depletion calculation, may be maintained as a running total idle time in memory, or alternatively a running tally of charge depleted during idle periods may be maintained. While these values are different numerically, they are directly related by simple equations as discussed more fully hereinafter. At an update time, the total idle time may be periodically accessed and multiplied by the idle period current usage rate to determine the total power depleted during idle periods since implantation, initialization, or the previous calculation.
The IMD 200 may also maintain in memory 280 a tabulation of current usage rates (i.e., charge depletion) for a wide range of stimulation settings (step 630). In another embodiment, theoretical charge depletion calculations relating to particular types of stimulation may be provided to the IMD 200. The stimulation parameter settings may then be used by the device to maintain a running tabulation of the charge depleted during stimulation periods using a current usage rate r_(s) calculated from the pulse width, pulse amplitude, pulse frequency, and other parameters which may impact the current usage rate. This method provides specific current usage rates for a variety of stimulation parameter settings and lead impedances without requiring the storage of current usage rates for all possible stimulation parameter settings and lead impedances.
In one embodiment, the charge depleted may be stored in micro-amp seconds; however, various other measurement units may be utilized. In one embodiment, the IMD 200 itself may be capable of calculating the current usage rate for a particular combination of programmed output settings based upon a known relationship between current usage rates and different combinations of programmed settings. The relationship may then be used to interpolate a particular current usage rate for a particular combination of programmed output settings. However, in order to reduce the computation load on the device, some or all of these calculations, including the interpolation, are preferably performed by an external programmer 270. Therefore, upon programming or performing routine maintenance of the implantable medical device 200, the external unit 270 may perform the calculations to determine the current usage rate during future stimulation cycles based upon the settings implemented during the programming or maintenance operation.
For example, if the stimulation for a particular patient is set to a particular pulse width, the external device 270 may factor in the calibration data and determine a current usage rate for a particular set of stimulation settings. Therefore, for each stimulation period, the charge that is depleted is tabulated for the stimulation period (“on-time”) by multiplying the stimulation time by the current usage rate and a running tabulation is maintained (block 640). For example, if the predetermined current usage rate for each second of stimulation at a particular combination of parameter settings is 100 micro amps, and the stimulation is 30 seconds long, a calculation is made by multiplying the 30 second time period for the stimulation by the 100 micro amps to arrive at 3000 micro amp seconds of charge consumed, which is then added to a running charge consumption tally.
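The arithmetic of the example above can be checked directly; the variable names below are illustrative only.

    r_s_uA = 100.0                          # current usage rate during stimulation
    on_time_s = 30.0                        # length of the stimulation period
    pulse_charge_uAs = r_s_uA * on_time_s   # 3000 microamp-seconds for this period
    running_tally_uAs = 0.0
    running_tally_uAs += pulse_charge_uAs   # added to the running consumption tally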
As illustrated in FIG. 6, the sum of the tabulations of the charge depleted for the idle period (off-time or inactive period; step 620) and the charge depleted for the stimulation period (on-time or active period; step 640) are added to arrive at a total charge depleted by the IMD 200 (block 650). It will be appreciated that the sum of idle period and stimulation charge depletion may occur at the conclusion of one or more cycles of idle period and stimulation period, or continuously throughout idle periods and stimulation periods. Occasionally during the operational life of the IMD 200, various stimulation parameters may be changed to provide different types of stimulation. However, utilizing the steps described herein, a running tally (or a periodically updated tally) of the charge depletion is maintained, such that even when the stimulation settings change, the device maintains a substantially accurate reflection of the actual charge that has been depleted by the IMD 200, and future depletion calculations are based on the depletion rate for the newly programmed settings.
The memory 280 may store the results of the charge calculations (step 660). The data stored may include both the current usage rates for idle and stimulation periods of the IMD 200, as well as the total charge depleted. This data may be utilized by the IMD 200 and/or external unit 270 to determine various aspects of the device, including the amount of remaining battery life.
The calculations associated with steps 620, 640 and 650 may be expressed mathematically. In particular, the total charge available from the battery Q_(tot) after it is placed in the IMD 200 may be represented as the difference between an initial battery charge Q₀ and the EOS battery charge Q_(EOS), as expressed in Equation 1.
Q_(tot) = Q₀ − Q_(EOS)   (Equation 1)
The charge depleted by the IMD 200 during idle periods Q_(i) (step 620) may be expressed as the idle period current usage rate r_(i) multiplied by the total duration of all idle periods Δt_(i) according to Equation 2.
Q_(i) = r_(i) × ΣΔt_(i)   (Equation 2)
Where multiple idle rates are present, the above equation will be solved for each idle current usage rate and the results summed to obtain Q_(i). Similarly, the charge depleted during stimulation periods Q_(s) (step 640) may be expressed as the stimulation period current usage rate r_(s) multiplied by the total duration of all stimulation periods Δt_(s) according to Equation 3.
Q_(s) = r_(s) × ΣΔt_(s)   (Equation 3)
Again, where multiple stimulation rates are used the equation will be solved for each stimulation rate and the results summed. The total charge depleted Q_(d) is the sum of Q_(i) and Q_(s), as shown in Equation 4.
Q_(d) = Q_(i) + Q_(s)   (Equation 4)
Finally, the charge remaining until EOS (Q_(r)) at any arbitrary point in time is the difference between the total energy or charge available Q_(tot) and the charge actually depleted from the battery Q_(d) at that same timepoint, as expressed in Equation 5 (step 650).
Q_(r) = Q_(tot) − Q_(d)   (Equation 5)
This may be accomplished by counters that record the amount of time the device uses the idle current usage rate(s) and the stimulation current usage rate(s), respectively, which are then multiplied by the applicable current usage rate to obtain the total consumed charge during the idle and stimulation periods. Alternatively, separate registers may directly maintain a running tally of the charge depleted during stimulation periods and idle periods, respectively.
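Equations 1 through 5 translate directly into a short routine. This is a minimal sketch assuming single scalar idle and stimulation rates; where multiple rates exist, the corresponding terms would be summed as noted above, and all names are illustrative.

    def charge_remaining(q0, q_eos, r_i, total_idle_s, r_s, total_stim_s):
        """Charge remaining until EOS, in the same units used for q0 and q_eos."""
        q_tot = q0 - q_eos            # Equation 1
        q_i = r_i * total_idle_s      # Equation 2
        q_s = r_s * total_stim_s      # Equation 3
        q_d = q_i + q_s               # Equation 4
        return q_tot - q_d            # Equation 5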
Turning now to FIG. 7, a more detailed flow chart depicting the calculation of the time to the end of service (EOS) and/or elective replacement indicator (ERI) signals, as indicated in step 430 of FIG. 4, is illustrated. The IMD 200 is programmed for delivering to the patient electrical pulses having predetermined parameters (step 710). Programming the stimulation settings may be performed during manufacturing and/or by a healthcare provider when the external unit 270 gains communication access to the IMD 200. Occasionally, medical personnel may determine that an alteration of one or more of the stimulation parameters is desirable. Implementation of such changes may easily be accomplished to optimize the therapy delivered by the IMD. Alternatively, as part of a routine diagnostic process, a predetermined change to the stimulation settings may be performed. Additionally, the IMD 200 may have multiple sets of stimulation parameters stored in memory and may switch between the different stimulation modes represented by those parameters at preset times or at the occurrence of certain physiological events. When a change in one or more stimulation parameter settings is implemented (whether by programming or accessing data from memory), the IMD 200 and/or the external unit 270 may determine an updated stimulation period current usage rate r_(s) associated with the new parameter settings, and subsequent updates to the total charge consumed will be based upon the new stimulation period current usage rate (step 720). The rates may either be stored in memory or calculated from an equation by interpolation among known current rates for known parameter settings, as previously described. It is also possible that changes to the software or firmware of the device could change the idle period depletion rate, in which event a new idle period current usage rate r_(i) may also be calculated and reflected in subsequent calculations of total charge depleted (step 720).
Because the duty cycle (on-time to off-time ratio) is also a programmed parameter, the present invention allows both the idle period current usage rate (r_(i)) and the stimulation period current usage rate (r_(s)) to be combined into a single rate for purposes of projecting future energy or charge depletion and calculating a time to EOS and/or ERI. This rate represents the total current usage rate (r_(t)) of the device (step 725). Following updates to the stimulation and/or idle period current usage rates r_(s) and r_(i), the updated rates are then used to calculate a new total charge remaining Q_(r), by a method substantially as shown in FIG. 6 and previously described. Once the total charge remaining is retrieved from memory, the remaining time to an activation of an EOS is calculated (step 730) by using the total depletion rate r_(t) and the total charge remaining Q_(r) on the battery until EOS. More particularly, the time remaining is calculated by dividing the remaining charge by the total depletion rate as shown in Equation 6.
t = Q_(r) / r_(t)   (Equation 6)
At a predetermined time period before the end of service of the battery unit 210 is reached, an ERI signal, which may prompt the healthcare provider and/or the patient to schedule elective replacement of an electronic device, may be asserted to provide a warning. ERI is typically determined as simply a predetermined time, for example from 1 week to 1 year, more typically 6 months, earlier than EOS. In an alternative embodiment, the ERI signal may be defined as a particular charge level remaining (Q_(ERI)) above the EOS charge, Q_(EOS). In this embodiment, the time period remaining until the ERI signal could be calculated by dividing Q_(ERI) by the total depletion rate r_(t) and subtracting the resulting time period from the time to EOS as calculated in Equation 6.
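A sketch of the FIG. 7 calculation follows, combining a duty-cycle-weighted total rate, Equation 6, and the two ERI conventions just described. The weighting of r_(s) and r_(i) by the programmed duty cycle is an assumption consistent with, but not spelled out in, the text; all names are illustrative.

    def combined_rate(r_i, r_s, duty_cycle):
        """Total current usage rate r_t, with duty_cycle the fraction of time spent stimulating."""
        return duty_cycle * r_s + (1.0 - duty_cycle) * r_i

    def time_to_eos(q_r, r_t):
        """Equation 6: remaining time until EOS."""
        return q_r / r_t

    def time_to_eri(q_r, r_t, eri_lead_time_s=0.0, q_eri=None):
        """ERI as either a fixed lead time before EOS or a charge threshold above Q_EOS."""
        t_eos = time_to_eos(q_r, r_t)
        if q_eri is not None:
            return t_eos - q_eri / r_t
        return t_eos - eri_lead_time_s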
The time to EOS provides a warning to the healthcare provider and/or patient that the energy or charge supply will be depleted very shortly. Therefore, the time to EOS is reported to the implantable medical device 200 and/or to the external device 270 (block 740). The ERI is also reported to the implantable medical device 200 and/or to the external device 270, which is then brought to the attention of the patient and/or a medical professional.
In addition to battery life, for diagnostic purposes the impedance of the various leads that deliver stimulation provided by the IMD 200 is also of interest. Lead impedance measurements and known output current signal characteristics may be used to calculate consumed stimulation charge. Sudden changes in lead impedance may indicate any of a number of changes in the operation of the implantable medical device 200. Changes in impedance may indicate that the leads delivering the stimulation have moved or have been damaged, or that the patient's body where the stimulation was delivered may have changed in some way.
Turning now to FIG. 8, a block diagram is provided depicting in further detail an embodiment of the stimulation unit 250 of FIG. 2. The stimulation unit 250 of the IMD 200 comprises an op amp unit 820, which may comprise one or more operational amplifiers that are capable of delivering a controlled current signal for stimulation. In one embodiment, the controlled current is a constant current or a substantially constant current. The stimulation unit 250 may also comprise an amplifier control circuitry unit 810 that may contain circuitry and/or programmable logic to control the operation of the op amps 820. Additionally, the stimulation unit 250 may be coupled to leads 122, which may comprise a pair of signal wires capable of delivering an electrical signal to an electrode pair 125-1 and 125-2 (FIG. 1D), each coupled to a distal end of one of the leads 122. The leads 122 (and the electrodes 125-1 and 125-2) are capable of providing a complete circuit between the implantable medical device 200 and the region of the body/tissue to which the electrodes are attached, which may be approximated as an equivalent impedance. Each lead 122 may comprise a single strand wire or, more preferably, a multi-strand wire braided or otherwise coupled together as a single functional wire. Each of the two lead wires 122 in this embodiment is provided with a separate socket and connector 116, as shown in FIG. 1C. In another embodiment, two leads 122 may be combined into a single coaxial cable (as shown in FIGS. 1A and 1D), with a single socket providing both coaxial connectors 116.
Embodiments of the present invention provide for utilizing the delivery of a constant current signal for delivery of stimulation, and measurement of the impedance experienced by the leads 122. In a preferred embodiment, the controlled or constant current signal provided by the stimulation unit 250 is independent of the impedance experienced across the leads 122. For example, even if the impedance experienced across the leads 122 changes, the op amp 820, in conjunction with the amplifier control circuitry 810, adjusts to deliver a controlled or constant current despite the change in the impedance experienced across the leads 122.
Since a controlled, constant current is delivered despite variations in the impedance across the leads 122, the voltage across the lead terminals provides an indication of the lead impedance. For example, if the nerve tissue to which the leads 122 are connected has an impedance of 1000 ohms, a particular stimulation may call for a one milliamp constant current signal. In this case, even if a 5000 ohm impedance is experienced across the leads 122, the stimulation unit 250 will still provide a one milliamp current. Hence, the power may vary but the current remains constant. In other words, the op amp 820 will stabilize itself utilizing various circuitry, including the amplifier control circuitry 810, to provide a constant current signal even if the impedance experienced by the leads 122 varies during the period the signal is provided. Therefore, using Ohm's Law, V=IR, a measurement of the voltage across the leads 122 will provide an indication of the actual impedance experienced by the leads 122.
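The Ohm's-law relationship above amounts to a one-line computation; the example values mirror the 1 mA / 5000 ohm case in the text, and the function name is illustrative.

    def measured_impedance_ohms(voltage_v, constant_current_a):
        """With a controlled constant-current pulse, V / I gives the impedance seen by the leads."""
        return voltage_v / constant_current_a

    z = measured_impedance_ohms(5.0, 0.001)   # a 1 mA pulse producing 5 V implies 5000 ohms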
Turning now to FIG. 9, a block diagram depiction of one embodiment of the impedance measurement unit 265 from FIG. 2 is provided. In one embodiment, the impedance measurement unit 265 comprises a voltage measurement unit 910, an A/D converter (analog to digital converter) 920, and an impedance calculation unit 930. The voltage measurement unit 910 is capable of measuring or determining the voltage differential between the terminals of the leads 122. The signal from the voltage measurement unit 910 is generally an analog signal, which may be sent to the A/D converter 920. The A/D converter 920, which preferably has been calibrated prior to the operation of the IMD 200, will convert the analog voltage measurement signal to a digital signal. In alternative embodiments of the present invention the impedance measurement unit 265 may be implemented without the use of the A/D converter 920 and still remain within the scope of the present invention.
Although certain embodiments may be implemented without it, the A/D converter 920 may be beneficial for enhancing the resolution of the voltage signal, thereby providing for enhanced analysis of the voltage across the leads 122. Based upon the voltage across the leads 122, and the constant current signal provided by the stimulation unit 250, the impedance calculation unit 930 calculates the impedance by dividing the voltage across the lead terminals 122 by the current delivered by the stimulation unit 250. The impedance calculation unit 930 may be a hardware unit, a software unit, a firmware unit, or any combination thereof, which may be located in various portions of the IMD 200, including in the impedance measurement unit 265, in the stimulation controller 230, in the power source controller 220, or in any other portion of the IMD 200.
In an alternative embodiment, the calculation described as being performed by the impedance calculation unit 930 may alternatively be performed by the external unit 270, which may receive the signal relating to the constant current stimulation signal and the measured voltage signal. One of the advantages of utilizing the embodiments provided by the present invention is that substantially any size of a constant or controlled current stimulus signal may be used to perform the impedance measurement, thereby conserving battery power of the implantable medical device 200. Accordingly, the smallest stimulation signal that may reliably be provided by the stimulation unit 250 may be used to perform the impedance measurement. Thus, the impedance measurement may be made without imposing a significant charge depletion burden on the battery. Additionally, the impedance of the leads 122 themselves is also accounted for when analyzing the impedance. Furthermore, the A/D converter 920 may be calibrated prior to the operation of the implantable medical device 200, for example, during the manufacturing process.
Turning again to FIGS. 1A-1D, the leads 122 are shown connected to tissue (e.g., nerve tissue 127) in a patient's body and to the IMD 200. The implantable medical device 200 may comprise a main body 112 (FIG. 1A) in which the electronics described in FIG. 2 are enclosed. Coupled to the main body 112 is a header 114 (FIG. 1A) designed with terminal connectors 116 (FIG. 1C) for connecting to leads 122. The main body 112 may comprise a titanium case 121 and the header 114 may comprise a biocompatible polymer such as polyurethane or acrylic. The leads 122 projecting from the header 114 may be attached to the tissue utilizing a variety of methods for attaching the leads 122 to tissue. A first end of the leads 122 is coupled to connector(s) 116 on the header 114, and a distal end is coupled to the tissue by electrodes 125-1 and 125-2, which together provide a cathode and an anode (FIG. 1D). Therefore, the current flow may take place from one electrode 125-1 to a second electrode 125-2 via the tissue, thereby delivering the stimulation.
The system illustrated in FIGS. 1A-1D may be viewed as an electrical circuit that includes a current or voltage source (i.e., the battery 210 of the IMD 200) being connected to an impedance (i.e., the equivalent impedance of the tissue) via a pair of wires (i.e., the leads 122). The total impedance connected to the IMD 200 includes the impedance of the lead wires 122 as well as the impedance across the terminals 116 of the leads 122 to the tissue. One of the biggest components of the impedance experienced by terminals 116 on the header 114, to which the leads 122 are connected, is the impedance of the tissue. Therefore, if a break in any one portion of the lead wires 122 occurs (such as a break in one or more strands of a multistrand wire), the impedance may rise significantly, which may provide an indication that a break in the lead wire 122 has occurred.
Turning now to FIG. 10, a flowchart depicting steps for determining the impedance experienced by the leads 122 of the IMD 200 is provided. As shown in step 1010, stimulation is delivered by the IMD 200 to the tissue of the patient by one of a number of available stimulation delivery modes, such as a constant current signal pulse (step 1010). To conserve battery power, impedance may be determined using a small magnitude and/or short duration pulse. The resultant voltage induced across the leads 122 is measured (block 1020) upon delivery of the stimulation signal. Voltage measurement may be performed by a voltage measurement unit 910 (FIG. 9) during delivery of the stimulation current signal. The IMD 200 adjusts the time at which the voltage is measured such that it occurs while the stimulation current signal is being delivered.
An analog-to-digital (A/D) conversion is preferably performed on the voltage signal (block 1030). Although embodiments of the present invention may be performed without utilizing an A/D converter 920, in a preferred embodiment an A/D converter 920 (FIG. 9) is used to provide precise resolution of the voltage signal. The A/D converter 920 is preferably calibrated prior to the conversion of the voltage signal from analog to digital. Finally, the impedance is calculated utilizing the amplitude of the current delivered for stimulation and the corresponding voltage measurement, as shown in step 1040. The voltage resulting from the current signal delivered as stimulation is divided by the value of the current to arrive at the total impedance across the terminals 116 of the header 114 (FIGS. 1A-1D). In one embodiment, the predetermined impedance of the lead 122 itself may be subtracted to arrive at the impedance across the lead terminals 116, which corresponds to the impedance of the tissue. Various operational adjustments to the operation of the IMD 200 may be made based upon the determination of the impedance across the terminals 116.
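The FIG. 10 calculation (steps 1010 through 1040) reduces to the following sketch; the optional subtraction of the lead wires' own impedance is shown as a parameter. The names and default value are assumptions.

    def tissue_impedance_ohms(digitized_voltage_v, stim_current_a, z_lead_ohms=0.0):
        """Total impedance from the measured voltage and known constant current (step 1040),
        minus the predetermined impedance of the lead itself, if known."""
        z_total = digitized_voltage_v / stim_current_a
        return z_total - z_lead_ohms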
FIG. 11 provides a flowchart depicting the steps for performing the calibration of the A/D converter 920 (FIG. 9). In a preferred embodiment, the calibration of the A/D converter 920 is performed prior to implanting the IMD 200 in the body of the patient, more preferably during the manufacturing process of the IMD 200. Referring to FIG. 11, a predetermined, known impedance is provided for the calibration of the A/D converter 920, as depicted in step 1110. The known impedance is electrically connected across the two distal ends of leads 122 (which may or may not include electrode assembly 125), and the other ends of the lead wires 122 are connected to the terminals 116 of header 114 (step 1120). With the leads 122 connected between the IMD 200 and the known impedance, a constant current test signal is driven through the lead 122, through the known impedance, and back to the IMD 200 (step 1130).
The constant current test signal may comprise a series of individual constant current signals that may vary in duration or current amplitude from one signal to another in the series of test signals, provided that each individual test pulse comprises a constant current. During the delivery of each constant current test pulse to the known impedance, a corresponding voltage resulting from the driving of the constant current is measured across the terminals 116 of the IMD 200 (step 1140). This measurement of voltage at the terminals 116 allows a comparison to a theoretical indication of what the measurement should be by calculation from the known current being driven and the known impedance across the leads 122. This theoretical voltage calculation value is then used with the actual voltage measured across the terminals 116 to calibrate the A/D converter 920 (block 1150). Calibration of the A/D converter 920 should provide improved accuracy for measurements subsequently processed by the A/D converter 920. In another embodiment, the calibration process may be performed using multiple known impedances and corresponding resulting multiple measured voltages. Such a calibration over a range of impedances may provide further improved accuracy.
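One way the FIG. 11 calibration could be realized is sketched below: the theoretical voltage V = I × R for each known impedance is compared with the voltage actually reported through the A/D converter, and an average gain correction is derived. The single gain-factor model (no offset term) and all names are assumptions; the text does not prescribe a particular correction model.

    def calibrate_adc(known_impedances_ohms, test_current_a, measured_voltages_v):
        """Derive an average correction factor from known-impedance test pulses."""
        gains = []
        for r_known, v_measured in zip(known_impedances_ohms, measured_voltages_v):
            v_theoretical = test_current_a * r_known   # expected voltage for this test pulse
            gains.append(v_theoretical / v_measured)
        return sum(gains) / len(gains)

    def corrected_voltage(v_measured, gain):
        """Apply the calibration factor to subsequent A/D readings."""
        return v_measured * gain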
Utilizing embodiments of the present invention, a more accurate assessment of the status of the battery and the impedance experienced by the leads 122 may be made, thereby providing better warnings to the user and/or to a healthcare provider assessing the operations of the IMD 200. Various end of service (EOS) signals and/or elective replacement indication (ERI) signals may be provided to indicate the status of the operation of the IMD 200. Additionally, the impedance experienced by the leads 122 of the IMD 200 may be analyzed to assess the integrity of the leads 122 or any drastic changes in the tissue to which the stimulation signal is provided.
The particular embodiments disclosed above are illustrative only, as the invention may be modified and practiced in different but equivalent manners apparent to those skilled in the art having the benefit of the teachings herein. Furthermore, no limitations are intended to the details of construction or design herein shown, other than as described in the claims below. The particular embodiments disclosed above may be altered or modified and all such variations are considered within the scope and spirit of the invention. Accordingly, the protection sought herein is as set forth in the claims below.
1. A method for determining a time period until an end of service of an energy storage device in an implantable medical device, said energy storage device comprising a total available electrical charge that may be obtained from said energy storage device, said method comprising: determining a first current usage rate for a first future current usage period of an implantable medical device; determining a second current usage rate for a second future current usage period of an implantable medical device; determining a future combined current usage rate based upon said first future current usage rate and said second current usage rate; and determining a total charge depleted by said implantable medical device by determining a charge depleted during at least a first prior current usage period, and determining a charge depleted during at least a second prior current usage period; and determining a time period until an end of service of the energy storage device based upon said total available electrical charge, said total charge depleted, and said future combined current usage rate.
2. The method of claim 1, wherein said energy storage device comprises a battery.
3. The method of claim 1 wherein said first prior current usage period comprises a stimulation period.
4. The method of claim 1 wherein said second prior current usage period comprises an inactive period.
5. The method of claim 1, further comprising the step of determining a third current usage rate for a third future current usage period of an implantable medical device, and wherein said step of determining a future combined current usage rate comprises determining a future combined current usage rate based upon said first future current usage rate, said second current usage rate, and said third current usage rate.
6. The method of claim 5 wherein said first future current usage period comprises an inactive period, said second future current usage period comprises a stimulation period, and said third future current usage period comprises a stimulation period, and wherein said second future current usage rate is different from said third current usage rate.
7. A method for determining the remaining useful life of an energy storage device in an implantable medical device, said energy storage device having a total available electrical charge that may be obtained from said energy storage device, said method comprising: determining an active charge depletion of the implantable medical device; determining an inactive charge depletion of said implantable medical device; and determining a time period until an end of service of the energy storage device based upon said active charge depletion, said inactive charge depletion, and said total available charge.
8. The method of claim 7, wherein determining an active charge depletion of an implantable medical device further comprises determining a charge depleted during a previous stimulation performed by said implantable device.
9. The method of claim 8, wherein determining an active charge depletion of an implantable medical device further comprises determining an active current usage rate for a future stimulation to be performed by said implantable device.
10. The method of claim 9, wherein determining an active current usage rate for a future stimulation to be performed by said implantable medical device further comprises correlating a first predetermined current usage rate with a first stimulation performed by said implantable device.
11. The method of claim 10, wherein correlating said first predetermined current usage rate with said first stimulation performed by said implantable device further comprises calibrating said first predetermined current usage rate based upon at least one stimulation parameter.
12. The method of claim 10, wherein correlating said first predetermined current usage rate with said first stimulation performed by said implantable device further comprises calibrating said first predetermined current usage rate based upon a lead impedance.
13. The method of claim 10, wherein determining an active current usage rate for a future stimulation to be performed by said implantable medical device further comprises correlating a second predetermined current usage rate with a second stimulation performed by said implantable device.
14. The method of claim 13, wherein correlating said second predetermined current usage rate with said second stimulation performed by said implantable device further comprises calibrating said second predetermined current usage rate based upon at least one stimulation parameter.
15. The method of claim 7, wherein said energy storage device comprises a battery.
16. The method of claim 7, wherein determining an inactive charge depletion of an implantable medical device further comprises determining a charge depleted during a previous idle period of said implantable medical device.
17. The method of claim 7, wherein determining an inactive charge depletion of said implantable medical device further comprises determining an idle current usage rate for a future idle period of said implantable medical device.
18. The method of claim 17, further comprising calibrating said idle current usage rate for a previous idle time period.
19. The method of claim 7, further comprising generating an end of service signal based upon a determination that said time period until said end of service equals zero.
20. The method of claim 7, further comprising generating an end of service signal based upon a determination that said end of service has already occurred.
21. The method of claim 20, further comprising generating an elective replacement indicator signal based upon a determination that said time period until said end of service is less than or equal to a predetermined period.
22. The method of claim 21, wherein said predetermined period is six months.
23. A method for determining a time period until an end of service of an energy storage device in an implantable medical device, said energy storage device comprising an initial charge corresponding to a substantially full charge and a final charge corresponding to an end of useful life, comprising: determining an idle current usage rate for at least one future idle period of an implantable medical device; determining charge depleted during at least one previous idle period; determining a stimulation current usage rate for at least one future stimulation to be performed by said implantable medical device; determining charge depleted during a previous stimulation performed by said implantable medical device; determining a total charge depleted by said implantable medical device based upon said charge depleted during said previous idle period and said charge depleted during said previous stimulation by said implantable medical device; determining a future combined current usage based upon said idle current usage rate and said stimulation current usage rate; and determining a time period until an end of service of the energy storage device based upon said total charge depleted, said future combined current usage rate, said initial battery charge and said final battery charge.
24. The method of claim 23, wherein said method further comprises determining a previous idle current usage rate and a previous stimulation current usage rate, and wherein said step of determining charge depleted during a previous idle period comprises multiplying said previous idle current usage rate by the duration of said previous idle period, and said step of determining charge depleted during a previous stimulation comprises multiplying said previous stimulation current usage rate by the duration of said stimulation.
25. The method of claim 23, further comprising generating an end of service signal based upon a determination that said time period until said end of service equals zero.
26. The method of claim 25, further comprising generating an elective replacement indicator signal based upon a determination that said time period until said end of service is less than or equal to a predetermined period.
27. The method of claim 26 wherein said predetermined period is six months.
28. A method for determining the remaining useful life of a battery in an implantable medical device, said battery having a first charge and a second charge less than said first charge, comprising: determining a total available charge for said battery comprising the difference between said first charge and said second charge; determining a charge depletion of the implantable medical device; and determining a time period until an end of service of a power supply associated with said implantable medical device based upon said charge depletion and said total available charge.
29. A method for determining a remaining time until the end of service of a battery of an implantable medical device, said battery comprising a total available electrical charge that may be obtained from said battery, said method comprising: determining a previous active charge depletion of an implantable device; determining a future active current usage rate of said implantable device; determining a previous inactive charge depletion of said implantable device; determining a future inactive current usage rate of said implantable device; determining a remaining time period until an end of service of said battery based upon said total available electrical charge, said previous active charge depletion, said future active current usage rate, said previous inactive charge depletion, and said future inactive current usage rate.
30. The method of claim 29, further comprising the step of generating an end of service signal based upon a determination that said time period until said end of service equals zero.
31. The method of claim 29, further comprising the step of generating an end of service signal based upon a determination that said end of service has already occurred.
32. The method of claim 29, further comprising the step of generating an elective replacement indicator signal based upon a determination that said time period until said end of service is less than or equal to a predetermined period.
33. The method of claim 32, wherein said predetermined time period is six months.
34. An implantable medical device, comprising: an energy storage device to provide power for an operation performed by said implantable medical device, characterized by a total available electrical charge defined by the difference between an initial electrical charge and a final electrical charge; a stimulation unit, operatively coupled to said energy storage device for providing a stimulation signal; and a controller operatively coupled to said stimulation unit and said energy storage device, said controller comprising: an active charge depletion determination unit adapted to determine an electrical charge depleted by said energy storage device during stimulation operations of the implantable medical device; an inactive charge depletion determination unit adapted to determine an electrical charge depleted by said energy storage device during inactive periods of the implantable medical device; and an end of service determination unit adapted to determine a time period until an end of service of said energy storage device based upon said total available electrical charge, said active charge depletion, and said inactive charge depletion.
35. The implantable device of claim 34, wherein said implantable device is a vagus nerve stimulator device.
36. The implantable device of claim 34, wherein said active charge depletion determination unit is adapted to determine an active charge depletion relating to at least one previous stimulation period performed by said implantable device.
37. The implantable device of claim 34, wherein said active charge depletion determination unit is adapted to determine an active current usage rate for a future stimulation period to be performed by said implantable medical device.
38. The implantable medical device of claim 34, wherein said inactive charge depletion determination unit is adapted to determine an idle charge depletion relating to at least one previous idle period of said implantable medical device.
39. The implantable medical device of claim 34, wherein said inactive charge depletion determination unit is adapted to determine an idle current usage rate for a future idle period of said implantable medical device.
40. The implantable medical device of claim 34, wherein said controller is adapted to generate an end of service signal based upon a determination that said time period until said end of service equals zero.
41. The implantable medical device of claim 34, wherein said controller is adapted to generate an elective replacement indicator signal based upon a determination that said time period until said end of service is less than or equal to a predetermined period.
42. The implantable medical device of claim 41, wherein said predetermined period is six months.
43. A system for determining remaining useful life of a battery in an implantable medical device, comprising: an external monitoring device for performing remote communications with an implantable medical device; an implantable medical device adapted to communicate with said external device and to deliver a stimulation signal to a patient, said implantable device comprising: a battery to provide power for an operation performed by said implantable medical device; a communications unit to provide for communications between said external monitoring device and said implantable medical device; a stimulation unit, operatively coupled to said battery, to provide a stimulation signal; and a controller operatively coupled to said stimulation unit and said battery, said controller being adapted to determine: an active charge depletion of said implantable medical device; an inactive charge depletion of said implantable medical device; and a time period until an end of service of said battery based upon said active charge depletion and said inactive charge depletion.
44. The system of claim 43, wherein said controller further comprises an active charge depletion determination unit to determine an active charge depletion relating to a previous stimulation period performed by said implantable medical device.
45. The system of claim 44, wherein said controller further comprises an inactive charge depletion determination unit to determine an inactive charge depletion relating to a previous inactive period of said implantable medical device.
46. The system of claim 45, wherein said controller further comprises an EOS/ERI determination unit to determine said time period until an end of service of said battery based upon said active charge depletion and said inactive charge depletion.
47. The system of claim 46, wherein at least one of said active charge depletion determination unit, said inactive charge depletion determination unit, and said EOS/ERI determination unit comprises at least one of hardware, software, firmware, and a combination of said hardware, software and firmware.
48. The system of claim 43, wherein said controller is adapted to determine an active current usage rate relating to a potential stimulation to be performed by said implantable device.
49. The system of claim 43, wherein said controller is adapted to determine an idle current usage rate relating to a potential idle period of said implantable medical device.
50. The system of claim 43, wherein said controller is adapted to generate an end of service signal based upon a determination that said time period until said end of service equals zero.
51. The system of claim 43, wherein said controller is adapted to generate an end of service signal based upon a determination that said end of service time has already occurred.
52. The system of claim 43, wherein said controller is adapted to generate an elective replacement indicator signal based upon a determination that said time period until said end of service is less than or equal to a predetermined period.
53. The system of claim 52, wherein said predetermined period is 6 months.
54. A computer readable program storage device encoded with instructions that, when executed by a computer, performs a method, comprising: determining an active charge depletion of an implantable medical device; determining an inactive charge depletion of said implantable medical device; and determining a time period until an end of service of an energy storage device associated with said implantable medical device based upon said active charge depletion and said inactive charge depletion.
55. The computer readable program storage device of claim 54, wherein determining an active charge depletion of an implantable medical device comprises determining the charge depleted during a previous stimulation period performed by said implantable medical device.
56. The computer readableprogram storage device of claim 54, wherein determining an active chargedepletion of an implantable medical device comprises determining anactive current usage rate for a future stimulation period to be performed by said implantable medical device.
57. The computer readableprogram storage device of claim 56, wherein determining an active current usage rate for a future stimulation period to be performed by said implantable medical device further comprises correlating a first predetermined current usage rate with a first stimulation performed by said implantable device.
58. The computer readable program storage device of claim 57, wherein correlating said first predetermined currentusage rate with said first stimulation performed by said implantabledevice further comprises calibrating said first predetermined currentusage rate based upon at least one stimulation parameter.
59. The computer readable program storage device of claim 57, wherein determining an active current usage rate for a future stimulation period further comprises correlating a second predetermined current usage rate with a second stimulation performed by said implantable device.
60. The computer readable program storage device of claim 59, wherein correlating said second predetermined current usage rate with said second stimulation performed by said implantable device further comprises calibrating said second predetermined current usage rate based upon at least one stimulation parameter.
61. The computer readable program storage device of claim 54, wherein determining an inactive charge depletion of an implantable medical device comprises determining the charge depleted during a previous inactive period of the implantable medical device.
62. The computer readable program storage device of claim 61, wherein determining an inactive charge depletion of an implantable medical device comprises determining an inactive current usage rate for a future inactive period of the implantable medical device.
63. The computer readable program storage device of claim 62, further comprising calibrating said inactive current usage rate for a previous inactive period.
64. The computer readable program storage device of claim 54, further comprising generating an end of service signal based upon a determination that said time period until said end of service equals zero.
65. The computer readable program storage device of claim 54, further comprising generating an elective replacement indicator signal based upon a determination that said time period until said end of service is less than or equal to a predetermined period.
66. The computer readable program storage device of claim 65, wherein said predetermined period is six months.
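By way of a purely illustrative calculation (all numbers assumed; they do not come from the claims): if 100 mAh of usable battery capacity remains, the device draws 1 mA while stimulating for 2 hours per day (active charge depletion of 2 mAh/day) and 10 µA while idle for the remaining 22 hours (inactive charge depletion of 0.22 mAh/day), the estimated time until end of service is 100 / (2 + 0.22) ≈ 45 days; an elective replacement indicator would then be generated once this figure falls below the predetermined period.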
|
Talk:Tracks & Troubles/@comment-<IP_ADDRESS>-20130608171613/@comment-4397412-20130608214245
There's a spoiler with the unblocked kiss?! :O where????
|
Bresaola Prima Scelta - Treo - 90 g
Barcode:<PHONE_NUMBER>332 (EAN / EAN-13)
Product characteristics
Common name: Bresaola
Quantity: 90 g
Packaging: Plastic, Fresh, Protective atmosphere
Brands: Treo
Categories: Meats, Prepared meats, Beef, Meat preparations, Beef preparations, Dried meats, Bresaola, fr:Charcuteries de bœuf
Labels, certifications, awards: No gluten
Manufacturing or processing places: Italy
EMB code: IT H5H5G CE
Stores: Franprix
Countries where sold: France
Ingredients
→ Ingredients are listed in order of importance (quantity).
Ingredients list:
Beef, salt, dextrose, natural flavourings, preservatives: E250, E252.
Ingredients analysis:
Palm oil free; Non-vegan; Non-vegetarian
→ The analysis is based solely on the ingredients listed and does not take into account processing methods.
NOVA group
4 - Ultra processed food and drink products
Nutrition facts
NutriScore color nutrition grade
⚠️ Warning: the amount of fiber is not specified, so its possible positive contribution to the grade could not be taken into account.
⚠️ Warning: the amount of fruits, vegetables and nuts is not specified on the label, so it was estimated from the list of ingredients: 0%
Nutrient levels for 100 g
2 g Fat in low quantity
0.7 g Saturated fat in low quantity
0.4 g Sugars in low quantity
4 g Salt in high quantity
Serving size: serving
Comparison to average values of products in the same categories (containing 12, 221, 1055, 1271, 3665, 3706, 45537 and 81265 products respectively):
→ Please note: for each nutriment, the average is computed for products for which the nutriment quantity is known, not on all products of the category.
Proteins: 33 g (+11%, +1%, +30%, -13%, +58%, +99%, +76%, +75%)
Salt: 4 g (+11%, +3%, +42%, -4%, +239%, +152%, +61%, +105%)
Fruits, vegetables, nuts and rapeseed, walnut and olive oils (estimate from ingredients list analysis): 0%
Packaging
Recycling instructions and/or packaging information:
Packaging parts:
Material: Plastic (number, shape and recycling instructions not specified)
Data sources
Product added by stephane.
Last edit of product page by moon-rabbit. Product page also edited by beniben and scanparty-franprix-05-2016.
|
It just doesn't work
I have installed BetterDiscord, but whenever I start up Discord I can't find the place to change my background, so I'm assuming it didn't work. I tried reinstalling it and it still didn't work. I did move it out of my Downloads folder and opened it as admin, so I have no clue what the problem is. I'm on Windows 10, btw.
Can you elaborate on this:
cant find the place to change my background
BBD doesn't just "have a place to change the background"; you need a theme for that.
|
import matchers from 'expect/build/matchers'
import { equals } from 'expect/build/jasmineUtils'
import { getPath } from 'expect/build/utils'
import { applyJestMatcher, resolveExpected } from './utils'
import toThrowMatcher from './toThrowMatchers'
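// Maps Jest-style matcher names onto chai assertions: the spy matchers
// (toHaveBeenCalled*, toHaveReturned*) are expressed with sinon's spy API
// (getCalls, lastCall, returnValues), while the value matchers (toBe,
// toEqual, toContain, ...) delegate their pass/fail decision to Jest's own
// matcher implementations via applyJestMatcher and only re-word the messages.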
export default function(chai, utils) {
return {
toHaveBeenCalled() {
this.called
},
toHaveBeenCalledTimes(expected) {
this.callCount(expected)
},
toHaveBeenCalledWith(...expected) {
const received = utils.flag(this, 'object')
const calls = received.getCalls()
this.assert(
calls.some(call => equals(call.args, expected)),
`expected spy to have been called with arguments #{exp} at least once`,
`expected spy not to have been called with arguments #{exp}`,
expected
)
},
toHaveBeenLastCalledWith(...expected) {
const received = utils.flag(this, 'object')
const lastCall = received.lastCall
this.assert(
equals(lastCall.args, expected),
`expected last spy to have been called with arguments #{exp}, actual [${
lastCall.args
}]`,
`expected last spy not to have been called with arguments #{exp}, actual [${
lastCall.args
}]`,
expected
)
},
toHaveBeenNthCalledWith(nth, ...expected) {
const received = utils.flag(this, 'object')
const nthCall = received.getCall(nth - 1)
this.assert(
equals(nthCall.args, expected),
`expected spy ${nth} to have been called with arguments #{exp}, actual [${
nthCall.args
}]`,
`expected spy ${nth} not to have been called with arguments #{exp}, actual [${
nthCall.args
}]`,
expected
)
},
toHaveReturned() {
const received = utils.flag(this, 'object')
const returnValues = received.returnValues
this.assert(
returnValues.length > 0,
'expected spy to have returned at least once',
'expected spy not to have returned anything'
)
},
toHaveReturnedTimes(expected) {
const received = utils.flag(this, 'object')
const returnValues = received.returnValues
this.assert(
returnValues.length === expected,
'expected spy to have returned #{exp} times',
'expected spy not to have returned #{exp} times',
expected
)
},
toHaveReturnedWith(expected) {
const received = utils.flag(this, 'object')
const calls = received.getCalls()
this.assert(
calls.some(call => equals(call.returnValue, expected)),
`expected spy to have returned #{exp} at least once`,
`expected spy not to have returned #{exp}`,
expected
)
},
toHaveLastReturnedWith(expected) {
const received = utils.flag(this, 'object')
const lastCall = received.lastCall
this.assert(
equals(lastCall.returnValue, expected),
`expected last spy to have returned #{exp}, actual [${lastCall.returnValue}]`,
`expected last spy not to have returned #{exp}, actual [${
lastCall.returnValue
}]`,
expected
)
},
toHaveNthReturnedWith(nth, expected) {
const received = utils.flag(this, 'object')
const nthCall = received.getCall(nth - 1)
this.assert(
equals(nthCall.returnValue, expected),
`expected spy ${nth} to have returned #{exp}, actual [${nthCall.returnValue}]`,
`expected spy ${nth} not to have returned #{exp}, actual [${
nthCall.returnValue
}]`,
expected
)
},
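// The matchers below delegate to Jest's built-in implementations
// (expect/build/matchers) via applyJestMatcher; chai only supplies the
// assertion plumbing and the failure messages.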
toBe(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(matchers.toBe, received, expected)
this.assert(
pass,
'expected #{this} to equal #{exp}',
'expected #{this} not to equal #{exp}',
resolveExpected(expected)
)
},
toBeCloseTo(expected, precision = 2) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(
matchers.toBeCloseTo,
received,
expected,
precision
)
this.assert(
pass,
`expected #{act} to be close to #{exp} +/- ${Math.pow(10, -precision)}`,
`expected #{act} not to be close to #{exp} +/- ${Math.pow(
10,
-precision
)}`,
resolveExpected(expected),
received
)
},
toBeDefined() {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(matchers.toBeDefined, received)
this.assert(
pass,
'expected #{this} to be defined',
'expected #{this} not to be defined'
)
},
toBeFalsy() {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(matchers.toBeFalsy, received)
this.assert(
pass,
'expected #{this} to be falsy',
'expected #{this} not to be falsy'
)
},
toBeGreaterThan(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(
matchers.toBeGreaterThan,
received,
expected
)
this.assert(
pass,
'expected #{act} to be greater than #{exp}',
'expected #{act} not to be greater than #{exp}',
expected,
received
)
},
toBeGreaterThanOrEqual(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(
matchers.toBeGreaterThanOrEqual,
received,
expected
)
this.assert(
pass,
'expected #{act} to be greater than or equal #{exp}',
'expected #{act} not to be greater than or equal #{exp}',
expected,
received
)
},
toBeLessThan(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(
matchers.toBeLessThan,
received,
expected
)
this.assert(
pass,
'expected #{act} to be less than #{exp}',
'expected #{act} not to be less than #{exp}',
expected,
received
)
},
toBeLessThanOrEqual(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(
matchers.toBeLessThanOrEqual,
received,
expected
)
this.assert(
pass,
'expected #{act} to be less than or equal #{exp}',
'expected #{act} not to be less than or equal #{exp}',
expected,
received
)
},
toBeInstanceOf(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(
matchers.toBeInstanceOf,
received,
expected
)
this.assert(
pass,
'expected #{act} to be an instance of #{exp}',
'expected #{act} not to be an instance of #{exp}',
expected.name,
received
)
},
toBeNull() {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(matchers.toBeNull, received)
this.assert(
pass,
'expected #{this} to be null',
'expected #{this} not to be null'
)
},
toBeTruthy() {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(matchers.toBeTruthy, received)
this.assert(
pass,
'expected #{this} to be truthy',
'expected #{this} not to be truthy'
)
},
toBeUndefined() {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(matchers.toBeUndefined, received)
this.assert(
pass,
'expected #{this} to be undefined',
'expected #{this} not to be undefined'
)
},
toBeNaN() {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(matchers.toBeNaN, received)
this.assert(
pass,
'expected #{this} to be NaN',
'expected #{this} not to be NaN'
)
},
toContain(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(matchers.toContain, received, expected)
this.assert(
pass,
'expected #{act} to contain #{exp}',
'expected #{act} not to contain #{exp}',
expected,
received
)
},
toContainEqual(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(
matchers.toContainEqual,
received,
expected
)
this.assert(
pass,
'expected #{act} to contain #{exp}',
'expected #{act} not to contain #{exp}',
expected,
received
)
},
toEqual(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(matchers.toEqual, received, expected)
this.assert(
pass,
'expected #{act} to equal #{exp}',
'expected #{act} not to equal #{exp}',
resolveExpected(expected),
received
)
},
toHaveLength(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(
matchers.toHaveLength,
received,
expected
)
this.assert(
pass,
'expected #{this} to have length of #{exp}',
'expected #{this} not to have length of #{exp}',
expected
)
},
toMatch(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(matchers.toMatch, received, expected)
this.assert(
pass,
'expected #{act} to match #{exp}',
'expected #{act} not to match #{exp}',
expected,
received
)
},
toMatchObject(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(
matchers.toMatchObject,
received,
expected
)
this.assert(
pass,
'expected #{act} to match object #{exp}',
'expected #{act} not to match object #{exp}',
expected,
received
)
},
toHaveProperty(property, value) {
const received = utils.flag(this, 'object')
const valuePassed = arguments.length === 2
let { pass } = valuePassed
? applyJestMatcher(matchers.toHaveProperty, received, property, value)
: applyJestMatcher(matchers.toHaveProperty, received, property)
const path = getPath(received, property)
const traversedPath = path.traversedPath.join('.')
this.assert(
pass,
valuePassed
? `expected #{this} to have property #{exp} of ${value}`
: 'expected #{this} to have property #{exp}',
valuePassed
? `expected #{this} not to have property #{exp} of ${value}`
: 'expected #{this} not to have property #{exp}',
traversedPath
)
},
toMatchSnapshot() {
// not implemented for now
},
toMatchInlineSnapshot() {
// not implemented for now
},
toStrictEqual(expected) {
const received = utils.flag(this, 'object')
const { pass } = applyJestMatcher(
matchers.toStrictEqual,
received,
expected
)
this.assert(
pass,
'expected #{act} to equal #{exp}',
'expected #{act} not to equal #{exp}',
resolveExpected(expected),
received
)
},
toThrow(expected) {
const received = utils.flag(this, 'object')
const valuePassed = arguments.length === 1
let { pass } = valuePassed
? applyJestMatcher(toThrowMatcher.toThrow, received, expected)
: applyJestMatcher(toThrowMatcher.toThrow, received)
this.assert(
pass,
valuePassed
? `expected function to throw error #{exp}`
: 'expected function to throw error',
valuePassed
? `expected function not to throw error #{exp}`
: 'expected function not to throw error',
expected
)
},
toThrowErrorMatchingSnapshot() {
// not implemented for now
},
toThrowErrorMatchingInlineSnapshot() {
// not implemented for now
}
}
}
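// A minimal usage sketch, assuming this module is wired up as a chai plugin
// (the registration code and the './matchers' path below are illustrative,
// not part of this file) and that sinon spies back the spy-related matchers:
//
//   import chai, { expect } from 'chai'
//   import sinon from 'sinon'
//   import jestMatchers from './matchers'
//
//   chai.use((chaiInstance, chaiUtils) => {
//     const matchers = jestMatchers(chaiInstance, chaiUtils)
//     Object.keys(matchers).forEach(name =>
//       chaiUtils.addMethod(chaiInstance.Assertion.prototype, name, matchers[name])
//     )
//   })
//
//   const spy = sinon.spy(x => x * 2)
//   spy(21)
//   expect(spy).toHaveBeenCalledWith(21)
//   expect(spy).toHaveReturnedWith(42)
//   expect(3.14159).toBeCloseTo(3.14, 2)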
|
Board Thread:Hype Train for GoF3/@comment-27056410-20161207191157/@comment-16922870-20161210052620
Wow, that's cool! The polls jumped and so did our view count!!
|
TAHITI PETREL
One bird, shot November 21 while sitting on the water, has been identified as this rare species. It probably cannot be separated from the very similar Phoenix Island Petrel in flight.
MOTTLED PETREL
None were seen in October and only 2 in November. Both were seen close to the ship and in good light, so that the dark upperparts and heavy black border of the underwing were clearly visible.
|
User:Captainsood
Captain Sood is a Master Mariner with diverse skills including oil, chemical and gas transportation, navigation, quality assurance, and project and site management for the marine industry. He is an Indian citizen.
|
Method and apparatus to deform the cross section of one or more longitudinal sections of an oblong object
ABSTRACT
The invention relates to such a method and apparatus, as well as to the resulting deformed object out of a relatively hard material such as e.g. a metal or plastic wire, in which an undeformed section ( 2 ) is clamped near its ends ( 3, 4 ) and is subsequently upset in a controlled way by an axial pressure force ( 22 ) into a predefined shortened and thickened longitudinal section ( 5 ), while simultaneously over substantially the entire contour sheath ( 19 ) of this section ( 2 ) a counterforce ( 29 ) is exerted so that, with the progressive deformation during upsetting, the total volume of the deforming longitudinal section remains substantially constant. In this thickened section ( 5 ) a flattened zone ( 8 ) can subsequently be applied in order to realise a final shape ( 10 ) such as an opening ( 26 ). The invention also concerns in particular the use of a deformation line ( 40 ) for manufacturing a heddle for weaving looms.
TECHNICAL FIELD
The invention relates to a method and an apparatus to deform the cross section of one or more longitudinal sections of an oblong object as well as the resulting deformed object and a number of its applications. The oblong object according to this invention in particular mainly consists of relatively hard material and can e.g. be a metal or a thread of synthetic plastic material.
BACKGROUND ART
For a number of applications, there is a need for oblong objects with a practically constant cross section, such as wires, bars, laths, pipes, profiles and compact wire bunches, in particular cables that have to be extensively crosswise deformed in certain zones along their length, e.g. flattened and/or perforated, and in which the object should preferably be at least as strong in those zones as elsewhere. In the past, suggestions have been made to add a considerable amount of hard but plastically deformable material in these zones and in one way or another join it under pressure with the object or with separately connectable parts of it into deformed cross sections in deforming moulds. This however is a technically quite complicated and rather expensive solution.
BRIEF DESCRIPTION
Consequently, the need has now risen to have oblong objects consisting of only one hard material (without deformable additional material) that have enough strength along each sector of their length, but in which along their length considerably deformed, e.g. considerably thinned longitudinal sections are locally present. Likewise, it is sometimes required that the object volume is larger per length-unit in these thinned sections than elsewhere; amongst other reasons to be able to realise sufficient strength at that section. In other words, for certain applications it is necessary to possess a considerably larger strength over longer sections of an oblong object—and not necessarily near the end of this object—and to largely maintain that strength in those sections, possibly with a local increase in volume (per length-unit) in those longitudinal sections. The present invention aims at fulfilling this requirement.
In addition, practice necessitates that the deforming process allows a certain degree of flexibility so that a significant number of pre-defined deformation states can be realised in the afore-mentioned longitudinal sections. Hence, the method and the apparatus according to the invention have to be programmed and accurately controlled to that end. The invention also aims at realising in at least part of the deformed longitudinal sections specific, e.g. hardening or consolidating crystal transformations or microstructures that may additionally increase the resistance against e.g. a cyclic loading and that may therefore amongst other things additionally increase the fatigue resistance of the object when applying certain relatively hard materials.
An additional aim of the invention is to realise this deforming process in appropriate consecutive phases or steps, preferably in a semi-continuous or continuous process with an apparatus suitable for that purpose. The invention aims in particular at applying the method to metal wires, e.g. steel wires, that may have relatively small cross sections.
Depending on the specific deformation states required by certain applications, the invention aims at realising longitudinal sections provided with eye-shaped, slit-shaped, loop-shaped or hook-shaped openings, or with e.g. pin-shaped, cup-shaped or other segments. The deformed longitudinal sections can be situated at one or both ends of the object, e.g. a wire segment, and/or somewhere in between those ends. Such applications are frequently needed, e.g. in the textile industry, amongst others for guiding devices for transporting continuous yarn. A slit-shaped opening at the end of a wire can e.g. result in, or be further transformed into, a fork-shaped wire end as guiding device. When this guiding device consists of two or more slits next to each other, this can result in a comb-shaped wire end.
In particular, the invention pursues an application of the method and the apparatus to manufacture warp wire for weaving looms. To this end, i.a. oval warp eyes are applied to the wires in between their wire ends.
It is known that the opening or eye in the up- and downward moving heddle provides the passage for a warp yarn of the woven fabric to be produced. The eye in the usual steel wire heddles is usually constituted by a separate oval metal ring. This small ring is tightly enclosed by the continually passing heddle wire and the assembly is consolidated by means of tin-plating. In use, the resulting tin-plated inner side of the eye is however subject to considerable frictional wear, caused by the transversally moving warp yarns. The tin layer wears off, which roughens the side into an abrasive surface. Hence, the heddles have a limited life span and need to be replaced on a regular basis as a function of the operational requirements. Given the fact that the number of warp threads in a woven fabric (and consequently the number of heddles in a loom) is very high, replacing them is a time consuming operation. These rings can also be deployed as eyelet guides, e.g. for thread tighteners on, amongst others, winding reels for textile yarn.
Hence, it is also a purpose of the invention to provide an oblong object, in particular a wire-shaped object as guiding device for textile applications, e.g. a heddle wire or another yarn guide, in which this substantial disadvantage of low wear resistance is avoided.
According to the invention, these needs are approached or met by providing a method and an apparatus for deforming the cross section of at least one longitudinal section of an oblong object consisting predominantly of a relatively hard material in which
- this undeformed section is clamped near its ends by clamping devices that have their faces facing each other and is subsequently
- upset in a controlled way by means of an axial pressure force in between these faces into a predefined shortened and thickened longitudinal section, while at the same time
- an inwardly directed counter pressure is exerted along substantially the entire contour sheath of this undeformed section while it is shortened by upsetting into the predefined thickened section, in order to ensure that the total volume of the deforming longitudinal section bounded by this contour sheath and the mentioned faces remains substantially constant during the progressive deformation.

The aforementioned inwardly directed counter pressure along the contour sheath is in most cases not a genuine additional force but only a counterforce that guarantees that the progressive deformation proceeds correctly and is constantly guided onto the gradually outward moving contour sheath, in such a way that the longitudinal axis of the thickened section substantially continues to coincide with the one of the undeformed section.
When referring to relatively hard material here, we mean a material that is only slightly compressible to a smaller volume but that is significantly plastically deformable in a cold or hot state under relatively high pressures and without breaking; in other words, a material that has a relatively high pressure modulus, such as metals or plastics, and that is not brittle. The material may be a metal, e.g. steel and more in particular a high carbon steel alloy or a stainless steel spring wire. The oblong object to be deformed is e.g. a steel wire with e.g. a circular or rectangular cross section. The invention is preferably intended for wires with a cross section surface in between 0.5 mm² and 80 mm². The upsetting can take place cold (room temperature) as well as hot. When upsetting hot, the axial upsetting pressure needed is lower than when upsetting cold; for metals, however, extra heating energy, e.g. by electrical induction, is then needed. When the relatively hard material is a plastic, "engineering plastics" can be considered. The upsetting of the synthetic plastic material will be facilitated by heating it up properly to soften the said material before and/or during the mechanical deformation process.
An important aspect of the invention subsequently relates to a further deformation of at least part of the already thickened longitudinal section by means of a transversal pressure operation with e.g. appropriate punches, preferably in a die or mould, into a more or less flattened zone. In this zone, an opening can subsequently be produced, in particular an oval eye, to realise a longitudinal section with a desired final shape. This opening in the final shape can also be of another shape or include a slit-shaped, loop-shaped or hook-shaped and/or pin-shaped or cup-shaped segment. Loop is here intended among other things as an eye at the end of the oblong object. According to an additional aspect, at least part of the resulting longitudinal sections with final shape can also be at least locally hardened, amongst other things to increase their wear resistance. Hardening can also take place with known treatments such as laser treatment, nitriding, cementation, or applying very hard wear-proof top coats such as DLC, DLN, etc.
According to another important aspect, the invention also provides for the preferably automated feed or supply and transit or transmission of the oblong object to be deformed to and through e.g. a deformation line with successive work stations for the possibly continuous or semi-continuous deformation of longitudinal sections in sequential steps to their desired final shape by means of upsetting, flattening, etc., and subsequently for the separation of the object into pieces with adequate lengths, e.g. in between or at the level of the said longitudinal sections. The successive work stations hence include at least one upsetting station, at least one flattening station, at least one pressing station for the application of a final shape and a cutting station. The process in accordance with the invention in sequential steps has an important application in particular in e.g. the production of heddles for weaving looms. According to an important aspect, the invention also allows multiple oblong objects, e.g. wires, to be deformed and treated almost simultaneously or in parallel next to each other in order to distinctly increase the productivity of the process. The invention thus provides in particular methods that are more specifically defined i.a. in claims 8 to 17 and as explained further on.
According to the invention, the apparatus for carrying out this method includes in particular
- clamping devices that have their faces facing each other to clamp and upset the object in between the ends of the longitudinal section to be deformed,
- means to exert an axial pressure on the longitudinal section in between the named opposed faces to shorten and thicken this longitudinal section with the help of these clamping devices by means of upsetting,
- movable counter-pressure means, that extend near the contour of the longitudinal section to be deformed, to exert the inwardly directed counter pressure, and
- means that ensure a cooperation between the named means for the axial pressure and the named counter-pressure means to make sure that the volume of the space for the material to be upset, bounded by the said faces and the said movable counter-pressure means, remains substantially constant.
The invention also relates to the oblong objects themselves, e.g. the plastic or metal wires and wire segments, obtained in accordance with one or other described method, the objects being provided with at least one deformed longitudinal section with a flattened zone or with a final shape. The invention relates in particular to such objects or wires in which the cross section shape of this longitudinal section differs from the cross section shape of the rest of the wire or object and in which in between both section shapes a substantially fluently changing transition zone is present. Therefore, areas along the wire with a lesser strength (e.g. welds or other connections) are better avoided.
The longitudinal segment out of the object, in particular a stainless steel spring wire segment, can be provided with a said deformed longitudinal section near at least one of its ends. When said segment is applied as heddle for looms it will preferably have a deformed longitudinal section at both ends, in which at least one of these longitudinal sections will have another shape or opening than an oval eye and in which furthermore in between these ends a deformed longitudinal section shaped as an oval eye is present, bounded by a ring. If so required, the plane of the oval eye in this heddle may form an angle with the plane of at least one of the other openings in the sections near the heddle ends.
Finally, regardless of the specific method and apparatus, the invention provides oblong objects, in particular of metal, with deformed longitudinal sections in which in at least part of the final shapes of these sections, certain microstructures are present as a result of e.g. a certain upsetting and/or flattening and/or hardening treatment that may produce a hardening and/or consolidating effects in those sections.
BRIEF DESCRIPTION OF THE DRAWINGS
Further details of the invention will be clarified referring to an embodiment and applications, illustrated in the attached figures and an example in which additional characteristics and advantages of the method, apparatus, obtained product and applications hereof will be described. Obviously, the invention is not limited to this embodiment.
FIG. 1 schematically illustrates a wire in the upsetting apparatus right before upsetting.
FIG. 2 shows this wire with a shortened and thickened longitudinal section, obtained at the end of the upsetting process.
FIG. 3 is a front view of an embodiment of an upset longitudinal section with subsequently (in dotted line) a flattened zone with an oval contour after applying a transversal pressure operation.
FIG. 4 is a top view of the embodiment in FIG. 3 with the application of an initial split or slit in the oval zone as an intermediate step in realising the desired final shape.
FIG. 5 is a top view of FIG. 3 with the oval zone as final shape after pressing open the split shown in FIG. 4.
FIG. 6 illustrates an embodiment of a cylindrical upset longitudinal section that is more or less flattened out to an almost rectangular zone.
FIG. 7 is a schematic top view of FIG. 6.
FIG. 8 shows six possible final shapes: a hook and a pin or a loop, that can be applied in the somewhat flattened rectangular zone, or a thickened end portion that terminates in a blunt or sharp needle like extremity.
FIG. 9 schematically represents consecutive work stations in a continuous process of upsetting a longitudinal section into the appropriately cut wire length with its final shape.
FIG. 10 relates to an embodiment of the apparatus for controlled upsetting of a longitudinal section of a round wire right before starting the upsetting operation.
FIG. 11 relates to the same apparatus as FIG. 10, but with the upset final situation.
FIG. 12 is a cross section of the apparatus according to FIG. 10.
DETAILED DESCRIPTION
The circular round wire 1 with a diameter “d”, outlined in FIG. 1, includes an undeformed longitudinal section 2 with a length “l” that can be considerably longer than the length “L” of the upset, shortened, cylindrical longitudinal section 5 as shown in FIG. 2. Because of the shortening, the upset section 5 thickens up to a diameter “D”. The volume of section 2 before upsetting is according to the invention preferably substantially equal to that of section 5 after upsetting this undeformed section 2 in between its ends 3 and 4. With a shortening in which L is about one third of “l” the thickness D in section 5 becomes roughly 1.5 to 1.7 times d.
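For a purely cylindrical upset (an idealisation; the actual section 5 may have tapered ends), constant volume gives π(d/2)²·l = π(D/2)²·L, hence D = d·√(l/L). With L ≈ l/3 this yields D ≈ d·√3 ≈ 1.7 d, consistent with the 1.5 to 1.7 range quoted above; the lower end of that range reflects the volume taken up by the transition zones.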
The upsetting, illustrated in FIGS. 1 and 2, takes place in between the faces 17 and 18 of block-shaped clamping devices 15 and 16 that have adequate recesses in their surface. Firstly, these clamping devices are pushed towards one another, crosswise around the wire surface (according to arrows 23 in FIG. 1). The wire is, in doing so, adequately enclosed and clamped by them. Subsequently, the in this way clamped wire ends 3 and 4 are lengthwise pushed towards one another (according to arrows 22) in between the respective faces 17 and 18.
To guarantee that the upsetting operation proceeds correctly and that in the thereby gradually deforming longitudinal section 2 there is among others no buckling deformation instead of an upsetting, it is necessary to provide an appropriate counter pressure or counterforce (arrows 29) along substantially the entire contour sheath 19 of this section 2. This inwardly directed more or less radial counter pressure 29 is preferably adjusted in such a way that with a gradual shortening to a predefined thickened section 5, shown in FIG. 2, the counter pressure means or counterforce means 7 move (more or less radially) outward in a controlled way (according to arrows 28). This degree and speed of controlled moving outward is according to the invention preferably adjusted in such a way that the total volume of the thickening longitudinal section, permanently bounded by the contour sheath 19 and the faces 17 and 18, remains substantially constant. The co-action or cooperation of the counter pressure means 6 and 7 with the clamping devices 15 and 16 will be clarified when discussing the apparatus in FIGS. 10 and 11. We can here suffice by mentioning that the counter pressure means 6 and 7 include adequately shaped and fittingly placed gliding pieces 7 that backward-sloping move up in complementary grooves 6 in the clamping devices 15 and 16 according to arrows 30 in FIG. 1 when these clamping devices move toward one another according to arrows 22 during the upsetting operation. During this moving up, the pressure planes 31 of the gliding pieces 7 that are facing each other, adequately move outward around the contouring plane 19 of section 2 in order to allow the gradual and controlled thickening towards the thickened section 5. In most cases, an adequate compression spring loading 56 or similar is provided against the end surfaces 46 and 47 of the gliding pieces 7, in particular to get the gliding pieces 7 back into their starting position after ending the upsetting operation, which is when opening the upsetting mould for a next upsetting cycle of a new to be deformed longitudinal segment.
The wire with the thus upset section 5 is then ready for a further deformation phase, in particular a flattening of these sections according to another aspect of the invention. It can however also be another deformation, e.g. a bending, whether or not combined with a flattening. The clamping blocks 15, 16 are subsequently moved away from each other again according to arrows 24 in FIG. 2 in order to transit or transport the wire to a next deformation station. They are also laterally moved away from each other (arrows 25 in FIG. 2) to the appropriate distance "l" in order to receive a new section 2 to be upset and to resume the upsetting cycle in the upsetting station.
FIG. 3 is a frontal outline of an accordingly flattened and thinned zone 8 (dotted line) obtained by exerting a transversal pressure 32 on the previously thickened section 5, e.g. in a die or mould. This may take place according to principles, known as such, of deformation by pressing in suitable shaping moulds by means of matching shaping stamps or punches. The thickened section in FIG. 3 consists here of two tapered ends 33 connected by a thicker middle or central section 34. The flattening can result in e.g. a local thinning to about ⅓ of d. In the top view according to FIG. 4, this leads to e.g. the deformation of the thickened section 5 of FIG. 3 into a disc-shaped zone 8 with a more or less oval outer contour 20. The inner wall of the die cavity may be shaped according to the dotted line 35.
In a consecutive step, outlined in FIG. 4, an initial split or slit 9 can be pressed in centrally in between two opposed cutting stamps as a pre-shape for shaping an oval-shaped eye 26 later on. This eye is shown in FIG. 5 and is surrounded by a ring 27. The eye 26 is obtained by pressing open split 9 further on in a controlled manner with a stamp according to arrows 36. The outer contour 20 of the eye hereby approaches the outlined dotted line 35 in FIG. 4. At the same time, a preferably fluently changing transition zone 43 is realised in between the final shape 10 and the bounding non-deformed parts of the object 1.
Finally, the final shape is obtained by appropriately calibrating the cross section of the ring 27, preferably into a rounded shape for the longitudinal section with eye 26. In principle, these steps are known in moulding technology. For this calibration into the rounded final shape of the ring 27, the outer contour 20 as well as the inner contour 21 are preferably both subject to a rounding operation. In doing so one has a choice: either a substantially circular round cross section of this ring 27 or an oval round section. The major axis of that oval can be e.g. perpendicular to the plane of the eye 26 or can be parallel to it. This is thus an example of the formation of a heddle eye. It is also possible to apply dies in such a way that the opposed parts of the ring 27 on both sides of the eye 26 are not entirely in the same plane as known from e.g. WO 0055407.
For the production of e.g. a hook-shaped end 37 and/or a pin-shaped end 38 of a heddle, shown in FIG. 8, one can start from e.g. a section 5 that has been cylindrically thickened by upsetting to diameter D and length L, as suggested in FIG. 6. The flattening operation under a transversal pressure according to arrows 32 produces e.g. a substantially rectangular section 8 with width B, as shown in FIG. 7. The deformation under pressure in a following mould into a longitudinal section such as a hook 37 or a pin 38 or both, or a loop 52 or 53 with another non-oval opening, may take place with moulding techniques that are known as such. Afterwards, a screw thread can be applied to the pin end 38 in order to attach e.g. a loading member for the heddle. As an alternative, thickened wire end portions 57 with either a blunt extremity 54 or a sharp needle-like extremity 55 can be produced.
The degree of upsetting is obviously chosen as a function of the required length L so that sufficient lateral expansion is possible afterwards, e.g. in the flattening operation into an adequately flattened and hence thinned zone 8. The larger D/d is for a predefined L, the longer the length l of the corresponding longitudinal section 2 to be upset will have to be. This allows in particular to maintain a sufficient strength in the considerably flattened zone, more specifically by realising a local increase of volume in that place. Indeed, the volume per length-unit of that thinned section will generally have been increased in comparison with the volume of the undeformed wire 1 per equal length-unit, among other reasons because of the presence there of a much larger width B (FIG. 7).
In the schematic arrangement of a deformation line 40 according to FIG. 9 for the production of an oblong object 1, more in particular a heddle 42, we start off with a wire reel 39 from which the wire is unwound and fed to a first work station: an upsetting station 11 a. In it, an undeformed longitudinal section 2 is clamped and upset near its ends 3 and 4 over the pre-programmed length l into e.g. a longitudinal section 5, 33-34 as shown in FIG. 3, according to the method described herein before. Subsequently, another undeformed longitudinal section 2 is similarly clamped and upset in station 11 b into e.g. a cylindrical section 5 according to FIG. 6. The wire with the different thickened sections 5 is consecutively transmitted to the respective flattening stations 12 a and 12 b for the possibly simultaneous realisation of the flattened sections 8, shown in FIG. 7, resp. in FIG. 4. In the next step, the oval heddle eye 26 is applied in the pressing station 13 a and results in the final shape 10 according to FIG. 5.
Practically simultaneously, the realisation of the hook 37 and the pin 38 as final shape can take place in the pressing station 13 b. Finally, the division into heddles with a central eye 26, final hook 37 and final pin 38 takes place in the next step in the cutting station 14. The completion or finishing of the contour of the hook 37 and the pin 38 and a possible hardening of the central opening of the eye may conclude the manufacturing.
Said hardening and finishing can also be carried out after the pressing step (in station 13) and before the cutting through in station 14. The cutting through according to FIG. 9 takes place at the spot indicated by the arrow 41 in the cutting station in the short flattened part 8 in between the pin 38 and the hook 37.
In conclusion, we have clarified a method here to manufacture a wire segment, in particular a metal wire segment 42 shaped as a heddle for looms, in which the object 1 is supplied or delivered in consecutive steps to an upsetting station 11 a, respectively 11 b, for one or more longitudinal sections 2 to be deformed, which after the upsetting operation into their thickened section 5 are transmitted to a flattening station 12 a, respectively 12 b, to realise the said flattened zone 8, and to a pressing station 13 a to apply a pre-opening 9 and an oval opening 26 in the flattened zone 8, respectively 13 b to apply another deformation 37, 38 in another flattened zone 8. Afterwards, the object with the in this way deformed longitudinal sections with final shape 10 is divided in a cutting station 14 into metal wire segments 42 with adequate lengths between said deformed longitudinal sections with final shape 10.
The deformation line 40 to manufacture this heddle thus consists of at least one upsetting station 11 provided with clamping devices 15, 16 with faces 17, 18 facing each other to clamp and upset the object 1 in between the ends 3, 4 of the longitudinal section 2 to be deformed. In addition, the station 11 comprises means to exert an axial pressure 22 on the longitudinal section 2 in between the said opposed faces to shorten and thicken this longitudinal section by means of upsetting and using movable counter pressure means 7 that extend near the contour sheath 19 of the longitudinal section to be deformed in order to exert an inwardly directed counter pressure 29. The station 11 also consists of means 6 that ensure a cooperation between the said means for the axial pressure 22 and the said counter pressure means 7 so that the volume of the space for the metal or plastic to be upset, bounded by the said faces and the said movable counter pressure means, remains substantially constant.
The deformation line comprises furthermore at least one flattening station 12 with moulding means 32 for a controlled transversal pressure operation and at least one pressing station 13 with moulding means to produce appropriate openings 9, 26 and for other deformations 37, 38 and a cutting station 14 with cutting means 41. Finally, the deformation line comprises the means necessary for a programmed step by step transit or transmission of the heddle wire 1 to be deformed through this line till past the cutting station 14 and of means for the appropriate crosswise putting in place and removal or take-away of the said clamping devices 15, 16; of the various moulding means for the flattening pressure 32 on the one hand and in the pressing stations 13 on the other hand and of the cutting means in the cutting station in the work stations 11, 12, 13, 14.
FIGS. 10 and 11 schematically illustrate a longitudinal section view of the upper half of an apparatus for upsetting a longitudinal section 2 (in FIG. 10) of a round, in particular a circular round wire 1 into a thickened, substantially circular cylindrical longitudinal section 5 in FIG. 11. A schematic cross-section view of the solid cylindrical clamping blocks 151 and 162 of the apparatus (lower and upper half) is shown in FIG. 12. In FIG. 10, these clamping blocks 151 and 162 are in their starting position right before upsetting.
In FIG. 12, near the centre 48 of the clamping blocks, radially directed recesses 49 are shown in which longitudinal ribs 50 can move (slide) lengthwise along the apparatus. These ribs constitute the underside or base of the gliding pieces 7 that can move up backwards during upsetting according to arrows 30 in the complementary, also radially directed grooves 6. Actually, along the entire contour of the apparatus, six radial grooves 6 for gliding pieces 7 have been provided. Three grooves are in the left block 151 and are at 120° angles to one another. The three other, similar grooves (in dotted line) are in the right block 162 and alternate with the grooves in the left block 151.
During upsetting, the blocks 151 and 162 move toward each other up to the end position of FIG. 11. The three left gliding pieces 7, at 120° angles and with one of them in alignment with the illustrated vertical axis X, move up in clamping block 151. The three right gliding pieces 7 with longitudinal ribs 51 (in dotted line), of which one is in alignment with axis Y, analogously and simultaneously move up in clamping block 162. In this way, the wire surface plane 19 of section 5 remains firmly fitted in between the six radial longitudinal ribs 50 and 51, as can be deduced from FIG. 12. Consequently, the object (the wire) is practically completely enclosed. During this upsetting, the ribs 50 and 51 appropriately move outward radially in order to allow the thickening to section 5.
Example
Starting from a round stainless steel spring wire with diameter d=0.9 mm, in order to shape a heddle eye, a length l=12.50 mm thereof was upset to a thickened section 5 according to FIG. 3 with L=5.5 mm and a thickness D of about 1.5 mm in the middle section 34. The subsequently oval-shaped flattened section 8 (FIG. 4) had a thickness of about 0.6 mm. The major axis of the final oval eye 26 (inner side 21 of the ring 27) was almost 6 mm long and the length of the minor axis was almost 3 mm. The length of the minor axis of the outer side 20 (hence the width) of the ring 27 was about 5 mm.
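As a rough check under the constant-volume idealisation discussed with FIG. 2, a uniform cylinder would give D ≈ d·√(l/L) = 0.9 mm·√(12.50/5.5) ≈ 1.36 mm; since the upset section of FIG. 3 has tapered ends 33 and a thicker middle section 34, the reported thickness of about 1.5 mm in the middle is consistent with this estimate.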
Obviously, the shape of the thickened section 5 can be adapted as desired. Instead of a shape 33, 34 as in FIG. 3, a diabolo shape or a spherical or cubic shape may, for instance, be chosen. The object does not need to be a round wire but may have another cross section profile. The application of these and other variations in the method and the apparatus is assumed to be part of the invention.
1. A method for deforming the cross section of at least one longitudinal section (2) of an oblong object (1) consisting predominantly of relatively hard material in which this undeformed section (2) is clamped near its ends (3, 4) by clamping devices (15, 16) with faces facing each other (17, 18) and subsequently is upset in a controlled manner by an axial pressure force (22) in between these faces into a predefined shortened and thickened longitudinal section (5) and in which simultaneously along practically the entire contour sheath (19) of this undeformed section (2) an inwardly directed counter pressure (29) is exerted in such a way that, with the progressive deformation during the shortening by upsetting to the predefined thickened section (5), the total volume of the longitudinal section to be deformed, bounded by this contour sheath and the said faces (17, 18) remains substantially constant.
2. Method in accordance with claim 1 in which the said hard material is a metal.
3. (canceled)
4. Method in accordance with claim 2 in which the hard material is a stainless spring steel.
5. Method in accordance with claim 1 in which the said hard material is a synthetic plastic material.
6. Method in accordance with claim 1 in which the oblong object (1) is a wire, in particular with a round cross section.
7. Method in accordance with claim 1 in which the upsetting takes place cold or hot.
8. Method in accordance with claim 1 in which subsequently at least part of this thickened section (5) is further deformed by means of a transversal pressure operation (32) into a more or less flattened zone (8).
9. Method in accordance with claim 8 in which, in order to obtain a longitudinal section with a final shape (10), an oval opening (26) is applied or produced in this flattened zone (8).
10. Method in accordance with claim 8 in which, as final shape (10) in the flattened zone (8), another opening (52), a slit-shaped opening (53), a loop-shaped (52, 53), or a hook-shaped segment (37) and/or a pin-shaped segment (38) is applied or a thickened end portion (57) with either a blunt extremity (54) or with a sharp needle like extremity (55).
11. Method in accordance with claim 8 in which said object (1) is fed in consecutive steps to at least one upsetting station (11) for a number of longitudinal sections (2) to be deformed and after the upsetting operation into their thickened section (5) are transmitted to at least one flattening station (12) to apply the said flattened zone (8) in at least part of the amount of thickened sections.
12. Method in accordance with claim 11 in which said object (1) with flattened zones (8) is transmitted in a subsequent step to at least one pressing station (13) to apply a final shape (10), in particular an opening.
13. Method in accordance with claim 12 in which the said object (1) is transmitted in a subsequent step to a cutting station (14) in which the object (1) is divided into pieces (42) with adequate lengths between the said deformed longitudinal sections.
14. Method in accordance with claim 13 to manufacture a wire segment, in particular a metal wire segment (42) shaped as a heddle for weaving looms in which the object (1) is supplied or fed in consecutive steps to an upsetting station (11 a), respectively (11 b) for one or more longitudinal sections to be deformed, and which after the upsetting operation into their thickened section (5) is transmitted to a flattening station (12 a), respectively (12 b) to realise the said flattened zone (8) and to a pressing station (13 a) to apply a pre-opening (9) in one particular place and subsequently an oval opening (26) in a flattened zone (8), respectively (13 b) to apply another deformation (37, 38) in another flattened zone (8) after which the object with the in this way deformed longitudinal sections with final shapes (10) are divided in a cutting station (14) in metal wire segments (42) with adequate lengths between said deformed longitudinal sections with a final shape (10).
15. Method in accordance with claim 9 in which at least part of the said deformed longitudinal sections with final shape (10) are at least locally hardened.
16. Method in accordance with claim 1 in which several oblong objects (1) are substantially simultaneously deformed side by side and subsequently treated.
17. Method in accordance with claim 9 in which at least part of the openings in subsequent longitudinal sections with a final shape (10) are situated in substantially the same plane.
18. An apparatus for carrying out the method according to claim 1 comprising clamping devices (15, 16) with faces (17, 18) facing each other to clamp and upset the object (1) in between the ends (3, 4) of the longitudinal section (2) to be deformed,—means to exert an axial pressure (22) on the longitudinal section (2) in between the said opposed faces to shorten and thicken this longitudinal section by means of upsetting and movable counter-pressure means (7) that extend near the contour sheath (19) of the longitudinal section to be deformed to exert an inwardly directed counter pressure (29) and means (6) that ensure a cooperation between the said means for the axial pressure (22) and the said counter-pressure means (7) to make sure that the volume of the space for the material to be upset, bounded by the said faces and the said movable counter-pressure means, remains substantially constant.
19. Deformation line (40) to manufacture a heddle with the method according to claim 14 comprising at least one upsetting station (11) provided with clamping means (15, 16) with faces (17, 18) facing each other to clamp and upset the object (1) in between the ends (3, 4) of the longitudinal section (2) to be deformed, means to exert an axial pressure (22) on the longitudinal section (2) in between the said opposed faces to shorten and thicken this longitudinal section by means of upsetting and movable counter pressure means (7) that extend near the contour sheath (19) of the longitudinal section to be deformed in order to exert an inwardly directed counter pressure (29) and means (6) that ensure a cooperation between the said means for the axial pressure (22) and the said counter pressure means (7) so that the volume of the space for the material to be upset, bounded by the named faces and the named movable counter pressure means, remains substantially constant and at least one flattening station (12) with moulding means (32) for a controlled transversal pressure operation,—at least one pressing station (13) with moulding means to apply an appropriate pre-opening (9) and an oval eye (26) and for other deformations (37, 38) and a cutting station (14) with cutting means (41) as well as means for a programmed step by step transmission of the heddle wire (1) to be deformed through the deformation line (40) till past the cutting station and—means for an appropriate crosswise putting into place and removal of the said clamping devices (15, 16), of the various moulding means for the flattening pressure (32) as well as for the pressing stations (13) and of the cutting means in the cutting station (14) in the working stations (11, 12, 13, 14).
20. Oblong object obtained according to the method in claim 8, provided with at least one deformed longitudinal section with flattened zone (8) or with a final shape (10) and in which the cross section shape differs from the cross section shape of the rest of the said object and in which in between both section shapes a substantially fluently changing transition zone (43) is present.
21. Longitudinal segment (42) out of the object according to claim 20, in particular a stainless steel spring wire segment, that is provided with a said deformed longitudinal section near at least one of its ends (44, 45).
22. Segment (42) according to claim 21 shaped as a heddle for looms in which said segment has a deformed longitudinal section at both ends (44, 45), of which at least one of these longitudinal end sections has another shape or opening than an oval eye and in which furthermore in between these ends a deformed longitudinal section shaped as an oval eye (26), bounded by a ring (27) is present.
23. Heddle in accordance with claim 22 in which the plane of the oval eye (26) forms an angle with the plane of at least one of the other openings in the sections near the ends (44, 45).
24. Oblong object, in particular of metal, in accordance with claim 20 provided with at least one deformed longitudinal section with a flattened zone (8) or with a final shape (10) of which the section comprises a consolidating microstructure.
25. Method in accordance with claim 10 in which at least part of the said deformed longitudinal sections with final shape (10) are at least locally hardened.
|
Method for fabricating acoustic wave device
ABSTRACT
A method for fabricating an acoustic wave device includes the steps of forming an insulating material layer on a piezoelectric substrate, forming a patterned photoresist on the insulating material layer, patterning the insulating material layer, and forming a piezoelectric-substrate exposed depression corresponding to a region where an interdigital transducer electrode is to be formed on a first insulator layer composed of the insulating material layer, depositing a metallic material on the piezoelectric substrate to form the interdigital transducer electrode in the piezoelectric-substrate exposed depression such that the overall interdigital transducer electrode is thinner than the first insulator layer and coating the photoresist with a metallic material, removing the photoresist and the metallic material on the photoresist, and depositing a second insulator layer so as to cover the interdigital transducer electrode and the first insulator layer.
BACKGROUND OF THE INVENTION
1. Field of the Invention
The present invention relates to an acoustic wave device used as, for example, a resonator or a band-pass filter, and more specifically, to an acoustic wave device that includes an insulator layer for improving temperature characteristics that is arranged so as to cover an interdigital (IDT) electrode and a method for fabricating the same.
2. Description of the Related Art
An acoustic wave resonator and an acoustic wave filter are widely used in a band-pass filter in a mobile communications device. One example of an acoustic wave device of this kind is disclosed in Japanese Unexamined Patent Application Publication No. 2004-112748. FIG. 13 is a front sectional view that schematically illustrates a surface acoustic wave device described in Japanese Unexamined Patent Application Publication No. 2004-112748. In a surface acoustic wave device 101 illustrated in FIG. 13, an IDT electrode 103 is provided on a piezoelectric substrate 102. An insulator layer 104 is provided so as to cover the IDT electrode 103. Here, the piezoelectric substrate 102 is made of a piezoelectric material that has a negative temperature coefficient of frequency, such as a lithium tantalate (LiTaO₃) substrate or a lithium niobate (LiNbO₃) substrate. On the other hand, the insulator layer 104 is made of an insulating material that has a positive temperature coefficient of frequency, such as silicon dioxide (SiO₂). Consequently, the surface acoustic wave device 101 having favorable temperature characteristics is provided.
In the surface acoustic wave device 101, the insulator layer 104, made of, for example, a SiO₂ film, is formed after the IDT electrode 103 is formed. Therefore, when the insulator layer 104 is formed by a thin-film forming method, such as vapor deposition, a projection 104 a and a depression 104 b are inevitably formed. This is because the IDT electrode 103 is present under the insulator layer 104 and thus the surface of the insulator layer 104 is raised at a region where the IDT electrode 103 is present, which results in the projection 104 a.
However, when the projection and depression are formed, an unwanted ripple appears in the frequency characteristics. An acoustic wave device that does not include any projections and depressions is disclosed in WO 2005/034347 A1. The structure of the acoustic wave device described in WO 2005/034347 A1 is illustrated in FIG. 14 with a schematic front sectional view. In an acoustic wave device 111, an IDT electrode 113 is provided on a piezoelectric substrate 112. A first insulator layer 114 having the same film thickness as the IDT electrode 113 is formed around a region where the IDT electrode 113 is present. A second insulator layer 115 is formed so as to cover the IDT electrode 113 and the first insulator layer 114. Here, after the first insulator layer 114 and the IDT electrode 113 are formed, the second insulator layer 115 is formed. As a result, the second insulator layer 115 has a surface 115 a with no projections or depressions, so the surface of the second insulator layer 115 is substantially flat.
Because the insulator layer 104 in the surface acoustic wave device 101 described in Japanese Unexamined Patent Application Publication No. 2004-112748 has a sufficient thickness over a region where the IDT electrode 103 is present, the insertion loss tends to deteriorate. In addition, when the thickness of the insulator layer 104 is increased in order to improve temperature characteristics, a problem exists in which the fractional bandwidth is reduced.
If, in the acoustic wave device 111 described in WO 2005/034347 A1, the thickness of the second insulator layer 115 formed over the region where the IDT electrode 113 is present is reduced, it is difficult to improve the temperature characteristics using the second insulator layer 115. On the other hand, if the second insulator layer 115 has a sufficiently increased thickness to improve the temperature characteristics, the thickness of the second insulator layer 115 over the IDT electrode 113 is increased, so a problem in which the fractional bandwidth is reduced exists.
That is, in the surface acoustic wave device 101 or 111 described in Japanese Unexamined Patent Application Publication No. 2004-112748 and WO 2005/034347 A1, there is a problem in which the insertion loss is increased or the fractional bandwidth is reduced when the thickness of the insulator layer over the IDT electrode is sufficiently increased to improve the temperature characteristics. If the thickness of the insulator layer 104 or 115 over the IDT electrode 103 is reduced to avoid this problem, the temperature characteristics cannot be sufficiently improved.
SUMMARY OF THE INVENTION
To overcome the problems described above, preferred embodiments of the present invention provide an acoustic wave device in which temperature characteristics are improved while the fractional bandwidth is less susceptible to being reduced without causing an increase in the insertion loss in a structure in which an insulation layer to improve the temperature characteristics is provided on an IDT electrode.
According to a first preferred embodiment of the present invention, an acoustic wave device includes a piezoelectric substrate that includes a first main surface and a second main surface, at least one IDT electrode provided on the first main surface of the piezoelectric substrate, and an insulator layer, provided to improve a temperature characteristic, arranged on the piezoelectric substrate so as to cover the IDT electrode. Where the wavelength of an acoustic wave is λ, when a surface of the insulator layer is classified into a first surface region under which the IDT electrode is located and a second surface region under which no IDT electrode is located, the height of the surface of the insulator layer in at least one portion of the second surface region is greater than the height of the surface of the insulator layer from the piezoelectric substrate in at least one portion of the first surface region by at least about 0.001λ. Specifically, either the height of the surface of the insulator layer in at least one portion of the second surface region is greater than the height of the surface of the insulator layer in the first surface region by at least about 0.001λ, or the height of the surface of the insulator layer in the second surface region is greater than the height of the insulator layer in at least one portion of the first surface region by at least about 0.001λ.
According to a second preferred embodiment of the present invention, an acoustic wave device includes a piezoelectric substrate that includes a first main surface and a second main surface, at least one IDT electrode provided on the first main surface of the piezoelectric substrate, and an insulator layer to improve a temperature characteristic arranged on the piezoelectric substrate so as to cover the IDT electrode. The insulator layer includes a surface that has a projection protruding upward in at least one portion of a region under which no IDT electrode is located, and the height of the projection from the surface of the insulator layer disposed around the projection is at least about 0.001λ (λ is the wavelength of an acoustic wave).
That is, in the first and second preferred embodiments of the present invention, the insulator layer for improving the temperature characteristic is arranged so as to cover the IDT electrode, and the surface of the insulator layer in the first surface region, under which the IDT electrode is located and that in the second surface region, under which no electrode is located, have different heights. In the first preferred embodiment, the height of the surface of the insulator layer in at least one portion of the second surface region is greater than the height of the surface of the insulator layer from the piezoelectric substrate in at least one portion of the first surface region by at least about 0.001λ. In the description of the second preferred embodiment, the upwardly protruding projection is disposed in at least one portion of the region under which no IDT electrode is located, i.e., of the second surface region in the first preferred embodiment, and the height of the projection from the surface of the insulator layer disposed around the projection is at least about 0.001λ.
According to a preferred embodiment of the present invention, the insulator layer over the IDT electrode has a thinner portion. Therefore, the insertion loss is less prone to being degraded and the fractional bandwidth is less prone to being narrowed, while at the same time sufficient advantageous effects of improving the temperature characteristic are obtained from the other thicker portion of the insulator layer.
The structure of the insulator layer for improving the temperature characteristic is not particularly limited. According to a specific preferred embodiment of the present invention, the insulator layer may include a first insulator layer and a second insulator layer, the first insulator layer being disposed around the IDT electrode and having a film thickness greater than that of the IDT electrode, the second insulator layer being disposed so as to cover the first insulator layer and the IDT electrode and having a uniform film thickness. In this case, the acoustic wave device can be easily provided merely by the formation of the second insulator layer having a uniform film thickness after the formation of the IDT electrode and the first insulator layer.
Preferably, the first and second insulator layers may be made of the same insulator material. In this case, because the first insulator layer and the second insulator layer can be made of the same material, the process can be simplified and the cost of manufacturing can be reduced. Alternatively, the first and second insulator layers may be made of different insulator materials.
Preferably, the piezoelectric substrate may be made of a piezoelectric material having a negative temperature coefficient of frequency, and the insulator layer for improving the temperature characteristic may be made using silicon oxide. In this case, the insulator layer made of silicon oxide and having a positive temperature coefficient of frequency sufficiently reduces changes in the frequency characteristic due to temperature.
Preferably, the IDT electrode may have a duty ratio of between about 0.25 and about 0.60. This range of the duty ratio prevents an increase in electric resistance of the IDT electrode, and according to preferred embodiments of the present invention, the temperature characteristic can be improved while the degradation of the insertion loss and the narrowing of the fractional bandwidth are prevented.
Preferred embodiments of the present invention can use various types of waves as an acoustic wave. For example, a surface acoustic wave device, which utilizes a surface acoustic wave, can be provided in accordance with a preferred embodiment of the present invention.
A method for fabricating an acoustic wave device according to a preferred embodiment of the present invention includes the steps of forming an insulating material layer on a piezoelectric substrate, forming a patterned photoresist on the insulating material layer, patterning the insulating material layer, and forming a piezoelectric-substrate exposed depression corresponding to a region in which an IDT electrode is to be formed on a first insulator layer made of the insulating material layer, depositing a metallic material on the piezoelectric substrate to form the IDT electrode in the piezoelectric-substrate exposed depression such that the IDT electrode is thinner than the first insulator layer and coating the photoresist with the metallic material, removing the photoresist and the metallic material on the photoresist by the lift-off technique, and depositing a second insulator layer so as to cover the IDT electrode and the first insulator layer.
In the acoustic wave device according to the first and second preferred embodiments of the present invention, the surface of the insulator layer in at least one portion of the second surface region, under which no IDT electrode is located, is higher than the surface of the other portion of the insulator layer by at least about 0.001λ. Therefore, the temperature characteristic can be reliably improved due to the surface portion of the insulator layer having a sufficient thickness. In addition, because the surface of the insulator layer in at least one portion of the first surface region is lower, as described above, and the thickness of the insulator layer is reduced, the insertion loss is less prone to being degraded and the fractional bandwidth is less prone to being narrowed.
Accordingly, the temperature characteristic can be improved without causing a narrowing of the fractional bandwidth and an increase in the insertion loss.
With the method for fabricating an acoustic wave device according to preferred embodiments of the present invention, after the insulating material layer is formed on the piezoelectric substrate, the patterned photoresist is formed on the insulating material layer, the laminated film in which the insulating material layer and the photoresist are laminated is patterned, and the first insulator layer having the piezoelectric-substrate exposed depression in which the IDT electrode is to be formed is formed. Then, the metallic material is deposited so as to be thinner than the first insulator layer, thereby forming the IDT electrode in the piezoelectric-substrate exposed depression. The photoresist and the metallic film on the photoresist are removed by the lift-off technique, and finally, the second insulator layer is formed so as to cover the IDT electrode and the first insulator layer. In this case, the first insulator layer is thicker than the IDT electrode. Therefore, by deposition of the second insulator layer, the acoustic wave device having a relatively thin insulator layer over the IDT electrode can be easily fabricated.
Other features, elements, steps, characteristics and advantages of the present invention will become more apparent from the following detailed description of preferred embodiments of the present invention with reference to the attached drawings.
BRIEF DESCRIPTION OF THE DRAWINGS
FIG. 1A is a schematic front sectional view that illustrates a main portion of a surface acoustic wave device according to a preferred embodiment of the present invention, and FIG. 1B is a plan view of the surface acoustic wave device.
FIGS. 2A to 2F are schematic front sectional views describing the steps of fabricating a surface acoustic wave device according to a preferred embodiment of the present invention.
FIG. 3A illustrates a relationship between the height x of a projection of an insulator layer and the temperature characteristic TCF in the surface acoustic wave device according to a preferred embodiment of the present invention, and FIG. 3B illustrates an enlarged portion thereof.
FIG. 4A illustrates a relationship between the height x of a projection of an insulator layer and the fractional bandwidth BW in the surface acoustic wave device according to a preferred embodiment of the present invention, and FIG. 4B illustrates an enlarged portion thereof.
FIG. 5A illustrates a relationship between the height x of a projection of an insulator layer and the impedance ratio in the surface acoustic wave device according to a preferred embodiment of the present invention, and FIG. 5B illustrates an enlarged portion thereof.
FIG. 6 illustrates a relationship between the thickness of a second insulator layer and the temperature characteristic TCF in a related-art surface acoustic wave device prepared for comparison.
FIG. 7 illustrates a relationship between the thickness of a second insulator layer and the fractional bandwidth BW in the related-art surface acoustic wave device prepared for comparison.
FIG. 8 illustrates a relationship between the amount of improvement in the temperature characteristic and the fractional bandwidth in each of the surface acoustic wave device according to a preferred embodiment of the present invention and that according to a related-art example.
FIG. 9 illustrates a relationship between the duty of an IDT electrode and the temperature characteristic in the surface acoustic wave device according to a preferred embodiment of the present invention.
FIG. 10 illustrates a relationship between the height x of a projection of a surface of an insulator layer and the amount of a frequency shift immediately after power is turned on.
FIG. 11 is a schematic front sectional view for describing the surface acoustic wave device according to one modified example of preferred embodiments of the present invention.
FIG. 12 is a schematic front sectional view for describing the surface acoustic wave device according to another modified example of preferred embodiments of the present invention.
FIG. 13 is a schematic front sectional view that illustrates one example of a related-art surface acoustic wave device.
FIG. 14 is a schematic front sectional view that illustrates another example of the related-art surface acoustic wave device.
DETAILED DESCRIPTION OF PREFERRED EMBODIMENTS
Specific preferred embodiments of the present invention will be described below with reference to the drawings.
FIG. 1A is a partial enlarged front sectional view that illustrates an enlarged portion of a region where an IDT electrode is formed in a surface acoustic wave device according to a first preferred embodiment of the present invention, and FIG. 1B is a schematic plan view of the surface acoustic wave device.
A surface acoustic wave device 1 according to the present preferred embodiment is a one-port surface acoustic wave resonator, as illustrated in FIG. 1B. This surface acoustic wave resonator is used as one of a plurality of resonators defining a band-pass filter at a transmission side of a duplexer of a mobile phone.
As illustrated in FIG. 1B, the surface acoustic wave device 1 includes a piezoelectric substrate 2. The piezoelectric substrate 2 preferably is made using, but not limited to, a 128° rotated Y-plate X-propagation LiNbO₃ substrate, for example.
An IDT electrode 3 is provided on the piezoelectric substrate 2. The IDT electrode 3 includes a pair of comb-shaped electrodes each having a plurality of electrode fingers 3 a. In the direction of propagation of a surface acoustic wave, a reflector 4 is disposed at one side of the IDT electrode 3, and a reflector 5 is disposed at the other side of the IDT electrode 3. Each of the reflectors 4 and 5 has a structure in which a plurality of electrode fingers are shorted at both ends.
In the present preferred embodiment, each of the IDT electrode 3 and the reflectors 4 and 5 is preferably made of an electrode material principally composed of copper. More specifically, each of the IDT electrode 3 and reflectors 4 and 5 has a laminated structure in which a thinner adhesion layer made of titanium is disposed on the piezoelectric substrate 2, a main electrode layer made of copper is provided on the adhesion layer, and a protective electrode layer made of AlCu is laminated on the copper main electrode layer. In the present preferred embodiment, it is preferable that the titanium adhesion layer has a thickness of about 0.01λ, the main electrode layer has a thickness of about 0.04λ, and the protective electrode layer has a thickness of about 0.005λ, for example, where the wavelength of a surface acoustic wave is λ.
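Purely as an illustration of what these λ-relative values mean (the wavelength figure below is an assumption introduced for the sake of the example, not a value given in this description): for a surface acoustic wave with a wavelength of λ = 2 µm, the above ratios correspond to roughly 20 nm of titanium, 80 nm of copper, and 10 nm of AlCu.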
Each of the IDT electrode 3 and the reflectors 4 and 5 may also be made of a single metallic layer. Alternatively, each of the IDT electrode and the reflectors 4 and 5 may also have a structure in which a plurality of electrode layers are laminated, as in the present preferred embodiment.
An insulator layer 6 to improve temperature characteristics is preferably arranged so as to cover the IDT electrode 3 and the reflectors 4 and 5. In the present preferred embodiment, the insulator layer 6 includes a first insulator layer 7 and a second insulator layer 8. The first insulator layer 7 is disposed around the IDT electrode 3, i.e., between the electrode fingers and in a region outside the electrode. The second insulator layer 8 is arranged so as to cover the first insulator layer 7 and the IDT electrode 3. The first insulator layer 7 is also formed around the reflectors 4 and 5, i.e., between the electrode fingers of the reflectors and in a region outside the reflectors. The second insulator layer 8 is similarly arranged.
The thickness of the first insulator layer 7 is greater than that of the IDT electrode 3, i.e., that of the electrode. The difference in thickness therebetween is preferably at least about 0.001λ, for example, where the wavelength of a surface acoustic wave is λ.
The second insulator layer 8 is provided by forming an insulator layer having a uniform film thickness. Therefore, the second insulator layer 8 has a projection and a depression in its upper surface, as illustrated in the drawing. The upper-surface region of the insulator layer 6 is classified into a first surface region under which the IDT is disposed and a second surface region under which no IDT is disposed. Because the first insulator layer 7 is thicker than the IDT electrode 3, the height of the surface of the insulator layer 6 in the second surface region is greater than the height of the surface of the insulator layer 6 from a top surface 2 a of the piezoelectric substrate 2 in the first surface region, under which the IDT electrode 3 is disposed, by at least about 0.001λ, for example.
In other words, a projection and a depression are provided in the surface, i.e., the upper surface of the insulator layer 6, as illustrated in the drawing, and an upwardly protruding projection 6 a is provided in the second surface region, under which no IDT electrode is disposed. The height of the projection 6 a from the surface of the insulator layer formed around the projection 6 a is at least about 0.001λ, for example.
Similarly, also in the region in which the reflectors 4 and 5 are disposed, the surface of the insulator layer in the second surface region, around the region at which the electrode is disposed, is higher than the surface of the insulator layer in the first surface region, under which the electrode is disposed, by at least about 0.001λ, for example.
As described above, in the surface acoustic wave device 1 according to the present preferred embodiment, the insulator layer disposed in the second surface region has a sufficient thickness. Therefore, the temperature characteristics can be effectively improved. The insulator layer is thinner over the IDT electrode 3, so the fractional bandwidth is less prone to being narrowed and the insertion loss is less prone to being degraded, as is clear from an experimental example described later.
One example of a method for fabricating the surface acoustic wave device 1 will now be described with reference to FIGS. 2A to 2F.
FIGS. 2A to 2F are partial front sectional views that schematically illustrate a method for fabricating the surface acoustic wave device 1.
First, as illustrated in FIG. 2A, the piezoelectric substrate 2 composed of 128° rotated Y-plate X-propagation LiNbO₃ is prepared. Then, a SiO₂ film 7A is formed on the piezoelectric substrate 2 in order to form the first insulator layer 7 being thicker than the IDT electrode 3 by at least about 0.001λ, for example.
After that, as illustrated in FIG. 2B, a patterned photoresist 11 is formed by photolithography on a region around a region where the IDT electrode is to be formed. The patterned photoresist 11 covers the previously described second surface region.
After that, as illustrated in FIG. 2C, the SiO₂ film 7A that is not covered with the patterned photoresist 11, i.e., that is formed in the first surface region is removed by reactive ion etching, thus forming a piezoelectric-substrate exposed depression 12. In this manner, the first insulator layer 7 is formed.
Then, as illustrated in FIG. 2D, in order to form the IDT electrode 3, a metallic film 3A is formed over the entire surface. The metallic film 3A is thinner than the first insulator layer 7. The portion of the metallic film applied on the piezoelectric-substrate exposed depression forms the IDT electrode 3. The metallic film 3A is obtained by sequentially depositing a Ti layer, a Cu layer, and an AlCu layer by a thin-film forming method, such as vapor deposition, as with the case of forming the IDT electrode 3 described above.
Then, the patterned photoresist 11 is removed by the lift-off technique together with the metallic film 3A formed on the patterned photoresist 11. As illustrated in FIG. 2E, a structure in which the IDT electrode 3 and the first insulator layer 7 are formed on the piezoelectric substrate 2 is obtained.
After that, as illustrated in FIG. 2F, the second insulator layer 8 made of SiO₂ is formed over the entire surface. Because the first insulator layer 7 is thicker than the IDT electrode 3 by at least about 0.001λ, as described above, forming the second insulator layer 8 with a uniform film thickness by a commonly used thin-film forming method, such as vapor deposition or sputtering, produces projections and depressions having a substantially uniform shape in the upper portion of the second insulator layer 8 along the direction in which the electrode fingers of the IDT electrode interlock, i.e., in a direction substantially perpendicular to the sectional views of FIGS. 2A to 2F.
The fabricating method described above uses photoresist etching. However, the method for fabricating the surface acoustic wave device 1 is not limited to the above-described fabricating method. For example, the surface acoustic wave device 1 may be obtained by partially removing the surface of the insulator layer by, for example, etching, after the IDT electrode 3 and the insulator layer are formed on the piezoelectric substrate 2. In this case, the portion of the insulator layer removed by etching can be a portion over a region where the IDT electrode 3 is disposed. In this case, the first and second insulator layers are integrally made of the same material.
The first insulator layer and the second insulator layer may be made of the same material, as described above. They may be individually formed of the same material, as in the preferred embodiment described above. The first insulator layer and the second insulator layer may also be made of different insulating materials. Forming the first and second insulator layers from the same insulating material reduces the number of materials required and simplifies the fabricating process.
The insulating material of each of the first insulator layer and the second insulator layer is not particularly limited. In addition to silicon dioxide, an oxide or a nitride of silicon, tantalum, aluminum, or other suitable elements, for example, SiOxNy or TaOx, may be used.
The electrode material of the IDT electrode is not particularly limited, so it is not limited to the material used in the above-described preferred embodiment. The IDT electrode is not limited to a laminated film in which a plurality of electrode layers are laminated, and it may also be made of a single metallic material.
In the surface acoustic wave device 1 described above, the height of the insulator layer 6 from the top surface 2 a of the piezoelectric substrate 2 in the first surface region was about 0.27λ. The height of the insulator layer 6 in the second surface region was about 0.27λ+xλ. As illustrated in FIGS. 3A and 3B, a plurality of types of the surface acoustic wave device 1 were fabricated by variously changing x, i.e., the height of the projection of the insulator layer, which corresponds to the height of the insulator layer in the second surface region minus the height of the insulator layer in the first surface region.
The duty of the IDT electrode 3 was about 0.5.
The temperature coefficient of frequency TCF of each of a plurality of types of the surface acoustic wave devices 1 prepared in the above-described manner was measured. TCF was obtained by measuring the changes in resonant frequency due to temperature of the surface acoustic wave device 1 in a temperature range of about −25° C. to about +85° C.
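For clarity, the figure of merit can be read with the standard definition (this formula is an editorial addition and is not spelled out in the original text): TCF ≈ (1/f₀)·(Δf/ΔT), i.e., the measured change in resonant frequency over the roughly 110° C. span, divided by the reference resonant frequency f₀ and by the temperature span, and expressed in ppm/° C.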
As shown in FIGS. 3A and 3B, it was found that TCF approaches 0 ppm/° C. as the height x of the projection of the insulator layer increases. That is, it was found that changes in frequency characteristics due to temperature are reduced and the temperature characteristics are improved. In particular, it was found that, when x is at least about 0.001λ, the advantageous effects of the improvement in TCF are most evident.
Changes in the fractional bandwidth with respect to the height x of the projection of the insulator layer of the surface acoustic wave device 1 are illustrated in FIGS. 4A and 4B.
As shown in FIGS. 4A and 4B, it was found that the fractional bandwidth is reduced with an increase in the height x of the projection of the insulator layer. However, as is clear from the comparison between FIGS. 3 and 4, when the height of the projection of the insulator layer increases, the advantageous effects of improving temperature characteristics are increased, whereas the fractional bandwidth is not significantly reduced.
FIGS. 5A and 5B illustrate changes in the impedance ratio in the surface acoustic wave device 1, i.e., the ratio of the impedance at an anti-resonant frequency to the impedance at a resonant frequency, with respect to the height x of the projection of the insulator layer.
As shown in FIGS. 5A and 5B, when the height x of the projection of the insulator layer changes, the impedance ratio changes. When x is at least about 0.001λ, the impedance ratio is large, as compared to a related-art example in which x is zero, thereby enabling the insertion loss to be effectively reduced.
As described above, compared to when the height of the projection of the insulator layer is zero, i.e., compared to a structure corresponding to the related-art example illustrated in FIG. 14, if the height x of the projection of the insulator layer is at least about 0.001λ, the temperature characteristics can be effectively improved without a significant reduction in the fractional bandwidth and in the impedance ratio.
Accordingly, as is clear from FIGS. 3A to 5B, in the present preferred embodiment, the height x of the projection of the insulator layer preferably is at least about 0.001λ, for example.
For comparison, the surface acoustic wave device illustrated in FIG. 14 was fabricated in a similar manner to that of the above-described preferred embodiment. In the surface acoustic wave device of this comparison example, a SiO₂ film for improving temperature characteristics having a film thickness of about 0.27λ, about 0.29λ, about 0.31λ, or about 0.33λ was formed as the second insulator layer. It is substantially the same as the above-described preferred embodiment except that the surface of the second insulator layer is flat. The temperature characteristic and the fractional bandwidth in the surface acoustic wave device according to this related-art example were measured in substantially the same manner as in the above-described preferred embodiment. The results are shown in FIGS. 6 and 7.
FIGS. 6 and 7 illustrate changes in the temperature characteristic TCF and in the fractional bandwidth, respectively, with respect to the thickness of the second insulator layer in the surface acoustic wave device according to the related-art example prepared for comparison.
As shown in FIGS. 6 and 7, it was found that, in the surface acoustic wave device corresponding to the related-art example illustrated in FIG. 14, as the thickness of the second insulator layer increases, the temperature characteristics are improved, whereas the fractional bandwidth is sharply narrowed.
In contrast to this, as illustrated in FIGS. 3A to 5B, it was found that, in the surface acoustic wave device 1 according to the present preferred embodiment, as previously described, even when the advantageous effects of improving the temperature characteristics are sufficiently obtained from the thicker insulator layer in the second surface region, under which no IDT electrode is disposed, the fractional bandwidth is less prone to being narrowed, the impedance ratio is less prone to being reduced, and therefore, the insertion loss is less prone to being exacerbated. This is because the fractional bandwidth is less prone to being narrowed and the impedance ratio is less prone to being reduced due to a reduced thickness of the insulator layer over the IDT electrode, whereas the advantageous effects of improvement in the temperature characteristics are sufficiently obtained from an increased thickness of the insulator layer in the second surface region, under which no IDT electrode is disposed.
FIG. 8 illustrates a relationship between the amount of improvement in temperature characteristic of frequency and the fractional bandwidth in the surface acoustic wave device 1 according to the above-described preferred embodiment. In FIG. 8, the vertical axis represents a normalized fractional bandwidth relative to a fractional bandwidth in a surface acoustic wave device in which the insulator layer for improving the temperature characteristic is about 0.27λ and the surface of the insulator layer is substantially flat (x=0).
For comparison, a relationship between the amount of improvement in temperature characteristic of frequency and the fractional bandwidth in the surface acoustic wave device in the related art illustrated in FIG. 14 is also illustrated in FIG. 8.
As shown in FIG. 8, it is found that, in the surface acoustic wave device in the related art illustrated in FIG. 14, the fractional bandwidth is significantly reduced as the amount of improvement in the temperature characteristic of frequency increases. In contrast, according to the above-described preferred embodiment, even when the amount of improvement in the temperature characteristic is increased, the fractional bandwidth is less prone to being narrowed.
The surface acoustic wave device illustrated in FIG. 13 in the related art was also fabricated in a similar manner to that of the above-described preferred embodiment except that a SiO₂ film having a thickness of about 0.27λ was formed over the entire surface so as to cover the IDT electrode. The impedance ratio was no more than about 50 dB, which revealed that the characteristics were significantly exacerbated, as compared to the above-described preferred embodiment.
In the above-described preferred embodiment, it is difficult for the fabricating method described with reference to FIG. 2 to form the insulator layer 6 such that the height x of the projection of the insulator layer is at least about 0.3λ due to process reasons. Accordingly, for manufacturing reasons, the upper limit of the height x of the projection of the insulator layer, i.e., the difference between the height of the surface of the insulator layer in the first surface region and that in the second surface region is about 0.3λ. When the surface acoustic wave device 1 is obtained using a fabricating method other than the above-described fabricating method, the upper limit is not limited to the upper limit value of about 0.3λ, and the height x of the projection of the insulator layer may be about 0.3λ or more.
In the above-described preferred embodiment, regarding the projection and depression in the surface of the insulator layer 6, the insulator layer projects in the second surface region relative to the first surface region, and the cross-sectional shape of the IDT electrode 3 is substantially rectangular. However, it may be trapezoidal or inverted trapezoidal. The edges formed by the surface of the insulator layer and the projection may be rounded.
For the surface acoustic wave device 1, a plurality of types having a projection height x of the insulator layer of about 0.03λ and different duty ratios of the IDT electrode were fabricated, and the temperature coefficient of frequency TCF was measured. The results are shown in FIG. 9. As is shown in FIG. 9, it was found that, when the duty ratio is about 0.60 or less, the absolute value of the temperature characteristic TCF can be kept below about 14 ppm/° C., and the temperature characteristics can be effectively improved. A duty ratio of less than about 0.25 is not preferred because the electric resistance becomes too high. Accordingly, it is preferable that the duty ratio be in a range of about 0.25 to about 0.6, for example.
Next, for the surface acoustic wave device 1 according to the above-described preferred embodiment fabricated such that the height x of the projection of the insulator layer was about 0.01λ, about 0.02λ, or about 0.03λ, the behavior of changes in resonant frequency immediately after a power of about 0.9 W was turned on was observed. The results are shown in FIG. 10. For comparison, the surface acoustic wave device having a projection height x of zero, i.e., corresponding to the related-art example illustrated in FIG. 14, was fabricated in a similar manner, and the amount of the frequency shift immediately after power was turned on was measured. The results are also shown in FIG. 10.
As shown in FIG. 10, it was found that, when the height x of the projection of the insulator layer increases, the amount of the frequency shift occurring immediately after power is turned on is reduced. That is, it was found that the withstand electric power is improved, and this can suppress to a greater extent the frequency shift occurring immediately after power is turned on. In particular, when the height x of the projection of the insulator layer is in a range of about 0.01λ to about 0.03λ, the amount of the frequency shift is significantly improved, and it is further improved when the height x of the projection of the insulator layer is in a range of about 0.02λ to about 0.03λ.
Therefore, it was found that, according to preferred embodiments of the present invention, a surface acoustic wave device having stable characteristics immediately after power is turned on is provided.
In the surface acoustic wave device 1 illustrated in FIG. 1A, the height of the insulator layer in the entire first surface region, under which the IDT electrode 3 is disposed, is less than the height of the insulator layer in the entire second surface region, under which no IDT electrode 3 is disposed. However, the surface of the insulator layer in at least one portion of the second surface region may be higher than the surface of the insulator layer in at least one portion of the first surface region by at least about 0.001λ. For example, as illustrated in FIG. 11, the height of the insulator layer 6 in at least one portion of the first surface region over the IDT electrode 3 may be less than the height of the insulator layer in the second surface region. In other words, the dimension W of the projection 6 a disposed in the surface of the insulator layer 6 along the width direction of the electrode fingers may be greater than the gap between the electrode fingers.
As illustrated in FIG. 12, the surface of the insulator layer in at least one portion of the second surface region, under which no IDT electrode is disposed, may be higher than the surface of the insulator layer in the first surface region. In other words, the width direction dimension W′ of the projection 6 a disposed in the surface of the insulator layer 6 may be less than the gap between the electrode fingers.
In the above-described preferred embodiment, a one-port surface acoustic wave resonator is described as one example. However, the present invention is applicable to a surface acoustic wave resonator having a different structure and a surface acoustic wave filter device and is also applicable to an acoustic wave device that uses an acoustic wave different from a surface acoustic wave, such as a boundary acoustic wave.
While preferred embodiments of the present invention have been described above, it is to be understood that variations and modifications will be apparent to those skilled in the art without departing from the scope and spirit of the present invention. The scope of the present invention, therefore, is to be determined solely by the following claims.
What is claimed is:
1. A method for fabricating an acoustic wave device, the method comprising the steps of: forming an insulating material layer on a piezoelectric substrate; forming a patterned photoresist on the insulating material layer, patterning the insulating material layer, and forming a piezoelectric-substrate exposed depression corresponding to a region where an interdigital transducer electrode is to be formed on a first insulator layer composed of the insulating material layer; depositing a metallic material on the piezoelectric substrate to form the interdigital transducer electrode in the piezoelectric-substrate exposed depression such that the overall interdigital transducer electrode is thinner than the first insulator layer and coating the photoresist with the metallic material; removing the photoresist and the metallic material on the photoresist; and depositing a second insulator layer so as to cover the interdigital transducer electrode and the first insulator layer; wherein the second insulator layer includes an upper surface and a lower surface opposed to the upper surface; the upper surface of the second insulator layer includes a first surface region under which the interdigital transducer electrode is located and a second surface region under which the interdigital transducer electrode is not located; and the step of depositing the second insulator layer is performed such that a height of at least a portion of the second surface region of the second insulator layer measured from the piezoelectric substrate is greater than a height of at least a portion of the first surface region of the second insulator layer measured from the piezoelectric substrate.
2. The method for fabricating an acoustic wave device according to claim 1, wherein the first and second insulator layers are made of the same insulating material.
3. The method for fabricating an acoustic wave device according to claim 1, wherein the first and second insulator layers are made of different insulating materials.
4. The method for fabricating an acoustic wave device according to claim 1, wherein the piezoelectric substrate is made of a piezoelectric material having a negative temperature coefficient of frequency, and the first and second insulator layers are made of silicon oxide.
5. The method for fabricating an acoustic wave device according to claim 1, wherein the interdigital transducer electrode has a duty ratio of between about 0.25 and about 0.60.
|
import DataType from "@ui5/webcomponents-base/dist/types/DataType.js";
/**
* @lends sap.ui.webcomponents.main.types.CalendarSelectionMode.prototype
* @public
*/
const CalendarSelectionModes = {
/**
* Only one date can be selected at a time
* @public
* @type {Single}
*/
Single: "Single",
/**
* Several dates can be selected
* @public
* @type {Multiple}
*/
Multiple: "Multiple",
/**
* A range defined by a start date and an end date can be selected
* @public
* @type {Range}
*/
Range: "Range",
};
/**
* @class
* Different date selection modes for <code>ui5-calendar</code>.
* @constructor
* @author SAP SE
* @alias sap.ui.webcomponents.main.types.CalendarSelectionMode
* @public
* @enum {string}
*/
class CalendarSelectionMode extends DataType {
static isValid(value) {
return !!CalendarSelectionModes[value];
}
}
CalendarSelectionMode.generateTypeAccessors(CalendarSelectionModes);
export default CalendarSelectionMode;
|
Student participation Rubric
* CATEGORY || **Poor 1** || **Fair 2** || **Good 3** || **Great 4** || **Points** ||
|
import { Component, forwardRef } from '@angular/core';
import { ControlValueAccessor, NG_VALUE_ACCESSOR } from '@angular/forms';
@Component({
selector: 'il-checkbox-control',
templateUrl: './checkbox-control.component.html',
styleUrls: ['./checkbox-control.component.css'],
providers: [
{
provide: NG_VALUE_ACCESSOR,
useExisting: forwardRef(() => CheckboxControlComponent),
multi: true
}
]
})
export class CheckboxControlComponent implements ControlValueAccessor {
  private _value = false;
  // Callback registered by the forms API; replaced in registerOnChange.
  propagateChange: (value: boolean) => void = () => {};
  validateFn: any = () => {};
get value(): boolean {
return this._value;
}
set value(val) {
this._value = val;
this.propagateChange(val);
}
toggleValue() {
this.value = !this._value;
}
  writeValue(value: boolean) {
    // Called by the forms API to write a value into the control; do not re-emit it back.
    this._value = value;
  }
  registerOnChange(fn: (value: boolean) => void) {
this.propagateChange = fn;
}
registerOnTouched() {}
}
|
import argparse
import base64
import os
import requests
import yaml
# Deployment options
MINIKUBE_TARGET = 'minikube'
INGRESS_REMOTE_TARGET = 'oc-ingress'
OCP_TARGET = 'ocp'
OPENSHIFT_TARGET = 'oc'
KIND_TARGET = 'kind'
IMAGE_FQDN_TEMPLATE = "quay.io/{}/{}:{}"
def load_deployment_options(parser=None):
if not parser:
parser = argparse.ArgumentParser()
parser.add_argument(
'--namespace',
help='Namespace for all deployment images',
type=str,
default='assisted-installer'
)
parser.add_argument(
'--target',
help='Target kubernetes distribution',
choices=[MINIKUBE_TARGET, OPENSHIFT_TARGET, INGRESS_REMOTE_TARGET, OCP_TARGET, KIND_TARGET],
default=MINIKUBE_TARGET
)
parser.add_argument(
'--domain',
help='Target domain',
type=str
)
parser.add_argument(
'--replicas-count',
help='Replicas count of assisted-service',
type=int,
default=3
)
parser.add_argument(
'--enable-kube-api',
help='Assisted service support k8s api',
type=bool,
default=False
)
parser.add_argument(
'--enable-event-stream',
help='Assisted service support to stream events to kafka',
type=bool,
default=False
)
parser.add_argument(
"--storage",
help='Assisted service storage',
type=str,
default="s3"
)
parser.add_argument("--apply-manifest", type=lambda x: (str(x).lower() == 'true'), default=True)
parser.add_argument("--persistent-storage", type=lambda x: (str(x).lower() == 'true'), default=True)
parser.add_argument('-p', '--port', action="append", nargs=2,
metavar=('port', 'name'), help="Expose a port")
parser.add_argument("--image-pull-policy", help='Determine if the image should be pulled prior to starting the container.',
type=str, choices=["Always", "IfNotPresent", "Never"])
deploy_options = parser.add_mutually_exclusive_group()
deploy_options.add_argument("--deploy-tag", help='Tag for all deployment images', type=str)
deploy_options.add_argument("--deploy-manifest-tag", help='Tag of the assisted-installer-deployment repo to get the deployment images manifest from', type=str)
deploy_options.add_argument("--deploy-manifest-path", help='Path to local deployment images manifest', type=str)
deploy_options.add_argument('--disable-tls', action='store_true', help='Disable TLS for assisted service transport', default=False)
parsed_arguments = parser.parse_args()
if parsed_arguments.target != INGRESS_REMOTE_TARGET:
parsed_arguments.disable_tls = True
return parsed_arguments
def get_file_content(repo_url, revision, file_path):
"""Get a git project file content of a specific revision/tag"""
url = "%s/contents/%s?ref=%s" % (repo_url, file_path, revision)
response = requests.get(url)
response.raise_for_status()
return base64.b64decode(response.json()['content'])
def get_manifest_from_url(tag):
manifest_file = get_file_content("https://api.github.com/repos/openshift-assisted/assisted-installer-deployment", tag, "assisted-installer.yaml")
return yaml.safe_load(manifest_file)
def get_image_revision_from_manifest(short_image_name, manifest):
for repo_info in manifest.values():
for image in repo_info["images"]:
if short_image_name == image.split('/')[-1]:
return repo_info["revision"]
raise Exception("Failed to find revision for image: %s" % short_image_name)
def get_tag(image_fqdn):
return image_fqdn.split(":")[-1].replace("latest-", "")
def get_image_override(deployment_options, short_image_name, env_var_name, org="edge-infrastructure"):
# default tag is latest
tag = "latest"
image_from_env = os.environ.get(env_var_name)
if deployment_options.deploy_manifest_path:
print("Deploying {} according to manifest: {}".format(short_image_name, deployment_options.deploy_manifest_path))
        with open(deployment_options.deploy_manifest_path, "r") as manifest_content:
            manifest = yaml.safe_load(manifest_content)
tag = f"latest-{get_image_revision_from_manifest(short_image_name, manifest)}"
elif deployment_options.deploy_manifest_tag:
print("Deploying {} according to assisted-installer-deployment tag: {}".format(short_image_name, deployment_options.deploy_manifest_tag))
manifest = get_manifest_from_url(deployment_options.deploy_manifest_tag)
tag = f"latest-{get_image_revision_from_manifest(short_image_name, manifest)}"
elif deployment_options.deploy_tag:
print("Deploying {} with deploy tag {}".format(short_image_name, deployment_options.deploy_tag))
tag = deployment_options.deploy_tag
    # In case none of the above options was used, allow overriding specific images with an env var
elif image_from_env:
print("Overriding {} deployment image according to env {}".format(short_image_name, env_var_name))
print("{} image for deployment: {}".format(short_image_name, image_from_env))
return image_from_env
image_fqdn = IMAGE_FQDN_TEMPLATE.format(org, short_image_name, tag)
print("{} image for deployment: {}".format(short_image_name, image_fqdn))
return image_fqdn
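# --- Example usage (an illustrative sketch, not part of the original module) ---
# The override precedence implemented above is: local manifest file > manifest tag >
# deploy tag > per-image environment variable > the default "latest" tag.
# A hypothetical caller might resolve a service image like this (the image name and
# the environment-variable name below are assumptions for the example):
#
#   deploy_options = load_deployment_options()
#   service_image = get_image_override(deploy_options, "assisted-service", "SERVICE_IMAGE")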
|
package parameter.validator;
import java.util.regex.Pattern;
public class RegexValidator implements ParameterValidator {
private final Pattern pattern;
public RegexValidator(Pattern pattern) {
this.pattern = pattern;
}
@Override
public boolean validate(String parameterValue) {
return this.pattern.matcher(parameterValue).matches();
}
}
|
public interface AnimalBehavior {
void move();
void makeSound();
}
|
Use pandas categorical dtype encoding where applicable
Woodwork uses the category dtype where applicable for certain Logical Types.
One of the benefits of using this pandas dtype is that pandas will automatically encode the column and provide you with a mapping.
EvalML is currently not using this functionality and may benefit from it (possibly in the OneHotEncoder?).
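For example, here is a small sketch of the encoding and mapping pandas exposes for a category-typed column (the column values are made up for illustration):

import pandas as pd

# A feature column stored with the pandas "category" dtype
colors = pd.Series(["red", "blue", "red", "green"], dtype="category")

# pandas keeps the integer encoding and the mapping back to the original labels
print(colors.cat.codes.tolist())    # [2, 0, 2, 1]
print(list(colors.cat.categories))  # ['blue', 'green', 'red']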
Yep. Question: does our one-hot encoder currently output category type? I thought it did.
Filing as spike--if we're not using the right type, let's use the right type :)
More context for whoever picks this up: https://ayx-innovation-labs.slack.com/archives/CPYLCJR9Q/p1623272174251500
|
DISCUSSION: FORESTS, RESERVOIRS, AND STREAM FLOW
Mr. Chittenden.
will occur any time when the thermometer does not go above thirty-two degrees within a short time after a storm. The importance of presenting as small a surface to the action of such an air as that is very apparent, and it is in storing up the snow in heaps and packing it away in deep pockets that the economy of nature is manifested. The center of the body will not melt at any time, and it requires a very warm day to get at the underside of the snowdrift. The grass will be growing all around it before the ground underneath it gets warmed up sufficiently to start a stream from it, but let a tree stick its head up through the crust and it [the snow] will go quickly. "I have yet to see the first body of perpetual snow lying among the trees. It will hardly do to say that the timber lies below the line of perpetual snow, for there are many banks which only disappear entirely once in ten years or so (when there comes a long dry summer), which have trees growing higher up on the same mountain side.
"In any case, I do not wish to be understood as favoring the destruction of the forests of this or any other country. I never cut down a tree in my life and never saw one fall without feeling that I had lost a friend. Whatever is proven, there will always be abundant reasons for preserving extensive tracts of woodland everywhere that trees will grow, and it is time the matter became one of public concern."
IV. The following extract is given on account of its historic interest, and because it presents in exaggerated form the quixotism of the Ohio River reservoir project which the writer has criticized in his paper. It is from a book written by Colonel Charles Ellet, Jr., entitled "The Mississippi and Ohio Rivers," published in 1853, pages 303 to 305.
"Reservoirs may eventually be made of sufficient capacity to hold all the annual excess, and make the daily flow almost entirely uniform. The banks of the Ohio and Mississippi, now broken by the current and lined with fallen trees, ready to be swept by the next freshet into the channel, there to form dangerous snags, may yet, in the course of a very few years, be cultivated and adorned down to the water's edge. In the opinion of the writer, the grass will hereafter grow luxuriantly along the caving banks; all material fluctuations of the waters will be prevented, and the level of the river surface will become nearly stationary. Grounds, which are now frequently inundated and valueless, will be tilled and subdued; the sandbars will be permanently covered, and, under a uniform regimen of the stream, will probably cease to be produced. The channels will become stationary. The wharves will be built as the wharves on tide water, with little, if any, reference to the fluctuations of the surface. The lower streets of all the river towns, no longer exposed to inundations, will acquire new value. The turbid waters will be arrested in the upper pools, and the Ohio first, and ultimately the Missouri and Mississippi, will be made to flow forever with a constant, deep, limpid stream. The ice will be swept off as it forms, and neither cold nor drought will longer be suffered injuriously to affect the navigation. The ocean steamers will
|
Unique filtered index and indexed view are ignored by optimizer
Suppose I have a table schema like this:
Partners table
ID Name
1 Test
Partners_Codes table
Partner_ID Code
1 'Test_Code'
I also have unique clustered index on Partner_ID in Partners_Codes:
create unique clustered index IX_Partners_Codes on Partners_Codes (Partner_ID);
Now, when I do query like this:
select
P.ID, P.Name
from dbo.Partners as P
left outer join dbo.Partners_Codes as PC on PC.Partner_ID = P.ID;
The SQL Server optimizer is smart enough to see the indexes on Partners_Codes and not query Partners_Codes at all, and this is good for performance.
Now suppose I have to add an Active column to Partners_Codes, so I could have many codes for each Partner, but only one of these codes can be Active.
Partners_Codes2 table
Partner_ID Code Active
1 'Test_Code' 1
1 'Test_Code_old1' 0
1 'Test_Code_old2' 0
There are two approaches I've tried: using a filtered index on Active = 1, or creating a view with a unique index on it:
create table Partners_Codes2 (Partner_ID int, Code nvarchar(128), Active bit);
create view vw_Partners_Codes2
with schemabinding
as
select
Partner_ID, Code
from dbo.Partners_Codes2
where Active = 1;
create unique clustered index IX_vw_Partners_Codes2 on vw_Partners_Codes2 (Partner_ID);
create table Partners_Codes3 (Partner_ID int, Code nvarchar(128), Active bit);
create unique clustered index IX_Partners_Codes31 on Partners_Codes3 (Partner_ID, Code);
create unique nonclustered index IX_Partners_Codes32 on Partners_Codes3 (Partner_ID) include(Code, Active) where (Active = 1);
But with both approaches the SQL Server optimizer will still query the Partners_Codes table, even though it should know there is at most one matching row and I'm not fetching any data from that table.
My actual schema is a bit more complicated, and I don't want to split the data into several tables. The question is: is it possible to create a filtered index or indexed view so that the optimizer will use it in the case shown?
sql fiddle demo
related links:
Optimizer Limitations with Filtered Indexes
Filtered index condition is ignored by optimizer
Are you on Enterprise Edition and/or are you specifying NOEXPAND when querying the view? You need at least one of those to be true for the index to be considered.
@Damien_The_Unbeliever yes, Enterprise Edition
@Damien_The_Unbeliever I've added an answer, thank you, you've helped me find the right direction
I've just read this article to the end and found that I'd missed this last line:
The NOEXPAND hints are needed even in Enterprise Edition to ensure the
uniqueness guarantee provided by the view indexes is used by the
optimizer.
I think this line should be printed in bold, 50-point font at the beginning of the article.
So I've just changed my query like this:
select
P.ID
from dbo.Partners as P
left outer join dbo.vw_Partners_Codes2 as PC with (noexpand) on PC.Partner_ID = P.ID;
and it works fine!!!
sql fiddle demo
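One way to double-check the join elimination (a hypothetical verification step, not part of the original post) is to capture the plan text for the rewritten query; when elimination works, neither vw_Partners_Codes2 nor its clustered index should appear in the plan:
set showplan_text on;
go
select
P.ID
from dbo.Partners as P
left outer join dbo.vw_Partners_Codes2 as PC with (noexpand) on PC.Partner_ID = P.ID;
go
set showplan_text off;
go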
Some more links:
How filtered indexes could be a much more powerful feature
Optimizer Limitations with Filtered Indexes
|
Category:LIU Brooklyn Blackbirds soccer
This category is for soccer at The Brooklyn Campus of Long Island University before LIU merged the athletic programs of its Brooklyn and Post campuses in July 2019. For soccer at the merged athletic program, see.
* Note: The school's teams had gone by "Long Island" until 2013–14, at which time the athletic program became known as "LIU Brooklyn."
|
Improving sustainable use of genetic resources in biodiversity archives
Biodiversity and Conservation
Note that a Preprint of this article also exists, first published August 12, 2019.
Introduction
Genetic resources archived in biodiversity collections are critically important for scientific research because they permit immediate access to large numbers of samples obtained across taxa, time and space, including samples that would be difficult or even impossible to obtain today (Droege et al., 2014; Burrell, Disotell & Bergey, 2015; Schäffer et al., 2017). Increasing reliance on archived genetic resources by a growing community of researchers, however, presents a significant challenge because current methods for sharing genetic resources are not sustainable; in most cases, researchers requesting access to genetic resources are provided with a piece of tissue that is consumptively subsampled from a permanently archived resource (Zimkus & Ford, 2014). Researchers then destroy this subsample during the course of DNA extraction, use the DNA that is required for their research and typically discard any remaining material. As a result, every request to use genetic resources results in depletion of samples that, left unchecked, will result in complete sample exhaustion and permanent loss of an irreplaceable resource. Because some tissues are present in very small quantities, some genetic resources can only be provided to one or a few researchers before an irreplaceable resource is lost forever. This issue becomes especially pressing when one considers the current extinction crises and increasingly strict regulations for scientific collecting that may prevent samples being replenished from wild specimens (Stuart et al., 2004; Watanabe, 2015). As a result, it is important to develop protocols that improve sustainable use of these resources.
Because the vast majority of requests to use archived genetic resources involve efforts to sequence DNA, protocols for DNA extraction from archival tissues are an obvious focal point for optimization aimed at improving sustainability of current practices. Most biodiversity collections aim to provide researchers requesting access to genetic material with enough tissue to conduct two DNA extractions (Zimkus & Ford, 2014), but collections staff and researchers are often unaware of how much tissue is optimal for extraction because few studies have investigated how sample age, preservation method, extraction protocol, type of tissue, and subsample size are related to the quantity, concentration, and quality of extracted DNA (but see Reineke, Karlovsky & Zebitz, 1998; Drabkova, Kirschner & Vlcek, 2002; Guo et al., 2009; Sawyer et al., 2012; Choi, Lee & Shipunov, 2015; Schiebelhut et al., 2016; Abdel-Latif & Osman, 2017). Even parameters that are known to impact extraction success are rarely quantified when biodiversity collections fulfill requests for access to genetic material. For example, tissue mass is known to be strongly correlated with extraction success (Hykin, Bi & McGuire, 2015) and has been shown to be correlated with extracted DNA concentration (Reineke, Karlovsky & Zebitz, 1998; Choi, Lee & Shipunov, 2015) but collections staff and researchers generally use a coarse visual estimate when removing tissue subsamples and rarely obtain quantitative size or mass data. It is not currently common practice to standardize tissue mass prior to DNA extractions (Wilcox et al., 2002; Aguirre-Peñafiel et al., 2014; Naccarato, Dejarnette & Allman, 2015) or to report masses if they were standardized (Kayes et al., 2013) except in experiments to compare various protocols or methods (Drabkova, Kirschner & Vlcek, 2002; Guo et al., 2009; Abdel-Latif & Osman, 2017; Yalçınkaya et al., 2017). In publications, researchers tend to qualitatively report the amount of starting material with phrases such as “two small pieces” or “usually minute” (Hajibabaei et al., 2005; Jaksch et al., 2016).
The goal of the present study is to develop guidelines for more sustainable use of genetic resources in biodiversity collections, with a focus on determining the optimal amount of tissue for DNA extraction from amphibian tissue samples. In our first experiment we test whether the coarse visual estimates of tissue mass or size that are used by most collections staff who fulfill requests for access to genetic resources are capable of consistently yielding sufficient DNA for modern downstream sequencing applications. In our second experiment, we identify the tissue masses that result in the most efficient use of archived samples by conducting controlled extractions across a range of samples with known masses. In our third experiment, we test consistency of extraction success across replicate subsamples of a mass that appears to optimize yield while minimizing depletion of the archived samples during a single extraction. In our fourth and final experiment, we test whether our protocol is suitable for samples archived over a 25-year interval from 1984 (around the time collections started accumulating samples preserved specifically for use in molecular genetic studies) until 2001. Given the nature of natural history collections, it is probable that researchers will need to work with tissues of a variety of ages. Previous studies of bone and plant tissues have not recovered a significant correlation between DNA yield and tissue age (Sawyer et al., 2012; Choi, Lee & Shipunov, 2015), and, to our knowledge, previously published studies have not tested the correlation between age and total DNA yields using cryogenically preserved soft tissues from vertebrates. However, one study of herpetological specimens found a significant decrease in recovered sequence length as tissue age increased (Chambers & Hebert, 2016).
Materials and Methods
Sampling
We conducted all our experiments on amphibian tissue samples from the herpetological collection at the University of Kansas Biodiversity Institute. With more than 40,000 tissue samples in cryogenic storage, this collection is among the largest archives of its kind. This collection is also widely used by the scientific community, with more than 75 requests for access to genetic resources resulting in subsampling of more than 1,100 archived samples over the past 5 years. We focused on liver and muscle tissue because these tissues are the most abundant in biodiversity archives and are usually the standard tissue types collected in the field. Tissues were initially preserved using one of two strategies: immersion in high concentration ethanol or flash freezing in liquid nitrogen. Subsequent to initial preservation, samples were stored in a cryogenic facility, either in mechanical ultra-cold freezers at −80 °C (experiments 1–3) or liquid nitrogen cooled dewars at −180 °C (experiment 4).
Tissue extraction protocol
The majority of the tissues used in this experiment were stored in ethanol solution. Tissues that had been flash frozen and were not stored in ethanol solution were transferred to a 95% ethanol solution and allowed to thaw to −80 °C such that all tissues were under the same conditions at the time of massing. All tissues were next removed from ethanol and the ethanol was allowed to evaporate for up to 2 min to limit the contribution of ethanol to inferred tissue mass. Each tissue was subsampled with a sterile razor blade until the mass was within 0.5 mg of the target mass as measured by a Mettler Toledo XS105DU analytical balance (in eight cases, masses more than 0.5 mg under the target mass were used because there was not adequate tissue remaining for the full amount, see additional details below). Tissues were then placed in a solution of 10 μL proteinase K and 190 μL lysis buffer and incubated at 55 °C for approximately 24 h (several of the larger masses required longer incubation times for complete tissue digestion as determined by the absence of solid tissue pieces in the solution). Tissue solutions were vortexed once at the start of the incubation period for 10 s and one to three times at the end of the incubation period depending on the level of tissue digestion.
The extractions in this experiment were performed using the Promega Maxwell RSC Instrument (Promega Corporation, Madison, WI, USA). The Maxwell RSC uses paramagnetic particles along with magnetic plungers to lyse and capture DNA along with specialized reagents provided in single use cartridges (Kephart et al., 2006). Aside from lysis and transfer to a sterile Eppendorf tube for quantification and storage, the extraction process is entirely automated and occurs inside the instrument. This method was chosen for our experiments for three reasons, and in spite of the fact that the method has relatively high costs both in terms of initial investment in the machine (>$20,000) and for individual extractions (~$8 per cartridge) as of June 26, 2019. First, a recent comparative analysis of commonly used extraction protocols found that the Promega paramagnetic particle method results in particularly high DNA yields, high sample efficacy (measured in the success of PCR), and low error (Schiebelhut et al., 2016). Second, this automated extraction method allows for a high degree of uniformity across multiple trials and reduces the human error inherent in manual protocols. Third, the Promega RSC instrument relies on sterile individual use cartridges, a drip-free protocol, and includes an automated UV sterilization of internal components following each extraction, which collectively minimize the potential for contamination.
In our study, we used the Promega blood DNA purification kit (Promega product ID: AS1010). We followed the manufacturer’s procedures (Maxwell(R) RSC Blood DNA Kit technical manual TM419; Promega, Madison, WI, USA) during the extraction except that elution buffer volume was doubled to 100 μL because at lower volumes the quantity of DNA could not be read by a fluorometer as DNA concentrations were too high. After extraction was completed, quantifications were performed using a Promega Quantus fluorometer.
Experiment 1: Testing the effectiveness of the “eyeball” method for obtaining tissues appropriate for extraction
We first conducted a preliminary experiment to determine if coarse visual assessment of tissue mass (i.e., the “eyeball” approach to tissue quantification used by most biodiversity collections staff) is capable of sampling tissues that result in consistent DNA yields that are sufficient for modern downstream DNA sequencing applications. The concentration and amount of DNA required for sequencing depend on the sequencing method used, ranging from less than 10 ng of DNA for Sanger sequencing a single DNA fragment to 500 ng for Illumina Truseq-style library preparation (Hutter et al., 2019) to over 1,000 ng for high coverage sequencing of an entire vertebrate genome via the Illumina platform (Arbor Biosciences, 2019). Because 1,000 ng is at the high end of the amount used for standard sequencing methods applied to typical vertebrate genomes (including whole genome sequencing and popular methods such as RADseq and probe capture), we used this amount as our threshold for establishing extraction success.
For this experiment, two experienced scientists (Drs. Carl Hutter and Shea Lambert) attempted to consistently subsample tissues with a mass considered sufficiently large for DNA extraction based on prior experience. Tissue subsamples obtained in this manner were then weighed prior to extraction and quantification. Although the researchers knew that their subsamples were being massed, they were asked to subsample per their normal procedures and were not given any feedback about the masses of their samples. Following extraction, we tested whether each sample passed our 1,000 ng minimum threshold for successful extraction. We also tested the basic prediction that tissue mass is correlated with DNA yield using a Pearson’s correlation test. Finally, we tested reliability of “eyeball” estimates of tissue mass by estimating variance in both the mass and DNA yield of resulting subsamples.
Experiment 2: Identification of optimal tissue mass for effective and efficient extraction
Our second experiment focused on identifying the optimal tissue masses for DNA extraction, which we define here as the masses that result in high DNA yield per unit tissue mass and high overall DNA yield. For this experiment, we conducted a total of 123 extractions from tissue samples of nine different masses: 1, 2, 4, 8, 10, 12, 14, 16, and 20 mg. This range was chosen because 1 mg was determined to be the smallest mass that could be reliably manipulated by the experimenter and 20 mg was the maximum mass recommended by our extraction protocol. Tissues were assigned to a sample mass if they were within 0.5 mg of the target mass. In eight cases, there was insufficient tissue to subsample the desired tissue mass and the actual subsample mass was therefore more than 0.5 mg outside the targeted masses. In these instances, tissues were placed in the category to which they were closest, and all were less than 1.2 mg from the target mass. Tissue samples for this experiment were 24 liver tissue samples obtained from Malagasy frogs in 2016, all but one of which were from the family Mantellidae and one of which was from Ranidae. Each tissue was sampled 4–12 times at various masses depending on the total tissue mass of the original sample. All of the samples used in this experiment were initially preserved in ethanol and stored at room temperature for a period of several weeks and up to 2 months before being transferred to cryogenic storage in either a mechanical ultracold freezer (−80 °C) or a liquid nitrogen cooled dewar (−180 °C). In each extraction run, four tissues each with four subsamples were extracted for a total of 16 extractions. The data were analyzed using a least squares regression to fit a trend line.
Experiment 3: Consistency of extraction yield at an optimal mass
Our third experiment assessed the consistency of extraction yield from tissue subsamples at a sample mass identified in Experiment 2 that results in both high DNA yield per unit mass and high overall DNA yield without involving masses so large as to permit only one or two extractions from small tissue samples. The subsample mass that best met these criteria was 8 mg. Because this experiment required four subsamples of 8 mg from each tissue, large samples such as those from Mantellidae were needed. Six Mantellidae tissues were sampled for a total of 32 subsamples (two tissues were used twice due to a lack of suitable tissues). In each extraction run, four tissues each with four subsamples were extracted for a total of 16 extractions.
Experiment 4: Impact of age on extractions using the optimal mass
The fourth experiment was conducted using 44 historical anuran samples including both ethanol preserved and flash frozen samples. These samples belonged to several different frog families: Bufonidae (three samples), Dendrobatidae (10), Hylidae (17), Leptodactylidae (11), and three from unknown families. These tissues ranged in collection date from 1984 to 2001 and included both liver and muscle tissue. We sampled, extracted, and quantified 8 mg of each tissue using the same procedure as described above. Data was analyzed using a Pearson’s correlation test.
Results
Experiment 1: Testing the effectiveness of the “eyeball” method for obtaining tissues appropriate for extraction
We found that coarse visual estimates of tissue subsamples resulted in a wide range of resulting tissue masses (0.65–14.93 mg). The mean mass was 3.33 mg with a standard deviation of 3.32 mg. All but the smallest of the tissues extracted during this experiment resulted in DNA yields that exceeded our 1,000 ng threshold. We also found that DNA yield is significantly positively correlated with original tissue mass (Pearson correlation test: t = 5.2299, r = 0.7600, df = 20, p-value < 0.001, Fig. 1). Additionally, when the three samples with the greatest mass were removed from the analysis, DNA yield was still found to be significantly positively correlated with tissue mass (Pearson correlation test: t = 2.3112, r = 0.4890, df = 17, p-value = 0.0336, Fig. 1).
Experiment 2: Identification of optimal tissue mass for effective and efficient extraction
In the second experiment, we recovered a non-linear relationship between tissue mass and both concentration and total DNA yield (Fig. 2). The smallest tissue subsamples (1, 2, and 4 mg) yielded a mean of 76.8 ng/µL of DNA. The intermediate tissues (8, 10, and 12 mg) yielded a mean of 123.5 ng/µL of DNA. The largest tissues (14, 16, and 20 mg) yielded a mean of 144.6 ng/µL of DNA. These data were best fit by the natural log equation y = 3,317.2 * ln(x) + 5,030.3 (R2 = 0.29, p-value < 0.0001). The relationship between tissue mass and DNA concentration shows a gradual decrease in the DNA gained per mg of tissue as the total tissue mass increases. While the natural log function does not have an asymptote, it may reach a point where the extra DNA that could be obtained is so little that it is not worth the additional destructive use of limited tissue resources. The intermediate and large tissue masses (8 mg and higher) also tend to result in higher overall DNA yields. Although these masses tend to result in both higher DNA concentrations and higher overall DNA yields, yield per unit mass is greatest for the small tissues, with a mean of 3,444.5 ng DNA/mg tissue, as compared to 1,288.7 ng DNA/mg tissue for intermediate masses and 922.6 ng DNA/mg tissue for large masses. These data were best fit by the natural log equation y = −1,337 * ln(x) + 4,470.3 (R2 = 0.55, p-value < 0.0001).
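As a rough illustration of this trade-off (our own numerical example based on the fitted equations above, not part of the original analysis, which was run in R; we interpret the first fit as total yield in ng, i.e., concentration times the 100 μL elution volume), the two curves can be evaluated at a few candidate masses:
import math

def total_yield_ng(mass_mg):
    # Total DNA yield fit reported above: y = 3,317.2 * ln(x) + 5,030.3
    return 3317.2 * math.log(mass_mg) + 5030.3

def yield_per_mg(mass_mg):
    # Yield per unit tissue mass fit reported above: y = -1,337 * ln(x) + 4,470.3
    return -1337.0 * math.log(mass_mg) + 4470.3

for mass in (1, 2, 4, 8, 12, 20):
    print(f"{mass:>2} mg: ~{total_yield_ng(mass):,.0f} ng total, "
          f"~{yield_per_mg(mass):,.0f} ng per mg of tissue")
Under these fits, doubling the subsample from 2 mg to 4 mg adds roughly 2,300 ng of total yield while the efficiency drops by roughly 900 ng per mg, which is the pattern summarized by the group means above.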
Experiment 3: Consistency of extraction yield at optimal mass
The third experiment further analyzed the precision of using 8 mg of tissue. We conducted 32 extractions from mantellid tissue subsamples, 28 of which were retained for analysis. One tissue and its four corresponding subsamples were discarded from this analysis because they resulted in DNA concentrations that were significantly lower than those for all other tissues (Tukey Honest Significant Differences, p-values 2.07E−7 to 2.96E−2); we suspect that this tissue was degraded and does not contain sufficient quantities of DNA to result in useful yields following standard DNA extraction methods. The mean DNA concentration from samples extracted during this experiment was 133.75 ng/µL with a mean yield of 13,375 ng of DNA. The mean standard deviation of DNA concentration among subsamples of the same tissue was 19.12 ng and the mean range was 41.86 ng.
Experiment 4: Impact of age on extractions using the optimal mass
The fourth experiment tested whether the age of tissue samples impacts the expected relationship between sample mass and DNA yield for 44 archival tissues. The average mass of tissue used in this experiment was 7.86 mg with an average yield of 104.56 ng/µL of DNA. This experiment found no correlation (Pearson correlation: r = −0.06, p-value = 0.6904) between the age of a tissue sample and the concentration of DNA yielded (Fig. 3).
Discussion
The goal of our study was to develop guidelines for sustainable use of tissue samples archived in biodiversity collections that are destructively subsampled for DNA extraction. We found that current tissue sampling methods involving coarse visual assessment of tissue size generally yield sufficient DNA for modern downstream applications. However, the actual yield from samples obtained via the “eyeball” method is highly variable, and, because tissue mass is correlated with DNA yield, massing tissues prior to extraction will increase consistency and efficiency. Intermediate and large tissue masses yielded comparable concentrations of DNA, but small tissue masses had the greatest DNA yield per unit mass. Additionally, sample age was not correlated with DNA yield.
In our first experiment, we showed that the methods currently used by many biodiversity archives, which involve coarse visual estimates of tissue amounts that are considered sufficient for DNA extraction based on prior experience, generally yield more than enough DNA for most modern downstream applications, including whole genome sequencing. However, we also found that tissues subsampled in this manner do not produce consistent amounts of DNA because they encompassed a wide range of masses (0.64–14.93 mg), and DNA yield is strongly correlated with mass. Overall this experiment suggests that use of archived tissue samples would be more efficient if tissues were massed prior to distribution. Of course, this strategy does not come without costs. First, quantification of tissue subsample mass requires a significant additional investment in handling time and access to an expensive analytical balance capable of accurately weighing samples in the 1–20 mg range. As with any increase in handling time, this approach may also result in accelerated degradation of archived samples. However, the benefits of standardization may outweigh these costs, particularly in the case of samples that are only available in limited quantities.
Generally speaking, standardization of tissue masses provided to researchers for extraction will improve the process of intercollection tissue loans because loanees will be sure to receive a quantity of tissue that will result in the required quantity of DNA. The need for an overall standard tissue loan procedure has been previously highlighted (Droege et al., 2014) and we believe that, given the strong correlation between tissue mass and DNA yield, standardization of tissue mass could be one important step in this direction. Given the varying specimens housed in different tissue collections, researchers often require tissue loans from other institutions in order to complete their work. It is expected that these tissues will yield sufficient DNA for experimentation, but often collections do not wish to part with the last pieces of a tissue sample. A survey of 45 institutions with genetic resource holdings revealed that none of the 93% of institutions that offered loans sent loanees the entire tissue sample, and amount of tissue sent varied between institutions (Zimkus & Ford, 2014). For example, 25% of collections reported sending enough tissue for two extractions and 9% sent enough for three extractions, but only 21% of institutions quantified tissue sent (either by volume or mass). The loan procedures posted on the websites of seven major herpetological collections in the United States (Berkeley Museum of Vertebrate Zoology, California Academy of Sciences, Museum of Comparative Zoology at Harvard University, Smithsonian Museum of Natural History, University of Florida, University of Kansas, and University of Texas) revealed that these collections provided detailed and well-defined loan procedures for whole animal specimens, but generally provide little detail on procedures for providing genetic resources. Correspondence with collections managers at these institutions revealed a variety of approaches and techniques for determining the amount of tissue to provide researchers requesting access to genetic resources, including qualitative visual assessment, tissue volume, the minimum tissue required for the proposed project, and approximate mass (C. Huddleston, L. Scheinberg, C. Spencer, B. Zimkus, 2019, personal communications). Standardization of tissue masses would allow loanees to receive a previously agreed upon tissue mass that has been shown to yield appropriate amounts of DNA for their proposed downstream applications, while loaners can improve sustainable use of their tissue collections by only loaning the required amount of tissue.
In our second experiment, we recovered a non-linear increase in DNA concentration and total yield with increasing tissue mass, with the smallest masses resulting in considerably lower concentrations and yields than intermediate or large tissue masses. However, the yield per unit of starting tissue mass, a measure of how efficiently we are recovering DNA from the original tissue sample, is highest at the smallest masses and declines dramatically with tissue sizes greater than 2 mg. For this reason, the decision about which mass is optimal for extraction will depend on a range of factors including the desired application and the total amount of tissue available. For samples available in only very limited quantities, extractions using only 2 mg of tissue will often be ideal because they generally result in sufficient DNA for most downstream sequencing applications while optimizing efficient use of the available material by maximizing DNA yield per unit tissue used (Fig. 2). In cases where larger initial tissue samples are available, it may be preferable to use somewhat larger tissue masses for extraction because masses of 8 mg and larger tend to produce considerably higher DNA concentrations and overall yields than small starting masses. In most cases, a single extraction of a larger tissue that produces somewhat lower yields per unit tissue mass than smaller masses will generally be preferable to repeated extractions of smaller samples due to the significant increases in handling time and other expenses associated with extraction. We recommend subsampling more than 2 mg of tissue when removing samples from biodiversity archives for DNA extraction, depending on the amount of material available. Of course, the optimal tissue mass for DNA extraction will depend on the extraction method being utilized and also the intended downstream applications. For this reason, our results are specific to use of the Promega Maxwell platform. Additional work is required to determine the optimal tissue mass to subsample when other extraction methods are being employed. However, it is likely that all these methods will exhibit increased concentration and yield with tissue masses that are larger than the minimum that can be manipulated.
Our fourth experiment suggests that concentration and yield from samples obtained over a 25-year interval are not significantly correlated with age, reflecting previous findings that extraction quality is not correlated with age (Sawyer et al., 2012; Choi, Lee & Shipunov, 2015). This suggests that the same masses identified as being ideal for extraction of recent samples are also appropriate for historical samples. However, we did not evaluate other important factors influenced by age such as fragmentation, which might have similar yields with increasing age, but higher fragmentation.
We primarily focused on tissue types that are most commonly housed in biodiversity archives and used for extractions. We therefore did not analyze several other sources of genetic material in natural history collections, namely formalin-fixed specimens and tissue samples treated with RNAlater. Previous work has attempted to extract high quality DNA from formalin-fixed specimens with varying results (Hykin, Bi & McGuire, 2015; Jaksch et al., 2016). Because the procedures used for these types of extractions are more involved and less often used, we chose not to include any formalin-fixed tissue subsamples in our study but recommend repeating our study with these specimens once extractions procedures are better developed. Conversely, specimens treated with RNAlater are often deliberately collected fresh from the field for a specific hypothesis. These specimens are often used for RNA-Seq applications to assess variation in gene expression in different tissue types (Wang, Gerstein & Snyder, 2009), but are increasingly used for DNA work as well. These extractions were not included in this study due to the lack of these tissues in the University of Kansas herpetological collection and also because RNA extraction protocols have many more variables to consider (e.g., time to freezing, freezing temperature, amount of RNAlater used, freeze-thaw cycles). Further research is needed to determine if the results of our study also apply to RNAlater treated tissue samples.
Conclusions
Our experiments analyzed current practices in tissue subsampling and DNA extraction in biodiversity collections. We found that extractions using 2–8 mg of tissue were the most efficient and did not recover a strong correlation between DNA yield and tissue age. Two specific recommendations for improving sustainable use of genetic resources in biodiversity archives emerge from our study. Our first recommendation could be achieved with relatively minor adjustments to existing loan procedures while the second would require a dramatic change in how biodiversity archives provide researchers with access to genetic resources.
First, we discussed in detail the potential value of providing researchers with tissue samples of known mass. By standardizing the mass of tissues provided as gifts to researchers, the loaning institution will be better able to ensure that researchers are provided with sufficient material while also being able to make more informed decisions about how limited resources are destructively sampled.
Our second recommendation derives from our finding that even very small quantities of tissue often produce far more DNA than is required for most applications. For example, we found that tissue subsamples weighing 8 mg tend to yield more than 13 times the amount of DNA that is required even for whole genome shotgun sequencing. In most cases, excess DNA obtained by researchers who receive tissue loans is discarded. Even in cases where institutions are capable of archiving extracted DNA and request return of unused material, this rarely happens in practice because it is very difficult to enforce such requests. As a result, the current practice of providing researchers with even very small tissue samples from permanently archived material for use in individual sequencing projects results in highly non-optimal use of limited archived resources. In the case of the University of Kansas herpetological collections, we are increasingly finding that popular tissue samples have been nearly or completely exhausted after providing multiple prior tissue gifts to researchers. In many cases, these researchers sequenced only one or a few loci via Sanger sequencing, meaning that we provided them with orders of magnitude more irreplaceable genetic material than was necessary for their work.
One possible solution to this extremely inefficient use of archived resources is to end the practice of providing researchers directly with subsamples of archived tissues and to instead provide researchers with only the amount of extracted DNA that is required for their particular application. For example, in the case of a project involving Sanger sequencing of one or two loci, a biodiversity archive could send the researchers 50–100 ng of extracted DNA instead of a destructively subsampled piece of tissue that is expected to yield 10,000 ng of DNA. Rather than resulting in researchers discarding large quantities of irreplaceable DNA, this practice would lead to archiving this material so that it could then fulfill subsequent requests for genetic material from the same specimen. However, this would require DNA extraction by biodiversity archive staff followed by quantification and provision of the appropriate amount of DNA for the researcher’s required application. It would also require biodiversity collections to develop archival collections of not only tissues, but also extracted genomic DNA.
Although this approach could result in considerably more sustainable use of limited tissue resources, it does not come without substantial costs. First, it would require that staff at biodiversity collections extract and quantify DNA rather than merely sending a tissue sample. In many cases the staff responsible for preparing tissue loans will not have the requisite expertise, access to the necessary laboratory facilities, or time. Second, in-house extraction would require new protocols and facilities for archiving extracted DNA. Whether these costs are worthwhile will depend on the amount of material available and how heavily it is used by the research community. In the case of the University of Kansas herpetological collections, we now provide researchers only with an amount of extracted genomic DNA required for their research because we are finding that a significant number of samples in our archive have been used to the point that little or no tissue remains. We recommend that other biodiversity collections experiencing such over-use consider adopting a similar approach because it will radically improve sustainable use of genetic resources.
Supplemental Information
Tissue sample raw data for all experiments.
This document includes data on each tissue subsample used in the experiment. This information includes the tissue identity, age, tissue type, mass, and DNA yield as well as the experiment/trial in which it was used.
DOI: 10.7717/peerj.8369/supp-1
R code used to run analyses for all experiments.
DOI: 10.7717/peerj.8369/supp-2
|
Board Thread:Human Roleplay/@comment-28578706-20161125174502/@comment-30898714-20161127151340
OOC: I MENT BRIAR IM SO SORRY XDDDDDDDDD
''Briar sighed. " I'll protect it. No matter what. "''
''Zioni snorted. " Pfft. "''
|
# frozen_string_literal: true
require 'helpers/parse_time_ranges'
RSpec.describe Helpers::ParseTimeRanges do
context 'with one range' do
let(:input_string) { '(11:00-12:00) - (11:00-11:30)' }
let(:first_ranges) { [[11 * 60, 12 * 60]] }
let(:second_ranges) { [[11 * 60, 11 * 60 + 30]] }
it 'returns parsed values' do
expect(call).to eq([first_ranges, second_ranges])
end
end
context 'with two ranges' do
let(:input_string) { '(9:00-9:30, 10:00-10:30) - (9:15-10:15)' }
let(:first_ranges) { [[9 * 60, 9 * 60 + 30], [10 * 60, 10 * 60 + 30]] }
let(:second_ranges) { [[9 * 60 + 15, 10 * 60 + 15]] }
it 'returns parsed values' do
expect(call).to eq([first_ranges, second_ranges])
end
end
def call
subject.call(input_string)
end
end
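# For reference (not part of the original spec): a minimal sketch of what the
# Helpers::ParseTimeRanges implementation might look like, assuming the
# "(range[, range...]) - (range[, range...])" input format and the
# minutes-since-midnight output used in the examples above. The real helper
# may differ.
module Helpers
  class ParseTimeRanges
    def call(input_string)
      input_string.split(' - ').map { |group| parse_group(group) }
    end

    private

    # "(9:00-9:30, 10:00-10:30)" => [[540, 570], [600, 630]]
    def parse_group(group)
      group.delete('()').split(',').map do |range|
        range.split('-').map { |time| to_minutes(time) }
      end
    end

    # "9:15" => 555
    def to_minutes(time)
      hours, minutes = time.strip.split(':').map(&:to_i)
      hours * 60 + minutes
    end
  end
end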
|
Mechanism of pseudo buffer pH maintenance
Some solutions, like concentrated HCl, act as buffers even though they lack the buffering component. What is the cause for such a behaviour?
I do wonder what your question is, but I have a hunch. Assuming that what you’re asking is essentially:
In a concentrated $\ce{HCl}$ solution, the $\mathrm{pH}$ will more or less be the same value even if we add external $\ce{H+}$ or $\ce{OH-}$. Why?
Then there are multiple effects at work:
Aqueous $\ce{HCl}$ contains a buffering component — the $\ce{H3O+}$ (or closest relative) ion. It is the most acidic ion that can be present in solution, so it ‘buffers down’ the acidity of said solution to what the corresponding $\ce{H3O+}$ acidity would be.
$\ce{HCl}$ in itself is an acid, but at the same time $\ce{Cl-}$ is also a (very weak) base. For high total $\ce{HCl}$ concentrations, the ratio of $\ce{HCl}$ to $\ce{Cl-}$ is at equilibrium. If we add additional $\ce{H+}$ to the mixture, we are effectively shifting that equilibrium back to the $\ce{HCl}$ side, reducing the proton count and thus buffering the $\mathrm{pH}$. Therefore also, if we add a proton scavenger (colloquially also known as ‘base’), then that will shift the equilibrium to the $\ce{Cl-}$ side, and as long as there is still enough $\ce{HCl}$ around, the total proton concentration won’t change notably.
In essence, everything that can act as a Brønsted acid or base will buffer at a specific $\mathrm{pH}$ value, even if that $\mathrm{pH}$ value is far from what one would typically call ‘buffered’.
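To put a rough number on it (a worked illustration added here, not part of the original answer, and ignoring activity effects): take $1\ \mathrm{L}$ of $1\ \mathrm{M}$ $\ce{HCl}$, for which $\mathrm{pH} \approx -\log_{10}(1) = 0$. Adding $0.1\ \mathrm{mol}$ of $\ce{NaOH}$ consumes $0.1\ \mathrm{mol}$ of $\ce{H3O+}$, leaving about $0.9\ \mathrm{M}$, so
$$\mathrm{pH} \approx -\log_{10}(0.9) \approx 0.05.$$
The same $0.1\ \mathrm{mol}$ of hydroxide added to $1\ \mathrm{L}$ of pure water would move the $\mathrm{pH}$ from $7$ to $13$. The concentrated acid resists the change simply because its proton reservoir is enormous compared with the added base, which is the ‘pseudo buffer’ behaviour described above.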
|
[Congressional Record (Bound Edition), Volume 147 (2001), Part 2]
[House]
[Page 2192]
PRAYER
The Chaplain, the Reverend Daniel P. Coughlin, offered the following
prayer:
Lord, God of history and ever-present, You sent Your prophet Isaias
to Your people when they were in need of hope and vision.
May Isaias' prophetic words guide us still. Send Your spirit upon
this Nation and this Congress, that we may be open to hearing Your word
and actively seek the salvation You alone can bring.
Make of us a people of compassion and holiness. In pursuing the
avenues of justice for all, may we be a sign to the community of
nations.
Help us to work toward the complete fulfillment of the deepest human
hopes and Your inspiring promises.
With humility let us embrace our calling; to be truly prophetic, as
Your servants of old, by earnestly fulfilling Your commands now and
forever. Amen.
____________________
|
package model
import (
"fmt"
"os"
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/go-pg/pg/orm"
"golang.org/x/crypto/bcrypt"
)
type BaseModel struct {
CreatedAt *time.Time `json:",omitempty"`
UpdatedAt *time.Time `json:",omitempty"`
}
func (bm *BaseModel) BeforeInsert(db orm.DB) error {
now := time.Now()
if bm.CreatedAt == nil {
bm.CreatedAt = &now
}
if bm.UpdatedAt == nil {
bm.UpdatedAt = &now
}
return nil
}
func (bm *BaseModel) BeforeUpdate(db orm.DB) error {
now := time.Now()
bm.UpdatedAt = &now
return nil
}
type JwtToken struct {
Token string `json:"token"`
}
type Exception struct {
Message string `json:"message"`
}
type User struct {
Id int64 `json:",omitempty"`
Username string `sql:",unique,notnull" json:",omitempty"`
Email string `sql:",unique,notnull" json:",omitempty"`
Password string `json:",omitempty"`
PhoneNumber string `json:",omitempty"`
Connections []*Connection `pg:",many2many:user_connections" json:",omitempty"`
Comments []*Comment `json:",omitempty"`
Collections []*Collection `json:",omitempty"`
Resources []*Resource `json:",omitempty"`
BaseModel
}
func (u User) String() string {
return fmt.Sprintf("User<%d %s %s>", u.Id, u.Username, u.Email)
}
func (u *User) BeforeInsert(db orm.DB) error {
if err := u.BaseModel.BeforeInsert(db); err != nil {
return err
}
hashed, err := u.HashPassword()
if err != nil {
return err
}
u.Password = hashed
return nil
}
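// Note: BeforeUpdate re-hashes whatever value is currently stored in Password.
// Callers are expected to set the plain-text password before saving; otherwise
// an already-hashed value would be hashed a second time.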
func (u *User) BeforeUpdate(db orm.DB) error {
if err := u.BaseModel.BeforeUpdate(db); err != nil {
return err
}
hashed, err := u.HashPassword()
if err != nil {
return err
}
u.Password = hashed
return nil
}
// HashPassword hashes the password before storage in the database
func (u *User) HashPassword() (string, error) {
bytes, err := bcrypt.GenerateFromPassword([]byte(u.Password), 10)
return string(bytes), err
}
// CompareHashAndPassword compares the stored hash with a plain-text password
func (u *User) CompareHashAndPassword(password string) bool {
err := bcrypt.CompareHashAndPassword([]byte(u.Password), []byte(password))
return err == nil
}
// GenerateToken generates an authorization token (JWT) for the user
func (u User) GenerateToken() (string, error) {
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
"userId": u.Id,
"username": u.Username,
"email": u.Email,
"phoneNumber": u.PhoneNumber,
"iss": os.Getenv("ISSUER"),
"exp": time.Now().Add(time.Hour * 24).Unix(),
})
HMACSecret := os.Getenv("JWT_SECRET")
tokenString, err := token.SignedString([]byte(HMACSecret))
if err != nil {
return "", err
}
return tokenString, nil
}
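// Example usage (a hypothetical sketch, not part of the original package):
// authenticate a user and then verify the resulting token, reusing the same
// JWT_SECRET environment variable assumed above.
func (u *User) Authenticate(password string) (string, error) {
	if !u.CompareHashAndPassword(password) {
		return "", fmt.Errorf("invalid credentials for %s", u.Username)
	}
	return u.GenerateToken()
}

// VerifyToken parses a token produced by GenerateToken and returns its claims.
func VerifyToken(tokenString string) (jwt.MapClaims, error) {
	token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		// Reject tokens signed with an unexpected algorithm.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return []byte(os.Getenv("JWT_SECRET")), nil
	})
	if err != nil || !token.Valid {
		return nil, fmt.Errorf("invalid token: %v", err)
	}
	return token.Claims.(jwt.MapClaims), nil
}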
type Connection struct {
Id int64 `json:",omitempty"`
InitiatorId int64 `sql:"unique:connected_users" json:",omitempty"`
RecipientId int64 `sql:"unique:connected_users" json:",omitempty"`
Recipient *User `json:",omitempty"`
Initiator *User `json:",omitempty"`
Users []User `pg:",many2many:user_connections" json:",omitempty"`
BaseModel
}
type Message struct {
Id int64
Content string
Connection *Connection
BaseModel
}
func (m Message) String() string {
return fmt.Sprintf("Message<%d %s>", m.Id, m.Content)
}
type Comment struct {
Id int64
UserId int64 `sql:",notnull"`
ResourceId int64 `sql:",notnull"`
Text string `sql:",notnull"`
Likes int64 `sql:",notnull"`
Resource *Resource `json:",omitempty"`
BaseModel
}
func (c Comment) String() string {
return fmt.Sprintf("Comment<%d %s %d>", c.Id, c.Text, c.UserId)
}
type Resource struct {
Id int64
UserId int64 `sql:",notnull" json:",omitempty"`
Title string `sql:",notnull" json:",omitempty"`
Link string `sql:",unique,notnull" json:",omitempty"`
Privacy string `sql:",notnull" json:",omitempty"`
Type string `sql:",notnull" json:",omitempty"`
Views int64 `json:",omitempty"`
Recommendations int64 `json:",omitempty"`
User *User `json:",omitempty"`
Comments []*Comment `json:",omitempty"`
Tags []*Tag `pg:",many2many:resource_tags" json:",omitempty"`
BaseModel
}
func (r Resource) String() string {
return fmt.Sprintf("Resource<%d %s %s>", r.Id, r.Title, r.Link)
}
type Collection struct {
Id int64
Name string `sql:",unique,notnull"`
UserId int64
Resources []*Resource
Tags []Tag `pg:",many2many:collection_tags"`
BaseModel
}
func (c Collection) String() string {
return fmt.Sprintf("Collection<%d %s>", c.Id, c.Name)
}
type Tag struct {
Id int64
Title string `sql:",unique,notnull"`
}
func (t Tag) String() string {
return fmt.Sprintf("Tag<%d %s>", t.Id, t.Title)
}
type ResourceTag struct {
TagId int64 `sql:",pk"`
ResourceId int64 `sql:",pk"`
}
type CollectionTag struct {
TagId int64 `sql:",pk"`
CollectionId int64 `sql:",pk"`
}
type UserConnection struct {
UserId int64 `sql:",pk"`
ConnectionId int64 `sql:",pk"`
}
type Recommendation struct {
ResourceId int64 `sql:",pk"`
UserId int64 `sql:",pk"`
}
type ResourceCollection struct {
ResourceId int64 `sql:",pk"`
CollectionId int64 `sql:",pk"`
}
|
User blog:Henry Ivth/Inkwell Story Contest Entry
Animal Jam related prizes.
I was told to write a story, I thought about writing the story of my life, but then I realised my lights may turn out in a few years or so. So I wanted to write my heart out before I go down, or maybe up, who knows, but no, it has to be about AJ. I was thinking of writing a story about my obsessive lemur, myself currently, but I don't like to write in first person, or I feel like the devil, and maybe I am. I will pour my mind, and blood, on to this screen whilst I can. I can't trust myself, I NEED rares.
So, if you want to get hooked up, my text number is 666.
Donate to me for blood, and trust me, you WILL NOT think twice.
|
Plural input electron beam parametric amplifier
United States Patent 3,212,017
PLURAL INPUT ELECTRON BEAM PARAMETRIC AMPLIFIER
William M. Sackinger, Millport, N.Y., assignor to Zenith Radio Corporation, Chicago, Ill., a corporation of Delaware
Filed Feb. 18, 1963, Ser. No. 259,256
4 Claims. (Cl. 330-47)
The present invention is directed to an electron beam parametric amplifier and concerns in particular an amplifier of the type having a plurality of signal inputs. The invention is a further development of the parametric amplifier described and claimed in Patent No. 3,059,138 issued to Glen Wade on October 16, 1962, and assigned to the same assignee as the present invention.
In a parametric amplifier of the type here disclosed, the electron beam is projected along a predetermined path which terminates in a collector for the beam. The electrons in the beam, when subjected to the restoring force of a focusing field, oscillate about their respective rest positions at a frequency referred to as the transverse resonant frequency or, for the usual case wherein the focusing results from a magnetic field, at the cyclotron frequency. The electron motion in a beam for which electron resonance has been established may be modified in response to an applied signal to effect modulation of the beam by that signal. Accordingly, it is the usual practice to position an input coupler along the beam path adjacent the electron source for the purpose of modulating the beam with a signal to be amplified.
Amplification is accomplished by expanding the electron motion representing the signal conveyed by the beam and this expansion is achieved by subjecting the signal modulated beam to a non-homogeneous pumping field. The energy required for the amplification is delivered by a pump signal source which is connected to an appropriately-shaped electrode structure adjacent to the beam. For the quadrupole-type amplifier, the pumping field is created by a symmetrical quadrupole electrode structure as described in the afore-identified Wade patent.
After amplification in the modulation expander, the signal carried by the beam is extracted by means of an output coupler which, in the usual case, is the same type of structure which has been employed for modulating the beam with the signal to be amplified. In other words, the usual signal coupler has bi-directional properties; it is employed ahead of the modulation expander for the purpose of impressing a signal on the beam and is utilized after the expander to demodulate or extract the amplified signal from the beam.
Thus, as generally constructed, the quadrupole-type parametric amplifier has three distinct components positioned along the beam path, these being the input modulator or input coupler, the modulation expander, and the output demodulator or output coupler, arranged in the recited order. The above-identified Wade patent specifically discloses a combination of modulation expander and output coupler. This combined structure may be termed a pumpler. The improvement of this invention features a modification of the pumpler which makes possible a plural input amplifier.
Moreover, it is desired effectively to cascade amplification and input stages, to allow for a plurality of different input signals. However, inconsistent results would be obtained with present techniques because the first signal would be amplified more than the later applied signal.
Accordingly, it is a general object of the present invention to provide an improved plural input electron beam parametric amplifier.
A primary aim of the invention is to enable cascaded parametric amplification of a plurality of signals, each being given the same gain.
It is another object of the invention to provide a plural input amplifying system having improved phase stability, unidirectional properties, and improved performance with low complexity.
An electron beam parametric amplifier constructed in accordance with the invention comprises means for producing a magnetic field along a predetermined axis and for projecting an electron beam along the same axis through a region within the magnetic field. Input signal coupling means is provided for producing on the electron beam a cyclotron rotational motion corresponding to a first signal from a first signal source. The rotational motion has a predetermined cyclotron frequency and a predetermined exit radius. A combined signal coupling and modulation expanding device is disposed astride the beam downstream from the input coupling means and produces a rotating quadrupolar field component. The device presents a load conductance, to those electrons of the beam which are rotating substantially in phase with the purely tangential component of the rotating quadrupolar field, of a value to maintain the cyclotron rotational motion of such electrons at the predetermined exit radius. A second signal from a second signal source is concurrently coupled to the beam in the form of additional cyclotron motion which is superimposed on the cyclotron rotational motion corresponding to the first signal, the superimposed second signal being concurrently amplified exponentially by the combined device. A third coupler is provided for detecting the combined first and second signals.
The invention may also be expressed from another aspect as apparatus for amplifying electric waves according to the parametric principle. The apparatus comprises means for projecting an electron beam along a predetermined path and for imparting to electrons in the beam a first transverse motion representative of a first intelligence signal. A second transverse motion representative of a second intelligence signal is also imparted to the electrons in the beam. These electrons are subjected to a restoring-force field which produces periodic electron motion corresponding to the first and second signal and to the field. Means are also provided for adding, concurrently with the impartation of the second motion, a transverse periodic non-homogeneous restoring force field component to the restoring-force field, the component having a phase relationship with the above motions to deliver energy to components thereof in linear proportion to the amplitude of the components. Concurrently with the impartation of the second motion, energy, which was delivered to the first motion component, is extracted and dissipated in a resistive load. Finally, an output signal is derived from the resulting electron motions.
The features of the present invention which are believed to be novel are set forth with particularity in the appended claims. The invention, together with further objects and advantages thereof, may best be understood by reference to the following description taken in connection with the accompanying drawing in which the single figure is a schematic representation of a parametric amplifier embodying the present invention.
The theory of operation and structural details of a quadrupole type parametric amplifier are fully described in copending application Serial No. 747,764, now abandoned, filed July 10, 1958, in the name of Glen Wade and assigned to the assignee of the present invention, and also are described and claimed in copending application Serial No. 289,792, filed June 20, 1963, in the same name and assigned to the same assignee, the former having been abandoned expressly in favor of the latter continuation. Such devices by now have been in public use and are well understood by persons in this art. The function of the quadrupole field in achieving amplification is utilized by the modulation expander to be described herein. Moreover, the representative structural arrangements of a quadrupole type parametric amplifier disclosed in the earlier case are generally applicable in constructing physical embodiments of the device to be described. Accordingly, the representations of the figure annexed hereto are schematic and the structural description as well as the mode of operation will be discussed in much less detail in this text than in the Wade application.
Referring now more particularly to the single figure, the amplifying system there represented comprises an electron source for projecting an electron beam along a predetermined path designated by construction line 11, 11. The electron source may be entirely conventional and preferably includes the usual cathode together with suitable accelerating electrodes for developing a well defined beam or stream of electrons. Path 11 terminates on electron collector or anode 13 disposed transversely of the path.
Means for imparting to electrons in beam 11, 11 a transverse motion representative of a first intelligence signal f1 include a pair of Cuccia coupler plates 20 and 21. The first intelligence signal is fed to the Cuccia coupler from a signal source 22. A longitudinal magnetic field, designated by an arrow labeled H, is produced along beam path 11, 11 by a solenoid (not shown). This magnetic field produces a restoring-force field which, in conjunction with a signal from source 22, produces cyclotron rotational motion of the electrons. The cyclotron frequency of this motion is determined by the strength of the magnetic field H and the motion radius is determined predominantly by the magnitude of intelligence signal f1. Upon leaving coupler 20, 21, each individual orbiting electron retains its exit radius throughout the drift space between the coupler and the next component.
Following input coupler 20, 21 is a single structure 30 serving as a combined signal coupler and modulation expander, which may be termed a pumpler. It comprises four inwardly curved plates 31-34 disposed coaxially of beam path 11, arranged in two mutually perpendicular pairs 31, 33 and 32, 34. This structure is likewise positioned within the magnetic field H.
The above structure performs the function of modulation expansion by producing a transverse periodic nonhomogeneous restoring-force field component which is algebraically additive to magnetic field H. The quadrupole field is produced by energy from a pump signal source 35 of a frequency fp. As shown, one side of source 35 is coupled to plate 31 and, through a series resonant circuit 38, to plate 33. The other side of source 35 is coupled, through series resonant circuit 38', in parallel to plates 32 and 34. Series resonant circuits 38 and 38' exhibit a minimum impedance at the pump frequency fp. With a quadrupole field, the maximum tangential accelerating force is exerted on an electron when it is in phase with the maximum gain axis. With this phase relationship, termed the r+ mode, maximum exponential expansion of the electron orbit takes place. Energy is thereby delivered to the orbiting electron by means of an increase in its radius of rotation and in linear proportion to its initial radius. On the other hand, the r− mode denotes the phase relationship between the rotating electron and the quadrupole or pump field in which a maximum tangential decelerating force acts on an orbiting electron to exponentially decrease its radius. This is more fully discussed in the Wade application.
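Stated schematically (an illustrative restatement, not taken from the patent text): if an electron enters the pump region with orbit radius $r_0$ locked to the maximum-gain axis, the $r^{+}$ mode grows the radius roughly as $r(Z) \approx r_0 e^{+\alpha Z}$ along the pump length, where the gain constant $\alpha$ is set by the pump-field intensity, while an electron locked to the orthogonal axis (the $r^{-}$ mode) decays as $r(Z) \approx r_0 e^{-\alpha Z}$.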
A signal source 39 of a frequency f2 is also coupled across plates 31, 33. The load conductance seen by plates 31, 33 is represented as a lumped conductance designated G which is placed in parallel with signal source 39. At the frequency f2, plates 32 and 34 are isolated from conductance G by the open-circuit condition of circuit 38'.
Similarly, network 38 represents an open circuit at frequency f2. Following the combined signal coupler and modulation expander 30 is a third coupler, composed of plates 40 and 41, which removes or detects the combined first and second signals f1 and f2, which are transmitted to an output 42.
Dashed lines 50 indicate that several additional pumplers Q would normally be used, as will be explained below.
Operation
The novelty of the present invention resides in the pumpler structure 30 and, more specifically, in the adjustment of the value of load conductance G. From a broad standpoint, the purpose of the structure is to provide a parametric amplifier with plural inputs and to insure that, while one signal such as f2 is being added to the beam, a signal such as f1 which was previously placed on the electron beam is not affected by the addition of the second signal or by its amplification. From an ideal standpoint, signal f1 should be unaffected as to gain by its passage through device Q. If device Q acted only as a quadrupole, signal f1 would be amplified by the modulation expansion action of the quadrupole. If, on the other hand, device Q acted only as a coupler loaded by a conductance G, signal f1 would be attenuated. In accordance with the present invention, the conductance G and the intensity of the quadrupole pump field are balanced so that the amplitude of signal f1 remains unchanged.
To illustrate, a value of G will be computed for the one specific case in which an electron in beam path 11 is rotating in phase with the maximum gain axis of the quadrupole field of the pump structure. The two axes of maximum gain and maximum attenuation, as discussed above, may be designated with superscripts of plus and minus respectively. For the special case to be described the plus superscript will be emphasized.
The following is a mathematical derivation of the value of load conductance G of pumpler structure 30 which is necessary to meet the specific requirements set out above.
In Cuccia coupler structure 31, 33, the radius r of an electron orbit varies as a function of Z, the distance along the beam path, in accordance with

dr/dZ = K

where K is a constant. In a quadrupolar field the variation of r with Z is given by the solution of the corresponding differential equation (Equation 4), in which A is a constant determined from boundary conditions. A boundary condition at the entrance to composite structure 30, where Z is equal to 0, is

r(0) = r0

where r0 is, in this instance, the exit radius from the input coupler. Substituting this boundary condition into Equation 4 gives Equation 5, in which the constant C is defined in terms of the drive and the exponential gain constant a of the quadrupolar field. Substituting Equation 5 into Equation 4 gives

(6)   r± = (r0 + C±) e^(±aZ) + C′±

For preferred operation, in which we are dealing only with the maximum tangential amplification axis, only the upper sign applies, and we wish to specify that r+ = r0 for all values of Z. From (6), this is possible only when the coefficient of the growing exponential vanishes (Equation 7). The current induced in the coupler circuit which is connected to plates 31, 33 is, from Ramo's theorem, proportional to the integral of the beam envelope. For both the r+ and r− modes,

(8)   i = K′ ∫ (from 0 to L) r dZ

where L is the length of the coupler and K′ is a constant. Substituting Equation 6 into Equation 8 and integrating yields Equation 11. But it is well known, from simple Cuccia coupler analysis, that the beam deflection in a Cuccia coupler is proportional to the terminal voltage of the coupler (Equation 12). Substituting (12) into (11) gives Equation 13, and for an ordinary Cuccia coupler it can easily be proved (Equation 14) that the deflection constant is fixed by the beam current I0 and the beam voltage V0. Substituting Equation 14 into Equation 13 gives Equation 15 and, rearranging, Equation 16, which expresses the required load conductance G in terms of the beam current I0, the beam voltage V0, the exponential gain constant a, and the coupler length L.
In summary, a conductance of the value expressed in Equation 16 will present a predetermined resistive load to those electrons of the beam which are essentially in phase with the purely tangential accelerating component, or maximum gain component, of the rotating quadrupolar field, such that the cyclotron rotational motion due to the signal f1 is maintained at its initial entrance radius r0 throughout the pumpler structure 30.
Concurrently with the maintenance of the motion due to f1, signal f2 from source 39 is coupled to beam 11 in the form of additional cyclotron motion which is superimposed on the cyclotron motion corresponding to f1. This added motion is amplified exponentially by the quadrupolar field in accordance with Equation 6 above, where the initial radius r0 (corresponding to initial beam motion from f2) is zero.
As is seen from Equation 6, the component of electron rotational motion due to source f2 will start at zero radius at the pumpler entrance plane and will increase exponentially with distance through the pumpler. This beam motion will be linearly superimposed upon the motion due to f1 previously specified. Since the external load conductance G (which is the output conductance of source 39) is fixed by the previous requirement, the source f2 will not be matched to the equivalent conductance presented by the beam to the signal f2. Nevertheless, sufficient signal f2 will be transferred to the beam and will be exponentially amplified.
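The behavior described above can be illustrated with a rough numerical sketch. The sketch is not part of the patent disclosure; the gain constant, pumpler length, drive strength, and entrance radius below are arbitrary assumed values chosen only to show the two effects in balance.

```python
import math

# Illustrative sketch only: values are arbitrary assumptions, not taken from the patent.
a = 1.0        # exponential gain constant of the quadrupole pump (per unit length)
L = 3.0        # pumpler length, chosen so that a*L is of the order of three
k2 = 0.5       # drive on the f2 component from signal source 39 (arbitrary units)
r0 = 1.0       # entrance radius of the f1 component (exit radius of coupler 20, 21)

dz = 1e-3
r1, r2 = r0, 0.0   # f1 enters at radius r0; f2 starts at zero radius
z = 0.0
while z < L:
    # The quadrupole pump expands each orbit component at the rate a*r. The load
    # conductance G is assumed to be chosen so that it extracts energy from the f1
    # component at exactly the pump rate, leaving r1 constant, as the text requires.
    # (Loading of the f2 component by the source conductance is neglected here.)
    loading = a
    r1 += (a - loading) * r1 * dz          # balanced: r1 stays at r0
    r2 += (a * r2 + k2) * dz               # f2: pumped growth plus coupler drive
    z += dz

print(f"f1 exit radius: {r1:.3f} (entered at {r0:.3f})")
print(f"f2 exit radius: {r2:.3f} "
      f"(analytic (k2/a)*(e^(aL)-1) = {(k2 / a) * (math.exp(a * L) - 1):.3f})")
```

With these assumed values the f1 radius stays at its entrance value while the f2 radius grows by roughly e^(aL), which is the qualitative behavior claimed for the pumpler.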
According to the special case of the above derivation, the predetermined load conductance will only maintain the cyclotron motion of those electrons which are in the r+ mode, which is the axis of maximum tangential acceleration of the quadrupolar field. In the case of electrons in the opposite or r− mode, which is the axis of maximum tangential deceleration, it can easily be shown in accordance with the above derivation that with an aL value of the order of three, a typical value, the orbital radius of these electrons will be reduced to an insignificant amount so as not to cause any undesirable side effects. Those electrons whose orbital motion has a phase intermediate the two extreme modes discussed above will be affected in a manner intermediate the two extreme orbits analyzed above.
From a practical point of view, the present invention finds a very useful application in a phased array radar system. In such a system a plurality of radar antennas each receive separate information signals, each in turn having a specific phase. Each signal is then amplified and sent to a common point, the signals then being combined to provide the proper informational output. The relative phases of the signals at the antennas vary with the angle between the antenna array and the returning electromagnetic wave, and position information is derived by comparison of the phases of these signals. Each signal from an antenna must therefore be amplified a predetermined amount which is substantially identical to the amplification of every other input signal, and each amplifying channel must have identical phase characteristics in order that when the signals are combined the resultant signal will be an accurate representation of the phase relation of the signals at the antennas. Only then will the direction of the returning radar signal be determinable.
The present invention can be used to particular advantage in such an application as an amplifying and combining device. Each signal channel can be attached to a pumpler structure, and its phase and amplitude information will be amplified and superimposed on the beam which is carrying all the different signal inputs previously put onto the beam in the same fashion. In passing through a subsequent pumpler, this composite information is not affected, because of the unique choice of load conductance. Equal amplification in each channel is readily achieved by equal pumping strength, equal lengths, and equal load conductances for each pumpler. Equal phase characteristics for each channel are assured by the use of a common magnetic field, beam, and pump frequency.
The dashed lines 50 shown in the drawing indicate the subsequent pumpler structures which would be needed with a phased array radar system with many antennas. In such a system, the initial Cuccia coupler 20, 21 would be used only in conjunction with a modulation expander in order to amplify the signal f1 the same amount as the signal f2. A plural input parametric amplifier as described above provides improved system phase stability since all inputs are both combined and amplified in a single structure. Furthermore, since an electron beam device of this type is essentially unidirectional, system stability and flexibility are enhanced.
While a particular embodiment of the invention has been shown and described, it will be obvious to those skilled in the art that changes and modifications may be made without departing from the invention in its broader aspects, and, therefore, the aim in the appended claims is to cover all such changes and modifications as fall within the true spirit and scope of the invention.
I claim:
1. Apparatus for amplifying electron waves according to the parametric principle comprising:
means for projecting an electron beam along a predetermined path;
means for imparting to electrons in said beam a first transverse motion representative of a first intelligence signal;
means for imparting to electrons in said beam a second transverse motion representative of a second intelligence signal;
means for subjecting said electrons to a restoring-force field producing periodic electron motion corresponding to said signals and to said field;
means for adding, concurrently with impartation of said second motion, to said restoring-force field a transverse periodic nonhomogeneous restoring-force field component having a phase relationship with said motions to deliver energy to components thereof in linear proportion to the amplitude of said components;
means for extracting, concurrently with impartation of said second motion, said energy delivered to the component of said first motion and dissipating said energy in a resistive load;
and means for deriving an output signal from the resulting electron motions.
2. Apparatus for amplifying electron waves according to the parametric principle comprising:
means for projecting an electron beam along a predetermined path;
means for imparting to electrons in said beam a first transverse motion representative of a first intelligence signal;
means for imparting to electrons in said beam a second transverse motion representative of a second intelligence signal;
means for subjecting said electrons to a restoring-force field producing periodic electron motion corresponding to said signals and to said field;
means for adding, concurrently with impartation of said second motion, to said restoring-force field a transverse periodic nonhomogeneous restoring-force field component having a phase relationship with said motions to deliver energy to components thereof in linear proportion to the amplitude of said components;
means for extracting, concurrently with impartation of said second motion, at least a portion of said energy delivered to the component of said first motion and dissipating said energy in a resistive load;
and means for deriving an output signal from the resulting electron motions.
3. A plural input electron beam parametric amplifier comprising:
means for producing a magnetic field along a predetermined axis;
means for projecting an electron beam along said predetermined axis through a region within said magnetic field;
input signal coupling means for producing on said electron beam cyclotron rotational motion corresponding to a first signal from a first signal source, said rotational motion having a predetermined cyclotron frequency and a predetermined exit radius;
a combined signal coupling and modulation expanding device disposed astride said beam and producing a rotating quadrupolar field component, said device presenting a load conductance, to those electrons of said beam which are substantially in phase with the purely tangential component of said rotating quadrupolar field, which has a value to maintain said cyclotron rotational motion of such electrons at said exit radius and concurrently coupling a second signal from a second signal source to said beam in the form of additional cyclotron motion which is superimposed on said cyclotron rotational motion corresponding to said first signal, said superimposed second signal being concurrently amplified exponentially by said combined device;
and a third coupler for detecting said combined first and second signals.
4. A plural input electron beam parametric amplifier comprising:
means for producing a magnetic field along a predetermined axis;
means for projecting an electron beam along said predetermined axis through a region within said magnetic field;
a first combined signal coupling and modulation expanding device disposed astride said beam for producing on said electron beam cyclotron rotational motion corresponding to a first signal from a first signal source, and for exponentially expanding said motion, said motion having a predetermined cyclotron frequency and a predetermined exit radius;
a second combined signal coupling and modulation expanding device disposed astride said beam and producing a rotating quadrupolar field component, said device presenting a load conductance, to those electrons of said beam which are substantially in phase with the purely tangential component of said rotating quadrupolar field, which has a value to maintain said cyclotron rotational motion of such electrons at said exit radius, and concurrently coupling a second signal from a second signal source to said beam in the form of additional cyclotron motion which is superimposed on said cyclotron rotational motion corresponding to said first signal, said superimposed second signal being concurrently amplified exponentially by said combined device;
and a third coupler for detecting said combined first and second signals.
No references cited.
ROY LAKE, Primary Examiner.
1. Introduction {#sec1-nutrients-08-00697}
An ancient Hindu fable tells of six learned blind men who approach an elephant. All are highly esteemed, but all are blind. The first blind man approaches the elephant and happens to bump up against its broad and sturdy side and declares "the elephant is very like a wall!" The second blind man feels the tusk and cries an elephant is "very much like a spear!" The third happens to grab the elephant's squirming trunk in his hands and boldly declares the elephant is "very like a snake!" The fourth blind man palpates the leg of the elephant and declares "it is clear the elephant is very like a tree!" The fifth blind man who happens to touch the elephant's ear declares "even the blindest man can tell that the elephant is very like a fan". The sixth blind man happens to grasp the swinging tail and declares to his comrades the elephant is "very like a rope!"
What then ensues is a long, passionate argument filled with heated dispute amongst these learned men which gets them nowhere. Although each is partly right, none of them has seen the whole picture (while learned, they are blind, after all!). This fable has been utilized in many different eras and many different cultures to recount arguments in areas as diverse as theology and politics. It illustrates the inaccuracy of seeing only a part of a subject and assuming that it is the whole. It is a cautionary tale that even learned men can sometimes be misled by their preconceived notions or only seeing a portion of the whole.
In the complex world of nutrition and particularly in the study of how the foods we eat relate to such chronic conditions as obesity, diabetes and cardiovascular disease (CVD), we are somewhat like the six blind men. Each of us sees a part of the complex puzzle and may assure our colleagues that, in fact, we have solved the entire riddle for how nutrition relates to various disease processes.
The scientific and medical communities have gone down the road of speculating cause and effect without conclusive evidence many times. We blamed salt consumption for contributing to hypertension \[[@B1-nutrients-08-00697]\], yet recent evidence suggests that this relationship is far more complex \[[@B2-nutrients-08-00697],[@B3-nutrients-08-00697]\]. We blamed dietary cholesterol for contributing to heart disease and warned a generation of Americans to avoid eating egg yolks, although that advice has subsequently been found to lack scientific justification \[[@B4-nutrients-08-00697]\].
The latest bête noire in nutrition is sweeteners, whether they be nutritive sweeteners, in general, and fructose containing sugars, in particular, or non-nutritive sweeteners (NNS). With the issue of sweeteners, the scientific community faces the problem of trying to offer advice without seeing the totality of the picture, much like the blind men approaching the elephant. It is time to pause and try to see the entire elephant.
This article is based on a symposium conducted at the Experimental Biology Meeting in March 2015, entitled "Sweeteners and Health: Current understandings, controversies, recent research findings and directions for future research". It is our hope that by providing a broad approach to high level evidence related to nutritive sweeteners, we can begin to get a clearer picture of the entire "elephant" about sweeteners and health rather than concluding that the health effects are due to a single component.
Added sugars are among the most controversial and hotly debated topics in all of nutrition \[[@B5-nutrients-08-00697],[@B6-nutrients-08-00697],[@B7-nutrients-08-00697],[@B8-nutrients-08-00697],[@B9-nutrients-08-00697],[@B10-nutrients-08-00697],[@B11-nutrients-08-00697],[@B12-nutrients-08-00697],[@B13-nutrients-08-00697],[@B14-nutrients-08-00697],[@B15-nutrients-08-00697],[@B16-nutrients-08-00697],[@B17-nutrients-08-00697],[@B18-nutrients-08-00697],[@B19-nutrients-08-00697],[@B20-nutrients-08-00697],[@B21-nutrients-08-00697],[@B22-nutrients-08-00697]\]. Consumption of added sugars has been associated with increased risk of obesity \[[@B23-nutrients-08-00697],[@B24-nutrients-08-00697],[@B25-nutrients-08-00697]\] as well as increased risk factors for cardiovascular disease (CVD) \[[@B26-nutrients-08-00697]\], including dyslipidemia \[[@B27-nutrients-08-00697],[@B28-nutrients-08-00697]\], elevated blood pressure \[[@B20-nutrients-08-00697],[@B29-nutrients-08-00697],[@B30-nutrients-08-00697]\], diabetes \[[@B21-nutrients-08-00697],[@B31-nutrients-08-00697],[@B32-nutrients-08-00697]\], non-alcoholic fatty liver disease \[[@B33-nutrients-08-00697],[@B34-nutrients-08-00697]\], and even cognitive decline \[[@B35-nutrients-08-00697]\] and cancer \[[@B36-nutrients-08-00697],[@B37-nutrients-08-00697]\]. Data to support these assertions, however, have been challenged consistently. Often these assertions have been based on research trials which provide added sugars in dosages well above those typically found in human consumption (supraphysiological) \[[@B12-nutrients-08-00697]\]. Studies comparing pure fructose to pure glucose, neither of which is consumed to any appreciable degree in the human diet, have also been extrapolated to human nutrition \[[@B38-nutrients-08-00697],[@B39-nutrients-08-00697]\]. Some trials, however, have compared sucrose to glucose or starch in isocaloric exchange and demonstrated harm from sucrose with regard to insulin/glucose markers and prediabetes/diabetes. Speculation about chronic conditions based on acute data has frequently been employed \[[@B40-nutrients-08-00697]\]. Theoretical models, epidemiologic studies which do not establish cause and effect \[[@B31-nutrients-08-00697],[@B32-nutrients-08-00697],[@B41-nutrients-08-00697]\], and data from animal models which can translate poorly to humans, particularly in the areas of nutrition, metabolism, and behavior, have further clouded the debate \[[@B42-nutrients-08-00697],[@B43-nutrients-08-00697],[@B44-nutrients-08-00697],[@B45-nutrients-08-00697]\]. Further controversy has arisen from failure by investigators to clearly acknowledge the limitations of their studies, from misinterpretation or overly simplistic interpretations by the media, and from failure to acknowledge the totality of the evidence, often for political reasons or recognition.
A vast amount of literature has been generated, particularly over the past decade, exploring potential linkages between added sugars and various health related conditions. The purpose of this review is to survey some of the modern science, particularly from high quality research trials such as randomized controlled trials, systematic reviews and meta-analyses, in an attempt to provide some clarity in this controversial area. Literature reviews in this manuscript were drawn from articles cited in the report by Te Morenga et al. commissioned by the World Health Organization \[[@B46-nutrients-08-00697]\], articles included in meta-analyses and systematic reviews utilized by the Scientific Advisory Committee on Nutrition (SACN) \[[@B47-nutrients-08-00697]\], references utilized by the Dietary Guidelines for Americans 2015--2020 \[[@B48-nutrients-08-00697]\], the American Heart Association statement on Carbohydrates and Cardiovascular Disease Risk \[[@B49-nutrients-08-00697]\] and randomized controlled trials conducted in the research laboratory of the two authors.
2. Levels of Evidence {#sec2-nutrients-08-00697}
Any discussion of health consequences related to added sugars and NNSs must take into account levels of evidence. According to guidelines published both in the United Kingdom and by the US Department of Agriculture (as depicted in [Figure 1](#nutrients-08-00697-f001){ref-type="fig"}), the evidence that has the least likelihood of bias is systematic reviews and meta-analyses of randomized controlled trials (RCTs) followed by randomized controlled trials \[[@B50-nutrients-08-00697]\]. It should be noted, however, that randomized controlled trials are difficult to apply in the area of nutrition because of the complexity of the field and potential for confounding. Cohort studies (see [Table 1](#nutrients-08-00697-t001){ref-type="table"}) and cross-sectional studies are more prone to bias because of confounding factors that cannot be controlled with this study design. Expert opinion is considered prone to bias as are ecological studies \[[@B50-nutrients-08-00697]\].
3. Controversies Related to Metabolism of Fructose Containing Sugars {#sec3-nutrients-08-00697}
Many of the controversies related to fructose containing sugars are based on the well-known differences between the metabolism of fructose and glucose in the liver \[[@B62-nutrients-08-00697]\]. Over 90% of ingested fructose is absorbed through the small intestine and metabolized in the liver on first pass. In contrast, glucose is metabolized by a variety of organs. It is important to note, however, that the pathways are interactive. Numerous studies including isotope studies have shown that roughly 50% of fructose is converted to glucose within the liver. An additional 15%--20% is converted to glycogen, 20%--25% to lactate, and a few percent to carbon dioxide \[[@B62-nutrients-08-00697],[@B63-nutrients-08-00697]\]. Multiple studies have shown that only 1%--5% of consumed fructose may follow the pathway of de novo lipogenesis and be converted into free fatty acids which are then packaged as triglycerides and either stored in the liver or released into the bloodstream \[[@B62-nutrients-08-00697],[@B64-nutrients-08-00697],[@B65-nutrients-08-00697]\]. Some short-term data with very large doses of pure fructose have suggested that increases in liver fat can be achieved over a short period of time; Faeh et al. gave seven healthy men six days of a high fructose diet comprising an extra 25% of total calories and demonstrated suppression of adipose tissue lipolysis \[[@B66-nutrients-08-00697]\].
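As a rough bookkeeping illustration of the partitioning figures cited above, the fractions can be tallied as follows. This is a sketch only: midpoints of the quoted ranges are used, and the percentages are those reported in the cited studies rather than new data.

```python
# Rough tally of the hepatic fructose partitioning figures quoted in the text.
# Midpoints of the quoted ranges are used purely for illustration.
partition = {
    "converted to glucose": 50.0,      # "roughly 50%"
    "converted to glycogen": 17.5,     # "15%-20%"
    "converted to lactate": 22.5,      # "20%-25%"
    "oxidized to CO2": 3.0,            # "a few percent"
    "de novo lipogenesis": 3.0,        # "1%-5%"
}
total = sum(partition.values())
for pathway, pct in partition.items():
    print(f"{pathway:>25s}: {pct:5.1f}%")
print(f"{'total accounted for':>25s}: {total:5.1f}%")  # ~96%, within the quoted ranges
```

The tally makes the main point of the paragraph explicit: under these reported figures, de novo lipogenesis accounts for only a few percent of ingested fructose.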
Schwarz et al., utilizing a diet with 25% pure fructose, demonstrated increased fractional hepatic de novo lipogenesis (DNL) and liver fat \[[@B67-nutrients-08-00697]\]. Schwarz et al. studied 25 Latino children and 15 African American children and showed over a ten-day period that replacing high fructose products with vegetables, bread or pasta decreased liver fat in this population \[[@B68-nutrients-08-00697]\].
In certain animals, de novo lipogenesis can be a major pathway \[[@B69-nutrients-08-00697]\]. In humans, it is minimal. Some investigators have misinterpreted the effect of this pathway in humans to contend that fructose consumption can result in increased risk of non-alcoholic fatty liver disease (NAFLD) and insulin resistance \[[@B15-nutrients-08-00697]\].
The modern challenge to fructose, in retrospect, came from an opinion piece published in 2004 in the American Journal of Clinical Nutrition by Bray, Nielson and Popkin which asserted that "the increase in consumption of HFCS has a temporal relation to the epidemic of obesity, and the overconsumption of HFCS in calorically sweetened beverages may play a role in the epidemic of obesity" \[[@B8-nutrients-08-00697]\]. The authors were careful to point out that this temporal association did not establish cause and effect. It was widely misinterpreted by other scientists and the public at large to suggest that there was something unique about HFCS related to obesity. Subsequent research has shown that HFCS and sucrose have indistinguishable metabolic effects and health consequences in human beings \[[@B70-nutrients-08-00697],[@B71-nutrients-08-00697],[@B72-nutrients-08-00697]\].
It is also worth noting that sugar consumption has declined significantly in the United States, Britain, Canada, and Australia at a time when obesity rates have continued to rise. This was first reported in Australia and has become known as the "Australian Paradox" \[[@B73-nutrients-08-00697]\]. Furthermore, Mozaffarian et al. reported the impact of increased servings of different foods on weight change over a four-year interval by combining the Nurses' Health Study (NHSI) (1986--2006), NHSII (1991--2003), and the Health Professionals Follow-up Study (1986--2006) for a combined cohort of 120,877 people. After multivariable adjustment for age, Body Mass Index (BMI), sleep, physical activity, alcohol, television watching, smoking and all other dietary factors, several foods (French fries, potato chips, processed meats and red meats) all resulted in more weight gain over each four-year period than did sugar sweetened beverages (SSB) \[[@B74-nutrients-08-00697]\]. These data should be treated with some caution since they come from cohort studies and do not represent a randomized controlled trial. It may be that all of these food products are simply markers for an overall diet that is energy dense and that it is the overall diet pattern, and not any individual component of it, that is associated with weight gain.
4. Effects of Sugars on Body Weight and Body Composition {#sec4-nutrients-08-00697}
It has been argued that consumption of sugars may predispose individuals to increases in adiposity, weight gain and ultimately overweight and obesity. A number of randomized controlled trials (RCT) have been performed exploring sugar consumption and weight. These RCTs have been aggregated in four recent meta-analyses; however, these studies employed different inclusion and exclusion criteria and reported different summary endpoint estimates and conclusions \[[@B46-nutrients-08-00697],[@B75-nutrients-08-00697],[@B76-nutrients-08-00697],[@B77-nutrients-08-00697]\] (See [Table 2](#nutrients-08-00697-t002){ref-type="table"}). Sievenpiper et al. \[[@B76-nutrients-08-00697]\] and Te Morenga et al. \[[@B46-nutrients-08-00697]\] looked at isocaloric exchange of either sugar or fructose with other macronutrients to assess the effect on body weight in adults. Neither of these analyses showed a significant effect of either sugar or fructose on body weight. With regard to sugars and weight loss, Te Morenga et al. reviewed RCTs to examine the effect on weight when calories from sugars are reduced \[[@B46-nutrients-08-00697]\]. These investigators performed meta-analyses on five trials in children and demonstrated no significant effect in isocaloric trials in either children or adults. A meta-analysis by Malik et al. found that two of five trials resulted in significant weight loss resulting from a reduction in sugar calories in one model but not another \[[@B77-nutrients-08-00697]\]. It should be pointed out that in the trials that were meta-analyzed, subjects consumed not only fewer calories from sugar, but less total energy. Thus, it is not clear that the weight loss resulted from reduction in calories from sugar.
These four research groups also conducted meta-analyses in studies where an increased amount of sugar calories was given to adults who were consuming ad libitum diets. All four meta-analyses reported significant weight gain in this model although individual studies often did not. Thus, it is not clear whether the change in weight was due to an increase in the total number of calories consumed or some unique property of sugars. Recent meta-analyses by Dolan et al. of interventional studies utilizing the FDA Guidance for Evidence-Based Review both in normal weight \[[@B78-nutrients-08-00697]\] and obese individuals \[[@B79-nutrients-08-00697]\] did not support a link between obesity and fructose consumption with amounts up to the 90th percentile population consumption for fructose.
The report of the SACN in the UK, which is based on an extensive series of systematic reviews conducted according to clearly stated quality standards, reported that high levels of free sugar consumption were associated with excess energy intake \[[@B47-nutrients-08-00697]\]. Thus, weight gain in these studies could not be separated from calorie intake and could not be attributed to any unique property of free sugars, although it could be argued that free sugar consumption may predispose to excess calorie intake. It has also been reported that fructose containing sugars may predispose individuals to abdominal weight gain \[[@B80-nutrients-08-00697],[@B81-nutrients-08-00697]\]. If this were true, it would represent a significant increased risk for both diabetes and the metabolic syndrome. Stanhope et al. reported a research trial comparing 25% of calories from fructose to 25% of calories from glucose \[[@B81-nutrients-08-00697]\]. Individuals in the fructose arm, over a 10-week period, increased their visceral abdominal fat. However, it should be noted that individuals also gained an average of two pounds over the course of this study. Furthermore, significance in abdominal weight gain occurred only pre-to-post in the fructose arm and this was not compared to the glucose arm. When this more appropriate glucose to fructose comparison was made, the significance disappeared. Maersk et al. \[[@B80-nutrients-08-00697]\] conducted a six-month study comparing one liter per day of sugar sweetened beverage versus comparable amounts of diet beverage, 1% milk, and water. These investigators reported that individuals in the sugar sweetened beverage group increased visceral abdominal fat compared to the other groups. It should be noted, however, that individuals also gained weight in this study, which represents a confounding variable.
Three recent RCTs employing slightly different strategies have explored aspects of sugar consumption and weight change. In one study, consumption of average amounts of fructose containing sugars for adults (HFCS or sucrose) did not result in increased body weight over a ten-week, free living trial \[[@B51-nutrients-08-00697]\]. In another study, mean amounts of these sugars were utilized as part of an overall hypocaloric diet and did not inhibit weight loss \[[@B52-nutrients-08-00697]\]. Of note, there were no differences between 10% and 20% of calories from either HFCS or sucrose. In a larger trial involving 355 men and women who consumed either 8%, 18% or 30% of kcals/day of either sucrose or HFCS as part of a mixed nutrient diet, individuals gained an average of slightly over two pounds over a ten-week period. However, most of this was driven by the 30% of kcals per day group (above the 95th percentile population consumption for fructose) \[[@B53-nutrients-08-00697]\]. At the end of the study, individuals were consuming an average of more than 200 kcals/day more than at baseline. Thus, this should be viewed as a hypercaloric trial.
Fructose containing sugars led to the expected weight loss (with some exceptions in children) in subtraction trials, which suggests that fructose containing sugars do not behave differently from other macronutrients (mainly starch) when comparisons are matched for calories. Another approach to this issue may be obtained from an *ad libitum* trial design where fructose containing sugars were freely replaced with other sources of energy in the diet and no strict control of the amount of sugars in the background diet occurred. The CArbohydrate Ratio Management in European National diets (CARMEN) trial \[[@B84-nutrients-08-00697]\] is the largest and longest trial using such a design. This trial compared an ad libitum high sugars diet and an ad libitum high complex carbohydrate diet with a higher fat control diet in 398 obese individuals studied for over six months. Both ad libitum diets resulted in weight loss. There was no significant difference between the ad libitum high sugars diet and the ad libitum high complex carbohydrate diet, with a non-significant tendency toward greater weight loss in the latter. This trial also showed that under free living conditions it is possible to lose weight following an ad libitum high sugars diet employing a strategy to freely replace energy from high fructose containing sugars with other sources of energy in the diet. It also demonstrates that there is no clear advantage to reducing sugars as compared to fat in the diet \[[@B46-nutrients-08-00697],[@B75-nutrients-08-00697],[@B76-nutrients-08-00697],[@B77-nutrients-08-00697]\]. Given the complexity of weight gain and energy regulation it is unlikely that one component of the diet significantly impacts upon this problem. In fact, the consensus statement from the American Society of Nutrition on energy regulation specifically warns against isolating one component of the diet and blaming it for obesity \[[@B85-nutrients-08-00697]\]. Moreover, a large body of literature identifies both increased caloric consumption from all sources \[[@B86-nutrients-08-00697]\] and decreased physical activity \[[@B87-nutrients-08-00697]\] as major components of weight gain. Indeed, the average American consumed 454 more calories in 2010 compared with 1970. Of these additional calories, 93% came from increased consumption of flour and cereal products or fats while only 7% (39 additional calories) came from all sugars combined. The percentage of calories from sugar in the diet in the United States actually declined from 19% to 17% over this period \[[@B88-nutrients-08-00697]\]. It should be pointed out, however, that sugars may provide excess energy due to their hedonic properties. In addition, increased sugars intake in some individuals may be a marker for an overall less healthy, energy dense diet.
The recent literature on the impact of added sugars on obesity and weight gain or weight loss remains in dispute. Most of the RCTs suggest that weight gain occurs only in hypercaloric trials and suggest that overall caloric consumption is likely to be a larger contributor to weight gain than any unique property of sugars \[[@B74-nutrients-08-00697],[@B75-nutrients-08-00697]\].
5. Risk Factors for Diabetes {#sec5-nutrients-08-00697}
Considerable confusion exists with regard to the potential impact of added sugars on risk factors for diabetes. A great deal of attention was paid to this issue in the media following two ecological studies which suggested that availability of sugars correlated with increased risk of diabetes \[[@B31-nutrients-08-00697],[@B32-nutrients-08-00697]\]. These types of ecological studies, however, must be treated with great caution. Ecological studies are considered one of the lowest forms of evidence. Furthermore, these studies have been criticized on a variety of technical grounds. In one ecological study, Goran et al. \[[@B32-nutrients-08-00697]\] reported that diabetes prevalence was 20% higher in European Union (EU) countries with higher availability of HFCS compared to countries with low availability. As noted by van Buul et al. however, HFCS consumption data in EU countries reported in this study were, in fact, not consumption data at all but production data \[[@B5-nutrients-08-00697]\]. Since HFCS travels freely across EU borders, production data cannot be assumed to be the equivalent of consumption data. In another ecological study, Basu et al. used food supply data from the UNFAO to determine market availability of different food items worldwide and concluded that sugar availability was associated with higher diabetes prevalence. Market availability of food, however, is a highly unreliable indicator of sugar consumption \[[@B6-nutrients-08-00697]\].
Prospective cohort studies have not documented a direct relationship between fructose and diabetes \[[@B89-nutrients-08-00697]\]. Pooled analysis of these cohorts did reveal that SSBs as a source of free sugar are associated with an increased risk of diabetes only when comparing highest and lowest levels of exposure \[[@B22-nutrients-08-00697],[@B90-nutrients-08-00697]\]. Pooled analyses of these cohorts, however, for total sugars, total sucrose, and total fructose have not yielded the same relationship \[[@B91-nutrients-08-00697]\]. In addition, systematic reviews and meta-analyses of sugar and diabetes risk factors have actually reported a decrease in risk factors such as glycosylated proteins \[[@B82-nutrients-08-00697]\]. A large cohort study in Europe also did not show an increase in diabetes risk with added sugars \[[@B92-nutrients-08-00697]\].
The question of whether or not sugar is a unique cause of diabetes has not been addressed in any RCT to our knowledge. Most of the data related to the question of a potential relationship between sugar consumption and diabetes come from RCTs looking at risk factors for diabetes or cohort studies. Prospective cohort studies provide mixed evidence concerning sugar consumption and diabetes. Malik et al. reported meta-analyses of eight cohort studies, four of which did not find a significant association of SSB with the incidence of diabetes, and five of which did not adjust findings for energy intake and body weight \[[@B22-nutrients-08-00697]\]. A study published by the same group did not show a relation between sugar consumption and the risk of diabetes \[[@B93-nutrients-08-00697]\]. Other cohort studies have also failed to find significant associations between sugar intake and diabetes \[[@B94-nutrients-08-00697],[@B95-nutrients-08-00697],[@B96-nutrients-08-00697]\] and one study found a significant negative association \[[@B95-nutrients-08-00697]\]. With regard to systematic reviews and meta-analyses, few data are available to support an association between sugar intake and diabetes \[[@B94-nutrients-08-00697],[@B95-nutrients-08-00697],[@B96-nutrients-08-00697]\]. Cozma et al. reported a systematic review and meta-analysis of 18 feeding studies on fructose and diabetes risk and found no adverse impact on glycemic control including insulin, glucose, and glycated blood proteins (including HbA1c) \[[@B82-nutrients-08-00697]\]. The SACN report published in 2015 \[[@B47-nutrients-08-00697]\] did not show an association between free sugars consumption and risk factors for diabetes.
Most randomized controlled trials of non-diabetic patients substituting sucrose for fructose in a controlled diet did not report adverse effects on multiple risk factors for diabetes \[[@B70-nutrients-08-00697],[@B78-nutrients-08-00697],[@B97-nutrients-08-00697],[@B98-nutrients-08-00697],[@B99-nutrients-08-00697]\].
Two recent RCTs have also not demonstrated increased risk factors for diabetes over a 10-week time period. One study of 123 individuals who consumed average levels of fructose containing sugars (9% of calories from fructose itself, or 18% of calories from either sucrose or HFCS) did not yield increases in fasting glucose, insulin, or insulin resistance via the homeostatic model of assessment (HOMA) \[[@B100-nutrients-08-00697]\]. Another RCT evaluated 267 individuals who consumed either HFCS or sucrose at dosage ranges between 8% and 30% of calories (25th through 95th percentile of calories) and also did not find any increase in risk factors for diabetes \[[@B53-nutrients-08-00697]\].
This literature taken together provides little direct evidence that sugar consumption increases risk factors for diabetes. Moreover, since the relationship between diabetes and obesity is well established and, as already indicated, scant evidence relates isocaloric substitution of sugars for other carbohydrates to increased risk, it appears prudent to focus on other risk factors for diabetes such as obesity rather than singling out sugars. Since diabetes takes 20--30 years to develop, short-term RCTs focusing on risk factors for diabetes should be interpreted with caution, recognizing this limitation.
6. Risk Factors for Cardiovascular Disease {#sec6-nutrients-08-00697}
The American Heart Association (AHA) has recommended that adult males consume no more than 150 kcals per day and females no more than 100 kcals per day from added sugars \[[@B101-nutrients-08-00697]\]. This recommendation implies that higher levels of added sugars may increase the risk of heart disease. In addition, the 2015 Dietary Guidelines Advisory Committee (DGAC) concluded that there was "moderate" evidence of an association between added sugars and heart disease \[[@B48-nutrients-08-00697]\]. The SACN report published in 2015 did not find a linkage between sugars consumption and risk factors for heart disease \[[@B47-nutrients-08-00697]\]. The evidence in this area, however, is mixed and inconclusive \[[@B13-nutrients-08-00697]\]. To our knowledge there are no RCTs assessing a link between added sugars and CVD. Thus, the available data come either from cohort studies or from RCTs examining risk factors for CVD.
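To put these caloric limits, and the percent-of-calories thresholds discussed throughout this review, into more familiar units, the following sketch performs the conversion. The conversion factors (4 kcal per gram of sugar, roughly 4.2 g per teaspoon, and a 2,000 kcal/day reference diet) are our assumptions for illustration, not values taken from the cited reports.

```python
# Convert added-sugar limits expressed in kcal or percent of calories into
# approximate grams and teaspoons per day. Assumed conversion factors only.
KCAL_PER_GRAM = 4.0    # assumed energy density of sugars
GRAMS_PER_TSP = 4.2    # assumed grams of sugar per teaspoon
REFERENCE_DIET = 2000  # assumed reference intake, kcal/day

def describe(limit_kcal: float, label: str) -> None:
    grams = limit_kcal / KCAL_PER_GRAM
    tsp = grams / GRAMS_PER_TSP
    print(f"{label}: {limit_kcal:.0f} kcal/day ~ {grams:.0f} g ~ {tsp:.1f} tsp of added sugars")

describe(150, "AHA limit, adult males")
describe(100, "AHA limit, adult females")
describe(0.10 * REFERENCE_DIET, "10% of a 2,000 kcal diet")
describe(0.20 * REFERENCE_DIET, "20% of a 2,000 kcal diet")
```

Under these assumptions, the AHA limits correspond to roughly 25--38 g (6--9 teaspoons) per day, while the 20%-of-calories level discussed later in this review corresponds to about 100 g per day on a 2,000 kcal diet.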
Dietary sugars may have differential effects on blood lipids. A number of studies have demonstrated that diets containing greater than 20% of kcals from simple sugars may result in elevated fasting triglycerides which is a known risk factor for CVD (see [Table 3](#nutrients-08-00697-t003){ref-type="table"}) \[[@B32-nutrients-08-00697],[@B54-nutrients-08-00697],[@B55-nutrients-08-00697],[@B56-nutrients-08-00697],[@B57-nutrients-08-00697],[@B58-nutrients-08-00697],[@B59-nutrients-08-00697],[@B60-nutrients-08-00697],[@B61-nutrients-08-00697],[@B99-nutrients-08-00697]\]. The American Heart Association Scientific Statement on triglycerides lists avoiding excess fructose as one mechanism for decreasing the risk of hypertriglyceridemia \[[@B102-nutrients-08-00697]\]. Several recent systematic reviews and meta-analyses, however, have reported that in trials where fructose is substituted isocalorically for other carbohydrates it does not result in increased fasting triglycerides or post-prandial triglycerides \[[@B103-nutrients-08-00697],[@B104-nutrients-08-00697]\].
Two recent RCTs looked at the relationship between sugar consumption and triglycerides. In one, involving 65 individuals, where no weight gain occurred, no increase in triglycerides was found \[[@B51-nutrients-08-00697]\]. A larger trial involving 355 men and women who consumed between 8% and 30% of kcals per day as either sucrose or HFCS as part of a mixed nutrient diet reported a 10% increase in triglycerides \[[@B53-nutrients-08-00697]\]. It should be pointed out, however, that individuals in this trial gained approximately two pounds over the ten-week intervention and were consuming an average of over 200 kcals per day more by the end of the study compared to baseline. Stanhope et al. gave various doses of HFCS to young adults over a 16-day period and also reported increases in post-prandial triglycerides \[[@B107-nutrients-08-00697]\]. However, the short duration of this study and the fact that pre and post levels were within the low normal range must be taken into consideration when evaluating this finding.
The effects of added sugars on low density lipoprotein (LDL) have been variable \[[@B27-nutrients-08-00697],[@B59-nutrients-08-00697],[@B80-nutrients-08-00697],[@B102-nutrients-08-00697],[@B108-nutrients-08-00697]\] with some investigators reporting increases while other studies have not demonstrated this finding. It should be noted that a number of the trials where the increases in LDL occurred gave large dosages of added sugars often above the 90th percentile population \[[@B109-nutrients-08-00697]\].
A study by Yang et al. published in 2013 analyzed NHANES data from three different time periods (1988--1994, 1999--2004 and 2005--2010) and reported that the relative risk of CVD mortality was 1.30 for those who consumed 10%--24.9% of calories from added sugars and 2.75 for those who consumed 25% or more of calories from added sugars (approximately 10% of the population) when compared to those who consumed less than 10% of calories from added sugars. It should be noted that the authors also reported that the percentage of daily calories from added sugars was 16.8% in the 1999--2004 cohort and decreased to 14.9% in the 2005--2010 cohort \[[@B106-nutrients-08-00697]\]. Several RCTs involving levels of sugar consumption ranging from the 25th to the 95th percentile population consumption have demonstrated no changes in LDL cholesterol following ten weeks in a free living environment compared to baseline when consumed as part of a mixed nutrient diet \[[@B53-nutrients-08-00697]\]. Thus, the effects of added sugars on lipids in adults remain in dispute.
Research evaluating the effects of added sugars on blood pressure has similarly shown mixed results \[[@B29-nutrients-08-00697],[@B30-nutrients-08-00697],[@B110-nutrients-08-00697]\]. For example, epidemiologic studies such as the Framingham Heart Study have reported an association between consuming one or more SSB per day and increased odds of developing high blood pressure \[[@B111-nutrients-08-00697]\]. The meta-analysis by Te Morenga et al., which reported on 12 trials (*n* = 324), found no significant effects of higher sugar intake on systolic blood pressure overall, although higher sugar intake was associated with a significant increase in diastolic pressure of 1.4 mmHg (95% CI: 0.3, 2.5 mmHg; *p* = 0.02) \[[@B109-nutrients-08-00697]\]. Many of the trials reported in this systematic review, however, employed amounts of added sugars consumption above the 90th percentile population consumption level. A systematic review and meta-analysis by Ha and colleagues, involving 18 studies (*n* = 355), showed slight decreases in both diastolic and mean blood pressure when fructose was substituted either isocalorically for other carbohydrates (13 trials) or in hypercaloric trials (2 trials) \[[@B83-nutrients-08-00697]\]. Several recent RCTs have not shown increases in blood pressure. In a large study of 355 individuals followed for ten weeks at up to 30% of kcals per day, up to the 95th percentile population consumption level of fructose \[[@B53-nutrients-08-00697]\], no increases in blood pressure were observed. Further RCTs compared fructose containing sugars to glucose at the 50th percentile population consumption and did not demonstrate increases in mean systolic or diastolic blood pressure \[[@B51-nutrients-08-00697]\].
Thus, if there is any association between sugar consumption and increases in blood pressure it would appear to occur at higher levels of sugar consumption (\>90th percentile population consumption) and even at that level may not exist.
Taken as a whole, it does not appear that sugar consumption within the normal range of the human diet increases the risk of cardiovascular disease. An exception, however, may occur with diets that contain greater than 20% of kcals from simple sugars in hypercaloric trials, which may cause an increase in triglycerides. It should be noted that Archer et al. utilized NHANES data (NHANES 1988--1994, 1999--2004 and 2015) (*n* = 31,147) compared to the NHANES III Mortality Report (1988--2006) (*n* = 11,733) and reported that individuals who consumed 25% or more of calories from added sugars (approximately 77% of the population) experienced an increased associated risk of cardiovascular disease \[[@B105-nutrients-08-00697]\] compared to those who consumed less than 10% of calories from added sugars. These findings should be treated with caution given the multiple potential confounders inherent to all cohort studies. In particular, NHANES data have recently been challenged because of their use of memory-based recall, which has been found in multiple studies to be highly inaccurate. These investigators also noted that the percentage of daily calories from added sugars declined from 16.8% in 1999--2004 to 14.9% in 2005--2010 (9% decline).
To put the issue of SSB consumption in perspective, it should be noted that the major modifiable risk factors for heart disease are well established, and the corresponding strategies include avoiding cigarette smoking, maintaining a proper weight, avoiding or controlling diabetes and leading a physically active lifestyle. It would appear prudent to focus more attention on these established risk factors than on one component of an overall approach to nutrition. RCTs of longer duration would be helpful in examining putative links between sugar consumption and risk factors for CVD.
7. Effects of Sugars on the Brain {#sec7-nutrients-08-00697}
The effects of sugar on the brain, in general, and on reward pathways, in particular, as well as on downstream portions of the brain, have been an area of intense research and controversy. Early studies in this area were done largely on animals \[[@B43-nutrients-08-00697],[@B112-nutrients-08-00697],[@B113-nutrients-08-00697],[@B114-nutrients-08-00697]\]; however, recent advances in functional MRI (fMRI) have allowed more studies to be conducted in human beings \[[@B115-nutrients-08-00697]\]. Animal studies in this area must be treated with great caution since there are multiple and significant differences between animal brains (in particular, rodents, which are the most frequently used model) and human brains \[[@B116-nutrients-08-00697],[@B117-nutrients-08-00697]\]. Further confusion in this area has come from studies which have utilized a model comparing fructose versus glucose to examine effects on blood flow to the hypothalamus and reward pathways despite the fact that these monosaccharides are rarely consumed by themselves in human nutrition \[[@B118-nutrients-08-00697],[@B119-nutrients-08-00697]\]. Unfortunately, these trials of two monosaccharides in isolation have led to speculation that fructose and glucose interact differently in the brain, thereby leading to potential for overconsumption of calories.
When similar studies have been repeated comparing the effects of the normally consumed sugars sucrose or HFCS on blood flow to the hypothalamus and brain connectivity, no differences have been reported between sweetened beverages consumed in the context of a mixed nutrient meal and an unsweetened control \[[@B120-nutrients-08-00697]\].
Stice et al. reported a trial of 70 individuals comparing various levels of sugar sweetened milkshakes to various levels of fat in milkshakes and reported that there was more stimulation of reward pathways following the highest level of sugar than fat \[[@B40-nutrients-08-00697]\]. These investigators speculated that these acute findings suggested that sugar should be regulated rather than fat with regard to lowering the prevalence of obesity. There are studies, however, which show exactly the opposite \[[@B121-nutrients-08-00697],[@B122-nutrients-08-00697]\].
Stephan et al. \[[@B35-nutrients-08-00697]\] using epidemiologic data suggested that increased consumption of fructose containing sugars could lead to dementia. Studies performed ranging in duration from 10 weeks to 24 weeks and employing average levels of consumption of fructose containing sugars have not found any evidence of cognitive change \[[@B123-nutrients-08-00697],[@B124-nutrients-08-00697]\].
Unfortunately, some investigators have speculated that sweetness from added sugars may lead to a form of sugar "addiction" \[[@B15-nutrients-08-00697],[@B125-nutrients-08-00697]\]. Animal data have also been used to buttress this claim \[[@B126-nutrients-08-00697],[@B127-nutrients-08-00697]\] despite the fact that the translation of animal data to humans in this area is fraught with complexity and speculation. Several recent reviews have provided extensive analyses questioning the fundamental premise of either food or sugar "addiction" \[[@B128-nutrients-08-00697],[@B129-nutrients-08-00697],[@B130-nutrients-08-00697]\]. Unfortunately, the popular press and the public have embraced the concept of sugar "addiction", which would appear to be a vast exaggeration of what the scientific data show. Clearly, this is an area where much more research is required.
8. Conclusions {#sec8-nutrients-08-00697}
There is no question that multiple, important links exist between nutrition and health. The current emphasis on added sugars, however, has created an environment that is "sugar centric" and in our judgment risks exaggerating the effects of these components of the diet with the potential unforeseen side effect of ignoring other important nutritional practices where significant evidence of linkages to health exists.
We have seen attempts to focus on single nutrients in the diet before, blaming a variety of chronic illnesses on overconsumption of these components of the diet \[[@B131-nutrients-08-00697]\]. For example, dietary cholesterol was initially blamed as a significant contributing factor in coronary artery disease although subsequent research has not supported this linkage. Subsequently, saturated fats were deemed to be a villain although recent evidence now suggests that the food matrix containing the saturated fats may be more important than the saturated fats themselves with regard to risk of CVD \[[@B132-nutrients-08-00697],[@B133-nutrients-08-00697],[@B134-nutrients-08-00697]\].
The same phenomenon may hold true for isolating components of the diet for supposed health benefits \[[@B135-nutrients-08-00697]\]. For example, even though oats have multiple health benefits, the exaggerated health claims caused one pundit to suggest that putting oats in carbonated soft drinks could lead to an increase in their sales. There are multiple benefits of consuming protein, yet the current fashion of uncritically accepting high protein diets for a variety of potential health benefits seems overwrought. These are but two of many examples. One has only to look at the popular press to find the current month's super food.
The history of nutrition is littered with attempts to isolate one nutrient, or class of nutrients, to claim a plethora of benefits or risk \[[@B131-nutrients-08-00697]\]. These have almost universally resulted in failure and disappointment. In the area of sugar sweetened beverages and various health considerations, the highest quality of evidence from systematic reviews, meta-analyses, and randomized controlled trials does not suggest signals for harm within the normal range of human consumption at least in short-term studies lasting six months or less and in longer-term cohort studies where fructose containing sugars are substituted isocalorically for other carbohydrates. This would suggest that some of the recently articulated restrictive guidelines from prestigious scientific and health organizations may be overly restrictive although longer term studies will be required to provide more certainty on this issue.
We wish to emphasize that we are not recommending excessive consumption of added sugars. It would appear to the authors, however, that a reasonable recommended upper limit may be no more than 20% of calories from added sugars, with concern arising even then only in a hypercaloric situation. This recommendation rests largely on our view that the evidence suggests a potential signal for elevated triglycerides at consumption levels greater than 20% of calories in hypercaloric trials. We recognize, however, that definitive evidence in this area may be very difficult to generate. Longer term RCTs, particularly with ad libitum sugar consumption designs, may prove helpful. Current ad libitum trials are typically of a short duration.
There are well established risk factors for obesity, diabetes, and cardiovascular disease and considerable overlap amongst these entities when it comes to nutritional practices. For now, we would agree with the assertion in the Dietary Guidelines for Americans (2010) \[[@B136-nutrients-08-00697]\] that overconsumption of calories represents the single greatest health threat to individuals in the United States and elsewhere. This may, in part, be linked to the overall consumption patterns in what has been called the "Western" diet. Certainly, added sugars may be considered as components of this overall diet and, therefore, targets for reduction as are other energy dense components of this nutrition pattern. Singling out added sugars as major or unique culprits for metabolically based diseases such as obesity, diabetes, and cardiovascular disease appears inconsistent with modern high quality evidence and is very unlikely to yield health benefits. The reduction of these components of the diet without other reductions seems very unlikely to achieve any meaningful results. Perhaps in this situation, we should remember a favorite quotation of President John F. Kennedy who quoted Winston Churchill who, in turn, had paraphrased the philosopher George Santayana by saying "Those who fail to learn from history are doomed to repeat it".
J.R. and T.A. were responsible for conceptualizing the manuscript and had overall supervision of the manuscript.
Both authors participated in the writing of the manuscript and have approved it.
J.M. Rippe's research laboratory has received unrestricted grants and J.M. Rippe has received consulting fees from ConAgra Foods, Kraft Foods, the Florida Department of Citrus, PepsiCo International, The Coca Cola Company, the Corn Refiners Association, Weight Watchers International and various publishers.
The following abbreviations are used in this manuscript:
AHA: American Heart Association
HOMA: Homeostatic Model of Assessment
LDL: Low Density Lipoprotein
NAFLD: Non-Alcoholic Fatty Liver Disease
RCT: Randomized Controlled Trials
SSB: Sugar Sweetened Beverages
::: {#nutrients-08-00697-f001 .fig}
Hierarchy of evidence in evidence based medicine.
::: {#nutrients-08-00697-t001 .table-wrap}
Randomized Control Trials Included.

| Study | Type of Analysis | Findings |
| --- | --- | --- |
| Lowndes et al. \[[@B51-nutrients-08-00697]\] | 50th percentile consumption of fructose containing sugars | No increase in body weight over 10 weeks and no increase in triglycerides. No increase in risk factors for diabetes |
| Lowndes et al. \[[@B52-nutrients-08-00697]\] | Comparison between 10 and 20 percent of calories from either HFCS or sucrose in hypocaloric diets | Significant weight loss occurred in all groups |
| Lowndes et al. \[[@B53-nutrients-08-00697]\] | RCT of 355 men and women consuming 8%, 18% or 30% of kcal per day from either sucrose or HFCS | Average weight gain of over 2 pounds over the 10-week period, mostly driven by the 30% kcal per day group. No increased risk factors for diabetes. 10% increase in triglycerides confounded by the 2-pound weight gain |
| Antar et al. \[[@B54-nutrients-08-00697]\] | Randomized Control Trial | Increase in fasting triglycerides from various levels of added sugar consumption |
| Bantle et al. \[[@B55-nutrients-08-00697]\] | Randomized Control Trial | Increase in fasting triglycerides from various levels of added sugar consumption |
| Black et al. \[[@B56-nutrients-08-00697]\] | Randomized Control Trial | Increase in fasting triglycerides from various levels of added sugar consumption |
| Cooper et al. \[[@B57-nutrients-08-00697]\] | Randomized Control Trial | Increase in fasting triglycerides from various levels of added sugar consumption |
| Groen et al. \[[@B58-nutrients-08-00697]\] | Randomized Control Trial | Increase in fasting triglycerides from various levels of added sugar consumption |
| Marckmann et al. \[[@B59-nutrients-08-00697]\] | Randomized Control Trial | Increase in fasting triglycerides from various levels of added sugar consumption |
| Sorensen et al. \[[@B60-nutrients-08-00697]\] | Randomized Control Trial | Increase in fasting triglycerides from various levels of added sugar consumption |
| Stanhope et al. \[[@B61-nutrients-08-00697]\] | Randomized Control Trial | Increase in fasting triglycerides from various levels of added sugar consumption |
::: {#nutrients-08-00697-t002 .table-wrap}
Systematic Reviews and Meta-analyses Included.

| Study | Type of Analysis | Findings |
| --- | --- | --- |
| Sievenpiper et al. \[[@B76-nutrients-08-00697]\] | Aggregated randomized control trials looking at isocaloric exchange of either sugar or fructose with other macronutrients to assess effects on body weight in adults | No significant effect of either sugar or fructose on body weight |
| Te Morenga et al. \[[@B46-nutrients-08-00697]\] | Aggregated randomized control trials looking at isocaloric exchange of either sugar or fructose with other macronutrients to assess effects on body weight in adults | No significant effect of either sugar or fructose on body weight |
| Malik et al. \[[@B77-nutrients-08-00697]\] | Meta-analysis of 5 trials | 2 of 5 trials resulted in significant weight loss from reducing sugar calories in one model but not another |
| Dolan et al. \[[@B78-nutrients-08-00697]\] | Normal weight individuals; interventional studies utilizing the FDA guidance for evidence based reviews | No difference with regard to obesity from fructose consumption in normal weight individuals |
| Dolan et al. \[[@B79-nutrients-08-00697]\] | Obese individuals; interventional studies utilizing the FDA guidance for evidence based reviews | No difference with regard to obesity from fructose consumption in obese individuals |
| Cozma et al. \[[@B82-nutrients-08-00697]\] | Systematic review and meta-analysis of 18 RCTs | Decrease in risk factors for diabetes such as glycosylated proteins |
| Malik et al. \[[@B24-nutrients-08-00697]\] | Meta-analysis of 8 cohort studies | 4 did not find a significant effect of SSB on incidence of diabetes and 5 did not adjust findings for energy intake and body weight |
| Ha et al. \[[@B83-nutrients-08-00697]\] | 15 studies involving 355 individuals | Slight decreases in diastolic and mean blood pressure in isocaloric substitution or hypercaloric trials |
::: {#nutrients-08-00697-t003 .table-wrap}
Cohort Studies Included.

| Study | Type of Analysis | Findings |
| --- | --- | --- |
| Hodge et al. \[[@B94-nutrients-08-00697]\] | Cohort study | No significant association between sugar intake and diabetes |
| Meyer et al. \[[@B95-nutrients-08-00697]\] | Cohort study in older women | Significant negative association between sugar intake and diabetes |
| Colditz et al. \[[@B96-nutrients-08-00697]\] | Cohort study in women | No association between sugar intake and diabetes |
| InterAct \[[@B92-nutrients-08-00697]\] | Cohort study in European adults | No increase in diabetes risk with added sugars |
| Archer et al. \[[@B105-nutrients-08-00697]\] | NHANES data analysis | Individuals who consumed 25% or more of calories from added sugars experienced an increased associated risk of cardiovascular disease compared to individuals who consumed less than 10% of calories from added sugars |
| Yang et al. \[[@B106-nutrients-08-00697]\] | NHANES data analysis | CVD risk increased to 1.30 for individuals who consumed 10 to 24.9% of calories and 2.75 for those who consumed 25% or more of calories from added sugars, compared to individuals who consumed less than 10% of calories from added sugars |
|
User:Abdullah5599@legacy41961806
Contact Me
* Talk Page (fastest)
* Skype: abdullah.arsym
* Zelda Universe: Minion (slowest)
Stats
* Join: May 29, 2010
* Autopatrol: Sep 21, 2010
* Patrol: Dec 27, 2010
* Admin: July 26, 2011
* Left: April, 2013
* Total edits:
* Daily Average:
Hello!
Games Beaten
* Ocarina of Time (2003)
* The Wind Waker (2006)
* The Minish Cap (2007)
* Twilight Princess (2008)
* Majora's Mask (April/May 2010)
* Four Swords (June 2010)
* Four Swords Adventures (Aug 2010)
* Skyward Sword (Nov 2011)
|
using Nudes.Retornator.Core;
namespace Nudes.Retornator.AspnetCore.Errors;
/// <summary>
/// The request is unprocessable
/// </summary>
public class UnprocessableEntityError : Error
{
/// <summary>
/// Creates an UnprocessableEntityError with no additional description.
/// </summary>
public UnprocessableEntityError() : base("Unprocessable Entity", null)
{ }
/// <summary>
/// Creates an UnprocessableEntityError describing why the request could not be processed.
/// </summary>
/// <param name="description">Details about why the entity is unprocessable.</param>
public UnprocessableEntityError(string description) : base("Unprocessable Entity", description)
{ }
}
|
import { BaseEntity, Column, Entity, JoinColumn, ManyToOne, OneToMany, PrimaryGeneratedColumn } from 'typeorm';
import { ApiHideProperty, ApiProperty } from '@nestjs/swagger';
import { CultivoSiembra } from '../../cultivo/entities/cultivo.entity';
import { DiaControlInsumo } from './dia_control_insumo.entity';
@Entity('dias_control')
export class DiaControl extends BaseEntity {
@PrimaryGeneratedColumn('uuid')
id: string;
@ApiProperty({ format: 'date' })
@Column({ type: 'date', nullable: false })
fechaControl: Date;
@Column({ type: 'varchar', unique: false, length: 200, nullable: false })
descripcion: string;
@ApiHideProperty()
@ManyToOne(() => CultivoSiembra, (cultivoSiembra) => cultivoSiembra.id_cultivo)
@JoinColumn([{ name: 'cultivoIdCultivo', referencedColumnName: 'id_cultivo' }])
cultivo: CultivoSiembra;
@Column({ nullable: false })
cultivoIdCultivo: string;
@OneToMany(() => DiaControlInsumo, (diaControlInsumo) => diaControlInsumo.diaControl)
diasControlInsumos: DiaControlInsumo[];
}
|
# Using Restricted Boltzmann Machines to Model Molecular Geometries
Peter Nekrasov, Jessica Freeze, Victor Batista
Yale University, Department of Chemistry
<EMAIL_ADDRESS>
###### Abstract
Precise physical descriptions of molecules can be obtained by solving the
Schrodinger equation; however, these calculations are intractable and even
approximations can be cumbersome. Force fields, which estimate interatomic
potentials based on empirical data, are also time-consuming. This paper
proposes a new methodology for modeling a set of physical parameters by taking
advantage of the restricted Boltzmann machine’s fast learning capacity and
representational power. By training the machine on ab initio data, we can
predict new data in the distribution of molecular configurations matching the
ab initio distribution. In this paper we introduce a new RBM based on the Tanh
activation function, and conduct a comparison of RBMs with different
activation functions, including sigmoid, Gaussian, and (Leaky) ReLU. Finally
we demonstrate the ability of Gaussian RBMs to model small molecules such as
water and ethane.
## I. Introduction
Recent innovations in data science have led to a proliferation of machine
learning techniques, many of which are used to find patterns and categorize
data. Among these models, the restricted Boltzmann machine (RBM) has shown
special promise for its ability to quickly learn the probability distribution
of a training set and extract its key “features.” The RBM is a versatile tool
used in many practical applications ranging from social media services to
product recommendations. While RBMs are becoming increasingly popular for
conventional problems in data science, they are underutilized within the field
of chemistry. Though recently a few sporadic studies have used RBMs to model
self-avoiding walks of polymers [23] and perform quantum electronic structure
calculations [21], no systematic approach has been taken to develop this
software for a multitude of continuous systems.
There are several features which make the use of RBMs conducive to the field
of chemistry. For one, the restricted Boltzmann machine falls under a family
of energy-based models, which means it associates an energy value to any given
state of the machine. Because chemistry calculations constantly utilize energy
terms, the RBM can be adapted to complex physical computations. In this sense,
a trained RBM can serve as a Hamiltonian for a physical system [21].
Furthermore, the RBM’s strength lies in its simplicity: with a simple
structure and sampling algorithm, the RBM is straightforward to use and
efficient to implement.
Imagine the following predicament: a set of molecular geometries or quantum
states is generated either through experimental data or complex ab initio
calculations. While this dataset is thought to be an accurate representation
of the overall distribution, the number of samples is insufficient to perform
any meaningful analysis on the system. Furthermore, one would like to extend
the given representation to a complete ensemble within the phase space. In
this case, RBMs can enable us to learn the overall distribution of physical
parameters and predict points not included in the given dataset (Figure
1).
Figure 1: Given a sparse set of data points (blue), our aim is to train the
model to predict the shape of the overall distribution and extract new data
points (red) which could have been part of the original distribution.
The advantage of this approach is that it requires no prior information about
the system in question. Most approximations of the quantum wave function
require an expression of the constituent energies of the system. Likewise,
molecular dynamics force fields require an understanding of interatomic
potentials such as electrostatic and van der Waals forces. Instead, the RBM
simply utilizes the statistical frequency of the training configurations to
learn an internal representation of their energies. While quantum calculations
are laborious to run, the RBM adopts a simple training and sampling algorithm,
providing us with resounding representational power at a low computational
cost.
In this paper we describe different types of RBMs and criteria for assessing
their performance. We then show how the Gaussian RBM (GRBM) learns diverse
sets of training data and reproduces various distributions. Finally, we use
GRBMs to represent molecular systems with multiple degrees of freedom to show
how they can learn bond and angle energies of small molecules such as H2O and
ethane.
## II. Methods
### Model overview
The RBM consists of two layers of neurons, a visible layer and a hidden layer
(shown in Figure 2). Every visible node $\displaystyle v_{i}$ is connected to
every hidden node $\displaystyle h_{j}$ by a set of weights, and each node has
its own offset, or bias. The state or value of a given node is dependent on
the state of the nodes it is connected to, as well as its bias. For ease of
computation, the values of the weights are stored in a weight matrix
$\displaystyle W$, where $\displaystyle W_{ij}$ represents the connection from
$\displaystyle v_{i}$ to $\displaystyle h_{j}$. Meanwhile, the values of the
visible and hidden biases are stored in bias vectors $\displaystyle a$ and
$\displaystyle b$, respectively. The RBM is “restricted” in the sense that
there are no connections between nodes in the same layer, which simplifies
learning [16].
Hidden layer Visible layer Figure 2: A sample RBM with three visible nodes and
eight hidden nodes (3-8-RBM). Every visible node is connected to every hidden
node, and each node has its own bias (not shown).
The visible layer serves as an input to the machine, where the number of
visible nodes corresponds to the number of variables that make up the data.
Values for the hidden nodes are then calculated by multiplying the visible
nodes by the weights, adding the biases, and then applying some sort of
activation function. This inference step can be viewed as a transformation
from the space of observable parameters to the space represented by the hidden
nodes. The method used for calculating between layers is formalized by a set
of conditional probabilities that appear in the section below. In this paper
we refer to a $\displaystyle n$-$\displaystyle k$-RBM as an RBM with
$\displaystyle n$ visible nodes and $\displaystyle k$ hidden nodes.
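To make the inference step concrete, the following is a minimal numpy sketch (not taken from the paper's released code) of the visible-to-hidden mapping for an $n$-$k$-RBM. The names W, a, b follow the conventions above, and the sigmoid default anticipates the binary RBM discussed below.

```python
import numpy as np

def hidden_activations(v, W, b, activation=lambda x: 1.0 / (1.0 + np.exp(-x))):
    """Map a visible vector v (length n) to hidden activations (length k).

    W has shape (n, k) and b has length k, matching the convention that
    W[i, j] connects visible node i to hidden node j. The activation
    defaults to a sigmoid, as used by the binary RBM described below.
    """
    return activation(b + W.T @ v)

# Example for a 3-8-RBM with random parameters (illustration only).
rng = np.random.default_rng(0)
W = rng.normal(scale=0.1, size=(3, 8))
b = np.zeros(8)
v = rng.normal(size=3)
h = hidden_activations(v, W, b)   # length-8 vector of hidden activations
```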
A notable feature of the RBM, in contrast with other machine learning models,
is that it does not have an “output” in the normal sense. Depending on the
situation, the output of an RBM can be the visible layer, hidden filters, or
energy. RBMs are trained in an unsupervised fashion (without labelling the
data), so the hidden nodes identify their own labels during the course of
training [5]. In a famous application of RBMs by Netflix to provide movie
recommendations (see the Netflix prize [15]), the RBM was trained on a large
set of movie ratings obtained from individual users, where the value of each
visible node corresponded to the rating of a given movie. Based on the simple
pattern that users who like movies from a certain director or genre are likely
to enjoy other movies from that same category, the RBM came to associate
hidden nodes with movie genres or directors. In this way, each hidden node
elucidates a connection or correlation between visible nodes, which is true
for continuous data as well. Note that understanding what each hidden node
represents requires additional post hoc analysis.
### Energy based models
As an energy based model, the RBM associates a scalar measure of energy to
each state of the machine. Usually the energy equation is a combination of
each layer times its respective biases and a term which relates the two
layers. The goal of training is to minimize the overall energy of the RBM with
respect to the training data. Once trained, the RBM associates lower energies
with inputs that fall within the training distribution and higher energies
with those that fall outside the training distribution. This becomes useful
when generating new configurations because the RBM estimates the energy of a
proposed configuration based on where it fits with the training distribution.
$\displaystyle p(h|v)$$\displaystyle p(v|h)$$\displaystyle p(h|v)$Visible
layerHidden layerInitial valuesReconstruction…$\displaystyle
v_{1}$$\displaystyle v_{2}$$\displaystyle v_{3}$$\displaystyle
v_{4}$$\displaystyle h_{1}$$\displaystyle h_{2}$$\displaystyle
h_{3}$$\displaystyle h_{4}$$\displaystyle h_{5}$$\displaystyle
h_{6}$$\displaystyle v_{1}$$\displaystyle v_{2}$$\displaystyle
v_{3}$$\displaystyle v_{4}$$\displaystyle h_{1}$$\displaystyle
h_{2}$$\displaystyle h_{3}$$\displaystyle h_{4}$$\displaystyle
h_{5}$$\displaystyle h_{6}$ Figure 3: The sampling algorithm for a 4-6-RBM
consists in using conditional probabilities to alternate between layers. This
technique, known as Gibbs sampling, is relevant to many aspects of using RBMs,
including estimating the gradient and calculating the error. This RBM happens
to consist of four visible nodes and six hidden nodes.
### Binary RBM
The simplest and most widespread version of the RBM is the binary-binary RBM
(BBRBM), which has binary visible and binary hidden units. In the binary-
binary RBM, an active unit is represented as 1 while inactive units are
represented with 0. For a BBRBM, the energy is given by:
$\displaystyle E(v,h)=-a^{T}v-b^{T}h-v^{T}Wh$
where $\displaystyle v$ is a vector of visible states, $\displaystyle h$ is
the hidden states, $\displaystyle a$ is the visible biases, $\displaystyle b$
is the hidden biases, and $\displaystyle W$ is the weight matrix representing
the connections between visible and hidden nodes. The visible states
$\displaystyle v$ serve as an input to the RBM whereas $\displaystyle a$,
$\displaystyle b$, and $\displaystyle W$ are all parameters learned during
training.
The joint probability of states $\displaystyle v$ and $\displaystyle h$ is
taken from the Boltzmann distribution:
$\displaystyle p(v,h)=\frac{1}{Z}e^{-E(v,h)}$
where $\displaystyle Z$ is the partition function, defined as:
$\displaystyle Z=\sum_{v}\sum_{h}e^{-E(v,h)}$
which is a sum over all possible states of the machine. The partition
function $\displaystyle Z$ serves to normalize the joint probability
distribution so that probabilities sum to 1. From this, we can deduce the
marginal probability of a visible configuration $\displaystyle v$, given by
the sum of probabilities over all possible hidden configurations:
$\displaystyle\displaystyle p(v)$ $\displaystyle\displaystyle=\sum_{h}p(v,h)$
(1) $\displaystyle\displaystyle p(v)$
$\displaystyle\displaystyle=\frac{1}{Z}\sum_{h}e^{-E(v,h)}$ (2)
Because the state of the hidden layer depends on the state of the visible
layer, we must use a conditional probability $\displaystyle p(h|v)$ to
calculate the hidden states. This conditional probability is derived directly
from the energy equation [20], giving us the activation of hidden states:
$\displaystyle p(h=1|v)=\sigma(b+W^{T}v)$
where $\displaystyle\sigma(x)$ represents the sigmoid activation function.
Performing this operation only gives a set of probabilities that each hidden
neuron is active; one must then sample a Bernoulli distribution with the given
probabilities to reach the actual binary states of the hidden layer. Then, the
visible states are computed again from the hidden states using:
$\displaystyle p(v=1|h)=\sigma(a+Wh)$
which gives the probabilities of the visible neurons adopting a value of one.
Figure 3 shows how conditional probabilities are used to sample back and forth
between layers.
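As a concrete illustration of Figure 3, here is a minimal numpy sketch of a single visible-to-hidden-to-visible pass for the binary-binary RBM; the helper names are hypothetical and the parameters follow the conventions above.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gibbs_step_binary(v, W, a, b, rng):
    """One visible -> hidden -> visible pass for a binary-binary RBM."""
    p_h = sigmoid(b + W.T @ v)                        # p(h = 1 | v)
    h = (rng.random(p_h.shape) < p_h).astype(float)   # Bernoulli sample of hidden states
    p_v = sigmoid(a + W @ h)                          # p(v = 1 | h)
    v_new = (rng.random(p_v.shape) < p_v).astype(float)
    return v_new, h
```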
A major drawback of the binary RBM is that it is only able to represent binary
states or bit strings. As most empirical data is real-valued, the Gaussian RBM
was developed to model continuous variables.
### Gaussian RBM
The Gaussian RBM (GRBM), also called the Gaussian-binary RBM, is an effective
tool for modeling real-valued data. While the hidden layer remains binary, the
visible layer can now adopt real values, with an additional parameter
$\displaystyle\sigma$ representing the standard deviation for each visible
node. The equation for the joint energy becomes:
$\displaystyle\displaystyle E_{GRBM}(v,h)=\frac{\left\lVert
v-a\right\rVert^{2}}{2\sigma^{2}}-b^{T}h-\frac{v^{T}}{\sigma^{2}}Wh$ (3)
where $\displaystyle\left\lVert\cdot\right\rVert$ is the Euclidean norm. This
equation is similar to the binary energy except that the visible states are
divided by $\displaystyle\sigma^{2}$ as a form of normalization, and the first
term is replaced by $\displaystyle\frac{\left\lVert
v-a\right\rVert^{2}}{2\sigma^{2}}$ which serves as a parabolic containment of
the visible states. This means that the overall energy increases the further
the visible states $\displaystyle v$ are from the visible biases
$\displaystyle a$. Whereas the states in a binary RBM are bounded by 0 and 1,
it is important that the visible states of a Gaussian RBM are restrained by
this parabolic term in order to prevent them from trailing off.
Similar to the binary RBM, the conditional probability of the hidden states
given the visible states is:
$\displaystyle p(h=1|v)=\sigma(b+W^{T}\frac{v}{\sigma^{2}})$
whereas the conditional probability of the visible states given the hidden
states is:
$\displaystyle p(v\ |\ h)=\mathcal{N}(a+Wh,\sigma^{2})$
where $\displaystyle\mathcal{N}(\mu,\sigma^{2})$ is a Gaussian function with
mean $\displaystyle\mu$ and variance $\displaystyle\sigma^{2}$. In practice
this amounts to adding Gaussian noise after calculating visible states. A
detailed derivation of the conditional distributions can be found in [20].
While initially there appears to be nothing inherently Gaussian about this
energy equation, the probability starts to take on the form of a Gaussian
function when substituted into the Boltzmann distribution:
$\displaystyle\displaystyle p(v,h)$
$\displaystyle\displaystyle=\frac{1}{Z}e^{-\frac{\left\lVert v-a\right\rVert^{2}}{2\sigma^{2}}+b^{T}h+\frac{v^{T}}{\sigma^{2}}Wh}$
If we ignore the second and third terms of the energy equation, we see that
the distribution of visible states follows a Gaussian distribution centered at
the visible bias $\displaystyle a$ with variance $\displaystyle\sigma^{2}$.
Moreover, it has previously been shown using these equations that the RBM is
equivalent to a mixture of Gaussian (MoG) model, with the locations of each
Gaussian represented by the column vectors of the weight matrix (for a
detailed derivation see [11]).
The energy and sampling equations are usually simplified by normalizing the
data to zero mean and unit variance, and then setting $\displaystyle\sigma=1$.
However, one can choose to calculate the $\displaystyle\sigma$ of the data in
advance, or alternatively one can use $\displaystyle\sigma$ as a separate
parameter which is optimized during training. While training data need not
follow a Gaussian distribution, the GRBM works best when modeling this type of
distribution. Fortunately, many distributions found in nature are approximately Gaussian as a consequence of the central limit theorem.
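The two conditional distributions above can be sketched in a few lines of numpy. This is an illustrative sketch rather than the paper's implementation, and it assumes the data have been normalized so that a single scalar σ applies to all visible units.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def grbm_sample_hidden(v, W, b, sigma=1.0, rng=None):
    """p(h = 1 | v) for a Gaussian-binary RBM, plus a Bernoulli sample."""
    if rng is None:
        rng = np.random.default_rng()
    p_h = sigmoid(b + W.T @ (v / sigma**2))
    return p_h, (rng.random(p_h.shape) < p_h).astype(float)

def grbm_sample_visible(h, W, a, sigma=1.0, rng=None):
    """p(v | h) = N(a + W h, sigma^2): the mean plus Gaussian noise."""
    if rng is None:
        rng = np.random.default_rng()
    mean = a + W @ h
    return mean + sigma * rng.normal(size=mean.shape)
```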
### Log Likelihood Estimates
The goal of training the RBM is to maximize the likelihood of the data under
the model. By tweaking the weights and biases, the RBM can associate higher
probabilities with configurations found in the training data. For practical
purposes, we work with the logarithm of the likelihood which allows us to
write products as the sum of logarithms. Since $\displaystyle\log(x)$ is a
monotonically increasing function, maximizing the log-likelihood is the same
as maximizing the likelihood. The log-likelihood for a training sample
$\displaystyle x$ is given by:
$\displaystyle\displaystyle\mathcal{L}(x)$ $\displaystyle\displaystyle=\log
p(x)$ $\displaystyle\displaystyle=\log\sum_{h}p(x,h)$
$\displaystyle\displaystyle=\log\sum_{h}\frac{e^{-E(x,h)}}{Z}$
$\displaystyle\displaystyle=\log\sum_{h}e^{-E(x,h)}-\log Z$
using (1) and (2). By optimizing the log likelihood of the training data, we
maximize the likelihood of the training samples over all possible samples in
our visible space.
In the case where there is an entire set of training samples, we take the
average log-likelihood $\displaystyle\hat{\ell}$ by computing the expectation
of the log-likelihood over all the samples:
$\displaystyle\hat{\ell}=\bigg{\langle}\log\sum_{h}e^{-E(x,h)}\bigg{\rangle}-\log
Z$
Average log-likelihood is a rigorous way of monitoring the training and
convergence of an RBM and demonstrating its modeling capacity in statistical
terms. The most difficult part about estimating this likelihood is calculating
the partition function $\displaystyle Z$, which is intractable in most cases.
In this study we use importance sampling to estimate $\displaystyle Z$
whenever calculating log-likelihood (see [8] for more details).
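For the Gaussian-binary RBM with σ = 1, the sum over binary hidden states can be carried out analytically, which makes the unnormalized term in the log-likelihood easy to compute; only log Z must be estimated separately (e.g., by importance sampling as in [8]). The sketch below is illustrative and uses hypothetical helper names.

```python
import numpy as np

def softplus(x):
    return np.logaddexp(0.0, x)   # log(1 + e^x), numerically stable

def grbm_unnormalized_log_marginal(X, W, a, b):
    """log sum_h exp(-E(x, h)) for each row of X, for a GRBM with sigma = 1.

    Summing the binary hidden units out analytically gives
    -||x - a||^2 / 2 + sum_j softplus(b_j + (W^T x)_j).
    """
    quad = -0.5 * np.sum((X - a) ** 2, axis=1)
    hidden = np.sum(softplus(b + X @ W), axis=1)
    return quad + hidden

def average_log_likelihood(X, W, a, b, log_Z):
    """<log p(x)> over the rows of X, given an estimate of log Z
    obtained separately (e.g. by importance sampling)."""
    return np.mean(grbm_unnormalized_log_marginal(X, W, a, b)) - log_Z
```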
### Training algorithm
The gradient of $\displaystyle\hat{\ell}$, which is given by the derivative of
$\displaystyle\hat{\ell}$ with respect to the model parameters
$\displaystyle\theta$, comes out to be the difference between the data-based
distribution and the distribution given by the entire model:
$\displaystyle\frac{d\hat{\ell}}{d\theta}\propto\bigg{\langle}\frac{dE(x,h)}{d\theta}\bigg{\rangle}_{x}-\bigg{\langle}\frac{dE(v,h)}{d\theta}\bigg{\rangle}_{v}$
where $\displaystyle x$ are explicit training samples and $\displaystyle v$
are samples from the model distribution.
Computing the partial derivatives of the energy function with respect to each
parameter provides us with the gradient approximations for the weights and
biases:
$\displaystyle\frac{\partial\hat{\ell}}{\partial
a}\propto\langle{x}-a\rangle_{{x}}-\langle v-a\rangle_{v}$
$\displaystyle\frac{\partial\hat{\ell}}{\partial b}\propto\langle
p(h=1|{x})\rangle_{{x}}-\langle p(h=1|v)\rangle_{v}$
$\displaystyle\frac{\partial\hat{\ell}}{\partial W}\propto\langle{x}\
p(h=1|{x})^{T}\rangle_{{x}}-\langle v\ p(h=1|{v})^{T}\rangle_{v}$
$\frac{\partial\hat{\ell}}{\partial\sigma}\propto\bigg{\langle}\frac{\left\lVert x-a\right\rVert^{2}-2x^{T}Wp(h=1|x)}{\sigma^{3}}\bigg{\rangle}_{{x}}-\bigg{\langle}\frac{\left\lVert v-a\right\rVert^{2}-2v^{T}Wp(h=1|v)}{\sigma^{3}}\bigg{\rangle}_{v}$
These gradients are used as the update rules for the RBM. Typically training
is done in batches, and the gradients are computed after each batch. The
gradients are then added to the existing RBM parameters which gives rise to a
new set of weights and biases. While the first term for each gradient can be
calculated directly from the training data, the second term is almost always
intractable, as it requires independent samples from an unknown model
distribution.
Several algorithms are currently available for estimating the second term. The
most widely used algorithm is Contrastive Divergence. In Contrastive
Divergence, sampling between visible and hidden layers is used to create a
Markov chain (as shown in Figure 3). The Markov chain is initialized at a
training point, and the conditional probabilities are used to get the visible
and hidden states after $\displaystyle k$ iterations, in a process known as
Gibbs sampling. The value of these layers after sampling serves as a
sufficient estimation of the model’s expectation. Typically $\displaystyle k$
is set to 1, as it provides a good estimation of the gradient and also
minimizes computation time.
Persistent Contrastive Divergence (PCD) is another algorithm which has been
proposed for estimating the model distribution. Instead of restarting the
Markov chain for each data point, one persistent Markov chain is retained in
memory and extended after each batch. While its success has mostly been
reported in binary RBMs [18], some have had success in using PCD to train
Gaussian RBMs [3]. A reported improvement on PCD is the Parallel Tempering
algorithm (PT), which uses multiple Markov chains at different temperatures
with a certain probability that states from different chains will swap [2].
The reasoning behind PT is that having different temperatures will ensure that
the Markov chain is fully exploring high energy states. Though these
algorithms have all been successfully implemented in binary RBMs, there is
little evidence that they work in Gaussian RBMs. In practice, we found that
both PCD and PT led to divergence unless a very small learning rate was used
($\displaystyle\alpha=0.0001$), which led to long training times with marginal
improvements in likelihood. Overall, CD-1 was simplest to use and almost never
led to divergence. Our results match those in [11] which conclude that CD is
the best algorithm currently suited for training GRBMs.
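Below is a minimal sketch of one CD-1 update for a GRBM with σ = 1, following the gradient expressions above; the batch layout, learning rate, and helper names are illustrative assumptions rather than the paper's released code.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def cd1_update(X, W, a, b, lr=0.01, rng=None):
    """One CD-1 parameter update for a GRBM (sigma = 1) on a batch X of shape (N, n).

    The positive phase uses the data; the negative phase uses a single
    Gibbs step started at the data, as described in the text.
    """
    if rng is None:
        rng = np.random.default_rng()
    # Positive phase: hidden probabilities given the data
    ph_data = sigmoid(b + X @ W)
    # One Gibbs step: sample h, then reconstruct v with Gaussian noise
    h = (rng.random(ph_data.shape) < ph_data).astype(float)
    V = a + h @ W.T + rng.normal(size=X.shape)
    ph_model = sigmoid(b + V @ W)
    # Gradient estimates, averaged over the batch
    N = X.shape[0]
    dW = (X.T @ ph_data - V.T @ ph_model) / N
    da = np.mean(X - a, axis=0) - np.mean(V - a, axis=0)
    db = np.mean(ph_data - ph_model, axis=0)
    return W + lr * dW, a + lr * da, b + lr * db
```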
### Monte Carlo sampling of an RBM
Because the RBM provides us with a measure of energy, we can sample new
configurations from a trained RBM using the Metropolis Monte Carlo algorithm.
This is done by initializing a random configuration and evaluating its energy.
Then for each iteration, a new sample $\displaystyle x^{\prime}$ is generated
by adding a random displacement vector to the previous sample $\displaystyle
x$ and evaluating the energy of the new sample. If the energy is lower than
the previous, the new sample is accepted as part of the simulation. If not,
the acceptance ratio is calculated using the Boltzmann distribution:
$\displaystyle\frac{p(x^{\prime})}{p(x)}=\exp{\left(\frac{E(v,h)-E(v^{\prime},h^{\prime})}{k_{B}T}\right)}$
Finally we generate a random number between 0 and 1, and if our number falls
below this acceptance ratio, we keep the configuration as part of our
ensemble. The Boltzmann constant $\displaystyle k_{B}$ and temperature
parameter $\displaystyle T$ are usually set to one, though they may be useful
in simulating higher temperatures or exploring higher energy states.
The benefit of this Metropolis Monte Carlo method is that the normalization
for the RBM need not be known. Whereas the true probability density of the
joint states requires knowledge of the partition function $\displaystyle Z$,
which is intractable in most cases, the term cancels when calculating the
acceptance ratio at each step in the simulation. The result is a Monte Carlo
simulation which follows the probability density given by the model.
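The following sketch illustrates this sampling procedure for a trained GRBM (σ = 1). Rather than proposing joint (v, h) states, it marginalizes the binary hidden units analytically and runs the Metropolis chain over visible configurations only, which leaves the model distribution over v unchanged; the step size and chain length are illustrative.

```python
import numpy as np

def grbm_free_energy(v, W, a, b):
    """-log sum_h exp(-E(v, h)) up to the constant log Z, for sigma = 1."""
    return 0.5 * np.sum((v - a) ** 2) - np.sum(np.logaddexp(0.0, b + W.T @ v))

def metropolis_sample(W, a, b, n_samples=1000, step=0.05, rng=None):
    """Metropolis Monte Carlo over the visible space of a trained GRBM.

    k_B * T is set to 1 as in the text; Z cancels in the acceptance ratio.
    """
    if rng is None:
        rng = np.random.default_rng()
    v = rng.normal(size=a.shape)          # random initial configuration
    f = grbm_free_energy(v, W, a, b)
    samples = []
    for _ in range(n_samples):
        v_new = v + step * rng.normal(size=v.shape)   # random displacement
        f_new = grbm_free_energy(v_new, W, a, b)
        # Accept if the energy decreases, otherwise with Boltzmann probability
        if f_new < f or rng.random() < np.exp(f - f_new):
            v, f = v_new, f_new
        samples.append(v.copy())
    return np.array(samples)
```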
### Computational complexity
As we can see, the energy calculations for an RBM are much simpler than the
energy calculations for a quantum system. A typical Hartree-Fock calculation
has computational complexity $\displaystyle\mathcal{O}(n^{4})$, where
$\displaystyle n$ is the total number of basis functions, since the number of
two-electron integrals necessary to build the Fock matrix is $\displaystyle
n^{4}$ [4]. In practice this often becomes $\displaystyle\mathcal{O}(n^{3})$
as most programs will ignore integrals that are close to zero. Depending on
the choice of basis set and the size of the atoms, one can have anywhere from
3 to 100 basis functions for one given atom. Therefore the complexity becomes
$\displaystyle\mathcal{O}(b^{3}a^{3})$ where $\displaystyle b$ is the average
number of basis functions per atom and $\displaystyle a$ is the number of
atoms. Because the complexity scales at least cubically with system size,
these calculations become quite expensive as one increases either the atoms or
basis functions included.
Meanwhile, RBM energy computations have complexity
$\displaystyle\mathcal{O}(NH)$ where $\displaystyle N$ is the number of
visible nodes and $\displaystyle H$ is the number of hidden nodes. In the
proposed method, only three nodes are needed per additional atom, as compared
to the large number of basis functions needed in Hartree-Fock calculations.
Thus the complexity is $\displaystyle\mathcal{O}(3aH)$, expressed in terms of
the number of atoms $\displaystyle a$. If we take the number of hidden nodes
to be the same as the number of visible nodes, then the computational
complexity grows quadratically with system size, and increasing atom size has
no impact on the complexity of calculations. Moreover these calculations are
performed using matrix multiplication which is simpler than integration.
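For reference, the joint energy of equation (3) reduces to a couple of matrix-vector products, which is the source of the $\displaystyle\mathcal{O}(NH)$ cost. A minimal numpy sketch (illustrative, using the σ² convention of equation (3)):

```python
import numpy as np

def grbm_energy(v, h, W, a, b, sigma=1.0):
    """Joint energy of a GRBM state, evaluated with O(N*H) multiply-adds."""
    return (np.sum((v - a) ** 2) / (2 * sigma**2)
            - b @ h
            - (v / sigma**2) @ W @ h)
```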
* Binary visible, binary hidden ([15], [5], [9]): $\displaystyle p(h|v)=\sigma(b+W^{T}v)$; $\displaystyle p(v|h)=\sigma(a+Wh)$; $\displaystyle E(v,h)=-a^{T}v-b^{T}h-v^{T}Wh$.
* Gaussian visible, binary hidden ([20], [24], [1]): $\displaystyle p(h|v)=\sigma(b+W^{T}\frac{v}{\sigma})$; $\displaystyle p(v|h)=\mathcal{N}(a+Wh,\sigma^{2})$; $\displaystyle E(v,h)=\frac{\left\lVert v-a\right\rVert^{2}}{2\sigma^{2}}-b^{T}h-\frac{v^{T}}{\sigma}Wh$.
* Gaussian visible, Gaussian hidden ([7], [13]): $\displaystyle p(h|v)=\mathcal{N}(b+\sigma_{h}W^{T}\frac{v}{\sigma_{v}},\sigma_{h}^{2})$; $\displaystyle p(v|h)=\mathcal{N}(a+\sigma_{v}W\frac{h}{\sigma_{h}},\sigma_{v}^{2})$; $\displaystyle E(v,h)=\frac{\left\lVert v-a\right\rVert^{2}}{2\sigma_{v}^{2}}+\frac{\left\lVert h-b\right\rVert^{2}}{2\sigma_{h}^{2}}-\frac{v^{T}}{\sigma_{v}}W\frac{h}{\sigma_{h}}$.
* Gaussian visible, ReLU hidden ([12]): $\displaystyle p(h|v)=\max(0,\eta+\mathcal{N}(0,\sigma(\eta)))$; $\displaystyle p(v|h)=\mathcal{N}(a+Wh,\sigma^{2})$; joint energy: N/A.
* Gaussian visible, Leaky ReLU hidden ([10]): $\displaystyle p(h|v)=\max(\mathcal{N}(c\eta,c),\mathcal{N}(\eta,1))$ with $\displaystyle c\in(0,1)$; $\displaystyle p(v|h)=\mathcal{N}(a+Wh,\sigma^{2})$; $\displaystyle E(v,h)=\frac{\left\lVert v-a\right\rVert^{2}}{2\sigma^{2}}-b^{T}h-\frac{v^{T}}{\sigma}Wh+\sum_{h_{j}>0}(\frac{h_{j}^{2}}{2}+\log\sqrt{2\pi})+\sum_{h_{j}\leq 0}(\frac{h_{j}^{2}}{2c}+\log\sqrt{2c\pi})$.
* Gaussian visible, (Noisy) Tanh hidden (see appendix): $\displaystyle p(h|v)=\mathcal{N}(\tanh(\eta),1)$; $\displaystyle p(v|h)=\mathcal{N}(a+Wh,\sigma^{2})$; $\displaystyle E(v,h)=\frac{\left\lVert v-a\right\rVert^{2}}{2\sigma^{2}}-b^{T}h-\frac{v^{T}}{\sigma}Wh+\sum^{J}_{j=1}(h_{j}\tanh^{-1}(h_{j})+\frac{1}{2}\ln(1-h_{j}^{2}))$.

Table 1: Different versions of RBMs found in the literature, where $\displaystyle\eta=b+W^{T}\frac{v}{\sigma}$. This table is intended to represent four main activation functions which are prominent in neural networks: linear (Gaussian), sigmoid (binary), ReLU, and Tanh. In the design of a restricted Boltzmann machine, one typically starts with a function that defines the overall energy $\displaystyle E(v,h)$, from which the conditional probabilities are derived.

### Advantages of different activation functions
Figure 4: Comparison of RBM performance on natural image patches (left) and H2O Hartree-Fock data (right). Learning on natural image patches was performed using 196-196-RBMs, while H2O learning was performed using 3-8-RBMs. In both cases, the Tanh RBM achieved the maximum overall log-likelihood; however, it has trouble maintaining convergence on datasets with few parameters. Though the GRBM achieved only the second best performance on both datasets, it trains more slowly and shows better consistency across different data.
Figure 5: Training and simulation of a GRBM using two different toy sets of
data. The top row was trained using a GRBM-2-2 (two visible nodes and two
hidden nodes), while the bottom row was trained using a GRBM-2-4 (two visible
nodes and four hidden nodes). Each hidden node represents an independent
component in the data, shown by the arrows on the model distributions in the
third column. The independent components of the simulated data (shown by the
arrows on the second column) align with the independent components on the
training data (first column). The RBM parameters were averaged over 100 trials
of training.
Figure 6: Bond and angle energies evaluated using Hartree-Fock (top row) and a
GRBM-3-6 (bottom row). The GRBM is able to provide a close approximation of
the original energy contour based on a limited set of points sampled from the
original distribution.
Figure 7: Ethane geometries generated by Hartree-Fock and GRBM. An ethane
molecule has 18 internal coordinates, represented by the entries in the
z-matrix above. That specific set of coordinates results in the ethane
molecule drawn in the top right corner. This figure displays the posterior
distributions for each pair of parameters for the ethane molecule generated by
Hartree-Fock (black) and GRBM (red). The GRBM is able to reproduce the
molecular geometries by fitting Gaussians to the original data.
### Generating training datasets
To assess the validity of the methodology and accuracy of training over a wide
range of distributions, we trained the RBM on a set of toy datasets. In the
first toy dataset, random samples were taken from a Laplacian distribution and
fitted to the line $\displaystyle y=x$, with Gaussian noise of unit variance
added in both dimensions. The second dataset was generated by sampling from
two independent Laplacian distributions and using a mixing matrix to merge the
sources. An observable $\displaystyle x$ is generated from independent
Laplacian sources $\displaystyle s=(s_{1},s_{2})$ using a mixing matrix
$\displaystyle A$:
$\displaystyle x=As$
where $\displaystyle A$ is a linear transformation from the source space to
the observable space. Given a set of observables, the RBM is capable of
estimating the mixing matrix and recovering source data [11]. In the context
of this task, independent component analysis is the standard computational
method for separating observed data into source components. For this reason,
the independent components of the original data were compared with the
independent components of the data generated by RBM. Independent component
analysis was performed using the FastICA algorithm.
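A minimal sketch of the second toy dataset and the FastICA step follows; the mixing matrix values below are illustrative, since the paper does not specify them.

```python
import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.default_rng(0)

# Two independent Laplacian sources mixed into observables via x = A s.
# The mixing matrix values are illustrative.
A = np.array([[1.0, 0.5],
              [0.5, 1.0]])
S = rng.laplace(size=(10_000, 2))   # one source sample per row
X = S @ A.T                         # observable samples used for training

# FastICA can recover the independent components from either the training
# data or the RBM-generated data, for the comparison described above.
ica = FastICA(n_components=2, random_state=0)
ica.fit(X)
estimated_mixing = ica.mixing_      # estimate of A (up to scale and permutation)
```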
Moving to more complex data, different RBMs were also evaluated on natural
image patches taken from [11]. These image patches come from randomly sampling
the greyscale images found in the van Hateren natural image database [19].
Each patch consists of 14 x 14 pixels, and for our purposes we used only
10,000 of these patches during training.
Meanwhile, molecular training data for H2O and ethane was generated by
performing molecular Monte Carlo simulations and evaluating the energy of each
proposed sample using the restricted Hartree-Fock method. The Hartree-Fock
calculations made use of the basis set cc-pVDZ because it had the best
agreement with literature values for the bond angle and bond length of H2O.
These calculations were run using the PySCF module for quantum chemistry.
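A single Hartree-Fock energy evaluation of the kind called for each proposed Monte Carlo geometry might look as follows with PySCF; the geometry shown here is illustrative, not one of the actual training samples.

```python
from pyscf import gto, scf

# One restricted Hartree-Fock energy evaluation for a proposed H2O geometry
# (bond lengths and angle below are illustrative, in Angstroms).
mol = gto.M(
    atom="""
    O  0.0000  0.0000  0.0000
    H  0.9600  0.0000  0.0000
    H -0.2404  0.9294  0.0000
    """,
    basis="cc-pvdz",
    unit="Angstrom",
)
mf = scf.RHF(mol)
energy = mf.kernel()   # total RHF energy in Hartree
```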
### Different RBMs
There are many types of RBMs apart from those already discussed. By altering
the equation for the overall energy, a variety of hidden and visible units can
be implemented. Table 1 summarizes the energy equations and conditional
probabilities for a few different types of RBMs. These RBMs are mostly defined
by the activation functions used to calculate the hidden layer, including
sigmoid, Gaussian, and ReLU.
Noticing the absence of an RBM with Tanh activation function, we derived a new
energy function and conditional probabilities for an RBM with Tanh hidden
units using the method outlined in [14] (see appendix for derivation). After
developing the Tanh RBM, we implemented the full range of RBMs defined in
Table 1, which use common activation functions from machine learning. This
implementation can be found at https://github.com/peter1255/RBM_chem.
Depending on the correlations found within the data, different activation
functions may be better for modeling different data. It has been shown that
non-linear hidden units expand the capabilities of the RBM by allowing the
model to represent non-linear correlations between visible units [14]. The
type of non-linearity chosen may have an effect on the model’s representation
of the data, though a more detailed comparison of RBMs is needed.
## III. Results and Discussion
### Comparison of RBMs
The RBMs found in Table 1 were implemented and trained on both natural image
data and H2O geometries. During 20 epochs of training, the performances of
RBMs were compared by computing the log-likelihood. These
results are shown in Figure 4.
First proposed by [12], the ReLU RBM trains well but lacks an appropriate
joint energy equation [17], making it impossible to calculate log-likelihood
or generate simulations. Though we tried using the Gaussian joint energy
equation (3) in test simulations not shown here, values quickly diverged as
large hidden terms outweighed the parabolic containment term. Making a slight
modification of ReLU into Leaky ReLU gives a viable energy equation which can
be successfully trained and used for both sampling and calculating log-
likelihood [10]. While this version of the RBM has not yet reached widespread
use, we show in Figure 4 that it performs on a level comparable with the previously
established GRBM.
Out of all the RBMs tested, the Tanh RBM achieved the best maximum log-
likelihood on both natural image data and H2O molecular geometries. However,
when trained on data sets with few parameters, like H2O data, the Tanh RBM has
trouble maintaining convergence and tends to diverge after reaching its
maximum. While the Tanh RBM shows great promise for representing a variety of
data, additional research must be done to learn how to prevent divergence in
general cases.
The model that achieves second-best performance in terms of log-likelihood is
the Gaussian RBM, which is quite close in the case of molecular data (Figure
4). For this reason, we selected this version of the model to test the
proposed simulation method on molecular data.
### Modeling two-dimensional mixtures
Figure 5 compares toy datasets with the datasets generated by a trained GRBM
using the Monte Carlo method outlined above. The left column displays the
original datasets, while the middle column displays the data generated by RBM.
The two datasets appear quite similar to one another, though some of the
pointedness in the original data is lost through the RBM’s Gaussian
probability distribution. After performing independent component analysis on
both training and simulated data, we can see that the independent components
(shown using arrows) from the simulated data (middle column) match those from
the original data (left column).
Then, evaluating the energy of each point in the visible space using equation
(3), we graphed the energy contour represented by the GRBM (right column). By
moving along this energy contour we are able to generate the states
represented in the middle column. Because the Monte Carlo method favors lower
energy states, most of the samples lie within the middle of the contour.
Furthermore, the arrows represented by each hidden node of the RBM match the
independent components of the reconstructed data, showing that the Monte Carlo
sampling method preserves the original components of the data.
Furthermore, very few hidden nodes were required for reproducing the data. In
the case of the linear distribution which has only one independent component,
only two hidden nodes were needed. In the case of the cross shaped
distribution, only four hidden units were needed. In general, two hidden nodes
are needed for each independent component in the data: because the hidden
nodes are binary, they can only represent their positive vector span.
Therefore an additional hidden node is needed to represent the opposite
direction. This also provides a useful heuristic for determining the number of
hidden nodes to include in an RBM. In this case, using any more arrows would
be redundant, as the arrows begin to overlap.
### Modeling molecular geometries
After learning toy distributions, the RBM was trained on molecular geometries
of H2O generated by Hartree-Fock. Since the geometry of H2O is defined by
three parameters (two bonds and one angle), three visible nodes were included
in the GRBM. The GRBM was trained on a set of 10,000 geometries. Figure 6
displays the overall bond and angle energies evaluated using Hartree-Fock (top
row) and GRBM (bottom row).
Not only is the energy contour of the RBM similar to the energy determined by
quantum methods, the RBM was able to pick up on subtleties including the
rotated parabola which defines the bond and angle energies (first and second
columns). Looking at the bond and angle energies given by the RBM, the left
arm of the parabola shows the energy associated with atom-atom repulsion,
while the right side of the graph shows the energy associated with atom-atom
attraction. Both methods match the literature values for the bond length and
angle of H2O.
Similar to the previous figure, energy contours (third and fourth columns)
were generated by fixing one parameter to its given minimum and evaluating
the energy with respect to the other two parameters. As we can see, the energy
contours inferred by GRBM are quite similar to the original contours given by
Hartree-Fock. Though the stratification of the contour layers is slightly
different (a fundamental limitation of the RBM model structure) the overall
shape of the contours is quite similar. The GRBM accurately represents the
relationship between different data parameters.
Finally, we use the RBM to model a larger molecule: ethane. Though it only
consists of 8 atoms, ethane requires 18 internal coordinates to represent all
its various bonds, angles, and dihedrals. In general, a molecule with
$\displaystyle n$ atoms requires $\displaystyle 3n-6$ unique coordinates,
which can be stored in the form of a z-matrix. Therefore we train an RBM with
18 visible nodes on the internal ethane coordinates, and then sample from it
to generate new geometries.
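A small sketch of the bookkeeping implied here: counting the 3n − 6 internal coordinates and normalizing the z-matrix entries before training. The normalization convention follows the GRBM section; the helper names are hypothetical.

```python
import numpy as np

def num_internal_coordinates(n_atoms):
    """3N - 6 unique internal coordinates for a non-linear molecule."""
    return 3 * n_atoms - 6

assert num_internal_coordinates(3) == 3    # H2O: two bonds and one angle
assert num_internal_coordinates(8) == 18   # ethane: bonds, angles, dihedrals

def normalize(Z):
    """Normalize an (N_samples x 18) array of z-matrix entries to zero mean
    and unit variance before GRBM training, as described above."""
    mean, std = Z.mean(axis=0), Z.std(axis=0)
    return (Z - mean) / std, mean, std
```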
Figure 7 shows the difference between the original dataset (black) and the
GRBM generated data (red). The reconstructed data is able to approximate
molecular geometries by fitting Gaussian curves to the original data. For
simple unimodal data, the GRBM does a good job of reproducing the existing
shape of the data. However, for more complex distributions (i.e. the bimodal
dihedral angle found in coordinate 15 of Figure 7) the GRBM is unable to provide a
fit for this complex pattern and instead regresses to the mean in between the
two modes. Nevertheless, the rest of the marginal distributions are well
represented, and the GRBM captures the essence of the joint distributions.
Through our Monte Carlo sampling algorithm, the GRBM provides an accurate
picture of the dynamics of an ethane molecule.
Using the RBM, the energy of any configuration can be evaluated quickly. By scaling the energy units of the RBM to proper Hartree units, we
could use the RBM to represent bond and angle energies without having to
perform any quantum calculations. Moreover, because of the low computational
cost in generating states of a water molecule, the technique here could also
be extended to model an entire body of liquid water in an aqueous solution.
## IV. Conclusion
In this paper, we have demonstrated the usefulness of RBMs in modeling complex
molecular systems. Because the model is adaptable to a wide variety of data,
the proposed methodology can be used for a variety of problems in chemistry.
Furthermore, the relative simplicity and efficiency of the model should make
it accessible to a wider scientific audience. The RBM is a useful method for
generating a complete molecular ensemble given a sparse set of data. We hope
that the RBM will allow for a new cycle of experiment and theory, where
samples generated from experiments are treated with RBMs to get more
information about ensemble systems.
### Note
The software package used for this study along with a tutorial for how to use
it to model molecular data is available on Github at
https://github.com/peter1255/RBM_chem.
## References
* [1] Cho, K.H., Raiko, T., Ilin, A. Gaussian-Bernoulli deep Boltzmann machine. Proceeding of the The International Joint Conference on Neural Networks (IJCNN) 1–7 (2013).
* [2] Cho, K., Raiko, T., & Ilin, A. Parallel tempering is efficient for learning restricted Boltzmann machines. Proceedings of the International Joint Conference on Neural Networks (IJCNN) 3246-3253 (2010).
* [3] Courville, A., Bergstra, J., & Bengio, Y. A spike and slab restricted Boltzmann machine. Proceeding of the Society for Artificial Intelligence and Statistics (2011).
* [4] Echenique, P. & Alonso, J. L. A mathematical and computational review of Hartree-Fock SCF methods in quantum chemistry. Molecular Physics 105:3057-3098 (2007).
* [5] Hinton, G. E. & Salakhutdinov, R. R. Reducing the Dimensionality of Data with Neural Networks. Science 313;5786: 504–507 (2006).
* [6] Hinton, G.E. Scholarpedia, 2(5):1668 (2007).
* [7] Karakida, R., Okada, M., Amari, S. Dynamical analysis of contrastive divergence learning: Restricted Boltzmann machines with Gaussian visible units. Neural Netw. 79:78-87 (2016).
* [8] Krause, O., Fischer, A., & Igel, C. Algorithms for estimating the partition function of restricted Boltzmann machines. Artifical Intelligence 278 (2020).
* [9] Larochelle, H.; Bengio, Y. Classification using discriminative restricted Boltzmann machines. Proceedings of the 25th international conference on Machine learning - ICML:536 (2008).
* [10] Li, C.L., Ravanbakhsh, S., & Poczos, B. Annealing Gaussian into ReLU: a new sampling strategy for leaky-ReLU RBM. arXiv preprint arXiv:1611.03879 (2016).
* [11] Melchior, J., Wang, N., Wiskott, L. Gaussian-binary restricted Boltzmann machines for modeling natural image statistics. PLOS ONE 12;3 (2017).
* [12] Nair, V. & Hinton, G. E. Rectified linear units improve restricted boltzmann machines. In ICML (2010).
* [13] Ogawa, S., & Mori, H., A Gaussian-Gaussian-restricted-Boltzmann-machine-based deep neural network technique for photovoltaic system generation forecasting, IFAC-PapersOnLine, 52;4:87-92 (2019).
* [14] Ravanbakhsh, S. et al. Stochastic neural networks with monotonic activation functions. Proceedings of the 19th International Conference on Artificial Intelligence and Statistics (2016).
* [15] Salakhutdinov, R., Mnih, A., & Hinton, G.E. Restricted Boltzmann machines for collaborative filtering. Proceedings of the 24th International Conference on Machine Learning (2007).
* [16] Smolensky, P. Information processing in dynamical systems: Foundations of harmony theory. Parallel Distributed Processing, 1:194-281 (1986).
* [17] Su, Q. et al. Unsupervised Learning with Truncated Gaussian Graphical Models. Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (2017).
* [18] Tieleman, T. Training restricted boltzmann machines using approximations to the likelihood gradient. In ICML 25:1064–1071 (2008).
* [19] van Hateren, J. H., & van der Schaaf, A. Independent Component Filters of Natural Images Compared with Simple Cells in Primary Visual Cortex. In Proceedings of Biological Sciences 359-366 (1998).
* [20] Wang, N., Melchior, J., & Wiskott, L. Gaussian-binary Restricted Boltzmann Machines on Modeling Natural Image Statistics. CoRR. (2014).
* [21] Xia, R. & Kais, S. Quantum machine learning for electronic structure calculations. Nature Communications 9:4195 (2018).
* [22] Yang, E., Ravikumar, P., Allen, G.I., and Liu, Z. Graphical models via generalized linear models. In NIPS (2012).
* [23] Yu, W. et al. Generating the conformational properties of a polymer by the restricted Boltzmann machine. J. Chem. Phys. 151, 031101 (2019).
* [24] Ji Zhang, Hongjun Wang, Jielei Chu, Shudong Huang, Tianrui Li, Qigang Zhao, Improved Gaussian–Bernoulli restricted Boltzmann machine for learning discriminative representations. Knowledge-Based Systems 185 (2019).
## V. Appendix
### Derivation of Tanh energy
First we define the activation function $\displaystyle
f(\eta_{j})=\tanh(\eta_{j})$ where
$\displaystyle\eta_{j}=b_{j}+\sum_{i=1}^{I}W_{ij}v_{i}$ assuming data is
normalized to unit variance. The inverse function is $\displaystyle
f^{-1}(h_{j})=\tanh^{-1}(h_{j})$. The corresponding antiderivatives are:
$\displaystyle\displaystyle F(\eta_{j})$ $\displaystyle\displaystyle=\int
f(\eta_{j})d\eta$ $\displaystyle\displaystyle=\ln{\cosh{\eta_{j}}}+C$
$\displaystyle\displaystyle F^{*}(h_{j})$ $\displaystyle\displaystyle=\int
f^{-1}(h_{j})dh$
$\displaystyle\displaystyle=h_{j}\tanh^{-1}(h_{j})+\frac{1}{2}\ln(1-h_{j}^{2})+C$
Meanwhile, the activation for the visible units is linear so that
$\displaystyle\overline{f}(\nu_{i})=\nu_{i}$ where
$\displaystyle\nu_{i}=a_{i}+\sum^{J}_{j=1}W_{ij}h_{j}$. Following the same
step we get that $\displaystyle\overline{F}(\nu_{i})=\frac{1}{2}\nu_{i}^{2}$
and $\displaystyle\overline{F}^{*}(v_{i})=\frac{1}{2}v_{i}^{2}$.
From Ravanbakhsh et al. [14], the conditional distribution is defined as:
$\displaystyle
p(h_{j}|\eta_{j})=\exp(h_{j}\eta_{j}-F(\eta_{j})-F^{*}(h_{j})+g(h_{j}))$
Replacing the values of $\displaystyle F(\eta_{j})$ and $\displaystyle
F^{*}(h_{j})$ we get the conditional probability:
$\displaystyle\displaystyle p(h_{j}|\eta_{j})$
$\displaystyle\displaystyle=\exp(h_{j}\eta_{j}-\ln{\cosh{\eta_{j}}}-h_{j}\tanh^{-1}(h_{j})-\frac{1}{2}\ln(1-h_{j}^{2})+g(h_{j}))$
$\displaystyle\displaystyle=\exp(\tanh(\eta_{j})\eta_{j}-\ln{\cosh{\eta_{j}}}-\tanh(\eta_{j})\tanh^{-1}(\tanh(\eta_{j}))-\frac{1}{2}\ln(1-\tanh^{2}(\eta_{j}))+g(h_{j}))$
$\displaystyle\displaystyle=\exp(-\ln{\cosh{\eta_{j}}}-\frac{1}{2}\ln(1-\tanh^{2}(\eta_{j}))+g(h_{j}))$
$\displaystyle\displaystyle=\frac{1}{\cosh{\eta_{j}}}\frac{1}{\sqrt{1-\tanh^{2}{\eta_{j}}}}\exp(g(h_{j}))$
$\displaystyle\displaystyle=\frac{1}{\cosh{\eta_{j}}\operatorname{sech}{\eta_{j}}}\exp(g(h_{j}))$
$\displaystyle\displaystyle=\exp(g(h_{j}))$
For simplicity we define $\displaystyle
g(h_{j})=-\frac{1}{2}h_{j}^{2}-\log\sqrt{2\pi}$ so that the conditional
probability distribution becomes a Gaussian with mean $\displaystyle\tanh(\eta_{j})$ and
unit variance, meaning that Gaussian noise is added to the hidden units after
applying the Tanh activation. Similarly we get that $\displaystyle
p(v|\nu)=\mathcal{N}(\nu,1)$, as expected for the Gaussian visible layer.
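A minimal numpy sketch of the resulting sampling rules for the Tanh RBM (illustrative helper names, unit variances as derived above):

```python
import numpy as np

def tanh_sample_hidden(v, W, b, rng=None):
    """Noisy Tanh hidden units: h ~ N(tanh(eta), 1) with eta = b + W^T v."""
    if rng is None:
        rng = np.random.default_rng()
    eta = b + W.T @ v
    return np.tanh(eta) + rng.normal(size=eta.shape)

def tanh_sample_visible(h, W, a, rng=None):
    """Gaussian visible units: v ~ N(a + W h, 1), as derived above."""
    if rng is None:
        rng = np.random.default_rng()
    mean = a + W @ h
    return mean + rng.normal(size=mean.shape)
```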
The joint energy equation was generalized in Yang et al.[22] through the
following:
$\displaystyle E(v,h)=-v^{T}Wh+\overline{F}^{*}(v)+F^{*}(h)$
Replacing $\displaystyle\overline{F}^{*}$ and $\displaystyle F^{*}$ we get the
energy for a joint configuration of the tanh RBM:
$\displaystyle
E(v,h)=-v^{T}Wh+\frac{1}{2}||v||^{2}+\sum^{J}_{j=1}(h_{j}\tanh^{-1}(h_{j})+\frac{1}{2}\ln(1-h_{j}^{2}))$
From the joint energy we can derive the marginal probability of a given
visible configuration $\displaystyle v$:
$\displaystyle\displaystyle p(v)$
$\displaystyle\displaystyle=\frac{1}{Z}\int_{h}e^{-E(v,h)}dh$
$\displaystyle\displaystyle=\frac{1}{Z}\int_{h}e^{v^{T}Wh-\frac{1}{2}||v||^{2}-\sum^{J}_{j=1}(h_{j}\tanh^{-1}(h_{j})+\frac{1}{2}\ln(1-h_{j}^{2}))}dh$
$\displaystyle\displaystyle=\frac{1}{Z}e^{-\frac{1}{2}||v||^{2}}\int_{h}e^{v^{T}Wh-\sum^{J}_{j=1}(h_{j}\tanh^{-1}(h_{j})+\frac{1}{2}\ln(1-h_{j}^{2}))}dh$
$\displaystyle\displaystyle=\frac{1}{Z}e^{-\frac{1}{2}||v||^{2}}\int_{h}e^{\sum^{J}_{j=1}\eta_{j}h_{j}-h_{j}\tanh^{-1}(h_{j})-\frac{1}{2}\ln(1-h_{j}^{2})}dh$
$\displaystyle\displaystyle=\frac{1}{Z}e^{-\frac{1}{2}||v||^{2}}\int_{h}\prod^{J}_{j}e^{\eta_{j}h_{j}-h_{j}\tanh^{-1}(h_{j})-\frac{1}{2}\ln(1-h_{j}^{2})}dh$
$\displaystyle\displaystyle=\frac{1}{Z}e^{-\frac{1}{2}||v||^{2}}\prod^{J}_{j}\int_{h_{j}}e^{\eta_{j}h_{j}-h_{j}\tanh^{-1}(h_{j})-\frac{1}{2}\ln(1-h_{j}^{2})}dh_{j}$
$\displaystyle\displaystyle=\frac{1}{Z}e^{-\frac{1}{2}||v||^{2}}\prod^{J}_{j}e^{\eta_{j}\tanh(\eta_{j})-\tanh(\eta_{j})\tanh^{-1}(\tanh(\eta_{j}))-\frac{1}{2}\ln(1-\tanh^{2}(\eta_{j}))}$
$\displaystyle\displaystyle=\frac{1}{Z}e^{-\frac{1}{2}||v||^{2}}\prod^{J}_{j}e^{-\frac{1}{2}\ln(1-\tanh^{2}(\eta_{j}))}$
$\displaystyle\displaystyle=\frac{1}{Z}e^{-\frac{1}{2}||v||^{2}}\prod^{J}_{j}e^{-\frac{1}{2}\ln(1-\tanh^{2}(b_{j}+\sum_{i=1}^{I}W_{ij}v_{i}))}$
where Z is the partition function.
Since the marginal energy is given by
$\displaystyle E(v)=-\log p(v)=F(v)+\log(Z)$
where $\displaystyle F(v)$ is the free energy of the given configuration, we
have the following expression for the free energy
$\displaystyle
F(v)=\frac{1}{2}||v||^{2}+\sum^{J}_{j}\frac{1}{2}\log\left(1-\tanh^{2}\left(b_{j}+\sum_{i=1}^{I}W_{ij}v_{i}\right)\right)$
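Below is a small NumPy sketch of this free energy under the same assumed shapes as before (W of shape I x J, hidden bias b). Since $\frac{1}{2}\log(1-\tanh^{2}\eta_{j})=-\log\cosh\eta_{j}$, an equivalent and numerically more stable form is also shown; both functions are illustrative assumptions, not code from the paper:

import numpy as np

def free_energy(v, W, b):
    # F(v) = ||v||^2 / 2 + sum_j 0.5 * log(1 - tanh^2(eta_j)), with eta = b + W^T v.
    eta = b + W.T @ v
    return float(0.5 * np.sum(v**2) + 0.5 * np.sum(np.log(1.0 - np.tanh(eta)**2)))

def free_energy_stable(v, W, b):
    # Same quantity via 0.5 * log(1 - tanh^2(eta)) = -log(cosh(eta)),
    # computed as -(logaddexp(eta, -eta) - log 2) to avoid overflow for large |eta|.
    eta = b + W.T @ v
    return float(0.5 * np.sum(v**2) - np.sum(np.logaddexp(eta, -eta) - np.log(2.0)))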
|
import React from 'react';
import {connect} from 'react-redux';
import Slider from 'react-input-slider';
import main from '../../style/main';
import {setVolume} from '../../actions/player';
import VolumeIcon from './VolumeIcon';
import {Wrapper} from './style';
const {colors: {purple}} = main;
const mapStateToProps = storeData => ({
player: storeData.player
});
const mapDispatchToProps = {setVolume};
const connectFunction = connect(mapStateToProps, mapDispatchToProps);
// Presentational volume control: a horizontal slider bound to the player's
// volume. While the player is muted the slider is forced to 0 so the UI
// reflects the mute state.
export const Volume = function({player, setVolume}) {
    const {volume, isMuted} = player;
    const onChange = ({x}) => setVolume(x);
    return (
        <Wrapper>
            <VolumeIcon/>
            <Slider
                axis="x"
                xmin={0}
                xmax={100}
                xstep={1}
                x={isMuted ? 0 : volume}
                onChange={onChange}
                styles={{
                    track: {
                        height: '3px',
                        width: '30%',
                        backgroundColor: 'black',
                        display: 'block',
                        marginLeft: '10px',
                        cursor: 'pointer'
                    },
                    thumb: {
                        height: '13px',
                        width: '13px',
                        bottom: '5px',
                        right: '8px',
                        backgroundColor: purple
                    },
                    active: {
                        backgroundColor: purple
                    }
                }}
            />
        </Wrapper>
    );
};
export default connectFunction(Volume);
|
window.qs = new URLSearchParams(location.search);
window.ws = new WebSocket("ws://localhost:" + qs.get("port") + "/" + qs.get("path"));
window.ws_data = null;
ws.onopen = function(event) {
    // Send the boot URL to the server once the connection is open.
    ws.send({url: qs.get("boot")});
};
ws.onclose = function(event) {
    var reason;
    // See http://tools.ietf.org/html/rfc6455#section-7.4.1
    if (event.code == 1000) {
        reason = "Normal closure, meaning that the purpose for which the connection was established has been fulfilled.";
    } else if (event.code == 1001) {
        reason = "An endpoint is \"going away\", such as a server going down or a browser having navigated away from a page.";
    } else if (event.code == 1002) {
        reason = "An endpoint is terminating the connection due to a protocol error";
    } else if (event.code == 1003) {
        reason = "An endpoint is terminating the connection because it has received a type of data it cannot accept (e.g., an endpoint that understands only text data MAY send this if it receives a binary message).";
    } else if (event.code == 1004) {
        reason = "Reserved. The specific meaning might be defined in the future.";
    } else if (event.code == 1005) {
        reason = "No status code was actually present.";
    } else if (event.code == 1006) {
        reason = "The connection was closed abnormally, e.g., without sending or receiving a Close control frame";
    } else if (event.code == 1007) {
        reason = "An endpoint is terminating the connection because it has received data within a message that was not consistent with the type of the message (e.g., non-UTF-8 [http://tools.ietf.org/html/rfc3629] data within a text message).";
    } else if (event.code == 1008) {
        reason = "An endpoint is terminating the connection because it has received a message that \"violates its policy\". This reason is given either if there is no other suitable reason, or if there is a need to hide specific details about the policy.";
    } else if (event.code == 1009) {
        reason = "An endpoint is terminating the connection because it has received a message that is too big for it to process.";
    } else if (event.code == 1010) { // Note that this status code is not used by the server, because it can fail the WebSocket handshake instead.
        reason = "An endpoint (client) is terminating the connection because it has expected the server to negotiate one or more extension, but the server didn't return them in the response message of the WebSocket handshake. <br /> Specifically, the extensions that are needed are: " + event.reason;
    } else if (event.code == 1011) {
        reason = "A server is terminating the connection because it encountered an unexpected condition that prevented it from fulfilling the request.";
    } else if (event.code == 1015) {
        reason = "The connection was closed due to a failure to perform a TLS handshake (e.g., the server certificate can't be verified).";
    } else {
        reason = "Unknown reason";
    }
    console.log(reason);
    document.title = "Disconnected: " + reason;
};
ws.onerror = function(event) {
    console.log(event);
};
// Hooks applied to every outgoing message: before_actions run first, then a
// per-action handler keyed by data.action, then the message is serialized.
ws.send_before_actions = [];
ws.send_skip_action = false;
ws.send_actions = {};
// Keep the native send bound to the socket so the wrapper below can still
// call it after ws.send has been replaced.
ws.send_handler = ws.send.bind(ws);
ws.send = function send(data) {
    try {
        ws.send_before_actions.forEach(function (before_action) {
            if (!ws.send_skip_action) {
                before_action(data);
            }
        });
        if (!ws.send_skip_action) {
            var action = ws.send_actions[data.action];
            if (action) {
                action(data);
            }
            ws.send_handler(JSON.stringify(data));
        }
    }
    catch(error) {
        console.log(data);
        console.log(error);
        document.title = 'Error on message sent...';
    }
    finally {
        ws.send_skip_action = false;
    }
};
// Hooks and handlers for incoming messages, keyed by the message's "action"
// field: "page" replaces the whole document, "error" surfaces the title.
ws.onmessage_before_actions = [];
ws.onmessage_skip_action = false;
ws.onmessage_actions = {
    page: function (event) {
        var page = document.open("text/html", "replace");
        page.write(ws_data.body);
        page.close();
        window.scrollTo(0, 0);
    },
    error: function (event) {
        console.log(ws_data.title);
        document.title = ws_data.title;
    }
};
ws.onmessage = function(event) {
    try {
        ws.onmessage_before_actions.forEach(function (before_action) {
            if (!ws.onmessage_skip_action) {
                before_action(event);
            }
        });
        if (!ws.onmessage_skip_action) {
            ws_data = JSON.parse(event.data);
            var action = ws.onmessage_actions[ws_data.action];
            if (action) {
                action(event);
            }
        }
    }
    catch(error) {
        console.log(event);
        console.log(error);
        document.title = 'Error on message received...';
    }
    finally {
        ws.onmessage_skip_action = false;
    }
};
|