<filename>src/coord/src/coord/dataflow_builder.rs
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Types and methods for building dataflow descriptions.
//!
//! Dataflows are buildable from the coordinator's `catalog` and `indexes`
//! members, which respectively describe the collection backing identifiers
//! and indicate which identifiers have arrangements available. This module
//! isolates that logic from the rest of the somewhat complicated coordinator.
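//!
//! A minimal usage sketch (illustrative only, not a compilable doctest: it
//! assumes a `Coordinator` value `coord` plus a `name`, `id`, and
//! `index_desc` already obtained, e.g. via `prepare_index_build`):
//!
//! ```ignore
//! let mut builder = coord.dataflow_builder();
//! let dataflow = builder.build_index_dataflow(name, id, index_desc)?;
//! ```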
use ore::stack::maybe_grow;
use dataflow_types::sinks::SinkDesc;
use dataflow_types::IndexDesc;
use super::*;
use crate::error::RematerializedSourceType;
/// Borrows of catalog and indexes sufficient to build dataflow descriptions.
pub struct DataflowBuilder<'a> {
pub catalog: &'a CatalogState,
pub indexes: &'a ArrangementFrontiers<Timestamp>,
pub persister: &'a PersisterWithConfig,
/// A handle to the storage abstraction, which describes sources by their identifiers.
pub storage: &'a dataflow_types::client::Controller<Box<dyn dataflow_types::client::Client>>,
}
impl Coordinator {
/// Creates a new dataflow builder from the catalog and indexes in `self`.
pub fn dataflow_builder<'a>(&'a mut self) -> DataflowBuilder {
DataflowBuilder {
catalog: self.catalog.state(),
indexes: &self.indexes,
persister: &self.persister,
storage: &self.dataflow_client,
}
}
/// Prepares the arguments to an index build dataflow, by interrogating the catalog.
///
/// Returns `None` if the index entry in the catalog is not enabled.
pub fn prepare_index_build(
catalog: &CatalogState,
index_id: &GlobalId,
) -> Option<(String, IndexDesc)> {
let index_entry = catalog.get_by_id(&index_id);
let index = match index_entry.item() {
CatalogItem::Index(index) => index,
_ => unreachable!("cannot create index dataflow on non-index"),
};
if !index.enabled {
None
} else {
Some((
index_entry.name().to_string(),
dataflow_types::IndexDesc {
on_id: index.on,
key: index.keys.clone(),
},
))
}
}
}
impl<'a> DataflowBuilder<'a> {
/// Imports the view, source, or table with `id` into the provided
/// dataflow description.
fn import_into_dataflow(
&mut self,
id: &GlobalId,
dataflow: &mut DataflowDesc,
) -> Result<(), CoordError> {
maybe_grow(|| {
// Avoid importing the item redundantly.
if dataflow.is_imported(id) {
return Ok(());
}
// A valid index is any index on `id` that is known to the dataflow
// layer, as indicated by its presence in `self.indexes`.
let valid_index = self.catalog.enabled_indexes()[id]
.iter()
.find(|(id, _keys)| self.indexes.contains_key(*id));
if let Some((index_id, keys)) = valid_index {
let index_desc = IndexDesc {
on_id: *id,
key: keys.to_vec(),
};
let desc = self
.catalog
.get_by_id(id)
.desc()
.expect("indexes can only be built on items with descs");
dataflow.import_index(*index_id, index_desc, desc.typ().clone(), *id);
} else {
let entry = self.catalog.get_by_id(id);
match entry.item() {
CatalogItem::Table(_) => {
let source_description = self.catalog.source_description_for(*id).unwrap();
let persist_details = None;
dataflow.import_source(*id, source_description, persist_details);
}
CatalogItem::Source(source) => {
if source.connector.requires_single_materialization() {
let source_type =
RematerializedSourceType::for_connector(&source.connector);
let dependent_indexes = self.catalog.dependent_indexes(*id);
// If this source relies on any pre-existing indexes (i.e., indexes
// that we're not building as part of this `DataflowBuilder`), we're
// attempting to reinstantiate a single-use source.
let intersection = self.indexes.intersection(dependent_indexes);
if !intersection.is_empty() {
let existing_indexes = intersection
.iter()
.map(|id| self.catalog.get_by_id(id).name().item.clone())
.collect();
return Err(CoordError::InvalidRematerialization {
base_source: entry.name().item.clone(),
existing_indexes,
source_type,
});
}
}
let source_description = self.catalog.source_description_for(*id).unwrap();
let persist_desc = self
.persister
.load_source_persist_desc(&source)
.map_err(CoordError::Persistence)?;
dataflow.import_source(*id, source_description, persist_desc);
}
CatalogItem::View(view) => {
let expr = view.optimized_expr.clone();
self.import_view_into_dataflow(id, &expr, dataflow)?;
}
_ => unreachable!(),
}
}
Ok(())
})
}
/// Imports the view with the specified ID and expression into the provided
/// dataflow description.
pub fn import_view_into_dataflow(
&mut self,
view_id: &GlobalId,
view: &OptimizedMirRelationExpr,
dataflow: &mut DataflowDesc,
) -> Result<(), CoordError> {
// TODO: We only need to import Get arguments for which we cannot find arrangements.
for get_id in view.global_uses() {
self.import_into_dataflow(&get_id, dataflow)?;
// TODO: indexes should be imported after the optimization process, and only those
// actually used by the optimized plan
if let Some(indexes) = self.catalog.enabled_indexes().get(&get_id) {
for (id, keys) in indexes.iter() {
// Ensure only valid indexes (i.e. those in self.indexes) are imported.
// TODO(#8318): Ensure this logic is accounted for.
if !self.indexes.contains_key(*id) {
continue;
}
let on_entry = self.catalog.get_by_id(&get_id);
let on_type = on_entry.desc().unwrap().typ().clone();
let index_desc = IndexDesc {
on_id: get_id,
key: keys.clone(),
};
dataflow.import_index(*id, index_desc, on_type, *view_id);
}
}
}
dataflow.insert_view(*view_id, view.clone());
Ok(())
}
/// Builds a dataflow description for the index with the specified ID.
pub fn build_index_dataflow(
&mut self,
name: String,
id: GlobalId,
index_description: IndexDesc,
) -> Result<DataflowDesc, CoordError> {
let on_entry = self.catalog.get_by_id(&index_description.on_id);
let on_type = on_entry.desc().unwrap().typ().clone();
let mut dataflow = DataflowDesc::new(name);
self.import_into_dataflow(&index_description.on_id, &mut dataflow)?;
dataflow.export_index(id, index_description, on_type);
// Optimize the dataflow across views, and any other ways that appeal.
transform::optimize_dataflow(&mut dataflow, self.catalog.enabled_indexes())?;
Ok(dataflow)
}
/// Builds a dataflow description for the sink with the specified name,
/// ID, source, and output connector.
///
/// For as long as this dataflow is active, `id` can be used to reference
/// the sink (primarily to drop it, at the moment).
pub fn build_sink_dataflow(
&mut self,
name: String,
id: GlobalId,
sink_description: SinkDesc,
) -> Result<DataflowDesc, CoordError> {
let mut dataflow = DataflowDesc::new(name);
self.build_sink_dataflow_into(&mut dataflow, id, sink_description)?;
Ok(dataflow)
}
/// Like `build_sink_dataflow`, but builds the sink dataflow into the
/// existing dataflow description instead of creating one from scratch.
pub fn build_sink_dataflow_into(
&mut self,
dataflow: &mut DataflowDesc,
id: GlobalId,
sink_description: SinkDesc,
) -> Result<(), CoordError> {
dataflow.set_as_of(sink_description.as_of.frontier.clone());
self.import_into_dataflow(&sink_description.from, dataflow)?;
dataflow.export_sink(id, sink_description);
// Optimize the dataflow across views, and any other ways that appeal.
transform::optimize_dataflow(dataflow, self.catalog.enabled_indexes())?;
Ok(())
}
}
|
def increment_non_pass_counts(self, non_pass_counts):
for phase in non_pass_counts:
if phase in self._phase_statuses:
status, _ = self._phase_statuses[phase]
if status != TEST_PASS_STATUS:
non_pass_counts[phase] += 1 |
Enhanced Interfacial Adhesion of Polydimethylsiloxane (PDMS) by Control of the Crosslink Density. In this paper, we report a simple, fast, and one-step approach to improve the adhesion force of polydimethylsiloxane (PDMS) by incorporating inorganic nanoparticles that can control the physical, mechanical, and adhesion properties of the PDMS. An organic/inorganic PDMS-based composite was fabricated by the hydrosilylation of vinyl-decorated silica nanoparticles (v-SNPs) and the PDMS. The v-SNP/PDMS composite showed a significantly decreased elastic modulus and increased elongation compared with that of pristine SNPs incorporated with the PDMS composite (SNP/PDMS) and pristine PDMS. Furthermore, the v-SNP/PDMS composite exhibited a low glass-transition temperature and sharp crystallization and melting peaks in the differential scanning calorimetry curve compared with those of pristine PDMS and the SNP/PDMS composite. Moreover, the v-SNP/PDMS composite showed a high swelling ratio and crosslinked molecular weight and low gel fraction. These results may originate from the suppression of the PDMS-curing networks as the addition of the v-SNPs creates a low curing density because of the chemical bonding between PDMS and the v-SNPs. Finally, the v-SNP/PDMS composite showed an improvement of ~426% in the adhesion force compared with pristine PDMS and the SNP/PDMS composite. We anticipate that this v-SNP/PDMS composite could be used as a highly adhesive and hydrophobic coating material for various applications in industry. |
# __main__.py
import inspect
import logging
from .eventproducermeta import *
from .event import *
class Foo(EventProducer):
on_update = Event(str)
def __init__(self):
pass
@event(name='on_update', signature=str)
def do_work(self):
self.on_update('done')
@event(name='on_update', signature=str)
def do_more_work(self):
self.on_update('really done')
@event(name='on_call', signature=str)
def __call__(self):
self.on_call('asdfa')
if __name__ == '__main__':
from datetime import datetime
count = [0]
def callback(owner: object, name: str) -> None:
count[0] += 1
total_time = datetime.now()
max_iterations = 100000
steps = 10000
for event_count in range(steps, max_iterations+steps, steps):
start_time = datetime.now()
foo = Foo()
for events in range(event_count):
foo.on_update += callback
# for event in range(event_count):
foo.do_work()
print((datetime.now() - start_time).total_seconds())
print('Took {0} secs'.format((datetime.now() - total_time).total_seconds()))
|
Part of the appeal of Governors Island—the 172-acre public park located right off the southern shore of Manhattan—is the park’s feeling of seclusion. Just 800 yards away from New York City, it’s close enough to visit with a short ferry ride, yet remote enough that patches of the immaculately designed park don’t get cell service.
That treasured sense of disconnect can be a challenge for the park’s planners, though. How do you still provide all of the information and resources that visitors might need out there, without ruining the park’s biggest asset?
The answer, according to the Brooklyn-based architecture firm StudioKCA, is the Mobile Information Unit (MIU)–essentially, a pop-up rest stop that provides Wi-Fi, lighting, and seating, as well as information for your park stay and power for your devices.
StudioKCA was hired by the Friends of Governors Island, the nonprofit in charge of developing the park, which originally asked for stationary charging stations. But with such a sprawling park, the firm thought that it would be most efficient to make the stations mobile. The studio designed it to attach to the back of a custom bicycle, from which the multipurpose MIU unfolds from a three-foot cube into a nine-foot, solar-powered canopy. The idea is that eventually, visitors will be able to pick up a MIU upon arriving on the island and bike the mobile station to any part of the park—then set up camp with everything they might need to power lights, a phone, a stereo, and even a small refrigerator.
For now, the MIUs are still in a prototyping phase, though two of the units in their current form are being tested on the island right now. Members of Friends of Governors Island are riding them around and providing feedback. So far, says StudioKCA principal Jason Klimoski, the most common asks have been for more storage and battery power, both of which the studio is working on adding to a third prototype. The architects are also designing the next iteration of the unit to be powered by pedaling, in addition to its solar-powered canopy.
Klimoski says that the firm will be working on refining the unit over the next year and a half, at which point it plans to offer the MIUs more broadly beyond Governors Island. The firm wants to make them available for purchase for both parks and individuals, and make the units customizable—with options for more battery power, for instance, or units that come equipped with speakers and a mini fridge.
One aspect that is unlikely to change in future versions is a core design feature: the retractable canopy that opens up the cube into a full-fledged information stop. “The most important thing we were looking at was the opening and closing aspect of it,” says Klimoski. “For that we were inspired by flowers that open up during the day and close up again at night.” |
import os
import sys
import time
import json
import logging # Logging
from datetime import date, datetime
import requests
import urllib.parse as parse
from bs4 import BeautifulSoup as bs
from win10toast import ToastNotifier # pip install win10toast
from inspect import currentframe #: PMI
terminalInfo = False #: True
## -- SPECIAL DEFS --
def ping(host):
os.system("cls && ping -n 1 " + host)
def debugLog(def_return=False):
debugFile = 'debug.log'
logging.basicConfig(
filename = debugFile,
encoding = 'utf-8',
format = '%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt = '%Y%m%d%H%M%S',
level = logging.DEBUG)
if def_return: #
return getLastLineContent(debugFile,0,14)
## -- DO DEFS --
def doSyncControl(user_name, method, json_file=None):
date_time = date.today().strftime("%Y-%m-%d")
if json_file == None:
json_file = f'backups/json/{user_name}-alltime.json'
jsonContent = getJsonData(json_file) # Dict
for artistName, artistCount in jsonContent.items():
artistScrobbleCount = getArtistScrobbleCount(user_name, artistName, date_time, method)
print(f'{artistName} -> {artistCount}:{artistScrobbleCount} {artistCount==artistScrobbleCount}')
if jsonContent[artistName] != artistScrobbleCount:
return False, jsonContent.keys()
return True, None
def doJsonUpdate(json_path, new_data):
with open(json_path, 'w') as json_file:
json.dump(new_data, json_file)
def doAlltimeJsonSync(user_name, artist_names):
jsonPath = f'backups/json/{user_name}-alltime.json'
gettingMethod = 'all'
syncDiff, syncArtistsNames = doSyncControl(user_name, gettingMethod)
print(syncDiff)
if not syncDiff:
newData = {}
for artistName in artist_names:
newData[artistName] = getArtistAllScrobbleCount(user_name, artistName)
doJsonUpdate(jsonPath, newData)
def doRunLastNotifier(current_profile_data):
printRunningDef(currentframe())
# Get notifier data
if current_profile_data["last_tracks"] != None:
username = current_profile_data["username"]
song_name = current_profile_data["last_tracks"][0][0]
artist_name = current_profile_data["last_tracks"][0][1]
artistCountUrl = f'https://www.last.fm/user/{username}/library/music/+noredirect/{parse.quote(artist_name)}?date_preset=ALL'
artistCountDom = getDom(getResponse(artistCountUrl))
artistCount = artistCountDom.find_all("p", {"class":"metadata-display"})[0].text
msgLastTrack = f'\nLast track: {song_name} | {artist_name} ({artistCount})'
else:
msgLastTrack = ''
doRunNotifier(
f'Profile: {current_profile_data["display_name"]} (@{current_profile_data["username"]})', # Title
f'Current Scrobbles: {current_profile_data["scrobbled_count"]}{msgLastTrack}') # Content
def doCheckChange(current_profile_data, user_name):
printRunningDef(currentframe())
processPrefix = 'Process: '
while True:
print(f'{processPrefix}Syncing profile..')
newProfileData = getSearchUser(user_name, False)
if current_profile_data != newProfileData:
print(f"{' '*len(processPrefix)}New profile information has been obtained.")
if newProfileData["scrobbled_count"] != current_profile_data["scrobbled_count"]:
doRunLastNotifier(newProfileData)
printStatus(newProfileData, True)
current_profile_data = newProfileData
else:
print(f"{' '*len(processPrefix)}No changes to profile information.")
def doRunNotifier(title_msg=' ', content_msg=' '):
printRunningDef(currentframe())
icoDomain = 'https://www.last.fm'
imgDir = 'images/media'
imgName = 'lastfm.ico'
imgPath = f'{imgDir}/{imgName}'
if not os.path.exists(imgPath): # ico not exist
img_url = f'{icoDomain}{getFaviconUrl(icoDomain)}'
doDownloadImage(imgName, img_url, imgDir)
notifier = ToastNotifier() # class
notifier.show_toast(title_msg, content_msg, imgPath) # title="Notification", msg="Here comes the message", icon_path=None, duration=5, threaded=False
def doDownloadImage(img_name, img_url, img_dir=None, open_mode='wb'): # doDownloadImage('images/avatars', 'MyAvatar', 'AvatarUrl')
printRunningDef(currentframe())
if img_dir != None:
doDirCreate(img_dir)
img_name = f'{img_dir}/{img_name}'
if '.' not in img_name: # If the file name has no extension, take it from the end of the URL.
img_name = f"{img_name}{img_url[img_url.rfind('.'):]}"
if not os.path.exists(img_name):
imgResponse = getResponse(img_url)
imgContent = imgResponse.content
with open(img_name, open_mode) as file:
file.write(imgContent)
def doDirCreate(dir_name):
printRunningDef(currentframe())
dirList = dir_name.split('/')
for d in dirList:
try:
if d == dirList[-1] :
os.mkdir(dir_name)
else:
os.mkdir(d) # Directory Created
except FileExistsError:
pass # Directory already exists
def doCalcAlltimeTodayCount(first_alltime, today_box): # total contribution to artists listened to today
print(first_alltime)
print(today_box)
def doDictJsonSave(json_name, save_dict, json_dir='backups/json', open_mode='w'):
json_ex = '.json'
if json_dir != None:
doDirCreate(json_dir)
json_name = f'{json_dir}/{json_name}'
if json_ex not in json_name[-5:]:
json_name = f'{json_name}{json_ex}'
with open(json_name, open_mode) as json_file:
json.dump(save_dict, json_file)
## -- GET DEFS -- (RETURN)
def getSearchUser(user_name, status_print=True, refresh_bool=True, follow_print=True):
printRunningDef(currentframe())
if user_name:
urlDict, domsDict, responsesDict = {},{},{}
urlDict['user_url'] = "https://www.last.fm/user/" + user_name
# Get profile page
responsesDict["profile_dom"] = getResponse(urlDict['user_url'])
# Get follow pages
if follow_print:
urlDict['fallowing_url'] = urlDict['user_url']+'/following'
urlDict['fallowers_url'] = urlDict['user_url']+'/followers'
responsesDict["following_dom"] = getResponse(urlDict['fallowing_url'])
responsesDict["followers_dom"] = getResponse(urlDict['fallowers_url'])
# Get response doms
for responseKey, responseValue in responsesDict.items():
domsDict[responseKey] = getDom(responseValue)
userProfileInfos = getProfileInfos(domsDict) # Get profile infos
# Prints
if status_print:
printStatus(userProfileInfos, refresh_bool)
return userProfileInfos
def getProfileInfos(doms_dict):
printRunningDef(currentframe())
profileDict = {}
if "profile_dom" in doms_dict:
profileDom = doms_dict["profile_dom"]
profileDict["username"] = getUsername(profileDom)
profileDict["user_avatar"] = getUserAvatar(profileDom)
profileDict["display_name"] = getDisplayName(profileDom)
profileDict["scrobbling_since"] = getProfileSince(profileDom)
profileDict["last_tracks"] = getLastScrobs(profileDom, 3)
profileDict["background_image"] = getBackgroundImage(profileDom)
### !!!
profileDict["scrobbled_count"] = int(getHeaderStatus(profileDom)[0]) # Profile Header: Scrobbles
profileDict["artists_count"] = int(getHeaderStatus(profileDom)[1]) # Profile Header: Artist Count
profileDict["likes_count"] = int(getHeaderStatus(profileDom)[2]) # Profile Header: Loved Tracks
### !!!
profileDict['today_artists'], profileDict['today_tracks'], profileDict['old_today_tracks'] = getTodayListening(profileDict["username"])
profileDict['artist_count_alltime'] = getArtistAllTimeCount(profileDict["username"], profileDict['today_tracks'], profileDict['old_today_tracks']) # username, today_tracks, old_today_tracks
if all(key in doms_dict for key in ('following_dom', 'followers_dom')): # Requires a separate request.
followsDom = [doms_dict["following_dom"], doms_dict["followers_dom"]]
profileDict["follows"] = {}
profileDict["follows"]["following"] = getUserFollowing(followsDom[0]) # Following
profileDict["follows"]["followers"] = getUserFollowers(followsDom[1]) # Followers
profileDict["follows"]["following_gt"] = getUserGT(profileDict["follows"]["following"], profileDict["follows"]["followers"])
# Get Counts
profileDict["follows"]["following_counts"] = int(getUserFollowingCount(followsDom[0])) # Following
profileDict["follows"]["followers_counts"] = int(getUserFollowersCount(followsDom[1])) # Followers
profileDict["follows"]["fb_count"] = int(getDictValueCount(profileDict["follows"]["following_gt"], True))
profileDict["follows"]["no_fb_count"] = int(profileDict["follows"]["following_counts"] - profileDict["follows"]["fb_count"])
return profileDict
def getResponse(response_url):
while True:
printRunningDef(currentframe())
# urlPart1, urlPart2, urlPart3 = response_url.partition("+noredirect/")
# if "/" in urlPart3:
# urlPart3 = urlPart3.replace('/','%2F')
# response_url = f'{urlPart1}{urlPart2}{urlPart3}'
response = requests.get(response_url)
responseCode = response.status_code
if terminalInfo:
print(f'Request: {response_url[:]} : {responseCode}')
if responseCode in range(200,299):
if "https://www.last.fm/" in response_url:
pageContent = getDom(response)
ogUrl = pageContent.find("meta", property="og:url")['content']
if terminalInfo:
print(f'responseUrl = ogUrl {response_url == ogUrl}')
if response_url != ogUrl:
print('URL change detected, correcting the request..')
response_url = ogUrl
continue
return response
print(f'Trying to reconnect to {response_url[19:]} address..')
def getDom(response):
while True:
printRunningDef(currentframe())
pageContent = bs(response.content, 'html.parser')
return pageContent
def getFaviconUrl(site_url): # Fetches the favicon from the given page.
printRunningDef(currentframe())
while True:
iconResponse = getResponse(site_url)
if iconResponse.status_code in range(200,299):
iconDom = getDom(iconResponse)
iconUrl = iconDom.find("link", {"rel":"icon"})['href']
return iconUrl # return '/static/images/favicon.702b239b6194.ico'
def getBackgroundImage(profile_dom):
printRunningDef(currentframe())
backgroundPath = 'images/background'
backgroundName = f'{getUsername(profile_dom)}-bg-{getCurrentSession()}'
try:
backgroundImageUrl = profile_dom.find("div", {"class":"header-background header-background--has-image"})["style"][22:-2]
doDownloadImage(backgroundName, backgroundImageUrl, backgroundPath)
except:
backgroundImageUrl = "No Background (Last.fm default background)"
return backgroundImageUrl # Replaced: background-image: url();
def getUserAvatar(profile_dom):
printRunningDef(currentframe())
avatarPath = 'images/avatar'
avatarName = f'{getUsername(profile_dom)}-av-{getCurrentSession()}'
defaultAvatarId = "8<PASSWORD>"
# defaultImageUrl:("https://lastfm.freetls.fastly.net/i/u/avatar170s/818148bf682d429dc215c1705eb27b98.png")
profileAvatarUrl = profile_dom.find("meta", property="og:image")["content"]
if defaultAvatarId in profileAvatarUrl:
profileAvatarUrl = "No Avatar (Last.fm default avatar)"
else:
doDownloadImage(avatarName, profileAvatarUrl, avatarPath)
return profileAvatarUrl
def getHeaderStatus(profile_dom):
printRunningDef(currentframe())
headerStatus = [0, 0, 0]
headers = profile_dom.find_all("div", {"class": "header-metadata-display"})
for i in range(len(headers)):
headerStatus[i] = headers[i].text.strip()
headerStatus[i] = getRemoval(headerStatus[i],',', int) # Strip the ',' from the value and return it as an int.
return headerStatus
def getRemoval(inside_obj, find_obj=' ', return_type=None):
if return_type == None:
return_type = type(inside_obj)
if type(inside_obj) != str: # String operations cannot be done on an int
inside_obj = str(inside_obj)
if type(find_obj) != str:
find_obj = str(find_obj)
if find_obj in inside_obj:
inside_obj = inside_obj.replace(find_obj,'')
if return_type != type(inside_obj):
if return_type == int:
inside_obj = int(inside_obj)
elif return_type == float:
inside_obj = float(inside_obj)
# print(f'{inside_obj}: {type(inside_obj)}')
return inside_obj
def getUsername(profile_dom):
printRunningDef(currentframe())
profileUserName = profile_dom.find("h1", {"class":"header-title"})
return profileUserName.text.strip()
def getDisplayName(profile_dom):
printRunningDef(currentframe())
profileDisplayName = profile_dom.find("span", {"class":"header-title-display-name"})
return profileDisplayName.text.strip()
def getCurrentSession(get_length=None):
printRunningDef(currentframe())
'''
get_length 14 = %Y%m%d%H%M%S
get_length 12 = %Y%m%d%H%M
get_length 10 = %Y%m%d%H
get_length 8 = %Y%m%d (Default: None)
get_length 6 = %Y%m
get_length 4 = %Y
'''
session = datetime.now().strftime('%Y%m%d%H%M%S') #YearMonthDayHourMinuteSecond
if get_length == None:
session = session[0:8] # %Y%m%d
else:
session = session[:get_length]
return session
def getUserFollowingCount(following_dom):
printRunningDef(currentframe())
while True:
try:
topHeader = following_dom.find("h1", {"class":"content-top-header"}).text # Path
userFollowing = topHeader[topHeader.find("(")+1:topHeader.find(")")] # Value between the parentheses
try:
userFollowing = int(userFollowing) # If it's not a number, fall back to 0 below
except:
userFollowing = 0
return userFollowing
except:
continue
def getUserFollowersCount(followers_dom):
printRunningDef(currentframe())
while True:
try:
topHeader = followers_dom.find("h1", {"class":"content-top-header"}).text # Path
userFollowers = topHeader[topHeader.find("(")+1:topHeader.find(")")] # Value between the parentheses
try:
userFollowers = int(userFollowers) # If it's not a number, fall back to 0 below
except:
userFollowers = 0
return userFollowers
except:
continue
def getUserFollowing(following_dom):
printRunningDef(currentframe())
followingDict = {}
while True:
following = following_dom.find_all(attrs={"class": "user-list-name"})
for userName in following: # Max 30 per page
userName = userName.text.strip()
followingDict[userName] = True
if following_dom.find("li", {"class": "pagination-next"}):
pageNo = following_dom.find("li", {"class": "pagination-next"})
currentFollowingPageUrl = f"https://www.last.fm/user/{getUsername(following_dom)}/following{pageNo.a['href']}"
following_dom = getDom(getResponse(currentFollowingPageUrl)) # current followers page dom
else:
return followingDict
def getUserFollowers(followers_dom):
printRunningDef(currentframe())
followersDict = {}
while True:
followers = followers_dom.find_all(attrs={"class": "user-list-name"})
for userName in followers: # Max 30 per page
userName = userName.text.strip()
followersDict[userName] = True
if followers_dom.find("li", {"class": "pagination-next"}):
pageNo = followers_dom.find("li", {"class": "pagination-next"})
currentFollowersPageUrl = f"https://www.last.fm/user/{getUsername(followers_dom)}/followers{pageNo.a['href']}"
followers_dom = getDom(getResponse(currentFollowersPageUrl)) # current followers page dom
else:
return followersDict
def getUserGT(following_box, followers_box):
printRunningDef(currentframe())
userGt = {}
for userName in following_box:
if userName in followers_box:
userGt[userName] = True
else:
userGt[userName] = False
return userGt
def getProfileSince(profile_dom):
printRunningDef(currentframe())
profileSince = profile_dom.find("span", {"class":"header-scrobble-since"})
return profileSince.text.partition("• scrobbling since")[2].strip() # Take what comes after
def getLastScrobs(profile_dom, get_count):
printRunningDef(currentframe())
lastTracks = {}
for x in range(get_count): # Take up to get_count tracks.
try:
lastTrackDom = profile_dom.find_all("tr", {"class":"chartlist-row chartlist-row--with-artist chartlist-row--with-buylinks js-focus-controls-container"})[x]
lastTrackSongName = lastTrackDom.find("td", {"class":"chartlist-name"}).text.strip()
lastTrackArtist = lastTrackDom.find("td", {"class":"chartlist-artist"}).text.strip()
lastTrackDate = lastTrackDom.find("td", {"class":"chartlist-timestamp"}).text.strip()
lastTracks[x] = [lastTrackSongName,lastTrackArtist,lastTrackDate]
except:
lastTracks = None
break
return lastTracks
def getDictValueCount(dicti, key):
printRunningDef(currentframe())
return sum(key for _ in dicti.values() if _)
def getFollowDict(following_box, followers_box, followback_box):
f = {}
for username in following_box:
f[username] = {}
f[username]['following'] = True
if username in followers_box:
f[username]['follower'] = True # Second flag: True if they also follow you, False if they don't
f[username]['user_fb'] = followback_box[username]
else:
f[username]['follower'] = False
f[username]['user_fb'] = followback_box[username]
f[username]['link'] = f'https://last.fm/user/{username}'
for username in followers_box:
f[username] = {}
f[username]['follower'] = True
if username in following_box:
f[username]['following'] = True # Second flag: True if you also follow them, False if you don't
f[username]['user_fb'] = followback_box[username]
else:
f[username]['following'] = False
f[username]['user_fb'] = False
f[username]['link'] = f'https://last.fm/user/{username}'
return f
def getTodayListening(user_name):
printRunningDef(currentframe())
jsonDir = 'backups/json'
jsonName = f'{user_name}-today-{appSession}.json'
jsonPath = f'{jsonDir}/{jsonName}'
today = date.today()
today = today.strftime("%Y-%m-%d")
pageNo = 1
todayTracks = {}
if os.path.exists(jsonPath): # If a 'today' JSON already exists
oldTodayTracks = getJsonData(jsonPath) # Keep the old one
else:
oldTodayTracks = None
while True:
todayListeningUrl = f'https://www.last.fm/user/{user_name}/library/artists?from={today}&rangetype=1day&page={pageNo}'
todayListeningDom = getDom(getResponse(todayListeningUrl))
try:
todayListeningDomTracks = todayListeningDom.find_all("tr", "chartlist-row")
for i in todayListeningDomTracks:
artistName = i.find("td","chartlist-name").text.strip()
artistCount = i.find("span","chartlist-count-bar-value").text.strip()
todayTracks[artistName] = getRemoval(artistCount[:artistCount.rfind(' ')], ',', int) # Take everything before the last space (e.g. '123 scrobbles')
except:
pass # If an error occurs, the dict is returned empty.
if todayListeningDom.find("li", {"class": "pagination-next"}):
pageNo += 1
else:
doDictJsonSave(jsonName, todayTracks) # Json save
todayArtists = list(todayTracks.keys()) # Names of the artists listened to today
return todayArtists, todayTracks, oldTodayTracks
def getArtistAllCount(user_name, artist_names):
artistScrobbs = {}
for artistName in artist_names:
artistCountUrl = f'https://www.last.fm/user/{user_name}/library/music/+noredirect/{parse.quote(artistName)}' # Artist alltime details
artistCountDom = getDom(getResponse(artistCountUrl))
artistScrobbleCount = getRemoval(artistCountDom.find_all("p", {"class":"metadata-display"})[0].text, ',', int)
artistScrobbs[artistName] = artistScrobbleCount # library_header_title, metadata_display
return artistScrobbs
def getJsonData(json_path):
with open(json_path) as jsonFile:
return json.load(jsonFile)
def getArtistAllTimeCount(user_name, artists_box, old_artists_box): # total contribution to artists listened to today
printRunningDef(currentframe())
jsonDir = 'backups/json'
jsonName = f'{user_name}-alltime-{appSession}.json'
jsonPath = f'{jsonDir}/{jsonName}'
if old_artists_box == None:
if os.path.exists(jsonPath):
syncBool, syncArtistNames = doSyncControl(user_name, 'all')
if not syncBool:
doAlltimeJsonSync(user_name, syncArtistNames)
if not os.path.exists(jsonPath): # ico not exist
artistNames = artists_box.keys()
alltimeJson = getArtistAllCount(user_name, artistNames)
else:
alltimeJson = getJsonData(jsonPath)
for artistName, artistCount in artists_box.items():
if old_artists_box == None:
oldCount = artists_box[artistName]
else:
if artistName in old_artists_box: # If the artist was listened to before
oldCount = old_artists_box[artistName] # The artist's previously recorded scrobble count
else: # If a new artist was listened to
oldCount = artists_box[artistName] # No previous record; use today's count as the baseline
if artistName in alltimeJson: # If the artist is already in the user's all-time records
alltimeJson[artistName] += (artistCount-oldCount)
else: # Artists not recorded before.
alltimeJson[artistName] = getArtistAllCount(user_name, [artistName])[artistName]
doDictJsonSave(jsonName, alltimeJson)
return alltimeJson
def getDictDiff(dict_x, dict_y): # Not yet
dict_diff = None
return dict_diff
def getDictKeyNo(key, d): # key, dict
dictKeys = d.keys()
dictKeysList = list(dictKeys)
keyIndexNo = dictKeysList.index(key) + 1
return keyIndexNo
def getArtistScrobbleCount(user_name, artist_name, date_time, method):
if method == 'today':
return getArtistTodayScrobbleCount(user_name, artist_name, date_time)
elif method == 'all':
return getArtistAllScrobbleCount(user_name, artist_name)
def getArtistTodayScrobbleCount(user_name, artist_name, from_set):
artistTodayScrobbleUrl = f'https://www.last.fm/tr/user/{user_name}/library/music/+noredirect/{parse.quote(artist_name)}?from={from_set}&rangetype=1day'
artistTodayScrobbleDom = getDom(getResponse(artistTodayScrobbleUrl))
artistTodayScrobbleElement = artistTodayScrobbleDom.find_all("p", {"class":"metadata-display"})[0].text
artistTodayScrobbleCount = getRemoval(artistTodayScrobbleElement, ',', int)
return artistTodayScrobbleCount
def getArtistAllScrobbleCount(user_name, artist_name): # How many times the given user has scrobbled that artist
artistCountUrl = f'https://www.last.fm/user/{user_name}/library/music/+noredirect/{parse.quote(artist_name)}' # The user's library page for the artist
artistCountDom = getDom(getResponse(artistCountUrl)) # Fetch the user's artist page
artistScrobbleCount = getRemoval(artistCountDom.find_all("p", {"class":"metadata-display"})[0].text, ',', int) # Find the counter and clean it up
print(artist_name, artistScrobbleCount)
return artistScrobbleCount # library_header_title, metadata_display
def getLastLineContent(file_name,start,end): # Read a slice of the last line of the given file
with open(file_name, 'r') as f: # Open the file for reading
return f.readlines()[-1][start:end] # Read the last line and return the requested slice
## -- PRINT DEFS --
def printRunningDef(def_info): # Print the currently running function
if terminalInfo:
time.sleep(0.03)
currentLine = def_info.f_back.f_lineno
defName = def_info.f_code.co_name
with open('main.py', 'r') as f:
mainLinesLength = len(str(len(f.readlines())))
currentLineLength = len(str(currentLine))
print(f"Process: [{'0'*(mainLinesLength-currentLineLength)+str(currentLine) if currentLineLength < mainLinesLength else currentLine}]:{defName}")
def printStatus(upi_dict, refresh_bool): # printStatus(userProfileInfos, react)
print(f'\n*** {time.strftime("%H:%M:%S")} ***')
upi_acot = upi_dict['artist_count_alltime']
upi_lts = upi_dict["last_tracks"]
upi_sc = upi_dict["scrobbled_count"]
upi_ac = upi_dict["artists_count"]
upi_lc = upi_dict["likes_count"]
upi_tt = upi_dict['today_tracks']
upi_bi = upi_dict["background_image"]
upi_ua = upi_dict["user_avatar"]
upi_ss = upi_dict["scrobbling_since"]
upi_dn = upi_dict["display_name"]
upi_un = upi_dict["username"]
if "follows" in upi_dict:
# Following
upi_fgc = upi_dict["follows"]["following_counts"]
upi_fg = upi_dict["follows"]["following"]
# Followers
upi_fsc = upi_dict["follows"]["followers_counts"]
upi_fs = upi_dict["follows"]["followers"]
# Followback
upi_fb = upi_dict["follows"]["following_gt"]
upi_fbc = upi_dict["follows"]["fb_count"]
upi_nofbc = upi_dict["follows"]["no_fb_count"]
printFollowStat(upi_fg, upi_fs , upi_fb, upi_fgc, upi_fsc, upi_fbc, upi_nofbc)
printRecentTracks(upi_lts, upi_sc) # Last Tracks Prints
printTodayAllTime(upi_acot, upi_tt) # Total, Today Prints
# Adresses
print(f'\nProfile: {upi_dn} (@{upi_un})')
print(f'Scrobbling Since: {upi_ss}')
print(f'Avatar: {upi_ua}')
print(f'Background: {upi_bi}')
# Headers
print(f'Scrobbles: {upi_sc} | ', end="")
print(f'Artists: {upi_ac} | ', end ="")
print(f'Loved Tracks: {upi_lc}'),
if refresh_bool:
refresh_time = 0
if refresh_time == 0:
print('\nChecking profile again..')
else:
print(f'\nIt will be checked again in {refresh_time} seconds..')
time.sleep(refresh_time)
doCheckChange(upi_dict, upi_un)
def printTodayAllTime(artists_alltime, artists_today): # Print the all-time scrobble counts for the artists the user listened to today
if len(artists_today) > 0:
print(f'\nYour total contribution to the artist today;')
for todayArtistName, todayArtistCount in artists_today.items():
todayArtistNo = getDictKeyNo(todayArtistName, artists_today) # Get the 1-based position of the given key in the dict
try:
count_msg = f'{artists_alltime[todayArtistName]} (Today: {todayArtistCount})'
except:
count_msg = f'Today: {todayArtistCount}'
finally:
print(f'[{todayArtistNo}]: {todayArtistName} - {count_msg}')
def printTodayListening(tracks_today): # Print the artists the user listened to today
if bool(tracks_today): # If the dict is not empty
print('Today Listening Artists;')
for todayArtistName, todayArtistCount in tracks_today.items():
todayArtistRank = getDictKeyNo(todayArtistName, tracks_today) # Get the 1-based position of the given key in the dict
print(f'{todayArtistRank}: {todayArtistName} ({todayArtistCount})')
else: # An empty dict evaluates to False.
print('No songs were listened to today.')
def printRecentTracks(last_tracks, scrobbled_count): # Print the user's most recently scrobbled tracks
if last_tracks != None:
print(f'\nRecent Tracks;', end='')
recentTracks = last_tracks
for trackNo in recentTracks:
print(f'\n[{trackNo+1}]:', end=" ")
for trackValueNo in range(len(recentTracks[trackNo])):
print(recentTracks[trackNo][trackValueNo], end= " | ")
else:
print()
elif scrobbled_count > 0:
print("\nRecent Tracks: realtime tracks is private.")
def printDictValue(print_dict): # Helper that prints a dict's contents
for key, value in print_dict.items():
print(f'{key} ({value})')
def printus(dict_name, user_dict, count_dict): # Another dict-printing helper
print(f'{dict_name}: ({count_dict});')
for user, value in user_dict.items(): # user, bool
print(f'[{value}]: {user}')
def printFollowStat(fg, fs, fb, fgc, fsc, fbc, nofbc): # Print the user's follow details
print(f'\nFollows;')
if False:
printus("Following", fg, fgc) # Following
printus("Followers", fs, fsc) # Followers
printus("Followback", fb, fbc)
print(f"Following: {fgc}, Followers: {fsc}, Followback: {fbc}")
if fgc != fbc:
print(f"Users who don't follow you back ({nofbc});")
f = getFollowDict(fg, fs, fb)
for user in f:
if f[user]['following'] == True and f[user]['follower'] == False:
print(f"FG:[{f[user]['following']}], FR:[{f[user]['follower']}], FB:[{f[user]['user_fb']}] | {f[user]['link']}, @{user}")
elif fgc != 0:
print(f'{fbc} users you follow are following you.')
if __name__ == '__main__':
try:
username = sys.argv[1] # Username input from command line
except:
username = input('Username: @')
appSession = debugLog(True)
# ping("www.last.fm") # Ping test
getSearchUser(username)
|
<commit_msg>Make the expected HTTP status code configurable
<commit_before>import sys, requests, argparse
from retrying import retry
argparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argparser.add_argument('url')
argparser.add_argument('-t', '--timeout', type=int, default=120, help='total timeout in seconds to wait for the url to be available')
argparser.add_argument('-d', '--delay', type=int, default=1, help='delay in seconds between each retry')
args = argparser.parse_args()
@retry(stop_max_delay = args.timeout * 1000, wait_fixed = args.delay * 1000)
def check():
print('Checking', args.url)
sys.stdout.flush()
response = requests.get(args.url, timeout = 1)
response.raise_for_status()
return response.status_code
if check() == 200:
sys.exit(0)
else:
sys.exit(1)
<commit_after>import sys, requests, argparse
from retrying import retry
argparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argparser.add_argument('url')
argparser.add_argument('-t', '--timeout', type=int, default=120, help='total timeout in seconds to wait for the url to be available')
argparser.add_argument('-d', '--delay', type=int, default=1, help='delay in seconds between each retry')
argparser.add_argument('-s', '--status-code', type=int, default=200, help='expected HTTP status code')
args = argparser.parse_args()
@retry(stop_max_delay = args.timeout * 1000, wait_fixed = args.delay * 1000)
def check():
print('Checking', args.url, 'for HTTP code', args.status_code)
sys.stdout.flush()
response = requests.get(args.url, timeout = 1)
if response.status_code != args.status_code:
raise Exception('Unexpected HTTP status code {} (Wanted: {})'.format(response.status_code , args.status_code))
try:
check()
sys.exit(0)
except Exception as e:
print(str(e))
sys.exit(1)
|
Lead and δ-Aminolevulinic Acid Dehydratase Polymorphism: Where Does It Lead? A Meta-Analysis. Background: Lead poisoning affects many organs in the body. Lead inhibits δ-aminolevulinic acid dehydratase (ALAD), an enzyme with two co-dominantly expressed alleles, ALAD1 and ALAD2. Objective: Our meta-analysis studied the effects of the ALAD polymorphism on a) blood and bone lead levels and b) indicators of target organ toxicity. Data source: We included studies reporting one or more of the following by individuals with genotypes ALAD1-1 and ALAD1-2/2-2: blood lead level (BLL), tibia or trabecular lead level, zinc protoporphyrin (ZPP), hemoglobin, serum creatinine, blood urea nitrogen (BUN), dimercaptosuccinic acid-chelatable lead, or blood pressure. Data extraction: Sample sizes, means, and standard deviations were extracted for the genotype groups. Data synthesis: There was a statistically significant association between ALAD2 carriers and higher BLL in lead-exposed workers (weighted mean difference of 1.93 µg/dL). There was no association with ALAD carrier status among environmentally exposed adults with BLLs < 10 µg/dL. ALAD2 carriers were potentially protected against adverse hematopoietic effects (ZPP and hemoglobin levels), perhaps because of decreased lead bioavailability to heme pathway enzymes. Conclusion: Carriers of the ALAD2 allele had higher BLLs than those who were ALAD1 homozygous, as well as higher hemoglobin and lower ZPP, and the latter seems to be inversely related to BLL. Effects on other organs were not well delineated, partly because of the small number of subjects studied and potential modifications caused by other proteins in target tissues or by other polymorphic genes. distributed in an intermediate pool of soft tissues, skin, and muscle. Elimination half-lives for lead are estimated at 30-40 days in blood and up to 20 years or longer in bone (Marcus 1985a, 1985b). Lead is eliminated mainly in the urine. Lead is a potent inhibitor of δ-aminolevulinic acid dehydratase (ALAD), coproporphyrinogen oxidase, and ferrochelatase, enzymes that catalyze the second, sixth, and final steps, respectively, in the biosynthesis of heme (Onalaja and Claudio 2000). Because the metal has the greatest effect on ALAD, measurement of ALAD activity can be used as a marker of effect of lead exposure. ALAD, an octameric zinc-containing enzyme, catalyzes the condensation of two molecules of 5-aminolevulinic acid (ALA) into one molecule of the monopyrrole porphobilinogen (PBG). Inhibition of ALAD activity produces increased urinary excretion of ALA. Lead displaces zinc from the enzyme's active site, and the inactivation of ALAD has been implicated in the pathogenesis of lead poisoning. The resulting accumulation of its substrate, ALA, has been shown to have a neuropathogenic effect, probably by acting as a γ-aminobutyric acid (GABA) receptor agonist in the nervous system (Brennan and Cantrill 1979). Human ALAD, encoded by a single gene localized to the chromosome 9q34 region, is a polymorphic enzyme with two alleles, ALAD1 and ALAD2, which are co-dominantly expressed. The difference between the two alleles lies in a single G→C transversion mutation of nucleotide 177 in ALAD2; the allozyme resulting from the ALAD2 allele contains the substitution of a neutral asparagine for a positively charged lysine at residue 59. Three differently charged allozymes, ALAD1-1, 1-2, and 2-2, result from the expression of the ALAD1 and ALAD2 genes.
In several white populations, the frequencies of the ALAD1 and ALAD2 genes have been estimated to be 0.9 and 0.1, respectively. Asian and African populations have lower frequencies of the ALAD2 allele (). Several epidemiologic studies have attempted to correlate the ALAD allelic variations with a differential susceptibility to lead poisoning. The biologic plausibility for a differential role of the two alleles lies in the fact that the lysine substitution at residue 59 changes the electrical charge of the enzyme (); the more electronegative ALAD2 enzyme may thus have a higher affinity/stability for the lead cation than ALAD1 (b). This could result in an alteration of lead toxicokinetics and susceptibility to lead toxicity. The first studies comparing BLL and ALAD polymorphism were conducted on a chronically exposed population of 202 male lead workers in a German factory (), and an environmentally exposed population of 1,051 children with elevated free erythrocyte protoporphyrin (). These studies showed that individuals carrying one or two copies of the ALAD2 allele exhibited higher BLLs than homozygous individuals with only the ALAD1 allele. These findings led to the suggestion that ALAD2 may be a determinant for increased susceptibility to lead toxicity (a). However, some studies have reported either no difference among individuals homozygous for ALAD1 relative to individuals carrying the ALAD2 allele, or the differences among the two groups were not statistically significant. The extreme variability in the published data is due to several factors: relatively small numbers of subjects, different frequencies of the ALAD2 allele in various populations, and different levels of lead exposure as determined by BLLs in the populations studied. We used a series of meta-analyses to quantify the effects of this genetic polymorphism and to understand lead toxicokinetics. Methods Study selection. MEDLINE (National Library of Medicine 2006) and Web of Science (Thomson Scientific 2006) databases were searched to January 2006 for English-language publications of observational studies. The citations in the articles identified were also searched to find other potentially eligible studies. Common text words and Medical Subject Headings (MeSH) related to lead poisoning, gene polymorphism, and ALAD were used. No attempt was made to contact the authors of any of the articles, except to resolve discrepancies in the reported values. We required that two a priori criteria be met for inclusion in the meta-analysis: a) sample sizes, means, and SDs were either reported or could be determined for the ALAD1-1 and ALAD1-2/2-2 genotypes; and b) combined with one or more of the following measures-BLL, tibia lead level, trabecular (patella or calcaneus) lead level, zinc protoporphyrin (ZPP), hemoglobin, serum creatinine, dimercaptosuccinic acid-chelatable lead, and systolic or diastolic blood pressure. When multiple studies used the same cohort of subjects, the first publication that reported the values of the variables of interest was included. Data extraction. Sample sizes, means, and SDs according to genotype (homozygous ALAD1-1 and ALAD2-2 and heterozygous ALAD1-2) were extracted independently by two authors (F.S and D.M.). Wu et al. reported data in groups of workers subjected to high and low lead exposures. We mathematically combined the data of the two groups to extract the means for all exposed workers according to the genotype. 
The pooled estimate of the variance from two independent samples was used to extract the SDs according to genotype. Therefore, only one effect size was entered in the model. The data from each study were entered twice to minimize data-entry errors. Statistical analysis. The data were analyzed using Stata software version 7 (StataCorp., College Station, TX, USA). In each study the size of the effect was calculated by the difference between the means of the ALAD1-2/2-2 and the ALAD1-1 groups. Each mean difference was weighted according to the inverse of its variance, and the average was taken . To combine data from studies in which the same outcome was measured by different scales (serum creatinine), or when the outcome value was measured by different methods (bone lead, ZPP), the mean difference was standardized by dividing by the within-group SD; the results were then weighted and the average, or standardized mean difference (SMD), taken. The WMD or SMD in each study was pooled using a random-effects model. Results are given with 95% confidence intervals (CIs). Between-study heterogeneity in the results of the studies was assessed using a chi-square test and the I 2 measure of inconsistency. Significant heterogeneity was defined as a chisquare test p-value < 0.1. I 2 takes values between 0% and 100% with higher values denoting greater degree of heterogeneity (I 2 = 0-25%: no heterogeneity; I 2 = 25-50%: moderate heterogeneity; I 2 = 50-75%: large heterogeneity; I 2 = 75-100%: extreme heterogeneity) (). Furthermore, to examine between-study heterogeneity, we used a priori stratified analyses including the study design (occupational and environmental studies) and age status (children and adults) and presence of Hardy-Weinberg equilibrium (HWE). Publication bias was assessed using the methods proposed by Begg and Mazumdar and by Egger et al.. All p-values are two-tailed. Results The search procedure yielded 45 references that were retrieved for additional information (Figure 1). We initially excluded 4 review papers, 5 non-English research articles, and 2 articles that reported data on different variant of the ALAD2 polymorphism. Of the remaining 34 articles, 7 did not have relevant data for effect size calculation. Moreover, the corresponding author of a study of environmentally exposed children () was contacted twice by monthly e-mail to resolve some discrepancy in their reported study. Three months after failing to receive an answer, we decided to exclude the study. Therefore, 24 studies were included in the meta-analysis (;;Duydu and Suzen 2003;;;;;Lee ;;;Schwartz et al., 1997aSchwartz et al., 1997bSchwartz et al., 2000;;;;;a;Wu et al., 2004), and of these, 11 were multiple publications that often had other different outcomes of interest. When we found a discrepancy in the reported studies, the authors were contacted and the corrected data were used. Table 1 characterizes the studies that did meet criteria for inclusion. ALAD polymorphism and blood lead level. Nine occupational studies (;;;;Schwartz et al., 2000;a;) were included in our analysis, and 5 environmental exposure studies of which 3 were conducted among adults (;;) and 2 among children (;a). Thus, a total of 14 studies were included in our analysis of blood lead level and ALAD polymorphism. Each of the studies was rechecked for HWE. We did not find HWE in the study by Wetmur et al. (1991a) that presented separate data on previously reported studies of occupational exposure in adults () and environmental exposure in children (). 
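For reference, the inverse-variance pooling, standardization, and heterogeneity measures described in the Methods above follow the standard meta-analytic conventions. The formulas below are a generic sketch of those conventions (not reproduced from the paper), where $d_i$ is the ALAD1-2/2-2 minus ALAD1-1 mean difference in study $i$, $s_i$ its within-group pooled SD, and $k$ the number of studies:

$$\mathrm{WMD} = \frac{\sum_i w_i d_i}{\sum_i w_i}, \qquad w_i = \frac{1}{\widehat{\mathrm{Var}}(d_i)}, \qquad \mathrm{SMD}_i = \frac{d_i}{s_i}$$

$$Q = \sum_i w_i \left(d_i - \mathrm{WMD}\right)^2, \qquad I^2 = \max\!\left(0,\; \frac{Q - (k-1)}{Q}\right) \times 100\%$$

Under the random-effects model used for pooling, the weights are inflated to $w_i^{*} = 1/\big(\widehat{\mathrm{Var}}(d_i) + \hat{\tau}^2\big)$, where $\hat{\tau}^2$ estimates the between-study variance (e.g., by the DerSimonian-Laird method).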
The absence of HWE is most likely because of ethnicities of the populations: the occupational study comprised workers of German and Turkish origins (), whereas the study of children included whites, blacks, Hispanics, and Asians (). Table 2 shows the frequency of the ALAD polymorphism and the status of the HWE in the studies analyzed. There is evidence that inclusion of studies that deviate from HWE can affect the pooled estimate and be potential sources of heterogeneity across the studies (). Hence, we conducted pooled analysis with and without studies that deviated from HWE. Pooled WMD analysis among the 14 studies, which included a total of 6,672 subjects, 5,861 (87.84%) were homozygous for ALAD1 and 811 (12.16%) carried the ALAD2 allele, showed a large heterogeneity among the studies ( 2 13 = 54.75; p = 0.000; I 2 = 76.3%) (Table 3; Figure 2). In subgroup analysis (subgroups were defined by the type of study and by population, that is, occupationally and environmentally exposed adults and children), there was no heterogeneity between occupational studies (I 2 = 0), between the studies of environmentally exposed children (I 2 = 0), and moderate heterogeneity among the studies of environmentally exposed adults (I 2 = 55.2%). After removal of the studies not in HWE, the overall heterogeneity decreased ( 2 11 = 17.92; p = 0.07), and the variation in WMD attributable to heterogeneity was moderate (I 2 = 38.6%) (Table 3). Overall, the pooled WMD analysis indicated that the carriers of ALAD2 allele had a significantly higher BLL (2.31 g/dL; 95% CI, 0.93 to 3.70) compared with carriers homozygous for the ALAD1 allele, a finding that was mostly driven by the occupational studies. Removal of the two studies not in HWE resulted in a not significantly higher WMD level of BLL (0.86 g/dL; 95% CI, -0.1 to 1.73). There was no evidence of publication bias according to Begg's test (p = 1.0, with continuity correction) and Egger's test (p = 0.10). Environmental adult studies. By contrast, the WMD in adults environmentally exposed to lead was 0.05 g/dL (95% CI, -0.79 to 0.88), which was not statistically significant. Environmental children studies. Pooled analysis of the two studies of children showed a WMD in BLL of 7.34 g/dL (95% CI, 4.92 to 9.76), with the individuals carrying ALAD2 having significantly higher BLLs (p = 0.00). However, the data should be viewed cautiously because other than deviation from HWE, the individuals selected for the study reported by Wetmur et al. (1991a) had higher initial clinical evaluations of elevated erythrocyte protoporphyrin (FEP) levels thus introducing potentially serious selection bias in the study design. ALAD polymorphism and lead: a meta-analysis Environmental Health Perspectives VOLUME 115 | NUMBER 1 | January 2007 37 a These studies use the same population data. b These studies use the same population data. c These studies use the same population data. d These studies use the same population data. e These studies use the same population data. ALAD polymorphism and heme synthesis. Zinc protoporphyrin (ZPP). Six published occupational studies related ZPP to ALAD polymorphism (;;Lee ;;;). Because the methods used to measure ZPP were not uniform, we calculated the SMD. The overall pooled SMD was -0.09, indicating that individuals carrying the ALAD 2 allele had lower ZPP values (Figure 3). However, the SMD was not statistically significant (95% CI, -0.22 to 0.03; p = 0.13). 
Heterogeneity was not significant ( 2 5 = 3.88, p = 0.56; I 2 = 0.0%), indicating that the studies were homogeneous. There was no evidence of publication bias according to Begg's test (p =1.0, with continuity correction) and Egger's test (p = 0.37). ALAD polymorphism and bone compartment. Tibia lead level. Ten studies reported data on lead levels in tibia bone and ALAD polymorphism as an outcome measure. Four studies that relied on previous data sets Lee ;;) and two that did not have data based on the polymorphism (;) were excluded, leaving four studies for analysis: two studies involving lead workers (;) and two of environmentally exposed adults (;). Because the methods used to measure tibia lead levels were not the same in all studies, the pooled SMD was calculated. The overall pooled SMD of -0.07 was not significant (95% CI, 0.20 to 0.05) and no significant heterogeneity existed among the studies (I 2 = 0.0%) ( Table 4). Difference between trabecular and cortical bone lead level. Two environmental studies (;) and one involving lead workers () were analyzed for differences between trabecular (patella and calcaneus) and cortical (tibia) bone lead levels and ALAD polymorphism. The overall pooled SMD (SMD = 0.03; 95% CI, 0.21 to 0.26) was not significant, but there was moderate heterogeneity (I 2 = 50.9%) (Table 4). Overall, these analyses showed no significant difference between ALAD genotypes and trabecular and cortical bone lead concentrations. WMD These data indicate that the bioavailability of lead is greater in ALAD1-1 individuals than in ALAD1-2 individuals. ALAD polymorphism and kidney function. Serum creatinine. Four studies reported serum creatinine values and ALAD polymorphism (a;;;). The study by Bergdahl et al. (1997a) was excluded because it was not possible to calculate the mean and SD. Therefore, only three studies were analyzed by pooled SMD: two conducted in environmentally exposed individuals (;), and one in lead-exposed workers (). Very high heterogeneity was present (p < 0.001; I 2 = 92.9% ) (Table 4), that could be attributed to different levels of lead exposure. Pooled analysis of the two studies reporting low levels of lead exposure (environmental studies) shows that individuals carrying the ALAD2 allele had a corresponding significantly higher serum creatinine (SMD = 0.48; 95% CI, 0.33 to 0.62) than those individuals homozygous for ALAD1. ALAD polymorphism and blood pressure. Two cross-sectional studies related systolic blood pressure to ALAD polymorphism ). The pooled WMD was 0.30 mmHg higher in individuals carrying the ALAD2 allele, but the difference was not statistically significant (95% CI, -2.18 to 2.78) ( Table 4). Heterogeneity was present among three studies ;) relating diastolic blood pressure to ALAD polymorphism ( 2 2 = 6.16; p = 0.05; I 2 = 66.9%) ( Table 4). This heterogeneity was most likely due to the different frequency of the ALAD2 allele in the population investigated, as well as the level of lead exposure. Exclusion of the occupational study ), which has a low frequency of ALAD2 allele and modestly higher levels of lead exposure, resulted in a nonsignificant test for heterogeneity ( 2 1 = 0.47, p = 0.49), a significant pooled WMD that was 1.88 mmHg higher in individuals carrying the ALAD2 allele (95% CI, 0.46 to 3.31; p = 0.01) ( Table 4). Discussion Our goal in this study was to determine the associations of ALAD polymorphism on blood lead levels and bone deposition, and the role of this polymorphism as a modifier of target organ lead toxicity. 
Discussion

Our goal in this study was to determine the association of ALAD polymorphism with blood lead levels and bone lead deposition, and the role of this polymorphism as a modifier of target-organ lead toxicity. Overall, our meta-analysis shows that individuals carrying the ALAD2 allele had generally higher blood lead levels than those homozygous for ALAD1. The data suggest that carrying the ALAD2 allele is a significant determinant of blood lead concentration among individuals subjected to high exposure levels, such as lead-exposed workers. ALAD2 does not appear to be a significant determinant of blood lead concentration among adults exposed to relatively low lead levels (< 10 µg/dL). The biologic plausibility for a differential role of the two ALAD alleles lies in the fact that the ALAD2 enzyme could potentially have a higher affinity and stability for lead than ALAD1. Among lead workers, carriers of the ALAD2 allele had a higher percentage of lead bound to the ALAD enzyme compared with ALAD1 homozygotes (b). The higher percentage of lead bound to the ALAD2 enzyme translates to lower levels of bioavailable lead; the reverse is true in ALAD1 homozygotes. This is consistent with our results: we found that people carrying the ALAD2 allele had a weighted average of 21.30 µg lower DMSA-chelatable lead than individuals lacking the allele.

The insertion of ferrous iron (Fe²⁺) into the porphyrin ring to form heme is catalyzed by the mitochondrial enzyme ferrochelatase, which shows reduced activity in the presence of lead (Ponka 1997). This reduction in ferrochelatase activity frees protoporphyrin to accept zinc, resulting in the formation of zinc protoporphyrin (ZPP), which is characteristically increased in both lead poisoning and iron deficiency. The increased amount of lead bound to the ALAD2 isozyme should result in less lead being available to inhibit ferrochelatase, which would thus remain available to catalyze the formation of heme, with subsequent formation of hemoglobin in the presence of Fe²⁺. In contrast, the weaker binding of lead to ALAD1 results in more bioavailable lead that can inhibit ferrochelatase. This results in increased formation of ZPP and decreased production of heme and hemoglobin.

Our meta-analysis supports these modifying effects of the ALAD2 allele. Hemoglobin level was 0.18 g/dL (95% CI, 0.05 to 0.31) higher in lead workers with the ALAD1-2 genotype. Although ALAD2 carriers had lower ZPP (SMD = -0.10), the difference was not statistically significant. ZPP is characteristically increased in lead poisoning and starts to rise exponentially only at blood lead concentrations > 30 µg/dL in adults or > 25 µg/dL in children (Baldwin and Marshall 1999). It is thus reasonable to expect a modifying effect of ALAD polymorphism on ZPP with increased lead exposure. The absence of a significant effect could be due to differences in exposure levels across the study populations. Schwartz et al. found that, in the plant with the highest lead exposures, workers carrying the ALAD2 allele had lower ZPP measurements. The association of ALAD2 with lower ZPP was also reported by Alexander et al., and this association was more pronounced in workers with blood lead concentrations ≥ 40 µg/dL. Significantly higher levels of ZPP were reported in ALAD1-homozygous Japanese lead workers compared with ALAD2 carriers at BLLs > 20 µg/dL ().

[Table 4. Summary effect size between ALAD1-2/2-2 and ALAD1-1 carriers on various outcomes (effect sizes and weights not reproduced in this extract).]
Overall, these studies indicate that the ALAD2 allele is a modifying factor in the formation of ZPP at higher blood lead levels (> 20 µg/dL), and that ALAD2 carriers exhibit lower levels of ZPP and higher levels of hemoglobin.

Differences in lead accumulation in various bone types have been reported; tibia concentrations differ from those observed in the patella. The cortical bone of the tibia represents a long-term storage depot, with an elimination half-life for lead in excess of a decade. ALAD status may modify the way in which lead partitions between these bone depots (). That is, the variant ALAD2 protein may effectively increase the blood and soft-tissue (e.g., spleen and kidney) compartment half-lives of lead, thus decreasing partitioning to the cortical bone compartment. Our meta-analysis did not find a significant association between ALAD polymorphism and accumulation of lead in the different bone compartments.

More recently, emphasis has focused on the role of vitamin D receptor (VDR) polymorphism in modulating lead levels in the bone compartment (;). The vitamin D endocrine system plays an essential role in calcium homeostasis and bone metabolism. Vitamin D is a prohormone that is metabolically converted to the active metabolite 1,25-dihydroxyvitamin D (calcitriol), which facilitates calcium absorption from the gut and directly stimulates osteoblasts, the bone-forming cells. These effects are mediated through activation of the VDR, which alters the transcription rates of target genes responsible for the biological response (). Lead is a divalent cation that behaves like calcium in biological systems, and interactions between lead and calcium have been reported. Calcium and calcitriol deficiencies result in increased lead absorption from the gut (Fullmer 1990). Conversely, higher dietary calcium intake results in lower BLLs in children () and in reduced bone lead accumulation in animals (). VDR polymorphism may thus influence lead uptake and retention in bone storage pools. Theppeang et al. found a significantly higher patella lead burden in lead workers carrying the VDR B allele. Schwartz et al. previously reported, in adjusted analyses, that lead workers carrying the VDR B allele had significantly higher tibia lead levels (on average 6.4 µg/g) than workers with the VDR bb genotype.

Associations between ALAD polymorphism and the renal effects of lead exposure have also been reported. Smith et al. found that ALAD2 carriers were more susceptible to decrements in renal function as measured by increases in serum creatinine and blood urea nitrogen (BUN). The increased serum creatinine in individuals carrying ALAD2 was confirmed in a sample of 89 lead workers (a). Conversely, Korean lead workers with the ALAD1-2 genotype exhibited lower BUN and serum creatinine (). The pooled SMD in our meta-analysis showed higher serum creatinine values among ALAD2 carriers. However, there was significant heterogeneity among the studies, which might be ascribed to the level of lead exposure, the frequency of the polymorphism in the populations investigated, and other possible confounders (e.g., age, sex). Pooled analysis of the studies reporting low levels of lead exposure (environmental studies) showed that individuals carrying the ALAD2 allele had, on average, a statistically significant 0.10 mg/dL higher serum creatinine than individuals homozygous for ALAD1.

The effect of lead on blood pressure has also been widely investigated (;).
The available literature suggests that there is a positive, albeit weak, association between systolic blood pressure and blood lead concentration. A recent meta-analysis showed that a 2-fold increase in blood lead concentration is associated with a rise in systolic pressure of 1.0 mmHg (95% CI, 0.5 to 1.4; p < 0.001) and an increase in diastolic pressure of 0.6 mmHg (95% CI, 0.4 to 0.8; p < 0.001) (). Our meta-analysis did not find a difference in systolic blood pressure associated with ALAD polymorphism. However, environmentally exposed individuals carrying the ALAD2 allele showed an increase in diastolic blood pressure of 1.88 mmHg. The biological plausibility of a causal relationship between elevated blood pressure and lead exposure has been studied mainly in animals and in vitro. Experiments have demonstrated that lead affects the smooth muscle of blood vessels by interfering with the Na⁺/K⁺ pump, cyclic AMP, calcium ions (Ca²⁺), and the renin-angiotensin system (;;). In this context, the presence of other polymorphic genes, such as that coding for endothelial nitric oxide synthase (eNOS), may play an additional role. Endothelial NOS converts L-arginine into nitric oxide, causing relaxation of vascular smooth muscle (Vaziri and Ding 2001;), and associations among eNOS genotypes, hypertension, lead exposure, and intracellular Ca²⁺ concentrations have been reported (;).

Conclusions

Measurement of blood lead level is the most convenient, readily available, and logistically feasible biomarker for assessing the risk of lead toxicity. However, the presence of the ALAD2 allele may obscure the clinical interpretation of blood lead values in terms of target-organ toxicity. Adult ALAD2 carriers generally show higher BLLs at increased levels of lead exposure, and appear to be protected against adverse hematopoietic effects as measured by hemoglobin levels. The modifying effects of ALAD on other organs remain unclear, partly because the number of available studies is small, so any inferences must be drawn cautiously (). The strength of the present analysis, however, lies in the aggregation of published studies, which provides more information for investigating the effect of the allele under investigation. Moreover, other genes such as VDR could alter lead deposition in bone. The increasing application of molecular epidemiologic methods has emphasized the interaction between genes and the environment. The existence of multiple gene polymorphisms suggests that genes with individually small effects may interact to determine the overall risk. This meta-analysis identifies several issues: a) there are numerous potential sources of heterogeneity, including varying allele frequencies and Hardy-Weinberg equilibrium (HWE) status in the populations studied; b) in the context of gene-environment interactions, gene-gene interactions may also play a role (for example, ALAD, VDR, and eNOS may interact to modify lead levels in several organs). |
/*
* The MIT License (MIT)
*
* Copyright (c) 2015 Obshtestvo
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package me.grada.ui.fragment;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.view.View;
import java.util.List;
import butterknife.ButterKnife;
/**
* Created by yavorivanov on 27/12/2015.
*/
public abstract class BaseFragment extends Fragment {
@Override
public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
super.onViewCreated(view, savedInstanceState);
ButterKnife.bind(this, view);
}
/**
* Overriding the permission callback in the base fragment due to a bug in Android
* See: https://code.google.com/p/android/issues/detail?id=189121
*/
@Override
public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions,
@NonNull int[] grantResults) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
FragmentManager childFragmentManager = getChildFragmentManager();
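// Work around the bug referenced above by manually forwarding the result to any child fragments.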
if (childFragmentManager != null) {
List<Fragment> fragments = childFragmentManager.getFragments();
if (fragments != null) {
for (Fragment fragment : fragments) {
fragment.onRequestPermissionsResult(requestCode, permissions, grantResults);
}
}
}
}
}
|
The design of a storage assembly, such as a closet organizer for installation in a closet of a residential home, is usually a difficult and time consuming experience for an ordinary consumer. For example, to design a suitable and desired custom closet organizer, the consumer initially must accurately measure all of the walls of the closet in which the closet organizer is to be installed, identify potential storage elements that can be included in the closet organizer and then sort through and decide among the various storage elements and also potential configurations for the selected storage elements. Then, the consumer must decide which of the individual walls of the closet are to include particular storage elements or configurations of storage elements. The selection of storage elements is further complicated because the consumer must also consider the interconnection mechanisms of differently shaped and sized storage elements to ensure that the selected storage elements can physically be joined together according to rules for assembly of the storage assembly, while still fitting into the space in the closet available for installation of the storage elements.
Although some computer-based custom storage assembly design software applications are available, many of these applications do not provide the ordinary consumer with an adequate level of assistance, or adequate checks on the practicability of a desired custom storage assembly design. First, a large majority of these applications are stand-alone products that provide no pricing information, or no current pricing, and do not have an appropriately intuitive interface for assisting the consumer in the design process. In addition, in some computer-based design software applications that provide online design capabilities, the consumer can often design the storage assembly using only the design capabilities that are available online, and must then make several modifications to the design offline. These offline modifications can be based upon, for example, an offline calculation of the cost of certain custom design options offered by a custom storage assembly seller or manufacturer.
In addition, current software applications do not guide the consumer to design a custom storage assembly for a particular type of storage location using an initial proposed layout of storage elements that can be readily and easily installed in the storage location. The consumer attempting to custom design a storage assembly from scratch, therefore, is likely to quickly become frustrated and not proceed past the first step of developing an initial layout for a storage assembly from which further customization can be performed.
In addition, many applications do not link the selection of custom design options by a consumer to allowance rules for design of a storage assembly that are commonly known in the design industry. These allowance rules avoid design of a custom storage assembly that is non-functional, or an assembly that is impossible to install and does not fit into or optimally utilize available space within a storage location.
Therefore, a need exists for a system and method for designing a custom storage assembly that an ordinary consumer can easily use, that automates the process of designing a custom storage assembly in view of available space in a storage location, that utilizes the appropriate rules for designing storage assemblies that can be readily and correctly installed at a consumer's location and automatically calculates cost throughout the design process. |
Yesterday, Borders (BGPIQ) finally gave up. After not finding a buyer, the financially-strapped bookstore chain decided to close. The fire sale could happen as soon as Friday.
The Kobo digital reader came too late: Did you even know Borders had an official e-reader? The Kobo is a solid device, but it came out in Spring 2010 — years after the Amazon (AMZN) Kindle and a few weeks after the Apple (AAPL) iPad. Talk about awful timing. Borders' one salvo against the increasingly digital book universe was about two years too late.
Borders hasn't said what will happen to Kobo since the small company behind it has been using Borders as its main outlet, but I suspect it won't be pretty. Perhaps a tech giant like Microsoft (MSFT) can acquire it and somehow turn it into an interesting third option next to the Kindle and iPad.
That includes $1.7 million to president Mike Edwards and a pay boost of between 90 and 150 percent of their base salaries for five other top execs… [W]ith Borders total debt tipping the scales at $1.29 billion — owed mainly to Random House and Simon & Schuster (CBS) — and with the planned closure of at least 200 stores, the whole program seems preposterous.
You would think that the company would be marshaling its resources to keep the remaining stores open, not rewarding execs for getting Borders into Chapter 11 in the first place.
Barnes & Noble: Sold one million Nook books on Christmas Day 2010.
Amazon: Once a traditional book retailer, now selling more Kindle books than physical books.
Apple: The iBookstore is yet to take off, but groundbreaking book experiences are happening on the App Store.
Borders was the only major book retailer that was almost completely dependent on people getting in their car, going to their store, and purchasing a physical book. When it comes to that sales model, the chapter is closed. |
Skydrol
Skydrol is a fire-resistant aviation hydraulic fluid manufactured by Solutia (now part of Eastman Chemical Company), and formerly manufactured by Monsanto. There are various lines of Skydrol including Skydrol 500B-4, Skydrol LD-4, and Skydrol 5.
Skydrol is made of a fire-resistant phosphate ester base stock, with a number of oil additives dissolved into it to inhibit corrosion and prevent erosion damage to servo valves. It also includes a purple or green dye to ease identification. It has been approved by most airframe manufacturers including Airbus, Boeing and BAE Systems and has been used in their products for over 40 years.
Characteristics
Acid number (the proportional content of acid, not pH) and particulate contamination must be monitored while using Skydrol, and generally hydraulic systems should be sampled every C check.
Generally recommended contamination levels should be better than AS4059 Class 7 as new, and should not be allowed to degrade beyond Class 9. Skydrol has a 5-year shelf life from the date of manufacture.
Skydrol fluids are extremely irritating to human tissue. Gloves and goggles are recommended safety equipment when servicing Skydrol systems. If the fluid gets on the skin, it creates an itchy, red rash with a persistent burning sensation. The effects subside within a few hours; egg white can be applied to the affected area to neutralize the burning. Animal studies have shown that repeated exposure to tributylphosphate, one of the phosphate esters used in Skydrol fluids, may cause urinary bladder damage. If Skydrol gets in the eyes, it creates an intense stinging sensation. The recommended treatment for this is to use an eye-wash station; sometimes mineral oil, castor oil or milk is used instead.
Skydrol fluids are incompatible with many plastics, paints and adhesives, which can be softened and eventually destroyed by exposure to Skydrol. Some materials (for example rayon, acetate) and rubber-soled shoes may also be damaged by Skydrol.
Production
The Skydrol series of phosphate ester hydraulic fluids were originally jointly developed by the Douglas Aircraft Company and Monsanto in the late 1940s to reduce the fire risk from leaking high pressure mineral oil-based hydraulic fluids impinging on potential ignition sources.
In 1949 Douglas first licensed Monsanto to produce a range of Skydrol materials under their patents. In the 1990s Monsanto became primarily a biotechnology company, and an independent chemical producer, Solutia, was created in 1997 to handle its chemical interests, including Skydrol.
Solutia Inc. built a new facility to produce Skydrol and SkyKleen aviation cleaning solutions in Anniston, Alabama in 2005. In 2012, Solutia was acquired by Eastman Chemical.
Uses
The first type of Skydrol used in aviation was Skydrol 7000 (now obsolete), dyed green in colour. It served as a fire-resistant lubricant in Douglas-designed cabin-pressure superchargers (piston-engined airliners do not have 'bleed air' pressurisation) used in the DC-6 and DC-7 series piston-engined aircraft, and was first flight-tested by United Airlines in 1949. United also used Skydrol 7000 in the hydraulic systems of these aircraft, as did quite a number of other airlines, including Pan Am, and KLM and BOAC in Europe.
With the introduction of jet aircraft operating at higher altitudes and lower external temperatures, there was a need for improved phosphate ester fluids. The story of the introduction of Skydrol-type fluids in civil aviation is covered in a Kindle book entitled "The Skydrol Story", which describes how the Vickers Vanguard became the first non-US-built aircraft to use Skydrol as a hydraulic fluid, when Trans-Canada Air Lines adopted it for their Vanguard fleet.
In the years following, during flight testing of the Boeing 707, a test aircraft suffered a gear collapse that led to a fire fueled by leaking hydraulic fluid. As a result of this incident, Boeing implemented the use of Skydrol on the 707, and later on the 720 and subsequent aircraft. Skydrol 500B (dyed purple in colour) then proliferated through the aerospace industry due to its flame-retardant capability, but predominantly only in the civilian world, on transport-category aircraft.
Notable exceptions include the BAC Concorde, which used silicate ester fluid (Chevron M2V Oronite) due to the high temperature requirements.
Skydrol was never adopted into widespread military use, ostensibly because it was believed that, if an aircraft were hit by enemy fire on a mission, it would be merely academic whether the fluid was flame retardant or not, as the aircraft would be expected to be destroyed anyway.
The predominant competing mineral oil fluid, MIL-PRF-5606, had higher flammability due to its lower flash point; however, modern derivatives such as MIL-PRF-87257 have a flash point much closer to that of Skydrol.
Some smaller business jets still use MIL-H-5606, such as the Dassault Falcon series jets, most of the Cessna Citations and all models of Learjet. Business jets using Skydrol include the Cessna Citation X, Gulfstreams and Bombardier Challenger & Global Express Series.
Special seals had to be developed for use with Skydrol, as the elastomers available at the time were incompatible: the first seals used were made from butyl rubber, which was resistant to the phosphate ester fluid but suffered some early leakages. Modern Skydrol-compatible seals are usually made from EPDM (ethylene propylene diene monomer) or PTFE (polytetrafluoroethylene). |
/**
* @author Petr Hrebejk
*/
class CheckRenderer extends JPanel implements TreeCellRenderer {
private TristateCheckBox check;
private JLabel label;
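// Dummy list used only to obtain the current look-and-feel's selection foreground/background colors.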
private static final JList LIST_FOR_COLORS = new JList();
public CheckRenderer() {
setLayout(new BorderLayout() );
setOpaque(true);
this.check = new TristateCheckBox();
this.label = new JLabel();
add(check, BorderLayout.WEST );
add(label, BorderLayout.CENTER );
check.setOpaque(false);
label.setOpaque(false);
}
/** The component returned by HtmlRenderer.Renderer.getTreeCellRendererComponent() */
public Component getTreeCellRendererComponent(JTree tree, Object value,
boolean isSelected, boolean expanded, boolean leaf, int row, boolean hasFocus) {
JPanel panel = new JPanel();
panel.setLayout(new BorderLayout());
panel.setOpaque(true);
Node n = Visualizer.findNode(value);
ElementNode.Description description = n.getLookup().lookup(ElementNode.Description.class);
if ( description != null ) {
check.setVisible( description.isSelectable() || description.hasSelectableSubs() );
if( description.isSelectable() ) {
check.setSelected(description.isSelected());
} else {
check.setState( getCheckState( description.getSubs() ));
}
}
if ( isSelected ) {
label.setForeground(LIST_FOR_COLORS.getSelectionForeground());
panel.setOpaque(true);
panel.setBackground(LIST_FOR_COLORS.getSelectionBackground());
}
else {
label.setForeground(tree.getForeground());
panel.setOpaque(false);
//setBackground(tree.getBackground());
}
label.setText( n.getHtmlDisplayName() );
label.setIcon( new ImageIcon( n.getIcon(BeanInfo.ICON_COLOR_16x16) ) ); // XXX Ask description directly
panel.add(check, BorderLayout.WEST );
panel.add(label, BorderLayout.CENTER );
panel.setPreferredSize(new Dimension(label.getPreferredSize().width + check.getPreferredSize().width, panel.getPreferredSize().height));
return panel;
}
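/**
 * Derives the aggregate check state of a parent node from its selectable children:
 * SELECTED when at least one selectable child is selected and none is deselected,
 * NOT_SELECTED when no selectable child is selected, and OTHER (the indeterminate
 * state) when the selection is mixed or the child list is unknown.
 */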
private State getCheckState( List<ElementNode.Description> children ) {
if( null == children )
return State.OTHER;
int selCounter = 0, unselCounter = 0;
for( ElementNode.Description d : children ) {
if( d.isSelectable() ) {
if( d.isSelected() )
selCounter++;
else
unselCounter++;
if( selCounter > 0 && unselCounter > 0 )
return State.OTHER;
}
}
return selCounter > 0 ? State.SELECTED : State.NOT_SELECTED;
}
public Rectangle getCheckBounds() {
return (Rectangle)check.getBounds().clone();
}
private enum State {
SELECTED, NOT_SELECTED, OTHER;
};
private static class TristateCheckBox extends JCheckBox {
private final TristateDecorator model;
public TristateCheckBox() {
super(null, null);
model = new TristateDecorator(getModel());
setModel(model);
setState(State.OTHER);
}
/** No one may add mouse listeners, not even Swing! */
@Override
public void addMouseListener(MouseListener l) { }
/**
* Set the new state to either SELECTED, NOT_SELECTED or
* OTHER.
*/
public void setState(State state) { model.setState(state); }
/** Return the current state, which is determined by the
* selection status of the model. */
public State getState() { return model.getState(); }
@Override
public void setSelected(boolean b) {
if (b) {
setState(State.SELECTED);
} else {
setState(State.NOT_SELECTED);
}
}
/**
* Exactly which Design Pattern is this? Is it an Adapter,
* a Proxy or a Decorator? In this case, my vote lies with the
* Decorator, because we are extending functionality and
* "decorating" the original model with a more powerful model.
*/
private class TristateDecorator implements ButtonModel {
private final ButtonModel other;
private TristateDecorator(ButtonModel other) {
this.other = other;
}
private void setState(State state) {
if (state == State.NOT_SELECTED) {
other.setArmed(false);
setPressed(false);
setSelected(false);
} else if (state == State.SELECTED) {
other.setArmed(false);
setPressed(false);
setSelected(true);
} else { // either "null" or OTHER
other.setArmed(true);
setPressed(true);
setSelected(true);
}
}
/**
* The current state is embedded in the selection / armed
* state of the model.
*
* We return the SELECTED state when the checkbox is selected
* but not armed, DONT_CARE state when the checkbox is
* selected and armed (grey) and NOT_SELECTED when the
* checkbox is deselected.
*/
private State getState() {
if (isSelected() && !isArmed()) {
// normal black tick
return State.SELECTED;
} else if (isSelected() && isArmed()) {
// don't care grey tick
return State.OTHER;
} else {
// normal deselected
return State.NOT_SELECTED;
}
}
/** Filter: No one may change the armed status except us. */
public void setArmed(boolean b) {
}
/** We disable focusing on the component when it is not
* enabled. */
public void setEnabled(boolean b) {
setFocusable(b);
other.setEnabled(b);
}
/** All these methods simply delegate to the "other" model
* that is being decorated. */
public boolean isArmed() { return other.isArmed(); }
public boolean isSelected() { return other.isSelected(); }
public boolean isEnabled() { return other.isEnabled(); }
public boolean isPressed() { return other.isPressed(); }
public boolean isRollover() { return other.isRollover(); }
public void setSelected(boolean b) { other.setSelected(b); }
public void setPressed(boolean b) { other.setPressed(b); }
public void setRollover(boolean b) { other.setRollover(b); }
public void setMnemonic(int key) { other.setMnemonic(key); }
public int getMnemonic() { return other.getMnemonic(); }
public void setActionCommand(String s) {
other.setActionCommand(s);
}
public String getActionCommand() {
return other.getActionCommand();
}
public void setGroup(ButtonGroup group) {
other.setGroup(group);
}
public void addActionListener(ActionListener l) {
other.addActionListener(l);
}
public void removeActionListener(ActionListener l) {
other.removeActionListener(l);
}
public void addItemListener(ItemListener l) {
other.addItemListener(l);
}
public void removeItemListener(ItemListener l) {
other.removeItemListener(l);
}
public void addChangeListener(ChangeListener l) {
other.addChangeListener(l);
}
public void removeChangeListener(ChangeListener l) {
other.removeChangeListener(l);
}
public Object[] getSelectedObjects() {
return other.getSelectedObjects();
}
}
}
} |
A Taiwanese court on Tuesday cleared the main opposition's presidential candidate Ma Ying-jeou of corruption charges after a high-profile trial, a verdict expected to boost his chances in the race.
While the ruling party cried foul, citing political interference, Ma hailed what he called a "milestone ruling."
"I am not excited about the verdict because I knew from the very beginning that I'm innocent," said Ma, who chaired the nationalist Kuomintang party (KMT) until he resigned following his indictment.
Outside court, dozens of supporters holding placards and chanting slogans burst into cheers on hearing the ruling.
The former justice minister, once tasked with stamping out corruption, was charged with misusing more than 11 million Taiwan dollars (330,000 US dollars) in expense accounts during his time as Taipei mayor from 1998 to 2006.
Ma denied the charges, insisting he acted in exactly the same way as some 6,500 other government chiefs entitled to special expenses.
He blasted the legal process as "a waste of national resources" and called on the state not to appeal, saying it would take up to 1,000 prosecutors and a decade to look into all the senior officials entitled to such expenses.
"This is good news for the Kuomintang and its supporters" after six months of corruption allegations, said George Tsai, a political science professor at Chinese Culture University.
"An immediate and effective impact will be a boost of their morale," Tsai told AFP.
But he warned Ma had still not cleared all the hurdles as prosecutors could appeal the verdict.
It was not immediately clear whether prosecutors would appeal. They have 10 days to decide, but if they do go ahead, the High Court would hand down its verdict shortly before the March presidential vote.
Ma is locked in a tight race to succeed President Chen Shui-bian, standing against former premier Frank Hsieh of the ruling Democratic Progressive Party (DPP).
"Ma Ying-jeou is not guilty," the Tapei court's chief judge announced in a brief ruling.
Court spokesman Liu Shou-sung said the judges "found Ma made no attempt to obtain illegal benefits."
"Nor had he used tricks to mislead his (Taipei city government) accountants and auditing staff into wrongdoing," he said.
Accordingly, "the charges of corruption and breach of trust against Ma do not stand."
The DPP immediately hit out. "The ruling indicates politics has interfered in the judicial system," DPP legislator Hsieh Hsin-ying said.
Another DPP legislator, Wang Shih-chien, said he "could not figure out how the judges could make such a ruling, which runs against the public's consensus of corruption. Taiwan's justice is dead."
One of Ma's former aides at Taipei city hall, however, was sentenced to 14 months in prison. He had been charged with switching receipts in filing Ma's expenses claims.
Ma's running mate is veteran economist Vincent Siew, while the DPP's Hsieh has invited former premier Su Tseng-chang onto his ticket, according to news reports.
Hsieh has also come under prosecutors' investigation for alleged misuse of campaign donations during his 2002 run for the mayor's office in the southern city of Kaohsiung. |
Social Media as a Form of Virtual Whistleblowing: Empirical Evidence for Elements of the Diamond Model

This article makes an original contribution to the field of organizational whistleblowing by empirically investigating the suitability of the four elements of the fraud diamond as a means to understand the intention to disclose wrongdoing through virtual channels. This article also makes a contribution on the theme of whistleblowing as it relates to customers, an under-studied but relevant stakeholder in this field. The main findings of the article are as follows: (a) the four elements of the fraud diamond as they relate to whistleblowing - a combination of pressure, financial incentive, opportunity and rationalization, and capability - can explain the intentions behind customer reports of wrongdoing; (b) online social media channels are customers' preferred means of whistleblowing; (c) the elements of opportunity and capability are strongly correlated with the use of social media as a method of disclosing wrongdoing; and (d) virtual channels can be useful for whistleblowers in order to avoid potential retaliation. Unique managerial and academic implications of these research findings are also discussed, extending the layers of knowledge on whistleblowing in organizations.

Introduction

In recent years, the total global economic loss incurred due to fraud and wrongdoing in organizations has increased. A study conducted by the Association of Certified Fraud Examiners (ACFE) in 2016 reported that this total loss exceeded $6.3 billion, rising to $7 billion in 2018 (ACFE 2018). In parallel, the number of whistleblowers who have observed and reported such wrongdoing has also increased. This group is dominated by employees (53%), followed by customers (21%) and then by anonymous/other whistleblowers (ACFE 2018). Whistleblowers have also played an important role during the 2020 coronavirus pandemic (Brown 2020). Whistleblowers play a vital role in revealing wrongdoing in contemporary organizations (Andrade 2015;Mason and Simmons 2018;Miceli and Near 2002;Loyens and Maesschalck 2014), which are considerably more complex and influenced by digital technologies; this context has unleashed a shift in how these wrongdoings tend to be exposed (;Lam and Harcourt 2019;Munro 2017). As Vandekerckhove et al. (2014a) outline, the use of online channels or social media can be considered a contemporary approach to exposing wrongdoing. This paper refers to the use of such methods as 'virtual' or 'online' whistleblowing (Cherry 2012;Lam and Harcourt 2019). Previous studies have examined whistleblowers' use of both internal channels, which involve reporting within the organization, for example to a supervisor, and external channels, which involve reporting outside the organization, for example to a news or government agency (;;Lee and Fargher 2018;Park and Blenkinsopp 2009;;Skivenes and Trygstad 2010). However, there has been an acute lack of discussion and a significant lack of empirical evidence concerning virtual whistleblowing in general. To date, no existing research has considered virtual whistleblowing channels as a contemporary approach to reporting wrongdoing in a context of acute digitalization of contemporary organizations. Sharing information and speaking out about wrongdoing has become easier with the rapid proliferation of information technology, allowing individuals or groups to socialize and connect online across time and space.
As pointed out by Bosua et al., these aspects of social media and online platforms have had a significant impact on potential whistleblowers. In addition, other benefits of online channels may be taken into account by whistleblowers, including speed of communication or sharing information, range of options, anonymity, ease of use and cost. As a corollary of this debate, the main objective of this work is to empirically test the factors that influence whistleblowers in revealing wrongdoing through virtual whistleblowing channels. Specifically, this work integrates the recently developed concept of the whistleblowing triangle (c;Smaili and Arroyo 2019) with the fraud diamond perspective. The whistleblowing triangle, which is an adaptation of the prior concept of the fraud triangle (Dellaportas 2013;Free 2015), is composed of three sides, each comprised of one or more elements, used to understand the intention behind the reporting of wrongdoing (c;Smaili and Arroyo 2019;Wolfe and Hermanson 2004), namely: (i) pressure (PRS) or financial incentive (FNI); (ii) opportunity (OPR); and (iii) rationalization (RNL). The fraud diamond perspective proposes the addition of a fourth element into the fraud triangle concept, which is the capability (CPB) of the whistleblower. Wolfe and Hermanson argue that this fourth element of the fraud diamond-capability-should be considered in analyses of the factors that lead people to report wrongdoing, because capability empowers individuals to turn an opportunity to disclose wrongdoing into reality. As far as we are aware, only a few previous studies have tested the components of the whistleblowing triangle (;;c;MacGregor and Stuebs 2014), and this is still considered a research gap. As proposed by Wolfe and Hermanson, the fourth element of the fraud diamond-capability-which is absent in the original fraud triangle model, must be added to the fraud triangle concept, as well as to the whistleblowing triangle (c;Smaili and Arroyo 2019). Consequently, this research considers this fourth 'diamond element' in order to understand the motivations behind virtual whistleblowing intention. According to Smaili and Arroyo and Latan et al. (2019c), there are two types of pressure: positive (internal pressure) and negative (external pressure). This study focuses on internal pressure, which positively encourages whistleblowing. This type of pressure relates to whistleblowers' personal moral and religious values and sense of social duty, and therefore comes from within. Financial incentives can also motivate observers to speak out about wrongdoing (;;Friebel and Guriev 2012), which relates to whistleblowers' expectations (;;Lee and Turner 2017). The financial incentives available to whistleblowers differ between nations, and depend on the relevant local legal regulations, such as the Dodd-Frank legislation in the US; in Indonesia, such financial incentives do exist, but are not explicitly mentioned or legislated. Furthermore, observers will often choose the easiest opportunity (e.g., means and channel) to blow the whistle, taking into account future risks and potential retaliation (Guthrie and Taylor 2017;;). In some cases, the complexity of using certain channels may deter whistleblowers from revealing wrongdoing (Casal and Bogui 2008;MacGregor and Stuebs 2014). Conversely, online channels, such as WikiLeaks, can ensure anonymity, while the sharing of information tends to be limited to certain groups on channels such as Facebook or Twitter and other social media sites. 
However, in many cases, individuals go through a process of rationalization before deciding whether to blow the whistle or remain silent when faced with wrongdoing, before helping the victims of fraud (;b;Smaili and Arroyo 2019). The use of online channels allows whistleblowers to share information about wrongdoing quickly and widely, minimizing the potential for harm to victims. Finally, whistleblowers' ability, confidence and skills help them in revealing wrongdoing through online channels. Therefore, this study aims to test a virtual whistleblowing model, providing the first empirical evidence on this topic using a research sample of Indonesian customers. As pointed out by Culiberg and Miheli, customers have received little attention in the whistleblowing literature to date, with most previous studies using organizational members as the research sample (e.g., employees, managers, internal auditors, audit committees etc.). However, external whistleblowers, including customers, can also be considered whistleblowers when they observe misconduct through direct interaction with the organization. This process is in reality no different from members of an organization identifying wrongdoing-it differs only in the way in which wrongdoing is observed and discovered. Given that food fraud and wrongful business practices have recently increased (Moy 2018), this perspective allows customers to engage in blowing the whistle. This paper argues that customers often observe wrongdoing by organizations, and that they should therefore be seen as active subjects in the area of whistleblowing (ACFE 2018). Furthermore, our model is tested in Indonesia; most extant research on this subject is based in Western countries, while studies in developing countries are relatively rare (;;Miceli and Near 2005). More importantly, Indonesia has the fourth largest population in the world, after China, India and the U.S, and is among the world's most enthusiastic nations in terms of internet use. For all the above reasons, it is undoubtedly worth testing this virtual whistleblowing model in an Indonesian context. This study both broadens and deepens our understanding of the field of whistleblowing, providing original evidence in three important ways. First, it responds to research suggestions from experts in the field-e.g., Vandekerckhove et al. (2014a) and Lam and Harcourt -and provides empirical evidence concerning the virtual whistleblowing model as a contemporary approach to uncovering wrongdoing. This is thought to be the first empirical study to consider online channels in relation to whistleblowing intention. Second, this study expands the concept of the whistleblowing triangle (Smaili and Arroyo 2019;c) by adding a fourth element-whistleblowers' capability-creating a single comprehensive model. To date, there has been a lack of empirical evidence relating to this concept in the whistleblowing literature, which is considered a persistent research gap. As far as we are aware, this is the first empirical study to apply the four elements of the fraud diamond to predicting whistleblowing intention (c;Smaili and Arroyo 2019;Wolfe and Hermanson 2004). Finally, the use of customers as the research sample is novel. As customers are considered a unique group of 'external whistleblowers', operating outside the boundaries of the organization, they are free from various risks and obstacles (for example, threat of dismissal, poor performance appraisal, unfair treatment, intimidation or verbal harassment). 
Therefore, they are not involved in conflicts related to professional ethics and loyalty, as organizational members are (Bouville 2008;Jubb 1999;Varelius 2009). However, other risks remain and may threaten them, such as lawsuits from unethical companies or requests for compensation due to disclosure of wrongdoing. The remainder of this paper is organized as follows. The next section presents the theoretical background and development of hypotheses, followed by the research methodology. Following this, the empirical results are presented. Finally, the results are discussed and implications for both academics and practitioners are given.

Whistleblowing as Prosocial Behavior

Whistleblowing has been defined by a number of scholars from various perspectives (Alford 2001;Dozier and Miceli 1985;Jubb 1999;King 1997;Near and Miceli 2011;Vinten 2000). One definition of whistleblowing that has been widely accepted in social science research is that whistleblowing constitutes the disclosure by members of an organization (including former members and job applicants) of illegal, immoral, or illegitimate practices (including omissions) by the employer, to persons or organizations who may be able to effect action (Near and Miceli 1985). According to this definition, only members of the organization can be considered whistleblowers. However, this paper argues that, due to advancements in digital technology, this definition is too narrow and restrictive, because access to relevant information is not always limited to members of the organization. Hence, wrongdoing is observed not only by organizational insiders, but also by outsiders such as customers, vendors, consultants, external auditors or competitors. For instance, customers who observe instances of food fraud in Indonesia can report their findings through online channels to formal agencies such as the consumer protection agency or the national agency of drug and food control. These agencies tend to take decisive action against wrongdoing, such as removing products from the market and even withdrawing production permits. In addition, it is important to distinguish between bell-ringers and whistleblowers, as highlighted by Miceli et al. Someone is called a bell-ringer when they suspect organizational wrongdoing and disseminate this information. In such a case, the bell-ringer does not necessarily intend to stop the wrongdoing, and has not directly observed the suspected misconduct in the workplace. Whistleblowers are the opposite: they observe wrongdoing directly and intend to stop it in order to help the victims. Therefore, in this paper, a broader definition of whistleblowing is adopted: whistleblowing is a deliberate, non-obligatory act of disclosure. It is made by a person who has - or has had - privileged access to an organization's data or information concerning non-trivial illegality or other wrongdoing, whether actual, suspected or anticipated, which implicates, and is under the control of, that organization, to an external entity which has the potential to rectify that wrongdoing (Jubb 1999). Whistleblowing is regarded as a prosocial behavior; that is, a behavior intended to benefit others, in this case by uncovering wrongdoing in an organization (Alford 2001; ;).
As proposed by Dozier and Miceli in the Prosocial Organizational Behavior (POB) Model, whistleblowing is viewed as a prosocial behavior when the potential whistleblower observes wrongdoing and this motivates them to undertake three phases of action (Brief and Motowidlo 1986;). The first phase involves observing a questionable activity and labeling it as wrongful. In the second phase, the observer reacts to the wrongdoing by experiencing it as incorrect. Finally, in the third phase, the observer decides on a course of action where whistleblowing is an available option (Bjrkelo and Bye 2014;Near and Miceli 2011). Miceli et al. point out that this behavior does not have to be altruistic to be considered prosocial, and that while whistleblowers may feel morally compelled to act, they may simultaneously hold the view that the disclosure will result in some personal gain for themselves. The Whistleblowing Diamond In a recent study, Smaili and Arroyo proposed a new conceptual model called the whistleblowing triangle, an adaptation of the prior concept of the fraud triangle (Dellaportas 2013;Free 2015). The whistleblowing triangle model comprises the following three sides, each comprised of one or more elements: (i) pressure (PRS) or financial incentives (FNI); (ii) opportunities (OPR); and (iii) rationalization (RNL), all of which can help explain the intention behind whistleblowing. It is worth mentioning at this juncture that the use of the term 'triangle' is based on the three sides of grouped factors, rather than the total number of elements in the model. However, there is a lack of understanding about the relationships between these elements, and there is little empirical evidence for the model, with only two previous studies addressing this issue. First, a study by Brown et al. uses elements of the whistleblowing triangle as a proxy to explain the use of the Theory of Planned Behavior (TPB) regarding whistleblowing intention among management accountants. Their findings indicate that attitude and perceived behavioral control have a significant effect on whistleblowing intention. Second, a study by Latan et al. (2019c) uses the original propositions of Smaili and Arroyo to test the whistleblowing triangle model. Their results show that the elements of the whistleblowing triangle work as antecedents which trigger observers to blow the whistle. However, the triangle model, as it relates to both fraud and whistleblowing, is not the conclusive model in the business ethics literature. As Wolfe and Hermanson argue, this model can be enhanced and improved by adding a fourth element. In addition to pressure, financial incentives, opportunities and rationalization, the element of capability must be taken into account. An observer must have the capability to recognize wrongdoing and choose an open reporting channel in order to blow the whistle. The capability of the whistleblower is related to personal traits and abilities, which play a major role when revealing wrongdoing, even in the presence of other elements. This study includes the element of capability in order to test the 'whistleblowing diamond' model in an Indonesian context. As pointed out by Smaili and Arroyo and Latan et al. (2019c), more comprehensive research is needed to develop the whistleblowing triangle model, and to extend it using elements of the fraud diamond model. Given the lack of empirical evidence and the limited scope of previous studies, it is vital to deepen insights in this field. 
The following sections will describe the components of the whistleblowing diamond and formulate hypotheses based on this model. Pressures Affecting Whistleblowing Pressure has different meanings in different contexts. In this paper, pressure is defined as a positive incentive which motivates observers to reveal wrongdoing. Pressure can come from within the whistleblower (internal pressure), or outside the whistleblower (external pressure) (c;Smaili and Arroyo 2019). Internal pressure is related to an observer's personal moral, ethical and religious values, which may encourage him/her to uncover and reveal wrongdoing. This pressure usually arises from of a sense of social responsibility and the duty the observer feels to reveal the truth (Leys and Vandekerckhove 2014). On the other hand, external pressure relates to threats or retaliation, and can therefore be a disincentive to blow the whistle. This pressure usually reduces the whistleblower's motivation because of its potential negative effects on career and professional life. A whistleblower usually faces external pressure when revealing serious wrongdoing (;b;;Skivenes and Trygstad 2010). Due to the research sample used in this study, external pressure may be less relevant or have little impact and, therefore, internal, positive pressure will be focused on. While external pressures such as threats of dismissal or poor performance appraisal are not relevant for external whistleblowers, these factors may be more relevant when examining a sample of individuals who are members of an organization. However, external pressures do still threaten external whistleblowers, such as lawsuits from unethical companies or requests for compensation. In line with the Theory of Planned Behavior (TPB), a whistleblower experiences both personal and social pressure (internal), and organizational pressure (external) Smaili and Arroyo 2019). In the Indonesian context, external whistleblowers often speak out when confronted with unethical organizational behavior as a consequence of personal and social pressure, and this action is often carried out through online platforms and social media. As external whistleblowers experience less retaliation and have access to online reporting channels, they are often in a good position to reveal wrongdoing. At times, personal and social pressure may give the whistleblower greater courage, with the aim of helping victims and preventing wider damage. Conversely, internal whistleblowers often choose to remain silent about observed wrongdoing, as a result of organizational pressure (Culiberg and Miheli 2017;b;MacGregor and Stuebs 2014). This is due to the lack of protection for whistleblowers when revealing organizational wrongdoing through internal mechanisms. Since there is no law clearly regulating protection for whistleblowers in Indonesia, the use of these internal channels is less effective compared to online platforms. Meanwhile, previous studies indicate that pressure has a positive effect on the intention to blow the whistle (Smaili and Arroyo 2019), and internal pressure motivates the whistleblower to act (Chen and Lai 2014;c). Based on the above discussion, our first hypothesis is: H1 Pressure has a positive effect on online whistleblowing intention. Whistleblowing and Financial Incentives A whistleblower may consider financial incentives when reporting organizational misconduct. This motivating factor for uncovering wrongdoing is taken very seriously (;). 
Financial incentives and compensation schemes are designed to encourage whistleblowers to report wrongdoing which may result in large financial losses. Typically, observers use anonymous online channels to report their findings, and receive predetermined rewards. The use of anonymous online channels is intended to maintain the confidentiality of personal identities, and prevent retaliation against whistleblowers. Indeed, several regulatory bodies provide financial incentives for anyone who has information about wrongdoing in an organization. This is considered an effective way of uncovering wrongdoing in organizations, allowing for corrective action. A number of recent studies indicate that compensation and financial incentives can trigger whistleblowers to act (;;Friebel and Guriev 2012). In addition to financial incentives, there are also social and moral incentives (). However, these can be difficult to quantify and depend on the whistleblower's social norms, moral standards, and cultural environment. Social and moral incentives come under the broader category of ethical behavior and more stringent whistleblowing laws. Hence, this work argues that financial incentives can be more prominently and easily applied. However, as indicated by Berger et al., when whistleblowers focus on financial incentives, they tend to delay the revelation of wrongdoing until it results in significant losses. In this context, whistleblowers see revelation as an economic decision rather than an ethical one (;;c), and this action is therefore included in the category of prosocial behavior. However, external whistleblowers often recognize that financial incentives play an important role in their decision to act. Given that there are several financial incentive programs in place outside organizations, compared with the relative rarity of internal incentive programs, this motivates external whistleblowers. The results of previous studies by Andon et al., Latan et al. (2019c), Lee et al. and Rose et al. show that financial incentives have a positive effect on whistleblowing intention. Based on the above discussion, our second hypothesis is: H2 Financial incentives have a positive effect on online whistleblowing intention. Opportunity to Blow the Whistle This work defines opportunity as the availability of resources to support observers in revealing wrongdoing. Several factors increase opportunities for external whistleblowers: the availability of open reporting channels; support from bystanders; support from family and friends; as well as moral values and ethical standards. In addition, information technology also plays an important role in online whistleblowing intention. As Lam and Harcourt argue, the use of online channels for whistleblowing makes it possible to share information widely through messages, photographs and videos, with speed and anonymity. In addition, support from social media or website providers, technology (hardware and software) and the general public provide further opportunities for online whistleblowing. Several scholars even analogize such opportunities for disclosure as procedural justice (Brennan and Kelly 2007;;); that is, organizational justice relating to procedures in the workplace. When the general climate of procedural justice is elevated, observers may choose not to remain silent when faced with wrongdoing. Opportunity also relates to the type of wrongdoing and the individual whistleblower, which may require different reporting channels. 
For example, where the fraud takes place online, disclosure of the wrongdoing also tends to use online platforms. In addition, external whistleblowers may be forced to choose online channels to report wrongdoing as opposed to internal channels, because they do not have internal access to the organization. As theorized by Smaili and Arroyo, additional opportunities increase potential whistleblowers' intention to speak out about wrongdoing. Research from Brown et al. and Latan et al. (2019c) indicates that opportunities have a positive effect on the intention of accountants to reveal wrongdoing. Based on the above discussion, the third hypothesis derived is: H3 Opportunity has a positive effect on online whistleblowing intention. Rationalization of Whistleblowing Smaili and Arroyo define rationalization as a process of cognitive justification underlying the decision to blow the whistle. This represents a process of reasoning undertaken by whistleblowers considering their action (or inaction) when faced with wrongdoing, culminating in a decision which is in line with their own moral standards (;c). Rationalization is a cognitive process that enables observers to distinguish, for instance, between what actually happened and what should have happened (MacGregor and Stuebs 2014). Near and Miceli illustrate this process as a mechanism by which observers consider whether action should be taken to help victims. For observers with higher ethical standards, the process of rationalization may not be difficult, because they can easily make a decision and determine whether an instance of wrongdoing was serious, illegal or immoral before blowing the whistle. However, for observers with lower ethical standards, the rationalization process may not progress as smoothly, as they tend to be less engaged and more afraid of reporting wrongdoing. In this situation, the observer does not want to take any risks and therefore may remain silent (Reckers-Sauciuc and Lowe 2010). A rationalization process is necessary before a decision to blow the whistle is made. This process usually aligns with the observer's beliefs regarding wrongdoing and moral standards. Several previous studies have found that the rationalization process has a positive effect on helping the whistleblower make the decision to act (;Latan et al., 2019bMacGregor and Stuebs 2014;). Based on the above discussion, the fourth hypothesis is: H4 Rationalization has a positive effect on online whistleblowing intention. Whistleblowing Capability Capability relates to the whistleblower's ability to deal with wrongdoing. Capability relates to the individual whistleblower's strength, which can be considered a panacea when engaging with wrongdoing. Wolfe and Hermanson argue that capability is an important element of the fraud diamond model, because it involves psychological and technical factors that help the observer to speak out. The characteristics of whistleblowing capability include: being in the right position to blow the whistle; having the confidence to expose and report wrongdoing; having adequate technological skills; and having the ability to take action while under threat of retaliation. The capability element is also related to a proactive personality in whistleblowers, because, in general, those with a proactive personality feel more comfortable taking action regarding issues in the workplace (). 
In relation to online whistleblowing intention, such capabilities help the observer because online platforms require a certain level of ability to operate. A whistleblower with high capability will therefore be able to report wrongdoing more easily through such platforms. That is, they will not experience the fear of retaliation and threats that come along with more traditional methods. Conversely, an observer with lower capabilities may be reluctant to report wrongdoing and therefore choose to remain silent. Based on previous studies conducted by Boyle et al. and Wolfe and Hermanson, whistleblowers' capabilities do assist them in reporting wrongdoing. Hence, it seems that capability has a positive effect on online whistleblowing intention. Based on the above discussion, the fifth hypothesis is: H5 Capability has a positive effect on online whistleblowing intention. Figure 1 portrays the research framework empirically tested in this work. Sample and Data Collection The sample used in this study consists of customers who shop using Indonesian online stores. Customers were chosen as the sample in this study because, based on the ACFE report from 2018, they are particularly active subjects in observing and revealing wrongdoing, with the highest percentage after employees. In addition, based on the aforementioned ACFE report, the retail sector, including online stores, experiences a high rate of fraud, due to the recent increase in food fraud and wrongful business practices (Moy 2018); however, there is a lack of existing research addressing this area. Since the overall sampling frame in our case is unknown, with the total number of customers being difficult to identify, it is not possible to apply the use of probability sampling in this study to obtain a random and representative sample. Therefore, we have used non-probability sampling to collect data through online surveys. The use of non-probability sampling is considered appropriate when the number of respondents is very large and uncountable. Customers were identified using snowball sampling, through social media and ratings given on online stores' websites. In total, 1069 online shopping customers throughout Indonesia agreed to participate in the survey. A questionnaire link was sent to each customer, after pre-testing to minimize potential bias that might have threatened the validity of the results. This included accounting for possible measurement errors and identifying sources of bias in the survey method (e.g., nonresponse bias, common method bias, social desirability bias) to improve the quality of the survey (;Spekle and Widener 2018) and ensure the questionnaire was understood by the customers (Fowler 2013). This process involved asking three senior academics for their opinions and suggestions to assess the content validity of the questionnaire (Rossiter 2011), leading to improved clarity. In addition, the questionnaire was originally composed in English, and a back-translation procedure-from English to Indonesian and back into English-was used to ensure clarity of content. The final version of the questionnaire was first sent to 48 customers in order to conduct preliminary data analysis, assessing the validity and reliability of the indicators to ensure the feasibility of the survey instrument. The pre-testing results indicated that the questionnaire had good validity and reliability, making it suitable for further use. 
Data were collected during the period October 2018 to March 2019, with a total number of 1069 questionnaires sent out. The questionnaire was sent via e-mail and social media and followed up with a notification message to ensure that the questionnaire was received. This method is considered the best way of reaching a broad field of respondents at low cost and in a short time-frame (). In order to increase the response rate, a reminder e-mail/message was sent at the end of each month and several telephone calls were made to those customers who had only provided telephone numbers and had not yet responded. In addition, customers were assured of their anonymity and that their names and details would not be disclosed. Informed consent was considered to have been obtained when customers completed the survey and sent it back to us, as in the conclusion of the survey they agreed that their responses would be used in this study. Finally, for the purpose of testing non-response bias (; Fowler 2013), the time span of data collection was set at five months. In total, 244 questionnaires were returned. Of these, 37 were excluded due to being incomplete, leaving 207 usable responses and giving a final response rate of 19.36%. Following Baruch and Holtom, a response rate of > 15% is widely considered acceptable among studies using the survey method. Groves et al. argue that online surveys tend to produce low response rates, but that the results are not jeopardized by bias as a result of this, as long as there is no significant difference between the samples of respondents and non-respondents. In order to ensure that the results were free from non-response bias, early and late responders were tested and compared, with the assumption that the late responders represent customers who did not respond to the survey (Fulton 2018). While sometimes questioned, this approach has been widely used in social sciences research. Groves suggests using a post hoc test as a more robust approach to detect this bias. Hence, both approaches were used in this study to test for non-response bias. First, a t-test was run to assess differences in the means of the two sample groups. The results did not show any significant differences between early and late responders. Table 1 shows the results for Levene's test, which was not significant (p > 0.05), indicating that the assumption of homogeneity of variance was fulfilled. Furthermore, p values > 0.05 for the test of equality of means were obtained in both sample groups for the variables tested. These results indicate that non-response bias is not detected in our data. However, we cannot confirm that our set of respondents is identical with the set of non-respondents, because this type of sample cannot be generalized in this way. Second, no differences in sociodemographic variables were found when running a Bonferroni test. This result indicates that the response rate is similar across subgroups, which means that non-response bias was not found in this case. However, we acknowledge that non-response bias may still exist, despite the fact that our testing did not detect it. Furthermore, the results were assessed for other biases, such as Common Method Bias (CMB), which often arises when using the survey method (). A full collinearity VIF (AFVIF) was used, an approach proposed by Kock to assess CMB by examining the correlations between measurements. The analysis obtained an AFVIF value of 2.99 < 3.3, which indicates that CMB is not a threat to the results.
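To make the checks described above concrete, the following sketch reproduces the response-rate arithmetic and runs the early-versus-late responder comparison with Levene's test and an independent-samples t-test. It is a minimal illustration on simulated stand-in scores, not the study's data, and the group sizes and variable are hypothetical.

```python
# Illustrative sketch (synthetic data): response-rate arithmetic and the
# early vs. late responder comparison used to probe non-response bias.
import numpy as np
from scipy import stats

sent, returned, incomplete = 1069, 244, 37
usable = returned - incomplete                      # 207 usable questionnaires
response_rate = usable / sent                       # ~0.1936 -> 19.36%
print(f"usable = {usable}, response rate = {response_rate:.2%}")

rng = np.random.default_rng(0)
# Hypothetical construct scores (e.g., OWB) for early vs. late responders.
early = rng.normal(5.1, 1.0, size=120)
late = rng.normal(5.0, 1.0, size=87)

lev_stat, lev_p = stats.levene(early, late)         # homogeneity of variance
t_stat, t_p = stats.ttest_ind(early, late, equal_var=lev_p > 0.05)

print(f"Levene p = {lev_p:.3f} (p > 0.05 supports equal variances)")
print(f"t-test p = {t_p:.3f} (p > 0.05: no detectable early/late difference)")
```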
Finally, Social Desirability Bias (SDB) was considered, a common bias which is often ignored in survey research. SDB generally refers to respondents' tendency to select responses that reflect societally approved behavior (Nunnally and Bernstein 1994; Chung and Monroe 2003). That is, respondents tend to choose answers which reflect positively on them. In order to detect this bias, an indirect questioning approach was applied by adding additional measurement items during the initial data collection (Fisher 1993). This bias was controlled for in the context of online whistleblowing intention, and the results showed that there were no significant differences between the two measurements at the 0.05 level. This indicates that the target respondents did not provide different answers when taking a personal point of view compared with a third-person perspective (see Table 1). However, once again, we acknowledge that these biases may still exist, even though we did not detect them at this time. Although we have tested and controlled for both biases, we cannot fully guarantee that our data are free from these issues. A summary of respondent profiles can be seen in Table 2. Measurement Items and Scales Measurement items and scales are core parts of quantitative research and often have effects on research results. A good measurement item must be able to capture the concept of the measured construct. This research adopts measurement items that were developed in previous whistleblowing studies. Proxies from prior studies are also used to develop several items in this study. Although our topic is a recently developed concept and few studies have so far addressed this issue (Smaili and Arroyo 2019), measurement items for constructs in this model have been established in two previous works through a series of tests and results (;c). We argue that these items have good validity and reliability, as well as the proven ability to measure empirically tested constructs. Hence, these items were adapted for use in the current research context with little modification. It is worth noting that using established measurement items is generally considered better practice than developing new ones, given the complexity of scale development (Fowler 2013; DeVellis 2017). To measure the elements of the whistleblowing diamond, measurement items adapted from Brown et al., Latan et al. (2019c) and Murphy and Free were used. Specifically, the elements were divided into the following categories: PRS, FNI, OPR, RNL and CPB. First, pressure (PRS) to engage in blowing the whistle was measured using 4 items adapted from Latan et al. (2019c) and Murphy and Free, with modification. We used a 7-point Likert scale ranging from 1 = "not likely" to 7 = "very likely" to measure this variable. For instance, respondents were asked "how likely are you to engage in blowing the whistle, because of the social pressure to do the right thing based on a certain situation in a scenario" and so on. Second, we measured the variable of financial incentive (FNI) using 2 items adopted from Latan et al. (2019c) and Brown et al. Once again, a 7-point Likert scale was employed, ranging from 1 = "not likely" to 7 = "very likely", to measure this variable. In the same vein, respondents were asked, for example, "how likely are you to engage in blowing the whistle, in order to gain financial incentive and reputation". Third, the opportunity (OPR) to blow the whistle was measured using 4 items adapted from Latan et al. (2019c) and Brown et al.
We again used a 7-point Likert scale from 1 = "not likely" to 7 = "very likely". For example, respondents were asked about "possibilities to use online channels because of difficulties faced in the process of reporting internally" and so on. Fourth, we measured rationalization (RNL) using 5 items adopted from Latan et al. (2019c) and Murphy and Free. We used a 7-point Likert scale ranging from 1 = "not likely" to 7 = "very likely" and respondents were asked questions such as "how likely are you to engage in blowing the whistle, in order to help someone else by disclosing wrongdoing". Fifth, we measured capability (CPB) based on proxies provided by Wolfe and Hermanson. A 7-point Likert scale was also used to measure this construct, this time with 5 indicators. This scale ranged from 1 = "not likely" to 7 = "very likely". Respondents were asked questions such as "how likely are you to engage in blowing the whistle because of being in a good position to speak out" and so on. Finally, to measure online whistleblowing intention (OWB), measurement items based on studies from Lam and Harcourt were developed. This construct relates to the use of an online platform to act when observing wrongdoing, with a total of 5 items. To the best of our knowledge, measurement items for use in measuring OWB have not previously been developed. Again, we used a 7-point Likert scale with a scale ranging from 1 = "not at all" to 7 = "very much". Respondents were asked to indicate their potential use of online reporting channels to blow the whistle based on a particular scenario. All constructs can be considered to be captured appropriately when measurement items are able to reflect what they want to measure, which is indicated by good validity and reliability. The measurement objectives of the constructs in this model were achieved using a hypothetical scenario, with customers as actors. The scenario used in this study appears in Appendix 1. In this scenario, customers were asked to position themselves as a witness to food fraud, which is related to impaired products and wrongful business practices. We designed this scenario to capture the essence of each construct. A hypothetical scenario is the most common form of whistleblowing survey research, and explains customers' self-reported actions in response to observed wrongdoing in certain situations (Olsen 2014). A hypothetical scenario approach was chosen because it is difficult to directly measure observation of wrongdoing in the workplace. Scenario approaches are widely used in the whistleblowing literature (;a; Park and Lewis 2019; Valentine and Godkin 2019). In addition, the use of hypothetical scenarios does possess certain limitations, because the variables are measured without reallife decisions having to be made, which in some cases may not align with reality. Nevertheless, this is currently the best way to test online whistleblowing intention. Data Analysis Structural Equation Modeling (SEM), which is considered a second-generation analysis method, was employed to test our model and hypotheses. SEM has become a core part of quantitative analysis, which includes a variety of methods. The component-based SEM method, or 'soft modeling', was used in this study through a partial least squares path modeling (PLS-PM) approach (;Lohmller 1989). PLS-PM was chosen by considering a number of advantages related to its characteristics, which are superior to other SEM approaches (Latan and Noonan 2017). PLS was initially developed for two reasons. 
First, to test primitive models where there is a relative scarcity of theory and knowledge (Noonan and Wold 1986). Given that this model is still primitive, due to its recent development and relative scarcity in the literature, PLS was seen as a suitable approach in this regard (Wold 1989). In addition, PLS provides a high level of predictive accuracy in terms of model estimation and balancing causal-predictive relationships between variables (Lohmöller 1989; Rigdon 2013). Second, PLS relaxes the heavy assumptions arising from the covariance-based SEM (CB-SEM) approach. That is, PLS employs soft modeling with light assumptions, because it is based on linear aggregates and offers flexibility for various applications in real-world cases (Sellin 1988). One advantage of PLS is that it avoids Heywood cases and factor indeterminacy, which can occur in CB-SEM, using the principle of consistency at large. Finally, PLS-PM provides user-friendly software with a graphical user interface. In this case, PLS offers advanced features that make it easy to run without the need to use syntax codes. Given the long journey of PLS towards achieving popularity in social sciences research, as well as the currently available guidelines and standards for reporting the results of PLS analysis, we followed the step-by-step guidelines for best practice which are available in the literature (;;Latan 2018) in reporting our PLS analysis results. Before analyzing our model, we calculated the adequacy of the sample size for our parameter estimates. We used the gamma-exponential method, and found that the minimum sample size for our model was 146 cases (where the minimum absolute significant path coefficient = 1.97, significance level = 0.05 and required power level = 0.80), which our study meets. In short, we used a three-step approach to report the results of our PLS analysis as follows. First, we report the results of the outer model, which is related to the assessment of the measurement model, to show that the indicators in the model are valid and reliable. Second, we report the results of the inner model, which is related to the assessment of the structural model, by looking at standard metrics in PLS and testing our hypotheses. Finally, we provide the results of several robustness tests which were conducted to ensure that our main analysis results were free of certain systematic biases. We used the SmartPLS 3 software to analyze our data (). We implemented a number of specific settings before running this software. In the PLS algorithm settings, we selected the path weighting scheme with the maximum number of iterations set at 300 and a stop criterion of 10⁻⁷ (= 1.0E−07). In terms of bootstrapping, we used 5000 subsamples to obtain stability of model estimates through confidence interval methods, namely a bias-corrected and accelerated (BCa) bootstrap. In addition, we set the level of significance to reject the null hypothesis at 5% (one-tailed). The results obtained are described below. Results Before reporting the results of our main analysis, we conducted factor analysis using principal component analysis (PCA) to assess the unidimensionality of construct measurements in our model. We obtained Kaiser-Meyer-Olkin Measure of Sampling Adequacy (KMO-MSA) values of > 0.5 for each construct in our model and rotated component matrix values of > 0.60 for all items (see Table 3). From this, we can conclude that the measurement items form a single factor for each construct, and the items we developed (in this case the CPB and OWB items) have good unidimensionality.
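As a rough illustration of this unidimensionality screening, the sketch below computes the KMO-MSA statistic directly from an item correlation matrix. The item scores are simulated placeholders for one hypothetical construct; the study's own values come from Table 3, not from this code.

```python
# Illustrative sketch (synthetic data): Kaiser-Meyer-Olkin measure of
# sampling adequacy, computed from the item correlation matrix.
import numpy as np

rng = np.random.default_rng(1)
latent = rng.normal(size=(207, 1))                      # one hypothetical construct
items = 0.8 * latent + 0.6 * rng.normal(size=(207, 4))  # four reflective items

R = np.corrcoef(items, rowvar=False)
A = np.linalg.inv(R)
d = np.sqrt(np.outer(np.diag(A), np.diag(A)))
partial = -A / d                                        # anti-image (partial) correlations

off = ~np.eye(R.shape[0], dtype=bool)                   # off-diagonal mask
kmo = (R[off] ** 2).sum() / ((R[off] ** 2).sum() + (partial[off] ** 2).sum())
print(f"KMO-MSA = {kmo:.2f}  (values > 0.5 suggest the items are factorable)")
```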
Measurement Model Assessment We depended on several core metrics that are commonly used in PLS to test convergent and discriminant validity, as well as internal consistency reliability. First, we inferred convergent validity through loading factors and average variance extracted (AVE). The recommended values for the loading factor of the indicators in the model should be > 0.708, and the AVE value, used to explain the construct variance, should be > 0.5 (;;Latan 2018). However, in many cases, a loading factor value between 0.50 and 0.60 is obtained, due to the large number of items in the model. Such a value can still be acceptable, as long as the AVE value meets the threshold required to strengthen content validity. In Tables 3 and 4 we depict the results of our analysis for convergent validity. Our results fulfilled the rule of thumb and the threshold values for good convergent validity (see Fig. 2). Furthermore, we assessed construct reliability in the model using two measures: Cronbach's alpha () and Dijkstra-Henseler's ρA. Cronbach's alpha is a conservative measure and indicates the lower bound of reliability. This measure is useful when a small sample size is combined with a low number of indicators, while ρA serves as a good representation of a construct's reliability (Nunnally and Bernstein 1994). The recommended threshold values for Cronbach's alpha () and ρA range from 0.80 to 0.90. The results of our analysis, presented in Tables 3 and 4, show that the construct reliability in the model fulfills this rule of thumb. In addition to assessing convergent validity, we also assessed discriminant validity to ensure that the measurements of separate constructs are not unduly correlated with each other. We used the Heterotrait-Monotrait (HTMT) criterion, a newer approach developed in PLS-PM to assess discriminant validity. The rule of thumb for assessing discriminant validity with HTMT is that values above 0.90 indicate conceptually similar (insufficiently distinct) constructs, while values below 0.85 indicate conceptually different constructs (;). From our results, shown in Table 5, we can conclude that the HTMT values are well below the specified threshold. Hence, discriminant validity is fulfilled for our measurements. This means that each construct measurement in the model measures a different concept and the measurements are not excessively correlated with each other.
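To show how convergent validity and reliability figures of this kind are typically derived, the sketch below computes AVE, composite reliability and Cronbach's alpha from a set of standardized outer loadings and simulated item scores. The numbers are invented for illustration and do not reproduce the values reported in Tables 3 and 4.

```python
# Illustrative sketch (hypothetical numbers): convergent validity and
# internal consistency metrics for one reflective construct.
import numpy as np

loadings = np.array([0.78, 0.81, 0.74, 0.85])     # standardized outer loadings

ave = np.mean(loadings ** 2)                       # average variance extracted
cr = loadings.sum() ** 2 / (loadings.sum() ** 2 + (1 - loadings ** 2).sum())

def cronbach_alpha(items: np.ndarray) -> float:
    """items: n_respondents x k_items matrix of scores."""
    k = items.shape[1]
    item_var = items.var(axis=0, ddof=1).sum()
    total_var = items.sum(axis=1).var(ddof=1)
    return k / (k - 1) * (1 - item_var / total_var)

rng = np.random.default_rng(2)
latent = rng.normal(size=(207, 1))
scores = latent * loadings + rng.normal(size=(207, 4)) * np.sqrt(1 - loadings ** 2)

print(f"AVE = {ave:.2f} (> 0.5), CR = {cr:.2f}, alpha = {cronbach_alpha(scores):.2f}")
```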
Structural Model Assessment We used the inner model of PLS to assess the structural model in relation to the quality of the PLS model; this allowed us to assess the variance in the model which can be explained, the magnitude of the influence and contribution of each variable and the significance of the relationships between the hypothesized variables. We used core metrics to assess the structural model, as recommended by several scholars (;;Latan 2018). These consist of the coefficient of determination (R²), effect size (f²), predictive relevance (Q²) and variance inflation factor (VIF). In addition, we assessed our model's out-of-sample predictive power by implementing the PLS predict procedure (). The results of the structural model evaluation we obtained are depicted in Table 6. As shown in Table 6, we obtained R² and adjusted R² values for OWB of 0.694 and 0.701, respectively, which indicates the percentage of variance which can be explained by the predictors in our model. As Hair et al. note, these values fall into the large category; however, values that are too high, for example > 0.90, indicate over-fit and the occurrence of collinearity between variables. However, the magnitude of these values will depend on the number of predictor variables in the model, in relation to complexity and sample size. In the field of business ethics, for instance, both values are often found to be lower than 0.50, considering the broad scope and complex phenomena used to explain the relationships between variables. In addition, we obtained effect size values (f²) produced by the predictors in our model which ranged from 0.033 to 0.067, falling into the medium category. These values define the contribution of each predictor in the model to explaining the variance of the dependent variable (in our case, OWB). The greater the f² value, the more important the role of this predictor variable in the model. Conversely, a smaller f² value indicates that the predictor contributes little additional explained variance to the model. We also assessed predictive relevance (Q²) as an alternative measure to R² to show the predictive power of our PLS model. A Q² value larger than zero is meaningful and indicates that the PLS model is worth testing. We ran a blindfolding procedure with omission distance (D) = 7 and produced a Q² value of 0.499, indicating the large predictive relevance of our PLS model. In addition, we obtained VIF values for each predictor in the model of less than 3.3, which indicates no problematic collinearity between predictor variables in the model. Finally, we tested the model's out-of-sample predictive power by running the PLS predict algorithm () to generate holdout sample-based point predictions for the constructs in our model. Given that our sample size meets the minimum requirements and is large enough, we used ten folds and ten repetitions, and compared the root mean squared error (RMSE) values from the PLS-PM analysis with those generated by a naïve linear benchmark (;). The results indicate that the PLS-PM analysis yields lower prediction errors than the naïve benchmark for most of the indicators related to PRS, FNI, OPR, RNL, CPB and OWB, offering clear support for our model's predictive power. In addition, Q² predict values > 0 indicate that our model outperforms the naïve benchmark (i.e., the indicator means from the analysis sample). Testing of Hypotheses We tested the derived hypotheses for the relationships between variables by performing a bootstrapping procedure. In testing these hypotheses, we assessed the direction of the path coefficients, and accepted or rejected each hypothesis based on a 95% confidence interval (CI), generated at the 5% significance level (one-tailed). Overall, our results support the hypotheses on the relationships between predictors and outcome. As shown in Table 7, we found that the relationships PRS → OWB, FNI → OWB and OPR → OWB were significant, with beta (β) values of 0.193, 0.164 and 0.230, respectively, significant at p ≤ 0.05 (95% CI). From these results we can conclude that H1, H2 and H3 are fully supported. Additionally, we found the relationships RNL → OWB and CPB → OWB to be significant, with beta (β) values of 0.166 and 0.255, respectively, significant at p ≤ 0.05 (95% CI). Hence, we can conclude that H4 and H5 are also fully supported.
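As a schematic of the resampling step behind these hypothesis tests, the sketch below draws 5,000 bootstrap samples, recomputes a standardized path coefficient each time, and reads off a percentile confidence interval. It uses simulated data and ordinary least squares in place of the full PLS-PM estimation, so it mirrors the logic of the procedure rather than the software's exact BCa implementation.

```python
# Illustrative sketch (synthetic data): bootstrap confidence interval for a
# standardized path coefficient, mirroring the 5,000-subsample procedure.
import numpy as np

rng = np.random.default_rng(3)
n = 207
x = rng.normal(size=n)                      # hypothetical predictor score (e.g., CPB)
y = 0.25 * x + rng.normal(size=n)           # hypothetical outcome score (e.g., OWB)

def std_beta(x, y):
    """Standardized OLS slope; equals the Pearson correlation for one predictor."""
    return np.corrcoef(x, y)[0, 1]

boot = np.empty(5000)
for b in range(5000):
    idx = rng.integers(0, n, size=n)        # resample respondents with replacement
    boot[b] = std_beta(x[idx], y[idx])

lo, hi = np.percentile(boot, [2.5, 97.5])   # 95% percentile CI (BCa omitted for brevity)
print(f"beta = {std_beta(x, y):.3f}, 95% CI = [{lo:.3f}, {hi:.3f}]")
print("one-tailed support for a positive path if the lower bound exceeds zero")
```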
Robustness Tests We performed several robustness tests to ensure that our main results are free from certain biases, such as endogeneity, non-linearity and unobserved heterogeneity. Several scholars (Peel 2018;) have noted these biases as threats to results that can lead to mistakes in drawing conclusions, and they therefore need to be tested. First, we tested endogeneity bias to assess whether there were interventions from omitted variables, the presence of reverse causality relationships, or other potential errors (e.g., sample-selection bias). To ensure that this bias did not affect our results, we conducted the Heckman test using a two-step procedure with the help of the Stata software. In the first step, we ran our model and data without controlling for endogeneity bias. In the second step, we controlled for the effects of endogeneity bias by including a third variable in our model equation. Our results, shown in Table 8, indicate that there are no differences in results whether or not this bias is controlled for, which indicates that endogeneity bias does not occur in our data or model. Second, we examined whether non-linear effects occur in the relationships between variables in our model, to ensure that linear assumptions are fulfilled. When a non-linear effect is present but a linear relationship is assumed, the estimated relationship can be illusory. We tested this effect by using Ramsey's regression specification error test (RESET) and quadratic functions in the SmartPLS software. The results of our analysis for this bias, presented in Table 9, fully support a linear relationship between variables in the model. We found the presence of non-linear relationships between variables to be insignificant, with f² falling in the small category and p values > 0.05 for Ramsey's RESET. This indicates that non-linear effects do not appear in our model (Wooldridge 2020). Finally, we examined unobserved heterogeneity bias, which usually arises from differences between segments or clusters of the target population. Scholars usually assume that data come from a single population, but under certain conditions they may not. Hence, this bias usually occurs when performing sample selection. To test for this bias, we ran Finite Mixture PLS (FIMIX-PLS). After assessing goodness of fit and performing multiple procedures, such as Akaike's information criterion (AIC3) and consistent AIC (CAIC), we found that FIMIX-PLS gave a final result of k = 1, indicating that our data are free from this bias. Discussion and Implications for Theory and Practice The intention to blow the whistle through online channels such as social media and other online platforms has become an area of study demanding urgent attention at this time (Cherry 2012;). The present research attempts to fill this gap by expanding the concept of the whistleblowing triangle, adding to it the fourth element of the fraud diamond, capability, and testing the expanded concept, called the whistleblowing diamond, as a predictor of online whistleblowing intention, using a sample of customers in Indonesia. Our findings answer the research calls of Vandekerckhove et al. (2014a) and Lam and Harcourt to provide the first empirical evidence related to these contemporary methods of blowing the whistle. In general, we find empirical support for the whistleblowing diamond elements in relation to online whistleblowing intention in Indonesia. Specifically, our main contributions can be presented as follows.
First, we have identified a positive and significant effect on the relationship between pressure and online whistleblowing intention, where PRS encourages individual intention to blow the whistle. Our findings imply that whistleblowers are motivated by social pressure to make the decision to report wrongdoing. Whistleblowers who react upon discovering wrongdoing by an organization may be bound by moral values or religious loyalty. In the Indonesian environment, such values are highly emphasized. On the other hand, they may report wrongdoing because of the level of damage and loss caused by the wrongdoing, in which case the decision to report takes into account the possibility of helping the victims. Therefore, the whistleblower is under social pressure and is motivated by human relations to blow the whistle. Our findings support the propositions of Smaili and Arroyo and empirical evidence from Brown et al., Chen and Lai and Latan et al. (2019c), which indicate that PRS has a positive effect on OWB. Second, we identified a positive and significant effect on the relationship between financial incentives and online whistleblowing intention. Financial incentives are compensation programs or rewards given to whistleblowers who report serious wrongdoing which has the potential to cause significant losses. As pointed out by several scholars (;;c), the expectation of gaining financial incentives is another driving force for whistleblowers to report wrongdoing. This economic motive is a prosocial behavior, where in addition to helping the victims, whistleblowers also desire reward. In Indonesia, financial incentives are given for uncovering wrongdoing. Dozier and Miceli underline that such behavior is often found in various cases of whistleblowing. Several previous studies corroborate our findings (;c;), where financial incentives trigger online whistleblowing intention. Third, we found evidence of a positive relationship between opportunity and online whistleblowing intention, where OPR increased the intention to blow the whistle. Because whistleblowers are often operating under the threat and fear of retaliation, they will choose the easiest opportunity to blow the whistle. Opportunities are always related to the availability of supporting resources that help whistleblowers to take action. In addition, the availability of mobile devices allows the opportunity to blow the whistle even more easily (Lam and Harcourt 2019). Previous studies by Brown et al. and Latan et al. (2019c) show that such opportunities increase the intention to blow the whistle. That is, the easier the reporting channel is considered to be by whistleblowers-in this case, online whistleblowing-the more they will tend to blow the whistle, due to the minimized perceived level of risk. Among the sample analyzed, the use of social media seems to encourage customers to see more opportunities to disclose wrongdoing; in particular because the sample analyzed recognizes that social media may reduce the potential for retaliation by firms. Fourth, we identified evidence of a positive relationship between rationalization and online whistleblowing intention, where RNL increased the intention to blow the whistle. Rationalization is a process of reasoning used to choose between two options that are opposed to each other. In many cases, an observer may be confused in determining their own course of action, due to the inconsistency of the results of whistleblowing. 
For example, there are whistleblowers who receive praise when revealing wrongdoing, while others suffer retaliation. In such situations, rationalization helps the whistleblower to make a decision. Given the conditions in Indonesia, which prioritize values, morals and ethics, the rationalization process will be effective. Consequently, this process is considered to trigger the intention of the whistleblower to report wrongdoing. Several previous studies support this finding (;b, c;Near and Miceli 2011), resulting in a positive relationship between RNL and OWB. Finally, we can support a positive and significant effect on the relationship between capability and online whistleblowing intention. It is worth noting that the coef. value in this relationship was the highest among all relationships tested, indicating that the addition of the capability element to the whistleblowing triangle was pertinent in assessing online whistleblowing intention. Given the capabilities of whistleblowers in relation to personality and their ability to speak out, this is a factor that plays an important role in whistleblowing actions. Our findings fully support the role of capabilities in OWB. Adequate capability will help the observer when reporting wrongdoing that is considered serious. Our findings support previous research that indicates a positive relationship between CPB and OWB (;Wolfe and Hermanson 2004). The use of virtual channels, such as social media, has a significant relationship with the capability of the sample analyzed to blow the whistle. Our research provides a number of original theoretical and practical implications, as follows. In terms of theoretical implications, our findings add new evidence and extend the state-of-the-art research in the whistleblowing literature in complex, digitally enabled organizational contexts. More precisely, this can be considered the first empirical study to use online channels as a contemporary approach to whistleblowing. While most studies have dealt with traditional approaches to whistleblowing, such as using internal and external channels (;;Park and Blenkinsopp 2009), understanding of trends related to online whistleblowing is still limited (;Lam and Harcourt 2019), despite being a key contemporary issue in the field. In addition, our research contributes theoretically to the development of the whistleblowing triangle model (Smaili and Arroyo 2019;c), by developing the model into the whistleblowing diamond. To summarize, this article encapsulates a number of relevant implications regarding the previous literature. In line with the Theory of Planned Behavior (TPB) as applied to whistleblowing theory (), our research results are aligned with previous theory which indicates that pressure has a positive effect on the intention to blow the whistle (Smaili and Arroyo 2019), and that internal pressure motivates potential whistleblowers to act (Chen and Lai 2014;c). Our findings regarding the relationship between financial incentives and whistleblowing intention confirm previous theory such as Andon et al., Latan et al. (2019c), Lee and Fargher and Rose et al.. We also add to Smaili and Arroyo, Brown et al. and Latan et al. (2019c) because our findings indicate that, indeed, opportunities can have a positive effect on the intention of customers to reveal wrongdoing. Regarding the relationship between rationalization and online whistleblowing intention, our findings also suggest a positive link, confirming a number of prior studies (;b;MacGregor and Stuebs 2014;). 
We add to the developing debate suggesting that whistleblowers' capabilities have a positive effect, assisting them in reporting wrongdoing (Boyle et al.; Wolfe and Hermanson). In terms of practical implications, our findings offer the following contributions. The sample analyzed prefers to report wrongdoing by means of social media (e.g., Facebook and Twitter), rather than using other online platforms or channels such as WikiLeaks, blogs and YouTube. The primary reasons for this appear to be opportunity and the potential to avoid retaliation from firms. Therefore, firms should improve their communication with customers through the use of big data analytics in order to monitor comments from customers within their online social media channels and thus identify customers' perception of wrongdoing by firms. Firms may thereby correct themselves, explaining potential misunderstandings or misalignment of customers' expectations and, consequently, firms may improve customers' satisfaction and loyalty. The identification of the channels preferred by customers to disclose wrongdoing is important for firms to enhance their relationship with customers, as well as to improve the services they provide. In addition, firms can avoid problems with their image, since they can proactively monitor customers' social media interactions, as it has been identified that social media is the virtual channel most preferred by customers to report wrongdoing. Investing in big data analytics would be a better way to allocate resources, rather than investing in firms' own online platforms for communication with customers. Limitations and Future Research Directions As with all research, this study has certain inevitable limitations. First, our study only examined whistleblowing intention, without considering actual behavior. As pointed out by Bjørkelo and Bye and Culiberg and Mihelič, most of the previous research in the whistleblowing literature has focused on whistleblowing intention rather than actual whistleblowing. Both factors have advantages and disadvantages: actual whistleblowing tends to be difficult to measure, while the intention to blow the whistle may be reported differently in a study compared to action taken in a genuine situation. A meta-analysis by Mesmer-Magnus and Viswesvaran concluded that predictors of the intent to blow the whistle may differ from those of actual whistleblowing, with results found to be stronger for intention than for actual behavior. Second, our main findings may not be generalizable to other cultural contexts. As explained by Vandekerckhove et al. (2014b), research on whistleblowing requires different methods and research designs in each country and society. Furthermore, the concept of whistleblowing may have different meanings in languages around the world; a cross-cultural comparison study by Patel provides preliminary evidence indicating this difference. Finally, our study only considers the diamond elements as predictors of online whistleblowing intention. In this context, we have not examined several factors, such as the nature of wrongdoing or laws and policies, as proposed by Lam and Harcourt in the framework of the online whistleblowing model. We would suggest the following directions for future research. First, we make a research call to examine the effect of the diamond elements on actual behavior in online whistleblowing.
Taking a reasoned approach to such actions, Bjørkelo and Bye suggest examining the relationship between intention and actual behavior in whistleblowing. In addition, a behavioral approach could be used to measure the actual behavior of the whistleblower. Second, the need for a cross-cultural comparison study considering the diamond elements and online whistleblowing intention should be addressed in the future. In addition, comparative studies between types of whistleblowers (online vs. external and internal) may lead to new avenues for future research (Culiberg and Mihelič 2017;). Scenario Mia is a housewife and a graduate student from a well-known university in Indonesia. She majored in food science and nutrition. Besides being known as a smart student, she is also a critical thinker. For the past year, she has been a regular customer of food and beverage products from a company operating in Indonesia. Mia loves these products; as well as wishing to support national products, she also likes them because of the cheap price compared to competing products. She always buys these products through websites or online stores, as she is very active in using the internet and social media, and has capabilities in information systems and technology. Everything was going well, until one day she found several irregularities in these products. In her last purchase order, Mia found that the products smelled bad, even when stored in the refrigerator. She made sure that the expiration date had not yet passed. Mia then suspected that this impaired product indicated food fraud by the company, considering her expertise in this area. To confirm her suspicions, she went to the factory where the products were manufactured. The distance was not too far, because the factory is in the same province. She asked permission from the production manager to look around the factory, with the excuse of fulfilling her college assignments. In the end, she found that the products were being produced unsafely, and several other instances of fraud were also observed, such as the use of poor quality raw materials and production systems that were not environmentally friendly. Mia then returned home and realized that food fraud and such wrongful business practices have a very serious and widespread impact. In addition to damaging the market by selling products at lower prices, which has an impact on the viability of law-abiding and honest companies, it also poses potential risks to human health and nutrition. The latter is a major concern, which threatens human life. This caused her to be unable to sleep for several nights.
Following these discoveries, Mia considered reporting these findings to the Consumer Protection Agency, the National Agency of Drug and Food Control or to the relevant authorities through available reporting mechanisms. She considered that if she were to reveal this misconduct through internal channels, the company might ignore it, therefore making the reporting ineffective, more difficult and not producing corrective actions. In addition, considering that she is not an employee there, Mia then decided to report this matter externally, through an online platform that she considered more appropriate. Mia wanted to forget about this case, but pressure from within herself, a sense of morality and social responsibility drove her to disclose it. However, she realized that by doing this, there were two consequences that may occur: on the one hand, obtaining financial rewards and praise; but on the other hand, being prosecuted by the company. After thinking about this for several days, Mia decided to postpone making a decision about the case until she found the right solution. |
NIGEL CLOUGH is showing his mean streak as he bids to turn Derby into Championship play-off contenders.
Clough’s men are fast becoming the division’s hardest team to beat – having conceded only one goal in six games since Christmas.
They rode their luck at Turf Moor, with keeper Frank Fielding making brilliant saves from Charlie Austin and Jay Rodriguez, while Keith Treacy’s cross drifted on to the post.
Clough said: “It was a good point and a good performance. Sometimes when you are away, it can be a negative 0-0.
To be fair, Derby struggled against a Burnley side on the up.
But the Clarets couldn’t take advantage of a near gale at their backs in the first half, with Austin heading over from a Rodriguez cross.
Theo Robinson forced ex-Derby keeper Lee Grant to save his long-range effort at the other end. But the best chances came after the break – most of them to Burnley.
Fielding got down well to push away Austin’s header from Dean Marney’s right-wing cross.
The keeper was helpless as Treacy’s cross sailed over his head and hit the inside of the far post.
And he was relieved to see Austin’s shot skim wide of the far post after a great one-two with Josh McQuoid.
But Fielding was brilliant moments later to deny Rodriguez with his legs after Marvin Bartley’s low cross found the striker clear at the far post. |
1. Field of the Invention
This invention relates to a resistor array board which is interposed between circuit forming elements such that a plurality of resistors are inserted between the elements.
2. Related Art
In general, for mounting conventional protective resistors, there is employed, among others, a method in which terminals of protective resistors are inserted into through-holes of circuit forming elements and the terminals are attached thereto by soldering. There is also employed a method in which a plurality of resistors are loaded on a board by soldering and the board is connected to circuit forming elements.
As means for mounting, as one group, a plurality of protective resistors, Japanese Patent Application Unexamined Publication No. H10-185989 discloses a method in which a plurality of protective resistors are molded to form a resistor package such that male terminals of the protective resistors project from opposite surfaces of the resistor package, and the male terminals are inserted into through-holes of circuit forming elements and soldered, and then the resistor package is mounted between the circuit forming elements.
However, the above-mentioned conventional methods have such problems that the work for inserting the protective resistors is complicated and time consuming. Moreover, in the latter method for mounting the resistor package into the through-holes, although the protective resistors can be mounted as one group, there are encountered such problems that the method cannot cope with a construction in which the opposite ends of the protective resistors are press contacted with the two circuit forming elements, the resistor package must be totally discarded when the protective resistor(s) is broken, and heat accumulation adversely affects electronic parts.
It is, therefore, an object of the present invention to provide a resistor array board, in which a plurality of protective resistors can easily be inserted as one group merely by interposing a resistor array board between circuit forming elements and opposite ends of the protective resistors can properly be press contacted with the circuit forming elements.
Another object of the present invention is to provide a resistor array board, in which a protective resistor(s) can easily be replaced when broken.
To achieve the above objects, a resistor array board according to the present invention comprises a porous plate having a plurality of through-holes arranged in array and opening at opposite surfaces thereof; and a plurality of protective resistors removably loosely inserted into the through-holes.
Each of the protective resistors is resiliently retained by an electrically conductive spring element, and opposite ends of each of the protective resistors are press contacted, either directly or indirectly, with the circuit forming elements, which are arranged in opposing relation on the respective surfaces of the porous plate.

Preferably, one end of the protective resistor is resiliently retained by the electrically conductive spring element and is press contacted with one of the circuit forming elements through that spring element, while the other end of the protective resistor is press contacted with the other circuit forming element.
The porous plate is formed of a metal plate, and the resistor array board further comprises means for connecting the porous plate to a grounding line of one or both of the circuit forming elements, so that the means serves as a shield plate in which a shielding effect can be obtained by the porous plate.
The porous plate has a through-hole therein formed separately from the plurality of through holes in which the plurality of protective resistors are loosely inserted, a grounding contact is loosely inserted in the separately formed through-hole, the grounding contact is resiliently retained by the electrically conductive spring element, opposite ends of the grounding contact are press contacted, either directly or indirectly, with the circuit forming element by a resilient force of the electrically conductive spring element, and a grounding line is formed between the circuit forming elements.
The porous plate has a through-hole therein formed separately from the plurality of through holes in which the plurality of protective resistors are loosely inserted, a power supply contact is loosely inserted in the separately formed through-hole, the power supply contact is resiliently retained by the electrically conductive spring element, opposite ends of the power supply contact are press contacted, either directly or indirectly, with the circuit forming element by a resilient force of the electrically conductive spring element, and a power supply line is formed between the circuit forming elements.
Preferably, the porous plate is formed of a metal plate and an inner peripheral surface of the through-hole for allowing the protective resistor of the porous plate to be loosely inserted therein is coated with an insulative material. |
It started with a tweet by Matt Simmons that got a reaction from me. Before I write a bunch of entries on this (instead of trying to cram even more complex thoughts into 140 characters) I want to talk about the significant differences I see between three basic things that are being talked about here (in general).
A body of professional knowledge is what it sounds like; in the case of system administration, it's some version of our accumulated experiences and wisdom. For now let's pretend that this body of knowledge will be descriptive ('when people did X this is what generally happened') instead of prescriptive ('do X instead of Y') and thus basically uncontroversial. I think that accumulating a body of knowledge is a noble endeavour but I also think that it's a lot of work, which means that it's not going to happen unless people find some way to pay for it.
(It's not enough for us to blog; blogging is to an established body of professional knowledge as research papers are to science textbooks. To have a real body of knowledge we need the equivalent of a textbook that people agree on. Putting everything together to create it is where a good part of the thankless hard work is.)
Certification sits on top of some body of professional knowledge. I see three broad levels of certification: certification that you have been exposed to the professional knowledge (this is the 'I got training' level of certification), certification that you know the professional knowledge, and certification that you can properly apply the professional knowledge. The latter sort of certification is the most useful to businesses, hiring managers, and so on (because it's what they really care about), but it also places a demand on the professional knowledge. To be able to meaningfully certify this you must be able to pose questions that have a (theoretically) objective correct answer and an incorrect answer, because that's what it takes to test people on it. This is fine for certain sorts of professional knowledge, where there already really is only one correct answer (eg, 'how do you use command X to do Y'). However it's my view that this is the least interesting thing to certify and what people really want to certify is much higher level and correspondingly much fuzzier about 'correctness' in its natural state.
(At this point I will note that a university degree is not certification in this sense. If it was we would not have all these stories about Computer Science graduates who can't program worth beans.)
Regulation is a significant step beyond mere certification where you basically get punished for not being certified or not doing things in the certified way, whether this is directly (by law) or tacitly (eg by increased liability). Unlike professional knowledge or certification, regulation is not something that can be done purely within a profession; it intrinsically needs the outside world to play along. Generally the outside world is at least a bit sceptical so this takes quite a bit of work one way or the other.
(The easiest way to get the outside world to care is for clearly slipshod work to kill people.)
As I see things there are major gulfs between all three of these things. The gulf between certification and regulation is obvious. The gulf between professional knowledge and strong certification is the distance from having 'best practices' to reaching consensus that some options are never valid. |
Pediatric asthma care in the emergency department: measuring the quality of history-taking and discharge planning. The National Asthma Education and Prevention Program NAEPP Guidelines include recommendations for history-taking and discharge planning during an asthma visit, but there are no tools to measure performance. The objectives of this study were to define and operationalize key elements of history-taking and discharge planning, to develop a tool for measuring these elements, and to evaluate the quality of history-taking and discharge planning in the emergency department (ED) during visits for asthma using the new tool. Expert opinion and extensive literature review were used to develop a 13-item checklist containing items that should be documented during history-taking and provided during discharge planning for an ED visit for an acute asthma exacerbation by children. A convenience sample of 90 pediatric emergency medicine physicians and allergists rated each item in the checklist. The checklist was used to score audiotapes of asthma visits in the ED. Subjects were 154 parents of asthmatic children aged 4-9 years seeking care in nine inner-city EDs affiliated with asthma centers participating in the National Cooperative Inner-City Asthma Study and the physician/providers who delivered care. Seven of the 13 items on the checklist were rated as required to be performed by more than 90% of the allergist/pediatric emergency medicine physicians. Only 10% of the 154 visits included all seven of the highly rated items, whereas 19% of the visits included three or fewer. Only 7 of the 13 items (54%) were performed in more than 50% of the visits, and 4 items were performed in fewer than 25% of visits. Based on expert ratings, the checklist for measuring elements of history-taking and discharge planning during asthma visits appears to have considerable face validity. In the visits studied, the overall performance of these elements was low. Interventions to improve performance on the checklist might lead to improved care for children with asthma who frequent the ED. |
AIMED: Evolving Malware with Genetic Programming to Evade Detection Genetic Programming (GP) has previously proved to achieve valuable results in the fields of image processing and arcade learning. Similarly, it can be used as an adversarial learning approach to evolve malware samples until static learning classifiers are no longer able to detect them. While the implementation is relatively simple compared with other Machine Learning approaches, results proved that GP can be a competitive solution for finding adversarial malware examples compared with similar methods. Thus, AIMED - Automatic Intelligent Malware Modifications to Evade Detection - was designed and implemented using genetic algorithms to evade malware classifiers. Our experiments suggest that the time to achieve adversarial malware samples can be reduced by up to 50% compared to classic random approaches. Moreover, we implemented AIMED to generate adversarial examples using individual malware scanners as targets and tested the evasive files against further classifiers from both research and industry. The generated examples achieved cross-evasion rates of up to 82% among the classifiers. |
Compression of Multidimensional Biomedical Signals With Spatial and Temporal Codebook-Excited Linear Prediction In this paper, we propose a model-based lossy coding technique for biomedical signals in multiple dimensions. The method is based on the codebook-excited linear prediction approach and models signals as filtered noise. The filter models short-term redundancy in time; the shape of the power spectrum of the signal and the residual noise, quantized using an algebraic codebook, is used for reconstruction of the waveforms. In addition to temporal redundancy, redundancy in the coding of the filter and residual noise across spatially related signals is also exploited, yielding better compression performance in terms of SNR for a given bit rate. The proposed coding technique was tested on sets of multichannel electromyography (EMG) and EEG signals as representative examples. For 2-D EMG recordings of 56 signals, the coding technique resulted in SNR greater than 3.4 ± 1.3 dB with respect to independent coding of the signals in the grid when the compression ratio was 89%. For EEG recordings of 15 signals and the same compression ratio as for EMG, the average gain in SNR was 2.4 ± 0.1 dB. In conclusion, a method for exploiting both the temporal and spatial redundancy, typical of multidimensional biomedical signals, has been proposed and proved to be superior to previous coding schemes. |
KIRKUK, Kurdistan region ‘Iraq’,— Kurdish Peshmerga forces detained a group of Shiite militia fighters south of Kirkuk Thursday on suspicion of kidnapping, military officials told Rudaw on condition of anonymity.
A group of nine Shiite militants travelling with six captives, including two Kurds, was stopped at a Peshmerga checkpoint in the Kirkuk suburb of Dubz. They were arrested after the identities of the captives, who were reported missing in September, were established.
Tensions have been high between the Peshmerga and Shiite militias in the so-called disputed territories which were recently freed from Islamic State (IS), partly in joint operations. Shiite militia groups were originally deployed to the area to support the Iraqi and Kurdish fight against IS, but have remained in the volatile territories despite a retreat by the militants, Kurdish officials say.
In November, two convoys of Shiite paramilitary forces arrived in Kirkuk’s southern outskirts, as assaults intensified in the area against ISIS. Sources told Rudaw that the Al-Ahbab and Al-Nasr brigades have been stationed just 15 km south of the oil-rich city with heavy artillery and medium range missiles.
Kirkuk’s police chief told Rudaw that clashes between ISIS and Kurdish security forces in the outskirts of the city continue as the jihadists attack the city suburbs with mortars.
“They attack Peshmerga positions in the outskirts and the Peshmerga respond,” Police Major Sarhad Qadir told Rudaw, though describing the overall situation as calm.
Airstrikes targeting IS positions in the suburbs of Kirkuk resumed on Friday afternoon.
Kirkuk is one of the areas disputed between Erbil and Baghdad and is covered by Article 140 of the constitution, which is meant to resolve the conflict over the area in three phases: normalization of the situation, followed by a census, and concluding with a referendum that gives the population the choice between joining the Kurdistan region or remaining an independent province.
Article 140 of the Iraqi constitution is related to the normalization of the situation in Kirkuk city and other disputed areas through having back its Kurdish inhabitants and repatriating the Arabs relocated in the city during the former regime’s time to their original provinces in central and southern Iraq.
The article also calls for conducting a census to be followed by a referendum to let the inhabitants decide whether they would like Kirkuk to be annexed to the autonomous Iraqi Kurdistan region or having it as an independent province.
The former regime of Iraqi President Saddam Hussein had forced over 250,000 Kurdish residents to give up their homes to Arabs in the 1970s, to “Arabize” the city and the region’s oil industry.
Kirkuk is under the full control of Kurdistan’s Peshmerga forces, after the Iraqi army withdrew from the city in July 2014 under pressure from IS group attacks on Mosul and the Kirkuk province.
Copyright ©, respective author or news agency, rudaw.net | Ekurd.net | Agencies
|
As the Summit of the Americas in Trinidad and Tobago recedes, several impressions dominate. The first is that most of the hemisphere remains enthralled by Obama-mania and his message to the hemisphere of inclusion, social justice and the more humble exercise of U.S. power and influence. There is a real electricity there, and on balance, much of the hemisphere is ready to put paid to the paralysis of past meetings and engage constructively with the new Administration. I’ve participated in a number of Summits previously; the only one with a similar positive spirit was the first, in Miami in 1994.
Some of the hemisphere remains skeptical, including the leaders of Argentina, Bolivia, Nicaragua, and others, but their pronouncements at the Summit were notable for the backing they did not receive from other leaders and simply came off as being tone deaf. Because really, even as global economic recovery continues to be of primary concern, which hemispheric leader wanted to use valuable time at the Summit to hear a diatribe from Nicaraguan President Daniel Ortega—who gamed Nicaragua’s election and now works hard to subvert Nicaraguan democracy through the institutions of democracy—about the previous alleged sins of the United States? Or to hear Bolivian President Evo Morales prattle on about goofy assassination plots he claims were cooked up in Washington. Talk about magical realism…
In any event, it’s clear that even Venezuelan President Hugo Chávez tried to be on his best behavior, perhaps having been sworn to do so behind the scenes by other regional leaders, perhaps being truly chastened at the Ibero-Americas Summit by the King of Spain, perhaps even being taken a little bit himself with Obama’s star power and ability to move crowds. As my father used to say, it surely takes one to know one. And if he knows nothing else, Chávez knows populism and symbolism. His stunt of giving a book to Obama on how foreign interests have dominated Latin America was just a bit forced, and clearly a miscalculation. (For Amazon.com though, it did push the book up from number 54,295 on its best-seller list to number 2.) A better choice for his book-of-the-month-club selection, for example, would have focused on the similarities between the U.S. and Latin American independence movements—with a heavy dose of Bolívar—to show the broad similarities between North and South America as a means to set a common agenda today built on the shared ideals and values that originally animated the hemispheric vision. Even so, no doubt President Obama will want to reciprocate, which could open an interesting channel of communication directly between the leaders, to tease out whether Chávez is interested merely in publicity stunts or if he is genuinely interested in turning the page. As an aside, sending a copy of Michael Reid’s recent offering, Forgotten Continent: The Battle for Latin America’s Soul, to Caracas would be a magnificent choice to consider.
Of course Cuba found its way onto the stage, as only Cuba can, midwifed onto the agenda by those such as Bolivia’s president and others who are looking for a way to embarrass the United States rather than to focus on ways to truly help their own citizens domestically. Cuba is a neuralgic issue for many in the hemisphere, just as it is among policy elites in Washington. President Obama’s recently announced opening to Cuba is an excellent step, consistent with recommendations made by Americas Society and Council of the Americas, and has been described by Secretary of State Hillary Clinton and others as an important “first” step pending concrete moves by Havana. That’s the right place for U.S. policy to be. Rather than get caught up in the moment, the Administration has put forth a credible, thoughtful, sound policy with prospects for further actions.
What’s curious, though, is the call by some policy advocates and regional leaders for Cuba to be re-admitted to both the Summit of the Americas process and also the Organization of American States. To do either, while Cuba remains proudly non-democratic, would be a fundamental mistake, and would be significant backtracking from years of settled policy across the hemisphere that democratic societies and institutions are the foundation for full access to the hemispheric community. No doubt there are other things that can be done to reach out to Cuba to gauge the seriousness of the regime for true reform if such steps are determined to be appropriate. But if the hemisphere refuses to stand up for democracy as a concept, we will have walked back from one of the most significant achievements that Latin America (and yes, this is primarily a Latin American issue rather than North America or the English-speaking Caribbean) has to its name. After all, of what value is the OAS Inter-American Democratic Charter, signed September 11, 2001, in Lima, or the MERCOSUR democracy clause for that matter, if democratic prerequisites are blatantly ignored? And if one of its signature achievements in recent years is stripped of any value, then the question must be asked about the OAS itself as an institution. This is not a road we want to take.
The Summit in Trinidad and Tobago was cathartic. Leaders got to blow off steam about the alleged misdeeds of the United States, the U.S. President got to listen and express a more humble approach to the region and various pet rocks were raised, in some cases for domestic audiences. Brazil and others largely remained out of the limelight, as they generally do in these sorts of forums, to ensure maximum diplomatic flexibility later. At the same time, some very real issues in the hemisphere have not gone away, and they require group attention. Economic recovery, energy and global climate change, improved personal security and many others. This is the real hemispheric agenda, and ultimately, the success of this Summit will be determined by what happens in the coming months to address them. Because if the nations of the hemisphere cannot create jobs in the formal economy for their own people, for example, or reduce the ravages of exploding crime and criminal behavior, does it really matter to their shrinking middle classes whether, as Bolivia’s president insists, Cuba is invited to the next Summit?
*Eric Farnsworth is a guest blogger to americasquarterly.org. He is Vice President of the Council of the Americas in Washington DC. |
<filename>extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/hotreload/NewFilter.java<gh_stars>1000+
package io.quarkus.vertx.http.hotreload;
import javax.enterprise.event.Observes;
import io.quarkus.vertx.http.runtime.filters.Filters;
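/**
 * Hot-reload test fixture: observes the {@link Filters} CDI event and registers a
 * Vert.x HTTP filter with priority 100 that adds the "X-Header-2: Some new header"
 * response header before letting the request continue.
 */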
public class NewFilter {
public void init(@Observes Filters filters) {
filters.register(rc -> {
rc.response().putHeader("X-Header-2", "Some new header");
rc.next();
}, 100);
}
}
|
/*
* Copyright The OpenTelemetry Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {
Context,
CorrelationContext,
GetterFunction,
HttpTextPropagator,
SetterFunction,
} from '@opentelemetry/api';
import {
getCorrelationContext,
setCorrelationContext,
} from '../correlation-context';
const KEY_PAIR_SEPARATOR = '=';
const PROPERTIES_SEPARATOR = ';';
const ITEMS_SEPARATOR = ',';
// Name of the http header used to propagate the correlation context
export const CORRELATION_CONTEXT_HEADER = 'otcorrelations';
// Maximum number of name-value pairs allowed by w3c spec
export const MAX_NAME_VALUE_PAIRS = 180;
// Maximum number of bytes per a single name-value pair allowed by w3c spec
export const MAX_PER_NAME_VALUE_PAIRS = 4096;
// Maximum total length of all name-value pairs allowed by w3c spec
export const MAX_TOTAL_LENGTH = 8192;
type KeyPair = {
key: string;
value: string;
};
/**
* Propagates {@link CorrelationContext} through Context format propagation.
*
* Based on the Correlation Context specification:
* https://w3c.github.io/correlation-context/
*/
export class HttpCorrelationContext implements HttpTextPropagator {
inject(context: Context, carrier: unknown, setter: SetterFunction) {
const correlationContext = getCorrelationContext(context);
if (!correlationContext) return;
const keyPairs = this._getKeyPairs(correlationContext)
.filter((pair: string) => {
return pair.length <= MAX_PER_NAME_VALUE_PAIRS;
})
.slice(0, MAX_NAME_VALUE_PAIRS);
const headerValue = this._serializeKeyPairs(keyPairs);
if (headerValue.length > 0) {
setter(carrier, CORRELATION_CONTEXT_HEADER, headerValue);
}
}
private _serializeKeyPairs(keyPairs: string[]) {
return keyPairs.reduce((hValue: string, current: string) => {
const value = `${hValue}${hValue != '' ? ITEMS_SEPARATOR : ''}${current}`;
return value.length > MAX_TOTAL_LENGTH ? hValue : value;
}, '');
}
private _getKeyPairs(correlationContext: CorrelationContext): string[] {
return Object.keys(correlationContext).map(
(key: string) =>
`${encodeURIComponent(key)}=${encodeURIComponent(
correlationContext[key].value
)}`
);
}
extract(context: Context, carrier: unknown, getter: GetterFunction): Context {
const headerValue: string = getter(
carrier,
CORRELATION_CONTEXT_HEADER
) as string;
if (!headerValue) return context;
const correlationContext: CorrelationContext = {};
if (headerValue.length == 0) {
return context;
}
const pairs = headerValue.split(ITEMS_SEPARATOR);
pairs.forEach(entry => {
const keyPair = this._parsePairKeyValue(entry);
if (keyPair) {
correlationContext[keyPair.key] = { value: keyPair.value };
}
});
if (Object.entries(correlationContext).length === 0) {
return context;
}
return setCorrelationContext(context, correlationContext);
}
private _parsePairKeyValue(entry: string): KeyPair | undefined {
const valueProps = entry.split(PROPERTIES_SEPARATOR);
if (valueProps.length <= 0) return;
const keyPairPart = valueProps.shift();
if (!keyPairPart) return;
const keyPair = keyPairPart.split(KEY_PAIR_SEPARATOR);
if (keyPair.length != 2) return;
const key = decodeURIComponent(keyPair[0].trim());
let value = decodeURIComponent(keyPair[1].trim());
if (valueProps.length > 0) {
value =
value + PROPERTIES_SEPARATOR + valueProps.join(PROPERTIES_SEPARATOR);
}
return { key, value };
}
}
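// Usage sketch (illustrative only; `baseContext` stands in for a real Context
// obtained from the OpenTelemetry API and is not defined in this file):
//
//   const propagator = new HttpCorrelationContext();
//   const carrier: { [key: string]: string } = {};
//   const ctx = setCorrelationContext(baseContext, { userId: { value: '42' } });
//   propagator.inject(ctx, carrier, (c, k, v) => ((c as any)[k] = v));
//   // carrier[CORRELATION_CONTEXT_HEADER] is now 'userId=42'
//   const restored = propagator.extract(baseContext, carrier, (c, k) => (c as any)[k]);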
|
Self-assembled nanocages from DNA–protoporphyrin hybrid molecules DNA–organic hybrid molecular building blocks are generated by covalent conjugation of the carboxyl groups of protoporphyrin IX with the amine functional groups of modified DNA oligomers. The hybrids are used to engineer DNA nanocages by self-assembly of the complementary DNA–organic molecule conjugates. The nanocages were found to be lined up in head-to-tail fashion under the selective ionic strength of the solution. A computational approach revealed the area and volume acquired by each DNA–organic hybrid nanocage. The current trends in structural DNA nanotechnology are to create predictable DNA nanostructures that involve DNA–organic hybrid molecules. 1 These hybrid molecular building block based nanostructures find potential applications as probes, for the creation of protein arrays, molecular electronics, single nucleotide polymorphism, RNAomics, drug encapsulation and delivery, light harvesting, biosensing and others. DNA–organic hybrids have the added advantages of better base pair fidelity, stability, directionality and DNA economy. However, apart from offering stability and flexible directionality, the electronic and physical properties of the organic molecule in the DNA–organic hybrids often remain unexplored. DNA–organic hybrid nanostructures have the potential to harbour a combination drug. Such combinations may be created by using two different organic molecules to be linked to DNA in the nanostructures. Cage-like DNA nanostructures have also been shown to encapsulate various proteins. 14,15 Herein, we report the facile creation of nanocages from DNA–protoporphyrin IX (PpIX) hybrid molecular building blocks through self-assembly and explore their applications that involve the self-assembled nanocages as well as the organic molecule PpIX. PpIX has been explored explicitly for potential applications in photodynamic therapy (PDT). There are certain advantages of using PpIX as the organic counterpart in DNA–organic hybrid molecules as it is biomimetic in origin. 19 PpIX is a π-electron rich, light-sensitive molecule and is thus envisioned to have multiple applications involving conductivity, FRET, ROS and sensing of volatile organic compounds (VOCs). Recently, cationic porphyrin–tetrapeptide conjugates were synthesized which are good candidates for peptide delivery. 25 Free base porphyrin acting as a molecular glue has been used to assemble non-complementary DNA sequences at ionic strength greater than 60 mM. 26 Although porphyrin has been conjugated with DNA to create various nanoassemblies, PpIX, an important member of the porphyrin family, has not featured in any of the nanostructures that involve a porphyrin compound. As such, there is no report of DNA-PpIX based creation of distinct nanostructures. For the first time, we report the formation of definite nanostructures in the form of cages from the hybridization-based self-assembly of complementary DNA that is covalently conjugated to PpIX (Scheme 1). DCC/NHS mediated chemistry was used to couple the carboxyl groups of PpIX separately with the 5′-amine terminated, 12 bases long single-strand DNA (ssDNA, ODN1) and its complementary strand (ODN2). Due to the presence of two carboxyl groups in PpIX, two reaction products were formed depending on the number of ssDNA coupled to a single PpIX molecule (Fig. 1). The basicity of the medium favours the conjugation of oligonucleotides with PpIX, which is reflected in a higher yield of (ODN1)₂-PpIX and (ODN2)₂-PpIX by ~33% at pH 8 than at pH 7.
The resulting two DNA-PpIX conjugates formed were further confirmed by HPLC and MALDI-ToF mass spectrometry (MS) (ESI 3-5 and 6-9). Through MALDI-ToF mass spectrometry it was observed that both the singly conjugated and diconjugated oligonucleotide–PpIX conjugates are formed. The % intensity of the diconjugates, (ODN1)₂-PpIX and (ODN2)₂-PpIX, was higher as compared to the singly conjugated ODN1-PpIX and ODN2-PpIX (ESI 3). The peaks observed in HPLC correspond to the unreacted DNA, the singly and di-conjugated DNA-PpIX and the unreacted PpIX (ESI 6). With covalent attachment of a hydrophobic molecule like PpIX to ssDNA, the retention time increases compared to normal DNA, and even more so for the species that contains two ssDNA attached to a single PpIX molecule. The higher peak intensity for the di-conjugates further confirms preferable formation of the hybrid under the given reaction conditions. 27 The diconjugated DNA-PpIX hybrids (ODN1)₂-PpIX and (ODN2)₂-PpIX were hybridized in the presence of sodium chloride, which led to the self-assembly of the DNA-PpIX hybrids (ESI). A solution mixture of (ODN1)₂-PpIX (1 nmol) and (ODN2)₂-PpIX (1 nmol) was heated to 90 °C and then slowly cooled to 20 °C with a ramp of 0.1 °C s⁻¹ in the presence of 10 mM TE, 10 mM magnesium chloride and 250 mM NaCl. This hybridization led to the formation of higher-ordered structures that were characterized indirectly and directly by various analytical techniques. Native PAGE shows the formation of DNA bands corresponding to the dimeric form of ODN1-ODN2 (12 × 2 × 2 = 48 bases). However, smeared bands with low gel mobility were also observed for the self-assembled (ODN1)₂-PpIX and (ODN2)₂-PpIX. These bands indicate that higher-order structures are prominently formed at the hybridization condition used (Fig. 2). The position of the bands with respect to the ladder and their discreteness suggest that self-assembly led to a distribution of higher-ordered structures where four or more dimeric units (equivalent to ~200 bp and more) may be aligned together. The occurrence of higher-ordered structures upon self-assembly of the hybrids was further confirmed by dynamic light scattering (DLS) studies. The DNA duplex has a pitch of 0.33 nm per base pair and a diameter of 2 nm. However, a cumulant apparent hydrodynamic radius of around 54 nm was recorded following hybridization of (ODN1)₂-PpIX and (ODN2)₂-PpIX (ESI 10 and 11). The particle size inferred from DLS points towards the aggregation of self-assembled DNA hybrids. The thermal melting temperature (Tm) of self-assembled (ODN1)₂-PpIX and (ODN2)₂-PpIX increased by 3 °C as compared to the Tm of ODN1-ODN2, which is 54 °C (Fig. 3). The increase in thermal melting of the self-assembled hybrids is attributed to the reduced configurational entropy and ion cloud sharing of the surrounding duplex DNA, coupled with increasing sticky-end associations due to self-assembly. 13,28,29 The melting temperature of ODN1-ODN2 remains unchanged in the presence of free PpIX at a concentration equivalent to that in the DNA-PpIX hybrids. This indicates that the increase in melting temperature is not due to intercalation of PpIX on the DNA. Direct evidence of nano-assembly formation was given by AFM studies (Fig. 4). A cage-like morphology was observed for the self-assembled (ODN1)₂-PpIX-(ODN2)₂-PpIX. Each cage-like structure formed is of ~10 nm, pertaining to the head-to-tail distance of the PpIX affixed at both ends of the DNA nanocages.
Interestingly, these dispersed 2D cages were also found to aggregate in head-to-tail fashion to form staircase-like ordered structures. This phenomenon is not uncommon for π-electron rich porphyrin-type molecules. Such molecules are inclined to interact with neighbouring similar molecules at elevated salt concentration. 30 This also explains the higher-order bands in the native PAGE (Fig. 2). A wide distribution of these aggregates results in multiple bands with limited mobility in the PAGE. The CD spectrum of the self-assembled DNA-PpIX conjugates in the presence of 250 mM NaCl was markedly different from that of the normal hybridized oligonucleotide duplex (Fig. 5). The changes in the positive and negative bands at 280 and 245 nm, respectively, for the self-assembled structures indicate a conformational constraint in the DNA duplex. (DNA)₂-PpIX conjugates tend to aggregate due to π-π stacking of the aromatic PpIX. 31 The significant changes in the characteristic peak of DNA below 250 nm are attributed to the head-to-tail attachment of the DNA-PpIX conjugates and loss of helicity upon cage formation of extended structures. The decrease in molar positive (~31%) and negative ellipticity (~68%) of (ODN1)₂-PpIX-(ODN2)₂-PpIX suggests deformation of the native B form of the DNA. The emission spectra of self-assembled (ODN1)₂-PpIX and (ODN2)₂-PpIX are distinctly different from those of the diconjugated hybrids (Fig. 6). At an excitation wavelength of 400 nm, there is a considerable decrease in the intensity of the signature emission peaks of (ODN1)₂-PpIX at 627 nm and 691 nm in (ODN1)₂-PpIX-(ODN2)₂-PpIX at 250 mM salt concentration. The bands with low intensity are attributed to the residual non-stacked PpIX at the two ends of each system. 32 As the concentration of salt is increased to 350 mM, more of (ODN1)₂-PpIX and (ODN2)₂-PpIX aggregate together in head-to-tail fashion at 25 °C. This results in a further decrease in the intensity of emission of PpIX. However, the unaltered peak position of PpIX in the emission spectrum of the self-assembly indicates that the photophysical properties of PpIX are retained in the nanostructure. The emission spectra further confirm that closed-loop structure formation, which would alter the spectra significantly, does not take place upon hybridization. Such structures were also not observed in AFM studies since the probability of their formation is entropically and enthalpically disfavoured. 32 Computational methods were engaged to theoretically evaluate the dimensions of the nanocages formed from self-assembly of (ODN1)₂-PpIX and (ODN2)₂-PpIX. Each of these protoporphyrin IX conjugated ssDNA structures was associated to constitute the DNA nanocages, where each of the di-conjugated DNA-PpIX units is held together by hydrophobic interactions. The total length of a single DNA nanocage was found to be approximately 9.6 nm. This is in excellent agreement with the experimentally determined length from AFM studies. The total surface area and the volume of each DNA nanocage unit were found to be 8406 Å² and 22 640 ų, respectively (Fig. 7 and ESI 12). These were calculated using 3V: cavity, channel and cleft volume calculator and extractor. 33,34 The use of solution-phase chemistry to synthesize DNA–organic molecules has proved to be a simpler alternative to the DNA-synthesizer-based approach. Post-synthetically created DNA–organic hybrids exhibit better directionality and DNA economy while creating a nanostructure.
The use of 5′-NH₂ terminated oligonucleotides for conjugation with PpIX is a novel effort to be used in molecular recognition. Previously, we have reported the creation of a DNA-PpIX conjugate that was used as a FRET acceptor for ROS generation. 24 Here, we have demonstrated the creation of distinct nanostructures from self-complementary DNA strands that are covalently attached to PpIX. The conjugates were well characterized and subsequently self-assembled through DNA hybridization. The self-assembly led to the formation of cage-like structures whose length and dimensions are determined by the length of the DNA sequence. Furthermore, such nanocages were found to align themselves in head-to-tail position, forming extended structures resembling a ladder. Hypothetically, a 1-dimensional linear array formation is also a possibility, where two DNA-PpIX hybrids would be held together by a single DNA duplex formation. This would engage the two hybrids in every alternate position and go hand-in-hand to form extended linear arrays. However, such structures were not detected under the given experimental conditions. This indicates the significant contribution of the PpIX molecule in influencing the outcome of the DNA self-assembly. It is desired that the electronic and photophysical properties of the organic counterpart in DNA–organic hybrids be retained to introduce versatility in the nanostructure for multifaceted applications. The DNA-PpIX nanostructures could find potential applications in biochip formation, coherent ROS generation and also in holding proteins, drug molecules or other nanoparticles. Alternatively, the DNA strands can be wisely chosen to be aptamers for relevant cell surface marker proteins, whereby release of a cargo can be envisaged by the opening of the nanocages due to DNA–protein interaction.
Fig. 6 Emission spectra of (ODN1)₂-PpIX-(ODN2)₂-PpIX (λexc = 400 nm). The number of nanocages depicted is symbolic, denoting more aggregation at higher salt concentration.
Fig. 7 Computational studies depicting explicit nanocage formation. The structures of DNA and PpIX are represented as sticks, with carbon, nitrogen, oxygen and phosphate atoms shown in green, blue, red and orange colours respectively, using the PyMOL molecular graphics system; head-to-tail length of 9.6 nm. |
/*********************************************************************
*
* Security command
*
*********************************************************************
* FileName: SecurityCmd_GSM.cpp
* Revision: 1.0.0
* Date: 08/05/2016
* Dependencies: SecurityCmd_GSM.h
* Arduino Board: Arduino Uno, Arduino Mega 2560, Fishino Uno, Fishino Mega 2560
*
* Company: Futura Group srl
* www.Futurashop.it
* www.open-electronics.org
*
* Developer: <NAME>
*
* Support: <EMAIL>
*
* Software License Agreement
*
* Copyright (c) 2016, Futura Group srl
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**********************************************************************/
#include "SecurityCmd_GSM.h"
#include "GenericCmd_GSM.h"
#ifdef __AVR__
#include <avr/pgmspace.h>
#include <avr/eeprom.h>
#endif
#if ARDUINO >= 100
#include "Arduino.h"
#else
#include "WProgram.h"
#endif
/****************************************************************************
* Function: SetCmd_ATQ_SPIC or SetCmd_ATQ_TPIN (FIBOCOM G510) or SetCmd_ATQ_QRTPIN (QUECTEL M95)
*
 * Overview: This function is used to check how many attempts remain to enter the SIM PIN/PUK. For details see the AT commands datasheet
*
* PreCondition: None
*
* GSM cmd syntax: The syntax of command for SIM800C and SIM900 is AT+SPIC and the answer is +SPIC:<pin1>,<pin2>,<puk1>,<puk2>
* The syntax of command for G510 is AT+TPIN? and the answer is +TPIN:<pin1>,<pin2>,<puk1>,<puk2>
* The syntax of command for M95 is AT+QTRPIN and the answer is +QTRPIN:<pin1>,<pin2>,<puk1>,<puk2>
*
* Input: None
*
* Command Note: None
*
* Output: Return unsigned char
* Return -> 0 (System Busy. Command not executed)
* Return -> 1 (Command sent)
*
* GSM answer det: None
*
* Side Effects: None
*
* Note: This is a public function
*****************************************************************************/
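/*
 * Illustrative exchange (sketch only; the retry counts are made-up example values,
 * the real ones depend on the SIM card):
 *   --> AT+SPIC
 *   <-- +SPIC: 3,3,10,10
 *   <-- OK
 * i.e. 3 PIN1, 3 PIN2, 10 PUK1 and 10 PUK2 attempts remaining
 */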
#if defined(SIMCOM_SIM800C) || defined(SIMCOM_SIM900) || defined(SIMCOM_SIM928A)
uint8_t SecurityCmd_GSM::SetCmd_ATQ_SPIC(void) {
if ((Gsm.StateWaitAnswerCmd != CMD_WAIT_IDLE) || (Gsm.UartState != UART_IDLE_STATE) || (Gsm.GsmFlag.Bit.CringOccurred == 1)) {
return(0); // System Busy
} else {
Gsm.ClearBuffer();
Gsm.GsmFlag.Bit.GsmSendCmdInProgress = 1;
Gsm.ResetFlags();
Gsm.ReadStringFLASH((uint8_t *)ATQ_SPIC, (uint8_t *)Gsm.GSM_Data_Array, strlen(ATQ_SPIC));
Gsm.WritePointer = strlen(ATQ_SPIC);
Gsm.StartSendData(CMD_SECURITY_IDLE, WAIT_ANSWER_CMD_ATQ_SPIC, ANSWER_SECURITY_AT_CMD_STATE);
}
return(1); // Command sent
}
#endif
#ifdef FIBOCOM_G510
uint8_t SecurityCmd_GSM::SetCmd_ATQ_TPIN(void) {
if ((Gsm.StateWaitAnswerCmd != CMD_WAIT_IDLE) || (Gsm.UartState != UART_IDLE_STATE) || (Gsm.GsmFlag.Bit.CringOccurred == 1)) {
return(0); // System Busy
} else {
Gsm.ClearBuffer();
Gsm.GsmFlag.Bit.GsmSendCmdInProgress = 1;
Gsm.ResetFlags();
Gsm.ReadStringFLASH((uint8_t *)ATQ_TPIN, (uint8_t *)Gsm.GSM_Data_Array, strlen(ATQ_TPIN));
Gsm.WritePointer = strlen(ATQ_TPIN);
Gsm.StartSendData(CMD_SECURITY_IDLE, WAIT_ANSWER_CMD_ATQ_TPIN, ANSWER_SECURITY_AT_CMD_STATE);
}
return(1); // Command sent
}
#endif
#ifdef QUECTEL_M95
uint8_t SecurityCmd_GSM::SetCmd_ATQ_QRTPIN(void) {
if ((Gsm.StateWaitAnswerCmd != CMD_WAIT_IDLE) || (Gsm.UartState != UART_IDLE_STATE) || (Gsm.GsmFlag.Bit.CringOccurred == 1)) {
return(0); // System Busy
} else {
Gsm.ClearBuffer();
Gsm.GsmFlag.Bit.GsmSendCmdInProgress = 1;
Gsm.ResetFlags();
Gsm.ReadStringFLASH((uint8_t *)ATQ_QTRPIN, (uint8_t *)Gsm.GSM_Data_Array, strlen(ATQ_QTRPIN));
Gsm.WritePointer = strlen(ATQ_QTRPIN);
Gsm.StartSendData(CMD_SECURITY_IDLE, WAIT_ANSWER_CMD_ATQ_QTRPIN, ANSWER_SECURITY_AT_CMD_STATE);
}
return(1); // Command sent
}
#endif
#ifdef AI_THINKER_A9
uint8_t SecurityCmd_GSM::SetCmd_ATQ_CPINC(void) {
if ((Gsm.StateWaitAnswerCmd != CMD_WAIT_IDLE) || (Gsm.UartState != UART_IDLE_STATE) || (Gsm.GsmFlag.Bit.CringOccurred == 1)) {
return(0); // System Busy
} else {
Gsm.ClearBuffer();
Gsm.GsmFlag.Bit.GsmSendCmdInProgress = 1;
Gsm.ResetFlags();
Gsm.ReadStringFLASH((uint8_t *)ATQ_CPINC, (uint8_t *)Gsm.GSM_Data_Array, strlen(ATQ_CPINC));
Gsm.WritePointer = strlen(ATQ_CPINC);
Gsm.StartSendData(CMD_SECURITY_IDLE, WAIT_ANSWER_CMD_ATQ_CPINC, ANSWER_SECURITY_AT_CMD_STATE);
}
return(1); // Command sent
}
#endif
/****************************************************************************/
/****************************************************************************
* Function: SetCmd_ATQ_CPIN
*
* Overview: This function is used to verify if PIN is required. For details see AT commands datasheet
*
* PreCondition: None
*
* GSM cmd syntax: AT+CPIN? and the answer is +CPIN:<code>
*
* Input: None
*
* Command Note: None
*
* Output: Return unsigned char
* Return -> 0 (System Busy. Command not executed)
* Return -> 1 (Command sent)
*
* GSM answer det: None
*
* Side Effects: None
*
* Note: This is a public function
*****************************************************************************/
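/*
 * Illustrative exchange (sketch only; the <code> string returned depends on the SIM state):
 *   --> AT+CPIN?
 *   <-- +CPIN: READY      (no password required)
 *   <-- OK
 * or, when the SIM is still locked:
 *   <-- +CPIN: SIM PIN    (PIN1 must be entered first)
 */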
uint8_t SecurityCmd_GSM::SetCmd_ATQ_CPIN(void) {
if ((Gsm.StateWaitAnswerCmd != CMD_WAIT_IDLE) || (Gsm.UartState != UART_IDLE_STATE) || (Gsm.GsmFlag.Bit.CringOccurred == 1)) {
return(0); // System Busy
} else {
Gsm.ClearBuffer();
Gsm.GsmFlag.Bit.GsmSendCmdInProgress = 1;
Gsm.ResetFlags();
Gsm.ReadStringFLASH((uint8_t *)ATQ_CPIN, (uint8_t *)Gsm.GSM_Data_Array, strlen(ATQ_CPIN));
Gsm.WritePointer = strlen(ATQ_CPIN);
Gsm.StartSendData(CMD_SECURITY_IDLE, WAIT_ANSWER_CMD_ATQ_CPIN, ANSWER_SECURITY_AT_CMD_STATE);
}
return(1); // Command sent
}
/****************************************************************************/
/****************************************************************************
* Function: SetCmd_CPIN_PUK
*
* Overview: This function is used to send PIN or PUK if required. For details see AT commands datasheet
*
* PreCondition: None
*
* GSM cmd syntax: AT+CPIN=<pin>[,<new pin>]
*
 * Input: TypeOfCmd: Type of command to be executed
*
* Command Note: <pin> String type; password
* <new pin> String type; If the PIN required is SIM PUK or SIMPUK2: <PASSWORD> password
*
* Output: Return unsigned char
* Return -> 0 (System Busy. Command not executed)
* Return -> 1 (Command sent)
*
* GSM answer det: None
*
* Side Effects: None
*
* Note: This is a public function
*****************************************************************************/
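/*
 * Illustrative commands following the syntax above (sketch only; "1234" and "12345678"
 * are made-up example codes, the real ones are read back from EEPROM):
 *   AT+CPIN=1234            enter SIM PIN1
 *   AT+CPIN=12345678,1234   enter SIM PUK1 followed by the new PIN1
 */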
uint8_t SecurityCmd_GSM::SetCmd_CPIN_PUK(uint8_t TypeOfCmd) {
uint8_t Count;
if ((Gsm.StateWaitAnswerCmd != CMD_WAIT_IDLE) || (Gsm.UartState != UART_IDLE_STATE) || (Gsm.GsmFlag.Bit.CringOccurred == 1)) {
return(0); // System Busy
} else {
Gsm.ClearBuffer();
Gsm.BckCmdData[0] = TypeOfCmd;
Gsm.GsmFlag.Bit.GsmSendCmdInProgress = 1;
Gsm.ResetFlags();
Gsm.ReadStringFLASH((uint8_t *)AT_CPIN, (uint8_t *)Gsm.GSM_Data_Array, strlen(AT_CPIN));
EepromAddPinPuk(TypeOfCmd);
switch (TypeOfCmd)
{
case SIM_PIN1_REQ:
case SIM_PIN2_REQ:
case SIM_PH_PIN_REQ:
Count = CPIN_PIN_PUK;
do {
Gsm.GSM_Data_Array[Count] = eeprom_read_byte((uint8_t *)EepromAddPin++);
} while (Count++ < (CPIN_PIN_PUK + 5));
Gsm.GSM_Data_Array[Count++] = ASCII_CARRIAGE_RET;
Gsm.GSM_Data_Array[Count++] = ASCII_LINE_FEED;
//Count = 8;
break;
case SIM_PUK1_REQ:
case SIM_PUK2_REQ:
case SIM_PH_PUK_REQ:
Count = CPIN_PIN_PUK;
do {
Gsm.GSM_Data_Array[Count] = eeprom_read_byte((uint8_t *)EepromAddPuk++);
} while (Count++ < (CPIN_PIN_PUK + 9));
Gsm.GSM_Data_Array[Count++] = ASCII_COMMA;
//Count = CPIN_PUK_OFFSET;
do {
Gsm.GSM_Data_Array[Count] = eeprom_read_byte((uint8_t *)EepromAddPin++);
} while (Count++ < (CPIN_PUK_OFFSET + 5));
Gsm.GSM_Data_Array[Count++] = ASCII_CARRIAGE_RET;
Gsm.GSM_Data_Array[Count++] = ASCII_LINE_FEED;
//Count = 15;
break;
}
Gsm.WritePointer = Count;
Gsm.StartSendData(CMD_SECURITY_IDLE, WAIT_ANSWER_CMD_CPIN, ANSWER_SECURITY_AT_CMD_STATE);
}
return(1); // Command sent
}
/****************************************************************************/
/****************************************************************************
* Function: SetCmd_CLCK
*
* Overview: This function is used to lock, unlock or interrogate a ME or a network facility <fac>. For details see AT commands datasheet
*
* PreCondition: None
*
* GSM cmd syntax: AT+CLCK=<fac>,<mode>[,<passwd>[,<class>]]
*
* Input: TypeOfFac: Type of facility
* Mode: 0 unlock; 1 lock; 2 Query status
*
* Command Note: <fac> "AO" BAOC (4 Digit PSWD) (Barr All Outgoing Calls)
* "OI" BOIC (4 Digit PSWD) (Barr Outgoing International Calls)
* "OX" BOIC-exHC (4 Digit PSWD) (Barr Outgoing International Calls except to Home Country)
* "AI" BAIC (4 Digit PSWD) (Barr All Incoming Calls)
* "IR" BIC-Roam (4 Digit PSWD) (Barr Incoming Calls when Roaming outside the home country)
* "FD" (4 Digit PSWD) SIM card or active application in the UICC (GSM or USIM) fixed dialling memory feature (if PIN2 authentication has not been done during the current session, PIN2 is required as <passwd>)
* "SC" SIM (4 Digit PSWD) (lock SIM/UICC card) (SIM/UICC asks password in MT power-up and when this lock command issued) Correspond to PIN1 code
* "PN" (8 Digit PSWD) Network Personalization, Correspond to NCK code
* "PU" (8 Digit PSWD) Network subset Personalization Correspond to NSCK code
* "PP" (8 Digit PSWD) Service Provider Personalization Correspond to SPCK code
* <mode> 0 unlock
* 1 lock
* 2 query status
* <passwd> String type
* <class> 1 Voice (telephony)
* 2 Data refers to all bearer services; with <mode>=2 this may refer only to some bearer service if TA does not support values 16, 32, 64 and 128)
* 4 Fax
* 7 All classes
* <status> 0 Not active
* 1 Active
*
* Output: Return unsigned char
* Return -> 0 (System Busy. Command not executed)
* Return -> 1 (Command sent)
*
* GSM answer det: None
*
* Side Effects: None
*
* Note: This is a public function
*****************************************************************************/
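/*
 * Illustrative AT+CLCK usages following the syntax above (sketch only; "1234" is a
 * made-up example password, the real one is read back from EEPROM):
 *   AT+CLCK="SC",2          query whether the SIM PIN1 lock is active
 *   AT+CLCK="SC",1,"1234"   enable the SIM PIN1 lock
 *   AT+CLCK="SC",0,"1234"   disable the SIM PIN1 lock
 */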
uint8_t SecurityCmd_GSM::SetCmd_CLCK(uint8_t TypeOfFac, uint8_t Mode) {
uint8_t Count;
if ((Gsm.StateWaitAnswerCmd != CMD_WAIT_IDLE) || (Gsm.UartState != UART_IDLE_STATE) || (Gsm.GsmFlag.Bit.CringOccurred == 1)) {
return(0); // System Busy
} else {
Gsm.ClearBuffer();
Gsm.BckCmdData[0] = TypeOfFac;
Gsm.BckCmdData[1] = Mode;
Gsm.GsmFlag.Bit.GsmSendCmdInProgress = 1;
Gsm.ResetFlags();
Gsm.ReadStringFLASH((uint8_t *)AT_CLCK, (uint8_t *)Gsm.GSM_Data_Array, strlen(AT_CLCK));
FlashAddFac(TypeOfFac);
Gsm.ReadStringFLASH(FlashFacAdd, ((uint8_t *)Gsm.GSM_Data_Array + CLCK_FAC), strlen(FAC_AO));
Gsm.GSM_Data_Array[CLCK_FAC + 4] = ASCII_COMMA;
Gsm.GSM_Data_Array[CLCK_MODE] = Mode + 0x30;
if (Mode == 2) {
Gsm.GSM_Data_Array[CLCK_MODE + 1] = ASCII_CARRIAGE_RET;
Gsm.GSM_Data_Array[CLCK_MODE + 2] = ASCII_LINE_FEED;
Gsm.WritePointer = (strlen(AT_CLCK) + 8);
Gsm.StartSendData(CMD_SECURITY_IDLE, WAIT_ANSWER_CMD_CLCK, ANSWER_SECURITY_AT_CMD_STATE);
return(1);
}
Gsm.GSM_Data_Array[CLCK_MODE + 1] = ASCII_COMMA;
Count = CLCK_MODE + 2;
do {
if (eeprom_read_byte(EepromAddPin) != 0x00) {
Gsm.GSM_Data_Array[Count] = eeprom_read_byte((uint8_t *)EepromAddPin++);
} else {
break;
}
} while (Count++ < (CLCK_MODE + 12));
Gsm.GSM_Data_Array[Count++] = ASCII_CARRIAGE_RET;
Gsm.GSM_Data_Array[Count++] = ASCII_LINE_FEED;
Gsm.WritePointer = Count;
Gsm.StartSendData(CMD_SECURITY_IDLE, WAIT_ANSWER_CMD_CLCK, ANSWER_SECURITY_AT_CMD_STATE);
}
return(1); // Command sent
}
/****************************************************************************/
/****************************************************************************
* Function: SetCmd_CPWD
*
* Overview: This function is used to change a password for the facility lock function. For details see AT commands datasheet
*
* PreCondition: None
*
 * GSM cmd syntax: AT+CPWD=<fac>,<old-passwd>,<new-passwd>
*
* Input: TypeOfFac: Type of facility
*
* Command Note: <fac> "AO" BAOC (4 Digit PSWD) (Barr All Outgoing Calls)
* "OI" BOIC (4 Digit PSWD) (Barr Outgoing International Calls)
* "OX" BOIC-exHC (4 Digit PSWD) (Barr Outgoing International Calls except to Home Country)
* "AI" BAIC (4 Digit PSWD) (Barr All Incoming Calls)
* "IR" BIC-Roam (4 Digit PSWD) (Barr Incoming Calls when Roaming outside the home country)
* "AB" (4 Digit PSWD) All Barring services
* "P2" (8 Digit PSWD) SIM PIN2
* "SC" (4 Digit PSWD) SIM (lock SIM/UICC card) (SIM/UICC asks password in MT power-up and when this lock command issued) Correspond to PIN1 code
 * <oldpwd> String type (string should be included in quotation marks): password specified for the facility from the user interface or with command. If an old password has not yet been set, <oldpwd> is not to be entered
* <newpwd> String type (string should be included in quotation marks): new password
*
* Output: Return unsigned char
* Return -> 0 (System Busy. Command not executed)
* Return -> 1 (Command sent)
*
* GSM answer det: None
*
* Side Effects: None
*
* Note: This is a public function
*****************************************************************************/
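/*
 * Illustrative AT+CPWD usage following the syntax above (sketch only; "1234" and
 * "4321" are made-up example passwords):
 *   AT+CPWD="SC","1234","4321"   change the SIM PIN1 from 1234 to 4321
 */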
uint8_t SecurityCmd_GSM::SetCmd_CPWD(uint8_t TypeOfFac) {
uint8_t Count;
if ((Gsm.StateWaitAnswerCmd != CMD_WAIT_IDLE) || (Gsm.UartState != UART_IDLE_STATE) || (Gsm.GsmFlag.Bit.CringOccurred == 1)) {
return(0); // System Busy
} else {
Gsm.ClearBuffer();
Gsm.BckCmdData[0] = TypeOfFac;
Gsm.GsmFlag.Bit.GsmSendCmdInProgress = 1;
Gsm.ResetFlags();
Gsm.ReadStringFLASH((uint8_t *)AT_CPWD, (uint8_t *)Gsm.GSM_Data_Array, strlen(AT_CPWD));
FlashAddFac(TypeOfFac);
Gsm.ReadStringFLASH(FlashFacAdd, ((uint8_t *)Gsm.GSM_Data_Array + CLCK_FAC), strlen(FAC_AO));
Gsm.GSM_Data_Array[CLCK_FAC + 4] = ASCII_COMMA;
Count = CLCK_FAC + 5;
do { // Reads from EEPROM the old PIN and puts it on the GSM data array
if (eeprom_read_byte(EepromAddPin) != 0x00) {
Gsm.GSM_Data_Array[Count] = eeprom_read_byte((uint8_t *)EepromAddPin++);
} else {
break;
}
} while (Count++ < (CLCK_FAC + 15));
Gsm.GSM_Data_Array[Count++] = ASCII_COMMA;
EepromAddPin = &NewPin[0];
do { // Reads from SRAM the new PIN and puts it on the GSM data array
if ((*(uint8_t *)EepromAddPin) != 0x00) {
Gsm.GSM_Data_Array[Count] = (*(uint8_t *)EepromAddPin++);
} else {
break;
}
} while (Count++ < (CLCK_FAC + 22));
Gsm.GSM_Data_Array[Count] = ASCII_CARRIAGE_RET;
Gsm.GSM_Data_Array[Count + 1] = ASCII_LINE_FEED;
Gsm.WritePointer = Count + 2;
Gsm.StartSendData(CMD_SECURITY_IDLE, WAIT_ANSWER_CMD_CPWD, ANSWER_SECURITY_AT_CMD_STATE);
}
return(1); // Command sent
}
/****************************************************************************/
/****************************************************************************
* Function: GsmSecurityWaitAnswer
*
 * Overview: This function processes the answer to the AT command that was sent.
 * The answers received and processed by this code regard the Generic Command Functions
 * implemented in this library file
*
* PreCondition: None
*
* GSM cmd syntax: None
*
* Input: None
*
* Command Note: None
*
* Output: None
*
* GSM answer det: None
*
* Side Effects: None
*
* Note: This is a public function
*****************************************************************************/
void SecurityCmd_GSM::GsmSecurityWaitAnswer(void) {
uint8_t StrPointer = 0xFF;
uint8_t StrPointerEnd = 0xFF;
if ((Gsm.StateSendCmd != CMD_SECURITY_IDLE) || (Gsm.UartState != UART_IDLE_STATE)) {
return;
}
if (Gsm.UartFlag.Bit.ReceivedAnswer == 0) {
return;
}
Gsm.UartFlag.Bit.ReceivedAnswer = 0;
if (Gsm.GsmFlag.Bit.CringOccurred == 1) {
// CRING OCCURRED. CMD SEND ABORTED
Gsm.RetryCounter = 0;
Gsm.GsmFlag.Bit.GsmSendCmdInProgress = 0;
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
return;
}
if (Gsm.ReadPointer > 0) {
if (Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_OK, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_OK)) != 0xFF) {
Gsm.RetryCounter = 0;
Gsm.GsmFlag.Bit.GsmSendCmdInProgress = 0;
switch (Gsm.StateWaitAnswerCmd)
{
case CMD_WAIT_IDLE:
break;
case WAIT_ANSWER_CMD_ATQ_SPIC:
case WAIT_ANSWER_CMD_ATQ_TPIN:
case WAIT_ANSWER_CMD_ATQ_QTRPIN:
case WAIT_ANSWER_CMD_ATQ_CPINC:
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
#if defined(SIMCOM_SIM800C) || defined(SIMCOM_SIM900) || defined(SIMCOM_SIM928A)
StrPointer = Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_SPIC, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_SPIC));
#endif
#ifdef FIBOCOM_G510
StrPointer = Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_TPIN, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_TPIN));
#endif
#ifdef QUECTEL_M95
StrPointer = Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_QTRPIN, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_QTRPIN));
#endif
#ifdef AI_THINKER_A9
StrPointer = Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_CPINC, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_CPINC));
#endif
if (StrPointer != 0xFF) {
if (Gsm.FindColonCommaCarriageRet() != 0xFF) {
SecurityFlag.Bit.PinRetry = Gsm.ExtractParameterByte((Gsm.CharPointers[0] + 1), (Gsm.CharPointers[1] - (Gsm.CharPointers[0] + 1)));
SecurityFlag.Bit.Pin2Retry = Gsm.ExtractParameterByte((Gsm.CharPointers[1] + 1), (Gsm.CharPointers[2] - (Gsm.CharPointers[1] + 1)));
SecurityFlag.Bit.PukRetry = Gsm.ExtractParameterByte((Gsm.CharPointers[2] + 1), (Gsm.CharPointers[3] - (Gsm.CharPointers[2] + 1)));
SecurityFlag.Bit.Puk2Retry = Gsm.ExtractParameterByte((Gsm.CharPointers[3] + 1), (Gsm.CharPointers[4] - (Gsm.CharPointers[3] + 1)));
}
}
break;
case WAIT_ANSWER_CMD_ATQ_CPIN:
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
if (Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_CPIN_READY, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_CPIN_READY)) != 0xFF) {
SecurityFlag.Bit.SIM_Status = SIM_READY; // SIM READY
#ifdef GSM_SECURITY_DEBUG
Serial.print("\n");
Gsm.ReadStringFLASH((uint8_t *)STR_SIM_READY_DEBUG, (uint8_t *)Gsm.TempStringCompare, strlen(STR_SIM_READY_DEBUG));
Gsm.PrintScreenDebugMode();
#endif
break;
} else if (Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_CPIN_PIN, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_CPIN_PIN)) != 0xFF) {
SecurityFlag.Bit.SIM_Status = SIM_PIN1_REQ; // SIM PIN1 REQUIRED
#ifdef GSM_SECURITY_DEBUG
Serial.print("\n");
Gsm.ReadStringFLASH((uint8_t *)STR_SIM_PIN_DEBUG, (uint8_t *)Gsm.TempStringCompare, strlen(STR_SIM_PIN_DEBUG));
Gsm.PrintScreenDebugMode();
#endif
break;
} else if (Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_CPIN_PUK, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_CPIN_PUK)) != 0xFF) {
SecurityFlag.Bit.SIM_Status = SIM_PUK1_REQ; // SIM PUK1 REQUIRED
#ifdef GSM_SECURITY_DEBUG
Serial.print("\n");
Gsm.ReadStringFLASH((uint8_t *)STR_SIM_PUK_DEBUG, (uint8_t *)Gsm.TempStringCompare, strlen(STR_SIM_PUK_DEBUG));
Gsm.PrintScreenDebugMode();
#endif
break;
} else if (Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_CPIN_PH_PIN, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_CPIN_PH_PIN)) != 0xFF) {
SecurityFlag.Bit.SIM_Status = SIM_PH_PIN_REQ; // PH SIM PIN REQUIRED
#ifdef GSM_SECURITY_DEBUG
Serial.print("\n");
Gsm.ReadStringFLASH((uint8_t *)STR_PH_SIM_PIN_DEBUG, (uint8_t *)Gsm.TempStringCompare, strlen(STR_PH_SIM_PIN_DEBUG));
Gsm.PrintScreenDebugMode();
#endif
break;
} else if (Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_CPIN_PH_PUK, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_CPIN_PH_PUK)) != 0xFF) {
SecurityFlag.Bit.SIM_Status = SIM_PH_PUK_REQ; // PH SIM PUK REQUIRED
#ifdef GSM_SECURITY_DEBUG
Serial.print("\n");
Gsm.ReadStringFLASH((uint8_t *)STR_PH_SIM_PUK_DEBUG, (uint8_t *)Gsm.TempStringCompare, strlen(STR_PH_SIM_PUK_DEBUG));
Gsm.PrintScreenDebugMode();
#endif
break;
} else if (Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_CPIN_PIN2, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_CPIN_PIN2)) != 0xFF) {
SecurityFlag.Bit.SIM_Status = SIM_PIN2_REQ; // SIM PIN2 REQUIRED
#ifdef GSM_SECURITY_DEBUG
Serial.print("\n");
Gsm.ReadStringFLASH((uint8_t *)STR_SIM_PIN2_DEBUG, (uint8_t *)Gsm.TempStringCompare, strlen(STR_SIM_PIN2_DEBUG));
Gsm.PrintScreenDebugMode();
#endif
break;
} else if (Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_CPIN_PUK2, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_CPIN_PUK2)) != 0xFF) {
SecurityFlag.Bit.SIM_Status = SIM_PUK2_REQ; // SIM PUK2 REQUIRED
#ifdef GSM_SECURITY_DEBUG
Serial.print("\n");
Gsm.ReadStringFLASH((uint8_t *)STR_SIM_PUK2_DEBUG, (uint8_t *)Gsm.TempStringCompare, strlen(STR_SIM_PUK2_DEBUG));
Gsm.PrintScreenDebugMode();
#endif
break;
}
break;
case WAIT_ANSWER_CMD_CPIN:
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
break;
case WAIT_ANSWER_CMD_CLCK:
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
if (Gsm.TestAT_Cmd_Answer((uint8_t *)AT_ANSW_CLCK, (uint8_t *)Gsm.TempStringCompare, strlen(AT_ANSW_CLCK)) != 0xFF) {
if (Gsm.FindColonCommaCarriageRet() != 0xFF) {
SecurityFlag.Bit.ClckStatus = Gsm.ExtractParameterByte((Gsm.CharPointers[0] + 1), (Gsm.CharPointers[1] - (Gsm.CharPointers[0] + 1)));
}
}
break;
case WAIT_ANSWER_CMD_CPWD:
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
// Save new PIN in Eeprom
switch (Gsm.BckCmdData[0])
{
case CODE_FAC_AO: // (4-digit PSWD)
case CODE_FAC_OI: // (4-digit PSWD)
case CODE_FAC_OX: // (4-digit PSWD)
case CODE_FAC_AI: // (4-digit PSWD)
case CODE_FAC_IR: // (4-digit PSWD)
case CODE_FAC_AB: // (4-digit PSWD)
case CODE_FAC_AG: // (4-digit PSWD)
case CODE_FAC_AC: // (4-digit PSWD)
EepromAddPin = Gsm.EepronAdd.StartAddShortPswdCode;
StrPointer = 0;
StrPointerEnd = 6;
break;
case CODE_FAC_SC:
EepromAddPin = Gsm.EepronAdd.StartAddPin1Code;
StrPointer = 0;
StrPointerEnd = 6;
break;
case CODE_FAC_FD:
EepromAddPin = Gsm.EepronAdd.StartAddPin2Code;
StrPointer = 0;
StrPointerEnd = 6;
break;
case CODE_FAC_PN: // (8-digit PSWD)
case CODE_FAC_PU: // (8-digit PSWD)
case CODE_FAC_PP: // (8-digit PSWD)
case CODE_FAC_P2: // (8-digit PSWD)
EepromAddPin = Gsm.EepronAdd.StartAddLongPswdCode;
StrPointer = 0;
StrPointerEnd = 10;
break;
default:
EepromAddPin = Gsm.EepronAdd.StartAddPin1Code;
StrPointer = 0;
StrPointerEnd = 6;
break;
}
do {
eeprom_write_byte(EepromAddPin++, NewPin[StrPointer++]);
} while (StrPointer <= StrPointerEnd);
break;
default:
break;
}
} else {
Gsm.ProcessGsmError();
switch (Gsm.StateWaitAnswerCmd)
{
case WAIT_ANSWER_CMD_CPIN:
if (SecurityFlag.Bit.IncorrectPSW != 1) {
GsmSecurityRetrySendCmd();
} else {
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
}
break;
default:
GsmSecurityRetrySendCmd();
break;
}
}
} else {
Gsm.InitReset_GSM();
}
}
/****************************************************************************/
/****************************************************************************
* Function: GsmSecurityRetrySendCmd
*
 * Overview: This function retries sending the AT command, up to a maximum of three times
*
* PreCondition: None
*
* GSM cmd syntax: None
*
* Input: None
*
* Command Note: None
*
* Output: None
*
* GSM answer det: None
*
* Side Effects: None
*
* Note: This is a public function
*****************************************************************************/
void SecurityCmd_GSM::GsmSecurityRetrySendCmd(void) {
if (Gsm.RetryCounter++ < 2) {
switch (Gsm.StateWaitAnswerCmd)
{
case WAIT_ANSWER_CMD_ATQ_CPIN:
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
SetCmd_ATQ_CPIN();
break;
case WAIT_ANSWER_CMD_ATQ_SPIC:
case WAIT_ANSWER_CMD_ATQ_TPIN:
case WAIT_ANSWER_CMD_ATQ_QTRPIN:
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
#if defined(SIMCOM_SIM800C) || defined(SIMCOM_SIM900) || defined(SIMCOM_SIM928A)
SetCmd_ATQ_SPIC();
#endif
#ifdef FIBOCOM_G510
SetCmd_ATQ_TPIN();
#endif
#ifdef QUECTEL_M95
SetCmd_ATQ_QRTPIN();
#endif
break;
case WAIT_ANSWER_CMD_CPIN:
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
SetCmd_CPIN_PUK(Gsm.BckCmdData[0]);
break;
case WAIT_ANSWER_CMD_CLCK:
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
SetCmd_CLCK(Gsm.BckCmdData[0], Gsm.BckCmdData[1]);
break;
case WAIT_ANSWER_CMD_CPWD:
Gsm.StateWaitAnswerCmd = CMD_WAIT_IDLE;
SetCmd_CPWD(Gsm.BckCmdData[0]);
break;
default:
break;
}
} else {
Gsm.InitReset_GSM();
}
}
/****************************************************************************/
/****************************************************************************
* Function: EepromAddPinPuk
*
 * Overview: This function is used to extract the EEPROM address where the PIN and PUK codes are stored
*
* PreCondition: None
*
* GSM cmd syntax: None
*
* Input: TypeOfCmd: Type of password that is necessary to find the EEPROM memory
*
* Command Note: None
*
* Output: None
*
* GSM answer det: None
*
* Side Effects: None
*
* Note: This is a private function
*****************************************************************************/
void SecurityCmd_GSM::EepromAddPinPuk(uint8_t TypeOfCmd) {
char AddressBuffer[8];
switch (TypeOfCmd)
{
case SIM_PIN1_REQ: // (4-digit PSWD)
case SIM_PIN2_REQ: // (4-digit PSWD)
case SIM_PH_PIN_REQ: // (4-digit PSWD)
case SHORT_PSWD_REQ: // (4-digit PSWD)
switch (TypeOfCmd)
{
case SIM_PIN1_REQ:
EepromAddPin = Gsm.EepronAdd.StartAddPin1Code;
break;
case SIM_PIN2_REQ:
EepromAddPin = Gsm.EepronAdd.StartAddPin2Code;
break;
case SIM_PH_PIN_REQ:
EepromAddPin = Gsm.EepronAdd.StartAddPhPinCode;
break;
case SHORT_PSWD_REQ:
EepromAddPin = Gsm.EepronAdd.StartAddShortPswdCode;
break;
}
break;
case SIM_PUK1_REQ: // (8-digit PSWD)
case SIM_PUK2_REQ: // (8-digit PSWD)
case SIM_PH_PUK_REQ: // (8-digit PSWD)
case LONG_PSWD_REQ: // (8-digit PSWD)
switch (TypeOfCmd)
{
case SIM_PUK1_REQ:
EepromAddPin = Gsm.EepronAdd.StartAddPin1Code;
EepromAddPuk = Gsm.EepronAdd.StartAddPuk1Code;
break;
case SIM_PUK2_REQ:
EepromAddPin = Gsm.EepronAdd.StartAddPin2Code;
EepromAddPuk = Gsm.EepronAdd.StartAddPuk2Code;
break;
case SIM_PH_PUK_REQ:
EepromAddPin = Gsm.EepronAdd.StartAddPhPinCode;
EepromAddPuk = Gsm.EepronAdd.StartAddPhPukCode;
break;
case LONG_PSWD_REQ:
EepromAddPin = Gsm.EepronAdd.StartAddLongPswdCode;
break;
}
break;
}
}
/****************************************************************************/
/****************************************************************************
* Function: FlashAddFac
*
* Overview: This function is used to extract the FLASH address where the facility code is stored
*
* PreCondition: None
*
* GSM cmd syntax: None
*
* Input: TypeOfFac: Type of facility that is necessary to find the FLASH memory
*
* Command Note: None
*
* Output: None
*
* GSM answer det: None
*
* Side Effects: None
*
* Note: This is a private function
*****************************************************************************/
void SecurityCmd_GSM::FlashAddFac(uint8_t TypeOfFac) {
switch (TypeOfFac)
{
case CODE_FAC_AO: // (4-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_AO;
EepromAddPinPuk(SHORT_PSWD_REQ);
break;
case CODE_FAC_OI: // (4-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_OI;
EepromAddPinPuk(SHORT_PSWD_REQ);
break;
case CODE_FAC_OX: // (4-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_OX;
EepromAddPinPuk(SHORT_PSWD_REQ);
break;
case CODE_FAC_AI: // (4-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_AI;
EepromAddPinPuk(SHORT_PSWD_REQ);
break;
case CODE_FAC_IR: // (4-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_IR;
EepromAddPinPuk(SHORT_PSWD_REQ);
break;
case CODE_FAC_AB: // (4-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_AB;
EepromAddPinPuk(SHORT_PSWD_REQ);
break;
case CODE_FAC_AG: // (4-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_AG;
EepromAddPinPuk(SHORT_PSWD_REQ);
break;
case CODE_FAC_AC: // (4-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_AC;
EepromAddPinPuk(SHORT_PSWD_REQ);
break;
case CODE_FAC_FD: // (4-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_FD;
EepromAddPinPuk(SIM_PIN2_REQ);
break;
case CODE_FAC_SC: // (4-digit PIN1)
FlashFacAdd = (uint8_t *)FAC_SC;
EepromAddPinPuk(SIM_PIN1_REQ);
break;
case CODE_FAC_PN: // (8-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_PN;
EepromAddPinPuk(LONG_PSWD_REQ);
break;
case CODE_FAC_PU: // (8-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_PU;
EepromAddPinPuk(LONG_PSWD_REQ);
break;
case CODE_FAC_PP: // (8-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_PP;
EepromAddPinPuk(LONG_PSWD_REQ);
break;
case CODE_FAC_P2: // (8-digit PSWD)
FlashFacAdd = (uint8_t *)FAC_P2;
EepromAddPinPuk(LONG_PSWD_REQ);
break;
default: // (4-digit PIN1)
FlashFacAdd = (uint8_t *)FAC_SC;
EepromAddPinPuk(SIM_PIN1_REQ);
break;
}
}
/****************************************************************************/
|
import numpy as np
import pytest
from geomfitty import geom3d
from .test_util import assert_vector_equal
def test_asserts():
assert_vector_equal([1, 0, 0], [1, 0, 0])
assert_vector_equal(2, 2)
with pytest.raises(AssertionError):
assert_vector_equal([0, 0], 0)
with pytest.raises(AssertionError):
assert_vector_equal(2, 2.5)
class AbstractTestGeom:
def gen_random_shape(self):
raise NotImplementedError
def test_distance_to_a_single_point(self):
shape = self.gen_random_shape()
distance = shape.distance_to_point(np.random.uniform(size=(3,)))
assert isinstance(distance, float)
assert distance > 0
def test_distance_to_a_multiple_points(self):
shape = self.gen_random_shape()
distance = shape.distance_to_point(np.random.uniform(size=(2, 3)))
assert distance.shape == (2,)
assert np.all(distance > 0)
@pytest.mark.skip
def test_equality(self):
shape = self.gen_random_shape()
assert shape == shape
@pytest.mark.skip
def test_unequality(self):
shape1 = self.gen_random_shape()
shape2 = self.gen_random_shape()
assert shape1 != shape2
class TestLine(AbstractTestGeom):
def gen_random_shape(self):
return geom3d.Line(np.random.uniform(size=(3,)), np.random.uniform(size=(3,)))
def test_line_contains_anchor_point_and_direction(self):
line = geom3d.Line([0, 0, 0], [1, 0, 0])
assert_vector_equal(line.anchor_point, [0, 0, 0])
assert_vector_equal(line.direction, [1, 0, 0])
def test_line_direction_is_normalized(self):
line = geom3d.Line([0, 0, 0], [2, 0, 0])
assert_vector_equal(line.direction, [1, 0, 0])
def test_distance_to_line_is_calculated(self):
line = geom3d.Line([0, 0, 0], [1, 0, 0])
assert line.distance_to_point([1, 1, 0]) == 1
assert line.distance_to_point([1, 1, 1]) == np.sqrt(2)
assert line.distance_to_point([1, 3, 4]) == 5
line = geom3d.Line([1, 0, 0], [1, 1, 0])
assert line.distance_to_point([3, 2, 4]) == 4
assert line.distance_to_point([0, 1, 0]) == np.sqrt(2)
def test_distance_to_line_can_take_multiple_points(self):
line = geom3d.Line([0, 0, 0], [1, 0, 0])
assert_vector_equal(line.distance_to_point([[1, 1, 0], [1, 3, 4]]), [1, 5])
class TestPlane(AbstractTestGeom):
def gen_random_shape(self):
return geom3d.Plane(np.random.uniform(size=(3,)), np.random.uniform(size=(3,)))
class TestSphere(AbstractTestGeom):
def gen_random_shape(self):
return geom3d.Sphere(np.random.uniform(size=(3,)), np.random.uniform())
class TestCylinder(AbstractTestGeom):
def gen_random_shape(self):
return geom3d.Cylinder(
np.random.uniform(size=(3,)),
np.random.uniform(size=(3,)),
np.random.uniform(),
)
class TestCircle3D(AbstractTestGeom):
def gen_random_shape(self):
return geom3d.Circle3D(
np.random.uniform(size=(3,)),
np.random.uniform(size=(3,)),
np.random.uniform(),
)
def test_distance_to_a_point(self):
circle = geom3d.Circle3D([0, 0, 0], [1, 0, 0], 1)
assert circle.distance_to_point([1, 1, 0]) == 1
assert circle.distance_to_point([0, 0, 2]) == 1
class TestTorus(AbstractTestGeom):
def gen_random_shape(self):
return geom3d.Torus(
np.random.uniform(size=(3,)),
np.random.uniform(size=(3,)),
np.random.uniform(),
np.random.uniform(),
)
def test_distance_to_a_point(self):
torus = geom3d.Torus([0, 0, 0], [1, 0, 0], 1, 0.5)
assert torus.distance_to_point([1, 1, 0]) == 0.5
assert torus.distance_to_point([0, 0, 2]) == 0.5
|
On the Difficulty of Approximately Maximizing Agreements We address the computational complexity of learning in the agnostic framework. For a variety of common concept classes we prove that, unless P=NP, there is no polynomial time approximation scheme for finding a member in the class that approximately maximizes the agreement with a given training sample. In particular our results apply to the classes of monomials, axis-aligned hyper-rectangles, closed balls and monotone monomials. For each of these classes we prove the NP-hardness of approximating maximal agreement to within some fixed constant (independent of the sample size and of the dimensionality of the sample space). For the class of half-spaces, we prove that, for any ε > 0, it is NP-hard to approximately maximize agreements to within a factor of (418/415 − ε), improving on the best previously known constant for this problem, and using a simpler proof. An interesting feature of our proofs is that, for each of the classes we discuss, we find patterns of training examples that, while being hard for approximating agreement within that concept class, allow efficient agreement maximization within other concept classes. These results bring up a new aspect of the model selection problem: they imply that the choice of hypothesis class for agnostic learning from among those considered in this paper can drastically affect the computational complexity of the learning process. |
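To make the objective concrete, here is a small illustrative Python sketch that is not taken from the paper itself: it computes the agreement rate of a monotone monomial hypothesis on a labeled Boolean sample and brute-forces the best such monomial in a tiny dimension. The hardness results above say that, unless P=NP, no polynomial-time approximation scheme can get arbitrarily close to this maximum for the listed classes; the exhaustive search below is exponential and only meant to show the quantity being maximized.
from itertools import chain, combinations

def agreement(monomial_vars, sample):
    # Fraction of (x, y) pairs on which the monotone monomial (AND of x_i for i in monomial_vars) agrees with label y.
    hits = sum(all(x[i] for i in monomial_vars) == y for x, y in sample)
    return hits / len(sample)

def best_monotone_monomial(sample, n):
    # Exhaustive search over all variable subsets (exponential in n; illustration only).
    subsets = chain.from_iterable(combinations(range(n), k) for k in range(n + 1))
    return max(subsets, key=lambda vars_: agreement(vars_, sample))

# Toy, noisy sample over {0,1}^3.
sample = [((1, 1, 0), True), ((1, 0, 0), False), ((1, 1, 1), True), ((0, 1, 1), False)]
best = best_monotone_monomial(sample, 3)
print(best, agreement(best, sample))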
def _expand_contractions(self, s):
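    # Map common English contractions to their expansions; each key is applied as a regex pattern with re.sub below.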
c = {'i\'m': 'i am',
'you\'re': 'you are',
'he\'s': 'he is',
'she\'s': 'she is',
'we\'re': 'we are',
'it\'s': 'it is',
'isn\'t': 'is not',
'aren\'t': 'are not',
'they\'re': 'they are',
'there\'s': 'there is',
'wasn\'t': 'was not',
         'weren\'t': 'were not',
'i\'ve': 'i have',
'you\'ve': 'you have',
'we\'ve': 'we have',
'they\'ve': 'they have',
'hasn\'t': 'has not',
'haven\'t': 'have not',
'you\'d': 'you had',
'he\'d': 'he had',
'she\'d': 'she had',
'we\'d': 'we had',
'they\'d': 'they had',
'doesn\'t': 'does not',
'don\'t': 'do not',
'didn\'t': 'did not',
'i\'ll': 'i will',
'you\'ll': 'you will',
'he\'ll': 'he will',
'she\'ll': 'she will',
'we\'ll': 'we will',
'they\'ll': 'they will',
'there\'ll': 'there will',
'i\'d': 'i would',
'it\'d': 'it would',
         'there\'d': 'there would',
'can\'t': 'can not',
'couldn\'t': 'could not',
'daren\'t': 'dare not',
'hadn\'t': 'had not',
'mightn\'t': 'might not',
'mustn\'t': 'must not',
'needn\'t': 'need not',
'oughtn\'t': 'ought not',
'shan\'t': 'shall not',
'shouldn\'t': 'should not',
'usedn\'t': 'used not',
'won\'t': 'will not',
'wouldn\'t': 'would not',
'what\'s': 'what is',
'that\'s': 'that is',
'who\'s': 'who is',}
for pat in c:
s = re.sub(pat, c[pat], s)
return s |
Impact on Breastfeeding According to Implant Features in Breast Augmentation: A Multicentric Retrospective Study Background Exclusive breastfeeding is highly recommended by the World Health Organization during the first 6 months of life. In parallel, breast augmentation with implants is one of the most commonly performed operations in aesthetic surgery. Objective The goal of our study was therefore to analyze the potential impact of aesthetic breast implants on breastfeeding. Study Design A retrospective study was carried out in 3 French university hospitals. The main inclusion criterion was adult women of childbearing age (18 to 50 years old) with bilateral breast hypoplasia. Features of the surgery, such as the operative indication, the surgical approach, the implant position relative to the pectoral muscle, and implant features (material, volume, profile), were collected. We conducted a survey by phone about childbirth after the procedure. If the women had children after surgery, we asked them whether they breastfed and about the characteristics of breastfeeding. Results In total, 1316 patients received breast implants in the 3 centers from January 2011 to October 2016 and met our inclusion criteria. We included 1073 patients; 998 women had breast implants with no pregnancy. Among the 75 patients (7%) who gave birth after the surgery, 51 wanted to breastfeed (68%). The patients with a retroglandular implant were significantly less able to breastfeed compared with the patients with retromuscular implants (P = 0.0005). No difference was found for age, the type of surgery, the surgical approach, or the shape or type of implant between the successful breastfeeding group and the failed breastfeeding group. Conclusion A woman with aesthetic breast implants has a 75% chance of breastfeeding if desired, regardless of the type and the volume of the implant and the surgical approach. She has an 82% probability of breastfeeding with retromuscular implants and 17% with retroglandular implants. |
extern crate drm;
extern crate image;
mod utils;
use utils::*;
use drm::control::Device as ControlDevice;
use drm::Device as BasicDevice;
use drm::buffer::DrmFourcc;
use drm::control::ResourceHandle;
use drm::control::{self, atomic, connector, crtc, property, AtomicCommitFlags};
fn find_prop_id<T: ResourceHandle>(
card: &Card,
handle: T,
name: &'static str,
) -> Option<property::Handle> {
let props = card
.get_properties(handle)
.expect("Could not get props of connector");
let (ids, _vals) = props.as_props_and_values();
ids.iter()
.find(|&id| {
let info = card.get_property(*id).unwrap();
info.name().to_str().map(|x| x == name).unwrap_or(false)
})
.cloned()
}
pub fn main() {
let card = Card::open_global();
card.set_client_capability(drm::ClientCapability::UniversalPlanes, true)
.expect("Unable to request UniversalPlanes capability");
card.set_client_capability(drm::ClientCapability::Atomic, true)
.expect("Unable to request Atomic capability");
// Load the information.
let res = card
.resource_handles()
.expect("Could not load normal resource ids.");
let coninfo: Vec<connector::Info> = res
.connectors()
.iter()
.flat_map(|con| card.get_connector(*con))
.collect();
let crtcinfo: Vec<crtc::Info> = res
.crtcs()
.iter()
.flat_map(|crtc| card.get_crtc(*crtc))
.collect();
// Filter each connector until we find one that's connected.
let con = coninfo
.iter()
.find(|&i| i.state() == connector::State::Connected)
.expect("No connected connectors");
// Get the first (usually best) mode
let &mode = con.modes().get(0).expect("No modes found on connector");
let (disp_width, disp_height) = mode.size();
// Find a crtc and FB
let crtc = crtcinfo.get(0).expect("No crtcs found");
// Select the pixel format
let fmt = DrmFourcc::Rgba8888;
// Create a DB
    // If the buffer resolution is above the display resolution, an ENOSPC (not enough GPU memory) error may
// occur
let mut db = card
.create_dumb_buffer((disp_width.into(), disp_height.into()), fmt, 32)
.expect("Could not create dumb buffer");
// Map it and grey it out.
{
let mut map = card
.map_dumb_buffer(&mut db)
.expect("Could not map dumbbuffer");
for b in map.as_mut() {
*b = 128;
}
}
// Create an FB:
let fb = card
.add_framebuffer(&db, 32, 32)
.expect("Could not create FB");
let planes = card.plane_handles().expect("Could not list planes");
let (better_planes, compatible_planes): (
Vec<control::plane::Handle>,
Vec<control::plane::Handle>,
) = planes
.planes()
.iter()
.filter(|&&plane| {
card.get_plane(plane)
.map(|plane_info| {
let compatible_crtcs = res.filter_crtcs(plane_info.possible_crtcs());
compatible_crtcs.contains(&crtc.handle())
})
.unwrap_or(false)
})
.partition(|&&plane| {
if let Ok(props) = card.get_properties(plane) {
let (ids, vals) = props.as_props_and_values();
for (&id, &val) in ids.iter().zip(vals.iter()) {
if let Ok(info) = card.get_property(id) {
if info.name().to_str().map(|x| x == "type").unwrap_or(false) {
return val == (drm::control::PlaneType::Primary as u32).into();
}
}
}
}
false
});
let plane = *better_planes.get(0).unwrap_or(&compatible_planes[0]);
println!("{:#?}", mode);
println!("{:#?}", fb);
println!("{:#?}", db);
println!("{:#?}", plane);
let mut atomic_req = atomic::AtomicModeReq::new();
atomic_req.add_property(
con.handle(),
find_prop_id(&card, con.handle(), "CRTC_ID").expect("Could not get CRTC_ID"),
property::Value::CRTC(Some(crtc.handle())),
);
let blob = card
.create_property_blob(&mode)
.expect("Failed to create blob");
atomic_req.add_property(
crtc.handle(),
find_prop_id(&card, crtc.handle(), "MODE_ID").expect("Could not get MODE_ID"),
blob,
);
atomic_req.add_property(
crtc.handle(),
find_prop_id(&card, crtc.handle(), "ACTIVE").expect("Could not get ACTIVE"),
property::Value::Boolean(true),
);
atomic_req.add_property(
plane,
find_prop_id(&card, plane, "FB_ID").expect("Could not get FB_ID"),
property::Value::Framebuffer(Some(fb)),
);
atomic_req.add_property(
plane,
find_prop_id(&card, plane, "CRTC_ID").expect("Could not get CRTC_ID"),
property::Value::CRTC(Some(crtc.handle())),
);
atomic_req.add_property(
plane,
find_prop_id(&card, plane, "SRC_X").expect("Could not get SRC_X"),
property::Value::UnsignedRange(0),
);
atomic_req.add_property(
plane,
find_prop_id(&card, plane, "SRC_Y").expect("Could not get SRC_Y"),
property::Value::UnsignedRange(0),
);
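    // SRC_W and SRC_H are specified in 16.16 fixed point, hence the << 16 shifts below.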
atomic_req.add_property(
plane,
find_prop_id(&card, plane, "SRC_W").expect("Could not get SRC_W"),
property::Value::UnsignedRange((mode.size().0 as u64) << 16),
);
atomic_req.add_property(
plane,
find_prop_id(&card, plane, "SRC_H").expect("Could not get SRC_H"),
property::Value::UnsignedRange((mode.size().1 as u64) << 16),
);
atomic_req.add_property(
plane,
find_prop_id(&card, plane, "CRTC_X").expect("Could not get CRTC_X"),
property::Value::SignedRange(0),
);
atomic_req.add_property(
plane,
find_prop_id(&card, plane, "CRTC_Y").expect("Could not get CRTC_Y"),
property::Value::SignedRange(0),
);
atomic_req.add_property(
plane,
find_prop_id(&card, plane, "CRTC_W").expect("Could not get CRTC_W"),
property::Value::UnsignedRange(mode.size().0 as u64),
);
atomic_req.add_property(
plane,
find_prop_id(&card, plane, "CRTC_H").expect("Could not get CRTC_H"),
property::Value::UnsignedRange(mode.size().1 as u64),
);
// Set the crtc
// On many setups, this requires root access.
card.atomic_commit(&[AtomicCommitFlags::AllowModeset], atomic_req)
.expect("Failed to set mode");
let five_seconds = ::std::time::Duration::from_millis(5000);
::std::thread::sleep(five_seconds);
card.destroy_framebuffer(fb).unwrap();
card.destroy_dumb_buffer(db).unwrap();
}
|
The orchestra musicians and several of the principal singers of the New York City Opera have decided that the company’s 70th anniversary should be commemorated, bankruptcy and closing notwithstanding. And besides, the company’s demise has put its musicians out of work, and a concert honoring the anniversary seemed to them the proper way to address that.
So on Feb. 21, the 60 players of the New York City Opera Orchestra will be joined by a starry vocal crew — the sopranos Lauren Flanigan and Joélle Harvey, the mezzo-soprano Jennifer Rivera, the tenor Ryan MacPherson and the baritones Mark Delavan and Sidney Outlaw — for a concert at the New York City Center, where the company was born in 1944.
The proceeds from the concert will be donated to the Emergency Relief Fund, which was started in 1967 by Local 802 of the American Federation of Musicians, to provide financial aid to musicians in need. Tickets are priced from $25 to $250 and are available at nycitycenter.org. |
Yesterday’s New York Times featured an illuminating profile of Momentum, the hard-left political activist organization that has effectively captured Britain’s Labour party for its radical leader Jeremy Corbyn. Co-written by reporter Stephen Castle and the Times‘ excellent London bureau chief Steven Erlanger, the article opened with an enthusiastic quote from the group’s vice chair, Jackie Walker, who exclaims, “We’re part of the biggest political movement in the last 50 years! The seeds we are sowing, who knows what fruit they will bring?”
What the piece does not mention is that Jackie Walker is an anti-Semite who blamed the Jews for being the “chief financiers” of the slave trade, falsely criticized Britain’s official Holocaust Memorial Day for not commemorating non-Jewish victims of genocide (it does), argued that Jewish schools do not need extra security in the face of threats, and said she hasn’t “heard a definition of anti-Semitism that I can work with.”
After her comment about Jews being behind the slave trade—a popular anti-Semitic conspiracy theory debunked by academics—Walker was suspended from the Labour party in May. But by the end of the month, she was reinstated, with no explanation from the party as to why she’d been cleared and no apology on her part. This week, Walker went after Holocaust Memorial Day and security at Jewish schools, provoking outrage across the political spectrum. (The recording of Walker’s most recent comments was leaked the day of the Times’ story, so it could not have been included.)
In response, Karen Pollock, the CEO of Britain’s Holocaust Educational Trust, said that Walker had “undermined and belittled” Jews. Jeremy Newmark, head of the Jewish Labour Movement, called on Walker to resign or apologize. “I am appalled that somebody who has already caused great hurt and pain to so many Jewish people by promoting an anti-Semitic myth would come to a training session designed to help Party activists address anti-Semitism and use the occasion to challenge the legitimacy of the training itself,” he said. Manuel Cortes, head of the country’s second largest railway union, called on Walker to resign or be removed from both the Labour party and Momentum. “TSSA will seriously reconsider our union’s support for Momentum if she is still in post by this time next week,” he said. Under pressure, Walker apologized, something she refused to do for her previous anti-Semitic outburst.
In an effort to shield herself from criticism, Walker has claimed to have Jewish ancestry, as though racism is only racism depending on who expresses it. In fact, as any student of anti-Semitism knows, there is a long history of anti-Semitic Jews, including in Britain, where one of the most famous examples resides today in Gilad Atzmon, an Israeli musician who has dubbed American Jews “the enemy within,” questioned the historicity of the Holocaust, and claimed that “robbery and hatred is imbued in Jewish modern political ideology on both the left and the right.”
The Times writes that “in the battle for Labour’s direction,” Walker’s Momentum group “is winning.” Unfortunately, the fact that Walker holds such a prominent position on Britain’s far-left is sadly not a coincidence or an aberration. It is representative of a deeper rot that has taken hold of the Labour party under its new leader Jeremy Corbyn.
Simply put, much like there is no Trump campaign without his horde of anti-Semitic promoters, there is no Corbyn movement in Britain without those who despise Jews. Corbyn himself has a long history of unsavory associations and appearances with Holocaust deniers and propagators of other anti-Semitic libels, who seemed to be attracted to his work. Corbyn’s personally-appointed confidante Ken Livingstone was suspended from the party after infamously claiming Hitler was a Zionist. Other pro-Corbyn supporters, party officials, and donors have been suspended for claiming Israel or Jews were behind everything from 9/11 to ISIS to the Sandy Hook massacre. When London’s Muslim mayor Sadiq Khan endorsed a challenger to Corbyn, he was immediately hit with a barrage of anti-Semitic abuse on social media.
In other words, the reason Walker has felt comfortable voicing her anti-Jewish sentiments is because she exists in a culture where such sentiments are often taken for granted. Her reinstatement and repeated offenses reveal a party whose leadership is clearly not committed to seriously combating anti-Semitic prejudice in its ranks.
In this light, it’s not surprising that a recent poll found that nearly 9 out of 10 British Jews feel that the Labour party is soft on anti-Semitism. What is surprising is that this is something that is happening in a Western democracy in 2016.
Previous: Jeremy Corbyn Slams Jewish Journalist for Writing About Anti-Semitism in Labour Party
Labour Officials Suspended After Claiming Jews Were Behind African Slave Trade, Israel Behind ISIS
Labour Party Suspends Three More Officials for Anti-Semitism
The Anti-Semitism Scandal Engulfing the Labour Party Was Entirely Predictable
Meet Jeremy Corbyn, the New Leader of Britain’s Labour Party
Yair Rosenberg is a senior writer at Tablet. Follow him on Twitter and Facebook. |
ADB's first Alternative Procurement Arrangement (APA) with the World Bank will allow procedures on co-financed projects to follow a single procurement framework, it said in a statement.
Asian Development Bank (ADB) on Thursday said it has entered into an arrangement with the World Bank to put in place a single framework that will make co-financed projects more efficient.
"The new arrangement allows co-financed projects to be implemented more efficiently by applying a single framework to the whole project. It will reduce transaction costs for ADB's clients," said Risa Zhijia Teng, Director General of ADB's Procurement, Portfolio, and Financial Management Department.
The APA is a step further towards reducing procurement timelines in line with Strategy 2030, she said.
Strategy 2030 is ADB's long-term plan to respond effectively to the Asia and Pacific region's changing needs.
ADB said it is negotiating with other multilateral agencies to expand the number of co-financed projects that will use similar arrangements.
This will lessen the burden on executing and implementing agencies that have traditionally used multiple procurement frameworks on a single project and project implementation, it added.
In 2017, ADB operations totalled USD 32.2 billion, including USD 11.9 billion in co-financing. |
#include <stdio.h>
#include <stdlib.h>
#define SIZE 5
int main(void)
{
unsigned i = 0, array [SIZE];
do
{
array [i] = rand();
++i;
} while (i < SIZE);
printf("Array Updated!\n");
i = 0;
do
{
        printf("%u ", array[i]);
++i;
} while (i < SIZE);
printf("\n");
int a = 10;
while( a < 20 )
{
printf("a: %d\n", a);
a++;
}
return EXIT_SUCCESS;
}
/*
Array Updated!
1804289383 846930886 1681692777 1714636915 1957747793
a: 10
a: 11
a: 12
a: 13
a: 14
a: 15
a: 16
a: 17
a: 18
a: 19
*/
|
Ultra High Field fMRI of Human Superior Colliculi Activity during Affective Visual Processing Research on rodents and non-human primates has established the involvement of the superior colliculus in defensive behaviours and visual threat detection. The superior colliculus has been well-studied in humans for its functional roles in saccade and visual processing, but less is known about its involvement in affect. In standard functional MRI studies of the human superior colliculus, it is challenging to discern activity in the superior colliculus from activity in surrounding nuclei such as the periaqueductal gray due to technological and methodological limitations. Employing high-field strength (7 Tesla) fMRI techniques, this study imaged the superior colliculus at high (0.75mm isotropic) resolution, which enabled isolation of the superior colliculus from other brainstem nuclei. Superior colliculus activation during emotionally aversive image viewing blocks was greater than that during neutral image viewing blocks. These findings suggest that the superior colliculus may play a role in shaping subjective emotional experiences in addition to its visuomotor functions, bridging the gap between affective research on humans and non-human animals. extrastriate cortex, the midtemporal area, and the motor and premotor cortices in the primate brain 8,29. In both cases, SC activity may be expected to be modulated during affective processing especially when the visual stimulus requires orienting to threat. At standard fMRI resolution (eg. 2-3 mm isotropic), it is challenging to distinguish the SC from surrounding nuclei such as the PAG. Although there is an abundance of neuroimaging literature on the PAG's involvement in affective visual processing, evidence from studies of non-human animals suggests that both regions contribute to the activity in response to affective visual stimuli. Most fMRI methods cannot discern PAG response from SC response when viewing aversive vs. neutral images, as standard normalization and smoothing procedures introduce significant partial-volume issues. In this study, we overcame the technical challenges of discerning the SC from surrounding nuclei by using ultra-high field 7 Tesla fMRI. At a nominal isotropic resolution of 0.75 mm, we segmented the SC from functional scans (see Fig. 1) and investigated SC activation while participants viewed a set of natural scene images 30. To test for visual processing specificity, we compared BOLD signal in the SC with the inferior colliculus (IC), an auditory midbrain region selected for its comparable size and location. We then tested whether activation in the SC was greater during aversive image viewing blocks compared to that in neutral image viewing blocks. Results We first examined whether visual processing selectively elicited greater activation in the SC compared to a control region, the IC. The goal of this analysis was to provide convergent validation from functional data of our structural localization of the SC insofar as the SC is more intimately related to visual processing than is the IC. Consistent with this notion, the SC had overall greater activity during image viewing blocks than did the IC (Fig. 2, panel a). Next, we investigated whether activity in the SC was further modulated by affective processing. The SC showed, as predicted, greater activation during aversive image viewing blocks compared to neutral image viewing blocks (Fig. 2, panel b). 
As noted in the Methods, image features including contrast, luminance, and complexity did not strongly vary between the aversive and neutral categories of images. Nevertheless, we repeated our analysis after controlling for these features by including them as regressors in the first-level general linear models. The significance level was marginally reduced, but the estimated effect size remained high. Although our primary hypotheses concerned the SC and image category, we additionally report findings for laterality effects during overall image viewing. For lateral asymmetry in SC, prior work suggests that laterality effects in the SC may occur due to ocular dominance. A within-subject 2x2 ANOVA model across stimulus categories and lateral locations revealed a main effect of laterality, where the right SC was more strongly engaged during visual processing overall compared to the left SC (Fig. 2, panel c). We also examined the interaction effect between laterality and image category, but it did not reach significance. Intriguingly, the laterality effect was attenuated when image property regressors were added to the first-level general linear models, suggesting that lateral differences in SC activity may be driven by visual features of the images. Discussion In this study, we examined functional activity in the human SC during affective visual stimulus processing. Functional activity in the SC was localized with high fidelity using ultra high-field, high-resolution fMRI and a custom segmentation procedure. The precise anatomical segmentation of the SC converged with functional results showing greater SC activity during visual stimulus processing in comparison to a similarly sized and spatially proximal control region, the IC. Supporting our main hypothesis, we then tested for and found greater functional activity in the SC during affective image viewing in comparison to neutral image viewing. The present findings have important implications for fMRI studies of affective processing in the human midbrain. Apart from a few notable exceptions 20,31,32, the SC is often overlooked in the human affective neuroscience literature. Instead, the majority of this work has focused on the adjacent PAG. However, both regions have been implicated in defensive behaviour based on research in non-human animals 16,17. The present findings in combination with our prior report 35 suggest that affective image processing also engages both regions. In this respect, our findings underscore the importance of ultra high-field neuroimaging techniques for overcoming partial volume effects and examining the functional role of specific brainstem nuclei in affective processing 41.
Although recent reviews of affective modulation of visual perception 20,31,32 allude to the SC as a part of the affective visual system, our study demonstrates for the first time that the human SC is engaged in processing negative affective content in visual stimuli that signal threat or harm, which are of great ecological importance to humans and non-human animals. The SC has been extensively studied as an oculomotor region important for defensive responding 10. Findings in rodents, for example, have uncovered the importance of the SC in defensive responding using electrophysiological 16,17,46, pharmacological 12,16,19,47, and optogenetic 13,14,46,48 methods. Studies in non-human primates have generalized the SC's role in defensive behaviour and visual threat detection of natural predator using pharmacological 18 and lesion 24 methods, respectively. Due to structural and functional differences in the SC across mammalian species 8, it is important to investigate the SC in humans. This study provides a methodology for bridging studies of defensive behaviours in non-human animals with neuroimaging research of human affective experience. We 49 and others have proposed that affective experience relies on a distributed neural architecture that includes functional activity in early sensory systems. Our findings suggest that for visual affective processing, this model may be extended to include subcortical structures such as the SC. Of interest is whether the effects observed here resulted from the SC being part of an ascending SC-pulvinar-amygdala visual pathway 20,22, or rather from descending (top-down) input from distributed cortical areas (for a review of cortical input to the SC, see 8 ). www.nature.com/scientificreports www.nature.com/scientificreports/ While a number of neuroimaging and lesion studies have provided support for the functional role of the SC in affective visual processing through the SC-pulvinar-amygdala subcortical pathway 23,54, there has also been anatomical evidence from studies of monkey brains against the existence of such a subcortical pathway in primates (for a review, see 55 ). The SC may facilitate responding to threat by rapidly identifying context-salient visual features (e.g. threats arriving from different locations in the environment) and prepare the rapid coordination of behaviours accordingly (fleeing from or orienting toward these locations), with descending corticotectal pathways providing information about which contexts and features are more or less likely to be of relevance. Connections between the SC and the PAG are known to support these behaviours 19. Notably, other fMRI studies have shown greater neural activity in midbrain regions in humans during increasing threat proximity 34 and also during simulated gun shooting decisions 56. Future work using high-resolution imaging combined with functional connectivity analyses may help provide insight on the pathways driving functional activity in the SC in affective processing, and further examine the dynamic between SC and PAG during threat processing. Although not the main focus of the present study, we also observed lateral asymmetry in the SC's response during visual stimulus processing in general. The right SC showed greater activation compared to the left SC, which is consistent with findings from some previous fMRI studies. 
The lateral asymmetry effect was attenuated after controlling for stimuli visual features such as complexity, luminance, and contrast, suggesting that the observed greater activity in the right SC may be attributed to the processing of these visual features. While our findings provide support for a role of the human SC in affective visual processing, they also raise several questions for future work. Recent theoretical work has proposed an account of oculomotor behavior based on active inference 57. As an interface between perception and action with laminated internal architecture and multimodal sensory integration, the SC is an ideal site for investigating feedback and feedforward information flow in a hierarchical predictive coding framework 58. The present study provides a methodology for isolating functional activity in the SC using high-resolution fMRI. Future work may examine whether the affective modulation of visual stimuli has predictive value for oculomotor behaviour. Methods Participants. In a prior report, we examined functional activity in the PAG during affective image processing 35. The present report makes use of the same subject sample and dataset but addresses activation in the colliculi. Thirteen healthy, right-handed volunteers participated in the study and provided informed consent. Individuals received compensation for their participation. The study was conducted in accordance with the guidelines of the Partner's Health Institutional Review Board, which approved all procedures. Two participants were excluded due to a hard drive failure (1 subject) and ghosting (1 subject). In the remaining subjects, one participant only has data from the first two runs out of a total of three runs because of a failure in the stimulus display computer. Functional data collected from 11 participants (five male, age range, 20-35 y) were included in the analysed sample. Experimental design and behavioural analysis. Participants viewed a sample of 30 highly emotionally aversive photographs and 30 neutral photographs from a database of images normed to elicit affective experiences 30. Each block consisted of five images of one category, randomly sampled. Each image was presented for 2 s, and the inter-stimulus intervals ranged from 0.5, 1, 1.5, 2, to 2.5 s. One block of image presentation lasted 17.5 s in total. After each image viewing block, participants were prompted to report their experience across five categories, "Activated" (for arousal), "Angry", "Disgusted", "Sad", and "Scared", using a five-button response box. The labels were presented sequentially and in a random order with numbered scales from 0-4 to indicate the amount of affect or emotion from none to high. Reports were obtained during a 16 second period before the start of the next image viewing block. The self-report measures assessed the valence and emotional intensity of the aversive images, indicating that subjects experienced more anger . Image visual properties were extracted from neutral and aversive stimuli (described below) and no statistically significant difference was found between the complexity across stimuli categories. Some blocks of images sampled from another stimulus database (for details: K. Kveraga http://nmr.mgh.harvard.edu/kestas/affcon) were included for exploratory purposes, but they were not the focus of the analyses in the present work. Functional MRI preprocessing and analysis. Functional images were preprocessed with the FMRIB Software Library (FSL). 
Preprocessing steps were performed on each functional run separately. Functional time-series data were motion corrected to the middle slice within each run and filtered using a high-pass temporal filter with cut-off frequency equal to 0.01 Hz. We applied a 6-parameter rigid-body transform using the MCFLIRT tool in FSL. The maximal relative displacement ranged from 0.15 to 1.45 mm across scans (median = 0.39 mm, interquartile range = 0.29-0.61 mm). Similar to the procedures in our previous work 35, no smoothing or normalization was performed at this stage. Next, we isolated the SC and the IC (4 masks) for each functional run by manual segmentation. Masks were hand drawn using FSLeyes directly from the functional data. Because of their comparable size and adjacent anatomical location to those of the SC, the IC were selected as control regions to investigate the specificity of SC activity in response to visual stimulus processing. The masks were produced based on anatomical markers that delineate the SC and the IC from surrounding tissue 62. The medial and posterior boundaries are demarcated respectively by the positions of the PAG 35 and of the cerebrospinal fluid identified on a region of high signal variability (see Fig. 1 panel b and Supplementary Fig. S1). We then confirmed the shapes of the ROIs based on high-resolution FLASH images for each subject as well as high-resolution structural images of the SC at 9.4 Tesla MRI from an anatomical atlas 63. For our main statistical comparisons, we first averaged the time course signal across voxels in the SC and the IC to maximize signal-to-noise ratio, then applied general linear models consisting of regressors for stimulus onsets by image type convolved with the double gamma hemodynamic response function, their first-order temporal derivatives, and motion regressors to obtain parameter estimates for neutral and aversive conditions. Although we did not focus on voxel-wise analysis, we have provided a figure for illustrative purposes showing voxels with z > 1 in the SC and the IC for aversive and neutral conditions at the functional resolution of 0.75 mm³ (see Fig. 1 panels c,d and Supplementary Fig. S2). We used robust regression 64 to minimize the influence of outliers. Motion parameters were included in the model to account for motion-induced response fluctuations. The parameter estimates for left and right SC and IC under aversive and neutral conditions were submitted to two-tailed paired t tests to examine the main effects of interest. Cohen's d_z = t/√n was used to calculate t test effect sizes given our within-subject design 65. A 2x2 ANOVA across SC lateral locations and stimulus condition was used to examine lateral asymmetry effects. In a second analysis, regressors of image visual properties (complexity, contrast, and luminance) were added to the general linear model to control for their influence on SC activity. Image visual properties, including complexity, contrast, and luminance, were computed using a custom MATLAB script. Image complexity was measured by edge density, which is the ratio of edge pixels identified by MATLAB's Canny edge detector to non-edge pixels after the RGB image has been converted to gray scale 66. Image contrast was measured by the maximum intensity minus the minimum intensity of the converted gray-scale image.
Image luminance was the mean luminance of all pixels after converting the image to HSV format, which separates image intensity from color information. Data availability The fMRI datasets generated during and/or analysed during the current study are available from the corresponding author upon reasonable request. The script for the main statistical analysis is available on Github: https://github.com/candiceyuxiwang/7TSC. |
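For readers who want a rough, runnable counterpart to the image-property measures described in the Methods (edge-density complexity, max-minus-min contrast, mean HSV luminance), the sketch below uses Python with scikit-image rather than the authors' custom MATLAB script; the default Canny thresholds and color conversions are assumptions and may not match the original exactly.
from skimage import io, color, feature

def image_properties(path):
    # Approximate the complexity, contrast and luminance measures described above (assumed Python analogue).
    rgb = io.imread(path)
    gray = color.rgb2gray(rgb)                    # convert to gray scale
    edges = feature.canny(gray)                   # boolean edge map from the Canny detector
    complexity = edges.sum() / (~edges).sum()     # edge density: edge pixels / non-edge pixels
    contrast = gray.max() - gray.min()            # maximum minus minimum intensity
    hsv = color.rgb2hsv(rgb)
    luminance = hsv[..., 2].mean()                # mean of the V (intensity) channel
    return complexity, contrast, luminance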
The Ministry of Power, Works and Housing has said its proposed N10 billion solar powered streetlight project in 37 federal universities and seven teaching hospitals is aimed at boosting effective learning, innovation and advancement, through uninterrupted power supply, to the institutions.
The project, is part of the Energizing Education Programme (EEP) of the Federal Government aimed at rejuvenating the nation’s education system by providing uninterrupted power supply to the institutions, the ministry said.
It said these in response to reported criticism of the N10 billion project by the Senate Committee on Power, Steel Development and Metallurgy, during the budget defense by the ministry last week.
The ministry in a statement issued Saturday, said a newspaper had misrepresented the project, as a mere “solar-powered streetlight project in nine universities across the country valued at N10 billion,” while purportedly reporting the proceedings of the 2018 Budget Defense Meeting of the Senate Committee on Power, Steel Development and Metallurgy attended by the Managing Director of the Rural Electrification Agency, Mrs. Damilola Ogunbiyi.
At the hearing last week, the Committee Chairman, Senator Abaribe had said there was a better way to deploy the funds if the REA had N10 billion to “play round” with.
Senator Mohammed Hassan had also queried why the REA was expending the money to provide solar power for universities, when the rural areas, which it has mandate for, are still in darkness.
The Ministry, in the statement, said the project is far from being a “streetlight” project and the attempt to belittle its impact is outrightly unpatriotic.
“Far from being a ‘streetlight’ project, the EEP in fact seeks to rejuvenate the education system through electrifying a total of 37 federal universities and 7 university teaching hospitals, with Independent Power Plants (IPPS), which will boost effective learning, innovation and advancement through uninterrupted power supply.
“In addition to helping to extend electrification to rural and underserved areas in which the institutions are located, the Programme will ultimately enable the institutions to benefit from world-class training schools, for the training of students in renewable energy, as well as provide optimised security, for the safety and well being of students and staff, through the installation of streetlights on campus which is only a small component of the Project.
“Although implementation of this programme is led by the power sector, through the Rural Electrification Agency, the Vice Chancellors and the Ministry of Education have signed onto this as a critical investment in the education sector.
“The deliberate attempt to water down the significant impact this Programme will have on the enhanced education of Nigerian students is outright unpatriotic as it seeks to prevent the socio-economic development of our nation.
“In addition to the open and transparent coverage of the milestones attained preparatory to the take off of the Project like the Meetings and signing of the Memorandum of Understanding between the REA and the first set of participating tertiary institutions, the Ministry holds itself ready to provide further details to the media and any other interested entities to stem any further misrepresentation,” it added. |
/**
* A compound key identifying a tile of a {@link ComputedImage}.
*/
static final class Key {
/**
* The image which own the tile as a weak reference. All {@code TileCache.Key} instances
* for the same image will share the same reference. Consequently it is okay to compare
* {@code image} fields directly instead of {@code image.get()}.
*/
private final Reference<ComputedImage> image;
/**
* Index of the tile owned by the image.
*/
private final int tileX, tileY;
/**
     * Creates a new key identifying a tile of a cached image.
*
     * @param image the image which owns the tile.
* @param tileX the column index of the cached tile.
* @param tileY the row index of the cached tile.
*/
Key(final Reference<ComputedImage> image, final int tileX, final int tileY) {
this.image = image;
this.tileX = tileX;
this.tileY = tileY;
}
/**
* Returns the tile indices.
*/
final Point indices() {
return new Point(tileX, tileY);
}
/**
* Returns the error message when this tile can not be computed.
*
* @param key {@link Resources.Keys#CanNotComputeTile_2} or {@link Resources.Keys#TileErrorFlagSet_2}.
*/
final String error(final short key) {
return Resources.format(key, tileX, tileY);
}
/**
* Removes the raster associated to this key. This method is invoked for all tiles in an image being disposed.
* The disposal may happen either by an explicit call to {@link ComputedImage#dispose()}, or because the image
* has been {@linkplain ComputedTiles#dispose() garbage collected}.
*/
final void dispose() {
GLOBAL.remove(this);
}
/**
     * Returns a hash code value for this key. Note that it is okay to use {@link #image} directly
* in hash code computation instead of {@link Reference#get()} because we maintain a one-to-one
* relationship between {@link ComputedImage} and its {@link Reference}.
*/
@Override
public int hashCode() {
/*
* Dispatch tileX and tileY on approximately two halves of 32 bits integer.
* 65563 is a prime number close to 65536, the capacity of 16 bits integers.
*/
return System.identityHashCode(image) + tileX + 65563 * tileY;
}
/**
* Compares this key with the given object for equality. See {@link #hashCode()} for a note about
* direct comparison of {@link #image} references.
*/
@Override
public boolean equals(final Object obj) {
if (obj instanceof TileCache.Key) {
final TileCache.Key k = (TileCache.Key) obj;
return image == k.image && tileX == k.tileX && tileY == k.tileY;
}
return false;
}
/**
* Returns a string representation of this key for debugging purposes.
*/
@Override
public String toString() {
return getClass().getSimpleName() + '[' + tileX + ", " + tileY + ']';
}
} |
import sys
ip_num = input()
n = int(ip_num.split(' ')[0])
h = int(ip_num.split(' ')[1])
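# Each value at most h contributes width 1; values above h (up to 2*h) contribute width 2; the answer is the total width.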
per_array = []
width_array = []
sum = 0
if (n >= 1) and (n <= 1000):
if (h >= 1) and (h <= 1000):
per_array = list(map(int, input().split(' ')[:n]))
if(per_array[n-1] <= 2*h) and (per_array[0] >= 1):
for i in range (n):
if per_array[i] <= h:
width_array.append(1)
else:
width_array.append(2)
else:
sys.exit(0)
for i in range(len(width_array)):
sum = sum + width_array[i]
print(sum) |
#include "dust.h"
DashDust::DashDust( glm::vec2 pos, bool flipped, float angle) {
texture = Resources->getTexture("assets/images/dash_dust.png");
animation.setSpriteSheet(*texture);
animation.addFrame(sf::IntRect(0,0,140,140));
animation.addFrame(sf::IntRect(140,0,140,140));
animation.addFrame(sf::IntRect(280,0,140,140));
animation.addFrame(sf::IntRect(0,140,140,140));
animation.addFrame(sf::IntRect(140,140,140,140));
animation.addFrame(sf::IntRect(280,140,140,140));
animation.addFrame(sf::IntRect(0,280,140,140));
animation.addFrame(sf::IntRect(140,280,140,140));
animation.addFrame(sf::IntRect(280,280,140,140));
animation.addFrame(sf::IntRect(280,280,140,140));
sprite = new AnimatedSprite( sf::seconds(0.07), false, false );
sprite->setOrigin(0,100.f);
sprite->setPosition( pos.x, pos.y );
sprite->setRotation( angle );
if ( !flipped ) {
sprite->setScale( -0.7, 0.7 );
} else {
sprite->setScale( 0.7, 0.7 );
}
sprite->play(animation);
}
DashDust::~DashDust() {
delete sprite;
}
void DashDust::update(double dt) {
sprite->update( sf::seconds( dt ) );
if ( !sprite->isPlaying() ) {
world->removeEntity(this,World::Layer::Midground);
}
}
void DashDust::draw(sf::RenderTarget& window){
window.draw(*sprite);
}
Entity::Type DashDust::getType(){
return Entity::Type::Dust;
}
JumpDust::JumpDust( glm::vec2 pos, float angle, bool doub ) {
if ( !doub ) {
texture = Resources->getTexture("assets/images/jump_poof2.png");
} else {
texture = Resources->getTexture("assets/images/walljump_poof.png");
}
animation.setSpriteSheet(*texture);
animation.addFrame(sf::IntRect(0,0,128,128));
animation.addFrame(sf::IntRect(128,0,128,128));
animation.addFrame(sf::IntRect(256,0,128,128));
animation.addFrame(sf::IntRect(0,128,128,128));
animation.addFrame(sf::IntRect(128,128,128,128));
animation.addFrame(sf::IntRect(256,128,128,128));
animation.addFrame(sf::IntRect(0,256,128,128));
animation.addFrame(sf::IntRect(128,256,128,128));
animation.addFrame(sf::IntRect(256,256,128,128));
sprite = new AnimatedSprite( sf::seconds(0.07), false, false );
sprite->setOrigin(64,64.f);
sprite->setPosition( pos.x, pos.y );
sprite->setRotation( angle );
sprite->setScale( 0.5, 0.5 );
sprite->play(animation);
}
JumpDust::~JumpDust() {
delete sprite;
}
void JumpDust::update(double dt) {
sprite->update( sf::seconds( dt ) );
if ( !sprite->isPlaying() ) {
world->removeEntity(this, World::Layer::Midground);
}
}
void JumpDust::draw(sf::RenderTarget& window){
window.draw(*sprite);
}
Entity::Type JumpDust::getType(){
return Entity::Type::Dust;
}
WallJumpDust::WallJumpDust( glm::vec2 pos, bool flipped, float angle) {
texture = Resources->getTexture("assets/images/walljump_poof2.png");
animation.setSpriteSheet(*texture);
animation.addFrame(sf::IntRect(0,0,128,128));
animation.addFrame(sf::IntRect(128,0,128,128));
animation.addFrame(sf::IntRect(256,0,128,128));
animation.addFrame(sf::IntRect(0,128,128,128));
animation.addFrame(sf::IntRect(128,128,128,128));
animation.addFrame(sf::IntRect(256,128,128,128));
animation.addFrame(sf::IntRect(0,256,128,128));
animation.addFrame(sf::IntRect(128,256,128,128));
animation.addFrame(sf::IntRect(256,256,128,128));
sprite = new AnimatedSprite( sf::seconds(0.07), false, false );
sprite->setOrigin(64.f,64.f);
sprite->setPosition( pos.x, pos.y );
sprite->setRotation( angle );
if ( flipped ) {
sprite->setScale( -0.7, 0.7 );
} else {
sprite->setScale( 0.7, 0.7 );
}
sprite->play(animation);
}
WallJumpDust::~WallJumpDust() {
delete sprite;
}
void WallJumpDust::update(double dt) {
sprite->update( sf::seconds( dt ) );
if ( !sprite->isPlaying() ) {
world->removeEntity(this, World::Layer::Midground);
}
}
void WallJumpDust::draw(sf::RenderTarget& window){
window.draw(*sprite);
}
Entity::Type WallJumpDust::getType(){
return Entity::Type::Dust;
}
LandingDust::LandingDust( glm::vec2 pos, float angle ) {
texture = Resources->getTexture("assets/images/landing_poof.png");
animation.setSpriteSheet(*texture);
animation.addFrame(sf::IntRect(0,0,128,128));
animation.addFrame(sf::IntRect(128,0,128,128));
animation.addFrame(sf::IntRect(256,0,128,128));
animation.addFrame(sf::IntRect(0,128,128,128));
animation.addFrame(sf::IntRect(128,128,128,128));
animation.addFrame(sf::IntRect(256,128,128,128));
animation.addFrame(sf::IntRect(0,256,128,128));
animation.addFrame(sf::IntRect(128,256,128,128));
animation.addFrame(sf::IntRect(256,256,128,128));
sprite = new AnimatedSprite( sf::seconds(0.07), false, false );
sprite->setOrigin(64,68.f);
sprite->setPosition( pos.x, pos.y );
sprite->setRotation( angle );
sprite->setScale( 0.7, 0.7 );
sprite->play(animation);
}
LandingDust::~LandingDust() {
delete sprite;
}
void LandingDust::update(double dt) {
sprite->update( sf::seconds( dt ) );
if ( !sprite->isPlaying() ) {
world->removeEntity(this, World::Layer::Midground);
}
}
void LandingDust::draw(sf::RenderTarget& window){
window.draw(*sprite);
}
Entity::Type LandingDust::getType(){
return Entity::Type::Dust;
}
ShockDust::ShockDust( glm::vec2 pos ) {
if ( Random->f(0,1) > 0.5 ) {
sound = sf::Sound( *Resources->getSound( "assets/audio/effects/shock1.ogg" ) );
} else {
sound = sf::Sound( *Resources->getSound( "assets/audio/effects/shock2.ogg" ) );
}
sound.play();
texture = Resources->getTexture("assets/images/lightning.png");
animation.setSpriteSheet(*texture);
float w = 96;
float h = 96;
int frames = 10;
int columns = 5;
int rows = 2;
int curframe = 0;
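    // Walk the sprite sheet row by row, adding frames until all 10 have been registered.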
for (int y=0;y<rows&&curframe < frames;y++ ) {
for (int x=0;x<columns&&curframe < frames;x++ ) {
animation.addFrame(sf::IntRect(x*w,y*h,w,h));
curframe++;
}
}
sprite = new AnimatedSprite( sf::seconds(0.07), false, false );
sprite->setOrigin(w/2.f,h/2.f);
sprite->setPosition( pos.x, pos.y );
//sprite->setScale( 1, 1 );
sprite->play(animation);
}
ShockDust::~ShockDust() {
delete sprite;
}
void ShockDust::update(double dt) {
sprite->update( sf::seconds( dt ) );
if ( !sprite->isPlaying() && sound.getStatus() != sf::SoundSource::Playing ) {
world->removeEntity(this, World::Layer::Foreground);
}
}
void ShockDust::draw(sf::RenderTarget& window){
if ( sprite->isPlaying() ) {
window.draw(*sprite);
}
}
Entity::Type ShockDust::getType(){
return Entity::Type::Dust;
}
PokeDust::PokeDust( glm::vec2 pos ) {
sound = sf::Sound( *Resources->getSound( "assets/audio/effects/stab1.ogg" ) );
sound.play();
texture = Resources->getTexture("assets/images/shock_wave.png");
animation.setSpriteSheet(*texture);
animation.addFrame(sf::IntRect(0,0,127,136));
animation.addFrame(sf::IntRect(127,0,127,136));
animation.addFrame(sf::IntRect(127*2,0,127,136));
sprite = new AnimatedSprite( sf::seconds(0.05), false, false );
sprite->setOrigin(127.f/2.f,136.f/2.f);
sprite->setPosition( pos.x, pos.y );
sprite->setScale( 0.7, 0.7 );
sprite->play(animation);
}
PokeDust::~PokeDust() {
delete sprite;
}
void PokeDust::update(double dt) {
sprite->update( sf::seconds( dt ) );
if ( !sprite->isPlaying() && sound.getStatus() != sf::SoundSource::Playing ) {
world->removeEntity(this, World::Layer::Foreground);
}
}
void PokeDust::draw(sf::RenderTarget& window){
window.draw(*sprite);
}
Entity::Type PokeDust::getType(){
return Entity::Type::Dust;
}
LavaDust::LavaDust( glm::vec2 pos ) {
sound = sf::Sound( *Resources->getSound( "assets/audio/effects/fire1.ogg" ) );
sound.play();
texture = Resources->getTexture("assets/images/explosion.png");
animation.setSpriteSheet(*texture);
animation.addFrame(sf::IntRect(0,0,96,96));
animation.addFrame(sf::IntRect(96,0,96,96));
animation.addFrame(sf::IntRect(96*2,0,96,96));
animation.addFrame(sf::IntRect(96*3,0,96,96));
animation.addFrame(sf::IntRect(96*4,0,96,96));
animation.addFrame(sf::IntRect(0,96,96,96));
animation.addFrame(sf::IntRect(96,96,96,96));
animation.addFrame(sf::IntRect(96*2,96,96,96));
animation.addFrame(sf::IntRect(96*3,96,96,96));
animation.addFrame(sf::IntRect(96*4,96,96,96));
animation.addFrame(sf::IntRect(0,96*2,96,96));
animation.addFrame(sf::IntRect(96,96*2,96,96));
animation.addFrame(sf::IntRect(96*2,96*2,96,96));
animation.addFrame(sf::IntRect(96*3,96*2,96,96));
animation.addFrame(sf::IntRect(96*4,96*2,96,96));
sprite = new AnimatedSprite( sf::seconds(0.05), false, false );
sprite->setOrigin(96.f/2.f,96.f/2.f);
sprite->setPosition( pos.x, pos.y );
//sprite->setScale( 0.7, 0.7 );
sprite->play(animation);
}
LavaDust::~LavaDust() {
delete sprite;
}
void LavaDust::update(double dt) {
sprite->update( sf::seconds( dt ) );
if ( !sprite->isPlaying() && sound.getStatus() != sf::SoundSource::Playing ) {
world->removeEntity(this, World::Layer::Foreground);
}
}
void LavaDust::draw(sf::RenderTarget& window){
window.draw(*sprite);
}
Entity::Type LavaDust::getType(){
return Entity::Type::Dust;
}
FireworkDust::FireworkDust( glm::vec2 pos ) {
sound = sf::Sound( *Resources->getSound( "assets/audio/effects/pop.ogg" ) );
sound.play();
texture = Resources->getTexture("assets/images/firework.png");
animation.setSpriteSheet(*texture);
animation.addFrame(sf::IntRect(0,0,70,70));
animation.addFrame(sf::IntRect(70,0,70,70));
animation.addFrame(sf::IntRect(70*2,0,70,70));
animation.addFrame(sf::IntRect(70*3,0,70,70));
animation.addFrame(sf::IntRect(70*4,0,70,70));
animation.addFrame(sf::IntRect(70*5,0,70,70));
animation.addFrame(sf::IntRect(70*6,0,70,70));
sprite = new AnimatedSprite( sf::seconds(0.15), false, false );
sprite->setOrigin(70.f/2.f,70.f/2.f);
sprite->setPosition( pos.x, pos.y );
sprite->setScale( 2, 2 );
sprite->play(animation);
}
FireworkDust::~FireworkDust() {
delete sprite;
}
void FireworkDust::update(double dt) {
sprite->update( sf::seconds( dt ) );
if ( !sprite->isPlaying() && sound.getStatus() != sf::SoundSource::Playing ) {
world->removeEntity(this, World::Layer::Foreground);
}
}
void FireworkDust::draw(sf::RenderTarget& window){
window.draw(*sprite);
}
Entity::Type FireworkDust::getType(){
return Entity::Type::Dust;
}
|
Synthesis of NoC Interconnects for Multi-core Architectures As SoC applications demand high performance and integration density, SoC designers consider multiple aspects during the design phase. This paper presents a Network-on-Chip (NoC) design methodology for generating high quality interconnects for multi-core System-on-Chip architectures. The design process incorporates the main objectives of low power and high performance during topology synthesis. A rendezvous interaction performance analysis method is presented where Layered Queuing Network models are invoked to observe the asynchronous interactions between NoC components and identify possible performance degradation within the on-chip multi-core network. Several experiments are conducted using various SoC benchmark applications to compare the power and performance outcomes of our proposed technique. |
Occurrence and Seasonal Variations of Lead Concentrations in River Water and Edible Vegetables Grown along Morogoro River Bank Urban vegetable farming is popular in Tanzania and other countries, partly due to a readily available market and reliable transport to reach consumers. River banks are usually used to grow such vegetables because of their proximity to water, which is used for irrigation whenever necessary. In urban settings, however, river water pollution by toxic heavy metals and the subsequent accumulation of these metals in nearby grown vegetables has been reported as a major source of exposure for humans and animals. Heavy metals, including lead (Pb), have many health effects on humans and animals, ranging from acute to chronic illnesses. This study was conducted to assess the occurrence of lead in the Morogoro river and in edible vegetables (Amaranthus retroflexus) grown along the river bank. Water and vegetable samples collected during the dry and rainy seasons were prepared and analysed for the occurrence and levels of lead using Atomic Absorption Spectrometry (AAS). Lead concentrations in the water and vegetable samples were up to 0.95 mg/L and 0.026 mg/kg, respectively. It was also found that 77% of the water samples and 28% of the vegetable samples had lead levels above the WHO recommendations. The frequency and levels of lead occurrence varied with the topographical, diurnal and seasonal characteristics of sample collection. The results of this study raise concerns about the use of water from rivers which run across urban areas for human and animal consumption, and call for further studies to investigate possible health consequences for consumers. |
// cleanUp remove the corresponding page document in mgo, remove all the files and their parent directory
func (p *Page) cleanUp(pId pageId){
session := p.pageManager.Session.Copy()
c := session.DB(config.Values.MgoDBName).C(config.Values.MgoCollectionName)
	if err := c.Remove(bson.M{"pageId": pId}); err != nil {
		fmt.Printf("error removing pageId %s in mgo: %v\n", pId, err)
	}
session.Close()
if err := os.RemoveAll(filepath.Join(config.Values.UploadDir, string(pId))); err != nil {
fmt.Println("removeAll err: ", err)
}
} |
/* 1.0.0a/src/manager/callback.c */
#include "jada.h"
#include "jada_messages.h"
#include "manager.h"
static int iRequestId = 0;
static JADA_ReqId iReqId;
static int AMGR_Trim(char *szString)
{
int iLength;
iLength = strlen(szString);
szString[iLength] = '\0';
while (--iLength >= 0) {
if ((szString[iLength] != '\0') && (!isspace(szString[iLength]))) {
break;
}
}
szString[iLength + 1] = '\0';
return(0);
}
static int AMGR_FileExists(char *szFile)
{
int iRet = FALSE;
FILE *fp = NULL;
fp = fopen(szFile, "r");
if (fp != NULL) {
fclose(fp);
iRet = TRUE;
}
return(iRet);
}
static int AMGR_PrintStream(JADA_Stream stmSelf)
{
int iRet, iCounter = 0;
LongString szBuffer;
iRet = JADA_StmGetFirstLine(stmSelf, szBuffer, sizeof(szBuffer));
while (iRet == 0) {
fprintf(stderr, "Record %d:\n %s\n", ++iCounter, szBuffer);
iRet = JADA_StmGetNextLine(stmSelf, szBuffer, sizeof(szBuffer));
}
return(0);
}
CORE_Callback AMGR_MnuOpenConnection(int iAutoLogin)
{
int iRet;
ShortString szMyName, szMyPhone, szMyCustomer;
AMGR_Connection = JADA_ConnNew(__FILE__, __LINE__, TRUE);
CORE_ReturnValIfFail(AMGR_Connection != NULL, -1, ;, "errore generico");
iRet = JADA_ManageEvents(AMGR_Connection);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore nella connessione al server");
iRet = JADA_ConnConnect(AMGR_Connection, JADA_SESSTYPE_SUPPORT, 30);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
if (iAutoLogin != TRUE) {
iRet = JADA_ConnGetParameter(AMGR_Connection, "sessione", "utente", szMyName, sizeof(szMyName));
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
iRet = JADA_ConnGetParameter(AMGR_Connection, "sessione", "telefono", szMyPhone, sizeof(szMyPhone));
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
iRet = JADA_ConnGetParameter(AMGR_Connection, "sessione", "cliente", szMyCustomer, sizeof(szMyCustomer));
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
iRet = CORE_GetString("Nome utente", szMyName, sizeof(szMyName));
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
iRet = CORE_GetString("Recapito telefonico", szMyPhone, sizeof(szMyPhone));
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
iRet = CORE_GetString("Cliente", szMyCustomer, sizeof(szMyCustomer));
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
iRet = JADA_ConnSetParameter(AMGR_Connection, "sessione", "utente", szMyName);
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
iRet = JADA_ConnSetParameter(AMGR_Connection, "sessione", "telefono", szMyPhone);
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
iRet = JADA_ConnSetParameter(AMGR_Connection, "sessione", "cliente", szMyCustomer);
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
}
iRet = JADA_ConnOpen(AMGR_Connection, 30);
switch (iRet) {
case JADA_CREDENTIAL_OK:
JADA_LogMessage("Connessione terminata correttamente.\n");
break;
case JADA_CREDENTIAL_UNKNOWN_USER:
JADA_LogMessage("Utente sconosciuto.\n");
break;
case JADA_CREDENTIAL_WRONG_PASSWORD:
JADA_LogMessage("Password non corretta.\n");
break;
}
return(0);
}
CORE_Callback AMGR_MnuCloseConnection(void)
{
int iRet;
if (! JADA_IsValidConnection(AMGR_Connection)) {
JADA_LogMessage("La connessione non e' statat aperta.\n");
return(0);
}
iRet = JADA_ConnClose(AMGR_Connection);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
return(0);
}
CORE_Callback AMGR_MnuListRequests(void)
{
int iRet, iStreamId;
JADA_Stream stmSelf;
if (! JADA_IsValidConnection(AMGR_Connection)) {
JADA_LogMessage("La connessione non e' statat aperta.\n");
return(0);
}
iRet = JADA_ConnListRequests(AMGR_Connection, "", &iStreamId, 10);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
stmSelf = JADA_ConnGetStream(AMGR_Connection, iStreamId);
CORE_ReturnValIfFail(stmSelf != NULL, -1, ;, "errore generico");
iRet = AMGR_PrintStream(stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = JADA_StmDestroy(__FILE__, __LINE__, stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
return(0);
}
CORE_Callback AMGR_MnuListPendingRequests(void)
{
int iRet, iStreamId;
JADA_Stream stmSelf;
if (! JADA_IsValidConnection(AMGR_Connection)) {
JADA_LogMessage("La connessione non e' statat aperta.\n");
return(0);
}
iRet = JADA_ConnListPendingRequests(AMGR_Connection, "", &iStreamId, 10);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
stmSelf = JADA_ConnGetStream(AMGR_Connection, iStreamId);
CORE_ReturnValIfFail(stmSelf != NULL, -1, ;, "errore generico");
iRet = AMGR_PrintStream(stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = JADA_StmDestroy(__FILE__, __LINE__, stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
return(0);
}
CORE_Callback AMGR_MnuListParameters(void)
{
int iRet, iStreamId;
JADA_Stream stmSelf;
if (! JADA_IsValidSupport(AMGR_Support)) {
JADA_LogMessage("La sessione non e' stata aperta.\n");
return(0);
}
iRet = JADA_SuppListParameters(AMGR_Support, &iStreamId, 10);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
stmSelf = JADA_ConnGetStream(AMGR_Connection, iStreamId);
CORE_ReturnValIfFail(stmSelf != NULL, -1, ;, "errore generico");
iRet = AMGR_PrintStream(stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = JADA_StmDestroy(__FILE__, __LINE__, stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
return(0);
}
CORE_Callback AMGR_MnuGetCombo(void)
{
int iRet, iStreamId;
JADA_Stream stmSelf;
if (! JADA_IsValidConnection(AMGR_Connection)) {
JADA_LogMessage("La connessione non e' statat aperta.\n");
return(0);
}
iRet = JADA_ConnGetCombo(AMGR_Connection, JADA_COMBOTYPE_CODA, "", &iStreamId, 10);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
stmSelf = JADA_ConnGetStream(AMGR_Connection, iStreamId);
CORE_ReturnValIfFail(stmSelf != NULL, -1, ;, "errore generico");
iRet = AMGR_PrintStream(stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = JADA_StmDestroy(__FILE__, __LINE__, stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
JADA_WaitKey("");
iRet = JADA_ConnGetCombo(AMGR_Connection, JADA_COMBOTYPE_REGIONE, "", &iStreamId, 10);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
stmSelf = JADA_ConnGetStream(AMGR_Connection, iStreamId);
CORE_ReturnValIfFail(stmSelf != NULL, -1, ;, "errore generico");
iRet = AMGR_PrintStream(stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = JADA_StmDestroy(__FILE__, __LINE__, stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
JADA_WaitKey("");
iRet = JADA_ConnGetCombo(AMGR_Connection, JADA_COMBOTYPE_CLIENTE, "Campania", &iStreamId, 10);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
stmSelf = JADA_ConnGetStream(AMGR_Connection, iStreamId);
CORE_ReturnValIfFail(stmSelf != NULL, -1, ;, "errore generico");
iRet = AMGR_PrintStream(stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = JADA_StmDestroy(__FILE__, __LINE__, stmSelf);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
JADA_WaitKey("");
return(0);
}
CORE_Callback AMGR_MnuGetRequestCount(void)
{
int iRet;
if (! JADA_IsValidConnection(AMGR_Connection)) {
JADA_LogMessage("La connessione non e' statat aperta.\n");
return(0);
}
iRet = JADA_ConnGetListCounters(AMGR_Connection);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
return(0);
}
CORE_Callback AMGR_MnuAcceptSupport(void)
{
int iRet;
if (! JADA_IsValidConnection(AMGR_Connection)) {
JADA_LogMessage("La connessione non e' statat aperta.\n");
return(0);
}
AMGR_Support = JADA_SuppNew(__FILE__, __LINE__, AMGR_Connection);
CORE_ReturnValIfFail(AMGR_Support != NULL, -1, ;, "errore generico");
iRet = CORE_GetInteger("ID Richiesta", &iRequestId);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore generico");
iRet = JADA_IntToRequestId(iRequestId, &iReqId);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = JADA_SuppAccept(AMGR_Support, iReqId);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
return(0);
}
CORE_Callback AMGR_MnuLoadSupport(void)
{
int iRet;
if (! JADA_IsValidConnection(AMGR_Connection)) {
JADA_LogMessage("La connessione non e' statat aperta.\n");
return(0);
}
AMGR_Support = JADA_SuppNew(__FILE__, __LINE__, AMGR_Connection);
CORE_ReturnValIfFail(AMGR_Support != NULL, -1, ;, "errore generico");
iRet = CORE_GetInteger("ID Richiesta", &iRequestId);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = JADA_IntToRequestId(iRequestId, &iReqId);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = JADA_SuppLoad(AMGR_Support, iReqId);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
return(0);
}
CORE_Callback AMGR_MnuCloseSupport(void)
{
int iRet;
iRet = JADA_SuppIsConnected(AMGR_Support);
if (iRet == TRUE) {
iRet = JADA_SuppClose(AMGR_Support);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = JADA_SuppDestroy(__FILE__, __LINE__, AMGR_Support);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
}
AMGR_Support = NULL;
return(0);
}
CORE_Callback AMGR_MnuSendMessages(void)
{
int iRet;
char szText[JADA_CHATTEXT_SIZE] = "";
iRet = JADA_SuppIsConnected(AMGR_Support);
if (iRet == TRUE) {
iRet = CORE_GetString("Messaggio da inviare (bye per terminare)", szText, sizeof(szText) - 1);
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
while (strcmp(szText, "bye") != 0) {
iRet = JADA_SuppSendChat(AMGR_Support, szText);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
iRet = CORE_GetString("Messaggio da inviare (bye per terminare)", szText, sizeof(szText) - 1);
CORE_ReturnValIfFail(iRet >= 0, -1, ;, "errore generico");
}
}
return(0);
}
CORE_Callback AMGR_MnuRequestCall(void)
{
int iRet;
iRet = JADA_SuppIsConnected(AMGR_Support);
if (iRet == TRUE) {
AMGR_Call = JADA_CallNew(__FILE__, __LINE__, AMGR_Connection);
CORE_ReturnValIfFail(AMGR_Call != NULL, -1, ;, "errore generico");
iRet = JADA_CallOpen(AMGR_Call, iReqId);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
}
return(0);
}
CORE_Callback AMGR_MnuRequestVnc(void)
{
int iRet;
iRet = JADA_SuppIsConnected(AMGR_Support);
if (iRet == TRUE) {
AMGR_VNC = JADA_VncNew(__FILE__, __LINE__, AMGR_Connection);
CORE_ReturnValIfFail(AMGR_VNC != NULL, -1, ;, "errore generico");
iRet = JADA_VncOpenDesktop(AMGR_VNC);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
}
return(0);
}
CORE_Callback AMGR_MnuRequestFtp(void)
{
int iRet;
iRet = JADA_SuppIsConnected(AMGR_Support);
if (iRet == TRUE) {
AMGR_FTP = JADA_FtpNew(__FILE__, __LINE__, AMGR_Connection);
        CORE_ReturnValIfFail(AMGR_FTP != NULL, -1, ;, "errore generico");
iRet = JADA_FtpOpen(AMGR_FTP);
CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore generico");
}
return(0);
}
|
//
// UIView+YKView.h
// YKCategeory
//
// Created by 侯玉昆 on 2019/9/5.
// Copyright © 2019 houyukun. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "UIViewController+YKViewController.h"
NS_ASSUME_NONNULL_BEGIN
@interface UIView (YKView)
// Convenience accessors for reading and setting common view frame properties
@property (nonatomic) CGFloat x;
@property (nonatomic) CGFloat y;
@property (nonatomic) CGFloat width;
@property (nonatomic) CGFloat height;
@property (nonatomic) CGSize size;
@property (nonatomic) CGPoint origin;
@property (nonatomic) CGFloat centerX;
@property (nonatomic) CGFloat centerY;
@property (nonatomic, readonly) CGFloat middleX;
@property (nonatomic, readonly) CGFloat middleY;
@property (nonatomic, readonly) CGPoint centerInSelf;
@property (nonatomic, readonly) CGFloat bottom;
@property (nonatomic, readonly) CGFloat left;
@property (nonatomic, readonly) CGFloat right;
@property (nonatomic, readonly) CGFloat top;
@property (nonatomic, readonly) NSString *frameString;
//! Extra dictionary for passing custom values along with the view
@property (nonatomic, strong) NSDictionary *extra;
/**
 Capture the current view as an image

 @return the rendered image
 */
- (UIImage *)yk_captureImage;
/* Load the xib (UINib) view instance that shares this class's name
 *
 * @return view
 */
+ (__kindof UIView *)yk_xibView;
/**
 * Get the view controller that currently contains this view
 *
 * @return UIViewController
 */
- (__kindof UIViewController *)yk_viewController;
/**
 * Get the navigation controller that currently contains this view
 *
 * @return UINavigationController
 */
- (__kindof UINavigationController *)yk_navigationController;
//! Apply a gradient from startColor to endColor between startPoint and endPoint
- (void)setGradualChangeColor:(UIColor *)startColor endColor:(UIColor *)endColor startPoint:(CGPoint)startPoint endPoint:(CGPoint)endPoint;
/**
 Show an alert from this view

 @param configure configuration block
 @param sure confirm handler, called with the entered text
 @param cancle cancel handler
 */
- (void)yk_showAltertConfigure:(void (^)(YKAlterConfig *config))configure sure:(void (^)(NSString *text))sure cancle:(void (^)(void))cancle;
@end
@interface UIView (RoundingCorners)
/**
 Border color
 */
@property (nonatomic, strong) IBInspectable UIColor *borderColor;
/**
 Border width
 */
@property (nonatomic, assign) IBInspectable CGFloat borderWidth;
/**
 Corner radius
 */
@property (nonatomic, assign) IBInspectable CGFloat cornerRadius;
/**
 Corners to round individually
 */
@property (nonatomic, assign) UIRectCorner roundCorners;
/**
 Round the specified corners with a given radius
 @param roundCorners corners to round; combine multiple corners with |
 @param size corner radius size
 */
- (void)setRoundCorners:(UIRectCorner)roundCorners cornerRadii:(CGSize)size;
/**
 Add a drop shadow
 */
- (void)createShadow;
- (void)setCircleWithRoundingCorners:(UIRectCorner)corners cornerRadius:(CGFloat)cornerRadius fillColor:(UIColor *)fillColor;
//! Render a rounded-corner image on this view
- (void)cornerRadiusWithImage:(UIImage *)image cornerRadius:(CGFloat)cornerRadius rectCornerType:(UIRectCorner)rectCornerType;
@end
NS_ASSUME_NONNULL_END
|
Smoking outlawed in public housing. Here's what that means for Sioux Falls tenants.
Federal authorities implemented a no-smoking rule Monday for all residents of public housing.
For a city that relies less on HUD-funded public housing than most communities, it still means major changes.
Public housing makes up a small portion of the affordable housing options in Sioux Falls. The city has 25 single-family homes designated as public housing, owned, operated and rented to families by the Sioux Falls Housing and Redevelopment Commission. Those units were home to 87 people in 2015, all who would be subject to the smoke-free policy.
In South Dakota combined, there are 2,430 tenants living in public housing. Roughly one-third of public housing residents smoke, nearly double the rate of the general population, according to the American Lung Association.
WHERE DOES THE RULE COME FROM?
The rule comes from internal policy making by HUD, which oversees the federal government's affordable housing programs and funds local agencies such as Sioux Falls Housing.
“They’ve been looking at it for a number of years," said Robert Moffitt, a spokesman for the association's regional office.
WHY BAN SMOKING IN PUBLIC HOUSING?
Beyond the obvious health risks of smoking, federal authorities and the association hope to limit the risks of second-hand smoke to non-smokers.
"There's quite a few people living in public housing that are quite sensitive to exposure to second-hand smoke," Moffitt said. "Young children with asthma, people with pulmonary disease, people with heart disease." |
//=================================================================================================
// Includes
//=================================================================================================
#include <stdio.h>
#include <stdarg.h>   /* va_list / va_start used by logKmsg() */
#include <stdbool.h>  /* bool used by _good_assert() */
#include <glib.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include "../include/logging.h"
//=================================================================================================
// Functions
//=================================================================================================
void _good_assert(const char *cond_str, bool cond) {
if (G_UNLIKELY(!(cond))) {
GPIO_LOG_CRITICAL("ASSERTION_FAIL", 1, PMLOGKS("CAUSE", cond_str), "");
*(int *)0x00 = 0;
}
}
PmLogContext getServiceContext() {
static PmLogContext logContext = 0;
if (0 == logContext) {
PmLogGetContext("gpioservice", &logContext);
}
return logContext;
}
// For output to kernel message.
#define LOG_BUF_MAX 512
void logKmsg(const char *fmt, ...) {
char buf[LOG_BUF_MAX];
va_list ap;
int log_fd;
log_fd = open("/dev/kmsg", O_WRONLY);
if (log_fd < 0) return;
va_start(ap, fmt);
vsnprintf(buf, LOG_BUF_MAX, fmt, ap);
buf[LOG_BUF_MAX - 1] = 0;
va_end(ap);
write(log_fd, buf, strlen(buf));
close(log_fd);
}
|
package com.flextrade.jfixture.jodatime.component;
import com.flextrade.jfixture.JFixture;
import com.flextrade.jfixture.SpecimenSupplier;
import com.flextrade.jfixture.jodatime.customisation.JodaTimeCustomisation;
import org.hamcrest.Matchers;
import org.joda.time.DateTimeFieldType;
import org.joda.time.Duration;
import org.joda.time.MutablePeriod;
import org.joda.time.ReadWritableDateTime;
import org.joda.time.ReadWritableInstant;
import org.joda.time.ReadWritableInterval;
import org.joda.time.ReadWritablePeriod;
import org.joda.time.ReadableDateTime;
import org.joda.time.ReadableDuration;
import org.joda.time.ReadableInstant;
import org.joda.time.ReadableInterval;
import org.joda.time.ReadablePartial;
import org.joda.time.ReadablePeriod;
import org.junit.Before;
import org.junit.Test;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.junit.Assert.assertThat;
public class TestAllInterfaceDataTypesAreSupported {
private final JFixture fixture = new JFixture();
private final DateFormat formatter = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
private Date date;
private Date secondDate;
@Before
public void initialise() throws ParseException {
// The Joda Time customisation should derive all values from the JDK Date class
// which means respecting any overrides for Date. We can use this to our advantage
// in these tests by fixing two dates and asserting correctness of Joda objects
// as a function of these dates rather than rely on the default 'random' implementation
customiseToReturnFixedDates();
fixture.customise(new JodaTimeCustomisation());
}
@Test
public void creates_instance_of_ReadableDateTime() throws ParseException {
ReadableDateTime dateTime = fixture.create(ReadableDateTime.class);
assertThat(dateTime, notNullValue());
assertThat(new Date(dateTime.getMillis()), is(date));
}
@Test
public void creates_instance_of_ReadWritableDateTime() throws ParseException {
ReadWritableDateTime dateTime = fixture.create(ReadWritableDateTime.class);
assertThat(dateTime, notNullValue());
assertThat(new Date(dateTime.getMillis()), is(date));
}
@Test
public void creates_instance_of_ReadableDuration() throws ParseException {
ReadableDuration duration = fixture.create(ReadableDuration.class);
assertThat(duration, notNullValue());
assertThat(duration, Matchers.<ReadableDuration>is(Duration.standardDays(365)));
}
@Test
public void creates_instance_of_ReadableInstant() throws ParseException {
ReadableInstant instant = fixture.create(ReadableInstant.class);
assertThat(instant, notNullValue());
assertThat(new Date(instant.getMillis()), is(date));
}
@Test
public void creates_instance_of_ReadWritableInstant() throws ParseException {
ReadWritableInstant instant = fixture.create(ReadWritableInstant.class);
assertThat(instant, notNullValue());
assertThat(new Date(instant.getMillis()), is(date));
}
@Test
public void creates_instance_of_ReadablePartial() throws ParseException {
ReadablePartial partial = fixture.create(ReadablePartial.class);
assertThat(partial, notNullValue());
assertThat(partial.get(DateTimeFieldType.year()), is(2001));
assertThat(partial.get(DateTimeFieldType.monthOfYear()), is(1));
assertThat(partial.get(DateTimeFieldType.dayOfMonth()), is(1));
assertThat(partial.get(DateTimeFieldType.hourOfDay()), is(12));
assertThat(partial.get(DateTimeFieldType.minuteOfHour()), is(34));
assertThat(partial.get(DateTimeFieldType.secondOfMinute()), is(56));
}
@Test
public void creates_instance_of_ReadableInterval() throws ParseException {
ReadableInterval interval = fixture.create(ReadableInterval.class);
assertThat(interval, notNullValue());
assertThat(interval.getStart().toDate(), is(date));
assertThat(interval.getEnd().toDate(), is(secondDate));
}
@Test
public void creates_instance_of_ReadWritableInterval() throws ParseException {
ReadWritableInterval interval = fixture.create(ReadWritableInterval.class);
assertThat(interval, notNullValue());
assertThat(interval.getStart().toDate(), is(date));
assertThat(interval.getEnd().toDate(), is(secondDate));
}
@Test
public void creates_instance_of_ReadablePeriod() throws ParseException {
ReadablePeriod period = fixture.create(ReadablePeriod.class);
assertThat(period, notNullValue());
assertThat(period, Matchers.<ReadablePeriod>is(new MutablePeriod(1,0,0,0,0,0,0,0))); // 1Yr
}
@Test
public void creates_instance_of_ReadWritablePeriod() throws ParseException {
ReadWritablePeriod period = fixture.create(ReadWritablePeriod.class);
assertThat(period, notNullValue());
assertThat(period, Matchers.<ReadablePeriod>is(new MutablePeriod(1,0,0,0,0,0,0,0))); // 1Yr
}
private void customiseToReturnFixedDates() throws ParseException {
date = formatter.parse("2001/01/01 12:34:56");
secondDate = formatter.parse("2002/01/01 12:34:56");
fixture.customise().lazyInstance(Date.class, new SpecimenSupplier<Date>() {
boolean isFirstCall = true;
@Override
public Date create() {
if (isFirstCall) {
isFirstCall = false;
return date;
}
return secondDate;
}
});
}
}
|
Ontarians will soon be able to air their linen in public. Premier Dalton McGuinty is to announce today that clotheslines can no longer be banned in subdivisions or almost anywhere else in the province.
In a bid to curb the use of energy-sucking dryers, the new regulation will overrule neighbourhood covenants – part of the mortgage agreement between many developers and homebuyers – that outlaw clotheslines because they're considered unsightly. The regulation, to take effect today, will not only prohibit new bans but also wipe out most that already exist, a provision that angered the province's building industry. It will apply to free-standing and semi-detached homes and most row houses.
Highrise condos and apartments won't be affected for now. The province wants more consultation about them to deal with safety and other concerns. "The premier wanted to move quickly on this because it's a simple way to help families save money and help to save the environment," said a government official. "We're always looking for opportunities to help people find ways to conserve energy and fight climate change." The announcement will come as Toronto Hydro launches a giveaway of 75,000 clotheslines through four retail chains. Each Saturday and Sunday from April 26 to May 11, retractable lines for indoor or outdoor use, worth $13 to $15 each, will be handed to the first 500 shoppers at some Home Depot, Wal-Mart, Costco and Zellers locations. Details can be found at torontohydro.com. Dryers account for 5 to 6 per cent of Ontario's household electricity demand. An average machine consumes about 900 kilowatt-hours of energy each year and results in the discharge of up to 840 kilograms of air pollution and greenhouse gases. Each dryer adds about $90 a year to a household's electricity bill.
The new regulation comes under the province's Energy Conservation Leadership Act, which empowers the government to remove barriers to conservation, including covenants and municipal bylaws. McGuinty's move, following a 60-day consultation period, was urged by many elected municipal officials and environment groups.
Ontario's chief conservation officer, Peter Love, recommended overriding the bans last November. Across North America, the issue has spawned an advocacy movement known as "Right to Dry." But the group that represents the province's housing industry said the regulation should not be retroactive. All new developments could be clothesline-friendly but existing bans should not be overturned, said Victor Fiume, past-president of the Ontario Home Builders Association and general manager of Oshawa-based Durham Homes. "It's taking away a right from people who knew (a ban) was in place and purchased a home because of that," Fiume said. Clothesline bans are imposed in 20 to 30 per cent of the province's subdivisions, and are part of the legally binding contract between builder and buyer, he said. "Is this what government should be doing – overturning contracts signed by parties voluntarily? "It's a slippery slope to arbitrarily remove a covenant between builders and buyers." Instead of an "arbitrary" government move, it would be better if opponents of the bans sought a court ruling against them. But that has never happened, he said. In any case, Fiume said, the new regulation is a non-issue because few people will use clotheslines in any case. "With today's lifestyle, no one has the time or inclination to hang their clothes outside to dry." The new regulation is just a first step, said Chris Winter of the Conservation Council of Ontario. "The overwhelming majority of people say it's a good move and are solidly behind it. That doesn't mean the overwhelming majority will dry every piece of clothing on a clothesline. But this is a start."
|
/*
* Author: <NAME>
* Copyright (c) 2018, Private Octopus, Inc.
* All rights reserved.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Private Octopus, Inc. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <string.h>
#include <stdio.h>
#include "CsvHelper.h"
#include "M1Data.h"
#include "ithiutil.h"
#ifndef UNREFERENCED_PARAMETER
#define UNREFERENCED_PARAMETER(x) (void)(x)
#endif
M1Data::M1Data()
:
totalDomain(0),
total1stN(0),
nbRegistrars50pc(0),
nbRegistrars90pc(0),
nb_registrars(0)
{
}
M1Data::~M1Data()
{
for (size_t i = 0; i < category_indices.size(); i++) {
if (category_indices[i] != NULL) {
delete[] category_indices[i];
category_indices[i] = NULL;
}
}
}
bool M1Data::Load(char const * monthly_compliance_file_name)
{
FILE* F;
M1DataLine_t line;
M1RegSummary_t reg_summary;
char buffer[512];
char category[512];
char month[64];
double half_sum = 0;
double ninety_mark = 0;
double current_sum = 0;
size_t current_reg = 0;
bool skip_month_column = false;
bool ret;
F = ithi_file_open(monthly_compliance_file_name, "r");
ret = (F != NULL);
/*
MAYBE parse file name for date.
if (M2Type == Unknown) {
parse_file_name(monthly_csv_file_name);
}
*/
dataset.clear();
firstNotice.clear();
while (ret && fgets(buffer, sizeof(buffer), F))
{
int start = 0;
memset(&line, 0, sizeof(M1DataLine_t));
start = CsvHelper::read_string(category, sizeof(category), start, buffer, sizeof(buffer));
start = CsvHelper::read_string(line.name, sizeof(line.name), start, buffer, sizeof(buffer));
start = CsvHelper::read_number(&line.RegistrarId, start, buffer, sizeof(buffer));
start = CsvHelper::read_string(line.complaint, sizeof(line.complaint), start, buffer, sizeof(buffer));
if (skip_month_column) {
start = CsvHelper::read_string(month, sizeof(month), start, buffer, sizeof(buffer));
}
start = CsvHelper::read_number(&line.Domains, start, buffer, sizeof(buffer));
start = CsvHelper::read_number(&line.Complaints, start, buffer, sizeof(buffer));
start = CsvHelper::read_number(&line.nb1stNotices, start, buffer, sizeof(buffer));
start = CsvHelper::read_number(&line.nb3rdNotices, start, buffer, sizeof(buffer));
start = CsvHelper::read_number(&line.nbBreaches, start, buffer, sizeof(buffer));
start = CsvHelper::read_number(&line.nbSuspensions, start, buffer, sizeof(buffer));
start = CsvHelper::read_number(&line.nbTerminations, start, buffer, sizeof(buffer));
start = CsvHelper::read_number(&line.nbNonRenewals, start, buffer, sizeof(buffer));
if (line.RegistrarId > 0) {
/* Some files have an extra month column before the domain count */
if (!skip_month_column && line.Domains == 0) {
skip_month_column = true;
line.Domains = line.Complaints;
line.nb1stNotices = line.nb3rdNotices;
line.nb3rdNotices = line.nbBreaches;
line.nbBreaches = line.nbSuspensions;
line.nbSuspensions = line.nbTerminations;
line.nbTerminations = line.nbNonRenewals;
CsvHelper::read_number(&line.nbNonRenewals, start, buffer, sizeof(buffer));
}
/* allocate data and add to vector */
dataset.push_back(line);
}
}
if (F != NULL)
{
fclose(F);
}
/* Sort by registrar ID */
std::sort(dataset.begin(), dataset.end(), M1Data::RegistryIdIsSmaller);
/* Compile the summaries, one line per registry */
firstNotice.reserve(dataset.size());
    memset(&reg_summary, 0, sizeof(reg_summary));
for (size_t i = 0; i < dataset.size(); i++) {
if (i == 0 || dataset[i].RegistrarId != reg_summary.RegistrarId){
if (i != 0) {
/* Push the old value */
firstNotice.push_back(reg_summary);
}
/* Push the new values */
reg_summary.RegistrarId = dataset[i].RegistrarId;
reg_summary.Domains = dataset[i].Domains;
reg_summary.nb1stNotices = dataset[i].nb1stNotices;
totalDomain += dataset[i].Domains;
total1stN += dataset[i].nb1stNotices;
} else {
/* Count the notices */
reg_summary.nb1stNotices += dataset[i].nb1stNotices;
total1stN += dataset[i].nb1stNotices;
}
}
if (reg_summary.Domains > 0) {
/* Don't forget the last line */
firstNotice.push_back(reg_summary);
}
/* Sort by bigger to smaller number of notices */
std::sort(firstNotice.begin(), firstNotice.end(), M1Data::FirstNoticeIsBigger);
    /* Compute the statistical summaries */
half_sum = 0.5*(double)total1stN;
ninety_mark = 0.9*(double)total1stN;
current_sum = 0;
while (current_sum < half_sum && current_reg < firstNotice.size()) {
current_sum += firstNotice[current_reg++].nb1stNotices;
}
nbRegistrars50pc = (int)current_reg;
while (current_sum < ninety_mark && current_reg < firstNotice.size()) {
current_sum += firstNotice[current_reg++].nb1stNotices;
}
nbRegistrars90pc = (int)current_reg;
nb_registrars = (uint32_t)firstNotice.size();
return ret;
}
int M1Data::GetCategoryIndex(char const * category)
{
int ret = -1;
for (size_t i = 0; i < category_indices.size(); i++) {
if (strcmp(category, category_indices[i]) == 0) {
ret = (int)i;
break;
}
}
if (ret == -1) {
size_t len = strlen(category);
char * x = new char[len + 1];
if (x != NULL) {
memcpy(x, category, len + 1);
ret = (int)category_indices.size();
category_indices.push_back((char const *)x);
}
}
return ret;
}
bool M1Data::ParseFileName(char const * monthly_compliance_file_name)
{
UNREFERENCED_PARAMETER(monthly_compliance_file_name);
return false;
}
bool M1Data::RegistryIdIsSmaller(M1DataLine_t x, M1DataLine_t y)
{
return (x.RegistrarId < y.RegistrarId);
}
bool M1Data::FirstNoticeIsBigger(M1RegSummary_t x, M1RegSummary_t y)
{
return (x.nb1stNotices > y.nb1stNotices);
}
ComputeM1::ComputeM1() :
ithi_m1(),
nb_registrars(0)
{
    for (int i = 0; i < 3; i++) {
        ithi_m1[i] = 0;
    }
}
ComputeM1::~ComputeM1()
{
}
bool ComputeM1::Load(char const * single_file_name)
{
return m1Data.Load(single_file_name);
}
bool ComputeM1::Compute()
{
bool ret = false;
if (m1Data.totalDomain > 0) {
ithi_m1[0] = (1000000*(double)m1Data.total1stN) / ((double)m1Data.totalDomain);
ithi_m1[1] = (double)m1Data.nbRegistrars50pc;
ithi_m1[2] = (double)m1Data.nbRegistrars90pc;
nb_registrars = m1Data.nb_registrars;
ret = true;
}
return ret;
}
bool ComputeM1::Write(FILE * F_out, char const* date, char const* version)
{
bool ret = true;
ret &= fprintf(F_out, "M1.1,%s,%s, , %8f,\n", date, version, ithi_m1[0]) > 0;
ret &= fprintf(F_out, "M1.2,%s,%s, , %f,\n", date, version, ithi_m1[1]) > 0;
ret &= fprintf(F_out, "M1.3,%s,%s, , %f,\n", date, version, ithi_m1[2]) > 0;
ret &= fprintf(F_out, "M1.4,%s,%s, , %d,\n", date, version, nb_registrars) > 0;
return ret;
}
|
? foo(); /* extern */
void test(s32 arg0) {
if (arg0 == 0) {
foo();
}
}
|
System Design and Simulation Model of Coherent Optical DQPSK Modulation Technique for Long-Haul Communication Coherent communication technologies are used extensively for high-speed optical communications because they tolerate many impairments, offering polarization mode dispersion (PMD) tolerance and chromatic dispersion (CD) mitigation, and provide high spectral efficiency. Coherent detection preserves the phase information of the received optical signal. In the DQPSK (Differential Quadrature Phase-Shift Keying) modulation format, each symbol carries 2 bits, so the symbol rate is half the bit rate, with reduced system complexity compared to QPSK. The biggest advantage of coherent optical DQPSK over QPSK is that it requires no optical phase-locked loop at the receiver, since the information is encoded in the differential phase. However, the transmitter requires an extra precoder component to map the information onto the differential phase. In this paper, the working principles of digital coherent DQPSK and a typical optical communication system design are explained, and the simulation experiment demonstrates the feasibility of transmitting 20-Gb/s DQPSK signals within a channel bandwidth of 10 GHz over more than 100 km, corresponding to a spectral efficiency of 2 bps/Hz. The paper also presents the detailed simulation setup and various results for the 20-Gbps coherent DQPSK system generated with simulation software.
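To make the "differential phase" point concrete, below is a minimal Python sketch of a DQPSK precoder/demodulator. The Gray-style mapping of bit pairs to phase increments and the initial reference phase are illustrative assumptions, not the parameters of the simulated system; the point is only that two bits ride on each phase increment, so the symbol rate is half the bit rate and the receiver works from phase differences rather than an absolute carrier phase.

import numpy as np

# Illustrative mapping of bit pairs to DQPSK phase increments (radians).
# The exact mapping used in the paper's simulation is an assumption here.
PHASE_STEP = {(0, 0): np.pi / 4,
              (0, 1): 3 * np.pi / 4,
              (1, 1): 5 * np.pi / 4,
              (1, 0): 7 * np.pi / 4}

def dqpsk_modulate(bits):
    """Differentially encode a bit stream into complex DQPSK symbols.

    Two bits are carried per symbol, so the symbol rate is half the bit rate.
    The information sits in the phase *difference* between consecutive symbols,
    which is why no optical phase-locked loop is needed at the receiver.
    """
    assert len(bits) % 2 == 0
    phase = 0.0
    symbols = []
    for b0, b1 in zip(bits[0::2], bits[1::2]):
        phase = (phase + PHASE_STEP[(b0, b1)]) % (2 * np.pi)
        symbols.append(np.exp(1j * phase))
    return np.array(symbols)

def dqpsk_demodulate(symbols):
    """Recover bit pairs from phase differences of consecutive symbols."""
    bits = []
    prev = 1.0 + 0.0j  # reference symbol with phase 0, matching the modulator
    for s in symbols:
        delta = np.angle(s * np.conj(prev)) % (2 * np.pi)
        # pick the nominal phase increment closest to the measured difference
        pair = min(PHASE_STEP,
                   key=lambda k: abs(np.angle(np.exp(1j * (delta - PHASE_STEP[k])))))
        bits.extend(pair)
        prev = s
    return bits

For the 20-Gb/s case discussed in the paper, the same differential logic would run at a 10-Gbaud symbol rate (20 Gb/s divided by 2 bits per symbol); only the electrical and optical front ends change.
|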
WASHINGTON — Sens. Richard Durbin (D-Ill.) and Lindsey Graham (R-S.C.) introduced today the Development, Relief, and Education for Alien Minors (DREAM) Act.
Lorella Praeli, director of immigration policy and campaigns at the American Civil Liberties Union, said:
“In a year characterized by division and partisan politics, this renewed, bipartisan effort to support and protect our undocumented youth emphasizes their movement’s resilience, the extensive common-sense benefits of the DREAM Act, and how widespread public support is for our DREAMer neighbors, friends, and family members.
“Passing the DREAM Act to achieve permanent security for these young immigrants is long overdue, and we thank Senators Durbin and Graham, stalwart allies in the fight, for their leadership.
“However, there must be no confusion: the introduction of DREAM intensifies rather than lessens the need to preserve the Deferred Action for Childhood Arrivals (DACA) program. President Trump has told us that whether to preserve or rescind DACA is his decision to make. We urge him to follow his self-described ‘big heart’ and protect undocumented immigrant youth who contribute so much to the United States.
“Until legislation is passed that permanently guarantees the protection of DREAMers, DACA must remain in place. The 2017 DREAM Act, like DACA itself, is the product of many years of advocacy and education in the courts and in the streets: our movement will continue to fight and win.” |
from benchmark_evaluator.oracles_impl.oracle_abstract import Oracle
from benchmark_evaluator.configurations.connection_settings import Connection
import benchmark_evaluator.search.query_engine_impl as query_engine
import benchmark_evaluator.search.url_comparator as url_comparator
import time
class OracleSimulator(Oracle):
def __init__(self):
super(OracleSimulator, self).__init__()
self.conn = Connection()
def select_facet(self, query_results, already_selected_facets, max_candidate=5, **kwargs):
"""
Oracle strategy: first select top-K (k=max_candidate=5) most relevant facets based scores,
then find the facet which achieves the best relevance_rank for the expected tip.
:param query_results:
:param already_selected_facets:
:param max_candidate:
:param kwargs:
:return:
"""
start = time.time()
new_facet, final_results = None, None
ans_tip_docs = query_results.gold_ans_urls
print('Oracle -- Last DFS relevance_rank: {}'.
format(",".join([str(item) for item in query_results.obtained_ans_url_ranks])))
facets = Oracle.flatten_facets(
query_results=query_results, max_candidate=max_candidate, already_selected_facets=already_selected_facets)
if len(facets) > 0:
new_facets = list()
results_with_new_facets = dict()
for f in facets: # consider the top K most relevant facets based on facet scores
selected_facets = already_selected_facets.copy()
selected_facets.append(f[0])
tmp_results = query_engine.get_search_results_with_facets(
query_results.query, query_results.query_id, self.conn,
selected_facet_list=selected_facets)
if tmp_results is not None:
tip_ranks = url_comparator.get_ans_url_position_in_docs(tmp_results.documents, ans_tip_docs)
if tip_ranks:
tip_ranks = [r.predicted_rank for r in tip_ranks]
if query_results.obtained_ans_url_ranks and \
min(tip_ranks) < min([r.predicted_rank for r in query_results.obtained_ans_url_ranks]):
new_f = (f[0], tip_ranks)
new_facets.append(new_f)
results_with_new_facets[f[0]] = tmp_results
if new_facets:
                new_facets.sort(key=lambda x: min(x[1]))  # best (lowest) achievable rank first
print('oracle -- new Facet: ', new_facets[0][0])
print('Oracle -- tip relevance_rank: ', str(new_facets[0][1]))
new_facet, final_results = new_facets[0][0], results_with_new_facets[new_facets[0][0]]
print('select_optimal_facet_for_best_possible_result took %d seconds' % (time.time() - start))
return new_facet, final_results
|
#include "inc.h"
|
def _get_provenance_hash(self, job: Job):
if job in self._hashes:
return self._hashes[job]
workflow = job.dag.workflow
h = hashlib.sha256()
if job.is_shell:
h.update(job.rule.shellcmd.encode())
elif job.is_script:
_, source, _, _ = script.get_source(
job.rule.script,
job.rule.workflow.sourcecache,
basedir=job.rule.basedir,
wildcards=job.wildcards,
params=job.params,
)
h.update(source.encode())
elif job.is_notebook:
_, source, _, _ = script.get_source(
job.rule.notebook,
job.rule.workflow.sourcecache,
basedir=job.rule.basedir,
wildcards=job.wildcards,
params=job.params,
)
h.update(source.encode())
elif job.is_wrapper:
_, source, _, _ = script.get_source(
wrapper.get_script(
job.rule.wrapper,
                    sourcecache=job.rule.workflow.sourcecache,
prefix=workflow.wrapper_prefix,
),
job.rule.workflow.sourcecache,
basedir=job.rule.basedir,
wildcards=job.wildcards,
params=job.params,
)
h.update(source.encode())
for key, value in sorted(job.params._allitems()):
if key is not None:
h.update(key.encode())
try:
h.update(json.dumps(value, sort_keys=True).encode())
except TypeError as e:
raise WorkflowError(
"Rule {} cannot be cached, because params "
"are not JSON serializable. "
"Consider converting them into a suitable format "
"if you are sure that caching is necessary. "
"Otherwise, deactivate caching for this rule "
"by removing it from the --cache command line argument "
"or removing the cache: true directive from the rule itself.".format(
job.rule.name
),
e,
)
for file_hash in sorted(
hash_file(f)
for f in job.input
if not any(f in depfiles for depfiles in job.dag.dependencies[job].values())
):
h.update(file_hash.encode())
if workflow.use_conda and job.conda_env:
if workflow.use_singularity and job.conda_env.container_img_url:
h.update(job.conda_env.container_img_url.encode())
h.update(job.conda_env.content)
elif workflow.use_singularity and job.container_img_url:
h.update(job.container_img_url.encode())
for dep_hash in sorted(
self._get_provenance_hash(dep)
for dep in set(job.dag.dependencies[job].keys())
):
h.update(dep_hash.encode())
provenance_hash = h.hexdigest()
self._hashes[job] = provenance_hash
return provenance_hash |
/**
* A helper class which is able to create and fulfill a bean from a {@link Properties} object.
*
* @since 2013.10.19
*/
final class PropertiesParser
extends ConfigurationParser<InputStream>
{
@Override
public <T> T getBean(Class<T> theClass, InputStream input)
throws ConfigurationLoader.ConfigurationLoaderException
{
final T bean = createBean(theClass);
return setBean(theClass, input, bean);
}
@Override
public <T> T setBean(Class<T> theClass, InputStream input, T bean)
throws ConfigurationLoader.ConfigurationLoaderException
{
final Properties properties = new Properties();
try
{
properties.load(input);
}
catch (IOException exception)
{
throw new ConfigurationLoader.ConfigurationLoaderException(exception);
}
for (Entry<Object, Object> entry : properties.entrySet())
{
final String propertyName = (String) entry.getKey();
final String rawPropertyValue = (String) entry.getValue();
setField(theClass, bean, propertyName, rawPropertyValue);
}
return bean;
}
} |
Genome Sequence of Bacillus subtilis subsp. spizizenii gtP20b, Isolated from the Indian Ocean ABSTRACT Bacillus subtilis is an aerobic, spore-forming, Gram-positive bacterium that is a model organism and of great industrial significance as a source of diverse novel functional molecules. Here we present, to our knowledge, the first genome sequence of a Bacillus subtilis strain, gtP20b, isolated from the marine environment. A subset of candidate genes and gene clusters were identified that are potentially involved in the production of diverse functional molecules, such as novel ribosomal and nonribosomal antimicrobial peptides. The genome sequence described in this paper is, owing to its high strain specificity, of great importance for basic as well as applied research on marine organisms. |
Dynamically induced scalar quark confinement: A link between chiral symmetry breaking and confinement Employing functional approaches the infrared behaviour of Landau gauge QCD vertex functions is investigated. Results for the ghost-gluon, three-gluon and quark-gluon vertex functions are presented. As can be analytically shown a linear rising potential between heavy quarks is generated by infrared singularities in the dressed quark-gluon vertex. The selfconsistent mechanism that generates these singularities implies the existence of scalar Dirac amplitudes of the full vertex and the quark propagator. These amplitudes can only be present when chiral symmetry is broken, either explicitly or dynamically. The corresponding relations thus constitute a novel mechanism that directly links chiral symmetry breaking with confinement. Introduction Quark confinement and dynamical chiral symmetry breaking are the two most prominent phenomena of infrared QCD. Recent Monte-Carlo lattice calculations made clear that there is, at least for quarks in the fundamental representation, a close and yet not fully understood relation between these two properties of QCD. E.g. the spectral properties of the Dirac operator reflect both, confinement and chiral symmetry breaking. It is the central aim of this talk to shed light onto this issue from the point of view of QCD Green functions in the Landau gauge. In this and related functional approaches dynamical chiral symmetry breaking finds a direct explanation, the main challenge for such non-perturbative methods is posed by the properties of the linearly rising static quark-antiquark potential. There have been many quite different attempts to relate the properties of this potential to properties of QCD, and thus explain quark confinement. In ref. some of these pictures have been briefly reviewed. These explanations for confinement are seemingly different but there are surprising relations between them which are not yet understood. Given the current status one has to note that these theories are definitely not mutually exclusive but simply reveal only different aspects of the confinement phenomenon. And into one special facet, the above mentioned relation to broken chiral symmetry, there is novel insight from the Landau gauge Greens functions approach. Let us start by looking at the Dyson-Schwinger equation for the ghost-gluon vertex function as depicted in fig. 1. In the Landau gauge the gluon propagator is transverse, and therefore one can employ the relation Infrared Exponents of Gluons and Ghosts to conclude that the ghost-gluon vertex stays finite when the outgoing ghost momentum vanishes, i.e. when q → 0. This argument is valid to all orders in perturbation theory, a truely nonperturbative justification of the infrared finiteness of this vertex has been given in refs.. Using this property of the ghost-gluon vertex the Dyson-Schwinger equation for the ghost propagator, see fig. 2, can be analysed. The only unknowns in the deep infrared are the gluon and the ghost propagators: In Landau gauge these (Euclidean) propagators are parametrized by two invariant functions, Z(k 2 ) and G(k 2 ), respectively. As solutions of renormalized equations, these functions depend also on the renormalization scale. 
Furthermore, assuming that QCD Green functions can be expanded in asymptotic series, the integral in the ghost Dyson-Schwinger equation can be split up into three pieces: an infrared integral, an ultraviolet integral, and an expression for the ghost wave function renormalization. It is the resulting equation for the latter quantity which allows one to extract definite information without using any truncation or ansatz. One obtains that the infrared behaviour of the gluon and ghost propagators is given by power laws, and that the exponents are uniquely related such that the gluon exponent is -2 times the ghost exponent. As we will see later on, this implies an infrared fixed point for the corresponding running coupling. The signs of the exponents are such that the gluon propagator is infrared suppressed as compared to the one for a free particle, while the ghost propagator is infrared enhanced.

Given the infrared power laws that the Yang-Mills propagators obey, one can infer the infrared behaviour of higher n-point functions. To this end the n-point Dyson-Schwinger equations have been studied in a skeleton expansion, i.e. a loop expansion using dressed propagators and vertices, and an asymptotic expansion has been applied to all primitively divergent Green functions. As an example consider the Dyson-Schwinger equation for the 3-gluon vertex, which is diagrammatically represented in fig. 3; its skeleton expansion is shown in fig. 4. From this analysis the following general infrared behaviour for one-particle irreducible Green functions with 2n external ghost legs and m external gluon legs can be derived (see the reconstruction below), where κ is a yet undetermined parameter and d is the space-time dimension. Exploiting Dyson-Schwinger equations and Exact Renormalization Group Equations one can show that this infrared solution is unique.

Infrared fixed point of the Yang-Mills running coupling

The infrared behaviour (2.3) especially includes the dressed vertex functions that enter the definitions of the running couplings, and therefore the running couplings related to these vertex functions possess an infrared fixed point. The infrared value of the coupling related to the ghost-gluon vertex can be computed analytically: this yields α_gh-gl(0) ≈ 2.972 for N_c = 3 and κ = (93 − √1201)/98 ≃ 0.595353, which is the value obtained with a bare ghost-gluon vertex.

Positivity violation for the gluon propagator

Positivity violation of the propagator of transverse gluons has been for a long time a conjecture which has been supported recently, see e.g. and references therein. The basic feature is the infrared suppression of transverse gluons caused by the infrared enhancement of ghosts. As this behaviour clearly signals the confinement of transverse gluons, it is certainly worth having a closer look at the underlying analytic structure of the gluon propagator. Note that the infrared exponent κ is an irrational number. Given the infrared power laws, this already implies that the gluon propagator possesses a cut on the negative real p² axis. It is possible to fit the solution for the gluon propagator quite accurately, without introducing further singularities in the complex p² plane, by a form (2.7) in which w is a normalization parameter and γ = (−13N_c + 4N_f)/(22N_c − 4N_f) is the one-loop value for the anomalous dimension of the gluon propagator; the running coupling is expressed by a corresponding fit (2.8). It is important to note that the gluon propagator (2.7) possesses a form such that Wick rotation is possible!
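For orientation, the infrared relations this section refers to can be summarized as follows. This is a hedged reconstruction assembled from the exponent and coupling values quoted in the text and from standard Landau-gauge conventions of the cited literature; the normalizations, the definition of the coupling, and the d-dimensional form of the exponent are assumptions rather than the original equations (2.3)-(2.8).

\begin{align*}
  % Landau-gauge propagators parametrized by two invariant dressing functions
  D_{\mu\nu}(k) &= \left(\delta_{\mu\nu}-\frac{k_\mu k_\nu}{k^2}\right)\frac{Z(k^2)}{k^2},
  \qquad D_G(k^2) = -\,\frac{G(k^2)}{k^2}, \\
  % Infrared power laws governed by a single exponent kappa
  Z(k^2) &\sim (k^2)^{2\kappa}, \qquad G(k^2) \sim (k^2)^{-\kappa},
  \qquad \kappa=\frac{93-\sqrt{1201}}{98}\simeq 0.5954, \\
  % General IR behaviour of a 1PI function with 2n ghost legs and m gluon legs
  % (the (1-n)(d/2-2) term vanishes in d = 4)
  \Gamma^{n,m}(p^2) &\sim (p^2)^{(n-m)\kappa + (1-n)(d/2-2)}, \\
  % Running coupling from the ghost-gluon vertex: infrared fixed point
  \alpha_{\mathrm{gh\text{-}gl}}(p^2) &= \alpha(\mu^2)\, G^2(p^2)\, Z(p^2)
  \;\xrightarrow{\;p^2\to 0\;}\; \alpha_{\mathrm{gh\text{-}gl}}(0)\approx 2.972 \quad (N_c=3).
\end{align*}

With these conventions the gluon exponent 2κ is indeed −2 times the ghost exponent −κ, and the ghost-gluon vertex (n = m = 1) is infrared finite, in line with the statements above.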
Dynamically induced scalar quark confinement

The results presented above provide an explanation of how gluon confinement works in a covariant gauge, but due to the infrared suppression of the gluon propagator quark confinement seems even more mysterious than ever. To proceed as in the studies described above, the Dyson-Schwinger equation for the quark propagator is analyzed, with the result that the structure of the quark propagator depends crucially on the quark-gluon vertex. Therefore a detailed study of this three-point function, and especially its infrared behaviour, is mandatory. Its Dyson-Schwinger equation is diagrammatically depicted in fig. 6, its skeleton expansion in fig. 7. But there is a drastic difference between the quarks and the Yang-Mills fields: they possess a current mass. Even if this were not the case, one expects dynamical chiral symmetry breaking and thus dynamical mass generation to occur.

To generalize the infrared analysis of the Yang-Mills theory to full QCD one concentrates first on the quark sector of quenched QCD and chooses the masses of the valence quarks to be large, i.e. m > Λ_QCD. The remaining scales below Λ_QCD are those of the external momenta of the propagators and vertex functions. Then the Dyson-Schwinger equations can be used to determine the self-consistent solutions in terms of powers of the small external momentum scale p² ≪ Λ²_QCD. The equations which have to be considered in addition to the ones of Yang-Mills theory are those for the quark propagator and the quark-gluon vertex. The full quark-gluon vertex can consist of up to twelve linearly independent Dirac tensors. Some of those would vanish if chiral symmetry were realized in the Wigner-Weyl mode: these tensor structures can be non-vanishing either if chiral symmetry is explicitly broken by current masses and/or if chiral symmetry is realized in the Nambu-Goldstone mode (i.e. spontaneously broken). From a solution of the Dyson-Schwinger equations we infer that these "Dirac-scalar" structures are, in the chiral limit, generated non-perturbatively together with the dynamical quark mass function in a self-consistent fashion: dynamical chiral symmetry breaking reflects itself not only in the propagator but also in the quark-gluon vertex function.

From such an infrared analysis one obtains an infrared divergent solution for the quark-gluon vertex, such that the Dirac vector and "scalar" components of this vertex are infrared divergent with exponent −κ − 1/2. A numerical solution of a truncated set of Dyson-Schwinger equations confirms this infrared behavior. The driving pieces of this solution are the scalar Dirac amplitudes of the quark-gluon vertex and the scalar part of the quark propagator. Both pieces are only present when chiral symmetry is broken, either explicitly or dynamically. For the coupling related to the quark-gluon vertex we obtain an infrared divergent expression (3.1); note that this coupling is singular in the infrared, contrary to the couplings from the Yang-Mills vertices. In a next step the anomalous infrared exponent of the four-quark function is determined. Note that the static quark potential can be obtained from this four-quark one-particle irreducible Green's function, which, including the canonical dimensions, behaves like (p²)^−2 for p² → 0. Therefore, employing the well-known relation for a function F ∝ (p²)^−2 (sketched below), one obtains a linearly rising static quark-antiquark potential V(r).
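The "well-known relation" invoked in the last sentence is the three-dimensional Fourier transform that maps the infrared limit of the four-quark function onto the static potential. A minimal sketch of that step follows; the overall constant (the string tension σ) is left undetermined, since the text does not fix normalizations.

\begin{equation*}
  V(r)\;\sim\;\int\!\frac{d^{3}p}{(2\pi)^{3}}\;e^{i\,\vec p\cdot\vec r}\,F(\vec p^{\,2})
  \;\propto\;\int\!\frac{d^{3}p}{(2\pi)^{3}}\;\frac{e^{i\,\vec p\cdot\vec r}}{(\vec p^{\,2})^{2}}
  \;\propto\;|\vec r\,|\,,
  \qquad\text{so}\qquad
  F(p^{2})\propto(p^{2})^{-2}\;\Longrightarrow\;V(r)\simeq\sigma\,r .
\end{equation*}

In other words, a four-quark function scaling like (p²)^−2 in the infrared translates directly into a linearly rising potential.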
We conclude at this point that, given the infrared divergence of the quark-gluon vertex as found in the solution of the coupled system of Dyson-Schwinger equations, the vertex overcompensates the infrared suppression of the gluon propagator, and one therefore obtains a linearly rising potential. In addition, this potential is dynamically induced and has a strong scalar component. To elucidate the relation found here between chiral symmetry breaking and quark confinement we keep chiral symmetry artificially in the Wigner-Weyl mode, i.e. in the chiral limit we force the quark mass term as well as the "scalar" terms in the quark-gluon vertex to be zero. We then find that the resulting running coupling from the quark-gluon vertex is no longer diverging but goes to a fixed point in the infrared, similar to the couplings from the Yang-Mills vertices. Correspondingly, one obtains a 1/r behaviour of the static quark potential. The "forced" restoration of chiral symmetry is therefore directly linked with the disappearance of quark confinement. The infrared properties of the quark-gluon vertex in the "unforced" solution thus constitute a novel mechanism that directly links chiral symmetry breaking with confinement.

Summary

In this talk we have reported on results of functional approaches to infrared QCD in the Landau gauge. We have elucidated the mechanism for gluon confinement: positivity of transverse gluons is violated. Furthermore, in the Yang-Mills sector the strong running coupling is infrared finite whereas the running coupling from the quark-gluon vertex is infrared divergent. Chiral symmetry is dynamically broken, and this takes place in the quark propagator and the quark-gluon vertex. We have provided clear evidence that static quark confinement in the Landau gauge is due to the infrared divergence of the quark-gluon vertex. In the infrared this vertex has strong scalar components which induce a relation between confinement and broken chiral symmetry. |
Rumours of Sidharth Malhotra and Kiara Advani’s supposed romance have faded out but well, latest reports suggest that their film together, a biopic on Kargil martyr Vikram Batra is finally ready to go on floors. It is speculated that Sid’s film will start in April as the extensive pre-production work is finally done. First, a few portions including all the romantic scenes between Sid and Kiara will be shot in Chandigarh and later, the unit will move to Kashmir to shoot key war scenes. The movie is backed by Karan Johar’s Dharma Productions. Shabbir Boxwala is associated with the project. The reason this movie was delayed was because the makers wanted to get done with the pre-production work beforehand and since it is now sorted, the makers will start shoot soon.
Sidharth will essay a double role in this movie. Vikram and his twin brother Vishal both will be played by Sid. It sure is a challenging role for him as we have not seen Sid in a double role before this. The movie is directed by Vishnu Vardhan. Apart from an inspiring story of a war hero, the movie will have Sidharth and Kiara’s fresh pairing which has garnered a lot of interest.
Vikram Batra has been posthumously awarded Param Vir Chakra for his bravery in Kargil. Sid is not new to the war movie genre as he has just done Aiyaary with Manoj Bajpai and Rakul Preet. |
Convulsions and Ritalin? I have been following a 10-year-old boy with school problems related to his hyperactivity and short attention span. After an essentially normal neurological and psychological evaluation, he began taking 5 mg/day of Ritalin. This dose was increased weekly by 5 mg. When a dose of 15 mg/day was reached, he had a grand mal seizure. An electroencephalogram done one week later was normal. A neurologist of my acquaintance recalls a similar association in about four or five other patients out of many being followed by her who have been put on Ritalin. |
# Source: ConnexonSystems/dockex, core/experiment/helpers/connect_ftp_clients.py
import time
from ftplib import FTP
def connect_ftp_clients(ftp_server_dicts, local_ip_address):
    """Connect to every remote dockex FTP server and return the list of clients."""
    ftp_clients = []
    for ftp_server_dict in ftp_server_dicts:
        ftp_client = connect_ftp_client(ftp_server_dict, local_ip_address)
        # connect_ftp_client returns None for the local node, so skip it.
        if ftp_client:
            ftp_clients.append(ftp_client)
    return ftp_clients


def connect_ftp_client(ftp_server_dict, local_ip_address):
    """Connect and log in to a single FTP server, retrying until it succeeds.

    Returns None when the server described by ftp_server_dict is the local
    machine, since no FTP transfer is needed in that case.
    """
    ftp_client = None
    keep_trying = True

    while keep_trying:
        try:
            if ftp_server_dict["ip_address"] == local_ip_address:
                # Local node: no FTP connection required.
                ftp_client = None
            else:
                print("CONNECTING TO: " + str(ftp_server_dict))
                ftp_client = FTP("")
                # NOTE: a connect timeout is required here; without it,
                # connections can hang. It was raised to 120 s after shorter
                # timeouts were hit during long file transfers.
                ftp_client.connect(
                    ftp_server_dict["ip_address"],
                    ftp_server_dict["tmp_dockex_ftpd_port"],
                    timeout=120,
                )
                print("LOGGING IN")
                ftp_client.login("dockex", ftp_server_dict["tmp_dockex_ftpd_password"])
                print("LOGGED IN")

            keep_trying = False

        except Exception as e:
            print("ERROR CONNECTING TO FTP SERVER:")
            print(ftp_server_dict)
            print(e)
            time.sleep(1.0)

    return ftp_client
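# A minimal usage sketch follows. The dictionary keys mirror those read by the
# functions above; the addresses, port, and password are hypothetical
# placeholders, and the call will keep retrying until the remote server is
# actually reachable.

if __name__ == "__main__":
    servers = [
        {
            "ip_address": "192.168.1.20",  # hypothetical remote dockex node
            "tmp_dockex_ftpd_port": 2121,
            "tmp_dockex_ftpd_password": "example-password",
        },
        {
            "ip_address": "192.168.1.10",  # pretend this is the local machine
            "tmp_dockex_ftpd_port": 2121,
            "tmp_dockex_ftpd_password": "example-password",
        },
    ]

    clients = connect_ftp_clients(servers, local_ip_address="192.168.1.10")
    print("connected clients:", len(clients))  # the local entry is skipped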
|
# Greedy from the largest index: box i+1 is the only undecided box among the
# multiples of i+1, so its value is forced by the parity constraint a_i.
N = int(input())
a_list = list(map(int, input().split()))

b = [0] * N
for i in reversed(range(N)):
    s = N // (i + 1)  # number of multiples of i+1 that are <= N
    a = a_list[i]
    if s > 1:
        # Parity contributed by the already-decided boxes at larger multiples of i+1.
        m_a = sum(b[(i + 1) * k - 1] for k in range(2, s + 1)) % 2
        a ^= m_a
    b[i] = a

print(sum(b))
chosen = [str(i + 1) for i in range(N) if b[i]]
if chosen:
    print(" ".join(chosen))
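# A quick self-check of the parity logic (my own test harness, not part of the
# original submission). It checks the constraint implied by the code above:
# for every i, the number of chosen boxes among the multiples of i must be
# congruent to a_i mod 2. For N = 3 and a = [1, 0, 0] the loop selects box 1
# only, and all three constraints hold.

def verify(n, a, chosen):
    # chosen is a set of 1-based box indices
    for i in range(1, n + 1):
        count = sum(1 for j in range(i, n + 1, i) if j in chosen)
        if count % 2 != a[i - 1]:
            return False
    return True

assert verify(3, [1, 0, 0], {1})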
|
Comparative Metabolomics of Early Development of the Parasitic Plants Phelipanche aegyptiaca and Triphysaria versicolor Parasitic weeds of the family Orobanchaceae attach to the roots of host plants via haustoria capable of drawing nutrients from host vascular tissue. The connection of the haustorium to the host marks a shift in parasite metabolism from autotrophy to at least partial heterotrophy, depending on the level of parasite dependence. Species within the family Orobanchaceae span the spectrum of host nutrient dependency, yet the diversity of parasitic plant metabolism remains poorly understood, particularly during the key metabolic shift surrounding haustorial attachment. Comparative profiling of major metabolites in the obligate holoparasite Phelipanche aegyptiaca and the facultative hemiparasite Triphysaria versicolor before and after attachment to the hosts revealed several metabolic shifts implicating remodeling of energy and amino acid metabolism. After attachment, both parasites showed metabolite profiles that were different from their respective hosts. In P. aegyptiaca, prominent changes in metabolite profiles were also associated with transitioning between different tissue types before and after attachment, with aspartate levels increasing significantly after the attachment. Based on the results from 15N labeling experiments, asparagine and/or aspartate-rich proteins were enriched in host-derived nitrogen in T. versicolor. These results point to the importance of aspartate and/or asparagine in the early stages of attachment in these plant parasites and provide a rationale for targeting aspartate-family amino acid biosynthesis for disrupting the growth of parasitic weeds. Introduction A parasitic plant is a plant that derives some or all of its nutrients from another living plant through specialized feeding structures called haustoria. Parasitic plants are diverse, and vary in the extent to which they depend on the nutrients of their host. Hemiparasites retain the ability to photosynthesize, whereas holoparasites lack this ability and depend on their hosts for all nutrients. Facultative parasites are able to complete their lifecycle without a host, if necessary, while obligate parasites are absolutely reliant on a host. All Orobanchaceae species are parasitic on host plant roots, but species differing in host dependence may use different mechanisms to obtain resources. For example, holoparasites must acquire both carbon and nitrogen from their hosts, so these species make connections to both host xylem and phloem and are primarily phloem-feeding. In contrast, hemiparasites are primarily xylem-feeding and mainly take nitrogen from the host plant. Studying parasitic plant metabolism has been one approach for identifying processes that may be targeted for disruption to improve crop resistance to parasitism. Several species in the family Orobanchaceae are important weeds, and in particular, members of the genera Phelipanche, Orobanche, and Striga cause major agricultural damage. Weedy Orobanchaceae tend to thrive in warm, arid climates and grow primarily in Africa, the Mediterranean, and the Middle East. It is estimated that 4%-5% of the world's arable land is threatened in capacity for crop production by parasitic Orobanchaceae and these parasites can cause up to 100% loss of crop yield. These parasites are also prolific producers of small seeds; millions of new seeds per hectare can be added to the seed bank each year in fields where susceptible crops are grown. 
These parasitic plants are more challenging to control than non-parasitic weeds because they cause crop damage while they are still underground, where they are not accessible to mechanical control measures. Chemical control is also challenging because herbicides cannot selectively kill the parasite without harming the crop, and the non-photosynthetic parasite species are not susceptible to herbicides that disrupt chlorophyll production or photosynthesis. The fact that parasitic plants are metabolically dependent on their hosts raises the possibility that reliance on host metabolism is a weakness that may be exploited for parasitic weed control. For parasite species that acquire carbon and nitrogen in organic forms, parasitism would seem to relieve them of the metabolic burden of incorporating carbon and nitrogen from inorganic materials. In Orobanchaceae species, a number of ammonium-assimilation-related enzymes have low activity, such as nitrate reductase (NR), glutamine synthetase (GS), and aspartate amino transferase (AAT). In addition, some members of these species are missing GS2, one of the two copies of GS that are normally found in higher plants. This may be due to reduced selection pressure, given the role of GS2 in re-assimilating ammonia produced by photorespiration. Free-living plants typically have equal or higher GS2 than GS1 activities. Within Orobanchaceae, the ratio of activities of the two isozymes depends loosely on the level of parasitism of the parasites studied: Obligate parasites primarily have GS1 activity-with holoparasites generally lacking GS2 activity and hemiparasites having low levels of GS2 activity relative to GS1. A corollary to the hypothesis that parasitic plants are deficient in aspects of their own metabolism is that parasitic plants would then adopt major aspects of the host metabolism because they would acquire host metabolites. However, recent reports contribute to a consensus that parasitic plants are largely self-regulating in metabolism, showing metabolite profiles that are distinct from their hosts, likely to support their parasitic lifestyles. A number of studies have compared metabolite profiles of the host root to Orobanchaceae holoparasites. Limitations of these studies were in focusing on a single developmental stage and using obligate parasites with well-established attachments, thus ignoring the role of parasite development and generally neglecting facultative parasites. Many questions remain unanswered regarding the independence of parasite and host metabolisms in the context of parasite development during the transition from free-living to parasitic life stages. More attention should also be given to understanding differences between parasite species and their specific host interactions. The objective of this research was to fill these knowledge gaps by characterizing parasitic plant metabolism across key developmental stages in their life cycle and by comparing related species that contrast in level of host dependence. For the comparison of developmental stages, we focused on the transition from free-living to host-dependent stages of their life cycles. The free-living stage occurs in early development when the parasites must grow and develop either autotrophically (for facultative parasites) or as seedlings using only seed-storage reserves (for holoparasites). 
The process of host attachment is therefore pivotal in that this transition marks the switch from independent metabolism to host-fed metabolism and comprises the essence of parasitism. For evolutionary comparisons, we used the facultative hemiparasite Triphysaria versicolor and the obligate holoparasite Phelipanche aegyptiaca, both members of the family Orobanchaceae, the only parasitic plant family that includes species that span all levels of host dependence. Our goal was to determine whether these species would show evidence of shared metabolic processes that could indicate a cross-cutting strategy for parasite metabolism. Although we used different hosts for each parasite species (justified in more detail elsewhere), we were able to contrast the different parasite species with their hosts and with each other to gain insight into the extent to which parasite metabolism is self-regulating. Our data reveal the unique metabolic makeup of two representative Orobanchaceae species that differ in parasitic dependency and clarify the differences in feeding strategies during developmental transitions in these parasites. We show that nitrogen assimilation dynamics differ among the parasites and suggest that multiple metabolic mechanisms exist to enable successful parasitism. Parasitic Plants and Hosts Have Distinct Metabolite Profiles Metabolite profiles were analyzed for two parasitic plant species and their associated hosts (Figure 1). The parasites differ in levels of host dependence and across life stages that span from host-independent to host-dependent stages. Steady-state levels of 28 metabolites were quantified in 16 sample types from parasites and their hosts. Four to five biological replicates of pooled parasite samples were analyzed for each sample type. When all stages of the two parasite and two host species were compared on a combined basis, each species demonstrated a distinctive metabolite profile as determined by principal component analysis (PCA) (Figure 2) and analysis of variance (ANOVA) (Figure S1, Table S1). Thus, the two parasite species P. aegyptiaca and T. versicolor differed from each other in metabolite profiles, as did the parasite species and their respective hosts. The first three principal components (PCs) together accounted for 65% of the variance among samples. Figure 1. Stages of the Orobanchaceae species analyzed for metabolite profiles in this study. These stages correspond to those studied in previous transcriptome studies of Orobanchaceae. Corresponding host roots of A. thaliana and M. truncatula were collected for each stage indicated for the parasites P. aegyptiaca and T. versicolor, respectively. Figure was adapted from Yang, Wafula, Honaas, Zhang, Das, Fernandez-Aparicio, Huang, Bandaranayake, Wu, Der, Clarke, Ralph, Landherr, Altman, Timko, Yoder, Westwood, and dePamphilis. Figure 2. Comparison of two parasitic plant species and their associated hosts. PCA on metabolite level correlations. A 3D score plot of PC1, PC2, and PC3 is shown, accounting for 26.7%, 23.5%, and 15.1% of the variation between samples, respectively. PCA was performed on combined data from two experiments with five stages of P. aegyptiaca and three stages of A. thaliana host root samples (shown in green and orange, respectively, using circles to represent the first experiment and diamonds to represent the second) and one experiment with four stages of T. versicolor and four stages of M. truncatula host root samples (shown in blue asterisks and pink asterisks, respectively). Earlier time points are shown in lighter color shades. The data used relative levels of sugars, sugar alcohols, and carboxylic acids and absolute levels of free amino acids (FAAs) and included four to five biological replicates per developmental stage. To gain a higher resolution picture of the factors contributing to differences between hosts and parasites and among different stages of parasite development, each parasite species was compared across stages and directly against its host. For P. aegyptiaca, the PCA and ANOVA indicate that metabolite profiles are distinguishable by developmental stage (Figure S1, Table S1), with pronounced differences separating stages before and after vascular linkage to the host (stages 1-3 vs. stages 4.1-4.2) (Figure 3A). The parasite metabolite profile is also distinguishable from that of its host at all stages, and in fact appears more similar to the host prior to rather than after attachment to the host (Figure 3A). PC 1 separates pre- from post-vascular P.
aegyptiaca, PC 2 separates stages 4.1 and 4.2, and PC 3 separates pre-vascular P. aegyptiaca from Arabidopsis thaliana roots. Metabolites that account for the separation between samples are shown in the loading plots (Figure 3B). Prominent examples of differences in key metabolites include higher levels of γ-aminobutyric acid (GABA) and glucose in A. thaliana roots than in parasites, whereas parasites have more mannitol, proline, and lysine. P. aegyptiaca has higher levels of proline and lysine shortly after vascular attachment than it has after development of adventitious roots on the tubercle. The levels of many specific metabolites increased in P. aegyptiaca after its attachment to the host (stages 4.1 and 4.2) (Figures 4, 5, and S2). These metabolites include asparagine, glutamine, aspartate, glutamate, valine, and all detected sugars, sugar alcohols, and carboxylic acids except for glucose and mannitol. Generally, the attached parasites have relatively high levels of these compounds in comparison to both pre-attached P. aegyptiaca seedlings and to the host roots. The sum of the detectable proteogenic free amino acids (FAAs) is highest at stage 4.1 immediately after attachment, with a subsequent decrease at stage 4.2, although their levels remain higher than at the pre-attachment stages (Figure 4). Asparagine and glutamine levels peak at stage 4.1, while aspartate and glutamate levels are high at both stages 4.1 and 4.2. On the host side, A. thaliana root metabolite profiles are unaffected by infection with P. aegyptiaca (Figure 3A). Figure 5. Abundance of selected sugars, sugar alcohols, and carboxylic acids. Data for graphs are from the first experiment in P. aegyptiaca. Four to five biological replicates are used for each sample type. Amounts were normalized from peak areas of the quantitation ion selected for each compound. In cases where A. thaliana host roots were not collected for the corresponding P. aegyptiaca stages, no data are shown. Tukey HSD connecting letters are given, run on host and parasite sample values concurrently. Standard error bars shown. The PCA of T. versicolor on Medicago truncatula illustrates a different pattern of metabolism than seen in P. aegyptiaca, though some similarities exist. T. versicolor growth stages are not clearly separated by PCA of the metabolite profiles within the species, unlike growth stages in P. aegyptiaca (Figure 3C). However, the levels of compounds such as mannitol, fumarate/maleate, glutamate, and, to a lesser extent, malate and aspartate are higher in T. versicolor than in M. truncatula roots, which is consistent with differences observed in the P. aegyptiaca-A. thaliana system. ANOVA results corroborate the PCA results: the p-values for all tested metabolites with growth stage as a factor exceeded 0.05 and were therefore statistically insignificant (Figure S1, Table S1).
Only a few specific amino acids showed altered levels pre vs. post attachment, and proline was notable for an increase in stage 4 relative to earlier stages (Figure 4). On the host side, the M. truncatula roots were enriched in asparagine, valine, threonine, phenylalanine, and histidine relative to the parasite, with asparagine being the most abundant amino acid in the M. truncatula roots (Figures 3C,D, 4, and S2). Asparagine levels were constant through the first three stages, but decreased to 25% of the average of its previous levels in stage 4, after parasite attachment. Histidine, threonine, isoleucine, and phenylalanine also followed this pattern, and the trend was reflected in the sum of the detectable proteogenic FAAs (Figures 4 and S2). GABA was the only metabolite that showed increased levels in the host root during T. versicolor parasitism (stages 3 and 4; Figure S2). Levels of the sugars, carboxylic acids, and sugar alcohols remained stable in the host root in response to parasitism. T. versicolor Preferentially Accumulates Asparagine/Aspartate The relatively constant levels of amino acids across stages spanning pre- and post-attachment in T. versicolor provided limited insight into nitrogen assimilation. A further experiment was conducted to determine the compounds to which nitrogen from the host is directed first in T. versicolor. A split root system was used to feed 15N-labeled nitrate to the host without allowing direct uptake by the parasite (Figure 6), and protein-derived amino acids were analyzed in terms of levels/composition and 15N enrichment. In this type of experiment, when a single 15N substrate is provided (in this case at low isotopic enrichment), all N-containing compounds will be labeled to the same (low) degree within a given system after time periods exceeding rates of metabolism. As such, protein-derived amino acids in M. truncatula roots showed low, about 3%, isotopic enrichment in analyzed amino acids regardless of parasitism when 15N nitrate at 5% enrichment was fed to half of the roots for several days (Mt-Uninfected and Mt-Infected, Figure 7A). Host root protein levels and amino acid composition of the total proteins were also unaffected by parasite attachment (Figure 7B,C). T. versicolor feeding on M. truncatula roots showed low enrichment in most amino acids (about 1.5% 15N), confirming the uptake of nitrogen-containing compounds from the host. Interestingly, asparagine and/or aspartate (Asx; these two amino acids are indistinguishable due to acid-enabled hydrolysis of the amide group from asparagine during protein hydrolysis) showed a small, but statistically significant increase in labeling over other amino acids (Tv-Attached, Figure 7A). This increase in Asx 15N enrichment was specific to the attached parasite and was not observed in the host roots. Asx proportion in proteins was also increased (doubled) upon attachment to the host root (Tv-Attached, Figure 7B), while the composition of the remaining analyzed amino acids was not affected in T. versicolor attached to the host root (Tv-Attached vs. Tv-Water, Figure 7B). Nitrogen Assimilation is a Priority in Early Development of Parasitic Plants Two key genes for ammonium assimilation in plants are asparagine synthases (AS) and aspartate amino transferases (AAT), both of which occur in multigene families. We searched the transcriptome data of the PPGP to identify sequences coding for the P. aegyptiaca and T. versicolor versions of the corresponding transcripts. Two forms of AS and four forms of AAT were found, and the expression of all forms was inferred from RNA-seq data. RNA-seq contigs used are listed in Table S2. For P. aegyptiaca, expression of at least one form of each gene peaks early in development, around stage 2 when the haustorium is being initiated (Figure 8). Other forms of these genes have maximum expression around stages 3 and 4.1. For T. versicolor, AS expression is dominated by a single gene with high expression at stage 3 as well as stages 6.1 (vegetative stems/leaves) and 6.2 (flowers). Expression of AAT in T. versicolor is highest in seeds, but otherwise characterized by multiple forms expressed steadily throughout development. Figure 8. Expression of two amino acid metabolism genes. Gene expression of asparagine synthases (AS) class I and II and aspartate amino transferases (AAT) enzymes is shown for the obligate holoparasite P. aegyptiaca and facultative hemiparasite T. versicolor. Gene expression is shown in fragments per kilobase million (FPKM). Stages 5.1 and 5.2 represent shoots and roots, respectively, harvested before the parasite emerges from the soil. Stages 6.1 and 6.2 represent above-ground vegetative tissue and floral buds, respectively, after the parasite emerges from the soil. FPKM levels for P. aegyptiaca AAT5 and AAT1 and T. versicolor AAT3 and AAT1 are the sums of the FPKM levels for two or more bioinformatically identified isoforms of the same gene. The gene expression data are from a previous study through the Parasitic Plant Genome Project, and contigs used are presented in Table S2.
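As a back-of-the-envelope illustration of the isotope-dilution reasoning used in the split-root experiment above (our own numerical sketch, not a calculation reported in the study), the enrichment observed in a parasite amino-acid pool can be written as a simple two-source mixture:

$$ a_{\mathrm{parasite}} \;\approx\; f\, a_{\mathrm{host}} + (1 - f)\, a_{\mathrm{nat}}, $$

where $f$ is the fraction of the pool derived from the host, $a_{\mathrm{host}} \approx 3$ atom % is the enrichment measured in host-root amino acids, and $a_{\mathrm{nat}} \approx 0.37$ atom % is the natural abundance of 15N. The roughly 1.5 atom % observed in attached T. versicolor then corresponds to $f \approx (1.5 - 0.37)/(3 - 0.37) \approx 0.4$, i.e. a little under half of the amino-acid nitrogen being host-derived, with the excess labeling of Asx consistent with preferential uptake or metabolism of that pool.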
Discussion This study aimed at understanding parasitic plant development and evolution as informed by metabolite profiling. We compared the metabolite profiles of the facultative parasite T. versicolor and its host M. truncatula with the obligate holoparasite P. aegyptiaca and its host A. thaliana. The focus was on the developmental stages that span the transition from free-living to parasitic in order to determine the impact of host-derived nutrition on the parasite. We have previously characterized these species with respect to transcriptomes, but metabolite profiles provide a fresh perspective on these questions. Metabolic Autonomy in Parasitic Plants Our data show the metabolic profile varies greatly between the parasite and host species studied. Total detectable proteogenic FAA levels in T. versicolor were approximately half those of the host M. truncatula until the levels in the host decrease following vascular attachment, whereas total detectable proteogenic FAA levels in P. aegyptiaca were approximately twice those of A. thaliana roots after attachment to the host (Figure 4). The higher FAA levels in a holoparasite species than in its host is in agreement with what was reported for below ground P. aegyptiaca shoots parasitizing Solanum lycopersicum and in older tubercles (roughly stage 4.2) of Phelipanche ramosa grown on Brasica napus. Because neither parasite species adopted the metabolite profile of its host root after attachment, our data support the idea that parasites maintain autonomy over their primary metabolism (Figures 2 and 3). Another question is whether the two parasite species are similar in their metabolic compositions. P. aegyptiaca and T. versicolor are related parasite species that share at least a core set of gene functions involved in haustorial development and establishment on hosts. Their common need to draw resources from the host could be hypothesized to lead to similar metabolite profiles in the two species, but this was not the case as the metabolite profiles of each species generally differed at all stages of growth ( Figure 2). The differences between the two parasite species were most evident during the transition from free-living to host-feeding stages. P. aegyptiaca showed a marked shift in metabolite profiles pre-and post-attachment to the host, whereas T. versicolor did not. As an obligate holoparasite, P. aegyptiaca is not able to grow without a host and completely relies on influx of host-derived metabolites such as sugars and amino acids. Therefore, the peak in total detectable proteogenic FAAs at stage 4.1 (Figure 4) likely reflected a strong influx of an unknown form of nitrogen from the host following access to host-derived nutrients. Similarly, these tubercles (stages 4.1 and 4.2) also had substantial general increases in sugar, sugar alcohol, and carboxylic acid levels, except for glucose and mannitol ( Figure 5 and Figure S2). However, the specific host-derived metabolites that are transported to these tubercules remain to be uncovered. In contrast, the T. versicolor metabolite profiles were not substantially changed following the connection of a haustorium to the host (Figures 3C and 4). These differences between the developmental transitions of the two parasite species are likely due to the nature of parasite development post-attachment (Figures 3 and 4). The development of lateral haustoria in T. 
versicolor is followed primarily by additional root growth, and the roots surrounding the haustorium do not transition into a different type of plant organ after haustorial attachment. In contrast, the P. aegyptiaca developmental transition from a seedling to a tubercle represents a substantial change in tissue types as well as nutrient source. We conclude that P. aegyptiaca and T. versicolor undergo shifts in their metabolite profiles that are more closely related to patterns of parasite development than to influences from host metabolism. This fits nicely with results of a study by Nativ et al. of P. aegyptiaca metabolite profiles that spanned stages of development ranging from the tubercle (older than our stage 4.2) to mature shoots, and in organs ranging from roots to flowers. This work showed that the greatest differences in metabolite profiles were between different organs, rather than across time in the same organs. Although this study did not include seedling or very young tubercle stages (as we present in the current work), taken together our data show that P. aegyptiaca metabolite profiles differ primarily between major life stages and organs. From this perspective, the lack of metabolite profile change in T. versicolor is expected because pre-and post-parasitizing roots are both essentially root organs. Carbon Metabolism The holoparasite P. aegyptiaca relies exclusively on the host for carbon resources, with a wide range of sugars, sugar alcohols, and carboxylic acids increasing after attachment to its host. In particular, increases in sugars such as fructose, glucose-6-P, and sucrose ( Figure 5) reflect this phloem-feeding nature of the parasite. The increase in levels of the TCA cycle intermediates fumarate, malate, and citrate ( Figure 5 and Figure S2) may indicate an acceleration in energy metabolism corresponding to a rapid increase in growth. The levels of these compounds do not change in T. versicolor after attachment to its host, perhaps reflecting the lower host dependence and the steadier continuation of growth of T. versicolor before and after haustorial attachment. One aspect of parasite metabolism that is conserved between the two species is mannitol accumulation. Parasites of the family Orobanchaceae convert hexoses into mannitol in order to increase sink strength. Mannitol is a major metabolite for these parasites and may be a conserved metabolic strategy within Orobanchaceae as it has been detected in genera across the family. Mannitol serves as a compatible solute, meaning it can accumulate within the cell without disrupting cell function. Our results confirm that mannitol is specific to the Orobanchaceae species in these systems ( Figure 5). Although trace amounts were detected in some of the host samples, it was not found in host roots that had no parasites, so this likely represents either backflow of the metabolite into the host or contamination due to small amounts of tissue from the haustorium embedded in the host root. In P. aegyptiaca, a reproducible, statistically significant decrease in mannitol levels at stage 4.1 was observed in comparison with other stages ( Figure 5). This is opposite to the patterns seen in P. ramosa on S. lycopersicum, in which mannitol was higher in young tubercles (comparable with stage 4.1 in the current study) vs. germinated seeds and mature tubercles (comparable with stages 1 and 4.2, respectively). 
Nitrogen Assimilation The acquisition of nitrogen is a dominant theme in studies of parasitic plant metabolism and our study confirms that parasitic plants rapidly accumulate amino acids involved in ammonium assimilation. Glutamine is the amino acid present at the highest levels in A. thaliana phloem and xylem sap, so the spike in glutamine levels in the parasite immediately after vascular attachment ( Figure 4) may reflect the high levels of glutamine available for influx. Other studies indicate that Phelipanche species preferentially accumulate glutamine, along with glutamate, asparagine, and aspartate. Although T. versicolor metabolite profiles were not affected by the attachment of the parasite to the host, the observed 15 N labeling of amino acids and increased total protein levels after attachment imply that this facultative parasite is able to direct host-derived nitrogen into proteins without changing the steady state pools of FAA (Figure 7). The increase in the levels and 15 N enrichment in protein-derived Asx in comparison with other amino acids raises the question of why Asx is not labeled to the same degree as other amino acids in feeding T. versicolor when the host-root Asx is labeled to the same extent as other amino acids. The levels and labeling of other observed amino acids did not change upon parasite attachment. A plausible explanation is that, due to preferential Asx uptake or metabolism, higher levels of organic 15 N from the host get mixed with the original non-labeled Asx from the parasite than for other amino acids, which leads to the observed increase in labeling over other amino acids in the attached parasite. If the original non-labeled Asx levels were negligible in the parasite, the labeling in Asx would theoretically become similar to that in the host roots. Based on the increase in the levels and 15 N enrichment in protein-derived Asx in comparison with other amino acids, it is likely the Asx is incorporated into an unknown Asx-rich protein. Collectively, both increased levels and labeling in Asx corroborate increased or preferential uptake and/or metabolism of Asx during facultative parasitism. These results point to the importance of Asx in post-attachment nitrogen metabolism of a facultative parasite. Asparagine synthetase is considered an important enzyme for nitrogen assimilation and balancing the carbon to nitrogen ratios in Orobanchaceae. For example, expression of the AS gene in T. versicolor is also known to be upregulated by exposure to host root exudates and it has been characterized in the related parasite, Striga hermonthica. Our data also point to an increase in aspartate levels in P. aegyptiaca after attachment to the host (Figure 4). Aspartate represents 15% of the total detectable proteogenic FAAs in the parasite at stage 4.1 and 30% at stage 4.2. The role of Asx as metabolic intermediates in processing nitrogen is probably important considering that A. thaliana phloem contains less aspartate than glutamate. While AS is important in holoparasite shoot development and in T. versicolor roots, we hypothesize that AAT plays a key role in amino acid metabolism in the holoparasite P. aegyptiaca during early development. AAT functions to transfer the amine group from glutamate onto oxaloacetate to form aspartate. Other Orobanchaceae species exhibit functional, though reduced, AAT activity. For example, Striga species and Orobanche minor exhibit about three-fourths and one-fourth, respectively, of the AAT activity in Zea mays. P. 
aegyptiaca has several AAT homologs matching with known A. thaliana paralogs. The expression of the P. aegyptiaca homolog of AT4G31990 in A. thaliana correlates well with the changes in aspartate levels. We found multiple transcript isoforms for AAT and AS in transcriptome data of the two parasite species (Figure S3). Expression of these transcripts indicates some specialization for developmental stages in P. aegyptiaca, with one form associated with seedlings (stages 1 and 2), while another form was expressed predominantly during tubercle and later stages (Figure 8). The finding that expression patterns differed substantially between the two parasite species is consistent with the different styles of parasitism and levels of host dependence. Parasite Effect on Host Plants It is also interesting to ask whether the host root metabolite profiles changed upon parasitism. Our data indicate no change in A. thaliana roots parasitized by P. aegyptiaca, but M. truncatula roots parasitized by T. versicolor showed a 75% drop in asparagine levels (Figure 4). Asparagine is the most abundant amino acid in the Medicago phloem sap, comprising approximately 70% of the total amino acids. The decrease in root amino acid levels may represent a means of defense. For the holoparasite Orobanche foetida grown on Vicia faba, a parasite-tolerant line was found to decrease its phloem amino acid concentration to 53% that of the susceptible line in response to parasitism, and this was postulated to be a potential defense mechanism. However, in our case, the M. truncatula host is still susceptible to parasitism, despite a dramatic decrease in asparagine levels. Decreased levels of amino acids in some hosts could also reflect the inability of these hosts to compensate for the additional substantial sink that parasitic plants represent. Further study is warranted to understand this response in infected hosts. Materials and Methods The stages of parasites collected centered on haustorial development and function, and corresponded to those defined as stages 1 through 4.2 by the Parasitic Plant Genome Project (Figure 1). The associated host roots were collected for all T. versicolor stages and for P. aegyptiaca stages 1, 3, and 4.1. Phelipanche aegyptiaca Growth on Arabidopsis thaliana Parasite and host plants were grown in a polyethylene bag culture system. The roots of 2-week-old A. thaliana plants were washed of soil and placed onto glass microfiber sheets (GF/A, Fisher Scientific) held in polyethylene bags. The bags were suspended vertically with the roots kept in the dark, and shoots were exposed to short-day conditions (10 h light, 14 h dark, approximately 100 µmol m−2 s−1 light). Quarter-strength Hoagland's solution was added to the bags as needed to maintain moisture. For stages not involving a host, identical conditions were used except that A. thaliana plants were absent from the bags. P. aegyptiaca seeds were surface sterilized with 1% (v/v) sodium hypochlorite for 8 min and rinsed thoroughly in sterile distilled water. Seeds were conditioned at room temperature (23 °C) for 6-7 days in the dark on moist glass microfiber disks in a Petri dish. The P. aegyptiaca seeds were stimulated to germinate by addition of GR-24 solution to create a final concentration of 1 ppm. The following day, P. aegyptiaca seeds were transferred to the A. thaliana roots or empty glass microfiber sheets in polyethylene bags using a fine brush.
Samples consisting of either whole parasites or host roots were collected for metabolite analysis using a stereo-zoom microscope to enable precise dissection. For stage 1, seeds germinated in the absence of host roots were collected four days after GR-24 stimulation. Roots of non-infected A. thaliana were also collected at this stage. Stage 2 was also collected four days after GR-24 treatment as for stage 1, with the exception that the seedlings were placed on a mat of A. thaliana roots to expose them to haustorial-inducing factors for six hours prior to harvesting. Stage 3 parasites were placed on A. thaliana roots and were harvested seven days after GR-24 stimulation, at which time seedlings were attached to the host root, but did not show signs of swelling that would indicate vascular connections. For this stage, approximately 1 cm sections of host root were also collected, by dissecting them away from the associated parasites. Stage 4.1 parasite and host roots were collected at 10 days post GR-24 stimulation when the parasite had developed into a small tubercle. Stage 4.2 P. aegyptiaca was collected 14 days after GR-24 stimulation and was characterized by the development of adventitious roots on the tubercles. Host roots were not collected at this stage. Triphysaria versicolor Growth on Medicago truncatula T. versicolor seeds were surface sterilized with 1% (v/v) sodium hypochlorite, washed, suspended in water, and placed at 4 °C for two days. On the third day, the seeds were placed onto agar media containing 1/4-strength Hoagland's solution in 0.6% Phytoagar and incubated for germination at 16 °C in a controlled environment chamber with 80% relative humidity and a 12 h light cycle. On day 14, T. versicolor seedlings were transferred onto plates containing 1/4-strength Hoagland's media supplemented with 1% sucrose and placed in a 25 °C growth room under a 16 h light cycle. Two rows of 10-12 T. versicolor seedlings were arrayed along the top and middle of the plates. For metabolomics, T. versicolor seedlings were treated either with or without a M. truncatula host and harvested at the indicated time points. For stage 1, T. versicolor and M. truncatula roots were harvested from their separate plates 22 days after germination of T. versicolor. For stages 2-4, M. truncatula seedlings were removed from their plates on day 22 and placed onto T. versicolor seedlings so that roots of M. truncatula seedlings were overlaid onto the ends of the T. versicolor roots. Stages 2, 3, and 4 were harvested 6, 24, and 72 h after the transfer of the M. truncatula seedlings, respectively. Tissue Harvesting The small size of young parasites and need for painstaking dissection in harvesting presented challenges for tissue harvesting. Two experiments were conducted for P. aegyptiaca, with the first one involving harvesting by placing tissue immediately into UPLC-grade methanol in microcentrifuge tubes held in a benchtop cooler rack pre-chilled to −20 °C. Samples were then transferred to −80 °C for storage. T. versicolor was also harvested this way. A second metabolomics analysis of P. aegyptiaca was conducted such that samples were collected directly into liquid nitrogen and held there until transfer to −80 °C for storage. For both methods, all samples were then freeze-dried and stored in a desiccator until metabolite profiling.
Metabolite Extraction and Polar Metabolite Level Analyses Metabolite extractions and analyses by gas chromatography-mass spectrometry (GC-MS) and Waters ultra-performance liquid chromatography (UPLC) were performed as previously described. This untargeted GC-MS method allowed analysis of relative levels of 11 polar metabolites, including sugars, sugar alcohols and acids, carboxylic acids, and other compounds, while the targeted UPLC method allowed analysis of 17 amino acids and organic amines. We detected many additional metabolites and peaks, but because of differences in sample runs, we excluded all metabolites that were below the detection limit in at least one experiment. However, we were also interested in plant-specific presence/absence of metabolites, so metabolites that were not below the limit of detection for a particular sample type were included. For instance, host plants are known to be incapable of synthesizing mannitol, but the parasites are able to make mannitol. Proteinogenic amino acids should be detectable. Some metabolites detected in T. versicolor were not detectable in P. aegyptiaca and vice versa, and they could contribute to species differences. It is likely that species differences lead to many of these differences in range of abundance in the more "obscure" polar metabolites. Because it was impossible to distinguish between species specificity and detection limit problems associated with batch effects, these metabolites were not included in statistical analyses. Briefly, polar metabolites were extracted from dry powdered samples using a modified biphasic Bligh and Dyer protocol, with 10 mM HCl as the aqueous solvent. For P. aegyptiaca, the starting material dry weights ranged from 0.79 to 1.99 mg and metabolites were extracted in 100 µL each of chloroform and aqueous phase solvents. T. versicolor was extracted in varying quantities of solvent based on starting dry weight, normalizing to 1 mg dry weight per 100 µL aqueous phase solvent, in order to accommodate the large M. truncatula host root diameters relative to that of T. versicolor. For GC-MS analysis, 50 µL of the aqueous phase were dried under a stream of N2 gas, trimethylsilyl (TMS) derivatives of polar metabolites pretreated with methoxyamine HCl (MOX) were prepared, and 1 µL of a 30 µL derivatization mixture was injected on an Agilent 7890A-5975C series GC-MS with a 30-m DB-5MS-DG column (0.25 mm × 0.25 µm, Agilent Technologies, Santa Clara, CA, USA). For UPLC analysis, 5 µL of aqueous phase were used in a 25-µL AccQ-Tag derivatization reaction and 0.5 µL were injected on an Acquity H-class UPLC system equipped with a fluorescence detector and analyzed according to the manufacturer's recommendations using the Waters 10.2 min method for analysis of cell culture amino acids (Waters, Milford, MA, USA). Metabolite-Level Data Processing Data were collected from three separate experiments, two with P. aegyptiaca and one with T. versicolor. The data included absolute and relative levels of FAAs and polar metabolites (sugars, sugar alcohols, and carboxylic acids), respectively. Four to five biological replicates per developmental stage were done for each experiment. Each biological replicate represented a pooling of enough individual seedlings (3 to 140, depending on the stage) to reach approximately 1 mg of dry sample mass. For GC-MS data, compound identification and level quantification were done as previously described.
Briefly, three different complementary spectral libraries (the NIST spectral library (National Institute of Standards and Technology, Gaithersburg, MD, USA), the FiehnLib spectral and retention time library, and an in-house generated spectral and retention time library using about 250 metabolite standards) were used to identify metabolites based on their spectra and retention times. The Automated Mass Spectrometry Deconvolution and Identification System (AMDIS, NIST) was used to deconvolute signals from coeluting compounds and to select specific fragments for each compound for quantitation. The enhanced mass selective detector ChemStation software version E.02.00.493 (Agilent Technologies, Santa Clara, CA, USA) was used in combination with the three above-mentioned libraries to obtain relative levels of polar metabolites. Manual curation was used on an individual basis (QEdit function of the ChemStation software version E.02.00.493) after automated peak area integration. The sugar alcohol ribitol was used as an internal standard to account for recovery. Peak areas of the multiply labeled internal standards were used to confirm the absence of matrix effects specific to particular categories of polar metabolites relative to ribitol (labeled glucose was used for sugars and sugar phosphates and labeled citrate for carboxylic and organic acids). For UPLC data, standard curves were generated for each amino acid and used for amino acid identification and to quantify absolute levels of 17 amino acids. Norvaline was used as an internal standard to account for recovery. All samples were also standardized relative to the sample dry weight used for extractions. PCA was conducted on metabolite level correlations for all experiments together and for each host-parasite species set separately (JMP Pro 12 software, SAS, Cary, NC, USA). A 3D score plot was generated, which includes the data from all four species together. Raw uncorrected metabolomics data are provided in Table S3. ANOVA was performed to evaluate the effect of stage, experimental batch, and species on each of the 28 metabolites. Separate ANOVA tables were generated: (i) ANOVA performed using the data from all three experiments, and (ii) ANOVA performed using data from each parasite/host system separately (Table S1). Histograms of the p-values were plotted (Figure S1). ANOVA tables and histograms were generated using the MetabolomicsBasics package within R, as described previously. Split Root System Sterilized M. truncatula seeds were germinated on 1/4-strength Hoagland's medium with micronutrients and 0.7% (w/v) Phyto agar. Such low 15N enrichment was needed for accurate subsequent stable isotope analysis by isotope-ratio mass spectrometry (IRMS) described below. One-week-old M. truncatula seedlings were transferred to rhizotrons and watered with 5 atom % 15N-labeled 1/4-strength Hoagland's media. Approximately three weeks later, M. truncatula plants with lateral roots of sufficient length were washed in deionized H2O and transplanted to split-root rhizotrons (Figure 6A). Roots in rhizotron A were watered with 5 atom % 15N-labeled 1/4-strength Hoagland's nutrient media and roots in rhizotron B were watered with H2O. One-week-old T. versicolor seedlings germinated on 1/4-strength Hoagland's nutrient plates were aligned with M. truncatula in rhizotron B (Figure 6B). Samples of M. truncatula and T. versicolor were collected 12 days after alignment (Mt-Infected and Tv-Attached, respectively). Roots of M. truncatula growing in rhizotron B without T.
versicolor were used as a control (Mt-Uninfected, Figure 6C). T. versicolor growing by itself in rhizotron watered with either H2O (Tv-Water) or 1 4 Hoagland's media (Tv-Hoagland's) were collected as controls ( Figure 6D). Plant material was freeze-dried and ground to a fine powder in a mortar cooled with liquid nitrogen. For each sample, 5-20 mg was submitted for 15 N isotope analysis and amino acids of total proteins analysis. Stable isotope abundance analysis on 15 N labeled amino acids was performed in the Stable Isotope Facility at UC Davis. Proteins were acid hydrolyzed with 6 M hydrochloric acid for 70 min at 105 C, derivatized as N-acetyl isoprotyl esters, and analyzed on a Thermo Trace 1310 GC coupled to a Thermo Scientific Delta V Advantage IRMS(GC-C-IRMS). Arginine, histidine, and serine were not derivatized using this method. Tyrosine and threonine were below the limit of detection and the limit of quantification, respectively. Composition of amino acids of total proteins were analyzed at the Molecular Structural Facility, UC Davis. Proteins were acid hydrolyzed with 6 M hydrochloric acid for 24 h at 110 C by L-8800 Hitachi amino acid analyzer, which separated amino acids by an ion-exchange column in HPLC. Total protein levels were calculated by adding up the levels of individual amino acids. Data for cysteine, methionine, and tryptophan were not available as these amino acids are destroyed during protein hydrolysis. These amino acids are present at trace levels in proteins and, therefore, not including them in total protein calculations should not significantly influence protein level estimations. Conclusions In summary, comparative metabolite profiling revealed that the metabolic response of parasitic Orobanchaceae to the availability of a host varies substantially between parasites of different nutritional dependency on their hosts. Our study showed that parasite metabolite profiles better reflected the developmental stage of the parasite than the resource availability of the host. This work also highlights the importance to the parasite of nitrogen acquisition from the host and supports this phenomenon as a potential target for control strategies against parasitism. More work is needed to understand precisely how parasites take nitrogen and amino acids from their hosts, and the extent to which amino acid levels and ratios have an effect on parasitism. Supplementary Materials: The following are available online at http://www.mdpi.com/2218-1989/9/6/114/s1, Figure S1: Histograms of p-values from analysis of variance of metabolite levels. Figure S2: Abundance of quantified metabolites and total detectable proteogenic free amino acids. Figure S3: Phylogenetic trees of asparagine synthase and aspartate amino transferase homologs. Table S1: Analysis of variance of metabolite levels. Table S2: Contigs used for expression analysis of two amino acid metabolism genes. Table S3: Metabolite profiling data. |
In a break with decadeslong diplomatic tradition, President-elect Donald Trump spoke directly with the president of Taiwan, a move that drew an irritated response from China and looked set to cast uncertainty over U.S. policy toward Asia.
It is perhaps unprecedented for a U.S. president or president-elect to speak directly with a leader of Taiwan, a self-governing island the U.S. broke diplomatic ties with in 1979.
In first comments apparently meant to downplay the significance of the call, Chinese Foreign Minister Wang Yi said Saturday that the contact between Taiwan's president and Trump was "just a small trick by Taiwan" that he believed would not change U.S. policy toward China, according to Hong Kong's Phoenix TV.
"The one-China policy is the cornerstone of the healthy development of China-U.S. relations and we hope this political foundation will not be interfered with or damaged," Wang was quoted as saying.
Washington has pursued a so-called "one China" policy since 1979, when it shifted diplomatic recognition of China from the government in Taiwan to the communist government on the mainland. Under that policy, the U.S. recognizes Beijing as representing China but retains unofficial ties with Taiwan.
A statement from Trump's transition team said he spoke Friday with Taiwanese President Tsai Ing-wen, who offered her congratulations on his election win.
Trump tweeted later that Tsai "CALLED ME." He also groused about the reaction to the call: "Interesting how the U.S. sells Taiwan billions of dollars of military equipment but I should not accept a congratulatory call."
The Taiwanese presidential office said Trump and Tsai discussed issues affecting Asia and the future of U.S. relations with Taiwan.
"The (Taiwanese) president is looking forward to strengthening bilateral interactions and contacts as well as setting up closer cooperative relations," the statement said.
Tsai also told Trump that she hoped the U.S. would support Taiwan in its participation in international affairs, the office said, in an apparent reference to China's efforts to isolate Taiwan from global institutions such as the United Nations.
It said the two also discussed "promoting domestic economic development and strengthening national defense" to improve the lives of ordinary people.
Taiwan's presidential office spokesman Alex Huang said separately that Taiwan's relations with China and "healthy" Taiwan-U.S. relations can proceed in parallel. "There is no conflict (in that)," he told reporters in Taipei.
The White House learned of the conversation after it had taken place, said a senior Obama administration official, who requested anonymity because of the sensitive diplomatic relations involved.
China's embassy in Washington and its foreign ministry and Taiwan Affairs Office in Beijing did not respond to requests for comment.
Friday's call is the starkest example yet of how Trump has flouted diplomatic conventions since he won the Nov. 8 election. He has apparently undertaken calls with foreign leaders without guidance customarily lent by the State Department, which oversees U.S. diplomacy.
"President-elect Trump is just shooting from the hip, trying to take phone calls of congratulatory messages from leaders around the world without consideration for the implications," said Bonnie Glaser, senior adviser for Asia at the Center for Strategic and International Studies in Washington.
Glaser said such a call was "completely unprecedented" or at least has never been known publicly. China is likely to be trying to identify whether this signals any intent on the part of Trump to alter long-standing U.S. policy toward Taiwan, Glaser said.
"They will hope that this is a misstep, but I think privately, they will definitely seek to educate this incoming president and ensure that he understands the sensitivity of Taiwan," she said.
In particular, China would want to highlight to the incoming administration the risks involved in any form of signal from the United States that it supports strengthening a relationship with Taiwan under a president that Beijing views as pro-independence, Glaser added.
Last month, Trump had a call with Chinese President Xi Jinping during which Trump's office described him as saying he believed the two would have "one of the strongest relationships for both countries."
Despite China's muted response Saturday, concern about Trump's policy toward China is growing, said Shi Yinhong of Renmin University in Beijing, one of China's best-known international relations scholars.
"In the mind of Chinese leaders, concerns are mounting about U.S. policy toward China" under Trump's administration, Shi said.
Tsai was elected in January and took office in May. The traditional independence-leaning policies of her party have strained relations with Beijing.
The call with Trump could "convince people in Taiwan that the island can establish good relations with the U.S. and encourage (Tsai) to continue to resist pressure from Beijing," Shi said.
Over the decades, the status of Taiwan has been one of the most sensitive issues in U.S.-China relations. China regards Taiwan as part of its territory to be retaken by force, if necessary, if it seeks independence. It would regard any recognition of a Taiwanese leader as a head of state as unacceptable.
Taiwan split from the Chinese mainland amid civil war in 1949. The U.S. policy acknowledges the Chinese view over sovereignty, but considers Taiwan's status as unsettled. The U.S. has legal commitments to help Taiwan maintain the ability to defend itself.
Taiwan's official Central News Agency said Edwin Feulner, former president of the Heritage Foundation, a Washington-based conservative think tank, was a "crucial figure" in setting up communication channels between the sides, leading to the call. Feulner could not immediately be reached to comment on the report, which cited anonymous sources.
Feulner had met with Tsai in October when he led a delegation from the think tank on a trip to Taiwan, according to a release at the time from Taiwan's presidential office. That release says Tsai called Feulner a "longtime friend to Taiwan" and conveyed her gratitude to his foundation for its support.
Ned Price, a spokesman for the White House National Security Council, said Trump's conversation does not signal any change to long-standing U.S. policy on cross-strait issues.
In Beijing, a U.S. business group said it expected the new U.S. administration to respect the status quo.
"American business operating in Asia needs certainty and stability," said James Zimmerman, chairman of the American Chamber of Commerce in China. "The new administration needs to get up to speed quickly on the historical tensions and complex dynamics of the region." |
import unittest2
import featureflow as ff
import numpy as np
from zounds.util import simple_in_memory_settings
from .preprocess import Reshape, PreprocessingPipeline
class ReshapeTests(unittest2.TestCase):
def do_setup(self, shape, new_shape):
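        # Build a one-step pipeline around the Reshape preprocessor, fit it on
        # random training data of `shape`, then transform fresh random data and
        # invert the transform so the round trip can be checked by the assertions.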
@simple_in_memory_settings
class Model(ff.BaseModel):
flattened = ff.PickleFeature(
Reshape,
new_shape=new_shape,
store=False)
pipeline = ff.PickleFeature(
PreprocessingPipeline,
needs=(flattened,),
store=True)
training = np.random.random_sample(shape)
_id = Model.process(flattened=training)
model = Model(_id)
data = np.random.random_sample(shape)
transformed = model.pipeline.transform(data)
inverted = transformed.inverse_transform()
return data, transformed.data, inverted
def do_assertions(self, original_shape, new_shape, expected_shape):
data, transformed, inverted = self.do_setup(original_shape, new_shape)
self.assertEqual(expected_shape, transformed.shape)
self.assertEqual(original_shape, inverted.shape)
np.testing.assert_allclose(data.ravel(), transformed.ravel())
np.testing.assert_allclose(data.ravel(), inverted.ravel())
def test_reshape(self):
self.do_assertions(
original_shape=(10, 100),
new_shape=(10, 10),
expected_shape=(10, 10, 10))
def test_expand_multiple_dimensions(self):
self.do_assertions(
original_shape=(10, 40),
new_shape=(1, -1, 1),
expected_shape=(10, 1, 40, 1))
def test_expand_middle_dimension(self):
self.do_assertions(
original_shape=(10, 40),
new_shape=(1, -1),
expected_shape=(10, 1, 40))
def test_expand_last_dimension(self):
self.do_assertions(
original_shape=(10, 40),
new_shape=(-1, 1),
expected_shape=(10, 40, 1))
def test_flatten_one_dimensional(self):
self.do_assertions(
original_shape=(10,),
new_shape=tuple(),
expected_shape=(10,))
def test_flatten_two_dimensional(self):
self.do_assertions(
original_shape=(10, 40),
new_shape=(-1,),
expected_shape=(10, 40))
def test_flatten_three_dimensional(self):
self.do_assertions(
original_shape=(10, 40, 20),
new_shape=(-1,),
expected_shape=(10, 800))
|
package com.ezshare.server.model;
import java.io.DataOutputStream;
import java.io.IOException;
import javax.net.ssl.SSLSocket;
import org.pmw.tinylog.Logger;
import EZShare.Resource;
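/**
 * Simple value holder for one subscription made over a secure (SSL) channel:
 * the subscriber id, a result counter, the resource template being matched,
 * and the SSL socket together with the output stream used to send responses.
 */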
public class SecureSubscriber {
public String id;
public int resultSize;
public Resource subscribeTemplate;
public SSLSocket subscriberSocket;
public DataOutputStream streamOut;
public SecureSubscriber(String id, int resultSize, Resource subscribeTemplate, SSLSocket subscriberSocket) {
this.id = id;
this.resultSize = resultSize;
this.subscribeTemplate = subscribeTemplate;
this.subscriberSocket = subscriberSocket;
try {
this.streamOut = new DataOutputStream(subscriberSocket.getOutputStream());
} catch (IOException e) {
Logger.error(e);
}
}
}
|
Failed Democratic presidential candidate Howard Dean attacked Steve Bannon, Breitbart News, and GOP strategist Brian Darling on an MSNBC panel recently, branding 35 million Breitbart readers as “racist.”
Brian Darling, a GOP strategist and Breitbart News contributing columnist, appeared on an MSNBC panel recently where former governor of Vermont and failed Democratic Party presidential candidate Howard Dean attacked White House Policy Adviser Stephen Miller and referred to White House Chief Strategist and former Breitbart News Executive Chairman Steve Bannon as a “virulent anti-Semite.”
Discussing Bannon, Dean stated in the clip, “He’s a virulent anti-Semite and — as Joy [Reid] pointed out earlier in the clip — has been recognized as somebody who is a major figure in the alt-right. What’s the difference between that and being a Nazi? I don’t know.” Dean continued to say, “these three guys don’t belong in the White House.”
Darling jumped to Bannon’s defense, saying that he personally knew Bannon and Miller and that it was outrageous to accuse either man of holding antisemitic views, “to people calling Steve Bannon a Nazi. I know Steve Bannon, I have actually written three op-eds this year for Breitbart.com. Breitbart is a conservative website,” said Darling. “Am I a racist for writing for it?” Darling said. Former Bush ethics chief Richard Painter also attacked Darling, saying, “It’s a racist platform and you know it.”
“Breitbart.com is read by 35 million people a month. Are they racist?” asked Darling. Howard Dean was quick to reply, saying, “yup, sure are!” before asking Darling, “how do you excuse what he [Bannon] has said about Jews?” Darling then pointed out that many of Breitbart News’ staff members are Jewish, saying, “you know that Breitbart is run by two Jewish editors over there, if he’s such an antisemite why did he hire [them]?”
Darling then went on to say, “he hired two Jewish editors to run Breitbart so he [Bannon] doesn’t seem that antisemitic to me,” to which Dean replied, “that’s like saying people in the South had black slaves, therefore, they aren’t anti-black, don’t be ridiculous!”
“Yeah but you call him a Nazi, that is outrageous!” said Darling to which Dean replied, “I think he fits the description.”
Watch the full interview below:
Lucas Nolan is a reporter for Breitbart News, covering issues of free speech and online censorship. Follow him on Twitter @LucasNolan or email him at [email protected]. |
// Problem Link:
// https://practice.geeksforgeeks.org/problems/implementing-floyd-warshall2042/1
// TC: O(n^3)
// SC: O(1)
#include <bits/stdc++.h>
using namespace std;
#define ll long long
#define deb(x) cout << #x << ": " << x << "\n"
void floydWarshall(vector<vector<int>> &matrix)
{
int n = matrix.size();
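    // Convention used here: matrix[i][j] == -1 means "no path known yet" and is
    // treated as infinity; the diagonal holds 0 (distance from a node to itself).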
for (int k = 0; k < n; ++k)
{
for (int i = 0; i < n; ++i)
{
for (int j = 0; j < n; ++j)
{
if (matrix[i][k] == -1 or matrix[k][j] == -1)
continue;
else if (matrix[i][j] == -1)
matrix[i][j] = matrix[i][k] + matrix[k][j];
else
matrix[i][j] = min(matrix[i][j], matrix[i][k] + matrix[k][j]);
}
}
}
}
void display(vector<vector<int>> &matrix)
{
int n = matrix.size();
for (int i = 0; i < n; ++i)
{
for (int j = 0; j < n; ++j)
cout << matrix[i][j] << " ";
cout << endl;
}
cout << endl;
}
void solve()
{
vector<vector<int>> matrix{{0, 1, 43}, {1, 0, 6}, {-1, -1, 0}};
display(matrix);
floydWarshall(matrix);
display(matrix);
}
int main()
{
ios_base::sync_with_stdio(0), cin.tie(0), cout.tie(0);
// freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
int t{1}, i{1};
// cin >> t;
while (t--)
{
// cout << "Case #" << i++ << ": ";
solve();
}
return 0;
} |
N = int(input())
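# Recursive factorial: N! = N * (N-1)!, with factorial(1) and factorial(0) both returning 1.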
def factorial(N) :
if N <= 1 :
return 1
return N * factorial(N-1)
print(factorial(N)) |
import java.util.*;
public class ReverseString {
public static void main(String[] args) {
Scanner sc= new Scanner(System.in);
System.out.print("Enter a string:\n");
String str= sc.nextLine();
String reversed = reverseString(str);
System.out.println("The reversed string is:\n" + reversed);
}
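    // Recursive helper: the reverse of a string is reverse(rest) followed by the first character.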
public static String reverseString(String str)
{
if (str.isEmpty()){
return str;
}
return reverseString(str.substring(1)) + str.charAt(0);
}
}
|
/* ====================================================================
* Copyright (c) 1999-2001 Carnegie Mellon University. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* This work was supported in part by funding from the Defense Advanced
* Research Projects Agency and the National Science Foundation of the
* United States of America, and the CMU Sphinx Speech Consortium.
*
* THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
* ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY
* NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ====================================================================
*
*/
/*
* lm.c -- Interpolation of various language models.
*
* HISTORY
*
* $Log$
* Revision 1.8 2004/12/10 16:48:56 rkm
* Added continuous density acoustic model handling
*
*
* 01-Apr-97 <NAME> (<EMAIL>) at Carnegie Mellon University
* Started, based on earlier FBS6 version.
*/
/* Currently, interpolation of dynamic cache LM and static trigram LM */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "s2types.h"
#include "CM_macros.h"
#include "basic_types.h"
#include "list.h"
#include "hash.h"
#include "lmclass.h"
#include "lm_3g.h"
#include "lm.h"
#include "cache_lm.h"
#include "search_const.h"
#include "msd.h"
#include "dict.h"
#include "kb.h"
#include "err.h"
#include "log.h"
static cache_lm_t *clm = NULL;
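/* Dynamic cache LM; stays NULL (and is bypassed in the scoring functions below)
 * until lm_cache_lm_init() is called. */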
int32 lm_tg_score (int32 w1, int32 w2, int32 w3)
{
int32 cscr, tscr, remwt;
lm_t *lm3g;
if (! clm)
return (lm3g_tg_score (w1, w2, w3));
lm3g = lm_get_current ();
/* Get cache LM score and apply language weight */
cscr = cache_lm_score (clm, w2, w3, &remwt);
cscr *= lm3g->lw;
/* Get static trigram LM score, apply remaining weight */
tscr = lm3g_tg_score (w1, w2, w3);
tscr += remwt * lm3g->lw;
/* Return MAX of static trigram LM and dynamic cache LM scores (approx to sum) */
return (cscr > tscr) ? cscr : tscr;
}
int32 lm_bg_score (int32 w1, int32 w2)
{
int32 cscr, tscr, remwt;
lm_t *lm3g;
if (! clm)
return (lm3g_bg_score (w1, w2));
lm3g = lm_get_current ();
/* Get cache LM score and apply language weight */
cscr = cache_lm_score (clm, w1, w2, &remwt);
cscr *= lm3g->lw;
/* Get static trigram LM score, apply remaining weight */
tscr = lm3g_bg_score (w1, w2);
tscr += remwt * lm3g->lw;
/* Return MAX of static trigram LM and dynamic cache LM scores (approx to sum) */
return (cscr > tscr) ? cscr : tscr;
}
int32 lm_ug_score (int32 w)
{
return (lm3g_ug_score (w));
}
void lm_cache_lm_init ( void )
{
if (clm)
return;
/* Hack!! Hardwired parameters to cache_lm_init */
clm = cache_lm_init (0.0001, 0.001, 0.04, 100, 0.07);
}
void lm_cache_lm_add_ug (int32 w)
{
int32 ugscr;
lm_t *lm3g;
if (! clm)
return;
lm3g = lm_get_current ();
ugscr = lm3g_ug_score (w) * lm3g->invlw;
if (ugscr >= clm->ugprob_thresh)
return;
#if 0
E_INFO("Adding unigram %s (scr %d, thresh %d) to cache LM\n",
kb_get_word_str(w), ugscr, clm->ugprob_thresh);
#endif
cache_lm_add_ug (clm, w);
}
void lm_cache_lm_add_bg (int32 w1, int32 w2)
{
if (! clm)
return;
#if 0
E_INFO("Adding bigram %s,%s to cache LM\n",
kb_get_word_str (w1), kb_get_word_str (w2));
#endif
cache_lm_add_bg (clm, w1, w2);
}
void lm_cache_lm_dump (char *file)
{
if (! clm)
return;
cache_lm_dump (clm, file);
}
void lm_cache_lm_load (char *file)
{
if (! clm)
return;
cache_lm_load (clm, file);
}
|
Beliefs about the Mind as Doxastic Inventional Resource: Freud, Neuroscience, and the Case of Dr. Spocks Baby and Child Care Commonsense beliefs about the mind are routinely operative in human discourse, where they serve as prolific resources from which to generate discourse/understanding while often remaining in what Pierre Bourdieu calls the realm of the undiscussed. As a study of how mind-related beliefs serve as a resource for rhetorical invention, this essay provides insight into an important and pervasive category of doxastic beliefs and brings into focus the powerful undertow of doxas routine discursive work. It does so, in part, by analyzing Dr. Benjamin Spocks best-selling child-rearing manual, The Common Sense Book of Baby and Child Care, together with reactions it elicited from readers. These show how mind-related beliefs can generate discourse while being suppressed in the discursive iteration, resulting in fragments, enthymemes, implications, and presences/absences. Moreover, published in multiple editions over many years, Spocks book demonstrates the inventional implications of historical changes in widely shared beliefs about the mind. |
//
// Const.h
// Taobao shopping cart
//
// Created by 朱献国 on 20/09/2017.
// Copyright © 2017 朱献国. All rights reserved.
//
#import <UIKit/UIKit.h>
// End of a macro definition: do not add a colon
#define ScreenW [UIScreen mainScreen].bounds.size.width
#define ScreenH [UIScreen mainScreen].bounds.size.height
UIKIT_EXTERN CGFloat const PayViewH;// Height of the bottom checkout view
UIKIT_EXTERN CGFloat const CellH; // Height of a cell
//***********CELL**************//
UIKIT_EXTERN CGFloat const NameSize; // Font size of the name text
UIKIT_EXTERN CGFloat const PriceSize;// Font size of the price text
UIKIT_EXTERN CGFloat const NumSize; // Font size of the quantity text
UIKIT_EXTERN CGFloat const TipSize; // Font size of the size (spec) text
//***********PayView**************//
UIKIT_EXTERN CGFloat const SelectBtnSize; // Font size of the select-all button
UIKIT_EXTERN CGFloat const PayBtnSize; // Font size of the checkout button
UIKIT_EXTERN CGFloat const TotalPriceLabelSize; // Font size of the total price label
#define spTableViewH (ScreenH - self.navigationController.navigationBar.bounds.size.height - 20 - PayViewH)// Height of the upper product list view
#define RandomColor [UIColor colorWithRed:arc4random_uniform(256) / 255.0 green:arc4random_uniform(256) / 255.0 blue:arc4random_uniform(256) / 255.0 alpha:1.f]
#define CellColor [UIColor whiteColor]
#define SecHeaderColor [UIColor colorWithRed:250 / 255.0 green:250 / 255.0 blue:250 / 255.0 alpha:1.f]
#define TableBGColor [UIColor colorWithRed:240 / 255.0 green:240 / 255.0 blue:240 / 255.0 alpha:1.f]
#define BorderColor [UIColor colorWithRed:199 / 255.0 green:199 / 255.0 blue:199 / 255.0 alpha:1.f]
#define DivideLineColor [UIColor colorWithRed:199 / 255.0 green:199 / 255.0 blue:199 / 255.0 alpha:1.f]
|
//-------------------------------------------------
//
// \class L1MuGMTParametersProducer
//
// Description: A class to produce the L1 GMT emulator Parameters record in the event setup
//
//
// Author :
// I. Mikulec
//
//--------------------------------------------------
#include "L1TriggerConfig/GMTConfigProducers/interface/L1MuGMTParametersProducer.h"
L1MuGMTParametersProducer::L1MuGMTParametersProducer(const edm::ParameterSet& ps) {
m_ps = new edm::ParameterSet(ps);
setWhatProduced(this, &L1MuGMTParametersProducer::produceL1MuGMTParameters);
setWhatProduced(this, &L1MuGMTParametersProducer::produceL1MuGMTChannelMask);
}
L1MuGMTParametersProducer::~L1MuGMTParametersProducer() { delete m_ps; }
//
// member functions
//
// ------------ methods called to produce the data ------------
std::unique_ptr<L1MuGMTParameters> L1MuGMTParametersProducer::produceL1MuGMTParameters(
const L1MuGMTParametersRcd& iRecord) {
auto gmtparams = std::make_unique<L1MuGMTParameters>();
gmtparams->setEtaWeight_barrel(m_ps->getParameter<double>("EtaWeight_barrel"));
gmtparams->setPhiWeight_barrel(m_ps->getParameter<double>("PhiWeight_barrel"));
gmtparams->setEtaPhiThreshold_barrel(m_ps->getParameter<double>("EtaPhiThreshold_barrel"));
gmtparams->setEtaWeight_endcap(m_ps->getParameter<double>("EtaWeight_endcap"));
gmtparams->setPhiWeight_endcap(m_ps->getParameter<double>("PhiWeight_endcap"));
gmtparams->setEtaPhiThreshold_endcap(m_ps->getParameter<double>("EtaPhiThreshold_endcap"));
gmtparams->setEtaWeight_COU(m_ps->getParameter<double>("EtaWeight_COU"));
gmtparams->setPhiWeight_COU(m_ps->getParameter<double>("PhiWeight_COU"));
gmtparams->setEtaPhiThreshold_COU(m_ps->getParameter<double>("EtaPhiThreshold_COU"));
gmtparams->setCaloTrigger(m_ps->getParameter<bool>("CaloTrigger"));
gmtparams->setIsolationCellSizeEta(m_ps->getParameter<int>("IsolationCellSizeEta"));
gmtparams->setIsolationCellSizePhi(m_ps->getParameter<int>("IsolationCellSizePhi"));
gmtparams->setDoOvlRpcAnd(m_ps->getParameter<bool>("DoOvlRpcAnd"));
gmtparams->setPropagatePhi(m_ps->getParameter<bool>("PropagatePhi"));
gmtparams->setMergeMethodPhiBrl(m_ps->getParameter<std::string>("MergeMethodPhiBrl"));
gmtparams->setMergeMethodPhiFwd(m_ps->getParameter<std::string>("MergeMethodPhiFwd"));
gmtparams->setMergeMethodEtaBrl(m_ps->getParameter<std::string>("MergeMethodEtaBrl"));
gmtparams->setMergeMethodEtaFwd(m_ps->getParameter<std::string>("MergeMethodEtaFwd"));
gmtparams->setMergeMethodPtBrl(m_ps->getParameter<std::string>("MergeMethodPtBrl"));
gmtparams->setMergeMethodPtFwd(m_ps->getParameter<std::string>("MergeMethodPtFwd"));
gmtparams->setMergeMethodChargeBrl(m_ps->getParameter<std::string>("MergeMethodChargeBrl"));
gmtparams->setMergeMethodChargeFwd(m_ps->getParameter<std::string>("MergeMethodChargeFwd"));
gmtparams->setMergeMethodMIPBrl(m_ps->getParameter<std::string>("MergeMethodMIPBrl"));
gmtparams->setMergeMethodMIPFwd(m_ps->getParameter<std::string>("MergeMethodMIPFwd"));
gmtparams->setMergeMethodMIPSpecialUseANDBrl(m_ps->getParameter<bool>("MergeMethodMIPSpecialUseANDBrl"));
gmtparams->setMergeMethodMIPSpecialUseANDFwd(m_ps->getParameter<bool>("MergeMethodMIPSpecialUseANDFwd"));
gmtparams->setMergeMethodISOBrl(m_ps->getParameter<std::string>("MergeMethodISOBrl"));
gmtparams->setMergeMethodISOFwd(m_ps->getParameter<std::string>("MergeMethodISOFwd"));
gmtparams->setMergeMethodISOSpecialUseANDBrl(m_ps->getParameter<bool>("MergeMethodISOSpecialUseANDBrl"));
gmtparams->setMergeMethodISOSpecialUseANDFwd(m_ps->getParameter<bool>("MergeMethodISOSpecialUseANDFwd"));
gmtparams->setMergeMethodSRKBrl(m_ps->getParameter<std::string>("MergeMethodSRKBrl"));
gmtparams->setMergeMethodSRKFwd(m_ps->getParameter<std::string>("MergeMethodSRKFwd"));
gmtparams->setHaloOverwritesMatchedBrl(m_ps->getParameter<bool>("HaloOverwritesMatchedBrl"));
gmtparams->setHaloOverwritesMatchedFwd(m_ps->getParameter<bool>("HaloOverwritesMatchedFwd"));
gmtparams->setSortRankOffsetBrl(m_ps->getParameter<unsigned>("SortRankOffsetBrl"));
gmtparams->setSortRankOffsetFwd(m_ps->getParameter<unsigned>("SortRankOffsetFwd"));
gmtparams->setCDLConfigWordDTCSC(m_ps->getParameter<unsigned>("CDLConfigWordDTCSC"));
gmtparams->setCDLConfigWordCSCDT(m_ps->getParameter<unsigned>("CDLConfigWordCSCDT"));
gmtparams->setCDLConfigWordbRPCCSC(m_ps->getParameter<unsigned>("CDLConfigWordbRPCCSC"));
gmtparams->setCDLConfigWordfRPCDT(m_ps->getParameter<unsigned>("CDLConfigWordfRPCDT"));
gmtparams->setVersionSortRankEtaQLUT(m_ps->getParameter<unsigned>("VersionSortRankEtaQLUT"));
gmtparams->setVersionLUTs(m_ps->getParameter<unsigned>("VersionLUTs"));
return gmtparams;
}
std::unique_ptr<L1MuGMTChannelMask> L1MuGMTParametersProducer::produceL1MuGMTChannelMask(
const L1MuGMTChannelMaskRcd& iRecord) {
auto gmtchanmask = std::make_unique<L1MuGMTChannelMask>();
gmtchanmask->setSubsystemMask(m_ps->getParameter<unsigned>("SubsystemMask"));
return gmtchanmask;
}
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import webbrowser
from . import System
class Workload(object):
"""
Base class for Android related workloads
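
    A rough usage sketch (the workload name below is illustrative; it must match
    one of the Workload subclasses available on the target):

        wl = Workload.getInstance(te, 'SomeWorkload')
        wl.run(out_dir='./results', collect='ftrace')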
"""
_packages = None
_availables = {}
def __init__(self, test_env):
"""
        Initialize the workloads available on the specified test environment
        test_env: target test environment
"""
self._te = test_env
self._target = test_env.target
self._log = logging.getLogger('Workload')
# Set of data reported in output of each run
self.trace_file = None
self.nrg_report = None
def _adb(self, cmd):
return 'adb -s {} {}'.format(self._target.adb_name, cmd)
@classmethod
def _subclasses(cls):
"""
Recursively get all subclasses
"""
nodes = cls.__subclasses__()
return nodes + [child for node in nodes for child in node._subclasses()]
@classmethod
def _check_availables(cls, test_env):
"""
List the supported android workloads which are available on the target
"""
_log = logging.getLogger('Workload')
# Getting the list of installed packages
cls._packages = test_env.target.list_packages()
_log.debug('Packages:\n%s', cls._packages)
_log.debug('Building list of available workloads...')
for sc in Workload._subclasses():
_log.debug('Checking workload [%s]...', sc.__name__)
if sc.package in cls._packages or sc.package == '':
cls._availables[sc.__name__.lower()] = sc
_log.info('Supported workloads available on target:')
_log.info(' %s', ', '.join(cls._availables.keys()))
@classmethod
def getInstance(cls, test_env, name):
"""
Get a reference to the specified Android workload
"""
# Initialize list of available workloads
if cls._packages is None:
cls._check_availables(test_env)
if name.lower() not in cls._availables:
msg = 'Workload [{}] not available on target'.format(name)
raise ValueError(msg)
return cls._availables[name.lower()](test_env)
def run(self, out_dir, collect='',
**kwargs):
        raise RuntimeError('Not implemented')
def tracingStart(self):
if 'ftrace' in self.collect and 'systrace' in self.collect:
msg = 'ftrace and systrace cannot be used at the same time'
raise ValueError(msg)
# Start FTrace
if 'ftrace' in self.collect:
self.trace_file = os.path.join(self.out_dir, 'trace.dat')
self._log.info('FTrace START')
self._te.ftrace.start()
# Start Systrace (mutually exclusive with ftrace)
elif 'systrace' in self.collect:
self.trace_file = os.path.join(self.out_dir, 'trace.html')
# Get the systrace time
match = re.search(r'systrace_([0-9]+)', self.collect)
self._trace_time = match.group(1) if match else None
self._log.info('Systrace START')
self._systrace_output = System.systrace_start(
self._te, self.trace_file, self._trace_time)
# Initialize energy meter results
if 'energy' in self.collect and self._te.emeter:
self._te.emeter.reset()
self._log.info('Energy meter STARTED')
def tracingStop(self):
# Collect energy meter results
if 'energy' in self.collect and self._te.emeter:
self.nrg_report = self._te.emeter.report(self.out_dir)
self._log.info('Energy meter STOPPED')
# Stop FTrace
if 'ftrace' in self.collect:
self._te.ftrace.stop()
self._log.info('FTrace STOP')
self._te.ftrace.get_trace(self.trace_file)
# Stop Systrace (mutually exclusive with ftrace)
elif 'systrace' in self.collect:
if not self._systrace_output:
self._log.warning('Systrace is not running!')
else:
self._log.info('Waiting systrace report [%s]...',
self.trace_file)
if self._trace_time is None:
# Systrace expects <enter>
self._systrace_output.sendline('')
self._systrace_output.wait()
# Dump a platform description
self._te.platform_dump(self.out_dir)
def traceShow(self):
"""
Open the collected trace using the most appropriate native viewer.
The native viewer depends on the specified trace format:
- ftrace: open using kernelshark
- systrace: open using a browser
In both cases the native viewer is assumed to be available in the host
machine.
"""
if 'ftrace' in self.collect:
os.popen("kernelshark {}".format(self.trace_file))
return
if 'systrace' in self.collect:
webbrowser.open(self.trace_file)
return
self._log.warning('No trace collected since last run')
# vim :set tabstop=4 shiftwidth=4 expandtab
|
Linking Cultural Capital With Subjective Well-Being and Social Support This study examines the ways in which different forms of cultural capital are associated with college students subjective well-being and social support. Results show that when social capital is accounted for, cultural capital derived from sports participation was positively associated with subjective well-being and social support. Further, the size and density of discussion networks about culture were positively associated with well-being and social support in general, while the heterogeneity of networks was negatively related. Findings from this study extend previous research on cultural capital by drawing attention to the inclusive aspect of cultural capital, examining online cultural participation as well as multiple forms of cultural activities including popular and sporting events, applying the literature on interpersonal discussion networks to the context of culture and demonstrating the value of communicative action about cultural experiences, and understanding the implications of cultural capital in a college setting. |
Sialidase in the guinea pig pulmonary parenchyma. Increased activity in the cytosolic and microsomal subcellular fractions after stimulation with Bacillus Calmette Gurin. The sialidase activity was assayed in the guinea pig pulmonary parenchyma after removal of bronchoalveolar cells by washing. After differential centrifugation of the crude tissue homogenate, sialidase activities were measured in the subcellular fractions using the fluorogenic substrate 2-(4-methylumbelliferyl)-alpha-D-N-acetylneuraminate. Sialidase activities were found in the lysosomal-enriched (17,000 x g pellet), in the microsomal (105,000 x g pellet) and in the cytosolic (105,000 x g supernatant) fractions. Microsomal and lysosomal forms of sialidase had an optimum activity at pH 3.6-3.8, whereas the optimum for the cytosolic form was pH 4.6. The activity of all three forms was inhibited by Cu2+, whereas 1 mM Zn2+ and 0.5 mM Ca2+ activated the lysosomal and the cytosolic forms, respectively. In the crude homogenate taken from lungs of Bacillus Calmette Gurin-(BCG-) stimulated guinea pigs, the sialidase activity was increased by 43% (p = 0.025) 3 weeks after the end of the treatment. The cytosolic (+246%) and microsomal (+51%) sialidase activities were significantly increased, whereas the lysosomal sialidase activity was not changed significantly by BCG stimulation. |
package com.avasthi.roadcompanion.adapters;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.CheckBox;
import android.widget.TextView;
import com.avasthi.android.apps.roadbuddy.backend.roadMeasurementApi.model.Amenity;
import com.avasthi.android.apps.roadbuddy.backend.roadMeasurementApi.model.Toll;
import com.avasthi.roadcompanion.R;
import com.avasthi.roadcompanion.activities.RCAmenityActivity;
import java.util.List;
/**
* Created by vavasthi on 26/11/15.
*/
public class RCAmenityListAdapter extends RecyclerView.Adapter<RCAmenityListAdapter.DataObjectHolder> {
RCAmenityActivity activity;
private final Amenity[] amenities;
private int position = -1;
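    // Index of the most recently bound item; stays -1 until onBindViewHolder has run.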
public RCAmenityListAdapter(List<Amenity> dataset, RCAmenityActivity activity) {
this.amenities = dataset.toArray(new Amenity[dataset.size()]);
this.activity = activity;
}
@Override
public DataObjectHolder onCreateViewHolder(ViewGroup parent, int viewType) {
View v = LayoutInflater.from(parent.getContext()).inflate(R.layout.amenity_item, parent, false);
DataObjectHolder holder = new DataObjectHolder(v);
return holder;
}
@Override
public void onBindViewHolder(DataObjectHolder holder, int position) {
holder.name.setText(amenities[position].getName());
holder.food.setChecked(amenities[position].getHasRestaurant());
holder.restroom.setChecked(amenities[position].getHasRestrooms());
holder.fuel.setChecked(amenities[position].getHasPetrolStation());
this.position = position;
// activity.updateActivityUI();
}
@Override
public int getItemCount() {
return amenities.length;
}
public int getPosition() {
return position;
}
public Amenity getSelectedAmenity() {
return amenities[position];
}
public static class DataObjectHolder extends RecyclerView.ViewHolder implements View.OnClickListener {
private final TextView name;
private final CheckBox food;
private final CheckBox fuel;
private final CheckBox restroom;
private int position;
public DataObjectHolder(View item) {
super(item);
name = (TextView) item.findViewById(R.id.name);
food = (CheckBox) item.findViewById(R.id.has_restaurant);
restroom = (CheckBox) item.findViewById(R.id.has_restroom);
fuel = (CheckBox) item.findViewById(R.id.has_fuel);
item.setOnClickListener(this);
}
@Override
public void onClick(View v) {
position = getAdapterPosition();
}
}
}
|
Evaluation of World Health Organization-Recommended Hand Hygiene Formulations As a result of the coronavirus disease pandemic, commercial hand hygiene products have become scarce and World Health Organization (WHO) alcohol-based hand rub formulations containing ethanol or isopropanol are being produced for hospitals worldwide. Neither WHO formulation meets European Norm 12791, the basis for approval as a surgical hand preparation, nor satisfies European Norm 1500, the basis for approval as a hygienic hand rub. We evaluated the efficacy of modified formulations with alcohol concentrations in mass instead of volume percentage and glycerol concentrations of 0.5% instead of 1.45%. Both modified formulations met standard requirements for a 3-minute surgical hand preparation, the usual duration of surgical hand treatment in most hospitals in Europe. Contrary to the originally proposed WHO hand rub formulations, both modified formulations are appropriate for surgical hand preparation after 3 minutes when alcohol concentrations of 80% wt/wt ethanol or 75% wt/wt isopropanol along with reduced glycerol concentration (0.5%) are used. Because commercial products are hardly or no longer available due to the coronavirus disease (COVID-19) pandemic, alcohol-based hand rub formulations for hygienic and surgical hand treatment published by the World Health Organization (WHO) in 2009 for local production in developing countries are now being produced for use in hospitals worldwide. As shown previously, neither the formulation based on ethanol 80% vol/vol (WHO I) nor that based on isopropanol 75% vol/vol (WHO II) meets the efficacy requirements of the European Norm (EN) 12791, which must be fulfilled to obtain approval as a surgical hand preparation in Europe. Each WHO-recommended formulation is also insufficient for hygienic hand antisepsis when 3 mL is applied for 30 seconds according to the test method described in EN 1500. The requirements can be met only if the volume is doubled (6 mL) and exposure is extended to 60 seconds. But sufficient efficacy has been achieved by using modified WHO formulations with an increased alcohol concentration of 80% wt/wt ethanol or 75% wt/wt isopropanol at 3 mL for 30 seconds. On the basis of those results, we modified both WHO formulations by increasing their alcohol concentrations through changing their volume percentages into weight percentages and by prolonging the duration of application from 3 to 5 minutes. These modifications have been shown to render the immediate effects of both formulations noninferior to the reference of EN 12791, but this improvement was not observed for the so-called 3-hour effect (i.e., 3 hours after hand antisepsis). Because the high glycerol concentration (1.45% vol/vol) of the original formulations has been shown to exert a negative influence on the 3-hour efficacy of alcohols, we performed further studies by reducing the glycerol content of the WHO formulations by 50%. By increasing the alcohol concentration by ≈5% and reducing glycerol concentrations to 0.725%, both modified WHO formulations meet the efficacy requirements of EN 12791 when used for 5 minutes. Although both new formulations were successfully tested for a 5-minute application, our suggestions for improving efficacy were not accepted by the WHO because the common duration for surgical hand preparation in most hospitals is 3 minutes.
Furthermore, no information on dermal tolerability and healthcare workers' acceptance of these modified formulations was available. In 2019, Menegueti et al showed that a modified WHO I formulation containing only 0.5% glycerol led to better ratings of skin tolerance than the original WHO formulation containing 1.45% or a modification containing 0.75% glycerol. Because all such alternative formulations require testing for not only dermal tolerability but also for bactericidal performance, we investigated the efficacy of these modified WHO formulations (mass instead of volume percentage ethanol or isopropanol and 0.5% instead of 1.45% glycerol) according to EN 12791, with an application duration of 3 minutes, as commonly used in surgical theaters in Europe. We recruited 24 volunteers from the Institute for Hygiene and Applied Immunology, Medical University of Vienna (Vienna, Austria), to participate in the study. Exclusion criteria were age <18 years and skin breaks on hands (e.g., cuts, abrasions or other skin disorders). Nails were short and clean and volunteers agreed to not use any antibacterial soap or other antibacterial substance during the trial, starting from 1 week before testing. Volunteers were also asked to not use any hand rub or hand cream on trial days. All volunteers provided written informed consent. The study protocol was approved by the institutional ethics committee of the Medical University of Vienna (ethical vote no. 2092/2019). Culture media were as described in EN 12791. For sampling and dilution fluids, we used tryptic soy broth (CASO broth; Merck). For counting plates, we used tryptic soy agar (CASO agar; Merck). Neutralizing agents were not necessary for any of the tested modified WHO formulations because even dilution in pure broth without supplement in previous validation tests has been shown to neutralize any antimicrobial effect. We compared the efficacy of the modified WHO formulations with that of the standardized reference surgical hand treatment described in EN 12791. We used a Latin-square design with 3 groups, each with 8 randomly allocated volunteers, and as many experimental runs as there were formulations, including the reference. In every run, we tested all hand treatment procedures concurrently. At the end of the third test run, every volunteer had used each formulation once. We spaced test runs apart by 1 week to allow reversion of normal skin flora. We used the test method described in EN 12791.
In brief, after a preparatory hand wash for 1 minute with 5 mL of 20% nonmedicated soap applied onto wet hands to remove transient bacterial flora and any other soil, participants rinsed their hands under running tap water and dried them with soft paper towels. Pretreatment values were established by rubbing and kneading the fingertips, including the thumbs, of both hands for 1 minute at the base of a petri dish (diameter 9 cm) containing 10 mL of sampling fluid, one for each hand. Subsequently, surgical hand antisepsis was performed according to the standardized hand rub procedure of EN 12791 by applying and rubbing as many 3-mL portions of the study formulations (i.e., WHO I modified, WHO II modified, or reference) onto both hands up to the wrists as necessary to keep the hands wet for 3 minutes. According to EN 12791, the efficacy of a preoperative hand procedure is determined immediately and 3 hours after hand antisepsis. Thus, to assess the posttreatment values of a formulation, we sampled one randomly selected hand as described for the pretreatment values immediately after hand antisepsis (immediate effect). The other hand was gloved and sampled 3 hours later to assess the 3-hour effect. We performed quantitative surface cultures from all sampling fluids and dilutions on tryptic soy agar, incubated counting plates for a total of 48 hours at 36°C ± 1°, and counted colony-forming units. For statistical analyses, we expressed all colony counts per mL of sampling fluid as decadic logarithms. From the intra-individual differences between log10 pretreatment minus log10 posttreatment values, we calculated individual log10 reduction factors separately for immediate and 3-hour effects. We tested pretreatment values of study formulations and the reference formulation for significant differences by means of the Friedman analysis of variance with an agreed significance level of p = 0.05. Subsequently, we tested the differences between the log10 reduction factors from each study formulation and the appropriate values of the reference for significance by a nonparametric noninferiority test according to Hodges-Lehmann. We rejected inferiority of a study formulation and assumed noninferiority if the Hodges-Lehmann upper 97.5% confidence limits for the differences in log10 bacterial reductions between study formulations and reference treatment were smaller than the agreed inferiority margin of 0.75 log10 (immediate effect) or 0.85 log10 (3-hour effect). We set the level of significance at p = 0.025 (1-sided). Furthermore, we used the Wilcoxon matched-pairs, signed-ranks test to test for a suspected sustained effect at p = 0.01 (1-sided) if, as concluded from a higher mean log10 reduction, a study formulation was suspected to be more efficacious than the reference antisepsis procedure 3 hours after antisepsis. Results We observed no significant differences between the means of the log10 pretreatment bacterial counts for the immediate and 3-hour efficacy tests. Hence, the baseline for each study formulation can be considered equivalent. Overall, immediate effects were comparable to that of the reference alcohol of EN 12791; typical magnitude was mean log10 reductions of >2.00 (Table). Each modified formulation was even more effective than the reference alcohol immediately after hand antisepsis. Each modified formulation also met the 3-hour efficacy requirements of EN 12791.
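A purely illustrative aside, not part of the original article: the log10 reduction-factor bookkeeping described in the statistics paragraph above can be sketched in a few lines of Python. The numbers, array names, and the scipy call below are assumptions for demonstration only; the Hodges-Lehmann noninferiority bound used in the study is not reproduced here.
import numpy as np
from scipy.stats import wilcoxon
# Hypothetical colony counts (CFU/mL) for 24 volunteers: pretreatment and
# posttreatment values for one study formulation and for the EN 12791 reference.
rng = np.random.default_rng(0)
pre_study, post_study = rng.uniform(1e4, 1e6, 24), rng.uniform(1e2, 1e4, 24)
pre_ref, post_ref = rng.uniform(1e4, 1e6, 24), rng.uniform(1e2, 1e4, 24)
# Individual log10 reduction factors: log10(pretreatment) - log10(posttreatment).
rf_study = np.log10(pre_study) - np.log10(post_study)
rf_ref = np.log10(pre_ref) - np.log10(post_ref)
# One-sided Wilcoxon matched-pairs signed-ranks test for a suspected sustained
# effect (study formulation more efficacious than the reference).
stat, p = wilcoxon(rf_study, rf_ref, alternative='greater')
print(f"mean RF study={rf_study.mean():.2f} reference={rf_ref.mean():.2f} p={p:.3f}")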
The mean log 10 bacterial reduction of the formulation based on isopropanol was greater by 0.15 log 10 than that of the reference alcohol, but this difference was not significant (p = 0.01 by Wilcoxon matched-pairs signedranks test), so sustained efficacy cannot be confirmed. Discussion The COVID-19 pandemic has led to scarcity of commercial hand antisepsis agents, and healthcare institutions worldwide are seeking alternatives. Since the end of February 2020, pharmacies in Europe have been producing the WHO-recommended formulations either for sale or as donations for personal use by the general population or use in healthcare settings. Use of hygienic hand preparations made with the original WHO-recommended formulations might be justifiable to prevent infection or transmission of pathogens outside patient care. However, to be approved in Europe, preparations for hygienic hand antisepsis used in healthcare facilities must meet the bactericidal efficacy requirements of EN 1500 under practical use conditions. Both WHO-recommended formulations failed the EN 1500 requirements with use of 3 mL for 30 seconds, the common duration of application in hospitals in Europe. Sufficient bactericidal efficacy could be achieved with the original WHO-recommended formulation with 6 mL in 60 seconds or with 3 mL in 30 seconds when modified formulations with increased alcohol concentrations of 80% wt/wt ethanol or 75% wt/wt isopropanol were used. In general, a shortening of the necessary exposure time may help medical personnel comply with hand hygiene standards. A recent study (A. Kratzel et al., unpub. data, https://www.biorxiv.org/conte nt/10.1101/2020.03.10.986711v1) showed that severe acute respiratory coronavirus 2 can be inactivated within 30 seconds by both WHO-recommended formulations but also by modifications as proposed by us in 2013 or used by Allegranzi et al. in a before-after intervention cohort study. The use of WHO-recommended formulations in hospitals, including for surgical hand preparation, is paramount despite the lack of commercial agents. In Europe, before a product is allowed to be used for surgical hand preparation, its efficacy must be evaluated in the laboratory on the hands of volunteers according to EN 12791, the most stringent available in vivo test method for efficacy testing. This testing ensures that results are generated under controlled conditions but also under as near as possible practical in vivo conditions. The bacterial reduction is measured directly after hand antisepsis on one hand (immediate effect) and after 3 hours on the other (gloved) hand (3-hour effect). According to the requirements of the norm, a formulation shall not be significantly less efficacious than a reference procedure at both times (i.e., immediately and 3 hours after application). The 2009 WHO guideline reported that WHO I did not pass EN 12791 under 2 laboratory testing conditions and WHO II under 1 of 2 laboratory testing conditions. Even prolonging the duration of application to 5 minutes, the longest duration allowed by EN 12791, did not achieve a favorable outcome for the original WHO formulations. Increasing the alcohol concentration of both formulations by ≈5% (by changing to weight percentage concentrations) rendered the immediate effect of the 2 formulations noninferior to the reference; unfortunately, the 3-hour effect was still significantly less effective than the reference alcohol. The reason for these results was attributed to the high concentration of glycerol (1.45% vol/vol). 
Although the 3-hour effects of each formulation with reduced glycerol content (0.725%) were rendered noninferior to the reference, glycerol-free preparations were even more effective than reference EN 12791. We have been able to show how the WHO formulations can be improved to meet the European standards; however, our proposals have not yet been endorsed. One of the arguments given was the lack of data on acceptance and tolerability for the modified formulations. Another argument was the necessary application duration of 5 minutes for surgical hand preparation, which does not correspond with common practice. Frequent use of alcohol-based hand rubs can cause skin dryness unless emollients or humectants such as glycerol are added to the formulation. A recent study evaluated the skin tolerability of healthcare workers to the original WHO formulation containing 1.45% glycerol against 3 other concentrations (0%, 0.5%, and 0.75%) of glycerol in a tropical climate healthcare setting. Dermal application of glycerol, a trihydroxy alcohol, increases the endogenous delivery of glycerol with improvement of stratum corneum hydration, skin barrier function, and mechanical properties. It also inhibits stratum corneum lipid phase transition, protection against irritating stimuli, and enhancement of desmosomal degradation. A modified WHO formulation containing only 0.5% glycerol leads to better ratings of skin tolerance than the original formulation and may therefore offer the best balance between skin tolerance and antimicrobial efficacy. In addition, it is useful to have the same alcohol-based hand rub formulation in the surgical setting and in other medical settings, especially if products are scarce. Because glycerol availability is also critical during the current pandemic, lowering the glycerol concentration might improve availability of these alcohol-based formulations in areas with limited supplies, such as developing countries. In this study, we were able to show, once again, that the effect on the resident skin flora of the original WHO formulations can be improved if the concentration of the alcohols is increased by using weight instead of volume percentage. In addition, by further reducing the glycerol content from 1.45% to 0.725% or to 0.50%, the 3-hour effects of each formulation can be improved to such an extent that the requirement of the European test standard is already ensured after 3 minutes of application. Although the criteria for use as a product regulated by the US Food and Drug Administration differ from the EN requirements, these results could also be of interest to US healthcare providers. Reductions achieved with the modified formulations were >1 log 10 step higher than those achieved with the original WHO-recommended formulations when applied for 3 minutes for both immediate and 3-hour effects (Figure). On the basis of these results and considering the current situation, we believe that the original WHO formulations should be urgently reconsidered. We therefore recommend a modification of the WHO I formulation with 80% wt/wt ethanol, 0.125% vol/vol hydrogen peroxide, and 0.50% vol/vol glycerol and a modification of the WHO II formulation with 75% wt/wt isopropanol, 0.125% vol/vol hydrogen peroxide, and 0.50% vol/vol glycerol. |
def stop(self):
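        """Signal the reaper to shut down (State.SHUTTINGDOWN) and block until it has stopped."""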
if isinstance(self.thread, threading.Thread):
if self.thread.is_alive():
with self.state.lock:
if not self.state.should_stop():
self.logger.debug("Sending reaper State.SHUTTINGDOWN signal")
self.state.set(State.SHUTTINGDOWN)
if self.state.has_value(State.SHUTTINGDOWN):
self.state.wait_while(State.SHUTTINGDOWN)
self.logger.info(f"Reaper shutdown : {self.state.get()}.")
else:
self.logger.debug(f"Reaper tried to stop but is stopped already: {self.state.get()}.")
else:
self.logger.debug("Reaper tried to stop but was never started") |
from rest_framework.test import APITestCase
from django.urls import reverse
from books.models import Book
class BooksGETNoFiltersTestCase(APITestCase):
def test_get_book_empty_list(self):
response = self.client.get(reverse('book-api'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [])
def test_get_single_book_in_json(self):
Book.objects.create(title='Test')
response = self.client.get(reverse('book-api'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
self.assertIn("('title', 'Test')", str(response.data))
def test_get_many_books_in_json(self):
for nr in range(1, 100):
Book.objects.create(title=f'Test_{nr}')
response = self.client.get(reverse('book-api'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 10)
self.assertIn("('title', 'Test_1')", str(response.data))
self.assertNotIn("('title', 'Test_99')", str(response.data))
self.assertIsNotNone(Book.objects.filter(title='Test_99').first())
class BooksGETQueryFiltersTestCase(APITestCase):
authors = ['Tolkien', 'Sapkowski', 'Żeromski', 'Tokarczuk', '<NAME>']
titles = ['The Hobbit', 'Alice in the Wonderland']
pub_dates = ['2020-12-31', '1987-06-05', '2020-01-01', '1987-12-12', '2022-01-10']
def setUp(self):
for nr in range(5):
Book.objects.create(title=self.titles[0], author=self.authors[nr], language='en',
pub_date=self.pub_dates[nr])
for nr in range(2):
Book.objects.create(title=self.titles[1], author=self.authors[nr], language='pl',
pub_date=self.pub_dates[nr])
Book.objects.create(title=self.titles[1], author=self.authors[2], language='en')
def test_filtering_by_author(self):
response = self.client.get(reverse('book-api') + f'?inauthor={self.authors[0]}')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
response = self.client.get(reverse('book-api') + f'?inauthor={self.authors[4]}')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
response = self.client.get(reverse('book-api') + '?inauthor=Author not in sb')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_filtering_by_title(self):
response = self.client.get(reverse('book-api') + f'?intitle={self.titles[0]}')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 5)
response = self.client.get(reverse('book-api') + f'?intitle={self.titles[1]}')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 3)
response = self.client.get(reverse('book-api') + '?intitle=Title not in db')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_filtering_by_language_tag(self):
response = self.client.get(reverse('book-api') + '?language=en')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 6)
response = self.client.get(reverse('book-api') + '?language=pl')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
response = self.client.get(reverse('book-api') + '?language=wrong')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_filtering_by_language_name(self):
response = self.client.get(reverse('book-api') + '?language=english')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 6)
response = self.client.get(reverse('book-api') + '?language=polish')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
response = self.client.get(reverse('book-api') + '?language=wrong')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_filtering_by_title_and_language(self):
response = self.client.get(reverse('book-api') + f'?language=pl&intitle={self.titles[1]}')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
response = self.client.get(reverse('book-api') + f'?language=en&intitle={self.titles[1]}')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
def test_filtering_by_date_from(self):
        response = self.client.get(reverse('book-api') + '?date-from=2000-01-01')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 4)
def test_filtering_by_date_to(self):
        response = self.client.get(reverse('book-api') + '?date-to=2000-01-01')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 3)
def test_filtering_by_date_from_and_date_to(self):
        response = self.client.get(reverse('book-api') + '?date-from=2000-01-01&date-to=2020-12-31')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 3)
|
#ifndef GATHVL_VEC_ANIMATOR_H
#define GATHVL_VEC_ANIMATOR_H
#include "animator.h"
#include "../types/vec.h"
struct vec_animator : animator {
vec *vec_ptr;
vec start_vec, end_vec;
void update(const int frame) override;
vec_animator(int start, int end, vec *vec_ptr, vec startvec, vec endvec) :
animator(start, end), vec_ptr(vec_ptr), start_vec(startvec),
end_vec(endvec) {}
};
#endif //GATHVL_VEC_ANIMATOR_H
|
// src/ClassExercises/arrays/sumOneDimensionalArray.java
package ClassExercises.arrays;
public class sumOneDimensionalArray {
public static void main(String[] args) {
System.out.println(arraySum(new int[] {1, 2, 5}));
}
    static int arraySum(int[] arr) {
        // Start from 0 so an empty array sums to 0 instead of throwing.
        int sum = 0;
        for (int i = 0; i < arr.length; i++) {
            sum += arr[i];
        }
        return sum;
    }
}
|
// collect data from input elements
// validate and create proper message
func (d *CollectorDef) collect() {
name := d.InputName.getValue()
val := d.InputValue.getValue()
if validate(name) && validate(val) {
notifications.Dispatch(DataCollected{Item: Item{Name: name, Value: val}})
} else {
notifications.Dispatch(ErrorValidation{Msg: "Input error"})
}
} |
Cambridge United suffered a disappointing 2-0 defeat at Accrington last night to put a dent in their Sky Bet League Two play-off bid.
Stanley skipper Sean McConville opened the scoring in the first half and Shay McCartan doubled their advantage after the break.
But United had been dealt a blow in the warm-up when Scott Wharton was forced to withdraw with a hamstring niggle, with Mark Roberts stepping into the breach at short notice.
It meant United had just five substitutes on the bench as they were already one short after Adam McGurk, who had only made his comeback from five months out with a thigh injury in last Saturday's goalless draw at home to Stevenage, was ruled out with illness.
However, here is a gallery featuring a selection of the best pictures of the action at the Wham Stadium. |
declare function capitalize(str: string): string;
export = capitalize;
|
# src/tetris/tetris_game.py
import time
import curses
import math
import random
import numpy as np
from collections import deque
from offsets import *
TIME_PER_TICK = 0
TIME_BETWEEN_ROUNDS = 0.5
N_ROLLING_AVG = 500
ROTATE_LEFT = 0
ROTATE_RIGHT = 1
MOVE_RIGHT = 2
MOVE_LEFT = 3
MOVE_DOWN = 4
DO_NOTHING = 5
BOARD_HEIGHT = 12
BOARD_WIDTH = 6
MOVES_MAP = [ lambda x:x.rotate_left(),
lambda x:x.rotate_right(),
lambda x:x.move_right(),
lambda x:x.move_left(),
lambda x:x.move_down(),
lambda x:x ]
POSSIBLE_MOVE_NAMES = [
"ROTATE_LEFT",
"ROTATE_RIGHT",
"MOVE_RIGHT",
"MOVE_LEFT",
"MOVE_DOWN",
"DO_NOTHING" ]
POSSIBLE_MOVES = np.array([
ROTATE_LEFT,
ROTATE_RIGHT,
MOVE_RIGHT,
MOVE_LEFT,
MOVE_DOWN,
DO_NOTHING ],np.int8)
class Tetromino:
def __init__(self, offsets, x, y, rotation_index):
self.offsets = offsets
self.x = x
self.y = y
self.rotation_index = rotation_index
def rotate_left(self):
return Tetromino(self.offsets, self.x, self.y, (self.rotation_index - 1) % 4)
def rotate_right(self):
return Tetromino(self.offsets, self.x, self.y, (self.rotation_index + 1) % 4)
def move_left(self):
return Tetromino(self.offsets, self.x - 1, self.y, self.rotation_index)
def move_right(self):
return Tetromino(self.offsets, self.x + 1, self.y, self.rotation_index)
def move_down(self):
return Tetromino(self.offsets, self.x, self.y - 1, self.rotation_index)
def occupied_squares(self, x=None, y=None, rotation_index=None):
x = self.x if x is None else x
y = self.y if y is None else y
rotation_index = self.rotation_index if rotation_index is None else rotation_index
squares = []
for index_y, row in enumerate(self.offsets[rotation_index]):
for index_x, occupied in enumerate(row):
if occupied:
# hacky math to figure out relative positions from the offsets
# offsets are centered on 1,1 in the 4x4 grid definition
squares.append([(3 - index_x) + x - 1, (3 - index_y) + y - 1, occupied])
return squares
class Board:
def __init__(self, width=BOARD_WIDTH, height=BOARD_HEIGHT):
self.height = height
self.width = width
self.starting_x = math.floor(width / 2) - 1
self.starting_y = height - 2
self.board_array = np.array([[0 for x in range(0, self.width)] for y in range(0, self.height)], np.int8)
self.tetronimo = None
self.current_height = 0
def __clear_line__(self, y):
self.board_array = np.concatenate((
np.delete(self.board_array, (y), axis=0),
[[0 for x in range(0,self.width)]]),
axis=0)
def __freeze_tetronimo__(self):
n_cleared_rows = 0
for x, y, v in self.tetronimo.occupied_squares():
self.current_height = max(self.current_height, y + 1) # 0 based
if not self.is_out(x, y):
self.board_array[y][x] = 1
if all(self.board_array[y]):
self.__clear_line__(y)
n_cleared_rows += 1
return n_cleared_rows
def is_occupied(self, x, y):
return self.board_array[y][x] != 0
def is_out(self, x, y):
return 0 > y or y >= self.height or x >= self.width or 0 > x
def can_place_piece(self, tetronimo):
for s_x, s_y, _ in tetronimo.occupied_squares():
if self.is_out(s_x, s_y) or self.is_occupied(s_x, s_y):
return False
return True
def tetronimo_settled(self):
return self.tetronimo and not self.can_place_piece(self.tetronimo.move_down())
def start_tetronimo(self, tetronimo):
tetronimo.x = self.starting_x
tetronimo.y = self.starting_y
self.tetronimo = tetronimo
def set_tetronimo(self, tetronimo):
self.tetronimo = tetronimo
def tick(self):
old_height = self.current_height
points = 0
n_cleared_rows = 0
tetronimo_frozen = False
if self.tetronimo is not None:
if self.tetronimo_settled():
n_cleared_rows = self.__freeze_tetronimo__()
if n_cleared_rows > 0:
points = [0, 20, 50, 125, 300][n_cleared_rows]
points += 1
tetronimo_frozen = True
return points, n_cleared_rows, tetronimo_frozen
@classmethod
def copy_state(cls, board, tetronimo):
copy = np.array(board.board_array, copy=True, ndmin=3)
if tetronimo is not None:
for x, y, value in tetronimo.occupied_squares():
copy[0][y][x] = -1
return copy
class Tetris:
def __init__(self, agent):
self.agent = agent
self.reset_tetronimos()
def play_visually(self):
curses.wrapper(self.play)
def tick(self, board):
next_tetronimo = board.tetronimo.move_down()
if board.can_place_piece(next_tetronimo):
board.set_tetronimo(next_tetronimo)
new_reward, lines_cleared, tetronimo_frozen = board.tick()
self.n_cleared += lines_cleared
return new_reward, tetronimo_frozen
def play(self, screen=None):
print('Begin playing!')
if screen is not None:
self.init_colors()
running_scores = deque([], N_ROLLING_AVG)
n_total_moves = 0
n_games = 0
total_start_time = time.time()
print('output: n_game, avg_score, avg_q_value, n_lines, loss, accuracy, training_runs, n_pieces, n_total_moves, time')
while self.agent.should_continue():
board = Board()
continue_game = True
self.reset_tetronimos()
board.start_tetronimo(self.generate_tetronimo(board))
game_start = time.time()
ticks = 0
reward = 0
self.n_cleared = 0
n_pieces = 0
while continue_game:
n_pieces += 1
continue_game, episode_reward, episode_length = self.play_episode(board)
reward += episode_reward
n_total_moves += episode_length
running_scores.append(reward)
n_games += 1
self.agent.n_games = n_games
game_size = self.agent.current_game_length
self.agent.game_over(reward)
if screen is not None:
print_game_over(board, board.tetronimo, reward, screen)
else:
avg = (sum(running_scores)/float(len(running_scores)))
avg_q_value = 0
avg_loss = 0
avg_accuracy = 0
if hasattr(self.agent, 'recent_q_values') and len(self.agent.recent_q_values) > 0:
avg_q_value = sum(self.agent.recent_q_values) / float(len(self.agent.recent_q_values))
if hasattr(self.agent, 'recent_losses') and len(self.agent.recent_losses) > 0:
avg_loss = self.agent.recent_losses[-1]
avg_accuracy = self.agent.recent_accuracies[-1]
print('output: {}, {}, {}, {}, {}, {}, {}, {}, {}, {}'.format(n_games, reward, avg_q_value, self.n_cleared, avg_loss, avg_accuracy, 0, n_pieces, n_total_moves, time.time() - total_start_time))
def play_episode(self, board):
episode_reward = 0
episode_length = 0
plays_since_tick_counter = 0
tetronimo_frozen = False
while not tetronimo_frozen:
state_t0 = Board.copy_state(board, board.tetronimo)
action = self.agent.choose_action(board)
plays_since_tick_counter += 1
episode_length += 1
if action == MOVE_DOWN:
while not board.tetronimo_settled():
board.set_tetronimo(board.tetronimo.move_down())
r, tetronimo_frozen = self.tick(board)
episode_reward += r
else:
next_tetronimo = MOVES_MAP[action](board.tetronimo)
if board.can_place_piece(next_tetronimo):
board.set_tetronimo(next_tetronimo)
if plays_since_tick_counter >= 6:
r, tetronimo_frozen = self.tick(board)
episode_reward += r
self.agent.on_move_end()
if tetronimo_frozen:
state_t1 = Board.copy_state(board, board.tetronimo)
self.agent.on_episode_end(episode_reward, episode_length)
next_tetronimo = self.generate_tetronimo(board)
if board.can_place_piece(next_tetronimo):
self.agent.handle(state_t0, action, episode_reward, state_t1)
board.start_tetronimo(next_tetronimo)
return True, episode_reward, episode_length
else:
self.agent.handle(state_t0, action, -20, state_t1)
episode_reward += -20
return False, episode_reward, episode_length
else:
state_t1 = Board.copy_state(board, board.tetronimo)
self.agent.handle(state_t0, action, 0, state_t1)
def reset_tetronimos(self):
# self.tetronimos = [T, L, J, O, I, S, Z, T, L, J, O, I, S, Z] # Official rules
# random.shuffle(self.tetronimos)
self.tetronimos = [O] * 50 # Simple test
def generate_tetronimo(self, board):
if len(self.tetronimos) == 0:
self.reset_tetronimos()
return Tetromino(self.tetronimos.pop(), board.starting_x, board.starting_y, 0)
def init_colors(self):
curses.start_color()
curses.use_default_colors()
colors = [ curses.COLOR_BLUE,
curses.COLOR_CYAN,
curses.COLOR_GREEN,
curses.COLOR_MAGENTA,
curses.COLOR_RED,
curses.COLOR_WHITE,
curses.COLOR_YELLOW ]
curses.init_pair(0, curses.COLOR_WHITE, curses.COLOR_BLACK)
for i, c in enumerate(colors):
curses.init_pair(i + 1, c, curses.COLOR_BLACK)
def print_game_over(board, tetronimo, reward, screen):
resting_state = Board.copy_state(board, tetronimo)
tetris_print(resting_state, reward, screen)
screen.addstr(board.height + 7, 0, 'GAME OVER!')
screen.refresh()
time.sleep(TIME_BETWEEN_ROUNDS)
def tetris_print(array, reward, screen):
curses.noecho()
curses.curs_set(0)
screen.erase()
for y, row in reversed(list(enumerate(array[0]))):
for x, value in enumerate(row):
character = "\u2588" if value else "."
color = curses.color_pair(value)
screen.addch(len(array[0]) - y, 3*x, character, color)
screen.addch(len(array[0]) - y, 3*x + 1, character, color)
screen.addstr(len(array[0]) + 5, 0, 'Reward: {}'.format(reward))
screen.refresh()
|
// Repository: Copypeng/alibabacloud-httpdns-ios-sdk
//
// HttpdnsScheduleCenterRequest.h
// AlicloudHttpDNS
//
// Created by ElonChan(地风) on 2017/4/11.
// Copyright © 2017 <EMAIL>. All rights reserved.
//
#import <Foundation/Foundation.h>
@interface HttpdnsScheduleCenterRequest : NSObject
- (NSDictionary *)queryScheduleCenterRecordFromServerSync;
@end
|
Website Review: Islamist websites Before discussing Islamist websites, the very term Islamist needs qualification. In general, it refers to modern political movements in which Islam plays a central ideological role. However, the term often holds a pejorative connotation. It implies that Islamist movements are, in fact, distinct from Islam itself, that they superficially invoke Islam for political ends, without any real understanding of, or commitment to, the religion. With this usage, the term Islamist drips from a variety of tongues and pens. It appears in Islamophobic polemics as well as intersectarian Muslim debates. Still, the term remains useful for a more neutral designation of Muslim groups and individuals that directly engage the issue of politics. The Islamist internet embodies many traditional characteristics of online media. It displays a geographic fluidity that bypasses national borders and controls. Nonetheless, it remains bound by the imbalances of the global information infrastructure. Its relatively low costs of entry and production allow a diversity of voices. Yet, powerful institutions and governments offer innovative attempts to restrict content and monopolize discourse. Islamist websites tend to fit within identifiable clusters united by a common political focus, even though the specific content, location, and format may differ. The following list of clusters identifies major themes and notable examples of the Islamist internet. |
// JAVA/Projects/la fac/src/pack1/A2.java
package pack1;
public class A2 extends ClassA{
public A2(int x){
super();
a=x+1;
b=x;
c=x*2;
}
}
|
Role of the FliA-FlgM regulatory system on the transcriptional control of the flagellar regulon and flagellar formation in Salmonella typhimurium In the flagellar regulon of Salmonella typhimurium, the flagellar operons are divided into three classes, 1, 2, and 3, with respect to transcriptional hierarchy. The class 2 operons are controlled positively by the class 1 genes, flhD and flhC. The class 3 operons are controlled positively by fliA and negatively by flgM. It has been shown that FliA is a sigma factor specific for class 3, whereas FlgM is an anti-sigma factor which binds FliA to prevent its association with RNA polymerase core enzyme. Therefore, the FliA-FlgM regulatory system has been believed to control specifically the class 3 operons. In the present study, we showed that the flgM mutation enhanced the expression of class 2 by more than fivefold. When a fliA mutation was present simultaneously, this enhancement was not observed. These results indicate that the FliA-FlgM regulatory system is involved not only in the expression of class 3 but also in that of class 2. However, though neither flhD nor flhC mutants could express the class 2 operons, the fliA mutants permitted the basal-level expression of those operons. Therefore, FlhD and FlhC are indispensable for the expression of class 2, whereas FliA is required only for its enhancement in the FlgM-depletion condition. Furthermore, we showed that the flgM mutation resulted in a two- to threefold increase in flagellar number. On the basis of these results, we propose that the relative concentration of FliA and FlgM may play an important role in the determination of flagellar numbers produced by a single cell. |
//! The "Lane Table". In the paper, this is depicted like so:
//!
//! ```
//! +-------+----+-----+----+------------+
//! + State | C1 | ... | Cn | Successors |
//! +-------+----+-----+----+------------+
//! ```
//!
//! where each row summarizes some state that potentially contributes
//! lookahead to the conflict. The columns `Ci` represent each of the
//! conflicts we are trying to disentangle; their values are each
//! `TokenSet` indicating the lookahead contributing by this state.
//! The Successors is a vector of further successors. For simplicity
//! though we store this using maps, at least for now.
use collections::{Map, Multimap, Set};
use grammar::repr::*;
use lr1::core::*;
use lr1::lookahead::*;
use std::default::Default;
use std::fmt::{Debug, Error, Formatter};
use std::iter;
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct ConflictIndex {
index: usize,
}
impl ConflictIndex {
pub fn new(index: usize) -> ConflictIndex {
ConflictIndex { index: index }
}
}
pub struct LaneTable<'grammar> {
_grammar: &'grammar Grammar,
conflicts: usize,
lookaheads: Map<(StateIndex, ConflictIndex), TokenSet>,
successors: Multimap<StateIndex, Set<StateIndex>>,
}
impl<'grammar> LaneTable<'grammar> {
pub fn new(grammar: &'grammar Grammar, conflicts: usize) -> LaneTable {
LaneTable {
_grammar: grammar,
conflicts: conflicts,
lookaheads: Map::default(),
successors: Multimap::default(),
}
}
pub fn add_lookahead(&mut self,
state: StateIndex,
conflict: ConflictIndex,
tokens: &TokenSet) {
self.lookaheads
.entry((state, conflict))
.or_insert_with(|| TokenSet::new())
.union_with(&tokens);
}
pub fn add_successor(&mut self, state: StateIndex, succ: StateIndex) {
self.successors.push(state, succ);
}
}
impl<'grammar> Debug for LaneTable<'grammar> {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
let indices: Set<StateIndex> = self.lookaheads
.keys()
.map(|&(state, _)| state)
.chain(self.successors
.iter()
.map(|(key, _)| key.clone()))
.collect();
let header = iter::once(format!("State"))
.chain((0..self.conflicts).map(|i| format!("C{}", i)))
.chain(Some(format!("Successors")))
.collect();
let rows = indices.iter().map(|&index| {
iter::once(format!("{:?}", index))
.chain((0..self.conflicts).map(|i| {
self.lookaheads
.get(&(index, ConflictIndex::new(i)))
.map(|token_set| format!("{:?}", token_set))
.unwrap_or(String::new())
}))
.chain(Some(self.successors
.get(&index)
.map(|c| format!("{:?}", c))
.unwrap_or(String::new())))
.collect()
});
let table: Vec<Vec<_>> = iter::once(header).chain(rows).collect();
let columns = 2 + self.conflicts;
let widths: Vec<_> = (0..columns)
.map(|c| {
// find the max width of any row at this column
table.iter()
.map(|r| r[c].len())
.max()
.unwrap()
})
.collect();
for row in &table {
try!(write!(fmt, "| "));
for (i, column) in row.iter().enumerate() {
if i > 0 {
try!(write!(fmt, " | "));
}
try!(write!(fmt, "{0:1$}", column, widths[i]));
}
try!(write!(fmt, " |\n"));
}
Ok(())
}
}
|
CPS Parents, Students Fear School Closings Will Bulldoze Their Community
By Samantha Abernethy in News on Apr 5, 2013 10:20PM
Martin Gonzalez
On Thursday I accompanied lawmakers and members of the press on a bus tour led by the Chicago Teachers Union through parts of Englewood and North Lawndale. The bus made its first stop at Mahalia Jackson Elementary School, due to be closed this year, and organizers showed the route students will walk, past a charter school and across the railroad tracks, to get to their new school in the fall. The tour ended with a mile-long stroll past the many abandoned buildings in North Lawndale and Garfield Park.
The tour was designed to confirm adults' fears of what children will see when they walk to their new school. It also reinforced fears of what children see now when they walk to their current school. We don't normally get too personal in expressing our views on Chicagoist, but after Thursday's tour, I've decided to publish the following essay, which I wrote and performed at the Paper Machete live magazine at the Green Mill on Saturday, March 30.
On March 21, Chicago Public Schools announced 54 elementary and middle schools would be closed and 61 buildings would be left vacant. Plenty of schools have been closed before, but this is an exceptionally large number unparalleled by any school district. The plan affects 30,000 students, not to mention countless teachers, principals and parents.
The announcement wasn't a surprise after months of speculation and dozens of community meetings. That didn't make it any less infuriating for those affected, and on Wednesday a sizable group of protesters gathered in Daley Plaza.
Now Chicago police downplayed it, saying about 700 to 900 people protested school closings in Daley Plaza on Wednesday. And of course the Chicago Teachers Union played it up saying about 5,000 to 6,000 people were protesting. That's quite a difference.
Protesters held signs calling Emanuel racist or Mayor One Percent, or even simply saying, “Rahm Sucks.”
The mayor's perceived indifference to public opinion wasn't helped by the fact he was out of town on vacation when the school closings list was announced... and then he waited days to defend the decision.
Schools close all the time in Chicago and nationwide, and it's not a popular decision. When Michelle Rhee served as DC Public Schools Chancellor, she announced a plan to shut down 23 schools in 2008. In the 2010 education documentary “Waiting for Superman” she said, “If you want to quickly become the most unpopular person in a city, you just tell someone you're going to close down *A* school, much less twenty-three.”
CPS and the mayor's office say the move is necessary to break children out of underperforming schools and to consolidate underutilized facilities to cut costs. In the negotiations Chicago Public Schools chief Barbara Byrd-Bennett promised schools would only be closed if the children could be sent to better schools. Well, a report by the Sun-Times showed that one-third of the schools closing will send students to schools with similar rankings. At least eight schools are sending their kids to schools with lower test scores.
The concern isn't just based on education, though. After 506 murders in 2012, the city is struggling to rein in violence. Closing schools disrupts children's educations, but also their routines. Something as simple as crossing another street can put a child in a dangerous situation in a different turf. A shakeup in the schools can coincide with a shakeup in gang violence.
In 2008, there were 513 murders in Chicago, a significant uptick of 65 more than the year before. As a graduate student at Northwestern in 2009, I interviewed Wesley Skogan from the Institute for Policy Research. He said, “Gang homicide is to a certain extent caused by the disruptions of the [drug] markets and in the stable gang relationships caused by reconstituting schools, knocking down CHA high rises, and [police] attacking street corner drug markets.”
Those reconstituted schools were part of Mayor Daley's mid-2000s renaissance initiative that merged several troubled high schools into a few really troubled high schools.
NPR's This American Life recently examined the effect of violence on Chicago's Harper High School in West Englewood. Linda Lutton reports that the area around Harper was once almost solely controlled by the Gangster Disciples. Gangs don't operate that way anymore, in part because “Chicago police have been so effective at locking up the big gang leaders that the hierarchy has crumbled.”
Now there are more than 15 gang factions in Harper's attendance area. Lutton says the gangs are determined by geography and kids aren't joining a gang, so much as they're assigned to one.
Lutton asked, “What if I'm a kid and I really don't want any part of this gang stuff? How can I avoid it?” A police officer responded, “It's not gonna happen.”
Last week Cook County Board President Toni Preckwinkle told Mick Dumke at the Chicago Reader, "You know, schools are community anchors. They're social centers. They're part of a community's identity. And often kids go half a dozen blocks and they're in different gang territory.”
Director Davis Guggenheim said in “Waiting for Superman” that “For generations, experts tended to blame failing schools on failing neighborhoods. But reformers have begun to believe the opposite: that the problems of failing neighborhoods might be blamed on failing schools.”
In her interview with the Chicago Reader, County Board President Preckwinkle also wondered why prisons are packed and schools are closing. Why, instead of investing in education, money pours into the prison system.
Harvard professor Deborah Prothrow-Stith approaches violence as a public health issue. In a speech at Northwestern Law School last week, she compared violence to lung cancer. It's more effective to stop people from smoking than it is to throw money at buying equipment to cure lung cancer. In the same way, preventing violence is more effective at, ya know, preventing violence from happening, than it is to throw people in jail with increased penalties.
She said, “You can't police your way to prevention.”
Cook County Sheriff Tom Dart reported recently that the county jail is nearly filled to capacity, just in time for the usual crime surge of the summer months. Sheriff Dart was an outspoken opponent of Mayor Emanuel's plan to close mental health centers last year, estimating one-third of the jail’s population suffers from mental illness. He said, “when we don't fund services ... They end up in my jail."
Growing up surrounded by this violence, students have been traumatized. They need those mental health services. They're living in fear. How can they learn algebra, if they're worried about whether they'll get beat up on the way home from school? That's how kids give up on school.
Every week or so, the mayor's office sends out a press release to brag about how they've lured some business to move its headquarters into the city. Earlier this week he said, “Illinois has what it takes for businesses to grow.” There is funding, there are loans. This week he also announced the city would receive a federal loan to fix up the riverwalk to lure in more tourists.
That's job creation. But you know what else is job creation? Hiring more teachers, social workers and therapists by funding schools and violence prevention programs. Find a way to lure children to schools with more programs, and the future of the city will be as secure as their education. Right now the mayor's office is running some sort of kickstarter-type campaign to fund a new after-school basketball program. Perhaps they should try a kickstarter for the riverwalk instead.
Now I'm not saying this to criticize Mayor Emanuel. He's not the malicious villain portrayed on picket signs downtown. I don't doubt that he believes that this will really help Chicago's children, that it will strengthen the school system, that it will strengthen communities, that it might even stabilize the city's bottom line. Hell, it might even work.
As the outrage on school closings continues, he's standing his ground. He says he is done negotiating. He made a few concessions, like agreeing not to repeat the mistake of consolidating high schools. It's not like Emanuel was secretly bulldozing an airport in the middle of the night like that other certain mayor we know. But it doesn't stop parents, teachers and especially children from feeling like they've been bulldozed just the same. |
/**
* Inserts data in the generated file.
*
* @param file file in which need to be inserted
* @param data data which need to be inserted
* @throws IOException when fails to insert into file
*/
public static void insertDataIntoJavaFile(File file, String data) throws IOException {
try {
updateFileHandle(file, data, false);
} catch (IOException e) {
            throw new IOException("Failed to insert data into file " + file, e);
}
} |
// Repository: rpuigm/ecommercewebtemplate
package net.ostemplate.app.productos.models.dao;
import org.springframework.data.repository.CrudRepository;
import net.ostemplate.app.productos.models.entity.ImagenProducto;
public interface ImagenProductoDao extends CrudRepository<ImagenProducto, Long>{
void deleteByImagen(String imagen);
}
|
/**
* Computes {@code a}*{@code pointA}+{@code b}*B
* where a = a[0]+256*a[1]+...+256^31*a[31].
* and b = b[0]+256*b[1]+...+256^31*b[31].
* B is the Ed25519 base point (x,4/5) with x positive.
*
* Note that execution time varies based on the input since this will only be used in verification
* of signatures.
*/
private static XYZ doubleScalarMultVarTime(byte[] a, XYZT pointA, byte[] b) {
CachedXYZT[] pointAArray = new CachedXYZT[8];
pointAArray[0] = new CachedXYZT(pointA);
PartialXYZT t = new PartialXYZT();
doubleXYZT(t, pointA);
XYZT doubleA = new XYZT(t);
for (int i = 1; i < pointAArray.length; i++) {
add(t, doubleA, pointAArray[i - 1]);
pointAArray[i] = new CachedXYZT(new XYZT(t));
}
byte[] aSlide = slide(a);
byte[] bSlide = slide(b);
t = new PartialXYZT(NEUTRAL);
XYZT u = new XYZT();
int i = 255;
for (; i >= 0; i--) {
if (aSlide[i] != 0 || bSlide[i] != 0) {
break;
}
}
for (; i >= 0; i--) {
doubleXYZ(t, new XYZ(t));
if (aSlide[i] > 0) {
add(t, XYZT.fromPartialXYZT(u, t), pointAArray[aSlide[i] / 2]);
} else if (aSlide[i] < 0) {
sub(t, XYZT.fromPartialXYZT(u, t), pointAArray[-aSlide[i] / 2]);
}
if (bSlide[i] > 0) {
add(t, XYZT.fromPartialXYZT(u, t), B2[bSlide[i] / 2]);
} else if (bSlide[i] < 0) {
sub(t, XYZT.fromPartialXYZT(u, t), B2[-bSlide[i] / 2]);
}
}
return new XYZ(t);
} |
package com.felixzh.SDKInterface;
public interface MQProducer {
public void start();
public void stop();
public boolean send(MQMessage<?> message);
}
|
// packages/gatsby-source-shopify/src/get-shopify-image.ts
import {
IUrlBuilderArgs,
getImageData,
IGatsbyImageData,
} from "gatsby-plugin-image"
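// NOTE: this excerpt does not include the declaration of IGetShopifyImageArgs
// that getShopifyImage() below expects, so the following is a minimal, assumed
// stand-in added for self-containment. Field names mirror how the function
// destructures its argument; the optional options are illustrative and are
// simply forwarded to getImageData().
interface IShopifyImageData {
  originalSrc: string
  width: number
  height: number
}
export interface IGetShopifyImageArgs {
  image: IShopifyImageData
  width?: number
  height?: number
  layout?: "constrained" | "fixed" | "fullWidth"
}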
const validFormats = new Set([`jpg`, `jpeg`, `png`, `webp`, `auto`])
export function urlBuilder({
width,
height,
baseUrl,
format,
}: IUrlBuilderArgs<unknown>): string {
if (!validFormats.has(format)) {
console.warn(
`${format} is not a valid format. Valid formats are: ${[
...validFormats,
].join(`, `)}`
)
format = `auto`
}
let [basename, version] = baseUrl.split(`?`)
const dot = basename.lastIndexOf(`.`)
let ext = ``
if (dot !== -1) {
ext = basename.slice(dot + 1)
basename = basename.slice(0, dot)
}
let suffix = ``
if (format === ext || format === `auto`) {
suffix = `.${ext}`
} else {
suffix = `.${ext}.${format}`
}
return `${basename}_${width}x${height}_crop_center${suffix}?${version}`
}
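// Example (illustrative values): for
//   baseUrl = "https://cdn.shopify.com/s/files/1/0001/products/shirt.jpg?v=123"
//   width = 300, height = 300, format = "webp"
// the builder yields
//   "https://cdn.shopify.com/s/files/1/0001/products/shirt_300x300_crop_center.jpg.webp?v=123"
// i.e. Shopify's size/crop suffix is appended to the basename and the original
// query string (the version parameter) is preserved.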
export function getShopifyImage({
image,
...args
}: IGetShopifyImageArgs): IGatsbyImageData {
const {
originalSrc: baseUrl,
width: sourceWidth,
height: sourceHeight,
} = image
return getImageData({
...args,
baseUrl,
sourceWidth,
sourceHeight,
urlBuilder,
formats: [`auto`],
})
}
|