content
stringlengths 7
2.61M
|
---|
Synthetic biology in biofilms: Tools, challenges, and opportunities The field of synthetic biology seeks to program living cells to perform novel functions with applications ranging from environmental biosensing to smart cell-based therapeutics. Bacteria are an especially attractive chassis organism due to their rapid growth, ease of genetic manipulation, and ability to persist across many environmental niches. Despite significant progress in bacterial synthetic biology, programming bacteria to perform novel functions outside the well-controlled laboratory context remains challenging. In contrast to planktonic laboratory growth, bacteria in nature predominantly reside in the context of densely packed communities known as biofilms. While biofilms have historically been considered environmental and biomedical hazards, their physiology and emergent behaviors could be leveraged for synthetic biology to engineer more capable and robust bacteria. Specifically, bacteria within biofilms participate in complex emergent behaviors such as collective organization, cell-to-cell signaling, and division of labor. Understanding and utilizing these properties can enable the effective deployment of engineered bacteria into natural target environments. Toward this goal, this review summarizes the current state of synthetic biology in biofilms by highlighting new molecular tools and remaining biological challenges. Looking to future opportunities, advancing synthetic biology in biofilms will enable the next generation of smart cell-based technologies for use in medicine, biomanufacturing, and environmental remediation. |
. From 1975 to 1989 operations were carried out in the clinic by various methods on 30 children with cysts of the common bile duct. Hepatico- or choledochojejunostomy after Roux is the operation of choice in cysts of the choledochus. However, recurrent cholangitis caused by jejunobiliary reflux was encountered in some patients in the postoperative period. The accumulated clinical experience has also shown that the Roux-type operation does not guarantee a successful functional result. To prevent reflux of the intestinal contents into the biliary tract after Roux's anastomosis, the authors used an antireflux valve on the excluded loop of the jejunum. Experimental study was undertaken of the theoretically substantiated antireflux valve on an excluded loop of the type of a "non-spilling inkpot" with a shortened loop. The creation of an antireflux valve in the form of an invaginated segment prevents jejunobiliary reflux and therefore provides the possibility for reducing significantly the length of the excluded loop in Roux's biliodigestive anastomoses. The obtained positive results confirm the rationality of this construction which does not need additional time for the operation. Since 1987 four children have been operated on by this method in the clinic. The length of the excluded loop was reduced from 30 to 15 cm. Postoperative complications linked with reflux into the biliary system were not encountered. |
<filename>packages/sqlint/src/cli/loadConfig.ts
import { Config, ErrorLevel } from '../rules'
import { fileExists, readFile, directoryExists } from './utils'
import * as yaml from 'js-yaml'
import Ajv from 'ajv'
import schemaConf from '../../schema.conf'
import { extname } from 'path'
// Candidate configuration file names, probed in this order when resolving
// a directory (and again in the user's personal config directory).
const configFiles = [
  { name: '.sqlintrc.json' },
  { name: '.sqlintrc.yaml' },
  { name: '.sqlintrc.yml' },
]
// Rule set applied when no configuration file can be located anywhere.
// Every built-in rule is enabled at Error level.
const defaultConfig: Config = {
  rules: {
    'align-column-to-the-first': { level: ErrorLevel.Error },
    'column-new-line': { level: ErrorLevel.Error },
    'linebreak-after-clause-keyword': { level: ErrorLevel.Error },
    'reserved-word-case': { level: ErrorLevel.Error, option: 'upper' },
    'space-surrounding-operators': { level: ErrorLevel.Error },
    'where-clause-new-line': { level: ErrorLevel.Error },
    'align-where-clause-to-the-first': { level: ErrorLevel.Error },
    'require-as-to-rename-column': { level: ErrorLevel.Error }
  }
}
/**
 * Renders Ajv validation errors as a bulleted, one-per-line list suitable
 * for embedding in an Error message.
 *
 * Fixes two defects in the original:
 * - errors whose keyword was not "additionalProperties" mapped to
 *   `undefined`, producing the literal text "- undefined." — fall back to
 *   Ajv's own `error.message` instead;
 * - the offending property name for "additionalProperties" lives in
 *   `error.params.additionalProperty` (per the Ajv API), not in
 *   `error.data.invalidProp`, which read a property literally named
 *   "invalidProp" off the validated data.
 */
function formatErrors(errors: Ajv.ErrorObject[]) {
  return errors
    .map(error => {
      if (error.keyword === 'additionalProperties') {
        const prop = (error.params as Ajv.AdditionalPropertiesParams).additionalProperty
        return `Unexpected property "${prop}"`
      }
      return error.message
    })
    .map(message => `\t- ${message}.\n`)
    .join('')
}
/**
 * Checks a parsed configuration object against the bundled JSON schema.
 * Returns true on success; throws an Error listing every violation
 * (formatted by formatErrors) when validation fails.
 */
function validateSchema(config: Object) {
  const ajv = new Ajv({ verbose: true, schemaId: 'auto', missingRefs: 'ignore' })
  const validate = ajv.compile(schemaConf)
  const isValid = validate(config)
  if (isValid) {
    return true
  }
  const details = formatErrors(validate.errors || [])
  throw new Error(`SQLint configuration is invalid:\n${details}`)
}
// Shape of a user-authored config file: each rule maps to either a bare
// level ("error" | "warning" | "off" | number) or a [level, option] pair.
type RawConfig = {
  rules: {
    [key: string]: string | number | { level: string | number, option: any }
  }
}

/**
 * Normalizes a raw rule map into the internal Config shape, resolving the
 * string levels "error"/"warning"/"off" to 2/1/0.
 *
 * Bug fixes vs. the original:
 * - `case 'off'` assigned `level = 0` but fell through to the `default:`
 *   throw, so `"off"` always raised "unknown error type"; it now returns 0.
 * - the error message now reports the offending value itself rather than
 *   the whole (possibly array) rule entry.
 *
 * @throws Error when a rule level is an unrecognized string.
 */
function convertToConfig(rawConfig: RawConfig): Config {
  // Resolve one level descriptor to its numeric severity.
  const levelFromValue = (v: any): number => {
    if (typeof v === 'number') {
      return v
    }
    if (typeof v === 'string') {
      switch (v) {
        case 'error': return 2
        case 'warning': return 1
        case 'off': return 0
        default: throw new Error(`unknown error type: ${v}`)
      }
    }
    // Anything else (objects, null, …) is treated as disabled.
    return 0
  }
  return Object.entries(rawConfig.rules).reduce((config, [name, value]) => {
    let level = 0
    let option = null
    if (Array.isArray(value)) {
      // [level, option] pair.
      level = levelFromValue(value[0])
      option = value[1]
    } else {
      level = levelFromValue(value)
    }
    config.rules[name] = { level, option }
    return config
  }, { rules: {} } as Config)
}
export function loadConfig(directoryOrFile: string): Config {
let filePath = '';
if (fileExists(directoryOrFile)) {
filePath = directoryOrFile
} else if (directoryExists(directoryOrFile)) {
const file = configFiles.find(v => fileExists(`${directoryOrFile}/${v.name}`))
if (file) filePath = `${directoryOrFile}/${file.name}`
}
if (!filePath) {
// try to lookup personal config file
const file = configFiles.find(v =>
fileExists(`${process.env.HOME}/.config/sql-language-server/${v.name}`)
)
if (file) filePath = `${process.env.HOME}/.config/sql-language-server/${file.name}`
}
if (!filePath) {
return defaultConfig
}
const fileContent = readFile(filePath)
let config: RawConfig;
switch(extname(filePath)) {
case '.json':
config = JSON.parse(fileContent)
break
case '.yaml':
case '.yml':
config = yaml.safeLoad(fileContent) as any
break
default:
config = JSON.parse(fileContent)
}
validateSchema(config)
return convertToConfig(config)
} |
<reponame>prabhuignoto/react-creme<filename>packages/documentation/components/position/position-examples.tsx<gh_stars>10-100
import classNames from 'classnames';
import React, { useCallback, useRef } from 'react';
import { useRecoilValue } from 'recoil';
import { Position } from '../../../lib/components/common/effects/use-position-model';
import { usePosition } from '../../../lib/components/common/effects/usePosition';
import { themeState } from '../../atoms/home';
import './position-examples.scss';
/**
 * Demo wrapper: renders an outer box and positions an inner box inside it
 * at the requested anchor position via the usePosition hook.
 */
export const PositionComponent: React.FunctionComponent<{
  position: Position;
}> = ({ position }) => {
  // Outer container the inner element is positioned relative to.
  const container = useRef<HTMLDivElement>();
  // Inner element that receives the computed CSS position.
  const element = useRef<HTMLElement>();
  const theme = useRecoilValue(themeState);
  const { position: cssPosition, onInit } = usePosition(
    container,
    element,
    position,
    {
      spacing: 0,
    }
  );
  // Callback ref: capture the container node and initialize the positioning
  // hook once the node is attached to the DOM.
  // NOTE(review): deps are [], so the first onInit is captured for the life
  // of the component — confirm usePosition tolerates a stale callback.
  const onRef = useCallback((node: HTMLDivElement) => {
    container.current = node;
    onInit();
  }, []);
  return (
    <div
      className={classNames(
        'rc-demo-position-outer-box',
        theme.darkMode ? 'dark' : ''
      )}
      ref={onRef}
    >
      <span
        className={classNames('rc-demo-position-inner-box')}
        style={{ ...cssPosition }}
        ref={element}
      ></span>
    </div>
  );
};
export const PositionLeft = () => <PositionComponent position="left center" />;
export const PositionRight = () => (
<PositionComponent position="right center" />
);
export const PositionTop = () => <PositionComponent position="top center" />;
export const PositionLeftBottom = () => (
<PositionComponent position="left bottom" />
);
export const PositionRightTop = () => (
<PositionComponent position="right top" />
);
export const PositionRightBottom = () => (
<PositionComponent position="right bottom" />
);
export const PositionLeftTop = () => <PositionComponent position="left top" />;
|
from __future__ import print_function
import contextlib
import filecmp
import itertools
import json
import logging
import os
import pprint
import shutil
import sys
import tempfile
import time
import datetime
from . import six
from .exceptions import IrodsError, IrodsWarning, IrodsSchemaError
from . import lib
from . import json_validation
from .password_obfuscation import encode, decode
from . import paths
class IrodsConfig(object):
    """Facade over the iRODS server's on-disk configuration files.

    Lazily loads and caches server_config, version, hosts_config,
    host_access_control_config and the client environment; exposes values
    derived from them (catalog role, rule engine plugins, database settings,
    schema URI); and can commit modified dictionaries back to disk via
    ``commit``. All caches are invalidated by ``clear_cache``.
    """
    def __init__(self,
                 injected_environment={},
                 insert_behavior=True):
        # NOTE(review): mutable default argument ({}). Safe only if
        # lib.callback_on_change_dict copies rather than aliases its input —
        # confirm; otherwise callers sharing the default could interfere.
        self._injected_environment = lib.callback_on_change_dict(self.clear_cache, injected_environment)
        self._insert_behavior = insert_behavior
        self.clear_cache()

    @property
    def version_tuple(self):
        # Prefer the JSON version file; fall back to the legacy VERSION
        # file of key=value lines. Raises IrodsError if neither yields one.
        if os.path.exists(paths.version_path()):
            return lib.version_string_to_tuple(self.version['irods_version'])
        legacy_version_file_path = os.path.join(paths.irods_directory(), 'VERSION')
        if os.path.exists(legacy_version_file_path):
            with open(legacy_version_file_path) as f:
                for line in f:
                    key, _, value = line.strip().partition('=')
                    if key == 'IRODSVERSION':
                        return lib.version_string_to_tuple(value)
        raise IrodsError('Unable to determine iRODS version')

    @property
    def server_config(self):
        # Loaded lazily; copied from the template on first access if the
        # config file does not exist yet (see load_json_config).
        if self._server_config is None:
            self._server_config = load_json_config(paths.server_config_path(),
                    template_filepath=paths.get_template_filepath(paths.server_config_path()))
        return self._server_config

    @property
    def is_provider(self):
        return self.server_config['catalog_service_role'] == 'provider'

    @property
    def is_catalog(self):
        # compatible with 4.2.x
        return self.is_provider

    @property
    def is_consumer(self):
        return self.server_config['catalog_service_role'] == 'consumer'

    @property
    def is_resource(self):
        # compatible with 4.2.x
        return self.is_consumer

    @property
    def default_rule_engine_instance(self):
        # First configured rule engine is treated as the default.
        return self.server_config['plugin_configuration']['rule_engines'][0]['instance_name']

    @property
    def default_rule_engine_plugin(self):
        return self.server_config['plugin_configuration']['rule_engines'][0]['plugin_name']

    @property
    def configured_rule_engine_plugins(self):
        # Plugin names of every configured rule engine, in config order.
        ret_list = []
        for re in self.server_config['plugin_configuration']['rule_engines']:
            ret_list.append(re['plugin_name'])
        return ret_list

    @property
    def database_config(self):
        """First database plugin config, or None if none is configured.

        If the config lacks 'db_odbc_driver', attempts to recover the driver
        from the odbc.ini file and persists it back to server_config.
        """
        try:
            database_config = [e for e in self.server_config['plugin_configuration']['database'].values()][0]
        except (KeyError, IndexError):
            return None
        if not 'db_odbc_driver' in database_config.keys():
            l = logging.getLogger(__name__)
            l.debug('No driver found in the database config, attempting to retrieve the one in the odbc ini file at "%s"...', self.odbc_ini_path)
            if os.path.exists(self.odbc_ini_path):
                from . import database_connect
                with open(self.odbc_ini_path) as f:
                    odbc_ini_contents = database_connect.load_odbc_ini(f)
            else:
                l.debug('No odbc.ini file present')
                odbc_ini_contents = {}
            if self.catalog_database_type in odbc_ini_contents.keys() and 'Driver' in odbc_ini_contents[self.catalog_database_type].keys():
                database_config['db_odbc_driver'] = odbc_ini_contents[self.catalog_database_type]['Driver']
                l.debug('Adding driver "%s" to database_config', database_config['db_odbc_driver'])
                # Persist the recovered driver; keep the cache since we just
                # mutated the in-memory dict to match what was written.
                self.commit(self._server_config, paths.server_config_path(), clear_cache=False)
            else:
                l.debug('Unable to retrieve "Driver" field from odbc ini file')
        return database_config

    @property
    def catalog_database_type(self):
        # Key of the first (and presumably only) database plugin entry,
        # or None when no database is configured.
        try:
            return [e for e in self.server_config['plugin_configuration']['database'].keys()][0]
        except (KeyError, IndexError):
            return None

    @property
    def odbc_ini_path(self):
        # Honor $ODBCINI if present; otherwise default to ~/.odbc.ini.
        if 'ODBCINI' in self.execution_environment:
            return self.execution_environment['ODBCINI']
        return os.path.join(paths.home_directory(), '.odbc.ini')

    @property
    def version(self):
        if self._version is None:
            self._version = load_json_config(paths.version_path())
        return self._version

    @property
    def hosts_config(self):
        if self._hosts_config is None:
            self._hosts_config = load_json_config(paths.hosts_config_path(),
                    template_filepath=paths.get_template_filepath(paths.hosts_config_path()))
        return self._hosts_config

    @property
    def host_access_control_config(self):
        if self._host_access_control_config is None:
            self._host_access_control_config = load_json_config(paths.host_access_control_config_path(),
                    template_filepath=paths.get_template_filepath(paths.host_access_control_config_path()))
        return self._host_access_control_config

    @property
    def client_environment_path(self):
        # Honor $IRODS_ENVIRONMENT_FILE if present in the execution
        # environment; otherwise use the default client environment path.
        if 'IRODS_ENVIRONMENT_FILE' in self.execution_environment:
            return self.execution_environment['IRODS_ENVIRONMENT_FILE']
        else:
            return paths.default_client_environment_path()

    @property
    def client_environment(self):
        if self._client_environment is None:
            self._client_environment = load_json_config(self.client_environment_path)
        return self._client_environment

    @property
    def server_environment(self):
        # 'environment_variables' section of server_config ({} if absent).
        return self.server_config.get('environment_variables', {})

    def print_execution_environment(self):
        pprint.pprint(self.execution_environment)

    @property
    def execution_environment(self):
        """Environment dict used when spawning server processes.

        With insert_behavior on: server_environment, overlaid by os.environ,
        plus irodsConfigDir/PWD, overlaid last by the injected environment.
        With it off: only the injected environment.
        """
        if self._execution_environment is None:
            if self.insert_behavior:
                self._execution_environment = dict(self.server_environment)
                self._execution_environment.update(os.environ)
                self._execution_environment['irodsConfigDir'] = paths.config_directory()
                self._execution_environment['PWD'] = paths.server_bin_directory()
                self._execution_environment.update(self.injected_environment)
            else:
                self._execution_environment = dict(self.injected_environment)
        return self._execution_environment

    @property
    def insert_behavior(self):
        return self._insert_behavior

    @insert_behavior.setter
    def insert_behavior(self, value):
        # Changing the flag invalidates the cached execution environment.
        self._insert_behavior = value
        self.clear_cache()

    @property
    def injected_environment(self):
        return self._injected_environment

    @injected_environment.setter
    def injected_environment(self, value):
        # Wrap in a change-tracking dict so later mutations also clear caches.
        self._injected_environment = lib.callback_on_change_dict(self.clear_cache, value if value is not None else {})
        self.clear_cache()

    @property
    def schema_uri_prefix(self):
        # '<schema_validation_base_uri>/<schema_version>'; both keys must be
        # present in server_config or IrodsSchemaError is raised.
        if self._schema_uri_prefix is None:
            l = logging.getLogger(__name__)
            l.debug('Attempting to construct schema URI...')
            key = 'schema_validation_base_uri'
            self.throw_if_property_is_not_defined_in_server_config(key)
            base_uri = self.server_config[key]
            key = 'schema_version'
            self.throw_if_property_is_not_defined_in_server_config(key)
            schema_version = self.server_config[key]
            self._schema_uri_prefix = '/'.join([base_uri, schema_version])
            l.debug('Successfully constructed schema URI.')
        return self._schema_uri_prefix

    @property
    def admin_password(self):
        # NOTE(review): only the parent directory is checked; if the
        # directory exists but the password file does not, open() raises —
        # confirm callers expect that.
        if not os.path.exists(os.path.dirname(paths.password_file_path())):
            return None
        with open(paths.password_file_path(), 'rt') as f:
            return decode(f.read())

    @admin_password.setter
    def admin_password(self, value):
        l = logging.getLogger(__name__)
        if not os.path.exists(os.path.dirname(paths.password_file_path())):
            # 0o700: password material must not be group/world readable.
            os.makedirs(os.path.dirname(paths.password_file_path()), mode=0o700)
        mtime = int(time.time())
        with open(paths.password_file_path(), 'wt') as f:
            l.debug('Writing password file %s', f.name)
            print(encode(value, mtime=mtime), end='', file=f)
        # Keep the file mtime in sync with the timestamp used for encoding.
        os.utime(paths.password_file_path(), (mtime, mtime))

    def throw_if_property_is_not_defined_in_server_config(self, property_name):
        if property_name not in self.server_config:
            raise IrodsSchemaError('Cannot validate configuration files. [{}] is missing a required property: [{}].'
                    .format(paths.server_config_path(), property_name))

    def validate_configuration(self):
        """Validate the JSON config files against their remote schemas.

        A base URI of 'off' disables validation entirely. Files whose
        validation raises IrodsWarning are skipped and reported at the end
        via a single aggregated IrodsWarning.
        """
        l = logging.getLogger(__name__)
        key = 'schema_validation_base_uri'
        self.throw_if_property_is_not_defined_in_server_config(key)
        if self.server_config[key] == 'off':
            # NOTE(review): Logger.warn is a deprecated alias of warning.
            l.warn(('Schema validation is disabled; json files will not be validated against schemas. '
                    'To re-enable schema validation, supply a URI to a set of iRODS schemas in the field '
                    '"schema_validation_base_uri" and a valid version in the field "schema_version" in the '
                    'server configuration file (located in %s).'), paths.server_config_path())
            return
        configuration_schema_mapping = {
                'server_config': {
                    'dict': self.server_config,
                    'path': paths.server_config_path()
                },
                'version': {
                    'dict': self.version,
                    'path': paths.version_path()
                },
                'service_account_environment': {
                    'dict': self.client_environment,
                    'path': self.client_environment_path
                }
            }
        skipped = []
        # schema_uri_suffix is the key of a single element within configuration_schema_mapping (e.g. 'server_config').
        # config_file is the dict mapped to the key (e.g. configuration_schema_mapping['server_config']).
        for schema_uri_suffix, config_file in configuration_schema_mapping.items():
            try:
                schema_uri = '%s/%s.json' % (
                        self.schema_uri_prefix,
                        schema_uri_suffix)
            except IrodsError as e:
                # Re-raise as IrodsWarning while preserving the traceback.
                l.debug('Failed to construct schema URI')
                six.reraise(IrodsWarning, IrodsWarning('%s\n%s' % (
                        'Preflight Check problem:',
                        lib.indent('JSON Configuration Validation failed.'))),
                    sys.exc_info()[2])
            l.debug('Attempting to validate %s against %s', config_file['path'], schema_uri)
            try:
                json_validation.validate_dict(
                        config_file['dict'],
                        schema_uri,
                        config_file['path'])
            except IrodsWarning as e:
                # Validation warnings are non-fatal per file; collect and
                # report them together below.
                l.warning(e)
                l.warning('Warning encountered in json_validation for %s, skipping validation...',
                        config_file['path'])
                l.debug('Exception:', exc_info=True)
                skipped.append(config_file['path'])
        if skipped:
            raise IrodsWarning('%s\n%s' % (
                    'Skipped validation for the following files:',
                    lib.indent(*skipped)))

    def commit(self, config_dict, path, clear_cache=True, make_backup=False):
        """Atomically write config_dict as pretty-printed JSON to path.

        Writes to a temp file, skips the move when the content is unchanged,
        optionally keeps a timestamped '.prev.' backup of the old file.
        """
        l = logging.getLogger(__name__)
        l.info('Updating %s...', path)
        with tempfile.NamedTemporaryFile(mode='wt', delete=False) as f:
            json.dump(config_dict, f, indent=4, sort_keys=True)
        if os.path.exists(path):
            if filecmp.cmp(f.name, path):
                # NOTE(review): the temp file is left behind on this early
                # return (delete=False and no move) — confirm intended.
                return
            if make_backup:
                time_suffix = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
                shutil.copyfile(path, '.'.join([path, 'prev', time_suffix]))
        shutil.move(f.name, path)
        if clear_cache:
            self.clear_cache()

    def clear_cache(self):
        # Drop every lazily loaded config so the next access re-reads disk.
        self._server_config = None
        self._version = None
        self._hosts_config = None
        self._host_access_control_config = None
        self._client_environment = None
        self._schema_uri_prefix = None
        self._execution_environment = None

    # Thin read-only accessors delegating to the paths module, so callers
    # can resolve every relevant path through an IrodsConfig instance.
    @property
    def irods_directory(self):
        return paths.irods_directory()

    @property
    def config_directory(self):
        return paths.config_directory()

    @property
    def home_directory(self):
        return paths.home_directory()

    @property
    def core_re_directory(self):
        return paths.core_re_directory()

    @property
    def scripts_directory(self):
        return paths.scripts_directory()

    @property
    def server_config_path(self):
        return paths.server_config_path()

    @property
    def database_config_path(self):
        return paths.database_config_path()

    @property
    def version_path(self):
        return paths.version_path()

    @property
    def hosts_config_path(self):
        return paths.hosts_config_path()

    @property
    def host_access_control_config_path(self):
        return paths.host_access_control_config_path()

    @property
    def password_file_path(self):
        return paths.password_file_path()

    @property
    def log_directory(self):
        return paths.log_directory()

    @property
    def control_log_path(self):
        return paths.control_log_path()

    @property
    def setup_log_path(self):
        return paths.setup_log_path()

    @property
    def test_log_path(self):
        return paths.test_log_path()

    @property
    def icommands_test_directory(self):
        return paths.icommands_test_directory()

    @property
    def server_test_directory(self):
        return paths.server_test_directory()

    @property
    def server_parent_log_path(self):
        return paths.server_parent_log_path()

    @property
    def server_log_path(self):
        return paths.server_log_path()

    @property
    def server_bin_directory(self):
        return paths.server_bin_directory()

    @property
    def server_executable(self):
        return paths.server_executable()

    @property
    def rule_engine_executable(self):
        return paths.rule_engine_executable()

    @property
    def database_schema_update_directory(self):
        return paths.database_schema_update_directory()

    @property
    def service_account_file_path(self):
        return paths.service_account_file_path()

    @property
    def irods_user(self):
        return paths.irods_user()

    @property
    def irods_uid(self):
        return paths.irods_uid()

    @property
    def irods_group(self):
        return paths.irods_group()

    @property
    def irods_gid(self):
        return paths.irods_gid()
def load_json_config(path, template_filepath=None):
    """Load the JSON file at ``path`` into a dict.

    If the file is missing and a template path is given, the template is
    copied into place first. A JSON parse failure is re-raised as an
    IrodsError carrying the original traceback.
    """
    logger = logging.getLogger(__name__)
    if not os.path.exists(path) and template_filepath is not None:
        logger.debug('%s does not exist, copying from template file %s', path, template_filepath)
        shutil.copyfile(template_filepath, path)
    logger.debug('Loading %s into dictionary', path)
    try:
        return lib.open_and_load_json(path)
    except ValueError as e:
        message = '%s\n%s' % (
            'JSON load failed for [%s]:' % (path),
            lib.indent('Invalid JSON.',
                       '%s: %s' % (e.__class__.__name__, e)))
        six.reraise(IrodsError, IrodsError(message), sys.exc_info()[2])
|
class dO0ECwJIL {
public static void ctbhoK (String[] T4OuVvQKWs8oxD) throws KB5cH6 {
while ( ----!-new ukQpFTyrF()[ this[ --!new void[ -null[ ( this[ -new int[ !04197.zMDXeM28HjlYqL()].Caq56oNUY7OB7M]).QJ3E9VCEloc6l]].czQO6w_rhZQcgc()]]) ( -sr.ctkcIi3pAGFo()).U;
int srk3xrUSN = this[ RzNW832X0NXi.EsVSM750fYYurB()];
boolean KWbSmAtGra5ZQ;
while ( 4.JHLsveUg) ;
;
{
;
boolean naEcpM;
hYeLnoF4ybBz McMFDQ;
{
xZB CD3SF;
}
if ( !sPT5IRNU9VbL.wWMj9lqb0bPbMl) if ( ( !new void[ !-true.Z()].w5uCPVuOk7ZM()).P9DJ_54MH()) ;
}
int[] o9_J9jHvvcj;
U6XgFf3R ZG8Os;
while ( !true.jYMZrumexbHUf2()) if ( Kp()[ !!OR_l().b_]) if ( voQ2().VGbH2gH38UQW) {
pKTrRKDWgy[] kDpTPs;
}
void v_bc860RVg = new void[ true.WC0e0Fk1][ null.dBtVZUNnd9lxQ()] = !---this.kbgErUIMC1iUJd;
{
-FJcs15E322().s8HIpMPtiWWgX();
( !-new ePfzXmx().D51v8a()).DschA1_RNpZv();
boolean[] stU0iVnfq4W;
void ut;
NjCGoW pOnw74S;
RwBTGQt8V[] ZSGwqzp;
}
}
public static void ILLgmoe2B (String[] OnqpzNYd9) {
null[ -!----true.nTI4cOag];
return;
boolean[][] mYuqP3md3;
int[][] yOMbMZ2HmsFqL = true[ !!291[ null.tspdEfrll6G()]] = -!QlGtvSX.v0tDWp07();
boolean[] L4k77 = -new lUWFavb8wHg6V()[ !-false.rCJzsoW()] = -new VCFaiKS_Qt().Vd();
{
;
if ( !P2rkRf9tP5sByo().E()) -!JA2uqcD()[ true.pe];
msGQFP6pBV yW3b0mJl;
5._R;
int QYjCt;
if ( !true[ new GPHD().FH_8]) new ZpVh()[ 4109543.HhVvEN5c4fd()];
while ( !new oWljPJK()[ NyzvSFYj0q_cU[ null.baQocH]]) while ( --!!-!this.qZ) true.yEdPl0dQbvt;
int[][] Z6Ce;
return;
NqT CHyLAphH;
boolean o;
boolean JvpM;
cFSYXo[] k;
asBA_[][] Stk7E;
return;
}
boolean[][][] DnR;
if ( new KUqC().srwXO634p2z_aZ()) {
M lDG627b6BA6;
}
boolean[] SLl7hosU;
{
if ( new Ml9i5ZQZxdaNO().SXYGvVoleBmaR) {
;
}
q4SJ0SnsZTldC _h2;
while ( 99539[ --!null[ new void[ !false.Z83zq].H6wKU8Luu()]]) ;
return;
while ( !true.zrdq()) while ( !new boolean[ 335242402.kTIwYqR()].u) --!null.qRP8XS;
while ( !!!4041225.iolF53R_MaVOY) {
boolean[] e4955iG1AJKm;
}
Gh7Zoaxnt4SG HR;
boolean V8pTUoh2F0;
}
}
public int[][][] qaCZYn5;
}
class DnhA0mw6LN {
public int tFj (boolean AzoQJwP9z, boolean v, int[][][] jCUdBG, boolean NYx4nR) {
if ( -kcdZB9PRHH().xnbiCeMquW()) return;
;
void T = ---!-p9cDgy3tBk()[ !null.tfKjJ90nD];
!new f_UDHS().MFokLThjPDtNy;
void eVyhOSfxmyXx = !-!new boolean[ HyijHSA.h][ -null[ new NUL().f()]] = X7Jh().hFnpYSCsUUB;
boolean uhyzf8qLXUNcR4;
return;
while ( !-( ---!( -null[ -this[ -!new S[ !new int[ C.tF86y_J6E()][ -!lKig().hN]].ULke45qg5mC5]]).kterg0vh).zfD17Ok) while ( --xmZhsw[ !new nUvbth2JikF()._EGxOU()]) {
void YV2S;
}
;
void mfawyk8DP = -!!!--!-this.TfIAyO3pQjqybG;
boolean Dlr8xYYTnhR;
boolean[] uoVvriGZ2;
while ( !this[ null.WNiXjkt]) return;
}
public int SP7oQTOetf;
public boolean o4oKZvjICP;
public static void nriox8bO (String[] ykKP) throws XIiLYZ25 {
while ( !new boolean[ null.muoH2QW].b) if ( !false.Hgs7i) this.JlvC3mvt();
return !( !!065854.e88bSWO).nhLHyywwl;
}
public ptu5o[][][] H6fX29pcFzca1 () {
t9s_3iJBNb[][] CkdBumJIpp7YVn;
{
int VahA;
IemwoM5OLW1CRK[] wYc4W;
boolean[][] aHFukV;
;
void p_c_lg6FGDqf;
while ( -null.wjoPR) return;
if ( true.aQuBKJ()) return;
void Fg3;
}
;
return opjc97ckT3Gu().I3AcP();
int[] wYkhAvYTIk;
if ( !( false.I())[ -false.UAxpzuBpOsn]) return;
return;
}
public static void Ig7MWDAxTM8v (String[] IPAAAH6G) throws UYV91DLu6Aow {
this.QEHrvHO0t7SU6r;
void YQaiIg2R = !-!942898.df2PVzgzuEiS() = !-!-true.Cn6Dcq();
return;
boolean HIpW;
while ( --zja5Svj[ -E7EAdiv.hT]) {
while ( --( -f.FVynQU())[ -!true.b94ViH9vmi]) while ( V4I3HMa9qLXj[ ( !--new boolean[ !!false.eie0af].LRPvbZ9UcV1IU())[ ( true[ -new int[ --!null[ !-Eoe2[ C7WzzBZJanrQ0.mhcAXE4kh()]]][ kVYKi1U.NOe3zkWDhxtusb()]])[ null.qIwW_FYrPP()]]]) {
{
while ( !new V().tRIAaP9qYdbhl()) {
{
void PPdpNYONO;
}
}
}
}
}
while ( opvNx0()[ false.Wxjz]) new E1zoJLXc().HW();
boolean WkWcnSuo = 25119489.m3fBW = -this.TczJcfa3RcpUDg();
if ( !null[ UQLTj[ new bbE().QqPRhyE9L3qF_w()]]) while ( -this.Fh) while ( null[ ( ( -!( new PyVGJWVlOuw().IMrNHzAEbKW)[ OdqJc().uMENiNrWJerJx]).Y6X6())[ null[ ( null.LQECL9()).e9]]]) 221.D;else ;
i gJwY85KJS3dkq = new V2[ !!this.VMh7M2()][ ( new g0LA672v1RvW().UIYNXzB5Oa_B())[ ( -( 928429.pTv).A2oCq).l8__]] = true[ this[ !false._oJ2h]];
return ( !-new MNslNFxa()[ !--veVxdWcezO6t()._OTqDd_NHJkAno()]).JsjjamA370SCp();
return;
void ZgtShkO;
void[][] _UTd6vT02Bl;
( this.UdtLPLqnam8df())[ !new AElzzTNxNKcQY().Vm];
;
Gr[][] j;
{
boolean mI;
;
PPzyM tzNu;
if ( -null.aE4StD1bu4Tc69()) return;
QHQRd_fKV[][][] TAYqKiI;
return;
{
int jP3N;
}
A3.JAuBz1ZiRt();
return;
sJWQYh[][] TiVw7YQfv5XL;
;
{
VTFkUZMJT[][] Ar1IT;
}
int[] jebHTHP3;
lgcnCd4f3Ri Y;
}
;
}
public Lno_R6E7w uEAtgUtnmmzpw (int bYkDAC, void[] IrfwI4) throws stUbP2 {
while ( -!!!!null[ -zPsSYmqY8lastg().H]) 737530.eSO6Vw7E;
boolean nrP31rVM1FD3Hh = this.zMX7IFWYYCc9H();
n71APVyxii M0oG6D9Sg;
;
while ( new u[ !980[ JY().sTGkbhby0n()]].MkP9m()) return;
y2E5 Yh = !-212.Kwqj = !new d()[ !piZA5FLEcbXbMB.WAyIfE()];
while ( !( !new void[ ( true.XvDDseGkqgkL())[ !vCpXZolk()[ -!496.Oc()]]][ 414525662.OG]).D()) -true.athM62gPYk9xfR();
void VIqf8TrqnR = !this.KBg4R;
{
int V8QX;
while ( this[ !new gobAiHWU[ true.jv7ezvHz9GA()][ true.qPaW()]]) return;
void[][][][] I5N2Nz2__4A8;
g[][][] eLkz;
int u;
;
int BddS;
boolean[][][][][][][] P5TL7SEwvbQzi;
}
;
int NF8u8M0j7nq5c;
;
}
public static void _8Ve (String[] Ait6dHLRO47hCt) throws oFMm8ksWJRXn {
void[] FNqt2Pp;
yohUx[] w5vpW = !8272841.uU24CQkLP();
NYlD2aaf3vyei[] fkpDQ_jMgK;
int[][][] ngXDJynYzHF1d0;
void[] Wa3ka;
void HKKASml;
;
boolean eq31bvOGOE9L;
boolean[][][] JrdZVG1JShV7 = new void[ u().LySH1C].Efo2imsP() = null.WVo6UoqvAH;
void rn;
void zm1VvLDS0t0;
!true.WW1C();
int[] fLhpydPBWj9 = -!!false.EalzOmYJsLW0z7();
boolean[] yTyOAFj = !false.LOUz3akFaSr4w() = !---!new int[ --lVqLnQSNkYmx.NY9zkzH].g();
{
boolean YEwFr1mUs1o_C;
void[][][] E7K3;
int[] fOVnEXTM;
void[][][] XOVaPEQ;
return;
;
while ( -mmtqp7KarnXxRf()[ -true._H8lnddhB76S]) ;
}
boolean[][][] GreLmAF0;
void[] ENChwbGdFxUF = -z.FMu() = IZdL2F[ new boolean[ true[ 054657290.bhprIbH()]].fkZkMY];
boolean[] vGRIkZyF6hzzKo;
}
public int[] W2PBKaqZl;
}
class sYgCs {
}
class uD {
public static void q (String[] OrzT4GVNO) {
L8[] CRq;
int UwfpjAw1QTHC;
return null[ -new Ebe6u().TkehCkLMZu5()];
return;
if ( -R()[ W.UB]) if ( !7068[ !-null[ !95[ -!-this[ -HO7moDDH7_1F.zrtBZUqjZS()]]]]) while ( Pbv.t4kj7) while ( ( -!-!true.Wy47mGxgWvvs()).D_sCamJQFee7Sq()) return;else {
while ( -null.K) 4696[ !--!yQOlea9.XlMHv];
}
;
{
return;
while ( --new Do4U583ypK2X()[ -h6RWu3QtWn()[ -!!-!new jJz9IVrNv()[ LJzQS8().lF5VdwpIdQ]]]) if ( true[ null[ null.t]]) if ( !-!qIweHLIRoqjNkJ.TIof) return;
QiKorf1mWq7L[] vrrRb;
boolean[][][][] LkqrM5W7xs9yV9;
-!vmEzpoIR6xE()[ 51796346[ PDspVCh.EbmMFp0JVB7RA()]];
boolean mv;
false.AY7lXNdI;
}
return !--!--false[ -!false.Cv];
int JguYq1Fdr;
if ( !-false[ ogCrHz4MKLlH().sh()]) if ( false[ -!8029617.Z9HJ2Va8t99F]) {
ynGNU3gjbPZDvH ZRMB93VaGu;
}else true[ Gujyi19tnjeDC()[ new int[ 41151803.FKJfiVc][ new HiLkaAQ0[ true[ !( -!-( null.KHY6w8_yIx9V()).qw()).ufHyCzG]][ XGvrrVIqAJ().IPCB6WPs]]]];
if ( !true.xj()) {
return;
}else return;
}
public void[] Cbvk1AnkB () throws Wuf {
while ( ( -!!LIG2GNYK_s0Qs3().rHC1vT2rq33v5O).ThdDupS()) if ( new gVvKunDzS()[ ( -new L().ZFq7wzk).C()]) {
void uLukz;
}
while ( ykcNfj4KO7M_Nk().ygqFcRgO) return;
{
boolean[] GixyQuVC6;
new m3h().VH;
-!this[ this[ this.TvushKDkRyziQ]];
;
{
;
}
{
_pH4lm4NGCHhi ni8GH_;
}
int[] X4ZJcs;
!!false.d3hIraXwdvJf();
int QPXNhUOZ;
void[][][][] GLel882zQkTFz;
new FY()[ true[ -true.vIuCyFHCxi]];
int[][][][][] NGHceRrdkv;
int eZFm6Smq3saB;
if ( false.sEIihR6VyhP4()) --true[ -o6vkkoL.qWceEg()];
}
iTrhvDiHdfYbGk qfQDprRUmbEdh = !null.A30kzt() = !7420.VKH3d7Vmahzc;
void[][][][] IBvPXVUmgY9Nn = -xdqfxty_.VxXEtx4Z0SSUe();
{
void[][][][][][][][] zh6NkuB1SsXGYd;
{
int[][][][] c5;
}
;
true[ !true.NMjFkGiAGWuzKD()];
int[] BSlXVv;
return;
void MgzIAuPsC;
void[] BL8MYIquNBS;
if ( !null.Y29D()) while ( !-null.MaQx()) ;
}
{
void[][][] UHL76NZpHS;
int g3P2K_vhyrw;
return;
RzlOAE27Qq[] fOrvtee3dLqAl;
while ( -true.KPyyH2seocNGGR) while ( -new Azepsm7Bj[ Ho().rPko].kb73aIQ2phHhO7) return;
boolean wsNjl4H;
boolean[][] SNEJqI4;
int[] K;
LU5U1D_VayQK fQ;
return;
while ( false.gP()) new f54cGYDfc().dpTsu;
return;
void XJU_p7R0LUW;
P3GYoYPzc1xY[] owwMA;
}
_dlrPnC woGi = ---true.Htk6qW2T;
while ( --new E8eFbB()[ new boolean[ new boolean[ -YFkAoiMNXeC[ !new boolean[ -!!this[ this.fSx0xvO6bFS()]][ --new FzVlbOrK38[ -( mKeEn().PZ3e1XUdieq).vWF].Y]]][ -false[ --!-PHa8Okc().E4vt()]]].Madi()]) {
void[] pZ5LCCOo;
}
while ( -new KJtLi().l_yJCLujv31t()) !-687383114.USsu3r2rJlp1e;
{
NKsybMTICmgHes[] RQg2;
int N;
;
{
while ( new IjnZuHk[ !true.tJpyS][ new VJe9Af33eEM()[ -new tq().Ai8QjY2_HFawD]]) return;
}
int uS1iA4OgKh;
int[][] JHTESnd8uczG;
void[][] oG7W;
int[] EMh;
int[] V3EdDcgE7dcH0;
void t;
GnrtZN[][][] e_xRfxIF2qfy;
void[][][][] o2PvG2B7;
}
FXvIV2Qq SONtWFrJC = !MIXpkZZro4()[ ( -!--!!-new boolean[ !new kIkYng()._k380cV5M].SoDijYg2())[ !new hXSHvKwc().hhSmA8sm()]];
}
public static void jNE (String[] h) throws Q1eCZD1 {
;
b[] SlR;
-!!-new WUlc0p5Vyqihk()[ M3Abj0uVUs6jfx.lWiAWjr73CWBT()];
boolean Jqkq6Jbm = !-!null.U3bdzNsVkF;
void c7 = true.spXf1QXqNf7fv() = !--( oSPSQ()[ null.r7d]).iYlobfqT2yY4Zf;
boolean[] K4DsUwVcjSj;
while ( null[ 576[ !abb8NdY[ 1555.OnYG]]]) if ( !---this[ !true[ true.xjI8BR8b9ml()]]) !!G84xqPqcidS().nGi();
{
boolean[] kCKJveNrhqCe;
return;
void[] _4ReV9Vr;
if ( -!-!( null[ !this.k4JIK9])[ --this.QRIGydSKNeSWsL()]) return;
{
return;
}
;
void mTFHINYARX_a;
int fkp4;
{
{
boolean[] Z9;
}
}
;
}
if ( -false.PXfalafa0) {
Jn[][] V9zbJTHVAi;
}
void[] Tf;
;
while ( !new boolean[ !JPsjV94().HIXE9R()].SxFK0qNw) {
AQDC[] mXeoMs5ceD;
}
;
boolean KZN;
void[][][][] y2ahX2E2FJVx4f = jDHneUnyrXP[ ( -!hrM[ 88.XJ])[ -false[ --new lHk03().zhrrndtf9bo]]];
!new NK[ ( !--new WoE3odQONVf()[ false.Y9C7Qg]).HTlnyQjL90].Z;
return;
}
public F8jvC0 t0Kz5GB (int iNwGib5BdSl, int[] iqT, int[] yi8E3W_) throws gWkagx8 {
boolean[][][][][] QS6NQJMbKRhNRE = !!-!!new fpb().gBvqUJ = !false.l1WdTvf3g40c3;
boolean bbHTQR1tux6Lb9 = false.b = this[ QNuMZqkZlp7T()[ false.Vn_6csVhUI]];
-true[ false[ !false.k()]];
}
public Tzo63[] T4gVrhQpAzY (void[][] LSLAmlCpAOi, void baU__80rBunJ6W, int uKiuIeVX, boolean ZdaWNCYMkesVWR, int[][][][][][][] xcXAY49SuNShX, void[] aYU_x9zrEze) {
if ( ( !!-zJmO9W09P_I.gELsOgtW_()).HxGDuq) if ( !01011662[ ( -new int[ new void[ --true.g7a4aDVGdD()].z()].iz9yy0B2yM)[ KllyIg3[ -u1I[ !!( !-ojwuT()[ new int[ new void[ 30170897.SNe8qx()].LEk9pTT4][ this.WA5u_pjFm]]).wtee]]]]) while ( !t3.OT()) return;else ;
if ( XqMf4()[ this.l24n8nqUsW()]) while ( !false.rXlMVwZq3avJ) {
int[][] a6D5Btu8tU;
}
void s = new boolean[ null.pOYs0UI].iCRniB();
if ( ( ( ( new void[ -!y().VelKgLO2Q][ !this[ -!0443588.lKMjfGy()]]).BEjNemy5nG7g())[ -!!-563795130[ false[ !!-null.i]]]).zl()) while ( -!E8rA2zxs6aHT().Bfl) {
while ( false.yBkFltxw()) if ( !true[ null[ null.jbxep9iR4ov7()]]) {
;
}
}
return;
if ( false.eWIU2bjn()) ;else {
{
void bX_;
}
}
boolean[][][][] Na5c1cDB = 064813676[ !-new b7SzID().H] = true.EVrMCLHgDq8pZt();
return;
void[][][] x8nuAdMjjb0;
if ( ( -( -null.oaOMHWR).bFlIUAlEZK6t()).aoYZeqql()) {
;
}else while ( 7.e_c()) if ( !!!-false.jy()) while ( !QLEr().aNwOH25KFF) 4822561.bKIo48hl;
return;
{
while ( null.todd8GDJd()) while ( -UEp()[ false[ false[ new int[ new j()[ !!!( !new int[ !!true.s8n].oLdn()).PfcX9yM()]][ !GerdjEv.NJnPv0hzRddoO()]]]]) return;
void peVOgrzZ;
void oLfBnR9BG;
boolean sFNXe0n;
{
if ( new G97i[ -vbt38.mUPLVT()].uW6kEzVC) return;
}
;
return;
while ( !--ePnboZaaHn5GL1[ --63947[ null.t]]) return;
}
return -false.umJM();
int[][][][] J2cDI6 = -!JUwryhgKT3.Zt = true.RhdH();
{
{
boolean QdIsIXj4fz;
}
!!!-vY8c2Mc6XG()[ -!-false[ !-new boolean[ new Mj()[ !!rgXTBegMj[ !!-!false.XUZB6Z3Dtseog()]]].VduYFAofGRe()]];
while ( -3[ !!( !this.YCM()).Dhtlct()]) ;
}
if ( true[ false.cLmaBs_FVgksf()]) if ( this[ null.w5kh6oV]) -qEMJONeKNxFjS8().I4();else while ( ---921[ new void[ new int[ false.yEkxluqy19vWyr][ !QoGabe30rbA()[ true.gF()]]][ new Ym6uev().G1d()]]) while ( 9.wIVJ7KOY4b()) if ( ( -null.haWJEqnbmgyt9)[ !( !new WR9().wpHC4BrGKgDJ)[ new XrSQsG()[ 10[ 7[ !!--!-!( !!2560.X8D).Xl1nw()]]]]]) ;
int[][] kWg0KU = !!!( 4.inoWr78zUi9vEH()).bqRZx0rbB = true[ !!null.HPiXPwf2xjHuL()];
}
public static void lbbHLEC2C7JYK (String[] RqkC_H) throws kkfJ {
{
-false.olgudB4kSzbys;
void[][] Q;
return;
void ozXfq3aSMx;
void Ey5NGRb;
cd[][] aU49M_Jd9v7p;
;
void[][][][] igSjZIapyZ5;
return;
if ( --new void[ !false.qo][ this.WW()]) 39661.ZfCl34I6d0E;
if ( -false.pIpwxsf()) true.a_CenKcbMsz;
{
uDi9pnAR[] V1WvYGUl;
}
void[][][][] OKvf7rH75HUE9;
void q4WQv6cPe;
return;
{
boolean C6MQzTr;
}
if ( true[ !-dPn().iQfMIc6KWS]) ;
Sjo9qEWlA2fpE8 m1ip7tK6ZmUS;
boolean zNaXOwvn94D;
}
int rcvEGPTfnqZYQ = !-!-!this[ -( this.CHL0yo3kiXZb4w).SF()];
int OA;
}
public void n_aaZUjbqK () {
while ( new hbO3VU1M().leSWjjp) return;
Lg_pfhJ01 WoMVwMiHXaYc = -this.aoyeMDjl;
return !new EElz7I5Iw().kuvpsr1d4wvdr;
while ( ( ( TvKme.Ta_PnbIU())._sM)[ !!true.fPsSgwTky8dz]) ;
kWJ9YY[][][] ZW0wwcPDlPW = P0VMw().H1fxHs8lC() = Dh57nue0oOBZA().k();
void[][] HTokxRa2DVr2 = false.T();
true.u8_CRsR();
}
}
class Xp6 {
}
class Y {
public void[] G6MLlvJmXh (int _3_, vQZgH1U59T7b a9b, void[] B83) throws u {
void[][][][] S = -!true.shgmaIaOXGTf1;
nc9PAR[][][][][] mc = -new sOZy()[ !false[ !--false.wbK2HJR]];
}
}
class oVIFQTUtj0c4I {
public boolean[] h;
public AHpKvIvuHk[] Rpz;
public void gk (uuHduJRERHvG f8fWra8r5NG9, void[][] evwQnMxe, boolean BHw_Vd_OqC, boolean[][][][][] syMbG1ki5, int[] JNpvON5G_ZQ, void[][][] PwghyHwT, boolean[] D1j) throws DCWRVS {
I[][][][] tXawKYtuCe3;
return;
if ( !this[ !!-gCd1RPlT71()[ -V1Jn[ !W.tyF]]]) !this.Y5kLvl();
return;
while ( !--!!4484662.rge3sVPm) return;
MV82cl9 zuB = !this[ !-jsTyP().jSlOlGw] = new Q8i().jHlst();
return;
null.Nw;
void d8uN = -false.a3YzhQf77mF = !null.pY9ZHxj_rFZXoo();
zjh tHiiW;
boolean IqPbQtad76Iy7 = this[ !-!!Wx6XtQKc.t8vyZ44F] = !new void[ -!new F9L1gY().B()].yn2m4bsmz;
return;
{
UnlKzfCz5[][][][][] H;
this[ new I_Cz6()[ !-!null[ !new WTtkPNyCchNL().J3Nn_hSt]]];
;
vhWbWG03az1lqs().RJwD9Y2AfUWC;
if ( this.R_JswRBQazWUv) if ( -!580481606[ -false.nl]) if ( !!( this.M79).KERGXU) {
int r;
}
aWp0X4XJ GAFxfC;
int[][] Iwl;
int[][][][][] KdHOcK27terME;
while ( !null.RDp) false.kFm3q4ya2;
int[][] pblSk19yP_;
return;
boolean[] _HM3u0r6Rd;
{
if ( --en27DgsDu57to()[ !null.MFdEkexOYHZYP_]) if ( !( -new fCSstqZGlcUA[ null.DfZsCqYTpfRQ][ -false[ true[ true[ S3QON()[ -!-this.EDmcVxjV]]]]])[ !!--!VkI3Ue5nCP8d().qkh0VKuc30Jwf_]) return;
}
}
fWGZnBPihN[] qovrQJST;
return;
}
public int[][] rn () {
ExmQ peDxGAHrHl3UWv = -new nOxtajV()[ ( !!!-new a().kllj9nTj).wguKVpcn()];
while ( new void[ -!null.lI0d].II8XxG1()) !true[ true[ !---5669.h]];
;
;
void[] XpMm0;
int[][][][][] usd6JqWBwiDf = new YHLO_KqcgU1Dp0()[ -!false[ a.q0Jn3R1XOk]];
return -!-04740.KnMU1LnQoeyHM();
boolean rmw = -!!-new or[ this.m09N69Uwts9ol][ !!!-( ( -g8jWWk().vvb5_fCnPJVW)[ un3RICfxSDQ()[ -!OtFMBO9N_9m3q.qHiGLj]]).mTnTo5kitSHZ()] = new void[ 2[ false[ !this.uE9stMXAeKE_6()]]].Q5B9uIjlljW;
!!--!-this[ ---!!null.DWYifaZAVN8cw];
return;
}
public int Jk9aW (lY l_XLWI) throws zGd6KdMd1Cx {
if ( this[ --new EgYS3b7tCJkqD()[ !!null.wARiMOUS]]) while ( --this.pmycS6NjCqCR) while ( new int[ -( ---new void[ !!-!true[ true.ZnedH9CE]].NCXfl0sfBu).SRi9WXNB][ false.BRfzS]) {
_TgccPdMkW9[][] u;
}else ;
void[] _03JQHK8o58P = false.KJGBrJF6BL9xhP;
boolean L6L4SQR5VW4p = false.MdTwwNfaDfTq4X;
SxX52V0VUQhOt cO122;
{
void[] hWuh6WU;
-!!false[ new Qm6BhEZ()[ -!true.np6Lc]];
-!-null.ClVDxTec9Jd();
!false[ --this.uHj()];
void[] Qd;
I[ !-true.ITdqIx0Fgbpu9G];
boolean NZ3EArrAU;
void nWrNA;
int qRFo1lwPa;
while ( new boolean[ !V()[ null.aAR5PDxxpID1_6]].GbLsVNrTG()) ;
-null.lTguPo3bIKCZkz;
void[][] H7GIP1CE7hyfXP;
if ( --new void[ -new Re0().qSR7w4oq8b].G()) while ( -true.eieMExmaH) ;
787841.QZIvJU();
if ( !se_HjsHiorj()[ ---new boolean[ new boolean[ new Bo().Oh].m8NOMlDoug()].qhGi]) if ( true.khwaB6mCmm) while ( !!( !( null.LQz)[ !false[ KB[ a0gJ().cAg()]]]).yn8hkaZ()) if ( !-( ---null.R)[ ( false.DYR0BPwSXNnGRS())[ ----564.dpbfKGahvD()]]) if ( ( !---!--true.yObaMutS1())[ --Kod7yvt28().QzZC2]) ;
boolean PL;
boolean h8fGf0k5Wt;
;
}
boolean[][][][][] loCkz3ztMAUAn = !-571158.PThFaExThF7;
int[][][][][] pPIO9 = this[ 0.yJ2];
WUwXLRF V8;
int uxlt;
boolean[] BEVL91;
void WdEzLQDdXQK;
boolean[][] JT9cfe = --!!OVlXFhhle.iQbWiXrUkGl = ( this.VM).CV9ovRkS8I;
void[] f = !new B2B_dxpuE()[ !p5opPMBUOBcKXJ.zTI3cLt5n6eP()];
void n = null.bFannHP = true.YLbYE;
MCD[] U = !new lyzcFZ5tNbt().ahLYg;
{
{
boolean[][] CpxDDSrnd;
}
boolean YL4Y_ofzOs;
DlPaT1 h3vOCr0wOH8PB;
return;
while ( false.IYX()) false[ -!--!new KY[ --null.bQU][ -true.O_2EZhuuUqd()]];
null[ !-this.D3Dz2fJ()];
return;
boolean st3Tln;
}
while ( new semxiyZ9oR4fJ().AvX4hPh3CQFf) !!!!-new o0cY4ZAHoFKZB[ ---!!-!new fJMPl[ -new rHITpnhenX0UOC()[ -!this.I17()]][ !1655.Au2xmZ3iS()]][ true.Lv()];
}
}
class DBrrmD9rZkB6 {
public static void K23FhU64md_cao (String[] ZCxQoMnEyv) throws TrsUzJ7gLlh_ {
if ( !!k6hUTOzoEoBy6().Pa7QQhgI()) false[ true.pKgO_6()];else if ( -!( !!new MfNQ().J8aIAaa()).YVtkEpCOqq19()) while ( !true.bFK()) if ( true.N) return;
XPeXFI JR_oxqyv = F._mmT1v7qyvby2 = !new int[ QYFZXWZJQ.mHlnv()].l;
int[] GqwwGRgi;
int WEpgBuSkdDpws;
f1kvSFuO8 _HWXty2M = -false.IVkapo() = !( !!!null.fYnup1KG3lYpRh())[ -this.vexqwtUY2Ceq()];
boolean[][][][] dgYrHTWD2AoK;
;
{
void HX_5OCL2hZ4T1A;
int[][][] HKV;
return;
}
!-56722.wHLaY();
int[][][] MdHQ;
XLtGahB XD;
null.bir1TCVu();
return;
cFprGM3DaZg Q4t;
void[][][] MT9yI;
if ( -false.XtPVpWQBkp()) return;else return;
Drpkq73 Ux7UVm4N0q;
while ( _cg().l43O()) {
boolean[][] DJy;
}
;
if ( -new o6Wa__gN9s5().MZyc4JM93KyG) ;
}
public static void D6LcbKWNY (String[] tKgjQR5LU97) {
;
boolean[][] XAZup = !this.qBHd1XF73hsn65() = false.edO2PJvZe43VL;
int[] JPPolRzJEU;
Kh[][] N;
{
if ( ---bKnwgZ4a().Hkq4hOvn()) while ( new NM().EnqMbF7CWD8()) ;
_IMsD_VFaKx0A().Nnik8VCOGqOLV;
eWcj79yHBO[][][] CeF4l8Vjy;
while ( !!cYon2.uYC4uTY0qFrZ) if ( ( -false.HGKhczFt())[ !foYZy().IdHZ3j0LC()]) return;
;
int[][][] Ow;
void zZ3gFCKySvFV;
;
}
}
public boolean[] mhXKrI3Z;
public fD_e[] ncB0UL;
public static void Me (String[] ePK) {
int XlfVg9JqVthpi;
return !--!-( -new CJS7eih[ null.F()].EEvxfka5e()).q4ZY();
;
boolean[][] kbBWEs = !false.NhEfCHbzxfrI();
int ddbSgO9T7cn = -true[ new C().mDbJlWM1R5wCM4];
m64 FnKz2Md1ZtW8s = -false.k1oKVU2q;
SC7GghnQ80 VCUk7F;
d0QLmjRHWX5kdJ b;
_nAAkcbWMQ[][] TuVUinWXA;
Wb9hCNrkj6I MqhkubLXb_mvM;
while ( AiA_h0WbZd[ aUhM0().ucL4x_pOEQfkWH]) -!new void[ false[ wOkv.dzPOC]].fPoHrY();
void[] wkI = ( new GT2RLebKg7a().QjP_d())[ new kyM3()[ new DioWepKA7J()[ new AcArAdMXh[ !!--null.vRFXj1u6Tt()].pKEFbG]]] = this[ false[ !!amZG0Z.vi()]];
f[] imhEK2qH7Ie0 = ( new int[ !true[ 622007365.P]].SvIwGh5V30).vdBYhQ6opdotS() = KhFCjvaZz.fahUSI();
if ( !_BgL.JXLRgTU()) while ( true.yJMP_) while ( ( !-!new MBgH().qm_1B3L3ge_a)[ true.Mero_4iI]) -false.E30P;
}
public wezH8XklVEPG[][][] GRujsN (boolean u53oQU, int yoSUtSVkKIggnK, ZtPCKcdMvJ[][] gWKlI, IdNnwwk[] JHFMdYCSEc, boolean NUJvpMY5HzP, yLgB6o cL0g39I5SAl, boolean E) throws J2KCTt_ {
kZurFz31dDLcmF bptaRk = !-true._UHJQUIDNpQ();
GL48n_7gK[][] SBlTVvfYGs = !!-new akAsjSsJP().T0m49FGdke_R = dAw.hF9ivY_();
return !4197966.JWLDY3Xcy08();
boolean[] _JLEgk1 = false.E7X_CrpY3wQpZW = !!!!!!this[ this.cExVRVa1nDYO1];
this.P10();
boolean[] Y_NL9yNyC = !L0Ny.BCM9V_fv6Jthq;
boolean[][][] mIIZ_AIZ8SwL;
void insk = !-null[ !!-true.aYlZPp()] = Nx4fmsd2FhnDY5()[ --!vYFk741[ !!uVC6Jt.z()]];
void[][][][][] f9;
}
public U01H XClRzvOyyqPo (int[][] hQR, int[] gem9Fo3Zy, int NKqoqR6Ny, int q, int nem6) throws phz {
new boolean[ !-!!!new E().p75Dq80w7Q][ !null.a9];
while ( !cSF1uQkzDNY()[ --yKpSOzGWUX()[ new VA2RKVgRbF().ZMSN]]) {
Awx_Hn Tf9jZDKTU;
}
void Th8lYT1;
{
while ( true.lvrqL()) return;
{
void T;
}
}
int[][][] ctUWU = --this[ --new _787XZBwRUxi().CoH25n()];
void[][] _OGJ4W24pi = !-!this[ --!null.vR0WRmdTRJYz_()];
;
;
while ( -!false[ gQ()[ !-!new boolean[ !--null[ !--!( !pQsQfCl().hp()).jbLE()]][ new g().y0B]]]) return;
{
if ( null[ this.AhLVh93nyxuRXz]) while ( !!-null[ false.tsQ6gMfJTPziaC]) return;
void[] NeD5W;
int zpEU;
while ( !---null.B0a) ;
if ( -( !9382112.R4ezqaGdQOOPR()).RTl9V8ZqWevWu()) ;
;
003[ !true.J_2k6Sfv4Q7Di()];
{
-iZc97_V3Ts.U();
}
;
{
IW6c[][][] sq249GMxS4zhtq;
}
;
void[][] BkQfw3C8rn0A;
void[] EWyACU;
mG USDE6;
boolean[] CFQ9Tk;
if ( !new P().fveuC()) {
return;
}
boolean AFzcELBADv;
{
void QMUVCmn9;
}
void GS;
}
}
public static void VoTatdnmmuA9 (String[] t) throws m5 {
boolean[][] uCAUgNpkGVt = null.rhKawBm0aUSwI_() = new boolean[ !V()[ 72287863.fTB]][ true[ D9IQDfS().AKw]];
e00[] _IP_DLyDcwYC9;
{
if ( this.b_eQkWjtF6l()) if ( !new void[ 6970.jD()].Zx7qc) ( -!!-!new LbhYGF0[ false[ ----VE5Hm1nNO()[ 834.tmGKj]]][ null.MQ()])[ null.UtIsCAA()];
while ( null[ -( -( !new tkEmim27O()[ !new qH6d()[ !!false.Z]]).f90y2KCml0WIw()).x]) {
-!!true.GZhN4N4g();
}
boolean[] w_au7V;
}
return;
if ( --v68E5tRCfzi()[ -new void[ true[ T[ new Rx42k()[ this[ 60961.reaN()]]]]].j__M8poy]) while ( -true.jZQm7()) {
;
}
boolean d_Cjtp7sog = !true[ -!new boolean[ null.IsUwZ3Q].mz5BGt3()];
int[][] vvV9M6tjBMs;
void SPfIXeqcGT;
EYk QTzDW;
d6 FNyXZQihHisF_ = true[ -!false.FqqjnUAwla] = -true.D();
PHjtu7 e = false.Vq3Wd1FKxBseA = null.OFDpw1t();
while ( new seLY2u89_uu().WndCr) ;
void bE;
{
return;
}
if ( new void[ ( -!!Gft4T.scR_4x7()).CV8PcjA()].pOo5SLbHD9go) return;
X5BPNUmlW9Ha P6DSGZGxf2f;
new boolean[ !new a4().Xo2dW][ ( -!-!!!null[ --geCEmY.ezbIpr2x4u]).nYtQZVv7];
_Vr9ADt mdIyLnp0iVfGxJ = 0175[ null.Bs()];
return new NZSiARqmOX5()[ -567.sbO2xNWjOOtC()];
{
return;
void I8r;
boolean PVs6b6g25NdxMW;
}
}
public static void Qcso8M5Bm (String[] V0FzdOESEsa) throws tyWTq51rPR6 {
while ( false.pfKn8EVy2n) !-!-true[ -!!!-this.OEV8ju6t2EYOhE()];
if ( true.q3m24L) ;
int t8khEur89RKcV = -!new b0y().UMOBTH525HaJR = -!( null.UW88H).DvpUJN9R;
boolean[] BofOFfc9Sr0fEt;
boolean[] ziq9dHdB = !-( true.qd0JxGaaN4uKHL).kpgs = new GCl8MB5h().DEuzoZFVi;
{
void E_YY3q_AwyrNiZ;
if ( --EZmQWL[ !new K().eYB5dcM]) {
return;
}
void TONwOhTUGZ5;
void[] _GJmJ;
hO6k4[][] s;
while ( ---true.QJy20ms()) LVInkyhr3AJ()[ !new boolean[ true[ yvnXqy8uutk().tDlgfX()]].oa()];
}
a4Kdp41vdQy35[] Io;
}
}
|
package cn.kastner.oj.controller;
import cn.kastner.oj.dto.TagDTO;
import cn.kastner.oj.repository.TagRepository;
import cn.kastner.oj.util.CommonUtil;
import cn.kastner.oj.util.DTOMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import java.util.List;
@RestController
@RequestMapping(value = "/api/v1")
public class TagRestController {

  private final TagRepository tagRepository;
  private final DTOMapper mapper;

  @Autowired
  public TagRestController(TagRepository tagRepository, DTOMapper mapper) {
    this.tagRepository = tagRepository;
    this.mapper = mapper;
  }

  /**
   * Lists tags, optionally filtered by name.
   *
   * @param name optional substring filter; when absent (null/empty per
   *             {@code CommonUtil.isNull}) all tags are returned
   * @return matching tags mapped to DTOs
   */
  @GetMapping(value = "/tags")
  // Fixed: handler was declared private; Spring MVC request-mapped methods
  // must be public to be reliably invokable through the proxy.
  public List<TagDTO> getTags(String name) {
    if (!CommonUtil.isNull(name)) {
      return mapper.toTagDTOs(tagRepository.findByNameContaining(name));
    }
    return mapper.toTagDTOs(tagRepository.findAll());
  }
}
|
/**
* Translates the string to another language
* @param stringToBeTranslated The string to be translated
* @return The translated string
* @throws InvalidSettingKey If the translations are not yet read from the file
* @throws NoSuchStringToTranslate If the string to be translated does not exist in the translations and fall back translation file
*/
public String translate(String stringToBeTranslated) throws InvalidSettingKey, NoSuchStringToTranslate {
String translated = translateToSpecifiedLanguage(stringToBeTranslated);
if(translated == null){
translated = translateToFallBackLanguage(stringToBeTranslated);
if(translated == null) throw new NoSuchStringToTranslate("The string you passed to translate does not exist in the translations nor in the fall back translations file");
}
return translated;
} |
Two-step epitaxial lateral overgrowth of a-plane GaN by MOCVD We report on growth and characterization of epitaxial lateral overgrown (ELO) a-plane GaN by metalorganic chemical vapor deposition (MOCVD). The ELO samples were grown using a SiO2 striped mask pattern consisting of 4 μm wide open windows and 10 μm or 20 μm wide SiO2 stripes. Different growth rates in Ga- and N-wings along with the wing tilt create a major obstacle for achieving a fully coalesced flat surface in ELO-GaN. To address this problem we have employed a two-step growth method that is able to provide a high height-to-width aspect ratio in the first growth step followed by enhanced lateral growth in the second step by controlling the growth temperature. Depending on the growth conditions, lateral growth rates of the wings with Ga-polarity were from 2 to 5 times larger than those of the N-polarity wings. We investigated the effects of growth parameters on wing tilt, which was observed to be ~0.25° from the Kikuchi lines using large angle convergent beam electron diffraction (LACBED) and accompanied by some twist (0.09°) between the two opposite wings. Transmission electron microscopy (TEM) results showed that the threading dislocation density in the resulting fully coalesced overgrown GaN was reduced from 4.2×10¹⁰ cm⁻² in the window area to 1.0×10⁸ cm⁻² in the wing area, and that the wing areas contained a relatively high density of basal stacking faults, 1.2×10⁴ cm⁻¹. The recombination of carriers/excitons localized at stacking faults was evident in far-field near-bandedge photoluminescence (PL) measured at 10 K. Moreover, atomic force microscopy (AFM) measurements revealed a two orders of magnitude higher density of surface pits in window than in wing regions, which could be decorating dislocation terminations on the surface. Time-resolved PL measurements for the a-plane ELO-GaN samples revealed biexponential decays. 
The recombination times were significantly increased (τ₁ = 80 ps and τ₂ = 250 ps) compared to the standard a-plane epitaxial layers (<45 ps), and the ratio of the slow decaying component magnitude to the fast decaying one was more than 1.5, showing considerable reduction of nonradiative centers by lateral overgrowth. In addition, room temperature near-field optical microscopy studies revealed the improved optical quality in the wing regions of the overgrown GaN. As revealed from far-field PL, the band edge luminescence at room temperature was more than two orders of magnitude weaker than the yellow luminescence. Therefore, the overall spectrally integrated near-field PL was collected, and its intensity was noticeably stronger in the wing areas with both Ga and N polarity. The much weaker emission at the windows and meeting fronts of the two opposite wings was consistent with the observations of high density of dislocations in the window regions and new defects originating at the meeting boundaries from TEM.
import { Injectable } from '@angular/core';
import { ApiService } from '../../api/api.service';
import { DEVICE_URLS } from '../../coldchain-service/coldchain-url-params.const';
@Injectable()
/**
 * API wrapper for organisation-tree queries.
 */
export class TreeDataApi {
  constructor(private api: ApiService) {
  }

  /**
   * Queries tree-structure data for the given area (city) code.
   *
   * @param areaCode area code appended to the stock-tree endpoint URL
   * @param func callback invoked with the raw service response
   */
  // Fixed: the callback was typed as the banned catch-all `Function`;
  // a concrete call signature keeps call sites type-checked.
  queryTreeDataByCityCode(areaCode: string, func: (result: unknown) => void) {
    this.api.get(`${DEVICE_URLS.queryStockTreeDataByCityCode}/${areaCode}`).subscribe(result => func(result));
  }
}
|
<reponame>nilsbore/rbpf_mtt
#!/usr/bin/python
import numpy as np
import rospy
from rbpf_mtt.msg import GMMPoses, ObjectMeasurement
from rbpf_mtt.srv import PublishGMMMap, PublishGMMMaps
from rbpf_mtt.rbpf_filter import RBPFMTTFilter
from rbpf_mtt.rbpf_vis import filter_to_gmms
from geometry_msgs.msg import PoseWithCovariance
from visualization_msgs.msg import Marker, MarkerArray
from std_msgs.msg import Empty
class FilterServer(object):
    """ROS node wrapping a Rao-Blackwellized particle filter (RBPFMTTFilter)
    for multi-target tracking.

    Subscribes to object measurements, feeds them to the filter once all
    targets have been initialized, and publishes the filter's marginal GMMs,
    estimated poses, and a readiness signal.

    NOTE(review): this file uses Python 2 syntax (print statements,
    `except Exc, e`) and therefore requires a Python 2 ROS environment.
    """

    def __init__(self):
        # Output topics: marginal GMMs, estimated target poses (as markers),
        # and an Empty "done processing" pulse after each measurement.
        self.gmm_pub = rospy.Publisher('filter_gmms', GMMPoses, queue_size=10)
        self.poses_pub = rospy.Publisher('filter_poses', MarkerArray, queue_size=10)
        self.ready_pub = rospy.Publisher('filter_ready', Empty, queue_size=10)
        # Number of tracked targets (ROS private parameter, default 2).
        self.nbr_targets = rospy.get_param('~number_targets', 2)
        # Filter constructed with 100 particles and 4 (presumably feature-related)
        # dimensions — TODO confirm the meaning of these constants in RBPFMTTFilter.
        self.filter = RBPFMTTFilter(self.nbr_targets, 100, 4)
        # Per-target flag: True once that target has received its
        # initialization measurement.
        self.initialized = np.zeros((self.nbr_targets,), dtype=bool)
        # Both real and simulated measurements share the same callback.
        rospy.Subscriber("filter_measurements", ObjectMeasurement, self.callback)
        rospy.Subscriber("sim_filter_measurements", ObjectMeasurement, self.callback)

    # here we got a measurement, with pose and feature, time is in the pose header
    def callback(self, pose):
        """Handle one ObjectMeasurement: initialize a target or update the filter."""
        # Split the message into a 2D spatial measurement (x, y) and an
        # N-dimensional feature vector.
        measurement_dim = len(pose.feature)
        feature_measurement = np.zeros((measurement_dim,))
        spatial_measurement = np.zeros((2,))
        spatial_measurement[0] = pose.pose.pose.position.x
        spatial_measurement[1] = pose.pose.pose.position.y
        for i in range(0, measurement_dim):
            feature_measurement[i] = pose.feature[i]
        # True only when every target has been initialized.
        is_init = np.all(self.initialized)
        print self.initialized
        print pose.initialization_id
        print self.nbr_targets
        if not is_init and pose.initialization_id != -1:
            # Initialization message: seed the given target with this measurement.
            print "Not intialized, adding initialization..."
            self.filter.initialize_target(pose.initialization_id, spatial_measurement, feature_measurement)
            self.initialized[pose.initialization_id] = True
        else:
            if not is_init:
                # Regular measurement arrived before full initialization: drop it.
                print "All targets have not been initialized, not updating..."
                return
            print "Intialized, adding measurement..."
            #if pose.negative_observation:
            #    self.filter.negative_update(spatial_measurement, pose.observation_id)
            #else:
            #    self.filter.single_update(spatial_measurement, feature_measurement, pose.timestep, pose.observation_id)
            self.filter.single_update(spatial_measurement, feature_measurement, pose.timestep, pose.observation_id)
            #self.visualize_marginals(self.filter)
            #self.publish_marginals(self.filter)
            self.par_visualize_marginals(self.filter)
        # Signal downstream nodes that this measurement has been processed.
        # NOTE(review): indentation was mangled in the source paste; this is
        # assumed to run after every callback, including initialization — confirm.
        e = Empty()
        self.ready_pub.publish(e)

    def publish_marginals(self, rbpfilter):
        """Publish each marginal GMM on the filter_gmms topic."""
        gmms = filter_to_gmms(rbpfilter, self.initialized)
        for gmm in gmms:
            self.gmm_pub.publish(gmm)

    def visualize_marginals(self, rbpfilter):
        """Render marginals by calling the publish_gmm_map service once per GMM."""
        gmms = filter_to_gmms(rbpfilter, self.initialized)
        rospy.wait_for_service('publish_gmm_map')
        for gmm in gmms:
            try:
                publish_map = rospy.ServiceProxy('publish_gmm_map', PublishGMMMap)
                publish_map(gmm)
            except rospy.ServiceException, e:
                print "Service call failed: %s"%e

    def par_visualize_marginals(self, rbpfilter):
        """Render all marginals with a single publish_gmm_maps service call."""
        #gmms = PublishGMMMaps()
        gmms = filter_to_gmms(rbpfilter, self.initialized)
        rospy.wait_for_service('publish_gmm_maps')
        try:
            publish_maps = rospy.ServiceProxy('publish_gmm_maps', PublishGMMMaps)
            publish_maps(gmms)
        except rospy.ServiceException, e:
            print "Service call failed: %s"%e

    # This should just publish a posearray, which can be displayed directly
    # But how do we know which pose is which? Maybe it would make more
    # Sense with a marker array with a label for each marker
    def publish_estimated_poses(self, rbpfilter):
        """Publish the filter's estimated target positions as sphere markers."""
        markers = MarkerArray()
        # NOTE(review): uses self.filter, not the rbpfilter argument — the
        # parameter is effectively ignored; confirm whether that is intended.
        poses = self.filter.estimate()
        for j in range(0, poses.shape[0]):
            marker = Marker()
            marker.header.frame_id = "map"
            marker.header.stamp = rospy.Time.now()
            marker.ns = "my_namespace"
            marker.id = j
            marker.type = Marker.SPHERE
            marker.action = Marker.ADD
            # Targets live in the 2D map plane; z is fixed at 0.
            marker.pose.position.x = poses[j, 0]
            marker.pose.position.y = poses[j, 1]
            marker.pose.position.z = 0.
            marker.pose.orientation.x = 0.
            marker.pose.orientation.y = 0.
            marker.pose.orientation.z = 0.
            marker.pose.orientation.w = 1.
            marker.scale.x = 0.2
            marker.scale.y = 0.2
            marker.scale.z = 0.2
            marker.color.a = 1. # Don't forget to set the alpha!
            marker.color.r = 0.
            marker.color.g = 1.
            marker.color.b = 0.
            markers.markers.append(marker)
        self.poses_pub.publish(markers)
if __name__ == '__main__':
    # Start the ROS node and hand control to rospy's spin loop;
    # all work happens inside FilterServer's subscriber callbacks.
    rospy.init_node('test_filter', anonymous=True)
    fs = FilterServer()
    rospy.spin()
|
<gh_stars>0
namespace vaca {

/**
   @page page_tn_008 TN008: Unicode support

   @li @ref page_tn_008_string_literals
   @li @ref page_tn_008_convert_strings
   @li @ref page_tn_008_unicode_chars


   @section page_tn_008_string_literals String Literals

   You have to use the @c L suffix to define wide-character string
   literals. For example:

   @code
   String str = L"Hi";
   @endcode

   This creates a wide-character string literal that is used to
   initialize the @c str String.


   @section page_tn_008_convert_strings Convert Strings

   You can use the convert_to function to convert an ASCII string
   to a Vaca String:

   @code
   String a = convert_to<String>("Hi"); // Use L"Hi" for literals
   char* b = strdup("Hi");
   String c = convert_to<String>(b);
   std::string d = "Hi";
   String e = convert_to<String>(d);
   @endcode


   @section page_tn_008_unicode_chars Unicode Characters

   You can use the \\xNNNN escape to write Unicode characters. For example:

   @code
   Char a_character = L'\x0061';            // same as L'a'
   String japanese = L"\x65E5\x672C\x8A9E"; // 日本語
   @endcode

*/

}
|
A two-year-old boy pulled from a Phoenix pool Saturday evening has died.
According to Phoenix fire officials, family members found the child in a pool near 24th Street and Southern Avenue and began performing CPR. It is unknown how long the child was underwater.
Officials say Phoenix police arrived on scene and continued life-saving measures until rescue crews arrived minutes later and took the child to a nearby hospital, where he was later pronounced dead.
import sqlalchemy
import datetime
class User(SqlAlchemyBase):
    """SQLAlchemy ORM model for an application user (table ``users``).

    NOTE(review): ``SqlAlchemyBase`` is not imported in this snippet —
    presumably the project's declarative base from a db_session module;
    confirm against the full file.
    """
    __tablename__ = 'users'

    # Surrogate primary key.
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True, autoincrement=True)
    surname = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    name = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    age = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
    position = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    speciality = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    address = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    # Login identifier; indexed and unique (though still nullable).
    email = sqlalchemy.Column(sqlalchemy.String,
                              index=True, unique=True, nullable=True)
    # Password hash only — plaintext passwords are never stored.
    hashed_password = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    # Set to the row creation time by default; despite the name, it is not
    # automatically refreshed on update (no onupdate= given).
    modified_date = sqlalchemy.Column(sqlalchemy.DateTime,
                                      default=datetime.datetime.now)
|
Ionophore-mediated swelling of erythrocytes as a therapeutic mechanism in sickle cell disease Sickle cell disease (SCD) is characterized by sickle hemoglobin (HbS) which polymerizes under deoxygenated conditions to form a stiff, sickled erythrocyte. The dehydration of sickle erythrocytes increases intracellular HbS concentration and the propensity of erythrocyte sickling. Prevention of this mechanism may provide a target for potential SCD therapy investigation. Ionophores such as monensin can increase erythrocyte sodium permeability by facilitating its transmembrane transport, leading to osmotic swelling of the erythrocyte and decreased hemoglobin concentration. In this study, we treated 13 blood samples from patients with SCD with 10 nM of monensin ex vivo. We measured changes in cell volume and hemoglobin concentration in response to monensin treatment, and we perfused treated blood samples through a microfluidic device that permits quantification of blood flow under controlled hypoxia. Monensin treatment led to increases in cell volume and reductions in hemoglobin concentration in most blood samples, though the degree of response varied across samples. Monensin-treated samples also demonstrated reduced blood flow impairment under hypoxic conditions relative to untreated controls. Moreover, there was a significant correlation between the improvement in blood flow and the decrease in hemoglobin concentration. Thus, our results demonstrate that a reduction in intracellular HbS concentration by osmotic swelling improves blood flow under hypoxic conditions. Although the toxicity of monensin will likely prevent it from being a viable clinical treatment, these results suggest that osmotic swelling should be investigated further as a potential mechanism for SCD therapy. Introduction Sickle cell disease (SCD) is an inherited blood disorder that affects approximately 100,000 Americans in the United States and decreases a patient's life expectancy by 30 years. 
1 The disease is caused by a genetic mutation in the b-globin gene which produces sickle hemoglobin (HbS). HbS can polymerize under deoxygenated conditions forming stiff, sickled red blood cells (sRBC). 2 The presence of sRBC contributes to the key elements of SCD pathology: hemoglobin polymerization, endothelial dysfunction, sterile inflammation, leading to overall disruption of blood flow particularly in the microvasculature. 3 These processes ultimately give rise to the vast clinical manifestations seen in SCD including vaso-occlusive episodes (VOE), acute chest syndrome, and stroke. The complex pathophysiology of SCD requires the development of treatments that target one or more of the molecular disease pathology mechanisms. Given that HbS polymerization is essential in the pathophysiology of SCD, treatments to prevent HbS polymerization continue to be an area of investigation for therapeutic development. Hydroxyurea, the first drug approved by the Food and Drug Administration (FDA) for the treatment of SCD, induces production of fetal hemoglobin (HbF), an anti-sickling hemoglobin. Though available for decades, patients treated with hydroxyurea experience variable clinical benefit and are subject to ongoing monitoring given its hematologic side effects. 9 Recently, another antisickling agent, voxelotor, was approved by the FDA for the treatment of SCD. Voxelotor stabilizes the oxygenated form of HbS by increasing hemoglobin's oxygen affinity, preventing polymerization when exposed to deoxygenated conditions. 10,11 Research is still ongoing to determine voxelotor's potential side effects and clinical benefit. In its phase III clinical trial, patients randomized to receiving voxelotor did experience an increase in hemoglobin after 6 months of use, but they did not have a reduction in VOE. 
11 While treatments that inhibit polymerization such as voxelotor or hydroxyurea have been successful, therapeutics that target polymerization without affecting hemoglobin oxygen affinity or targeting the hematopoietic niche may provide similar clinical benefit without the side effects seen with these drugs. An alternative mechanism to inhibit HbS polymerization is the reduction of intracellular HbS concentration within a sRBC. Small decreases in HbS concentration can lead to slower polymerization rates that are longer than sRBC capillary transit time. 8,12 Previous studies to reduce HbS concentration include using antidiuretic hormone and a low sodium (Na) diet to reduce plasma osmolality and Na concentration. This caused hypotonic swelling of sRBC and a reduction in mean cell hemoglobin concentration (MCHC), ultimately leading to decreased erythrocyte sickling observed in three patients. 13 However, maintaining the necessary level of hyponatremia was impractical and results could not be reproduced in later studies. 14,15 Rather than decreasing plasma Na and osmolality as a method to reduce MCHC and HbS polymerization, increasing intracellular Na and osmolality may be more feasible. This produces similar osmotic swelling effects and decreases MCHC without the difficulties of sustaining low plasma Na concentrations. In order to study this mechanism and its potential benefit in SCD, ionophores that increase the erythrocyte permeability to Na, such as monensin, can be used to facilitate intracellular Na transport. Monensin selectively binds to Na + ions and facilitates its electrogenic transport across the erythrocyte membrane, creating an osmotic gradient and causing an influx of fluid intracellularly. Previous work in sRBC treated with monensin have demonstrated that monensin is effective at increasing mean corpuscular volume (MCV), decreasing MCHC, and increasing deformability of sRBC. 
These studies provided a basis for understanding the molecular effects of monensin on RBC, however, they did not examine how these molecular changes impact the mechanics of RBC flow under physiologic conditions. In this study, we use monensin as a model compound to investigate osmotic swelling to reduce MCHC as a potential mechanism for SCD therapy development. We aim to characterize the effects of sRBC osmotic swelling and reduced MCHC on sRBC rheologic oxygen dependence using a microfluidic device designed to recapitulate the physiological environment of the microvasculature. We compare the rheological response to hypoxia in our microfluidic device between blood samples treated with monensin and untreated controls. In order to further quantify its effect, we correlate MCV, MCHC, and the rheological response to hypoxia. By studying the effect monensin may have on rheology, we build upon previous monensin studies and are now able to better capture the complex pathophysio-logic changes in blood flow that occur with deoxygenated conditions in a physiologically relevant system, gaining a more comprehensive understanding of the potential therapeutic mechanism and its in vivo effects. Monensin treatment All study protocols were approved by the Institutional Review Board (IRB). In preparation for monensin treatment ( Figure 1A), RBC were washed three times by centrifugation in Buffer A solution (104 mM NaCl, 32 mM Na 2 HPO 4, 8 mM KH2PO 4, 5.5 mM dextrose, 1g/L bovine serum albumin ; pH 7.4, 305 mOsm) with techniques previously published. 18 Dextrose and BSA components of the Buffer A solution were added on the day of experiments. Samples were resuspended in Buffer A with 0.01% EtOH and 10 nM monensin (420701, BioLegend) to achieve 25% hematocrit (hct) and incubated at 37°C for 12 hours (hrs). A concentration of 10 nM monensin was chosen based on previous studies demonstrating optimal cellular effect without increased hemolysis. 
19 Preliminary research shows that incubation in Buffer A between 6 and 24 hrs limits RBC swelling in controls to less than 5% MCV. While the effect of 0.01% EtOH on RBC has been previously studied to be insignificant, 22-24 a control resuspended in Buffer A and 0.01% EtOH was used for each sample in this study. After incubation, the sample was washed with phosphate-buffered saline (PBS) to remove extracellular monensin and resuspended in PBS to achieve 25% hct prior to rheology measurements. Details of blood sample collection, storage, and methods of obtaining laboratory values are provided in the Online Supplementary Appendix. Data collection and analysis Device design, fabrication, and experimental set up have been previously published and is detailed in the Online Supplementary Appendix. Continuous rheological data were captured using a high-speed camera (GS3-U3-23S6M-C, FLIR) at a frame rate of 500-600 FPS (frames per second) at 40x magnification. Blood flow velocity measurements were collected using a contrast detection algorithm developed in MATLAB based on the Kanade-Lucas-Tomasi algorithm. The velocity of thousands of contrasting points per frame were identified and averaged to obtain an average velocity per frame. Representative data in Figure 1B demonstrates blood flow velocity under normoxic and hypoxic conditions for a control and monensin treated sample. Each sample was exposed to 1 minute of normoxia (160 mmHg) and then 1 minute of hypoxia (0 mmHg). This oxygenation-deoxygenation cycle was then repeated for a total of 3 cycles. Average steady state (SS) velocity at nor-moxia or hypoxia was determined by averaging the velocities of the three cycles at each oxygen tension for each sample. The average SS velocity value was used to determine two velocity metrics used for analysis: velocity response and recovery. Velocity response is defined by the difference between the average SS velocity at 160 mmHg and 0 mmHg oxygen. 
The response is normalized by the sample's average velocity at 160 mmHg oxygen tension ( Figure 1C) and indicates the magnitude of velocity reduc-tion during deoxygenation. Velocity recovery is defined by the difference in velocity response between treatment and the untreated control and indicates the change in velocity reduction during deoxygenation due to treatment. Statistical analysis A Wilcoxon signed-rank test is used to establish significant difference between control and treatment groups (n=13). A Pearson's product moment correlation coefficient is Representative image of raw velocity data (below) as it relates to oxygen tension (above) from a single sickle cell disease (SCD) patient sample. In the bottom panel of (B), blood flow velocity is compared between monensin treatment (red) and the untreated control (blue). The average oxygenated shear rate during experiments was 355 s -1, within physiologic range for channel dimensions. 46 In this sample, it appears that the velocity at normoxia of the monensin-treated condition is lower than that of the untreated condition. In order to address the differences in normoxic velocities between treatment conditions, conductance of all 13 samples was calculated to determine if additional variables were present contributing to normoxic velocities (Online Supplementary Figure S3). There were no significant differences in conductance at normoxia between treatment conditions in all samples, indicating velocity differences at normoxia were related to driving pressure. (C) The oxygenated (160 mmHg) and deoxygenated (0 mmHg) sections of the collected velocity data in (B), normalized by the average oxygenated steady state velocity for the representative sample. The representative single patient data in (C) demonstrates a 13% velocity response for a monensin-treated sample and a 33% response for the untreated control. This corresponds to a velocity recovery of 20% after monensin treatment. 
Velocity response is calculated using the difference between oxygenated (160 mmHg) and deoxygenated (0 mmHg) velocities and velocity recovery is calculated by the difference in the control and monensin treated response. SN: supernatant; PBS: phosphate-buffered saline, SS: steady-state. ARTICLE -RBC swelling improves sickle blood flow A.C. Geisness et al. A B C used to describe the linear correlation (n=13). Significance was defined by a P≤0.05. Results Mean cell hemoglobin concentration strongly correlates with rheologic response to hypoxia A total of 13 samples from patients with SCD were obtained and used in experimentation. A summary of corresponding patient demographic, clinical, and baseline laboratory data are shown in Table 1. Several different sickle cell genotypes were included in the cohort. When oxygenation was decreased from 160 mmHg to 0 mmHg, all untreated sickle samples responded with velocity reduction to a specific steady state velocity. When oxygen tension was restored back to 160 mmHg, blood flow velocity then increased and returned to its SS velocity prior to deoxygenation. Similar velocity response was replicated with repeated cycles of deoxygenation. The conductance of each sample at normoxia and hypoxia were calculated in each treatment condition to ensure non-significant differences in sample preparation and device variability between experiments (Online Supplementary Figure S2). In contrast, oxygen-dependent velocity was not observed in healthy, AA, blood controls (Online Supplementary Figure S3A). In order to identify a parameter which may dictate a sample's velocity response to hypoxia, we first determined if MCHC and MCV were independent variables within the 13 untreated sickle samples. There was no correlation between MCV and MCHC (Figure 2A, There is a slight negative relationship between velocity response and MCV amongst the 13 untreated samples, though this correlation was not significant ( Figure 2B, r=-0.13, P=0.660). 
There was, however, a significant positive relationship when correlating velocity response and MCHC ( Figure 2C, r=0.83, P=0.001), as untreated samples with lower MCHC had smaller velocity responses when exposed to hypoxia. Collectively, this data corroborates previous work by others demonstrating that cell volume does not strongly correlate with the rheological response and rather it is hemoglobin concentration that is strongly correlated with sample blood flow response. 32 Monensin increases sickled red blood cells mean corpuscular volume, decreases mean cell hemoglobin concentration, and reduces hypoxia-induced polymerization In order to determine the effect of monensin on sRBC, MCV and MCHC values pre-and post-treatment were collected of all 13 SCD samples and shown in Figure 3A and B. Overall, the monensin-treated samples had significantly increased MCV ( Figure 3A) and decreased MCHC ( Figure 3B) when compared to the controls (P<0.01). The significant effects in MCV and MCHC were also observed when treating three healthy, AA blood controls with monensin as well (Online Supplementary Figure S3B). with HbSB 0. Given that monensin drives cell swelling, measured by MCV, and decreases hemoglobin concentration, measured by MCHC, these results reflect the degree in which the sample was affected by monensin. Given the observed changes in MCV and MCHC with monensin treatment, to further demonstrate the mechanism of cell swelling to reduce sickle pathophysiology, we analyzed the morphology of cells from three additional sickle blood samples under shear flow and controlled oxygen tension using a previously published microfluidic chip. 33 A full description of the device and the methodology as well as the samples' baseline hematological laboratory data can be found in the Online Supplementary Appendix (Online Supplementary Table S1). 
Monensin reduced the fraction of cells containing polymer when exposed to hypoxic oxygen tensions in all samples (Online Supplementary Figure S4). However, similar to the effect observed on MCV and MCHC, the amount monensin reduced polymerized cells in hypoxia varied between samples. Monensin treatment improves rheological response to hypoxia In order to quantify the effect of monensin on sRBC blood flow velocity in a hypoxic environment, we examined the velocity response to 0 mmHg oxygen tension between treated and untreated samples for the 13 SCD blood samples. In monensin-treated samples, there was a significant decrease in velocity response with deoxygenation compared to that of untreated controls (P<0.01, Figure 3C), indicating the efficacy of monensin in decreasing sRBC sensitivity to hypoxia. However, there was variability in the degree of response to monensin treatment across all samples. For example, in sample ID 11, monensin treatment eliminated almost all blood flow velocity oxygen dependence demonstrated by no velocity response to hypoxia compared to a 40% response in the control. This contrasts with sample ID 3 and 5, where there was no monensin effect on blood flow velocity response when compared to the untreated control. In AA samples, there was no change in velocity response between monensin-treated and untreated controls, despite the significant changes in MCV and MCHC after monensin treatment (Online Supplementary Figure S3). Reduction in mean cell hemoglobin correlates with improved rheologic response to hypoxia In order to objectively determine whether the magnitude of the monensin-induced changes to MCHC or MCV affects the magnitude of change in velocity response to hypoxia, we compared the linear correlation between MCV or MCHC change induced by monensin and the velocity recovery of each sample.
By using the absolute change in MCV and MCHC, the analysis removes the variability of each sample's initial MCV and MCHC and controls for patients' baseline heterogenous clinical severity. First, we ensured correlation between MCV increases and MCHC decreases in the monensin-treated samples. In Figure 4A, there was a significant positive correlation between MCV change and MCHC change (r=0.91, P<0.001), in that large MCV increases due to monensin corresponded with large MCHC reductions. This demonstrates cell swelling is an effective method to decrease MCHC. When comparing the degree in which MCV was increased by monensin and velocity recovery we found a significant positive correlation ( Figure 4B, r=0.87, P<0.001). A more significant positive relationship is seen when comparing the degree to which monensin decreased MCHC and sample recovery ( Figure 4C, r=0.96, P<0.001), in that the largest improvements in sample velocity response to hypoxia correlated with larger reductions in MCHC. These relationships reveal that the degree to which monensin affects sample sensitivity to hypoxia is strongly dependent on the degree to which the MCHC is reduced. Discussion In this study, we examined osmotic cell swelling to decrease intracellular HbS concentration as a potential mechanism to be targeted for future therapeutic development in SCD. We used a model Na ionophore compound, monensin, to treat SCD blood samples ex vivo. The samples were exposed to hypoxic conditions in a microfluidic device while blood flow was quantified. Though The monensin-treated group had significantly higher MCV and lower MCHC compared to the control group. (C) There was a significantly lower velocity response in the monensin-treated group compared to that in the control group. Significance between control and monensin-treated groups was determined using a Wilcoxon signed-rank test and indicated by the asterisks (*) denoting P<0.01. 
Sample ID represents the de-identified patient ID corresponding to the sample. Error bars indicate the standard deviation in velocity response over 3 oxygenation/deoxygenations cycles. SS: steady state. ARTICLE -RBC swelling improves sickle blood flow A.C. Geisness et al. A B C previous studies conducted in the early 1980's demonstrated monensin's ability to decrease sRBC MCHC, 19 monensin's impact on dynamic blood flow, particularly in hypoxic environments, was not explored. Furthermore, deformability measurements were made through ektacytometry 19 which may allude to improved rheology, however, monensin's global effect on blood flow was not directly measured. Additionally, more recent research reports ektacytometry deformability measur ements are unreliable in predicting the ability of RBC perfusion of a microvascular network. 34 Given that MCHC reduction was initially hypothesized to reduce polymerization and decrease vasoocclusion, investigating monensin's effect on dynamic flow rheology is critical to understanding whether the mechanism has a role in future therapeutic development. Using our microfluidic platform, we were able to observe and quantify monensin's effect of reduced MCHC on blood flow by measuring rheologic variables in a dynamic, physiologically relevant system. In our study, we not only found that monensin decreased MCHC, but we also report that it significantly decreased the sensitivity of SCD blood flow to hypoxia compared to controls. The velocity recovery with monensin treatment varied between samples which correlated to the variation in monensin-induced changes in MCHC. Through the rheological measurements obtained in this study, we provide both novel insight into the capability of this mechanism in the prevention of vaso-occlusion but also provide findings to suggest patient response variability. 
While osmotic swelling and increases in MCV are the primary effects of the ionophore treatment, we found that the magnitude of MCHC reduction is the primary parameter modifying blood flow response to hypoxia. This is demonstrated by the significant correlation found between reduction in MCHC and reduction in velocity response ( Figure 4C). When comparing response with MCV, the relationship is not as strongly correlated ( Figure 4B). This corroborates existing studies which demonstrate that polymerization rates are extremely dependent on HbS concentration. Additionally, we found that in the four samples that demonstrated insignificant change in velocity response when treated with monensin compared to controls (sample ID: 3, 5, 7, and 10), there was less than a 5% MCHC decrease. It is unclear what caused the observed patient variability in MCV/MCHC response to monensin, however our data suggests that decreasing the MCHC by 5% has a significant impact on the sample's blood flow in hypoxia. Future studies that examine reduction of MCHC as a mechanism for SCD treatment should use a minimal threshold in MCHC reduction to guide drug efficacy. This study investigated the mechanism of osmotic swelling to decrease intracellular MCHC thereby decreasing HbS polymerization. Similarly, previous SCD drug trials have focused on decreasing intracellular MCHC by inhibiting ion channels that are involved in the pathologic dehydration of sRBC. Clotrimazole, an inhibitor of the Gardos channel ARTICLE -RBC swelling improves sickle blood flow A.C. Geisness et al. A B C which contributes to sRBC water loss, demonstrated reduced sRBC dehydration, decreased MCHC, and mild improvements in hemoglobin when taken by five SCD patients. 35,36 Patients on Senicapoc, a similar Gardos channel inhibitor, also experienced significant increases in hemoglobin and hematocrit in a phase II clinical trial. 
Despite these promising results, studies involving Senicapoc were terminated early due to limited efficacy when no significant improvement in the rate of VOE were seen in those taking Senicapoc compared to those on placebo. 37 While reductions in MCHC may correlate with reductions in polymerization-induced hemolysis and increased hemoglobin, it does not appear to be as correlated to frequency of VOE and implies that hemolysis and vaso-occlusion are distinct, yet intertwined, pathologic mechanisms. Further, although reductions in MCHC have been shown to decrease rigidity and stiffness of sRBC, 38 over-swelling of sRBC by exposure to hypotonic solutions has also led to increased vaso-occlusions in in vitro models. 39 Therefore, while the mechanism of osmotic swelling has been beneficial to distinct aspects of SCD pathology, it may not be effective or potentially problematic. Given that previous compounds have attempted to exploit a similar mechanism tested in this study but have ultimately proven unsuccessful due to a lack of reduction in VOE, the ability to determine how a compound affects blood flow, particularly in deoxygenated conditions like that of a VOE, is important in predicting its potential clinical success. Therapeutics that demonstrate improvement in overall blood rheology rather than on a single SCD pathophysiologic mechanism are likely to provide more benefit in reducing VOE frequency. By using microfluidic technology in this study, we can characterize the effect of monensin on velocity response and demonstrate that the mechanism improves rheological behavior. Due to its recognized narrow therapeutic window, use of monensin in veterinary medicine has led to several toxicities and accidental death, and therefore would not be an ideal agent for human study. 40 However, this study motivates development of other compounds that may have similar osmotic swelling effects on erythrocytes for SCD treatment. 
41 A potential concerning side effect of this treatment mechanism includes increased blood viscosity due to increased cell volume and hct. With increased viscosity, the potential for VOE is amplified. 41 Viscosity, however, is dependent on several factors such as hct, RBC deformability and aggregation. 41,42 Previous studies demonstrate with decreased MCHC, sRBC deformability increases. 43 Additionally, it has been reported that cell stiffness has a greater influence on blood viscosity than hct 41 and as previously mentioned, monensin treatment has demonstrated increased sRBC deformability when compared to untreated controls. 8 Patients treated with hydroxyurea also experience an increase in MCV without association of worsening outcomes. 44 Additionally, although our study does not control for post-monensin increase in hct, we still demonstrate improved rheological behavior. Therefore, while osmotic sRBC swelling may increase viscosity, RBC deformability appears to be stronger determining factor in overall blood viscosity. Our study is limited by primarily demonstrating correlative relationships with little experimentation on causation. However, previous work demonstrating increases in RBC deformability and decreases in sRBC fraction with monensin treatment, support the reduced rheological response to hypoxia observed in this study. While we demonstrate that decreasing MCHC by erythrocyte osmotic swelling successfully reduces sRBC oxygen dependent flow, this study does not capture the complex biological interactions between the many other cellular components involved in the pathophysiology of SCD. For example, in our experiments, we used washed red cells rather than whole blood. Previous studies found that monensin had a reduced effect on sRBC in the presence of plasma and required significantly increased concentrations to replicate improved deformability, indicating that the drug likely binds across plasma constituents. 
19 While using patient plasma may be helpful in determining concentration of monensin to observe rheological improvement, this study was to demonstrate the efficacy of the mechanism of MCHC reduction to improve sRBC rheological behavior. Should further studies exploring this mechanism be conducted, or for future therapeutic development, agents that specifically target RBC Na permeability by RBC-specific cation channels or transport would be of priority to reduce effect on other potential cellular components. Additionally, the endothelium is particularly of interest given that increased RBC deformability has been shown to increase endothelial adhesion and may contribute to the occurrence of VOE and clinical severity. 45,46 Further studies examining the effect of MCHC reduction on adhesion and inflammation are needed to assess the potential benefit of this therapeutic mechanism. As discovery of SCD pathophysiology reveals more intricate biological pathways and multifaceted systems simultaneously at play, the approach to treatment may require an equally multifaceted, multi-drug approach. By combining a therapy that reduces MCHC and polymerization with an anti-adhesion therapy, perhaps further benefit may be achieved. Studies are needed to determine the advantages of a multi-agent approach, however similar strategies have already been successful in HIV therapy, cardiology, and oncology. Conclusion The reduction of MCHC through osmotic swelling of a sickle erythrocyte can effectively decrease the rheological dependence on oxygenation. Blood flow velocity measurements within microfluidic channels, of physiologic dimen-sions, indicate a strong correlation between MCHC reduction and reduction of blood flow sensitivity to hypoxia. These relationships may indicate the potential efficacy of regulating MCHC as a targeted mechanism for SCD therapeutic development. Disclosures No conflicts of interest to disclose. |
<reponame>jgolieb/cloudbreak
package com.sequenceiq.authorization.resource;
/**
 * Helper for composing authorization "right" identifiers.
 *
 * <p>A right is the string form {@code <resource>/<action>} used when a
 * permission check is performed against the authorization service.
 */
public class RightUtils {

    // Static utility holder; never instantiated.
    private RightUtils() {
    }

    /**
     * Builds the right string for the given resource and action.
     *
     * @param resource the authorization resource type
     * @param action   the action performed on that resource
     * @return the right in the form {@code resource/action}
     */
    public static String getRight(AuthorizationResourceType resource, AuthorizationResourceAction action) {
        return String.join("/", resource.getResource(), action.getAction());
    }
}
|
Upbeat over its performance in the previous elections, the ruling BJP in Rajasthan continues to experiment with poll strategy for the Panchayati Raj elections. The party has now made it mandatory for the workers to recruit at least 100 members to be eligible for its ticket.
On the contrary, the Congress is back to square one and banking on traditional methods. Having lost all the previous polls, the party views the Panchayati Raj elections as extremely significant, as these will be the last polls in the state before the next Assembly election due in December 2018.
“Adding at least 100 new members has become the minimum eligibility criterion to get the party ticket,” said a state BJP office-bearer.
“The party has issued directives even to the office-bearers from the state to divisional-level, stating they will have to relinquish their posts if they fail to achieve the target under the BJP’s nationwide membership drive,” the office-bearer added.
All the ticket aspirants now seem to have taken up the task seriously. Previously, the aspirants were seen meeting MPs, MLAs and RSS office-bearers to ensure the ticket. But now they are knocking at the voters’ doors to offer them the party membership.
“They may not give us ticket even after making 100 members for the party. But they will surely disqualify candidates who do not achieve the target. So this has become basic eligibility for us to get the ticket,” said Om Prakash Rajoria, a ticket aspirant.
In the Congress, district-wise observers will interview and collect the bio-data of the ticket aspirants from each district.
These observers will shortlist the candidates and after discussions with the local leaders, a panel of three candidates will be prepared. The ticket will be given to one of the candidates in the panel. Later these observers and district in-charge will look after the election management for the party.
This time the party has not introduced any riders like preference to youth, education qualification or denying ticket to those who lost previous polls.
The leaders are now saying that winnable candidates will be given the ticket.
While Sachin Pilot has already started holding the election rallies in different districts, the local leaders are clueless especially after the public mandate in previous last elections, which was completely opposite to their expectation. |
<filename>django_web_secure/middleware.py
import re
from django.conf import settings
from django.http import HttpResponsePermanentRedirect
class WebSecureMiddleware:
    """Old-style Django middleware enforcing HTTPS and security headers.

    Request side: insecure requests are permanently redirected to HTTPS
    when ``SECURE_ENFORCE_SSL`` is truthy, unless the request path matches
    one of the configured exempt patterns.

    Response side: adds Strict-Transport-Security, Content-Security-Policy,
    X-Content-Type-Options and X-XSS-Protection headers when configured
    and not already present on the response.
    """

    CONTENT_SECURITY_POLICY_HEADER = 'Content-Security-Policy'
    CONTENT_TYPE_OPTIONS_HEADER = 'X-Content-Type-Options'
    HSTS_HEADER = 'Strict-Transport-Security'
    XSS_PROTECTION_HEADER = 'X-XSS-Protection'

    def __init__(self):
        # Configuration is read once at startup; a missing setting fails fast.
        self.enforce_ssl = settings.SECURE_ENFORCE_SSL
        self.ssl_host = settings.SECURE_SSL_HOST
        # NOTE(review): despite the setting name mentioning "hosts", these
        # patterns are matched against the request *path* (leading '/' stripped).
        self.exempt_hosts = [re.compile(r) for r in settings.SECURE_EXEMPT_HOSTS]
        self.hsts_max_age = settings.SECURE_HSTS_MAX_AGE
        self.hsts_include_subdomains = settings.SECURE_HSTS_INCLUDE_SUBDOMAINS
        self.hsts_preload = settings.SECURE_HSTS_PRELOAD
        self.content_security_policy = settings.SECURE_CONTENT_POLICY
        self.content_type_nosniff = settings.SECURE_CONTENT_TYPE_NOSNIFF
        self.xss_filter = settings.SECURE_XSS_FILTER

    def is_exempt(self, path):
        """Return True if ``path`` matches any configured exempt pattern."""
        return any(pattern.search(path) for pattern in self.exempt_hosts)

    def process_request(self, request):
        """Redirect insecure requests to HTTPS when enforcement is enabled.

        Returns None (continue processing) when enforcement is off, the
        request is already secure, or the request path is exempt.
        """
        if not self.enforce_ssl or request.is_secure():
            return None
        # Bug fix: the exemption decision must be based on the *request*
        # path; the original tested ``self.ssl_host`` against the exempt
        # patterns, so per-path exemptions never took effect.
        if self.is_exempt(request.path.lstrip('/')):
            return None
        host = self.ssl_host or request.get_host()
        return HttpResponsePermanentRedirect(
            'https://{0}{1}'.format(host, request.get_full_path()))

    def get_hsts_header(self):
        """Build the Strict-Transport-Security header value.

        Returns an empty string when no max-age is configured; an HSTS
        header without ``max-age`` is invalid and must not be emitted.
        """
        if not self.hsts_max_age:
            return ''
        header = 'max-age={}'.format(self.hsts_max_age)
        if self.hsts_include_subdomains:
            header += '; includesubdomains'
        if self.hsts_preload:
            header += '; preload'
        return header

    def process_response(self, request, response):
        """Attach configured security headers without clobbering existing ones."""
        if request.is_secure() and self.HSTS_HEADER not in response:
            hsts = self.get_hsts_header()
            if hsts:  # Only emit a non-empty, syntactically valid value.
                response[self.HSTS_HEADER] = hsts
        # Bug fix: the CSP setting was read in __init__ but never applied.
        if (self.content_security_policy
                and self.CONTENT_SECURITY_POLICY_HEADER not in response):
            response[self.CONTENT_SECURITY_POLICY_HEADER] = self.content_security_policy
        if self.content_type_nosniff and self.CONTENT_TYPE_OPTIONS_HEADER not in response:
            response[self.CONTENT_TYPE_OPTIONS_HEADER] = 'nosniff'
        if self.xss_filter and self.XSS_PROTECTION_HEADER not in response:
            response[self.XSS_PROTECTION_HEADER] = '1; mode=block'
        return response
|
Concussion in Sport: Psychological Perspectives Concussions represent a significant health concern in sport, affecting as many as 4 million athletes in the United States each year (Langlois, Rutland-Brown, & Wald, 2006). A concussion is defined as a complex pathophysiological process affecting the brain, induced by biomechanical forces (p. 250, ). On a practical level, concussions involve myriad symptoms including headache, dizziness, as well as impairment to balance, cognitive, vestibular, ocular motor, and other functioning. Concussions also involve psychological effects including anxiety () and depression (). These effects may be pronounced in athletes that take longer to recover and for those with certain subtypes of clinical profiles of concussion. The outcomes associated with concussion, including symptoms, impairment, and recovery time, are also influenced by many risk or modifying factors, including age (Purcell, Harvey, & Seabrook, 2016), sex (), concussion (Covassin, Stearne, & Elbin, 2008), and migraine history (Sufrinko, McAllisterDeitrick, Elbin, Collins, & Kontos, 2017). However, the role of psychological factors in predicting outcomes following a concussion is not well understood. We also know little about the psychological sequelae that often accompany concussion. As our understanding of concussion continues to evolve, we have learned that concussions are highly individualized, involving different clinical profiles (Collins, Kontos, Reynolds, Murawski, & Fu, 2014; ). To characterize these profiles, a comprehensive, interdisciplinary approach to assessment that includes multiple domains such as symptoms, cognitive, vestibular, and oculomotor is needed. The information from the comprehensive assessment helps to identify and prioritize targeted treatment strategies. Recently, the approach to treating concussion has moved from prescribed rest to more active approaches involving specific therapies and rehabilitation interventions. 
In spite of these advances, one area in which our understanding and research focus has lagged behind is on the psychological issues related to concussion. This is somewhat surprising, given that in 2004, my colleagues and I highlighted the need for a greater emphasis on psychological issues related to this injury by clinicians and researchers alike (Kontos, Collins, & Russo, 2004). The purpose of the current special issue is to examine psychological issues with regard to sport-related concussion. In doing so, the current issue is the first on sport-related concussion devoted solely to psychology-related issues. The authors in this issue describe new research findings both quantitative and qualitative (something that is lacking in the field of concussion)and present and discuss empirical reviews of psychological issues. The order of the papers for this special issue flows from the first paper, which provides an overview of the psychological factors associated with concussion, followed by several papers that emphasize the role of psychological factors on decisions related to playing concussion-risk sports and baseline testing for concussion. Following this preinjury focus, the papers transition to emphasize the role of psychological factors on concussion and related outcomes, including a paper Editors Note. This is an introduction to the special issue Concussion in Sport, Exercise, and Performance: Psychological Perspectives. Please see the Table of |
<gh_stars>1-10
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run an actor-critic agent instance on a bsuite experiment."""
# Import all packages
from absl import app
from absl import flags
from bsuite import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.actor_critic_rnn import actor_critic_rnn
from bsuite.baselines.utils import pool
import tensorflow as tf
from typing import Text
# bsuite logging
# Flags controlling which experiment(s) run and where/how results are recorded.
flags.DEFINE_string('bsuite_id', 'catch/0',
                    'specify either a single bsuite_id (e.g. catch/0)\n'
                    'or a global variable from bsuite.sweep (e.g. SWEEP for '
                    'all of bsuite, or DEEP_SEA for just deep_sea experiment).')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
                  'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
# None means "use the per-environment default" (env.bsuite_num_episodes in run()).
flags.DEFINE_integer('num_episodes', None, 'Overrides number of training eps.')

# algorithm
# Hyper-parameters for the recurrent actor-critic (A2C) agent built in run().
flags.DEFINE_integer('seed', 42, 'seed for random number generation')
flags.DEFINE_integer('num_hidden_layers', 2, 'number of hidden layers')
flags.DEFINE_integer('num_units', 64, 'number of units per hidden layer')
flags.DEFINE_float('learning_rate', 3e-3, 'the learning rate')
flags.DEFINE_integer('sequence_length', 32, 'mumber of transitions to batch')
flags.DEFINE_float('td_lambda', 0.9, 'mixing parameter for boostrapping')
flags.DEFINE_float('agent_discount', .99, 'discounting on the agent side')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')

FLAGS = flags.FLAGS
def run(bsuite_id: Text) -> Text:
  """Train a recurrent A2C agent on a single bsuite environment.

  Results are recorded by the bsuite logger configured via command-line
  flags. Returns the bsuite_id so sweep drivers can report completed work.
  """
  environment = bsuite.load_and_record(
      bsuite_id=bsuite_id,
      save_path=FLAGS.save_path,
      logging_mode=FLAGS.logging_mode,
      overwrite=FLAGS.overwrite,
  )

  # Build the recurrent policy/value network from the flag-specified sizes.
  layer_sizes = [FLAGS.num_units] * FLAGS.num_hidden_layers
  action_count = environment.action_spec().num_values
  policy_value_net = actor_critic_rnn.PolicyValueRNN(layer_sizes, action_count)

  a2c_agent = actor_critic_rnn.ActorCriticRNN(
      obs_spec=environment.observation_spec(),
      action_spec=environment.action_spec(),
      network=policy_value_net,
      optimizer=tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate),
      sequence_length=FLAGS.sequence_length,
      td_lambda=FLAGS.td_lambda,
      agent_discount=FLAGS.agent_discount,
      seed=FLAGS.seed,
  )

  experiment.run(
      agent=a2c_agent,
      environment=environment,
      num_episodes=FLAGS.num_episodes or environment.bsuite_num_episodes,  # pytype: disable=attribute-error
      verbose=FLAGS.verbose)

  return bsuite_id
def main(argv):
  """Dispatch to a single run or a multiprocess sweep based on --bsuite_id."""
  del argv  # Unused.
  experiment_id = FLAGS.bsuite_id
  if experiment_id in sweep.SWEEP:
    print('Running a single bsuite_id={}'.format(experiment_id))
    run(experiment_id)
  elif hasattr(sweep, experiment_id):
    selected_sweep = getattr(sweep, experiment_id)
    print('Running a sweep over bsuite_id in sweep.{}'.format(selected_sweep))
    FLAGS.verbose = False  # Avoid interleaved per-episode logs across workers.
    pool.map_mpi(run, selected_sweep)
  else:
    raise ValueError('Invalid flag bsuite_id={}'.format(experiment_id))
# Entry point: absl parses the command-line flags, then invokes main(argv).
if __name__ == '__main__':
  app.run(main)
|
//
// WholesaleOrderListReplaceModel.h
// KidsTC
//
// Created by 童成mac-dev1 on 2017/2/15.
// Copyright © 2017年 zhanping. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "WholesaleOrderListItem.h"
/// Response wrapper for the wholesale order list "replace" endpoint.
@interface WholesaleOrderListReplaceModel : NSObject

/// Server error code; presumably 0 indicates success — TODO confirm against the API docs.
@property (nonatomic, assign) NSInteger errNo;

/// Response payload. NOTE(review): a single item for a "list" response —
/// confirm whether the server really returns one object here.
@property (nonatomic, strong) WholesaleOrderListItem *data;

/// Total record count reported by the server — TODO confirm semantics.
@property (nonatomic, assign) NSInteger count;

/// Current page identifier as returned by the server.
/// Fix: declared `copy` (was `strong`) — NSString properties should be
/// copied so a caller-supplied NSMutableString cannot mutate stored state.
@property (nonatomic, copy) NSString *page;

@end
|
<filename>hashtable.h
#ifndef HASHTABLE_H
#define HASHTABLE_H
#include <linkedlist.h>
#include <stdint.h>
/*
 * Separate-chaining hash table: `chains` points to an array of LinkedList
 * buckets and `prime` is the bucket count (presumably chosen to be a prime
 * number for better key distribution -- confirm in the implementation).
 */
typedef struct Hashtable{
    LinkedList* chains;  /* bucket array, one chain per slot */
    size_t prime;        /* number of buckets */
}Hashtable;

/* Create a table sized for at least `min_keys` entries -- TODO confirm how
 * min_keys maps to the bucket count. Returned by value. */
Hashtable Hashtable_create(size_t min_keys);

/* Insert/lookup using an arbitrary byte-string key of `key_size` bytes.
 * NOTE(review): presumably the table stores the `obj` pointer without
 * copying, so the caller retains ownership -- verify in the .c file. */
void Hashtable_insert_pair(Hashtable* hashtable,uint8_t* key,size_t key_size,void* obj);
void* Hashtable_lookup_obj(Hashtable* hashtable,uint8_t* key,size_t key_size);

/* Convenience wrappers for NUL-terminated string keys. */
void HashtableString_insert_pair(Hashtable* hashtable,char* key,void* obj);
void* HashtableString_lookup_obj(Hashtable* hashtable,char* key);

/* Release the table's internal storage; whether stored objects are freed
 * is not visible here -- TODO confirm. */
void Hashtable_destroy(Hashtable* hashtable);
#endif
|
Taming The Security Weakest Link(s)
Overview
The security level of a computerized system is as good as the security level of its weakest links. If one part is secure and tightened properly and other parts are compromised, then your whole system is compromised, and the compromised ones become your weakest links. The weakest link fits well with attackers’ mindset which always looks for the least resistant path to their goal. Third parties in computers present an intrinsic security risk for CISOs, and in general, to any person responsible for the overall security of a system. A security risk is one that is overlooked due to a lack of understanding and is not taken into account in an overall risk assessment, except for the mere mention of it. To clarify, third-party refers to all other entities that are integrated into yours, which can be hardware and software, as well as people who have access to your system and are not under your control.
A simple real life example can make it less theoretical: Let’s say you are building a simple piece of software running on Linux. You use the system C library, and in this case, plays the 3rd party role. If the C library has vulnerabilities—then your software has vulnerabilities. And, even if you make your software bulletproof, it still won’t remove the risks associated with the C library which becomes your software weakest link.
Zooming out on our imaginary piece of software then, you probably already understand that the problem of the 3rd party is much bigger than what was previously mentioned, as your software also relies on the operating system and other installed 3rd party libraries, and the hardware itself, and the networking services, and the list goes on and on. I am not trying to be pessimistic, but this is how it works.
In this post, I will focus on application integration-driven weakest links for the sake of simplicity, and not on other 3rd parties such as reusable code, non-employees, and others.
Application Integration as a Baseline for 3rd Parties
Application integration has been one of the greatest trends ever in the software industry, enabling the buildup of complex systems based on existing systems and products. Such integration takes many forms depending on the specific context in which it is implemented.
Mobile World
In the mobile world, for example, integration mainly serves the purpose of ease of use, where apps are integrated into one another by means of sharing or delegation of duty, such as integrating the camera into an image editing app—iOS has come a long way in this direction with native FB and Twitter integration, as well as native sharing capabilities. Android was built from the ground up for such integration with its activity-driven architecture.
Enterprise Systems
In the context of enterprise systems, integration is the lifeblood of business processes where there are two main forms of integration: one-to-one such as software X “talking” to software Y via software or network API. The second form is many-to-many, such as in the case of software applications “talking” to a middleware where later the middleware “talks” to other software applications.
Personal Computers
In the context of a specific computer system, there is also the local integration scenario which is based on OS native capabilities such as ActiveX/OLE or dynamic linking to other libraries – such integration usually serves code reuse, ease of use and information sharing.
Web Services
In the context of disparate web-based services, the one-to-one API integration paradigm is the main path for building great services fast.
All In All
Of course, the world is not homogeneous as is depicted above. Within the mentioned contexts you can find different forms of integration which usually depend on the software vendors and existing platforms.
Integration Semantics
Each integration is based on specific semantics. This semantics are imposed by the interfaces each party exposes to the other party. REST APIs, for example, provide a rather straightforward approach to understanding the semantics where the interfaces are highly descriptive. The semantics usually dictate the range of actions that can be taken by each party in the integration tango and the protocol itself enforces that semantics. Native forms of integration between applications are a bit messier than network based APIs where there is less capability to enforce the semantics allowing exploits such as in the case with ActiveX integration on Windows which has been a basis for quite a few attacks. The semantics of integration also includes the phase of establishing the trust between the integrated parties, and again, this varies quite a bit regarding implementation within each context. It varies from a zero trust case with fully public APIs such as consuming an RSS feed or running a search on Google with an Incognito browser up to a full authentication chain with embedded session tokens.
In the mobile world, where the aim of integration is to increase ease of use, the trust level is quite low: the mobile trust scheme is based mainly on the fact that both integrated applications reside on the same device, such as in the case of sharing, where any app can ask for sharing via other apps and gets an on-the-fly integration into the sharing apps. The second prominent use case in mobile for establishing trust is based on a permission request mechanism. For example, when an app tries to connect to your Facebook app on the phone, the permission request mechanism verifies the request independently from within the FB app, and once approved, the trusted relationship remains constant by use of a persisted security token. Based on some guidelines, some apps do expire those security tokens, but they last for an extended period. With mobile, the balance keeps shifting between maintaining security and annoying the user with too many permission questions.
Attack Vectors In Application Integration
Abuse of My Interfaces
Behind every integration interface, there is a piece of software which implements the exposed capabilities, and as in every software, it is safe to assume that there are vulnerabilities just waiting to be discovered and exploited. So the mere existence of opening integration interfaces from your software poses a risk.
Man In the Middle
Every communication among two integrated parties can be attacked using a man in the middle (MitM). MitM can first intercept the communications, but also alter them to either disrupt the communications or exploit a vulnerability on either side of the integration. Of course, there are secure protocols such as SSL which can reduce that risk but not eliminate it.
Malicious Party
Since we don’t have control of the integrated party, it is very difficult to assume that it has not been taken over by a malicious actor, which can then do all kinds of things: exploit my vulnerabilities, exploit the data channel by sending harmful or destructive data, or cause a disruption of my service with denial-of-service attacks. The other risk of a malicious or under-attack party concerns availability: with tight integration, your availability strongly depends on the integrated parties’ availability. The risk posed by a malicious party is amplified by the fact that trust is already established, and a trusted party often receives wider access to resources and functionality than a non-trusted party, so the potential for abuse is higher.
Guidelines for Mitigation
There are two challenges for mitigating 3rd party risks: the first one is visibility that is easier to achieve, and the second is what to do about each risk identified since we don’t have full control over the supply chain. The first step is to gain an understanding of which 3rd parties your software is reliant upon. This is not easy as you may have visibility only over the first level of integrated parties—in a way this is a recursive problem, but still, the majority of the integrations can be listed out. For each integration point, it is interesting to understand the interfaces and the method of integration (i.e. over the network, ActiveX), and finally, trust establishing a method. Once you have this list, then you should create a table with four columns:
CONTROL – How much control you have over the 3 rd party implementation.
party implementation. CONFIDENCE – Confidence in 3rd party security measures.
IMPACT – Risk level associated with potential abuse of my interfaces.
TRUST – The trust level required to be established between the integrated parties before communicating with each other.
These four parameters serve as a basis for creating an overall risk score where the weights for each parameter should be assigned at your discretion and based on your judgment. Once you have such a list, and you’ve got your overall risk calculated for each 3rd party, then simply sort it out based on risk score, and there you’ve got a list of priorities for taming the weakest links.
Once you know your priorities, then there are things you can do, and there are actions that only the owners of the 3rd party components can do so you need some cooperation. Everything that is in your control, which is the security of your end in the integration and the trust level imposed between the parties (assuming you have control of the trust chain and you are not the consumer party in the integration), should be tightened up. For example, reducing the impact of your interfaces towards your system is one thing in your control as well as patching level of dependent software components. MITM risk can be reduced dramatically with the establishment of a good trust mechanism and implementation of secure communications, but not completely mitigated. And lastly, taking care of problems within an uncontrolled 3rd party is a matter of specifics which can’t be elaborated upon theoretically.
Summary
The topic of 3rd party security risks is a large one to be covered by a single post, and as seen within each specific context, the implications vary dramatically. In a way, it is a problem which cannot be solved 100%, due to lack of full control over the 3rd parties, and lack of visibility into the full implementation chain of the 3rd party systems. To make it even more complicated, consider that you are only aware of your 3rd parties, and your 3rd parties also have 3rd parties—which in turn also have 3rd parties…and on and on…so you can not be fully secure! Still, there is a lot to do even if there is no clear path to 100% security, and we all know that the more we make it hard for attackers, the costlier it is for them, which does wonders to weaken their motivation.
Stay safe! |
<reponame>kurpenok/Labs<gh_stars>1-10
#include <iostream>
int f(int n);
int main() {
    // Recursive evaluation of F(100).
    std::cout << "[+] F(100): " << f(100) << std::endl;

    // Bottom-up evaluation of the same recurrence, caching F(1..100)
    // so each value is computed exactly once. memo[k-1] holds F(k).
    int memo[100];
    for (int value = 1; value <= 100; ++value) {
        if (value <= 3) {
            memo[value - 1] = value;
        } else {
            switch (value % 3) {
                case 0:
                    memo[value - 1] = value * value * value + memo[value - 2];
                    break;
                case 1:
                    memo[value - 1] = 4 + memo[(value / 3) - 1];
                    break;
                default: // value % 3 == 2
                    memo[value - 1] = value * value + memo[value - 3];
                    break;
            }
        }
    }
    std::cout << "[+] F(100): " << memo[99] << std::endl;
    return 0;
}
// F(n) recurrence: F(n) = n for n <= 3; otherwise the branch taken
// depends on n mod 3 (cube + F(n-1), 4 + F(n/3), or square + F(n-2)).
int f(int n) {
    if (n <= 3) {
        return n;
    }
    switch (n % 3) {
        case 0:
            return n * n * n + f(n - 1);
        case 1:
            return 4 + f(n / 3);
        default: // n % 3 == 2
            return n * n + f(n - 2);
    }
}
|
import Phaser from 'phaser';
import { Colors, Directions } from '../enums/GameEnums';
import { createPacManAnims, createGhostsAnims } from '../anims/CreateAnims';
import { debugLayer } from '../tools/debug';
import Ghost from '../sprites/Ghost';
import ScatterAI from '../ai-mode/ScatterAI';
import { GameConfig, GhostAIConfig } from '../configs/GameConfig';
import Pacman from '../sprites/Pacman';
import ChaseAI from '../ai-mode/ChaseAI';
import FrightenedAI from '../ai-mode/FrightenedAI';
import {
EVENT_GAME_CHANGEMODE,
EVENT_GAME_INITED,
EVENT_GAME_LEVELUP,
EVENT_GAME_OVER,
EVENT_GAME_UPDATESCORE,
EVENT_PACMAN_HASPOWER,
EVENT_PACMAN_LOSELIFE,
EVENT_PACMAN_LOSEPOWER,
sceneEvents
} from '../events/GameEvents';
import { IGhostAIMap } from '../interfaces/IGhostAIMap';
import ILevelInfo from '../interfaces/ILevelInfo';
import IDirections from '~interfaces/IDirections';
/**
 * Main Pac-Man gameplay scene.
 *
 * Builds the maze and pellets from a tilemap, spawns Pac-Man and the four
 * ghosts, wires up physics collisions/overlaps, and drives the ghost AI
 * mode schedule (scatter / chase / frightened) with Phaser timers. Level
 * transitions and life loss are handled by restarting this scene with an
 * ILevelInfo payload describing the surviving pellets.
 */
export default class GameScene extends Phaser.Scene {
    private pacman?: Pacman;
    private cursor?: Phaser.Types.Input.Keyboard.CursorKeys;
    private wallLayer?: Phaser.Tilemaps.TilemapLayer;
    private ghostArray?: Array<Ghost>;
    // Per-ghost AI strategies, indexed [scatter, chase, frightened] (see setAllGhostAIMode).
    private ghostAIMap?: IGhostAIMap;
    // Ticks once per second and advances through GhostAIConfig.modes.
    private _globalTimer?: Phaser.Time.TimerEvent;
    // One-shot timers active only while the ghosts are frightened.
    private _frightenedTimer?: Phaser.Time.TimerEvent;
    private _frightenedFlashTimer?: Phaser.Time.TimerEvent;
    private _elapse: number = 0;               // seconds spent in the current AI mode
    private _modeIdx: number = 0;              // index into GhostAIConfig.modes
    private _lastMode: string = '';            // mode to restore when 'frightened' ends
    private _dotsArray?: Array<Phaser.GameObjects.Sprite>;
    private _bigDotsArray?: Array<Phaser.GameObjects.Sprite>;
    private _dotsCount: number = 0;
    private _bigDotsCount: number = 0;
    private _lifeCount: number = 3;
    private _scores: number = 0;
    private _levels: number = 1;
    // Points for the next eaten ghost; doubles per ghost during one power-up.
    private _ghostValue: number = 100;
    // Direction input injected from outside (e.g. touch UI), used when no cursor key is down.
    private _externalDirections?: IDirections;

    constructor() {
        super('GameScene');
    }

    // Resets per-run state; Phaser calls this before create() on every (re)start.
    // NOTE(review): _lifeCount/_scores/_levels are deliberately NOT reset here so they
    // survive scene restarts — presumably intentional; confirm against restartGameWorld.
    init(){
        this.ghostArray = new Array<Ghost>();
        this._dotsCount = 0;
        this._bigDotsCount = 0;
        this._elapse = 0;
        this._modeIdx = 0;
        this._lastMode = '';
        this._dotsArray = new Array<Phaser.GameObjects.Sprite>();
        this._bigDotsArray = new Array<Phaser.GameObjects.Sprite>();
        this._ghostValue = 100;
        this._externalDirections = {
            left: false,
            right: false,
            up: false,
            down: false
        };
    }

    /**
     * Builds the whole level. `levelInfo` is present on restarts (life lost or
     * level-up) and carries the coordinates of pellets still uneaten.
     */
    create(levelInfo?: ILevelInfo)
    {
        console.log(levelInfo);
        this._levels = levelInfo?.levels ? levelInfo?.levels : 1;
        // init=true only on a fresh game (no levelInfo passed by a restart).
        let info = { init: levelInfo?.levels === undefined, levels: this._levels };
        this.scene.launch('GameUIScene', info);

        createPacManAnims(this.anims);
        createGhostsAnims(this.anims);

        // --- Maze walls ---
        const map = this.make.tilemap({key: 'pacmanJson'});
        const tileSet = map.addTilesetImage('pac-man', 'pacman', 16, 16, 1, 2);
        this.wallLayer = map.createLayer('Walls', tileSet).forEachTile(tile =>{
            tile.tint = 0x3ba3ff;
        }).setCollisionByProperty({ collides: true });

        // --- Small dots: convert tiles to sprites, skip ones already eaten on a restart ---
        const dotsLayer = map.createLayer('Dots', tileSet);
        this._dotsArray = dotsLayer.createFromTiles(16, -1, { key: 'pacman', frame: 15, origin: 0 });
        this._dotsArray.forEach(dot => {
            let skip = false;
            if (levelInfo?.dotsData && !levelInfo.newLevel){
                // Restart of the same level: keep only dots recorded as uneaten.
                skip = levelInfo.dotsData.indexOf(`${dot.x},${dot.y}`) === -1;
            }
            if (!skip){
                this._dotsCount++;
                this.physics.add.existing(dot);
                (dot.body as Phaser.Physics.Arcade.Body).setCircle(2, 6, 6);
            }
            else{
                dot.destroy(true);
            }
        });

        // --- Big (power) dots: same skip logic, plus a pulsing alpha tween ---
        this._bigDotsArray = dotsLayer.createFromTiles(18, -1, { key: 'pacman', frame: 17, origin: 0 });
        this._bigDotsArray.forEach(bigDot => {
            let skip = false;
            if (levelInfo?.bigDotsData && !levelInfo.newLevel){
                skip = levelInfo.bigDotsData.indexOf(`${bigDot.x},${bigDot.y}`) === -1;
            }
            if (!skip){
                this._bigDotsCount++;
                this.physics.add.existing(bigDot);
                (bigDot.body as Phaser.Physics.Arcade.Body).setCircle(4, 4, 4);
                this.tweens.add({
                    targets: bigDot,
                    alpha: 0.1,
                    duration: 800,
                    yoyo: true,
                    repeat: -1
                });
            }
            else{
                bigDot.destroy(true);
            }
        });

        // --- Pac-Man ---
        this.pacman = new Pacman(this, 144, 256, undefined, this.wallLayer);
        this.add.existing(this.pacman!);
        (this.pacman!.body as Phaser.Physics.Arcade.Body).setCircle(8);
        this.cursor = this.input.keyboard.createCursorKeys();

        // --- Ghosts: one per non-empty tile in the 'Ghosts' layer, colored in enum order ---
        const ghostDatas = map.getLayer('Ghosts').data;
        let color = Colors.Blinky;
        ghostDatas.forEach(tileRow => {
            tileRow.forEach(tile => {
                if (tile.index != -1){
                    const ghost = new Ghost(this, tile.x * tile.width, tile.y * tile.height, undefined, this.wallLayer);
                    ghost.makeColor(color++);
                    this.add.existing(ghost);
                    this.ghostArray?.push(ghost);
                }
            });
        });
        if (!this.ghostArray){
            throw new Error('GHOST NOT INITED!');
        }

        // --- AI strategies per ghost, indexed [scatter, chase, frightened].
        // Scatter corners follow the classic arcade layout; Inky's chase also
        // needs Blinky's position as a reference point.
        this.ghostAIMap =
        {
            'blinky': [
                new ScatterAI(this.wallLayer.width, 0, this.ghostArray[Colors.Blinky], this.wallLayer),
                new ChaseAI(this.pacman!, this.ghostArray[Colors.Blinky], this.wallLayer),
                new FrightenedAI(this.ghostArray[Colors.Blinky], this.wallLayer)
            ],
            'clyde': [
                new ScatterAI(0, this.wallLayer.height, this.ghostArray[Colors.Clyde], this.wallLayer),
                new ChaseAI(this.pacman!, this.ghostArray[Colors.Clyde], this.wallLayer),
                new FrightenedAI(this.ghostArray[Colors.Clyde], this.wallLayer)
            ],
            'inky': [
                new ScatterAI(this.wallLayer.width, this.wallLayer.height, this.ghostArray[Colors.Inky], this.wallLayer),
                new ChaseAI(this.pacman!, this.ghostArray[Colors.Inky], this.wallLayer, this.ghostArray[Colors.Blinky]),
                new FrightenedAI(this.ghostArray[Colors.Inky], this.wallLayer)
            ],
            'pinky': [
                new ScatterAI(0, 0, this.ghostArray[Colors.Pinky], this.wallLayer),
                new ChaseAI(this.pacman!, this.ghostArray[Colors.Pinky], this.wallLayer),
                new FrightenedAI(this.ghostArray[Colors.Pinky], this.wallLayer)
            ]
        };

        // --- Physics wiring ---
        this.physics.add.collider(this.pacman!, this.wallLayer, this.pacman.handlePacmanWallsCollision, undefined, this.pacman);

        // Eating a small dot: +10 points, may trigger level-up when all pellets are gone.
        this.physics.add.overlap(this.pacman!, this._dotsArray, (p, d)=>{
            let pacman = p as Pacman;
            let dot = d as Phaser.GameObjects.Sprite;
            pacman.eatDot(dot);
            this.updateGameScore(10);
            --this._dotsCount;
            this.checkGameLevelUp();
        }, undefined, this);

        // Eating a big dot: +50 points; unless it finished the level, it frightens the ghosts.
        this.physics.add.overlap(this.pacman!, this._bigDotsArray, (p, d)=>{
            let pacman = p as Pacman;
            let bigDot = d as Phaser.GameObjects.Sprite;
            pacman.eatBigDot(bigDot);
            this.updateGameScore(50);
            --this._bigDotsCount;
            let levelUp = this.checkGameLevelUp();
            if(!levelUp){
                sceneEvents.emit(EVENT_PACMAN_HASPOWER);
                this.setAllGhostAIMode('frightened');
            }
        }, undefined, this);

        // Pac-Man vs. ghost: eat a frightened ghost for doubling points,
        // otherwise lose a life (and the game when lives run out).
        this.physics.add.overlap(this.pacman!, this.ghostArray!, (p, g)=>{
            let pacman = p as Pacman;
            let ghost = g as Ghost;
            if (ghost.isFrightened){
                if (!ghost.isAte){
                    ghost.handlePacmanAte();
                    this._ghostValue *= 2;
                    // Show the earned value briefly at the ghost's position, then score it.
                    const scoreText= this.add.text(ghost.x, ghost.y, `${this._ghostValue}`, {color: '#00ff00'});
                    this.time.delayedCall(500, ()=>{
                        scoreText.destroy(true);
                        this.updateGameScore(this._ghostValue);
                    });
                }
            }
            else{
                sceneEvents.emit(EVENT_PACMAN_LOSELIFE, --this._lifeCount);
                pacman.handleGhostHited();
                this.pauseGameWorld();
                if (this._lifeCount >= 0){
                    // Lives remain: restart the level (keeping uneaten pellets) after a pause.
                    this.time.delayedCall(
                        3000,
                        () => {
                            this.restartGameWorld();
                        }
                    );
                }
                else{
                    this.time.delayedCall(
                        3000,
                        () => {
                            sceneEvents.emit(EVENT_GAME_OVER);
                        }
                    );
                }
            }
        }, (p, g) => {
            // Process callback: lets Pacman veto the overlap (e.g. during death animation).
            let pacman = p as Pacman;
            let ghost = g as Ghost;
            return pacman.canPacmanOverlapGhost(ghost);
        }, this);

        this.physics.world.setBounds(0, 0, this.wallLayer.width, this.wallLayer.height);
        // World stays paused until the UI scene signals the intro is done.
        this.physics.world.pause();
        sceneEvents.on(EVENT_GAME_INITED, ()=>{
            this.physics.world.resume();
            this.createGhostAITimer();
            this.initGhostOutRoom();
            this.updateGameScore(0);
        });

        const camera = this.cameras.main;
        camera.setViewport(0, 0, this.wallLayer.width, this.wallLayer.height);

        // Clean up the cross-scene listener and timers when the scene shuts down.
        this.events.once(Phaser.Scenes.Events.SHUTDOWN, () => {
            sceneEvents.off(EVENT_GAME_INITED);
            this._globalTimer?.remove();
            this._frightenedTimer?.remove();
            this._frightenedFlashTimer?.remove();
        });
    }

    // Per-frame input: cursor keys win; otherwise fall back to injected external input.
    update() {
        if (!this.cursor || !this.pacman || this.physics.world.isPaused){
            return;
        }
        let cusorDirections: IDirections = {
            left: this.cursor.left.isDown,
            right: this.cursor.right.isDown,
            up: this.cursor.up.isDown,
            down: this.cursor.down.isDown
        };
        if (!(cusorDirections.left || cusorDirections.right || cusorDirections.up || cusorDirections.down) && this._externalDirections){
            cusorDirections = this._externalDirections;
        }
        this.pacman.update(cusorDirections);
    }

    // Entry point for non-keyboard input sources (e.g. on-screen controls).
    setExternalDirections(dir: IDirections){
        this._externalDirections = dir;
    }

    // Starts the 1 Hz timer that walks through GhostAIConfig.modes,
    // switching all ghosts when the current mode's duration elapses.
    private createGhostAITimer(){
        if (!this.ghostArray){
            return;
        }
        this._globalTimer = this.time.addEvent({
            delay: 1000,
            callback: () => {
                let currMode = GhostAIConfig.modes[this._modeIdx];
                this._elapse++;
                // `during` is in milliseconds; _elapse counts seconds.
                if (currMode && this._elapse * 1000 >= currMode.during){
                    this._elapse = 0;
                    currMode = GhostAIConfig.modes[++this._modeIdx];
                    if (currMode){
                        this.setAllGhostAIMode(currMode.type);
                    }
                }
            },
            callbackScope: this,
            loop: true
        });
        // Apply the first scheduled mode immediately.
        let currMode = GhostAIConfig.modes[this._modeIdx];
        if (currMode){
            this.setAllGhostAIMode(currMode.type);
        }
    }

    /**
     * Switches every ghost to `mode`. For 'frightened' the scatter/chase
     * schedule is paused and two one-shot timers are armed: one to start the
     * end-of-frightened flashing (at 70% of the window) and one to restore
     * the previous mode.
     */
    private setAllGhostAIMode(mode: string){
        if (!this.ghostArray || !this.ghostAIMap || !this._globalTimer){
            return;
        }
        sceneEvents.emit(EVENT_GAME_CHANGEMODE, mode.toUpperCase());
        if (mode === 'frightened'){
            this._globalTimer.paused = true;
            let remain = GhostAIConfig.frightened;
            let flashRemain = remain * 0.7;
            this._ghostValue = 100;  // reset the ghost-eating bonus chain
            if (!this._frightenedTimer){
                // First frightened period: arm fresh timers.
                this._frightenedFlashTimer = this.time.delayedCall(
                    flashRemain,
                    () => {
                        this.ghostArray!.forEach(ghost => {
                            ghost.setGhostFlash();
                        });
                    }
                );
                this._frightenedTimer = this.time.delayedCall(
                    remain,
                    () => {
                        this.setAllGhostAIMode(this._lastMode);
                        sceneEvents.emit(EVENT_PACMAN_LOSEPOWER);
                        this._globalTimer!.paused = false;
                    }, undefined, this);
            }
            else{
                if (!this._frightenedTimer.hasDispatched){
                    // Another big dot eaten while still frightened: extend the new
                    // window by the time left on the previous one.
                    // NOTE(review): this yields full window + leftover — confirm intended.
                    remain += remain - this._frightenedTimer.getElapsed();
                    flashRemain = remain * 0.7;
                }
                // Replace the stale timers with freshly computed ones.
                this._frightenedTimer.remove();
                this._frightenedFlashTimer?.remove();
                this._frightenedFlashTimer = this.time.delayedCall(
                    flashRemain,
                    () => {
                        this.ghostArray!.forEach(ghost => {
                            ghost.setGhostFlash();
                        });
                    }
                );
                this._frightenedTimer = this.time.delayedCall(
                    remain,
                    () => {
                        this.setAllGhostAIMode(this._lastMode);
                        sceneEvents.emit(EVENT_PACMAN_LOSEPOWER);
                        this._globalTimer!.paused = false;
                    }, undefined, this);
            }
        }
        else{
            // Remember the last non-frightened mode so it can be restored later.
            this._lastMode = mode;
        }
        // Hand every ghost the matching strategy object from ghostAIMap.
        this.ghostArray!.forEach(ghost => {
            let color = Colors[ghost.ghostColor!].toLowerCase();
            let modeIdx = 0;
            if (mode === 'scatter'){
                modeIdx = 0;
            }
            else if (mode === 'chase'){
                modeIdx = 1;
            }
            else if (mode === 'frightened'){
                modeIdx = 2;
            }
            let modeAI = this.ghostAIMap![color][modeIdx];
            ghost.setAI(modeAI);
        });
    }

    // Staggers the ghosts leaving their pen, mimicking the arcade release order.
    private initGhostOutRoom(){
        this.ghostArray![Colors.Blinky]?.outRoom();
        this.time.delayedCall(
            1000,
            () => {
                this.ghostArray![Colors.Pinky]?.outRoom();
            }
        );
        this.time.delayedCall(
            15000,
            () => {
                this.ghostArray![Colors.Inky]?.outRoom();
            }
        );
        this.time.delayedCall(
            25000,
            () => {
                this.ghostArray![Colors.Clyde]?.outRoom();
            }
        );
    }

    // Returns true (and schedules a new-level restart) when no pellets remain.
    private checkGameLevelUp(): boolean{
        if (this._dotsCount + this._bigDotsCount <= 0){
            sceneEvents.emit(EVENT_GAME_LEVELUP);
            this.pauseGameWorld();
            this.time.delayedCall(
                6000,
                ()=>{
                    this.restartGameWorld(true);
                }
            )
            return true;
        }
        return false;
    }

    // Freezes physics, actors, and all AI timers (used for death and level-up pauses).
    private pauseGameWorld(){
        this.physics.world.pause();
        this.pacman?.setMovingPause();
        this.ghostArray?.forEach(g => {
            g.setMovingPause();
        });
        this._globalTimer?.remove();
        this._frightenedTimer?.remove();
        this._frightenedFlashTimer?.remove();
    }

    /**
     * Restarts the scene. For a same-level restart (life lost) the surviving
     * pellet coordinates are serialized as "x,y" strings so create() can
     * rebuild only the uneaten ones; for a new level everything respawns.
     */
    private restartGameWorld(newLevel: boolean = false){
        let levelInfo: ILevelInfo = {
            levels: this._levels,
            dotsData: [],
            bigDotsData: [],
            newLevel: false
        };
        if (!newLevel){
            this._dotsArray?.forEach(dot => {
                if (dot.visible){
                    levelInfo.dotsData.push(`${dot.x},${dot.y}`);
                }
            });
            this._bigDotsArray?.forEach(bigDot => {
                if (bigDot.visible){
                    levelInfo.bigDotsData.push(`${bigDot.x},${bigDot.y}`);
                }
            });
        }
        else{
            levelInfo.levels = this._levels + 1;
            levelInfo.newLevel = true;
        }
        this.scene.restart(levelInfo);
    }

    // Adds to the running score and notifies the UI scene.
    private updateGameScore(score: number){
        this._scores += score;
        sceneEvents.emit(EVENT_GAME_UPDATESCORE, this._scores);
    }
}
|
<reponame>SamueleDelMoro/ACTAM.Project2122-TraningGame<filename>node_modules/standardized-audio-context/build/es2019/types/detect-cycles-factory.d.ts
import { TAudioParamAudioNodeStore } from './audio-param-audio-node-store';
import { TDetectCyclesFunction } from './detect-cycles-function';
import { TGetAudioNodeConnectionsFunction } from './get-audio-node-connections-function';
import { TGetValueForKeyFunction } from './get-value-for-key-function';
/**
 * Factory that builds a cycle-detection function for the audio graph.
 * Dependencies are injected: the AudioParam→AudioNode store, a lookup for a
 * node's connections, and a generic keyed-value getter — presumably so the
 * returned TDetectCyclesFunction can traverse connections without importing
 * those stores directly; confirm in detect-cycles-factory's implementation.
 */
export declare type TDetectCyclesFactory = (audioParamAudioNodeStore: TAudioParamAudioNodeStore, getAudioNodeConnections: TGetAudioNodeConnectionsFunction, getValueForKey: TGetValueForKeyFunction) => TDetectCyclesFunction;
//# sourceMappingURL=detect-cycles-factory.d.ts.map |
def results():
    """Return an ordered mapping of NDVI trend-analysis result descriptors.

    Each entry points at one Theil-Sen trend NetCDF file and carries the
    metadata needed to interpret it (source sensor, grid, statistical test,
    FDR correction method, parameter name and display units).
    """
    # (key, netcdf path, source product, grid name) — order defines res order.
    specs = [
        ("ndvi_aqua",
         "./results/netcdf/MYD13C1_ndvi_TheilSen_2002_to2018_GlobalMODIS_CMG.nc",
         "MYD13C1", "MODIS_CMG"),
        ("ndvi_terra",
         "./results/netcdf/MOD13C1_ndvi_TheilSen_2000_to2018_GlobalMODIS_CMG.nc",
         "MOD13C1", "MODIS_CMG"),
        ("ndvi",
         "./results/netcdf/GIMMS3gv1.1_ndvi_TheilSen_1982_to2017_GlobalGIMMS.nc",
         "GIMMS3gv1.1", "GIMMS"),
    ]
    res = OrderedDict()
    for key, fname, source, grid in specs:
        res[key] = {
            "fname": fname,
            "source": source,
            "test": "Theisen",
            "FDRmethod": "BenjaminiHochberg",
            "window": 0,
            "grid": grid,
            "param": "AnnualMaxNDVI",
            "units": r"x10$^{-2}$ NDVI$_{max}$ yr$^{-1}$",
        }
    return res
TRAINING FOR THE INNOVATIVE TEACHING AS A DETERMINANT OF THE PROFESSIONAL MOBILITY OF THE FUTURE TECHNOLOGY TEACHERS The article deals with the professional mobility and readiness for the innovative teaching of future technology teachers as interdependent components. The purpose of the article is to study the organizational and methodological aspects of forming the professional mobility of future technology teachers in the context of preparation for innovation activities at Ukrainian Teacher Training Universities. Using the complex of theoretical and empirical research methods, the main organizational and methodical aspects of training were determined: the construction of the individual trajectory of study based on selected disciplines; the acquisition of new branches of activity; the forming of the complete conception of technological education and future innovative teaching; the introduction of innovative technologies (interactive discussions, motivational trainings, group forms of work, a problem-solving training, design and research activities, mixed learning, etc.); implementation of Information and communications technology (ICT), distance, Smart and hybrid education systems (e-learning resources, electronic educational and methodical complexes, Web services, individual and group projects based on Web quests and Blog quests); the monitoring of the readiness level for professional mobility; the strengthening of the consultative and coordinating function of teachers. The implementation of certain aspects expands the possibilities of optimizing the educational process not only at the level of formation of the readiness for professional mobility, but also at the level of the entire system of training future technology teachers to innovative teaching. 
It requires: creating conditions for continuous learning; orienting the final stages of pedagogical education toward the formation of students' academic mobility; rapidly expanding vertical and horizontal professional mobility ranges; and forming future technology teachers' readiness and ability to change their specialization, place of work, or even profession, if necessary, or to master several specialties or training profiles at the same time.
<reponame>raphaelcohn/security-keys-rust
// This file is part of security-keys-rust. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/security-keys-rust/master/COPYRIGHT. No part of security-keys-rust, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2021 The developers of security-keys-rust. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/security-keys-rust/master/COPYRIGHT.
use enumflags2::{bitflags, BitFlag};
use likely::unlikely;
use serde::{Deserialize, Serializer, Deserializer};
use serde::Serialize;
use std::error;
use std::fmt;
use std::fmt::Debug;
use std::fmt::Display;
use std::fmt::Formatter;
use crate::version::Version;
use crate::interface::video::control::entities::Entity;
use crate::interface::video::control::entities::entity_index;
use crate::interface::video::control::entities::entity_index_non_constant;
use crate::interface::video::control::entity_identifiers::UnitEntityIdentifier;
use crate::interface::video::control::entity_identifiers::EntityIdentifier;
use crate::device::{DeviceConnection, DeadOrAlive};
use crate::interface::video::control::entities::unit::{Sources, WithSourcesUnitEntity};
use crate::interface::video::control::entities::unit::UnitEntity;
use crate::interface::video::control::entities::unit::SourcesParseError;
use crate::string::{LocalizedStrings, GetLocalizedStringError};
use crate::device::DeadOrAlive::Alive;
use crate::collections::{Bytes, WrappedBitFlags};
use crate::integers::u6;
use std::iter::FusedIterator;
use serde::ser::SerializeSeq;
use serde::de::{Visitor, SeqAccess, Error};
use crate::universally_unique_identifiers_support::UniversallyUniqueIdentifier;
include!("ExtensionCodeAndControls.rs");
include!("ExtensionControls.rs");
include!("ExtensionControlsIterator.rs");
include!("ExtensionUnitEntity.rs");
include!("ExtensionUnitEntityParseError.rs");
include!("H264ExtensionControl.rs");
include!("LogitechCodecExtensionControl.rs");
include!("LogitechCodecExtendedExtensionControl.rs");
include!("LogitechDeviceInformationVersion1ExtensionControl.rs");
include!("LogitechDeviceInformationVersion3ExtensionControl.rs");
include!("LogitechMotorVersion1ExtensionControl.rs");
include!("LogitechPeripheralExtensionControl.rs");
include!("LogitechTestDebugVersion3ExtensionControl.rs");
include!("LogitechUserHardwareVersion1ExtensionControl.rs");
include!("LogitechVideoPipeVersion1ExtensionControls.rs");
include!("LogitechVideoPipeVersion3ExtensionControls.rs");
include!("MicrosoftExtensionControl.rs");
include!("VimicroSisDopExtensionControls.rs");
|
IF the name Jesse Harris doesn’t ring a bell, perhaps Norah Jones and Ethan Hawke do. Those are just two of the artists Harris has worked with, most famously penning Jones’ hit “Don’t Know Why” (for which he won a 2003 Grammy for Best Song).
This past year, Harris co-produced the score for Hawke’s film “The Hottest State” and released his seventh album, “Feel,” to much acclaim.
Needing an apartment with enough room to both live and work four years ago, Harris found it in this 1,300-square-foot TriBeCa loft, which dates back to the 1860s.
“I was looking at a lot of places, and I didn’t see anything I liked,” recalls Harris.
He found the loft through a friend of a friend who was planning to sell it.
“I called him right there and then saw it the next day, and I loved it,” Harris says. “And I bought it from him without a broker.”
The previous owner did most of the major renovations, converting the space to a one-bedroom and adding a sliding door. Both bathrooms had been updated with designer fixtures, and the master bath was given a spacious steam shower.
Harris appreciates the modern conveniences, but it’s the historic details that truly resonate with him.
“The moldings around the windows are original, and even some of the glass panes are original,” he says.
As are the tin ceiling, painted white, and the wood floors, which Harris stripped and refinished.
“They are still rough, and I like that,” he says.
Harris’ apartment has a lived-in feel, but it’s also impeccably neat.
“The more uncluttered my environment is, the more I can think. I write a lot of songs here,” he says.
There are huge closets – a bonus for a musician with a collection of instruments, amplifiers, recording masters and LPs. But not every instrument is kept behind closed doors: Displayed are maracas that Harris bought in New Orleans and a kalimba (also known as a thumb piano) he found in Chile. There’s also a Deering banjo and a cherished 1956 Gibson LG-1 acoustic guitar.
But his Sohmer stand-up piano definitely takes center stage.
While Harris doesn’t compose on the piano, it’s a focal point for entertaining, and many of his famous friends, including Jones, have tinkled the keys.
To outfit the loft, Harris chose mid-century Danish-style furnishings (from BDDW on Crosby Street) accented with vintage-inspired lamps. Above a comfy ’50s-style sofa are built-in bookshelves that store and display his vast collection of books. And on the walls is a cherished collection of original art, including a limited- edition Seb Jarnot print.
Conspicuously absent is a TV set; Harris has not owned one for years.
The dinner table seats eight and is conveniently next to the fire escape, where friends can get some fresh air after dinner.
Harris seldom cooks, but his live-in girlfriend, Hannah, does, so he added a Sub-Zero refrigerator, granite countertops and a Wolf stove. The only appliance he kept from the previous owner was a Sub-Zero wine cooler.
“Two of my best friends are in the wine business, so they supply me with great Italian wines,” he says.
Although Harris didn’t initially imagine himself living in TriBeCa, today he couldn’t be happier.
Jesse Harris performs this Saturday at the World Financial Center’s Winter Garden. |
ST. LOUIS - For her dedication, commitment and courageous spirit, Appalachian State University women’s basketball head coach Angel Elderkin is a recipient of the 2017 United States Basketball Writers Association (USBWA) Pat Summitt Most Courageous Award, as announced on Thursday by the USBWA.
For the first time in the history of the award, the USBWA has chosen two very deserving recipients as ESPN reporter Holly Rowe will join Coach Elderkin as an award winner. Like coach Elderkin, Rowe has been in an ongoing battle with the deadly disease, but has continued to do what she loves while fighting it.
Named after legendary Tennessee Head Coach Pat Summitt in 2012, the Pat Summitt Most Courageous Award honors individuals associated with women’s college basketball.
Coach Elderkin epitomizes what the Pat Summitt Most Courageous Award stands for. Elderkin was diagnosed with Stage III Endometrial Cancer during the summer, but continued to coach the Mountaineers. All of the visible cancer was removed through surgery, but Coach Elderkin still had to complete a rigorous treatment plan before the season started. This plan included six chemotherapy treatment cycles and a period of radiation, which required her to be at the Lineberger Cancer Center five days a week. During her chemo treatments, she would commute from her treatments in Chapel Hill, N.C., to Boone, N.C., to continue being with the team for practices. During her radiation treatment, she would leave Monday after a morning workout and return Friday for an afternoon workout, rarely missing any workouts or events with her players. She remained visible to her players throughout one of the toughest battles she ever had to fight.
Heading into Thursday’s contest against UT Arlington, coach Elderkin has yet to miss a game this season, including five-straight road games to start the season. Following her final chemo treatment, the Mountaineers began a stretch of two games in four days on the road.
Coach Elderkin’s will to fight, her courage and bravery have inspired the Mountaineers’ staff, fan base and countless coaches and players across the nation. Her passion for basketball and the will to coach through adversity show you can do what you love while battling this deadly disease.
Coach Elderkin and Rowe will receive their awards at the USBWA Awards press conference at the NCAA Women’s Final Four in Dallas in March.
The U.S. Basketball Writers Association was formed in 1956 at the urging of then-NCAA Executive Director Walter Byers. With more than 900 members worldwide, it is one of the most influential organizations in college basketball. It has selected an All-America team since the 1956-57 season. For more information on the USBWA and its award programs, contact executive director Joe Mitch at 314-795-6821. |
Effect of rosiglitazone on the expression of tumor necrosis factor-α in patients with type 2 diabetes Objective To observe the effect of rosiglitazone on the expression of tumor necrosis factor-α (TNF-α) in patients with type 2 diabetes, and to assess the efficacy and safety of rosiglitazone. Methods This study included 30 patients newly diagnosed with type 2 diabetes with an HbA1c level ≥ 7.5% (DM group) and 30 healthy subjects (control group). Patients were treated with rosiglitazone 4 mg q.d. for 12 weeks. The serum levels of TNF-α, blood glucose, HbA1c, serum insulin, blood lipids and safety indices were measured. Results TNF-α levels were significantly higher in the DM group than those in the control group (P<0.05). After treatment with rosiglitazone the expression of TNF-α was significantly reduced (P<0.05), and HbA1c, fasting plasma glucose and fasting insulin levels all showed significant decreases at the end of treatment (P<0.05). There were significant decreases in triglyceride levels and increases in HDL-C, LDL-C and total cholesterol levels (P<0.05), but there was no significant change in the total cholesterol/HDL ratio. Rosiglitazone was well tolerated. Conclusion Compared with the control group, the level of TNF-α in diabetic patients is increased. Rosiglitazone decreases insulin resistance, increases insulin sensitivity, and inhibits the expression of TNF-α. Rosiglitazone improves HbA1c and fasting plasma glucose levels with beneficial effects on serum triglyceride and HDL-C levels.
The Study of the Radius of Connection of the Die for Deep Drawing Using Analysis with Finite Element Deep drawing is a complex process influenced by the geometric parameters of the die-punch system. In the present paper we study the behavior of the semi-finished product, in the process of drawing deep cylindrical parts, using the finite element method and the software package of the ANSYS program. In order to reduce the cost and design time, an analysis of the variation of the radius connection is carried out, resulting in low energy consumption, using the finite element method. By analysing the radius of connection of the plate, we identify future directions useful in substantiating the elaboration of a judicious experimental program and optimizing the geometric shape of the finished parts. |
List of contributors We use an agent-based model to investigate the interdependent dynamics between individual agency and emergent socioeconomic structure, leading to institutional change in a generic way. Our model simulates the emergence and exit of institutional units, understood as generic governed social structures. We show how endogenized trust and exogenously given leader authority influence institutional change, i.e., diversity in institutional life-cycles. It turns out that these governed institutions (de)structure in cyclical patterns dependent on the overall evolution of trust in the artificial society, while at the same time influencing this evolution by supporting social learning. Simulation results indicate three scenarios of institutional life-cycles. Institutions may (i) build up very fast and freeze the artificial society in a stable but fearful pattern (ordered system); (ii) exist only for a short time, leading to a very trusty society (highly fluctuating system); and (iii) structure in cyclical patterns over time and support social learning due to cumulative causation of societal trust (complex system). Reprinted from Special Issue: Aspects of Game Theory and Institutional Economics, Games. Cite as: Wäckerle, M.; Rengs, B.; Radax, W. An Agent-Based Model of Institutional Life-Cycles. Games 2014, 5, 160–187.
1. Field of the Invention
The present invention relates to a light emitting diode capable of implementing high light emission efficiency by using a surface plasmon phenomenon as well as showing reduced crystal defects (i.e., threading dislocations) and a method for fabricating the same.
2. Description of the Related Art
FIG. 1 is a cross-sectional view schematically showing a layer structure of a general planar LED 10. In the figure, the LED is constituted by a substrate 1, an n-type semiconductor layer 2, an active layer 3, and a p-type semiconductor layer 4 sequentially formed from the bottom. A p-electrode 5 is formed on the top of the p-type semiconductor layer 4, while an n-electrode 6 is formed on an exposure surface of the n-type semiconductor layer 2. However, in thin-film growth for fabricating a representative GaN-based LED, the lack of materials matched with lattice constants is pointed out as one of the main problems. For this reason, a homoepitaxially grown GaN thin film has problems such as high-density defects (i.e., threading dislocations, other point defects, and the like) caused due to the mismatch in lattices and thermal expansion coefficients between a GaN film and homogeneous substrates. Up to now, c-surface sapphire has been widely used as the substrate because it can be inexpensive and form a comparatively high-quality epitaxial layer in spite of its lattice mismatch (approximately 16%).
A technology of forming a buffer layer or a shock-absorbing layer on a substrate in order to reduce the threading dislocations caused due to the mismatch is widely known, but it also cannot help but generate a lot of defects. As another method, a technology such as epitaxial lateral overgrowth (ELOG) is proposed. However, overgrowth of GaN of approximately 10 μm, which is comparatively thick, is required in order to attach adjacent pattern regions to each other, and the process cost is increased.
In relation therewith, an attempt to solve the problem of the low threading dislocations by using porous semiconductors is made. This is a technology of reducing the threading dislocations by using the porous semiconductor having a crystalline structure of an initial material as a template for homoepitaxially lateral growth of a lattice mismatch material. For example, Hartono et al. reported that a GaN layer having low defect density can be formed by using a nano-porous GaN template (Phys. Status Solidi B 244, 1793 (2007)) and that a GaN layer subsequently grown on an annealed porous GaN template shows a characteristic in that threading dislocation density was reduced by approximately 60% (Appl. Phys. Lett. 90, 171917 (2007)). Furthermore, the same researchers proposed the influence of subsequent regrowth of a GaN buffer layer on the nano-porous GaN layer at different chamber temperatures and a mechanism in which the threading dislocations in the subsequently regrown GaN film were reduced (Phys. Status Solidi C 6, No. S2, S699-S702 (2009)).
Meanwhile, in recent years, an attempt to improve internal quantum efficiency by using surface plasmon through the interaction between light and metal has been made. The surface plasmon as collective charge density oscillation which occurs on the surface of a metal thin film is a surface electromagnetic wave which is localized to a very small region which is a boundary between metal and a dielectric (air, a semiconductor, or the like), that is, an interface between both materials and progressed on the interface.
The generated surface electromagnetic wave has energy that generates resonance that varies depending on a metal type and when the generated surface electromagnetic wave is sufficiently close to an active layer which exists in an LED and the energy of the surface electromagnetic wave is matched, energy coupling occurs. In this case, energy coupling occurs in non-emission recoupling energy as well as emission recoupling energy which occurs on the active layer, such that since light is emitted by the surface plasmon, the internal quantum efficiency is increased. In general, metals such as Pd and Al are mainly used in a UV emission region and metals such as Ag, Pt, Cu, Au, and the like are mainly used in a visible ray region.
As such, recombination speed of carriers which exist in the LED is improved through the intercoupling between the surface plasmon and the active layer formed by collective oscillation of free electrons which exist in metal. Therefore, an n-type GaN layer (alternately a p-type GaN layer) and an active layer (having multi-quantum well structure) are sequentially formed on the substrate and thereafter, a general metal layer is attached onto the surface of the active layer together with the p-type GaN layer (alternately, the n-type GaN layer) of a predetermined thickness (typically, approximately less than 50 nm), for the effective coupling of the active layer and the surface plasmon.
However, the related arts (Korean Patent Publication No. 2008-74474 and Korean Patent Publication No. 915502) can improve the internal quantum efficiency of the LED element through a surface plasmon resonance effect to some extent, but are still technologically limited in that the internal quantum efficiency is deteriorated due to the layer defect such as the threading dislocations.
As described above, the related arts which are previously known merely adopt only any one of a method of using the surface plasmon and a method of reducing the defects such as the threading dislocations, and the like in order to improve the internal quantum efficiency and cannot implement both methods.
Hospital Pharmacists in Quebec Achieve Higher Salaries in Contract with Government After 2 years of negotiations, the association representing Quebec's hospital pharmacists has reached agreement with the province on a new contract that raises salaries and narrows the gap between pharmacists working in hospitals and those in community pharmacy. Members of l'Association des pharmaciens des établissements de santé du Québec (A.P.E.S.) unanimously approved the agreement at a special meeting in Montreal on July 29, 2012. Under the deal, Quebec's hospital pharmacists are to be paid $42.59 an hour up from $37.68 under the previous contract. Pharmacists working in the private sector earn an average of $51 an hour, according to the association, which says the new contract narrows the salary gap between hospital and community pharmacists to 14%, down from 35%. The group has long been advocating for increased compensation, saying the comparatively low salaries have contributed to a significant shortage of pharmacists in Quebec hospitals.
from edera.managers import MongoManager
def test_manager_destroys_client_on_exit(mongo):
    """Check that MongoManager caches one client while active and drops it on exit."""
    with MongoManager(mongo):
        cached_client = mongo.instance
        # Inside the managed context, repeated accesses must yield the very same client.
        assert mongo.instance is cached_client
    # Leaving the context destroys the client, so the next access yields a fresh one.
    assert mongo.instance is not cached_client
|
A device of this type and use thereof are known in principle from DE 10 2005 045 569 A1.
Devices for generating short-wavelength radiation based on a discharge plasma which is generated in a pulsed manner are used, for example, to implement lithography methods, particularly when it is a question of high repetition frequency of the discharge processes. At the same time, fluctuations in the amount of radiation emitted are to be limited to less than 1% to ensure a consistent quality of the emitted radiation.
The electric voltage, typically several kilovolts, required for generation of a discharge plasma is supplied via a (resonant) charging circuit with a pulsed capacitor or capacitor bank. In so doing, a charging switch of the charging circuit is closed at a first switching time and the pulsed capacitor is charged. A virtually sinusoidal current flow through the inductor develops during this phase. At a second switching time, the charging switch is opened and a freewheeling switch is closed so that the energy stored in the inductor is decreased through a further rise in voltage at the pulsed capacitor. At a third point in time which is defined by the current zero crossing of the inductor, the plasma is ignited, i.e., a flow of current is initiated between two electrodes between which the plasma is to be generated.
In this way, the energy stored by the charging circuit for generation of the discharge plasma is provided as a discharge voltage at two electrodes which are separated by a gap.
When an electrically conductive channel is generated or provided between the electrodes at a firing time, the available energy flows along the channel, thereby exciting emission of a plasma of an emitter material. At the firing time, the discharge voltage will have reached a maximum. An electrically conductive channel can be produced through the evaporation of an emitter material, e.g., tin, by means of a high-energy radiation, e.g., a laser. In so doing, the emitter material can be present, for example, on the electrodes or can be introduced, for example, by injection of droplets, therebetween.
In devices for generating discharge plasmas, it is difficult in actual practice to coordinate the first and second switching times and the firing time in such a way that high repetition frequencies are accompanied by minimal deviations in the amount of radiation emitted by the discharge plasma. The reason for this consists in fluctuating parameter values, for example, voltage differences, different component values and variable plasma efficiency, i.e., the extent of conversion of electrical energy into radiation energy. In this respect, parameter values which change only slightly over a number of pulses may differ from those which do not change only slightly from pulse to pulse. As a rule, the parameter values which change slightly show a certain tendency in the direction of their changes (long-term drift), while the parameter values which change from pulse to pulse usually vary stochastically.
The device suggested in DE 10 2005 045 569 A1 has a resonant charging circuit with at least one charging switch for charging a charging capacitor of the resonant charging circuit and at least one discharging switch for discharging the resonant charging circuit. Further, a control is provided which calculates the switching times of the charging switch in real time depending on input values and one or more reference quantities. In order to reduce the required computing effort for coordinating the switching times, DE 10 2005 045 569 A1 suggests relying for calculation upon an approximation algorithm or on lookup tables which were calculated and set up in a non-time-critical method segment. In a time-critical method segment, actual measurement values, e.g., of the charging voltage, can be taken into consideration in real time and the switching times can be determined. The charging switch is triggered based on the calculated first and second switching times.
By means of the device according to DE 10 2005 045 569 A1, the second switching times are calculated in such a way that the desired discharge voltage is supplied at a precisely fixed firing time.
For calculating the second switching times and the firing time, it is known to apply semianalytic methods to a simplified model of a resonant charging circuit. First, a relationship is determined between a desired charging voltage and the second switching time. This relationship is typically nonlinear and can be described by a polynomial. The firing time can be determined using another, analytic relationship. This analytic relationship in which the second switching time is entered further contains simplifying assumptions about the behavior of the resonant charging circuit.
A disadvantage in a procedure of this kind consists in that the effects of various elements of the resonant charging circuit are not sufficiently acquired, if at all, due to the simplifications. Thus the effect of an existing degaussing circuit is not taken into account. Further, the DC voltage source is assumed to be ideal.
Due to these simplifications, significant errors can occur in calculating the second switching time and firing time which must be corrected subsequently by introducing optimization factors.
In the procedure mentioned above, it is also very difficult or even impossible to determine the relationship between the desired discharge voltage and the second switching time after changes in the topology of the resonant charging circuit. Every change in topology requires a new determination of the relationship, and complex topologies cannot be described at all by this procedure. |
/**
 * This class represents the startup procedure for a datacenter. Calling the start method will begin the startup
 * procedure in a background thread. Calling the {@link #join} method lets the main thread wait for startup of a
 * datacenter to either succeed or fail. Initialization dispatches on the replica type of the datacenter: disk-backed
 * datacenters are Helix-managed, while cloud-backed datacenters use a lightweight cluster change handler.
 */
class DatacenterInitializer {
private static final Logger logger = LoggerFactory.getLogger(DatacenterInitializer.class);
// Completed (normally or exceptionally) exactly once when the background initialization finishes.
private final CompletableFuture<DcInfo> initializationFuture = new CompletableFuture<>();
private final ClusterMapConfig clusterMapConfig;
private final HelixManager localManager;
private final HelixFactory helixFactory;
private final ClusterMapUtils.DcZkInfo dcZkInfo;
// Name of the datacenter being initialized (derived from dcZkInfo in the constructor).
private final String dcName;
// Fields to pass into both ClusterChangeHandlers
private final String selfInstanceName;
private final Map<String, Map<String, String>> partitionOverrideInfoMap;
private final HelixClusterManager.ClusterChangeHandlerCallback clusterChangeHandlerCallback;
private final HelixClusterManager.HelixClusterManagerCallback helixClusterManagerCallback;
private final HelixClusterManagerMetrics helixClusterManagerMetrics;
private final AtomicLong sealedStateChangeCounter;
// Fields to pass into only SimpleClusterChangeHandler (These can be removed if SimpleClusterChangeHandler is removed)
private final ConcurrentHashMap<ByteBuffer, AmbryPartition> partitionMap;
private final ConcurrentHashMap<String, AmbryPartition> partitionNameToAmbryPartition;
private final ConcurrentHashMap<AmbryPartition, Set<AmbryReplica>> ambryPartitionToAmbryReplicas;
/**
 * @param clusterMapConfig {@link ClusterMapConfig} to help some admin operations
 * @param localManager the {@link HelixManager} for the local datacenter that has already been started.
 * @param helixFactory the {@link HelixFactory} instance to construct managers.
 * @param dcZkInfo info about the DC, like connection string, name, and replica type
 * @param selfInstanceName the name of instance on which {@link HelixClusterManager} resides.
 * @param partitionOverrideInfoMap a map specifying partitions whose state should be overridden.
 * @param clusterChangeHandlerCallback a call back that allows current handler to update cluster-wide info.
 * @param helixClusterManagerCallback a help class to get cluster state from all DCs.
 * @param helixClusterManagerMetrics metrics that help track of cluster changes and infos.
 * @param sealedStateChangeCounter a counter that records event when replica is sealed or unsealed
 * @param partitionMap a map from serialized bytes to corresponding partition.
 * @param partitionNameToAmbryPartition a map from partition name to {@link AmbryPartition} object.
 * @param ambryPartitionToAmbryReplicas a map from {@link AmbryPartition} to its replicas.
 */
DatacenterInitializer(ClusterMapConfig clusterMapConfig, HelixManager localManager, HelixFactory helixFactory,
ClusterMapUtils.DcZkInfo dcZkInfo, String selfInstanceName,
Map<String, Map<String, String>> partitionOverrideInfoMap,
HelixClusterManager.ClusterChangeHandlerCallback clusterChangeHandlerCallback,
HelixClusterManager.HelixClusterManagerCallback helixClusterManagerCallback,
HelixClusterManagerMetrics helixClusterManagerMetrics, AtomicLong sealedStateChangeCounter,
ConcurrentHashMap<ByteBuffer, AmbryPartition> partitionMap,
ConcurrentHashMap<String, AmbryPartition> partitionNameToAmbryPartition,
ConcurrentHashMap<AmbryPartition, Set<AmbryReplica>> ambryPartitionToAmbryReplicas) {
this.clusterMapConfig = clusterMapConfig;
this.localManager = localManager;
this.helixFactory = helixFactory;
this.dcZkInfo = dcZkInfo;
this.selfInstanceName = selfInstanceName;
this.partitionOverrideInfoMap = partitionOverrideInfoMap;
this.clusterChangeHandlerCallback = clusterChangeHandlerCallback;
this.helixClusterManagerCallback = helixClusterManagerCallback;
this.helixClusterManagerMetrics = helixClusterManagerMetrics;
this.sealedStateChangeCounter = sealedStateChangeCounter;
this.partitionMap = partitionMap;
this.partitionNameToAmbryPartition = partitionNameToAmbryPartition;
this.ambryPartitionToAmbryReplicas = ambryPartitionToAmbryReplicas;
dcName = dcZkInfo.getDcName();
}
/**
 * Begin the startup procedure in a background thread. The result (success or failure) is reported through the
 * future awaited by {@link #join}.
 */
public void start() {
// NOTE(review): the boolean argument to Utils.newThread presumably controls daemon status — confirm its meaning.
Utils.newThread(() -> {
try {
DcInfo dcInfo;
// Choose the startup path based on how replicas in this DC are backed.
switch (dcZkInfo.getReplicaType()) {
case DISK_BACKED:
dcInfo = initializeHelixDatacenter();
break;
case CLOUD_BACKED:
dcInfo = initializeCloudDatacenter();
break;
default:
throw new UnsupportedOperationException("Unknown replica type: " + dcZkInfo.getReplicaType());
}
initializationFuture.complete(dcInfo);
} catch (Exception e) {
logger.warn("Exception while initializing datacenter {}", dcName, e);
onInitializationFailure(e);
}
}, false).start();
}
/**
 * Wait for startup to either succeed or fail.
 * @return the {@link DcInfo} for the datacenter (a handle to the stateful objects related to the datacenter).
 * @throws Exception if startup of the datacenter fails.
 */
DcInfo join() throws Exception {
try {
return initializationFuture.get();
} catch (ExecutionException e) {
// Unwrap the ExecutionException so callers see the original failure cause thrown by the background thread.
throw Utils.extractExecutionExceptionCause(e);
}
}
/**
 * @return the datacenter name for this initializer.
 */
public String getDcName() {
return dcName;
}
/**
 * Callback to complete the future when an error occurs
 * @param e the {@link Exception}.
 */
private void onInitializationFailure(Exception e) {
initializationFuture.completeExceptionally(e);
}
/**
 * Perform initialization for a helix-managed datacenter of servers.
 * @return the {@link DcInfo} for the datacenter.
 * @throws Exception if something went wrong during startup
 */
private DcInfo initializeHelixDatacenter() throws Exception {
// For now, the first ZK endpoint (if there are more than one endpoints) will be adopted by default for initialization.
// Note that, Ambry currently doesn't support multiple spectators, because there should be only one source of truth.
String zkConnectStr = dcZkInfo.getZkConnectStrs().get(0);
HelixManager manager;
// Reuse the already-started manager for the local DC; for remote DCs, connect a new spectator manager.
if (dcZkInfo.getDcName().equals(clusterMapConfig.clusterMapDatacenterName)) {
manager = Objects.requireNonNull(localManager, "localManager should have been set");
} else {
manager = helixFactory.getZKHelixManager(clusterMapConfig.clusterMapClusterName, selfInstanceName,
InstanceType.SPECTATOR, zkConnectStr);
logger.info("Connecting to Helix manager at {}", zkConnectStr);
manager.connect();
logger.info("Established connection to Helix manager at {}", zkConnectStr);
}
// Instantiate the cluster change handler implementation selected via configuration.
HelixClusterChangeHandler clusterChangeHandler;
String clusterChangeHandlerType = clusterMapConfig.clusterMapClusterChangeHandlerType;
if (clusterChangeHandlerType.equals(SimpleClusterChangeHandler.class.getSimpleName())) {
clusterChangeHandler =
new SimpleClusterChangeHandler(clusterMapConfig, dcName, selfInstanceName, partitionOverrideInfoMap,
partitionMap, partitionNameToAmbryPartition, ambryPartitionToAmbryReplicas, helixClusterManagerCallback,
helixClusterManagerMetrics, this::onInitializationFailure, sealedStateChangeCounter);
} else if (clusterChangeHandlerType.equals(DynamicClusterChangeHandler.class.getSimpleName())) {
clusterChangeHandler =
new DynamicClusterChangeHandler(clusterMapConfig, dcName, selfInstanceName, partitionOverrideInfoMap,
helixClusterManagerCallback, clusterChangeHandlerCallback, helixClusterManagerMetrics,
this::onInitializationFailure, sealedStateChangeCounter);
} else {
throw new IllegalArgumentException("Unsupported cluster change handler type: " + clusterChangeHandlerType);
}
// Create RoutingTableProvider of each DC to keep track of partition(replicas) state. Here, we use current
// state based RoutingTableProvider to remove dependency on Helix's pipeline and reduce notification latency.
logger.info("Creating routing table provider associated with Helix manager at {}", zkConnectStr);
RoutingTableProvider routingTableProvider = new RoutingTableProvider(manager, PropertyType.CURRENTSTATES);
logger.info("Routing table provider is created in {}", dcName);
routingTableProvider.addRoutingTableChangeListener(clusterChangeHandler, null);
logger.info("Registered routing table change listeners in {}", dcName);
// The initial instance config change notification is required to populate the static cluster
// information, and only after that is complete do we want the live instance change notification to
// come in. We do not need to do anything extra to ensure this, however, since Helix provides the initial
// notification for a change from within the same thread that adds the listener, in the context of the add
// call. Therefore, when the call to add a listener returns, the initial notification will have been
// received and handled.
manager.addInstanceConfigChangeListener(clusterChangeHandler);
logger.info("Registered instance config change listeners for Helix manager at {}", zkConnectStr);
manager.addIdealStateChangeListener(clusterChangeHandler);
logger.info("Registered ideal state change listeners for Helix manager at {}", zkConnectStr);
// Now register listeners to get notified on live instance change in every datacenter.
manager.addLiveInstanceChangeListener(clusterChangeHandler);
logger.info("Registered live instance change listeners for Helix manager at {}", zkConnectStr);
// in case initial event occurs before adding routing table listener, here we explicitly set snapshot in
// ClusterChangeHandler. The reason is, if listener missed initial event, snapshot inside routing table
// provider should be already populated.
clusterChangeHandler.setRoutingTableSnapshot(routingTableProvider.getRoutingTableSnapshot());
// the initial routing table change should populate the instanceConfigs. If it's empty that means initial
// change didn't come and thread should wait on the init latch to ensure routing table snapshot is non-empty
if (clusterChangeHandler.getRoutingTableSnapshot().getInstanceConfigs().isEmpty()) {
// Periodic refresh in routing table provider is enabled by default. In worst case, routerUpdater should
// trigger routing table change within 5 minutes
logger.info("Routing table snapshot in {} is currently empty. Waiting for initial notification.", dcName);
clusterChangeHandler.waitForInitNotification();
}
// If cross-colo listening is disabled, drop the connection to remote-DC ZK servers; the local manager is kept.
if (!clusterMapConfig.clustermapListenCrossColo && manager != localManager) {
manager.disconnect();
logger.info("Stopped listening to cross colo ZK server {}", zkConnectStr);
}
return new HelixDcInfo(dcName, dcZkInfo, manager, clusterChangeHandler);
}
/**
 * Currently, this does not connect to the VCR zookeeper and assumes that all partitions are supported in the cloud
 * datacenter. This will be the case until the VCR and native storage clusters are unified under the same
 * {@link ClusterMap}. Once this happens, we can use the VCR cluster as a source of truth for supported partitions
 * in the cloud datacenter.
 * @return the {@link DcInfo} for the cloud datacenter.
 * @throws Exception if something went wrong during startup
 */
private DcInfo initializeCloudDatacenter() throws Exception {
CloudServiceClusterChangeHandler clusterChangeHandler =
new CloudServiceClusterChangeHandler(dcName, clusterMapConfig, clusterChangeHandlerCallback);
return new DcInfo(dcName, dcZkInfo, clusterChangeHandler);
}
}
Double point compression for elliptic curves of j-invariant 0 The article provides a new double point compression method (to 2⌈log₂ q⌉ + 4 bits) for an elliptic curve E_b : y² = x³ + b of j-invariant 0 over a finite field F_q such that q ≡ 1 (mod 3). More precisely, we obtain explicit simple formulas transforming the coordinates x₀, y₀, x₁, y₁ of two points P₀, P₁ ∈ E_b(F_q) to some two elements of F_q with four auxiliary bits. In order to recover (in the decompression stage) the points P₀, P₁ it is proposed to extract a sixth root ⁶√Z ∈ F_q of some element Z ∈ F_q. It is known that for q ≡ 3 (mod 4), q ≢ 1 (mod 27) this can be implemented by means of just one exponentiation in F_q. Therefore the new compression method seems to be much faster than the classical one with the coordinates x₀, x₁, whose decompression stage requires two exponentiations in F_q.
1. Field of the Invention
The present invention relates to an electronic component method and apparatus for mounting on a circuit board an electronic component of, for example, an Integrated Circuit (IC) chip and a surface acoustic wave (SAW) device on an electronic circuit use printed board (referred to as a “board” as a representative example in the present specification, and the “board” means a mounting base object on which other components such as an interposer and/or an electronic component are mounted) in a single body (a bear IC in the case of an IC chip) state, and relates to an electronic component unit obtained by mounting the electronic components on the board by the mounting method.
2. Description of the Related Art
In these days, electronic circuit boards have come to be used in various sorts of products, and the performance thereof has been improved day by day with increased frequencies used on the circuit boards. Flip chip mounting, which provides reduced impedance, is a mounting method suitable for electronic equipment that uses high frequencies. There is also demanded flip chip mounting for mounting on a circuit board an IC chip not in a package style but in a bear component style in compliance with the increase in number of portable devices. For this reason, when IC chips are mounted as a single chip on a circuit board or mounted on electronic equipment and a flat panel display, the IC chips include a certain amount of defective ones. Other than the flip chip, CSP (Chip Size Package), BGA (Ball Grid Array) and so on have been increasingly employed.
As a conventional method for bonding an IC chip onto the circuit board of electronic equipment (first prior art), there is the method disclosed in Japanese Examined Patent Publication No. 06-66355 and so on. This is shown in FIG. 15. As shown in FIG. 15, there is generally known the method of transferring an Ag paste 74 onto an IC chip 71 provided with bumps 73 for connection to electrodes 75 of a circuit board 76, thereafter hardening the Ag paste 74 and thereafter pouring an encapsulating material 78 between the IC chip 71 and the circuit board 76.
As a method for connecting an IC chip to a liquid crystal display (second prior art), as disclosed in Japanese Examined Patent Publication No. 62-6652 shown in FIG. 16A and FIG. 16B, there is generally known a semiconductor chip connection structure in which an anisotropic conductive film 80 is employed, and by peeling off an anisotropic conducting adhesive layer 81 constituted by adding conductive minute segments 82 into an insulating resin 83 from a separator 85 and applying the film onto a board or the glass of a liquid crystal display 84 and thermocompression-bonding an IC chip 86, the anisotropic conducting adhesive layer 81 is interposed between a lower surface of the IC chip 86 and the board 84 except for spaces under the Au bump 87.
As a third prior art, there is known a method for applying a UV-curing resin onto a board, hardening the resin located between the two by UV irradiation while mounting an IC chip on it with pressurization, and maintaining a contact between the two by the contracting force of the resin.
As described above, the IC chip bonding has been performed by performing the processes of die-bonding an IC chip of a flat package or the like onto a lead frame, connecting the electrodes of the IC chip to the lead frame by wire bonding, forming a package with resin encapsulation, thereafter printing a solder paste on the circuit board, mounting the flat package IC thereon, and subjecting the IC to reflow. According to the above-mentioned method called SMT (Surface Mount Technology), the process of packaging the IC is long and requires much time for the production of IC components, and it has been difficult to miniaturize the circuit board. For example, an IC chip encapsulated in a flat pack occupies an area about four to ten times greater than that of the IC chip, and this has been a factor of hindering the miniaturization.
In contrast to this, the flip chip method for mounting an IC chip in a bear chip style directly on a board for the reduction of process and the reduction in size and weight has come to be adopted lately. With regard to this flip chip method, there are developed many processing methods such as stud bump bonding (SBB) for performing bump formation on an IC chip, bump leveling, Ag.Pd paste transfer, mounting, inspection, encapsulation with encapsulation resin, and inspection and UV resin bonding for concurrently performing bump formation on an IC chip and UV-curing resin coating on a board and thereafter performing mounting, UV-curing of resin and inspection.
However, any of the processing methods has had the drawback that much time is required for the hardening of the paste for bonding the bumps of the IC chip to the electrodes of the board and for the coating and hardening of the encapsulation resin, leading to degraded productivity. There has also been the issue that ceramic or glass of which the quantity of warp is controlled is needed as a circuit board, leading to cost increase.
Moreover, according to the processing method that uses a conductivity paste as in the first prior art for the bonding material, it has been required to level and flatten the IC chip bumps before use in order to stabilize the quantity of transfer.
Moreover, in the bonding structure with an anisotropic conducting adhesive as in the second prior art, there has been developed one that employs glass as a board material of the circuit board. It is required to hold conductive particles between the IC chip side electrode and the board side electrode for electric continuity between the electrodes, and therefore, it is required to uniformly disperse the conductive particles in the conductive adhesive. However, it is difficult to uniformly disperse the conductive particles in the conductive adhesive, and this has disadvantageously caused short circuit due to the abnormal dispersion of the particles and led to the expensive cost of the conductive adhesive and the necessity of the formation of the IC chip bumps by electroplating in order to shape the height of the bumps.
Moreover, according to the method of performing bonding using a UV-curing resin as in the third prior art, it is required to set a bump height variation within a range of ±1 (μm), and there has been the issue that the bonding cannot be achieved in the case of a board of a degraded flatness such as a resin board (glass epoxy board). Moreover, according to the method using solder, it has been required to pour and harden the encapsulation resin in order to alleviate the thermal expansion and shrinkage difference between the board and the IC chip after bonding. A time of two to eight hours has been needed for the hardening of the encapsulation resin, and this has led to the issue that the productivity is extremely degraded.
Accordingly, the object of the present invention is to solve the aforementioned issues and provide an electronic component mounting method and apparatus for bonding electronic components to circuit boards with high productivity and high reliability by interposing an anisotropic conductive layer that has conductive particles needing neither a resin encapsulation process for pouring a resin between an electronic component and a board nor a bump leveling process for shaping the bump height after the electronic component is bonded to the circuit board as well as an electronic component unit obtained by mounting the electronic component on the board by the above-mentioned mounting method. |
<gh_stars>10-100
/**************************************************
* Author: HuLang *
* Notes: Some functions about Console Control. *
* License: Copyleft. Enjoy it Just for fun. *
* Date: 2008-12-17 00:28:39 *
***************************************************/
#ifndef CC_H_INCLUDED
#define CC_H_INCLUDED
#ifdef _WIN32
#include <stdio.h>
#include <stdlib.h>
#include <conio.h>
#include <windows.h>
#else
#include <termios.h>
#include <unistd.h>
#include <signal.h>
#endif
/* Integer aliases used throughout this API.
 * NOTE(review): 'uint32' is 'unsigned long', which is 64 bits on LP64
 * platforms (most 64-bit Linux) — confirm the intended width. */
#ifndef COMMEN_TYPE
#define COMMEN_TYPE
typedef unsigned char uint8;
typedef unsigned short uint16;
typedef unsigned long uint32;
#endif // COMMEN_TYPE
/* Key code definitions. JK_FUNC_KEY / JK_CTRL_KEY are presumably the
 * extended-key prefix bytes of the console input API — confirm in cc.c. */
#define JK_FUNC_KEY 0x00
#define JK_CTRL_KEY 0xE0
#define JK_ESC 0x001B
#define JK_ENTER 0x000D
#define JK_SPACE 0x0020
#define JK_BKSPACE 0x0008
#define JK_TAB 0x0009
#define JK_CTRL_Z 0x001A
#define JK_CTRL_X 0x0018
#define JK_CTRL_C 0x0003
#define JK_CTRL_A 0x0001
#define JK_CTRL_S 0x0013
#define JK_CTRL_D 0x0004
#define JK_LEFT 0xE04B
#define JK_RIGHT 0xE04D
#define JK_UP 0xE048
#define JK_DOWN 0xE050
#define JK_INSERT 0xE052
#define JK_HOME 0xE047
#define JK_PGUP 0xE049
#define JK_DELETE 0xE053
#define JK_END 0xE04F
#define JK_PGDW 0xE051
#define JK_F1 0xFF3B
#define JK_F2 0xFF3C
#define JK_F3 0xFF3D
#define JK_F4 0xFF3E
#define JK_F5 0xFF3F
#define JK_F6 0xFF40
#define JK_F7 0xFF41
#define JK_F8 0xFF42
#define JK_F9 0xFF43
#define JK_F10 0xFF44
#define JK_F11 0xE085
#define JK_F12 0xE086
/* Console size limits. */
#define MIN_CONSOLE_WIDTH 14
#define MIN_CONSOLE_HEIGHT 1
/* Color definitions (classic 16-color console palette). */
typedef enum _PCCOLOR
{
BLACK = 0, // black
BLUE = 1, // blue
GREEN = 2, // green
CYAN = 3, // cyan
RED = 4, // red
MAGENTA = 5, // magenta
BROWN = 6, // brown
LIGHT_GRAY = 7, // light gray
DARK_GRAY = 8, // dark gray
LIGHT_BLUE = 9, // light blue
LIGHT_GREEN = 10, // light green
LIGHT_CYAN = 11, // light cyan
LIGHT_RED = 12, // light red
LIGHT_MAGENTA = 13, // light magenta
YELLOW = 14, // yellow
WHITE = 15 // white
}PCCOLOR;
#ifdef __cplusplus
extern "C"
{
#endif
/* Delay for d milliseconds. */
void delayMS(uint32 d);
/* Clear the console text. */
void clearText(void);
/* Pause and wait for the user to press a key. */
void pauseHere(void);
/* Emit a simple beep (blocking — use with care). */
int simpleRing(uint16 freq, uint16 len);
/* Set the text (foreground) color, 0~15. */
int setTextColor(uint8 fColor);
/* Get the text (foreground) color, 0~15. */
PCCOLOR getTextColor(void);
/* Set the text background color, 0~15. */
int setBackColor(uint8 bColor);
/* Get the text background color, 0~15. */
PCCOLOR getBackColor(void);
/* Set both the text and background colors, 0~15. */
int setColors(uint8 fColor, uint8 bColor);
/* Enable/disable swapped rendering of the fore/background colors. */
int setSwapColors(int b);
/* Enable/disable underlined text. */
int setUnderLine(int b);
/* Get the maximum length of a console text line [default 80]. */
uint8 getLineWidth(void);
/* Get the maximum number of console text lines. */
uint8 getLineNum(void);
/* Get the cursor's X coordinate [column]. */
uint8 getCursorX(void);
/* Get the cursor's Y coordinate [row]. */
uint8 getCursorY(void);
/* Position the cursor on screen; x is the column, y is the row. */
int gotoTextPos(uint8 x, uint8 y);
/* Set cursor visibility. */
int setCursorVisible(int b);
/* Set the cursor (thickness) size, 1-100. */
int setCursorSize(uint8 s);
/* Get the console title string. */
int getConsoleTitle(char *title, uint8 len);
/* Set the console title string. */
int setConsoleTitle(char *title);
/* Set a console window size with no scroll bars. */
int fixConsoleSize(uint16 width, uint16 height);
/* Print console-related information (debugging only). */
int showConsoleInfo();
/* Get a key from console input. */
uint16 jkGetKey(void);
/* Check whether a key has been pressed on the console. */
int jkHasKey(void);
#ifdef __cplusplus
}
#endif
#endif // CC_H_INCLUDED
Grid Types Used to Strengthen Reinforced Concrete Panels Subjected to Impact Loading The protection of existing reinforced concrete structures against impact loads, the effects of blast, or both, has become a major concern of researchers and engineers. Sudden impacts and release of high-speed loads, such as those from bullets, fragment bombs, and bare explosive charges, can generate high pressures on structures, resulting in fragmentation that can cause serious damage, injuries, and casualties. Results from an experimental investigation performed on reinforced concrete slabs retrofitted with different grids and polypropylene, polyethylene, and steel meshes are presented. The testing program includes impact tests using a falling weight and fragmentation simulation using an air gun. These tests were conducted on square concrete panels with dimensions of 600 × 600 × 100 mm. Deflections and depths of penetration were measured, as were applied and absorbed kinetic energy and the kinetic energy of the resulting concrete fragments. Damage and mode of failure post-impact were also observed and recorded. Results and analysis of the data and observations showed that the concrete panels retrofitted with a combination of steel mesh and polyethylene grid provided the most promising retrofitting protection when compared with other options.
package log
import (
"fmt"
)
// LogLevel identifies the severity of a log message.
type LogLevel int

// Severity levels, from least to most verbose. Note that iota + 1 makes
// LogLevelNone == 1, so the zero value of LogLevel matches no named level.
const (
	LogLevelNone LogLevel = iota + 1
	LogLevelError
	LogLevelInfo
	LogLevelDebug
)

// String returns a human-readable name for the level; unrecognized values
// are rendered as "UNKNOWN [<n>]".
func (l LogLevel) String() string {
	names := map[LogLevel]string{
		LogLevelNone:  "NONE",
		LogLevelError: "ERROR",
		LogLevelInfo:  "INFO",
		LogLevelDebug: "DEBUG",
	}
	if name, ok := names[l]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN [%d]", l)
}

// ILogger is the interface used to get logging from graceful-shutdown internals.
type ILogger interface {
	// Log a message at the given level.
	Log(level LogLevel, msg string)
}
|
Amodiaquine clearance and its metabolism to N-desethylamodiaquine is mediated by CYP2C8: a new high affinity and turnover enzyme-specific probe substrate. Amodiaquine (AQ) metabolism to N-desethylamodiaquine (DEAQ) is the principal route of disposition in humans. Using human liver microsomes and two sets of recombinant human cytochrome P450 isoforms (from lymphoblastoids and yeast) we performed studies to identify the CYP isoform(s) involved in the metabolism of AQ. CYP2C8 was the main hepatic isoform that cleared AQ and catalyzed the formation of DEAQ. The extrahepatic P450s, 1A1 and 1B1, also cleared AQ and catalyzed the formation of an unknown metabolite M2. The K(m) and V(max) values for AQ N-desethylation were 1.2 microM and 2.6 pmol/min/pmol of CYP2C8 for recombinant CYP2C8, and 2.4 microM and 1462 pmol/min/mg of protein for human liver microsomes (HLMs), respectively. Relative contribution of CYP2C8 in the formation of DEAQ was estimated at 100% using the relative activity factor method. Correlation analyses between AQ metabolism and the activities of eight hepatic P450s were made on 10 different HLM samples. Both the formation of DEAQ and the clearance of AQ showed excellent correlations (r = 0.98 and 0.95) with 6alpha-hydroxylation of paclitaxel, a marker substrate for CYP2C8. The inhibition of DEAQ formation by quercetin was competitive with K(i) values of 1.96 for CYP2C8 and 1.56 microM for HLMs. Docking of AQ into the active site homology models of the CYP2C isoforms showed favorable interactions with CYP2C8, which supported the likelihood of an N-desethylation reaction. These data show that CYP2C8 is the main hepatic isoform responsible for the metabolism of AQ. The specificity, high affinity, and high turnover make AQ desethylation an excellent marker reaction for CYP2C8 activity. |
<filename>keras/caffe/test_converted.py
from keras.models import Sequential, model_from_json
from keras.optimizers import SGD
from scipy import misc
import numpy as np
import copy
if __name__ == "__main__":
    # Output heads of the converted model (three classifier heads, matching
    # the Caffe GoogLeNet-style loss layer names).
    out_layer_names = ["loss1/loss", "loss2/loss", "loss3/loss3"]

    print("Preparing test image.")
    # Read image.
    im = misc.imread('models/cat.jpg')
    # Resize to the network's expected 224x224 input.
    im = misc.imresize(im, (224, 224)).astype(np.float32)
    # Change RGB to BGR (Caffe channel order). A reversed view is sufficient
    # because the mean subtraction below operates in place on it.
    im = im[:, :, ::-1]
    # Remove train image mean (per-channel BGR means).
    im[:, :, 0] -= 104.006
    im[:, :, 1] -= 116.669
    im[:, :, 2] -= 122.679
    # Transpose image dimensions (Keras uses the channels as the 1st dimension).
    im = np.transpose(im, (2, 0, 1))
    # Insert a new dimension for the batch_size.
    im = np.expand_dims(im, axis=0)

    # Load the converted model: structure from JSON, weights from HDF5.
    print("Loading model.")
    model = model_from_json(open('models/Keras_model_structure.json').read())
    model.load_weights('models/Keras_model_weights.h5')

    # Compile the converted model; every output head uses the same loss.
    print("Compiling model.")
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    loss = {name: 'categorical_crossentropy' for name in out_layer_names}
    model.compile(optimizer=sgd, loss=loss)

    # Predict image output (the model has a single input named 'data').
    print("Applying prediction.")
    in_data = {'data': im}
    out = model.predict(in_data)

    # Load ImageNet classes file (one label per line).
    classes = []
    with open('models/classes.txt', 'r') as list_:
        for line in list_:
            classes.append(line.rstrip('\n'))

    # Report the argmax class for each output head.
    for i, name in enumerate(out_layer_names):
        print('Prediction on output layer "' + name + '": ' + str(classes[np.argmax(out[i])]))
|
<filename>SMB3_v2_C++/assets/classes/audio/NullAudio.h
#pragma once
// NullAudio: an Audio implementation whose name suggests the Null Object
// pattern — presumably every override is a no-op so audio can be disabled
// without null checks at call sites. Definitions live elsewhere; confirm in
// the corresponding .cpp file.
// NOTE(review): this header does not include the header that declares Audio;
// it relies on the including translation unit having done so — verify.
class NullAudio : public Audio {
public:
	// Overrides of the Audio interface (declarations only).
	void Initialize() override;
	void PlayAudio(AudioType, bool = false, float = 1.0f, float = 100.0f) override;
	void PauseAudio(AudioType) override;
	void StopAudio(AudioType) override;
	void StopAll() override;
	void Release() override;
};
package lsieun.bytecode.core;
import lsieun.utils.radix.HexUtils;
/**
 * Abstract base for parsed bytecode nodes: stores the raw bytes a node was
 * read from and exposes them as-is or as a hex string.
 */
public abstract class Node {
    /** Raw byte content backing this node. */
    private byte[] data;

    /** @return the raw bytes backing this node */
    public byte[] getBytes() {
        return data;
    }

    /** Replaces the raw bytes backing this node. */
    public void setBytes(byte[] bytes) {
        data = bytes;
    }

    /** @return the hex representation of this node's raw bytes */
    public String getHexCode() {
        return HexUtils.fromBytes(getBytes());
    }
}
|
# coding=utf-8
# Copyright 2023 HyperBO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test gp.py.
Use the following to debug nan.
from jax.config import config
config.update('jax_debug_nans', True)
"""
import functools
import logging
import time
from absl.testing import absltest
from absl.testing import parameterized
from hyperbo.basics import definitions as defs
from hyperbo.basics import params_utils
from hyperbo.gp_utils import basis_functions as bf
from hyperbo.gp_utils import gp
from hyperbo.gp_utils import kernel
from hyperbo.gp_utils import mean
from hyperbo.gp_utils import objectives as obj
from hyperbo.gp_utils import utils
import jax
# Module-level aliases for brevity in the tests below.
DEFAULT_WARP_FUNC = utils.DEFAULT_WARP_FUNC
GPParams = defs.GPParams
retrieve_params = params_utils.retrieve_params
class ObjectivesTest(parameterized.TestCase):
  """Tests for objectives.py."""

  @parameterized.named_parameters(
      ('squared_exponential kl', kernel.squared_exponential,
       utils.kl_multivariate_normal, 'lbfgs'),
      ('matern32 kl', kernel.matern32, utils.kl_multivariate_normal, 'lbfgs'),
      ('matern52 kl', kernel.matern52, utils.kl_multivariate_normal, 'lbfgs'),
      ('matern32_mlp kl', kernel.matern32_mlp, utils.kl_multivariate_normal,
       'lbfgs'),
      ('matern52_mlp kl', kernel.matern52_mlp, utils.kl_multivariate_normal,
       'lbfgs'),
      ('squared_exponential_mlp kl', kernel.squared_exponential_mlp,
       utils.kl_multivariate_normal, 'lbfgs'),
      ('dot_product_mlp kl', kernel.dot_product_mlp,
       utils.kl_multivariate_normal, 'lbfgs'),
      ('squared_exponential euclidean', kernel.squared_exponential,
       utils.euclidean_multivariate_normal, 'lbfgs'),
      ('dot_product_mlp kl adam', kernel.dot_product_mlp,
       utils.kl_multivariate_normal, 'adam'),
      ('squared_exponential_mlp kl adam', kernel.squared_exponential_mlp,
       utils.kl_multivariate_normal, 'adam'),
  )
  def test_sample_mean_cov_regularizer(self, cov_func, distance, opt_method):
    """Test that GP parameters can be inferred correctly."""
    # Fixed seed keeps the sampled inputs and targets deterministic.
    key = jax.random.PRNGKey(0)
    key, init_key = jax.random.split(key)
    n = 20
    vx = jax.random.normal(key, (n, 2))
    # Ground-truth GP hyperparameters used to generate the data.
    params = GPParams(
        model={
            'constant': 5.,
            'lengthscale': 1.,
            'signal_variance': 1.0,
            'noise_variance': 0.01,
        })
    # MLP-based kernels additionally need their network weights initialized
    # to match the shape of vx.
    if cov_func in [
        kernel.squared_exponential_mlp, kernel.matern32_mlp, kernel.matern52_mlp
    ]:
      params.config['mlp_features'] = (8,)
      key, _ = jax.random.split(key)
      bf.init_mlp_with_shape(key, params, vx.shape)
    elif cov_func == kernel.dot_product_mlp:
      key, _ = jax.random.split(key)
      params.model['dot_prod_sigma'] = 0.1
      params.model['dot_prod_bias'] = 0.
      params.config['mlp_features'] = (8,)
      key, _ = jax.random.split(key)
      bf.init_mlp_with_shape(key, params, vx.shape)
    mean_func = mean.constant
    logging.info(msg=f'params = {params}')
    key, _ = jax.random.split(key)
    # One named dataset entry: 10 functions sampled from the ground-truth GP.
    dataset = [(vx,
                gp.sample_from_gp(
                    key, mean_func, cov_func, params, vx,
                    num_samples=10), 'all_data')]
    # Minimize sample_mean_cov_regularizer.
    init_params = GPParams(
        model={
            'constant': 5.1,
            'lengthscale': 0.,
            'signal_variance': 0.,
            'noise_variance': -4.
        },
        config={
            'method':
                opt_method,
            'max_training_step':
                2,
            'logging_interval': 1,
            'objective':
                functools.partial(
                    obj.multivariate_normal_divergence, distance=distance),
            'batch_size': 100,
            'learning_rate': 0.001,
        })
    if cov_func in [
        kernel.squared_exponential_mlp, kernel.matern32_mlp, kernel.matern52_mlp
    ]:
      init_params.config['mlp_features'] = (8,)
      bf.init_mlp_with_shape(init_key, init_params, vx.shape)
    elif cov_func == kernel.dot_product_mlp:
      init_params.model['dot_prod_sigma'] = 0.1
      init_params.model['dot_prod_bias'] = 0.
      init_params.config['mlp_features'] = (8,)
      bf.init_mlp_with_shape(init_key, init_params, vx.shape)
    warp_func = DEFAULT_WARP_FUNC
    model = gp.GP(
        dataset=dataset,
        mean_func=mean_func,
        cov_func=cov_func,
        params=init_params,
        warp_func=warp_func)

    def reg(gpparams, gpwarp_func=None):
      # Divergence-based regularizer evaluated on the model's dataset.
      return obj.multivariate_normal_divergence(
          mean_func=model.mean_func,
          cov_func=model.cov_func,
          params=gpparams,
          dataset=model.dataset,
          warp_func=gpwarp_func,
          distance=distance)

    def nll_func(gpparams, gpwarp_func=None, use_cholesky=True):
      # Negative log marginal likelihood; use_cholesky toggles the solver.
      return obj.neg_log_marginal_likelihood(
          mean_func=model.mean_func,
          cov_func=model.cov_func,
          params=gpparams,
          dataset=model.dataset,
          warp_func=gpwarp_func,
          use_cholesky=use_cholesky,
          exclude_aligned=False)

    logging.info(msg=f'Regularizer on ground truth params = {reg(params)}')
    logging.info(msg=f'NLL on ground truth params = {nll_func(params)}')
    init_reg = reg(init_params, warp_func)
    init_nll = nll_func(init_params, warp_func)
    # Cholesky- and SVD-based NLL should agree to about 1% pre-training.
    svd_nll = nll_func(init_params, warp_func, use_cholesky=False)
    self.assertAlmostEqual(svd_nll/init_nll, 1., places=2)
    logging.info(msg=f'Reg on init params = {init_reg}')
    logging.info(msg=f'NLL on init params = {init_nll}')
    start_time = time.time()
    logging.info(msg=f'init_params={init_params}')
    inferred_params = model.train()
    logging.info(msg=f'Elapsed training time = {time.time() - start_time}')
    keys = params.model.keys()
    retrieved_inferred_params = dict(
        zip(keys, retrieve_params(inferred_params, keys, warp_func=warp_func)))
    logging.info(msg=f'inferred_params = {retrieved_inferred_params}')
    inferred_reg = reg(inferred_params, warp_func)
    inferred_nll = nll_func(inferred_params, warp_func)
    # The two NLL solvers should still agree after training.
    svd_nll = nll_func(inferred_params, warp_func, use_cholesky=False)
    self.assertAlmostEqual(svd_nll/inferred_nll, 1., places=2)
    logging.info(
        msg=f'Reg on inferred params = {inferred_reg} (Before: {init_reg})')
    logging.info(
        msg=f'NLL on inferred params = {inferred_nll} (Before: {init_nll})')
    # Training (even 2 steps) should reduce the regularizer vs. the init.
    self.assertGreater(init_reg, inferred_reg)

  @parameterized.named_parameters(
      ('squared_exponential nll', kernel.squared_exponential, 'lbfgs'),
      ('matern32 nll', kernel.matern32, 'lbfgs'),
      ('matern52 nll', kernel.matern52, 'lbfgs'),
      ('matern32_mlp nll', kernel.matern32_mlp, 'lbfgs'),
      ('matern52_mlp nll', kernel.matern52_mlp, 'lbfgs'),
      ('squared_exponential_mlp nll', kernel.squared_exponential_mlp, 'lbfgs'),
      ('dot_product_mlp nll', kernel.dot_product_mlp, 'lbfgs'),
      ('squared_exponential euclidean', kernel.squared_exponential, 'lbfgs'),
      ('dot_product_mlp nll adam', kernel.dot_product_mlp, 'adam'),
      ('squared_exponential_mlp nll adam', kernel.squared_exponential_mlp,
       'adam'),
  )
  def test_neg_log_marginal_likelihood(self, cov_func, opt_method):
    """Test that GP parameters can be inferred correctly."""
    key = jax.random.PRNGKey(0)
    key, init_key = jax.random.split(key)
    n = 20
    vx = jax.random.normal(key, (n, 2))
    # Ground-truth GP hyperparameters used to generate the data.
    params = GPParams(
        model={
            'constant': 5.,
            'lengthscale': 1.,
            'signal_variance': 1.0,
            'noise_variance': 0.01,
        })
    # MLP-based kernels need their network weights initialized to match vx.
    if cov_func in [
        kernel.squared_exponential_mlp, kernel.matern32_mlp, kernel.matern52_mlp
    ]:
      params.config['mlp_features'] = (8,)
      key, _ = jax.random.split(key)
      bf.init_mlp_with_shape(key, params, vx.shape)
    elif cov_func == kernel.dot_product_mlp:
      key, _ = jax.random.split(key)
      params.model['dot_prod_sigma'] = 0.1
      params.model['dot_prod_bias'] = 0.
      params.config['mlp_features'] = (8,)
      key, _ = jax.random.split(key)
      bf.init_mlp_with_shape(key, params, vx.shape)
    mean_func = mean.constant
    logging.info(msg=f'params = {params}')
    key, init_key = jax.random.split(key)
    dataset = [(vx,
                gp.sample_from_gp(
                    key, mean_func, cov_func, params, vx,
                    num_samples=10), 'all_data')]
    vy = dataset[0][1]
    # Add each sampled function as its own sub-dataset.
    # NOTE(review): these entries are 2-tuples while the first entry is a
    # named 3-tuple — confirm downstream code accepts both shapes.
    for i in range(vy.shape[1]):
      dataset.append((vx, vy[:, i:i+1]))
    # Minimize sample_mean_cov_regularizer.
    init_params = GPParams(
        model={
            'constant': 5.1,
            'lengthscale': 0.,
            'signal_variance': 0.,
            'noise_variance': -4.,
            'dot_prod_sigma': -1.,
            'dot_prod_bias': 0.,
        },
        config={
            'method':
                opt_method,
            'max_training_step':
                2,
            'logging_interval': 1,
            'objective': obj.nll,
            'batch_size': 100,
            'learning_rate': 0.001,
        })
    warp_func = DEFAULT_WARP_FUNC
    model = gp.GP(
        dataset=dataset,
        mean_func=mean_func,
        cov_func=cov_func,
        params=init_params,
        warp_func=warp_func)
    # Unlike the first test, MLP weights (if any) are initialized here.
    model.initialize_params(init_key)

    def reg(gpparams, gpwarp_func=None):
      # KL-based regularizer evaluated on the model's dataset.
      return obj.multivariate_normal_divergence(
          mean_func=model.mean_func,
          cov_func=model.cov_func,
          params=gpparams,
          dataset=model.dataset,
          warp_func=gpwarp_func,
          distance=utils.kl_multivariate_normal)

    def nll_func(gpparams, gpwarp_func=None, use_cholesky=True):
      # Negative log marginal likelihood; use_cholesky toggles the solver.
      return obj.neg_log_marginal_likelihood(
          mean_func=model.mean_func,
          cov_func=model.cov_func,
          params=gpparams,
          dataset=model.dataset,
          warp_func=gpwarp_func,
          use_cholesky=use_cholesky,
      )

    logging.info(msg=f'Regularizer on ground truth params = {reg(params)}')
    logging.info(msg=f'NLL on ground truth params = {nll_func(params)}')
    init_reg = reg(init_params, warp_func)
    init_nll = nll_func(init_params, warp_func)
    # Cholesky- and SVD-based NLL should agree to about 1% pre-training.
    svd_nll = nll_func(init_params, warp_func, use_cholesky=False)
    self.assertAlmostEqual(svd_nll/init_nll, 1., places=2)
    logging.info(msg=f'Reg on init params = {init_reg}')
    logging.info(msg=f'NLL on init params = {init_nll}')
    start_time = time.time()
    logging.info(msg=f'init_params={init_params}')
    inferred_params = model.train()
    logging.info(msg=f'Elapsed training time = {time.time() - start_time}')
    keys = params.model.keys()
    retrieved_inferred_params = dict(
        zip(keys, retrieve_params(inferred_params, keys, warp_func=warp_func)))
    logging.info(msg=f'inferred_params = {retrieved_inferred_params}')
    inferred_reg = reg(inferred_params, warp_func)
    inferred_nll = nll_func(inferred_params, warp_func)
    # The two NLL solvers should still agree after training.
    svd_nll = nll_func(inferred_params, warp_func, use_cholesky=False)
    self.assertAlmostEqual(svd_nll/inferred_nll, 1., places=2)
    logging.info(
        msg=f'Reg on inferred params = {inferred_reg} (Before: {init_reg})')
    logging.info(
        msg=f'NLL on inferred params = {inferred_nll} (Before: {init_nll})')
    # Training should reduce the regularizer relative to the init params.
    self.assertGreater(init_reg, inferred_reg)
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
|
/*
* Copyright (c) 2017-2020 Software Architecture Group, Hasso Plattner Institute
*
* Licensed under the MIT License.
*/
package de.hpi.swa.trufflesqueak.test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.math.BigInteger;
import org.junit.Test;
import de.hpi.swa.trufflesqueak.interop.WrapToSqueakNode;
import de.hpi.swa.trufflesqueak.model.AbstractSqueakObject;
import de.hpi.swa.trufflesqueak.model.ArrayObject;
import de.hpi.swa.trufflesqueak.model.BooleanObject;
import de.hpi.swa.trufflesqueak.model.LargeIntegerObject;
import de.hpi.swa.trufflesqueak.model.NilObject;
public class SqueakPrimitiveTest extends AbstractSqueakTestCaseWithDummyImage {
@Test
public void testPrimEquivalent() {
final AbstractSqueakObject rcvr = image.specialObjectsArray;
assertTrue((boolean) runBinaryPrimitive(110, rcvr, rcvr));
assertFalse((boolean) runBinaryPrimitive(110, rcvr, NilObject.SINGLETON));
}
@Test
public void testPrimReplaceFromTo() {
final ArrayObject rcvr = image.asArrayOfObjects(NilObject.SINGLETON, BooleanObject.FALSE, BooleanObject.TRUE, image.characterClass, image.metaClass,
image.schedulerAssociation, image.smallIntegerClass, image.smalltalk,
image.specialObjectsArray);
assertSame(NilObject.SINGLETON, rcvr.getObject(0));
for (int i = 1; i < 8; i++) {
assertNotSame(NilObject.SINGLETON, rcvr.getObject(i));
}
final Object result = runQuinaryPrimitive(105, rcvr, 1L, 6L, ArrayObject.createEmptyStrategy(image, image.arrayClass, 10), 1L);
assertSame(result, rcvr);
for (int i = 0; i < 6; i++) {
assertSame(NilObject.SINGLETON, rcvr.getObject(i));
}
for (int i = 7; i < 8; i++) {
assertNotSame(NilObject.SINGLETON, rcvr.getObject(i));
}
}
@Test
public void testAdd() {
final Object[][] testValues = new Object[][]{
{(long) Integer.MAX_VALUE, (long) Integer.MAX_VALUE, 2 * (long) Integer.MAX_VALUE},
{Long.MAX_VALUE, Long.MAX_VALUE, new LargeIntegerObject(image, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.valueOf(2)))},
{Long.MAX_VALUE, Long.MIN_VALUE, BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.valueOf(Long.MIN_VALUE)).longValue()}};
final WrapToSqueakNode wrapNode = WrapToSqueakNode.getUncached();
for (int i = 0; i < testValues.length; i++) {
final Object[] values = testValues[i];
assertEquals(wrapNode.executeWrap(values[2]), runBinaryPrimitive(1, wrapNode.executeWrap(values[0]), wrapNode.executeWrap(values[1])));
}
}
@Test
public void testSub() {
final Object[][] testValues = new Object[][]{
{(long) Integer.MAX_VALUE, (long) Integer.MAX_VALUE, 0L},
{Long.MAX_VALUE, Long.MAX_VALUE, 0L},
{Long.MAX_VALUE, Long.MAX_VALUE - 1, 1L},
{Long.MAX_VALUE, Long.MAX_VALUE - Integer.MAX_VALUE, (long) Integer.MAX_VALUE},
{Long.MIN_VALUE, 1L, new LargeIntegerObject(image, BigInteger.valueOf(Long.MIN_VALUE).subtract(BigInteger.ONE))},
{Long.MAX_VALUE, Long.MAX_VALUE - Integer.MAX_VALUE, (long) Integer.MAX_VALUE}};
final WrapToSqueakNode wrapNode = WrapToSqueakNode.getUncached();
for (int i = 0; i < testValues.length; i++) {
final Object[] values = testValues[i];
assertEquals(wrapNode.executeWrap(values[2]), runBinaryPrimitive(2, wrapNode.executeWrap(values[0]), wrapNode.executeWrap(values[1])));
}
assertEquals(wrapNode.executeWrap(Long.MAX_VALUE),
runBinaryPrimitive(22, wrapNode.executeWrap(new LargeIntegerObject(image, BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE))), wrapNode.executeWrap(1L)));
}
}
|
U.S. President Barack Obama will name Jeh Johnson, the Pentagon's former top lawyer, to succeed Janet Napolitano as homeland security secretary, officials said.
Johnson, whose first name is pronounced "Jay," framed many Obama administration national security policies and is widely respected in the administration for his capacity to assess situations or circumstances shrewdly and to draw sound conclusions, an administration official said.
Obama is expected to announce his nomination in the White House Rose Garden at 2 p.m. Friday, the White House said.
"The president is selecting Johnson because he is one of the most highly qualified and respected national security leaders," a senior administration official told The Washington Post.
"During his tenure at the Department of Defense, he was known for his sound judgment and counsel," the official said, adding Johnson was "responsible for the prior legal review and approval of every military operation approved by the president and secretary of defense" during Obama's first term.
The nomination of Johnson, 56, must be confirmed by the Senate, which confirmed him as Defense Department general counsel Feb. 9, 2009. He left that job in December 2012 to return to private practice.
Napolitano resigned in July to lead the University of California system.
Homeland security is currently run by acting Secretary Rand Beers, who is undersecretary for national protection and programs.
The department, created in response to the Sept. 11, 2001, attacks, has the primary responsibilities of protecting the United States and its territories from terrorist attacks. Its mission covers counter-terrorism and cybersecurity, but it also oversees issues including the government's response to human-made and national disasters and border security.
Johnson's Pentagon job placed him at the center of some of Obama's most important national security decisions, the Post said.
These included the detention of terrorism suspects and the practice of targeted killings beyond defined battlefields to the administration's drive to end the "don't ask, don't tell" law that barred gay men and lesbians from serving openly in the military.
Shortly before leaving the Pentagon last year, he publicly questioned the idea of an indefinite war against terrorism.
"War violates the natural order of things, in which children bury their parents -- in war parents bury their children," he said in a November 2012 speech at England's Oxford Union debating society.
"In its 12th year, we must not accept the current conflict, and all that it entails, as the 'new normal,'" he said. "Peace must be regarded as the norm toward which the human race continually strives."
He predicted the United States would reach a "tipping point" in which the government's efforts against al-Qaida would no longer be considered an armed conflict and would be part of a more traditional law-enforcement effort against individual terrorists. |
<filename>src/VisitorRuby.java
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.tree.ParseTree;
import java.util.*;
// Lightweight record of a function definition: its identifier plus the
// source position (line/column) of the definition header. Instances are
// collected while visiting and removed again when a call site is found,
// so the leftovers identify functions that are never called.
class FunctionCreated {
    public String name;   // function identifier as it appears in the source
    public int line;      // line of the definition header
    public int column;    // column of the definition header

    public FunctionCreated(String functionName, int declLine, int declColumn) {
        name = functionName;
        line = declLine;
        column = declColumn;
    }
}
// Parse-tree visitor over an ANTLR-generated Ruby AST that detects code
// smells -- multiple returns, returns inside loops, unreachable code after a
// return, overly long identifiers, deeply nested control structures, long
// conditional chains, overly long methods and never-called functions -- and
// reports each one to a CodeSmellsManager.
//
// NOTE(review): all bookkeeping fields are static, so one process can only
// safely analyze a single file at a time with this visitor; confirm callers
// never run two visitors concurrently.
public class VisitorRuby<T> extends RubyBaseVisitor<T> {
    // --- counters reset per function / per expression in the visit methods ---
    public static int returnsCounter;               // return statements seen in the current function
    public static int chainsCounter;                // chained calls seen in the current expression
    public static int conditionalsVariableCounter;  // elsif branches testing the same variable as the if
    public static int conditionalsCounter;          // elsif branches testing a different variable
    public static int ifstatementCounter;           // consecutive if statements at the same level
    // --- position/name of the function currently being visited ---
    public static int returnsFunctionLine;
    public static int returnsFunctionColumn;
    public static String returnsFunctionName;
    public static String conditionalVariable;       // variable tested by the most recent if header
    // Functions defined so far; entries are removed when a call is seen, so
    // whatever remains at <EOF> was never called.
    public static ArrayList<FunctionCreated> functionsCreated = new ArrayList<FunctionCreated>();
    public static ArrayList<String> varDeclarated = new ArrayList<String>(); // identifiers already length-checked
    public CodeSmellsManager manager;               // sink for every smell report
    public static int methodStartLine;
    public static int methodStartColumn;
    public static int methodFinalLine;
    public static int maxMethodLongitude = 100;     // maximum tolerated method length, in lines

    VisitorRuby(ArrayList<Integer> _enableSmells) {
        manager = new CodeSmellsManager(_enableSmells);
    }

    VisitorRuby() {
        manager = new CodeSmellsManager();
    }

    // Entering a function definition: remember its name/position for later
    // reports, register it as "not yet called", and reset per-function counters.
    @Override public T visitFunction_definition_header(RubyParser.Function_definition_headerContext ctx) {
        returnsCounter = 0;
        returnsFunctionLine = ctx.start.getLine();
        returnsFunctionColumn = ctx.start.getCharPositionInLine();
        methodStartLine = ctx.start.getLine();
        methodStartColumn = ctx.start.getCharPositionInLine();
        returnsFunctionName = ctx.getChild(1).getChild(0).getChild(0).toString();
        FunctionCreated func = new FunctionCreated(returnsFunctionName, returnsFunctionLine, returnsFunctionColumn);
        functionsCreated.add(func);
        ifstatementCounter = 0;
        return visitChildren(ctx);
    }

    // Checks every 'return' for three smells: multiple returns in one
    // function, a return inside a loop, and unreachable code after a return.
    @Override
    public T visitReturn_statement(RubyParser.Return_statementContext ctx) {
        if(returnsCounter == 0 ){
            returnsCounter += 1;
        }else{
            returnsCounter += 1;
            RuleContext StmtCtx = ctx.parent.parent.parent.parent;
            // BUGFIX: the original first condition compared the RuleContext
            // object itself (not its class name) against the string, which is
            // always false; it also tested Function_definitionContext twice.
            String stmtClass = StmtCtx.getClass().toString();
            if(stmtClass.equals("class RubyParser$Function_if_statementContext") ||
                stmtClass.equals("class RubyParser$Function_definitionContext") ||
                stmtClass.equals("class RubyParser$Function_if_elsif_statementContext") ||
                stmtClass.equals("class RubyParser$Function_unless_statementContext") ||
                StmtCtx.parent.getClass().toString().equals("class RubyParser$Function_unless_statementContext") ||
                StmtCtx.parent.getClass().toString().equals("class RubyParser$Function_if_elsif_statementContext")){
                int line = ctx.start.getLine();
                int column = ctx.start.getCharPositionInLine();
                String message = "\nMal olor en la funcion: \'" + returnsFunctionName + "\' en Linea: "+ returnsFunctionLine + ", Columna: " + returnsFunctionColumn +
                        "\nSe encontraron multiples returns, primer return multiple Linea: "+ line +" Columna: " + column + "\n"
                        + "Se recomienda crear una variable unica a retornar y asignarle a esto los valores que se pensaban retornar, posteriormente retornar esta variable al final de la funcion\n";
                manager.AddCodeSmell(SMELL.MultipleReturn, line, column, message);
            }
        }
        // Return directly inside a while/for loop body.
        String StmtCtx = ctx.parent.parent.parent.parent.getClass().toString();
        if(StmtCtx.equals("class RubyParser$Function_while_statementContext")){
            int line = ctx.start.getLine();
            int column = ctx.start.getCharPositionInLine();
            String message ="\nMal olor encontrado, return dentro de una estructura de bucle en Linea : " + line + " Columna " + column + "\n"
                    + "Se recomienda no tener returns en estructuras de bucle, declare una variable donde guarde el resultado que desea y retornelo fuera de la estructura.\n";
            manager.AddCodeSmell(SMELL.ReturnInLoop, line, column, message);
        }
        if(StmtCtx.equals("class RubyParser$Function_for_statementContext")){
            int line = ctx.start.getLine();
            int column = ctx.start.getCharPositionInLine();
            String message ="\nMal olor encontrado, return dentro de una estructura de bucle en Linea : " + line + " Columna " + column + "\n"
                    + "Se recomienda no tener returns en estructuras de bucle, declare una variable donde guarde el resultado que desea y retornelo fuera de la estructura.\n";
            manager.AddCodeSmell(SMELL.ReturnInLoop, line, column, message);
        }
        // A return that is not the tail of a function body implies dead code
        // afterwards; report the line right after the return.
        String inFunction = ctx.parent.parent.parent.getClass().toString();
        if(!(inFunction.equals("class RubyParser$Function_statement_bodyContext") ||
                inFunction.equals("class RubyParser$Function_definition_bodyContext"))){
            int line = ctx.start.getLine()+1; // first unreachable line
            int column = ctx.start.getCharPositionInLine();
            String message ="\nMal olor encontrado, codigo inalcanzable en Linea: " + line + " Columna " + column + "\n"
                    + "Se recomienda mover o eliminar el codigo que se encuentra despues del return.\n";
            manager.AddCodeSmell(SMELL.DeadCodeReturn, line, column, message);
        }
        return super.visitChildren(ctx);
    }

    // Top-level control structures: check nesting depth.
    @Override public T visitUnless_statement(RubyParser.Unless_statementContext ctx) {
        int line = ctx.start.getLine();
        int column = ctx.start.getCharPositionInLine();
        nestedStatement(ctx, line, column);
        return super.visitChildren(ctx);
    }

    @Override public T visitWhile_statement(RubyParser.While_statementContext ctx) {
        int line = ctx.start.getLine();
        int column = ctx.start.getCharPositionInLine();
        nestedStatement(ctx, line, column);
        return super.visitChildren(ctx);
    }

    @Override public T visitFor_statement(RubyParser.For_statementContext ctx) {
        int line = ctx.start.getLine();
        int column = ctx.start.getCharPositionInLine();
        nestedStatement(ctx, line, column);
        return super.visitChildren(ctx);
    }

    // Control structures inside a function body: same check against the
    // Function_* variants of the grammar rules.
    @Override public T visitFunction_unless_statement(RubyParser.Function_unless_statementContext ctx) {
        int line = ctx.start.getLine();
        int column = ctx.start.getCharPositionInLine();
        nestedFunctionStatement(ctx, line, column);
        return super.visitChildren(ctx);
    }

    @Override public T visitFunction_while_statement(RubyParser.Function_while_statementContext ctx) {
        int line = ctx.start.getLine();
        int column = ctx.start.getCharPositionInLine();
        nestedFunctionStatement(ctx, line, column);
        return super.visitChildren(ctx);
    }

    @Override public T visitFunction_for_statement(RubyParser.Function_for_statementContext ctx) {
        int line = ctx.start.getLine();
        int column = ctx.start.getCharPositionInLine();
        nestedFunctionStatement(ctx, line, column);
        return super.visitChildren(ctx);
    }

    // A new expression that starts with a call chain resets the chain counter.
    @Override
    public T visitExpression(RubyParser.ExpressionContext ctx) {
        if((ctx.getChild(0).getClass().toString()).equals("class RubyParser$Function_chainContext")){
            chainsCounter = 0;
        }
        return super.visitChildren(ctx);
    }

    // First assignment to a variable: flag identifiers longer than 10 chars.
    @Override public T visitLvalue(RubyParser.LvalueContext ctx) {
        String id = ctx.getChild(0).getChild(0).toString();
        if(!varDeclarated.contains(id)){
            varDeclarated.add(id);
            if(id.length() > 10){
                int line = ctx.start.getLine();
                int column = ctx.start.getCharPositionInLine();
                String message = "\nMal olor encontrado, el nombre de la variable " + id+ " es muy largo linea: " + line + " Columna: " + column + "\n"
                        + "Se recomienda cambiar este identificador por uno mas sencillo, esto hara el codigo mas legible\n";
                manager.AddCodeSmell(SMELL.IdTooLong, line, column, message);
            }
        }
        return super.visitChildren(ctx);
    }

    // Function names are only length-checked at their definition header,
    // not at every call site.
    @Override public T visitFunction_name(RubyParser.Function_nameContext ctx) {
        if((ctx.getParent().getClass().toString()).equals("class RubyParser$Function_definition_headerContext")){
            String id = ctx.getChild(0).getChild(0).toString();
            if(id.length() > 10){
                int line = ctx.start.getLine();
                int column = ctx.start.getCharPositionInLine();
                String message = "\nMal olor encontrado, el nombre de la funcion " + id+ " es muy largo linea: " + line + " Columna: " + column + "\n"
                        + "Se recomienda cambiar este identificador por uno mas sencillo, esto hara el codigo mas legible\n";
                manager.AddCodeSmell(SMELL.IdTooLong, line, column, message);
            }
        }
        return super.visitChildren(ctx);
    }

    @Override
    public T visitExpression_list(RubyParser.Expression_listContext ctx) {
        ifstatementCounter = 0;
        return visitChildren(ctx);
    }

    // Flag call chains once they exceed five links (reported exactly once per
    // chain thanks to the == 5 guard).
    @Override
    public T visitFunction_chain(RubyParser.Function_chainContext ctx) {
        if(chainsCounter < 5){
            chainsCounter += 1;
        }else if(chainsCounter == 5){
            chainsCounter += 1;
            int line = ctx.start.getLine();
            int column = ctx.start.getCharPositionInLine();
            String message = "\nMal olor encontrado, muchas funciones encadenadas en Linea: " + line + " Columna: " + column + "\n"
                    + "Se recomienda dividir los encadenamientos en diferentes variables, donde como mucho se encadenen cuatro funciones, para que el codigo sea mas comprensible y facil de mantener\n";
            manager.AddCodeSmell(SMELL.ExtremeChains, line, column, message);
        }
        return super.visitChildren(ctx);
    }

    // A call site marks the named function as used: drop every registered
    // definition with that name from the "never called" list.
    @Override
    public T visitFunction_inline_call(RubyParser.Function_inline_callContext ctx) {
        String functionName = ctx.getChild(0).getChild(0).getChild(0).getChild(0).toString();
        Iterator<FunctionCreated> itr = functionsCreated.iterator();
        while (itr.hasNext()) {
            FunctionCreated function = itr.next();
            if (function.name.equals(functionName)) {
                itr.remove();
            }
        }
        return visitChildren(ctx);
    }

    // Terminators serve two purposes: at <EOF> report every function that was
    // never called; at the end of a function body check the method's length.
    @Override
    public T visitTerminator(RubyParser.TerminatorContext ctx) {
        if(ctx.getChild(ctx.getChildCount() - 1).toString().equals("<EOF>")){
            for (FunctionCreated x : functionsCreated){
                int line = x.line;
                int column = x.column;
                String message = "\nMal olor encontrado, la funcion \'" + x.name + "\' ubicada en linea " + line + " y Columna: " + column + " nunca es llamada\n"
                        + "Se recomienda crear un llamado a la funcion o en caso de que no sea necesaria eliminarla.\n";
                manager.AddCodeSmell(SMELL.FunctionsNotCalled, line, column, message);
            }
        }else{
            RuleContext StmtCtx = ctx.parent.parent;
            if(StmtCtx.getClass().toString().equals("class RubyParser$Function_definition_bodyContext")){
                methodFinalLine = ctx.start.getLine();
                if((methodFinalLine - methodStartLine) > maxMethodLongitude){
                    String message = "\nMal olor encontrado, la funcion \'" + returnsFunctionName + "\' en la linea " + methodStartLine + " y Columna: " + methodStartColumn + " Es demasiado larga\n"
                            + "Se recomienda revisar la funcionalidad de este metodo e intentar dividirlo en varios metodos\nque trabajen en conjunto.\n";
                    manager.AddCodeSmell(SMELL.MethodTooLong, methodStartLine, methodStartColumn, message);
                }
            }
        }
        return super.visitChildren(ctx);
    }

    // Walks up the parse tree from a top-level control structure, climbing
    // four ancestors per structural level (the grammar wraps each body in
    // that many intermediate rules), counting enclosing if/unless/while/for
    // nodes. More than four levels of nesting is reported once.
    public void nestedStatement(RuleContext ctx, int line, int column){
        RuleContext parentStructure = ctx;
        String parentString;
        int nestedCounter = 1;
        int i;
        while(true){
            i = 0;
            while(parentStructure != null && i < 4){
                parentStructure = parentStructure.parent;
                i++;
            }
            if(parentStructure != null){
                parentString = parentStructure.getClass().toString();
                // elsif branches are wrapped two levels deeper; skip the wrapper.
                if(parentString.equals("class RubyParser$If_elsif_statementContext")){
                    parentStructure = parentStructure.parent.parent;
                    parentString = parentStructure.getClass().toString();
                }
                if(parentString.equals("class RubyParser$If_statementContext")
                        || parentString.equals("class RubyParser$Unless_statementContext")
                        || parentString.equals("class RubyParser$While_statementContext")
                        || parentString.equals("class RubyParser$For_statementContext")){
                    nestedCounter++;
                }
                if(nestedCounter > 4){
                    String message = "\nMal olor encontrado, estructura profundamente anidada, Linea: " + line + ", Columna: " + column + "\n"
                            + "Se recomienda reestructurar la logica del codigo para evitar la complejidad de lectura.\n";
                    manager.AddCodeSmell(SMELL.DeeplyNestedCode, line, column, message);
                    break;
                }
            }
            else{
                break;
            }
        }
    }

    // Same depth check as nestedStatement, against the Function_* grammar
    // rules used inside function bodies.
    public void nestedFunctionStatement(RuleContext ctx, int line, int column) {
        RuleContext parentStructure = ctx;
        String parentString;
        int nestedCounter = 1;
        int i;
        while (true) {
            i = 0;
            while (parentStructure != null && i < 4) {
                parentStructure = parentStructure.parent;
                i++;
            }
            if (parentStructure != null) {
                parentString = parentStructure.getClass().toString();
                if (parentString.equals("class RubyParser$Function_if_elsif_statementContext")) {
                    parentStructure = parentStructure.parent.parent;
                    parentString = parentStructure.getClass().toString();
                }
                if (parentString.equals("class RubyParser$Function_if_statementContext")
                        || parentString.equals("class RubyParser$Function_unless_statementContext")
                        || parentString.equals("class RubyParser$Function_while_statementContext")
                        || parentString.equals("class RubyParser$Function_for_statementContext")) {
                    nestedCounter++;
                }
                if (nestedCounter > 4) {
                    String message = "\nMal olor encontrado, estructura profundamente anidada, Linea: " + line
                            + ", Columna: " + column + "\n"
                            + "Se recomienda reestructurar la logica del codigo para evitar la complejidad de lectura.\n";
                    manager.AddCodeSmell(SMELL.DeeplyNestedCode, line, column, message);
                    break;
                }
            } else {
                break;
            }
        }
    }

    // Top-level if: record the tested variable for the elsif checks, reset
    // the counters when the if sits inside a loop/another if, flag runs of
    // more than four consecutive ifs, and check nesting depth.
    @Override
    public T visitIf_statement(RubyParser.If_statementContext ctx) {
        conditionalsVariableCounter = 0;
        conditionalsCounter = 0;
        ParseTree comparison = ctx.getChild(1).getChild(0).getChild(1).getChild(0).getChild(0);
        conditionalVariable = comparison.getChild(0).getChild(0).getChild(0).getChild(0).getChild(0).toString();
        try{
            // Near the tree root there may be fewer ancestors than dereferenced
            // here; the catch below simply skips the reset in that case.
            String firstInLoop = ctx.parent.parent.parent.parent.parent.getClass().toString();
            String inLoop = ctx.parent.parent.parent.parent.getClass().toString();
            if(firstInLoop.equals("class RubyParser$While_statementContext") || firstInLoop.equals("class RubyParser$For_statementContext")){
                ifstatementCounter = 0;
            }else if(inLoop.equals("class RubyParser$While_statementContext") || inLoop.equals("class RubyParser$For_statementContext") || inLoop.equals("class RubyParser$If_statementContext")){
                ifstatementCounter = 0;
            }
        }catch (Exception ex) {
        }
        if(ifstatementCounter < 4){
            ifstatementCounter += 1;
        }else if(ifstatementCounter == 4){
            ifstatementCounter += 1;
            int line = ctx.start.getLine();
            int column = ctx.start.getCharPositionInLine();
            String message = "\nMal olor encontrado, muchos condicionales en Linea: " + line + " Columna: " + column + " para la variable \'" + conditionalVariable + "\'. \n"
                    + "Se recomienda separar las condiciones en bucles o metodos diferentes segun lo permita la logica del programa.\n";
            manager.AddCodeSmell(SMELL.LongConditionals, line, column, message);
        }
        int line = ctx.start.getLine();
        int column = ctx.start.getCharPositionInLine();
        nestedStatement(ctx, line, column);
        return visitChildren(ctx);
    }

    // elsif branch: long chains on the same variable suggest a lookup object;
    // long chains on mixed variables suggest splitting the logic.
    @Override
    public T visitIf_elsif_statement(RubyParser.If_elsif_statementContext ctx) {
        ParseTree comparison = ctx.getChild(1).getChild(0).getChild(1).getChild(0).getChild(0);
        String auxComparison = comparison.getChild(0).getChild(0).getChild(0).getChild(0).getChild(0).toString();
        if(auxComparison.equals(conditionalVariable)){
            if(conditionalsVariableCounter < 4){
                conditionalsVariableCounter += 1;
            }else if(conditionalsVariableCounter == 4){
                conditionalsVariableCounter += 1;
                int line = ctx.start.getLine();
                int column = ctx.start.getCharPositionInLine();
                String message = "\nMal olor encontrado, condicionales muy largos en Linea: " + line + " Columna: " + column + " para la variable \'" + auxComparison + "\'.\n"
                        + "Se recomienda la creacion de un objeto, donde pueda mapear las diferentes opciones de la variable, para asi ingresar a estas con mayor eficacia.\n";
                manager.AddCodeSmell(SMELL.LongConditionals, line, column, message);
            }
        }else if(conditionalsCounter < 4 ){
            conditionalsCounter += 1;
        }else if(conditionalsCounter == 4){
            conditionalsCounter += 1;
            int line = ctx.start.getLine();
            int column = ctx.start.getCharPositionInLine();
            String message = "\nMal olor encontrado, condicionales muy largos en Linea: " + line + " Columna: " + column + "\n"
                    + "Se recomienda separar las condiciones en bucles o metodos diferentes segun lo permita la logica del programa.\n";
            manager.AddCodeSmell(SMELL.LongConditionals, line, column, message);
        }
        return visitChildren(ctx);
    }

    // Function-body if: same checks as visitIf_statement, for the Function_*
    // grammar rules (no try/catch here: a function if always has enough
    // ancestors for the dereference chain).
    @Override
    public T visitFunction_if_statement(RubyParser.Function_if_statementContext ctx) {
        conditionalsVariableCounter = 0;
        conditionalsCounter = 0;
        ParseTree comparison = ctx.getChild(1).getChild(0).getChild(1).getChild(0).getChild(0);
        conditionalVariable = comparison.getChild(0).getChild(0).getChild(0).getChild(0).getChild(0).toString();
        String inLoop = ctx.parent.parent.parent.parent.getClass().toString();
        if(inLoop.equals("class RubyParser$Function_while_statementContext") || inLoop.equals("class RubyParser$Function_for_statementContext") || inLoop.equals("class RubyParser$Function_if_statementContext")){
            ifstatementCounter = 0;
        }
        if(ifstatementCounter < 4){
            ifstatementCounter += 1;
        }else if(ifstatementCounter == 4){
            ifstatementCounter += 1;
            int line = ctx.start.getLine();
            int column = ctx.start.getCharPositionInLine();
            String message = "\nMal olor encontrado, muchos condicionales en Linea: " + line + " Columna: " + column + " para la variable \'" + conditionalVariable + "\'. \n"
                    + "Se recomienda separar las condiciones en bucles o metodos diferentes segun lo permita la logica del programa.\n";
            manager.AddCodeSmell(SMELL.LongConditionals, line, column, message);
        }
        int line = ctx.start.getLine();
        int column = ctx.start.getCharPositionInLine();
        nestedFunctionStatement(ctx, line, column);
        return visitChildren(ctx);
    }

    // Function-body elsif: same checks as visitIf_elsif_statement.
    @Override
    public T visitFunction_if_elsif_statement(RubyParser.Function_if_elsif_statementContext ctx) {
        ParseTree comparison = ctx.getChild(1).getChild(0).getChild(1).getChild(0).getChild(0);
        String auxComparison = comparison.getChild(0).getChild(0).getChild(0).getChild(0).getChild(0).toString();
        if(auxComparison.equals(conditionalVariable)){
            if(conditionalsVariableCounter < 4){
                conditionalsVariableCounter += 1;
            }else if(conditionalsVariableCounter == 4){
                conditionalsVariableCounter += 1;
                int line = ctx.start.getLine();
                int column = ctx.start.getCharPositionInLine();
                String message = "\nMal olor encontrado, condicionales muy largos en Linea: " + line + " Columna: " + column + " para la variable \'" + auxComparison + "\'.\n"
                        + "Se recomienda la creacion de un objeto, donde pueda mapear las diferentes opciones de la variable, para asi ingresar a estas con mayor eficacia.\n";
                manager.AddCodeSmell(SMELL.LongConditionals, line, column, message);
            }
        }else if(conditionalsCounter < 4 ){
            conditionalsCounter += 1;
        }else if(conditionalsCounter == 4){
            conditionalsCounter += 1;
            int line = ctx.start.getLine();
            int column = ctx.start.getCharPositionInLine();
            String message = "\nMal olor encontrado, condicionales muy largos en Linea: " + line + " Columna: " + column + "\n"
                    + "Se recomienda separar las condiciones en bucles o metodos diferentes segun lo permita la logica del programa.\n";
            manager.AddCodeSmell(SMELL.LongConditionals, line, column, message);
        }
        return visitChildren(ctx);
    }
}
|
Leptomeningeal Carcinomatosis in Recurrent Non-Small Cell Lung Cancer: A Case Report and Review of Current Treatment Modalities Leptomeningeal carcinomatosis (LC) is an uncommon sequelae of non-small cell lung cancer. The treatment modalities for LC have historically been limited with an overall poor prognosis. This case report outlines a 76-year-old female who presented with recurrence of non-small cell lung cancer as LC. LC is difficult to treat, and options include radiation, chemotherapy (systemic and intrathecal), as well as targeted therapies. This case outlines a unique approach and reviews the current literature on the effectiveness of these options in non-small cell lung cancer. Introduction Leptomeningeal carcinomatosis (LC) affects five percent of patients with non-small cell lung cancer (NSCLC), and the incidence is increasing. This increased incidence is thought to be secondary to the prolonged survival of cancer patients. Nonetheless, patients with diagnosed LC have a poor prognosis, with an average survival of four to six weeks among untreated patients vs. four to six months among treated patients. In this report, we describe the case of a patient who presented with a recurrent adenocarcinoma of the lung that manifested as LC. Case Presentation We present a case of a 76-year-old Caucasian female with a medical history that included hypertension, diabetes, and stage IB (T2a N0 Mx) adenocarcinoma of the lung. At the time of cancer diagnosis, she underwent lobectomy of the affected right upper lobe. The surgical margins were negative on pathology, and she received adjuvant chemotherapy comprising pemetrexed plus carboplatin for three months. Her clinical course was good, and she reported no complaints during regular routine follow-ups. 
After a 1.5-year follow-up period, she noticed intermittent numbness on the left side of her face, along with insidious, increasingly worsening left-sided hearing loss and weekly headaches that she described as a sharp shooting pain, mostly located over her left scalp, of less than 60 minutes' duration. Two months after these symptoms started, she began to experience "dizzy spells" and intermittent double vision. Upon continuation of these symptoms, she was subsequently examined at the clinic. At the time of the examination, her Eastern Cooperative Oncology Group performance status was 1. Brain magnetic resonance imaging (MRI) revealed the development of several punctate foci with superficial enhancement bilaterally within the occipital regions, consistent with metastatic disease (leptomeningeal vs. cortical location). No significant mass or edema was observed ( Figure 1). Given her history of cancer, blood work, a diagnostic lumbar puncture, and a positron emission tomography (PET)/computed tomography (CT) scan were recommended to address the suspected recurrence. Following a lumbar puncture, her cerebrospinal fluid (CSF) and cytology results were negative ( Table 1). However, the PET/CT scan revealed bilateral hilar uptake (6.2 and 5.3), consistent with a recurrence of cancer. No other significant uptake was identified. Combined with the brain MRI findings, we made a diagnosis of the leptomeningeal spread of a recurrent adenocarcinoma of the lung. FIGURE 1: MRI of the brain showing multiple foci of superficial enhancement in the occipital region compatible with metastatic disease (red circles) The patient received one cycle of pemetrexed and bevacizumab for systemic disease. The tumor was positive for an exon 19 deletion, at which point we initiated a discussion regarding the use of epidermal growth factor receptor (EGFR)-tyrosine kinase inhibitor (TKI) therapy. 
At three months post initiation of the treatment, the patient was no longer complaining of headaches and double vision. Her left-sided facial numbness and hearing loss markedly improved as well. Discussion Decisions regarding optimal cancer treatment modalities for leptomeningeal carcinomatosis depend on the patient, presentation, risk status, and cancer type. Whole-brain radiation therapy (WBRT), intrathecal chemotherapy (IC), ventriculoperitoneal shunting, EGFR-TKIs and, more recently, anaplastic lymphoma kinase (ALK) inhibitor therapy have yielded favorable outcomes in selected patients. WBRT is a useful palliative therapy but does not appear to improve survival. In a retrospective study of LC secondary to NSCLC by Morris, et al., no significant difference in median survival was observed between 46 patients who received WBRT vs. 59 who did not (p = 0.84). In addition to the transient benefits, WBRT often causes unbearable side effects and carries a significant risk of cognitive decline, especially in patients older than 75 years of age. This modality is therefore often reserved for patients with a poor functional status (Karnofsky performance score <60), encephalopathy, multiple serious neurologic deficits, bulky central nervous system (CNS) disease, and extensive systemic disease with few treatment options. Although IC has been successfully used to treat LC in patients with lymphoma and breast cancer, no real consensus has been reached regarding IC for lung cancer. Traditionally, methotrexate and cytarabine are not considered active against lung cancers, and early studies of LC indicate little or no response to these agents. Topotecan, which is occasionally administered systemically to patients with NSCLC, does not yield any real benefits when compared to other IC therapies, as demonstrated by Groves, et al. in a multicenter study of LC secondary to all solid tumors. 
This finding was attributed to the spread of cancer between the dura and arachnoid, rather than contiguous with the cerebrospinal fluid (CSF). Notable side effects of IC include meningitis, seizures, and dizziness. Systemic chemotherapy (SC) has yielded mixed results as a result of limited leptomeningeal penetration. Most chemotherapeutic agents do not achieve good CNS penetration. The benefit of SC was primarily attributed to the treatment of systemic disease, rather than any effects on the course of leptomeningeal spread. However, pemetrexed and bevacizumab have been shown to be effective against CNS metastasis. Pemetrexed improves survival, both as an initial treatment option and in refractory cases with CNS involvement, as shown by Beerz, et al. in a study of 39 patients where 70% of the patients achieved stable disease or a partial response with a median survival of 10 months. Bevacizumab has traditionally been used to treat glioblastoma multiforme and was proven safe for NSCLC patients with CNS metastases. One study of six patients, conducted in 2010, found that three patients achieved stable disease and two achieved a partial response. Intracranial hemorrhage was observed in only one of three patients using bevacizumab and a concurrent anticoagulation agent. EGFR-mutant lung cancer was first described in 2004 and has since been characterized as a distinct subset of lung cancer. EGFR-TKIs such as gefitinib and erlotinib have yielded promising results against these cancers, and both agents have been shown to significantly increase survival. For example, one study of 17 patients with EGFR-mutated lung cancer with brain metastasis (exon 19 and 21 deletions) demonstrated that eight of nine patients who concurrently received high-dose erlotinib therapy and WBRT achieved a partial or complete response, compared with six of eight patients treated with erlotinib alone. Patients in that study had a median time to progression of 11.7 months. 
Osimertinib, an irreversible EGFR-TKI currently in phase I trials is a promising candidate; patients who reached a treatment duration of 12 weeks exhibited symptomatic and radiologic improvements, and it is generally well tolerated. More recently, ALK mutations have led to studies of crizotinib, an ALK-TKI that has yielded promising results. However, pre-existing lesions tended to progress and/or new intracranial lesions tended to develop during crizotinib therapy. A recent phase II trial found that alectinib, a second-generation ALK-TKI, was efficacious in crizotinib-resistant cases. Conclusions Although LC remains a complex disease with a grave prognosis, progress during the last decade has led to improved survival among affected patients. However, the treatment of LC requires an individualized approach. WBRT, systemic therapy, and targeted therapy have been evaluated as candidates for improving a patient's quality of life. The current research and treatments associated with genetic testing are promising, and the identification of mutations such as EGFR and ALK have facilitated the use of targeted therapies that have enhanced progression-free survival and overall survival. Although many of the treatments administered for LC remain palliative in nature, we have gained a better understanding of the obstacles that must be overcome to prolong survival and improve the quality of life of the affected patients. |
/// New empty Canvas with no background
pub fn new(width: u32, height: u32) -> Canvas {
Canvas {
width,
height,
background: None,
display_list: DisplayList::new(),
}
} |
Dual-Source Energy Cooperative Harvesting Circuit with Single Inductor In this paper, we proposed a dual-source energy cooperative harvesting (DCH) circuit with a single-inductor. Compared with single pile energy harvesting (SPH) circuit, the DCH circuit enables extraction of power from both a photovoltaic (PV) cell under indoor faint light and two thermoelectric generators (TEG) in one inductor charge and discharge cycle. The DCH operation mode also increases the conversion efficiency of PV energy harvesting with introducing thermoelectric energy. According to the simulation results, it can increase up to 27%, and the energy conversion rate of the two TEGs is 58% on average. |
De novo-developed T cells have compromised response to existing alloantigens: using Ld-specific transgenic 2C T cells as tracers in a mouse heart transplantation model. In this study, the phenotype, TCR signaling events, and function of T cells developed de novo during adulthood in the presence of extrathymic alloantigen were investigated. C57BL/6 mice(H-2b) were first transplanted heterotopically with BALB/c hearts (H-2d) and treated with rapamycin for 2 wk to create a tolerant status. Three weeks postoperation, the mice were whole body irradiated and transplanted with bone marrow cells from 2C mice, which are transgenic for TCR, and most of their T cells are Ld-specific CD8 cells. The 2C T cells developed de novo in the C57BL/6 mice were not able to reject the heart allograft. No clonal deletion, TCR down-regulation, or CD8 down-regulation was found in the tolerized 2C T cells. There was no characteristic phenotype of these cells in terms of CD25, ICAM-1, CD44, and MEL-14 expression. Early TCR signaling events such as intracellular calcium concentration flux, tyrosine phosphorylation, Lck and Fyn kinase activities, and Lck and Fyn protein levels in the tolerized 2C T cells were comparable to their normal counterparts, but the tolerized T cells were defective in IL-2 production and proliferation upon H-2d alloantigen stimulation in vitro. Exogenous IL-2 could not reverse the compromised proliferation. The results of this study indicate that during adulthood, the de novo-developed T cells become tolerant to extrathymic Ag without clonal deletion. These newly minted T cells are functionally defective although they are indistinguishable from normal T cells in phenotypes and in some early signaling events. |
Lower sales at gas stations and in the auto sector pulled down Canadian retail sales in June after they surged higher in May.
Statistics Canada said on Wednesday retail sales fell 0.2 per cent in June to $50.7-billion.
The retail sales figure followed a report on Tuesday that wholesale sales fell 0.8 per cent to $63.1-billion in June.
TD Bank senior economist Brian DePratto said he expects the reading to show the economy grew at a 3.5-per-cent annualized rate in the second quarter.
“Slotting today’s data into the bigger economic picture does little to alter the assessment of the economy’s performance in the second quarter,” Mr. DePratto said.
Sales at gasoline stations fell 2.3 per cent, in part because of lower prices at the pump in June, while sales at motor-vehicle and parts dealers dropped 0.7 per cent.
Food and beverage stores were up 0.9 per cent and there was a 1.1-per-cent jump for building material and garden equipment and supplies dealers.
Health- and personal-care stores increased 0.6 per cent. |
Evaluation of the ISBA-TRIP continental hydrologic system over the Niger basin using in situ and satellite derived datasets. During the 1970s and 1980s, West Africa has faced extreme climate variations with extended drought conditions. Of particular importance is the Niger basin, since it traverses a large part of the Sahel and is thus a critical source of water for an ever-increasing local population in this semi arid region. However, the understanding of the hydrological processes over this basin is currently limited by the lack of spatially distributed surface water and discharge measurements. The purpose of this study is to evaluate the ability of the ISBA-TRIP continental hydrologic system to represent height changes. The basic analysis of in situ discharges confirms the impact of the inner delta area, known as a significant flooded area, on the discharge, characterized by a strong reduction of the streamflow after the delta compared to the streamflow before the delta. In the simulations, the flooding scheme leads to a non-negligible increase of evaporation over large flooded areas, which decreases the Niger river flow by 15 % to 50 % in the locations situated after the inner delta as a function of the input rainfall dataset used as forcing. This improves the simulation of the river discharge downstream of the delta, confirming the need for coupling the land surface scheme with the flood model. The deep aquifer reservoir improves Niger low flows and the recession law during the dry season. The comparison with 3 satellite products from the Gravity Recovery and Climated Experiment (GRACE) shows a non negligible contribution of the deeper soil layers to the total storage (34 % for groundwater and aquifer). The simulations also show a non negligible sensitivity of the simulations to rain uncertainties especially concerning the discharge. Finally, sensitivity tests show that a good parameterization of routing is required to optimize simulation errors. 
Indeed, the modification of certain key parameters which can be observed from space (notably river height and flooded zones height changes and extent) has an impact on the model dynamics, thus it is suggested that improving the model input parameters using future developments in remote sensing technologies such as the joint CNES-NASA satellite project SWOT (Surface Water Ocean Topography), which will provide water heights and extentat land surface with an unprecedented 50-100 m resolution and precision. Published by Copernicus Publications on behalf of the European Geosciences Union. Introduction Over the past 5 decades, West Africa has faced extreme climate variations with extended extreme drought conditions most recently during the 70s and 80s (Ali and Lebel, 2009). In this region, precipitation is closely linked with the monsoon, and better understanding and prediction are needed for improved water resource management. With an approximate length of 4180 km (2600 miles), the Niger river is the largest river in West Africa. It starts in the Guinea Highlands in southeastern Guinea and ends in Nigeria, discharging through a massive delta into the Gulf of Guinea within the Atlantic Ocean. It is a significant source of water and food for West Africa which, as an agricultural region, is highly dependent on the water availability and management practices. According to several studies (Coe, 1998;;), the seasonal and interannual cycle of the Niger river discharge is influenced by the hydrological processes, including overland processes (precipitation, evaporation, stream flows, floods, infiltration, etc.) and underground processes (groundwater and/or deep aquifer recharge). These processes are theorized to have feedbacks with the climate, rainfall variability and the carbon cycle (;Matthews, 2000;;Taylor, 2010;). 
Thus, a better parameterization of hydrological processes in atmospheric general circulation models (AGCMs) is necessary to obtain a better understanding of the feedbacks with the West African monsoon. This could then potentially translate into improved water resource management and climate prediction, at least at the regional scale (;Douville et al.,, 2003Douville et al.,, 2004;Lawrence and Slater, 2007;). Currently, the representation of the surface component of the hydrological cycle in AGCMs is done using continental hydrological systems (CHSs) composed of land surface models (LSMs), which provide the lower boundary conditions for heat, momentum and mass. Some AGCMs go further and include river routing models (RRMs) which are used to convert the runoff simulated by the LSMs into river discharge. RRMs transfer the continental freshwater into the oceans at specific locations (as source terms for the ocean model component). The evaluation of LSM-RRM systems is therefore a crucial task. This is generally done using offline simulations driven by atmospheric forcing which is as realistic as possible. Such forcing data are usually generated using a combination of atmospheric model reanalysis or short term forecasts combined with satellite-based products which are calibrated or bias corrected using gauge data (;;). These simulations are then evaluated with in situ river discharge data, which does not guarantee that the spatiotemporal distribution of water storage over and under the land surface is well represented. Over West Africa especially, measurement data are difficult to access due to geographical, geopolitical and economic issues. In this context, satellite remote sensing techniques (Alsdorf and Lettenmaier, 2003;;;) have become useful tools for hydrologic investigations. For instance, efforts have already been done to quantify the soil water content/groundwater using satellite data (;). 
Satellite altimetry has also been used for systematic monitoring of water levels in large rivers, lakes and floodplains and several studies have demonstrated the capability of using these sensors locally for estimating river discharge in large rivers, including the Amazon River (;;), the Ganges-Brahmaputra (a) or the Lake Chad basin (Coe and Birkett, 2004). Also, an advanced study of satellite altimetry by Enjolras and Rodriguez intended to derive water surface elevation of narrow river channels by using likelihood-estimation problem. In parallel, globally applicable remote sensing technique employing a suite of satellite observations has been developed and now provides estimates of the spatial and temporal dynamics of surface water extent at the global scale over the last 2 decades ((Prigent et al.,, 2007b). In the future, the joint CNES-NASA Surface Water Ocean Topography (SWOT, to be launched in 2020) mission will measure the surface water height with an unprecedented resolution of 50 m over the globe Rodriguez, 2009). This will enable a global scale near real time monitoring of the majority of the worlds rivers, lakes and reservoirs with spatial resolution of about one hectare (;). Such data should significantly accelerate the improvement of the representation of hydrology for global scale models. The need for a better representation of the global water budget has resulted in numerous implementations of river routing schemes into LSMs, and they vary widely in their complexity and degree of calibration. For water management applications on the watershed scale, highly parameterized, geographically specific models can be used to provide accurate estimates of streamflow and reservoir status (;Dai and Labadie, 2001;). For global scale applications, however, computationally efficient, easily parameterized, comparatively simple and physicallybased routing methodologies are preferable. 
In fact, land waters are supposed to play an important role in the atmosphere and ocean dynamics (;Dirmeyer, 2000Dirmeyer,, 2001Douville,, 2003Douville,, 2004;Koster et al.,, 2002Lawrence and Slater, 2007;). In AGCM applications, it is most important to close the water budget and get a good representation of the fluxes of water into the atmosphere and ocean. An early influential effort at large scale routing was done by Vorosmarty et al. who prepared a river routing network for the Amazon basin at a 0.5 resolution. Runoff produced by a water balance approach was routed through the network using a linear transfer model, with flow time calculated as a function of flow length, estimated subgrid scale sinuosity, and grid scale (). A similar linear transfer model was adopted by Miller et al. for application within the Goddard Institute for Space Studies (GISS) General Circulation Model (GCM) at the global scale. In their formulation, runoff produced by a GCM at 4 5 was routed to the ocean through a 2 2.5 network in which flow direction was determined by topography and velocity was a function of the slope. Because the scale of the implementation was quite coarse, slope based estimates of velocity were intentionally calculated to yield low values, providing an implicit correction for subgrid scale sinuosity and the time it would realistically take runoff to work its way through the river system. Sausen et al. implemented a linear routing scheme for the European Center Hamburg (ECHAM) GCM, with transport parameters semi-objectively calibrated to match observed flow in major gauged rivers. In a study of the Amazon River system, Costa and Foley adopted the velocity estimation procedure of Miller et al.. As a refinement, they estimated the sinuosity coefficient independently for each tributary within the Amazon basin, and they adjusted velocities as a function of stream order. 
Costa and Foley further divided runoff into surface and subsurface components and applied differential source retention times to each. Further variants on the Miller et al. approach include the global hydrological routing algorithm (HYDRA, Coe, 2000), which was implemented at a 5 resolution and included variability in surface waters, and made some adjustments to the Miller et al. method for calculating distributed velocities. Oki and Sud and Oki et al. continued this line of application through the development of the topographically corrected integrating pathways for routing models, TRIP (Total Runoff Integrating Pathways). Arora and Boer implemented a timeevolving velocity that depends on the amount of runoff generated in the GCM land grid, using Mannings equation to estimate flow velocities for a river chanel with a rectangular section. Decharme et al. (2008Decharme et al. (, 2011 used the TRIP approach to implement a flood routing scheme into the ISBA (Interaction Soil Biosphere Atmosphere)-TRIP CHS. The scheme accounts explicitly for the river routing, precipitation interception by the floodplains, the direct evaporation from the free water surface, and the possible re-infiltration into the soil in flooded areas. The regional and global evaluations of this scheme at a 1 by 1 spatial resolution emphasized the importance of floodplains in the continental part of the hydrologic cycle, through generally improved river discharges and a non-negligible increase of evapotranspiration. However, it was noticed that over some basins, including the Niger, the discharge was still overestimated. A possible identified cause was that these regions might overlie large aquifers that can be relatively uncoupled to the river. The difficulty of modelling the Niger basin and the current concerns about water resource management in West Africa make the improved understanding of this basin a scientific and socio-economic challenge. 
Moreover, its role in climate change and its potential feedback with atmosphere are crucial issues. It is then important for a LSM to be able to reproduce the key components of the water cycle and their evolution which will enable the detection of big anomalies in climatologic applications. The purpose of this study is to evaluate the performance of the ISBA-TRIP CHS model, including a flooding scheme and a new simple aquifer reservoir, over the Niger basin using comparisons with in situ measurements as well as recently available satellite derived data from 2002-2007. This period covers the core observation period of the African Monsoon Multidisciplinary Analyses (AMMA) project (). More precisely, the model is run in 4 different configurations (with/without flooding scheme/aquifers) to evaluate the impact of floods and aquifers on the Niger basin model configuration. In this study, we first examine the routing scheme and its ability to simulate discharge simulated by LSMs from the AMMA Land surface Model Intercomparison Project (ALMIP). For this, TRIP was run in offline mode (default made with no feedbacks with LSMs) with total runoff from 11 LSMs, including ISBA, as input data in order to explore the impact of routing alone on the river discharge. Secondly, we evaluate the ISBA-TRIP CHS model in fully coupled LSM-RRM mode in four different configurations using two rainfall datasets. The evaluation is done using a large variety of data consisting of gauging measurements for discharge and satellite-based products, such as water heights and flooded areas. The study also attempts to give quantitative estimates of the contribution of the different water budget components over the basin using satellite data. In Sect. 4, sensitivity tests were performed to determine the robustness of the model and where the greatest uncertainties exist with respect to model parameters. Finally, conclusions and perspectives are given in Sect. 5. 
Review of the ISBA-TRIP model ISBA is a state-of-the-art land surface model which calculates the time evolution of the surface energy and water budgets (Noilhan and Planton, 1989). In this paper, we use the 3-layer force-restore option (). It includes a comprehensive sub-grid hydrology to account for the heterogeneity of precipitation, topography and vegetation in each grid cell. A TOPMODEL approach (Beven and Kirkby, 1979) has been used to simulate a saturated fraction, f sat, where precipitation is entirely converted into surface runoff. Infiltration is computed via two sub-grid exponential distributions of rainfall intensity and soil maximum infiltration capacity. The TRIP RRM was developed by Oki and Sud at the University of Tokyo. It was first used at Météo-France to convert the model simulated runoff into river discharge using a global river channel network at a 1 resolution. The original TRIP model is only based on a single surface prognostic reservoir, S (kg), whose discharge is linearly related to the river mass using a uniform and constant flow velocity. In the ISBA-TRIP CHS, TRIP takes into account a simple groundwater reservoir, G (kg), which can be seen as a simple soil-water storage, and a variable stream flow velocity computed via the Manning's equation (Appendix A). In addition, ISBA-TRIP includes a two-way flood scheme in which a flooded fraction, f flood, of the grid cell can be determined (. The flood dynamics are described through the daily coupling between the ISBA land surface model and the TRIP river routing model, including a prognostic flood reservoir, F (kg). This reservoir fills when the river height exceeds the critical river bankfull height, h c (m) (Appendix B).
The flood interacts with the soil hydrology through infiltration, I f (kg s −1 ), with the overlying atmosphere through precipitation interception P f (kg s −1 ), and through free water surface evaporation E f (kg s −1 ). These three terms are calculated by multiplying, respectively, the total infiltration, precipitation interception and water surface evaporation over the grid cell by the ratio of flooded area to the grid area. This results in a system of three prognostic equations: where Q sb (kg s −1 ) is the deep drainage from ISBA, Q G out (kg s −1 ) the groundwater outflow, Q S in (kg s −1 ) the sum of the surface runoff from ISBA within the grid cell with the water inflow from the upstream neighboring grid cells, and Q S out (kg s −1 ) is the simulated discharge, while Q F in and Q F out (kg s −1 ) represent the flood inflow and outflow, respectively. See Appendix A and B for more details. The global evaluation of the ISBA-TRIP CHS model at a 1 by 1 resolution suggested that the model may not take into account some important process such as the presence of large aquifers in certain regions. Also, by comparing the chemical composition of river water and groundwater, Fontes et al. demonstrated that significant aquifer recharge occurs in the Niger Inland Delta region, especially during summer flooding. For these reasons, a simple linear aquifer reservoir was added to the model. This reservoir was built on the example of the groundwater reservoir, G, but with a significantly longer time delay factor, aq (s). This results in a new system of four prognostic equations: where represents the fraction of deep drainage going into the groundwater reservoir while the rest of the drainage (1 − ) goes into the aquifer. Unlike the groundwater reservoir, we assume that the aquifer reservoir local feedbacks are negligible, but contribute to the flow at the mouth of the river. 
The aquifer outflow Q Aq out (kg s −1 ) can be written as follows: where Aq (s) is a constant and uniform time delay factor, which represents the characteristic timescale for the aquifer reservoir to drain laterally to the ocean (out of the basin). This simple approach is currently motivated mainly by the lack of data describing the water table, which would be required for a more detailed approach. Figure 1 illustrates the configuration of the ISBA-TRIP CHS model used in this study. TRIP specific parameters The baseline parameter values are presented in this section; the sensitivity of the model to these parameters will be investigated in a subsequent section. For the model evaluation, the time delay parameters for the groundwater and deep aquifer reservoirs are fixed to 30 days and 4 yr, respectively. The aquifer parameter is initially fixed at 3/4 (which implies that 1/4 of the drainage flows into the deep aquifer). The aquifer reservoir is defined equally in each pixel. The river width is an important parameter because it modulates both the river flow speed and the floodplain dynamics. It is computed over the entire TRIP network via an empirical mathematical formulation that describes a simple geomorphologic relationship between W and the mean annual discharge at each river cross section (Knighton, 1998;Arora and Boer, 1999;Moody and Troutman, 2002;: where Q 0,5 yr (m 3 s −1 ) is the annual mean discharge in each grid cell estimated using the global runoff database from Cogley. As discussed in Decharme et al., the coefficient can vary drastically from one basin to another (Knighton, 1998;Arora and Boer, 1999;Moody and Troutman, 2002). Decharme et al. proposed that varies according to climatic zone and fixed to 20 for monsoon (;Decharme, 2007) coupled system. The surface runoff calculated by the land surface model (ISBA) flows into the stream reservoir. 
The flood dynamic is described using a prognostic flood reservoir which fills when the river height exceeds a critical value and vice versa. The flood fraction is based on sub-grid topography. Finally, we add a linear aquifer reservoir so that the deep drainage is divided between the groundwater and the deep aquifer reservoirs. basins and to 13 for semi-arid and arid basins. As the Niger river flows through both such climate zones, two different values are used within the Niger basin: is 20 for the branch of the river going from the river mouth (5 N) to 12 N and is fixed to 10 for the remaining branch of the river. The spatial distribution of the river width is shown in Fig. 3a. The key parameter for the floodplain parameterization is h c, the critical river bankfull height (. In this study, as proposed by Decharme et al., it is computed according to the river width via a simple power function: The spatial distribution of h c is shown in Fig. 3b. However, owing to both the uncertaintities in this parameter and its impact on model results, sensitivity tests will be carried out using arbitrary h c ± 20 % (), leading to an increase or decrease in bankfull height up to 2 m. Finally, as in Decharme et al., the Manning friction factor n riv varied linearly and proportionally to W from 0.04 near the river mouth to 0.1 in the upstream grid cells (Fig. 3c): where n riv represents the Manning n factor of the grid cell, n max and n min the maximum and the minimum values of the Manning friction factor (respectively equal to 0.1 and 0.04), W min (m) the minimum river width value and W mouth (m) the width of the mouth in each basin of the TRIP network. Methodology In order to determine the impact of the flooding scheme on simulated discharges, the TRIP routing model is used in offline mode, uncoupled from a LSM and without floodplains. 
ALMIP I, which is a part of the AMMA project, was motivated by an interest in fundamental scientific issues and by the societal need for improved prediction of the West African Monsoon (WAM) and its impacts on West African nations (). As part of this project, ALMIP I focused on better understanding land-atmosphere and hydrological processes over Western Africa. LSMs were run offline with prescribed atmospheric forcing consisting in a combination of observations, satellite products and atmospheric model output data. All of the LSMs used the same computational grid at a 0.50 spatial resolution (see the domain on Fig. 2). The advantage of using ALMIP data is that each LSM can simulate a different runoff response, therefore we use an ensemble of inputs. In the current study, 11 simulations are used over the 2002-2007 period. TRIP is used to compute daily outputs of discharges along the river and water mass storage for each activated reservoir. In addition, the ISBA-TRIP CHS coupled model is used with and without the flooding scheme to quantify the impact of the scheme on the discharge and the surface energy budget. As the TRMM-3B42 rainfall (see next section for details) was used as forcing for the ALMIP experiment, the same forcing is used for the ISBA-TRIP CHS simulation with and without the flooding scheme. In the second part of this study, the deep aquifer reservoir is implemented into the ISBA-TRIP CHS model and deep drainage water is then distributed between deep soil layers and this aquifer reservoir The model is run using two rainfall datasets (see next section for details) to take rain uncertainty into account in a simple manner, leading to 8 different simulations. Comparison with both in situ and remote sensing data will allow us to evaluate the simulated surface processes, the impact of the inclusion of floodplains and aquifers, and the ability of the model to estimate the river discharge. 
Atmospheric forcing dataset to run ISBA-TRIP The atmospheric state variables are based on the European Centre for Medium Range Forecasts (ECMWF) ECMWF numerical weather prediction (NWP) model forecasts for the years 2002-2007. The forcing variables consist in the air temperature, specific humidity, wind components at 10 m, and the surface pressure, all at a 3 h time step. Because of the importance of having accurate incoming radiation fluxes and precipitation, and because of the potentially significant errors in these variables derived from NWP models over this region (e.g. see ), merged satellite products are used. The downwelling longwave and shortwave radiative fluxes are provided by the LAND-SAF project (). Two products are used for rainfall forcing. The TRMM 3B42 product () is used by default. However, several studies have shown that RFE2 () produces rainfall over the Sahel agrees better with observed values than the other available rainfall products (e.g. ), but it is at a time step which is not well adapted to land surface modelling (daily time step). Therefore, a second set of rainfall forcing data was created by disaggregating the daily RFE2 to a three hour timestep using the TRMM rainfall data. The monthly total RFE2 rainfall is well preserved using this simple downscaling method. This rain forcing is referred to as RFE-Hybrid (RFEH) herein. All of the simulations presented in this paper were done using the two datasets as forcing rainfall. Satellite-derived inundation estimates are used to evaluate the spatial distribution and the time evolution of the flooded areas. Two different products are used. The first product is based on data from the MODIS multispectral imaging system installed onboard the Terra and Aqua satellites. In this study, the surface reflectance product (MOD09GHK) is used, which is defined as the reflectance that would be measured at the land surface if there were no atmosphere. 
The spatial resolution is 500 m for the corresponding MODIS images and the coverage is global (). In order to detect open water and aquatic vegetation in arid and semi arid regions, a classification is performed using the fact that water surfaces do not reflect in the visible and near infra-red part of the spectrum. A threshold value has been estimated for reflectance in the MODIS frequency band-5 1230-1250 nm and for the NDVI index (Table 1) in order to delineate the shallow, sediment laden, and open water over the Inner Niger Delta, and also in order to distinguish between aquatic vegetation and vegetation on dry land. It has been assumed that small values of surface reflectance in band-5 characterize open water, independent of the NDVI index. When the surface reflectance in band-5 increases to the median value, depending on the NDVI index, it is assumed that there is a partial coverage of dry land by water, aquatic vegetation or vegetation on dry land. Finally, dry land is assumed when the NDVI is small and surface reflectance in band-5 is large. NDVI has been shown to be a robust index for monitoring temporal changes of the vegetation photosynthetic activity (;). In the arid environment, a high level of vegetation photosynthetic activity can only be sustained by the presence of surface water or groundwater discharge. If dense enough, the aquatic vegetation and hydrophilic plants can mask underlying water and should be included in the estimate of the total area of the floodwaters. The NDVI ranges from negative values (open water) to >0.5 for dense vegetation. The second product consists in global estimates of the monthly distribution of surface water extent at about 25 km sampling intervals.
These data were generated from complementary multiple satellite observations, including passive (Special Sensor Microwave Imager) and active (ERS scatterometer) microwaves along with visible and near infrared imagery (advanced very high resolution radiometer; AVHRR). These estimates were first developed over 1993(, adjusted and extended over 1993-2004(b and recently recomputed for the entire period 1993-2007. This dataset has been extensively evaluated at the global scale (b) and at river basin scale, including the Niger river. In the present study, this dataset is aggregated to a 0.5 resolution and referred to as PP. Because PP does not distinguish between the diverse anthropogenic and/or natural water bodies, while the ISBA-TRIP output must be compared only with flooded areas, two additional datasets are used to hybridize PP in order to conserve information on flood inter-annual variability only: the Global Lakes and Wetland Database (GLWD; Lehner and Dll, 2004) and the Monthly Irrigated and Rainfed Crop Areas (MIRCA2000; ) database. The corresponding final product is named CPP in this study. The methodology is described in detail by Decharme et al., and so it is not detailed here. Water height changes over the basin are evaluated using the HYDROWEB hydrological database (http://www. legos.obs-mip.fr/en/equipes/gohs/resultats/i hydroweb). The water level time series are based on altimetry measurements from ENVISAT satellite. Seven sites were chosen for the evaluation, one upstream of the Niger inner delta, four downstream of the delta and two in the delta. The data are available at a regular 35 days time step (with occasional missing data) from November 2002 to the end of 2007. Total Water Storage (TWS) variations over the entire basin are evaluated using data from the Gravity Recovery and Climate Experiment (GRACE; ). 
GRACE provides monthly TWS variation estimates based on highly accurate maps of the Earth's gravity field at monthly intervals at a resolution of approximately 300-400-km ). The instrumentation and onboard instrument processing units are described in detail in Haines et al.. Here, we used 60 months (from January 2003 to December 2007, excluding June 2003 and January 2004 because products are not available) of the Release 04 data produced by the Center for Space Research (CSR at The University of Texas in Austin), the Release 4.1 data produced by the Jet Propulsion Laboratory (JPL), and the GeoForschungsZentrum (GFZ) Release 04 (more details concerning GRACE data are available online at http://grace. jpl.nasa.gov/data/). The combination of these data with those datasets described in the previous paragraphs above will allow us to evaluate the distribution of water in the different TRIP reservoirs and to have a first estimation/validation of the aquifer water storage variations. Improvement of simulated discharges due to river flooding The evaluation of the simulated river discharge is important for hydrological applications as well as for climate studies. Previous studies (Bonan, 1995;;Decharme et al.,, 2011;) have shown that the inclusion of a flooding scheme can impact the hydrological cycle by increasing the average evaporation and reducing the simulated discharge, which leads to a better estimation of the latter. Indeed, while an increasing number of LSMs used for large scale hydrological or GCM applications use river routing, most of these models do not represent floodplains. Flooded zones can be significant sources of evaporation and have a role of surface water storage, and their exclusion can result in an overestimation of the discharge for basins with significant annual flooding. 
To generalize this result, the TRIP RRM model was used in offline mode and without the flooding scheme (or aquifers) to convert simulated runoff and drainage from 11 LSMs into discharge. The LSMs considered for this study were part of the ALMIP I project. Figure 4 shows the mean daily discharges simulated by the ALMIP models (black line) for several locations along the river. The blue range is the difference between the minimum and the maximum value of discharges simulated by the models and the red line is the observed discharge. The corresponding statistics are given in Table 2. In terms of observed discharge, there is a clear change of behaviour after the delta (Niamey, Ansongo, Kandadji, Malanville, Lokoja) compared to upstream of the delta (Banankoro, Koulikoro, Ke Macina). Indeed, the discharge before the delta is almost twice as high as downstream of the delta. This reflects the significant impact of the inner delta on the discharge amplitude due to the floodplains. The first three discharges in Fig. 4 are located before the inner delta area (Banankoro, Koulikoro and Ke Macina). For these three locations, the discharge is reasonably represented by the ALMIP models. A bias in discharge is observed in 2005, 2006 and 2007 where the models simulate a smaller discharge compared to the previous years. This can be due to a bias in the rain forcing and will be discussed in Sect. 4.4. However, for the sites located downstream of the inner delta area (Niamey, Kandadji, Ansongo, Malanville and Lokoja) all of the ALMIP land surface models clearly overestimate the discharge leading to poor results compared to the three locations before the inner delta (see Table 2). In Malanville, the mean simulated discharge is around 5 times higher than that observed over this period. At the other sites (Niamey, Kandadji, Ansongo, and Lokoja), the mean simulated discharge is 2 to 2.8 times higher than observed. 
However, the variability of the discharge is generally well captured by the models as pointed out by the correlation scores (see Table 1). The green line represents the discharge simulated by the ISBA-TRIP CHS model with the flooding scheme activated. The results can be separated into three classes. First, in Banankoro and Koulikoro (before the inner delta), the discharge and thus the scores are not significantly changed, probably because no floods occur in these places. Second, after the inner delta (Niamey, Ansongo, Kandadji, Malanville and Lokoja), discharge has decreased considerably (50 %) in Niamey, Kandadji, Ansongo and Malanville and 26 % in Lokoja. The root mean square error (rms) has decreased considerably compared to the simulation without the flooding scheme (see Table 2). Indeed, part of the water in the floodplains evaporates, while part infiltrates into the flooded soil thereby reducing the stream reservoir water storage and discharge. The Nash-Sutcliffe coefficient or efficiency (eff) is also improved. Finally, in Ke Macina, the discharge is degraded by the addition of the flooding scheme. Among the sites before the inner delta, Ke Macina is the closest to the delta. It is likely that the model floods occur too soon upstream of the delta. This can be directly linked to a poor parametrisation or model parameter value (such as the river width) in this particular area. In the three locations upstream of the delta, there is a significant decrease of the simulated discharge in 2005, 2006 and 2007 which is not observed. This reduction of the discharge is observed for all the LSMs as well as for both configurations of ISBA-TRIP (with and without floods) and is more likely to be due to rainfall errors. This issue will be discussed in Sect. 4.4. The impact of the flooding scheme on the surface energy budget was also investigated where the total evaporation includes evaporation from the soil and flooded areas and transpiration. 
The flooding scheme contributes to an increase in the evaporation mainly over the inland delta and in the southern part of the basin (+280 % with TRMM rainfall and +200 % with RFEH), which are areas that generally experience significant floods (see Sect. 4.2.2). According to these results the floods occurring in the Niger inner delta region have an impact on the discharge, which is characterized by a decrease in its amplitude. The in situ discharge is about twice as high before the delta as after it (Niamey, Ansongo, Kandadji and Malanville) and increases again when reaching the mouth of the river (Lokoja) where several tributaries join the Niger river. In addition, the flooding scheme allows a better simulation of the discharge after the delta, highlighting the importance of representing floodplains in LSMs. However, some model deficiencies remain, such as a bias of discharge in Ke Macina (possibly due to the previously cited reason), but also a poor reproduction of the recession flow during the dry season. In fact, the discharge remains relatively high during the dry season compared to the observations, which implies that there is too much water in the river. Several reasons for this can be identified, such as underestimated evaporation, an underestimation of the water in flooded areas or the neglect of aquifers. Anthropogenic activities (dams, agriculture and water use for domestic consumption) are not explicitly accounted for and can also explain the bias between observed and simulated discharge, especially during the dry season when the population might need to extract more water from the river due to the lack of rain. In order to investigate the impact of aquifers on the discharge, a relatively simple and linear aquifer reservoir was added to the model (see Sect. 2.1 for details). 
The next section will focus on four different configurations of the ISBA-TRIP model and their respective impacts on some variables involved in the water cycle (discharge, water level changes, flooded fraction and total water storage). In order to take into account the rain uncertainties, two rainfall datasets are used as forcing (see Sect. 3.2 for details). Discharge A fourth reservoir was added to the ISBA-TRIP model to represent deep aquifer processes (Sect. 2). This reservoir is supplied by a fraction of the soil drainage and it does not supply water back to the river. Indeed, the model simulates too much water in the river, which could be due to the presence of large aquifers. Two different rainfall datasets are also used as forcing: TRMM-3B42, already used in the ALMIP I project and in the previous section; and RFEH (see Sect. 3.2). Figure 5 shows the simulated discharge for the four configurations when the model is forced by TRMM-3B42 and RFEH, respectively. Table 3 presents the statistical scores for each configuration and the best scores are in bold type. The statistics are generally better when both floods and aquifers are represented in the model, especially downstream of the inner delta when the model is forced by TRMM-3B42 (see Table 3). Upstream of the delta, the Nash-Sutcliffe coefficient and the RMS are generally better without aquifers with both forcings. With RFEH, the aquifers do not lead to a systematic improvement of the scores; however, they do not lead to a significant degradation either. Before the inner delta, the introduction of aquifers impacts the discharge mostly by reducing the monsoon peak, resulting in a deterioration of the rms score. This deterioration might be due to the "simplicity" of the aquifer reservoir parametrisation. Indeed, the aquifer is homogeneously defined over the whole domain and can generate biases in regions where this aquifer either does not exist or has a minor role. 
Downstream of the inner delta, the aquifer impacts mostly the recession flow in two ways: it lengthens the period of maximum discharge and reduces the low flows. This results in a deterioration of the period except in Lokoja where the period and the recession law are improved (Fig. 5a). When the model is forced by TRMM, the presence of aquifers considerably improves the recession during the dry season. The reduction of low flows is explained by the fact that the river empties faster after the rainy season which results in a more realistic discharge during the dry season. In terms of statistics, the scores (ratio, rmse, eff) are similar or slightly deteriorated, except in Malanville and Lokoja where they are improved. The correlation score, however, is improved at all of the sites. The sensitivity of the model to the choice of the time delay factor Aq and the fraction will be presented in Sect. 5. The scores are greatly improved by the addition of the flooding scheme for the locations situated downstream from the inner delta. The configuration with floods and aquifers generally leads to a good improvement of the scores in the sites located after the inner delta when the model is forced by the TRMM rainfall dataset. It is less obvious when the model is forced by RFEH rainfall. However, when the aquifers deteriorate the scores, the deterioration is relatively small compared to the improvements (see Lokoja for example) and likely due to a poor parametrisation of aquifers in these regions. However, we recall that global applications do not aim at calibrating input parameters but at detecting the key processes which impact the evolution of the water cycle. Finally, we notice that all of the configurations poorly reproduce the discharge in 2005, 2006 and (to a lesser extent) in 2007. 
To investigate the role of the inner delta over evaporation, we looked at the relative difference of total evaporation over the delta between the simulations NOAQNF and NOAQF when the model is forced by TRMM (the results are similar withe RFEH). This is shown on Fig. 6 (blue solid line). Moreover, we added the relative difference of discharge between Niamey and Koulikoro (black solid line). Ke Macina is closer to the delta, but as many data are missing on this station, we looked at Koulikoro for the comparison. The dashed lines represent the absolute discharge in Niamey (green) and Koulikoro (red). During the monsoon, the observed discharge in Niamey is 40 to 80 % less than in Koulikoro, as noticed before. However, while the discharge in Koulikoro decreases really fast at the end of the rainy season, the discharge in Niamey remains at its maximum value and even increases a little, resulting in a second peak of discharge. While the second peak corresponds to the flood signal of the upper Niger basin delayed by the inner delta and has been observed for decades, the first peak is likely related to increased contribution of the tributaries located downstream from the delta that appeared in the recent 10 to 15 yr (). The transition between the monsoon and the post-monsoon regime is also visible if we look at the total evaporation simulated with and without the flooding scheme over the delta. Indeed, during the monsoon, there is hardly any difference of evaporation between the two simulations. But, during the post-monsoon, the model including floodplains simulates 30 % more evaporation than without the flooding scheme. The floodplains intensify the creation of evaporation over the delta and the time correlation with the second peak of discharge in Niamey suggest that they also have an impact on the recession flow by lengthening the period of maximum discharge. From 2005 to 2007, we notice a weaker evaporation over the delta than during previous years. 
This is coherent with the simulated discharge which is also very weak during these three years. Flooded areas The quantification of wetland extent is an important step towards a better representation of surface water dynamics. In this study, the time and spatial distribution of wetlands were evaluated over the inner delta region, which is a large inundated area, and over the whole basin. Figure 7a and b show the time evolution of the mean monthly flooded fraction (in %) averaged over the inner delta region and over the Niger basin, respectively, with and without aquifers, when the model is forced by TRMM. Figure 7c and d present the same results when the model is forced by RFEH. Only the two configurations with and without aquifers are shown as there is no simulated flooded fraction without the flooding scheme. The blue range in Fig. 7a and c represents the interval between the minimum and the maximum MODIS-derived (JFC) flooded fraction. Over the delta, the simulated flooded fraction is generally included in this range, although it tends to be on the low end for one of the rainfall forcings. With both rainfall datasets, the presence of aquifers results in a reduction of flooded areas. But the impact of aquifers on the flooded fraction is more obvious for the simulation forced by TRMM rainfall. Indeed, the aquifers greatly improve the period of the floods. However, as shown in the figures, the CPP product is around 3 times higher than the modelled values over the basin and 10 times higher over the delta. This can be explained by the fact that the multi-satellite method can encounter some difficulties in accurately discriminating between very moist soils and standing open water, likely overestimating the actual fraction of inundations. Model deficiencies may also explain this bias. They can be related to routing deficiencies due to a poor parametrization, or to LSM deficiencies in the calculation of floodplain evaporation and/or infiltration. 
Figure 8a and b show the time series of de-seasonalized anomalies (obtained by subtracting the 12-yr mean monthly value from individual months and then divided by the standard deviation) over the delta and over the basin, with and without aquifers, when the model is forced by TRMM. Figure 8c and d present the same results when the model is forced by RFEH. Over the delta, the Fig. 8a and c suggest that the model and the data are in good agreement in their time variations, with a better phasing between CPP and ISBA/TRIP. Over the basin, the CPP and model anomalies globally corroborate in phase and amplitude. Figure 9a show the monthly relative CPP flooded fraction averaged over the period 2002-2007. The monthly values have been divided by the maximum monthly value over 2002-2007 to determinate the main observed flooded areas. According to these observations, the main inundations occur between July and December in three principal regions: the inner delta in Mali, the Northern Nigeria and the Southern basin. Figure 9b shows the monthly spatial correlation between CPP and ISBA-TRIP when the model is forced by TRMM with floods and aquifers. Over the 3 principal inundated regions, the correlation is bigger than 0.4. This correlation does not change significantly according to the configuration of the model. The impact of general flooded areas over the evaporation was investigated. For this, only the grid cells with a flooded fraction higher than 15 % for the configuration NOAQF were considered. These cells represent 11 % of the basin if the model is forced by TRMM and 7 % of the basin if the model is forced by RFEH rainfalls. Figure 10 presents the averaged relative difference of total monthly evaporation simulated on these cells with (red) and without (blue) floodplains when the model is forced by TRMM. 
The evaporation on flooded areas is generally higher with the flooding scheme than without floods, especially during the monsoon and post-monsoon periods (20 to more than 50 % higher). The same observations are done for the simulations forced by RFEH. River height change To complete the evaluation of surface water dynamics, the river height time changes are compared to estimates from the HYDROWEB hydrological database developed by the LEGOS/GOHS which gives estimations of height changes at several points along the Niger river (Fig. 11). The seven locations used for comparison are noted in purple in Fig. 2. The bias error on the HYDROWEB water levels measures is estimated to be around 20 cm and the peaks of water height changes are within a range between 2 and 4 m. Since our interest is to be able to reproduce extreme events, this error is considered as reasonable for evaluating water height changes. The water level changes are represented in Fig. 11 for the four configurations when the model is forced by TRMM-3B42 rainfalls. The statistical scores are represented in Table 4 for the two rainfall datasets. The scores are generally improved by the presence of the flooding scheme. The addition of aquifers is more relevant when the model is forced by TRMM rainfall than RFEH. However, the scores are not greatly deteriorated by the presence of aquifers and considerably improved at the other sites. Without floodplains, the peaks of water height changes are overestimated. The model also overestimates the peaks of positive height changes which might be due to forcing anomalies (rain) or to model deficiencies. Indeed, the surface runoff stream function might be false in some areas and, if overestimated, results in an overestimation of water height variation during rain events. Also, uncertainties in the river bed slope can also result in an overestimation of the water height changes in the valleys. Moreover, Yamazaki et al. 
showed the limitation of the kinematic wave approach. Indeed, when kinematic wave equation is used for discharge calculation, the predictability of water surface elevation becomes bad in flat river basins with floodplains. However, no attempt is made to calibrate these parameters here, which would be a long and difficult process and which is not necessary for use in a GCM. Total terrestrial water storage For global applications, it is of interest to evaluate the time evolution of total water storage (TWS) in LSMs and the contribution of each component to the total storage. Figure 12 shows the comparison between 3 GRACE satellite products that estimate the total water storage (TWS) change globally at 1 resolution (the blue range in the lower panels represents the difference between the maximum and minimum monthly observation values) and the water storage change in all of the lower panels represents the mean water storage change of the Niger basin in all of the ISBA-TRIP reservoirs. The upper panels contain the water storage change in each reservoir (averaged over the basin) and the middle panels present the time evolution of the rain, drainage, runoff and evaporation over the basin. On the figure, only the results for the configuration AQ-F forced by TRMM are shown but Table 5 presents the correlations for each configuration. The comparison of ISBA-TRIP water storage change with the GRACE products over the Niger basin shows a very good correlation between the simulation and observations (more than 0.78) independantly of the configuration considered. The contributions of each reservoir to the total water storage change appear in the Table 6 for the configuration AQ-F. Although the uppermost soil layers (approximately 1 to a few meters) comprise most of the total water storage change over the basin (49 %), the contribution of the other reservoirs, such as the groundwater and the aquifer, are not negligible (17 % each). 
The contribution of flooded zones is less (4 %), but since their impact on evaporation is not negligible, they must be considered also. These results emphasize the need for considering all such reservoirs in LSMs in order to close the water budget. Generally, studies compare the GRACE water storage change to the water storage change in the hydrologic soil layers only, i.e. the first soil meters (green curve in the last panel). However, this approximation is likely less valid for regions with significant storage in flooded zones and deeper soil layers since the contribution of these two reservoirs to the total water storage is not necessarily negligible. Rainfall comparison A comparison of the rain datasets was done for every year. The averaged monthly ratio for every year (TRMM − RFEH)/(TRMM + RFEH), which represents the relative bias of one dataset to the other, has been calculated when the monthly sum (TRMM + RFEH) is greater than 1 mm/day. The most significant differences are observed during the monsoon period and visible in Fig. 13 which presents the previous ratio for the months of July, August and September 2002-2007. The basin is delimited by the black contour. Of note, significant differences are seen in the upper basin. From 2002 to 2004, the TRMM rainfall gives 20 to 80 % more rainfall than RFEH. This area is the main source region for the river and this difference probably explains the fact that the discharge simulated when the model is forced by TRMM is generally larger than the discharge when the model is forced by RFEH, in particular, when there is no flooding scheme (twice as large as with RFEH). Moreover, the discharge simulated using TRMM rainfall has a longer recession period, probably due to the fact that there is more water going from the floodplains to the river after the flooding season. Figure 13 also shows that in 2005, 2006 and 2007, the relative bias between the two datasets is no longer obvious. 
Looking at the discharge we can see that during these 2 yr, the two rainfall products produce a very similar discharge amplitude, which results in a big reduction of the discharge amplitude simulated by TRMM in comparison with previous years. One possible cause for the reduction in input rainfall is that the gauge analysis source was changed from the GPCC Monitoring analysis to the Climate Prediction Center (CPC) Climate Analysis and Monitoring System (CAMS) in May, 2005. This change was made to take advantage of the timeliness in CAMS, but in retrospect it introduced a discontinuity in the error characteristics of the gauge analysis (G. J. Huffman, personal communication, 2012). Aquifer storage Over the Niger basin, it was noticed that the discharge was still overestimated. A possible identified cause was that these regions might overlie large aquifers that can be relatively uncoupled to the river. The available data concerning the aquifer storage are generally very localized, making the comparison with such a global scale model not relevant. Figure 14 shows the repartition of the aquifer recharge over the basin when the model is forced by RFEH. As expected, the aquifer recharge is very heterogeneous over the basin and follows rain patterns. There is also more aquifer recharge when the model is forced by TRMM than by RFEH. The aquifer reservoir is a relatively simple single-parameter linear reservoir and thus cannot represent high frequency fluctuations and distribution of the aquifer recharge. However, the analysis of total water storage have shown that its contribution to this total storage is not negligible and must be taken into account to reproduce the evolution of the water budget. Sensitivity tests Sensitivity tests were performed to determine the input parameters which have the most significant impact on the simulations. 
For global simulations, it is preferable that the model is not sensitive to too many parameters since tuning is a long and fastidious process at the global scale and spatially distributed global scale observational data is currently rather limited. Generally, physiographic relationships or the derivation of secondary parameters are preferred. The sensitivity of the ISBA-TRIP model to several key input parameters was investigated in this study in order to test their importance for a single regional scale basin. Table 7 presents the key input parameters and the variations applied. Both rainfall datasets were used for this study; the sensitivity tests using the TRMM-3B42 rainfall dataset lead to the same tendencies as those using RFEH, to a lesser extent when forced by RFEH. As the sensitivity tests generally lead to the same tendencies whichever rainfall dataset is used as forcing, the different figures show the results for only one rain forcing. The impact of the river critical height, h c, on the simulated discharge was examined first. The river width W is kept at the default value. Increasing the critical height by 20 % leads to 5 % less flooded fraction over the inner delta and in the south of the basin. The evaporation also decreases over the flooded zones by 4 to 12 % (relative bias). Conversely, when decreasing the river height by 20 %, the flooded fraction is 5 % more over the same areas and the evaporation is increased by 14 to 24 %. The water height changes are also influenced by the critical height modification. Over the seven virtual sites, an increase of h c globally increases the water height changes (+30 %), while a decrease of h c decreases the water height changes (−16 %). This can be explained by the fact that a river with a small h c will be flooded earlier and the water will spread more rapidly over the surrounding area, making the river water level less sensitive to rain events. 
In terms of inter-annual discharge, increasing or decreasing h c, respectively, increases or decreases the amplitude of the discharge by 5 to 15 % ( Fig. 15a and b). However, the annual variability of the discharge is not changed by a modification Hydrol. Earth Syst. Sci., 16, 1745-1773 of the critical height. In Niamey, Ansongo and Kandadji, the increase of h c leads to better statistical scores, which might suggest that the model overestimates the flood extend in these areas. In contrast, in Malanville, the scores are better when reducing the critical height, which suggests an underestimation of flooding at this site. In Lokoja however, the scores are better for the standard simulation. The impact of the river width, W, was also investigated. The critical height is not changed. Increasing W increases the amplitude of the discharge by around 6 %, while decreasing arbitrary W by 20 % decreases the discharge by 9 % (Fig. 15c and d). The water height changes vary differently according to the site. For example, for location 1 (see Fig. 2 for locations), a 20 % reduction of the river width reduces the mean water height changes by 35 % over the studied period. However, for locations 2, 4, 5, 6 and 7, the mean water height changes increase by 15 % to 28 % and there is no change for location 3. Indeed, water height changes depend on the topography which is modified with river width variations. The evaporation over the flooded areas is reduced by 3 to 9 % when W increases and increased by 4 to 16 % when W decreases. There are no significant impacts of W and h c on the total water storage change. Indeed, the storage of the different reservoirs and the amount of drainage are only slightly changed by the modification of these parameters. The mean value of the Manning coefficient, n riv, is around 0.075 and most of the pixels have values above 0.06 (91 out of 110). 
Since the Niger basin covers a large area, the soil properties are very heterogeneous all over the basin, making it necessary to use spatial distributions of soil parameters. Two new distributions of n riv were created and used to run the model: one distribution in which n riv coefficient is arbitrary reduced by 40 % and the other one in which it is increased by 20 %. In order to keep a value included in a reasonable range (between 0.03 and 0.1), all the values out of this range after modification are set equal to the closest value in this range. Figure 16a shows the behaviour of the discharge for each distribution of n riv. Increasing the Manning coefficient delays the response of the river to rain events. Indeed, small values of the coefficient speed the rise in water level and increase discharge amplitude. Also, the decrease of the discharge after the rainy season is faster when n riv is smaller. We can also notice that when n riv is bigger, the model is better able to dissociate the different rain events and two peaks of discharge appear. Flooded areas and evaporation are higher for large values of n riv as the water flows more slowly in the river bed, generating smaller river height changes, and flooded areas empty to the river more slowly. The evaporation increases by 14 % over main evaporation areas when n riv is 20 % higher and decreases by 18 % when it is 40 % smaller. Flooded areas are 15 % higher over the inner delta area when the Manning coefficient increases and 30 % smaller when it decreases. The increase of n riv also delays the water height changes, while small values of n riv decrease the peaks of river height changes. However, the impact of this coefficient on the water height change is more or less significant according to the observation sites, and for most of them this impact is not obvious. Finally, these modifications of n riv have no significant impact on the total water storage change. 
Thus, the current distribution used in the model is the most reasonable according to the scores. The model is quite sensitive to Manning coefficient, which seems coherent. Since this coefficient is used for the calculation of the flow speed, it will impact the discharge, but also the creation of floods. We also investigated the impact of increasing the groundwater reservoir's time delay factor on discharge, which extends the time of exchange between the groundwater reservoir and the river. Decharme et al. estimated that a time delay factor of the order of 30-60 days is generally Hydrol. Earth Syst. Sci., 16, 1745-1773, 2012 www.hydrol-earth-syst-sci.net/16/1745/2012/ Fig. 15. Impact of modifications of the critical height h c (a an b, up) and of the river width W (c and d, down) on the discharge (RFEH is used as forcing). The standard run stands for the simulation AQ-F with standard parameters (used for the simulations in previous sections).. 16. Impact of the Manning coefficient on the river discharges and on the river height changes (RFEH is used as forcing). The standard run stands for the simulation AQ-F with standard parameters (used for the simulations in previous sections). Fig suitable for global simulations. The increase of impacts the discharge on the descending phase by deteriorating the recession law. The scores are not significantly changed by the increase of. The total water storage is not highly dependant on either (the mean variation represents about 5 % of the mean water storage change). However, previous results emphasized that this parameter is important since it increases the residence time of water storage in the basin and allows a more realistic simulation of the discharge. Finally, we investigated the impact of parameter related to the aquifers. 
The reduction of the distribution factor (which means an increase of the water going to the aquifer) decreases the discharge amplitude before the inner delta and accelerate the recession of the discharges after the inner delta (see Fig. 17). The scores are not significantly changed by the value of when the model is forced by RFEH and experience only few changes when forced by TRMM-3B42. The aquifer reservoir time delay factor has also no impact on the discharge as aquifers are assumed to be too deep and too slow to impact directly the river discharge. Modifications of Aq have a negligible impact on the total water storage of the basin (the mean variation represents less than 10 % of the mean water storage change). However, the simulation is done over a relatively short period (5 yr) over which the aquifer time delay factor might be less significant. Over longer periods of time, as for example for climatic studies, it is possible that water storage by aquifers and water discharge to the ocean has a significant impact on the water budget, and thus Aq could be one key parameter contributing to the water balance.. 17. Impact of aquifer distribution factor on the discharge (TRMM is used as forcing). The standard run stands for the simulation AQ-F with standard parameters (used for the simulations in previous sections). Discussion The presented study investigated the impact of a linear flooding scheme and a simple aquifer storage on the simulation of the Niger basin. The flooding scheme decreases streamflow and increases evaporation over flooded areas. The impacts of floods on the water fluxes and storage terms are found to be coherent with other studies (;Decharme et al.,, 2011), thus we further emphasize the need for representing these processes in GCMs. 
Moreover, the observed data from the ABN have shown a clear change of behaviour of the discharge after the inner delta compared to the discharge before the delta (the discharge is almost divided by two), highlighting the role of the delta in the discharge reduction. This is coherent with the impact of the flooding scheme on the simulated discharge (divided by two after inclusion of floodplains in the model). However, it seems that in ISBA-TRIP, floods occur too early upstream of the delta, as suggested by the results in Ke Macina where the simulated discharge starts to be reduced while it is not the case in the observations. This might be due to poor values of the river parameters, such as river width in this particular region. The aquifer reservoir reduces the low flows and impacts the recession law, especially when the model is forced by TRMM. Moreover, its contribution to the total water budget is not negligible, and thus the consideration of aquifer processes is necessary to better simulate the evolution of the water cycle components. And indeed, several studies qualitatively suggest the presence of a deep water storage reservoir. The results also suggest that the coupled land surface and river routing model provides a reasonable estimation of inland hydrological processes of the Niger basin when the flood scheme is activated and a deep aquifer is considered. Several diverse datasets have been used for model evaluation such as river discharge, spatial and temporal evolution of flooded areas and water height changes measured by satellite. These data provide basic constraints for estimating the sub-surface water storage and dynamics, but also the shallow soil water content and the groundwater storage, which are linearly related to the surface water. The comparison with GRACE total water storage dataset also show a good ability of the model to reproduce the evolution of total inland water. 
Evapotranspiration is the remaining water budget component, but large scale observations are not available. The evaluation of this variable has been done within the context of several other studies. The ISBA surface temperature was evaluated using brightness temperatures from AMSR, which is related to the surface energy budget and near surface soil moisture; and the monthly sensible heat fluxes aggregated from local scale observations to the ALMIP grid square were evaluated for a semi-arid region within the Niger basin (net radiation was imposed, thus monthly Bowen ratios can be estimated; ). Finally, regional scale water budget studies were performed over West Africa using ISBA evaporation estimates (). All of the aforementioned studies imply that monthly scale evaporation estimates are reasonable. Moreover, Mahe et al., estimated the water losses of the inner delta of the Niger river and their evolution from 1924 to 1996. They estimated the total evapotranspiration from the delta to be about 800 mm yr −1 over the period 1924-1996, varying between 400 mm yr −1 and 1300 mm yr −1. The total evapotranspiration calculated by ISBA over the period 2002-2006 is 662 mm yr −1, which is contained in the range estimated by the previous study. They also related the water losses in the delta to the expansion of the floodplains, highlighting the importance of considering floods in a LSM. However, some model deficiencies remain and can be due to different factors: Hydrol. Earth Syst. Sci., 16, 1745-1773, 2012 www.hydrol-earth-syst-sci.net/16/1745/2012/ -A bias in the runoff and drainage calculated by the LSM. Further improvements could be obtained by calibrating the relevant parameters, but such a procedure is not relevant to GCM modelling. -An over-simplified routing model. Indeed, global scale routing models are generally parametrized by geomorphologic relationships, which is not always realistic. 
Spatially distributed basin-specific parameters would undoubtedly improve the simulations. -Rain biases can also be the origin of model biases. In this study, we have seen that the two generally accepted best rainfall datasets over this region give significantly different results. Sensitivity tests have shown that a good routing model is required to reduce the simulation errors. For example, Fig. 15 shows that while increasing h c in Niamey, Kandadji and Ansongo would improve the simulation score, it would have the opposite effect in Malanville. Thus, improvements in remote sensing technologies should help to create maps of spatial and temporal evolution of inland waters (river width, flooded areas expansion, river height) and thus compensate for the lack of in situ measurements. These data will then either be used as input data and replace geomorphologic relations used currently to describe these parameters, or they will be assimilated into the model to correct simulation errors. In GCMs, the input parameters, such as the Manning coefficient, critical height, river width and depth, are defined by empirical relationships which might not give the best results for all modelled basins, since the main objective of such parameterizations is to give the best overall global results. However, for regional or basin scale studies, these relationships lead to non-negligible known errors which could be reduced using satellite data. Indeed, satellite data could be used to spatially distribute parameters by basin and then could contribute to the development of a global database describing the major river characteristics, at least the stream width and the river bankfull height. This is an important step if GCM climate scenario output is to be used for water resource management at the regional scale. Input rainfall uncertainties can also be the cause of biases in the simulations, as shown in Sect. 
4.1 where the model, forced by two different rain datasets, gives significantly different results. In this paper, only the TRMM-3B42 and TRMM-RFE2-hybrid rain dataset, RFEH, were used for the bulk of the validation. However, other rain datasets were used as input rainfall to run the ISBA-TRIP model, such as PERSIANN (Precipitation Estimation from Remotely Sensed Information using Artificial Neural Networks, http://chrs.web.uci.edu/persiann/) from the Center for hydrometeorology and remote sensing (CHRS) and CMORPH (CPC MORPHing technique, http://www.cpc.ncep.noaa.gov/ products/janowiak/cmorph.shtml) from the United States National Oceanic and Atmospheric Administration (NOAA). The results of the simulations using both of these rainfall datasets showed a significant overestimation of the discharge (about 5 times higher than with the RFEH forcing for both CMORPH and PERSIANN forcing, and twice higher for the TRMM forcing) at all discharge observation sites, even with the representation of floods and aquifers. This is consistent with the work of Pierre et al. who showed that CMORPH dataset clearly overestimates precipitations over the Sahel. Improved spatially distributed remotely sensed datasets which are more precise for hydrological applications are thus needed. Conclusions and perspectives This study describes the evaluation of the ISBA-TRIP Continental Hydrologic System (CHS) over the Niger river basin, using a prognostic flooding scheme and a linear deep aquifer reservoir. The simulations are done at a 0.5 by 0.5 resolution over the 2002-2007 period. The flood scheme accounts explicitly for the precipitation interception by the floodplains, the direct evaporation from the free water surface and the possible re-infiltration into the soil. The deep aquifer reservoir has no feedback with the river locally and drains water to the river mouth over a comparatively long timescale. 
The model has been developed for use in climate model applications (coupled to the ARPEGE RCM and GCM at Mto France) where the representation of processes such as evaporation from the continental surface and freshwater fluxes to the ocean are fundamental to the global water budget. These applications especially aim at detecting strong anomalies in the future climate, and for this reason we focused on evaluating the ability of the model to reproduce inland waters anomalies. The model was run in four different configurations to evaluate the separated impacts of the flooding scheme and the aquifer reservoir on the modelisation of the Niger basin. Moreover, two different rainfall were used as forcing in order to take into account the impact of rain uncertainties on the simulations. The evaluation is done using a large variety of data, consisting of gauging measurements and satellite-derived products. This allows the spatially distributed evaluation of the separation of the water storage into its different components and it gives a first estimate of aquifer dynamics over the basin. Considering the relative simplicity of the routing channel, the model provides a good estimation of the surface water dynamics: the spatio-temporal variability of the flooded areas, the river discharge and the river water height changes. The flooding scheme leads to an increase of evaporation and reduction of discharge after the inner delta area, testifying for the need to incorporate flood representations into land surface models (LSMs). The behaviour of the observed discharge also suggest an impact of the inner delta, known as an important flooded area, on the discharge. The aquifer reservoir impacts the representation of both low flows and the recession law during the dry season. Note that recently an option to include a detailed representation of aquifers has been introduced into the ISBA-TRIP CHS (). 
However, the quality of the input and observational data required to evaluate the scheme is currently lacking over the Niger basin. For this reason we have opted for a more simple linear reservoir approach in this study (consistent with the other TRIP reservoirs). However, the possible link between river height and aquifer storage will be explored using remotely sensed data in future work. The comparisons with GRACE total water storage change (GFZ, CSR and JPL) were used to evaluate the ability of the model to reproduce the evolution of the total inland water, and good overall agreement of total water stored with GRACE was found. Finally, the use of two different rainfall datasets as forcing has shown the sensitivity of the model to rain uncertainties. Despite the fact that the main features of the river dynamics and water budget terms are represented reasonably well by this relatively simple system, some simulation deficiencies remain. For example, the model has a difficulty in terms of reproducing the discharge during the low flow period or the two annual peaks of discharge (only one peak is reproduced by the model). These deficiencies might be due to precipitation uncertainties or LSM errors (in terms of subgrid runoff, evaporation and soil water transfer physics, input LSM physiographic parameters such as vegetation indicies, soil texture and depth, etc.): but the focus in this study is mainly on river, floodplain and aquifer dynamics. Precipitation uncertainties were briefly touched upon by using different input forcings, but few currently available rainfall products are good enough to be useful for hydrological modelling studies over this region (notably owing to large biases). Regarding the RRM errors, Decharme et al. 
have discussed the questionable aspects of the flooding scheme such as the empirical computation of the river width, the choice of the river bankfull height, the simplified geometry of river stream and flood reservoirs, or the use of the Manning's formula for computing the mass transfer between them. Moreover, sensitivity tests have shown the non-negligible impact of some of the parameter values on simulations. However, the model has been developed for global climate applications at low resolutions and must be as robust as possible to be applicable at global scale, and therefore has a limited number of tunable parameters. However, upcoming advances in remote sensing technologies should permit an optimization of the spatially distributed parameters of the model. In fact, forcing uncertainties, especially rain uncertainties, represent a limitation for model tuning at this scale. Moreover, they can compensate the non-representation of lakes and large ponds. A global database describing the basin characteristics such as the river width and the bankfull height would be of great interest for improving the model simulations. This likely depends heavily on advances in remote sensing technologies, which should help to get maps of spatial and temporal evolution of inland waters (river width, flooded areas expansion, river height, etc.) and thus compensate for the lack of in situ measurements at the global scale. The joint CNES-NASA satellite project SWOT will provide water heights and extent at land surface with an unprecedented 50-100 m resolution and precision (centimetric accuracy when averaged over areas of 1 km; ). These data will then either be used as input data and replace geomorphologic relations used currently to describe surface parameters, or they will be assimilated into the model to correct model errors. Indeed, a small number of recent studies have begun to quantify the benefits of such a mission for land surface hydrology. 
For this purpose, synthetic water elevation data were created using the JPL Instrument Simulator (Rodriguez and Moller, 2004) and assimilated into CHS systems ). In all of these studies, the assimilation of synthetically generated SWOT measurements helped to reduce model errors and improved river discharge simulation. Other studies have used SWOT simulated data as inputs in algorithms to obtain estimates of river depth and discharge (;;Biancamaria et al.,, 2011. These preliminary results are promising and show the current need for such a mission, and the potential for improving the representation of hydrological processes in current models. Consequently, the next step of this work will consist of integrating synthetic SWOT data into a suitable assimilation system to determine their impact on the simulated discharge using the ISBA-TRIP CHS described herein. The TRIP river discharge and groundwater outflow The river discharge simulated by TRIP (Eq. 1) is computed using a streamflow variable velocity, v (m s −1 ), and via the Manning's formula: where L (m) is the river length that takes into account a meandering ratio of 1.4 as proposed by Oki and Sud, s (m m −1 ) is the downstream river height loss per unit length approximated as the river bed slope, R (m) the hydraulic radius, (m −3 s −1 ) a constant equal to 1, and n riv the dimensionless Manning friction factor which varies from the upstream part to the mouth of each basin. The river bed slope is indeed a critical parameter to compute velocity via the Manning formula. The STN-30p Digital Elevation Model (DEM) provided at 0.5 C by 0.5 C resolution by the ISLSCP2 database (http://www.gewex.org/islscpdata. htm) has been used. The STN-30p DEM was heavily edited to represent the actual elevation along the river network on a global scale, based on the aggregated HYDRO1 K DEM at 1 km resolution. Further adjustments were made to eliminate some of the unrealistic rapid slope changes in the STN-30p Hydrol. Earth Syst. 
Sci., 16, 1745-1773, 2012 www.hydrol-earth-syst-sci.net/16/1745/2012/ DEM along the global river network. Yamazaki et al., included a realistic sub-grid-scale topography for a more reasonnable representation of the river height loss. This inclusion could be considered as a possible improvement of the representation of the river bed slope in the TRIP model. The hydraulic radius is related to the stream water depth, h s (m), calculated from the stream water mass, S (kg), assuming a rectangular river cross-section (Arora and Boer, 1999): where W (kg m −3 ) is the water density, and W (m) the bankfull river width. The TRIP groundwater outflow (Eq. 1) is computed using the following simple linear relationship proposed by Arora and Boer : where (s) is an uniform and constant time delay factor of the groundwater reservoir which is fixed to 30 days. This groundwater reservoir does not represent the groundwater dynamics but only delays the groundwater flow contribution to the surface river reservoir within a particular grid cell: the deep drainage is fed into the surface reservoir with a time delay factor of. More details can be found in. Appendix B The ISBA-TRIP flood model As shown in Fig. 1, a simplified rectangular geometry is assumed in TRIP to represent the cross section between the floodplain and the river reservoirs in each grid cell. River flooding arises when the water height of the stream reservoir is higher than the critical bankfull height, h c (m), and the flood outflow and inflow from this reservoir (Eq. 1) are given by: where W f (m) is the floodplain width, and M f (kg) the potential inflow (positive M f ) or outflow (negative M f ). This outflow assumes an equilibrium state between the stream and the floodplain water depth: where L f (m) and h f (m) are the length along the river and the depth of the floodplains, h s (m) the water height of the stream reservoir, and h c (m) the critical bankfull river height. 
W + W f represents the distance covered by M f from the stream to the floodplains or conversely. v in and v out (m s −1 ) are the flood inflow and outflow velocities, respectively, computed using the Manning's formula: v in,out = s 1/2 in,out where n f is the Manning roughness coefficient for the floodplains that varies according to the vegetation type, while s in,out (m m −1 ) and R in,out (m) are the inflow (or outflow) slope and hydraulic radius, respectively, at the interface between the floodplain and the river stream. The flood inflow and outflow velocities computed using the Manning's formula require the hydrological slope between the floodplain and the river stream: They also require the hydraulic radius assumed rectangular and calculated as follows: where W f (m), L f (m) and h f (m) are the width, the length and the depth (respectively) of the floodplains, h s (m) the water height of the stream reservoir, h c (m) the critical height of the river bed, and W (m) the stream river width. The hf is calculated in each grid-cell with the help of the actual distribution of the local height, h i (m), determined at a 1 km by 1 km resolution. The assumption is that each pixel, i, represents a subbasin into a given grid-cell that can be potentially flooded. Each subbasin has a triangular form and is associated with a fraction, f i, of the grid cell area, A. The h i is computed using the local slope, i ( ) and flow direction data given by the HYDRO1 K dataset (Verdin and Greenlee, 1996): where l (m) is the characteristic length of one pixel equal to 1000 m, and i is equal to 1 if the local flow direction is north, south, east, or west, and to 2 elsewhere. 
Therefore, for each hi a potential mass of flood, V (h i ) (kg), can be simply calculated using a discrete equation: The sub-grid distributions of the flooded fraction and the flood depth allow to determine f flood, and h f at each time step and in each grid-cell via the comparison between the water mass into the floodplain reservoir, F, computed by TRIP (Eq. 4) and the sub-grid distribution of this potential mass V (h i ): When f flood is known within the grid cell, W f and L f are simply calculated as follow: where r is the meandering ratio fixed to 1.4 as recommended by Oki and Sud. Finally, the precipitation interception by the floodplains, P f, the re-infiltration, I f, and the direct free water surface evaporation, E f, (Eq. 1) are estimated by ISBA. I f occurs if the flooded fraction, f flood, calculated according to the subgrid topography, is superior to the soil saturated fraction, f sat, and depends on the soil maximum infiltration capacity. In other words, the floodplains cannot infiltrate the fraction of the grid-cell for which the soil is saturated. To a first approximation, it allows to simply represent the fact that the actual floodplains evolve according to the presence of shallow aquifer and water table depth variations. More details can be found in Decharme et al.. |
// MarshalJSON marshals the struct by converting it to a map
func (myOperatingSystemType OperatingSystemType) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]interface{}{
"ID": myOperatingSystemType.IDvar,
"Match": myOperatingSystemType.Matchvar,
"Priority": myOperatingSystemType.Priorityvar,
"Type": myOperatingSystemType.Typevar,
})
} |
###############################################################################
# Language Modeling on Penn Tree Bank
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import os, sys
import time
import math
import pickle
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import data
from nucleus_sampling import top_k_top_p_filtering
parser = argparse.ArgumentParser(description='PyTorch PTB Language Model')

# Model parameters.
parser.add_argument('--checkpoint', type=str, default='./model.pt',
                    help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
                    help='output file for generated text')
# Fix: the default was the string '1000' for a type=int option; use a real int
# so the default matches the declared type without relying on argparse
# re-parsing string defaults.
parser.add_argument('--words', type=int, default=1000,
                    help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--cuda', action='store_true',
                    help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
                    help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
                    help='reporting interval')
parser.add_argument('--enable_unk', action='store_true',
                    help='allow generation of <unk> tokens')
parser.add_argument('--top_k', type=int, default=0,
                    help='top-k sampling')
parser.add_argument('--top_p', type=float, default=0.0,
                    help='top-p (nucleus) sampling')

# LM robustness
parser.add_argument('--ndistilstudents', type=int, default=0,
                    help='number state distillation students per layer')
parser.add_argument('--distillossw', type=float, default=1.0,
                    help='student distillation loss weight')
parser.add_argument('--no_average_ensemble', action='store_true',
                    help='disable average ensemble, use only master')

# LM robusness (RND)
parser.add_argument('--rnd_scaling_coefficient', type=float, default=-1.0,
                    help='scaling coefficient in RND')
parser.add_argument('--rnd_enable', action='store_true',
                    help='enable RND model')
parser.add_argument('--rnd_pre_apply', action='store_true',
                    help='apply the OOD state scaling before each RNN update rather than after')

args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)

if args.temperature < 1e-3:
    parser.error("--temperature has to be greater or equal 1e-3")

# The vocabulary pickle is stored next to the checkpoint file.
vocab_path = os.path.join(os.path.dirname(args.checkpoint), 'vocab.pickle')
with open(vocab_path, 'rb') as vocab_file:
    vocab = pickle.load(vocab_file)
ntokens = len(vocab)

with open(args.checkpoint, 'rb') as f:
    model = torch.load(f)
model.eval()

# Propagate the RND (random network distillation) runtime options onto the
# loaded model; scaling_coefficient is stored as a tensor on each RND model.
model.rnd_pre_apply = args.rnd_pre_apply
if args.rnd_enable:
    for rnd_model in model.rnd_models:
        rnd_model.scaling_coefficient = torch.scalar_tensor(args.rnd_scaling_coefficient)

if args.cuda:
    model.cuda()
    parallel_model = nn.DataParallel(model, dim=1)
else:
    model.cpu()
    # Bug fix: the generation loop always calls `parallel_model`, but it was
    # previously assigned only in the CUDA branch, so CPU runs crashed with a
    # NameError. On CPU the model itself serves as the "parallel" model.
    parallel_model = model

hidden = model.init_hidden(1)
# Seed generation with one random word id. Variable is a no-op wrapper on
# modern PyTorch (deprecated since 0.4) but is kept for compatibility with
# the rest of this script, which accesses `input.data`.
input = Variable(torch.rand(1, 1).mul(ntokens).long())
if args.cuda:
    input.data = input.data.cuda()
with open(args.outf, 'w') as outf:
    with torch.no_grad():  # no history tracking needed during sampling
        # Fix: np.float was removed in NumPy 1.24; the builtin float is the
        # documented replacement and produces the same -inf value.
        ninf = torch.scalar_tensor(float('-inf'))
        for i in range(args.words):
            output, hidden = parallel_model(*hidden, input=input, return_prob=False,
                                            average_ensemble=not args.no_average_ensemble,
                                            enable_rnd_tune=args.rnd_enable)
            # Temperature-scaled logits for the next-word distribution.
            word_weights_logits = output.squeeze().data.div(args.temperature)
            if not args.enable_unk:
                # Suppress <unk> (index 0) by forcing its logit to -inf.
                word_weights_logits[0] = ninf
            word_weights_logits = top_k_top_p_filtering(word_weights_logits, args.top_k, args.top_p)
            word_weights = word_weights_logits.exp()
            word_weights = word_weights.cpu()
            # Sample the next word and feed it back as the next input.
            word_idx = torch.multinomial(word_weights, 1)[0]
            input.data.fill_(word_idx)
            word = vocab.idx2word[word_idx]
            if word == '<eos>':
                outf.write('\n')
            else:
                outf.write(word + ' ')
            #outf.write(word + ('\n' if i % 20 == 19 else ' '))
            if i % args.log_interval == 0:
                print('| Generated {}/{} words'.format(i, args.words))
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.servicefabric.models;
import com.azure.core.util.ExpandableStringEnum;
import com.fasterxml.jackson.annotation.JsonCreator;
import java.util.Collection;
/**
 * Expandable string enumeration of the rolling upgrade modes accepted by the
 * Service Fabric resource provider. Unknown names are not rejected: they are
 * materialised on demand by {@link #fromString(String)}.
 */
public final class RollingUpgradeMode extends ExpandableStringEnum<RollingUpgradeMode> {
    /** Constant holding the string value "Invalid". */
    public static final RollingUpgradeMode INVALID = fromString("Invalid");

    /** Constant holding the string value "UnmonitoredAuto". */
    public static final RollingUpgradeMode UNMONITORED_AUTO = fromString("UnmonitoredAuto");

    /** Constant holding the string value "UnmonitoredManual". */
    public static final RollingUpgradeMode UNMONITORED_MANUAL = fromString("UnmonitoredManual");

    /** Constant holding the string value "Monitored". */
    public static final RollingUpgradeMode MONITORED = fromString("Monitored");

    /**
     * Looks up, or lazily creates, the RollingUpgradeMode backing the given name.
     *
     * @param name a name to look for.
     * @return the corresponding RollingUpgradeMode.
     */
    @JsonCreator
    public static RollingUpgradeMode fromString(String name) {
        return fromString(name, RollingUpgradeMode.class);
    }

    /**
     * Lists every RollingUpgradeMode value known to this process.
     *
     * @return known RollingUpgradeMode values.
     */
    public static Collection<RollingUpgradeMode> values() {
        return values(RollingUpgradeMode.class);
    }
}
|
<filename>examples/cluster/plot_cluster_iris.py<gh_stars>1000+
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
# Code source: <NAME>
# Modified for documentation by <NAME>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
# Though the following import is not directly being used, it is required
# for 3D projection to work
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)

iris = datasets.load_iris()
X = iris.data
y = iris.target

# Three fits to compare: 8 clusters, 3 clusters, and 3 clusters with a single
# random initialization (n_init=1) to illustrate a bad start.
estimators = [
    ("k_means_iris_8", KMeans(n_clusters=8)),
    ("k_means_iris_3", KMeans(n_clusters=3)),
    ("k_means_iris_bad_init", KMeans(n_clusters=3, n_init=1, init="random")),
]

fignum = 1
titles = ["8 clusters", "3 clusters", "3 clusters, bad initialization"]
for name, est in estimators:
    fig = plt.figure(fignum, figsize=(4, 3))
    # Fix: constructing Axes3D(fig, ...) directly no longer attaches the axes
    # to the figure (auto_add_to_figure was removed in matplotlib 3.7), which
    # left the figures blank; create the axes through the figure instead.
    ax = fig.add_subplot(111, projection="3d", elev=48, azim=134)
    est.fit(X)
    labels = est.labels_

    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(float), edgecolor="k")

    # Fix: the w_{x,y,z}axis aliases were removed in matplotlib 3.8; the plain
    # xaxis/yaxis/zaxis attributes exist on all supported versions.
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    ax.zaxis.set_ticklabels([])
    ax.set_xlabel("Petal width")
    ax.set_ylabel("Sepal length")
    ax.set_zlabel("Petal length")
    ax.set_title(titles[fignum - 1])
    # Note: `ax.dist = 12` was dropped: the Axes3D.dist property was removed
    # in matplotlib 3.8 and assigning it has no effect there.
    fignum = fignum + 1

# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
ax = fig.add_subplot(111, projection="3d", elev=48, azim=134)

for name, label in [("Setosa", 0), ("Versicolour", 1), ("Virginica", 2)]:
    ax.text3D(
        X[y == label, 3].mean(),
        X[y == label, 0].mean(),
        X[y == label, 2].mean() + 2,
        name,
        horizontalalignment="center",
        bbox=dict(alpha=0.2, edgecolor="w", facecolor="w"),
    )
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y, edgecolor="k")

ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.zaxis.set_ticklabels([])
ax.set_xlabel("Petal width")
ax.set_ylabel("Sepal length")
ax.set_zlabel("Petal length")
ax.set_title("Ground Truth")

fig.show()
THE NEURODEVELOPMENTAL (ND) OUTCOME OF THE EXTREMELY LOW BIRTHWEIGHT INFANT(ELBW) REMAINS PRECARIOUS IN THE 1990's. 1633 With improved survival of the ELBW (BW≤1000g, GA≤29wks) there is a need to learn more about these infants' health, growth and ND outcome. 121 ELBW born between Jan 1992 and Dec 1994, discharged from the NICU were enrolled for follow-up; 15 infants (12%) missed the appointments and 1 (0.8%) died (SIDS). According to protocol our study group (N=105) was monitored neurologically, developmentally, and for growth, health and ROP. At 6 mos. they were evaluated with the Gesell Schedule and at 1 yr with the Bayley Scale of Infant Development. All ages were corrected for prematurity. Although this is an ongoing project we are reporting our first-year findings. The mean BW was 782g (sd 156, range 440-1000); 30% were SGA; mean GA was 27 wks (sd 2.1, range 22-33); 53% female; 86% Hispanic. For analysis the study infants were divided into three groups according to BW.
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.example.camel.transport;
import java.net.MalformedURLException;
import org.apache.camel.test.AvailablePortFinder;
import org.apache.hello_world_soap_http.Greeter;
import org.apache.hello_world_soap_http.PingMeFault;
import org.apache.hello_world_soap_http.types.FaultDetail;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.springframework.context.support.AbstractApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.fail;
/**
 * Integration test that boots the Camel transport Spring context once for the
 * whole test class and exercises the CXF {@code Greeter} client against it.
 */
class CamelTransportClientServerTest {

    /** Spring context hosting the Camel endpoints; remains null when startup is skipped. */
    static AbstractApplicationContext context;
    /** Dynamically chosen HTTP port the test server listens on. */
    static int port;

    @BeforeAll
    public static void startUpServer() throws Exception {
        // Allow environments to skip booting the server via -DskipStartingCamelContext=true.
        if (!"true".equalsIgnoreCase(System.getProperty("skipStartingCamelContext"))) {
            port = AvailablePortFinder.getNextAvailable();
            // The Spring XML configuration reads this system property to bind the endpoint port.
            System.setProperty("port", String.valueOf(port));
            context = new ClassPathXmlApplicationContext("/META-INF/spring/CamelTransportSpringConfig.xml");
        } else {
            System.out.println("Skipping starting CamelContext as system property skipStartingCamelContext is set to be true.");
        }
    }

    @AfterAll
    public static void shutDownServer() {
        if (context != null) {
            // close() rather than stop(): close() also destroys beans and releases
            // all resources held by the context, which stop() does not.
            context.close();
        }
    }

    /**
     * Drives the Greeter proxy through the round-robin endpoints and verifies
     * both normal responses and the declared {@link PingMeFault} SOAP fault.
     */
    @Test
    void testClientInvocation() throws MalformedURLException {
        Client client = new Client("http://localhost:" + port + "/GreeterContext/GreeterPort");
        // Named 'greeter' (not 'port') to avoid shadowing the static int 'port' field.
        Greeter greeter = client.getProxy();
        assertNotNull(greeter, "The proxy should not be null");

        // Successive calls are answered alternately by EndpointA and EndpointB.
        String resp = greeter.sayHi();
        assertEquals("Bonjour from EndpointA", resp, "Get a wrong response ");
        resp = greeter.sayHi();
        assertEquals("Bonjour from EndpointB", resp, "Get a wrong response ");

        resp = greeter.greetMe("Mike");
        assertEquals("Hello Mike from EndpointA", resp, "Get a wrong response ");
        resp = greeter.greetMe("James");
        assertEquals("Hello James from EndpointB", resp, "Get a wrong response ");

        // One-way invocation: no response expected, the call just must not throw.
        greeter.greetMeOneWay(System.getProperty("user.name"));

        // pingMe is expected to raise the declared SOAP fault with fixed detail codes.
        try {
            greeter.pingMe("hello");
            fail("exception expected but none thrown");
        } catch (PingMeFault ex) {
            assertEquals("PingMeFault raised by server EndpointB", ex.getMessage(), "Wrong exception message received");
            FaultDetail detail = ex.getFaultInfo();
            assertEquals(2, detail.getMajor(), "Wrong FaultDetail major:");
            assertEquals(1, detail.getMinor(), "Wrong FaultDetail minor:");
        }
    }
}
|
Yields on 10-year Portuguese bonds jumped to 6.9pc, replicating the pattern seen in Greece and Ireland just before they capitulated and turned to the EU and the International Monetary Fund.
Spreads on 10-year Spanish bonds rose to a post-EMU record of 233 basis points over Bunds, pushing the yield to 4.87pc. Spain's central bank governor, Miguel Angel Fernandez Ordonez, said the contagion had spread rapidly to the eurozone periphery and "made itself felt" in the Spanish debt markets. He called on Madrid to accelerate fiscal reforms to persuade the markets the country really means to put its house in order.
"Spain is a bit too big to be bailed out," said Antonio Garcia Pascual, of Barclays Capital. "The size of rescue required would use up all the funds available and then you have Italy with contagion as well."
Saxo Bank said the EU's €440bn (£370bn) bail-out fund would lose its AAA credit rating if Spain needed serious help. Germany and France would have to put up fresh money, creating a political storm.
German Chancellor Angela Merkel admitted on Tuesday that the eurozone was "facing an exceptionally serious situation". She brushed aside criticism that German insistence on bondholder "haircuts" from 2013 was fuelling the crisis. "I will not let up on this because the primacy of politics over markets must be enforced," she said.
Dutch finance minister Jan Kees de Jager sent a further chill through markets, saying "holders of subordinated bonds in Irish banks will have to bleed" under the Irish rescue. The comment touched a neuralgic nerve, heightening fears that investors may be treated harshly under the bail-out terms for any other country needing a rescue.
Bank of Ireland shares crashed 23pc and Allied Irish Bank's fell 19pc on fears that shareholders will be wiped out. Ominously, there was a sharp sell-off of Spain's two top banks, with Santander down 4.7pc and BBVA down 3.9pc.
Markets, further unsettled by the tensions in Korea, fell around the world. The FTSE 100 closed down 1.75pc at 5581.28. In Spain the Ibex index fell 3pc, while in France the CAC lost 2.5pc.
"The Irish rescue has done absolutely nothing to calm the markets: it has done the opposite," said Elizabeth Afseth, a bond expert at Evolution. "It is dangerous to talk about creditor losses. Investors will be very wary of lending to Portugal. It looks as if Europe is going to push this to the edge of the cliff."
EU president Herman Van Rompuy denied that Lisbon needs a lifeline, insisting that Portugal's banks are well capitalised and do not face property losses. "Portugal does not need any help – it is in a very different situation to Ireland," he said.
However, Portuguese banks have been shut out of the capital markets. The country's total debt level is one of the world's highest, at 325pc of GDP, and it has a current account deficit of 10pc – which requires a flow of external funding.
The euro fell to a two-month low of $1.3380 against the dollar, in part fed by fears of paralysis in Ireland as the crumbling coalition unveils a four-year austerity drive and tries to push through budget cuts before an election next year. Opposition parties said premier Brian Cowen no longer had the authority to act for the nation, while rebels in his own Fianna Fail party demanded his resignation. |
We’re not exactly sure when this goes live, but you already know it’s absurdly promising!
Here’s even better news from these dedicated wizards. This time up, we’re looking at a brilliant Terminator board game!
Space Goat Productions has had an absolutely amazing year. From a myriad of brilliant, nostalgia dipped Evil Dead 2 comics, to apparel, to board games, stickers and more, it’s clear as day that Space Goat plans on delivering top notch goods for years to come.
From the Official Press Release:
Space Goat Productions announced today that they will be partnering with Studiocanal, Oak Productions, and Creative Licensing to make The Terminator: The Official Board Game™, which will be launching on Kickstarter in February of 2017.
William Wisher, one of the screenwriters of the original 1984 film, has been brought on as a story consultant. “I’m very excited to be consulting with the creative team at Space Goat to bring The Terminator™ board game to life!” said William Wisher. “I’ve missed my old friends Sarah Connor, Kyle Reese, and the T-800. It will be fun to spend time with them again!”
“We’re bringing 1984 back. The only movie we care about is the original. We love retro. We want to explore the beginning.” Space Goat President Shon Bury said. “Which is why for this game, we’re focusing on what hardcore Terminator fans love about the original movie.”
The board game is currently under production. The Terminator™: The Official Board Game is an asymmetrical strategy game played across two boards – one in 1984 and one in 2029. One player takes control of all the forces of the machines: Hunter Killer flying machines, Terminator endoskeletons, and new robots based on the classic 80s aesthetic. The rest of the players take the role of the human resistance, struggling against the impossible odds of the machine uprising.
“At its robotic heart, The Terminator™ is a raw and intense chase scene,” Evil Dead 2 Board Game Designer Taylor Smith said. “Combine that with a reactive time travel mechanic and you get a super fun mash-up of action and strategy.”
For more information on the Kickstarter and to be signed up for email updates visit www.terminatorboardgame.com. If you are a news outlet interested in getting a print and play version prior to the Kickstarter launching, email [email protected]
<reponame>android-xiao-jun/android-chat<filename>client/src/main/java/cn/wildfirechat/remote/OnFriendUpdateListener.java<gh_stars>1-10
/*
* Copyright (c) 2020 WildFireChat. All rights reserved.
*/
package cn.wildfirechat.remote;
import java.util.List;
public interface OnFriendUpdateListener {
/**
 * Invoked when the friend list has been updated.
 * @param updateFriendList contains only the friends whose data changed, not the
 *                         friends that were deleted; therefore, on this callback,
 *                         call {@link ChatManager#getMyFriendList(boolean)} to
 *                         obtain the complete, current friend list
 */
void onFriendListUpdate(List<String> updateFriendList);
/**
 * Invoked when the friend-request list has been updated.
 * @param newRequests contains only the newly received friend requests; therefore,
 *                    on this callback, call {@link ChatManager#getFriendRequest(boolean)}
 *                    to obtain the complete, current friend-request list
 */
void onFriendRequestUpdate(List<String> newRequests);
}
|
<filename>src/clldutils/licenses.py
import pathlib
import attr
_LICENSES = {
"Glide": {
"name": "3dfx Glide License",
"url": "http://www.users.on.net/~triforce/glidexp/COPYING.txt",
},
"Abstyles": {
"name": "Abstyles License",
"url": "https://fedoraproject.org/wiki/Licensing/Abstyles",
},
"AFL-1.1": {
"name": "Academic Free License v1.1",
"url": "http://opensource.linux-mirror.org/licenses/afl-1.1.txt",
},
"AFL-1.2": {
"name": "Academic Free License v1.2",
"url": "http://opensource.linux-mirror.org/licenses/afl-1.2.txt",
},
"AFL-2.0": {
"name": "Academic Free License v2.0",
"url": "http://opensource.linux-mirror.org/licenses/afl-2.0.txt",
},
"AFL-2.1": {
"name": "Academic Free License v2.1",
"url": "http://opensource.linux-mirror.org/licenses/afl-2.1.txt",
},
"AFL-3.0": {
"name": "Academic Free License v3.0",
"url": "http://www.opensource.org/licenses/afl-3.0",
},
"AMPAS": {
"name": "Academy of Motion Picture Arts and Sciences BSD",
"url": "https://fedoraproject.org/wiki/Licensing/BSD#AMPASBSD",
},
"APL-1.0": {
"name": "Adaptive Public License 1.0",
"url": "http://www.opensource.org/licenses/APL-1.0",
},
"Adobe-Glyph": {
"name": "Adobe Glyph List License",
"url": "https://fedoraproject.org/wiki/Licensing/MIT#AdobeGlyph",
},
"APAFML": {
"name": "Adobe Postscript AFM License",
"url": "https://fedoraproject.org/wiki/Licensing/AdobePostscriptAFM",
},
"Adobe-2006": {
"name": "Adobe Systems Incorporated Source Code License Agreement",
"url": "https://fedoraproject.org/wiki/Licensing/AdobeLicense",
},
"AGPL-1.0": {
"name": "Affero General Public License v1.0",
"url": "http://www.affero.org/oagpl.html",
},
"Afmparse": {
"name": "Afmparse License",
"url": "https://fedoraproject.org/wiki/Licensing/Afmparse",
},
"Aladdin": {
"name": "Aladdin Free Public License",
"url": "http://pages.cs.wisc.edu/~ghost/doc/AFPL/6.01/Public.htm",
},
"ADSL": {
"name": "Amazon Digital Services License",
"url": "https://fedoraproject.org/wiki/Licensing/AmazonDigitalServicesLicense",
},
"AMDPLPA": {
"name": "AMD's plpa_map.c License",
"url": "https://fedoraproject.org/wiki/Licensing/AMD_plpa_map_License",
},
"ANTLR-PD": {
"name": "ANTLR Software Rights Notice",
"url": "http://www.antlr2.org/license.html",
},
"Apache-1.0": {
"name": "Apache License 1.0",
"url": "http://www.apache.org/licenses/LICENSE-1.0",
},
"Apache-1.1": {
"name": "Apache License 1.1",
"url": "http://apache.org/licenses/LICENSE-1.1",
},
"Apache-2.0": {
"name": "Apache License 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0",
},
"AML": {
"name": "Apple MIT License",
"url": "https://fedoraproject.org/wiki/Licensing/Apple_MIT_License",
},
"APSL-1.0": {
"name": "Apple Public Source License 1.0",
"url": "https://fedoraproject.org/wiki/Licensing/Apple_Public_Source_License_1.0",
},
"APSL-1.2": {
"name": "Apple Public Source License 1.2",
"url": "http://www.samurajdata.se/opensource/mirror/licenses/apsl.php",
},
"APSL-2.0": {
"name": "Apple Public Source License 2.0",
"url": "http://www.opensource.apple.com/license/apsl/",
},
"Artistic-1.0": {
"name": "Artistic License 1.0",
"url": "http://opensource.org/licenses/Artistic-1.0",
},
"Artistic-1.0-Perl": {
"name": "Artistic License 1.0 (Perl)",
"url": "http://dev.perl.org/licenses/artistic.html",
},
"Artistic-1.0-cl8": {
"name": "Artistic License 1.0 w/clause 8",
"url": "http://opensource.org/licenses/Artistic-1.0",
},
"Artistic-2.0": {
"name": "Artistic License 2.0",
"url": "http://www.opensource.org/licenses/artistic-license-2.0",
},
"AAL": {
"name": "Attribution Assurance License",
"url": "http://www.opensource.org/licenses/attribution",
},
"Bahyph": {
"name": "Bahyph License",
"url": "https://fedoraproject.org/wiki/Licensing/Bahyph",
},
"Barr": {
"name": "Barr License",
"url": "https://fedoraproject.org/wiki/Licensing/Barr",
},
"Beerware": {
"name": "Beerware License",
"url": "https://fedoraproject.org/wiki/Licensing/Beerware",
},
"BitTorrent-1.1": {
"name": "BitTorrent Open Source License v1.1",
"url": "http://directory.fsf.org/wiki/License:BitTorrentOSL1.1",
},
"BSL-1.0": {
"name": "Boost Software License 1.0",
"url": "http://www.boost.org/LICENSE_1_0.txt",
},
"Borceux": {
"name": "Borceux license",
"url": "https://fedoraproject.org/wiki/Licensing/Borceux",
},
"BSD-2-Clause": {
"name": "BSD 2-clause \"Simplified\" License",
"url": "http://www.opensource.org/licenses/BSD-2-Clause",
},
"BSD-2-Clause-FreeBSD": {
"name": "BSD 2-clause FreeBSD License",
"url": "http://www.freebsd.org/copyright/freebsd-license.html",
},
"BSD-2-Clause-NetBSD": {
"name": "BSD 2-clause NetBSD License",
"url": "http://www.netbsd.org/about/redistribution.html#default",
},
"BSD-3-Clause": {
"name": "BSD 3-clause \"New\" or \"Revised\" License",
"url": "http://www.opensource.org/licenses/BSD-3-Clause",
},
"BSD-3-Clause-Clear": {
"name": "BSD 3-clause Clear License",
"url": "http://labs.metacarta.com/license-explanation.html#license",
},
"BSD-4-Clause": {
"name": "BSD 4-clause \"Original\" or \"Old\" License",
"url": "http://directory.fsf.org/wiki/License:BSD_4Clause",
},
"BSD-Protection": {
"name": "BSD Protection License",
"url": "https://fedoraproject.org/wiki/Licensing/BSD_Protection_License",
},
"BSD-3-Clause-Attribution": {
"name": "BSD with attribution",
"url": "https://fedoraproject.org/wiki/Licensing/BSD_with_Attribution",
},
"0BSD": {
"name": "BSD Zero Clause License",
"url": "http://landley.net/toybox/license.html ",
},
"BSD-4-Clause-UC": {
"name": "BSD-4-Clause (University of California-Specific)",
"url": "http://www.freebsd.org/copyright/license.html",
},
"bzip2-1.0.5": {
"name": "bzip2 and libbzip2 License v1.0.5",
"url": "http://bzip.org/1.0.5/bzip2-manual-1.0.5.html",
},
"bzip2-1.0.6": {
"name": "bzip2 and libbzip2 License v1.0.6",
"url": "https://github.com/asimonov-im/bzip2/blob/master/LICENSE",
},
"Caldera": {
"name": "Caldera License",
"url": "http://www.lemis.com/grog/UNIX/ancient-source-all.pdf",
},
"CECILL-1.0": {
"name": "CeCILL Free Software License Agreement v1.0",
"url": "http://www.cecill.info/licences/Licence_CeCILL_V1-fr.html",
},
"CECILL-1.1": {
"name": "CeCILL Free Software License Agreement v1.1",
"url": "http://www.cecill.info/licences/Licence_CeCILL_V1.1-US.html",
},
"CECILL-2.0": {
"name": "CeCILL Free Software License Agreement v2.0",
"url": "http://www.cecill.info/licences/Licence_CeCILL_V2-fr.html",
},
"CECILL-2.1": {
"name": "CeCILL Free Software License Agreement v2.1",
"url": "http://opensource.org/licenses/CECILL-2.1",
},
"CECILL-B": {
"name": "CeCILL-B Free Software License Agreement",
"url": "http://www.cecill.info/licences/Licence_CeCILL-B_V1-fr.html",
},
"CECILL-C": {
"name": "CeCILL-C Free Software License Agreement",
"url": "http://www.cecill.info/licences/Licence_CeCILL-C_V1-fr.html",
},
"ClArtistic": {
"name": "Clarified Artistic License",
"url": "http://www.ncftp.com/ncftp/doc/LICENSE.txt",
},
"MIT-CMU": {
"name": "CMU License",
"url": "https://fedoraproject.org/wiki/Licensing:MIT?rd=Licensing/MIT#CMU_Style",
},
"CNRI-Jython": {
"name": "CNRI Jython License",
"url": "http://www.jython.org/license.html",
},
"CNRI-Python": {
"name": "CNRI Python License",
"url": "http://www.opensource.org/licenses/CNRI-Python",
},
"CNRI-Python-GPL-Compatible": {
"name": "CNRI Python Open Source GPL Compatible License Agreement",
"url": "http://www.python.org/download/releases/1.6.1/download_win/",
},
"CPOL-1.02": {
"name": "Code Project Open License 1.02",
"url": "http://www.codeproject.com/info/cpol10.aspx",
},
"CDDL-1.0": {
"name": "Common Development and Distribution License 1.0",
"url": "http://www.opensource.org/licenses/cddl1",
},
"CDDL-1.1": {
"name": "Common Development and Distribution License 1.1",
"url": "http://glassfish.java.net/public/CDDL+GPL_1_1.html",
},
"CPAL-1.0": {
"name": "Common Public Attribution License 1.0",
"url": "http://www.opensource.org/licenses/CPAL-1.0",
},
"CPL-1.0": {
"name": "Common Public License 1.0",
"url": "http://opensource.org/licenses/CPL-1.0",
},
"CATOSL-1.1": {
"name": "Computer Associates Trusted Open Source License 1.1",
"url": "http://opensource.org/licenses/CATOSL-1.1",
},
"Condor-1.1": {
"name": "Condor Public License v1.1",
"url": "http://research.cs.wisc.edu/condor/license.html#condor",
},
"CC-BY-1.0": {
"name": "Creative Commons Attribution 1.0",
"url": "https://creativecommons.org/licenses/by/1.0/",
},
"CC-BY-2.0": {
"name": "Creative Commons Attribution 2.0",
"url": "https://creativecommons.org/licenses/by/2.0/",
},
"CC-BY-2.5": {
"name": "Creative Commons Attribution 2.5",
"url": "https://creativecommons.org/licenses/by/2.5/",
},
"CC-BY-3.0": {
"name": "Creative Commons Attribution 3.0",
"url": "https://creativecommons.org/licenses/by/3.0/",
},
"CC-BY-4.0": {
"name": "Creative Commons Attribution 4.0",
"url": "https://creativecommons.org/licenses/by/4.0/",
},
"CC-BY-ND-1.0": {
"name": "Creative Commons Attribution No Derivatives 1.0",
"url": "https://creativecommons.org/licenses/by-nd/1.0/",
},
"CC-BY-ND-2.0": {
"name": "Creative Commons Attribution No Derivatives 2.0",
"url": "https://creativecommons.org/licenses/by-nd/2.0/",
},
"CC-BY-ND-2.5": {
"name": "Creative Commons Attribution No Derivatives 2.5",
"url": "https://creativecommons.org/licenses/by-nd/2.5/",
},
"CC-BY-ND-3.0": {
"name": "Creative Commons Attribution No Derivatives 3.0",
"url": "https://creativecommons.org/licenses/by-nd/3.0/",
},
"CC-BY-ND-4.0": {
"name": "Creative Commons Attribution No Derivatives 4.0",
"url": "https://creativecommons.org/licenses/by-nd/4.0/",
},
"CC-BY-NC-1.0": {
"name": "Creative Commons Attribution Non Commercial 1.0",
"url": "https://creativecommons.org/licenses/by-nc/1.0/",
},
"CC-BY-NC-2.0": {
"name": "Creative Commons Attribution Non Commercial 2.0",
"url": "https://creativecommons.org/licenses/by-nc/2.0/",
},
"CC-BY-NC-2.5": {
"name": "Creative Commons Attribution Non Commercial 2.5",
"url": "https://creativecommons.org/licenses/by-nc/2.5/",
},
"CC-BY-NC-3.0": {
"name": "Creative Commons Attribution Non Commercial 3.0",
"url": "https://creativecommons.org/licenses/by-nc/3.0/",
},
"CC-BY-NC-4.0": {
"name": "Creative Commons Attribution Non Commercial 4.0",
"url": "https://creativecommons.org/licenses/by-nc/4.0/",
},
"CC-BY-NC-ND-1.0": {
"name": "Creative Commons Attribution Non Commercial No Derivatives 1.0",
"url": "https://creativecommons.org/licenses/by-nd-nc/1.0/",
},
"CC-BY-NC-ND-2.0": {
"name": "Creative Commons Attribution Non Commercial No Derivatives 2.0",
"url": "https://creativecommons.org/licenses/by-nc-nd/2.0/",
},
"CC-BY-NC-ND-2.5": {
"name": "Creative Commons Attribution Non Commercial No Derivatives 2.5",
"url": "https://creativecommons.org/licenses/by-nc-nd/2.5/",
},
"CC-BY-NC-ND-3.0": {
"name": "Creative Commons Attribution Non Commercial No Derivatives 3.0",
"url": "https://creativecommons.org/licenses/by-nc-nd/3.0/",
},
"CC-BY-NC-ND-4.0": {
"name": "Creative Commons Attribution Non Commercial No Derivatives 4.0",
"url": "https://creativecommons.org/licenses/by-nc-nd/4.0/",
},
"CC-BY-NC-SA-1.0": {
"name": "Creative Commons Attribution Non Commercial Share Alike 1.0",
"url": "https://creativecommons.org/licenses/by-nc-sa/1.0/",
},
"CC-BY-NC-SA-2.0": {
"name": "Creative Commons Attribution Non Commercial Share Alike 2.0",
"url": "https://creativecommons.org/licenses/by-nc-sa/2.0/",
},
"CC-BY-NC-SA-2.5": {
"name": "Creative Commons Attribution Non Commercial Share Alike 2.5",
"url": "https://creativecommons.org/licenses/by-nc-sa/2.5/",
},
"CC-BY-NC-SA-3.0": {
"name": "Creative Commons Attribution Non Commercial Share Alike 3.0",
"url": "https://creativecommons.org/licenses/by-nc-sa/3.0/",
},
"CC-BY-NC-SA-4.0": {
"name": "Creative Commons Attribution Non Commercial Share Alike 4.0",
"url": "https://creativecommons.org/licenses/by-nc-sa/4.0/",
},
"CC-BY-SA-1.0": {
"name": "Creative Commons Attribution Share Alike 1.0",
"url": "https://creativecommons.org/licenses/by-sa/1.0/",
},
"CC-BY-SA-2.0": {
"name": "Creative Commons Attribution Share Alike 2.0",
"url": "https://creativecommons.org/licenses/by-sa/2.0/",
},
"CC-BY-SA-2.5": {
"name": "Creative Commons Attribution Share Alike 2.5",
"url": "https://creativecommons.org/licenses/by-sa/2.5/",
},
"CC-BY-SA-3.0": {
"name": "Creative Commons Attribution Share Alike 3.0",
"url": "https://creativecommons.org/licenses/by-sa/3.0/",
},
"CC-BY-SA-4.0": {
"name": "Creative Commons Attribution Share Alike 4.0",
"url": "https://creativecommons.org/licenses/by-sa/4.0/",
},
"CC0-1.0": {
"name": "Creative Commons Zero v1.0 Universal",
"url": "https://creativecommons.org/publicdomain/zero/1.0/",
},
"Crossword": {
"name": "Crossword License",
"url": "https://fedoraproject.org/wiki/Licensing/Crossword",
},
"CUA-OPL-1.0": {
"name": "CUA Office Public License v1.0",
"url": "http://opensource.org/licenses/CUA-OPL-1.0",
},
"Cube": {
"name": "Cube License",
"url": "https://fedoraproject.org/wiki/Licensing/Cube",
},
"D-FSL-1.0": {
"name": "Deutsche Freie Software Lizenz",
"url": "http://www.dipp.nrw.de/d-fsl/index_html/lizenzen/de/D-FSL-1_0_de.txt",
},
"diffmark": {
"name": "diffmark license",
"url": "https://fedoraproject.org/wiki/Licensing/diffmark",
},
"WTFPL": {
"name": "Do What The F*ck You Want To Public License",
"url": "http://sam.zoy.org/wtfpl/COPYING",
},
"DOC": {
"name": "DOC License",
"url": "http://www.cs.wustl.edu/~schmidt/ACE-copying.html",
},
"Dotseqn": {
"name": "Dotseqn License",
"url": "https://fedoraproject.org/wiki/Licensing/Dotseqn",
},
"DSDP": {
"name": "DSDP License",
"url": "https://fedoraproject.org/wiki/Licensing/DSDP",
},
"dvipdfm": {
"name": "dvipdfm License",
"url": "https://fedoraproject.org/wiki/Licensing/dvipdfm",
},
"EPL-1.0": {
"name": "Eclipse Public License 1.0",
"url": "http://www.opensource.org/licenses/EPL-1.0",
},
"ECL-1.0": {
"name": "Educational Community License v1.0",
"url": "http://opensource.org/licenses/ECL-1.0",
},
"ECL-2.0": {
"name": "Educational Community License v2.0",
"url": "http://opensource.org/licenses/ECL-2.0",
},
"EFL-1.0": {
"name": "Eiffel Forum License v1.0",
"url": "http://opensource.org/licenses/EFL-1.0",
},
"EFL-2.0": {
"name": "Eiffel Forum License v2.0",
"url": "http://opensource.org/licenses/EFL-2.0",
},
"MIT-advertising": {
"name": "Enlightenment License (e16)",
"url": "https://fedoraproject.org/wiki/Licensing/MIT_With_Advertising",
},
"MIT-enna": {
"name": "enna License",
"url": "https://fedoraproject.org/wiki/Licensing/MIT#enna",
},
"Entessa": {
"name": "Entessa Public License v1.0",
"url": "http://opensource.org/licenses/Entessa",
},
"ErlPL-1.1": {
"name": "Erlang Public License v1.1",
"url": "http://www.erlang.org/EPLICENSE",
},
"EUDatagrid": {
"name": "EU DataGrid Software License",
"url": "http://www.opensource.org/licenses/EUDatagrid",
},
"EUPL-1.0": {
"name": "European Union Public License 1.0",
"url": "http://ec.europa.eu/idabc/en/document/7330.html",
},
"EUPL-1.1": {
"name": "European Union Public License 1.1",
"url": "http://www.opensource.org/licenses/EUPL-1.1",
},
"Eurosym": {
"name": "Eurosym License",
"url": "https://fedoraproject.org/wiki/Licensing/Eurosym",
},
"Fair": {
"name": "Fair License",
"url": "http://www.opensource.org/licenses/Fair",
},
"MIT-feh": {
"name": "feh License",
"url": "https://fedoraproject.org/wiki/Licensing/MIT#feh",
},
"Frameworx-1.0": {
"name": "Frameworx Open License 1.0",
"url": "http://www.opensource.org/licenses/Frameworx-1.0",
},
"FreeImage": {
"name": "FreeImage Public License v1.0",
"url": "http://freeimage.sourceforge.net/freeimage-license.txt",
},
"FTL": {
"name": "Freetype Project License",
"url": "http://freetype.fis.uniroma2.it/FTL.TXT",
},
"FSFUL": {
"name": "FSF Unlimited License",
"url": "https://fedoraproject.org/wiki/Licensing/FSF_Unlimited_License",
},
"FSFULLR": {
"name": "FSF Unlimited License (with License Retention)",
"url": "https://fedoraproject.org/wiki/Licensing/FSF_Unlimited_License",
},
"Giftware": {
"name": "Giftware License",
"url": "http://alleg.sourceforge.net//license.html",
},
"GL2PS": {
"name": "GL2PS License",
"url": "http://www.geuz.org/gl2ps/COPYING.GL2PS",
},
"Glulxe": {
"name": "Glulxe License",
"url": "https://fedoraproject.org/wiki/Licensing/Glulxe",
},
"AGPL-3.0": {
"name": "GNU Affero General Public License v3.0",
"url": "http://www.gnu.org/licenses/agpl.txt",
},
"GFDL-1.1": {
"name": "GNU Free Documentation License v1.1",
"url": "http://www.gnu.org/licenses/old-licenses/fdl-1.1.txt",
},
"GFDL-1.2": {
"name": "GNU Free Documentation License v1.2",
"url": "http://www.gnu.org/licenses/old-licenses/fdl-1.2.txt",
},
"GFDL-1.3": {
"name": "GNU Free Documentation License v1.3",
"url": "http://www.gnu.org/licenses/fdl-1.3.txt",
},
"GPL-1.0": {
"name": "GNU General Public License v1.0 only",
"url": "http://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html",
},
"GPL-2.0": {
"name": "GNU General Public License v2.0 only",
"url": "http://www.opensource.org/licenses/GPL-2.0",
},
"GPL-3.0": {
"name": "GNU General Public License v3.0 only",
"url": "http://www.opensource.org/licenses/GPL-3.0",
},
"LGPL-2.1": {
"name": "GNU Lesser General Public License v2.1 only",
"url": "http://www.opensource.org/licenses/LGPL-2.1",
},
"LGPL-3.0": {
"name": "GNU Lesser General Public License v3.0 only",
"url": "http://www.opensource.org/licenses/LGPL-3.0",
},
"LGPL-2.0": {
"name": "GNU Library General Public License v2 only",
"url": "http://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html",
},
"gnuplot": {
"name": "gnuplot License",
"url": "https://fedoraproject.org/wiki/Licensing/Gnuplot",
},
"gSOAP-1.3b": {
"name": "gSOAP Public License v1.3b",
"url": "http://www.cs.fsu.edu/~engelen/license.html",
},
"HaskellReport": {
"name": "Haskell Language Report License",
"url": "https://fedoraproject.org/wiki/Licensing/Haskell_Language_Report_License",
},
"HPND": {
"name": "Historic Permission Notice and Disclaimer",
"url": "http://www.opensource.org/licenses/HPND",
},
"IPL-1.0": {
"name": "IBM Public License v1.0",
"url": "http://www.opensource.org/licenses/IPL-1.0",
},
"ICU": {
"name": "ICU License",
"url": "http://source.icu-project.org/repos/icu/icu/trunk/license.html",
},
"ImageMagick": {
"name": "ImageMagick License",
"url": "http://www.imagemagick.org/script/license.php",
},
"iMatix": {
"name": "iMatix Standard Function Library Agreement",
"url": "http://legacy.imatix.com/html/sfl/sfl4.htm#license",
},
"Imlib2": {
"name": "Imlib2 License",
"url": "http://trac.enlightenment.org/e/browser/trunk/imlib2/COPYING",
},
"IJG": {
"name": "Independent JPEG Group License",
"url": "http://dev.w3.org/cvsweb/Amaya/libjpeg/Attic/README?rev=1.2",
},
"Intel": {
"name": "Intel Open Source License",
"url": "http://opensource.org/licenses/Intel",
},
"IPA": {
"name": "IPA Font License",
"url": "http://www.opensource.org/licenses/IPA",
},
"JasPer-2.0": {
"name": "JasPer License",
"url": "http://www.ece.uvic.ca/~mdadams/jasper/LICENSE",
},
"JSON": {
"name": "JSON License",
"url": "http://www.json.org/license.html",
},
"LPPL-1.3a": {
"name": "LaTeX Project Public License 1.3a",
"url": "http://www.latex-project.org/lppl/lppl-1-3a.txt",
},
"LPPL-1.0": {
"name": "LaTeX Project Public License v1.0",
"url": "http://www.latex-project.org/lppl/lppl-1-0.txt",
},
"LPPL-1.1": {
"name": "LaTeX Project Public License v1.1",
"url": "http://www.latex-project.org/lppl/lppl-1-1.txt",
},
"LPPL-1.2": {
"name": "LaTeX Project Public License v1.2",
"url": "http://www.latex-project.org/lppl/lppl-1-2.txt",
},
"LPPL-1.3c": {
"name": "LaTeX Project Public License v1.3c",
"url": "http://www.opensource.org/licenses/LPPL-1.3c",
},
"Latex2e": {
"name": "Latex2e License",
"url": "https://fedoraproject.org/wiki/Licensing/Latex2e",
},
"BSD-3-Clause-LBNL": {
"name": "Lawrence Berkeley National Labs BSD variant license",
"url": "https://fedoraproject.org/wiki/Licensing/LBNLBSD",
},
"Leptonica": {
"name": "Leptonica License",
"url": "https://fedoraproject.org/wiki/Licensing/Leptonica",
},
"LGPLLR": {
"name": "Lesser General Public License For Linguistic Resources",
"url": "http://www-igm.univ-mlv.fr/~unitex/lgpllr.html",
},
"Libpng": {
"name": "libpng License",
"url": "http://www.libpng.org/pub/png/src/libpng-LICENSE.txt",
},
"libtiff": {
"name": "libtiff License",
"url": "https://fedoraproject.org/wiki/Licensing/libtiff",
},
"LPL-1.02": {
"name": "Lucent Public License v1.02",
"url": "http://www.opensource.org/licenses/LPL-1.02",
},
"LPL-1.0": {
"name": "Lucent Public License Version 1.0",
"url": "http://opensource.org/licenses/LPL-1.0",
},
"MakeIndex": {
"name": "MakeIndex License",
"url": "https://fedoraproject.org/wiki/Licensing/MakeIndex",
},
"MTLL": {
"name": "Matrix Template Library License",
"url": "https://fedoraproject.org/wiki/Licensing/Matrix_Template_Library_License",
},
"MS-PL": {
"name": "Microsoft Public License",
"url": "http://www.opensource.org/licenses/MS-PL",
},
"MS-RL": {
"name": "Microsoft Reciprocal License",
"url": "http://www.opensource.org/licenses/MS-RL",
},
"MirOS": {
"name": "MirOS Licence",
"url": "http://www.opensource.org/licenses/MirOS",
},
"MITNFA": {
"name": "MIT +no-false-attribs license",
"url": "https://fedoraproject.org/wiki/Licensing/MITNFA",
},
"MIT": {
"name": "MIT License",
"url": "http://www.opensource.org/licenses/MIT",
},
"Motosoto": {
"name": "Motosoto License",
"url": "http://www.opensource.org/licenses/Motosoto",
},
"MPL-1.0": {
"name": "Mozilla Public License 1.0",
"url": "http://www.mozilla.org/MPL/MPL-1.0.html",
},
"MPL-1.1": {
"name": "Mozilla Public License 1.1",
"url": "http://www.mozilla.org/MPL/MPL-1.1.html",
},
"MPL-2.0": {
"name": "Mozilla Public License 2.0",
"url": "http://www.mozilla.org/MPL/2.0/\nhttp://opensource.org/licenses/MPL-2.0",
},
"MPL-2.0-no-copyleft-exception": {
"name": "Mozilla Public License 2.0 (no copyleft exception)",
"url": "http://www.mozilla.org/MPL/2.0/\nhttp://opensource.org/licenses/MPL-2.0",
},
"mpich2": {
"name": "mpich2 License",
"url": "https://fedoraproject.org/wiki/Licensing/MIT",
},
"Multics": {
"name": "Multics License",
"url": "http://www.opensource.org/licenses/Multics",
},
"Mup": {
"name": "Mup License",
"url": "https://fedoraproject.org/wiki/Licensing/Mup",
},
"NASA-1.3": {
"name": "NASA Open Source Agreement 1.3",
"url": "http://www.opensource.org/licenses/NASA-1.3",
},
"Naumen": {
"name": "Naumen Public License",
"url": "http://www.opensource.org/licenses/Naumen",
},
"NetCDF": {
"name": "NetCDF license",
"url": "http://www.unidata.ucar.edu/software/netcdf/copyright.html",
},
"NGPL": {
"name": "Nethack General Public License",
"url": "http://www.opensource.org/licenses/NGPL",
},
"NOSL": {
"name": "Netizen Open Source License",
"url": "http://bits.netizen.com.au/licenses/NOSL/nosl.txt",
},
"NPL-1.0": {
"name": "Netscape Public License v1.0",
"url": "http://www.mozilla.org/MPL/NPL/1.0/",
},
"NPL-1.1": {
"name": "Netscape Public License v1.1",
"url": "http://www.mozilla.org/MPL/NPL/1.1/",
},
"Newsletr": {
"name": "Newsletr License",
"url": "https://fedoraproject.org/wiki/Licensing/Newsletr",
},
"NLPL": {
"name": "No Limit Public License",
"url": "https://fedoraproject.org/wiki/Licensing/NLPL",
},
"Nokia": {
"name": "Nokia Open Source License",
"url": "http://www.opensource.org/licenses/nokia",
},
"NPOSL-3.0": {
"name": "Non-Profit Open Software License 3.0",
"url": "http://www.opensource.org/licenses/NOSL3.0",
},
"Noweb": {
"name": "Noweb License",
"url": "https://fedoraproject.org/wiki/Licensing/Noweb",
},
"NRL": {
"name": "NRL License",
"url": "http://web.mit.edu/network/isakmp/nrllicense.html",
},
"NTP": {
"name": "NTP License",
"url": "http://www.opensource.org/licenses/NTP",
},
"Nunit": {
"name": "Nunit License",
"url": "https://fedoraproject.org/wiki/Licensing/Nunit",
},
"OCLC-2.0": {
"name": "OCLC Research Public License 2.0",
"url": "http://www.opensource.org/licenses/OCLC-2.0",
},
"ODbL-1.0": {
"name": "ODC Open Database License v1.0",
"url": "http://www.opendatacommons.org/licenses/odbl/1.0/",
},
"PDDL-1.0": {
"name": "ODC Public Domain Dedication & License 1.0",
"url": "http://opendatacommons.org/licenses/pddl/1.0/",
},
"OGTSL": {
"name": "Open Group Test Suite License",
"url": "http://www.opensource.org/licenses/OGTSL",
},
"OML": {
"name": "Open Market License",
"url": "https://fedoraproject.org/wiki/Licensing/Open_Market_License",
},
"OPL-1.0": {
"name": "Open Public License v1.0",
"url": "https://fedoraproject.org/wiki/Licensing/Open_Public_License",
},
"OSL-1.0": {
"name": "Open Software License 1.0",
"url": "http://opensource.org/licenses/OSL-1.0",
},
"OSL-1.1": {
"name": "Open Software License 1.1",
"url": "https://fedoraproject.org/wiki/Licensing/OSL1.1",
},
"PHP-3.01": {
"name": "PHP License v3.01",
"url": "http://www.php.net/license/3_01.txt",
},
"Plexus": {
"name": "Plexus Classworlds License",
"url": "https://fedoraproject.org/wiki/Licensing/Plexus_Classworlds_License",
},
"PostgreSQL": {
"name": "PostgreSQL License",
"url": "http://www.opensource.org/licenses/PostgreSQL",
},
"psfrag": {
"name": "psfrag License",
"url": "https://fedoraproject.org/wiki/Licensing/psfrag",
},
"psutils": {
"name": "psutils License",
"url": "https://fedoraproject.org/wiki/Licensing/psutils",
},
"Python-2.0": {
"name": "Python License 2.0",
"url": "http://www.opensource.org/licenses/Python-2.0",
},
"QPL-1.0": {
"name": "Q Public License 1.0",
"url": "http://www.opensource.org/licenses/QPL-1.0",
},
"Qhull": {
"name": "Qhull License",
"url": "https://fedoraproject.org/wiki/Licensing/Qhull",
},
"Rdisc": {
"name": "Rdisc License",
"url": "https://fedoraproject.org/wiki/Licensing/Rdisc_License",
},
"RPSL-1.0": {
"name": "RealNetworks Public Source License v1.0",
"url": "http://www.opensource.org/licenses/RPSL-1.0",
},
"RPL-1.1": {
"name": "Reciprocal Public License 1.1",
"url": "http://opensource.org/licenses/RPL-1.1",
},
"RPL-1.5": {
"name": "Reciprocal Public License 1.5",
"url": "http://www.opensource.org/licenses/RPL-1.5",
},
"RHeCos-1.1": {
"name": "Red Hat eCos Public License v1.1",
"url": "http://ecos.sourceware.org/old-license.html",
},
"RSCPL": {
"name": "Ricoh Source Code Public License",
"url": "http://www.opensource.org/licenses/RSCPL",
},
"RSA-MD": {
"name": "RSA Message-Digest License",
"url": "http://www.faqs.org/rfcs/rfc1321.html",
},
"Ruby": {
"name": "Ruby License",
"url": "http://www.ruby-lang.org/en/LICENSE.txt",
},
"SAX-PD": {
"name": "Sax Public Domain Notice",
"url": "http://www.saxproject.org/copying.html",
},
"Saxpath": {
"name": "Saxpath License",
"url": "https://fedoraproject.org/wiki/Licensing/Saxpath_License",
},
"SCEA": {
"name": "SCEA Shared Source License",
"url": "http://research.scea.com/scea_shared_source_license.html",
},
"SWL": {
"name": "Scheme Widget Library (SWL) Software License Agreement",
"url": "https://fedoraproject.org/wiki/Licensing/SWL",
},
"Sendmail": {
"name": "Sendmail License",
"url": "http://www.sendmail.com/pdfs/open_source/sendmail_license.pdf",
},
"SGI-B-1.0": {
"name": "SGI Free Software License B v1.0",
"url": "http://oss.sgi.com/projects/FreeB/SGIFreeSWLicB.1.0.html",
},
"SGI-B-1.1": {
"name": "SGI Free Software License B v1.1",
"url": "http://oss.sgi.com/projects/FreeB/",
},
"SGI-B-2.0": {
"name": "SGI Free Software License B v2.0",
"url": "http://oss.sgi.com/projects/FreeB/SGIFreeSWLicB.2.0.pdf",
},
"OFL-1.0": {
"name": "SIL Open Font License 1.0",
"url": "http://scripts.sil.org/cms/scripts/page.php?item_id=OFL10_web",
},
"OFL-1.1": {
"name": "SIL Open Font License 1.1",
"url": "http://www.opensource.org/licenses/OFL-1.1",
},
"SimPL-2.0": {
"name": "Simple Public License 2.0",
"url": "http://www.opensource.org/licenses/SimPL-2.0",
},
"Sleepycat": {
"name": "Sleepycat License",
"url": "http://www.opensource.org/licenses/Sleepycat",
},
"SNIA": {
"name": "SNIA Public License 1.1",
"url": "https://fedoraproject.org/wiki/Licensing/SNIA_Public_License",
},
"SMLNJ": {
"name": "Standard ML of New Jersey License",
"url": "http://www.smlnj.org//license.html",
},
"SugarCRM-1.1.3": {
"name": "SugarCRM Public License v1.1.3",
"url": "http://www.sugarcrm.com/crm/SPL",
},
"SISSL": {
"name": "Sun Industry Standards Source License v1.1",
"url": "http://opensource.org/licenses/SISSL",
},
"SISSL-1.2": {
"name": "Sun Industry Standards Source License v1.2",
"url": "http://gridscheduler.sourceforge.net/Gridengine_SISSL_license.html",
},
"SPL-1.0": {
"name": "Sun Public License v1.0",
"url": "http://www.opensource.org/licenses/SPL-1.0",
},
"Watcom-1.0": {
"name": "Sybase Open Watcom Public License 1.0",
"url": "http://www.opensource.org/licenses/Watcom-1.0",
},
"TCL": {
"name": "TCL/TK License",
"url": "https://fedoraproject.org/wiki/Licensing/TCL",
},
"Unlicense": {
"name": "The Unlicense",
"url": "http://unlicense.org/",
},
"TMate": {
"name": "TMate Open Source License",
"url": "http://svnkit.com/license.html",
},
"TORQUE-1.1": {
"name": "TORQUE v2.5+ Software License v1.1",
"url": "https://fedoraproject.org/wiki/Licensing/TORQUEv1.1",
},
"TOSL": {
"name": "Trusster Open Source License",
"url": "https://fedoraproject.org/wiki/Licensing/TOSL",
},
"Unicode-TOU": {
"name": "Unicode Terms of Use",
"url": "http://www.unicode.org/copyright.html",
},
"UPL-1.0": {
"name": "Universal Permissive License v1.0",
"url": "http://opensource.org/licenses/UPL",
},
"NCSA": {
"name": "University of Illinois/NCSA Open Source License",
"url": "http://www.opensource.org/licenses/NCSA",
},
"Vim": {
"name": "Vim License",
"url": "http://vimdoc.sourceforge.net/htmldoc/uganda.html",
},
"VOSTROM": {
"name": "VOSTROM Public License for Open Source",
"url": "https://fedoraproject.org/wiki/Licensing/VOSTROM",
},
"VSL-1.0": {
"name": "Vovida Software License v1.0",
"url": "http://www.opensource.org/licenses/VSL-1.0",
},
"W3C-19980720": {
"name": "W3C Software Notice and License (1998-07-20)",
"url": "http://www.w3.org/Consortium/Legal/copyright-software-19980720.html",
},
"W3C": {
"name": "W3C Software Notice and License (2002-12-31)",
"url": "http://www.opensource.org/licenses/W3C",
},
"Wsuipa": {
"name": "Wsuipa License",
"url": "https://fedoraproject.org/wiki/Licensing/Wsuipa",
},
"Xnet": {
"name": "X.Net License",
"url": "http://opensource.org/licenses/Xnet",
},
"X11": {
"name": "X11 License",
"url": "http://www.xfree86.org/3.3.6/COPYRIGHT2.html#3",
},
"Xerox": {
"name": "Xerox License",
"url": "https://fedoraproject.org/wiki/Licensing/Xerox",
},
"XFree86-1.1": {
"name": "XFree86 License 1.1",
"url": "http://www.xfree86.org/current/LICENSE4.html",
},
"xinetd": {
"name": "xinetd License",
"url": "https://fedoraproject.org/wiki/Licensing/Xinetd_License",
},
"xpp": {
"name": "XPP License",
"url": "https://fedoraproject.org/wiki/Licensing/xpp",
},
"XSkat": {
"name": "XSkat License",
"url": "https://fedoraproject.org/wiki/Licensing/XSkat_License",
},
"YPL-1.0": {
"name": "Yahoo! Public License v1.0",
"url": "http://www.zimbra.com/license/yahoo_public_license_1.0.html",
},
"YPL-1.1": {
"name": "Yahoo! Public License v1.1",
"url": "http://www.zimbra.com/license/yahoo_public_license_1.1.html",
},
"Zed": {
"name": "Zed License",
"url": "https://fedoraproject.org/wiki/Licensing/Zed",
},
"Zlib": {
"name": "zlib License",
"url": "http://www.opensource.org/licenses/Zlib",
},
"zlib-acknowledgement": {
"name": "zlib/libpng License with Acknowledgement",
"url": "https://fedoraproject.org/wiki/Licensing/ZlibWithAcknowledgement",
},
"ZPL-1.1": {
"name": "Zope Public License 1.1",
"url": "http://old.zope.org/Resources/License/ZPL-1.1",
},
"ZPL-2.0": {
"name": "Zope Public License 2.0",
"url": "http://opensource.org/licenses/ZPL-2.0",
},
"ZPL-2.1": {
"name": "Zope Public License 2.1",
"url": "http://old.zope.org/Resources/ZPL/",
}
}
@attr.s
class License(object):
    """One SPDX license entry: short identifier, display name, reference URL."""
    id = attr.ib()
    name = attr.ib()
    url = attr.ib()

    @property
    def legalcode(self):
        """Full legal-code text bundled next to this module, or None if absent."""
        path = pathlib.Path(__file__).parent / 'legalcode' / self.id
        return path.read_text(encoding='utf8') if path.exists() else None
# Materialize the raw id -> {name, url} mapping into License instances.
_LICENSES = [
    License(license_id, entry['name'], entry['url'])
    for license_id, entry in _LICENSES.items()
]
def find(q):
    """Look up a license by id (case-insensitive), exact name, exact URL, or —
    when ``q`` contains a scheme — by scheme-agnostic URL prefix in either
    direction. Returns the first match, or None when nothing matches.
    """
    needle = q.lower()
    for candidate in _LICENSES:
        if needle == candidate.id.lower() or q in (candidate.name, candidate.url):
            return candidate
        if '://' in q:
            # Compare the parts after the scheme so http/https variants match.
            candidate_tail = candidate.url.split('://')[1]
            query_tail = q.split('://')[1]
            if candidate_tail.startswith(query_tail) or query_tail.startswith(candidate_tail):
                return candidate
|
package com.agora.data.provider;
import android.content.Context;
import android.text.TextUtils;
import androidx.annotation.NonNull;
import com.agora.data.R;
import com.agora.data.model.AgoraRoom;
import com.agora.data.sync.AgoraException;
import com.agora.data.sync.CollectionReference;
import com.agora.data.sync.DocumentReference;
import com.agora.data.sync.FieldFilter;
import com.agora.data.sync.ISyncManager;
import com.agora.data.sync.OrderBy;
import com.agora.data.sync.Query;
import com.agora.data.sync.RoomReference;
import com.agora.data.sync.SyncManager;
import com.google.gson.Gson;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import cn.leancloud.LCException;
import cn.leancloud.LCLogger;
import cn.leancloud.LCObject;
import cn.leancloud.LCQuery;
import cn.leancloud.LeanCloud;
import cn.leancloud.livequery.LCLiveQuery;
import cn.leancloud.livequery.LCLiveQueryEventHandler;
import cn.leancloud.livequery.LCLiveQuerySubscribeCallback;
import cn.leancloud.push.PushService;
import cn.leancloud.types.LCNull;
import io.agora.baselibrary.BuildConfig;
import io.reactivex.Observable;
import io.reactivex.ObservableSource;
import io.reactivex.Observer;
import io.reactivex.disposables.Disposable;
import io.reactivex.functions.Function;
import io.reactivex.schedulers.Schedulers;
/**
 * {@link ISyncManager} implementation backed by LeanCloud object storage and
 * LiveQuery push notifications. Rooms are stored in the
 * {@code AgoraRoom.TABLE_NAME} class; any other collection is addressed by its
 * {@link CollectionReference} key.
 *
 * NOTE(review): the misspelled method names ({@code creatRoom},
 * {@code subcribe}, {@code unsubcribe}) are dictated by the
 * {@link ISyncManager} interface and are kept for compatibility.
 */
public class DataSyncImpl implements ISyncManager {

    private final Gson mGson = new Gson();

    /** Active live queries, keyed by the listener that registered them. */
    private final HashMap<SyncManager.EventListener, LCLiveQuery> events = new HashMap<>();

    /**
     * Initializes the LeanCloud SDK from string resources and starts the push
     * service required by LiveQuery.
     *
     * @throws NullPointerException if any LeanCloud credential resource is empty.
     */
    public DataSyncImpl(Context mContext) {
        // Verbose SDK logging only in debug builds.
        if (BuildConfig.DEBUG) {
            LeanCloud.setLogLevel(LCLogger.Level.DEBUG);
        } else {
            LeanCloud.setLogLevel(LCLogger.Level.ERROR);
        }

        String appid = mContext.getString(R.string.leancloud_app_id);
        String appKey = mContext.getString(R.string.leancloud_app_key);
        String url = mContext.getString(R.string.leancloud_server_url);
        if (TextUtils.isEmpty(appid) || TextUtils.isEmpty(appKey) || TextUtils.isEmpty(url)) {
            throw new NullPointerException("please check \"strings_config.xml\"");
        }

        LeanCloud.initialize(mContext, appid, appKey, url);
        // Required for LiveQuery subscriptions to receive server pushes.
        PushService.startIfRequired(mContext);
    }

    // ------------------------------------------------------------------
    // Private helpers shared by the CRUD / subscription implementations.
    // ------------------------------------------------------------------

    /** LeanCloud class (table) name a document reference resolves to. */
    private String tableNameOf(DocumentReference reference) {
        return reference instanceof RoomReference
                ? AgoraRoom.TABLE_NAME
                : reference.getParent().getKey();
    }

    /**
     * Copies {@code datas} into {@code target}, converting any
     * {@link DocumentReference} value into a LeanCloud pointer object.
     */
    private void applyValues(LCObject target, Map<String, Object> datas) {
        for (Map.Entry<String, Object> entry : datas.entrySet()) {
            Object value = entry.getValue();
            if (value instanceof DocumentReference) {
                DocumentReference ref = (DocumentReference) value;
                LCObject pointer = LCObject.createWithoutData(ref.getParent().getKey(), ref.getId());
                target.put(entry.getKey(), pointer);
            } else {
                target.put(entry.getKey(), value);
            }
        }
    }

    /** Observer that forwards a single LCObject result to a DataItemCallback. */
    private Observer<LCObject> itemObserver(SyncManager.DataItemCallback callback) {
        return new Observer<LCObject>() {
            @Override
            public void onSubscribe(@NonNull Disposable d) {
            }

            @Override
            public void onNext(@NonNull LCObject object) {
                callback.onSuccess(new AgoraObject(object));
            }

            @Override
            public void onError(@NonNull Throwable e) {
                callback.onFail(new AgoraException(e));
            }

            @Override
            public void onComplete() {
            }
        };
    }

    /** Observer that forwards a void (LCNull) result to a plain Callback. */
    private Observer<LCNull> voidObserver(SyncManager.Callback callback) {
        return new Observer<LCNull>() {
            @Override
            public void onSubscribe(@NonNull Disposable d) {
            }

            @Override
            public void onNext(@NonNull LCNull aNull) {
                callback.onSuccess();
            }

            @Override
            public void onError(@NonNull Throwable e) {
                callback.onFail(new AgoraException(e));
            }

            @Override
            public void onComplete() {
            }
        };
    }

    /**
     * Builds an {@link LCQuery} for {@code theClassName}, translating the
     * EQUAL filters and the orderings of {@code mQuery}. DocumentReference
     * filter values become LeanCloud pointer comparisons. A null query yields
     * an unfiltered LCQuery.
     */
    private LCQuery<LCObject> createLCQuery(String theClassName, Query mQuery) {
        LCQuery<LCObject> query = LCQuery.getQuery(theClassName);
        if (mQuery == null) {
            return query;
        }

        for (FieldFilter filter : mQuery.getFilters()) {
            // Only equality filters are supported by this backend.
            if (filter.getOperator() != FieldFilter.Operator.EQUAL) {
                continue;
            }
            Object value = filter.getValue();
            if (value instanceof DocumentReference) {
                DocumentReference ref = (DocumentReference) value;
                LCObject pointer = LCObject.createWithoutData(ref.getParent().getKey(), ref.getId());
                query.whereEqualTo(filter.getField(), pointer);
            } else {
                query.whereEqualTo(filter.getField(), value);
            }
        }

        for (OrderBy item : mQuery.getOrderByList()) {
            if (item.getDirection() == OrderBy.Direction.ASCENDING) {
                query.addAscendingOrder(item.getField());
            } else if (item.getDirection() == OrderBy.Direction.DESCENDING) {
                query.addDescendingOrder(item.getField());
            }
        }
        return query;
    }

    // ------------------------------------------------------------------
    // Room API.
    // ------------------------------------------------------------------

    /**
     * Persists {@code room} as a new LeanCloud object and emits the stored
     * room (with its server-assigned id) on the io scheduler. Errors are
     * wrapped in {@link AgoraException}.
     */
    @Override
    public Observable<AgoraRoom> creatRoom(AgoraRoom room) {
        LCObject object = new LCObject(AgoraRoom.TABLE_NAME);
        for (Map.Entry<String, Object> entry : room.toHashMap().entrySet()) {
            object.put(entry.getKey(), entry.getValue());
        }
        return object.saveInBackground()
                .subscribeOn(Schedulers.io())
                .map(new Function<LCObject, AgoraRoom>() {
                    @Override
                    public AgoraRoom apply(@NonNull LCObject saved) throws Exception {
                        // Round-trip through JSON to rebuild the model object.
                        AgoraRoom result = mGson.fromJson(saved.toJSONObject().toJSONString(), AgoraRoom.class);
                        result.setId(saved.getObjectId());
                        return result;
                    }
                })
                .onErrorResumeNext(new Function<Throwable, ObservableSource<? extends AgoraRoom>>() {
                    @Override
                    public ObservableSource<? extends AgoraRoom> apply(@NonNull Throwable throwable) throws Exception {
                        return Observable.error(new AgoraException(throwable));
                    }
                });
    }

    /**
     * Fetches the rooms created within the last 24 hours, newest first, on the
     * io scheduler. Errors are wrapped in {@link AgoraException}.
     */
    @Override
    public Observable<List<AgoraRoom>> getRooms() {
        // Only list rooms created within the last 24 hours.
        Date since = new Date(System.currentTimeMillis() - (24 * 60 * 60 * 1000));
        LCQuery<LCObject> query = LCQuery.getQuery(AgoraRoom.TABLE_NAME);
        query.whereGreaterThanOrEqualTo(AgoraRoom.COLUMN_CREATEDAT, since);
        query.orderByDescending(AgoraRoom.COLUMN_CREATEDAT);
        return query.findInBackground()
                .subscribeOn(Schedulers.io())
                .map(new Function<List<LCObject>, List<AgoraRoom>>() {
                    @Override
                    public List<AgoraRoom> apply(@NonNull List<LCObject> objects) throws Exception {
                        List<AgoraRoom> rooms = new ArrayList<>();
                        for (LCObject object : objects) {
                            AgoraRoom room = mGson.fromJson(object.toJSONObject().toJSONString(), AgoraRoom.class);
                            room.setId(object.getObjectId());
                            rooms.add(room);
                        }
                        return rooms;
                    }
                })
                .onErrorResumeNext(new Function<Throwable, ObservableSource<? extends List<AgoraRoom>>>() {
                    @Override
                    public ObservableSource<? extends List<AgoraRoom>> apply(@NonNull Throwable throwable) throws Exception {
                        return Observable.error(new AgoraException(throwable));
                    }
                });
    }

    // ------------------------------------------------------------------
    // Generic document / collection CRUD.
    // ------------------------------------------------------------------

    /** Fetches a single document by id and reports it through {@code callback}. */
    @Override
    public void get(DocumentReference reference, SyncManager.DataItemCallback callback) {
        LCQuery<LCObject> query = LCQuery.getQuery(tableNameOf(reference));
        query.getInBackground(reference.getId())
                .subscribe(itemObserver(callback));
    }

    /** Runs the collection's query and reports the matching documents. */
    @Override
    public void get(CollectionReference reference, SyncManager.DataListCallback callback) {
        createLCQuery(reference.getKey(), reference.getQuery())
                .findInBackground()
                .subscribe(new Observer<List<LCObject>>() {
                    @Override
                    public void onSubscribe(@NonNull Disposable d) {
                    }

                    @Override
                    public void onNext(@NonNull List<LCObject> objects) {
                        List<AgoraObject> list = new ArrayList<>();
                        for (LCObject object : objects) {
                            list.add(new AgoraObject(object));
                        }
                        callback.onSuccess(list);
                    }

                    @Override
                    public void onError(@NonNull Throwable e) {
                        callback.onFail(new AgoraException(e));
                    }

                    @Override
                    public void onComplete() {
                    }
                });
    }

    /**
     * Inserts a new document into {@code reference}'s collection.
     * DocumentReference values in {@code datas} are stored as pointers.
     */
    @Override
    public void add(CollectionReference reference, HashMap<String, Object> datas, SyncManager.DataItemCallback callback) {
        LCObject object = new LCObject(reference.getKey());
        applyValues(object, datas);
        object.saveInBackground()
                .subscribe(itemObserver(callback));
    }

    /** Deletes a single document by id. */
    @Override
    public void delete(DocumentReference reference, SyncManager.Callback callback) {
        LCObject object = LCObject.createWithoutData(tableNameOf(reference), reference.getId());
        object.deleteInBackground()
                .subscribe(voidObserver(callback));
    }

    /** Deletes every document matched by the collection's query. */
    @Override
    public void delete(CollectionReference reference, SyncManager.Callback callback) {
        createLCQuery(reference.getKey(), reference.getQuery())
                .deleteAllInBackground()
                .subscribe(voidObserver(callback));
    }

    /** Updates a single field of an existing document. */
    @Override
    public void update(DocumentReference reference, String key, Object data, SyncManager.DataItemCallback callback) {
        LCObject object = LCObject.createWithoutData(tableNameOf(reference), reference.getId());
        object.put(key, data);
        object.saveInBackground()
                .subscribe(itemObserver(callback));
    }

    /**
     * Updates several fields of an existing document. DocumentReference
     * values in {@code datas} are stored as pointers.
     */
    @Override
    public void update(DocumentReference reference, HashMap<String, Object> datas, SyncManager.DataItemCallback callback) {
        LCObject object = LCObject.createWithoutData(tableNameOf(reference), reference.getId());
        applyValues(object, datas);
        object.saveInBackground()
                .subscribe(itemObserver(callback));
    }

    // ------------------------------------------------------------------
    // LiveQuery subscriptions.
    // ------------------------------------------------------------------

    /** Wraps a SyncManager listener as a LiveQuery event handler. */
    private LCLiveQueryEventHandler eventHandlerFor(SyncManager.EventListener listener) {
        return new LCLiveQueryEventHandler() {
            @Override
            public void onObjectCreated(LCObject object) {
                super.onObjectCreated(object);
                listener.onCreated(new AgoraObject(object));
            }

            @Override
            public void onObjectUpdated(LCObject object, List<String> updatedKeys) {
                super.onObjectUpdated(object, updatedKeys);
                listener.onUpdated(new AgoraObject(object));
            }

            @Override
            public void onObjectDeleted(String objectId) {
                super.onObjectDeleted(objectId);
                listener.onDeleted(objectId);
            }
        };
    }

    /**
     * Registers a live query for {@code query}, remembers it for later
     * teardown, and reports subscription failures to {@code listener}.
     */
    private void subscribeTo(LCQuery<LCObject> query, SyncManager.EventListener listener) {
        LCLiveQuery liveQuery = LCLiveQuery.initWithQuery(query);
        liveQuery.setEventHandler(eventHandlerFor(listener));
        // Remember the live query so unsubcribe(listener) can tear it down.
        events.put(listener, liveQuery);
        liveQuery.subscribeInBackground(new LCLiveQuerySubscribeCallback() {
            @Override
            public void done(LCException e) {
                if (e == null) {
                    return; // Subscribed successfully.
                }
                int code = e.getCode() == LCException.EXCEEDED_QUOTA
                        ? AgoraException.ERROR_LEANCLOULD_OVER_COUNT
                        : AgoraException.ERROR_LEANCLOULD_DEFAULT;
                listener.onSubscribeError(new AgoraException(code, e.getMessage()));
            }
        });
    }

    /** Subscribes to live updates of the documents matched by {@code reference}. */
    @Override
    public void subcribe(DocumentReference reference, SyncManager.EventListener listener) {
        subscribeTo(createLCQuery(tableNameOf(reference), reference.getQuery()), listener);
    }

    /** Subscribes to live updates of the documents matched by the collection. */
    @Override
    public void subcribe(CollectionReference reference, SyncManager.EventListener listener) {
        subscribeTo(createLCQuery(reference.getKey(), reference.getQuery()), listener);
    }

    /** Cancels the live query registered for {@code listener}, if any. */
    @Override
    public void unsubcribe(SyncManager.EventListener listener) {
        // remove() (rather than get()) so the map entry does not leak after
        // the listener is gone.
        LCLiveQuery liveQuery = events.remove(listener);
        if (liveQuery != null) {
            liveQuery.unsubscribeInBackground(new LCLiveQuerySubscribeCallback() {
                @Override
                public void done(LCException e) {
                    // Best-effort teardown; errors are intentionally ignored.
                }
            });
        }
    }
}
|
Electric signature of magnetic domain-wall dynamics We study current-induced domain-wall dynamics in a thin ferromagnetic nanowire. The domain-wall dynamics is described by simple equations with four parameters. We propose the procedure to determine these parameters by all-electric measurements of the time-dependent voltage induced by the domain-wall motion. We provide an analytical expression for the time variation of this voltage. Furthermore, we show that the measurement of the proposed effects is within reach with current experimental techniques. S = −S H e − j∂ z S + jS ∂ z S + S, where H e = −H/S is the effective magnetic field given by the Hamiltonian H of the system, S = M/|M | is a unit magnetization vector, is the Gilbert damping constant, is the non-adiabatic spin torque constant, ∂ z ≡ ∂/∂z where is along the wire, and the time is measured in units of the gyromagnetic ratio 0 = g|e|/(2mc). DWs in a ferromagnetic wire can be modeled by a spin Hamiltonian H which contains exchange, spin-orbit, 20 and dipolar interactions. In a thin wire, the latter can be approximated by two anisotropies: a strong anisotropy along the wire () and a weak anisotropy transverse to it (K). In realistic systems, ≪ 1 and K ≪. In a thin wire, a lowest-energy magnetization configuration (at j = 0) is uniformly ordered along the z or −z direction. A static DW is the next low-energy configuration with the boundary conditions S z (±∞) = ±1 or S z (±∞) = ∓1. DWs can be injected in the wire using different techniques. A sketch of a wire with a DW of width ∆, determined by the Hamiltonian parameters, is depicted in Fig. 1. For small enough applied currents, it can be shown that the DW in a thin wire is a rigid spin texture 14 and its dynamics can be described in terms of only two collective coordinates. 
21,22 These coordinates correspond to the two softest modes of the DW motion: the DW position along the wire, z 0, and the rotation angle of the magnetization in the DW around the wire axis, see Fig. 1. It has been shown 22,23 that the equations of motion for the DW in a thin ferromagnetic wire are model independent and can very generally be written in the for Here all current nonlinearities are neglected, since the large currents leading to observable nonlinear effects would burn the nanowire. For a dc current below the critical value j c, i.e., j < j c, Eq. implies that the DW tilts from the transverse anisotropy plane by the angle that satisfies sin(2) = j/j c around the wire axis and then moves along the wire with a constant velocity Aj. For j > j c, the DW constantly rotates while moving. The coefficients A, B, C and the critical current j c are the parameters that fully describe the DW dynamics. They can be calculated microscopically for certain toy models, 22 but in general they vary for different wires and depend on the temperature and nanofabrication details. Therefore, in this Letter we propose a way to determine these coefficients by model-independent measurements of an induced ac voltage directly from an experiment suitable for all-electric DW manipulation. As we show below, this ac voltage can be induced by applied dc currents and by certain time-dependent current pulses with parameters similar to those achieved in recent experiments. 24,25 Microscopically the dynamics parameters can be obtained in the following way. The energy of a static DW, where S 0 is a solution of a static LLG with K = 0, in general depends on both z 0 and. However, assuming that the wire is translationally invariant (pinning can be neglected), E 0 would not depend on the DW position z 0 and therefore ∂ z0 E 0 = 0. The only contribution to E 0 that depends on the angle comes from the small anisotropy in the transverse plane, E 0 () = − cos(2). 
2226 This allows us to find the coefficients in Eqs. and in terms of the parameters of the LLG. 22,27 Up to first order in and they are and are consistent 28 with the expressions for A, B, C, and j c found in Ref. 22. We now outline the method to find A, B, C, and j c directly from all-electric measurements. It is based on measuring the ac voltage V induced by a moving DW. To find V one has to know the time evolution of the total energy (per unit area of the wire's cross-section) in the system, In general, DW energy has two contributions: the power supplied by an electric current and a negative contribution due to The last term on the right-hand side of Eq. describes the dissipation and is therefore always nonpositive. Meanwhile, the first term is proportional to the current density j and gives the power V j supplied by the current. With the help of Eqs. - and adopting the approximation D ≃ 1 of Ref. 22 we obtain the expression for the induced DW voltage 29, Note that Eq. gives the contribution to the voltage due to DW motion. This contribution is in addition to the usual Ohmic one. The voltage V in Eq. is measured in units of P g B /(e 0 ) and the current density is measured in units 2eM/(P g B ), where P is the current polarization. We emphasize that unlike in the previously studied cases, 8,12 this voltage is not caused by the motion of topological defects (vortices) transverse to the wire. Measurement of coefficients A, B, C, and j c. In order to find coefficients A, B, and C, we propose three independent measurements of the voltage induced by a moving DW. Although there are various factors affecting the nanowire resistance, the contributions from most of them are independent of DW motion and therefore give only a constant component of the resistance. To characterize the DW dynamics, one has to concentrate only on the resistance variations in time. 
Our estimates show that the amplitude of voltage oscillations due to DW motion is of the order of 10 −7 V and therefore experimentally measurable. Equation implies that the voltage of the DW can give all the necessary information about DW dynamics. Namely, one can obtain C by measuring the voltage changing with time and parameters A and B by measuring the amplitude of the voltage oscillations. Slopes measurement. In Refs. 23 and 30 it was proposed to obtain A, B, and j c by measuring the drift velocity of the DW, 0. It is important to note that Eq. has the same form as Eq.. Thus, instead of measuring the drift velocity, which requires a more complicated experimental setup, we propose to perform all-electric measurements. Namely, to measure the average voltage of DW, V, as a function of dc current. From Eq. one can see that V = A 2 C B j for j < j c, whereas V = A 2 C B j +(1+A)C j 2 − j 2 c for j > j c, see Fig. 2. The critical current is determined by the end of the region linear in j for small currents. The measurement of slope k 1 at j < j c, and slope k 2 at j ≫ j c gives the two independent quantities: Instead of measuring voltage average for dc current, one can apply a linearly increasing time-dependent current j(t) = qt below the critical value j c. At sufficiently small q the voltage will also be linear in time, V (t) ≈ A 2 C B qt. By measuring this voltage one can find Once C is determined, Eqs. give A and B. The drawback of this measurement is that it might be hard to disentangle k 1 and k 2 from the Ohmic contribution. However k 2 − k 1 is free from the Ohmic resistance of the wire. In order to find C, the most intuitive approach is to input a dc current slightly above j c. Then the voltage induced by the moving DW will oscillate with the period of the double angle, see the insets of Fig. 3. The half-width of the peak (dip) for C > 0 (C < 0) is given by arccos(j c /j)/(|C| j 2 − j 2 c ). 
The measurement of the voltage oscillations period T 0 (which we estimate to be ∼ 10 −7 -10 −6 s) determines C at a given j: For j − j c ≪ j c, the period diverges but the half-width ∼ 1/(Cj c ) stays finite. To obtain the period T 0, one can perform the Fourier transform of V (t) to find the frequency f 0 = 1/T 0, see Fig. 3. To determine coefficient A in the same experiment, one can measure ∆V = V max − V min = 2(1 + A)|C|j c, see insets of Fig. 3. Then Note that ∆V = 2(k 2 − k 1 )j c and therefore this experiment can also provide a crosscheck with the aforementioned measurement of the slopes. Phase shift experiment. Another method to measure the coefficient C is by applying an ac current j = j 0 sin t with j 0 > j c, which has only a short time interval where j > j c, so that there is only one period of voltage within the period of j(t). One can measure the phase delay, ∆, between the current maximum and voltage extremum 31 (see Fig. 4). Next, show the phase delay ∆ between the current maximum and voltage extremum for C > 0 and C < 0, respectively. (c) and (d) depict V (t) at ∆ = 0 for the same C > 0 and C < 0, respectively. one fixes the amplitude j 0 and tunes the frequency until ∆ = 0. In this case, for j 0 − j c ≪ j c, we can use half of the time interval for which the current pulse is above j c to approximate the period of by dc current j 0 as For j 0 − j c ≪ j c, Eq. can be further simplified to give In other words, when ≈ C(j 0 − j c ) which corresponds roughly to ∼ 10 7 Hz, the current pulse covers only one period of voltage. Our simulations show that the expression works sufficiently well for j 0 1.3j c. The sign of C is determined by the extremum of the measured voltage: C > 0 if V has the minimum and C < 0 if V has the maximum. Our simulations show (Fig. 4) that in addition to the large peak (dip) of voltage there is a smaller one with the opposite curvature. 
This is because when j(t) reaches j c, the angle has not yet rotated to the angle corresponding to sin(2 0 ) = 1 due to the cumulative phase delay between current and voltage. Abrupt current pulse experiment. It is also possible to measure the coefficient C for currents below the critical value j c. The constant |C|j c determines the internal time scale of the DW motion. After one switches the subcritical current off at time t i, the voltage asymptotically decays as exp(−2|C|j c t), see Fig. 5. To measure the decay of V (t) with time, one inputs a dc current below j c, then measures voltage V i immediately after turning off the current at t i, and then later measures voltage V f at time t i + ∆ t. We note that right after turning off the current, there is a short time period when the DW dynamics cannot be described by Eqs. and. It corresponds to the dynamics of fast degrees of freedom. This process has a characteristic time ∼ 10 −11 s which is typically much smaller than the voltage decay time ∼ 10 −8 s. Thus we can safely assume that the rotation angle does not change much during this time interval, and we find which is valid for V i /V f ≫ 1. For example, estimating V i /V f = 10 we find |C| ≈ 1.17/(∆ t j c ). The sign of C can be easily determined by the form of voltage decay (see Fig. 5). To summarize, we propose several all-electric measurements of the parameters fully describing domain-wall dynamics in thin ferromagnetic nanowires. These measurements are based on the voltage induced by a moving DW in response to certain current pulses. Our proposal opens doors for experiments which are suitable not only for all-electric DW manipulation but also for the simultaneous measurement of the DW dynamics. These findings give a more reliable and straightforward experimental method to determine the DW dynamics parameters, which can then be compared to microscopic theories. The procedure we described works for a given temperature regime. 
It may also be used to investigate the temperature dependence of the effective parameters. Future work will include accounting for pinning effects, which break translational invariance in the wires. 32 We thank I. V. Roshchin, J. Sinova, and E. K. Vehstedt for valuable discussions. This work was supported by the NSF Grant No. 0757992 and Welch Foundation (A-1678). |
Pars plana vitrectomy combined with scleral buckle versus pars plana vitrectomy for giant retinal tear. BACKGROUND A giant retinal tear (GRT) is a full-thickness neurosensory retinal break extending for 90° or more in the presence of a posterior vitreous detachment. OBJECTIVES To evaluate the effectiveness and safety of pars plana vitrectomy combined with scleral buckle versus pars plana vitrectomy alone for eyes with giant retinal tear. SEARCH METHODS We searched the Cochrane Central Register of Controlled Trials (CENTRAL; 2018, Issue 8), which contains the Cochrane Eyes and Vision Trials Register; Ovid MEDLINE; Embase.com; PubMed; Latin American and Caribbean Literature on Health Sciences (LILACS); ClinicalTrials.gov; and the World Health Organization (WHO) International Clinical Trials Registry Platform (ICTRP). We did not use any date or language restrictions in our electronic search. We last searched the electronic databases on 16 August 2018. SELECTION CRITERIA We included only randomized controlled trials (RCTs) comparing pars plana vitrectomy combined with scleral buckle versus pars plana vitrectomy alone for giant retinal tear regardless of age, gender, lens status (e.g. phakic or pseudophakic eyes) of the affected eye(s), or etiology of GRT among participants enrolled in these trials. DATA COLLECTION AND ANALYSIS Two review authors independently assessed titles and abstracts, then full-text articles, using Covidence. Any differences in classification between the two review authors were resolved through discussion. Two review authors independently abstracted data and assessed risk of bias of included trials. MAIN RESULTS We found two RCTs in abstract format (105 participants randomized). Neither RCT was published in full. 
Based on the data presented in the abstracts, scleral buckling might be beneficial (relative risk of re-attachment ranged from 3.0 to 4.4), but the findings are inconclusive due to a lack of peer-reviewed publication and insufficient information for assessing risk of bias. AUTHORS' CONCLUSIONS We found no conclusive evidence from RCTs on which to base clinical recommendations for scleral buckle combined with pars plana vitrectomy for giant retinal tear. RCTs are clearly needed to address this evidence gap. Such trials should be randomized, and patients should be classified by giant retinal tear characteristics (extension (90, 90 to 180, > 180), location (oral, anterior, posterior to equator)), proliferative vitreoretinopathy stage, and endotamponade. Analysis should include both short-term (three months and six months) and long-term (one year to two years) outcomes for primary retinal reattachment, mean change in best corrected visual acuity, study eyes that required second surgery for retinal reattachment, and adverse events such as elevation of intraocular pressure above 21 mmHg, choroidal detachment, cystoid macular edema, macular pucker, proliferative vitreoretinopathy, and progression of cataract in initially phakic eyes. |
Impact of WSN applications generated traffic on WCDMA access networks WCDMA radio access networks are dimensioned using traffic models containing traffic characteristics for a number of services offered by the operators and their networks today. Recent trends have seen a new class of traffic emerging, driven by wireless sensor network applications and machine-to-machine communication. This communication between “things” has traffic characteristics very different from traditional human centric services. As more such services are expected to use mobile network as a backhaul, it is important to understand the impact their traffic will have on the radio access network. This paper provides a first attempt to study the potential capacity impact of several selected sensor based applications on the WCDMA radio access network. In that, we assumed mobile subscribers to be generators of the wireless sensor network and machine-to-machine traffic, in addition to standard traffic that is in use today. The results thus far have shown that bearer mapping can be a deciding factor in determining the size of the network and the required resources. |
<gh_stars>0
//
// PEProgressBar.h
// Alchemy
//
// Created by Kyounghwan on 2014. 9. 21..
//
//
#ifndef __Alchemy__PEProgressBar__
#define __Alchemy__PEProgressBar__
#include "Common.h"
// Two-layer progress bar node (cocos2d-x): a "pre" (preview) bar drawn
// together with a "result" (committed) bar, plus optional particle effects
// registered against the bar.
class PEProgressBar : public CCNode
{
public:
// Factory. bar_name selects the bar asset; max is the full-scale value;
// direction flips the fill orientation (exact true/false meaning defined in
// the implementation -- confirm at call sites); anchor is the anchor point.
static PEProgressBar* create(string bar_name, int max, bool direction, const Vec2& anchor);
// Updates the preview bar to the given value.
void change_pre_bar(int value);
// Updates the committed bar to the given value.
void change_result_bar(int value);
// Remaining value -- presumably max minus the current value; TODO confirm.
int get_remain_value(void);
// X coordinate where the filled portion of the bar ends, in node space.
float get_bar_end_pos(void);
// Total drawable width of the bar.
float get_bar_width(void);
// Start position of the bar.
Vec2 get_bar_start_pos(void);
void set_bar_start_pos(Vec2 start);
// Registers a particle so it can later be removed in bulk.
void push_particle_to_vector(CCParticleSun* object);
// Removes particles previously registered via push_particle_to_vector.
void erase_particle_from_vector(void);
private:
CCProgressTimer* result_bar; // committed progress layer
CCProgressTimer* pre_bar; // preview progress layer
Vec2 pos;
int max; // full-scale value
int current_pre_value;
int current_result_value;
int direction; // fill orientation flag (created from a bool, stored as int)
float bar_width;
std::vector<CCParticleSun*> regen_particle; // particles attached to the bar
};
#endif /* defined(__Alchemy__PEProgressBar__) */
|
<gh_stars>0
#pragma once
#include <SFML/Audio/Music.hpp>
namespace obe::Sound
{
    /**
     * \brief A Wrapper around sf::Music
     * @Bind
     */
    class MusicWrapper
    {
    private:
        sf::Music m_music;
    public:
        /**
         * \brief Default constructor of MusicWrapper
         */
        MusicWrapper() = default;
        /**
         * \brief Load Constructor of MusicWrapper
         * \param filename Path to the music file to load
         */
        explicit MusicWrapper(const std::string& filename);
        /**
         * \brief Whether the music is spatialised (uses a 3D position)
         */
        bool doesUsesSoundPosition() const;
        /**
         * \brief Total duration of the loaded music
         */
        sf::Time getDuration() const;
        /**
         * \brief Distance under which the music is heard at full volume
         */
        float getMinimumDistance() const;
        /**
         * \brief Current pitch (1 = normal playback)
         */
        float getPitch() const;
        /**
         * \brief Current playing position within the music
         */
        sf::Time getPlayingOffset() const;
        /**
         * \brief Current 3D position of the music source
         */
        sf::Vector3f getPosition() const;
        /**
         * \brief Attenuation factor applied beyond the minimum distance
         */
        float getSpatialAttenuation() const;
        /**
         * \brief Playback status rendered as a string
         *        (values defined by the implementation)
         */
        std::string getStatus() const;
        /**
         * \brief Current volume (0-100 in SFML convention)
         */
        float getVolume() const;
        /**
         * \brief Whether the music restarts automatically when it ends
         */
        bool isLooping() const;
        /**
         * \brief Loads a Music at the given path
         * \param filename Path to the Music to load
         */
        void load(const std::string& filename);
        /**
         * \brief Pauses playback, keeping the current offset
         */
        void pause();
        /**
         * \brief Starts or resumes playback
         */
        void play();
        /**
         * \brief Enables or disables looping
         */
        void setLooping(const bool& looping);
        /**
         * \brief Sets the distance under which the music plays at full volume
         */
        void setMinimumDistance(float distance);
        /**
         * \brief Sets the pitch (in SFML this also changes playback speed)
         */
        void setPitch(float pitch);
        /**
         * \brief Seeks to the given position in the music
         */
        void setPlayingOffset(const sf::Time& offset);
        /**
         * \brief Sets the 3D position of the music source
         */
        void setPosition(float x, float y, float z);
        /**
         * \brief Sets the attenuation factor used for spatialisation
         */
        void setSpatialAttenuation(float attenuation);
        /**
         * \brief Sets the volume (0-100 in SFML convention)
         */
        void setVolume(float volume);
        /**
         * \brief Stops playback and rewinds to the beginning
         */
        void stop();
        /**
         * \brief Enables or disables use of the 3D sound position
         */
        void useSoundPosition(bool usePosition);
    };
}
Hyperimages: using object recognition for navigation through images in multimedia Multimedia involves the use of multiple forms of communication media in an interactive and integrated manner. At present, textual data is the media predominantly used to provide the interactivity due to the ease with which discrete semantic elements are identified. It is common practice to follow links from words or phrases within text to associated information elsewhere in the database. To achieve a similar degree of functionality with visual information typically requires that each image (or video sequence) be processed by hand, indicating the objects and locations within the image -- a process that is excessively expensive and time-consuming for large databases. This paper describes the implementation of a simple object recognition system that allows the specification of 3D models that can then be used to recognize objects within any image, in an analogous fashion to words within text. This enables image data to become a truly active media, within a multimedia database. It provides a significantly enhanced level of functionality while keeping the authoring effort to a minimum. The basic algorithms are described and then an example application is outlined, along with feedback from users of the system. |
////////////////////////////////////////////////////////////////////////////////
// //
// This software is supplied under the terms of a license agreement or //
// nondisclosure agreement with Mitov Software and may not be copied //
// or disclosed except in accordance with the terms of that agreement. //
// Copyright(c) 2002-2018 Mitov Software. All Rights Reserved. //
// //
////////////////////////////////////////////////////////////////////////////////
#ifndef _MITOV_TRIANGLE_GENERATOR_h
#define _MITOV_TRIANGLE_GENERATOR_h
#include <Mitov.h>
#include "Mitov_BasicGenerator.h"
namespace Mitov
{
//---------------------------------------------------------------------------
// Triangle wave shaper producing a floating point sample. The position of
// the triangle's peak within one period is skewed by Asymmetry.
class TriangleAnalogGenerator
{
public:
	// Peak position skew, in the range -1 .. 1 (0 = symmetric triangle)
	float Asymmetry = 0.0f;

protected:
	// Maps AInherited->FPhase (0..1) onto a triangle ramp and stores the
	// result in AInherited->FValue, scaled by Amplitude around Offset.
	template<typename T> void CalculateValue( T *AInherited )
	{
		float APeakPhase = 0.5 + Asymmetry / 2;
		float ARamp;
		if( AInherited->FPhase < APeakPhase )
			ARamp = AInherited->FPhase / APeakPhase; // rising edge
		else
			ARamp = 1 - (( AInherited->FPhase - APeakPhase ) / ( 1 - APeakPhase )); // falling edge
		AInherited->FValue = AInherited->Offset - AInherited->Amplitude + AInherited->Amplitude * 2 * ARamp;
	}
};
//---------------------------------------------------------------------------
// Integer variant of the triangle shaper; the trailing + 0.5 rounds the
// floating point result to the nearest integer on assignment.
class TriangleIntegerGenerator
{
public:
	// Peak position skew, in the range -1 .. 1 (0 = symmetric triangle)
	float Asymmetry = 0.0f;

protected:
	template<typename T> void CalculateValue( T *AInherited )
	{
		float APeakPhase = 0.5 + Asymmetry / 2;
		float ARamp;
		if( AInherited->FPhase < APeakPhase )
			ARamp = AInherited->FPhase / APeakPhase; // rising edge
		else
			ARamp = 1 - (( AInherited->FPhase - APeakPhase ) / ( 1 - APeakPhase )); // falling edge
		AInherited->FValue = ( AInherited->Offset - AInherited->Amplitude + float( AInherited->Amplitude ) * 2 * ARamp ) + 0.5;
	}
};
//---------------------------------------------------------------------------
}
#endif
|
<filename>web/app/core/service/apploading.service.ts
import {EventEmitter, Injectable} from '@angular/core';
declare var Spinner: any;
@Injectable()
export class AppLoadingService {
private loading = [];
public loadingEventEmitter: EventEmitter<Boolean>;
constructor() {
this.loadingEventEmitter = new EventEmitter();
}
public setLoading(component: string) {
this.loading[ component ] = true;
this.fireLoadingStatusChanged();
}
public finishedLoading(component: string) {
if (component in this.loading) {
delete this.loading[ component ];
this.fireLoadingStatusChanged();
}
}
public resetAll() {
this.loading = [];
this.fireLoadingStatusChanged();
}
fireLoadingStatusChanged() {
this.loadingEventEmitter.emit(this.hasLoadingComponents());
}
private hasLoadingComponents() {
for (var key in this.loading) {
if (this.loading[ key ] == true) {
return true;
}
}
return false;
}
} |
import os

from waitress import serve

from app import application

# Bind address and port are configurable via the environment so the same
# entry point works in containers and behind process managers; the defaults
# preserve the original behaviour (listen on all interfaces, port 8080).
serve(
    application,
    host=os.environ.get("HOST", "0.0.0.0"),
    port=int(os.environ.get("PORT", "8080")),
)
|
The Histopathological characteristics of livedo reticularis Background: Livedo reticularis (LR) is a skin disorder characterized by a reddish-blue mottling of the skin in a fishnet reticular pattern. The aim of this study was to investigate the major histopathological characteristics of LR and determine whether the histopathology of LR is more common in whitish areas or in the erythematous areas. |
Boeing has raised its 20-year forecast for passenger and cargo aircraft deliveries to 42,730 jets, with the value at list prices hitting $6.3 trillion.
The new figures show a 4.1 per cent increase over the previous forecast, which projected a total of 41,030 plane deliveries.
The US plane maker said that rising passenger traffic and upcoming airplane retirements will drive the need for more jets.
The company's annual forecast, renamed the Commercial Market Outlook (CMO) to include detailed analysis of the dynamic aviation services market, was presented at the Farnborough International Airshow.
"For the first time in years, we are seeing economies growing in every region of the world. This synchronised growth is providing more stimulus for global air travel. We are seeing strong traffic trends not only in the emerging markets of China and India, but also the mature markets of Europe and North America," said Randy Tinseth, vice president of Commercial Marketing for The Boeing Company. "Along with continued traffic expansion, the data show a big retirement wave approaching as older airplanes age out of the global fleet."
The single-aisle segment will see the most growth over the forecast period, with a demand for 31,360 new airplanes, an increase of 6.1 per cent over last year. This $3.5 trillion market is driven in large part by the continued growth of low-cost carriers, strong demand in emerging markets, and increasing replacement demand in markets such as China and Southeast Asia.
The widebody segment calls for 8,070 new airplanes valued at nearly $2.5 trillion over the next 20 years. Widebody demand is spearheaded, in part, by a large wave of replacements beginning early in the next decade and airlines deploying advanced jets such as the 787 Dreamliner and 777X to expand their global networks.
Additionally, Boeing projects the need for 980 new production widebody freighters over the forecast period, up 60 airplanes over last year. In addition, operators are forecasted to buy 1,670 converted freighters.
Boeing also sees a strong demand for aviation services ranging from supply chain support (parts and parts logistics), to maintenance and engineering services, to aircraft modifications, to airline operations. Over the next 20 years, the plane maker forecasts an $8.8 trillion market for commercial aviation services with annual growth of 4.2 per cent. |
package dev.imabad.mceventsuite.core.modules.audit.db;
import dev.imabad.mceventsuite.core.api.objects.EventPlayer;
import dev.imabad.mceventsuite.core.modules.ac.db.PlayerBan;
import dev.imabad.mceventsuite.core.modules.eventpass.db.EventPassPlayer;
import dev.imabad.mceventsuite.core.modules.mysql.MySQLDatabase;
import dev.imabad.mceventsuite.core.modules.mysql.dao.DAO;
import org.hibernate.Session;
import org.hibernate.Transaction;
import org.hibernate.query.Query;
import javax.persistence.NoResultException;
import java.util.Collections;
import java.util.List;
public class AuditLogDAO extends DAO {

    public AuditLogDAO(MySQLDatabase mySQLDatabase) {
        super(mySQLDatabase);
    }

    /**
     * Persists a single audit log entry in its own transaction.
     * On failure the transaction is rolled back (best effort) and the error
     * is printed; the exception is not propagated, keeping audit logging
     * fire-and-forget as in the original implementation.
     */
    public void saveAuditLog(AuditLogEntry auditLogEntry){
        Session session = mySQLDatabase.getSession();
        Transaction tx = null;
        try {
            tx = session.beginTransaction();
            session.save(auditLogEntry);
            tx.commit(); // Flush happens automatically
        }
        catch (RuntimeException e) {
            // tx is still null when beginTransaction() itself threw; guard so
            // the original exception is not masked by a NullPointerException.
            if (tx != null) {
                tx.rollback();
            }
            e.printStackTrace();
        }
        finally {
            session.close();
        }
    }

    /**
     * Returns every audit log entry recorded against the given player,
     * or an empty list when there is none.
     */
    public List<AuditLogEntry> getPlayerHistory(EventPlayer player){
        try (Session session = mySQLDatabase.getSession()) {
            Query<AuditLogEntry> q = session.createQuery("select p FROM AuditLogEntry p WHERE p.actionedPlayer = :player", AuditLogEntry.class);
            q.setParameter("player", player);
            try {
                return q.getResultList();
            } catch (NoResultException e) {
                return Collections.emptyList();
            }
        }
    }
}
|
def render(self):
return [item.render_notification(index=index) for index, item in
enumerate(self.active_notification_list)] |
Catalysts for polymerization or copolymerization of ethylene, which include magnesium, are known to have very high catalytic activity and provide polymers of high bulk density, and are suitable for liquid phase or gas phase polymerization. Liquid phase polymerization of ethylene is a polymerization process performed in a medium such as bulk ethylene, isopentane or hexane. The important features of the catalysts for the processability in this process are catalytic activity, bulk density of the resultant polymers, and the amount of low molecular weight material melted into the medium. The molecular weight distribution of the polymers depends on the characteristic of catalyst itself and the polymerization process. The polymers which are produced by using Ziegler-Natta type catalysts in a single reactor, as a slurry or in the gas phase, generally have narrow molecular weight distribution. Because of this molecular structure, processability of polymers is lowered and tensile strength and rigidity in the molten state is limited, thereby resulting in the distortion, contraction of shape and Parrison deflection during processing. Moreover, these polymers are difficult to apply to the production of large scale pipes or large blow molding products which require high mechanical resistance in a molten state. When the molecular weight of the producing polymer is increased, it has an advantage of increasing the tensile strength, however is more difficult to process. This causes problems such as cracking during processing. In order to overcome these problems, it is desirable to increase the molecular weight while having a broad molecular weight distribution since excellent processability can be obtained while maintaining high tensile strength.
Many titanium-based catalysts containing magnesium for olefin polymerization, and the manufacturing methods thereof have been reported. Specifically processes making use of magnesium solutions to obtain catalysts which can generate olefin polymers of high bulk density are known. Magnesium solutions may be obtained by reacting magnesium compounds with such electron donors as alcohols, amines, cyclic ethers, or organic carboxylic acids in the presence of a hydrocarbon solvent. The formation of magnesium solutions by the reaction of magnesium compounds with an alcohol is disclosed in U.S. Pat. Nos. 3,642,746, 4,336,360, 4,330,649 and 5,106,807. Further, methods for the production of catalysts containing magnesium by reacting a liquid-phase magnesium solution with a halogenated compound, such as titanium tetrachloride, are well known. Moreover, there have been attempts to control polymerization activity or molecular weight distribution of polymers by adding ester compounds. Such catalysts provide high bulk density to the resultant polymers, but their catalytic activities or the molecular weight distribution of the resulting polymers could be improved. Moreover, tetrahydrofuran, a cyclic ether, has been used as a solvent for a magnesium compound in U.S. Pat. Nos. 4,477,639 and 4,518,706.
Meanwhile, U.S. Pat. Nos. 4,847,227, 4,816,433, 4,829,037, 4,970,186, and 5,130,284 have reported the use of such electron donors as dialkylphthalate, phthaloyl chloride, etc. in reaction with a titanium chloride compound for the production of olefin polymerization catalysts having improved polymerization activity, and which are capable of enhancing the bulk density of resultant polymers.
U.S. Pat. No. 5,459,116 has reported a method of production of a titanium solid catalyst by contact-reacting a magnesium solution containing an ester having at least one hydroxyl group as an electron donor with a titanium compound. By this method, it is possible to obtain a catalyst having high polymerization activity, which accords high bulk density to resultant polymers, but there is room for yet more improvements.
U.S. Pat. No. 3,899,477 has disclosed a catalyst which is prepared from titanium halide, vanadium halide and organoaluminum compounds. This catalyst can be used to produce polymers having broad molecular weight distributions by treating the catalyst with alkylaluminum sesquiethoxide and trialkylaluminum before polymerization. This prior art, however, has disadvantages in that the catalyst production process is complicated and it is difficult to control the polymerization process conditions due to the differences between titanium and vanadium reactivity to hydrogen, monomers and co-monomers.
As described above, it is very difficult to produce polymers with broad molecular weight distribution by using common Ziegler-Natta type catalysts while maintaining high catalytic activity and high bulk density of resultant polymers. Moreover, conventional preparation techniques for catalysts tend to be complicated and such catalysts tend to make control of polymerization processes difficult. Consequently, a method for controlling the molecular weight distribution of polymer is needed in order to expand the use of ethylene polymer and to improve their processability and physical properties. |
This invention relates to sensor systems and to security alarms which incorporate sensor systems.
In British Patent Specification No. 2 306 035, to which reference should be made, there is described a support system for a shelf or similar surface on which one or more articles can be placed, the system including one or more piezoelectric devices for partially or wholly supporting the weight of the shelf or similar surface, and means whereby an output voltage will be generated proportional to the change of weight when objects are placed on or taken from the shelf or similar surface.
It is an object of the present invention to provide a sensor system which can be used to detect the attempted theft or damage of items displayed or stored by hanging them on walls, partitions or similar substantially vertical structures, as opposed to items which are displayed by placing them on shelves or similar substantially horizontal structures.
It is another object of the present invention to provide an improved sensor system which can be used to detect the attempted theft of, or tampering with, small and lightweight items displayed or stored on horizontal or inclined surfaces.
It is a further object of the present invention to provide a sensor system which includes improved means for filtering out the effects of environmental disturbances, such as vibration and wind-induced movement.
According to a first aspect of the present invention there is provided a sensor system for use in detecting movement of an item suspended from a wall or other substantially vertical structure, or for detecting movement of an item displayed or stored on a horizontal or inclined surface, the sensor system including a piezoceramic element which is mounted on a plate which, in use, is located between the item and the structure or surface, and means for detecting the generation of an output by the piezoceramic element as a result of deflection of the plate in either of two opposite directions as a result of movement of the item.
The plate may have a first edge portion which is attached to a mounting and is bent out of the plane of the remainder of the plate, the plate being positioned so that a second edge portion, diametrically opposite to the first edge portion, is in contact with the item, for example, adjacent the lower edge of an item in the case of an item suspended from a wall or other substantially vertical structure.
The arrangement is thus such that, in use, the weight of a painting or other item suspended from the wall or other substantially vertical structure will create a small degree of deflection of the plate. Pressing the painting or other item towards the structure will alter the degree of deflection and thus generate an output, while moving the painting or other item away from the structure will also alter the degree of deflection and generate an output.
The same effect will be obtained if an attempt is made to move an item displayed or stored on a horizontal or inclined surface.
The plate may alternatively be in the form of a brass or other metal disc which is attached at its one edge to a mounting and has its diametrically opposite edge in engagement with a pad fixed to or in contact with the item, with the arrangement such that the disc is caused to deflect and an output is generated by the piezoceramic element in response to movement of the item in either direction.
In a further alternative arrangement, the disc is mounted on a resiliently deformable metal strip or plate, for example, a spring steel strip, such that a part of the strip, typically an end of the strip, is in contact with the item, with the arrangement such that the strip is caused to deflect (and will transmit such deflection to the disc) in response to movement of the item in either direction and an output will be generated by the piezoceramic element.
According to a second aspect of the present invention there is provided a sensor system for detecting movement of an item suspended from a wall or other substantially vertical structure, the sensor system including a piezoceramic element fixed to one side of a plate and a permanent magnet fixed to the other side of the plate.
The plate is preferably in the form of a brass or other metal disc that is mounted within a housing in such manner that the periphery, or a portion of the periphery, of the disc is clamped while the centre portion of the disc is free to move.
The piezoceramic element is preferably in a pre-stressed state when the sensor system is fitted to the item being protected.
According to a third aspect of the present invention there is provided a security alarm system which includes a sensor system as defined above for generating an output, a processing circuit for processing the output from the sensor system, and an alarm, the processing circuit including a slew rate limited anti-vibration low pass filter for differentiating between true alarm and false alarm signals.
Any suitable form of alarm may be employed. For example, it may be an audible siren, a radio or infra red transmitter, or a relay which changes the state of a switch in an external circuit.
The slew rate limited anti-vibration low pass filter is preferably arranged to filter out signals which have a duration less than a predetermined length of time and is preferably arranged in series with a pulse stretcher to ensure that the length of time for which the alarm is operated is greater than the length of time for which an output is generated by the slew rate limited anti-vibration low pass filter.
The arrangement may be such that, for example, signals having a duration of less than one second are filtered out by the slew rate limited anti-vibration low pass filter. Normal filters are amplitude dependent. The slew rate limited anti-vibration low pass filter is, however, not amplitude dependent.
The alarm may form part of a unit which, in the case of a wall-hung item, is located between the item and the wall. Alternatively, the alarm may be at a remote location with a signal transmitted to the alarm by means of an induction loop device. |
Multiresidue determination of zeranol and related compounds in bovine muscle by gas chromatography/mass spectrometry with immunoaffinity cleanup. A gas chromatography/mass spectrometry (GC/MS) method with immunoaffinity cleanup was developed for the determination of zeranol and related compounds, taleranol, zearalanone, and alpha-zearalenol in bovine muscle. Muscle samples were extracted with methanol and cleaned up with immunoaffinity chromatography (IAC) columns containing monoclonal antibodies raised against zeranol coupled to CNBr-activated Sepharose 4B. After derivatization, the compounds were analyzed by GC/MS. The dynamic column capacities for zeranol, taleranol, zearalanone, and alpha-zearalenol were 2639.7, 2840.3, 2731.5, and 2736.3 ng/mL Sepharose gel, respectively. The limits of detection and quantification were 0.5 and 1.0 ng/g, respectively, for all 4 compounds. Mean recoveries were 79.6-110.7% with coefficients of variation of 3.2-11.4% at spiked levels of 1.0-5.0 ng/g. This IAC-GC/MS method may be used for the determination of zeranol, taleranol, zearalanone, and alpha-zearalenol residues in bovine muscle, and possibly other tissues. |
<filename>src/main/java-gen/io/dronefleet/mavlink/ardupilotmega/MavModeGimbal.java
package io.dronefleet.mavlink.ardupilotmega;
import io.dronefleet.mavlink.annotations.MavlinkEntryInfo;
import io.dronefleet.mavlink.annotations.MavlinkEnum;
/**
 * Operating states of a MAVLink-controlled gimbal, progressing from power-on
 * through per-axis calibration to active stabilization.
 */
@MavlinkEnum
public enum MavModeGimbal {
    /**
     * Gimbal is powered on but has not started initializing yet
     */
    @MavlinkEntryInfo(0)
    MAV_MODE_GIMBAL_UNINITIALIZED,

    /**
     * Gimbal is currently running calibration on the pitch axis
     */
    @MavlinkEntryInfo(1)
    MAV_MODE_GIMBAL_CALIBRATING_PITCH,

    /**
     * Gimbal is currently running calibration on the roll axis
     */
    @MavlinkEntryInfo(2)
    MAV_MODE_GIMBAL_CALIBRATING_ROLL,

    /**
     * Gimbal is currently running calibration on the yaw axis
     */
    @MavlinkEntryInfo(3)
    MAV_MODE_GIMBAL_CALIBRATING_YAW,

    /**
     * Gimbal has finished calibrating and initializing, but is relaxed pending reception of first
     * rate command from copter
     */
    @MavlinkEntryInfo(4)
    MAV_MODE_GIMBAL_INITIALIZED,

    /**
     * Gimbal is actively stabilizing
     */
    @MavlinkEntryInfo(5)
    MAV_MODE_GIMBAL_ACTIVE,

    /**
     * Gimbal is relaxed because it missed more than 10 expected rate command messages in a row. Gimbal
     * will move back to active mode when it receives a new rate command
     */
    @MavlinkEntryInfo(6)
    MAV_MODE_GIMBAL_RATE_CMD_TIMEOUT
}
|
The Ram Mandir issue continues to be on BJP's "core agenda", it said today.
MUMBAI: The Ram Mandir issue continues to be on BJP's "core agenda", it said here today.
"It has never been outside BJP's core agenda, it continues to be....it shall be. Ram Mandir at Ayodhya is a matter of devotion," BJP spokesperson Nirmala Sitharaman said, answering a question at a press meet.
She also accused the Congress-led UPA government of "repeatedly" using CBI for political purposes.
"CBI is repeatedly used to harass the opponents and to favour some opponents who support the government," she said.
Calling the UPA's Bharat Nirman campaign "false", she said, "It is an unfinished work. What action they have taken to control the prices of the essential commodities? The approach of the Congress is something which the media and the public of India must understand."
Sitharaman also asked why the government did not file a chargesheet against former BJP president Nitin Gadkari over the Income Tax-related allegations.
"If the Congress-led government is in the Centre, and in Maharashtra too, then why not a single chargesheet was filed against Nitin Gadkari? It (the allegations) was the false propaganda of Congress," she said. |
URBAN FOREST DEVELOPMENT IN WEST AFRICA: BENEFITS AND CHALLENGES Urban forests have played important roles in social, cultural, economic and environmental development of urban centers in West Africa through benefits such as landscape enhancement, provision of recreational and cultural facilities, erosion control, watershed protection and supply of fruits and fuelwood. The growing population and rapid urbanization occasioned by demographic switch from rural to urban society is outstripping the planning and carrying capacity of municipal authorities in West Africa. West African population increased from 132.2 million in 1980 to 234.0 million in 2000 and projected to attain 344.0 million by 2020. This study assessed development of urban forestry, evaluated the effect of population growth on urban forests landscape and designed innovative strategies that will ensure sustainability and improvement of urban living environment. Urban forestry development was investigated through secondary data, directed enquiries to relevant stakeholders, on-the-spot assessment and observations of urban and peri-urban forest facilities in selected West African cities. The result revealed that numerous environmental and material benefits are derived from urban forests in West Africa. The types of tree species planted for landscape enhancement, environmental protection and other benefits varied with ecological zones and cultural values. In Sahel savannah, the preponderance of Adansonia digitata, and Acacia species were observed while in Guinea and Sudan savannah, Azadirachta species, Eucalyptus species, Acacia species and Gmelina arborea dominated. Terminalia species, Gmelina arborea, Tectona grandis, Delonix regia, species of palm, etc were common in tropical rainforest zone. Rapid urban population growth, limited land area, and poor implementation of government policies are some factors affecting urban forests development in West Africa. 
To ensure sustainable development, forest management technique that accommodates human, social, political, cultural and economic factors should be adopted. Political goodwill should be secured while appropriate social awareness should be promoted to guarantee that forest initiatives are upheld. Key-words: Urban forestry, landscape, sustainable development, West Africa, Ecological zones Joseph A. Fuwape Jonathan C. Onyekwelu 1Department of Forestry and Wood Technology, Federal University of Technology, P.M.B 704, Akure, Nigeria. [email protected] Journal of Biodiversity and Ecological Sciences JBES IAU of Tonekabon Branch Tonekabon, Iran [email protected] Received Date:Aug/12/2010 Accepted Date:Nov/19/2010 Original Article No.1, Vol.1, Issue1 ISSN: 2008-9287 Winter 2011 JBES Journal of Biodiversity and Ecological Sciences |
A school teacher, Sathish, has been arrested under the POCSO Act for sexually abusing school girls.
SALEM: A school teacher, Sathish, has been arrested under the POCSO Act for sexually abusing school girls.
Sathish had recently joined as Mathematics teacher in a private school in Meyyanur. He tried to sexually abuse the school girls by luring them with sweets.
After a girl complained about this to her parents, they took this to the school management and told them about the incident. Based on the parents' complaint, the school dismissed Sathish.
When Sathish came back to school to collect his documents, angered parents got hold of him and started beating him.
Later, Pallapatti police came to the spot and took him to the police station. |
//*****************************************************************************
//
//! Stops the motor drive.
//!
//! This function stops the motor drive.  If the motor is currently running
//! (in either direction), it begins a controlled deceleration to a stop; if
//! the drive is still in a startup phase, the PWM outputs are shut off
//! immediately.
//!
//! \return None.
//
//*****************************************************************************
void
MainStop(void)
{
    //
    // Disable the PWM interrupt while the drive state is examined and
    // modified.  NOTE(review): this presumably keeps the PWM ISR from
    // observing/advancing the state machine mid-update -- confirm against
    // the ISR for INT_PWM0_2.
    //
    IntDisable(INT_PWM0_2);

    //
    // Forward run: begin a controlled stop.
    //
    if(g_ulState == STATE_RUN)
    {
        //
        // Only (re)load the deceleration rate if a deceleration is not
        // already in progress.  The parameter is shifted into 16.16
        // fixed-point form.
        //
        if(g_ucMotorStatus != MOTOR_STATUS_DECEL)
        {
            g_ulDecelRate = g_sParameters.usDecel << 16;
        }

        g_ulState = STATE_STOPPING;
    }

    //
    // Backward run: same treatment, but transition to the backward
    // stopping state.
    //
    if(g_ulState == STATE_BACK_RUN)
    {
        if(g_ucMotorStatus != MOTOR_STATUS_DECEL)
        {
            g_ulDecelRate = g_sParameters.usDecel << 16;
        }

        g_ulState = STATE_BACK_STOPPING;
    }

    //
    // If the drive is still starting up (tested via the startup flag bit
    // rather than an exact state value), abort immediately: blink the run
    // LED, mark the drive stopped, and kill the PWM outputs.
    //
    if(g_ulState & STATE_FLAG_STARTUP)
    {
        UIRunLEDBlink(200, 25);
        g_ulState = STATE_STOPPED;
        g_ucMotorStatus = MOTOR_STATUS_STOP;
        PWMOutputOff();
    }

    //
    // Re-enable the PWM interrupt now that the state update is complete.
    //
    IntEnable(INT_PWM0_2);
}
Spatially-Resolved Spectroscopy of a Balmer-Dominated Shock in the Cygnus Loop: An Extremely Thin Cosmic-Ray Precursor? We present high-resolution long-slit spectroscopy of a Balmer-dominated shock in the northeastern limb of the Cygnus Loop with the Subaru high dispersion spectrograph. By setting the slit angle along the shock normal, we investigate variations of the flux and profile of the H-alpha line from preshock to postshock regions with a spatial resolution of about 4 times 10^{15} cm. The H-alpha line profile can be represented by a narrow (28.9+/-0.7 km/s) Gaussian in a diffuse region ahead of the shock, i.e., a photoionization precursor, and narrow (33.1+/-0.2 km/s) plus broad (130-230 km/s) Gaussians at the shock itself. We find that the width of the narrow component abruptly increases up to 33.1+/-0.2 km/s, or 38.8+/-0.4 km/s if we eliminate projected emission originating from the photoionization precursor, in an unresolved thin layer (<4 times 10^{15} cm at a distance of 540 pc) at the shock. We show that the sudden broadening can be best explained by heating via damping of Alfven waves in a thin cosmic-ray precursor, although other possibilities are not fully ruled out. The thickness of the cosmic-ray precursor in the Cygnus Loop (a soft gamma-ray emitter) is an order of magnitude thinner than that in Tycho's Knot g (a hard gamma-ray emitter), which may be caused by different energy distribution of accelerated particles between the two sources. In this context, systematic studies might reveal a positive correlation between the thickness of the cosmic-ray precursor and the hardness of the cosmic-ray energy distribution. Introduction Supernova explosions produce strong shock waves propagating into the interstellar medium (ISM). 
Whereas these shocks are usually associated with bright optical forbidden lines from metals (radiative shocks) and permitted lines from hydrogen, a small subsample of supernova remnants (SNRs) exhibit faint Balmer-dominated shocks which are characterized by H lines and very weak forbidden lines from lowly-ionized metals (e.g., Raymond 2001;Heng 2010;, for reviews). Diagnostics of the H line profile have been used to probe "collisionless" shocks, where the shock transition occurs effectively by collective interactions of the plasma with the magnetic field rather than Coulomb collisions. The H line profile behind a Balmer-dominated shock consists of narrow and broad components. The narrow component arises from collisional excitation of cold neutrals that pass through the shock, and the broad component arises from collisional excitation and cascading processes of hot neutrals that are created by charge exchange (CX) reactions between hot downstream protons heated at the shock and the cold neutrals unaffected by the shock transition region (;). Therefore, the widths of the narrow and broad components should reflect temperatures of unshocked neutrals and shocked protons, respectively. It has been a long-standing problem that the widths of the narrow component, FWHMs ∼ 30 − 50 km s −1, are broader than the thermal width expected in the ISM, roughly 21 (T /10 4 K) 0.5 km s −1 (e.g., ). Although the broadening can be partly due to heating by a photoionization precursor from postshock emission such as He II 304 (e.g., ), this effect is insufficient to quantitatively explain the width observed. Various possibilities have been considered in the interpretation of the nature of the additional broadening (;). Of these, the most likely is a cosmic-ray (CR) precursor, in which the gas is heated by damping of Alfvn/sound waves emitted by CRs and/or is disturbed by Alfvn wave turbulence. Lee et al. (2007Lee et al. 
(, 2010) discovered a possible CR precursor associated with a Balmer-dominated shock at the eastern edge (Knot g: Kamper & van den Bergh 1978) in Tycho's SNR, where both the intensity and width of the H line gradually increases within a thin (∼10 16 cm) region ahead of the shock. Wagner et al. showed that the precursor can heat the gas by damping of sound waves from CRs via an acoustic instability. So far, it is the only known example of a promising CR precursor (see also, reporting a hint of the CR precursor from Kepler's SNR). Therefore, it is essential to accumulate observational information about CR precursors by observing other Balmer-dominated shocks. Balmer-dominated shocks in the Cygnus Loop offer a unique opportunity for the study of thin precursors, thanks to its proximity -it is the closest (d =540 +100 −80 pc: ) among 11 SNRs (both in our Galaxy and the LMC) associated with Balmer-dominated shocks, with 1 corresponding to 2.6 10 −3 pc. Recently, Medina et al. performed high-resolution spectroscopy of a number of shocks along with diffuse emission 2.5 ahead of northeastern (NE) shocks of the Cygnus Loop. They found that H line widths in the diffuse regions are as narrow as ∼29 km s −1, while H profiles of the shocks themselves consist of broader narrow (∼36 km s −1 ) and broad (∼250 km s −1 ) components. They suggested that the H broadening of ∼29 km s −1 in the diffuse region is due to photoionization, and speculated that the additional broadening of the narrow component at the shock originates from a thin CR precursor. Like Tycho's Knot g (), such a thin precursor may be detected by spatially-resolved spectroscopy of the Balmer-dominated shock. Here, we present high-resolution long-slit spectra which are spatially resolved perpendicular to the best-studied Balmer-dominated shock in the Cygnus Loop (;Fesen & Itoh 1985;;;;;;). 
Consistent with the previous optical spectroscopy (), we detect a diffuse precursor ahead of the shock throughout the slit position. We find that the width of the narrow component increases within an unresolved region at the shock, which we attribute to heating by damping of Alfvn waves in a CR precursor. We present our observations and results in Section 2, and interpretations in Section 3. Observations and Results We observed one of the brightest Balmer-dominated shocks in the NE limb of the Cygnus Loop on 2015 August 31, using the Subaru High Dispersion Spectrogram (HDS: ). We use a long slit together with the H order-blocking filter. The slit width is 1, which gives velocity resolution of FWHM=9 km s −1. The slit was centered at (RA, Decl.) = (20:56:04.8, +31:56:46.7) (J2000), with a position angle of 46 measured north of east as shown in Fig. 1 (a). This way, we investigate spatial variations of the line profile along the shock normal, i.e., parallel to the shock motion. The total exposure time was 630 minutes, which was reduced to 2.5 hr after rejecting one frame affected by fog. The spectrum was binned by 2 pixels for both the dispersion axis and the slit direction, so that the pixel scales after the binning were 0.0364/pixel and 0.276 /pixel, respectively. The wavelength range covered by this observation is 6515-6589. The seeing was about 0.5. We performed a standard processing of the Subaru data, including overscan correction, flat fielding, and wavelength calibration based on the spectrum from a Th-Ar lamp. We reject cosmic-ray backgrounds, by taking the median of the four frames. In this processing, we utilize the version 2.16 of the IRAF software 1. Figure 1 (b) shows a fully processed two-dimensional spectrum of the H line, for which the x-and y-axis are responsible for the wavelength and the slit position (top is to the NE), respectively. The four blobs near the bottom correspond to the Balmer-dominated shocks 1-4. 
Since we did not obtain a sky background frame separately, we adopt the far upstream region between two dotted horizontal lines in Fig. 1 (b) as our background. The backgroundsubtracted H intensity profiles along the slit position are plotted in Fig. 1 (c), where the black and red are responsible for narrow and broad components, respectively. The broad component arises immediately behind the bright shocks, whereas the narrow component extends ahead of them as indicated by arrows with "photoionization precursor" in the plot (see below). To investigate spatial variations of the H profile, we extract one-dimensional spectra from 21 regions along the slit, as indicated by a vertical scale bar in Fig. 1 (b). The regions are divided such that brighter regions have smaller widths with a minimum width of 2 pixels or 0.55. Figure 2 shows a background-subtracted one-dimensional spectrum taken from the brightest region indicated by a horizontal arrow in Fig. 1 (b), together with the background spectrum in green. The H line profile can be reproduced by two Gaussian components: a broad component (dotted red line) arising from a shock-heated gas and a narrow component (dotted black line) arising from unshocked neutrals. Another small peak at 6583.4 is the line originating from the Cygnus Loop itself. Below, we will fit both of these two lines by either a single Gaussian or double Gaussians, after taking account of instrumental broadening of FWHM=9 km s −1. Our analysis shows that only a narrow Gaussian component is required in the precursor, with an upper limit of broad-to-narrow intensity ratios (I b /I n ) of 0.3. On the other hand, two Gaussian components are required at the Balmer-dominated shocks and its interior where I b /I n ratios range from 0.75±0.02 to 2.01∼0.08. 
Such a strong I b /I n difference between the precursor and the Balmer-dominated shock clearly demonstrates that the precursor is not the result of geometric projection of fainter Balmer-dominated shocks, but an intrinsic precursor associated with the bright shocks. The line profiles can be fitted by a single, narrow Gaussian throughout the slit. We measure the width of the line to be narrower than FWHM=8 km s −1 (after considering the instrumental resolution) all along the slit position, which allows us to infer the N temperature to be T < 20, 000 K, consistent with the H temperature in the photoionization precursor. Also, there is a good correlation between the H flux and flux within the photoionization precursor (I H = 9.37 I + 511 photons), as shown in Fig. 3. These facts lead us to conclude that the line entirely arises from the photoionization precursor. This idea is in agreement with the earlier interpretation for the origin of the extremely faint forbidden line emission including and lines from the relevant Balmer-dominated shock (Fesen & Itoh 1985). Figure 4 shows the best-fit Gaussian parameters as a function of the slit position, where x = 0 corresponds to the bottom line of the two-dimensional spectrum in Fig. 1 (b). Within the precursor region (x > 15 ), the width of the H line and the H/ intensity ratio are roughly constant at 28.9±0.7 km s −1 (error-weighted mean FWHM) and 12.1±0.2, respectively, fully consistent with those of a photoionization precursor in the NE Cygnus Loop (). We find from Fig. 4 (b) that the narrow component's width abruptly increases from the precursor region to 33.1±0.2 km s −1 (error-weighted mean FWHM) within a remarkably thin (unresolved) layer at the shock position. We have ruled out a possibility that the sudden broadening is an artifact due to the broad component, based on the fact that the line width does not change much whether or not we introduce the broad component in the narrow-band fitting. 
The flux of the narrow component in the photoionization precursor gradually increases from a far upstream region to the shock front. This may be caused by projection effects -increasing line-of-sight column densities toward the shock, which would be applicable to the relevant shock showing an interaction with a cloud (;). The line centroids of the broad and narrow components, which are taken to be the same between the narrow and broad components in our analysis, do not show significant spatial variations except for the innermost region (x < 7 ). As for the broad component, the width decreases from the outermost shock (230±20 km s −1 ) to the innermost shock (130±5 km s −1 ) within the four bright Balmer-dominated shocks. The difference by a factor of ∼2 can be reasonably interpreted as a result of different shock speeds; we have checked that the outermost Balmer-dominated shock is moving ∼1.5 times faster than the other three shocks, based on our own proper-motion measurements by data from the Hubble Space Telescope (HST). We see an even broader (330 +120 −80 km s −1 ) line at the innermost region. Some of the width may be the projected bulk speed of the shocked gas, if the region is projected at a significant angle, and if both front and back surfaces contribute. The presence of the line at the bright Balmer-dominated shocks (x< 15 ) suggests a possible contamination from the photoionization precursor due to projection. In other words, the H line at the Balmer-dominated shock is a mixture of the narrow (29 km s −1 ) and a broader narrow (>33 km s −1 ) component. Other hints of such a contamination include the I b /I n ratio gradually decreases from the innermost shock to the outermost shock ( Fig. 4 (a)), and the narrow component's width becomes narrower at inter-blob regions than blobs themselves ( Fig. 4 (a) and (b)). Therefore, we try to eliminate the contamination from the photoionization precursor. 
Using the flux and the relation between I H and I , we estimate the contaminating H flux. We then refit the H profiles with three Gaussian components responsible for narrow (photoionization precursor), broader narrow (thin precursor), and broad (postshock) components, as shown in Fig. 5. In this way, we find out broader-narrow components (the blue data in Fig. 4 (a)), whose error-weighted mean FWHM is 38.8±0.4 km s −1 (the blue data in Fig. 4 (b)). Summary and Discussions For a Balmer-dominated shock in the NE Cygnus Loop, we have found that the narrow H component abruptly broadens from 28.9±0.7 km s −1 up to 38.8±0.4 km s −1 (after removing the contamination from a photoionization precursor) within an extremely thin layer ( 0.0013 pc) at the shock position. Here, we assess the nature of this abrupt broadening. As discussed in Smith et al., there are no promising postshock mechanisms (including the conversion of Ly → H, momentum transfer in the process of excitation, elastic collisions between narrow component neutrals and shock heated protons, and molecular dissociation), leaving only preshock mechanisms. Of these, there are two major possibilities: a fast neutral precursor, in which fast neutrals returning from the postshock heat the preshock gas, and a CR precursor described in Section 1. The fast neutral scenario was later disfavored by a simulation by Lim & Raga. The authors found that the thickness of the neutral precursor becomes only ∼10 14 n 1 cm −3 −1 cm, which is smaller than the characteristic length (∼310 14 n 1 cm −3 −1 cm) for CX between upstream protons and neutrals, violating a critical condition that at least one CX reaction must occur to generate warm neutrals; upstream (unshocked) protons are first heated, and then these warm protons will become warm neutrals via CX with upstream cold neutrals. 
These characteristic lengths can be also checked by analytical solutions (Equations 15 and 17 in Ohira 2012) combined with realistic physical parameters for the Cygnus Loop. In addition, Morlino et al., Ohira, and Ohira also confirmed that the returning fast neutrals do not affect the width of the narrow H line for shock velocities 1500 km s −1 like those in the Cygnus Loop. Meanwhile, our measured upper limit of the precursor's length ( 410 15 cm) does not enable us to eliminate (nor support) the possibility of the heating by returning fast neutrals. Thus, precise determination of the precursor size with better spatial resolution is desired to clarify whether or not this idea is still viable. As for the CR precursor scenario, there are two ways to broaden the line. One is Doppler broadening due to Alfvn or magneto sonic turbulence, and the other is the heating via damping of these turbulence. In any case, the affected (warm) protons need to become neutrals via CX with upstream cold neutrals. This condition sets a lower limit of the CR precursor size at the characteristic CX length scale of a few 10 14 cm, which is met for the precursor of our interest (L cr 4 10 15 cm). We now examine if the two waves can really grow in the thin CR precursors. The time scales for Alfvn waves and sound waves to grow are t a ∼ 5 10 7 s V a 6.6 km s −1 ), respectively, where V a is the Alfvn velocity, L cr is the length scale of the CR precursor, V sh is the shock speed, cr is the CR acceleration efficiency, defined by a CR pressure over ram pressure, and V so is the sound velocity. On the other hand, the crossing time of the precursor is t cross ∼ 2 10 8 s L cr 0.001 pc If the magnetic field is amplified in the precursor, a somewhat larger turbulent velocity would be expected, as V a linearly depends on B with B B being expected to stay at unity for strongly amplified magnetic fields. 
However, we should keep in mind that the precursor's length scale is inversely proportional to B (L cr ∝ B −1 ), and that the precursor's thickness must be larger than the characteristic length of CX (310 14 cm). Therefore, the magnetic field would not exceed a few 10 G, which results in an upper limit of the turbulent velocity of ∼10 km s −1. This is significantly smaller than the velocity observed. Therefore, we conclude that Alfvn turbulence can not be the main contributor to the broadening of the narrow component. The heating by damping of Alfvn waves was extensively investigated in light of observations of the Cygnus Loop by Boulares & Cox who revealed that the nonlinear Landau damping () can easily heat the upstream gas up to the temperature observed. In this model, the diffusion coefficient is a key parameter to control the degree of heating. For the relevant precursor, we can estimate ∼ 6 10 22 Lcr 0.001 pc V sh 200 km s −1, at which we expect that T > 2 10 6 K according to Fig. 11 in Boulares & Cox. This is much higher than our measurement, T ∼3.510 4 K. This means that appropriate fine tuning of the model is required, which is beyond the scope of this Letter. However, we can say that such an excessive heating is probably due to the fact that Boulares & Cox assumed that the shock is dominated by CRs, which is unlikely the case for the Cygnus Loop, given the absence of synchrotron X-ray emission, the relatively soft gamma-ray spectrum, and the negligible pressure of nonthermal particles (). It is interesting to note an order of magnitude difference in the thickness of the CR precursors between the Cygnus Loop and Tycho's Knot g: L cr 0.001 pc for the Cygnus Loop and L cr ∼0.01 pc for Tycho's Knot g (). Combined with different shock speeds of V sh ∼ 200 km s −1 (Cygnus Loop: ) and ∼ 3000 km s −1 (Tycho's Knot g: Kamper & van den Bergh 1978;), an even larger difference is expected for the diffusion coefficient, = L cr V sh. 
Since is proportional to E/B with E being the energy of dominant accelerated particles, we speculate that the remarkable difference in is caused by different energy distribution of accelerated particles between the two sources; there is no doubt that CRs in Tycho's SNR have higher energies than those in the Cygnus Loop, judging from the harder gamma-ray spectrum in Tycho's SNR than that in the Cygnus Loop (;). No firm conclusion can be reached yet, however, without information about the value of B. Whatever the nature, the difference of thin precursors' sizes between the Cygnus Loop and Tycho's Knot g gives us a strong motivation to search for a positive correlation between the size of a thin precursor (or diffusion coefficient) and the hardness of CR energy distribution. We thank all the members of the Subaru telescope, especially Akito Tajitsu, for performing our observation. We also thank Masaomi Tanaka for helping our proposal. This work is supported by Japan Society for the Promotion of Science KAKENHI Grant Numbers 25800119 (SK), 26800100 (KM), and 15K05088 (RY). The work by KM is partly supported by World Premier International Research Center Initiative (WPI Initiative), MEXT, Japan. JCR's work was supported by grant HST-GO-13436 to the Smithsonian astrophysical Observatory. Fig. 1 (b). The H profile is fitted by narrow (black dotted line) and broad (red dotted line) Gaussians. The background spectrum is plotted in green. Fig. 4), while the black data (filled triangles) are from the Balmer-dominated shocks (7 < x < 15 ). As indicated by a red dotted line, there is a linear correlation between the H intensity and the intensity in the photoionization precursor. Fig. 2, but the H profile is fitted by three Gaussians, i.e., narrow (black solid line), broader narrow (blue solid line), and broad (red dotted line) components. |
Contrasting Systemic Functional Linguistic and Situated Literacies Approaches to Multimodality in Literacy and Writing Studies Against the backdrop of proliferating research on multimodality in the fields of literacy and writing studies, this article considers the contributions of two prominent theoretical perspectivesSystemic Functional Linguistics (SFL) and Situated Literaciesand the methodological tensions they raise for the study of multimodality. To delineate these two perspectives methodological tensions, I present an analysis of selected recent literature from both approaches and then analyze these tensions further as they emerge in two empirical studies published in this journal illustrating each approach. Despite the fact that SFL and Situated Literacies share some underlying theoretical assumptions and are sometimes drawn upon in concert by scholars, I illustrate how they differ in their treatment of multimodal texts and practicesas well as their methodologiesresearch design, data collected, analytic methods, and possible implications. This article thus seeks to outline the respective contributions of SFL and Situated Literacies to ongoing research on multimodality in literacy and writing studies and to encourage a conversation across theoretical and methodological borders. |
I'm from Ghana and I want to know whether receiving a scholarship at a competitive school is easy or difficult.
Most high-ranking schools also have top-notch financial aid programs. But though many of these colleges pledge to meet 100 percent of demonstrated need, that term can mean very different things to different schools.
Harvard, for example, ranked first on the US News and World Report list of best value universities, costing an average $15,486 for students on financial aid, after need-based grants are factored in. That's about $5,000 cheaper than Columbia, which came sixth on the list, and $10,000 cheaper than Georgetown, which ranked 24th.
Harvard has one of the best financial aid systems in the country. Although they do not offer merit, athletic, or academic scholarships, they have an extensive need-based financial aid system. Application to the college is need-blind, and Harvard claims on its financial aid website that they meet the demonstrated financial need of every student, including international students. The idea behind this policy is that financial need should not deter potential students from a Harvard education. As previously stated, the average cost for students on financial aid is $15,486, and 59.5 percent of students receive some form of aid. Find out more.
Yale has a very similar policy to Harvard: Applications are need-blind for both domestic and international applicants. Again, there are no merit, athletic, or academic scholarships available. 54 percent of Yale students receive financial aid, and the average after-aid price is $16,205. Learn more.
Princeton's financial aid program has grown dramatically since 2001. In this time, their funding for the need-based financial aid program has grown by over 150 percent, while tuition costs have only risen 47 percent. 58.9 percent of Princeton students are receiving some sort of need-based financial aid, and Princeton explicitly states that their financial aid program also covers international students. Princeton's admissions are need-blind, and their financial aid package even takes into consideration the cost of "eating clubs." The average cost for students on financial aid is $17,614.
According to Brown's financial aid program, 46 percent of the class of 2016 received need-based scholarships. However, these scholarships included federal and state need-based scholarships as well—42 percent receive need-based aid from the college. Again, admissions is always need-blind—a policy that was introduced at Brown in 2007. For students on financial aid, Brown's average cost is $22,039.
Columbia has an office of financial aid and educational financing that works with families of students to not only apply for Columbia need-based financial aid but also for federal and state grants. They also offer student loans. Columbia is need-blind for U.S. citizens, residents, and refugees living in the U.S., as well as for transfer students. However, Columbia admissions are not need-blind for international students. 49.9 percent of students receive aid, and the average price for them is $20,435.
Cornell also has a need-based financial aid program, but though admissions is need-blind, international students only receive aid on a “limited” basis, according to their website. 47.7 percent of Cornell students receive aid, and their average cost is $21,849.
At Dartmouth, admissions is need-blind, and students who come from families with total incomes of less than $100,000 receive free tuition at the school with no loans. Financial aid at Dartmouth also extends to any Dartmouth study abroad program. 49.7 percent of Dartmouth students receive aid, and the average cost for those students is $21,587.
Penn, similar to Columbia, offers need-blind admissions to citizens and permanent residents of U.S., Canada and Mexico, but not for international students. However, those students can still apply for financial aid if they get in, though they are encouraged not to if their family can bear the cost themselves. Penn awards $6 million in need-based financial aid annually under a no-loan policy. 44.2 percent of Penn students are on financial aid, bringing the cost for them down to an average $23,552 after financial aid. Learn more.
Stanford has similar need-blind admission policies as Columbia and Penn, stating that they admit only a small number of international students with financial need annually. The average cost for students receiving aid (49.7 percent of the class) is $18,593. For more information, see here.
Caltech has need-blind admissions for U.S. citizens and permanent residents only. They also have substantial financial aid for international students. 52.4 percent of students are on financial aid, and the price for them averages out to $21,551.
Schools in the University of California system—such as UCLA and UC Berkeley—offer need-based financial aid for students who qualify. In addition, the UC system as well as individual UC campuses offer scholarships based on merit and financial need. For example, UCLA has an extensive list of scholarships available, either based on merit and financial need or based on the specific niche the student would be filling in the school—studying humanities and law, for example, qualifies students for the Angelina Ruggie Scholarship Fund. Find out more about UCLA's scholarships.
Williams has a needs-blind admissions policy for domestic applicants only. It promises to meet all demonstrated need of domestic applicants and guarantees no-loan financial aid packages for students whose families make under $75,000 a year. The average cost for students after financial aid is $19,251.
Amherst, another well-respected liberal arts school, just recently extended its needs-blind policy to international students as well as domestic ones. While Amherst claims to meet all demonstrated financial need and does not include loans in their initial financial aid offers, some students do require loans to afford tuition. The average cost for students on aid, who make up 55 percent of the student body, is $16,286.
What is the lowest score an admitted student got on the SAT?
According to The Crimson’s freshman survey for the class of 2017, the lowest self-reported SAT score for a member of Harvard's Class of 2017 was a 1660, almost 600 points below the average score of 2237. According to that same survey, the lowest self-reported high school GPA (on a 4-point scale) was a 3.0, with the average being a 3.94.
If I ask a question, will my name be public?
No! Everything is anonymous, so please feel free to ask us anything about the admissions process. |
Effect of Different Types of Medicaid Managed Care on Childhood Immunization Rates Medicaid managed care can improve access to prevention services, such as immunization, for low-income children. The authors studied immunization rates for 7,356 children on Medicaid in three managed care programs: primary care case management (PCCM; n = 4,605), a voluntary HMO program (n = 851), and a mandatory HMO program (n = 1,900). Immunization rates (3:3:1 series) in PCCM (78%) exceeded rates in the voluntary HMO program (71%), which in turn exceeded those in the mandatory HMO program (67%). Adjusting for race, urban residence, and gender, compared to children in PCCM, children in the voluntary HMO program were less likely to complete the 3:3:1 series (OR = 0.75, CI = 0.63, 0.90), and children in the mandatory HMO program were even less likely to complete the series (OR = 0.59, CI = 0.51, 0.68). Results differed by individual HMOs. Monitoring of outcomes for all types of managed care by Medicaid agencies is imperative to assure better disease prevention for low-income children. |
Psychosocial impact of acne vulgaris: evaluating the evidence. This paper reviews current evidence presented by recent studies on the impact of acne on psychosocial health. Study methodologies, including case-control and cross-sectional surveys, have demonstrated psychological abnormalities including depression, suicidal ideation, anxiety, psychosomatic symptoms, including pain and discomfort, embarrassment and social inhibition. Effective treatment of acne was accompanied by improvement in self-esteem, affect, obsessive-compulsiveness, shame, embarrassment, body image, social assertiveness and self-confidence. Acne is associated with a greater psychological burden than a variety of other disparate chronic disorders. Future studies with a longitudinal cohort design may provide further validation of the causal inference between acne and psychosocial disability provided by the current literature. |
β,β-Dimethylacrylshikonin sensitizes human colon cancer cells to ionizing radiation through the upregulation of reactive oxygen species Shikonin, a naphthoquinone derivative, has been shown to possess antitumor activity. In the present study, the effects of shikonin and its analog, β,β-dimethylacrylshikonin, were investigated as radiosensitizers on the human colon cancer cell line, HCT-116. Shikonin and, to a greater extent, its analog induced apoptosis of HCT-116 cells and further synergistically potentiated the induction of apoptosis when combined with ionizing radiation (IR) treatment. Shikonins also stimulated an increase in reactive oxygen species (ROS) production and IR-induced DNA damage. Pre-treatment with the ROS scavenger, N-acetylcysteine, suppressed the enhancement of IR-induced DNA damage and apoptosis stimulated by shikonins, indicating that shikonins exert their radiosensitizing effects through ROS upregulation. The radiosensitizing effect of shikonins was also examined in vivo using the xenograft mouse model. Consistent with the in vitro results, injection of β,β-dimethylacrylshikonin combined with IR treatment significantly suppressed tumor growth of the HCT-116 xenograft. Taken together, the results show that β,β-dimethylacrylshikonin is a promising agent for developing an improved strategy for radiotherapy against tumors. Introduction Shikonin, a naphthoquinone pigment, is the primary component of root extracts from Lithospermum erythrorhizon. Shikonin and its analogs have been used for the treatment of burns, measles, sore throat, macular eruption and carbuncles. Shikonin and its analogs have also been shown to possess in vitro and in vivo anticancer activity against various types of cancer. Shikonins can inhibit tumor growth and prolong the lifespan of tumor-bearing mice and patients with lung cancer. 
Shikonins mediate apoptosis through multiple mechanisms, including induction of the generation of reactive oxygen species (ROS) and cell cycle arrest via a caspase-dependent mechanism. In addition, shikonin exhibits antiangiogenic activity and can also regulate the activity of topoisomerase I and II, leading to DNA cleavage. Along with surgery and chemotherapy, radiotherapy is one of the most significant modalities for cancer treatment. The use of radiotherapy is primarily limited by intrinsic or acquired resistance to ionizing radiation (IR). In an effort to overcome the radioresistance of cancer cells to improve radiotherapy, a variety of chemical compounds have been tested for their radiosensitizing effects. Curcumin, resveratrol, genistein and flavopiridol have been shown to exhibit radiosensitizing effects on a variety of cancer cells. IR kills cancer cells by inducing DNA damage and generating ROS, which in turn induces further damage of biomolecules, including DNA. The accumulation of ROS also induces the deregulation of the apoptotic signaling pathway, ultimately leading to apoptosis. The radiosensitizing effect of compounds is often associated with ROS upregulation, indicating that the ROS-mediated mechanism may be a significant target for achieving biological enhancement of the effects of radiotherapy. Although shikonin and its derivatives have been reported to have potential anticancer activity, they have not been examined for their effects on radiotherapy. The present study examined whether shikonin and its analog,,-dimethylacrylshikonin, exhibit radiosensitizing effects, and investigated the possible utilization of these compounds as radiotherapy-enhancing agents.,-Dimethylacrylshikonin sensitizes human colon cancer cells to ionizing radiation through the upregulation of reactive oxygen species Apoptosis assay. Cells were pre-treated with shikonins (0.5 M) for 4 h and irradiated. 
The media were then exchanged with fresh media and the cells were incubated for 72 h, followed by Annexin V/propidium iodide (PI)-double staining using Annexin V-FITC Apoptosis Detection kit I (BD Biosciences, Franklin Lakes, NJ, USA). For the ROS scavenging experiment, N-acetylcysteine (NAC; Sigma-Aldrich) was pre-treated at 1 mM for 2 h prior to the treatment with shikonins. Cell death was analyzed using a fluorescence-activated cell sorting (FACS)Calibur apparatus (BD Biosciences). Determination of intracellular ROS level. Intracellular ROS production was measured by staining cells with the fluorescent probe, 2',7'-dichlorofluorescein diacetate (DCF-DA; Invitrogen Life Technologies, Carlsbad, CA, USA). The cells that were treated with a combination of shikonins and IR were incubated with DCF-DA at 1 µM for 30 min. The changes in fluorescence intensity were monitored by flow cytometry using a FACSCalibur apparatus (BD Biosciences). Western blot analysis. Following drug treatment, cell lysates were prepared for western blot analysis. Proteins were resolved by SDS-polyacrylamide gel electrophoresis (Bio-Rad, Hercules, CA, USA) and transferred to nitrocellulose membrane (Whatman, Pittsburgh, PA, USA). Subsequent to the transfer, the membranes were blocked in 5% skimmed milk in 10 mM Tris-HCl (pH 8.0), 150 mM NaCl and 0.05% Tween-20 (TBST; Amresco, Solon, OH, USA) for 30 min, and then incubated with a primary antibody in 5% skimmed milk in TBST for 2 h at room temperature. The membranes were washed three times with TBST and incubated for 1 h in TBST containing horseradish peroxidase-linked anti-immunoglobulin G (IgG). Following three washes in TBST, immunoreactive products were detected by chemiluminescence (ECL Plus; Amersham Pharmacia Biotech, Piscataway, NJ, USA). Mouse monoclonal anti-γH2AX and anti-β-actin antibodies were purchased from Millipore (Billerica, MA, USA) and Santa Cruz Biotechnology, Inc. (Santa Cruz, CA, USA), respectively. 
Ionizing irradiation of cells. Cells were exposed to γ-rays with a 137Cs γ-ray source (Atomic Energy of Canada, Ltd., Ontario, ON, Canada) and a dose rate of 2.6 Gy/min. Immunofluorescence microscopy. The cells were seeded on a cover glass in 24-well plates. The media were removed and carefully rinsed with phosphate-buffered saline (PBS) 30 min following the treatments with shikonins and IR. The cells were fixed with 3.7% paraformaldehyde in PBS for 10 min and washed twice with PBS. Cells were permeabilized for 10 min with 0.1% Triton X-100 followed by blocking with CAS-block (Invitrogen Life Technologies) for 30 min. Cells were then stained by incubating with mouse monoclonal anti-γH2AX antibody (1:500 dilution) followed by goat anti-mouse IgG-Alexa Fluor 555 (Invitrogen Life Technologies) (1:1000 dilution). 4',6-Diamidino-2-phenylindole (DAPI) (50 µg/ml) was added to the secondary antibody mixture to visualize the nuclei. Fluorescence images were obtained using a LSM710 confocal microscope (Carl Zeiss Group, Jena, Germany). Tumor xenograft growth in athymic mice. Athymic nude mice (4-week-old males) were obtained from Orientbio, Inc., (Seoul, South Korea) and were maintained in a laminar air-flow cabinet under specific pathogen-free conditions. The human colon cancer HCT-116 xenograft mouse model was established by subcutaneous inoculation of 2×10⁶ cells into the right hind leg. When the tumor size reached 150 mm³, the mice were randomly divided into six groups (seven mice per group) and treated with either the vehicle (10% dimethylsulfoxide in PBS) or shikonins (2.0 mg/kg) in the presence or absence of IR. Two days after treatment, the second injection was prepared. Locoregional irradiation was applied in single 8-Gy doses using a Co-60 irradiator (Theratron 780; Atomic Energy of Canada). 
Two perpendicular diameters of tumors were measured twice a week with a caliper square by the same investigator, and the tumor volume was calculated using the following equation: Tumor volume (V) mm³ = (smaller diameter)² × (larger diameter) × (π/6). The experiment was terminated when the tumor volume in the control group reached 3000 mm³. All animal protocols were reviewed using the Good Laboratory Practice guidelines of the Radiotherapy Research Center, Korea Institute of Radiological and Medical Sciences (Seoul, Korea). The use of these animals and the experimental procedures were approved by the Institutional Animal Care and Use Committee of the Korea Institute of Radiological and Medical Sciences. Statistical analysis. All data were plotted in terms of mean ± standard error of the mean values. Statistical analysis was assessed using a parametric repeated-measures one-way analysis of variance followed by Tukey's multiple comparison test (Graph Pad version 3; San Diego, CA, USA). A value of P<0.05 was considered to indicate a statistically significant difference. Results Shikonins sensitize cancer cell lines to IR. To investigate the effect of shikonin analogs on the cancer cell response to IR, shikonin and its analog β,β-dimethylacrylshikonin were selected for the present study and their chemical structures are shown in Fig. 1A. First, the effect of shikonins on the proliferation of cancer cells when used alone or in combination with IR was determined. For this experiment, various cancer cell lines, including HCT-116 (colon cancer), LN428 (glioma), H460 (lung cancer) and A549 (lung cancer) cells, were used. The cells were pre-treated with shikonins for 4 h and irradiated at the indicated doses in Fig. 1B. The viability of cells was determined using the MTS assay. Shikonins inhibited the overall proliferation of the cell lines in a dose-dependent manner and exhibited additional effects when combined with IR (Fig. 1B). 
Among the cell lines examined, HCT-116 was the most sensitive to shikonin treatment with respect to inhibition of proliferation, as determined by the MTS assay. Subsequently, the effect of shikonins on the cellular response to IR was evaluated by determining clonogenic cell survival following IR treatment through a colony formation assay. Shikonin exhibited a moderate radiosensitizing effect for HCT-116, but minor effects for the other cell lines, while the radiosensitizing effect of,-dimethylacrylshikonin was considerable for HCT-116 and LN428, but minor for H460 and A549 (Fig. 1C). Overall, these data show that shikonins sensitize HCT-116 cells more efficiently to IR treatment and, Shikonins enhance IR-induced apoptosis. The induction of apoptosis in HCT-116 cells was analyzed following combined treatment of shikonins with IR. The cells were irradiated following pre-treatment or no pre-treatment with shikonins and were analyzed for apoptosis by Annexin V/PI staining at 72 h following irradiation. The cells that stained negative for Annexin V and PI were assigned as undamaged live cells. Shikonin induced marginal cell death, and the extent of further enhancement of cell death by combination with IR was not significant (Fig. 2A). By contrast, treatment with,-dimethylacrylshikonin alone induced significant cell death, and further enhancement of cell death was observed when combined with IR. These results indicate that,-dimethylacrylshikonin is extremely effective and more effective compared with shikonin in rendering HCT-116 cells more susceptible to IR-induced cell death. It has great potential as a radiosensitizing agent. Shikonins enhance IR-induced DNA damage. The effect of shikonins on the extent of IR-induced DNA damage was examined by determining the level of the phosphorylated histone H2AX (H2AX), a well-known marker for DNA double-strand breaks. 
Single treatment with either shikonin or β,β-dimethylacrylshikonin caused weak accumulation of γH2AX in HCT-116 and LN428 cells, indicating that shikonin and β,β-dimethylacrylshikonin can individually induce DNA damage to a certain extent. However, when the cells were treated with a combination of shikonins and IR, only β,β-dimethylacrylshikonin strongly enhanced further IR-induced γH2AX increases (Fig. 2B). The effect of shikonins on the induction of DNA damage was also assessed by visualizing γH2AX foci with immunofluorescence microscopy. Treatment with either of the shikonins increased the formation of γH2AX foci, but β,β-dimethylacrylshikonin-treated cells showed a stronger γH2AX signal intensity compared with shikonin-treated cells following exposure to IR (Fig. 2C). These results indicate that β,β-dimethylacrylshikonin strongly potentiates the induction of DNA damage by IR treatment and that this potentiation is greater compared with that observed with shikonin. Combined treatment of shikonins and IR causes ROS accumulation. ROS generation is one of the primary mechanisms by which IR kills cells, and it has been reported that shikonin causes apoptosis through an ROS/c-Jun N-terminal kinase-mediated signaling pathway in the breakpoint cluster region/Abelson-positive chronic myelogenous leukemia cells. Therefore, it was initially postulated that shikonins modulate the cellular response to IR through the regulation of ROS levels. To investigate this possibility, the effect of a combined treatment of shikonins and IR at the intracellular ROS level was examined. A single treatment with IR (5 Gy) or with either of the shikonins caused a minor increase (~15-25%) in ROS levels in the HCT-116 cells. However, IR treatment following pre-treatment with either of the shikonins resulted in a significant increase (~80%) in ROS levels (Fig. 3A). 
This synergistic effect of combined treatment of shikonins and IR on ROS accumulation indicates that shikonins may predispose cancer cells to accumulate more ROS in response to IR treatment. (Fig. 3B). NAC significantly attenuated the increase in the H2AX level induced by either of the shikonins. NAC also significantly suppressed the synergistic effect of,-dimethylacrylshikonin on IR-induced apoptosis (Fig. 3C). A B C These observations indicate that ROS accumulation plays a critical role in the enhancement of IR-induced DNA damage and subsequent apoptosis by,-dimethylacrylshikonin treatment.,-Dimethylacrylshikonin potentiates the antitumor effect of IR on tumor growth in the HCT-116 xenograft mouse model. To validate the radiosensitizing effect of shikonins in vivo, the HCT-116 xenografts in athymic nude mice were established. Using the experimental procedure described in Fig. 4A, the change in tumor volume was monitored twice a week following the combined treatment with the shikonins and IR. While tumor growth was moderately suppressed by IR alone,,-dimethylacrylshikonin completely retarded tumor growth when coupled with IR treatment (Fig. 4B). The tumor size endpoint, which was measured 30 days subsequent to IR treatment when the tumor volume of the vehicle group reached 3000 mm 3, also manifested the strong effect of,-dimethylacrylshikonin acting synergistically with IR to suppress tumor growth (Fig. 4C). Discussion Radiotherapy is one of the primary modalities in cancer treatment and is generally used in combination with surgery or chemotherapy. The use of high-dose IR also inevitably causes damage to surrounding normal tissues, necessitating the use of agents to sensitize cancer cells to IR treatment, thereby allowing the use of lower doses of radiation. In spite of numerous reports that have demonstrated the antitumor effect of shikonins, the potential applicability of shikonins as radiosensitizers has not been fully examined. 
In an effort to identify novel radiosensitizers, the effect of shikonin and its analog,-dimethylacrylshikonin on the sensitivity of cancer cells to IR treatment was examined. IR-induced cell death was promoted by pre-treatment with shikonin or more strongly with,-dimethylacrylshikonin. Synergistic increases in intracellular ROS levels and DNA damage accompanied the IR-sensitizing action of shikonins. It was also found that the enhancement of IR-induced DNA damage and cell death mediated by shikonins was abolished in the presence of the antioxidant NAC. Since the generation of ROS is one of the primary mechanisms by which IR induces DNA damage and kills cells, these results indicate that further upregulation of ROS to intolerable levels accounts for the radiosensitizing effects of shikonins. A recent study using leukemia cells indicated that the cytotoxicity of shikonin involves the disruption of mitochondrial function, including ROS production and the inhibition of cytoskeleton formation. Shikonin immediately accumulates in the mitochondria and disrupts the mitochondrial membrane potential, followed by the induction of oxidative damage due to the generation of ROS. Several studies have demonstrated the antitumor activity of,-dimethylacrylshikonin via various signaling pathways, including the extracellular signal-regulated kinase and Notch-1 pathways. Notably,,-dimethylacrylshikonin has been reported to inhibit the cellular growth of HCT-116 cells in vitro and of xenografts in vivo. A previous study showed that the induction of apoptosis by,-dimethylacrylshikonin is associated with the upregulation of the proapoptotic proteins, Bax and Bid, and a reduction in the expression of the antiapoptotic proteins, B-cell lymphoma 2 (Bcl-2) and Bcl-XL. This change in the ratio of the proapoptotic/antiapoptotic Bcl-2 family of proteins may have led to ROS generation. 
These observations are in accordance with the overall results in the present study that demonstrated the ROS-mediated radiosensitizing effect of shikonins. In summary, the present study has demonstrated significant radiosensitizing activity of,-dimethylacrylshikonin in vitro and in vivo. These findings indicate that,-dimethylacrylshikonin is a promising candidate for a radiosensitizing agent and may be exploited for the development of a novel strategy for improving radiotherapy against cancerous tumors. |
WFUMB position paper on the management of incidental findings: adrenal incidentaloma Focal lesions of the adrenal glands are incidentally detected in approximately 5% of cases by modern imaging techniques. Fewer than 5% of these adrenal incidentalomas are malignant and approximately 10% have endocrine activity. Reliable differentiation of malignant versus benign and hormonally active versus nonfunctional adrenal incidentalomas significantly influences therapeutic management and the outcome of affected individuals. Therefore, each adrenal incidentaloma should undergo a standardized diagnostic work-up to exclude malignancy and endocrine activity. This position statement of the World Federation of Ultrasound in Medicine and Biology (WFUMB) summarizes the available evidence on the management of adrenal incidentaloma and describes efficient management strategies with particular reference to the role of ultrasound techniques. Introduction The definition of an adrenal incidentaloma (AI) encompasses any focal adrenal lesion, independent of size, discovered by any imaging method including ultrasonography (US), endoscopic ultrasonography (EUS), computed tomography (CT), or magnetic resonance imaging (MRI), in the absence of suspected adrenal disease. Due to the fact that the adrenal glands are the fourth most frequent site of metastasis, independent of the location of the primary tumor (prevalence of metastases: 27%), the definition excludes adrenal lesions that are detected in patients with a suspected or established diagnosis of malignancy. However, in patients with a known malignancy, more than 50% (9%-75%) of solid adrenal masses turn out not to be metastases. Whereas the published literature is full of CT reports, comparatively little has been published for US, which is still the imaging method with the most detailed resolution. 
Conventional US and EUS both allow visualization of the normal adrenal gland and its vascularity (Figs. 1,2). It is possible to detect focal lesions down to 3-5 mm, in particular with transabdominal US for the right gland and with EUS for the left gland (Figs. 3,4). Nevertheless some definitions include only adrenal mass lesions ≥10 mm, as the normal thickness of the body of the adrenal glands varies from 6 to 8 mm (±2 mm). The most frequent pathology underlying an AI is a nonfunctional adenoma. After detection of an AI, there are a few important questions to be answered to determine the need for treatment: What is the prevalence?; Is the AI malignant?; Does the AI have endocrine activity? Adrenal Incidentaloma Prevalence The prevalence of AIs of any size with imaging is reported to be about 5%, ranging from 1% to 12% (with higher rates in older age groups) and the frequency of AIs has approached 8.7% in Fig. 1. Transabdominal ultrasound image of the right adrenal gland (between markers, 34.4 mm length). The anatomical landmarks are the right liver lobe (RLV, right liver vein), the right diaphragm (D), and the inferior caval vein (vena cava inferior ). Layering of the adrenal gland with a hyperechoic central echo representing the medulla, the hypoechoic cortex and hyperechoic capsule are depicted. The thickness of the adrenal gland is less than 7 mm (in this case: 5 mm). 3. Two incidental round solid lesions of the right adrenal gland (1513 mm, between markers, and 1210 mm) as shown on transabdominal ultrasonography. There was no history or suspicion of malignant disease, unenhanced computed tomography showed an attenuation value of below 10 Hounsfield unit, and an endocrine work-up did not reveal any endocrine activity (nonfunctional adenoma). autopsy series. In patients with a high body mass index, diabetes mellitus, and arterial hypertension, the prevalence is even higher. Bilateral AIs are found in about 10%-15% of cases. 
In unselected healthy subjects and in patients with inflammatory bowel disease, a prevalence of 5% was reported using abdominal US [8,. By far the largest data sets have been collected using CT. With state-of-the art contemporary CT examinations, AI was found in 4.4%-5% of individuals. In older studies, the reported prevalence was much lower for both methods, with AIs found in 259 of 61,054 individuals (0.4%) using CT scans performed from 1985 to 1990, and rates of 0.1% or less using abdominal US. Therefore, AI was described to be "a 'disease' of modern imaging technology". The main role of imaging is therefore to limit the invasive management of AI, and particularly the number of surgical adrenalectomies and biopsies. Is the AI Malignant? As shown for focal liver lesions, but also in AI, primary or secondary malignancies in asymptomatic subjects are an uncommon finding [11,15,. The most recent systematic review found a mean prevalence of adrenocortical carcinoma of 1.9% (median, 1.4%) and a mean prevalence of metastases of 0.7% (median, 0.2%). It concluded that due to various biases and misinterpretations of reports, previous reviews have overestimated the risk that an AI is malignant. According to newer data, the real cumulative risk of malignancy in AI may be below 3%. Size Importantly, the size and some imaging features are helpful in determining whether an AI is benign or malignant. A diameter >40 mm is a crucial cut-off since >90% of adrenocortical carcinomas are >40 mm at the time of diagnosis. On the contrary, the smaller the size at the time of diagnosis, the better the prognosis. A systematic review showed that only 2% of all adrenal masses ≤4 cm turned out to be adrenal carcinoma, whereas the prevalence of adrenocortical carcinoma in adrenal masses measuring 4-6 cm was 6%, and in tumors >6 cm it significantly increased to 25%. 
According to a more recent meta-analysis, the cut-off value of 40 mm for malignancy had an area under the curve of 0.92 with high sensitivity (91%), but limited specificity (71%). The pooled positive (3.1) and negative likelihood ratio (0.13) of this 40-mm cut-off value are neither confirmative nor exclusive for malignancy, so that further parameters are needed for a definitive diagnosis. Imaging Features In addition to size, some imaging features should be considered. Most important are the smooth border of a lesion and the amount of fat (attenuation values below 10 Hounsfield units in unenhanced CT) in benign lesions. In AIs >40 mm, criteria correlating with the diagnosis of adenoma versus adrenocortical carcinoma are a round shape, the presence of fat, and precontrast attenuation values <10 HU. Moreover an overall impression of a benign lesion ("benign imaging phenotype") was described to be significantly correlated with the diagnosis of adenoma. In a blinded retrospective study, interobserver agreement was reported to be excellent for precontrast attenuation, substantial for shape, moderate for the presence of fat, and fair for overall impression of benignity. Among the features of malignancy, the presence of calcifications had substantial interobserver agreement, whereas agreement for heterogeneity was only fair and agreement for the presence of necrosis was marginal. MRI is less often used, but the so-called chemical-shift imaging techniques give equivalent results for the estimated amount of fat. The descriptive features for benign AI are an oval or round shape, sharp margins and smooth contour, and homogeneous echo pattern (US) or density (CT). In conclusion, an approach combining size (<40 mm) and HU values (≤10 HU) excludes malignancy in almost all cases. However, approximately 30% of benign adrenal adenomas are lipid-poor, with attenuation values of ≥10 HU. 
Contrast-Enhanced Techniques Results of studies using contrast enhancement for all imaging methods, including contrast-enhanced US, are less convincing. Very small lesion (6 mm, between markers) of the body of the left adrenal gland, which was found incidentally on endoscopic ultrasonography performed for suspected common bile duct stones. The lesion was not found with computed tomography, and an endocrine work-up did not show any endocrine activity. Image-Guided Biopsy In contrast to patients with suspected or proven malignant disease with solid adrenal tumors, in AI the role of image-guided biopsy is limited. Recent guidelines do not recommend image-guided sampling for routine work-up of AIs. A meta-analysis found percutaneous image-guided biopsy to have a sensitivity and specificity of 87% and 100%, respectively, for the diagnosis of malignancy. Definitive differentiation of adrenocortical carcinoma from adenoma is not possible in all cases. The pooled complication rate was 2.5%. For EUS-guided sampling, data are more limited. The diagnostic yield ranges from 76% to 100%, and the risk of complications is very low. With contrast-enhanced US, the combination of early arterial hyperenhancement and rapid wash-out was described to be highly sensitive for the diagnosis of malignancy, but specificity was only moderate. Another study did not find significant differences between malignant and benign adrenal masses with regard to the pattern of contrast enhancement. However, contrastenhanced US may be very helpful for detecting hypervascularity and intratumoral hemorrhage or necrosis, such as in pheochromocytoma (Fig. 5). Combined Imaging Criteria With all imaging techniques, the typical imaging features of adrenocortical carcinoma and metastases are a size >40-60 mm, Fig. 5. A large hypoechoic solid lesion of the right adrenal gland (2545 mm) found incidentally in a patient with arterial hypertension. 
The lesion had smooth borders, but a central area was more hypoechoic than the periphery of the tumor. Contrast-enhanced ultrasonography reveals the high vascularity of the peripheral parts of the tumor, whereas the central area is without any contrast enhancement (hemorrhage, necrosis, marked by arrows). This pattern indicates the diagnosis of pheochromocytoma, which was established by an endocrine work-up and finally by surgical pathology. Does the AI Have Endocrine Activity? Pheochromocytoma and (subclinical) Cushing's syndrome should be excluded in all patients with AI. In patients with arterial hypertension with or without hypokalemia, Conn syndrome should be excluded. All patients with proven pheochromocytoma should undergo surgery. Multiple endocrine neoplasia should be considered. Cushing's Syndrome Subclinical Cushing's syndrome (SCS) is defined by autonomous cortisol secretion (detected by ≥2 abnormalities in a basal or dynamic test of the hypothalamic-pituitary-adrenal axis in patients who do not have the typical signs and symptoms of hypercortisolism). SCS is excluded by performing the 1 mg overnight dexamethasone suppression test (DST). Cortisol might be secreted either dependently or independently of corticotropin (ACTH). Unilateral adenoma with or without somatic mutations in the cAMP-dependent protein kinase A or bilateral macronodular adrenal hyperplasia (BMAH) are found. Recurrent vertebral fractures and less specifically, arterial hypertension, impaired glucose tolerance, or type 2 diabetes mellitus are typical. An abnormal DST indicates ACTH-independent cortisol production (positive if cortisol concentration >5 µg/dL). As a consequence, 24-hour urinary free cortisol and serum ACTH concentrations should be tested and evaluation of dehydroepiandrosterone sulfate metabolism should be performed, as well as a high-dose (8 mg) overnight DST. 
Clinically significant glucocorticoid secretory autonomy is confirmed by a test of earlymorning DST serum cortisol levels. Autonomous glucocorticoid function may also develop over time; therefore, follow-up testing should be considered. However, the efficiency and cost-effectiveness of annual repeated testing is not known. The indications for unilateral (or very rarely bilateral) adrenalectomy are beyond the scope of this review. Briefly, adrenalectomy is recommended for younger patients with proven SCS (excess of glucocorticoid secretion) and those at risk (e.g., with known osteoporosis, arterial hypertension, diabetes mellitus, and obesity). The benefits of adrenalectomy in patients with adrenal tumors and SCS have been suggested in a recent meta-analysis. Hyperaldosteronism and Conn Syndrome Aldosteronomas (<1% of AIs) are typically <20 mm at the time of diagnosis, poorly vascularized, and most often circumscribed. Diffuse mild hyperplasia <10 mm can be encountered. The diagnosis is achieved by measuring the plasma aldosterone concentration to plasma renin activity ratio. In younger patients with unilateral aldosterone-producing adenoma, surgery should be offered to cure the aldosterone excess, whereas aldosterone-antagonistic drugs are the treatment of choice in cases of bilateral hyperplasia with hyperaldosteronism and generally in older patients with comorbidities. Nonfunctional AI Nonfunctional AI (NFAI) should be considered for surgery if >40 mm due to the risk of malignancy, in particular adrenal cortical carcinoma (ACC). The detection of NFAI is predictive of the presence of diabetes mellitus and metabolic syndrome. Smaller NFAIs may be scheduled for repeated imaging after 6 to 12 months to exclude significant growth and, therefore, malignancy. Significant growth is considered to be enlargement >10 mm in diameter during the follow-up period. It should be pointed out that most NFAIs that grow are not malignant. 
The decisions about whether to utilize other imaging techniques, the type of imaging for follow-up, and the time interval are mainly guided by the local circumstances and individual decisions. CT-related radiation exposure should be considered. Adrenal myelolipoma (AML) is a slowly growing benign tumor composed of hematopoietic elements and fat elements with eye-catching imaging features. AML is typically hyperechoic using conventional US and EUS (Fig. 6) and shows abundant fat using CT and MRI. Many other rare focal adrenal lesions may be encountered. Bilateral AI Patients with bilateral AI should be investigated for Cushing's disease, congenital adrenal hyperplasia, and BMAH. The indication for surgery in bilateral NFAI is more restricted. Follow-up Follow-up by repeat imaging and hormonal work-ups is recommended by most recent guidelines for individuals with AI with a benign imaging phenotype and no hormonal activity at initial presentation. Adherence to these recommendations seems to be generally poor in clinical practice. Moreover, most follow-up studies have shown negligible risk of an AI that is consistent with a benign and nonfunctional lesion at initial presentation becoming malignant (0%) or hormonally active (below 0.3%). Based on a systematic review of the available data, a review highlighted the high risk of false-positive results of the recommended examinations and cautioned that the dose of radiation with CT follow-up confers a risk of fatal cancer that is similar to the risk of the AI becoming malignant. Another meta-analysis showed no risk of developing malignancy in 1,298 AIs (pooled from 11 studies) followed for a mean of 44.2 months. Size progression was only marginal (pooled mean increment of 0.03 cm), and the development of endocrine activity was rare (SCS, 1.8%; Cushing's syndrome, 0.7%; pheochromocytoma, 0.4%). 
Therefore, the guidelines of the European Society of Endocrinology (ESE) in 2016 suggested omitting further followup imaging in individuals with an adrenal mass <40 mm and with clear benign features on imaging studies. Moreover, the ESE guidelines suggested against repeated hormonal assessments in individuals with AI who have a normal hormonal work-up at the time of initial presentation, unless clinical signs of endocrine activity develop or metabolic comorbidities or arterial hypertension worsens. Imaging follow-up was recommended for patients with indeterminate adrenal masses opting against adrenalectomy. In case of growth of ≥5 mm and enlargement by >20%, surgical resection is recommended. Other societies suggest using CT protocols with reduced radiation exposure or to individualize follow-up recommendations. The Polish Society of Endocrinology recommended using abdominal US for follow-up in appropriate cases. All procedures are reported to be relatively safe. Clinical Scenarios and Role of Ultrasonography Detection of AI by Transabdominal Ultrasonography US has a high sensitivity for the detection of adrenal mass lesions (in particular for the right gland), even in tumors <20 mm. Therefore, incidental detection of adrenal tumors is a frequent clinical scenario. In a patient/individuals without history, suspicion, or proof of malignant disease, an AI with a maximum diameter of ≤40 mm, a homogeneous echo-rich echo pattern (myelolipoma) and smooth borders or a typical cystic (completely anechoic) pattern ("benign US phenotype") in all likelihood is benign. However, prospective studies comparing the diagnostic accuracy of US to that of unenhanced CT are lacking. Contrast-enhanced US is not helpful for distinguishing malignant and benign lesions. Therefore, in addition to a hormonal work-up, unenhanced CT should be performed in lesions ≥10 mm. 
If endocrine activity is lacking and CT findings are highly predictive for a benign lesion, further imaging or regular follow-up is not necessary. In cases of equivocal CT findings or hormonal activity, further management should be based on a multidisciplinary expert board discussion. Further management options in cases of functional AI are described above. In cases of an incidental adrenal mass with equivocal CT criteria or a diameter of >40 mm and <60 mm, an individualized decision should be made, considering close follow-up, surgery, or further imaging (chemical phase-shift MRI). (E)US-or CT-guided sampling may also be an option in individual cases (e.g., size >40 mm and <60 mm or no definite benign imaging phenotype on imaging, with patient-related factors making surgery less favorable). For smaller lesions, a hormonal work-up should be performed, and follow-up by ultrasound seems reasonable. Detection of AI by Cross-sectional Imaging (CT, MRI) In AIs detected using cross-sectional imaging techniques, the role of US and EUS is limited. Performing US may be useful if, as a result of further work-up, surgery is not the appropriate management strategy for the patient and follow-up is required. If US enables appropriate visualization and measurement of the lesion, due to the absence of radiation exposure, US may be preferable to CT for surveillance. Summary All patients with an AI >10 mm should be evaluated at initial presentation to exclude malignancy and hormonal hyperfunction according to recent guideline recommendations. In cases of a "benign imaging phenotype" on US, additional unenhanced CT should be performed in all lesions ≥10 mm that are not completely anechoic with smooth borders (typical cysts). AIs measuring <40 mm, with a smooth border and CT attenuation value <10 HU, are most probably benign. After exclusion of hormonal activity, a further diagnostic work-up is not recommended. 
However, it is recommended to compare with any prior imaging examinations to evaluate any changes in size. AIs measuring >40 mm and/or with hormonal activity should be considered for surgery. Adrenocortical carcinoma is typically characterized by an irregular shape, an inhomogeneous echo pattern, calcifications, non-enhancing spontaneous hemorrhage or necrosis, and higher CT attenuation values (>20 HU), as well as delayed wash-out (on contrast-enhanced CT). Pheochromocytoma is typically >30 mm or >40 mm at time of diagnosis, highly vascularized with regressive changes and zones of spontaneous necrosis, and sometimes ectopic and multiple. The final diagnosis is established by measuring plasma metanephrines. Cushing's disease is excluded by performing the 1 mg overnight DST. The typical imaging features of Conn syndrome are size <20 mm, an oval or round shape, and a sharply delineated and homogeneous echo pattern. The diagnosis should be considered in patients with arterial hypertension and/or otherwise unexplained hypokalemia, using the plasma aldosterone concentration to plasma renin activity ratio. US should be considered in cases with recommended imaging follow-up, if appropriate US visualization of the mass lesion is possible. In patients with a known primary malignancy elsewhere, the probability of an adrenal mass being metastatic is much higher than in healthy subjects. Histological sampling (US/endoscopic US- or CT-guided) may be considered on an individual basis in patients with an AI measuring >40 mm and <60 mm or with no definite benign imaging phenotype on imaging, or in the case of metastasis (incidentally discovered cancer), if patient-related factors strongly rule out surgery. It has high clinical value in all cases with a history or suspicion of malignancy. 
Measuring Quality: One Hospice's Process This article describes the recent modifications to the Medicare Conditions of Participation as they relate to hospice programs' quality requirements. The process used by one hospice program in the Washington Metropolitan Area to implement the National Hospice and Palliative Care Organization's Quality Partners Self-Assessment Surveys is explained. Baseline survey results were used to identify and prioritize areas for improvement related to the standards of hospice care. 
<filename>app/src/main/java/com/haohaohu/androidanimationsample/TweenAnimActivity.java
package com.haohaohu.androidanimationsample;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.view.animation.Animation;
import android.view.animation.AnimationUtils;
import android.view.animation.LinearInterpolator;
import android.widget.ImageView;
/**
 * Demonstrates a tween (view) animation: loads {@code R.anim.progress_anim}
 * and runs it on an ImageView with a linear interpolator.
 */
public class TweenAnimActivity extends AppCompatActivity {
    private Animation anim;
    private ImageView tweenIv;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_tween_anim);
        initView();
    }

    /** Looks up the target view and starts the looping tween animation. */
    private void initView() {
        tweenIv = (ImageView) findViewById(R.id.tween_iv);
        // Plain 'this' is sufficient; the Activity is itself a Context.
        anim = AnimationUtils.loadAnimation(this, R.anim.progress_anim);
        anim.setInterpolator(new LinearInterpolator());
        tweenIv.startAnimation(anim);
    }

    @Override
    protected void onDestroy() {
        // Fix: the original never stopped the animation. Detach it from the
        // view and cancel it so it does not keep ticking after the activity
        // is destroyed.
        if (tweenIv != null) {
            tweenIv.clearAnimation();
        }
        if (anim != null) {
            anim.cancel();
        }
        super.onDestroy();
    }
}
|
/***
*
* Copyright (c) 1999, 2000 Valve LLC. All rights reserved.
*
* This product contains software technology licensed from Id
* Software, Inc. ("Id Technology"). Id Technology (c) 1996 Id Software, Inc.
* All Rights Reserved.
*
* Use, distribution, and modification of this source code and/or resulting
* object code is restricted to non-commercial enhancements to products from
* Valve LLC. All other use, distribution, or modification is prohibited
* without written permission from Valve LLC.
*
****/
//
// status_icons.cpp
//
#include "hud.h"
#include "cl_util.h"
#include <string.h>
#include <stdio.h>
#include "parsemsg.h"
// Glue macro: declares the free-function stub that forwards the "StatusIcon"
// network message to this element's MsgFunc_StatusIcon handler (defined below).
DECLARE_MESSAGE( m_StatusIcons, StatusIcon );
// One-time setup: hook the StatusIcon network message, register this element
// with the HUD so Draw() gets called, and clear all icon slots.
int CHudStatusIcons::Init( void )
{
HOOK_MESSAGE( StatusIcon );
gHUD.AddHudElem( this );
Reset();
return 1; // non-zero signals success to the HUD framework
}
// Called when the video mode changes. Sprites are (re)resolved lazily via
// gHUD.GetSpriteIndex() in EnableIcon(), so there is nothing to reload here.
int CHudStatusIcons::VidInit( void )
{
return 1;
}
// Drop every active icon and stop this element from drawing until an
// icon is enabled again.
void CHudStatusIcons::Reset( void )
{
	m_iFlags &= ~HUD_ACTIVE;
	memset( m_IconList, 0, sizeof( m_IconList ) );
}
// Draw status icons along the left-hand side of the screen
int CHudStatusIcons::Draw( float flTime )
{
// find starting position to draw from, along right-hand side of screen
int x = 5;
int y = ScreenHeight / 2;
// loop through icon list, and draw any valid icons drawing up from the middle of screen
for ( int i = 0; i < MAX_ICONSPRITES; i++ )
{
if ( m_IconList[i].spr )
{
y -= ( m_IconList[i].rc.bottom - m_IconList[i].rc.top ) + 5;
SPR_Set( m_IconList[i].spr, m_IconList[i].r, m_IconList[i].g, m_IconList[i].b );
SPR_DrawAdditive( 0, x, y, &m_IconList[i].rc );
}
}
return 1;
}
// Network handler for the "StatusIcon" message.
// Payload: byte enable flag, sprite name string; when enabling, three more
// bytes follow giving the red/green/blue draw color.
int CHudStatusIcons::MsgFunc_StatusIcon( const char *pszName, int iSize, void *pbuf )
{
	BEGIN_READ( pbuf, iSize );

	int bEnable = READ_BYTE();
	char *pszSprite = READ_STRING();

	// Disable requests carry no color bytes - handle them and bail out early.
	if ( !bEnable )
	{
		DisableIcon( pszSprite );
		return 1;
	}

	int red = READ_BYTE();
	int green = READ_BYTE();
	int blue = READ_BYTE();

	EnableIcon( pszSprite, red, green, blue );
	m_iFlags |= HUD_ACTIVE; // make sure Draw() is called from now on

	return 1;
}
// Add the icon to the icon list (or refresh it if already present) and set
// its drawing color. The sprite must be listed in hud.txt.
void CHudStatusIcons::EnableIcon( const char *pszIconName, unsigned char red, unsigned char green, unsigned char blue )
{
	// Reuse the slot if this icon is already in the list.
	int i;
	for ( i = 0; i < MAX_ICONSPRITES; i++ )
	{
		if ( !stricmp( m_IconList[i].szSpriteName, pszIconName ) )
			break;
	}

	if ( i == MAX_ICONSPRITES )
	{
		// Icon not in list, so find an empty slot to add to.
		for ( i = 0; i < MAX_ICONSPRITES; i++ )
		{
			if ( !m_IconList[i].spr )
				break;
		}
	}

	// If we've run out of space in the list, overwrite the first icon.
	if ( i == MAX_ICONSPRITES )
	{
		i = 0;
	}

	// Load the sprite and add it to the list.
	int spr_index = gHUD.GetSpriteIndex( pszIconName );
	m_IconList[i].spr = gHUD.GetSprite( spr_index );
	m_IconList[i].rc = gHUD.GetSpriteRect( spr_index );
	m_IconList[i].r = red;
	m_IconList[i].g = green;
	m_IconList[i].b = blue;

	// Fix: the name arrives from the network, so an unbounded strcpy could
	// overflow szSpriteName. Copy with an explicit bound and always
	// NUL-terminate.
	strncpy( m_IconList[i].szSpriteName, pszIconName, sizeof( m_IconList[i].szSpriteName ) - 1 );
	m_IconList[i].szSpriteName[ sizeof( m_IconList[i].szSpriteName ) - 1 ] = '\0';
}
// Remove the named icon from the draw list, if it is currently active.
void CHudStatusIcons::DisableIcon( const char *pszIconName )
{
	for ( int slot = 0; slot < MAX_ICONSPRITES; slot++ )
	{
		if ( stricmp( m_IconList[slot].szSpriteName, pszIconName ) != 0 )
			continue;

		// Zero the whole entry so Draw() treats the slot as empty.
		memset( &m_IconList[slot], 0, sizeof( icon_sprite_t ) );
		return;
	}
}
|
Subsets and Splits