# -*- encoding: utf-8 -*-
import functools as ft
from inspect import getdoc
import re
import h2o
from h2o.automl._base import H2OAutoMLBaseMixin
from h2o.automl._h2o_automl_output import H2OAutoMLOutput
from h2o.base import Keyed
from h2o.estimators import H2OEstimator
from h2o.exceptions import H2OResponseError, H2OValueError
from h2o.frame import H2OFrame
from h2o.job import H2OJob
from h2o.utils.shared_utils import check_id
from h2o.utils.typechecks import assert_is_type, is_type, numeric
_params_doc_ = dict() # holds the doc per param extracted from H2OAutoML constructor
def _extract_params_doc(docstr):
pat = re.compile(r"^:param (\w+ )?(?P<name>\w+):\s?(?P<doc>.*)") # match param doc-start in Sphinx format ":param type name: description"
lines = docstr.splitlines()
param, doc = None, None
for l in lines:
m = pat.match(l)
if m:
if param:
_params_doc_[param] = "\n".join(doc)
param = m.group('name')
doc = [m.group('doc')]
elif param:
doc.append(l)
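# Illustrative behavior of _extract_params_doc: given a docstring containing
#   :param int nfolds: Number of folds for k-fold cross-validation.
#       Defaults to ``5``.
# it records the description (including continuation lines) under
# _params_doc_['nfolds'], which _aml_property below reuses as the docstring
# of the generated property.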
def _aml_property(param_path, name=None, types=None, validate_fn=None, freezable=False, set_input=True):
path = param_path.split('.')
name = name or path[-1]
def attr_name(self, attr):
return ("_"+self.__class__.__name__+attr) if attr.startswith('__') and not attr.endswith('__') else attr
def _fget(self):
_input = getattr(self, attr_name(self, '__input'))
return _input.get(name)
def _fset(self, value):
if freezable and getattr(self, attr_name(self, '__frozen'), False):
raise H2OValueError("Param ``%s`` can not be modified after the first call to ``train``." % name, name)
if types is not None:
assert_is_type(value, *types)
input_val = value
if validate_fn:
value = validate_fn(self, value)
_input = getattr(self, attr_name(self, '__input'))
_input[name] = input_val if set_input else value
group = getattr(self, attr_name(self, path[0]))
if group is None:
group = {}
setattr(self, attr_name(self, path[0]), group)
obj = group
for t in path[1:-1]:
tmp = obj.get(t)
if tmp is None:
tmp = obj[t] = {}
obj = tmp
obj[path[-1]] = value
return property(fget=_fget, fset=_fset, doc=_params_doc_.get(name, None))
class H2OAutoML(H2OAutoMLBaseMixin, Keyed):
"""
Automatic Machine Learning
The Automatic Machine Learning (AutoML) function automates the supervised machine learning model training process.
The current version of AutoML trains and cross-validates the following algorithms (in the following order):
three pre-specified XGBoost GBM (Gradient Boosting Machine) models,
a fixed grid of GLMs,
a default Random Forest (DRF),
five pre-specified H2O GBMs,
a near-default Deep Neural Net,
an Extremely Randomized Forest (XRT),
a random grid of XGBoost GBMs,
a random grid of H2O GBMs,
and a random grid of Deep Neural Nets.
In some cases, there will not be enough time to complete all the algorithms, so some may be missing from the
leaderboard. AutoML then trains two Stacked Ensemble models, one of all the models, and one of only the best
models of each kind.
:examples:
>>> import h2o
>>> from h2o.automl import H2OAutoML
>>> h2o.init()
>>> # Import a sample binary outcome train/test set into H2O
>>> train = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_train_10k.csv")
>>> test = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_test_5k.csv")
>>> # Identify the response and set of predictors
>>> y = "response"
>>> x = list(train.columns) #if x is defined as all columns except the response, then x is not required
>>> x.remove(y)
>>> # For binary classification, response should be a factor
>>> train[y] = train[y].asfactor()
>>> test[y] = test[y].asfactor()
>>> # Run AutoML for 30 seconds
>>> aml = H2OAutoML(max_runtime_secs = 30)
>>> aml.train(x = x, y = y, training_frame = train)
>>> # Print Leaderboard (ranked by xval metrics)
>>> aml.leaderboard
>>> # (Optional) Evaluate performance on a test set
>>> perf = aml.leader.model_performance(test)
>>> perf.auc()
"""
def __init__(self,
nfolds=5,
balance_classes=False,
class_sampling_factors=None,
max_after_balance_size=5.0,
max_runtime_secs=None,
max_runtime_secs_per_model=None,
max_models=None,
stopping_metric="AUTO",
stopping_tolerance=None,
stopping_rounds=3,
seed=None,
project_name=None,
exclude_algos=None,
include_algos=None,
exploitation_ratio=0,
modeling_plan=None,
preprocessing=None,
monotone_constraints=None,
keep_cross_validation_predictions=False,
keep_cross_validation_models=False,
keep_cross_validation_fold_assignment=False,
sort_metric="AUTO",
export_checkpoints_dir=None,
verbosity="warn",
**kwargs):
"""
Create a new H2OAutoML instance.
:param int nfolds: Number of folds for k-fold cross-validation.
Use ``0`` to disable cross-validation; this will also disable Stacked Ensemble (thus decreasing the overall model performance).
Defaults to ``5``.
:param bool balance_classes: Balance training data class counts via over/under-sampling (for imbalanced data).
Defaults to ``False``.
:param class_sampling_factors: Desired over/under-sampling ratios per class (in lexicographic order).
If not specified, sampling factors will be automatically computed to obtain class balance during training.
:param float max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0).
Requires ``balance_classes``.
Defaults to ``5.0``.
:param int max_runtime_secs: Specify the maximum time that the AutoML process will run for, prior to training the final Stacked Ensemble models.
            If neither ``max_runtime_secs`` nor ``max_models`` is specified by the user, then ``max_runtime_secs``
            dynamically defaults to 3600 seconds (1 hour); otherwise it defaults to ``0`` (no limit).
:param int max_runtime_secs_per_model: Controls the max time the AutoML run will dedicate to each individual model.
Defaults to ``0`` (disabled: no time limit).
:param int max_models: Specify the maximum number of models to build in an AutoML run, excluding the Stacked Ensemble models.
Defaults to ``0`` (disabled: no limitation).
:param str stopping_metric: Specifies the metric to use for early stopping.
The available options are:
``"AUTO"`` (This defaults to ``"logloss"`` for classification, ``"deviance"`` for regression),
``"deviance"``, ``"logloss"``, ``"mse"``, ``"rmse"``, ``"mae"``, ``"rmsle"``, ``"auc"``, ``aucpr``, ``"lift_top_group"``,
``"misclassification"``, ``"mean_per_class_error"``, ``"r2"``.
Defaults to ``"AUTO"``.
:param float stopping_tolerance: Specify the relative tolerance for the metric-based stopping to stop the AutoML run if the improvement is less than this value.
Defaults to ``0.001`` if the dataset is at least 1 million rows;
otherwise it defaults to a value determined by the size of the dataset and the non-NA-rate, in which case the value is computed as 1/sqrt(nrows * non-NA-rate).
:param int stopping_rounds: Stop training new models in the AutoML run when the option selected for
stopping_metric doesn't improve for the specified number of models, based on a simple moving average.
To disable this feature, set it to ``0``.
            Defaults to ``3`` and must be a non-negative integer.
:param int seed: Set a seed for reproducibility.
AutoML can only guarantee reproducibility if ``max_models`` or early stopping is used because ``max_runtime_secs`` is resource limited,
meaning that if the resources are not the same between runs, AutoML may be able to train more models on one run vs another.
Defaults to ``None``.
:param str project_name: Character string to identify an AutoML project.
Defaults to ``None``, which means a project name will be auto-generated based on the training frame ID.
More models can be trained on an existing AutoML project by specifying the same project name in multiple calls to the AutoML function
(as long as the same training frame, or a sample, is used in subsequent runs).
:param exclude_algos: List the algorithms to skip during the model-building phase.
The full list of options is:
``"DRF"`` (Random Forest and Extremely-Randomized Trees),
``"GLM"``,
``"XGBoost"``,
``"GBM"``,
``"DeepLearning"``,
``"StackedEnsemble"``.
Defaults to ``None``, which means that all appropriate H2O algorithms will be used, if the search stopping criteria allow. Optional.
Usage example: ``exclude_algos = ["GLM", "DeepLearning", "DRF"]``.
:param include_algos: List the algorithms to restrict to during the model-building phase.
            This can't be used in combination with the ``exclude_algos`` parameter.
Defaults to ``None``, which means that all appropriate H2O algorithms will be used, if the search stopping criteria allow. Optional.
:param exploitation_ratio: The budget ratio (between 0 and 1) dedicated to the exploitation (vs exploration) phase.
By default, the exploitation phase is disabled (exploitation_ratio=0) as this is still experimental;
to activate it, it is recommended to try a ratio around 0.1.
Note that the current exploitation phase only tries to fine-tune the best XGBoost and the best GBM found during exploration.
:param modeling_plan: List of modeling steps to be used by the AutoML engine (they may not all get executed, depending on other constraints).
Defaults to None (Expert usage only).
:param preprocessing: List of preprocessing steps to run. Only 'target_encoding' is currently supported.
:param monotone_constraints: Dict representing monotonic constraints.
Use +1 to enforce an increasing constraint and -1 to specify a decreasing constraint.
        :param keep_cross_validation_predictions: Whether to keep the cross-validation predictions.
This needs to be set to ``True`` if running the same AutoML object for repeated runs because CV predictions are required to build
additional Stacked Ensemble models in AutoML.
Defaults to ``False``.
:param keep_cross_validation_models: Whether to keep the cross-validated models.
Keeping cross-validation models may consume significantly more memory in the H2O cluster.
Defaults to ``False``.
:param keep_cross_validation_fold_assignment: Whether to keep fold assignments in the models.
Deleting them will save memory in the H2O cluster.
Defaults to ``False``.
:param sort_metric: Metric to sort the leaderboard by.
For binomial classification choose between ``"auc"``, ``"aucpr"``, ``"logloss"``, ``"mean_per_class_error"``, ``"rmse"``, ``"mse"``.
For multinomial classification choose between ``"mean_per_class_error"``, ``"logloss"``, ``"rmse"``, ``"mse"``.
            For regression choose between ``"deviance"``, ``"rmse"``, ``"mse"``, ``"mae"``, ``"rmsle"``.
Defaults to ``"AUTO"`` (This translates to ``"auc"`` for binomial classification, ``"mean_per_class_error"`` for multinomial classification, ``"deviance"`` for regression).
:param export_checkpoints_dir: Path to a directory where every model will be stored in binary form.
:param verbosity: Verbosity of the backend messages printed during training.
            Available options are ``None`` (live log disabled), ``"debug"``, ``"info"`` or ``"warn"``.
Defaults to ``"warn"``.
"""
# early validate kwargs, extracting hidden parameters:
algo_parameters = {}
for k in kwargs:
if k == 'algo_parameters':
algo_parameters = kwargs[k] or {}
else:
raise TypeError("H2OAutoML got an unexpected keyword argument '%s'" % k)
# Check if H2O jar contains AutoML
try:
h2o.api("GET /3/Metadata/schemas/AutoMLV99")
except h2o.exceptions.H2OResponseError as e:
print(e)
print("*******************************************************************\n" \
"*Please verify that your H2O jar has the proper AutoML extensions.*\n" \
"*******************************************************************\n" \
"\nVerbose Error Message:")
self._job = None
self._leader_id = None
self._leaderboard = None
self._verbosity = verbosity
self._event_log = None
self._training_info = None
self._state_json = None
self._build_resp = None # contains all the actual parameters used on backend
self.__frozen = False
self.__input = dict() # contains all the input params as entered by the user
# Make bare minimum params containers
self.build_control = dict()
self.build_models = dict()
self.input_spec = dict()
self.project_name = project_name
self.nfolds = nfolds
self.balance_classes = balance_classes
self.class_sampling_factors = class_sampling_factors
self.max_after_balance_size = max_after_balance_size
self.keep_cross_validation_models = keep_cross_validation_models
self.keep_cross_validation_fold_assignment = keep_cross_validation_fold_assignment
self.keep_cross_validation_predictions = keep_cross_validation_predictions
self.export_checkpoints_dir = export_checkpoints_dir
self.max_runtime_secs = max_runtime_secs
self.max_runtime_secs_per_model = max_runtime_secs_per_model
self.max_models = max_models
self.stopping_metric = stopping_metric
self.stopping_tolerance = stopping_tolerance
self.stopping_rounds = stopping_rounds
self.seed = seed
self.exclude_algos = exclude_algos
self.include_algos = include_algos
self.exploitation_ratio = exploitation_ratio
self.modeling_plan = modeling_plan
self.preprocessing = preprocessing
if monotone_constraints is not None:
algo_parameters['monotone_constraints'] = monotone_constraints
self._algo_parameters = algo_parameters
self.sort_metric = sort_metric
#---------------------------------------------------------------------------
# AutoML params
#---------------------------------------------------------------------------
def __validate_not_set(self, val, prop=None, message=None):
assert val is None or getattr(self, prop, None) is None, message
return val
def __validate_project_name(self, project_name):
check_id(project_name, "H2OAutoML")
return project_name
def __validate_nfolds(self, nfolds):
assert nfolds == 0 or nfolds > 1, "nfolds set to %s; use nfolds >=2 if you want cross-validated metrics and Stacked Ensembles or use nfolds = 0 to disable." % nfolds
return nfolds
def __validate_modeling_plan(self, modeling_plan):
if modeling_plan is None:
return None
supported_aliases = ['all', 'defaults', 'grids']
def assert_is_step_def(sd):
assert 'name' in sd, "each definition must have a 'name' key"
assert 0 < len(sd) < 3, "each definition must have only 1 or 2 keys: name, name+alias or name+steps"
assert len(sd) == 1 or 'alias' in sd or 'steps' in sd, "steps definitions support only the following keys: name, alias, steps"
assert 'alias' not in sd or sd['alias'] in supported_aliases, "alias must be one of %s" % supported_aliases
assert 'steps' not in sd or (is_type(sd['steps'], list) and all(assert_is_step(s) for s in sd['steps']))
def assert_is_step(s):
assert is_type(s, dict), "each step must be a dict with an 'id' key and an optional 'weight' key"
assert 'id' in s, "each step must have an 'id' key"
assert len(s) == 1 or ('weight' in s and is_type(s['weight'], int)), "weight must be an integer"
return True
plan = []
for step_def in modeling_plan:
assert_is_type(step_def, dict, tuple, str)
if is_type(step_def, dict):
assert_is_step_def(step_def)
plan.append(step_def)
elif is_type(step_def, str):
plan.append(dict(name=step_def))
else:
assert 0 < len(step_def) < 3
assert_is_type(step_def[0], str)
name = step_def[0]
if len(step_def) == 1:
plan.append(dict(name=name))
else:
assert_is_type(step_def[1], str, list)
ids = step_def[1]
if is_type(ids, str):
assert_is_type(ids, *supported_aliases)
plan.append(dict(name=name, alias=ids))
else:
plan.append(dict(name=name, steps=[dict(id=i) for i in ids]))
return plan
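    # The validator above normalizes three equivalent spellings of a step
    # definition (illustrative; "def_1" is a hypothetical step id):
    #   "DRF"              -> dict(name="DRF")
    #   ("GBM", "grids")   -> dict(name="GBM", alias="grids")
    #   ("GBM", ["def_1"]) -> dict(name="GBM", steps=[dict(id="def_1")])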
def __validate_preprocessing(self, preprocessing):
if preprocessing is None:
return
assert all(p in ['target_encoding'] for p in preprocessing)
return [dict(type=p.replace("_", "")) for p in preprocessing]
def __validate_monotone_constraints(self, monotone_constraints):
if monotone_constraints is None:
self._algo_parameters.pop('monotone_constraints', None)
else:
self._algo_parameters['monotone_constraints'] = monotone_constraints
return self.__validate_algo_parameters(self._algo_parameters)
def __validate_algo_parameters(self, algo_parameters):
if algo_parameters is None:
return None
algo_parameters_json = []
for k, v in algo_parameters.items():
scope, __, name = k.partition('__')
if len(name) == 0:
name, scope = scope, 'any'
value = [dict(key=k, value=v) for k, v in v.items()] if isinstance(v, dict) else v # we can't use stringify_dict here as this will be converted into a JSON string
algo_parameters_json.append(dict(scope=scope, name=name, value=value))
return algo_parameters_json
def __validate_frame(self, fr, name=None, required=False):
return H2OFrame._validate(fr, name, required=required)
_extract_params_doc(getdoc(__init__))
project_name = _aml_property('build_control.project_name', types=(None, str), freezable=True,
validate_fn=__validate_project_name)
nfolds = _aml_property('build_control.nfolds', types=(int,), freezable=True,
validate_fn=__validate_nfolds)
balance_classes = _aml_property('build_control.balance_classes', types=(bool,), freezable=True)
class_sampling_factors = _aml_property('build_control.class_sampling_factors', types=(None, [numeric]), freezable=True)
max_after_balance_size = _aml_property('build_control.max_after_balance_size', types=(None, numeric), freezable=True)
keep_cross_validation_models = _aml_property('build_control.keep_cross_validation_models', types=(bool,), freezable=True)
keep_cross_validation_fold_assignment = _aml_property('build_control.keep_cross_validation_fold_assignment', types=(bool,), freezable=True)
keep_cross_validation_predictions = _aml_property('build_control.keep_cross_validation_predictions', types=(bool,), freezable=True)
export_checkpoints_dir = _aml_property('build_control.export_checkpoints_dir', types=(None, str), freezable=True)
max_runtime_secs = _aml_property('build_control.stopping_criteria.max_runtime_secs', types=(None, int), freezable=True)
max_runtime_secs_per_model = _aml_property('build_control.stopping_criteria.max_runtime_secs_per_model', types=(None, int), freezable=True)
max_models = _aml_property('build_control.stopping_criteria.max_models', types=(None, int), freezable=True)
stopping_metric = _aml_property('build_control.stopping_criteria.stopping_metric', types=(None, str), freezable=True)
stopping_tolerance = _aml_property('build_control.stopping_criteria.stopping_tolerance', types=(None, numeric), freezable=True)
stopping_rounds = _aml_property('build_control.stopping_criteria.stopping_rounds', types=(None, int), freezable=True)
seed = _aml_property('build_control.stopping_criteria.seed', types=(None, int), freezable=True)
exclude_algos = _aml_property('build_models.exclude_algos', types=(None, [str]), freezable=True,
validate_fn=ft.partial(__validate_not_set, prop='include_algos',
message="Use either `exclude_algos` or `include_algos`, not both."))
include_algos = _aml_property('build_models.include_algos', types=(None, [str]), freezable=True,
validate_fn=ft.partial(__validate_not_set, prop='exclude_algos',
message="Use either `exclude_algos` or `include_algos`, not both."))
exploitation_ratio = _aml_property('build_models.exploitation_ratio', types=(None, numeric), freezable=True)
modeling_plan = _aml_property('build_models.modeling_plan', types=(None, list), freezable=True,
validate_fn=__validate_modeling_plan)
preprocessing = _aml_property('build_models.preprocessing', types=(None, [str]), freezable=True,
validate_fn=__validate_preprocessing)
monotone_constraints = _aml_property('build_models.algo_parameters', name='monotone_constraints', types=(None, dict), freezable=True,
validate_fn=__validate_monotone_constraints)
_algo_parameters = _aml_property('build_models.algo_parameters', types=(None, dict), freezable=True,
validate_fn=__validate_algo_parameters)
sort_metric = _aml_property('input_spec.sort_metric', types=(None, str))
fold_column = _aml_property('input_spec.fold_column', types=(None, int, str))
weights_column = _aml_property('input_spec.weights_column', types=(None, int, str))
training_frame = _aml_property('input_spec.training_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='training_frame', required=True))
validation_frame = _aml_property('input_spec.validation_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='validation_frame'))
leaderboard_frame = _aml_property('input_spec.leaderboard_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='leaderboard_frame'))
blending_frame = _aml_property('input_spec.blending_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='blending_frame'))
response_column = _aml_property('input_spec.response_column', types=(str,))
#---------------------------------------------------------------------------
# Basic properties
#---------------------------------------------------------------------------
@property
def key(self):
return self._job.dest_key if self._job else self.project_name
@property
def leader(self):
return None if self._leader_id is None else h2o.get_model(self._leader_id)
@property
def leaderboard(self):
return H2OFrame([]) if self._leaderboard is None else self._leaderboard
@property
def event_log(self):
return H2OFrame([]) if self._event_log is None else self._event_log
@property
def training_info(self):
return dict() if self._training_info is None else self._training_info
@property
def modeling_steps(self):
"""
        Expose the modeling steps effectively used by the AutoML run.
This executed plan can be directly reinjected as the `modeling_plan` property of a new AutoML instance
to improve reproducibility across AutoML versions.
:return: a list of dictionaries representing the effective modeling plan.
"""
# removing alias key to be able to reinject result to a new AutoML instance
return list(map(lambda sdef: dict(name=sdef['name'], steps=sdef['steps']), self._state_json['modeling_steps']))
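    # Sketch: an executed plan can be replayed on a fresh instance for
    # reproducibility (`aml` being a previously trained H2OAutoML object):
    #   aml2 = H2OAutoML(modeling_plan=aml.modeling_steps, seed=42)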
#---------------------------------------------------------------------------
# Training AutoML
#---------------------------------------------------------------------------
def train(self, x=None, y=None, training_frame=None, fold_column=None,
weights_column=None, validation_frame=None, leaderboard_frame=None, blending_frame=None):
"""
Begins an AutoML task, a background task that automatically builds a number of models
with various algorithms and tracks their performance in a leaderboard. At any point
in the process you may use H2O's performance or prediction functions on the resulting
models.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param fold_column: The name or index of the column in training_frame that holds per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds per-row weights.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold_column or weights_column).
:param validation_frame: H2OFrame with validation data. This argument is ignored unless the user sets
nfolds = 0. If cross-validation is turned off, then a validation frame can be specified and used
for early stopping of individual models and early stopping of the grid searches. By default and
when nfolds > 1, cross-validation metrics will be used for early stopping and thus validation_frame will be ignored.
:param leaderboard_frame: H2OFrame with test data for scoring the leaderboard. This is optional and
if this is set to None (the default), then cross-validation metrics will be used to generate the leaderboard
rankings instead.
        :param blending_frame: H2OFrame used to train the metalearning algorithm in Stacked Ensembles (instead of relying on cross-validated predicted values).
This is optional, but when provided, it is also recommended to disable cross validation
by setting `nfolds=0` and to provide a leaderboard frame for scoring purposes.
:returns: An H2OAutoML object.
:examples:
>>> # Set up an H2OAutoML object
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> # Launch an AutoML run
>>> aml.train(y=y, training_frame=train)
"""
# Minimal required arguments are training_frame and y (response)
self.training_frame = training_frame
ncols = self.training_frame.ncols
names = self.training_frame.names
if y is None and self.response_column is None:
raise H2OValueError('The response column (y) is not set; please set it to the name of the column that you are trying to predict in your data.')
elif y is not None:
assert_is_type(y, int, str)
if is_type(y, int):
if not (-ncols <= y < ncols):
raise H2OValueError("Column %d does not exist in the training frame" % y)
y = names[y]
else:
if y not in names:
raise H2OValueError("Column %s does not exist in the training frame" % y)
self.response_column = y
self.fold_column = fold_column
self.weights_column = weights_column
self.validation_frame = validation_frame
self.leaderboard_frame = leaderboard_frame
self.blending_frame = blending_frame
if x is not None:
assert_is_type(x, list)
xset = set()
if is_type(x, int, str): x = [x]
for xi in x:
if is_type(xi, int):
if not (-ncols <= xi < ncols):
raise H2OValueError("Column %d does not exist in the training frame" % xi)
xset.add(names[xi])
else:
if xi not in names:
raise H2OValueError("Column %s not in the training frame" % xi)
xset.add(xi)
ignored_columns = set(names) - xset
for col in [y, fold_column, weights_column]:
if col is not None and col in ignored_columns:
ignored_columns.remove(col)
if ignored_columns is not None:
self.input_spec['ignored_columns'] = list(ignored_columns)
def clean_params(params):
return ({k: clean_params(v) for k, v in params.items() if v is not None} if isinstance(params, dict)
else H2OEstimator._keyify(params))
automl_build_params = clean_params(dict(
build_control=self.build_control,
build_models=self.build_models,
input_spec=self.input_spec,
))
resp = self._build_resp = h2o.api('POST /99/AutoMLBuilder', json=automl_build_params)
if 'job' not in resp:
raise H2OResponseError("Backend failed to build the AutoML job: {}".format(resp))
if not self.project_name:
self.project_name = resp['build_control']['project_name']
self.__frozen = True
self._job = H2OJob(resp['job'], "AutoML")
poll_updates = ft.partial(self._poll_training_updates, verbosity=self._verbosity, state={})
try:
self._job.poll(poll_updates=poll_updates)
finally:
poll_updates(self._job, 1)
self._fetch()
return self.leader
#---------------------------------------------------------------------------
# Predict with AutoML
#---------------------------------------------------------------------------
def predict(self, test_data):
leader = self.leader
if leader is None:
self._fetch()
leader = self.leader
if leader is not None:
return leader.predict(test_data)
print("No model built yet...")
#-------------------------------------------------------------------------------------------------------------------
# Overrides
#-------------------------------------------------------------------------------------------------------------------
def detach(self):
self.__frozen = False
self.project_name = None
h2o.remove(self.leaderboard)
h2o.remove(self.event_log)
#-------------------------------------------------------------------------------------------------------------------
# Private
#-------------------------------------------------------------------------------------------------------------------
def _fetch(self):
state = H2OAutoML._fetch_state(self.key)
self._leader_id = state['leader_id']
self._leaderboard = state['leaderboard']
self._event_log = el = state['event_log']
self._training_info = { r[0]: r[1]
for r in el[el['name'] != '', ['name', 'value']]
.as_data_frame(use_pandas=False, header=False)
}
self._state_json = state['json']
return self._leader_id is not None
def _poll_training_updates(self, job, bar_progress=0, verbosity=None, state=None):
"""
        The callback function used to print verbose info when polling the AutoML job.
"""
levels = ['Debug', 'Info', 'Warn']
if verbosity is None or verbosity.capitalize() not in levels:
return
levels = levels[levels.index(verbosity.capitalize()):]
try:
if job.progress > state.get('last_job_progress', 0):
# print("\nbar_progress={}, job_progress={}".format(bar_progress, job.progress))
events = H2OAutoML._fetch_state(job.dest_key, properties=['event_log'])['event_log']
events = events[events['level'].isin(levels), :]
last_nrows = state.get('last_events_nrows', 0)
if events.nrows > last_nrows:
fr = events[last_nrows:, ['timestamp', 'message']].as_data_frame(use_pandas=False, header=False)
print('')
for r in fr:
print("{}: {}".format(r[0], r[1]))
print('')
state['last_events_nrows'] = events.nrows
state['last_job_progress'] = job.progress
except Exception as e:
print("Failed polling AutoML progress log: {}".format(e))
@staticmethod
def _fetch_leaderboard(aml_id, extensions=None):
assert_is_type(extensions, None, str, [str])
extensions = ([] if extensions is None
else [extensions] if is_type(extensions, str)
else extensions)
resp = h2o.api("GET /99/Leaderboards/%s" % aml_id, data=dict(extensions=extensions))
dest_key = resp['project_name'].split('@', 1)[0]+"_custom_leaderboard"
lb = H2OAutoML._fetch_table(resp['table'], key=dest_key, progress_bar=False)
return h2o.assign(lb[1:], dest_key)
@staticmethod
def _fetch_table(table, key=None, progress_bar=True):
try:
# Intentionally mask the progress bar here since showing multiple progress bars is confusing to users.
# If any failure happens, revert back to user's original setting for progress and display the error message.
ori_progress_state = H2OJob.__PROGRESS_BAR__
H2OJob.__PROGRESS_BAR__ = progress_bar
# Parse leaderboard H2OTwoDimTable & return as an H2OFrame
return h2o.H2OFrame(table.cell_values, destination_frame=key, column_names=table.col_header, column_types=table.col_types)
finally:
H2OJob.__PROGRESS_BAR__ = ori_progress_state
@staticmethod
def _fetch_state(aml_id, properties=None):
state_json = h2o.api("GET /99/AutoML/%s" % aml_id)
project_name = state_json["project_name"]
if project_name is None:
raise H2OValueError("No AutoML instance with id {}.".format(aml_id))
leaderboard_list = [key["name"] for key in state_json['leaderboard']['models']]
leader_id = leaderboard_list[0] if (leaderboard_list is not None and len(leaderboard_list) > 0) else None
should_fetch = lambda prop: properties is None or prop in properties
leader = None
if should_fetch('leader'):
leader = h2o.get_model(leader_id) if leader_id is not None else None
leaderboard = None
if should_fetch('leaderboard'):
leaderboard = H2OAutoML._fetch_table(state_json['leaderboard_table'], key=project_name+"_leaderboard", progress_bar=False)
leaderboard = h2o.assign(leaderboard[1:], project_name+"_leaderboard") # removing index and reassign id to ensure persistence on backend
event_log = None
if should_fetch('event_log'):
event_log = H2OAutoML._fetch_table(state_json['event_log_table'], key=project_name+"_eventlog", progress_bar=False)
event_log = h2o.assign(event_log[1:], project_name+"_eventlog") # removing index and reassign id to ensure persistence on backend
return dict(
project_name=project_name,
json=state_json,
leader_id=leader_id,
leader=leader,
leaderboard=leaderboard,
event_log=event_log,
)
def get_automl(project_name):
"""
Retrieve information about an AutoML instance.
:param str project_name: A string indicating the project_name of the automl instance to retrieve.
:returns: A dictionary containing the project_name, leader model, leaderboard, event_log.
"""
state = H2OAutoML._fetch_state(project_name)
return H2OAutoMLOutput(state)
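# Usage sketch for get_automl (assumes an AutoML run named "my_project"
# already exists on the connected H2O cluster):
#
#   output = get_automl("my_project")
#   output.leader       # best model of the run
#   output.leaderboard  # H2OFrame ranking all trained models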
def get_leaderboard(aml, extra_columns=None):
"""
Retrieve the leaderboard from the AutoML instance.
Contrary to the default leaderboard attached to the automl instance, this one can return columns other than the metrics.
:param H2OAutoML aml: the instance for which to return the leaderboard.
:param extra_columns: a string or a list of string specifying which optional columns should be added to the leaderboard. Defaults to None.
Currently supported extensions are:
- 'ALL': adds all columns below.
- 'training_time_ms': column providing the training time of each model in milliseconds (doesn't include the training of cross validation models).
        - 'predict_time_per_row_ms': column providing the average prediction time by the model for a single row.
- 'algo': column providing the algorithm name for each model.
:return: An H2OFrame representing the leaderboard.
:examples:
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> aml.train(y=y, training_frame=train)
>>> lb_all = h2o.automl.get_leaderboard(aml, 'ALL')
>>> lb_custom = h2o.automl.get_leaderboard(aml, ['predict_time_per_row_ms', 'training_time_ms'])
>>> lb_custom_sorted = lb_custom.sort(by='predict_time_per_row_ms')
"""
assert_is_type(aml, H2OAutoML, H2OAutoMLOutput)
return H2OAutoML._fetch_leaderboard(aml.key, extra_columns)
When you are out buying seneca lift-top cocktail tables, it can be easy to be convinced by a salesperson to purchase something outside of your usual style. So go shopping with a particular design in mind. You'll be able to quickly sort out what works and what does not, which makes narrowing down your alternatives much less of a challenge.
Fill your space with extra pieces as space allows: they add a great deal to a large room, but too many pieces will crowd out a smaller room. Before you check out seneca lift-top cocktail tables and start buying big furniture, take note of a couple of essential factors to consider. Getting a new coffee table is an exciting opportunity that can fully transform the look of your room.
Whatever color scheme and design you finally choose, you should have the basic items to harmonize with your seneca lift-top cocktail tables. Once you've covered the essentials, you will want to add some ornamental pieces. Picture frames or similar accents for the interior are nice options. You may also want more than one light source to supply a lovely ambience in the room.
Before choosing any seneca lift-top cocktail tables, you ought to measure the size of the room. Identify where you want to put each piece of coffee table furniture and the best measurements for that area. If your room is tiny, scale down your furniture and pick seneca lift-top cocktail tables that fit.
Choose your coffee table theme and style. Having a design style in mind is necessary when picking out new seneca lift-top cocktail tables if you want to achieve your preferred decoration. You might also wish to consider changing the decoration of your walls to fit your personal preferences.
Identify how the seneca lift-top cocktail tables will be used. That will help determine what to buy and what color scheme to choose. Consider how many people will be using the space so that you can get the suitable size.
Figure out the style that you want. It helps to have a concept for your coffee table, for example modern or classic, and to stay with items that fit that concept. There are a number of ways to divide an interior into certain styles, but the main categories usually include modern, contemporary, classic, and old-fashioned.
Once you have decided how much area you can make room for seneca lift-top cocktail tables and where you would like the pieces to go, mark those areas on the floor to get a good layout. Arrange your furniture so that each coffee table in your interior matches the other pieces. Otherwise, your room will appear disorderly and messy.
import json
import functools
from cgi import parse_header
def wrap_json(func=None, *, encoder=json.JSONEncoder):
"""
A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
    because it only accepts list or dictionary outputs, while the json
    specification allows other values as well.
    It is recommended to use `wrap_json_body` and `wrap_json_response`
    instead of this.
"""
if func is None:
return functools.partial(wrap_json, encoder=encoder)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.body = json.loads(request.body.decode("utf-8")) if request.body else None
response = func(request, *args, **kwargs)
if "Content-Type" in response.headers and response.headers['Content-Type'] is not None:
ctype, pdict = parse_header(response.headers.get('Content-Type', ''))
if ctype == "application/json" and (isinstance(response.body, dict) or isinstance(response.body, list)):
response.body = json.dumps(response.body, cls=encoder)
return response
return wrapper
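# Usage sketch for wrap_json; `Response` stands for a hypothetical framework
# response object exposing `body` and `headers`, as this middleware assumes:
#
#   @wrap_json
#   def echo(request):
#       # request.body has already been decoded from JSON at this point
#       return Response(body={"received": request.body},
#                       headers={"Content-Type": "application/json"})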
def wrap_json_body(func=None, *, preserve_raw_body=False):
"""
    A middleware that parses the body of json requests and
    adds it to the request under the `body` attribute (replacing
    the previous value). It can preserve the original value in
    a new attribute `raw_body` if you pass preserve_raw_body=True.
"""
if func is None:
return functools.partial(wrap_json_body, preserve_raw_body=preserve_raw_body)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if preserve_raw_body:
request.raw_body = request.body
if ctype == "application/json":
request.body = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_params(func):
"""
    A middleware that parses the body of json requests and
    adds it to the request under the `params` attribute.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.params = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_response(func=None, *, encoder=json.JSONEncoder):
"""
    A middleware that encodes the response body as json when
    the "Content-Type" header is "application/json".
    This middleware accepts an optional `encoder` parameter that
    allows the user to specify a custom json encoder class.
"""
if func is None:
return functools.partial(wrap_json_response, encoder=encoder)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if not response.body:
return response
ctype, pdict = parse_header(response.headers.get('Content-Type', ''))
if ctype == "application/json":
response.body = json.dumps(response.body, cls=encoder)
return response
return wrapper
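# The two narrower middlewares compose into the recommended replacement for
# wrap_json (a sketch; `handler` and `Response` are hypothetical, as above):
#
#   @wrap_json_body(preserve_raw_body=True)
#   @wrap_json_response()
#   def handler(request):
#       # request.body is parsed JSON, request.raw_body the original bytes
#       return Response(body=request.body,
#                       headers={"Content-Type": "application/json"})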
Long before he was a restaurateur opening a new project in the heart of Silicon Valley, along one of its wealthiest commercial strips, Thierry Fassiotti was living on the streets of Paris. The French native was homeless during the mid-1980s, and says that ever since he pulled himself off the streets, he has fashioned a mission to help low income and homeless people.
Fassiotti has worked at Restaurants du Cœur in France and more recently, Bread and Roses in Southern California’s St. Joseph Center (in addition to a number of other Los Angeles restaurants).
Tonight, he opens Alkymists in downtown Palo Alto.
Set in University Avenue’s former Palo Alto Grill, the 100-seat restaurant and bar specializes in global street food from chef Jared Combs, who comes from Los Angeles. That means the menu will include everything from curry-glazed pork belly tacos to harissa meatballs in house-baked Bosnian bread to cocktails with herb-infused liquors.
“It’s traveler food. If you’re traveling the world, what kind of street food would you come across?” says Fassiotti.
The “foodanthropy” side of the equation is perhaps more noteworthy.
Once or twice a month, Alkymists will provide free lunch to low income members of the community, with a particular focus on abused women, single mothers and their children. It will be done in partnership with local charity organizations, with donations from vendors.
The free lunches will be flanked by more opportunities.
Alkymists — a funky spelling inspired by the constructed language Esperanto — opens tonight. It is open for dinner nightly. Weekend brunch begins next month. Reservations accepted.
Alkymists: 140 University Avenue, at the corner of High Street, in Palo Alto. (650) 321-3514 or alkymists.com.
from __future__ import unicode_literals
from django.apps import AppConfig
from django.db import models
from django.utils.module_loading import import_string
from . import settings
class MutantConfig(AppConfig):
name = 'mutant'
def ready(self):
self.state_handler = import_string(settings.STATE_HANDLER)()
from . import management
ModelDefinition = self.get_model('ModelDefinition')
models.signals.post_save.connect(
management.model_definition_post_save,
sender=ModelDefinition,
dispatch_uid='mutant.management.model_definition_post_save',
)
models.signals.pre_delete.connect(
management.model_definition_pre_delete,
sender=ModelDefinition,
dispatch_uid='mutant.management.model_definition_pre_delete',
)
models.signals.post_delete.connect(
management.model_definition_post_delete,
sender=ModelDefinition,
dispatch_uid='mutant.management.model_definition_post_delete',
)
BaseDefinition = self.get_model('BaseDefinition')
models.signals.post_save.connect(
management.base_definition_post_save,
sender=BaseDefinition,
dispatch_uid='mutant.management.base_definition_post_save',
)
models.signals.pre_delete.connect(
management.base_definition_pre_delete,
sender=BaseDefinition,
dispatch_uid='mutant.management.base_definition_pre_delete',
)
models.signals.post_delete.connect(
management.base_definition_post_delete,
sender=BaseDefinition,
dispatch_uid='mutant.management.base_definition_post_delete',
)
UniqueTogetherDefinition = self.get_model('UniqueTogetherDefinition')
models.signals.m2m_changed.connect(
management.unique_together_field_defs_changed,
sender=UniqueTogetherDefinition.field_defs.through,
dispatch_uid='mutant.management.unique_together_field_defs_changed',
)
FieldDefinition = self.get_model('FieldDefinition')
models.signals.post_save.connect(
management.raw_field_definition_proxy_post_save,
sender=FieldDefinition,
dispatch_uid='mutant.management.raw_field_definition_proxy_post_save',
)
models.signals.pre_delete.connect(
management.field_definition_pre_delete,
sender=FieldDefinition,
dispatch_uid='mutant.management.field_definition_pre_delete',
)
models.signals.post_delete.connect(
management.field_definition_post_delete,
sender=FieldDefinition,
dispatch_uid='mutant.management.field_definition_post_delete',
)
Recently Asian Access held its fourth session for A2.business leaders in South Asia. The featured faculty for the weekend were two former senior Walmart executives who shared about their experiences in the marketplace, and how their faith provided the foundation for their leadership. On site with us in South Asia was Edwin Keh. Edwin was the COO and Senior Vice President of Walmart Global Procurement. Alongside him via video was Mike Duke who served as the CEO of Walmart.
Likewise, the cohort was moved by what Edwin and Mike had to share. Edwin’s successes and failures were all a part of learning the importance of every decision a leader makes. Mike shared about his own journey and emphasized the significance of starting every day with God. Both Edwin and Mike talked about key mentors who discipled them, and how they in turn committed themselves to discipling others.
Please pray for the leaders God has placed in important and strategic business positions in Asia, who are expressing Christ’s character and love as they disciple other leaders in the marketplace.
from PyQt5.QtCore import QSettings
class SettingData(object):
def __init__(self, name, value, write_to_disk=False):
self._name = name
self._value = value
self._writeToDisk = write_to_disk
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
@property
def write_to_disk(self):
return self._writeToDisk
@write_to_disk.setter
def write_to_disk(self, value):
self._writeToDisk = value
class ApplicationSettings(object):
def __init__(self):
self._settings = {}
self._settingsClient = None
QSettings.setPath(QSettings.IniFormat, QSettings.UserScope, "settings")
QSettings.setDefaultFormat(QSettings.IniFormat)
@property
def settings_map(self):
return self._settings
def load_settings(self):
self._settingsClient = QSettings()
# SettingData: (Name, Value, WriteToDisk)
self._settings["last_folder_path"] = SettingData("last_folder_path",
self._settingsClient.value
("last_folder_path", None))
def write_settings(self):
for _, setting in self._settings.items():
if setting.write_to_disk and setting.value is not None:
self._settingsClient.setValue(setting.name, setting.value)
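# Usage sketch (assumes QSettings can resolve organization/application names,
# e.g. after QCoreApplication.setOrganizationName/setApplicationName):
#
#   app_settings = ApplicationSettings()
#   app_settings.load_settings()
#   item = app_settings.settings_map["last_folder_path"]
#   item.value = "/home/user/projects"  # hypothetical path
#   item.write_to_disk = True
#   app_settings.write_settings()       # persisted to the INI file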
Cove, Buncrana, Inishowen, Co Donegal.
Bathrooms include separate electric-shower, washbasin and toilet.
Main living area has traditional stone fireplace, flag floors, leather upholstered suite and dining area.
High open ceilings with wooden beams.
Bed linen and towels provided free of charge.
Magnificent views of Lough Swilly and surroundings.
Less than 2km from beach and Buncrana town.
Within driving distance of numerous major tourist attractions.
from flask import render_template, make_response, request, redirect, url_for, Response, g, jsonify
from datetime import datetime
from operator import attrgetter
import json
from . import app, db
from .model import *
@app.before_request
def before_request():
"""Ensures that user is authenticated and fills some global variables"""
try:
user = request.authorization.username
if user:
g.user = User.query.filter_by(name=user).first()
if not g.user:
g.user = User(name=user)
db.session.add(g.user)
db.session.commit()
else:
return login()
except AttributeError:
return login()
g.events = Event.query \
.filter_by(canceled=False) \
.filter(Event.date >= datetime.now()) \
.order_by(Event.date.asc())
g.now = datetime.now()
@app.route('/login')
def login():
"""Sends a 401 response that enables basic auth"""
return Response('You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
@app.route('/')
def index():
return render_template('index.html')
@app.route('/event/<int:id>')
def event(id):
"""Shows the voted movies for an event"""
event = Event.query.filter_by(id=id).first()
if not event:
return make_response(
render_template('error.html', errormsg='The event you requested was not found.'),
404
)
event.movies = {}
voted_movies = [vote.movie for vote in Vote.query.filter_by(user=g.user, event=event)]
for vote in event.votes:
        if vote.movie.id in event.movies:
event.movies[vote.movie.id].count += 1
else:
event.movies[vote.movie.id] = vote.movie
event.movies[vote.movie.id].voted = vote.movie in voted_movies
event.movies[vote.movie.id].count = 1
event.movies = sorted(event.movies.values(), key=attrgetter('count'), reverse=True)
event.voted = len(voted_movies) > 0
return render_template('event.html', event=event)
@app.route('/find_movie')
def find_film():
"""Searches for movies using a partial movie name"""
movies = Movie.query.filter(Movie.name.like('%%%s%%' % request.args['term'])).all()
return Response(
json.dumps([{'id': movie.id, 'value': movie.name + ' (' + movie.year + ')'} for movie in movies]),
200,
None,
'application/json'
)
@app.route('/movie/<int:id>')
def movie_info(id):
"""Gives detailed information about a movie"""
movie = Movie.query.filter_by(id=id).first()
if not movie:
return jsonify({})
return jsonify(movie.serialize)
@app.route('/movie/next_winning')
def next_winning_movie_info():
"""Gives detailed information about the currently winning movie of the next event"""
# to get the currently running event if some event is running, we ask for
    # the next event after today's midnight
event = Event.query \
.filter_by(canceled=False) \
.filter(Event.date >= datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)) \
.order_by(Event.date.asc()) \
.first()
if not event:
return jsonify({})
if not event.votes:
return jsonify({})
event.movies = {}
for vote in event.votes:
if vote.movie.id in event.movies:
event.movies[vote.movie.id].count += 1
else:
event.movies[vote.movie.id] = vote.movie
event.movies[vote.movie.id].count = 1
event.movies = sorted(event.movies.values(), key=attrgetter('count'), reverse=True)
return movie_info(event.movies[0].id)
@app.route('/vote', methods=['POST'])
def vote():
"""Votes for a set of movies for an event. Can update previous votes."""
event_id = request.form['event_id']
event = Event.query.filter_by(id=event_id).first()
if not event:
return make_response(
render_template('error.html', errormsg='The event you voted for doesn\'t exist!'),
404
)
if event.date < datetime.now():
return make_response(
render_template('error.html', errormsg='Voting for an event in the past isn\'t possible!'),
403
)
if event.canceled:
return make_response(
render_template('error.html', errormsg='Voting for a canceled event isn\'t possible!'),
403
)
    votes = Vote.query.filter_by(user=g.user, event=event).all()
    voted_movies = dict((vote.movie.id, vote) for vote in votes)
    for movie_id in request.form.getlist('movies[]'):
        movie_id = int(movie_id)
        movie = Movie.query.filter_by(id=movie_id).first()
        if movie:
            if movie_id in voted_movies:
                # already voted for: keep the vote and drop it from the removal list
                votes.remove(voted_movies[movie_id])
            else:
                vote = Vote(user=g.user, event=event, movie_id=movie_id)
                db.session.add(vote)
    # the votes remaining in the list are no longer voted for, so remove them
    for vote in votes:
        db.session.delete(vote)
db.session.commit()
return redirect(url_for('event', id=event_id))
# Copyright (C) 2014 ABRT Team
# Copyright (C) 2014 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
from . import Column
from . import GenericTable
from . import Integer
from . import String
class ExternalFafInstance(GenericTable):
__tablename__ = "externalfafinstances"
id = Column(Integer, primary_key=True)
name = Column(String(256), nullable=False, index=True)
baseurl = Column(String(1024), nullable=False)
Complete Direct Cremation (Berks, Carbon, Chester, Lehigh, Montgomery, Northampton, Schuylkill Counties) - All County Cremation Society & Cremation Society Of Berks County, Inc.
A basic cardboard container is included in the Complete Direct Cremation plan. If you desire a different cremation container at an additional cost, please click on that container as pictured below.
An acrylic urn is included in the Complete Direct Cremation plan to temporarily hold remains. If you would like to upgrade to another urn at an additional cost, please click on that urn as pictured below. There will be a $95 charge to place cremated remains in a container you provide to us.
Jewelry/Keepsakes are available at additional cost and can be filled with a portion of the cremated remains.
Does the deceased weigh more than 250 pounds?
* Available weekdays from 9 AM to 3 PM for up to 1 hour; excludes holidays. Up to 10 people permitted.
Witnessing of the Cremation at Our Crematory.
* Cremation performed weekdays after 3PM, weekends or holidays.
Cremated remains are generally available within 5-7 business days.
# coding=utf-8
"""
Download mode implementation.
"""
from __future__ import division
import os
import re
import sys
import mimetypes
import threading
from time import sleep, time
from mailbox import Message
from .output import RawStream
from .models import HTTPResponse
from .utils import humanize_bytes
from .compat import urlsplit
PARTIAL_CONTENT = 206
CLEAR_LINE = '\r\033[K'
PROGRESS = (
'{percentage: 6.2f} %'
' {downloaded: >10}'
' {speed: >10}/s'
' {eta: >8} ETA'
)
PROGRESS_NO_CONTENT_LENGTH = '{downloaded: >10} {speed: >10}/s'
SUMMARY = 'Done. {downloaded} in {time:0.5f}s ({speed}/s)\n'
SPINNER = '|/-\\'
class ContentRangeError(ValueError):
pass
def parse_content_range(content_range, resumed_from):
"""
Parse and validate Content-Range header.
<http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html>
:param content_range: the value of a Content-Range response header
eg. "bytes 21010-47021/47022"
:param resumed_from: first byte pos. from the Range request header
:return: total size of the response body when fully downloaded.
"""
if content_range is None:
raise ContentRangeError('Missing Content-Range')
pattern = (
        r'^bytes (?P<first_byte_pos>\d+)-(?P<last_byte_pos>\d+)'
        r'/(\*|(?P<instance_length>\d+))$'
)
match = re.match(pattern, content_range)
if not match:
raise ContentRangeError(
'Invalid Content-Range format %r' % content_range)
content_range_dict = match.groupdict()
first_byte_pos = int(content_range_dict['first_byte_pos'])
last_byte_pos = int(content_range_dict['last_byte_pos'])
instance_length = (
int(content_range_dict['instance_length'])
if content_range_dict['instance_length']
else None
)
# "A byte-content-range-spec with a byte-range-resp-spec whose
# last- byte-pos value is less than its first-byte-pos value,
# or whose instance-length value is less than or equal to its
# last-byte-pos value, is invalid. The recipient of an invalid
# byte-content-range- spec MUST ignore it and any content
# transferred along with it."
if (first_byte_pos >= last_byte_pos
or (instance_length is not None
and instance_length <= last_byte_pos)):
raise ContentRangeError(
'Invalid Content-Range returned: %r' % content_range)
if (first_byte_pos != resumed_from
or (instance_length is not None
and last_byte_pos + 1 != instance_length)):
# Not what we asked for.
raise ContentRangeError(
'Unexpected Content-Range returned (%r)'
' for the requested Range ("bytes=%d-")'
% (content_range, resumed_from)
)
return last_byte_pos + 1
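# Example: resuming from byte 21010 of a 47022-byte resource, a server replies
#   Content-Range: bytes 21010-47021/47022
# and parse_content_range('bytes 21010-47021/47022', resumed_from=21010)
# returns 47022, the total size once fully downloaded.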
def filename_from_content_disposition(content_disposition):
"""
Extract and validate filename from a Content-Disposition header.
:param content_disposition: Content-Disposition value
:return: the filename if present and valid, otherwise `None`
"""
# attachment; filename=jkbr-httpie-0.4.1-20-g40bd8f6.tar.gz
msg = Message('Content-Disposition: %s' % content_disposition)
filename = msg.get_filename()
if filename:
# Basic sanitation.
filename = os.path.basename(filename).lstrip('.').strip()
if filename:
return filename
def filename_from_url(url, content_type):
fn = urlsplit(url).path.rstrip('/')
fn = os.path.basename(fn) if fn else 'index'
if '.' not in fn and content_type:
content_type = content_type.split(';')[0]
if content_type == 'text/plain':
# mimetypes returns '.ksh'
ext = '.txt'
else:
ext = mimetypes.guess_extension(content_type)
if ext == '.htm': # Python 3
ext = '.html'
if ext:
fn += ext
return fn
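# Example: filename_from_url('http://example.org/archive', 'application/x-tar')
# yields the basename 'archive' plus the mimetypes-guessed extension, i.e.
# 'archive.tar' (URL and content type are illustrative).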
def get_unique_filename(fn, exists=os.path.exists):
attempt = 0
while True:
suffix = '-' + str(attempt) if attempt > 0 else ''
if not exists(fn + suffix):
return fn + suffix
attempt += 1
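# Example: if 'report.txt' and 'report.txt-1' already exist on disk,
# get_unique_filename('report.txt') returns 'report.txt-2'.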
class Download(object):
def __init__(self, output_file=None,
resume=False, progress_file=sys.stderr):
"""
:param resume: Should the download resume if partial download
already exists.
:type resume: bool
:param output_file: The file to store response body in. If not
provided, it will be guessed from the response.
:type output_file: file
:param progress_file: Where to report download progress.
:type progress_file: file
"""
self._output_file = output_file
self._resume = resume
self._resumed_from = 0
self.finished = False
self.status = Status()
self._progress_reporter = ProgressReporterThread(
status=self.status,
output=progress_file
)
def pre_request(self, request_headers):
"""Called just before the HTTP request is sent.
Might alter `request_headers`.
:type request_headers: dict
"""
# Disable content encoding so that we can resume, etc.
request_headers['Accept-Encoding'] = None
if self._resume:
bytes_have = os.path.getsize(self._output_file.name)
if bytes_have:
# Set ``Range`` header to resume the download
# TODO: Use "If-Range: mtime" to make sure it's fresh?
request_headers['Range'] = 'bytes=%d-' % bytes_have
self._resumed_from = bytes_have
def start(self, response):
"""
Initiate and return a stream for `response` body with progress
callback attached. Can be called only once.
:param response: Initiated response object with headers already fetched
:type response: requests.models.Response
:return: RawStream, output_file
"""
assert not self.status.time_started
try:
total_size = int(response.headers['Content-Length'])
except (KeyError, ValueError, TypeError):
total_size = None
if self._output_file:
if self._resume and response.status_code == PARTIAL_CONTENT:
total_size = parse_content_range(
response.headers.get('Content-Range'),
self._resumed_from
)
else:
self._resumed_from = 0
try:
self._output_file.seek(0)
self._output_file.truncate()
except IOError:
pass # stdout
else:
# TODO: Should the filename be taken from response.history[0].url?
# Output file not specified. Pick a name that doesn't exist yet.
fn = None
if 'Content-Disposition' in response.headers:
fn = filename_from_content_disposition(
response.headers['Content-Disposition'])
if not fn:
fn = filename_from_url(
url=response.url,
content_type=response.headers.get('Content-Type'),
)
self._output_file = open(get_unique_filename(fn), mode='a+b')
self.status.started(
resumed_from=self._resumed_from,
total_size=total_size
)
stream = RawStream(
msg=HTTPResponse(response),
with_headers=False,
with_body=True,
on_body_chunk_downloaded=self.chunk_downloaded,
chunk_size=1024 * 8
)
self._progress_reporter.output.write(
'Downloading %sto "%s"\n' % (
(humanize_bytes(total_size) + ' '
if total_size is not None
else ''),
self._output_file.name
)
)
self._progress_reporter.start()
return stream, self._output_file
def finish(self):
assert not self.finished
self.finished = True
self.status.finished()
def failed(self):
self._progress_reporter.stop()
@property
def interrupted(self):
return (
self.finished
and self.status.total_size
and self.status.total_size != self.status.downloaded
)
def chunk_downloaded(self, chunk):
"""
A download progress callback.
:param chunk: A chunk of response body data that has just
been downloaded and written to the output.
:type chunk: bytes
"""
self.status.chunk_downloaded(len(chunk))
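# A minimal sketch of the intended call order; the `response` and
# `request_headers` objects here are assumptions, not defined in this module:
#
#     download = Download(resume=True, output_file=open('file.bin', 'a+b'))
#     download.pre_request(request_headers)       # sets Range / Accept-Encoding
#     stream, output_file = download.start(response)
#     # ...iterate over `stream`, writing each chunk to `output_file`...
#     download.finish()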
class Status(object):
"""Holds details about the downland status."""
def __init__(self):
self.downloaded = 0
self.total_size = None
self.resumed_from = 0
self.time_started = None
self.time_finished = None
def started(self, resumed_from=0, total_size=None):
assert self.time_started is None
if total_size is not None:
self.total_size = total_size
self.downloaded = self.resumed_from = resumed_from
self.time_started = time()
def chunk_downloaded(self, size):
assert self.time_finished is None
self.downloaded += size
@property
def has_finished(self):
return self.time_finished is not None
def finished(self):
assert self.time_started is not None
assert self.time_finished is None
self.time_finished = time()
class ProgressReporterThread(threading.Thread):
"""
Reports download progress based on its status.
Uses threading to periodically update the status (speed, ETA, etc.).
"""
def __init__(self, status, output, tick=.1, update_interval=1):
"""
:type status: Status
:type output: file
"""
super(ProgressReporterThread, self).__init__()
self.status = status
self.output = output
self._tick = tick
self._update_interval = update_interval
self._spinner_pos = 0
self._status_line = ''
self._prev_bytes = 0
self._prev_time = time()
self._should_stop = threading.Event()
def stop(self):
"""Stop reporting on next tick."""
self._should_stop.set()
def run(self):
while not self._should_stop.is_set():
if self.status.has_finished:
self.sum_up()
break
self.report_speed()
sleep(self._tick)
def report_speed(self):
now = time()
if now - self._prev_time >= self._update_interval:
downloaded = self.status.downloaded
try:
speed = ((downloaded - self._prev_bytes)
/ (now - self._prev_time))
except ZeroDivisionError:
speed = 0
if not self.status.total_size:
self._status_line = PROGRESS_NO_CONTENT_LENGTH.format(
downloaded=humanize_bytes(downloaded),
speed=humanize_bytes(speed),
)
else:
try:
percentage = downloaded / self.status.total_size * 100
except ZeroDivisionError:
percentage = 0
if not speed:
eta = '-:--:--'
else:
s = int((self.status.total_size - downloaded) / speed)
h, s = divmod(s, 60 * 60)
m, s = divmod(s, 60)
eta = '{0}:{1:0>2}:{2:0>2}'.format(h, m, s)
self._status_line = PROGRESS.format(
percentage=percentage,
downloaded=humanize_bytes(downloaded),
speed=humanize_bytes(speed),
eta=eta,
)
self._prev_time = now
self._prev_bytes = downloaded
self.output.write(
CLEAR_LINE
+ ' '
+ SPINNER[self._spinner_pos]
+ ' '
+ self._status_line
)
self.output.flush()
self._spinner_pos = (self._spinner_pos + 1
if self._spinner_pos + 1 != len(SPINNER)
else 0)
def sum_up(self):
actually_downloaded = (self.status.downloaded
- self.status.resumed_from)
time_taken = self.status.time_finished - self.status.time_started
self.output.write(CLEAR_LINE)
self.output.write(SUMMARY.format(
downloaded=humanize_bytes(actually_downloaded),
total=(self.status.total_size
and humanize_bytes(self.status.total_size)),
speed=humanize_bytes(actually_downloaded / time_taken),
time=time_taken,
))
self.output.flush()
|
Meindert Fennema (b. 1946) is an emeritus professor of political theory at the University of Amsterdam and a columnist at the Dutch daily De Volkskrant.
The anti-euro parties may be stronger than ever in the next European Parliament. But unless they start working together, they will not be able to backtrack on the single currency, brought in by the economic elites of Europe in the pursuit of their own joint interests.
European project: Stop saying the EU is a failure!
Too often we forget that belonging to the EU enabled countries like Spain and Italy to develop economically, while setting aside their old colonial aspirations. And this should also influence how we see the future of the Union, argues a Dutch political scientist. |
#!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under both the Apache 2.0 license (found in the
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
# in the COPYING file in the root directory of this source tree).
# You may select, at your option, one of the above-listed licenses.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import sys
try:
import argparse
except ImportError:
print("Cannot import argparse.")
exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=(
"List files from compile_commands.json."
))
parser.add_argument(
"--build", metavar="PATH",
help="Path to osquery build (./build/<sys>/) directory"
)
parser.add_argument(
"--base", metavar="PATH", default="",
help="Real path of source base."
)
args = parser.parse_args()
commands_path = os.path.join(args.build, "compile_commands.json")
if not os.path.exists(commands_path):
print("Cannot find '%s'" % (commands_path))
exit(1)
    with open(commands_path, 'r') as fh:
        content = fh.read()
data = json.loads(content)
for file in data:
if file['file'].find("_tests.cpp") > 0 or file['file'].find("_benchmark") > 0:
continue
if file['file'].find("gtest") > 0:
continue
print(file['file'].replace(args.base, ""))
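# Example invocation (script and path names are illustrative):
#     python list_compile_files.py --build ./build/linux/ --base "$PWD/"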
|
What with Amazing Spider-man hitting theaters this week, I thought I'd post this commission of one of his less loved adversaries, Mysterio! Hopefully we'll see this character appear on film at some point.
Boston Comic Con, 2012. Pen & ink, Copic Marker on 11x14" bristol board.
amen to that! i did Mysterio for a Spidey Villains costume group at Chicago Comic and Entertainment Expo (C2E2) this year. totally did not expect all the love for ol' Fishbowlhead. |
#!/usr/bin/env python
desc="""2D plot"""
epilog="""Author:
[email protected]
Barcelona, 10/05/2013
"""
import argparse, math, os, sys
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
def plot_2d(inputs, output, title, xlab, ylab, xmax, xmin, log, ndivide=20):
    """Plot k-mer frequency histograms, one subplot per input file.

    Each input provides whitespace-separated `frequency count` rows; the
    first 1/ndivide of the x-range is skipped when locating the coverage peak.
    """
    #define the subplot grid, over-allocating one extra column
    bi = round(math.sqrt(len(inputs)))
    bj = round(len(inputs) / bi) + 1
    print(len(inputs), bi, bj)
#get figure
plt.figure(figsize=(bj*4, bi*4))
plt.subplots_adjust(hspace = .3, wspace = .3)
#process inputs
sys.stderr.write("Loading data...\n")
for ii, handle in enumerate(inputs, 1):
#load data
x, y = [], []
for l in handle:
l = l[:-1]
if not l or l.startswith('#'):
continue
i, c = l.split()[:2]
i, c = int(i), int(c)
if xmin <= i <= xmax:
x.append(i)
y.append(c/10**3)
        maxy10 = max(y[xmax // ndivide:])
xi = y.index(maxy10)
freqk = x[xi]
gsize = maxy10*freqk/10.0**3
sys.stderr.write("[%s] Max freq: %s @ k-mer freq: %s\nEstimated genome size: %s Mb\n" %(input.name, maxy10, freqk, gsize))
plt.subplot(bi,bj,ii)
plt.plot(x, y, linewidth=2.0)
        #add title and axis labels
        if handle.name != "<stdin>":
            plt.title(handle.name.split('.')[0])
        elif title:
            plt.title(title)
#plot x-axis label only on bottom plots
if ii/bi>bj-1:
plt.xlabel(xlab)
#plot y-axis label only on left-most plots
if ii%bj==1:
plt.ylabel(ylab)
plt.ylim(0,1.5*maxy10)
#plt.grid(True)
#add local max
plt.annotate("~%.2f Mb\n(%s, %sK)" % (gsize, freqk, maxy10), xy=(freqk*1.01, maxy10*1.01), xytext=(freqk*1.2, maxy10*1.2),arrowprops=dict(facecolor='black', shrink=0.05))
#plt.text(freqk, maxy10*1.1, 'Genome size: ~%.2f Mb\n(%s, %s)' % (gsize, freqk, maxy10))
#show plot if not outfile provided
if output.name=="<stdout>":
plt.show()
else:
fpath = output.name #"%s.%s" % (output.name, format)
format = fpath.split('.')[-1]
plt.savefig(fpath, dpi=200, facecolor='w', edgecolor='w',\
orientation='landscape', format=format, transparent=False)
def main():
usage = "%(prog)s [options] -v"
parser = argparse.ArgumentParser( usage=usage,description=desc,epilog=epilog )
parser.add_argument("-v", dest="verbose", default=False, action="store_true", help="verbose")
parser.add_argument('--version', action='version', version='1.0')
parser.add_argument("-i", dest="input", default=[sys.stdin,], type=argparse.FileType("r"), nargs="+",
help="input stream [stdin]")
parser.add_argument("-o", dest="output", default=sys.stdout, type=argparse.FileType("w"),
help="output stream [stdout]")
parser.add_argument("-c", dest="col", default=0, type=int,
help="column to use [%(default)s]")
parser.add_argument("-t", dest="title", default="",
help="histogram title [%(default)s]")
parser.add_argument("-x", dest="xlab", default="k-mer frequency",
help="x-axis label [%(default)s]")
parser.add_argument("-y", dest="ylab", default="k-mers with this frequency [10e3]",
help="y-axis label [%(default)s]")
parser.add_argument("-n", dest="ndivide", default=20, type=int,
help="discard 1/n first [%(default)s]")
parser.add_argument("--log", dest="log", default=False, action="store_true",
help="log scale [%(default)s]")
parser.add_argument("--xmax", dest="xmax", default=100, type=int,
help="max x value [%(default)s]")
parser.add_argument("--xmin", dest="xmin", default=0, type=int,
help="min x value [%(default)s]")
o = parser.parse_args()
if o.verbose:
sys.stderr.write( "Options: %s\n" % str(o) )
plot_2d(o.input, o.output, o.title, o.xlab, o.ylab, o.xmax, o.xmin, o.log, o.ndivide)
if __name__=='__main__':
t0 = datetime.now()
main()
dt = datetime.now()-t0
sys.stderr.write( "#Time elapsed: %s\n" % dt )
|
The KGR-96 is a receive-only, digital data decryptor that provides security for the Tactical Receive Equipment (TRE) (AN/USQ-101 [V]), which is used to copy the Tactical Data Information Exchange System (TADIXS)-Bravo and the Tactical Related Applications (TRAP) Data Dissemination System (TDDS) broadcasts on fleet flagships and major combatants. The KGR-96 can be controlled and monitored either locally, using front panel controls and indicators, or remotely, using signals that are input and output on rear panel connectors. Additionally, the KGR-96 features local or remote key filling, power transient protection, and self-test. It is interoperable with the KG-46 and the KGT/R-62.
The KGR-96 requires two different keys, unique to each piece of equipment, for input by the user before processing traffic. Keying/rekeying is accomplished through the front panel fill connection or remotely through the rear panel connector (only one key may be remotely refilled). The rectangular shaped KGR-96 is designed for mounting in an MT-4841/U, which can mount two KGR-96s. The equipment is deployed on major naval shore stations, major naval surface ships, and submarines; is certified to receive up to TOP SECRET level traffic; and when unkeyed is classified SECRET No Foreign Nationals.
KGR-96 drawing courtesy of U.S.N.
Data rate: Decrypts synchronous serial data at any rate between 1 Kbps and 10 Mbps.
Unit Price: No longer produced. |
#------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 06/21/2002
#
# Refactored into a separate module: 07/04/2003
#
#------------------------------------------------------------------------------
""" Defines common, low-level capabilities needed by the Traits package.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from __future__ import absolute_import
import os
import sys
from os import getcwd
from os.path import dirname, exists, join
from string import lowercase, uppercase
from types import (ListType, TupleType, DictType, StringType, UnicodeType,
IntType, LongType, FloatType, ComplexType, ClassType, TypeType)
# Set the Python version being used:
vi = sys.version_info
python_version = vi[0] + (float( vi[1] ) / 10.0)
try:
from traits.etsconfig.api import ETSConfig
except:
# If the ETSConfig package is not available, fake it:
class ETSConfig ( object ):
#-----------------------------------------------------------------------
# 'object' interface:
#-----------------------------------------------------------------------
def __init__ ( self ):
""" Constructor.
Note that this constructor can only ever be called from within
this module, since we don't expose the class.
"""
# Shadow attributes for properties:
self._application_data = None
self._toolkit = None
return
#-----------------------------------------------------------------------
# 'ETSConfig' interface:
#-----------------------------------------------------------------------
#-- Property Implementations -------------------------------------------
def _get_application_data ( self ):
""" Property getter.
This is a directory that applications and packages can safely
write non-user accessible data to i.e. configuration
information, preferences etc.
Do not put anything in here that the user might want to navigate
to (e.g. projects, user data files, etc).
The actual location differs between operating systems.
"""
if self._application_data is None:
self._application_data = self._initialize_application_data()
return self._application_data
def _set_application_data ( self, application_data ):
""" Property setter.
"""
self._application_data = application_data
application_data = property( _get_application_data,
_set_application_data )
def _get_toolkit ( self ):
"""
Property getter for the GUI toolkit. The value returned is, in
order of preference: the value set by the application; the value
passed on the command line using the '-toolkit' option; the value
specified by the 'ETS_TOOLKIT' environment variable; otherwise the
empty string.
"""
if self._toolkit is None:
self._toolkit = self._initialize_toolkit()
return self._toolkit
def _set_toolkit ( self, toolkit ):
"""
Property setter for the GUI toolkit. The toolkit can be set more
than once, but only if it is the same one each time. An application
that is written for a particular toolkit can explicitly set it
before any other module that gets the value is imported.
"""
if self._toolkit and (self._toolkit != toolkit):
raise ValueError( 'Cannot set toolkit to %s because it has '
'already been set to %s' % ( toolkit, self._toolkit ) )
self._toolkit = toolkit
return
toolkit = property( _get_toolkit, _set_toolkit )
#-- Private Methods ----------------------------------------------------
def _initialize_application_data ( self ):
""" Initializes the (default) application data directory.
"""
if sys.platform == 'win32':
environment_variable = 'APPDATA'
directory_name = 'Enthought'
else:
environment_variable = 'HOME'
directory_name = '.enthought'
# Lookup the environment variable:
parent_directory = os.environ.get( environment_variable, None )
if parent_directory is None:
raise ValueError( 'Environment variable "%s" not set' %
environment_variable )
application_data = os.path.join( parent_directory, directory_name )
# If a file already exists with this name then make sure that it is
# a directory!
if os.path.exists( application_data ):
if not os.path.isdir( application_data ):
raise ValueError( 'File "%s" already exists' %
application_data )
# Otherwise, create the directory:
else:
os.makedirs( application_data )
return application_data
def _initialize_toolkit ( self ):
""" Initializes the toolkit.
"""
# We handle the command line option even though it doesn't have the
# highest precedence because we always want to remove it from the
# command line:
if '-toolkit' in sys.argv:
opt_idx = sys.argv.index( '-toolkit' )
try:
opt_toolkit = sys.argv[ opt_idx + 1 ]
except IndexError:
raise ValueError( 'The -toolkit command line argument must '
'be followed by a toolkit name' )
                # Remove the option and its argument:
                del sys.argv[ opt_idx: opt_idx + 2 ]
else:
opt_toolkit = None
if self._toolkit is not None:
toolkit = self._toolkit
elif opt_toolkit is not None:
toolkit = opt_toolkit
else:
toolkit = os.environ.get( 'ETS_TOOLKIT', '' )
return toolkit
ETSConfig = ETSConfig()
#-------------------------------------------------------------------------------
# Provide Python 2.3+ compatible definitions (if necessary):
#-------------------------------------------------------------------------------
try:
from types import BooleanType
except ImportError:
BooleanType = IntType
def _enumerate ( seq ):
for i in xrange( len( seq) ):
yield i, seq[i]
try:
enumerate = enumerate
except:
enumerate = _enumerate
del _enumerate
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
ClassTypes = ( ClassType, TypeType )
SequenceTypes = ( ListType, TupleType )
ComplexTypes = ( float, int )
TypeTypes = ( StringType, UnicodeType, IntType, LongType, FloatType,
ComplexType, ListType, TupleType, DictType, BooleanType )
TraitNotifier = '__trait_notifier__'
# The standard Traits property cache prefix:
TraitsCache = '_traits_cache_'
#-------------------------------------------------------------------------------
# Singleton 'Uninitialized' object:
#-------------------------------------------------------------------------------
Uninitialized = None
class _Uninitialized(object):
""" The singleton value of this class represents the uninitialized state
of a trait and is specified as the 'old' value in the trait change
notification that occurs when the value of a trait is read before being
set.
"""
def __new__(cls):
if Uninitialized is not None:
return Uninitialized
else:
self = object.__new__(cls)
return self
def __repr__(self):
return '<uninitialized>'
def __reduce_ex__(self, protocol):
return (_Uninitialized, ())
#: When the first reference to a trait is a 'get' reference, the default value of
#: the trait is implicitly assigned and returned as the value of the trait.
#: Because of this implicit assignment, a trait change notification is
#: generated with the Uninitialized object as the 'old' value of the trait, and
#: the default trait value as the 'new' value. This allows other parts of the
#: traits package to recognize the assignment as the implicit default value
#: assignment, and treat it specially.
Uninitialized = _Uninitialized()
#-------------------------------------------------------------------------------
# Singleton 'Undefined' object (used as undefined trait name and/or value):
#-------------------------------------------------------------------------------
Undefined = None
class _Undefined(object):
""" Singleton 'Undefined' object (used as undefined trait name and/or value)
"""
def __new__(cls):
if Undefined is not None:
return Undefined
else:
self = object.__new__(cls)
return self
def __repr__(self):
return '<undefined>'
def __reduce_ex__(self, protocol):
return (_Undefined, ())
def __eq__(self, other):
return type(self) is type(other)
def __ne__(self, other):
return type(self) is not type(other)
#: Singleton object that indicates that a trait attribute has not yet had a
#: value set (i.e., its value is undefined). This object is used instead of
#: None, because None often has other meanings, such as that a value is not
#: used. When a trait attribute is first assigned a value, and its associated
#: trait notification handlers are called, Undefined is passed as the *old*
#: parameter, to indicate that the attribute previously had no value.
Undefined = _Undefined()
# Tell the C-base code about singleton 'Undefined' and 'Uninitialized' objects:
from . import ctraits
ctraits._undefined( Undefined, Uninitialized )
#-------------------------------------------------------------------------------
# Singleton 'Missing' object (used as missing method argument marker):
#-------------------------------------------------------------------------------
class Missing ( object ):
""" Singleton 'Missing' object (used as missing method argument marker).
"""
def __repr__ ( self ):
return '<missing>'
#: Singleton object that indicates that a method argument is missing from a
#: type-checked method signature.
Missing = Missing()
#-------------------------------------------------------------------------------
# Singleton 'Self' object (used as object reference to current 'object'):
#-------------------------------------------------------------------------------
class Self ( object ):
""" Singleton 'Self' object (used as object reference to current 'object').
"""
def __repr__ ( self ):
return '<self>'
#: Singleton object that references the current 'object'.
Self = Self()
#-------------------------------------------------------------------------------
# Define a special 'string' coercion function:
#-------------------------------------------------------------------------------
def strx ( arg ):
""" Wraps the built-in str() function to raise a TypeError if the
argument is not of a type in StringTypes.
"""
if type( arg ) in StringTypes:
return str( arg )
raise TypeError
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
StringTypes = ( StringType, UnicodeType, IntType, LongType, FloatType,
ComplexType )
#-------------------------------------------------------------------------------
# Define a mapping of coercable types:
#-------------------------------------------------------------------------------
# Mapping of coercable types.
CoercableTypes = {
LongType: ( 11, long, int ),
FloatType: ( 11, float, int ),
ComplexType: ( 11, complex, float, int ),
UnicodeType: ( 11, unicode, str )
}
#-------------------------------------------------------------------------------
# Return a string containing the class name of an object with the correct
# article (a or an) preceding it (e.g. 'an Image', 'a PlotValue'):
#-------------------------------------------------------------------------------
def class_of ( object ):
""" Returns a string containing the class name of an object with the
correct indefinite article ('a' or 'an') preceding it (e.g., 'an Image',
'a PlotValue').
"""
if isinstance( object, basestring ):
return add_article( object )
return add_article( object.__class__.__name__ )
#-------------------------------------------------------------------------------
# Return a string containing the right article (i.e. 'a' or 'an') prefixed to
# a specified string:
#-------------------------------------------------------------------------------
def add_article ( name ):
""" Returns a string containing the correct indefinite article ('a' or 'an')
prefixed to the specified string.
"""
if name[:1].lower() in 'aeiou':
return 'an ' + name
return 'a ' + name
#----------------------------------------------------------------------------
# Return a 'user-friendly' name for a specified trait:
#----------------------------------------------------------------------------
def user_name_for ( name ):
""" Returns a "user-friendly" version of a string, with the first letter
capitalized and with underscore characters replaced by spaces. For example,
``user_name_for('user_name_for')`` returns ``'User name for'``.
"""
name = name.replace( '_', ' ' )
result = ''
last_lower = False
for c in name:
if (c in uppercase) and last_lower:
result += ' '
last_lower = (c in lowercase)
result += c
return result.capitalize()
#-------------------------------------------------------------------------------
# Gets the path to the traits home directory:
#-------------------------------------------------------------------------------
_traits_home = None
def traits_home ( ):
""" Gets the path to the Traits home directory.
"""
global _traits_home
if _traits_home is None:
_traits_home = verify_path( join( ETSConfig.application_data,
'traits' ) )
return _traits_home
#-------------------------------------------------------------------------------
# Verify that a specified path exists, and try to create it if it doesn't:
#-------------------------------------------------------------------------------
def verify_path ( path ):
""" Verify that a specified path exists, and try to create it if it
does not exist.
"""
if not exists( path ):
try:
os.mkdir( path )
except:
pass
return path
#-------------------------------------------------------------------------------
# Returns the name of the module the caller's caller is located in:
#-------------------------------------------------------------------------------
def get_module_name ( level = 2 ):
""" Returns the name of the module that the caller's caller is located in.
"""
return sys._getframe( level ).f_globals.get( '__name__', '__main__' )
#-------------------------------------------------------------------------------
# Returns a resource path calculated from the caller's stack:
#-------------------------------------------------------------------------------
def get_resource_path ( level = 2 ):
"""Returns a resource path calculated from the caller's stack.
"""
module = sys._getframe( level ).f_globals.get( '__name__', '__main__' )
if module != '__main__':
# Return the path to the module:
try:
return dirname( getattr( sys.modules.get( module ), '__file__' ) )
except:
# Apparently 'module' is not a registered module...treat it like
# '__main__':
pass
# '__main__' is not a real module, so we need a work around:
for path in [ dirname( sys.argv[0] ), getcwd() ]:
if exists( path ):
break
return path
#-------------------------------------------------------------------------------
# Returns the value of an extended object attribute name of the form:
# name[.name2[.name3...]]:
#-------------------------------------------------------------------------------
def xgetattr( object, xname, default = Undefined ):
""" Returns the value of an extended object attribute name of the form:
name[.name2[.name3...]].
"""
names = xname.split( '.' )
for name in names[:-1]:
if default is Undefined:
object = getattr( object, name )
else:
object = getattr( object, name, None )
if object is None:
return default
if default is Undefined:
return getattr( object, names[-1] )
return getattr( object, names[-1], default )
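# Illustrative behaviour (names here are hypothetical):
# ``xgetattr(obj, 'child.name', default='')`` walks ``obj.child.name`` and
# returns ``''`` if any attribute along the way is missing.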
#-------------------------------------------------------------------------------
# Sets the value of an extended object attribute name of the form:
# name[.name2[.name3...]]:
#-------------------------------------------------------------------------------
def xsetattr( object, xname, value ):
""" Sets the value of an extended object attribute name of the form:
name[.name2[.name3...]].
"""
names = xname.split( '.' )
for name in names[:-1]:
object = getattr( object, name )
setattr( object, names[-1], value )
#-------------------------------------------------------------------------------
# Traits metadata selection functions:
#-------------------------------------------------------------------------------
def is_none ( value ):
return (value is None)
def not_none ( value ):
return (value is not None)
def not_false ( value ):
return (value is not False)
def not_event ( value ):
return (value != 'event')
def is_str ( value ):
return isinstance( value, basestring )
|
Product categories of U Bent Heat Exchanger Tube: we are specialized manufacturers from China, U Bent Heat Exchanger Tube and U Bent Heat Exchanger Steel Pipe suppliers/factory, offering wholesale, high-quality U Bent Tube For Heat Exchanger R & D and manufacturing, with complete after-sales service and technical support. We look forward to your cooperation!
U Bent Heat Exchanger Tube in China.
Max Stainless Steel exports a quality range of U Bend Heat Exchanger Tubes, designed to national and international standards from quality raw materials. U Bend Heat Exchanger Tubes are made from quality seamless hollows and are used for all kinds of heat exchanger, boiler and condenser applications. |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.testing.python.classes."""
from absl.testing import absltest
from absl.testing import parameterized
from clif.testing.python import classes
# TODO: Restore simple import after OSS setup includes pybind11.
# pylint: disable=g-import-not-at-top
try:
from clif.testing.python import classes_pybind11
except ImportError:
classes_pybind11 = None
# pylint: enable=g-import-not-at-top
@parameterized.named_parameters([
np for np in zip(('c_api', 'pybind11'), (classes, classes_pybind11))
if np[1] is not None
])
class ClassesTest(absltest.TestCase):
def testKlass(self, wrapper_lib):
self.assertEqual(wrapper_lib.Klass.C2(), 3)
k = wrapper_lib.Klass(3)
self.assertEqual(k.i, 3)
self.assertEqual(k.i2, 9)
self.assertEqual(k.Int1(), 4)
k.i = 0
self.assertEqual(k.i, 0)
# AttributeError on CPython; TypeError on PyPy.
with self.assertRaises((AttributeError, TypeError)):
k.i2 = 0
def testDerivedClassDocstring(self, wrapper_lib):
# Nothing special about this being a derived class; that is just the
# one our test .clif file has a docstring on.
self.assertIn('class also has a docstring.\n\n',
wrapper_lib.Derived.__doc__)
self.assertIn('spans multiple lines', wrapper_lib.Derived.__doc__)
self.assertIn(wrapper_lib.Derived.__doc__,
wrapper_lib.Derived.__doc__.strip())
def testPythonDerived(self, wrapper_lib):
class PyK(wrapper_lib.Klass):
pass
k = PyK(4)
self.assertEqual(k.i, 4)
self.assertEqual(k.Int1(), 5)
def testDerived(self, wrapper_lib):
# k = wrapper_lib.Derived()
k = wrapper_lib.Derived.Init(0, 0)
self.assertEqual(k.i, 0)
self.assertEqual(k.j, 0)
self.assertNotIn(2, k)
with self.assertRaises(TypeError):
wrapper_lib.Derived(1)
def testDerivedInit(self, wrapper_lib):
k = wrapper_lib.Derived.Init(1, 2)
self.assertEqual(k.i, 1)
self.assertEqual(k.j, 2)
if __name__ == '__main__':
absltest.main()
|
These lightweight headphones from SBS are ideal for anyone who loves to play on their smartphone and become completely immersed in their games. Thanks to their ergonomic design, the headphones are comfortable to wear. Equipped with adjustable microphone controls, they let you enjoy the console gaming experience even on your mobile, and they also feature an integrated volume control and a 3.5mm jack cable. |
# -*- coding: utf-8 -*-
#
# model.py
# GSSHApy
#
# Created by Alan D Snow, 2016.
# BSD 3-Clause
from datetime import timedelta
import logging
import uuid
import os
from gazar.grid import GDALGrid
import geopandas as gpd
from .event import EventMode, LongTermMode
from ..orm import WatershedMaskFile, ElevationGridFile, MapTableFile
from ..lib import db_tools as dbt
from ..util.context import tmp_chdir
log = logging.getLogger(__name__)
class GSSHAModel(object):
"""
This class manages the generation and modification of
models for GSSHA.
Parameters:
project_directory(str): Directory to write GSSHA project files to.
project_name(Optional[str]): Name of GSSHA project. Required for new model.
mask_shapefile(Optional[str]): Path to watershed boundary shapefile. Required for new model.
auto_clean_mask_shapefile(Optional[bool]): Chooses the largest region if the input is a multipolygon. Default is False.
grid_cell_size(Optional[str]): Cell size of model (meters). Required for new model.
elevation_grid_path(Optional[str]): Path to elevation raster used for GSSHA grid. Required for new model.
simulation_timestep(Optional[float]): Overall model timestep (seconds). Sets TIMESTEP card. Required for new model.
out_hydrograph_write_frequency(Optional[str]): Frequency of writing to hydrograph (minutes). Sets HYD_FREQ card. Required for new model.
        roughness(Optional[float]): Value of uniform manning's n roughness for grid. Mutually exclusive with land use roughness. Required for new model.
        land_use_grid(Optional[str]): Path to land use grid to use for roughness. Mutually exclusive with roughness. Required for new model.
        land_use_grid_id(Optional[str]): ID of default grid supported in GSSHApy. Mutually exclusive with roughness. Required for new model.
        land_use_to_roughness_table(Optional[str]): Path to land use to roughness table. Use if not using land_use_grid_id. Mutually exclusive with roughness. Required for new model.
        load_rasters_to_db(Optional[bool]): If True, it will load the created rasters into the database. If you are generating a large model, it is recommended to set this to False. Default is True.
db_session(Optional[database session]): Active database session object. Required for existing model.
project_manager(Optional[ProjectFile]): Initialized ProjectFile object. Required for existing model.
Model Generation Example:
.. code:: python
from datetime import datetime, timedelta
from gsshapy.modeling import GSSHAModel
model = GSSHAModel(project_name="gssha_project",
project_directory="/path/to/gssha_project",
mask_shapefile="/path/to/watershed_boundary.shp",
auto_clean_mask_shapefile=True,
grid_cell_size=1000,
elevation_grid_path="/path/to/elevation.tif",
simulation_timestep=10,
out_hydrograph_write_frequency=15,
land_use_grid='/path/to/land_use.tif',
land_use_grid_id='glcf',
load_rasters_to_db=False,
)
model.set_event(simulation_start=datetime(2017, 2, 28, 14, 33),
simulation_duration=timedelta(seconds=180*60),
rain_intensity=2.4,
rain_duration=timedelta(seconds=30*60),
)
model.write()
"""
def __init__(self,
project_directory,
project_name=None,
mask_shapefile=None,
auto_clean_mask_shapefile=False,
grid_cell_size=None,
elevation_grid_path=None,
simulation_timestep=30,
out_hydrograph_write_frequency=10,
roughness=None,
land_use_grid=None,
land_use_grid_id=None,
land_use_to_roughness_table=None,
load_rasters_to_db=True,
db_session=None,
project_manager=None,
):
self.project_directory = project_directory
self.db_session = db_session
self.project_manager = project_manager
self.load_rasters_to_db = load_rasters_to_db
if project_manager is not None and db_session is None:
raise ValueError("'db_session' is required to edit existing model if 'project_manager' is given.")
if project_manager is None and db_session is None:
if project_name is not None and mask_shapefile is None and elevation_grid_path is None:
self.project_manager, db_sessionmaker = \
dbt.get_project_session(project_name, self.project_directory)
self.db_session = db_sessionmaker()
self.project_manager.readInput(directory=self.project_directory,
projectFileName="{0}.prj".format(project_name),
session=self.db_session)
else:
# generate model
if None in (project_name, mask_shapefile, elevation_grid_path):
raise ValueError("Need to set project_name, mask_shapefile, "
"and elevation_grid_path to generate "
"a new GSSHA model.")
self.project_manager, db_sessionmaker = \
dbt.get_project_session(project_name, self.project_directory, map_type=0)
self.db_session = db_sessionmaker()
self.db_session.add(self.project_manager)
self.db_session.commit()
# ADD BASIC REQUIRED CARDS
# see http://www.gsshawiki.com/Project_File:Required_Inputs
self.project_manager.setCard('TIMESTEP',
str(simulation_timestep))
self.project_manager.setCard('HYD_FREQ',
str(out_hydrograph_write_frequency))
# see http://www.gsshawiki.com/Project_File:Output_Files_%E2%80%93_Required
self.project_manager.setCard('SUMMARY',
'{0}.sum'.format(project_name),
add_quotes=True)
self.project_manager.setCard('OUTLET_HYDRO',
'{0}.otl'.format(project_name),
add_quotes=True)
# ADD REQUIRED MODEL GRID INPUT
if grid_cell_size is None:
                    # calculate cell size from elevation grid if not given
                    # as input from the user
ele_grid = GDALGrid(elevation_grid_path)
utm_bounds = ele_grid.bounds(as_utm=True)
x_cell_size = (utm_bounds[1] - utm_bounds[0])/ele_grid.x_size
y_cell_size = (utm_bounds[3] - utm_bounds[2])/ele_grid.y_size
grid_cell_size = min(x_cell_size, y_cell_size)
ele_grid = None
log.info("Calculated cell size is {grid_cell_size}"
.format(grid_cell_size=grid_cell_size))
if auto_clean_mask_shapefile:
mask_shapefile = self.clean_boundary_shapefile(mask_shapefile)
self.set_mask_from_shapefile(mask_shapefile, grid_cell_size)
self.set_elevation(elevation_grid_path, mask_shapefile)
self.set_roughness(roughness=roughness,
land_use_grid=land_use_grid,
land_use_grid_id=land_use_grid_id,
land_use_to_roughness_table=land_use_to_roughness_table,
)
@staticmethod
def clean_boundary_shapefile(shapefile_path):
"""
        Cleans the boundary shapefile so that there is only one main polygon.
:param shapefile_path:
:return:
"""
wfg = gpd.read_file(shapefile_path)
first_shape = wfg.iloc[0].geometry
if hasattr(first_shape, 'geoms'):
log.warning("MultiPolygon found in boundary. "
"Picking largest area ...")
# pick largest shape to be the watershed boundary
# and assume the other ones are islands to be removed
max_area = -9999.0
main_geom = None
for geom in first_shape.geoms:
if geom.area > max_area:
main_geom = geom
max_area = geom.area
# remove self intersections
if not main_geom.is_valid:
log.warning("Invalid geometry found in boundary. "
"Attempting to self clean ...")
main_geom = main_geom.buffer(0)
wfg.loc[0, 'geometry'] = main_geom
out_cleaned_boundary_shapefile = \
os.path.splitext(shapefile_path)[0] +\
str(uuid.uuid4()) +\
'.shp'
wfg.to_file(out_cleaned_boundary_shapefile)
log.info("Cleaned boundary shapefile written to:"
"{}".format(out_cleaned_boundary_shapefile))
return out_cleaned_boundary_shapefile
return shapefile_path
def set_mask_from_shapefile(self, shapefile_path, cell_size):
"""
Adds a mask from a shapefile
"""
# make sure paths are absolute as the working directory changes
shapefile_path = os.path.abspath(shapefile_path)
# ADD MASK
with tmp_chdir(self.project_directory):
mask_name = '{0}.msk'.format(self.project_manager.name)
msk_file = WatershedMaskFile(project_file=self.project_manager,
session=self.db_session)
msk_file.generateFromWatershedShapefile(shapefile_path,
cell_size=cell_size,
out_raster_path=mask_name,
load_raster_to_db=self.load_rasters_to_db)
def set_elevation(self, elevation_grid_path, mask_shapefile):
"""
Adds elevation file to project
"""
# ADD ELEVATION FILE
ele_file = ElevationGridFile(project_file=self.project_manager,
session=self.db_session)
ele_file.generateFromRaster(elevation_grid_path,
mask_shapefile,
load_raster_to_db=self.load_rasters_to_db)
def set_outlet(self, latitude, longitude, outslope):
"""
Adds outlet point to project
"""
self.project_manager.setOutlet(latitude=latitude, longitude=longitude,
outslope=outslope)
def set_roughness(self,
roughness=None,
land_use_grid=None,
land_use_grid_id=None,
land_use_to_roughness_table=None):
"""
ADD ROUGHNESS FROM LAND COVER
See: http://www.gsshawiki.com/Project_File:Overland_Flow_%E2%80%93_Required
"""
if roughness is not None:
self.project_manager.setCard('MANNING_N', str(roughness))
elif land_use_grid is not None and (land_use_grid_id is not None \
or land_use_to_roughness_table is not None):
# make sure paths are absolute as the working directory changes
land_use_grid = os.path.abspath(land_use_grid)
if land_use_to_roughness_table is not None:
land_use_to_roughness_table = os.path.abspath(land_use_to_roughness_table)
mapTableFile = MapTableFile(project_file=self.project_manager)
mapTableFile.addRoughnessMapFromLandUse("roughness",
self.db_session,
land_use_grid,
land_use_to_roughness_table=land_use_to_roughness_table,
land_use_grid_id=land_use_grid_id)
else:
raise ValueError("Need to either set 'roughness', or need "
"to set values from land use grid ...")
def set_event(self,
simulation_start=None,
simulation_duration=None,
simulation_end=None,
rain_intensity=2,
rain_duration=timedelta(seconds=30*60),
event_type='EVENT',
):
"""
Initializes event for GSSHA model
"""
        # ADD TEMPORAL EVENT INFORMATION
if event_type == 'LONG_TERM':
self.event = LongTermMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_end=simulation_end,
simulation_duration=simulation_duration,
)
else: # 'EVENT'
self.event = EventMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_duration=simulation_duration,
)
self.event.add_uniform_precip_event(intensity=rain_intensity,
duration=rain_duration)
def write(self):
"""
Write project to directory
"""
# write data
self.project_manager.writeInput(session=self.db_session,
directory=self.project_directory,
name=self.project_manager.name)
|
Based on the best practices on the following page, I set up a 3 disk RAID0 array for MemSQL to use on my two leaf nodes (running on CentOS 7.6).
My question is: How do I tell MemSQL to use /dev/md0 (mounted in /data) instead of the root partition?
Also, I only configured RAID0 on the two leaf nodes. Should I configure it on the master aggregator as well?
This post discusses customer configuration of the MemSQL data directory location. |
#!/usr/bin/env python3
import unittest
import sys
sys.path.append('.')
from yap import expand_env_soft
from yap import call_lib
escape_sh = None
exec(call_lib)
from yap import missing_lib
exec(missing_lib)
def B(s):
" Avoid quoting backslashes all the time "
return s.replace('B', '\\').replace('S', "'").replace('D', '"')
class Test(unittest.TestCase):
def test_test(self):
self.assertEqual(B('B S DB'), '\\ \' "\\')
def test_escape_sh(self):
data = [
('nothing', 'nothing'),
('with spaces', 'withB spaces'),
('with Bs', 'withB BBs'),
('keep DquotesD and SquotesS', 'keepB DquotesDB andB SquotesS'),
('with BDs', 'withB BBDs'),
('', ''),
]
for raw, escaped in data:
self.assertEqual(
escape_sh(B(raw)),
B(escaped),
)
def test_expand_env_soft(self):
class O(object):
pass
# Arguments
sys = O()
sys.argv = ['zero', 'un']
self.assertEqual(eval(
expand_env_soft('bool($1)')), True
)
self.assertEqual(eval(
expand_env_soft('$1 == "un"')), True
)
self.assertEqual(eval(
expand_env_soft('bool($2)')), False
)
self.assertEqual(eval(
expand_env_soft('$2 == "deux"')), False
)
self.assertEqual(eval(
expand_env_soft('$2 == $2')), False
)
with self.assertRaises(KeyError):
eval(expand_env_soft('"error: {}".format($2)'))
with self.assertRaises(KeyError):
eval(expand_env_soft('$2[0]'))
with self.assertRaises(KeyError):
eval(expand_env_soft('$2[-3:]'))
with self.assertRaises(KeyError):
eval(expand_env_soft('$2.attr'))
with self.assertRaises(KeyError):
eval(expand_env_soft('$2 + "nope"'))
with self.assertRaises(KeyError):
eval(expand_env_soft('int($2)'))
# Environment variables
os = O()
os.environ = {'env': 'ENV!', 'empty': ''}
self.assertEqual(eval(
expand_env_soft('$env')), 'ENV!'
)
self.assertEqual(eval(
expand_env_soft('$empty')), ''
)
self.assertEqual(eval(
expand_env_soft('bool($missing)')), False
)
with self.assertRaises(TypeError):
eval(expand_env_soft('"error: " + $missing'))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
.radio Domain Registration - .radio is a new gTLD, as released by ICANN - The introduction of .radio will help to diversify and enhance the internet - You can register .radio domains and use them to create .radio websites here.
$459.40 / kr2409 / €324.00 / £250.00 / $496.60 / kr3102 / $359.30 / R4474.00 for companies1 for 1 year (during GA / General Availability).
$64.30 / kr337 / €45.40 / £35.00 / $69.50 / kr434 / $50.30 / R626.00 for individuals2 for 1 year (during GA / General Availability).
1Companies are: Unions of Broadcasters, Broadcast Radios, Internet Radio, Radio Professionals & Radio-related Companies. Please see Notes below.
2Individuals are: Radio Amateurs. Please see Notes below.
1 Unions of Broadcasters, Broadcast Radios, Internet Radio, Radio Professionals & Radio-related Companies.
2 Radio Amateurs. Domains for radio amateurs should match the registrant's "callsign", i.e. RadioAmateurCallsign.radio. The registrant should be able to produce his/her radio amateur licence and the linked callsign.
domain names are sold subject to the .radio Registry Terms. |
# -*- encoding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) Odoo Colombia (Community).
# Author David Arnold (devCO)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api # , _
class ResPartner(models.Model):
_inherit = 'res.partner'
dom = "['|', " \
" ('on_contact' ,'!=', is_company )," \
" '|', " \
" '&', " \
" ('on_company' , '=', is_company )," \
" ('on_company' , '=', is_company )," \
" '&', " \
" ('on_merchant', '=', is_company )," \
" ('on_merchant', '=', is_company )]"
fiscal_id_type = fields.Many2one(
'res.partner.idtype',
string=u'Document Type',
domain=dom,
)
fiscal_id = fields.Char(
string=u'Document ID',
# compute='validateformatcopy',
)
fiscal_id_doc = fields.Binary(
string=u'Document Scan',
help="Upload the supporting Document "
"preferably as size-optimized PDF. "
"This might "
"help save disk space and PDF allows "
"you to concatenate multiple documents."
)
@api.one
@api.onchange(
'fiscal_id_type',
'fiscal_id',
'is_company',
)
def validateformatcopy(self):
# CASE: Current ID Type is not applicable on Merchant
if self.is_company:
if not self.fiscal_id_type.on_merchant:
# Get the first valid ID type (remember: ordered by sequence)
self.fiscal_id_type = self.env['res.partner.idtype'].search(
[('on_merchant', '=', True)], limit=1).id
self.fiscal_id = None # Reset ID value
# CASE: Current ID Type is not applicable on Company
if self.is_company:
if not self.fiscal_id_type.on_company:
# Get the first valid ID type (remember: ordered by sequence)
self.fiscal_id_type = self.env['res.partner.idtype'].search(
[('on_company', '=', True)], limit=1).id
self.fiscal_id = None # Reset ID value
# CASE: Current ID Type is not applicable on contact
if not self.is_company:
if not self.fiscal_id_type.on_contact:
# Get the first valid ID type (remember: ordered by sequence)
self.fiscal_id_type = self.env['res.partner.idtype'].search(
[('on_contact', '=', True)], limit=1).id
self.fiscal_id = None # Reset ID value
# If everything is fine, call subclasses
if self.fiscal_id_type and self.fiscal_id:
# Function for String Operations
res = self._validateandformatid()
if res['output_type'] and res['output_id']:
self.fiscal_id_type = res['output_type']
self.fiscal_id = res['output_id']
# Procedure for Copying
self._copyid()
def _validateandformatid(self):
"""
Hook method to be inherited for custom validation methods.
:param input_type: the value of the field fiscal_id_type (id); passed
on by onchange decorator
:param input_id: the value of the field fiscal_id (string); passed on
by onchange decorator
:return: must return a dict with validated and formatted values
Hint:
you might not alter the output_type unless you might want to build
some kind of fiscal_id_type recognition
based on the input pattern into your hook method. CO###.###.###-#
CO-VAT (NIT) for example.
Find below a suggested basic outline.
"""
return {'output_type': self.fiscal_id_type, 'output_id': self.fiscal_id}
"""
f_type = self.fiscal_id_type
f_id = self.fiscal_id
is_company = self.is_company
def default():
return {'output_type': f_type, 'output_id': f_id}
return {
# Define your cases
# The index to match is self.fiscal_id_type.code
# Note: You can change this index below.
# Example assignation using two functions
# {'output_type': func_type1(), 'output_id': funct_id1()}
'CODE1': { "put your assignation here" },
'CODE2': { "put your assignation here" },
}.get(self.fiscal_id_type.code, default())
"""
def _copyid(self):
"""
Hook Method to be inherited for custom copy methods based on the
document type (id)
Example Use Case: Copy some local VAT number into the VAT-Field in
it's international format for compatibility.
:return: It is a Procedure and therefore has no return value.
Find below a suggested basic outline.
"""
"""
f_type = self.fiscal_id_type
f_id = self.fiscal_id
is_company = self.is_company
        def stringop_def(s): return s
        def stringop_1(s): return re.match('\\d|\\w*', s)
        # Define other string operations if necessary
        def default():
            self.vat_subjected = True
            # self.vat is a Boolean until base_vat is installed.
            # self.vat = self.country_id.code + stringop_def(f_id)
        {
            # Some examples to consider...
            # self.vat_subjected: True,
            # self.vat: self.country_id.code + stringop_1(f_id)
            'CODE1': { "put your statements here" },
            'CODE2': { "put your statements here" },
        }.get(self.fiscal_id_type.code, default())
"""
|
Welcome to the Thule Xpress 970, the quickest, easiest, lightest two bike carrier on the market, bar none!
If it ain’t broke don’t fix it! The Xpress 970 is a fantastic piece of design that has stood the test of time. It’s no doubt the easiest, lightest, quickest 2 bike tow ball mounted carrier on the market.
Don’t be fooled by its simple design: it’s rock solid on the ball and mounts in seconds without the need for any tools. It can be locked to the ball with a 6mm padlock (not included) and the rubber cradles will protect your bike’s frame.
Roof Rack Store recommend the use of Thule Lightboard 976 in conjunction with this product. We offer a fitting service or simply order online and we’ll ship freight free at the web special price.
Miscellaneous: straps for securing the bikes are included.
Ordered online, excellent service, arrived within days. Bike rack excellent, so easy to use and great quality.
This carrier is very simple to use. It's light, goes on very quickly and comes off in a flash without any tools required! Great price for a great bike carrier! |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import psycopg2
import sys
import csv
import glob
import re
def load_names():
"""Import names from SSN data dump into database"""
con = psycopg2.connect(database='ssn_names')
cur = con.cursor()
# for filename in glob.iglob('*1880.txt'):
for filename in glob.iglob('*.txt'):
        year = int(re.findall(r'\d+', filename)[0])
print('starting {0}'.format(year))
try:
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
for row in reader:
data = tuple(row[0:3]) + (year,)
load_statement = "INSERT INTO raw_names " + \
"(name, gender, count, year) " + \
"VALUES ('%s', '%s', '%s', '%s')" % data
cur.execute(load_statement)
except psycopg2.DatabaseError as e:
print('Error {0}'.format(e))
sys.exit(1)
finally:
if con:
con.commit()
def calculate_rank():
rank_statement = """
UPDATE raw_names
set rank = r.rnk
FROM (
SELECT name, year, gender, rank() OVER (partition by year, gender ORDER BY count DESC) AS rnk
FROM raw_names
) r
WHERE raw_names.name = r.name and raw_names.gender = r.gender and raw_names.year = r.year
"""
con = psycopg2.connect(database='ssn_names')
cur = con.cursor()
cur.execute(rank_statement)
con.commit()
def generate_list():
unique_names_statement_1 = """
INSERT INTO names(name, gender)
SELECT name,
gender
FROM raw_names
GROUP BY name, gender
"""
unique_names_statement_2 = """
SELECT name,
gender,
count(name) as num
INTO temp_max
FROM raw_names
WHERE rank <= 50
GROUP by name, gender
"""
unique_names_statement_3 = """
UPDATE names n
SET num_above_max = (SELECT num
FROM temp_max t
WHERE n.name = t.name
AND n.gender = t.gender)
"""
unique_names_statement_4 = """
drop table temp_max
"""
unique_names_statement_5 = """
SELECT name,
gender,
count(name) as num
INTO temp_min
FROM raw_names
WHERE rank > 500
GROUP by name, gender
"""
unique_names_statement_6 = """
UPDATE names n
SET num_below_min = (SELECT num
FROM temp_min t
WHERE n.name = t.name
AND n.gender = t.gender)
"""
unique_names_statement_7 = """
drop table temp_min
"""
unique_names_statement_8 = """
SELECT name,
gender,
count(name)
INTO temp_count
FROM raw_names
GROUP by name, gender
"""
unique_names_statement_9 = """
UPDATE names n
SET total_count = t.count
FROM temp_count t
WHERE t.name = n.name
AND t.gender = n.gender
"""
unique_names_statement_10 = """
UPDATE names
SET total_count_below = (136 - (total_count)) + num_below_min
"""
unique_names_statement_11 = """
drop table temp_count
"""
con = psycopg2.connect(database='ssn_names')
cur = con.cursor()
cur.execute(unique_names_statement_1)
cur.execute(unique_names_statement_2)
cur.execute(unique_names_statement_3)
cur.execute(unique_names_statement_4)
cur.execute(unique_names_statement_5)
cur.execute(unique_names_statement_6)
cur.execute(unique_names_statement_7)
cur.execute(unique_names_statement_8)
cur.execute(unique_names_statement_9)
cur.execute(unique_names_statement_10)
cur.execute(unique_names_statement_11)
con.commit()
def main():
# load_names()
print('calculating rank')
calculate_rank()
print('generating list')
generate_list()
if __name__ == "__main__":
main()
|
FINRA recently issued a Special Notice seeking comment on how it can support financial technology (fintech) innovation consistent with its mission of investor protection and market integrity. The announcement builds on an initiative FINRA launched last year to improve communication with the securities industry around fintech. In addition, FINRA requests comment on certain fintech areas, including the provision of data aggregation services, supervisory processes concerning the use of artificial intelligence, and the development of a taxonomy-based machine-readable rulebook. The Notice requests comment by October 12, 2018. |
# -*- coding: utf-8 -*-
"""
***************************************************************************
ExtractByAttribute.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import QgsExpression, QgsFeatureRequest
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputVector
from processing.tools import dataobjects
class ExtractByAttribute(GeoAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OPERATOR = 'OPERATOR'
VALUE = 'VALUE'
OUTPUT = 'OUTPUT'
OPERATORS = ['=',
'!=',
'>',
'>=',
'<',
'<=',
'begins with',
'contains'
]
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Extract by attribute')
self.group, self.i18n_group = self.trAlgorithm('Vector selection tools')
self.i18n_operators = ['=',
'!=',
'>',
'>=',
'<',
'<=',
self.tr('begins with'),
self.tr('contains')]
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input Layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterTableField(self.FIELD,
self.tr('Selection attribute'), self.INPUT))
self.addParameter(ParameterSelection(self.OPERATOR,
self.tr('Operator'), self.i18n_operators))
self.addParameter(ParameterString(self.VALUE, self.tr('Value')))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Extracted (attribute)')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
fieldName = self.getParameterValue(self.FIELD)
operator = self.OPERATORS[self.getParameterValue(self.OPERATOR)]
value = self.getParameterValue(self.VALUE)
fields = layer.pendingFields()
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
layer.wkbType(), layer.crs())
idx = layer.fieldNameIndex(fieldName)
fieldType = fields[idx].type()
if fieldType != QVariant.String and operator in self.OPERATORS[-2:]:
            op = ', '.join('"%s"' % o for o in self.OPERATORS[-2:])
raise GeoAlgorithmExecutionException(
self.tr('Operators %s can be used only with string fields.' % op))
if fieldType in [QVariant.Int, QVariant.Double, QVariant.UInt, QVariant.LongLong, QVariant.ULongLong]:
expr = '"%s" %s %s' % (fieldName, operator, value)
elif fieldType == QVariant.String:
if operator not in self.OPERATORS[-2:]:
expr = """"%s" %s '%s'""" % (fieldName, operator, value)
elif operator == 'begins with':
expr = """"%s" LIKE '%s%%'""" % (fieldName, value)
elif operator == 'contains':
expr = """"%s" LIKE '%%%s%%'""" % (fieldName, value)
elif fieldType in [QVariant.Date, QVariant.DateTime]:
expr = """"%s" %s '%s'""" % (fieldName, operator, value)
else:
raise GeoAlgorithmExecutionException(
self.tr('Unsupported field type "%s"' % fields[idx].typeName()))
expression = QgsExpression(expr)
if not expression.hasParserError():
req = QgsFeatureRequest(expression)
else:
raise GeoAlgorithmExecutionException(expression.parserErrorString())
for f in layer.getFeatures(req):
writer.addFeature(f)
del writer
|
We would like to recognize Corporations and Community Groups for their generous donations to the school.
Concrete Creations of Hawaii, Inc.
"class of '____." If you are making a donation in kind, please call us at (808) 421-4200. |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .aes import aes
from . import colors, shapes, size, linetypes, alphas
def assign_visual_mapping(data, aes, gg):
"""Assigns the visual mapping to the given data and adds the right legend
Parameters
----------
data : DataFrame
dataframe which should have aesthetic mappings assigned to
aes : aesthetic
mapping, visual value to variable
gg : ggplot object,
It holds global configuration values needed by
some of the mapping functions
Returns
-------
data : DataFrame
the changed dataframe with visual values added
legend : dict
A legend as specified in `components.legend`
"""
legend = {}
data, legend['color'] = colors.assign_colors(data, aes, gg, 'color')
data, legend['fill'] = colors.assign_colors(data, aes, gg, 'fill')
data, legend['size'] = size.assign_sizes(data, aes)
data, legend['linetype'] = linetypes.assign_linetypes(data, aes)
data, legend['shape'] = shapes.assign_shapes(data, aes)
data, legend['alpha'] = alphas.assign_alphas(data, aes)
# Delete empty entries in the legend
for _aes_name in ('color', 'fill', 'size', 'linetype', 'shape', 'alpha'):
if not legend[_aes_name]:
del legend[_aes_name]
return data, legend
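# A minimal usage sketch (illustrative only; assumes a pandas DataFrame and a
# ggplot object ``gg`` whose aesthetics are already bound elsewhere in the
# package):
#
#     df = pandas.DataFrame({'x': range(6), 'cat': list('aabbcc')})
#     mapping = aes(x='x', color='cat')
#     df, legend = assign_visual_mapping(df, mapping, gg)
#
# ``legend`` then only contains entries for aesthetics that were actually mapped.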
|
SilkAir is the "regional wing of Singapore Airlines." In 1976, SilkAir was founded as Tradewinds Pte Ltd, handling charters for Singapore Airlines and serving exotic destinations in the Singapore region. Tradewinds became a scheduled airline catering to holidaying passengers in February 1989. In April 1992 Tradewinds evolved into a full-fledged regional airline and was renamed SilkAir. SilkAir is based at Singapore's Changi Airport. Over 50 destinations are served in countries including Australia, Cambodia, China, India, Indonesia, Malaysia, Myanmar, Nepal, Philippines, Thailand, and Vietnam. Service is provided with modern Airbus A319 and A320 aircraft. The airline has selected the Boeing 737-800 as its future primary aircraft, and several have entered service in its current fleet. SilkAir is a partner airline of Singapore Airlines and Scoot. It is a member of the KrisFlyer frequent flyer program. |
import numpy as np
from analysis import Analysis
from corrections import PowerCalculator
from corrections import TurbulencePowerCalculator
from ..core.status import Status
class BenchmarkAnalysis(Analysis):
def __init__(self, analysis_config, baseLineMode):
self.basePower = "Base Power"
self.baseLineMode = baseLineMode
Status.add("Baseline Mode: %s" % self.baseLineMode)
Analysis.__init__(self, analysis_config)
self.calculateBase()
self.calculateHubBenchmark()
self.calculateREWSBenchmark()
self.calculateTurbRenormBenchmark()
        self.calculateCombinedBenchmark()
self.calculatePowerDeviationMatrixBenchmark()
self.calculateProductionByHeightBenchmark()
#self.dataFrame.to_csv("debug.dat")
def calculate_sensitivity_analysis(self):
#speed optimisation (sensitivity analysis not required for benchmark)
pass
def calculate_scatter_metric(self):
#speed optimisation (scatter metric not required for benchmark)
pass
def get_base_filter(self):
base_filter = Analysis.get_base_filter(self)
if self.baseLineMode == "Hub":
return base_filter & self.dataFrame[self.baseline.wind_speed_column].notnull()
elif self.baseLineMode == "Measured":
return base_filter
else:
raise Exception("Unrecognised baseline mode: %s" % self.baseLineMode)
def calculateBase(self):
if self.baseLineMode == "Hub":
if self.powerCurve is None:
exc_str = "%s Power Curve has not been calculated successfully." % self.powerCurveMode
if self.powerCurveMode == 'InnerMeasured':
exc_str += " Check Inner Range settings."
raise Exception(exc_str)
self.dataFrame[self.basePower] = self.dataFrame.apply(PowerCalculator(self.powerCurve, self.baseline.wind_speed_column).power, axis=1)
elif self.baseLineMode == "Measured":
if self.hasActualPower:
self.dataFrame[self.basePower] = self.dataFrame[self.actualPower]
else:
raise Exception("You must specify a measured power data column if using the 'Measured' baseline mode")
else:
raise Exception("Unkown baseline mode: % s" % self.baseLineMode)
self.baseYield = self.dataFrame[self.get_base_filter()][self.basePower].sum() * self.timeStampHours
def calculateHubBenchmark(self):
self.hubPower = "Hub Power"
self.dataFrame[self.hubPower] = self.dataFrame.apply(PowerCalculator(self.powerCurve, self.baseline.wind_speed_column).power, axis=1)
self.hubYield = self.dataFrame[self.get_base_filter()][self.baseline.power_column].sum() * self.timeStampHours
self.hubYieldCount = self.dataFrame[self.get_base_filter()][self.hubPower].count()
self.hubDelta = self.hubYield / self.baseYield - 1.0
Status.add("Hub Delta: %.3f%% (%d)" % (self.hubDelta * 100.0, self.hubYieldCount))
def get_rews(self):
Status.add("Locating REWS from {0} corrections".format(len(self.corrections)), verbosity=3)
for correction in self.corrections:
if self.corrections[correction].rews_applied() and not self.corrections[correction].turbulence_applied():
Status.add("Match: {0}".format(correction))
return correction
else:
Status.add("No match: {0}".format(correction))
raise Exception("Could not locate REWS correction")
def calculateREWSBenchmark(self):
if self.rewsActive:
self.rewsYield, self.rewsYieldCount, self.rewsDelta = self.calculate_benchmark_for_correction(self.get_rews())
def calculateTurbRenormBenchmark(self):
if self.turbRenormActive:
self.turbulenceYield, self.turbulenceYieldCount, self.turbulenceDelta = self.calculate_benchmark_for_correction("Turbulence")
if self.hasActualPower:
self.dataFrame[self.measuredTurbulencePower] = (self.dataFrame[self.actualPower] - self.dataFrame[self.corrections["Turbulence"].power_column] + self.dataFrame[self.basePower]).astype('float')
    def calculateCombinedBenchmark(self):
if self.rewsActive and self.turbRenormActive:
self.combinedYield, self.combinedYieldCount, self.combinedDelta = self.calculate_benchmark_for_correction("{0} & Turbulence".format(self.get_rews()))
def calculatePowerDeviationMatrixBenchmark(self):
if self.powerDeviationMatrixActive:
self.powerDeviationMatrixYield, self.powerDeviationMatrixYieldCount, self.powerDeviationMatrixDelta = self.calculate_benchmark_for_correction("2D Power Deviation Matrix")
def calculateProductionByHeightBenchmark(self):
if self.productionByHeightActive:
self.productionByHeightYield, self.productionByHeightYieldCount, self.productionByHeightDelta = self.calculate_benchmark_for_correction("Production by Height")
def calculate_benchmark_for_correction(self, correction):
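        # Returns (energy, count, delta) for the correction's power column, relative to the base yield.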
power_column = self.corrections[correction].power_column
energy = self.dataFrame[self.get_base_filter()][power_column].sum() * self.timeStampHours
count = self.dataFrame[self.get_base_filter()][power_column].count()
delta = energy / self.baseYield - 1.0
Status.add("%s Delta: %f%% (%d)" % (correction, delta * 100.0, count))
return (energy, count, delta)
|
However, it does lead to the challenge, from a management perspective, of knowing which iterations are running on each Gateway. You can get the information from the current UI, but it requires multiple steps. The UI also lends itself more to the design processes of today than to the denser information views that an operational report might warrant.
I’m sure that over time these views will come, but today we can solve the problem by taking advantage of the fact that the product lives by its own ‘mission’ of offering a very rich set of APIs. As a result it becomes possible to build your own views. To that end I have written a Groovy script which goes through each API it can see and retrieves the iteration deployed to each logical gateway.
You can hardwire default values into the script, which will then be used if no parameters are provided.
The script includes suppressing certificate validation – necessary if you haven’t yet deployed your own specific certificate and are still working with the default Oracle certificate.
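The original is a Groovy script; the minimal sketch below captures the same idea in Python. Every endpoint path, field name and credential here is an illustrative assumption rather than the product’s actual API.
import requests
# Hypothetical management endpoint and credentials - placeholders only.
BASE_URL = 'https://gateway.example.com/apiplatform'
AUTH = ('admin', 'password')
# Suppress certificate validation, as described above for installations
# still running with the default Oracle certificate.
requests.packages.urllib3.disable_warnings()
def report_iterations():
    # Walk every visible API and print the iteration deployed per gateway.
    apis = requests.get(BASE_URL + '/apis', auth=AUTH, verify=False).json()
    for api in apis:
        deployments = requests.get(BASE_URL + '/apis/%s/deployments' % api['id'],
                                   auth=AUTH, verify=False).json()
        for dep in deployments:
            print('%s: iteration %s on gateway %s'
                  % (api['name'], dep.get('iteration'), dep.get('gatewayName')))
if __name__ == '__main__':
    report_iterations()
|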
"""Provides some general, account-related tools"""
# Python imports
from math import ceil
# Django imports
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import redirect, render
# app imports
from oweb.exceptions import OWebDoesNotExist, OWebAccountAccessViolation
from oweb.models.planet import Planet
from oweb.models.building import Supply12
from oweb.models.research import Research113
from oweb.libs.production import get_fusion_production
from oweb.libs.costs import costs_onepointeight_total, costs_two_total
from oweb.libs.queue import get_mse
from oweb.libs.shortcuts import get_list_or_404, get_object_or_404
def tools_energy(req, account_id, energy_level=None, fusion_level=None):
"""Shows some energy related information"""
# this is the non-decorator version of the login_required decorator
# basically it checks, if the user is authenticated and redirects him, if
# not. The decorator could not handle the reverse url-resolution.
if not req.user.is_authenticated():
return redirect(reverse('oweb:app_login'))
# fetch the account and the current planet
try:
planets = Planet.objects.select_related('account').filter(account_id=account_id)
account = planets.first().account
except Planet.DoesNotExist:
raise OWebDoesNotExist
except AttributeError:
raise OWebDoesNotExist
# checks, if this account belongs to the authenticated user
if not req.user.id == account.owner_id:
raise OWebAccountAccessViolation
planet_ids = planets.values_list('id', flat=True)
if not fusion_level:
fusion_list = get_list_or_404(Supply12, astro_object_id__in=planet_ids)
# determine the average fusion reactor and maximum fusion reactor
max_fusion = 0
average_fusion = 0
for f in fusion_list:
average_fusion += f.level
if f.level > max_fusion:
max_fusion = f.level
        fusion_level = ceil(float(average_fusion) / len(planet_ids))  # float() guards against Python 2 integer division
fusion_base_cost = f.base_cost
else:
fusion_base_cost = Supply12.base_cost
fusion_level = int(fusion_level)
if not energy_level:
energy = get_object_or_404(Research113, account_id=account_id)
energy_level = energy.level
energy_base_cost = energy.base_cost
else:
energy_level = int(energy_level)
energy_base_cost = Research113.base_cost
# calculate the costs of the current fusion plant
current_fusion_cost = costs_onepointeight_total(fusion_base_cost, fusion_level)
current_fusion_cost = get_mse(current_fusion_cost, (account.trade_metal, account.trade_crystal, account.trade_deut))
# calculate the costs of the current energy technology
current_energy_cost = costs_two_total(energy_base_cost, energy_level)
current_energy_cost = get_mse(current_energy_cost, (account.trade_metal, account.trade_crystal, account.trade_deut))
# calculate the production of the fusion plant
this_prod = int(get_fusion_production(fusion_level, energy=energy_level)[3])
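    # Build a 5x5 matrix: rows are fusion plant levels (current .. current+4),
    # columns are energy tech levels, each cell holding production and cost data.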
fusion_matrix = []
for i in range(0, 5):
f = fusion_level + i
# calculate the costs of this fusion plant
f_cost = costs_onepointeight_total(fusion_base_cost, f)
f_cost = get_mse(f_cost, (account.trade_metal, account.trade_crystal, account.trade_deut)) - current_fusion_cost
et_range = []
for j in range(0, 5):
et = energy_level + j
# calculate the costs of this energy tech
et_cost = costs_two_total(energy_base_cost, et)
et_cost = (get_mse(et_cost, (account.trade_metal, account.trade_crystal, account.trade_deut)) - current_energy_cost) / len(planet_ids)
# total costs of this combination
next_cost = f_cost + et_cost
# calculate the production of this combination
next_prod = int(get_fusion_production(f, energy=et)[3])
next_prod_gain = int(next_prod - this_prod)
# calculate the "score" of this combination
# COSTS / PRODUCTION_GAIN
if next_prod_gain != 0:
next_ratio = next_cost / next_prod_gain
else:
next_ratio = 0
et_range.append((
et,
next_prod,
next_prod_gain,
next_cost,
next_ratio
))
fusion_matrix.append((int(f), et_range))
return render(req, 'oweb/tools_energy.html',
{
'account': account,
'planets': planets,
'fusion_matrix': fusion_matrix,
'energy_level': energy_level,
'fusion_level': fusion_level,
}
)
|
This gated community, called Senterra, is the largest of the three Villages of Fairbanks built by Ray Watt and designed by architect Ross Sutherland. While located in the City of San Diego, zip code 92130, Senterra residents receive their mail in the Rancho Santa Fe zip code. It comprises 162 home sites, with homes ranging from 2,370 to 3,520 square feet. The first of these Spanish Mediterranean residences were built in 1986 in different sections, each with its own pool/spa area. There are four floor plans, and prices start at around $1.3 million. School districts are Solana Beach Elementary, Earl Warren Middle, and Torrey Pines High School or San Dieguito Academy.
This ocean-close development is in close proximity to Fairbanks Ranch Country Club and the Del Rayo Village Shopping Center, where there is a post office, restaurants and personal services, as well as a gas station. |
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import copy
from cStringIO import StringIO
from fnmatch import fnmatch
import gzip
import hashlib
import logging
import mimetypes
import os
from boto.s3.key import Key
import app_config
import utils
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
GZIP_FILE_TYPES = ['.html', '.js', '.json', '.css', '.xml']
def deploy_file(bucket, src, dst, headers={}, public=True):
"""
Deploy a single file to S3, if the local version is different.
"""
k = bucket.get_key(dst)
s3_md5 = None
if k:
s3_md5 = k.etag.strip('"')
else:
k = Key(bucket)
k.key = dst
file_headers = copy.copy(headers)
if 'Content-Type' not in headers:
file_headers['Content-Type'] = mimetypes.guess_type(src)[0]
if file_headers['Content-Type'] == 'text/html':
# Force character encoding header
file_headers['Content-Type'] = '; '.join([
file_headers['Content-Type'],
'charset=utf-8'])
# Define policy
if public:
policy = 'public-read'
else:
policy = 'private'
# Gzip file
if os.path.splitext(src)[1].lower() in GZIP_FILE_TYPES:
file_headers['Content-Encoding'] = 'gzip'
with open(src, 'rb') as f_in:
contents = f_in.read()
output = StringIO()
f_out = gzip.GzipFile(filename=dst, mode='wb', fileobj=output, mtime=0)
f_out.write(contents)
f_out.close()
local_md5 = hashlib.md5()
local_md5.update(output.getvalue())
local_md5 = local_md5.hexdigest()
if local_md5 == s3_md5:
logger.info('Skipping %s (has not changed)' % src)
else:
logger.info('Uploading %s --> %s (gzipped)' % (src, dst))
k.set_contents_from_string(output.getvalue(), file_headers, policy=policy)
# Non-gzip file
else:
with open(src, 'rb') as f:
local_md5 = hashlib.md5()
local_md5.update(f.read())
local_md5 = local_md5.hexdigest()
if local_md5 == s3_md5:
logger.info('Skipping %s (has not changed)' % src)
else:
logger.info('Uploading %s --> %s' % (src, dst))
k.set_contents_from_filename(src, file_headers, policy=policy)
def deploy_folder(bucket_name, src, dst, headers={}, ignore=[]):
"""
Deploy a folder to S3, checking each file to see if it has changed.
"""
to_deploy = []
for local_path, subdirs, filenames in os.walk(src, topdown=True):
rel_path = os.path.relpath(local_path, src)
for name in filenames:
if name.startswith('.'):
continue
src_path = os.path.join(local_path, name)
skip = False
for pattern in ignore:
if fnmatch(src_path, pattern):
skip = True
break
if skip:
continue
if rel_path == '.':
dst_path = os.path.join(dst, name)
else:
dst_path = os.path.join(dst, rel_path, name)
to_deploy.append((src_path, dst_path))
if bucket_name == app_config.STAGING_S3_BUCKET:
public = False
else:
public = True
bucket = utils.get_bucket(bucket_name)
logger.info(dst)
for src, dst in to_deploy:
deploy_file(bucket, src, dst, headers, public=public)
def delete_folder(bucket_name, dst):
"""
Delete a folder from S3.
"""
bucket = utils.get_bucket(bucket_name)
for key in bucket.list(prefix='%s/' % dst):
logger.info('Deleting %s' % (key.key))
key.delete()
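# Example invocations (bucket name and paths are hypothetical):
#
#     bucket = utils.get_bucket('my-bucket')
#     deploy_file(bucket, 'www/index.html', 'live/index.html')
#     deploy_folder('my-bucket', 'www', 'live', ignore=['*.map'])
#     delete_folder('my-bucket', 'live')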
|
Need a high-performance stacking solution? Our BT Staxio W-series has been designed to let the operator get on with the job at hand – simply and safely. With excellent visibility through the truck mast and intuitive controls, it can be driven with the handle in an upright position for improved manoeuvrability in tight spaces. |
from chiplotle.hpgl.abstract.positional import _Positional
from chiplotle.hpgl.abstract.hpglprimitive import _HPGLPrimitive
class _Arc(_Positional):
def __init__(self, xy, angle, chordtolerance=None):
self.angle = angle
self.chordtolerance = chordtolerance
_Positional.__init__(self, xy)
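    # Python 2 idiom: @apply calls the function immediately, so ``angle``
    # ends up bound to the property object returned by the inner function.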
@apply
def angle( ):
def fget(self):
return self._angle
def fset(self, arg):
if abs(arg) > 360:
raise ValueError('angle must be between -360 and 360.')
self._angle = arg
return property(**locals( ))
@property
def format(self):
if isinstance(self.x, int) and isinstance(self.y, int):
coordinates = '%i,%i' % (self.x, self.y)
else:
coordinates = '%.2f,%.2f' % (self.x, self.y)
result = '%s%s,%.2f' % (self._name, coordinates, self.angle)
if self.chordtolerance:
result += ',%.2f' % self.chordtolerance
result += _HPGLPrimitive._terminator
return result
|
The international association of theaters producing Shakespeare’s plays has chosen Baltimore as the site of its annual conference for 2017.
An estimated 200 theater professionals are expected to stay a week in downtown Baltimore to discuss issues of artistic vision and leadership, management and education programming. The Shakespeare conference typically takes place in January, but dates for the 2017 meeting have not yet been set.
The Shakespeare Theatre Association formed in 1991 to serve the artistic, managerial and educational leaders of theaters that produce works by William Shakespeare and related education programming. Its members represent Shakespeare festivals and theater companies in America from Hawaii to Harlem, and across the globe in Great Britain, the Czech Republic, Canada, Australia and beyond.
Chesapeake Shakespeare Company managers have long wished to attract the international conference to Maryland, but did not have their own theater to welcome their professional peers until 2014. Last year, the 12-year-old theater company expanded to Baltimore from Howard County. In September, it opened a 260-seat theater in the historic Mercantile Trust building at 7 South Calvert Street downtown. |
#!/usr/bin/env python
import sys, os
PY_VERSION = sys.version_info
if PY_VERSION.major != 2 or PY_VERSION.minor != 7:  # 'or', not 'and': reject anything that is not exactly 2.7
print("This application requires Python 2.7. You are using Python %d.%d."
% (PY_VERSION.major, PY_VERSION.minor))
sys.exit(1)
from datetime import datetime
from lib.blockchainquery import core as bq
from lib.bitcoinvalidation import addressvalidation as bv
EXAMPLE_ADDRESS = '18WaqDnNRbXpbfgGAv5bC7spb366c4CCfX'
def generate_related_report(recursive, indent, suppresszero, includechangeinputs, maxresult, parallel, *addresses):
'''Uses various techniques to identify addresses related and generates a report
'''
os.system('cls' if os.name == 'nt' else 'clear')
if recursive:
print("Recursively identifying addresses related to:")
else:
print("Identifying addresses related to:")
print("-"*70)
for count, addr in enumerate(addresses):
print ('{:>3}. {:<39}'.format(count+1,addr))
print("-"*70)
print('')
print("Please wait...")
related_addr_dict = bq.getRelatedAddresses(recursive, includechangeinputs, maxresult, parallel, None, *addresses)
running_balance = 0
#Generate text report
os.system('cls' if os.name == 'nt' else 'clear')
NonZeroAccount = 0
if(suppresszero):
print("Non Zero Related Accounts")
else:
print("Related Accounts")
print("-"*70)
resultsshown = print_audit_report_body(related_addr_dict,indent,suppresszero)
if(len(related_addr_dict) == maxresult):
print(' ...Maximum Limit Reached...')
if(resultsshown <len(related_addr_dict)):
        print(' ...{:d} Zero Balance Results Suppressed...'.format(len(related_addr_dict) - resultsshown))
print("-"*70)
# Running balance
for addr in related_addr_dict:
running_balance = running_balance + float(bq.getAddressInfo(addr)[0]['final_balance']) / bq.SATOSHIS_IN_A_BITCOIN()
print("Total BTC {:>60f}".format(running_balance))
def print_audit_report_body(related_addr_dict, indent,suppresszero, parent_addr = None, depth=0, line_num = 0):
'''Outputs the audit report body. The function returns the number of lines printed'''
if(parent_addr == None):
for outer_addr, outer_value in related_addr_dict.iteritems():
if outer_value['relationtype'] == 'root':
ref = outer_value['txhash']
balance = float(bq.getAddressInfo(outer_addr)[0]['final_balance']) / bq.SATOSHIS_IN_A_BITCOIN()
line_num +=1
print ('{:>3}. {:<49}{:>16f}'.format(line_num, outer_addr,balance))
# Now we print any address related to the root
line_num = print_audit_report_body(related_addr_dict, indent, suppresszero, outer_addr, depth+1, line_num)
else:
# Now we print any address related to the parent
for addr, value in related_addr_dict.iteritems():
if(value['relation']==parent_addr):
balance = float(bq.getAddressInfo(addr)[0]['final_balance']) / bq.SATOSHIS_IN_A_BITCOIN()
MAX_DEPTH = 17
if(indent):
if(depth<MAX_DEPTH):
indents = ' ' * (depth-1) + ('=' if value['relationtype'] == 'fellow' else '>' if value['relationtype']=='change' else '?')
else:
prefix = ' d+' + str(depth-MAX_DEPTH+1)
indents = prefix + ' ' * (MAX_DEPTH-len(prefix)-2) + ('=' if value['relationtype'] == 'fellow' else '>' if value['relationtype']=='change' else '?')
else:
indents=''
                if not suppresszero or balance > 0:
                    line_num += 1
                    print('{:>3}. {:<49}{:>16f}'.format(line_num, indents + addr, balance))
                    line_num = print_audit_report_body(related_addr_dict, indent, suppresszero, addr, depth+1, line_num)
return line_num
def show_help():
'''Prints the commandline help'''
filename = os.path.basename(__file__)
print('Reports the balances of any related bitcoin addresses.')
print('')
    print('{} [-r][-s][-i][-t][-e][-c][-m][-p] Address1 Address2 ...'.format(filename.upper()))
print('')
print(' -r Recursively scan for related addresses')
print(' -s Suppress addresses with a zero balance')
print(' -i Indent to show relationships; useful when doing a recursive scan')
    print(' -t Test address {0} is used for the scan'.format(EXAMPLE_ADDRESS))
print(' -e Calls made to external servers are reported')
print(' -c Includes inputs that appear to be using a related addr to store change')
print(' -m Max results, enter as -m300 to limit results to 300 [Default:50]')
print(' -p Use Parallel queries to Blockchain.info to increase speed. [Experimental]')
print('')
print('eg. {0} -r -s {1}'.format(filename.upper(),EXAMPLE_ADDRESS))
print('')
if __name__ == '__main__':
showhelp = False
parallel = False
recurse = False
usetestaddress = False
suppresszero = False
indent = False
reportcalls = False
includechangeinputs = False
showtime = False
addresses = []
unknownflags = []
maxresults = 50
startTime = datetime.now()
if len(sys.argv) ==1: showhelp = True
else:
for flag in sys.argv[1:]:
if flag == '-?': showhelp = True
elif flag == '-t': usetestaddress = True
elif flag == '-r': recurse = True
elif flag == '-s': suppresszero = True
elif flag == '-i': indent = True
elif flag == '-e': reportcalls = True
elif flag == '-c': includechangeinputs = True
elif flag == '-p': parallel = True
elif flag.startswith('-m'):
try:
maxresults = int(flag[2:])
except:
showhelp = True
if maxresults < 1:
showhelp = True
elif bv.check_bitcoin_address(flag):
addresses.append(flag)
else:
unknownflags.append(flag)
if len(unknownflags)>0:
for flag in unknownflags:
print("This argument is not understood: {0}".format(flag))
print('')
show_help()
elif showhelp:
show_help()
elif usetestaddress:
generate_related_report(recurse, indent, suppresszero, includechangeinputs, maxresults, parallel, EXAMPLE_ADDRESS)
showtime = True
else :
generate_related_report(recurse, indent, suppresszero, includechangeinputs, maxresults, parallel, *addresses)
showtime = True
if indent:
print('')
print('Address Prefix Key')
print('------------------')
print('None: Root address, this is one of the keys you searched for')
print('= : Fellow input address of its parent')
print('> : Used as a change address by its parent')
if includechangeinputs:
print('? : Used its parent as a change address. {May be unreliable}')
if reportcalls:
print('')
print('Call report')
print('-----------')
print('')
print('Calls to blockchain.info requesting information on addresses: ' + str(bq._get_address_info_cache_misses))
print('Calls to blockchain.info requesting information on blocks: ' + str(bq._get_block_info_cache_misses))
if showtime:
print('')
print('Report took {} seconds to generate'.format((datetime.now()-startTime).total_seconds()))
|
The word angel is derived from the Greek word “angelos”, which means messenger. This was used for otherworldly visitors who appeared to humans in spiritual form, for a number of various reasons. Usually, their message contained whatever was important as directed by the heavens above. Otherworldly beings often came in times of need and distress, and to forewarn of oncoming catastrophe. Angels often came from above to teach others about spiritual knowledge and to deliver secrets of the heavens. They also appeared to herald the birth of very important figures to come in the future. Sometimes they would come to caution the world about an enemy who could potentially threaten it.
These visitors have been appearing since the beginning of time. If you go back into ancient history, you will find that in many cultures and civilizations all around the world, there is evidence of otherworldly visitors with wings who would come to speak with important figures on earth whom they felt could help them in acting out the contents of their messages. Overall, we know that there were messengers who came throughout time. There was no partiality shown in the delivery of the message, geographically or spiritually.
If there was a message that needed to be given in any part of the world, to any civilization, it was given, although many religious beliefs today seem to imply that they are the only ones worthy of angelic messages. Truly, it has nothing to do with religion; it has more to do with the worthiness of the individuals themselves who will receive the message.
Angels are a very specific race of heavenly beings who come from the higher dimensions within a planetary environment. They also come from the dimensions in the universe too. If you look at earth, we have eight dimensions in all. However, there are other planets that may not hold residents of a 3-D body, but they too have other dimensional layers in which beings live in, that are unseen to anyone is not of that frequency. It is very similar to how the dimensions are here, on planet earth.
All around us, there are spiritual beings that are unseen, existing on another frequency. It is the same all over the universe. The races of “angels”, very specifically the same entities that are responsible for delivering messages, were also responsible for playing a part in the creation of some planets and their inner realities too. Their roles in the upper planes depended on their power, wisdom, and if they were created or appointed in their positions.
There are many races out there, all over the Universe and in the realms within each planet’s reality complex. Any soul who has a divine mission, can be considered a messenger of sorts. A soul who is extraterrestrial or interdimensional who comes into this dimension with information that is of importance whether to an individual, a town, city, state or country, is considered to be a messenger of sorts. The same applies for any being who is divinely directed to act out a life altering task, or provide divine guidance.
Souls can be guided spiritually to enter other worlds, dimensions, and realms temporarily at any time for those purposes. But, they can also be divinely guided to enter permanently too. They can enter to stay, by being born into a body that interacts with that specific reality. For example, here on Earth they would enter a physical human body.
They can do that either by sending their consciousness into the fetus while in the womb, or they are scheduled to enter the body at another date. Is there a soul already there? Sometimes, yes. If so, that soul already spiritually made the soul agreement to merge itself with the greater consciousness of the divine being who will take over the body. Why? That human soul may want to make the selfless choice of allowing a greater soul entry into the world through itself. And it may simply want the experience of being in the womb of a loving mother at the same time. When they combine consciousness, the human soul will benefit from the knowledge gained by merging with the greater soul itself. So it gets to become part of something greater than itself, which is a divine gift. An anointing.
These situations are called “incarnations”, and the body is called an avatar as it embodies the divinity itself. Many souls incarnate no matter where they are from as a spiritual being. Technically, anyone who continues to experience being born into a new life after they die from their last, is considered to be incarnating. This is because, the original consciousness that they were created and started out with, enters a new body with genetic consciousness already in it, each new lifetime.
That genetic consciousness either stays with that soul through the lessons that it learned while using it, or it dies when that physical body dies, as it is no longer needed. The genetic consciousness is the intelligence within the DNA of that physical body and bloodline.
However, when speaking about a greater form of consciousness from a higher plane of divine origin and of a specific race of beings who are related to the Creators directly, the term changes. It becomes a “divine incarnation”. If the divine being comes for a great mission, they become an “incarnate angel”. This is because they embody not only the divine soul, but also a message, mission, or task. Those types of incarnate souls are powerful, wise of things on Earth and beyond it, as well as having a direct connection to spirit and other worlds.
In truth, souls as such usually choose the physical body descended genetically from a demigod to have a divine physical form, as well as the divine consciousness too. This is in order for the divine consciousness to have DNA codes that it can awaken to reach its full potential.
That type of consciousness is the only one who can read those genetic codes in that body, in order to awaken them. Otherwise, they would remain dormant for anybody else, unless an entity triggered them from heaven, for that individual. The divine consciousness is needed to awaken the divine genetics in a demigod body.
There are many souls from all over the world who can come into this one and take on human form, being born here. But the divine races are from the sixth dimension and up. There’s always some greater intention or purpose when they do come. Whether it is to inspire global change, or to inspire a small crowd of people, they’d always leave a positive mark somehow, somewhere. |
from . import auth_checks
from django.db import models
from django.contrib.sessions.models import Session
from django.contrib.sessions.backends.db import SessionStore
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.db.models.signals import post_save
from django.utils import timezone
from django_otp.plugins.otp_totp.models import TOTPDevice
from django_otp.plugins.otp_static.models import StaticDevice
from django.conf import settings
from six.moves.urllib.parse import quote, urlencode
import base64
ALL_DEVICE_CLASSES = [TOTPDevice, StaticDevice]
# This could be configurable from settings. It isn't at the moment.
check_auth = auth_checks.check_auth
needs_2fa = auth_checks.needs_2fa
def all_otp_devices(user, confirmed=True):
for Dev in ALL_DEVICE_CLASSES:
devs = Dev.objects.devices_for_user(user, confirmed=confirmed)
for d in devs: # could be a python3 'yield from'
yield d
def totpauth_url(totp_dev):
# https://github.com/google/google-authenticator/wiki/Key-Uri-Format
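    # Returns a byte-string URL of the form:
    #   otpauth://totp/<username>?secret=<base32 key>&digits=<n>&issuer=<issuer>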
label = totp_dev.user.username.encode('utf8')
# We need two separate issuers, otherwise deploying in prod will override our authenticator token from
# dev
if settings.DEPLOY_MODE == 'production':
issuer = b'CourSys'
else:
issuer = b'CourSys-DEV'
query = [
('secret', base64.b32encode(totp_dev.bin_key)),
('digits', totp_dev.digits),
('issuer', issuer)
]
return b'otpauth://totp/%s?%s' % (label, urlencode(query).encode('ascii'))
# based on http://stackoverflow.com/a/4631504/1236542
class SessionInfo(models.Model):
'''
Meta-information about Sessions, so we can record when authentications happened.
'''
session = models.OneToOneField(Session, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
last_auth = models.DateTimeField(null=True)
last_2fa = models.DateTimeField(null=True)
@classmethod
def for_session(cls, session, save_new=True):
'Retrieve or create a SessionInfo for this Session.'
assert isinstance(session, Session)
try:
si = cls.objects.get(session=session)
except (SessionInfo.DoesNotExist):
si = SessionInfo(session=session)
if save_new:
si.save()
return si
@classmethod
def for_sessionstore(cls, sessionstore, save_new=True):
'Retrieve or create a SessionInfo for this SessionStore.'
assert isinstance(sessionstore, SessionStore)
try:
si = cls.objects.get(session__session_key=sessionstore.session_key)
except (SessionInfo.DoesNotExist):
si = SessionInfo(session=Session.objects.get(session_key=sessionstore.session_key))
if save_new:
si.save()
return si
@classmethod
def for_request(cls, request, save_new=True, user=None):
'Retrieve the SessionInfo for this request, if it has an active session.'
if hasattr(request, 'session_info') and request.session_info is not None:
# already have it.
return request.session_info
if request.session.session_key is None:
# no session: no point in looking.
request.session_info = None
else:
try:
si = cls.for_sessionstore(request.session, save_new=save_new)
except (Session.DoesNotExist):
request.session_info = None
return
request.session_info = si
return request.session_info
@classmethod
def just_logged_in(cls, request):
'Records that the session associated with this request just logged in (by django auth).'
si = cls.for_request(request, save_new=False)
if si is None:
return
si.last_auth = timezone.now()
si.save()
return si
@classmethod
def just_logged_out(cls, request):
'Records that the session associated with this request just logged out.'
si = cls.for_request(request, save_new=False)
if si is None:
return
si.last_auth = None
si.save()
return si
@classmethod
def just_2fa(cls, request):
'Records that the session associated with this request just completed 2FA.'
        si = cls.for_request(request, save_new=False)
        if si is None:
            return
        si.last_2fa = timezone.now()
si.save()
return si
def __str__(self):
return '%s@%s' % (self.session_id, self.created)
def okay_auth(self, request, user):
'''
Is the auth okay for this request/user?
Hook here to allow apps to customize behaviour. Returns a boolean pair:
Is standard Django auth okay?
Is 2FA okay?
May assume that Django auth *and* OTP auth have said yes. Only need to restrict further.
'''
return check_auth(self, request, user)
def logged_in_listener(request, **kwargs):
SessionInfo.just_logged_in(request)
def logged_out_listener(request, **kwargs):
SessionInfo.just_logged_out(request)
user_logged_in.connect(logged_in_listener)
user_logged_out.connect(logged_out_listener)
def session_create_listener(instance, **kwargs):
instance.session_info = SessionInfo.for_session(instance)
post_save.connect(session_create_listener, sender=Session) |
Since September, the Caravaggio show at Musée Jacquemart André has been continually full, and one way to avoid queuing has been to go at the last minute, after 5 pm. For the last three days of the exhibition, January 26 to 28, the museum will be open till 10 pm. It is an occasion to see the museum at night, and the ambiance will be very special, especially if the tea room remains open.
Musée Jacquemart André, 158 bd Haussmann, until January 28. |
#!/usr/bin/env <PATH_HELPDESK>/env/bin/python
# -*- encoding: utf-8 -*-
##
# Copyright 2017 FIWARE Foundation, e.V.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##
from datetime import datetime
from HelpDesk.platforms.servers import StackExchange
from HelpDesk.desks.helpdeskImporter import HelpDeskImporter
from logging import error, exception, info, debug
from logging import _nameToLevel as nameToLevel
from argparse import ArgumentParser
from sys import exc_info
from random import choice
from Common.logging_conf import LoggingConf
from Config.settings import JIRA_URL
__author__ = "Fernando López <[email protected]"
class StackOverflowSync(LoggingConf):
def __init__(self, loglevel):
"""
Initialize the script and fix the log level.
:return: Nothing.
"""
super(StackOverflowSync, self).__init__(loglevel=loglevel, log_file='stackoverflow.log')
info('\n\n---- StackOverflow Synchronization----\n')
# Tell urlib3 to use the pyOpenSSL
# urllib3.contrib.pyopenssl.inject_into_urllib3()
# Create a PoolManager that verifies certificates when performing requests
# http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
info("Getting the HelpDesk monitors data")
self.help_desk = HelpDeskImporter()
self.stack = StackExchange()
self.list_questions = None
try:
self.help_desk.get_monitors()
except Exception as e:
error(e)
exception('No connection to JIRA https://{}'.format(JIRA_URL))
exception("Unexpected error: {}".format(exc_info()[0]))
exit()
def get_stack_monitor(self):
"""
Get the list of questions in StackOverflow and the relation of questions already monitored in the system.
:return: The StackOverflow data.
"""
info("Getting the StackOverflow data")
try:
# raise Exception
self.stack.get_questions()
except Exception as e:
error(e)
info('Failed to get questions from server')
finally:
self.stack.match(self.help_desk.monitors)
def questions_with_no_answer(self, partition_date):
"""
Get the list of questions in StackOverflow with no response.
:param partition_date: Date from which we check the new questions.
:return: Nothing
"""
# Firstly: Get the list of monitored and unmonitored questions with no answer
info("Obtaining list of questions with no answer")
list_questions = filter(lambda x: not x.answer_count, self.stack.questions)
self.help_desk.update_with(list_questions)
        # materialize with list(): these filter iterators are consumed more than once below
        answered_questions = list(filter(lambda x: x.answer_count > 0 and not x.is_answered, self.stack.questions))
        new_questions = filter(lambda x: x.added_at >= partition_date, answered_questions)
        self.help_desk.update_with(new_questions)
        old_questions = list(filter(lambda x: x.added_at < partition_date, answered_questions))
        mon_old_questions = filter(lambda x: x.monitor, old_questions)
        self.help_desk.update_with_time(mon_old_questions)
        unmon_old_questions = list(filter(lambda x: not x.monitor, old_questions))
if len(unmon_old_questions) > 0:
self.help_desk.update_with_time([choice(unmon_old_questions)])
else:
info('NOT available answered questions for synchronization with help desk')
def questions_with_answers(self, partition_date):
"""
Get the list of questions with a answer but not reflected in Jira.
:param partition_date: Date from which we check the new questions.
:return: The list of questions that need to be monitored.
"""
# Secondly: Get the list of questions answered to check if they are monitored
info("Obtaining list of questions answers")
        # materialize with list(): these filter iterators are consumed more than once below
        accepted_questions = list(filter(lambda x: x.is_answered, self.stack.questions))
        new_questions = filter(lambda x: x.added_at >= partition_date, accepted_questions)
        self.help_desk.update_with(new_questions)
        old_questions = list(filter(lambda x: x.added_at < partition_date, accepted_questions))
        mon_old_questions = list(filter(lambda x: x.monitor, old_questions))
        unmon_old_questions = list(filter(lambda x: not x.monitor, old_questions))
list_questions = mon_old_questions
if len(unmon_old_questions) > 0:
list_questions.append(choice(unmon_old_questions))
else:
info('NOT available questions with accepted answer for synchronization with help desk')
self.list_questions = list_questions
def get_answers(self):
"""
:return:
"""
info("Getting the final list of StackOverflow questions")
try:
self.stack.get_answers(self.list_questions)
except Exception as e:
error(e)
exception('Failed to get answers from server')
exception("Unexpected error: {}".format(exc_info()[0]))
else:
self.help_desk.update_with_time(self.list_questions)
def report(self):
def pq(a_question):
result = 'None'
if a_question.monitor:
result = a_question.monitor.fields.status
return result
for question in self.list_questions:
debug('{}, monitor={}, monitor status={}, question url={}'
.format(question, question.monitor, pq(question), question.url))
def get_number_issues_created(self):
return self.help_desk.n_monitors
def get_number_transitions(self):
return self.help_desk.n_transitions
def get_number_assignments(self):
return self.help_desk.n_assigments
def get_questions(self):
return len(self.stack.questions)
def process(self, year, month, day):
self.get_stack_monitor()
dividing_day = datetime(year=year, month=month, day=day)
self.questions_with_no_answer(partition_date=dividing_day)
self.questions_with_answers(partition_date=dividing_day)
self.get_answers()
info('helpdesk: # issues created = {}'.format(self.get_number_issues_created()))
info('helpdesk: # issues transitions = {}'.format(self.get_number_transitions()))
info('helpdesk: # issues assignments = {}'.format(self.get_number_assignments()))
info('stackoverflow questions= {}'.format(self.get_questions()))
self.close()
if __name__ == "__main__":
# Create the scripts arguments to execute the scripts
parser = ArgumentParser(prog='StackOverflow', description='StackOverflow synchronising script')
parser.add_argument('-l', '--log',
default='INFO',
help='The logging level to be used.')
args = parser.parse_args()
loglevel = None
try:
loglevel = nameToLevel[args.log.upper()]
except Exception as e1:
print('Invalid log level: {}'.format(args.log))
print('Please use one of the following values:')
print(' * CRITICAL')
print(' * ERROR')
print(' * WARNING')
print(' * INFO')
print(' * DEBUG')
print(' * NOTSET')
exit()
stackoverflowSync = StackOverflowSync(loglevel=loglevel)
stackoverflowSync.process(year=2015, month=9, day=21)
|
The Shark Rocket Vacuum HV300 combines the versatility and convenience of an upright with the compact power of a canister. Weighing in at under eight pounds, this appliance has a slim profile and is easy to carry. Swivel steering lets you navigate in tight spaces and around furniture to get the entire room clean. Thanks to its advanced cyclonic technology, the Shark vacuum cleaner separates fine dust from the air, keeping the interior of the device free of buildup. The result is consistent, strong suction that lasts until the job is complete. There are no bags to change out. Simply empty the slim canister when it's full and quickly get back to work. With the long neck and the beater brush, it can handle either hardwood floors or carpet. |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-27 07:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='GPU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('manufacturer', models.CharField(max_length=128)),
('GPU_manufacturer', models.CharField(max_length=128)),
('video_memory', models.IntegerField()),
('memory_clock', models.IntegerField()),
('core_speed', models.IntegerField()),
('boost_speed', models.IntegerField()),
('memory_type', models.CharField(max_length=128)),
('motherboard_connection', models.CharField(max_length=128)),
('power_supply', models.IntegerField()),
('picture', models.CharField(max_length=999999)),
('price', models.IntegerField()),
],
),
]
|
Clean Trek Body Wipes provide hikers, campers and other adventurers an easy, eco-friendly solution to washing up without a single drop of water.
Perfect for your pack, Clean Trek Body Wipes were designed specifically for getting "off the beaten path" and pushing your limits.
Each Clean Trek Wipe contains our proven formula, which eliminates body odor and targets the source to ensure future odors are controlled.
While removing body and other odors, the wipes will soothe the skin with Aloe Vera and Vitamin E.
As an added boost, these wipes will resist and inhibit the growth of new odor development as you continue your adventure.
Wipes are compact and lightweight for backpacks. |
from datetime import datetime
from django.db import models
from app.models import SiteUser, NFL_Division, NFL_Conference, League, Sport, SuperUser
from app.models import NFL_Conference_Choices
from app.mixins import HelperMixins
from nfl.division.forms import NFL_DivisionForm_Create, NFL_DivisionForm_Edit
class Layout_View(object):
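    # Base viewmodel: common layout parameters shared by all derived views.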
def __init__(self, site_user, title):
self.viewmodel = {'site_user':site_user, # app/layout.html params
'title': title,
'year': datetime.now().year,}
class Index_Body_View(Layout_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, conferences, filter,
conference_id):
super().__init__(site_user, title)
self.viewmodel['partial_view_id'] = 'division-id'
self.viewmodel['conference_id'] = conference_id
        self.viewmodel['filter'] = filter
self.viewmodel['index_url'] = 'nfl:division:index'
self.viewmodel['pagination_routing_html'] = 'nfl/nfl_pagination_routing.html'
self.viewmodel['conference_pagination_list_html'] = 'division/conference_pagination_list.html'
self.viewmodel['shared_conference_pagination_list_html'] = 'nfl/shared_conference_pagination_list.html'
self.viewmodel['shared_division_pagination_list_html'] = 'nfl/shared_division_pagination_list.html'
self.viewmodel['conference_pagination_link_html'] = 'division/conference_pagination_link.html'
self.viewmodel['division_pagination_link_html'] = 'division/division_pagination_link.html'
self.viewmodel['conferences'] = conferences
self.viewmodel['modelsuccess_bool'] = modelsuccess_bool
self.viewmodel['modelstate'] = modelstate
self.viewmodel['modelstate_html'] = 'app/modelstatus.html'
self.viewmodel['create_url'] = 'nfl:division:create'
self.viewmodel['create_link_name'] = 'Create Division'
self.viewmodel['create_link_html'] = 'division/create_link.html'
self.viewmodel['shared_create_link_html'] = 'app/shared_create_link.html'
self.viewmodel['index_table_html'] = 'division/index_table.html'
self.viewmodel['home_url'] = 'nfl:home'
self.viewmodel['scripts'] = ['app/scripts/Client/TableStripping.js']
class Form_Body_View(Layout_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id):
super().__init__(site_user, title)
self.viewmodel['partial_view_id'] = 'division-id'
self.viewmodel['conference_id'] = conference_id
self.viewmodel['filter'] = filter
self.viewmodel['form'] = form
self.viewmodel['form_label_name'] = 'Division'
self.viewmodel['form_label_conference'] = 'Conference'
self.viewmodel['modelsuccess_bool'] = modelsuccess_bool
self.viewmodel['modelstate'] = modelstate
self.viewmodel['modelstate_html'] = 'app/modelstatus.html'
self.viewmodel['index_url'] = 'nfl:division:index'
self.viewmodel['index_link_html'] = 'division/index_link.html'
self.viewmodel['shared_index_link_html'] = 'app/shared_index_link.html'
self.viewmodel['scripts'] = ['app/scripts/jquery.validate.js']
class Details_Delete_Body_View(Layout_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title)
self.viewmodel['partial_view_id'] = 'division-id'
self.viewmodel['division_id'] = division.id
self.viewmodel['conference_id'] = conference_id
self.viewmodel['filter'] = filter
self.viewmodel['descriptive_list'] = 'division/descriptive_list.html'
self.viewmodel['modelsuccess_bool'] = modelsuccess_bool
self.viewmodel['modelstate'] = modelstate
self.viewmodel['modelstate_html'] = 'app/modelstatus.html'
self.viewmodel['index_url'] = 'nfl:division:index'
class Table_View(Index_Body_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, divisions, conferences,
filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, conferences, filter,
conference_id)
self.viewmodel['items'] = divisions
self.viewmodel['header_label_item'] = 'Division'
self.viewmodel['header_label_conference'] = 'Conference'
self.viewmodel['item_url'] = 'nfl:division:index'
self.viewmodel['edit_url'] = 'nfl:division:edit'
self.viewmodel['details_url'] = 'nfl:division:details'
self.viewmodel['delete_url'] = 'nfl:division:delete'
class DescriptiveList_View(Details_Delete_Body_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id)
self.viewmodel['item'] = division
self.viewmodel['item_label_name'] = 'Conference'
self.viewmodel['item_label_league_name'] = 'League'
self.viewmodel['item_label_sport_name'] = 'Sport'
class Create_View(Form_Body_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id)
self.viewmodel['form_template_html'] = 'division/create_form.html'
self.viewmodel['form_create_html'] = 'app/shared_create_form.html'
self.viewmodel['form_html'] = 'division/division_form.html'
self.viewmodel['form_url'] = 'nfl:division:create'
self.viewmodel['form_label_submit'] = 'Create'
class Edit_View(Form_Body_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, form, filter,
division_id, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id)
self.viewmodel['division_id'] = division_id
self.viewmodel['form_template_html'] = 'division/edit_form.html'
self.viewmodel['form_edit_html'] = 'app/shared_edit_form.html'
self.viewmodel['form_html'] = 'division/division_form.html'
self.viewmodel['form_url'] = 'nfl:division:edit'
self.viewmodel['form_label_submit'] = 'Edit'
class Details_View(DescriptiveList_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id)
self.viewmodel['details_links_html'] = 'division/details_links.html'
self.viewmodel['edit_url'] = 'nfl:division:edit'
class Delete_View(DescriptiveList_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id)
self.viewmodel['delete_form'] = 'division/delete_form.html'
self.viewmodel['delete_url'] = 'nfl:division:delete'
self.viewmodel['shared_delete_form_html'] = 'app/shared_delete_form.html'
class SuperUser_Index(Table_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, divisions, conferences,
filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, divisions, conferences,
filter, conference_id)
self.viewmodel['use_pagination'] = True
@classmethod
def get_index_viewmodel(cls, site_user, title, modelstate, filter, conference_id):
modelstate, modelsuccess_bool = League.get_modelstate(modelstate)
conference_id, divisions = SuperUser_Index.get_viewmodel_parameters_by_state(filter, conference_id)
conferences = NFL_Conference.get_all_items(NFL_Conference)
viewmodel = SuperUser_Index(site_user, title, modelstate, modelsuccess_bool,
divisions, conferences, filter, conference_id).viewmodel
return viewmodel
@classmethod
def get_viewmodel_parameters_by_state(cls, filter, conference_id):
if filter == 0:
conference_id = 0
divisions = NFL_Division.get_all_items(NFL_Division)
elif filter == 1:
conferences = NFL_Conference.get_all_items(NFL_Conference)
if conferences.count() == 0:
divisions = []
return conference_id, divisions
if conference_id == 0:
conference_id = NFL_Conference.get_conference_id_if_needed_and_possible(conferences, conference_id)
divisions = NFL_Division.get_items_by_conference_id(NFL_Division, conference_id)
return conference_id, divisions
class SuperUser_Create(Create_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id)
@classmethod
def get_create_viewmodel(cls, site_user, title, modelstate, filter, conference_id, form):
modelstate, modelsuccess_bool = League.get_modelstate(modelstate)
conferences = NFL_Conference.get_all_items(NFL_Conference)
NFL_Conference_Choices.get_choices_by_conferences(conferences)
if form is None:
form = NFL_DivisionForm_Create(initial={'conference_id': conference_id,
'filter' : filter})
viewmodel = SuperUser_Create(site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id).viewmodel
return viewmodel
class SuperUser_Edit(Edit_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, form, filter,
division_id, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, form, filter,
division_id, conference_id)
@classmethod
def get_edit_viewmodel(cls, site_user, title, modelstate, filter, division_id, conference_id,
form):
modelstate, modelsuccess_bool = NFL_Division.get_modelstate(modelstate)
division = NFL_Division.get_item_by_id(NFL_Division, division_id)
if form is None:
form = NFL_DivisionForm_Edit(initial = {'id': division.id,
'name': division.name,
'conference_id': division.conference_id,
'filter':filter})
viewmodel = SuperUser_Edit(site_user, title, modelstate, modelsuccess_bool, form, filter,
division_id, conference_id).viewmodel
return viewmodel
class SuperUser_Details(Details_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id)
@classmethod
def get_details_viewmodel(cls, site_user, title, modelstate, filter, division_id,
conference_id):
modelstate, modelsuccess_bool = League.get_modelstate(modelstate)
division = NFL_Division.get_item_by_id(NFL_Division, division_id)
viewmodel = SuperUser_Details(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id).viewmodel
return viewmodel
class User_Delete(Delete_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id)
@classmethod
def get_delete_viewmodel(cls, site_user, title, modelstate, filter,
division_id, conference_id):
modelstate, modelsuccess_bool = League.get_modelstate(modelstate)
division = NFL_Division.get_item_by_id(NFL_Division, division_id)
viewmodel = User_Delete(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id).viewmodel
return viewmodel
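# A minimal usage sketch, assuming Django as the web framework; the view
# function name, template path and default arguments below are illustrative,
# not part of the original app.
def _example_division_index(request):
    from django.shortcuts import render  # assumed framework import
    viewmodel = SuperUser_Index.get_index_viewmodel(
        site_user=request.user, title='Divisions',
        modelstate=None, filter=0, conference_id=0)
    return render(request, 'division/index.html', viewmodel)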
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""embrane_lbaas_driver
Revision ID: 33dd0a9fa487
Revises: 19180cf98af6
Create Date: 2014-02-25 00:15:35.567111
"""
# revision identifiers, used by Alembic.
revision = '33dd0a9fa487'
down_revision = '19180cf98af6'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.services.loadbalancer.plugin.LoadBalancerPlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
u'embrane_pool_port',
sa.Column(u'pool_id', sa.String(length=36), nullable=False),
sa.Column(u'port_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'],
name=u'embrane_pool_port_ibfk_1'),
sa.ForeignKeyConstraint(['port_id'], [u'ports.id'],
name=u'embrane_pool_port_ibfk_2'),
sa.PrimaryKeyConstraint(u'pool_id'))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table(u'embrane_pool_port')
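# Usage sketch (assumption): in-tree migrations like this one are normally
# applied through neutron's alembic wrapper rather than run directly, e.g.
#   neutron-db-manage --config-file /etc/neutron/neutron.conf upgrade 33dd0a9fa487
# and reverted with ``neutron-db-manage ... downgrade 19180cf98af6``;
# the exact config-file flags depend on the deployment.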
|
Teaches how to make a knife using your milling machine. Also teaches how to make a Kydex sheath.
This book teaches knifemakers how to make a framelock folding knife.
A complete method of making a slipjoint folder from raw materials all the way to the finished knife. The stock removal method is utilized. 90 pages of detailed photos and descriptions of every step involved. This book includes a design for a simple slipjoint folder. |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2019 Edgewall Software
# Copyright (C) 2005-2006 Christian Boos <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christian Boos <[email protected]>
import re
from trac.config import ConfigSection
from trac.core import *
from trac.util.html import Element, Fragment, find_element, tag
from trac.util.translation import N_, _, tag_
from trac.web.api import IRequestHandler
from trac.wiki.api import IWikiMacroProvider
from trac.wiki.formatter import extract_link
class InterTracDispatcher(Component):
"""InterTrac dispatcher."""
implements(IRequestHandler, IWikiMacroProvider)
is_valid_default_handler = False
intertrac_section = ConfigSection('intertrac',
"""This section configures InterTrac prefixes. Option names in
this section that contain a `.` are of the format
`<name>.<attribute>`. Option names that don't contain a `.` define
an alias.
The `.url` attribute is mandatory and is used for locating the
other Trac. This can be a relative path when the other Trac
environment is located on the same server.
The `.title` attribute is used for generating a tooltip when the
cursor is hovered over an InterTrac link.
Example configuration:
{{{#!ini
[intertrac]
# -- Example of setting up an alias:
t = trac
# -- Link to an external Trac:
genshi.title = Edgewall's Trac for Genshi
genshi.url = http://genshi.edgewall.org
}}}
""")
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'^/intertrac/(.*)', req.path_info)
if match:
if match.group(1):
req.args['link'] = match.group(1)
return True
def process_request(self, req):
link = req.args.get('link', '')
parts = link.split(':', 1)
if len(parts) > 1:
resolver, target = parts
if target[:1] + target[-1:] not in ('""', "''"):
link = '%s:"%s"' % (resolver, target)
from trac.web.chrome import web_context
link_frag = extract_link(self.env, web_context(req), link)
if isinstance(link_frag, (Element, Fragment)):
elt = find_element(link_frag, 'href')
if elt is None:
raise TracError(
_("Can't view %(link)s. Resource doesn't exist or "
"you don't have the required permission.", link=link))
href = elt.attrib.get('href')
else:
href = req.href(link.rstrip(':'))
req.redirect(href)
# IWikiMacroProvider methods
def get_macros(self):
yield 'InterTrac'
def get_macro_description(self, name):
return 'messages', N_("Provide a list of known InterTrac prefixes.")
def expand_macro(self, formatter, name, content):
intertracs = {}
for key, value in self.intertrac_section.options():
idx = key.rfind('.')
if idx > 0: # 0 itself doesn't help much: .xxx = ...
prefix, attribute = key[:idx], key[idx+1:]
intertrac = intertracs.setdefault(prefix, {})
intertrac[attribute] = value
else:
intertracs[key] = value # alias
if 'trac' not in intertracs:
intertracs['trac'] = {'title': _('The Trac Project'),
'url': 'http://trac.edgewall.org'}
def generate_prefix(prefix):
intertrac = intertracs[prefix]
if isinstance(intertrac, basestring):
yield tag.tr(tag.td(tag.strong(prefix)),
tag.td(tag_("Alias for %(name)s",
name=tag.strong(intertrac))))
else:
url = intertrac.get('url', '')
if url:
title = intertrac.get('title', url)
yield tag.tr(tag.td(tag.a(tag.strong(prefix),
href=url + '/timeline')),
tag.td(tag.a(title, href=url)))
return tag.table(class_="wiki intertrac")(
tag.tr(tag.th(tag.em(_("Prefix"))),
tag.th(tag.em(_("Trac Site")))),
[generate_prefix(p) for p in sorted(intertracs)])
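# Illustration (sketch, using the example configuration in the docstring
# above): a request to ``/intertrac/genshi:ticket:123`` quotes the target,
# resolves it through ``extract_link`` and redirects to the matching URL on
# the remote Trac, while the ``[[InterTrac]]`` wiki macro expands to the
# table of known prefixes built in ``expand_macro``.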
|
import time, re
from lxml import objectify, etree
from copy import deepcopy
class ScanningTemplate(object):
"""Python object of Leica LAS Matrix Screener Scanning Template XML.
Provides easy access to elements via attributes:
>>> tmpl = ScanningTemplate('{ScanningTemplate}tmpl.xml')
>>> # attributes of MatrixScreenerTemplate/ScanningTemplate/Properties
>>> print(tmpl.properties.attrib)
Parameters
----------
filename : str
XML to load.
Attributes
----------
filename : str
Path XML-filename.
root : lxml.objectify.ObjectifiedElement
Objectified root of loaded XML.
See http://lxml.de/objectify.html#the-lxml-objectify-api
"""
def __init__(self, filename):
self.filename = filename
tree = objectify.parse(filename)
self.root = tree.getroot()
@property
def properties(self):
"Short hand for ``self.root.ScanningTemplate.Properties``"
return self.root.ScanningTemplate.Properties
# WELLS
@property
def well_array(self):
"Short hand for ``self.root.ScanWellArray``"
return self.root.ScanWellArray
@property
def wells(self):
"""All ScanWellData elements.
Returns
-------
list of objectify.ObjectifiedElement
"""
try:
return self.root.ScanWellArray.ScanWellData[:]
except AttributeError:
return []
def well_fields(self, well_x=1, well_y=1):
"""All ScanFieldData elements of given well.
Parameters
----------
well_x : int
well_y : int
Returns
-------
list of lxml.objectify.ObjectifiedElement
All ScanFieldData elements of given well.
"""
xpath = './ScanFieldArray/ScanFieldData'
xpath += _xpath_attrib('WellX', well_x)
xpath += _xpath_attrib('WellY', well_y)
return self.root.findall(xpath)
def well(self, well_x=1, well_y=1):
"""ScanWellData of specific well.
Parameters
----------
well_x : int
well_y : int
Returns
-------
lxml.objectify.ObjectifiedElement
"""
xpath = './ScanWellData'
xpath += _xpath_attrib('WellX', well_x)
xpath += _xpath_attrib('WellY', well_y)
# assume we find only one
return self.well_array.find(xpath)
def well_attrib(self, well_x=1, well_y=1):
"""Attributes of specific well.
Parameters
----------
well_x : int
well_y : int
Returns
-------
dict
Attributes of ScanWellArray/ScanWellData.
"""
return self.well(well_x, well_y).attrib
# FIELDS
@property
def field_array(self):
"Short hand for ``self.root.ScanFieldArray``"
return self.root.ScanFieldArray
@property
def fields(self):
"""All ScanFieldData elements.
Returns
-------
list of objectify.ObjectifiedElement
"""
try:
return self.root.ScanFieldArray.ScanFieldData[:]
except AttributeError:
return []
def field(self, well_x=1, well_y=1, field_x=1, field_y=1):
"""ScanFieldData of specified field.
Parameters
----------
well_x : int
well_y : int
field_x : int
field_y : int
Returns
-------
lxml.objectify.ObjectifiedElement
ScanFieldArray/ScanFieldData element.
"""
xpath = './ScanFieldArray/ScanFieldData'
xpath += _xpath_attrib('WellX', well_x)
xpath += _xpath_attrib('WellY', well_y)
xpath += _xpath_attrib('FieldX', field_x)
xpath += _xpath_attrib('FieldY', field_y)
# assume we find only one
return self.root.find(xpath)
def update_start_position(self):
"Set start position of experiment to position of first field."
x_start = self.field_array.ScanFieldData.FieldXCoordinate
y_start = self.field_array.ScanFieldData.FieldYCoordinate
# empty template have all fields positions set to zero
# --> avoid overwriting start position
if x_start != 0 and y_start != 0:
self.properties.ScanFieldStageStartPositionX = int(x_start * 1e6) # in um
self.properties.ScanFieldStageStartPositionY = int(y_start * 1e6)
def update_well_positions(self):
"""Set ``well_attrib['FieldXStartCoordinate']`` and
``well_attrib['FieldYStartCoordinate']`` to FieldXCoordinate and
FieldYCoordinate of first field in well.
"""
for well in self.wells:
well_x = well.attrib['WellX']
well_y = well.attrib['WellY']
first_field = self.well_fields(well_x, well_y)[0]
x_start = first_field.FieldXCoordinate.text
y_start = first_field.FieldYCoordinate.text
well.attrib['FieldXStartCoordinate'] = x_start
well.attrib['FieldYStartCoordinate'] = y_start
@property
def count_of_wells(self):
"""Number of wells in x/y-direction of template.
Returns
-------
tuple
(xs, ys) number of wells in x and y direction.
"""
xs = set([w.attrib['WellX'] for w in self.wells])
ys = set([w.attrib['WellY'] for w in self.wells])
return (len(xs), len(ys))
@property
def count_of_assigned_jobs(self):
"Number of fields that have attrib['JobAssigned'] set to true."
assigned = len([x.attrib['JobAssigned'] for x in self.fields
if x.attrib['JobAssigned'] == 'true'])
return assigned
def update_counts(self):
"Update counts of fields and wells."
# Properties.attrib['TotalCountOfFields']
fields = str(len(self.fields))
self.properties.attrib['TotalCountOfFields'] = fields
# Properties.CountOfWellsX/Y
wx, wy = (str(x) for x in self.count_of_wells)
self.properties.CountOfWellsX = wx
self.properties.CountOfWellsY = wy
# Properties.attrib['TotalCountOfWells']
wells = str(len(self.wells))
self.properties.attrib['TotalCountOfWells'] = wells
# Properties.attrib['TotalAssignedJobs']
self.properties.attrib['TotalAssignedJobs'] = str(self.count_of_assigned_jobs)
def remove_well(self, well_x, well_y):
"""Remove well and associated scan fields.
Parameters
----------
well_x : int
well_y : int
Raises
------
AttributeError
If well not found.
"""
well = self.well(well_x, well_y)
if well is None:
raise AttributeError('Well not found')
self.well_array.remove(well)
# remove associated fields
fields = self.well_fields(well_x, well_y)
for f in fields:
self.field_array.remove(f)
def well_exists(self, well_x, well_y):
"Check if well exists in ScanWellArray."
return self.well(well_x, well_y) is not None
def field_exists(self, well_x, well_y, field_x, field_y):
"Check if field exists in ScanFieldArray."
return self.field(well_x, well_y, field_x, field_y) is not None
def add_well(self, well_x, well_y, start_x, start_y):
"""Add well with associated scan fields. ``self.wells[0]`` and
``self.fields[0]`` will be used as base. ScanWellData will be added to
ScanWellArray and ScanFieldData to ScanFieldArray. The amount of fields
added is decided by Properties/CountOfScanFields.
Parameters
----------
well_x : int
well_y : int
start_x : int
In meters. FieldXCoordinate of first field in well.
start_y : int
In meters. FieldYCoordinate of first field in well.
Raises
------
ValueError
If well or fields already exists.
"""
# raise ValueError if well already exists
if self.well_exists(well_x, well_y):
raise ValueError('Well already exists in ScanWellArray')
if len(self.well_fields(well_x, well_y)) != 0:
raise ValueError('Fields belonging to well already exists in ScanFieldArray')
base_well = deepcopy(self.wells[0])
# append well to ScanWellArray
base_well.attrib['WellX'] = str(well_x)
base_well.attrib['WellY'] = str(well_y)
base_well.attrib['FieldXStartCoordinate'] = str(start_x)
base_well.attrib['FieldYStartCoordinate'] = str(start_y)
self.well_array.append(base_well)
# append fields to ScanFieldArray
x_fields = int(self.properties.CountOfScanFieldsX)
y_fields = int(self.properties.CountOfScanFieldsY)
x_dist = float(self.properties.ScanFieldStageDistanceX) * 1e-6 # stored in um --> convert to meters
y_dist = float(self.properties.ScanFieldStageDistanceY) * 1e-6
x_label = str(self.properties.TextWellPlateHorizontal[well_x - 1])
y_label = str(self.properties.TextWellPlateVertical[well_y - 1])
for i in range(x_fields):
for j in range(y_fields):
base_field = deepcopy(self.fields[0])
base_field.FieldXCoordinate = start_x + i*x_dist
base_field.FieldYCoordinate = start_y + j*y_dist
base_field.attrib['WellX'] = str(well_x)
base_field.attrib['WellY'] = str(well_y)
base_field.attrib['FieldX'] = str(i+1)
base_field.attrib['FieldY'] = str(j+1)
base_field.attrib['LabelX'] = x_label
base_field.attrib['LabelY'] = y_label
self.field_array.append(base_field)
def move_well(self, well_x, well_y, start_x, start_y):
"""Move well and associated scan fields. Spacing between
fields will be what Properties/ScanFieldStageDistance is set to.
Parameters
----------
well_x : int
well_y : int
start_x : int
In meters. FieldXCoordinate of first field in well.
start_y : int
In meters. FieldYCoordinate of first field in well.
Raises
------
ValueError
If specified well or associated fields does not exist.
"""
# raise ValueError if well or fields don't exist
if not self.well_exists(well_x, well_y):
raise ValueError('Well not found in ScanWellArray')
fields = self.well_fields(well_x, well_y)
if len(fields) == 0:
raise ValueError('Fields belonging to well not found in ScanFieldArray')
well = self.well(well_x, well_y)
# update well start coordinate
well.attrib['FieldXStartCoordinate'] = str(start_x)
well.attrib['FieldYStartCoordinate'] = str(start_y)
# update fields coordinates
x_dist = float(self.properties.ScanFieldStageDistanceX) * 1e-6 # stored in um --> convert to meters
y_dist = float(self.properties.ScanFieldStageDistanceY) * 1e-6
for field in fields:
i = int(field.attrib['FieldX'])
j = int(field.attrib['FieldY'])
field.FieldXCoordinate = start_x + (i - 1)*x_dist
field.FieldYCoordinate = start_y + (j - 1)*y_dist
def write(self, filename=None):
"""Save template to xml. Before saving template will update
date, start position, well positions, and counts.
Parameters
----------
filename : str
If not set, XML will be written to self.filename.
"""
if not filename:
filename = self.filename
# update time
self.properties.CurrentDate = _current_time()
# set rubber band to true
self.properties.EnableRubberBand = 'true'
# update start position
self.update_start_position()
# update well postions
self.update_well_positions()
# update counts
self.update_counts()
# remove py:pytype attributes
objectify.deannotate(self.root)
# remove namespaces added by lxml
for child in self.root.iterchildren():
etree.cleanup_namespaces(child)
xml = etree.tostring(self.root, encoding='utf8',
xml_declaration=True, pretty_print=True)
# fix format quirks
# add carriage return character
xml = u'\r\n'.join(l.decode(encoding='utf8') for l in xml.splitlines())
# add space at "end/>" --> "end />"
xml = re.sub(r'(["a-z])/>', r'\1 />', xml)
xml = xml.replace("version='1.0' encoding='utf8'", 'version="1.0"')
with open(filename, 'wb') as f:
f.write(xml.encode('utf8'))
def _current_time():
"Time formatted as `Monday, February 09, 2015 | 8:12 PM`"
return time.strftime('%A, %B %d, %Y | %I:%M %p')
def _xpath_attrib(attrib, value):
"""Returns string ``[@attrib="value"]``.
"""
return '[@' + str(attrib) + '="' + str(value) + '"]'
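# Minimal usage sketch; assumes an existing Matrix Screener template file
# (filename as in the class docstring) that already contains at least one
# well and one field, since ``add_well`` copies ``wells[0]``/``fields[0]``.
if __name__ == '__main__':
    tmpl = ScanningTemplate('{ScanningTemplate}tmpl.xml')
    print(tmpl.count_of_wells)                     # e.g. (1, 1)
    tmpl.add_well(well_x=2, well_y=1,
                  start_x=0.01, start_y=0.02)      # coordinates in meters
    tmpl.write('{ScanningTemplate}modified.xml')   # updates counts/positions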
|
Keeping children busy while waiting for their food is important to having an enjoyable restaurant dining experience. Most of our restaurant customers prefer having a black/white coloring and puzzle menu. They are printed with only black ink and are economical compared to full color children’s menus. Here are some sample designs we’ve created for our customers. |
"""
Has the MetaData about the partitioning
"""
import Globals
TABLENAME = 'patitionmeta'
def create(conn):
"""
Create a MetaData table if it does not exist
:param conn: open connection to DB
:return:None
"""
with conn.cursor() as cur:
cur.execute("""
CREATE TABLE IF NOT EXISTS {0}(
KEY VARCHAR(50),
VALUE VARCHAR(50)
)
""".format(TABLENAME))
if Globals.DEBUG and Globals.DATABASE_QUERIES_DEBUG: Globals.printquery(cur.query)
def upsert(conn, key, value):
"""
Inserts a given (key, value) pair into meta data table if not present, else updates the value of the key
:param conn: open connection to DB
:param key: Key to insert / update
:param value: Value to insert / update
:return:None
"""
with conn.cursor() as cur:
cur.execute("SELECT value FROM {0} WHERE KEY = '{1}'".format(TABLENAME, key))
keyvalue = cur.fetchone()
if keyvalue is None:
cur.execute("INSERT INTO {0} VALUES ('{1}', '{2}')".format(TABLENAME, key, value))
if Globals.DEBUG and Globals.DATABASE_QUERIES_DEBUG: Globals.printquery(cur.query)
else:
cur.execute("UPDATE {0} SET VALUE = '{1}' WHERE KEY = '{2}'".format(TABLENAME, value, key))
if Globals.DEBUG and Globals.DATABASE_QUERIES_DEBUG: Globals.printquery(cur.query)
def select(conn, key):
"""
Fetches the value of a given key from meta data table
:param conn: open connection to DB
:param key: Key to fetch
:return:value of key if present, else None
"""
with conn.cursor() as cur:
cur.execute("SELECT value FROM {0} WHERE KEY = '{1}'".format(TABLENAME, key))
if Globals.DEBUG and Globals.DATABASE_QUERIES_DEBUG: Globals.printquery(cur.query)
keyvalue = cur.fetchone()
if keyvalue is not None: return keyvalue[0]
return None
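def _example_usage(conn):
    """Usage sketch (assumes a psycopg2-style connection whose cursor is a
    context manager and exposes ``cur.query``, matching the debug hooks
    above; the key/value below are illustrative)."""
    create(conn)
    upsert(conn, 'numpartitions', '16')            # first call inserts
    upsert(conn, 'numpartitions', '32')            # second call updates
    assert select(conn, 'numpartitions') == '32'
    conn.commit()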
def drop(conn):
"""
Drops the table
:param conn: open connection to DB
:return:None
"""
with conn.cursor() as cur:
cur.execute('drop table if exists {0};'.format(TABLENAME)) |
118 individual packs of unopened Hot Wheels ranging from 1991-2007, mostly 90's Hot Wheels, plus five 5-packs of unopened Hot Wheels. At least 10-15 of those cars retail from $5-$15 a piece. A couple of complete sets are in this collection too. I have a price guide that shows some of the cars with their retail values; it would come with the collection. Just an old hobby that I no longer have an interest in. I don't have the time or desire to sell them individually, and I'm not really willing to separate; I'd much rather sell the whole tote as one purchase. Asking $130 OBO. Call or preferably text me at 330-257-8688 any time. Thanks. |
from strategies.strategy_base import YearlyStrategyBase
# https://www.irs.gov/publications/p590b/index.html#en_US_2014_publink1000231236
# age................0.....1.....2.....3.....4.....5.....6.....7.....8.....9
lifeExpectancy = [82.4, 81.6, 80.6, 79.7, 78.7, 77.7, 76.7, 75.8, 74.8, 73.8, # x 0
72.8, 71.8, 70.8, 69.9, 68.9, 67.9, 66.9, 66.0, 65.0, 64.0, # x 10
63.0, 62.1, 61.1, 60.1, 59.1, 58.2, 57.2, 56.2, 55.3, 54.3, # x 20
53.3, 52.4, 51.4, 50.4, 49.4, 48.5, 47.5, 46.5, 45.6, 44.6, # x 30
43.6, 42.7, 41.7, 40.7, 39.8, 38.8, 37.9, 37.0, 36.0, 35.1, # x 40
34.2, 33.3, 32.3, 31.4, 30.5, 29.6, 28.7, 27.9, 27.0, 26.1, # x 50
25.2, 24.4, 23.5, 22.7, 21.8, 21.0, 20.2, 19.4, 18.6, 17.8, # x 60
17.0, 16.3, 15.5, 14.8, 14.1, 13.4, 12.7, 12.1, 11.4, 10.8, # x 70
10.2, 9.7, 9.1, 8.6, 8.1, 7.6, 7.1, 6.7, 6.3, 5.9, # x 80
5.5, 5.2, 4.9, 4.6, 4.3, 4.1, 3.8, 3.6, 3.4, 3.1, # x 90
2.9, 2.7, 2.5, 2.3, 2.1, 1.9, 1.7, 1.5, 1.4, 1.2, # x 10
1.1, 1.0] # x 110
'''
http://www.marketwatch.com/story/put-retirement-savings-withdrawals-on-autopilot-2013-07-24
'''
class HebelerAuto(YearlyStrategyBase):
def __init__(self, age):
self.resetAge = age
def getInitialWithDrawal(self):
return self.initialAmount
def getCurrentWithdrawalAmount(self):
return self.lastYearsWithdrawal # using the parlance of the hebeler paper.
def yearBaseReset(self, portfolio):
self.portfolio = portfolio
self.initialAmount = self.portfolio.value * .04
self.lastYearsWithdrawal = self.initialAmount
self.lastYearsAmount = self.portfolio.value
self.age = self.resetAge
def yearWithdraw(self, inflationRate):
withdrawal = .5 * inflationRate * self.getInitialWithDrawal()
withdrawal += .5 * self.lastYearsAmount / lifeExpectancy[self.age]
self.lastYearsWithdrawal = withdrawal
self.age += 1
w = self.portfolio.withdraw(withdrawal)
self.lastYearsAmount = self.yearGetPortfolioValue()
return w
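    # Worked example (sketch; assumes ``inflationRate`` is passed as a
    # cumulative factor, 1.0 in the first year): resetting at age 65 with a
    # $1,000,000 portfolio gives initialAmount = 0.04 * 1,000,000 = $40,000.
    # The first yearWithdraw(1.0) call then computes
    #   0.5 * 1.0 * 40,000 + 0.5 * 1,000,000 / lifeExpectancy[65]
    #   = 20,000 + 500,000 / 21.0 ≈ $43,810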
def yearGetPortfolioValue(self):
return self.portfolio.value
def yearGrow(self, yearGrowth):
self.portfolio.grow(yearGrowth) |
Our Connecticut car accident lawyer answers very important legal questions we’ve been repeatedly asked over the last fourteen years. The answers can greatly change the outcome and value of your case. If you have additional questions call us today for a free, no-obligation consultation with a top-rated car accident lawyer.
1) What is my car accident case worth?
First of all, it’s especially important to realize this is a complex process. Despite what’s on the internet, there isn’t a magic formula to evaluate a case. Never assume you don’t have a case. Always contact an experienced attorney as soon as possible after any serious accident. Critical evidence and information may be lost if you delay.
The value of your case depends on many facts, such as how the accident happened, what injuries you sustained, and how these injuries affected your life, physically, emotionally and financially. Finally, the facts of your case can’t change but how Bartlett Legal Group uncovers the facts and presents them can make a huge difference in the outcome of your case. An experienced car accident lawyer at our firm can help you determine the maximum value of your case.
Please click here to visit our page on eight costly mistakes personal injury clients often make.
2) Do I have to talk to the other driver’s insurance company?
Do not talk to the other driver’s insurance company representatives without first speaking to an experienced attorney at Bartlett Legal Group.
3) When to get an attorney for a car accident?
Not all car accidents require an attorney. You should contact an experienced car accident attorney if there are injuries or there is major property damage. If you’re not sure, call us and we can help. We will walk you through the complex legal process and help ensure you receive all you deserve.
4) Can I bring an auto accident lawsuit if I’m partly to blame?
Yes. Connecticut uses comparative fault rules. Let's say we go to court, and after hearing all the evidence the jury finds that you were thirty percent responsible for the accident and the defendant was seventy percent at fault. If you presented medical bills, lost wages and other reasonable expenses totaling $100,000, then under the modified comparative negligence rule the court would reduce your award by the percentage of fault the jury assigned to you. In this example, you would receive $70,000. Unfortunately, if the court finds that you were fifty-one percent or more at fault, you receive nothing, and you owe your attorney nothing.
5) Should I call a car accident lawyer from the scene of the accident?
Yes, once police and medical emergency personnel have been notified. You should call an experienced car accident attorney from the scene of the accident if there are injuries or there is major property damage. Calling from the scene is especially relevant and can have a major impact on the outcome of a potential claim or lawsuit. You need to contact an experienced attorney with medical staff to sort through the legal and medical issues. We will stand up to the insurance companies and exhaust every possible means of recovery for you and your loved ones. We will take the case to trial if that’s needed.
6) Do I have to notify the police?
You must notify police right away when there is an injury, death, or property damage. Most people don’t realize there’s no threshold in CT of estimated property damage for reporting a car accident.
7) What do I need to do if I’m in a car crash in Connecticut?
It depends on the severity of the accident.
Notify police and other emergency personnel right away. First call 911, and then call a car accident lawyer at Bartlett Legal Group.
Don’t try to move an injured person unless there is a vehicle fire or other immediate risk.
Also, covering an injured person with a blanket can help prevent shock.
It’s important to realize that collecting information after an accident will be helpful to police, your insurance company when you make a claim, and especially your car crash attorney at Bartlett Legal Group.
Write down any damage you observe to all vehicles related to the accident. Cooperate with police in answering questions and providing information.
Don’t give opinions. State only the facts.
If your car hits a parked vehicle, try to find the owner. Leave a note on the driver’s windshield if the owner is not around. Include your contact information and the date and time the accident occurred.
Write down as much information about the accident as possible while it’s still fresh in your mind so that we will have all the facts required to mount a strong case.
For several weeks, document all of your injuries associated with the accident, including any aggravated preexisting injuries. It is quite common to feel fine initially but start to feel pain in a day or two. Finally and most importantly, have a medical professional document all of your injuries.
8) What information should I give the police?
Bartlett Legal Group will be able to get a copy of the official accident report.
9) What are car accident lawyer fees?
At our firm, there are no fees or expenses unless you win or you accept a settlement. We pay all expenses, court fees, expert expenses, etc. You only pay us if you accept a settlement or receive an award at trial.
10) Should I take an offer and who makes the decision to take an offer?
This is your ultimate decision. We will only advise you to accept or reject an offer, based on our years of experience. If the offer is unfair and we feel strongly that we will do better going to court, we will recommend that we take your case to trial. The car accident lawyer that has handled your case from the beginning will be representing you in court. You have the final say to accept an offer or go to court.
11) How long do I have to file a car accident lawsuit? What is the statute of limitations in CT?
If you were injured in an auto accident or want to file a suit for property damage (compensation for repairs to your car), then per Conn. Gen. Stat. Ch. 926 Sec. 52-584 you have two years from the date of the accident to file a lawsuit. The sooner you contact our firm the better. We will collect valuable information that can greatly change the outcome of your case.
12) Has your firm handled many car accident cases?
Yes, we are a nationally recognized personal injury and wrongful death law firm. Serious car accident lawsuits are one of our firm's core personal injury practice areas. Our car crash attorneys have successfully represented hundreds of people injured in car accidents. We are a litigation law firm, which means we are the attorneys who actually take your case to court if it doesn't settle to your satisfaction. Many firms refer their cases to us when a case must go to trial. We treat every case as if it will go to trial. This extra effort helps maximize the outcome whether you accept an offer or eventually go to trial. Every case is different and past results are not an indication of future results.
13) Do I need a car accident attorney near me?
We are centrally located in Connecticut. We represent people injured in car accidents from all Connecticut towns and in all of Connecticut’s courts. Distance is not an issue in this age of electronics. In addition, over the years we have established a network of top rated experienced attorneys across the country that we know and have worked with regularly. Many attorneys across the country refer cases to our firm. Don’t settle for less. Contact us now.
The map shows the numbers and locations of those we’ve helped in CT just in the past four years.
14) I was rear-ended, why would I need a lawyer?
Rear-end Collision Accident- These seem straightforward, but without an attorney, insurance companies will try to minimize the amount they pay. Having a knowledgeable attorney on your side will help to ensure you receive the recovery you deserve.
The State of Connecticut tracks the average Daily Vehicle Miles Travelled (DVMT) and the number and types of crashes. The most recent data compiled by the State, published in 2017, covers 2015. It shows that in 2015 a staggering 86.6 million miles a day were driven on Connecticut roads. There were 3.53 crashes per million DVMT, and of those crashes, .24 fatal crashes per billion DVMT.
In 2015 there were 5,182 crashes in Hartford alone, of which 9 were fatal crashes with 10 fatalities. There were 1,752 injury crashes with 2,635 injuries.
When you get in touch with one of the top* personal injury and car accident attorneys you start the process of making yourself whole again. Many insurance companies will try to settle quickly for less than your case is worth. There’s nothing wrong with taking a settlement if it is fair, but it’s important to realize you need an experienced car accident lawyer on your side. We will work on your behalf to achieve fair compensation for your injuries.
Distracted Driver Accident, cell phone accident, and texting while driving accident. Too often today, drivers use cell phones and text while driving. When texting or cell phone use leads to an accident, a distracted driver accident attorney at Bartlett Legal Group can help you recover the compensation you deserve.
Hit and Run Accident cases often leave victims with serious injuries and many questions about what can be done. By contacting a hit and run accident attorney at our firm, you will learn how your own insurance may provide you an avenue to recover from your injuries. If you have been injured, you are not alone, contact us today and let a hit and run accident lawyer at our firm guide you on the road to the recovery you deserve.
Drunk Driver Accident – If you or a loved one was injured by someone intoxicated there may be several sources to recover money for your loss or injury such as the person or establishment that served the driver drinks. This falls under what’s called Dram Shop Law. Call Bartlett Legal Group today for a free consultation to discuss what can be done in your particular case since every case is different and we have handled hundreds of cases.
Our lead car accident attorney Frank Bartlett possesses unique and valuable insight gained as a former insurance company defense attorney. He now works strictly for people injured by accidents or the wrongs of others. Working as an insurance defense attorney allowed him to gain valuable knowledge about how insurance adjusters handle and evaluate claims. He learned first-hand the maximum amount insurance adjusters would pay. He uses this knowledge and the experience gained from hundreds of car accident cases to help maximize your compensation and get you a fair and just settlement.
It’s not just about physical damage. A serious car accident can leave emotional scars that take a long time to heal. You don’t have to settle for less than you deserve when it comes to compensation for a car accident. Our lawyers and staff can help protect your interests and maximize your compensation. Don’t accept less than you should from the other party’s insurance company. They want to settle for the lowest amount possible, but you deserve better than that and we can help you get what you deserve.
We serve auto accident clients throughout Connecticut, including but not limited to Waterbury, Cheshire, New Haven, Fairfield, Bridgeport, and Hartford.
CAR ACCIDENT LAWYER CASE REFERRAL – Please see link on our firm overview page.
Choosing the best Connecticut accident attorney can be a difficult job, so please consider these 12 questions when choosing a car accident attorney by clicking here.
In conclusion, our mission at Bartlett Legal Group is to continue to be the best car accident law firm in Connecticut and having the highest client satisfaction. |
"""
Support for Melissa climate.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/melissa/
"""
import logging
import voluptuous as vol
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import load_platform
REQUIREMENTS = ["py-melissa-climate==1.0.6"]
_LOGGER = logging.getLogger(__name__)
DOMAIN = "melissa"
DATA_MELISSA = 'MELISSA'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the Melissa Climate component."""
import melissa
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
api = melissa.Melissa(username=username, password=password)
hass.data[DATA_MELISSA] = api
load_platform(hass, 'sensor', DOMAIN, {}, config)
load_platform(hass, 'climate', DOMAIN, {}, config)
return True
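# Configuration sketch for configuration.yaml, following CONFIG_SCHEMA above
# (credentials are placeholders):
#   melissa:
#     username: [email protected]
#     password: YOUR_PASSWORD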
|
Black slip-on sneakers from the ‘Holidays’ Capsule Collection with a matching rubber sole. The asymmetrical design plays with dark metal lamé detailing and the ironic, fun and rock-and-roll ‘Holidays’ patch. I'm different. I'm I29. |
#!/usr/bin/env python3
# Copyright 2017 Jan von Cosel
#
# This file is part of utility-scripts.
#
# utility-scripts is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# utility-scripts is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with utility-scripts. If not, see <http://www.gnu.org/licenses/>.
#
#
# create wavefunctions for specific vibrational states and use them to
# calculate cross-correlation functions for IVR analysis
# ----- RPTWF version -----
import sys
import re
import multiprocessing as mp
import subprocess
import os
import shutil
mctdhExe = "/home/jvcosel/mctdh85.5/bin/binary/x86_64/mctdh85"
corrExe = "/home/jvcosel/mctdh85.5/bin/binary/x86_64/crosscorr85"
def main():
nameDir = sys.argv[1]
refSLInput = sys.argv[2]
refMLInput = sys.argv[3]
prtThres = float(sys.argv[4])
refSLFile = open(refSLInput, "r")
refMLFile = open(refMLInput, "r")
refSLData = refSLFile.readlines()
refMLData = refMLFile.readlines()
refSLFile.close()
refMLFile.close()
# get the dimensionality of the system from the propagation:
with open(nameDir + "/op.log") as opFile:
for line in opFile:
if "ndof =" in line:
nDof = int(line.split()[2])
if len(sys.argv) == 6 and sys.argv[5] == "-c":
create_corrs(nDof, refSLData, refMLData, nameDir, refMLInput)
# create a gnuplot file to show the correlation functions:
gnuplotName = "thermcorrelations.plt"
gnuplotFile = open(gnuplotName, "w")
gnuplotFile.write("plot 'thermcorr_gs.dat' u 1:4 w l")
for i in range(nDof):
for j in range(2,6):
currCorrName = "thermcorr_" + str(i+1).zfill(3) + "_" + str(j) + ".dat"
maxCorrVal = 0.0
with open(currCorrName) as corrFile:
for line in corrFile:
if not "#" in line:
if float(line.split()[3]) > maxCorrVal:
maxCorrVal = float(line.split()[3])
if maxCorrVal > prtThres:
writeString = ", '" + currCorrName + "' u 1:4 w l"
gnuplotFile.write(writeString)
gnuplotFile.write("\n")
gnuplotFile.close()
def create_corrs(nModes, refSLData, refMLData, nameDir, refMLInput):
pool = mp.Pool(processes=4)
for i in range(nModes):
for j in range(2,6):
pool.apply_async(func=run_calc, args=(i+1, j, refSLData, refMLData, nameDir))
pool.close()
pool.join()
# do the calculation with the global ground state:
refMLDir = os.path.splitext(refMLInput)[0]
MLgencommand = [mctdhExe, "-mnd", refMLInput]
subprocess.check_call(MLgencommand)
shutil.copy2(refMLDir + "/restart", "calc_gs.rst")
corrcommand = [corrExe, "-f", nameDir + "/psi", "-o", "thermcorr_gs.dat", "-r", "calc_gs.rst"]
shutil.rmtree(refMLDir)
subprocess.check_call(corrcommand)
os.remove("calc_gs.rst")
def run_calc(mode, state, refSLData, refMLData, psiDir):
newSLData = refSLData[:]
newMLData = refMLData[:]
# get the name-directory for the reference calculations:
for i in range(len(refSLData)):
if ("name" in refSLData[i] and "=" in refSLData[i] and not "opname" in refSLData[i]):
dirLine = i
if "file" in refSLData[i] and "=" in refSLData[i]:
excLine = i
baseName = "thermcalc_" + str(mode).zfill(3) + "_" + str(state)
corrName = "thermcorr_" + str(mode).zfill(3) + "_" + str(state) + ".dat"
SLinputFileName = baseName + "_sl.inp"
MLinputFileName = baseName + "_ml.inp"
SLinputWF = baseName + "_sl.rst"
MLinputWF = baseName + "_ml.rst"
newSLData[dirLine] = " name = " + baseName + "\n"
excString = " operate = excite_" + str(mode).zfill(3) + "\n"
for i in range(state-1):
newSLData.insert(excLine + 1,excString)
SLinputFile = open(SLinputFileName, "w")
for item in newSLData:
SLinputFile.write(item)
SLinputFile.close()
os.mkdir(baseName)
SLgencommand = [mctdhExe, "-w", SLinputFileName]
subprocess.check_call(SLgencommand)
shutil.copy2(baseName + "/restart", SLinputWF)
for i in range(len(refMLData)):
if "file" in refMLData[i] and "=" in refMLData[i]:
rstLine = i
break
newMLData[dirLine] = " name = " + baseName + "\n"
newMLData[rstLine] = " file = " + SLinputWF + "\n"
MLinputFile = open(MLinputFileName, "w")
for item in newMLData:
MLinputFile.write(item)
MLinputFile.close()
MLgencommand = [mctdhExe, "-w", MLinputFileName]
subprocess.check_call(MLgencommand)
shutil.copy2(baseName + "/restart", MLinputWF)
shutil.rmtree(baseName)
corrcommand = [corrExe, "-f", psiDir + "/psi", "-o", corrName, "-r", MLinputWF]
subprocess.check_call(corrcommand)
os.remove(SLinputWF)
os.remove(MLinputWF)
os.remove(SLinputFileName)
os.remove(MLinputFileName)
if __name__ == "__main__":
main()
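# Invocation sketch (script and file names are illustrative):
#   ./ivr_crosscorr.py <name-directory> <ref_sl.inp> <ref_ml.inp> <threshold> [-c]
# e.g. ``./ivr_crosscorr.py propagation ref_sl.inp ref_ml.inp 0.01 -c`` first
# regenerates the cross-correlation files, then writes thermcorrelations.plt.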
|
Honestly, your title is hilarious enough already, "Forum Games Moderator".
Can I have one that says "Chilean"?
When my hangover isn't muddling my brain I am going to destroy most of you.
If you want it removed ask nicely and I'll do it.
Miggles I still have something very special in the works for you. Fret not.
"If you want it removed ask nicely and I'll do it."
"Miggles I still have something very special in the works for you. Fret not."
"Rub a dub dub, dead in the tub."
God I am bad at this.
"I already have a title, why am I still playing." |
import numpy as np
import rospy
from geometry_msgs.msg import Vector3Stamped
class Vector3Handler(object):
"""
Handler for ROS topics of type: geometry_msgs/Vector3Stamped
Args:
topic_name: Name of ROS topic to be subscribed
buffer_size: Variable buffer, depend on frame rate of topic, default: 500
queue_size: Subscriber queue_size
"""
def __init__(self, topic_name, buffer_size=500, queue_size=10):
self.vector_data = Vector3Stamped()
[self.vector_x, self.vector_y, self.vector_z] = [0.0, 0.0, 0.0]
self.topic_name = topic_name
self.queue_size = queue_size
self.buffer_size = buffer_size
self.counter = 0
self.buffer = np.zeros([self.buffer_size, 3])
self.sub = rospy.Subscriber(self.topic_name, Vector3Stamped, self.callback,
queue_size=self.queue_size)
def callback(self, msg):
self.vector_data = msg
if self.counter < self.buffer_size:
self.buffer[self.counter] = [self.vector_data.vector.x,
self.vector_data.vector.y,
self.vector_data.vector.z]
else:
rospy.loginfo("Vector3Handler for: " + self.topic_name + " has reach buffer size.")
self.counter += 1
def get_value(self):
if self.counter > 0:
self.vector_x = np.sum(self.buffer[:, 0]) / self.counter
self.vector_y = np.sum(self.buffer[:, 1]) / self.counter
self.vector_z = np.sum(self.buffer[:, 2]) / self.counter
else:
[self.vector_x, self.vector_y, self.vector_z] = [0.0, 0.0, 0.0]
self.buffer = np.zeros([self.buffer_size, 3])
self.counter = 0
return [self.vector_x, self.vector_y, self.vector_z]
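# Usage sketch (assumes a running ROS master and a publisher on the topic
# below; the topic name is illustrative):
if __name__ == '__main__':
    rospy.init_node('vector3_handler_example')
    handler = Vector3Handler('/wind/vector')
    rospy.sleep(1.0)              # let the buffer fill
    print(handler.get_value())    # averaged [x, y, z]; resets the buffer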
|
If Guru Nanak were alive today, the Sikh leader would be turning 544 years old — a mere child compared to Islam's 1,400-year-old Muhammad, Christianity's 2,000-year-old Jesus, and Buddhism's 2,500-year-old Buddha. Still, as Guru Nanak would undoubtedly be keen to point out, he has more than 300 years on Bahá'u'lláh, the founder of Baha'i. Whenever a religious leader's birthday rolls around, I try to think about them in human terms. About who they were during their lives, whether they were out for any glory or money, or whether they were surprised when their innermost passion brought them fame. All these men had something remarkable to offer the world; they wouldn't have gathered so much momentum if they hadn't. Nanak was devoted to providing an environment of inclusiveness — regardless of race, color or creed — and emphasized that there was but one God who dwells in all people.
There is neither Hindu nor Mussulman (Muslim) so whose path shall I follow? I shall follow God's path. God is neither Hindu nor Mussulman and the path which I follow is God's.
Five hundred years later, and the path that Sikhs follow is Nanak's.
I wonder how many families have pictures of religious leaders in their homes — a reminder, perhaps, to act according to the values they hold dear and give thanks for the opportunity to hold such values at all. Born in a different culture at a different time, the religious influences, and thus the pictures, would undoubtedly be different. But the importance of such physical reminders of devotion would probably remain. It's the same reason humans possess Bibles and create shrines and visit places of worship, I suppose — so they can more easily "access" the universal element that allows them to breathe and love and be.
I'm an aesthetic minimalist, so I don't have a lot of photographs hanging in my home — religious or otherwise — but I have occasionally thought of creating a space for pictures of the people to whom I'm devoted. The people who remind me to be the person I want to be, and who are, quite literally, responsible for my existence. The people who help shape my thoughts and lead me in the direction I want to be going.
There would be my parents and grandparents and great grandparents as far as I could trace them. There would be my sister and brother and their families. My husband and in-laws. My daughter. There would be my friends and mentors and godparents (who did a very poor job at helping make me godly but a very good job of helping make me happy.) And there would be people I don't know but who have helped me think more deeply about who I am, how I am, and why I'm here. People like Plato, Aristotle, Epicurus, Kierkegaard, Neizsche, Sartre, Freud, Darwin, Piaget, Einstein, Lincoln, MLK Jr., Twain, Hawking, Sagan, Goodall, Friedan, Steinem, Colbert, Oprah, E.T., the Buddha, The Beatles... Looks like I'm going to need a bigger house.
How about you? If not Nanak, who's on your wall? |
from argparse import ArgumentParser
import os.path
import sys
from Ivy.version import __version__
from Ivy.analysis_settings import EDIT_BENCH_SETTINGS
__program__ = 'ivy_benchmark'
__author__ = 'Soh Ishiguro <[email protected]>'
__license__ = ''
__status__ = 'development'
def parse_bench_opts():
desc = "Benchmarking test for detected RNA editing sites based on HTSeq data to evaluate detection params."
parser = ArgumentParser(description=desc,
prog=__program__,
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--vcf',
dest='vcf_file',
action='store',
nargs='+',
metavar='',
help='VCF file(s).')
group.add_argument('--csv',
dest='csv_file',
action='store',
nargs='+',
metavar='',
help='CSV file(s), for ***debug mode***.')
parser.add_argument('--source',
required=False,
default=EDIT_BENCH_SETTINGS['APP']['SOURCE'],
dest='source',
action='store',
metavar='',
help='To use specific sample/tissue/cell line. [default: {0}]'.format(
EDIT_BENCH_SETTINGS['APP']['SOURCE']))
parser.add_argument('--sp',
required=True,
default=EDIT_BENCH_SETTINGS['APP']['SP'],
dest='sp',
metavar='species',
action='store',
help='Species + genome version. (eg. human_hg19)')
parser.add_argument('--plot',
required=False,
default=EDIT_BENCH_SETTINGS['APP']['PLOT'],
action='store_true',
help='Make a precision-recall plot. [default: {0}]'.format(
EDIT_BENCH_SETTINGS['APP']['PLOT']))
parser.add_argument('--out',
dest='out',
default=EDIT_BENCH_SETTINGS['APP']['OUT'],
required=False,
action='store',
metavar='out',
help='Output file name. [default: {0}]'.format(
EDIT_BENCH_SETTINGS['APP']['OUT']))
parser.add_argument('--version',
action='version',
help='Show program version number and exit.',
version=__version__)
return parser.parse_args()
if __name__ == '__main__':
parse_bench_opts()
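# Invocation sketch (file names are illustrative):
#   ivy_benchmark --vcf detected_sites.vcf --sp human_hg19 --plot
# benchmarks the detected sites in the given VCF(s) against the configured
# source and, with --plot, produces a precision-recall plot.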
|
This is a Complaint / Review about Source Tec Software.
I bought DVD movie-making software from Source Tec Software. I installed it according to the instructions, but it won't even open! I called and emailed them several times with no result. The problem is still not solved. Do you have any ideas about what I should do?
|
# -*- coding: utf-8 -*-
""" Simple Generic Location Tracking System
@copyright: 2011-2019 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from datetime import datetime, timedelta
from gluon import current, HTTP, FORM, INPUT, LABEL, TABLE
from gluon.storage import Storage
from s3dal import Table, Rows, Row
from .s3rest import S3Method
__all__ = ("S3Trackable",
"S3Tracker",
"S3CheckInMethod",
"S3CheckOutMethod",
)
UID = "uuid" # field name for UIDs
TRACK_ID = "track_id" # field name for track ID
LOCATION_ID = "location_id" # field name for base location
LOCATION = "gis_location" # location tablename
PRESENCE = "sit_presence" # presence tablename
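# Usage sketch (assumption, following the constructor and method signatures
# below; the tablename and ids are illustrative):
#   person = S3Trackable(tablename="pr_person", record_id=5)
#   location = person.get_location()          # current or base location
#   person.set_location(location_id)          # log presence at a location
#   person.check_in("org_office", office_id)  # interlock with another record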
# =============================================================================
class S3Trackable(object):
"""
Trackable types instance(s)
"""
def __init__(self, table=None, tablename=None, record=None, query=None,
record_id=None, record_ids=None, rtable=None):
"""
Constructor:
@param table: a Table object
@param tablename: a Str tablename
@param record: a Row object
@param query: a Query object
@param record_id: a record ID (if object is a Table)
@param record_ids: a list of record IDs (if object is a Table)
- these should be in ascending order
@param rtable: the resource table (for the recursive calls)
"""
db = current.db
s3db = current.s3db
self.records = []
self.table = s3db.sit_trackable
self.rtable = rtable
# if isinstance(trackable, (Table, str)):
# if hasattr(trackable, "_tablename"):
# table = trackable
# tablename = table._tablename
# else:
# table = s3db[trackable]
# tablename = trackable
# fields = self.__get_fields(table)
# if not fields:
# raise SyntaxError("Table %s is not a trackable type" % table._tablename)
# query = (table._id > 0)
# if uid is None:
# if record_id is not None:
# if isinstance(record_id, (list, tuple)):
# query = (table._id.belongs(record_id))
# else:
# query = (table._id == record_id)
# elif UID in table.fields:
# if not isinstance(uid, (list, tuple)):
# query = (table[UID].belongs(uid))
# else:
# query = (table[UID] == uid)
# fields = [table[f] for f in fields]
# rows = db(query).select(*fields)
if table or tablename:
if table:
tablename = table._tablename
else:
table = s3db[tablename]
fields = self.__get_fields(table)
if not fields:
raise SyntaxError("Not a trackable type: %s" % tablename)
if record_ids:
query = (table._id.belongs(record_ids))
limitby = (0, len(record_ids))
orderby = table._id
elif record_id:
query = (table._id == record_id)
limitby = (0, 1)
orderby = None
else:
query = (table._id > 0)
limitby = None
orderby = table._id
fields = [table[f] for f in fields]
rows = db(query).select(limitby=limitby, orderby=orderby, *fields)
# elif isinstance(trackable, Row):
# fields = self.__get_fields(trackable)
# if not fields:
# raise SyntaxError("Required fields not present in the row")
# rows = Rows(records=[trackable], compact=False)
elif record:
fields = self.__get_fields(record)
if not fields:
raise SyntaxError("Required fields not present in the row")
rows = Rows(records=[record], compact=False)
# elif isinstance(trackable, Rows):
# rows = [r for r in trackable if self.__get_fields(r)]
# fail = len(trackable) - len(rows)
# if fail:
# raise SyntaxError("Required fields not present in %d of the rows" % fail)
# rows = Rows(records=rows, compact=False)
# elif isinstance(trackable, (Query, Expression)):
# tablename = db._adapter.get_table(trackable)
# self.rtable = s3db[tablename]
# fields = self.__get_fields(self.rtable)
# if not fields:
# raise SyntaxError("Table %s is not a trackable type" % table._tablename)
# query = trackable
# fields = [self.rtable[f] for f in fields]
# rows = db(query).select(*fields)
elif query:
tablename = db._adapter.get_table(query)
self.rtable = s3db[tablename]
fields = self.__get_fields(self.rtable)
if not fields:
raise SyntaxError("Table %s is not a trackable type" % table._tablename)
fields = [self.rtable[f] for f in fields]
rows = db(query).select(*fields)
# elif isinstance(trackable, Set):
# query = trackable.query
# tablename = db._adapter.get_table(query)
# table = s3db[tablename]
# fields = self.__get_fields(table)
# if not fields:
# raise SyntaxError("Table %s is not a trackable type" % table._tablename)
# fields = [table[f] for f in fields]
# rows = trackable.select(*fields)
else:
raise SyntaxError("Invalid parameters")
records = []
for r in rows:
if self.__super_entity(r):
table = s3db[r.instance_type]
fields = self.__get_fields(table, super_entity=False)
if not fields:
raise SyntaxError("Table %s is not a trackable type" % table._tablename)
fields = [table[f] for f in fields]
row = db(table[UID] == r[UID]).select(limitby=(0, 1),
*fields).first()
if row:
records.append(row)
else:
records.append(r)
self.records = Rows(records=records, compact=False)
# -------------------------------------------------------------------------
@staticmethod
def __super_entity(trackable):
"""
Check whether a trackable is a super-entity
@param trackable: the trackable object
"""
if hasattr(trackable, "fields"):
keys = trackable.fields
else:
keys = trackable
return "instance_type" in keys
# -------------------------------------------------------------------------
@classmethod
def __get_fields(cls, trackable, super_entity=True):
"""
Check a trackable for presence of required fields
@param: the trackable object
"""
fields = []
if hasattr(trackable, "fields"):
keys = trackable.fields
else:
keys = trackable
if super_entity and \
cls.__super_entity(trackable) and UID in keys:
return ("instance_type", UID)
if LOCATION_ID in keys:
fields.append(LOCATION_ID)
if TRACK_ID in keys:
fields.append(TRACK_ID)
return fields
elif hasattr(trackable, "update_record") or \
isinstance(trackable, (Table, Row)):
return fields
return None
# -------------------------------------------------------------------------
def get_location(self,
timestmp=None,
_fields=None,
_filter=None,
as_rows=False,
exclude=None,
empty = True):
"""
Get the current location of the instance(s) (at the given time)
@param timestmp: last datetime for presence (defaults to current time)
@param _fields: fields to retrieve from the location records (None for ALL)
@param _filter: filter for the locations
@param as_rows: return the result as Rows object
@param exclude: interlocks to break at (avoids circular check-ins)
@param empty: return None if no locations (set to False by gis.get_location_data())
@return: a location record, or a list of location records (if multiple)
@ToDo: Also show Timestamp of when seen there
"""
if exclude is None:
exclude = []
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
ltable = s3db[LOCATION]
if timestmp is None:
timestmp = datetime.utcnow()
locations = []
for r in self.records:
location = None
if TRACK_ID in r:
query = ((ptable.deleted == False) & \
(ptable[TRACK_ID] == r[TRACK_ID]) & \
(ptable.timestmp <= timestmp))
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence:
if presence.interlock:
exclude = [r[TRACK_ID]] + exclude
tablename, record_id = presence.interlock.split(",", 1)
trackable = S3Trackable(tablename=tablename, record_id=record_id)
record = trackable.records.first()
if TRACK_ID not in record or \
record[TRACK_ID] not in exclude:
location = trackable.get_location(timestmp=timestmp,
exclude=exclude,
_fields=_fields,
as_rows=True).first()
elif presence.location_id:
query = (ltable.id == presence.location_id)
if _filter is not None:
query = query & _filter
if _fields is None:
location = db(query).select(ltable.ALL,
limitby=(0, 1)).first()
else:
location = db(query).select(limitby=(0, 1),
*_fields).first()
if not location:
if len(self.records) > 1:
trackable = S3Trackable(record=r, rtable=self.rtable)
else:
trackable = self
location = trackable.get_base_location(_fields=_fields)
if location:
locations.append(location)
elif not empty:
# Ensure we return an entry for gis.get_location_data() so that indexes match
locations.append(Row({"lat": None, "lon": None}))
if as_rows:
return Rows(records=locations, compact=False)
if not locations:
return None
else:
return locations
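    # A minimal usage sketch (hypothetical record ID; assumes a configured
    # Eden environment in which pr_person is a trackable resource):
    #
    #     person = S3Trackable(tablename="pr_person", record_id=1)
    #     row = person.get_location()   # latest presence, or base location
    #     rows = person.get_location(as_rows=True, _fields=["id", "lat", "lon"])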
# -------------------------------------------------------------------------
def set_location(self, location, timestmp=None):
"""
Set the current location of instance(s) (at the given time)
@param location: the location (as Row or record ID)
@param timestmp: the datetime of the presence (defaults to current time)
@return: location
"""
ptable = current.s3db[PRESENCE]
if timestmp is None:
timestmp = datetime.utcnow()
if isinstance(location, S3Trackable):
location = location.get_base_location()
if isinstance(location, Rows):
location = location.first()
if isinstance(location, Row):
if "location_id" in location:
location = location.location_id
else:
location = location.id
# Log even a set of no location
#if not location:
# return
#else:
data = dict(location_id=location, timestmp=timestmp)
for r in self.records:
if TRACK_ID not in r:
# No track ID => set base location
if len(self.records) > 1:
trackable = S3Trackable(record=r)
else:
trackable = self
trackable.set_base_location(location)
elif r[TRACK_ID]:
data.update({TRACK_ID:r[TRACK_ID]})
ptable.insert(**data)
self.__update_timestamp(r[TRACK_ID], timestmp)
return location
# -------------------------------------------------------------------------
def check_in(self, table, record, timestmp=None):
"""
Bind the presence of the instance(s) to another instance
@param table: table name of the other resource
@param record: record in the other resource (as Row or record ID)
@param timestmp: datetime of the check-in
@return: nothing
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
if isinstance(table, str):
table = s3db[table]
fields = self.__get_fields(table)
if not fields:
raise SyntaxError("No location data in %s" % table._tablename)
interlock = None
if isinstance(record, Rows):
record = record.first()
if not isinstance(record, Row):
if not self.__super_entity(table):
fields = (table._id,)
record = db(table._id == record).select(limitby=(0, 1), *fields).first()
if self.__super_entity(record):
# Get the instance table
table = s3db[record.instance_type]
if not self.__get_fields(table, super_entity=False):
raise SyntaxError("No trackable type: %s" % table._tablename)
# Get the instance record
query = (table[UID] == record[UID])
record = db(query).select(table._id, limitby=(0, 1), *fields).first()
try:
record_id = record[table._id] if record else None
except AttributeError:
record_id = None
if record_id:
interlock = "%s,%s" % (table, record_id)
else:
raise SyntaxError("No record specified for %s" % table._tablename)
if interlock:
if timestmp is None:
timestmp = datetime.utcnow()
data = {"location_id": None,
"timestmp": timestmp,
"interlock": interlock,
}
q = (ptable.timestmp <= timestmp) & \
(ptable.deleted == False)
for r in self.records:
if TRACK_ID not in r:
# Cannot check-in a non-trackable
continue
track_id = r[TRACK_ID]
query = (ptable[TRACK_ID] == track_id) & q
presence = db(query).select(ptable.interlock,
orderby = ~ptable.timestmp,
limitby = (0, 1),
).first()
if presence and presence.interlock == interlock:
# Already checked-in to the same instance
continue
data[TRACK_ID] = track_id
ptable.insert(**data)
self.__update_timestamp(track_id, timestmp)
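    # Note: a check-in is stored in the presence log as an "interlock"
    # string of the form "<tablename>,<record_id>", which get_location()
    # follows recursively. A hypothetical sketch (assumes org_office
    # record #3 exists and carries location data):
    #
    #     person = S3Trackable(tablename="pr_person", record_id=1)
    #     person.check_in("org_office", 3)  # person now reports the office location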
# -------------------------------------------------------------------------
def check_out(self, table=None, record=None, timestmp=None):
"""
Make the last log entry before timestmp independent from
the referenced entity (if any)
            @param table: table name of the other resource (optional)
            @param record: record in the other resource, as Row or record ID (optional)
            @param timestmp: the date/time of the check-out, defaults
                             to current time
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
if timestmp is None:
timestmp = datetime.utcnow()
interlock = None
if table is not None:
if isinstance(table, str):
table = s3db[table]
if isinstance(record, Rows):
record = record.first()
if self.__super_entity(table):
if not isinstance(record, Row):
record = table[record]
table = s3db[record.instance_type]
fields = self.__get_fields(table, super_entity=False)
if not fields:
raise SyntaxError("No trackable type: %s" % table._tablename)
query = table[UID] == record[UID]
record = db(query).select(limitby=(0, 1)).first()
if isinstance(record, Row) and table._id.name in record:
record = record[table._id.name]
if record:
interlock = "%s,%s" % (table, record)
else:
return
q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))
for r in self.records:
if TRACK_ID not in r:
# Cannot check-out a non-trackable
continue
query = q & (ptable[TRACK_ID] == r[TRACK_ID])
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence and presence.interlock:
if interlock and presence.interlock != interlock:
continue
elif not interlock and table and \
not presence.interlock.startswith("%s" % table):
continue
tablename, record_id = presence.interlock.split(",", 1)
trackable = S3Trackable(tablename=tablename, record_id=record_id)
location = trackable.get_location(_fields=["id"],
timestmp=timestmp,
as_rows=True).first()
if timestmp - presence.timestmp < timedelta(seconds=1):
timestmp = timestmp + timedelta(seconds=1)
            data = dict(location_id=location.id if location else None,
                        timestmp=timestmp,
                        interlock=None)
data.update({TRACK_ID:r[TRACK_ID]})
ptable.insert(**data)
self.__update_timestamp(r[TRACK_ID], timestmp)
# -------------------------------------------------------------------------
def remove_location(self, location=None):
"""
Remove a location from the presence log of the instance(s)
@todo: implement
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def get_base_location(self,
_fields=None,
_filter=None,
as_rows=False,
empty=True):
"""
Get the base location of the instance(s)
@param _fields: fields to retrieve from the location records (None for ALL)
@param _filter: filter for the locations
@param as_rows: return the result as Rows object
@param empty: return None if no locations (set to False by gis.get_location_data())
@return: the base location(s) of the current instance
"""
db = current.db
s3db = current.s3db
ltable = s3db[LOCATION]
rtable = self.rtable
locations = []
for r in self.records:
location = None
query = None
if LOCATION_ID in r:
query = (ltable.id == r[LOCATION_ID])
if rtable:
query = query & (rtable[LOCATION_ID] == ltable.id)
if TRACK_ID in r:
query = query & (rtable[TRACK_ID] == r[TRACK_ID])
elif TRACK_ID in r:
q = (self.table[TRACK_ID] == r[TRACK_ID])
trackable = db(q).select(limitby=(0, 1)).first()
table = s3db[trackable.instance_type]
if LOCATION_ID in table.fields:
query = ((table[TRACK_ID] == r[TRACK_ID]) &
(table[LOCATION_ID] == ltable.id))
if query:
if _filter is not None:
query = query & _filter
if not _fields:
location = db(query).select(ltable.ALL,
limitby=(0, 1)).first()
else:
location = db(query).select(limitby=(0, 1),
*_fields).first()
if location:
locations.append(location)
elif not empty:
# Ensure we return an entry for gis.get_location_data() so that indexes match
locations.append(Row({"lat": None, "lon": None}))
if as_rows:
return Rows(records=locations, compact=False)
if not locations:
return None
elif len(locations) == 1:
return locations[0]
else:
return locations
# -------------------------------------------------------------------------
def set_base_location(self, location=None):
"""
Set the base location of the instance(s)
@param location: the location for the base location as Row or record ID
@return: nothing
@note: instance tables without a location_id field will be ignored
"""
if isinstance(location, S3Trackable):
location = location.get_base_location()
if isinstance(location, Rows):
location = location.first()
if isinstance(location, Row):
location.get("id", None)
if not location or not str(location).isdigit():
# Location not found
return
else:
data = {LOCATION_ID:location}
# Update records without track ID
for r in self.records:
if TRACK_ID in r:
continue
elif LOCATION_ID in r:
if hasattr(r, "update_record"):
r.update_record(**data)
else:
raise SyntaxError("Cannot relate record to a table.")
db = current.db
s3db = current.s3db
# Update records with track ID
# => this can happen table-wise = less queries
track_ids = [r[TRACK_ID] for r in self.records if TRACK_ID in r]
rows = db(self.table[TRACK_ID].belongs(track_ids)).select()
tables = []
append = tables.append
types = set()
seen = types.add
for r in rows:
instance_type = r.instance_type
if instance_type not in types:
seen(instance_type)
table = s3db[instance_type]
                if LOCATION_ID in table.fields:
                    append(table)
else:
# No location ID in this type => ignore gracefully
continue
# Location specified => update all base locations
for table in tables:
db(table[TRACK_ID].belongs(track_ids)).update(**data)
# Refresh records
for r in self.records:
if LOCATION_ID in r:
r[LOCATION_ID] = location
return location
# -------------------------------------------------------------------------
def __update_timestamp(self, track_id, timestamp):
"""
Update the timestamp of a trackable
@param track_id: the trackable ID (super-entity key)
@param timestamp: the timestamp
"""
if track_id:
if timestamp is None:
timestamp = datetime.utcnow()
current.db(self.table.track_id == track_id).update(track_timestmp=timestamp)
# =============================================================================
class S3Tracker(object):
"""
S3 Tracking system, can be instantiated once as global 's3tracker' object
"""
def __init__(self):
"""
Constructor
"""
# -------------------------------------------------------------------------
def __call__(self, table=None, record_id=None, record_ids=None,
tablename=None, record=None, query=None):
"""
Get a tracking interface for a record or set of records
@param table: a Table object
@param record_id: a record ID (together with Table or tablename)
@param record_ids: a list/tuple of record IDs (together with Table or tablename)
            @param tablename: a table name (string)
@param record: a Row object
@param query: a Query object
@return: a S3Trackable instance for the specified record(s)
"""
return S3Trackable(table=table,
tablename=tablename,
record_id=record_id,
record_ids=record_ids,
record=record,
query=query,
)
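    # A minimal sketch of the call interface (hypothetical IDs; in Eden this
    # class is typically instantiated once as the global 's3tracker'):
    #
    #     tracker = S3Tracker()
    #     person = tracker(tablename="pr_person", record_id=1)  # -> S3Trackable
    #     person.set_location(4)    # log presence at gis_location #4
    #     person.check_out()        # revert to the trackable's own base location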
# -------------------------------------------------------------------------
def get_all(self, entity,
location=None,
bbox=None,
timestmp=None):
"""
Get all instances of the given entity at the given location and time
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def get_checked_in(self, table, record,
instance_type=None,
timestmp=None):
"""
Get all trackables of the given type that are checked-in
to the given instance at the given time
"""
raise NotImplementedError
# =============================================================================
class S3CheckInMethod(S3Method):
"""
Custom Method to allow a trackable resource to check-in
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
s3db = current.s3db
response = current.response
table = r.table
tracker = S3Trackable(table, record_id=r.id)
title = T("Check-In")
get_vars = r.get_vars
# Are we being passed a location_id?
location_id = get_vars.get("location_id", None)
if not location_id:
# Are we being passed a lat and lon?
lat = get_vars.get("lat", None)
if lat is not None:
lon = get_vars.get("lon", None)
if lon is not None:
form_vars = Storage(lat = float(lat),
lon = float(lon),
)
form = Storage(vars=form_vars)
s3db.gis_location_onvalidation(form)
location_id = s3db.gis_location.insert(**form_vars)
form = None
if not location_id:
# Give the user a form to check-in
# Test the formstyle
formstyle = current.deployment_settings.get_ui_formstyle()
row = formstyle("test", "test", "test", "test")
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form_rows = []
comment = ""
_id = "location_id"
label = LABEL("%s:" % T("Location"))
from .s3widgets import S3LocationSelector
field = table.location_id
#value = tracker.get_location(_fields=["id"],
# as_rows=True).first().id
value = None # We always want to create a new Location, not update the existing one
widget = S3LocationSelector(show_latlon = True)(field, value)
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "submit"
label = ""
widget = INPUT(_type="submit", _value=T("Check-In"))
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
if tuple_rows:
# Assume TRs
form = FORM(TABLE(*form_rows))
else:
form = FORM(*form_rows)
if form.accepts(current.request.vars, current.session):
location_id = form.vars.get("location_id", None)
if location_id:
# We're not Checking-in in S3Track terms (that's about interlocking with another object)
#tracker.check_in()
#timestmp = form.vars.get("timestmp", None)
#if timestmp:
# # @ToDo: Convert from string
# pass
#tracker.set_location(location_id, timestmp=timestmp)
tracker.set_location(location_id)
response.confirmation = T("Checked-In successfully!")
response.view = "check-in.html"
output = dict(form = form,
title = title,
)
return output
# @ToDo: JSON representation for check-in from mobile devices
else:
raise HTTP(415, current.ERROR.BAD_FORMAT)
# =============================================================================
class S3CheckOutMethod(S3Method):
"""
Custom Method to allow a trackable resource to check-out
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
response = current.response
tracker = S3Trackable(r.table, record_id=r.id)
title = T("Check-Out")
# Give the user a form to check-out
# Test the formstyle
formstyle = current.deployment_settings.get_ui_formstyle()
row = formstyle("test", "test", "test", "test")
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form_rows = []
comment = ""
_id = "submit"
label = ""
widget = INPUT(_type="submit", _value=T("Check-Out"))
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
if tuple_rows:
# Assume TRs
form = FORM(TABLE(*form_rows))
else:
form = FORM(*form_rows)
if form.accepts(current.request.vars, current.session):
# Check-Out
# We're not Checking-out in S3Track terms (that's about removing an interlock with another object)
# What we're doing is saying that we're now back at our base location
#tracker.check_out()
#timestmp = form_vars.get("timestmp", None)
#if timestmp:
# # @ToDo: Convert from string
# pass
#tracker.set_location(r.record.location_id, timestmp=timestmp)
tracker.set_location(r.record.location_id)
response.confirmation = T("Checked-Out successfully!")
response.view = "check-in.html"
output = dict(form = form,
title = title,
)
return output
# @ToDo: JSON representation for check-out from mobile devices
else:
raise HTTP(415, current.ERROR.BAD_FORMAT)
# END =========================================================================
|
"We first met MMS in 1991…Today our gross revenues are over a million dollars, 3 times what they were 15 years ago. Only 10% of optometry practices around the country are at that level of productivity for 5 doctor days a week of appointment times. MMS has been a big, big reason for our being where we are today."
Your communication with us is very important. We welcome your phone calls at 419-491-1199 or email us at [email protected]. You may also complete the online form below for more information. |
# The MIT License (MIT)
#
# Copyright (c) 2017 Tony DiCola for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# This driver is derived from Tony DiCola's work and adapted to the API of the kervi framework.
import time
try:
import struct
except ImportError:
import ustruct as struct
from kervi.hal import get_i2c, SensorDeviceDriver
# Internal constants and register values:
# pylint: disable=bad-whitespace
_LSM9DS1_ADDRESS_ACCELGYRO = 0x6B
_LSM9DS1_ADDRESS_MAG = 0x1E
_LSM9DS1_XG_ID = 0b01101000
_LSM9DS1_MAG_ID = 0b00111101
_LSM9DS1_ACCEL_MG_LSB_2G = 0.061
_LSM9DS1_ACCEL_MG_LSB_4G = 0.122
_LSM9DS1_ACCEL_MG_LSB_8G = 0.244
_LSM9DS1_ACCEL_MG_LSB_16G = 0.732
_LSM9DS1_MAG_MGAUSS_4GAUSS = 0.14
_LSM9DS1_MAG_MGAUSS_8GAUSS = 0.29
_LSM9DS1_MAG_MGAUSS_12GAUSS = 0.43
_LSM9DS1_MAG_MGAUSS_16GAUSS = 0.58
_LSM9DS1_GYRO_DPS_DIGIT_245DPS = 0.00875
_LSM9DS1_GYRO_DPS_DIGIT_500DPS = 0.01750
_LSM9DS1_GYRO_DPS_DIGIT_2000DPS = 0.07000
_LSM9DS1_TEMP_LSB_DEGREE_CELSIUS = 8 # 1°C = 8, 25° = 200, etc.
_LSM9DS1_REGISTER_WHO_AM_I_XG = 0x0F
_LSM9DS1_REGISTER_CTRL_REG1_G = 0x10
_LSM9DS1_REGISTER_CTRL_REG2_G = 0x11
_LSM9DS1_REGISTER_CTRL_REG3_G = 0x12
_LSM9DS1_REGISTER_TEMP_OUT_L = 0x15
_LSM9DS1_REGISTER_TEMP_OUT_H = 0x16
_LSM9DS1_REGISTER_STATUS_REG = 0x17
_LSM9DS1_REGISTER_OUT_X_L_G = 0x18
_LSM9DS1_REGISTER_OUT_X_H_G = 0x19
_LSM9DS1_REGISTER_OUT_Y_L_G = 0x1A
_LSM9DS1_REGISTER_OUT_Y_H_G = 0x1B
_LSM9DS1_REGISTER_OUT_Z_L_G = 0x1C
_LSM9DS1_REGISTER_OUT_Z_H_G = 0x1D
_LSM9DS1_REGISTER_CTRL_REG4 = 0x1E
_LSM9DS1_REGISTER_CTRL_REG5_XL = 0x1F
_LSM9DS1_REGISTER_CTRL_REG6_XL = 0x20
_LSM9DS1_REGISTER_CTRL_REG7_XL = 0x21
_LSM9DS1_REGISTER_CTRL_REG8 = 0x22
_LSM9DS1_REGISTER_CTRL_REG9 = 0x23
_LSM9DS1_REGISTER_CTRL_REG10 = 0x24
_LSM9DS1_REGISTER_OUT_X_L_XL = 0x28
_LSM9DS1_REGISTER_OUT_X_H_XL = 0x29
_LSM9DS1_REGISTER_OUT_Y_L_XL = 0x2A
_LSM9DS1_REGISTER_OUT_Y_H_XL = 0x2B
_LSM9DS1_REGISTER_OUT_Z_L_XL = 0x2C
_LSM9DS1_REGISTER_OUT_Z_H_XL = 0x2D
_LSM9DS1_REGISTER_WHO_AM_I_M = 0x0F
_LSM9DS1_REGISTER_CTRL_REG1_M = 0x20
_LSM9DS1_REGISTER_CTRL_REG2_M = 0x21
_LSM9DS1_REGISTER_CTRL_REG3_M = 0x22
_LSM9DS1_REGISTER_CTRL_REG4_M = 0x23
_LSM9DS1_REGISTER_CTRL_REG5_M = 0x24
_LSM9DS1_REGISTER_STATUS_REG_M = 0x27
_LSM9DS1_REGISTER_OUT_X_L_M = 0x28
_LSM9DS1_REGISTER_OUT_X_H_M = 0x29
_LSM9DS1_REGISTER_OUT_Y_L_M = 0x2A
_LSM9DS1_REGISTER_OUT_Y_H_M = 0x2B
_LSM9DS1_REGISTER_OUT_Z_L_M = 0x2C
_LSM9DS1_REGISTER_OUT_Z_H_M = 0x2D
_LSM9DS1_REGISTER_CFG_M = 0x30
_LSM9DS1_REGISTER_INT_SRC_M = 0x31
_MAGTYPE = True
_XGTYPE = False
_SENSORS_GRAVITY_STANDARD = 9.80665
# User facing constants/module globals.
ACCELRANGE_2G = (0b00 << 3)
ACCELRANGE_16G = (0b01 << 3)
ACCELRANGE_4G = (0b10 << 3)
ACCELRANGE_8G = (0b11 << 3)
MAGGAIN_4GAUSS = (0b00 << 5) # +/- 4 gauss
MAGGAIN_8GAUSS = (0b01 << 5) # +/- 8 gauss
MAGGAIN_12GAUSS = (0b10 << 5) # +/- 12 gauss
MAGGAIN_16GAUSS = (0b11 << 5) # +/- 16 gauss
GYROSCALE_245DPS = (0b00 << 3) # +/- 245 degrees/s rotation
GYROSCALE_500DPS = (0b01 << 3) # +/- 500 degrees/s rotation
GYROSCALE_2000DPS = (0b11 << 3) # +/- 2000 degrees/s rotation
# pylint: enable=bad-whitespace
def _twos_comp(val, bits):
    # Convert an unsigned integer in two's complement form of the specified
    # bit length to its signed integer value and return it.
if val & (1 << (bits - 1)) != 0:
return val - (1 << bits)
return val
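# For example, with a 12-bit reading 0xFFF decodes to -1 while 0x7FF stays
# positive:
#   _twos_comp(0xFFF, 12) == -1
#   _twos_comp(0x7FF, 12) == 2047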
class _LSM9DS1():
"""Driver for the LSM9DS1 accelerometer, magnetometer, gyroscope."""
def __init__(self):
self._BUFFER = bytearray(6)
# soft reset & reboot accel/gyro
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG8, 0x05)
# soft reset & reboot magnetometer
self._write_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M, 0x0C)
time.sleep(0.01)
# Check ID registers.
if self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_WHO_AM_I_XG) != _LSM9DS1_XG_ID or \
self._read_u8(_MAGTYPE, _LSM9DS1_REGISTER_WHO_AM_I_M) != _LSM9DS1_MAG_ID:
raise RuntimeError('Could not find LSM9DS1, check wiring!')
# enable gyro continuous
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G, 0xC0) # on XYZ
        # enable accelerometer continuous
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG5_XL, 0x38)
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL, 0xC0)
# enable mag continuous
self._write_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG3_M, 0x00)
# Set default ranges for the various sensors
self._accel_mg_lsb = None
self._mag_mgauss_lsb = None
self._gyro_dps_digit = None
self.accel_range = ACCELRANGE_2G
self.mag_gain = MAGGAIN_4GAUSS
self.gyro_scale = GYROSCALE_245DPS
@property
def accel_range(self):
"""The accelerometer range. Must be a value of:
- ACCELRANGE_2G
- ACCELRANGE_4G
- ACCELRANGE_8G
- ACCELRANGE_16G
"""
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL)
return (reg & 0b00011000) & 0xFF
@accel_range.setter
def accel_range(self, val):
assert val in (ACCELRANGE_2G, ACCELRANGE_4G, ACCELRANGE_8G,
ACCELRANGE_16G)
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL)
reg = (reg & ~(0b00011000)) & 0xFF
reg |= val
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL, reg)
if val == ACCELRANGE_2G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_2G
elif val == ACCELRANGE_4G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_4G
elif val == ACCELRANGE_8G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_8G
elif val == ACCELRANGE_16G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_16G
@property
def mag_gain(self):
"""The magnetometer gain. Must be a value of:
- MAGGAIN_4GAUSS
- MAGGAIN_8GAUSS
- MAGGAIN_12GAUSS
- MAGGAIN_16GAUSS
"""
reg = self._read_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M)
return (reg & 0b01100000) & 0xFF
@mag_gain.setter
def mag_gain(self, val):
assert val in (MAGGAIN_4GAUSS, MAGGAIN_8GAUSS, MAGGAIN_12GAUSS,
MAGGAIN_16GAUSS)
reg = self._read_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M)
reg = (reg & ~(0b01100000)) & 0xFF
reg |= val
self._write_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M, reg)
if val == MAGGAIN_4GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_4GAUSS
elif val == MAGGAIN_8GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_8GAUSS
elif val == MAGGAIN_12GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_12GAUSS
elif val == MAGGAIN_16GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_16GAUSS
@property
def gyro_scale(self):
"""The gyroscope scale. Must be a value of:
- GYROSCALE_245DPS
- GYROSCALE_500DPS
- GYROSCALE_2000DPS
"""
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G)
return (reg & 0b00011000) & 0xFF
@gyro_scale.setter
def gyro_scale(self, val):
assert val in (GYROSCALE_245DPS, GYROSCALE_500DPS, GYROSCALE_2000DPS)
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G)
reg = (reg & ~(0b00011000)) & 0xFF
reg |= val
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G, reg)
if val == GYROSCALE_245DPS:
self._gyro_dps_digit = _LSM9DS1_GYRO_DPS_DIGIT_245DPS
elif val == GYROSCALE_500DPS:
self._gyro_dps_digit = _LSM9DS1_GYRO_DPS_DIGIT_500DPS
elif val == GYROSCALE_2000DPS:
self._gyro_dps_digit = _LSM9DS1_GYRO_DPS_DIGIT_2000DPS
def read_accel_raw(self):
"""Read the raw accelerometer sensor values and return it as a
3-tuple of X, Y, Z axis values that are 16-bit unsigned values. If you
want the acceleration in nice units you probably want to use the
accelerometer property!
"""
# Read the accelerometer
self._read_bytes(_XGTYPE, 0x80 | _LSM9DS1_REGISTER_OUT_X_L_XL, 6, self._BUFFER)
raw_x, raw_y, raw_z = struct.unpack_from('<hhh', self._BUFFER[0:6])
return (raw_x, raw_y, raw_z)
@property
def acceleration(self):
"""The accelerometer X, Y, Z axis values as a 3-tuple of
m/s^2 values.
"""
raw = self.read_accel_raw()
return map(lambda x: x * self._accel_mg_lsb / 1000.0 * _SENSORS_GRAVITY_STANDARD,
raw)
def read_mag_raw(self):
"""Read the raw magnetometer sensor values and return it as a
3-tuple of X, Y, Z axis values that are 16-bit unsigned values. If you
want the magnetometer in nice units you probably want to use the
magnetometer property!
"""
# Read the magnetometer
self._read_bytes(_MAGTYPE, 0x80 | _LSM9DS1_REGISTER_OUT_X_L_M, 6, self._BUFFER)
raw_x, raw_y, raw_z = struct.unpack_from('<hhh', self._BUFFER[0:6])
return (raw_x, raw_y, raw_z)
@property
def magnetic(self):
"""The magnetometer X, Y, Z axis values as a 3-tuple of
gauss values.
"""
raw = self.read_mag_raw()
return map(lambda x: x * self._mag_mgauss_lsb / 1000.0, raw)
def read_gyro_raw(self):
"""Read the raw gyroscope sensor values and return it as a
3-tuple of X, Y, Z axis values that are 16-bit unsigned values. If you
want the gyroscope in nice units you probably want to use the
gyroscope property!
"""
# Read the gyroscope
self._read_bytes(_XGTYPE, 0x80 | _LSM9DS1_REGISTER_OUT_X_L_G, 6, self._BUFFER)
raw_x, raw_y, raw_z = struct.unpack_from('<hhh', self._BUFFER[0:6])
return (raw_x, raw_y, raw_z)
@property
def gyro(self):
"""The gyroscope X, Y, Z axis values as a 3-tuple of
degrees/second values.
"""
raw = self.read_gyro_raw()
return map(lambda x: x * self._gyro_dps_digit, raw)
def read_temp_raw(self):
"""Read the raw temperature sensor value and return it as a 12-bit
signed value. If you want the temperature in nice units you probably
want to use the temperature property!
"""
# Read temp sensor
self._read_bytes(_XGTYPE, 0x80 | _LSM9DS1_REGISTER_TEMP_OUT_L, 2, self._BUFFER)
temp = ((self._BUFFER[1] << 8) | self._BUFFER[0]) >> 4
return _twos_comp(temp, 12)
@property
def temperature(self):
"""The temperature of the sensor in degrees Celsius."""
        # This is just a guess since the starting point (27.5C here) isn't documented :(
# See discussion from:
# https://github.com/kriswiner/LSM9DS1/issues/3
temp = self.read_temp_raw()
temp = 27.5 + temp/16
return temp
def _read_u8(self, sensor_type, address):
# Read an 8-bit unsigned value from the specified 8-bit address.
# The sensor_type boolean should be _MAGTYPE when talking to the
# magnetometer, or _XGTYPE when talking to the accel or gyro.
# MUST be implemented by subclasses!
raise NotImplementedError()
def _read_bytes(self, sensor_type, address, count, buf):
# Read a count number of bytes into buffer from the provided 8-bit
# register address. The sensor_type boolean should be _MAGTYPE when
# talking to the magnetometer, or _XGTYPE when talking to the accel or
# gyro. MUST be implemented by subclasses!
raise NotImplementedError()
def _write_u8(self, sensor_type, address, val):
# Write an 8-bit unsigned value to the specified 8-bit address.
# The sensor_type boolean should be _MAGTYPE when talking to the
# magnetometer, or _XGTYPE when talking to the accel or gyro.
# MUST be implemented by subclasses!
raise NotImplementedError()
class _LSM9DS1_I2C(_LSM9DS1):
"""Driver for the LSM9DS1 connect over I2C."""
def __init__(self, acclgyro_address=_LSM9DS1_ADDRESS_ACCELGYRO, mag_address=_LSM9DS1_ADDRESS_MAG, bus=None):
self._mag_device = get_i2c(mag_address, bus)
self._xg_device = get_i2c(acclgyro_address, bus)
super().__init__()
def _read_u8(self, sensor_type, address):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
return device.read_U8(address)
def _read_bytes(self, sensor_type, address, count, buf):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
        r = device.read_list(address, count)
buf[:] = r
def _write_u8(self, sensor_type, address, val):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
device.write8(address, val)
class LSM9DS1AccelerationDeviceDriver(SensorDeviceDriver):
def __init__(self, accel_range=ACCELRANGE_2G, acclgyro_address=_LSM9DS1_ADDRESS_ACCELGYRO, mag_address=_LSM9DS1_ADDRESS_MAG, bus=None):
SensorDeviceDriver.__init__(self)
self._device = _LSM9DS1_I2C(acclgyro_address,mag_address, bus)
self._device.accel_range = accel_range
@property
def dimensions(self):
return 3
@property
def value_type(self):
return "number"
@property
def dimension_labels(self):
return ["x","y", "z"]
@property
def type(self):
return "acceleration"
@property
def unit(self):
return "m/s^2"
def read_value(self):
x,y,z = self._device.acceleration
return [x, y, z]
class LSM9DS1GyroDeviceDriver(SensorDeviceDriver):
def __init__(self, gyro_scale=GYROSCALE_245DPS, acclgyro_address=_LSM9DS1_ADDRESS_ACCELGYRO, mag_address=_LSM9DS1_ADDRESS_MAG, bus=None):
SensorDeviceDriver.__init__(self)
self._device = _LSM9DS1_I2C(acclgyro_address,mag_address, bus)
self._device.gyro_scale = gyro_scale
@property
def value_type(self):
return "number"
@property
def dimensions(self):
return 3
@property
def dimension_labels(self):
return ["x","y", "z"]
@property
def type(self):
return "gyro"
@property
def unit(self):
return "degrees/second"
def read_value(self):
x,y,z = self._device.gyro
return [x, y, z]
class LSM9DS1MagneticDeviceDriver(SensorDeviceDriver):
def __init__(self, gain=MAGGAIN_4GAUSS, acclgyro_address=_LSM9DS1_ADDRESS_ACCELGYRO, mag_address=_LSM9DS1_ADDRESS_MAG, bus=None):
SensorDeviceDriver.__init__(self)
self._device = _LSM9DS1_I2C(acclgyro_address,mag_address, bus)
self._device.mag_gain = gain
@property
def dimensions(self):
return 3
@property
def dimension_labels(self):
return ["x","y", "z"]
@property
def type(self):
return "magnetic"
@property
def unit(self):
return "gauss"
@property
def value_type(self):
return "number"
def read_value(self):
x,y,z = self._device.magnetic
return [x, y, z]
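# A minimal usage sketch (hypothetical; assumes a kervi application with an
# I2C bus and an LSM9DS1 wired at the default addresses; 'Sensor' here is
# the kervi sensor wrapper class):
#
#     from kervi.sensors import Sensor
#     accel = Sensor("accel", "Acceleration", LSM9DS1AccelerationDeviceDriver())
#
# or, reading directly from a driver instance:
#
#     driver = LSM9DS1GyroDeviceDriver(gyro_scale=GYROSCALE_500DPS)
#     x, y, z = driver.read_value()  # degrees/second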
|
Milo was a cowboy for Halloween this year. He chose his own costume. He was going to be a dragon because a friend of mine had a very cute costume that her son wore last year when he was two, and she was going to let me borrow it, but Milo refused point blank to wear it. Before I had Milo I didn't even know a two year old could do that! I tried twice to put it on him. The first time was in the middle of Word World (his favorite show), so I thought maybe that was the reason he flipped out. But the second time he nearly ripped the head off trying to pull it off of himself. I tried to calm him down but he would not be calmed, so I backed off and thought I'd let him work it out. That was when he started trying to climb the front of the changer so I would take it off of him. That was the point I decided that, as cute as the costume was and as grateful as I was that my friend would lend it to Milo, he was not going to be a dragon for Halloween.
That same day we were at the same friend's house playing, and her son also has a play cowboy hat that Milo loves to play with, so it planted the idea in my head that if the dragon thing didn't work out, Milo could be a cowboy, because that "costume" is really just clothes. It wouldn't feel any different to him and he wouldn't freak out. Plus he LOVES cowboy stuff. So we had overalls, we got him a western shirt and boots, borrowed a toddler-sized cowboy hat from another sweet friend, and VOILA, instant cowboy!
Here's a little trip down memory lane. Milo was a penguin last year right after he turned 1! He had just learned to walk and he looked so cute waddling around with the big puffy belly and the penguin tail flapping behind him!
And he was a puppy dog for his very first Halloween right after he was born! It was so hard to find a costume for him that the one we got ended up being huge on him, and it was so snuggly warm he fell asleep in it as soon as we put it on him!
I have really been working with Milo on his letters for months now. Up until now he hasn't really shown much interest, but lately he has really gotten into the ABC song and pointing out letters. He likes to point out the letters on my shirts and guess what they are. Sometimes he gets it right, but most of the time he mixes them up. That's OK, though; after all, he did just turn two! I've really been working on M-I-L-O with his bath tub letters. He has a lot of fun with that! He loves the letter O; it's his favorite letter and the one he can identify most easily. He loves to point it out whenever he sees it. There are some block letters high on a shelf in his room that spell Love, and he saw them the other day, pointed to them and said "O!" He also likes to point to the Milo letters on his wall and repeat what I always tell him when we see his name: "M-I-L-O, Milo!" But he can't really say M or I, so they come out in toddler gibberish. One day I'll get it on video, it's so cute!
Milo's favorite song recently is the ABC song. He loves to go around singing it to himself and then be praised for doing such a great job. :) He doesn't sing it quite right yet, though, and if we start to sing it to him he'll tell us to stop in no uncertain terms. He wants to do it all by himself. This is a video of Milo starting to sing the ABCs until he figured out that I was taping him, and then he wanted nothing more to do with it. He is becoming shy these days.
Milo can be very loving when he wants to be, and it never fails to melt my heart when he is. As he gets more and more independent I usually have to wait for him to come to me to cuddle now. He is always on the move in his busy two-year-old boy way, so when he wants to stop and get a hug I always have time for that! I was checking my email on Chris' laptop last night and Milo was greatly annoyed that my lap wasn't available for him to just drop by and get into whenever he felt like it. At one point he came over and I tried to just give him a side hug; he wasn't having it, and he kept pushing at the laptop saying "lap, lap!" When I closed it and put it away for the night he actually cheered! If we ever had a second kid I just don't think he could handle it.
This morning after I got him up, we were walking into the kitchen to get breakfast when he just turned around and leaned into me and wanted a hug. As he was leaning in he looked up at me with those big round eyes and said "I lub you Mama." I melted and said "I love you too baby!" As a girl, you have never heard "I love you" from a boy until your son says it to you.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: cache_file.py
# Python Library
# Author: Raymond Wagner
# Purpose: Persistent file-backed cache using /tmp/ to share data
# using flock or msvcrt.locking to allow safe concurrent
# access.
#-----------------------
import struct
import errno
import json
import os
import io
from cStringIO import StringIO
from tmdb_exceptions import *
from cache_engine import CacheEngine, CacheObject
####################
# Cache File Format
#------------------
# cache version (2) unsigned short
# slot count (2) unsigned short
# slot 0: timestamp (8) double
# slot 0: lifetime (4) unsigned int
# slot 0: seek point (4) unsigned int
# slot 1: timestamp
# slot 1: lifetime index slots are IDd by their query date and
# slot 1: seek point are filled incrementally forwards. lifetime
# .... is how long after query date before the item
# .... expires, and seek point is the location of the
# slot N-2: timestamp start of data for that entry. 256 empty slots
# slot N-2: lifetime are pre-allocated, allowing fast updates.
# slot N-2: seek point when all slots are filled, the cache file is
# slot N-1: timestamp rewritten from scratch to add more slots.
# slot N-1: lifetime
# slot N-1: seek point
# block 1 (?) ASCII
# block 2
# .... blocks are just simple ASCII text, generated
# .... as independent objects by the JSON encoder
# block N-2
# block N-1
#
####################
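# Given this layout, the header is 4 bytes (two unsigned shorts) and each
# slot record is 16 bytes (one 8-byte double plus two 4-byte unsigned ints),
# so slot i starts at byte offset 4 + 16*i. This is the arithmetic used by
# FileEngine._write() below when filling a free slot.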
def _donothing(*args, **kwargs):
pass
try:
import fcntl
class Flock( object ):
"""
Context manager to flock file for the duration the object exists.
Referenced file will be automatically unflocked as the interpreter
exits the context.
Supports an optional callback to process the error and optionally
suppress it.
"""
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
def __init__(self, fileobj, operation, callback=None):
self.fileobj = fileobj
self.operation = operation
self.callback = callback
def __enter__(self):
fcntl.flock(self.fileobj, self.operation)
def __exit__(self, exc_type, exc_value, exc_tb):
suppress = False
if callable(self.callback):
suppress = self.callback(exc_type, exc_value, exc_tb)
fcntl.flock(self.fileobj, fcntl.LOCK_UN)
return suppress
def parse_filename(filename):
if '$' in filename:
# replace any environmental variables
filename = os.path.expandvars(filename)
if filename.startswith('~'):
# check for home directory
return os.path.expanduser(filename)
elif filename.startswith('/'):
# check for absolute path
return filename
# return path with temp directory prepended
return '/tmp/' + filename
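    # For example (POSIX): parse_filename('tmdb3.cache') -> '/tmp/tmdb3.cache',
    # while parse_filename('~/tmdb3.cache') expands to the user's home directory.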
except ImportError:
import msvcrt
class Flock( object ):
LOCK_EX = msvcrt.LK_LOCK
LOCK_SH = msvcrt.LK_LOCK
def __init__(self, fileobj, operation, callback=None):
self.fileobj = fileobj
self.operation = operation
self.callback = callback
def __enter__(self):
self.size = os.path.getsize(self.fileobj.name)
msvcrt.locking(self.fileobj.fileno(), self.operation, self.size)
def __exit__(self, exc_type, exc_value, exc_tb):
suppress = False
if callable(self.callback):
suppress = self.callback(exc_type, exc_value, exc_tb)
msvcrt.locking(self.fileobj.fileno(), msvcrt.LK_UNLCK, self.size)
return suppress
def parse_filename(filename):
if '%' in filename:
# replace any environmental variables
filename = os.path.expandvars(filename)
if filename.startswith('~'):
# check for home directory
return os.path.expanduser(filename)
        elif (ord(filename[0]) in (range(65, 91) + range(97, 123))) \
                and (filename[1:3] == ':\\'):
# check for absolute drive path (e.g. C:\...)
return filename
elif (filename.count('\\') >= 3) and (filename.startswith('\\\\')):
# check for absolute UNC path (e.g. \\server\...)
return filename
# return path with temp directory prepended
return os.path.expandvars(os.path.join('%TEMP%',filename))
class FileCacheObject( CacheObject ):
_struct = struct.Struct('dII') # double and two ints
# timestamp, lifetime, position
@classmethod
def fromFile(cls, fd):
dat = cls._struct.unpack(fd.read(cls._struct.size))
obj = cls(None, None, dat[1], dat[0])
obj.position = dat[2]
return obj
def __init__(self, *args, **kwargs):
self._key = None
self._data = None
self._size = None
self._buff = StringIO()
super(FileCacheObject, self).__init__(*args, **kwargs)
@property
def size(self):
if self._size is None:
self._buff.seek(0,2)
size = self._buff.tell()
if size == 0:
if (self._key is None) or (self._data is None):
raise RuntimeError
json.dump([self.key, self.data], self._buff)
                size = self._buff.tell()
            self._size = size
return self._size
@size.setter
def size(self, value): self._size = value
@property
def key(self):
if self._key is None:
try:
self._key, self._data = json.loads(self._buff.getvalue())
except:
pass
return self._key
@key.setter
def key(self, value): self._key = value
@property
def data(self):
if self._data is None:
self._key, self._data = json.loads(self._buff.getvalue())
return self._data
@data.setter
def data(self, value): self._data = value
def load(self, fd):
fd.seek(self.position)
self._buff.seek(0)
self._buff.write(fd.read(self.size))
def dumpslot(self, fd):
pos = fd.tell()
fd.write(self._struct.pack(self.creation, self.lifetime, self.position))
def dumpdata(self, fd):
self.size
fd.seek(self.position)
fd.write(self._buff.getvalue())
class FileEngine( CacheEngine ):
"""Simple file-backed engine."""
name = 'file'
_struct = struct.Struct('HH') # two shorts for version and count
_version = 2
def __init__(self, parent):
super(FileEngine, self).__init__(parent)
self.configure(None)
def configure(self, filename, preallocate=256):
self.preallocate = preallocate
self.cachefile = filename
self.size = 0
self.free = 0
self.age = 0
def _init_cache(self):
# only run this once
self._init_cache = _donothing
if self.cachefile is None:
raise TMDBCacheError("No cache filename given.")
self.cachefile = parse_filename(self.cachefile)
try:
# attempt to read existing cache at filename
# handle any errors that occur
self._open('r+b')
# seems to have read fine, make sure we have write access
if not os.access(self.cachefile, os.W_OK):
raise TMDBCacheWriteError(self.cachefile)
except IOError as e:
if e.errno == errno.ENOENT:
# file does not exist, create a new one
try:
self._open('w+b')
self._write([])
except IOError as e:
if e.errno == errno.ENOENT:
# directory does not exist
raise TMDBCacheDirectoryError(self.cachefile)
elif e.errno == errno.EACCES:
# user does not have rights to create new file
raise TMDBCacheWriteError(self.cachefile)
else:
# let the unhandled error continue through
raise
            elif e.errno == errno.EACCES:
# file exists, but we do not have permission to access it
raise TMDBCacheReadError(self.cachefile)
else:
# let the unhandled error continue through
raise
def get(self, date):
self._init_cache()
self._open('r+b')
with Flock(self.cachefd, Flock.LOCK_SH): # lock for shared access
# return any new objects in the cache
return self._read(date)
def put(self, key, value, lifetime):
self._init_cache()
self._open('r+b')
with Flock(self.cachefd, Flock.LOCK_EX): # lock for exclusive access
newobjs = self._read(self.age)
newobjs.append(FileCacheObject(key, value, lifetime))
# this will cause a new file object to be opened with the proper
# access mode, however the Flock should keep the old object open
# and properly locked
self._open('r+b')
self._write(newobjs)
return newobjs
def _open(self, mode='r+b'):
# enforce binary operation
try:
if self.cachefd.mode == mode:
# already opened in requested mode, nothing to do
self.cachefd.seek(0)
return
except: pass # catch issue of no cachefile yet opened
self.cachefd = io.open(self.cachefile, mode)
def _read(self, date):
try:
self.cachefd.seek(0)
version, count = self._struct.unpack(\
self.cachefd.read(self._struct.size))
if version != self._version:
                # old version, break out and we'll rewrite when finished
raise Exception
self.size = count
cache = []
while count:
# loop through storage definitions
obj = FileCacheObject.fromFile(self.cachefd)
cache.append(obj)
count -= 1
except:
# failed to read information, so just discard it and return empty
self.size = 0
self.free = 0
return []
# get end of file
self.cachefd.seek(0,2)
position = self.cachefd.tell()
newobjs = []
emptycount = 0
# walk backward through all, collecting new content and populating size
while len(cache):
obj = cache.pop()
if obj.creation == 0:
# unused slot, skip
emptycount += 1
elif obj.expired:
# object has passed expiration date, no sense processing
continue
elif obj.creation > date:
# used slot with new data, process
obj.size, position = position - obj.position, obj.position
newobjs.append(obj)
# update age
self.age = max(self.age, obj.creation)
elif len(newobjs):
# end of new data, break
break
# walk forward and load new content
for obj in newobjs:
obj.load(self.cachefd)
self.free = emptycount
return newobjs
def _write(self, data):
if self.free and (self.size != self.free):
# we only care about the last data point, since the rest are
# already stored in the file
data = data[-1]
# determine write position of data in cache
self.cachefd.seek(0,2)
end = self.cachefd.tell()
data.position = end
# write incremental update to free slot
self.cachefd.seek(4 + 16*(self.size-self.free))
data.dumpslot(self.cachefd)
data.dumpdata(self.cachefd)
else:
# rewrite cache file from scratch
# pull data from parent cache
data.extend(self.parent()._data.values())
data.sort(key=lambda x: x.creation)
# write header
size = len(data) + self.preallocate
self.cachefd.seek(0)
self.cachefd.truncate()
self.cachefd.write(self._struct.pack(self._version, size))
# write storage slot definitions
prev = None
for d in data:
                if prev is None:
d.position = 4 + 16*size
else:
d.position = prev.position + prev.size
d.dumpslot(self.cachefd)
prev = d
            # fill in the preallocated empty slots
            for i in range(self.preallocate):
                self.cachefd.write(FileCacheObject._struct.pack(0, 0, 0))
# write stored data
for d in data:
d.dumpdata(self.cachefd)
self.cachefd.flush()
def expire(self, key):
pass
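# A minimal usage sketch (hypothetical; 'parent_cache' must be the owning
# cache object whose '_data' dict of CacheObjects _write() pulls from):
#
#     engine = FileEngine(parent_cache)
#     engine.configure('tmdb3.cache')            # resolved under /tmp/ by default
#     engine.put('somekey', {'some': 'data'}, 3600)
#     fresh = engine.get(0)                      # objects newer than timestamp 0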
|
You are not just a number. You are a part of the pack!
Here at ZenWolf we're a bit different. Blue collar one day, white collar the next: quirky, professional and 100% attentive to you, the client. We cater to our clients, each and every one of them. You're not a number with us; you are us. No long, annoying waits to talk to a real human. Our Account Executives make every day about serving our clients on a professional level.
The four pillars we operate on are Consultation, Design, Development, and Management. We never hesitate to go the extra step in making sure our clients get exactly what they need to compete in today's marketplace.
Your relationship matters to us. It matters on all levels, from professional to personal. We take pride in understanding who you are and what you need. With ZenWolf at your side you can rest easy in your marketing efforts and let us do the work. Thinking of hiring a full-time person to take over your marketing? Don't. Let us take the reins and you'll have a professional development and marketing team, certified in three areas with Google, with over 20 years of experience in the industry.
from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
siliconi = SchLib(tool=SKIDL).add_parts(*[
Part(name='D469',dest=TEMPLATE,tool=SKIDL,keywords='High-Current Driver',description='Quad High-Current Power Driver',ref_prefix='U',num_units=4,do_erc=True,pins=[
Pin(num='7',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='1',name='IN1',do_erc=True),
Pin(num='2',name='IN',do_erc=True),
Pin(num='13',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='3',name='IN1',do_erc=True),
Pin(num='4',name='IN',do_erc=True),
Pin(num='12',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='IN1',do_erc=True),
Pin(num='6',name='IN',do_erc=True),
Pin(num='11',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='8',name='IN1',do_erc=True),
Pin(num='9',name='IN',do_erc=True),
Pin(num='10',name='OUT',func=Pin.OUTPUT,do_erc=True)]),
    Part(name='DG411',dest=TEMPLATE,tool=SKIDL,keywords='CMOS Analog Switch',description='Monolithic Quad SPST, CMOS Analog Switches',ref_prefix='U',num_units=4,do_erc=True,pins=[
Pin(num='4',name='V-',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='1',name='SW',do_erc=True),
Pin(num='2',name='IN',func=Pin.PASSIVE,do_erc=True),
Pin(num='3',name='OUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='OUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='IN',func=Pin.PASSIVE,do_erc=True),
Pin(num='8',name='SW',do_erc=True),
Pin(num='9',name='SW',do_erc=True),
Pin(num='10',name='IN',func=Pin.PASSIVE,do_erc=True),
Pin(num='11',name='OUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='14',name='OUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='15',name='IN',func=Pin.PASSIVE,do_erc=True),
Pin(num='16',name='SW',do_erc=True)])])
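# A minimal usage sketch (hypothetical net name; assumes SKiDL's default
# circuit is in scope):
#
#     from skidl import Part, Net
#     sw = Part(siliconi, 'DG411')   # instantiate one quad analog switch
#     Net('CTRL') += sw['SW']        # attach a control net to a switch pin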
|
Capturing the story of your day. Natural, contemporary and classic wedding photography. All images are yours to keep. They are supplied on a disc, so you can print, upload and share them (at no extra cost). I have no sales agenda, nor will I 'up sell', or try to push products. I offer a friendly, relaxed and professional service in order to provide a wonderful record of your special day. |
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid -**Internationalisation
utilities.**
The module provides utilities function to convert between unicode and byte
string for Python 2.x. When we move to Python 3, this module and its usage
should be removed as string in Python 3 is already stored in unicode.
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
# This import is to enable SIP API V2
# noinspection PyUnresolvedReferences
import qgis # pylint: disable=unused-import
# noinspection PyPackageRequirements
from PyQt4.QtCore import QCoreApplication, QSettings, QLocale
from safe.utilities.unicode import get_unicode
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '02/24/15'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
def tr(text, context='@default'):
"""We define a tr() alias here since the utilities implementation below
is not a class and does not inherit from QObject.
.. note:: see http://tinyurl.com/pyqt-differences
:param text: String to be translated
:type text: str, unicode
    :param context: A context for the translation, since the same text can
        be translated differently depending on the context.
:type context: str
:returns: Translated version of the given string if available, otherwise
the original string.
:rtype: str, unicode
"""
# Ensure it's in unicode
text = get_unicode(text)
# noinspection PyCallByClass,PyTypeChecker,PyArgumentList
return QCoreApplication.translate(context, text)
def locale():
"""Get the name of the currently active locale.
    :returns: Name of the locale e.g. 'id'
    :rtype: str
"""
override_flag = QSettings().value(
'locale/overrideFlag', True, type=bool)
if override_flag:
locale_name = QSettings().value('locale/userLocale', 'en_US', type=str)
else:
# noinspection PyArgumentList
locale_name = QLocale.system().name()
# NOTES: we split the locale name because we need the first two
# character i.e. 'id', 'af, etc
locale_name = str(locale_name).split('_')[0]
return locale_name
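# A minimal usage sketch (assumes a QGIS/Qt environment; without installed
# translators, tr() simply returns the original string):
#
#     print(tr('Hello'))   # translated via the '@default' context if available
#     print(locale())      # e.g. 'id' or 'en'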
|
India Exim Bank has extended a line of credit of US$50mn to the government of Sudan, to support India’s export of equipment, goods and services to that country.
Under the line of credit to the government of Sudan, importers from Sudan are required to make an advance payment of 10% of the contract value to the Indian exporters, and the balance 90% of the contract value is reimbursed by India Exim Bank to the Indian exporters upfront, upon shipment of the goods. The credit period available to the importers, through the government of Sudan, is up to eight years, inclusive of a suitable moratorium.
India’s exports to Sudan amounted to US$105.20 million during the year 2002-2003. Machinery and instruments, transport equipment, drugs, pharmaceuticals and fine chemicals, manufactures of metals, and primary and semi-finished iron and steel were the principal items in India’s export basket to Sudan in 2002-03. Potential areas that Indian exporters can focus on are equipment for rural electrification and railways, transportation vehicles and equipment, drugs and pharmaceuticals, machinery and instruments, and textiles and related items. There also exists considerable potential for setting up joint ventures between Indian and Sudanese partners in sectors like textiles, pharmaceuticals and chemicals, information technology and the small scale industries sector.
Exim Bank has in place a number of Lines of Credit for promoting India’s exports to countries in Africa, Asia, Latin America, East Europe and Russia. |
r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <[email protected]>'
__date__ = '$Date: 2006/06/12 23:15:40 $'.split()[1].replace('/', '-')
__version__ = '$Revision: 1.30 $'.split()[1]
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
four possible forms: a similar string of hexadecimal digits, or a
string of 16 raw bytes as an argument named 'bytes', or a tuple of
six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a single
128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
def __init__(self, hex=None, bytes=None, fields=None, int=None,
version=None):
r"""Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the 'bytes' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the 'fields' argument, or a single 128-bit integer as the 'int'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of 'hex', 'bytes', 'fields', or 'int' must be given.
The 'version' argument is optional; if given, the resulting UUID
will have its variant and version number set according to RFC 4122,
overriding bits in the given 'hex', 'bytes', 'fields', or 'int'.
"""
if [hex, bytes, fields, int].count(None) != 3:
raise TypeError('need just one of hex, bytes, fields, or int')
if hex is not None:
hex = hex.replace('urn:', '').replace('uuid:', '')
hex = hex.strip('{}').replace('-', '')
if len(hex) != 32:
raise ValueError('badly formed hexadecimal UUID string')
int = long(hex, 16)
if bytes is not None:
if len(bytes) != 16:
raise ValueError('bytes is not a 16-char string')
int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
if fields is not None:
if len(fields) != 6:
raise ValueError('fields is not a 6-tuple')
(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node) = fields
if not 0 <= time_low < 1<<32L:
raise ValueError('field 1 out of range (need a 32-bit value)')
if not 0 <= time_mid < 1<<16L:
raise ValueError('field 2 out of range (need a 16-bit value)')
if not 0 <= time_hi_version < 1<<16L:
raise ValueError('field 3 out of range (need a 16-bit value)')
if not 0 <= clock_seq_hi_variant < 1<<8L:
raise ValueError('field 4 out of range (need an 8-bit value)')
if not 0 <= clock_seq_low < 1<<8L:
raise ValueError('field 5 out of range (need an 8-bit value)')
if not 0 <= node < 1<<48L:
raise ValueError('field 6 out of range (need a 48-bit value)')
clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
int = ((time_low << 96L) | (time_mid << 80L) |
(time_hi_version << 64L) | (clock_seq << 48L) | node)
if int is not None:
if not 0 <= int < 1<<128L:
raise ValueError('int is out of range (need a 128-bit value)')
if version is not None:
if not 1 <= version <= 5:
raise ValueError('illegal version number')
# Set the variant to RFC 4122.
int &= ~(0xc000 << 48L)
int |= 0x8000 << 48L
# Set the version number.
int &= ~(0xf000 << 64L)
int |= version << 76L
self.__dict__['int'] = int
def __cmp__(self, other):
if isinstance(other, UUID):
return cmp(self.int, other.int)
return NotImplemented
def __hash__(self):
return hash(self.int)
def __int__(self):
return self.int
def __repr__(self):
return 'UUID(%r)' % str(self)
def __setattr__(self, name, value):
raise TypeError('UUID objects are immutable')
def __str__(self):
hex = '%032x' % self.int
return '%s-%s-%s-%s-%s' % (
hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
def get_bytes(self):
bytes = ''
for shift in range(0, 128, 8):
bytes = chr((self.int >> shift) & 0xff) + bytes
return bytes
bytes = property(get_bytes)
def get_fields(self):
return (self.time_low, self.time_mid, self.time_hi_version,
self.clock_seq_hi_variant, self.clock_seq_low, self.node)
fields = property(get_fields)
def get_time_low(self):
return self.int >> 96L
time_low = property(get_time_low)
def get_time_mid(self):
return (self.int >> 80L) & 0xffff
time_mid = property(get_time_mid)
def get_time_hi_version(self):
return (self.int >> 64L) & 0xffff
time_hi_version = property(get_time_hi_version)
def get_clock_seq_hi_variant(self):
return (self.int >> 56L) & 0xff
clock_seq_hi_variant = property(get_clock_seq_hi_variant)
def get_clock_seq_low(self):
return (self.int >> 48L) & 0xff
clock_seq_low = property(get_clock_seq_low)
def get_time(self):
return (((self.time_hi_version & 0x0fffL) << 48L) |
(self.time_mid << 32L) | self.time_low)
time = property(get_time)
def get_clock_seq(self):
return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
self.clock_seq_low)
clock_seq = property(get_clock_seq)
def get_node(self):
return self.int & 0xffffffffffff
node = property(get_node)
def get_hex(self):
return '%032x' % self.int
hex = property(get_hex)
def get_urn(self):
return 'urn:uuid:' + str(self)
urn = property(get_urn)
def get_variant(self):
if not self.int & (0x8000 << 48L):
return RESERVED_NCS
elif not self.int & (0x4000 << 48L):
return RFC_4122
elif not self.int & (0x2000 << 48L):
return RESERVED_MICROSOFT
else:
return RESERVED_FUTURE
variant = property(get_variant)
def get_version(self):
# The version bits are only meaningful for RFC 4122 UUIDs.
if self.variant == RFC_4122:
return int((self.int >> 76L) & 0xf)
version = property(get_version)
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
import os
for dir in ['', '/sbin/', '/usr/sbin']:
try:
pipe = os.popen(os.path.join(dir, 'ifconfig'))
except IOError:
continue
for line in pipe:
words = line.lower().split()
for i in range(len(words)):
if words[i] in ['hwaddr', 'ether']:
return int(words[i + 1].replace(':', ''), 16)
def _ipconfig_getnode():
"""Get the hardware address on Windows by running ipconfig.exe."""
import os, re
dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
try:
import ctypes
buffer = ctypes.create_string_buffer(300)
ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
dirs.insert(0, buffer.value.decode('mbcs'))
except:
pass
for dir in dirs:
try:
pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
except IOError:
continue
for line in pipe:
value = line.split(':')[-1].strip().lower()
if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16)
def _netbios_getnode():
"""Get the hardware address on Windows using NetBIOS calls.
See http://support.microsoft.com/kb/118623 for details."""
import win32wnet, netbios
ncb = netbios.NCB()
ncb.Command = netbios.NCBENUM
ncb.Buffer = adapters = netbios.LANA_ENUM()
adapters._pack()
if win32wnet.Netbios(ncb) != 0:
return
adapters._unpack()
for i in range(adapters.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = ord(adapters.lana[i])
if win32wnet.Netbios(ncb) != 0:
continue
ncb.Reset()
ncb.Command = netbios.NCBASTAT
ncb.Lana_num = ord(adapters.lana[i])
ncb.Callname = '*'.ljust(16)
ncb.Buffer = status = netbios.ADAPTER_STATUS()
if win32wnet.Netbios(ncb) != 0:
continue
status._unpack()
bytes = map(ord, status.adapter_address)
return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
(bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
import ctypes, ctypes.util
_buffer = ctypes.create_string_buffer(16)
# The uuid_generate_* routines are provided by libuuid on at least
# Linux and FreeBSD, and provided by libc on Mac OS X.
for libname in ['uuid', 'c']:
try:
lib = ctypes.CDLL(ctypes.util.find_library(libname))
except:
continue
if hasattr(lib, 'uuid_generate_random'):
_uuid_generate_random = lib.uuid_generate_random
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
# On Windows prior to 2000, UuidCreate gives a UUID containing the
# hardware address. On Windows 2000 and later, UuidCreate makes a
# random UUID and UuidCreateSequential gives a UUID containing the
# hardware address. These routines are provided by the RPC runtime.
try:
lib = ctypes.windll.rpcrt4
except:
lib = None
_UuidCreate = getattr(lib, 'UuidCreateSequential',
getattr(lib, 'UuidCreate', None))
except:
pass
def _unixdll_getnode():
"""Get the hardware address on Unix using ctypes."""
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw).node
def _windll_getnode():
"""Get the hardware address on Windows using ctypes."""
if _UuidCreate(_buffer) == 0:
return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.randrange(0, 1<<48L) | 0x010000000000L
_node = None
def getnode():
"""Get the hardware address as a 48-bit integer. The first time this
runs, it may launch a separate program, which could be quite slow. If
all attempts to obtain the hardware address fail, we choose a random
48-bit number with its eighth bit set to 1 as recommended in RFC 4122."""
global _node
if _node is not None:
return _node
import sys
if sys.platform == 'win32':
getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
else:
getters = [_unixdll_getnode, _ifconfig_getnode]
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
if _node is not None:
return _node
def uuid1(node=None, clock_seq=None):
"""Generate a UUID from a host ID, sequence number, and the current time.
If 'node' is not given, getnode() is used to obtain the hardware
address. If 'clock_seq' is given, it is used as the sequence number;
otherwise a random 14-bit sequence number is chosen."""
# When the system provides a version-1 UUID generator, use it (but don't
# use UuidCreate here because its UUIDs don't conform to RFC 4122).
if _uuid_generate_time and node is clock_seq is None:
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw)
import time
nanoseconds = int(time.time() * 1e9)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
timestamp = int(nanoseconds/100) + 0x01b21dd213814000L
if clock_seq is None:
import random
clock_seq = random.randrange(1<<14L) # instead of stable storage
time_low = timestamp & 0xffffffffL
time_mid = (timestamp >> 32L) & 0xffffL
time_hi_version = (timestamp >> 48L) & 0x0fffL
clock_seq_low = clock_seq & 0xffL
clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
if node is None:
node = getnode()
return UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
import md5
hash = md5.md5(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=3)
def uuid4():
"""Generate a random UUID."""
# When the system provides a version-4 UUID generator, use it.
if _uuid_generate_random:
_uuid_generate_random(_buffer)
return UUID(bytes=_buffer.raw)
# Otherwise, get randomness from urandom or the 'random' module.
try:
import os
return UUID(bytes=os.urandom(16), version=4)
except:
import random
bytes = [chr(random.randrange(256)) for i in range(16)]
return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
import sha
hash = sha.sha(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
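# A quick self-check sketch (Python 2, matching the code above): the RFC 4122
# variant/version bits set in UUID.__init__ show up as properties, and the
# name-based constructors are deterministic.
if __name__ == '__main__':
    u = uuid4()
    assert u.variant == RFC_4122 and u.version == 4
    assert uuid3(NAMESPACE_DNS, 'python.org') == uuid3(NAMESPACE_DNS, 'python.org')
    print(str(u))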
|
This site is situated along the left (northwest) wall of the bay, in front of the cliff face. The site got its name from a long chain and anchor that disappear into the depths. Entry is not easy: a long hike is required to the starting point where you can enter the water, and even then it takes about 30 minutes before the chain and anchor can be reached.
If the pace is too slow, you will not reach the chain and anchor before your tank is down to half.
There is a lush underwater garden of sea fans and gorgonians at a shallow depth, and it is certainly worthwhile to spend some time in this garden. This dive site is more easily reached by boat, but it is possible to reach it from the shore.
The dive site is on the western side of Caracas Bay. If diving from the shore take a long walk along the cliff almost as far as possible to reach the sandy patch where you can enter the water. From there swim along the drop off (very steep, almost vertical) for about 25 minutes before you reach the chain and anchor. The chain is nicely covered with sponges and disappears out of sight into the depth. Swim and follow the chain upward till about 12 meters. At that depth start your return leg of the dive.
At that depth you will see a lot of soft coral which gives the impression of a large garden with plants swaying in the wind. Keep monitoring the air left in your tank.
Dense coral growth covers the chain and anchor, which seem to go on forever. (A CTB-approved dive guide is recommended.)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import pickle
from PyQt5 import QtWidgets, QtCore, QtNetwork
class Assistant(QtWidgets.QApplication):
timeout = 1000
def __init__(self, argv):
QtWidgets.QApplication.__init__(self, argv)
self.socket_filename = os.path.expanduser("~/.stk_assistant_share_memory")
self.shared_memory = QtCore.QSharedMemory()
self.shared_memory.setKey(self.socket_filename)
self.is_running = self.shared_memory.attach()
self.process = None
if not self.is_running:
if not self.shared_memory.create(1):
return
self.process = QtCore.QProcess()
self.process.finished.connect(self.quit)
self.server = QtNetwork.QLocalServer(self)
self.server.newConnection.connect(self.receiveMessage)
self.server.listen(self.socket_filename)
def start(self):
if not self.process:
self.process = QtCore.QProcess()
if self.process.state() != QtCore.QProcess.Running:
app = "assistant "
args = "-collectionFile resources/help/stk_collection.qhc -enableRemoteControl"
self.process.start(app + args)
if len(self.arguments()) == 2 and \
self.arguments()[1] in ['stk', 'gensec', 'genrep', 'genvis']:
self.handleMessage(self.arguments()[1])
return True
def sendMessage(self, message):
if not self.is_running:
raise Exception("Client cannot connect to the server. Not running.")
socket = QtNetwork.QLocalSocket(self)
socket.connectToServer(self.socket_filename, QtCore.QIODevice.WriteOnly)
if not socket.waitForConnected(self.timeout):
raise Exception(str(socket.errorString()))
socket.write(pickle.dumps(message))
if not socket.waitForBytesWritten(self.timeout):
raise Exception(str(socket.errorString()))
socket.disconnectFromServer()
def receiveMessage(self):
socket = self.server.nextPendingConnection()
if not socket.waitForReadyRead(self.timeout):
return
byte_array = socket.readAll()
self.handleMessage(pickle.loads(byte_array))
def handleMessage(self, content):
msg = QtCore.QByteArray()
msg.append('show index;')
msg.append('activateKeyword ' + content + ';')
msg.append('setSource ' + 'qthelp://com.sequence-toolkit.help-assistant/doc/html/' + content + '.html\n')
self.process.write(msg)
def quit(self):
self.shared_memory.detach()
os.remove(self.socket_filename)
if self.process is not None:
if self.process.state() == QtCore.QProcess.Running:
self.process.terminate()
self.process.waitForFinished(3000)
self.process.deleteLater()
QtWidgets.QApplication.quit()
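# Minimal launch sketch (assumes the Qt Assistant binary and the
# resources/help/stk_collection.qhc collection are available):
if __name__ == '__main__':
    import sys
    app = Assistant(sys.argv)
    if app.is_running:
        # Another instance already owns the shared memory: hand the
        # requested topic over to it and exit.
        app.sendMessage(sys.argv[1] if len(sys.argv) > 1 else 'stk')
    elif app.start():
        sys.exit(app.exec_())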
|
Welcome to Encap Cleaning Services. Our services include: - Carpet cleaning and deodorizing - Rug cleaning - Leather cleaning - Mattress cleaning - Upholstery cleaning - Tile and grout cleaning. With Encap Cleaning Services, you can expect outstanding certified technicians at your service. We use superior equipment with the latest technology available, namely encapsulation technology. Our goal is to provide the best cleaning services in Australia at an affordable price.
We are your one stop for all carpet & upholstery cleaning & protection services, including leather, rugs, mattresses & stain treatments.
If you are looking for various cleaning services in Perth, contact us for reliable services.
We do carpet and upholstery cleaning all over Perth and Mandurah 7 days a week with same day service. We clean carpets, seats, sofas, mats, rugs, mattresses, blinds and much more at a very fair price.
Offering Both Dry Cleaning or Steam Cleaning of Carpets from Yanchep to Mandurah. Fully Trained Technicians, Satisfaction Guaranteed, You Can't Go Wrong with Perth Carpet Cleaning. Call Us today.
Mattress2Clean provides bed & mattress cleaning & stain removal solutions in Perth, Australia. Professional mattress cleaners using a no-steam process to kill all dust mites in your mattress.
Drymaster Carpet Cleaning Perth is a professional carpet cleaning service that provides a ton of cleaning services including dry cleaning and steam cleaning.
Professional mattress sanitisation to reduce severity and frequency of asthma and allergies.
Jim's Carpet Cleaners Perth: Carpet cleaning, mattress cleaning, upholstery cleaning and stain removal.
Carpet Cleaning, End of Lease Cleaning, Tile and Grout Cleaning, Mattress and Upholstery cleaning, Oven and Window cleaning.
Jim’s Cleaning- cleaners for all your cleaning needs: Domestic cleaners, home cleaners, commercial cleaners, office cleaners.
We are a small business that believes in personal service. Friendly, reliable and prompt service at all times. Just like a friend of the family.
We are an honest, efficient and professional family-owned and operated business offering affordable home and window cleaning services to busy households.
Professional Carpet and Upholstery Cleaning Service in Domestic, Commercial and Auto applications. Carpet, Lounge, and Mattress, tile and vinyl cleaning. Call for a free quote. Competitive prices and prompt service.
We Make Mattresses Healthy Again. Great service and low prices.
Chem-Dry Pristine provides green certified cleaning services to residents and businesses in the municipalities of Cambridge, Joondalup, Stirling, Wanneroo and Vincent.
Dry carpet cleaners based in Perth WA. We specialise in most things cleaning related, including mattress cleaning, upholstery cleaning, pet stain removals and much more. Contact us today for a quote.
Amazing Clean Rockingham Curtains and Blinds clean Household Grime, Mould, Nicotine and Dust from your blinds at curtains gently yet thoroughly leaving your blinds and curtains fresh and renewed.
Mandurah, Baldivis and Rockingham areas. Professional Deep Steam Carpet Cleaning, Hi-Tech Upholstery Cleaning, Tile & Grout Cleaning. Very competitive rates, Police Clearance and Fully Insured.
Get the best results with Perth Carpet Cleaning servicing Mandurah and surrounding areas. Satisfaction guaranteed. Fully trained technicians.
If you would like a deep clean with the benefit of a dry clean, use Magic Dry. Cleanses, sanitises and deodorises. If Magic Dry cannot remove a stain, then nobody can. Simply the best.
We offer both carpet dry cleaning or carpet steam cleaning in and around the Mandurah area. Fully trained technicians, satisfaction guaranteed, fully insured, reliable, prompt service.
Country Life Carpet Care are professionally trained and certified carpet cleaners who specialise in steam cleaning carpets, tile and grout cleaning and grout sealing, upholstery cleaning (fabric and leather), and mattress cleaning. Our specialised tile cleaning system cleans tiles and grout with amazing results. We can also seal your grout, which extends the life of your tile cleaning and makes the tiles easier to clean. Steam cleaning gives superior results to other methods by cleaning deep into the carpet pile, removing dirt and stains. Country Life Carpet Care is an IICRC accredited firm (Institute of Inspection, Cleaning and Restoration Certification) and all technicians are trained, certified and police checked. Country Life Carpet Care is owned and operated by Lyn and Gavin, servicing Bunbury and surrounding areas. Regular professional cleaning of your tiles and carpet is as important as putting oil in your car if you want to protect the investment you have made. Yearly carpet and tile cleaning can increase the life of your floor surfaces and save you money in the long run. Call for a free quote today. Satisfaction guaranteed. Call us at Country Life Carpet Care for great service and professional tile, grout and carpet cleaning at a reasonable price.
Your local independent Carpet, Upholstery and Tile & Grout steam cleaning experts. |
import dask
import numpy as np
from cesium.features import generate_dask_graph
def generate_features(t, m, e, features_to_use):
"""Utility function that generates features from a dask DAG."""
graph = generate_dask_graph(t, m, e)
values = dask.get(graph, features_to_use)
return dict(zip(features_to_use, values))
def irregular_random(seed=0, size=50):
"""Generate random test data at irregularly-sampled times."""
state = np.random.RandomState(seed)
times = np.sort(state.uniform(0, 10, size))
values = state.normal(1, 1, size)
errors = state.exponential(0.1, size)
return times, values, errors
def regular_periodic(freqs, amplitudes, phase, size=501):
"""Generate periodic test data sampled at regular intervals: superposition
of multiple sine waves, each with multiple harmonics.
"""
times = np.linspace(0, 2, size)
values = np.zeros(size)
for (i,j), amplitude in np.ndenumerate(amplitudes):
values += amplitude * np.sin(2*np.pi*times*freqs[i]*(j+1) + phase)
errors = 1e-4*np.ones(size)
return times, values, errors
def irregular_periodic(freqs, amplitudes, phase, seed=0, size=501):
"""Generate periodic test data sampled at randomly-spaced intervals:
superposition of multiple sine waves, each with multiple harmonics.
"""
state = np.random.RandomState(seed)
times = np.sort(state.uniform(0, 2, size))
values = np.zeros(size)
for i in range(freqs.shape[0]):
for j in range(amplitudes.shape[1]):
values += amplitudes[i,j] * np.sin(2*np.pi*times*freqs[i]*(j+1)+phase)
errors = state.exponential(1e-2, size)
return times, values, errors
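# Minimal usage sketch (the feature names below are illustrative; the set of
# available names depends on the installed cesium version):
if __name__ == '__main__':
    t, m, e = irregular_random()
    print(generate_features(t, m, e, ['amplitude', 'max_slope']))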
|
If you are thinking of moving to Doddington or just want to know what the area is like, the statistics on this page should give you a good introduction. They cover a range of socio-economic factors so you can compare Doddington with figures for Shropshire and for the nation as a whole. These statistics can tell you whether Doddington is an economically deprived area and how hard it might be to get a job.
These statistics are for the highest level education obtained by the residents of Doddington and are from the UK Census of 2011.
The respondents of the 2011 Census were asked to rate their health. These are the results for Doddington. The percentage of residents in Doddington rating their health as 'very good' is less than the national average.
These figures on the claiming of benefits in Doddington come from the Department for Work & Pensions and are dated . They can often be a good indicator of the prosperity of the town and of how hard it might be to find employment in the area. The rate of claiming any benefit (which includes in-work benefits) is more than 10% lower in Doddington than the national average, suggesting higher-than-average salaries in the area.
The population of Doddington as a whole is older than the national average, making Doddington an older persons' location.
from typing import List


class Solution:
    def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        # First, build a mapping from (character, position) to the words in
        # wordList that have that character at that position
word_dict = {}
wordListSet = set(wordList)
if endWord not in wordListSet:
return []
for val in wordList:
for index, j in enumerate(val):
word_dict.setdefault(j, {})
word_dict[j].setdefault(index, set())
word_dict[j][index].add(val)
convert_table = {}
def build_table(start):
convert = set()
            # Try replacing each character of the word in turn
for index0, i in enumerate(start):
                # Remove the current character and find the words the remaining characters can still spell
ret = wordListSet
for index, j in enumerate(start):
if index == index0:
continue
                    # If any of the remaining characters is missing from the dict, this pattern cannot match anything
if j not in word_dict:
ret = set()
break
if index not in word_dict[j]:
ret = set()
break
                    # Keep only the conversions that remain possible
ret = ret & word_dict[j][index]
convert |= ret
if start in convert:
convert.remove(start)
return convert
for i in wordList:
convert_table[i] = build_table(i)
convert_table[beginWord] = build_table(beginWord)
        # Optimization (left unfinished): if the input word has two or more characters not in the dict, return [] immediately
unknown = 0
# for i in start:
# pass
best = []
best_len = len(wordList)
cache = {}
        # Exhaustive DFS from the start word, trying to replace one character at a time (with iterative deepening on the path length)
def solve(path, start, limit):
nonlocal best, best_len
# path = path[:]
# print('solve', path, start)
            # Termination: we matched the end word
if start == endWord:
# print(path)
return path
if len(path) > limit:
return
            # Prune: if the path is already as long as the best record, there is no point continuing
if len(path) >= best_len:
return
next_round = convert_table[start] - set(path)
for j in next_round:
path2 = path[:]
path2.append(j)
r = solve(path2, j, limit)
if r:
if len(r) < best_len:
best = [r]
best_len = len(r)
elif best_len == len(r):
if r not in best:
best.append(r)
for i in range(1, len(wordList) + 1):
print(i)
solve([beginWord], beginWord, i)
if best:
break
return best
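# Quick check with the classic word-ladder example (the debug prints from
# the iterative-deepening loop will also appear; sequence order may vary):
if __name__ == '__main__':
    s = Solution()
    print(s.findLadders("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]))
    # expected: the two 5-word ladders
    # hit -> hot -> dot -> dog -> cog and hit -> hot -> lot -> log -> cog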
|
Order online for takeout: 172. Shrimp Egg Young from Empire Village - Sturbridge. Serving the best Chinese in Sturbridge, MA. |
from marabunta import BaseRobot
from math import sin,cos,pi
class HeadingConsensusRobot(BaseRobot):
"""Robot model for heading consensus.
By iteratively calling the update() method,
this robot will communicate with the rest
of the swarm and align its heading to the
swarm's mean heading.
Obstacle avoidance (implemented in BaseRobot)
    will take precedence over consensus reaching.
"""
#def __init__(self, body, network):
# BaseRobot.__init__(self, body, network)
# return
def heading_target(self):
"""Get the other agent's state and
compute the mean heading. Note that
for periodic quantities such as the
heading, the mean is defined as
< x_i > = atan( sum_i sin(x_i)/sum_i cos(x_i) )
Returns a vector pointing to the
mean heading. If no agents are
detected, returns None.
"""
neis = self.get_agents().values()
if neis:
sint = sum( [sin(nei[2]) for nei in neis])
cost = sum( [cos(nei[2]) for nei in neis])
target = [cost, sint]
else:
target = None
return target
def move_to_target(self, target, deltat, v):
"""Align the robot to *target* and
move forward for *deltat* at a speed *v*.
"""
self.align(target)
self.move_forward(deltat, v)
return
def update(self, deltat, v=None):
"""Perform one step of the consensus
protocol. This is the main "behavior"
of the robot. It consists of 4 steps:
1. Broadcast its state.
2. Perform swarming. In practice,
this means computing the desired
target direction of motion.
(in this case, perform heading
consensus)
3. Correct the desired target
in order to avoid obstacles.
4. Move in the desired target direction.
"""
self.broadcast_state()
# Perform swarming
target = self.heading_target()
if not target:
h= self.body.get_heading()
target = [cos(h) ,sin(h)]
# Avoid obstacles
target = self.correct_target(target)
self.move_to_target(target, deltat, v)
return
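# Illustrative check of the circular-mean rule used in heading_target()
# (requires marabunta just to import this module; the check itself only
# uses math): for headings clustered around +/-pi the naive arithmetic
# mean is wrong, while atan2(sum(sin), sum(cos)) recovers ~pi.
if __name__ == '__main__':
    from math import atan2
    headings = [pi - 0.1, -pi + 0.1, pi]
    mean = atan2(sum(sin(h) for h in headings),
                 sum(cos(h) for h in headings))
    print(mean)  # ~3.14, whereas the naive arithmetic mean would be ~1.05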
|
According to Child Trends, the United States has been given a “C” for its education system, “averaging at about 74.7 percent across the nation in 2017.” That said, I’m not denying that we have improved considerably since the 1990s: the grade point average for high school graduates was 3.10 in 2009, .33 higher than the average GPA in the 1990s.
I strongly believe much more could be done to improve academic achievement and to help students learn. Grades could keep improving for Generation Z and the generations that follow, but there is an obstacle every student must face: distractions and the fear of failure brought on by anxiety.
Anxiety is a rising epidemic among young people, affecting them physiologically, emotionally, and physically. Some develop headaches, sweating, nausea, dizziness, anger, fear, helplessness, disappointment, and more. The most visible effects, however, according to the Anxiety and Depression Association of America, are a lack of concentration and a negative mindset that alters the functioning of the whole person: body, mind, and soul.
Though these symptoms are most associated with test taking, they can be triggered even while students prepare for an exam, and what provokes them is a lack of self-confidence. Students tend to struggle in their four core classes, Mathematics, English, Science, and Social Studies, which are mandatory for every student to graduate. But the most important assessments for determining a student’s qualification for acceptance into a college are the ACT, SAT, EOC, and general exams.
- Standardized tests bring unease to students, causing them to struggle and deflating grades across America.
Since state exams (the ACT, SAT, and EOC) are heavily promoted by high school administrations, students feel pressured to do well on them for college admission. That heavy emphasis diverts students’ attention from other important things, such as their grade point average. This leads to a decline in average grades and may eventually lower standards for students, unless we balance the attention given to tests and to grades.
According to FairTest.org, schools are “de-emphasizing the use of standardized tests by making admissions decisions about substantial numbers of applicants who recently graduated from U.S. high schools without using the SAT or ACT.” On that view, the SAT and ACT are not designed as indicators of student achievement, but rather as predictors of how well students will do in college. Even that is not entirely true, because FairTest.org, the National Center for Fair and Open Testing, also notes that the exams are designed to predict first-year college grades, yet are not very accurate at predicting grades beyond the first year, graduation rates, or how far a student will pursue a degree, nor are they suited for placement or advising purposes.
More importantly, the most striking question was: what should college admissions officers look for instead? Hiss says “GPA matters the most.” From this it can be inferred that if high school grades are poor, good testing skills are no guarantee of future success in college, whereas students with good grades and moderate testing skills do much better in college, something that can be explained by how the human mind works.
The human mind is a complex network of information, so multifaceted and fluid that finding a single tool to measure all the capabilities of American students is like a rocky road up a foggy pass. Even so, the SAT and ACT have some predictive value for some students, though there should be more measurement tools for assessing the likelihood of success at the college level.
Academic achievement rests on a student’s ability to study and review for tests with ease, but for some students test taking is a struggle even when they give their full effort. I, for example, am a student who suffers under these standards. I strongly believe tests have damaged students’ confidence in their ability to succeed because of the constant pressure to succeed. The thought of failure spreads through students’ minds without symptom or warning, giving rise to three serious mental health conditions: fear of failure, procrastination, and repetitive failure. These are early hints of anxiety, and if they are not addressed, they may spiral out of control.
Many people believe they are incapable of having a good lifestyle without being educated or going to college. The effect resembles a row of dominoes: test grades are gathered into an average score for a whole class or school, and that average falls as individual grades get lower and lower.
For example, senior Darcy Biermeyer, head of Journalism and salutatorian, said, “I dislike taking tests because people expect big things from me and that leads me to having performance anxiety. And if I fail, not only am I affected, but everyone else around me is too.” This is not just any student; she is an overachiever, well known to a variety of teachers and students.
This nationwide testing regime is creating a massive dilemma about the standards for our future generations. That brings me to my case: we must take action, not only as a community but as a country, to meet every student’s needs without embarrassing them. We should demand that school boards change their perspective on this ongoing threat to our future generations. If we change the way tests are used to evaluate us and our achievements, we may then see an increase in our schools’ grade scores, welcome more migrating children, increase funding, and open more jobs in CTE, degree jobs, non-degree employment, and more.
# Notice:
# ------
#
# Implements encrypting functions.
#
# Copyright (c) 2008, F S 3 Consulting Inc.
#
# Maintainer:
# Alec Joseph Rivera (agi<at>fs3.ph)
#
#
# Warning:
# -------
#
# This program as such is intended to be used by professional programmers
# who take the whole responsibility of assessing all potential consequences
# resulting from its eventual inadequacies and bugs. End users who are
# looking for a ready-to-use solution with commercial guarantees and
# support are strongly advised to contract a Free Software Service Company.
#
# This program is Free Software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the:
#
# Free Software Foundation, Inc.
# 59 Temple Place - Suite 330
# Boston, MA 02111-1307
# USA.
from random import seed, sample
from string import ascii_letters, digits
from osv import fields,osv
import pooler
from tools.translate import _
from service import security
magic_md5 = '$1$'
def gen_salt( length=8, symbols=ascii_letters + digits ):
seed()
return ''.join( sample( symbols, length ) )
# The encrypt_md5 is based on Mark Johnson's md5crypt.py, which in turn is
# based on FreeBSD src/lib/libcrypt/crypt.c (1.2) by Poul-Henning Kamp.
# Mark's port can be found in ActiveState ASPN Python Cookbook. Kudos to
# Poul and Mark. -agi
#
# Original license:
#
# * "THE BEER-WARE LICENSE" (Revision 42):
# *
# * <[email protected]> wrote this file. As long as you retain this
# * notice you can do whatever you want with this stuff. If we meet some
# * day, and you think this stuff is worth it, you can buy me a beer in
# * return.
# *
# * Poul-Henning Kamp
#TODO: py>=2.6: from hashlib import md5
import hashlib
def encrypt_md5( raw_pw, salt, magic=magic_md5 ):
raw_pw = raw_pw.encode('utf-8')
salt = salt.encode('utf-8')
hash = hashlib.md5()
hash.update( raw_pw + magic + salt )
st = hashlib.md5()
st.update( raw_pw + salt + raw_pw)
stretch = st.digest()
for i in range( 0, len( raw_pw ) ):
hash.update( stretch[i % 16] )
i = len( raw_pw )
while i:
if i & 1:
hash.update('\x00')
else:
hash.update( raw_pw[0] )
i >>= 1
saltedmd5 = hash.digest()
for i in range( 1000 ):
hash = hashlib.md5()
if i & 1:
hash.update( raw_pw )
else:
hash.update( saltedmd5 )
if i % 3:
hash.update( salt )
if i % 7:
hash.update( raw_pw )
if i & 1:
hash.update( saltedmd5 )
else:
hash.update( raw_pw )
saltedmd5 = hash.digest()
itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
rearranged = ''
for a, b, c in ((0, 6, 12), (1, 7, 13), (2, 8, 14), (3, 9, 15), (4, 10, 5)):
v = ord( saltedmd5[a] ) << 16 | ord( saltedmd5[b] ) << 8 | ord( saltedmd5[c] )
for i in range(4):
rearranged += itoa64[v & 0x3f]
v >>= 6
v = ord( saltedmd5[11] )
for i in range( 2 ):
rearranged += itoa64[v & 0x3f]
v >>= 6
return magic + salt + '$' + rearranged
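# Illustrative output (the digest part varies with the salt); the scheme is
# the classic md5crypt, so e.g. `openssl passwd -1 -salt ab12cd34 secret`
# should produce the same string:
#
#   >>> encrypt_md5('secret', 'ab12cd34')
#   '$1$ab12cd34$...'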
class users(osv.osv):
_name="res.users"
_inherit="res.users"
# agi - 022108
# Add handlers for 'input_pw' field.
def set_pw(self, cr, uid, id, name, value, args, context):
if not value:
raise osv.except_osv(_('Error'), _("Please specify the password !"))
obj = pooler.get_pool(cr.dbname).get('res.users')
if not hasattr(obj, "_salt_cache"):
obj._salt_cache = {}
salt = obj._salt_cache[id] = gen_salt()
encrypted = encrypt_md5(value, salt)
cr.execute('update res_users set password=%s where id=%s',
(encrypted.encode('utf-8'), int(id)))
cr.commit()
del value
def get_pw( self, cr, uid, ids, name, args, context ):
cr.execute('select id, password from res_users where id in %s', (tuple(map(int, ids)),))
stored_pws = cr.fetchall()
res = {}
for id, stored_pw in stored_pws:
res[id] = stored_pw
return res
_columns = {
# The column size could be smaller as it is meant to store a hash, but
# an existing column cannot be downsized; thus we use the original
# column size.
'password': fields.function(get_pw, fnct_inv=set_pw, type='char',
size=64, string='Password', invisible=True,
store=True),
}
def login(self, db, login, password):
if not password:
return False
if db is False:
raise RuntimeError("Cannot authenticate to False db!")
cr = None
try:
cr = pooler.get_db(db).cursor()
return self._login(cr, db, login, password)
except Exception:
import logging
logging.getLogger('netsvc').exception('Could not authenticate')
return Exception('Access Denied')
finally:
if cr is not None:
cr.close()
def _login(self, cr, db, login, password):
cr.execute( 'SELECT password, id FROM res_users WHERE login=%s AND active',
(login.encode('utf-8'),))
if cr.rowcount:
stored_pw, id = cr.fetchone()
else:
# Return early if no one has a login name like that.
return False
stored_pw = self.maybe_encrypt(cr, stored_pw, id)
if not stored_pw:
# means couldn't encrypt or user is not active!
return False
# Calculate an encrypted password from the user-provided
# password.
obj = pooler.get_pool(db).get('res.users')
if not hasattr(obj, "_salt_cache"):
obj._salt_cache = {}
salt = obj._salt_cache[id] = stored_pw[len(magic_md5):11]
encrypted_pw = encrypt_md5(password, salt)
# Check if the encrypted password matches against the one in the db.
cr.execute("""UPDATE res_users
SET date=now() AT TIME ZONE 'UTC'
WHERE id=%s AND password=%s AND active
RETURNING id""",
(int(id), encrypted_pw.encode('utf-8')))
res = cr.fetchone()
cr.commit()
if res:
return res[0]
else:
return False
def check(self, db, uid, passwd):
if not passwd:
# empty passwords disallowed for obvious security reasons
raise security.ExceptionNoTb('AccessDenied')
# Get a chance to hash all passwords in db before using the uid_cache.
obj = pooler.get_pool(db).get('res.users')
        if not hasattr(obj, "_salt_cache"):
            obj._salt_cache = {}
            # New salt cache: discard any previously cached credentials.
            self._uid_cache.get(db, {}).clear()
cached_pass = self._uid_cache.get(db, {}).get(uid)
if (cached_pass is not None) and cached_pass == passwd:
return True
cr = pooler.get_db(db).cursor()
try:
if uid not in self._salt_cache.get(db, {}):
# If we don't have cache, we have to repeat the procedure
# through the login function.
cr.execute( 'SELECT login FROM res_users WHERE id=%s', (uid,) )
stored_login = cr.fetchone()
if stored_login:
stored_login = stored_login[0]
res = self._login(cr, db, stored_login, passwd)
if not res:
raise security.ExceptionNoTb('AccessDenied')
else:
salt = self._salt_cache[db][uid]
cr.execute('SELECT COUNT(*) FROM res_users WHERE id=%s AND password=%s AND active',
(int(uid), encrypt_md5(passwd, salt)))
res = cr.fetchone()[0]
finally:
cr.close()
if not bool(res):
raise security.ExceptionNoTb('AccessDenied')
if res:
if self._uid_cache.has_key(db):
ulist = self._uid_cache[db]
ulist[uid] = passwd
else:
self._uid_cache[db] = {uid: passwd}
return bool(res)
def maybe_encrypt(self, cr, pw, id):
""" Return the password 'pw', making sure it is encrypted.
If the password 'pw' is not encrypted, then encrypt all active passwords
in the db. Returns the (possibly newly) encrypted password for 'id'.
"""
if not pw.startswith(magic_md5):
cr.execute("SELECT id, password FROM res_users " \
"WHERE active=true AND password NOT LIKE '$%'")
# Note that we skip all passwords like $.., in anticipation for
# more than md5 magic prefixes.
res = cr.fetchall()
for i, p in res:
encrypted = encrypt_md5(p, gen_salt())
cr.execute('UPDATE res_users SET password=%s where id=%s',
(encrypted, i))
if i == id:
encrypted_res = encrypted
cr.commit()
return encrypted_res
return pw
users()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
The Vlocity Health platform enables you to manage the formulary management process with providers, pharmacists, and members in the cloud. Our industry-specific solutions are 100% native and additive to Salesforce and we also provide an open integration framework to integrate 3rd-party systems. This way, you have a single platform to manage formularies, pharmacies, and medications with ease.
Vlocity Health manages drug substitutions and drug step therapy to offer members access to lower cost care at the same quality. Furthermore, providers can leverage the formulary lookup tool to submit digital drug pre-authorizations with full context of the member’s health history and benefit plan, ensuring member safety and utilization of lower-priced alternatives.
Vlocity Health guides customers through cost-effective drug decisions throughout the sales and service process. Whether you enable them to search for their medications during plan selection, submit a pre-authorization request, or alert them when formulary modifications have been made, your members are always empowered with resources and user-friendly tools to make informed decisions. |
from django.shortcuts import render, render_to_response
from django.http import HttpResponse
from django.template import Template, Context, RequestContext
from django.template.loader import get_template
from web.settings import BASE_DIR
import sys
sys.path.append(BASE_DIR + "/../")  # go one directory up from the web dir to reach src/
import os
from constants import *
from generate import Generate
import fileHandler
import timeFunc
from django.views.decorators.csrf import csrf_exempt
import simplejson
lines = {} #dictionary, with key as start_secs
def get_dict(labels):
"""
Given labels(output from the labels object in fileHandler.py), it creates a dictionary of the form {key:value}
Where,
key = start time in seconds = start_secs
value = line corresponding to that start time = [start, end, name, start_secs, end_secs]
Returns: Dictionary
"""
d = {}
for item in labels:
#Things are extracted based on labels format
name = item[2]
#We obtain the start and end time in seconds
start_secs = timeFunc.get_seconds(item[0])
end_secs = timeFunc.get_seconds(item[1])
item.append(start_secs)
item.append(end_secs)
#Create the dictionary
d[start_secs] = item
return d
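# For example, labels == [['0:00:05', '0:00:09', 'intro']] yields
# {5: ['0:00:05', '0:00:09', 'intro', 5, 9]}, assuming timeFunc.get_seconds
# parses 'h:mm:ss' strings into seconds.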
@csrf_exempt
def index(request):
"""
The url /output maps to this function.
Displays the video and the corresponding labels file on the browser.
This page is called in two ways:
normal way: When the user visits the site
Ajax way: When user makes some changes to the labels, the page is reloaded with the help of this function
"""
global lines
t = get_template('output/index.html')
    # If the page was requested by the user directly (not via ajax),
    # we have to read the labels file
    if not request.is_ajax():
#Page was requested by the user
labels = fileHandler.LabelsFile(infile=BASE_DIR + "/../" + WEB_LABELS).read_lables(skip=False)
lines = get_dict(labels)
#Since keys are assorted in a dict, we sort them.
keys = lines.keys()
keys.sort()
#Now we have start_secs in increasing order, store this value in values.
values = [lines[key] for key in keys]
html = t.render(Context({'video_path': WEB_VIDEO_NAME, 'item_list': values}))
return HttpResponse(html)
@csrf_exempt
def update(request):
"""
The url /output/update is mapped to this function.
This function is always called through ajax
    When the user edits any label in the browser, this function is called to reflect the changes in the "lines" dictionary
"""
global lines
#Obtain the start_secs of the label which the user just edited
start = int(request.POST.get(u'start'))
#Obtain the new text
text = str(request.POST.get(u'text'))
#Update the text
l = lines[start]
l[2] = text
lines.update({start:l})
return HttpResponse(simplejson.dumps({'server_response': '1' }))
@csrf_exempt
def save(request):
"""
The url /output/save/ is mapped to this function.
This function is called with the click of the "Save changes" button.
The function writes the "lines" dictionary back into the labels file and ends the program.
"""
global lines
labels = fileHandler.LabelsFile(outfile=BASE_DIR + "/../" + WEB_LABELS)
keys = lines.keys()
keys.sort()
lines_list = [lines[key] for key in keys]
for line in lines_list:
l = [line[i] for i in range(3)]
labels.write_labels(l)
return HttpResponse('Successfully updated :-)')
@csrf_exempt
def delete(request):
"""
The url /output/delete/ maps to this function.
This function is called by the click of the button '-'.
It is used to delete the label, intended to be deleted by the user.
When a label is deleted the following operations take place:
    - the end time of the label being deleted is written onto the end time of the label preceding it.
- the label to be deleted is removed from lines dictionary
"""
global lines
keys = lines.keys()
keys.sort()
start = int(request.POST.get(u'start_sec'))
end = int(request.POST.get(u'end_sec'))
    #Now we find the preceding label
for i in range(len(keys)):
if keys[i] == start:
break
#This will be the label, just above the label to be deleted
old_start = keys[i - 1]
#Performing the operations
#We assign the endtime of this to the previous start
lines[old_start][1] = timeFunc.get_time_string(end)
lines[old_start][-1] = end
del lines[start]
return HttpResponse(simplejson.dumps({'server_response': '1' }))
@csrf_exempt
def add(request):
"""
The url /output/add/ maps to this function.
The function is called by the click of the button '+'.
It is used to add another label.
When the function is called, these are the following operations performed.
- Obtain the new start time in seconds of the next label
    - Make the end time of the new label equal to the end time of the original label (where + was clicked)
    - Change the end time of the previous label (the label whose + was clicked) to the new start time
"""
global lines
actual_start = int(request.POST.get(u'actual_start'))
start = int(request.POST.get(u'start_sec'))
end = int(request.POST.get(u'end_sec'))
if start in lines.keys():
#If already in the dictionary don't update
return HttpResponse(simplejson.dumps({'server_response': '1' }))
#Now we add the value in lines as well
lines.update({start: [timeFunc.get_time_string(start), timeFunc.get_time_string(end), UNCLASSIFIED_CONTENT, start, end]})
#We change the "end" of the previous start
lines[actual_start][1] = timeFunc.get_time_string(start)
print len(lines[start]), len(lines[actual_start])
return HttpResponse(simplejson.dumps({'server_response': '1' }))
|
(Washington, D.C., August 9, 2018) – U.S. Secretary of Agriculture Sonny Perdue today announced further reorganization of the U.S. Department of Agriculture (USDA), intended to improve customer service, strengthen offices and programs, and save taxpayer dollars. The Economic Research Service (ERS), currently under USDA’s Research, Education, and Economics mission area, will realign once again with the Office of the Chief Economist (OCE) under the Office of the Secretary. Additionally, most employees of ERS and the National Institute of Food and Agriculture (NIFA) will be relocated outside of the National Capital Region. The movement of the employees outside of Washington, DC is expected to be completed by the end of 2019. |
#!/usr/bin/env python
import colorsys
import math
import time
from random import randint
import unicornhathd
def run(params):
width, height = unicornhathd.get_shape()
# buffer to contain candle "heat" data
candle = [0] * 256
# create a palette for mapping heat values onto colours
palette = [0] * 256
for i in range(0, 256):
h = i / 5.0
h /= 360.0
s = (1.0 / (math.sqrt(i / 50.0) + 0.01))
s = min(1.0, s)
s = max(0.0, s)
v = i / 200.0
if i < 60:
v = v / 2
v = min(1.0, v)
v = max(0.0, v)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
palette[i] = (int(r * 255.0), int(g * 255.0), int(b * 255.0))
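    # Roughly: palette[0] is black, low heat values map to dim saturated
    # reds, and values near 255 approach a bright, washed-out orange.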
def set_pixel(b, x, y, v):
b[y * 16 + x] = int(v)
def get_pixel(b, x, y):
# out of range sample lookup
if x < 0 or y < 0 or x >= 16 or y >= 16:
return 0
# subpixel sample lookup
if isinstance(x, float) and x < 15:
f = x - int(x)
return (b[int(y) * 16 + int(x)] * (1.0 - f)) + (b[int(y) * 16 + int(x) + 1] * (f))
# fixed pixel sample lookup
return b[int(y) * 16 + int(x)]
step = 0
try:
while True:
# step for waving animation, adds some randomness
step += randint(0, 15)
# clone the current candle
temp = candle[:]
# seed new heat
v = 500
set_pixel(candle, 6, 15, v)
set_pixel(candle, 7, 15, v)
set_pixel(candle, 8, 15, v)
set_pixel(candle, 9, 15, v)
set_pixel(candle, 6, 14, v)
set_pixel(candle, 7, 14, v)
set_pixel(candle, 8, 14, v)
set_pixel(candle, 9, 14, v)
# blur, wave, and shift up one step
for x in range(0, 16):
for y in range(0, 16):
s = math.sin((y / 30.0) + (step / 10.0)) * ((16 - y) / 20.0)
v = 0
for i in range(0, 3):
for j in range(0, 3):
#r = randint(0, 2) - 1
v += get_pixel(candle, x + i + s - 1, y + j)
v /= 10
set_pixel(temp, x, y, v)
candle = temp
# copy candle into UHHD with palette
for x in range(0, 16):
for y in range(0, 16):
r, g, b = palette[max(0, min(255, get_pixel(candle, x, y)))]
unicornhathd.set_pixel(x, y, r, g, b)
unicornhathd.show()
except KeyboardInterrupt:
unicornhathd.off()
|
Taken in 1988 from the flight deck of USS America (CV-66), the photo in this post shows F-14 Tomcat driver Capt. Dale “Snort” Snodgrass performing his (in)famous super-low banana pass.
And, as told by Sierra Hotel Aeronautics, no, Snort was not grounded.
Snort is not your typical ex-naval aviator; he is closer to a modern-day, Chuck Yeager-like prototypical fighter pilot. According to Lou Drendel’s book Tomcat: The Grumman F-14, he is the high-time F-14 Tomcat pilot (with over 4,800 hours in the type) and a former Commander of Fighter Wing, U.S. Atlantic Fleet (FITWINGLANT), the home of all Navy F-14 squadrons when they were not at sea. While still commander of FITWINGLANT, he devised and flew a formation aerobatic routine with a World War II-vintage Grumman F7F Tigercat piloted by John Ellis. Given the widely varied performance envelopes of the two Grumman fighters, it was one of the most impressive displays of aeronautical skill imaginable. One of the least heralded, but perhaps most important, accomplishments of his career was his oversight of the conversion of the Tomcat from a fleet defense interceptor to a deadly smart bomber.
Captain Snodgrass is an internationally recognized Air Show Pilot. Having flown F-14 demos at airshows for 14 years, he has additionally qualified in the F-86 Sabre, P-51 Mustang, F4U Corsair, T-6/SNJ Texan, MiG-17/21, A-4 Skyhawk and F-5 Tiger.
Snort currently flies as Lead Solo on the Black Diamond Jet Team supporting the Make-A-Wish Foundation. He also serves as Draken International’s Chief Pilot, Director of Deployed Operations and Congressional Liaison.
However, that shot off the America is very widely used, and many people initially assume it is either an edited photo or a reckless maneuver.
Here’s the video of Snort’s legendary super-low banana pass (you can find a slightly better version of the video here).
from .env import env
from .ir import *
from logging import getLogger
logger = getLogger(__name__)
class DeadCodeEliminator(object):
def process(self, scope):
if scope.is_namespace() or scope.is_class():
return
usedef = scope.usedef
for blk in scope.traverse_blocks():
dead_stms = []
for stm in blk.stms:
if stm.is_a([MOVE, PHIBase]):
if stm.is_a(MOVE) and stm.src.is_a([TEMP, ATTR]) and stm.src.symbol().is_param():
continue
if stm.is_a(MOVE) and stm.src.is_a(CALL):
continue
defvars = usedef.get_vars_defined_at(stm)
for var in defvars:
if not var.is_a(TEMP):
break
if stm.block.path_exp.is_a([TEMP, ATTR]) and stm.block.path_exp.symbol() is var.symbol():
break
uses = usedef.get_stms_using(var.symbol())
if uses:
break
else:
dead_stms.append(stm)
for stm in dead_stms:
blk.stms.remove(stm)
logger.debug('removed dead code: ' + str(stm))
if stm.is_a(MOVE):
var = stm.dst
elif stm.is_a(PHIBase):
var = stm.var
if var.is_a([TEMP, ATTR]) and var.symbol().typ.is_seq():
memnode = var.symbol().typ.get_memnode()
                    env.memref_graph.remove_node(memnode)
|
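A toy, self-contained sketch of the same use-def idea on straight-line Python assignments (illustrative only: the pass above operates on its compiler's own IR, and unlike this sketch it also guards against side-effecting sources such as CALL):
import ast

def eliminate_dead(lines):
    """Drop assignments whose target is never read by any statement."""
    stmts = [ast.parse(line).body[0] for line in lines]
    used = set()
    for s in stmts:
        for node in ast.walk(s):
            if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load):
                used.add(node.id)
    kept = []
    for line, s in zip(lines, stmts):
        dead = (isinstance(s, ast.Assign) and
                all(isinstance(t, ast.Name) and t.id not in used
                    for t in s.targets))
        if not dead:
            kept.append(line)
    return kept

print(eliminate_dead(["a = 1", "unused = a + 2", "b = a + 1", "print(b)"]))
# -> ['a = 1', 'b = a + 1', 'print(b)']
|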
The highly versatile BU-65570M1 card is designed to test and simulate MIL-STD-1553 based systems, concurrently simulating a bus controller and up to 32 remote terminals with a triggerable bus monitor. The card utilizes a fully integrated Type II PCMCIA assembly that interfaces via a simple stub cable rather than an external adapter. The BU-65570M1 also features comprehensive error injection, detection, and monitor filtering.
BU-65570M1 software is a highly intuitive and powerful Windows application for quick and easy setup of emulation and monitoring. A menu-driven and customizable source code generation wizard eliminates the need for user programming after BC messaging, RT data tables, and other basic bus elements have been configured. Generated code is based on the tester/simulator RTL function set and may be added to user software or compiled as a stand-alone application. The card comes with 'C', Visual Basic, and LabVIEW programming libraries. Optional dataMARS software provides real-time acquisition and display of 1553 monitored data, and dataSIMS adds simulation capability.
The BU-65570M1 is suited to a wide variety of applications. These include portable test equipment, flight line test and diagnostic, demonstration, software development, and system integration and debugging. |
#!/usr/bin/env python
from __future__ import print_function
import csv
import os
import threading
class OptimBase(object):
def __init__(self, executable, fixed_args, log_file):
self.executable = executable
self.fixed_args = fixed_args
self.log_file = log_file
        assert not os.path.isfile(self.log_file), '%s exists. Choose a different name' % self.log_file
self.best_result = 100
self.lock = threading.Lock()
self.f_eval_count = 0
self.hyper_map = None
self.dim = 0
self.func = []
self.init_param_list()
with open(self.log_file, 'w') as fhandler:
            header = ['bestScore', 'score', 'nb_eval', 'fname'] + list(self.hyper_map.keys())
csv.writer(fhandler).writerow(header)
def init_param_list(self):
raise NotImplementedError()
def param_vec2dict(self, params):
return {k: self.func[v](params[v]) for k, v in self.hyper_map.items()}
def log(self, result, nb_eval, fname, var_args):
row = [self.best_result, result, nb_eval, fname]
row += [var_args[k] for k in self.hyper_map.keys()]
with open(self.log_file, 'a') as fhandler:
csv.writer(fhandler).writerow(row)
def execute(self, var_args):
raise NotImplementedError()
|
I purchased my Acer Aspire 5920G last year complete with all bells and whistles but unfortunately also complete with Vista Home - Useless for me.
I've since loaded Vista Business and reloaded all the provided driver files but when loading the BTW (bluetooth) setup.exe application file the software goes through preparing to install then stops and tells me "No Bluetooth Device was detected. Please make sure that your Bluetooth device is plugged in properly in order to continue installation."
Is it just me or does this laptop not come with bluetooth?
Any help/advice would be appreciated.
Make sure the Bluetooth is turned on. On my Acer Ferrari it's a switch in the front.
Thanks for the advice. No buttons or switches I'm afraid. Just the shiny black square of smooth plastic in the front near the earphone jacks that made me assume that it came with Bluetooth hardware. I'll try plugging in the Belkin USB tonight and loading the drivers and see what it does.
It might be worth checking the acer website. I think my acer at home uses an acer application to control the bluetooth operation.
The website also has the specs for each model within the 5920g range.
Also I don't know if the hardware is controlled in the BIOS. My last Acer advertised IR but no IR was detected. I found it had been switched off in the BIOS one day when I was exploring around. Once it was switched on in the BIOS, XP recognised it straight away. It might be the same for the Bluetooth.
Try <windows key> + "X" to bring up the Windows Mobility Centre (or start it from the Control Panel).
On that screen you should see the status of the Bluetooth module towards the bottom of the screen. |