code | docstring
---|---|
def execute(self, transition):
"""
Queue a transition for execution.
:param transition: The transition
"""
self._transitions.append(transition)
if self._thread is None or not self._thread.isAlive():
self._thread = threading.Thread(target=self._transition_loop)
self._thread.setDaemon(True)
self._thread.start() | Queue a transition for execution.
:param transition: The transition |
def _isinstance(self, model, raise_error=True):
"""Checks if the specified model instance matches the class model.
By default this method will raise a `ValueError` if the model is not of
expected type.
Args:
model (Model) : The instance to be type checked
raise_error (bool) : Flag to specify whether to raise error on
type check failure
Raises:
ValueError: If `model` is not an instance of the respective Model
class
"""
rv = isinstance(model, self.__model__)
if not rv and raise_error:
raise ValueError('%s is not of type %s' % (model, self.__model__))
return rv | Checks if the specified model instance matches the class model.
By default this method will raise a `ValueError` if the model is not of
expected type.
Args:
model (Model) : The instance to be type checked
raise_error (bool) : Flag to specify whether to raise error on
type check failure
Raises:
ValueError: If `model` is not an instance of the respective Model
class |
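A minimal usage sketch of the pattern above, assuming a service base class that sets `__model__` to the expected model class; the `Model`, `User`, `Service`, and `UserService` names here are hypothetical, not from the source:
```python
# Hypothetical sketch (names not from the source): a service base class
# whose __model__ attribute drives the _isinstance() type check above.
class Model:
    pass

class User(Model):
    pass

class Service:
    __model__ = Model

    def _isinstance(self, model, raise_error=True):
        rv = isinstance(model, self.__model__)
        if not rv and raise_error:
            raise ValueError('%s is not of type %s' % (model, self.__model__))
        return rv

    def save(self, model):
        self._isinstance(model)  # raises ValueError for objects of foreign types
        return model

class UserService(Service):
    __model__ = User

print(UserService().save(User()))  # passes the type check and returns the instance
```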
def Search(self,key):
"""Search alert list by providing partial name, ID, or other key.
"""
results = []
for alert in self.alerts:
if alert.id.lower().find(key.lower()) != -1: results.append(alert)
elif alert.name.lower().find(key.lower()) != -1: results.append(alert)
return(results) | Search alert list by providing partial name, ID, or other key. |
def argval(self):
""" Returns the value of the arg (if any) or None.
If the arg is not an integer, an error will be triggered.
"""
if self.arg is None or any(x is None for x in self.arg):
return None
for x in self.arg:
if not isinstance(x, int):
raise InvalidArgError(self.arg)
return self.arg | Returns the value of the arg (if any) or None.
If the arg is not an integer, an error will be triggered. |
def top_n_list(lang, n, wordlist='best', ascii_only=False):
"""
Return a frequency list of length `n` in descending order of frequency.
This list contains words from `wordlist`, of the given language.
If `ascii_only`, then only ascii words are considered.
"""
results = []
for word in iter_wordlist(lang, wordlist):
if (not ascii_only) or max(word) <= '~':
results.append(word)
if len(results) >= n:
break
return results | Return a frequency list of length `n` in descending order of frequency.
This list contains words from `wordlist`, of the given language.
If `ascii_only`, then only ascii words are considered. |
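A hedged usage sketch, assuming this is the `wordfreq` package's `top_n_list` (the same package that defines `iter_wordlist`); the printed words are illustrative only:
```python
# Hedged usage sketch, assuming this is wordfreq's top_n_list
# (pip install wordfreq); the printed words are illustrative only.
from wordfreq import top_n_list

print(top_n_list('en', 5))                   # e.g. ['the', 'to', 'and', 'of', 'a']
print(top_n_list('en', 5, ascii_only=True))  # restrict the list to pure-ASCII words
```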
def upsert_entities(self, entities, sync=False):
"""
Upsert a list of entities to the database
:param entities: The entities to sync
:param sync: Do a sync instead of an upsert
"""
# Select the entities we are upserting for update to reduce deadlocks
if entities:
# Default select for update query when syncing all
select_for_update_query = (
'SELECT FROM {table_name} FOR NO KEY UPDATE'
).format(
table_name=Entity._meta.db_table
)
select_for_update_query_params = []
# If we are not syncing all, only select those we are updating
if not sync:
select_for_update_query = (
'SELECT FROM {table_name} WHERE (entity_type_id, entity_id) IN %s FOR NO KEY UPDATE'
).format(
table_name=Entity._meta.db_table
)
select_for_update_query_params = [tuple(
(entity.entity_type_id, entity.entity_id)
for entity in entities
)]
# Select the items for update
with connection.cursor() as cursor:
cursor.execute(select_for_update_query, select_for_update_query_params)
# If we are syncing run the sync logic
if sync:
upserted_entities = manager_utils.sync(
queryset=Entity.all_objects.all(),
model_objs=entities,
unique_fields=['entity_type_id', 'entity_id'],
update_fields=['entity_kind_id', 'entity_meta', 'display_name', 'is_active'],
return_upserts=True
)
# Otherwise we want to upsert our entities
else:
upserted_entities = manager_utils.bulk_upsert(
queryset=Entity.all_objects.extra(
where=['(entity_type_id, entity_id) IN %s'],
params=[tuple(
(entity.entity_type_id, entity.entity_id)
for entity in entities
)]
),
model_objs=entities,
unique_fields=['entity_type_id', 'entity_id'],
update_fields=['entity_kind_id', 'entity_meta', 'display_name', 'is_active'],
return_upserts=True
)
# Return the upserted entities
return upserted_entities | Upsert a list of entities to the database
:param entities: The entities to sync
:param sync: Do a sync instead of an upsert |
def combine(items, k=None):
"""
Create a matrix in which each row is a tuple containing one of the
solutions, or return only the k-th solution if `k` is given.
"""
length_items = len(items)
lengths = [len(i) for i in items]
length = reduce(lambda x, y: x * y, lengths)
repeats = [reduce(lambda x, y: x * y, lengths[i:])
for i in range(1, length_items)] + [1]
if k is not None:
k = k % length
# Python division by default is integer division (~ floor(a/b))
indices = [old_div((k % (lengths[i] * repeats[i])), repeats[i])
for i in range(length_items)]
return [items[i][indices[i]] for i in range(length_items)]
else:
matrix = []
for i, item in enumerate(items):
row = []
for subset in item:
row.extend([subset] * repeats[i])
times = old_div(length, len(row))
matrix.append(row * times)
# Transpose the matrix, i.e. return the columns instead of the rows
return list(zip(*matrix)) | Create a matrix in which each row is a tuple containing one of the
solutions, or return only the k-th solution if `k` is given. |
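To make the semantics concrete: each row of the returned matrix is one element of the Cartesian product of the input lists, and `combine(items, k)` selects the k-th such row. The sketch below uses `itertools.product` only as an independent reference for the same ordering; it does not call `combine` itself:
```python
# Independent illustration of the semantics: every row of combine(items) is one
# element of the Cartesian product of the input lists, and combine(items, k)
# selects the k-th such row. itertools.product is used here only as a reference.
from itertools import product

items = [['a', 'b'], [1, 2, 3]]
rows = list(product(*items))
print(rows)     # [('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2), ('b', 3)]
print(rows[4])  # ('b', 2) -- the element combine(items, 4) is expected to select
```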
def _validate(data_type, parent_path):
"""Implementation for the `validate` function."""
if isinstance(data_type, _CLASS_TYPES):
raise TypeError(
"The data type is expected to be an instance object, but got the "
"type '%s' instead." % (_format_type(data_type),))
base = _find_base_type(data_type)
if not base:
raise TypeError(
"Objects of type '%s' aren't supported as data types. Use any "
"type from %s instead."
% (_format_type(type(data_type)), _join_types(_ALL, "or ")))
name = getattr(data_type, 'name', None)
if not name:
name = type(data_type).__name__
full_path = '%s.%s' % (parent_path, name) if parent_path else name
# Generic checks for each attribute.
for check in _TYPE_ATTR_CHECKS[base]:
attribute = getattr(data_type, check.name)
if attribute is None:
if check.allow_none:
continue
else:
raise TypeError("The attribute '%s.%s' cannot be 'None'."
% (full_path, check.name))
if isinstance(check, _FieldInstanceCheck):
check_function = isinstance
elif isinstance(check, _FieldSubclassCheck):
if not isinstance(attribute, _CLASS_TYPES):
raise TypeError(
"The attribute '%s.%s' is expected to be a type "
"object%s." % (full_path, check.name,
" or 'None'" if check.allow_none else ''))
check_function = issubclass
if not check_function(attribute, check.type):
if isinstance(check, _FieldInstanceCheck):
glue_1 = "an instance object of type"
glue_2 = "not"
glue_3 = ""
type_name = _format_type(type(attribute))
elif isinstance(check, _FieldSubclassCheck):
glue_1 = "a subclass of"
glue_2 = "but got"
glue_3 = " instead"
type_name = _format_type(attribute)
raise TypeError(
"The attribute '%s.%s' is expected to be %s %s, %s '%s'%s."
% (full_path, check.name, glue_1,
_join_types(check.type, "or "), glue_2, type_name, glue_3))
# Additional and/or recursive checks for specific attributes.
if isinstance(data_type, Array):
_validate(data_type.element_type, full_path)
elif isinstance(data_type, Structure):
for field in data_type.fields:
if not isinstance(field, _SEQUENCE_TYPES):
raise TypeError(
"Each field from the attribute '%s.fields' is expected "
"to be an instance object of type %s, not '%s'."
% (full_path,
_join_types(_SEQUENCE_TYPES + (Field,), "or "),
_format_type(type(field))))
if len(field) not in _FIELD_REQUIRED_ARG_RANGE:
raise TypeError(
"Each field from the attribute '%s.fields' is expected "
"to be an instance object of type %s, and compatible with "
"the '%s' structure, but got %r instead."
% (full_path,
_join_types(_SEQUENCE_TYPES + (Field,), "or "),
_format_type(Field), field))
field = Field(*field)
if not isinstance(field.name, _STRING_TYPES):
raise TypeError(
"The first element of each field from the attribute "
"'%s.fields', that is the 'name' attribute, is expected "
"to be an instance object of type %s, not '%s'."
% (full_path, _join_types(_STRING_TYPES, "or "),
_format_type(type(field.name))))
if not isinstance(field.type, _ALL):
raise TypeError(
"The second element of each field from the attribute "
"'%s.fields', that is the 'type' attribute, is expected "
"to be an instance object of type %s, not '%s'."
% (full_path, _join_types(_ALL, "or "),
_format_type(type(field.type))))
if not isinstance(field.read_only, bool):
raise TypeError(
"The third element of each field from the attribute "
"'%s.fields', that is the 'read_only' attribute, is "
"expected to be an instance object of type 'bool', "
"not '%s'." % (full_path,
_format_type(type(field.read_only))))
field_path = '%s.%s' % (full_path, field.name)
_validate(field.type, field_path)
fields = [field[_FIELD_NAME_IDX] for field in data_type.fields]
duplicates = _find_duplicates(fields)
if duplicates:
if len(duplicates) > 1:
raise ValueError(
"The structure fields %s, were provided multiple times."
% (_join_sequence(duplicates, "and ")),)
else:
raise ValueError(
"The structure field '%s' was provided multiple times."
% (duplicates[0]),)
return True | Implementation for the `validate` function. |
def _output_from_file(self, entry='git_describe'):
"""
Read the version from a .version file that may exist alongside __init__.py.
This file can be generated by piping the following output to file:
git describe --long --match v*.*
"""
try:
vfile = os.path.join(os.path.dirname(self.fpath), '.version')
with open(vfile, 'r') as f:
return json.loads(f.read()).get(entry, None)
except: # File may be missing if using pip + git archive
return None | Read the version from a .version file that may exist alongside __init__.py.
This file can be generated by piping the following output to file:
git describe --long --match v*.* |
def pearson_correlation_coefficient(predictions, labels, weights_fn=None):
"""Calculate pearson correlation coefficient.
Args:
predictions: The raw predictions.
labels: The actual labels.
weights_fn: Weighting function.
Returns:
The pearson correlation coefficient.
"""
del weights_fn
_, pearson = tf.contrib.metrics.streaming_pearson_correlation(predictions,
labels)
return pearson, tf.constant(1.0) | Calculate pearson correlation coefficient.
Args:
predictions: The raw predictions.
labels: The actual labels.
weights_fn: Weighting function.
Returns:
The pearson correlation coefficient. |
def copydb(self, sourcedb, destslab, destdbname=None, progresscb=None):
'''
Copy an entire database in this slab to a new database in potentially another slab.
Args:
sourcedb (LmdbDatabase): which database in this slab to copy rows from
destslab (LmdbSlab): which slab to copy rows to
destdbname (str): the name of the database to copy rows to in destslab
progresscb (Callable[int]): if not None, this function will be periodically called with the number of rows
completed
Returns:
(int): the number of rows copied
Note:
If any rows already exist in the target database, this method raises an error. This means that one cannot
use destdbname=None unless there are no explicit databases in the destination slab.
'''
destdb = destslab.initdb(destdbname, sourcedb.dupsort)
statdict = destslab.stat(db=destdb)
if statdict['entries'] > 0:
raise s_exc.DataAlreadyExists()
rowcount = 0
for chunk in s_common.chunks(self.scanByFull(db=sourcedb), COPY_CHUNKSIZE):
ccount, acount = destslab.putmulti(chunk, dupdata=True, append=True, db=destdb)
if ccount != len(chunk) or acount != len(chunk):
raise s_exc.BadCoreStore(mesg='Unexpected number of values written') # pragma: no cover
rowcount += len(chunk)
if progresscb is not None and 0 == (rowcount % PROGRESS_PERIOD):
progresscb(rowcount)
return rowcount | Copy an entire database in this slab to a new database in potentially another slab.
Args:
sourcedb (LmdbDatabase): which database in this slab to copy rows from
destslab (LmdbSlab): which slab to copy rows to
destdbname (str): the name of the database to copy rows to in destslab
progresscb (Callable[int]): if not None, this function will be periodically called with the number of rows
completed
Returns:
(int): the number of rows copied
Note:
If any rows already exist in the target database, this method raises an error. This means that one cannot
use destdbname=None unless there are no explicit databases in the destination slab. |
def get_translated_items(fapi, file_uri, use_cache, cache_dir=None):
""" Returns the last modified from smarterling
"""
items = None
cache_file = os.path.join(cache_dir, sha1(file_uri)) if use_cache else None
if use_cache and os.path.exists(cache_file):
print("Using cache file %s for translated items for: %s" % (cache_file, file_uri))
items = json.loads(read_from_file(cache_file))
if not items:
print("Downloading %s from smartling" % file_uri)
(response, code) = fapi.last_modified(file_uri)
items = response.data.items
if cache_file:
print("Caching %s to %s" % (file_uri, cache_file))
write_to_file(cache_file, json.dumps(items))
return items | Returns the last-modified items from Smartling. |
def check(cls, dap, network=False, yamls=True, raises=False, logger=logger):
'''Checks if the dap is valid, reports problems
Parameters:
network -- whether to run checks that require a network connection
output -- where to write() problems, might be None
raises -- whether to raise an exception immediately after problem is detected'''
dap._check_raises = raises
dap._problematic = False
dap._logger = logger
problems = list()
problems += cls.check_meta(dap)
problems += cls.check_no_self_dependency(dap)
problems += cls.check_topdir(dap)
problems += cls.check_files(dap)
if yamls:
problems += cls.check_yamls(dap)
if network:
problems += cls.check_name_not_on_dapi(dap)
for problem in problems:
dap._report_problem(problem.message, problem.level)
del dap._check_raises
return not dap._problematic | Checks if the dap is valid, reports problems
Parameters:
network -- whether to run checks that require a network connection
output -- where to write() problems, might be None
raises -- whether to raise an exception immediately after problem is detected |
def key_pair(i, region):
"""Returns the ith default (aws_key_pair_name, key_pair_path)."""
if i == 0:
return ("{}_{}".format(RAY, region),
os.path.expanduser("~/.ssh/{}_{}.pem".format(RAY, region)))
return ("{}_{}_{}".format(RAY, i, region),
os.path.expanduser("~/.ssh/{}_{}_{}.pem".format(RAY, i, region))) | Returns the ith default (aws_key_pair_name, key_pair_path). |
def generate_single_return_period(args):
"""
This function calculates a single return period for a single reach
"""
qout_file, return_period_file, rivid_index_list, step, num_years, \
method, mp_lock = args
skewvals = [-3.0, -2.8, -2.6, -2.4, -2.2, -2.0, -1.8, -1.6, -1.4, -1.2,
-1.0, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1.0,
1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0]
kfac2 = [0.396, 0.384, 0.368, 0.351, 0.33, 0.307, 0.282, 0.254, 0.225,
0.195, 0.164, 0.132, 0.099, 0.066, 0.033, 0, -0.033, -0.066,
-0.099, -0.132, -0.164, -0.195, -0.225, -0.254, -0.282, -0.307,
-0.33, -0.351, -0.368, -0.384, -0.396]
kfac10 = [0.66, 0.702, 0.747, 0.795, 0.844, 0.895, 0.945, 0.994, 1.041,
1.086, 1.128, 1.166, 1.2, 1.231, 1.258, 1.282, 1.301, 1.317,
1.328, 1.336, 1.34, 1.34, 1.337, 1.329, 1.318, 1.302, 1.284,
1.262, 1.238, 1.21, 1.18]
kfac25 = [.666, .712, .764, .823, .888, .959, 1.035, 1.116, 1.198, 1.282,
1.366, 1.448, 1.528, 1.606, 1.680, 1.751, 1.818, 1.880, 1.939,
1.993, 2.043, 2.087, 2.128, 2.163, 2.193, 2.219, 2.240, 2.256,
2.267, 2.275, 2.278]
kfac50 = [0.666, 0.714, 0.768, 0.83, 0.9, 0.98, 1.069, 1.166, 1.27, 1.379,
1.492, 1.606, 1.72, 1.834, 1.945, 2.054, 2.159, 2.261, 2.359,
2.453, 2.542, 2.626, 2.706, 2.78, 2.848, 2.912, 2.97, 3.023,
3.071, 3.114, 3.152]
kfac100 = [0.667, 0.714, 0.769, 0.832, 0.905, 0.99, 1.087, 1.197, 1.318,
1.499, 1.588, 1.733, 1.88, 2.029, 2.178, 2.326, 2.472, 2.615,
2.755, 2.891, 3.022, 3.149, 3.271, 3.388, 3.499, 3.605, 3.705,
3.8, 3.889, 3.973, 4.051]
with RAPIDDataset(qout_file) as qout_nc_file:
# get index of return period data
if method == 'weibull':
rp_index_20 = int((num_years + 1)/20.0)
rp_index_10 = int((num_years + 1)/10.0)
rp_index_2 = int((num_years + 1)/2.0)
if method == 'weibull':
# initialize every array this method fills in below
return_20_array = np.zeros(len(rivid_index_list))
return_10_array = np.zeros(len(rivid_index_list))
return_2_array = np.zeros(len(rivid_index_list))
elif method == 'gumble':
return_100_array = np.zeros(len(rivid_index_list))
return_50_array = np.zeros(len(rivid_index_list))
return_20_array = np.zeros(len(rivid_index_list))
return_10_array = np.zeros(len(rivid_index_list))
return_2_array = np.zeros(len(rivid_index_list))
elif method == 'log_pearson':
return_100_array = np.zeros(len(rivid_index_list))
return_50_array = np.zeros(len(rivid_index_list))
return_25_array = np.zeros(len(rivid_index_list))
return_10_array = np.zeros(len(rivid_index_list))
return_2_array = np.zeros(len(rivid_index_list))
max_flow_array = np.zeros(len(rivid_index_list))
# iterate through rivids to generate return periods
for iter_idx, rivid_index in enumerate(rivid_index_list):
filtered_flow_data = qout_nc_file.get_qout_index(
rivid_index,
pd_filter="{0}D".format(step),
filter_mode="max")
sorted_flow_data = np.sort(filtered_flow_data)[:num_years:-1]
max_flow = sorted_flow_data[0]
if max_flow < 0.01:
log("Return period data < 0.01 generated for rivid {0}"
.format(qout_nc_file.qout_nc.variables[
qout_nc_file.river_id_dimension][rivid_index]),
"WARNING")
max_flow_array[iter_idx] = max_flow
if method == 'weibull':
return_20_array[iter_idx] = sorted_flow_data[rp_index_20]
return_10_array[iter_idx] = sorted_flow_data[rp_index_10]
return_2_array[iter_idx] = sorted_flow_data[rp_index_2]
elif method == 'gumble':
mean_flow = np.mean(filtered_flow_data)
stddev = np.std(filtered_flow_data)
return_100_array[iter_idx] = mean_flow + 3.14*stddev
return_50_array[iter_idx] = mean_flow + 2.59*stddev
return_20_array[iter_idx] = mean_flow + 1.87*stddev
return_10_array[iter_idx] = mean_flow + 1.3*stddev
return_2_array[iter_idx] = mean_flow - .164*stddev
elif method == 'log_pearson':
log_flow = np.log10(filtered_flow_data[filtered_flow_data > 0])
if len(log_flow) <= 0:
continue
mean_log_flow = np.mean(log_flow)
std_log_flow = np.std(log_flow)
log_flow_array = np.array(log_flow)
skew = (num_years * (np.sum(
np.power((log_flow_array - mean_log_flow), 3)))) / \
((num_years - 1) * (num_years - 2) * std_log_flow ** 3)
k2 = np.interp(skew, skewvals, kfac2)
k10 = np.interp(skew, skewvals, kfac10)
k25 = np.interp(skew, skewvals, kfac25)
k50 = np.interp(skew, skewvals, kfac50)
k100 = np.interp(skew, skewvals, kfac100)
return_100_array[iter_idx] = \
np.power(10, (mean_log_flow + k100*std_log_flow))
return_50_array[iter_idx] = \
np.power(10, (mean_log_flow + k50*std_log_flow))
return_25_array[iter_idx] = \
np.power(10, (mean_log_flow + k25*std_log_flow))
return_10_array[iter_idx] = \
np.power(10, (mean_log_flow + k10*std_log_flow))
return_2_array[iter_idx] = \
np.power(10, (mean_log_flow + k2*std_log_flow))
mp_lock.acquire()
return_period_nc = Dataset(return_period_file, 'a')
return_period_nc.variables['max_flow'][rivid_index_list] = \
max_flow_array
if method == 'weibull':
return_period_nc.variables['return_period_20'][
rivid_index_list] = return_20_array
elif method == 'gumble':
return_period_nc.variables['return_period_100'][
rivid_index_list] = return_100_array
return_period_nc.variables['return_period_50'][
rivid_index_list] = return_50_array
return_period_nc.variables['return_period_20'][
rivid_index_list] = return_20_array
elif method == 'log_pearson':
return_period_nc.variables['return_period_100'][
rivid_index_list] = return_100_array
return_period_nc.variables['return_period_50'][
rivid_index_list] = return_50_array
return_period_nc.variables['return_period_25'][
rivid_index_list] = return_25_array
return_period_nc.variables['return_period_10'][
rivid_index_list] = return_10_array
return_period_nc.variables['return_period_2'][
rivid_index_list] = return_2_array
return_period_nc.close()
mp_lock.release() | This function calculates a single return period for a single reach |
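The log-Pearson III branch above can be exercised in isolation. Below is a self-contained sketch with a made-up series of annual maximum flows; it mirrors the skew computation and the 100-year frequency-factor interpolation used in the function, and the numbers are illustrative only:
```python
# Self-contained sketch of the log-Pearson III branch with made-up annual
# maxima; it mirrors the skew computation and the 100-year frequency-factor
# interpolation used above. Numbers are illustrative only.
import numpy as np

skewvals = [-3.0, -2.8, -2.6, -2.4, -2.2, -2.0, -1.8, -1.6, -1.4, -1.2,
            -1.0, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1.0,
            1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0]
kfac100 = [0.667, 0.714, 0.769, 0.832, 0.905, 0.99, 1.087, 1.197, 1.318,
           1.499, 1.588, 1.733, 1.88, 2.029, 2.178, 2.326, 2.472, 2.615,
           2.755, 2.891, 3.022, 3.149, 3.271, 3.388, 3.499, 3.605, 3.705,
           3.8, 3.889, 3.973, 4.051]

annual_max = np.array([120., 95., 210., 160., 185., 140., 260., 175., 130., 200.])
num_years = len(annual_max)

log_flow = np.log10(annual_max)
mean_log_flow = np.mean(log_flow)
std_log_flow = np.std(log_flow)
skew = (num_years * np.sum((log_flow - mean_log_flow) ** 3)) / \
       ((num_years - 1) * (num_years - 2) * std_log_flow ** 3)

k100 = np.interp(skew, skewvals, kfac100)
return_100 = 10 ** (mean_log_flow + k100 * std_log_flow)
print(round(float(return_100), 1))  # estimated 100-year flow for this synthetic series
```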
def startMultiple(self, zones):
"""Start multiple zones."""
path = 'zone/start_multiple'
payload = {'zones': zones}
return self.rachio.put(path, payload) | Start multiple zones. |
def reset(self, params, repetition):
"""
Take the steps necessary to reset the experiment before each repetition:
- Make sure random seed is different for each repetition
- Create the L2-L4-L6a network
- Generate objects used by the experiment
- Learn all objects used by the experiment
"""
print params["name"], ":", repetition
self.debug = params.get("debug", False)
L2Params = json.loads('{' + params["l2_params"] + '}')
L4Params = json.loads('{' + params["l4_params"] + '}')
L6aParams = json.loads('{' + params["l6a_params"] + '}')
# Make sure random seed is different for each repetition
seed = params.get("seed", 42)
np.random.seed(seed + repetition)
random.seed(seed + repetition)
L2Params["seed"] = seed + repetition
L4Params["seed"] = seed + repetition
L6aParams["seed"] = seed + repetition
# Configure L6a params
numModules = params["num_modules"]
L6aParams["scale"] = [params["scale"]] * numModules
angle = params["angle"] / numModules
orientation = range(angle / 2, angle * numModules, angle)
L6aParams["orientation"] = np.radians(orientation).tolist()
# Create multi-column L2-L4-L6a network
self.numColumns = params["num_cortical_columns"]
network = Network()
network = createMultipleL246aLocationColumn(network=network,
numberOfColumns=self.numColumns,
L2Params=L2Params,
L4Params=L4Params,
L6aParams=L6aParams)
network.initialize()
self.network = network
self.sensorInput = []
self.motorInput = []
self.L2Regions = []
self.L4Regions = []
self.L6aRegions = []
for i in xrange(self.numColumns):
col = str(i)
self.sensorInput.append(network.regions["sensorInput_" + col].getSelf())
self.motorInput.append(network.regions["motorInput_" + col].getSelf())
self.L2Regions.append(network.regions["L2_" + col])
self.L4Regions.append(network.regions["L4_" + col])
self.L6aRegions.append(network.regions["L6a_" + col])
# Use the number of iterations as the number of objects. This will allow us
# to execute one iteration per object and use the "iteration" parameter as
# the object index
numObjects = params["iterations"]
# Generate feature SDRs
numFeatures = params["num_features"]
numOfMinicolumns = L4Params["columnCount"]
numOfActiveMinicolumns = params["num_active_minicolumns"]
self.featureSDR = [{
str(f): sorted(np.random.choice(numOfMinicolumns, numOfActiveMinicolumns))
for f in xrange(numFeatures)
} for _ in xrange(self.numColumns)]
# Generate objects used in the experiment
self.objects = generateObjects(numObjects=numObjects,
featuresPerObject=params["features_per_object"],
objectWidth=params["object_width"],
numFeatures=numFeatures,
distribution=params["feature_distribution"])
# Make sure the objects are unique
uniqueObjs = np.unique([{"features": obj["features"]}
for obj in self.objects])
assert len(uniqueObjs) == len(self.objects)
self.sdrSize = L2Params["sdrSize"]
# Learn objects
self.numLearningPoints = params["num_learning_points"]
self.numOfSensations = params["num_sensations"]
self.learnedObjects = {}
self.learn() | Take the steps necessary to reset the experiment before each repetition:
- Make sure random seed is different for each repetition
- Create the L2-L4-L6a network
- Generate objects used by the experiment
- Learn all objects used by the experiment |
def metamodel_from_file(file_name, **kwargs):
"""
Creates new metamodel from the given file.
Args:
file_name(str): The name of the file with textX language description.
other params: See metamodel_from_str.
"""
with codecs.open(file_name, 'r', 'utf-8') as f:
lang_desc = f.read()
metamodel = metamodel_from_str(lang_desc=lang_desc,
file_name=file_name,
**kwargs)
return metamodel | Creates new metamodel from the given file.
Args:
file_name(str): The name of the file with textX language description.
other params: See metamodel_from_str. |
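A hedged usage sketch, assuming this is textX's `metamodel_from_file` and that the generated metamodel exposes `model_from_str`; the one-rule grammar and the model string below are made up:
```python
# Hedged usage sketch, assuming this is textX's metamodel_from_file
# (pip install textx); the one-rule grammar and model string are made up.
import os
import tempfile
from textx import metamodel_from_file

with tempfile.NamedTemporaryFile('w', suffix='.tx', delete=False) as f:
    f.write("Greeting: 'hello' name=ID;")
    grammar_path = f.name

mm = metamodel_from_file(grammar_path)
model = mm.model_from_str('hello world')
print(model.name)  # -> 'world'
os.remove(grammar_path)
```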
def set_console(stream=STDOUT, foreground=None, background=None, style=None):
"""Set console foreground and background attributes."""
if foreground is None:
foreground = _default_foreground
if background is None:
background = _default_background
if style is None:
style = _default_style
attrs = get_attrs(foreground, background, style)
SetConsoleTextAttribute(stream, attrs) | Set console foreground and background attributes. |
def init():
'''
Get an sqlite3 connection, and initialize the package database if necessary
'''
if not os.path.exists(__opts__['spm_cache_dir']):
log.debug('Creating SPM cache directory at %s', __opts__['spm_cache_dir'])
os.makedirs(__opts__['spm_cache_dir'])
if not os.path.exists(__opts__['spm_db']):
log.debug('Creating new package database at %s', __opts__['spm_db'])
sqlite3.enable_callback_tracebacks(True)
conn = sqlite3.connect(__opts__['spm_db'], isolation_level=None)
try:
conn.execute('SELECT count(*) FROM packages')
except OperationalError:
conn.execute('''CREATE TABLE packages (
package text,
version text,
release text,
installed text,
os text,
os_family text,
dependencies text,
os_dependencies text,
os_family_dependencies text,
summary text,
description text
)''')
try:
conn.execute('SELECT count(*) FROM files')
except OperationalError:
conn.execute('''CREATE TABLE files (
package text,
path text,
size real,
mode text,
sum text,
major text,
minor text,
linkname text,
linkpath text,
uname text,
gname text,
mtime text
)''')
return conn | Get an sqlite3 connection, and initialize the package database if necessary |
def as_bits( region_start, region_length, intervals ):
"""
Convert a set of intervals overlapping a region of a chromosome into
a bitset for just that region with the bits covered by the intervals
set.
"""
bits = BitSet( region_length )
for chr, start, stop in intervals:
bits.set_range( start - region_start, stop - start )
return bits | Convert a set of intervals overlapping a region of a chromosome into
a bitset for just that region with the bits covered by the intervals
set. |
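The same idea sketched without the `bx-python` `BitSet` dependency, using a plain boolean NumPy array; the helper name `as_bits_array` and the sample intervals are made up:
```python
# Same idea without the bx-python BitSet dependency: mark, in a boolean array
# covering only the region, every position covered by at least one interval.
# The helper name and the sample intervals are made up.
import numpy as np

def as_bits_array(region_start, region_length, intervals):
    bits = np.zeros(region_length, dtype=bool)
    for _chrom, start, stop in intervals:
        bits[start - region_start:stop - region_start] = True
    return bits

bits = as_bits_array(100, 20, [('chr1', 102, 105), ('chr1', 110, 118)])
print(bits.astype(int))  # 1s at region offsets 2-4 and 10-17
```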
def _worker_thread_upload(self):
# type: (Uploader) -> None
"""Worker thread upload
:param Uploader self: this
"""
max_set_len = self._general_options.concurrency.transfer_threads << 2
while not self.termination_check:
try:
if len(self._transfer_set) > max_set_len:
time.sleep(0.1)
continue
else:
ud = self._upload_queue.get(block=False, timeout=0.1)
except queue.Empty:
continue
try:
self._process_upload_descriptor(ud)
except Exception as e:
with self._upload_lock:
self._exceptions.append(e) | Worker thread upload
:param Uploader self: this |
def ping(self, message=None):
'''Write a ping ``frame``.
'''
return self.write(self.parser.ping(message), encode=False) | Write a ping ``frame``. |
def _get_callable_from_trace_tuple(
self, trace_tuple: TraceTuple
) -> Tuple[str, str]:
"""Returns either (caller, caller_port) or (callee, callee_port).
"""
trace_frame = trace_tuple.trace_frame
if trace_tuple.placeholder:
return trace_frame.caller, trace_frame.caller_port
return trace_frame.callee, trace_frame.callee_port | Returns either (caller, caller_port) or (callee, callee_port). |
def add_body(self, body):
"""
Add a :class:`Body` to the system. This function also sets the
``system`` attribute of the body.
:param body:
The :class:`Body` to add.
"""
body.system = self
self.bodies.append(body)
self.unfrozen = np.concatenate((
self.unfrozen[:-2], np.zeros(7, dtype=bool), self.unfrozen[-2:]
)) | Add a :class:`Body` to the system. This function also sets the
``system`` attribute of the body.
:param body:
The :class:`Body` to add. |
def load_graphs():
'''load graphs from mavgraphs.xml'''
mestate.graphs = []
gfiles = ['mavgraphs.xml']
if 'HOME' in os.environ:
for dirname, dirnames, filenames in os.walk(os.path.join(os.environ['HOME'], ".mavproxy")):
for filename in filenames:
if filename.lower().endswith('.xml'):
gfiles.append(os.path.join(dirname, filename))
for file in gfiles:
if not os.path.exists(file):
continue
graphs = load_graph_xml(open(file).read(), file)
if graphs:
mestate.graphs.extend(graphs)
mestate.console.writeln("Loaded %s" % file)
# also load the built in graphs
dlist = pkg_resources.resource_listdir("MAVProxy", "tools/graphs")
for f in dlist:
raw = pkg_resources.resource_stream("MAVProxy", "tools/graphs/%s" % f).read()
graphs = load_graph_xml(raw, None)
if graphs:
mestate.graphs.extend(graphs)
mestate.console.writeln("Loaded %s" % f)
mestate.graphs = sorted(mestate.graphs, key=lambda g: g.name) | load graphs from mavgraphs.xml |
def main() -> None:
""""Execute the main routine."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--outdir", help="output directory", default=os.path.dirname(__file__))
args = parser.parse_args()
outdir = pathlib.Path(args.outdir)
if not outdir.exists():
raise FileNotFoundError("Output directory is missing: {}".format(outdir))
for contracts in [0, 1, 5, 10]:
if contracts == 0:
pth = outdir / "functions_100_with_no_contract.py"
elif contracts == 1:
pth = outdir / "functions_100_with_1_contract.py"
else:
pth = outdir / "functions_100_with_{}_contracts.py".format(contracts)
text = generate_functions(functions=100, contracts=contracts, disabled=False)
pth.write_text(text)
for contracts in [1, 5, 10]:
if contracts == 1:
pth = outdir / "functions_100_with_1_disabled_contract.py"
else:
pth = outdir / "functions_100_with_{}_disabled_contracts.py".format(contracts)
text = generate_functions(functions=100, contracts=contracts, disabled=True)
pth.write_text(text)
for invariants in [0, 1, 5, 10]:
if invariants == 0:
pth = outdir / "classes_100_with_no_invariant.py"
elif invariants == 1:
pth = outdir / "classes_100_with_1_invariant.py"
else:
pth = outdir / "classes_100_with_{}_invariants.py".format(invariants)
text = generate_classes(classes=100, invariants=invariants, disabled=False)
pth.write_text(text)
for invariants in [1, 5, 10]:
if invariants == 1:
pth = outdir / "classes_100_with_1_disabled_invariant.py"
else:
pth = outdir / "classes_100_with_{}_disabled_invariants.py".format(invariants)
text = generate_classes(classes=100, invariants=invariants, disabled=True)
pth.write_text(text) | Execute the main routine. |
def get_boundaries_of_elements_in_dict(models_dict, clearance=0.):
""" Get boundaries of all handed models
The function checks all model meta data positions to grow the boundary, starting with a state or scoped variable.
It then iterates over all state, data and logical port models, and their linkage if that is sufficient for the respective
graphical editor. At the end a clearance is added to the boundary if needed, e.g. to secure the size for OpenGL.
:param models_dict: dict of all handed models
:return: tuple of left, right, top and bottom value
"""
# Determine initial outer coordinates
right = 0.
bottom = 0.
if 'states' in models_dict and models_dict['states']:
left = list(models_dict['states'].items())[0][1].get_meta_data_editor()['rel_pos'][0]
top = list(models_dict['states'].items())[0][1].get_meta_data_editor()['rel_pos'][1]
elif 'scoped_variables' in models_dict and models_dict['scoped_variables']:
left = list(models_dict['scoped_variables'].items())[0][1].get_meta_data_editor()['inner_rel_pos'][0]
top = list(models_dict['scoped_variables'].items())[0][1].get_meta_data_editor()['inner_rel_pos'][1]
else:
all_ports = list(models_dict['input_data_ports'].values()) + list(models_dict['output_data_ports'].values()) + \
list(models_dict['scoped_variables'].values()) + list(models_dict['outcomes'].values())
if len(set([port_m.core_element.parent for port_m in all_ports])) == 1:
logger.info("Only one parent {0} {1}".format(all_ports[0].core_element.parent, all_ports[0].parent.get_meta_data_editor()))
if all_ports:
left = all_ports[0].parent.get_meta_data_editor()['rel_pos'][0]
top = all_ports[0].parent.get_meta_data_editor()['rel_pos'][1]
else:
raise ValueError("Get boundary method does not aspects all list elements empty in dictionary. {0}"
"".format(models_dict))
def cal_max(max_x, max_y, rel_pos, size):
max_x = size[0] + rel_pos[0] if size[0] + rel_pos[0] > max_x else max_x
max_y = rel_pos[1] + size[1] if rel_pos[1] + size[1] > max_y else max_y
return max_x, max_y
def cal_min(min_x, min_y, rel_pos, size):
min_x = rel_pos[0] if rel_pos[0] < min_x else min_x
min_y = rel_pos[1] if rel_pos[1] < min_y else min_y
return min_x, min_y
# Finally take into account all relevant elements in models_dict
# -> states, scoped variables (maybe input- and output- data ports) and transitions and data flows are relevant
parts = ['states', 'transitions', 'data_flows']
for key in parts:
elems_dict = models_dict[key]
rel_positions = []
for model in elems_dict.values():
_size = (0., 0.)
if key == 'states':
rel_positions = [model.get_meta_data_editor()['rel_pos']]
_size = model.get_meta_data_editor()['size']
# print(key, rel_positions, _size)
elif key in ['scoped_variables', 'input_data_ports', 'output_data_ports']:
rel_positions = [model.get_meta_data_editor()['inner_rel_pos']]
# TODO check to take the ports size into account
# print(key, rel_positions, _size)
elif key in ['transitions', 'data_flows']:
if key is "data_flows":
# take into account the meta data positions of opengl if there is some (always in opengl format)
rel_positions = mirror_waypoints(deepcopy(model.get_meta_data_editor()))['waypoints']
else:
rel_positions = model.get_meta_data_editor()['waypoints']
# print(key, rel_positions, _size, model.meta)
for rel_position in rel_positions:
# check for empty fields and ignore them at this point
if not contains_geometric_info(rel_position):
continue
right, bottom = cal_max(right, bottom, rel_position, _size)
left, top = cal_min(left, top, rel_position, _size)
# print("new edges:", left, right, top, bottom, key)
# increase of boundary results into bigger estimated size and finally stronger reduction of original element sizes
left, right, top, bottom = add_boundary_clearance(left, right, top, bottom, {'size': (0., 0.)}, clearance)
return left, right, top, bottom | Get boundaries of all handed models
The function checks all model meta data positions to grow the boundary, starting with a state or scoped variable.
It then iterates over all state, data and logical port models, and their linkage if that is sufficient for the respective
graphical editor. At the end a clearance is added to the boundary if needed, e.g. to secure the size for OpenGL.
:param models_dict: dict of all handed models
:return: tuple of left, right, top and bottom value |
def has_same_bins(self, other: "HistogramBase") -> bool:
"""Whether two histograms share the same binning."""
if self.shape != other.shape:
return False
elif self.ndim == 1:
return np.allclose(self.bins, other.bins)
elif self.ndim > 1:
for i in range(self.ndim):
if not np.allclose(self.bins[i], other.bins[i]):
return False
return True | Whether two histograms share the same binning. |
def handle_pubcomp(self):
"""Handle incoming PUBCOMP packet."""
self.logger.info("PUBCOMP received")
ret, mid = self.in_packet.read_uint16()
if ret != NC.ERR_SUCCESS:
return ret
evt = event.EventPubcomp(mid)
self.push_event(evt)
return NC.ERR_SUCCESS | Handle incoming PUBCOMP packet. |
def run_step(context):
"""Parse input file and replace a search string.
This also does string substitutions from context on the fileReplacePairs.
It does this before it searches and replaces the in-files.
Be careful of order. If fileReplacePairs is not an ordered collection,
replacements could evaluate in any given order. If this is coming in from
pipeline yaml it will be an ordered dictionary, so life is good.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileReplace
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
- replacePairs. mandatory. Dictionary where items are:
'find_string': 'replace_string'
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: Any of the required keys missing in
context.
pypyr.errors.KeyInContextHasNoValueError: Any of the required keys
exists but is None.
"""
logger.debug("started")
deprecated(context)
StreamReplacePairsRewriterStep(__name__, 'fileReplace', context).run_step()
logger.debug("done") | Parse input file and replace a search string.
This also does string substitutions from context on the fileReplacePairs.
It does this before it searches and replaces the in-files.
Be careful of order. If fileReplacePairs is not an ordered collection,
replacements could evaluate in any given order. If this is coming in from
pipeline yaml it will be an ordered dictionary, so life is good.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileReplace
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
- replacePairs. mandatory. Dictionary where items are:
'find_string': 'replace_string'
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: Any of the required keys missing in
context.
pypyr.errors.KeyInContextHasNoValueError: Any of the required keys
exists but is None. |
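For orientation, this is the shape of the context the docstring describes, written as a plain dict (pypyr's `Context` behaves like a dict); every path and replace pair below is made up:
```python
# Illustrative shape of the context this step expects, written as a plain
# dict (pypyr's Context behaves like a dict); every path and pair is made up.
file_replace_context = {
    'fileReplace': {
        'in': ['config/*.yaml', 'templates/app.conf'],  # glob, path, or list of paths
        'out': 'out/',                                  # trailing / marks a directory
        'replacePairs': {
            '{{ hostname }}': 'prod-web-01',
            '{{ port }}': '8080',
        },
    },
}
```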
async def close(self) -> None:
"""
Explicit exit. If so configured, populate cache to prove all creds in
wallet offline if need be, archive cache, and purge prior cache archives.
:return: current object
"""
LOGGER.debug('HolderProver.close >>>')
if self.cfg.get('archive-cache-on-close', False):
await self.load_cache(True)
Caches.purge_archives(self.dir_cache, True)
await super().close()
for path_rr_id in Tails.links(self._dir_tails):
rr_id = basename(path_rr_id)
try:
await self._sync_revoc(rr_id)
except ClosedPool:
LOGGER.warning('HolderProver sync-revoc on close required ledger for %s but pool was closed', rr_id)
LOGGER.debug('HolderProver.close <<<') | Explicit exit. If so configured, populate cache to prove all creds in
wallet offline if need be, archive cache, and purge prior cache archives.
:return: current object |
def to_mask(self, method='exact', subpixels=5):
"""
Return a list of `~photutils.ApertureMask` objects, one for each
aperture position.
Parameters
----------
method : {'exact', 'center', 'subpixel'}, optional
The method used to determine the overlap of the aperture on
the pixel grid. Not all options are available for all
aperture types. Note that the more precise methods are
generally slower. The following methods are available:
* ``'exact'`` (default):
The exact fractional overlap of the aperture and
each pixel is calculated. The returned mask will
contain values between 0 and 1.
* ``'center'``:
A pixel is considered to be entirely in or out of the
aperture depending on whether its center is in or out
of the aperture. The returned mask will contain
values only of 0 (out) and 1 (in).
* ``'subpixel'``:
A pixel is divided into subpixels (see the
``subpixels`` keyword), each of which are considered
to be entirely in or out of the aperture depending on
whether its center is in or out of the aperture. If
``subpixels=1``, this method is equivalent to
``'center'``. The returned mask will contain values
between 0 and 1.
subpixels : int, optional
For the ``'subpixel'`` method, resample pixels by this factor
in each dimension. That is, each pixel is divided into
``subpixels ** 2`` subpixels.
Returns
-------
mask : list of `~photutils.ApertureMask`
A list of aperture mask objects.
"""
use_exact, subpixels = self._translate_mask_mode(method, subpixels)
if hasattr(self, 'r'):
radius = self.r
elif hasattr(self, 'r_out'): # annulus
radius = self.r_out
else:
raise ValueError('Cannot determine the aperture radius.')
masks = []
for bbox, edges in zip(self.bounding_boxes, self._centered_edges):
ny, nx = bbox.shape
mask = circular_overlap_grid(edges[0], edges[1], edges[2],
edges[3], nx, ny, radius, use_exact,
subpixels)
# subtract the inner circle for an annulus
if hasattr(self, 'r_in'):
mask -= circular_overlap_grid(edges[0], edges[1], edges[2],
edges[3], nx, ny, self.r_in,
use_exact, subpixels)
masks.append(ApertureMask(mask, bbox))
return masks | Return a list of `~photutils.ApertureMask` objects, one for each
aperture position.
Parameters
----------
method : {'exact', 'center', 'subpixel'}, optional
The method used to determine the overlap of the aperture on
the pixel grid. Not all options are available for all
aperture types. Note that the more precise methods are
generally slower. The following methods are available:
* ``'exact'`` (default):
The exact fractional overlap of the aperture and
each pixel is calculated. The returned mask will
contain values between 0 and 1.
* ``'center'``:
A pixel is considered to be entirely in or out of the
aperture depending on whether its center is in or out
of the aperture. The returned mask will contain
values only of 0 (out) and 1 (in).
* ``'subpixel'``:
A pixel is divided into subpixels (see the
``subpixels`` keyword), each of which are considered
to be entirely in or out of the aperture depending on
whether its center is in or out of the aperture. If
``subpixels=1``, this method is equivalent to
``'center'``. The returned mask will contain values
between 0 and 1.
subpixels : int, optional
For the ``'subpixel'`` method, resample pixels by this factor
in each dimension. That is, each pixel is divided into
``subpixels ** 2`` subpixels.
Returns
-------
mask : list of `~photutils.ApertureMask`
A list of aperture mask objects. |
def to_iso8601(dt, tz=None):
"""
Returns an ISO-8601 representation of a given datetime instance.
>>> to_iso8601(datetime.datetime.now())
'2014-10-01T23:21:33.718508Z'
:param dt: a :class:`~datetime.datetime` instance
:param tz: a :class:`~datetime.tzinfo` to use; if None - use a default one
"""
if tz is not None:
dt = dt.replace(tzinfo=tz)
iso8601 = dt.isoformat()
# Naive datetime objects usually don't have info about timezone.
# Let's assume it's UTC and add Z to the end.
if re.match(r'.*(Z|[+-]\d{2}:\d{2})$', iso8601) is None:
iso8601 += 'Z'
return iso8601 | Returns an ISO-8601 representation of a given datetime instance.
>>> to_iso8601(datetime.datetime.now())
'2014-10-01T23:21:33.718508Z'
:param dt: a :class:`~datetime.datetime` instance
:param tz: a :class:`~datetime.tzinfo` to use; if None - use a default one |
def list(self, filter_title=None, filter_ids=None, page=None):
"""
:type filter_title: str
:param filter_title: Filter by dashboard title
:type filter_ids: list of ints
:param filter_ids: Filter by dashboard ids
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'page' key
if there are paginated results
::
{
"dashboards": [
{
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
"rel": "next"
}
}
}
"""
filters = [
'filter[title]={0}'.format(filter_title) if filter_title else None,
'filter[ids]={0}'.format(','.join([str(dash_id) for dash_id in filter_ids])) if filter_ids else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}dashboards.json'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | :type filter_title: str
:param filter_title: Filter by dashboard title
:type filter_ids: list of ints
:param filter_ids: Filter by dashboard ids
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'page' key
if there are paginated results
::
{
"dashboards": [
{
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
"rel": "next"
}
}
} |
def _apply_orthogonal_view(self):
"""Orthogonal view with respect to current aspect ratio
"""
left, right, bottom, top = self.get_view_coordinates()
glOrtho(left, right, bottom, top, -10, 0) | Orthogonal view with respect to current aspect ratio |
def calculate_retry_delay(attempt, max_delay=300):
"""Calculates an exponential backoff for retry attempts with a small
amount of jitter."""
delay = int(random.uniform(2, 4) ** attempt)
if delay > max_delay:
# After reaching the max delay, stop using exponential growth
# and keep the delay nearby the max.
delay = int(random.uniform(max_delay - 20, max_delay + 20))
return delay | Calculates an exponential backoff for retry attempts with a small
amount of jitter. |
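A quick, self-contained way to see the growth and the cap; the function body is copied verbatim from the snippet above and the seeded output is only indicative:
```python
# Quick look at how the delay grows and then saturates near max_delay; the
# function body is copied verbatim from the snippet above.
import random

def calculate_retry_delay(attempt, max_delay=300):
    delay = int(random.uniform(2, 4) ** attempt)
    if delay > max_delay:
        delay = int(random.uniform(max_delay - 20, max_delay + 20))
    return delay

random.seed(0)
for attempt in range(1, 9):
    print(attempt, calculate_retry_delay(attempt))
# early attempts stay in the seconds range; by attempt ~7-8 the delay hugs the 300 s cap
```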
def to_dict(self):
""" Transform the current specification to a dictionary
"""
data = {"model": {}}
data["model"]["description"] = self.description
data["model"]["entity_name"] = self.entity_name
data["model"]["package"] = self.package
data["model"]["resource_name"] = self.resource_name
data["model"]["rest_name"] = self.rest_name
data["model"]["extends"] = self.extends
data["model"]["get"] = self.allows_get
data["model"]["update"] = self.allows_update
data["model"]["create"] = self.allows_create
data["model"]["delete"] = self.allows_delete
data["model"]["root"] = self.is_root
data["model"]["userlabel"] = self.userlabel
data["model"]["template"] = self.template
data["model"]["allowed_job_commands"] = self.allowed_job_commands
data["attributes"] = []
for attribute in self.attributes:
data["attributes"].append(attribute.to_dict())
data["children"] = []
for api in self.child_apis:
data["children"].append(api.to_dict())
return data | Transform the current specification to a dictionary |
def declare_example(self, source):
"""Execute the given code, adding it to the runner's namespace."""
with patch_modules():
code = compile(source, "<docs>", "exec")
exec(code, self.namespace) | Execute the given code, adding it to the runner's namespace. |
def recalculate_satistics(self):
'''
update self.Data[specimen]['pars'] for all specimens.
'''
gframe = wx.BusyInfo(
"Re-calculating statistics for all specimens\n Please wait..", self)
for specimen in list(self.Data.keys()):
if 'pars' not in list(self.Data[specimen].keys()):
continue
if 'specimen_int_uT' not in list(self.Data[specimen]['pars'].keys()):
continue
tmin = self.Data[specimen]['pars']['measurement_step_min']
tmax = self.Data[specimen]['pars']['measurement_step_max']
pars = thellier_gui_lib.get_PI_parameters(
self.Data, self.acceptance_criteria, self.preferences, specimen, tmin, tmax, self.GUI_log, THERMAL, MICROWAVE)
self.Data[specimen]['pars'] = pars
self.Data[specimen]['pars']['lab_dc_field'] = self.Data[specimen]['lab_dc_field']
self.Data[specimen]['pars']['er_specimen_name'] = self.Data[specimen]['er_specimen_name']
self.Data[specimen]['pars']['er_sample_name'] = self.Data[specimen]['er_sample_name']
del gframe | update self.Data[specimen]['pars'] for all specimens. |
def init_heartbeat(self):
"""start the heart beating"""
# heartbeat doesn't share context, because it mustn't be blocked
# by the GIL, which is accessed by libzmq when freeing zero-copy messages
hb_ctx = zmq.Context()
self.heartbeat = Heartbeat(hb_ctx, (self.ip, self.hb_port))
self.hb_port = self.heartbeat.port
self.log.debug("Heartbeat REP Channel on port: %i"%self.hb_port)
self.heartbeat.start()
# Helper to make it easier to connect to an existing kernel.
# set log-level to critical, to make sure it is output
self.log.critical("To connect another client to this kernel, use:") | start the heart beating |
def _bind_method(self, name, unconditionally=False):
"""Generate a Matlab function and bind it to the instance
This is where the magic happens. When an unknown attribute of the
Matlab class is requested, it is assumed to be a call to a
Matlab function, and is generated and bound to the instance.
This works because getattr() falls back to __getattr__ only if no
attributes of the requested name can be found through normal
routes (__getattribute__, __dict__, class tree).
bind_method first checks whether the requested name is a callable
Matlab function before generating a binding.
Parameters
----------
name : str
The name of the Matlab function to call
e.g. 'sqrt', 'sum', 'svd', etc
unconditionally : bool, optional
Bind the method without performing
checks. Used to bootstrap methods that are required and
known to exist
Returns
-------
MatlabFunction
A reference to a newly bound MatlabFunction instance if the
requested name is determined to be a callable function
Raises
------
AttributeError: if the requested name is not a callable
Matlab function
"""
# TODO: This does not work if the function is a mex function inside a folder of the same name
exists = self.run_func('exist', name)['result'] in [2, 3, 5]
if not unconditionally and not exists:
raise AttributeError("'Matlab' object has no attribute '%s'" % name)
# create a new method instance
method_instance = MatlabFunction(weakref.ref(self), name)
method_instance.__name__ = name
# bind to the Matlab instance with a weakref (to avoid circular references)
if sys.version.startswith('3'):
method = types.MethodType(method_instance, weakref.ref(self))
else:
method = types.MethodType(method_instance, weakref.ref(self),
_Session)
setattr(self, name, method)
return getattr(self, name) | Generate a Matlab function and bind it to the instance
This is where the magic happens. When an unknown attribute of the
Matlab class is requested, it is assumed to be a call to a
Matlab function, and is generated and bound to the instance.
This works because getattr() falls back to __getattr__ only if no
attributes of the requested name can be found through normal
routes (__getattribute__, __dict__, class tree).
bind_method first checks whether the requested name is a callable
Matlab function before generating a binding.
Parameters
----------
name : str
The name of the Matlab function to call
e.g. 'sqrt', 'sum', 'svd', etc
unconditionally : bool, optional
Bind the method without performing
checks. Used to bootstrap methods that are required and
known to exist
Returns
-------
MatlabFunction
A reference to a newly bound MatlabFunction instance if the
requested name is determined to be a callable function
Raises
------
AttributeError: if the requested name is not a callable
Matlab function |
def _get_arrays(self, wavelengths, **kwargs):
"""Get sampled spectrum or bandpass in user units."""
x = self._validate_wavelengths(wavelengths)
y = self(x, **kwargs)
if isinstance(wavelengths, u.Quantity):
w = x.to(wavelengths.unit, u.spectral())
else:
w = x
return w, y | Get sampled spectrum or bandpass in user units. |
def delete_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
'''
Deletes a given apiKey
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.delete_api_key apikeystring
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_api_key(apiKey=apiKey)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)} | Deletes a given apiKey
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.delete_api_key apikeystring |
def create(self, name):
"""Creates a new bucket.
Args:
name: a unique name for the new bucket.
Returns:
The newly created bucket.
Raises:
Exception if there was an error creating the bucket.
"""
return Bucket(name, context=self._context).create(self._project_id) | Creates a new bucket.
Args:
name: a unique name for the new bucket.
Returns:
The newly created bucket.
Raises:
Exception if there was an error creating the bucket. |
def _prime_user_perm_caches(self):
"""
Prime both the user and group caches and put them on the ``self.user``.
In addition add a cache filled flag on ``self.user``.
"""
perm_cache, group_perm_cache = self._get_user_cached_perms()
self.user._authority_perm_cache = perm_cache
self.user._authority_group_perm_cache = group_perm_cache
self.user._authority_perm_cache_filled = True | Prime both the user and group caches and put them on the ``self.user``.
In addition add a cache filled flag on ``self.user``. |
def set_continue(self, name, action, seqno, value=None, default=False,
disable=False):
"""Configures the routemap continue value
Args:
name (string): The full name of the routemap.
action (string): The action to take for this routemap clause.
seqno (integer): The sequence number for the routemap clause.
value (integer): The value to configure for the routemap continue
default (bool): Specifies to default the routemap continue value
disable (bool): Specifies to negate the routemap continue value
Returns:
True if the operation succeeds otherwise False is returned
"""
commands = ['route-map %s %s %s' % (name, action, seqno)]
if default:
commands.append('default continue')
elif disable:
commands.append('no continue')
else:
if not str(value).isdigit() or value < 1:
raise ValueError('value must be a positive integer unless '
'default or disable is specified')
commands.append('continue %s' % value)
return self.configure(commands) | Configures the routemap continue value
Args:
name (string): The full name of the routemap.
action (string): The action to take for this routemap clause.
seqno (integer): The sequence number for the routemap clause.
value (integer): The value to configure for the routemap continue
default (bool): Specifies to default the routemap continue value
disable (bool): Specifies to negate the routemap continue value
Returns:
True if the operation succeeds otherwise False is returned |
def get_vouchers(self, vid_encoded=None,
uid_from=None, uid_to=None, gid=None,
valid_after=None, valid_before=None,
last=None, first=None):
"""
FETCHES a filtered list of vouchers.
:type vid_encoded: ``alphanumeric(64)``
:param vid_encoded:
Voucher ID, as a string with CRC.
:type uid_from: ``bigint``
:param uid_from:
Filter by source account UID.
:type uid_to: ``bigint``
:param uid_to:
Filter by destination account UID.
:type gid: ``alphanumeric(32)``
:param gid:
Filter by voucher Group ID. GID is localized to `uid_from`.
:type valid_after: ``datetime``/``dict``
:param valid_after:
Voucher has to be valid after this timestamp. Absolute
(``datetime``) or relative (``dict``) timestamps are accepted. Valid
keys for relative timestamp dictionary are same as keyword arguments
for `datetime.timedelta` (``days``, ``seconds``, ``minutes``,
``hours``, ``weeks``).
:type valid_before: ``datetime``/``dict``
:param valid_before:
Voucher was valid until this timestamp (for format, see the
`valid_after` above).
:type last: ``bigint``
:param last:
The number of newest vouchers (that satisfy all other criteria) to
return.
:type first: ``bigint``
:param first:
The number of oldest vouchers (that satisfy all other criteria) to
return.
:note:
If `first` or `last` are used, the vouchers list is sorted by time
created, otherwise it is sorted alphabetically by `vid_encoded`.
:rtype: ``list``/``dict``
:returns:
A list of voucher description dictionaries. If `vid_encoded` is
specified, a single dictionary is returned instead of a list.
:raises GeneralException:
:resource:
``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]``
``[/valid_after=<valid_after>][/valid_before=<valid_before>]``
``[/last=<last>][/first=<first>]``
:access: authorized users (ACL flag: ``voucher.get``)
"""
resource = self.kvpath(
'vouchers',
('ident', vid_encoded),
**{
'from': ('int', uid_from),
'to': ('int', uid_to),
'gid': ('ident', gid),
'valid_after': ('isobasic', absdatetime(valid_after)),
'valid_before': ('isobasic', absdatetime(valid_before)),
'first': ('int', first),
'last': ('int', last)
}
)
return self.request('get', resource) | FETCHES a filtered list of vouchers.
:type vid_encoded: ``alphanumeric(64)``
:param vid_encoded:
Voucher ID, as a string with CRC.
:type uid_from: ``bigint``
:param uid_from:
Filter by source account UID.
:type uid_to: ``bigint``
:param uid_to:
Filter by destination account UID.
:type gid: ``alphanumeric(32)``
:param gid:
Filter by voucher Group ID. GID is localized to `uid_from`.
:type valid_after: ``datetime``/``dict``
:param valid_after:
Voucher has to be valid after this timestamp. Absolute
(``datetime``) or relative (``dict``) timestamps are accepted. Valid
keys for relative timestamp dictionary are same as keyword arguments
for `datetime.timedelta` (``days``, ``seconds``, ``minutes``,
``hours``, ``weeks``).
:type valid_before: ``datetime``/``dict``
:param valid_before:
Voucher was valid until this timestamp (for format, see the
`valid_after` above).
:type last: ``bigint``
:param last:
The number of newest vouchers (that satisfy all other criteria) to
return.
:type first: ``bigint``
:param first:
The number of oldest vouchers (that satisfy all other criteria) to
return.
:note:
If `first` or `last` are used, the vouchers list is sorted by time
created, otherwise it is sorted alphabetically by `vid_encoded`.
:rtype: ``list``/``dict``
:returns:
A list of voucher description dictionaries. If `vid_encoded` is
specified, a single dictionary is returned instead of a list.
:raises GeneralException:
:resource:
``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]``
``[/valid_after=<valid_after>][/valid_before=<valid_before>]``
``[/last=<last>][/first=<first>]``
:access: authorized users (ACL flag: ``voucher.get``) |
def make_stream_tls_features(self, stream, features):
"""Update the <features/> element with StartTLS feature.
        [receiving entity only]
:Parameters:
- `features`: the <features/> element of the stream.
:Types:
- `features`: :etree:`ElementTree.Element`
        :returns: updated <features/> element.
:returntype: :etree:`ElementTree.Element`
"""
if self.stream and stream is not self.stream:
raise ValueError("Single StreamTLSHandler instance can handle"
" only one stream")
self.stream = stream
if self.settings["starttls"] and not stream.tls_established:
tls = ElementTree.SubElement(features, STARTTLS_TAG)
if self.settings["tls_require"]:
ElementTree.SubElement(tls, REQUIRED_TAG)
return features | Update the <features/> element with StartTLS feature.
        [receiving entity only]
:Parameters:
- `features`: the <features/> element of the stream.
:Types:
- `features`: :etree:`ElementTree.Element`
        :returns: updated <features/> element.
:returntype: :etree:`ElementTree.Element` |
def missing_pids(self):
"""Filter persistent identifiers."""
missing = []
for p in self.pids:
try:
PersistentIdentifier.get(p.pid_type, p.pid_value)
except PIDDoesNotExistError:
missing.append(p)
        return missing | Return the persistent identifiers that do not exist in the PID store.
def filter_step(G, covY, pred, yt):
"""Filtering step of Kalman filter.
Parameters
----------
G: (dy, dx) numpy array
mean of Y_t | X_t is G * X_t
    covY: (dy, dy) numpy array
        covariance of Y_t | X_t
pred: MeanAndCov object
predictive distribution at time t
Returns
-------
    filt: MeanAndCov object
filtering distribution at time t
logpyt: float
log density of Y_t | Y_{0:t-1}
"""
# data prediction
data_pred_mean = np.matmul(pred.mean, G.T)
data_pred_cov = dotdot(G, pred.cov, G.T) + covY
if covY.shape[0] == 1:
logpyt = dists.Normal(loc=data_pred_mean,
scale=np.sqrt(data_pred_cov)).logpdf(yt)
else:
logpyt = dists.MvNormal(loc=data_pred_mean,
cov=data_pred_cov).logpdf(yt)
# filter
residual = yt - data_pred_mean
gain = dotdot(pred.cov, G.T, inv(data_pred_cov))
filt_mean = pred.mean + np.matmul(residual, gain.T)
filt_cov = pred.cov - dotdot(gain, G, pred.cov)
return MeanAndCov(mean=filt_mean, cov=filt_cov), logpyt | Filtering step of Kalman filter.
Parameters
----------
G: (dy, dx) numpy array
mean of Y_t | X_t is G * X_t
    covY: (dy, dy) numpy array
        covariance of Y_t | X_t
pred: MeanAndCov object
predictive distribution at time t
Returns
-------
    filt: MeanAndCov object
filtering distribution at time t
logpyt: float
log density of Y_t | Y_{0:t-1} |
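The same filtering update written out with plain numpy for a scalar state and observation, as a self-contained illustration of the equations; MeanAndCov, dists and dotdot from the original module are replaced by explicit arrays and matrix products here.

import numpy as np

G = np.array([[1.0]])           # observation matrix (dy=1, dx=1)
covY = np.array([[0.5]])        # observation noise covariance
pred_mean = np.array([[0.0]])   # predictive mean at time t
pred_cov = np.array([[2.0]])    # predictive covariance at time t
yt = np.array([1.3])            # observation

data_pred_mean = pred_mean @ G.T
data_pred_cov = G @ pred_cov @ G.T + covY
gain = pred_cov @ G.T @ np.linalg.inv(data_pred_cov)
filt_mean = pred_mean + (yt - data_pred_mean) @ gain.T
filt_cov = pred_cov - gain @ G @ pred_cov
print(filt_mean, filt_cov)      # Kalman-filtered mean and covariance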
def replace_namespaced_custom_object_scale(self, group, version, namespace, plural, name, body, **kwargs): # noqa: E501
"""replace_namespaced_custom_object_scale # noqa: E501
replace scale of the specified namespace scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_custom_object_scale(group, version, namespace, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_custom_object_scale_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
else:
(data) = self.replace_namespaced_custom_object_scale_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
return data | replace_namespaced_custom_object_scale # noqa: E501
replace scale of the specified namespace scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_custom_object_scale(group, version, namespace, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread. |
def ReadHuntCounters(self, hunt_id):
"""Reads hunt counters."""
num_clients = self.CountHuntFlows(hunt_id)
num_successful_clients = self.CountHuntFlows(
hunt_id, filter_condition=db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY)
num_failed_clients = self.CountHuntFlows(
hunt_id, filter_condition=db.HuntFlowsCondition.FAILED_FLOWS_ONLY)
num_clients_with_results = len(
set(r[0].client_id
for r in self.flow_results.values()
if r and r[0].hunt_id == hunt_id))
num_crashed_clients = self.CountHuntFlows(
hunt_id, filter_condition=db.HuntFlowsCondition.CRASHED_FLOWS_ONLY)
num_results = self.CountHuntResults(hunt_id)
total_cpu_seconds = 0
total_network_bytes_sent = 0
for f in self.ReadHuntFlows(hunt_id, 0, sys.maxsize):
total_cpu_seconds += (
f.cpu_time_used.user_cpu_time + f.cpu_time_used.system_cpu_time)
total_network_bytes_sent += f.network_bytes_sent
return db.HuntCounters(
num_clients=num_clients,
num_successful_clients=num_successful_clients,
num_failed_clients=num_failed_clients,
num_clients_with_results=num_clients_with_results,
num_crashed_clients=num_crashed_clients,
num_results=num_results,
total_cpu_seconds=total_cpu_seconds,
total_network_bytes_sent=total_network_bytes_sent) | Reads hunt counters. |
def get_item_hrefs(result_collection):
"""
Given a result_collection (returned by a previous API call that
returns a collection, like get_bundle_list() or search()), return a
list of item hrefs.
'result_collection' a JSON object returned by a previous API
call.
Returns a list, which may be empty if no items were found.
"""
# Argument error checking.
assert result_collection is not None
result = []
links = result_collection.get('_links')
if links is not None:
items = links.get('items')
if items is not None:
for item in items:
result.append(item.get('href'))
return result | Given a result_collection (returned by a previous API call that
returns a collection, like get_bundle_list() or search()), return a
list of item hrefs.
'result_collection' a JSON object returned by a previous API
call.
Returns a list, which may be empty if no items were found. |
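A quick check of the helper above, assuming it is importable; the dictionary mimics the '_links'/'items' layout described in the docstring and the href values are made up for illustration.

sample = {
    '_links': {
        'items': [
            {'href': 'https://api.example.com/bundles/1'},
            {'href': 'https://api.example.com/bundles/2'},
        ]
    }
}
print(get_item_hrefs(sample))
# ['https://api.example.com/bundles/1', 'https://api.example.com/bundles/2']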
def get_url(self, url, dest, makedirs=False, saltenv='base',
no_cache=False, cachedir=None, source_hash=None):
'''
Get a single file from a URL.
'''
url_data = urlparse(url)
url_scheme = url_data.scheme
url_path = os.path.join(
url_data.netloc, url_data.path).rstrip(os.sep)
# If dest is a directory, rewrite dest with filename
if dest is not None \
and (os.path.isdir(dest) or dest.endswith(('/', '\\'))):
if url_data.query or len(url_data.path) > 1 and not url_data.path.endswith('/'):
strpath = url.split('/')[-1]
else:
strpath = 'index.html'
if salt.utils.platform.is_windows():
strpath = salt.utils.path.sanitize_win_path(strpath)
dest = os.path.join(dest, strpath)
if url_scheme and url_scheme.lower() in string.ascii_lowercase:
url_path = ':'.join((url_scheme, url_path))
url_scheme = 'file'
if url_scheme in ('file', ''):
# Local filesystem
if not os.path.isabs(url_path):
raise CommandExecutionError(
'Path \'{0}\' is not absolute'.format(url_path)
)
if dest is None:
with salt.utils.files.fopen(url_path, 'rb') as fp_:
data = fp_.read()
return data
return url_path
if url_scheme == 'salt':
result = self.get_file(url, dest, makedirs, saltenv, cachedir=cachedir)
if result and dest is None:
with salt.utils.files.fopen(result, 'rb') as fp_:
data = fp_.read()
return data
return result
if dest:
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
if makedirs:
os.makedirs(destdir)
else:
return ''
elif not no_cache:
dest = self._extrn_path(url, saltenv, cachedir=cachedir)
if source_hash is not None:
try:
source_hash = source_hash.split('=')[-1]
form = salt.utils.files.HASHES_REVMAP[len(source_hash)]
if salt.utils.hashutils.get_hash(dest, form) == source_hash:
log.debug(
'Cached copy of %s (%s) matches source_hash %s, '
'skipping download', url, dest, source_hash
)
return dest
except (AttributeError, KeyError, IOError, OSError):
pass
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if url_data.scheme == 's3':
try:
def s3_opt(key, default=None):
'''
Get value of s3.<key> from Minion config or from Pillar
'''
if 's3.' + key in self.opts:
return self.opts['s3.' + key]
try:
return self.opts['pillar']['s3'][key]
except (KeyError, TypeError):
return default
self.utils['s3.query'](method='GET',
bucket=url_data.netloc,
path=url_data.path[1:],
return_bin=False,
local_file=dest,
action=None,
key=s3_opt('key'),
keyid=s3_opt('keyid'),
service_url=s3_opt('service_url'),
verify_ssl=s3_opt('verify_ssl', True),
location=s3_opt('location'),
path_style=s3_opt('path_style', False),
https_enable=s3_opt('https_enable', True))
return dest
except Exception as exc:
raise MinionError(
'Could not fetch from {0}. Exception: {1}'.format(url, exc)
)
if url_data.scheme == 'ftp':
try:
ftp = ftplib.FTP()
ftp.connect(url_data.hostname, url_data.port)
ftp.login(url_data.username, url_data.password)
remote_file_path = url_data.path.lstrip('/')
with salt.utils.files.fopen(dest, 'wb') as fp_:
ftp.retrbinary('RETR {0}'.format(remote_file_path), fp_.write)
ftp.quit()
return dest
except Exception as exc:
raise MinionError('Could not retrieve {0} from FTP server. Exception: {1}'.format(url, exc))
if url_data.scheme == 'swift':
try:
def swift_opt(key, default):
'''
Get value of <key> from Minion config or from Pillar
'''
if key in self.opts:
return self.opts[key]
try:
return self.opts['pillar'][key]
except (KeyError, TypeError):
return default
swift_conn = SaltSwift(swift_opt('keystone.user', None),
swift_opt('keystone.tenant', None),
swift_opt('keystone.auth_url', None),
swift_opt('keystone.password', None))
swift_conn.get_object(url_data.netloc,
url_data.path[1:],
dest)
return dest
except Exception:
raise MinionError('Could not fetch from {0}'.format(url))
get_kwargs = {}
if url_data.username is not None \
and url_data.scheme in ('http', 'https'):
netloc = url_data.netloc
at_sign_pos = netloc.rfind('@')
if at_sign_pos != -1:
netloc = netloc[at_sign_pos + 1:]
fixed_url = urlunparse(
(url_data.scheme, netloc, url_data.path,
url_data.params, url_data.query, url_data.fragment))
get_kwargs['auth'] = (url_data.username, url_data.password)
else:
fixed_url = url
destfp = None
try:
# Tornado calls streaming_callback on redirect response bodies.
# But we need streaming to support fetching large files (> RAM
# avail). Here we are working around this by disabling recording
# the body for redirections. The issue is fixed in Tornado 4.3.0
# so on_header callback could be removed when we'll deprecate
# Tornado<4.3.0. See #27093 and #30431 for details.
# Use list here to make it writable inside the on_header callback.
# Simple bool doesn't work here: on_header creates a new local
# variable instead. This could be avoided in Py3 with 'nonlocal'
# statement. There is no Py2 alternative for this.
#
# write_body[0] is used by the on_chunk callback to tell it whether
# or not we need to write the body of the request to disk. For
# 30x redirects we set this to False because we don't want to
# write the contents to disk, as we will need to wait until we
# get to the redirected URL.
#
# write_body[1] will contain a tornado.httputil.HTTPHeaders
# instance that we will use to parse each header line. We
# initialize this to False, and after we parse the status line we
# will replace it with the HTTPHeaders instance. If/when we have
# found the encoding used in the request, we set this value to
# False to signify that we are done parsing.
#
# write_body[2] is where the encoding will be stored
write_body = [None, False, None]
def on_header(hdr):
if write_body[1] is not False and write_body[2] is None:
if not hdr.strip() and 'Content-Type' not in write_body[1]:
# If write_body[0] is True, then we are not following a
# redirect (initial response was a 200 OK). So there is
# no need to reset write_body[0].
if write_body[0] is not True:
# We are following a redirect, so we need to reset
# write_body[0] so that we properly follow it.
write_body[0] = None
# We don't need the HTTPHeaders object anymore
write_body[1] = False
return
# Try to find out what content type encoding is used if
# this is a text file
write_body[1].parse_line(hdr) # pylint: disable=no-member
if 'Content-Type' in write_body[1]:
content_type = write_body[1].get('Content-Type') # pylint: disable=no-member
if not content_type.startswith('text'):
write_body[1] = write_body[2] = False
else:
encoding = 'utf-8'
fields = content_type.split(';')
for field in fields:
if 'encoding' in field:
encoding = field.split('encoding=')[-1]
write_body[2] = encoding
# We have found our encoding. Stop processing headers.
write_body[1] = False
# If write_body[0] is False, this means that this
# header is a 30x redirect, so we need to reset
# write_body[0] to None so that we parse the HTTP
# status code from the redirect target. Additionally,
# we need to reset write_body[2] so that we inspect the
# headers for the Content-Type of the URL we're
# following.
if write_body[0] is write_body[1] is False:
write_body[0] = write_body[2] = None
# Check the status line of the HTTP request
if write_body[0] is None:
try:
hdr = parse_response_start_line(hdr)
except HTTPInputError:
# Not the first line, do nothing
return
write_body[0] = hdr.code not in [301, 302, 303, 307]
write_body[1] = HTTPHeaders()
if no_cache:
result = []
def on_chunk(chunk):
if write_body[0]:
if write_body[2]:
chunk = chunk.decode(write_body[2])
result.append(chunk)
else:
dest_tmp = u"{0}.part".format(dest)
# We need an open filehandle to use in the on_chunk callback,
# that's why we're not using a with clause here.
destfp = salt.utils.files.fopen(dest_tmp, 'wb') # pylint: disable=resource-leakage
def on_chunk(chunk):
if write_body[0]:
destfp.write(chunk)
query = salt.utils.http.query(
fixed_url,
stream=True,
streaming_callback=on_chunk,
header_callback=on_header,
username=url_data.username,
password=url_data.password,
opts=self.opts,
**get_kwargs
)
if 'handle' not in query:
raise MinionError('Error: {0} reading {1}'.format(query['error'], url))
if no_cache:
if write_body[2]:
return ''.join(result)
return b''.join(result)
else:
destfp.close()
destfp = None
salt.utils.files.rename(dest_tmp, dest)
return dest
except HTTPError as exc:
raise MinionError('HTTP error {0} reading {1}: {3}'.format(
exc.code,
url,
*BaseHTTPServer.BaseHTTPRequestHandler.responses[exc.code]))
except URLError as exc:
raise MinionError('Error reading {0}: {1}'.format(url, exc.reason))
finally:
if destfp is not None:
destfp.close() | Get a single file from a URL. |
def _calc_delta(self,ensemble,scaling_matrix=None):
'''
calc the scaled ensemble differences from the mean
'''
mean = np.array(ensemble.mean(axis=0))
delta = ensemble.as_pyemu_matrix()
for i in range(ensemble.shape[0]):
delta.x[i,:] -= mean
if scaling_matrix is not None:
delta = scaling_matrix * delta.T
delta *= (1.0 / np.sqrt(float(ensemble.shape[0] - 1.0)))
return delta | calc the scaled ensemble differences from the mean |
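The core of the computation restated with plain numpy arrays as a sketch (the optional scaling_matrix branch, which also transposes the result, is omitted): subtract the ensemble mean from each member, then scale the anomalies by 1/sqrt(N-1).

import numpy as np

ens = np.array([[1.0, 2.0],
                [3.0, 2.0],
                [2.0, 5.0]])                      # 3 members, 2 parameters
delta = (ens - ens.mean(axis=0)) / np.sqrt(ens.shape[0] - 1.0)
print(delta)                                      # scaled anomaly matrix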
def SubmitJob(self, *params, **kw):
"""Asynchronously execute the specified GP task. This will return a
Geoprocessing Job object. Parameters are passed in either in order
or as keywords."""
fp = self.__expandparamstodict(params, kw)
return self._get_subfolder('submitJob/', GPJob, fp)._jobstatus | Asynchronously execute the specified GP task. This will return a
Geoprocessing Job object. Parameters are passed in either in order
or as keywords. |
def extract(self, start, end):
"""Extracts the selected time frame as a new object.
:param int start: Start time.
:param int end: End time.
:returns: class:`pympi.Elan.Eaf` object containing the extracted frame.
"""
from copy import deepcopy
eaf_out = deepcopy(self)
for t in eaf_out.get_tier_names():
for ab, ae, value in eaf_out.get_annotation_data_for_tier(t):
if ab > end or ae < start:
                    # use the annotation's own midpoint when removing it
                    eaf_out.remove_annotation(t, (ab + ae) // 2, False)
eaf_out.clean_time_slots()
return eaf_out | Extracts the selected time frame as a new object.
:param int start: Start time.
:param int end: End time.
:returns: class:`pympi.Elan.Eaf` object containing the extracted frame. |
def index(in_bam, config, check_timestamp=True):
"""Index a BAM file, skipping if index present.
Centralizes BAM indexing providing ability to switch indexing approaches.
"""
assert is_bam(in_bam), "%s in not a BAM file" % in_bam
index_file = "%s.bai" % in_bam
alt_index_file = "%s.bai" % os.path.splitext(in_bam)[0]
if check_timestamp:
bai_exists = utils.file_uptodate(index_file, in_bam) or utils.file_uptodate(alt_index_file, in_bam)
else:
bai_exists = utils.file_exists(index_file) or utils.file_exists(alt_index_file)
if not bai_exists:
# Remove old index files and re-run to prevent linking into tx directory
for fname in [index_file, alt_index_file]:
utils.remove_safe(fname)
samtools = config_utils.get_program("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, index_file) as tx_index_file:
cmd = "{samtools} index -@ {num_cores} {in_bam} {tx_index_file}"
do.run(cmd.format(**locals()), "Index BAM file: %s" % os.path.basename(in_bam))
return index_file if utils.file_exists(index_file) else alt_index_file | Index a BAM file, skipping if index present.
Centralizes BAM indexing providing ability to switch indexing approaches. |
def _create_update_from_file(mode='create', uuid=None, path=None):
'''
Create vm from file
'''
ret = {}
    if path is None or not os.path.isfile(path):
        ret['Error'] = 'File ({0}) does not exist!'.format(path)
return ret
# vmadm validate create|update [-f <filename>]
cmd = 'vmadm validate {mode} {brand} -f {path}'.format(
mode=mode,
brand=get(uuid)['brand'] if uuid is not None else '',
path=path
)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = _exit_status(retcode)
if 'stderr' in res:
if res['stderr'][0] == '{':
ret['Error'] = salt.utils.json.loads(res['stderr'])
else:
ret['Error'] = res['stderr']
return ret
# vmadm create|update [-f <filename>]
cmd = 'vmadm {mode} {uuid} -f {path}'.format(
mode=mode,
uuid=uuid if uuid is not None else '',
path=path
)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = _exit_status(retcode)
if 'stderr' in res:
if res['stderr'][0] == '{':
ret['Error'] = salt.utils.json.loads(res['stderr'])
else:
ret['Error'] = res['stderr']
return ret
else:
if res['stderr'].startswith('Successfully created VM'):
return res['stderr'][24:]
return True | Create vm from file |
def watt_m(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False):
"""Compute Watterson's M (M).
.. image:: /pictures/M.png
**Range:** -1 ≤ M < 1, does not indicate bias, larger is better.
**Notes:**
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
Watterson's M value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.watt_m(sim, obs)
0.8307913876595929
References
----------
- Watterson, I.G., 1996. Non‐dimensional measures of climate model performance. International
Journal of Climatology 16(4) 379-391.
"""
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
a = 2 / np.pi
b = np.mean((simulated_array - observed_array) ** 2) # MSE
c = np.std(observed_array, ddof=1) ** 2 + np.std(simulated_array, ddof=1) ** 2
e = (np.mean(simulated_array) - np.mean(observed_array)) ** 2
f = c + e
return a * np.arcsin(1 - (b / f)) | Compute Watterson's M (M).
.. image:: /pictures/M.png
**Range:** -1 ≤ M < 1, does not indicate bias, larger is better.
**Notes:**
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
Watterson's M value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.watt_m(sim, obs)
0.8307913876595929
References
----------
- Watterson, I.G., 1996. Non‐dimensional measures of climate model performance. International
Journal of Climatology 16(4) 379-391. |
def lightcurve_moments(ftimes, fmags, ferrs):
'''This calculates the weighted mean, stdev, median, MAD, percentiles, skew,
kurtosis, fraction of LC beyond 1-stdev, and IQR.
Parameters
----------
ftimes,fmags,ferrs : np.array
The input mag/flux time-series with all non-finite elements removed.
Returns
-------
dict
A dict with all of the light curve moments calculated.
'''
ndet = len(fmags)
if ndet > 9:
# now calculate the various things we need
series_median = npmedian(fmags)
series_wmean = (
npsum(fmags*(1.0/(ferrs*ferrs)))/npsum(1.0/(ferrs*ferrs))
)
series_mad = npmedian(npabs(fmags - series_median))
series_stdev = 1.483*series_mad
series_skew = spskew(fmags)
series_kurtosis = spkurtosis(fmags)
# get the beyond1std fraction
series_above1std = len(fmags[fmags > (series_median + series_stdev)])
series_below1std = len(fmags[fmags < (series_median - series_stdev)])
# this is the fraction beyond 1 stdev
series_beyond1std = (series_above1std + series_below1std)/float(ndet)
# get the magnitude percentiles
series_mag_percentiles = nppercentile(
fmags,
[5.0,10,17.5,25,32.5,40,60,67.5,75,82.5,90,95]
)
return {
'median':series_median,
'wmean':series_wmean,
'mad':series_mad,
'stdev':series_stdev,
'skew':series_skew,
'kurtosis':series_kurtosis,
'beyond1std':series_beyond1std,
'mag_percentiles':series_mag_percentiles,
'mag_iqr': series_mag_percentiles[8] - series_mag_percentiles[3],
}
else:
LOGERROR('not enough detections in this magseries '
'to calculate light curve moments')
return None | This calculates the weighted mean, stdev, median, MAD, percentiles, skew,
kurtosis, fraction of LC beyond 1-stdev, and IQR.
Parameters
----------
ftimes,fmags,ferrs : np.array
The input mag/flux time-series with all non-finite elements removed.
Returns
-------
dict
A dict with all of the light curve moments calculated. |
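A standalone spot-check of the same robust statistics on synthetic magnitudes, using numpy and scipy directly rather than the module's np*/sp* aliases.

import numpy as np
from scipy.stats import skew, kurtosis

fmags = np.random.default_rng(42).normal(12.0, 0.05, 200)
med = np.median(fmags)
mad = np.median(np.abs(fmags - med))
stdev = 1.483 * mad
beyond1std = np.mean(np.abs(fmags - med) > stdev)
print(med, mad, stdev, beyond1std, skew(fmags), kurtosis(fmags))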
def _all_get_table_col(self, key, column, fullname):
""" Creates a pytables column instance.
The type of column depends on the type of `column[0]`.
Note that data in `column` must be homogeneous!
"""
val = column[0]
try:
            # We do not want to lose int_
if type(val) is int:
return pt.IntCol()
if isinstance(val, (str, bytes)):
itemsize = int(self._prm_get_longest_stringsize(column))
return pt.StringCol(itemsize)
if isinstance(val, np.ndarray):
if (np.issubdtype(val.dtype, str) or
np.issubdtype(val.dtype, bytes)):
itemsize = int(self._prm_get_longest_stringsize(column))
return pt.StringCol(itemsize, shape=val.shape)
else:
return pt.Col.from_dtype(np.dtype((val.dtype, val.shape)))
else:
return pt.Col.from_dtype(np.dtype(type(val)))
except Exception:
self._logger.error('Failure in storing `%s` of Parameter/Result `%s`.'
' Its type was `%s`.' % (key, fullname, repr(type(val))))
raise | Creates a pytables column instance.
The type of column depends on the type of `column[0]`.
Note that data in `column` must be homogeneous! |
def _send(self, key, value, metric_type):
"""Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str key: The key name to send
        :param int or float value: The value for the key
        :param str metric_type: The statsd metric type token to send
"""
try:
payload = self._build_payload(key, value, metric_type)
LOGGER.debug('Sending statsd payload: %r', payload)
self._socket.sendto(payload.encode('utf-8'), self._address)
except socket.error: # pragma: nocover
LOGGER.exception('Error sending statsd metric') | Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str key: The key name to send
        :param int or float value: The value for the key
        :param str metric_type: The statsd metric type token to send
def mdaArray(arry, dtype=numpy.float, mask=None):
"""
Array constructor for masked distributed array
@param arry numpy-like array
@param mask mask array (or None if all data elements are valid)
"""
a = numpy.array(arry, dtype)
res = MaskedDistArray(a.shape, a.dtype)
res[:] = a
res.mask = mask
return res | Array constructor for masked distributed array
    @param arry numpy-like array
    @param dtype numpy data type of the array elements
    @param mask mask array (or None if all data elements are valid)
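For comparison, the same masking idea with numpy's built-in masked arrays; the distributed-array behaviour of MaskedDistArray is not reproduced in this sketch.

import numpy as np

data = np.array([[1.0, 2.0],
                 [3.0, 4.0]])
mask = np.array([[False, True],
                 [False, False]])
ma = np.ma.MaskedArray(data, mask=mask)
print(ma.mean())        # mean over the unmasked elements only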
def main():
'''
Main part of command line utility
'''
arguments = docopt.docopt(__doc__, version='Naval Fate 2.0')
if arguments['show_diag']:
diag.show()
if arguments['show_reporting']:
diag.reporting()
diag.show()
if arguments['ping_couchdb']:
try:
if couchdb.ping():
print 'OK'
else:
print 'KO'
except:
print 'KO'
if arguments['get_admin']:
(username, password) = couchdb.get_admin()
print 'Username: {}'.format(username)
print 'Password: {}'.format(password)
if arguments['get_couchdb_admins']:
admins = couchdb.get_couchdb_admins()
print 'CouchDB admins:'
for admin in admins:
print '- {}'.format(admin)
if arguments['delete_token']:
couchdb.delete_token()
if arguments['create_token']:
print couchdb.create_token()
if arguments['create_cozy_db']:
if arguments['--name']:
db_name = arguments.get('<name>', 'cozy')
else:
db_name = 'cozy'
couchdb.create_cozy_db(db_name)
print '{} DB is ready'.format(db_name)
if arguments['reset_token']:
couchdb.reset_token()
print 'New tokens:'
print couchdb.get_admin()[0]
if arguments['get_cozy_param']:
print couchdb.get_cozy_param(arguments['<name>'])
if arguments['normalize_cert_dir']:
ssl.normalize_cert_dir()
if arguments['get_crt_common_name']:
filename = arguments['<filename>']
if filename:
print ssl.get_crt_common_name(filename)
else:
print ssl.get_crt_common_name()
if arguments['clean_links']:
ssl.clean_links()
if arguments['make_links']:
ssl.make_links(arguments['<common_name>'])
if arguments['generate_certificate']:
common_name = arguments['<common_name>']
if arguments['--size']:
key_size = int(arguments['<size>'])
else:
key_size = ssl.DEFAULT_KEY_SIZE
print 'Generate certificate for {} with {} key size'.format(common_name, key_size)
ssl.generate_certificate(common_name, key_size)
if arguments['sign_certificate']:
common_name = arguments['<common_name>']
print "Sign certificate for {} with Let's Encrypt".format(common_name)
ssl.acme_sign_certificate(common_name)
if arguments['renew_certificates']:
ssl.acme_renew_certificates()
if arguments['compare_version']:
current = arguments['<current>']
operator = arguments['<operator>']
reference = arguments['<reference>']
compare_version.compare(current, operator, reference)
if arguments['is_cozy_registered']:
print couchdb.is_cozy_registered()
if arguments['unregister_cozy']:
couchdb.unregister_cozy()
if arguments['fix_oom_scores']:
process.fix_oom_scores()
if arguments['get_oom_scores']:
process.get_oom_scores()
if arguments['rebuild_app']:
if arguments['--not-force']:
force = False
else:
force = True
if arguments['--restart']:
restart = True
else:
restart = False
migration.rebuild_app(arguments['<app>'], force=force, restart=restart)
if arguments['rebuild_all_apps']:
if arguments['--not-force']:
force = False
else:
force = True
if arguments['--restart']:
restart = True
else:
restart = False
migration.rebuild_all_apps(force=force, restart=restart)
if arguments['migrate_2_node4']:
migration.migrate_2_node4()
if arguments['install_requirements']:
migration.install_requirements()
if arguments['install_cozy']:
migration.install_cozy()
if arguments['wait_couchdb']:
helpers.wait_couchdb()
if arguments['wait_cozy_stack']:
helpers.wait_cozy_stack()
if arguments['check_lsb_codename']:
sys.exit(diag.check_lsb_codename())
if arguments['emulate_smtp']:
ip = '127.0.0.1'
port = '25'
if arguments['--bind']:
ip = arguments['<ip>']
if arguments['--port']:
if arguments['<port>']: # a bug in docopt?
port = arguments['<port>']
else:
port = arguments['<ip>']
print 'Emulate SMTP server on {}:{}'.format(ip, port)
smtpd.DebuggingServer(tuple([ip, int(port)]), None)
asyncore.loop()
if arguments['backup']:
if arguments['<backup_filename>']:
backup_filename = arguments['<backup_filename>']
else:
backup_filename = None
backup.backup(backup_filename)
if arguments['restore']:
backup.restore(arguments['<backup_filename>'])
if arguments['install_weboob'] or arguments['update_weboob']:
weboob.install()
if arguments['update_weboob_modules']:
weboob.update() | Main part of command line utility |
def export_users(self, body):
"""Export all users to a file using a long running job.
Check job status with get(). URL pointing to the export file will be
included in the status once the job is complete.
Args:
body (dict): Please see: https://auth0.com/docs/api/management/v2#!/Jobs/post_users_exports
"""
return self.client.post(self._url('users-exports'), data=body) | Export all users to a file using a long running job.
Check job status with get(). URL pointing to the export file will be
included in the status once the job is complete.
Args:
body (dict): Please see: https://auth0.com/docs/api/management/v2#!/Jobs/post_users_exports |
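A hedged usage sketch with the Auth0 management client; the domain, token, connection id and field list are placeholders, and the job body follows the endpoint documented in the docstring.

from auth0.v3.management import Auth0

auth0 = Auth0('YOUR_DOMAIN.auth0.com', 'MGMT_API_TOKEN')   # placeholders
job = auth0.jobs.export_users({
    'connection_id': 'con_xxxxxxxxxxxxxxxx',
    'format': 'csv',
    'fields': [{'name': 'email'}, {'name': 'user_id'}],
})
print(job['id'])   # poll this id with auth0.jobs.get() until the export completes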
def expireat(self, key, when):
"""Emulate expireat"""
expire_time = datetime.fromtimestamp(when)
key = self._encode(key)
if key in self.redis:
self.timeouts[key] = expire_time
return True
return False | Emulate expireat |
def get_pdos(dos, lm_orbitals=None, atoms=None, elements=None):
"""Extract the projected density of states from a CompleteDos object.
Args:
dos (:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The
density of states.
elements (:obj:`dict`, optional): The elements and orbitals to extract
from the projected density of states. Should be provided as a
:obj:`dict` with the keys as the element names and corresponding
values as a :obj:`tuple` of orbitals. For example, the following
would extract the Bi s, px, py and d orbitals::
{'Bi': ('s', 'px', 'py', 'd')}
If an element is included with an empty :obj:`tuple`, all orbitals
for that species will be extracted. If ``elements`` is not set or
set to ``None``, all elements for all species will be extracted.
lm_orbitals (:obj:`dict`, optional): The orbitals to decompose into
their lm contributions (e.g. p -> px, py, pz). Should be provided
as a :obj:`dict`, with the elements names as keys and a
:obj:`tuple` of orbitals as the corresponding values. For example,
the following would be used to decompose the oxygen p and d
orbitals::
{'O': ('p', 'd')}
atoms (:obj:`dict`, optional): Which atomic sites to use when
calculating the projected density of states. Should be provided as
a :obj:`dict`, with the element names as keys and a :obj:`tuple` of
:obj:`int` specifying the atomic indices as the corresponding
values. The elemental projected density of states will be summed
only over the atom indices specified. If an element is included
with an empty :obj:`tuple`, then all sites for that element will
be included. The indices are 0 based for each element specified in
the POSCAR. For example, the following will calculate the density
of states for the first 4 Sn atoms and all O atoms in the
structure::
                {'Sn': (0, 1, 2, 3), 'O': ()}
If ``atoms`` is not set or set to ``None`` then all atomic sites
for all elements will be considered.
Returns:
dict: The projected density of states. Formatted as a :obj:`dict` of
:obj:`dict` mapping the elements and their orbitals to
:obj:`~pymatgen.electronic_structure.dos.Dos` objects. For example::
{
'Bi': {'s': Dos, 'p': Dos ... },
'S': {'s': Dos}
}
"""
if not elements:
symbols = dos.structure.symbol_set
elements = dict(zip(symbols, [None] * len(symbols)))
pdos = {}
for el in elements:
if atoms and el not in atoms:
continue
# select which sites to consider, if no sites were specified then
# select all. Make a list of the sites of particular elements first
# due to the dosplot atoms list specification (e.g. starts at 0 for
        # each element)
element_sites = [site for site in dos.structure.sites
if site.specie == get_el_sp(el)]
sites = [site for i, site in enumerate(element_sites)
if not atoms or (el in atoms and i in atoms[el])]
lm = lm_orbitals[el] if (lm_orbitals and el in lm_orbitals) else None
orbitals = elements[el] if elements and el in elements else None
pdos[el] = get_element_pdos(dos, el, sites, lm, orbitals)
return pdos | Extract the projected density of states from a CompleteDos object.
Args:
dos (:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The
density of states.
elements (:obj:`dict`, optional): The elements and orbitals to extract
from the projected density of states. Should be provided as a
:obj:`dict` with the keys as the element names and corresponding
values as a :obj:`tuple` of orbitals. For example, the following
would extract the Bi s, px, py and d orbitals::
{'Bi': ('s', 'px', 'py', 'd')}
If an element is included with an empty :obj:`tuple`, all orbitals
for that species will be extracted. If ``elements`` is not set or
set to ``None``, all elements for all species will be extracted.
lm_orbitals (:obj:`dict`, optional): The orbitals to decompose into
their lm contributions (e.g. p -> px, py, pz). Should be provided
as a :obj:`dict`, with the elements names as keys and a
:obj:`tuple` of orbitals as the corresponding values. For example,
the following would be used to decompose the oxygen p and d
orbitals::
{'O': ('p', 'd')}
atoms (:obj:`dict`, optional): Which atomic sites to use when
calculating the projected density of states. Should be provided as
a :obj:`dict`, with the element names as keys and a :obj:`tuple` of
:obj:`int` specifying the atomic indices as the corresponding
values. The elemental projected density of states will be summed
only over the atom indices specified. If an element is included
with an empty :obj:`tuple`, then all sites for that element will
be included. The indices are 0 based for each element specified in
the POSCAR. For example, the following will calculate the density
of states for the first 4 Sn atoms and all O atoms in the
structure::
                {'Sn': (0, 1, 2, 3), 'O': ()}
If ``atoms`` is not set or set to ``None`` then all atomic sites
for all elements will be considered.
Returns:
dict: The projected density of states. Formatted as a :obj:`dict` of
:obj:`dict` mapping the elements and their orbitals to
:obj:`~pymatgen.electronic_structure.dos.Dos` objects. For example::
{
'Bi': {'s': Dos, 'p': Dos ... },
'S': {'s': Dos}
} |
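A hedged usage sketch: it assumes this function is importable from sumo and that a VASP vasprun.xml with projections is on disk; Vasprun.complete_dos is standard pymatgen API.

from pymatgen.io.vasp.outputs import Vasprun

dos = Vasprun('vasprun.xml', parse_dos=True).complete_dos
pdos = get_pdos(dos,
                elements={'Sn': (), 'O': ('s', 'p')},
                lm_orbitals={'O': ('p',)})
print(pdos['O'].keys())   # orbital labels mapped to Dos objects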
def default_antenna1(self, context):
""" Default antenna1 values """
ant1, ant2 = default_base_ant_pairs(self, context)
(tl, tu), (bl, bu) = context.dim_extents('ntime', 'nbl')
ant1_result = np.empty(context.shape, context.dtype)
ant1_result[:,:] = ant1[np.newaxis,bl:bu]
return ant1_result | Default antenna1 values |
def compile(self, pretty=True):
""" Compile all code and return a dict {name: code} where the keys
are determined by the keyword arguments passed to __init__().
Parameters
----------
pretty : bool
If True, use a slower method to mangle object names. This produces
GLSL that is more readable.
If False, then the output is mostly unreadable GLSL, but is about
10x faster to compile.
"""
# Authoritative mapping of {obj: name}
self._object_names = {}
#
# 1. collect list of dependencies for each shader
#
# maps {shader_name: [deps]}
self._shader_deps = {}
for shader_name, shader in self.shaders.items():
this_shader_deps = []
self._shader_deps[shader_name] = this_shader_deps
dep_set = set()
for dep in shader.dependencies(sort=True):
# visit each object no more than once per shader
if dep.name is None or dep in dep_set:
continue
this_shader_deps.append(dep)
dep_set.add(dep)
#
# 2. Assign names to all objects.
#
if pretty:
self._rename_objects_pretty()
else:
self._rename_objects_fast()
#
# 3. Now we have a complete namespace; concatenate all definitions
# together in topological order.
#
compiled = {}
obj_names = self._object_names
for shader_name, shader in self.shaders.items():
code = []
for dep in self._shader_deps[shader_name]:
dep_code = dep.definition(obj_names)
if dep_code is not None:
# strip out version pragma if present;
regex = r'#version (\d+)'
m = re.search(regex, dep_code)
if m is not None:
# check requested version
if m.group(1) != '120':
raise RuntimeError("Currently only GLSL #version "
"120 is supported.")
dep_code = re.sub(regex, '', dep_code)
code.append(dep_code)
compiled[shader_name] = '\n'.join(code)
self.code = compiled
return compiled | Compile all code and return a dict {name: code} where the keys
are determined by the keyword arguments passed to __init__().
Parameters
----------
pretty : bool
If True, use a slower method to mangle object names. This produces
GLSL that is more readable.
If False, then the output is mostly unreadable GLSL, but is about
10x faster to compile. |
def mtabstr2doestr(st1):
"""mtabstr2doestr"""
seperator = '$ =============='
alist = st1.split(seperator)
#this removes all the tabs that excel
#puts after the seperator and before the next line
for num in range(0, len(alist)):
alist[num] = alist[num].lstrip()
st2 = ''
    for num in range(0, len(alist)):
        blist = tabstr2list(alist[num])  # do not clobber the list being iterated
        st2 = st2 + list2doe(blist)
lss = st2.split('..')
mylib1.write_str2file('forfinal.txt', st2)#for debugging
print(len(lss))
st3 = tree2doe(st2)
lsss = st3.split('..')
print(len(lsss))
    return st3 | Convert a multi-table tab-delimited string into a DOE text string.
def get_pmids(self):
"""Get list of all PMIDs associated with edges in the network."""
pmids = []
for ea in self._edge_attributes.values():
edge_pmids = ea.get('pmids')
if edge_pmids:
pmids += edge_pmids
return list(set(pmids)) | Get list of all PMIDs associated with edges in the network. |
def valid_conkey(self, conkey):
"""Check that the conkey is a valid one. Return True if valid. A
condition key is valid if it is one in the _COND_PREFIXES
list. With the prefix removed, the remaining string must be
either a number or the empty string."""
for prefix in _COND_PREFIXES:
trailing = conkey.lstrip(prefix)
if trailing == '' and conkey: # conkey is not empty
return True
try:
int(trailing)
return True
except ValueError:
pass
return False | Check that the conkey is a valid one. Return True if valid. A
condition key is valid if it is one in the _COND_PREFIXES
list. With the prefix removed, the remaining string must be
either a number or the empty string. |
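A standalone sketch of the documented check with a made-up prefix list; note that the original relies on str.lstrip, which strips a character set rather than a literal prefix, so this version spells out the prefix test instead.

_COND_PREFIXES = ['ge', 'le', 'eq']      # stand-in for the module constant

def valid_conkey_demo(conkey):
    for prefix in _COND_PREFIXES:
        if not conkey.startswith(prefix):
            continue
        trailing = conkey[len(prefix):]
        if trailing == '' or trailing.isdigit():
            return True
    return False

print(valid_conkey_demo('ge10'), valid_conkey_demo('eq'), valid_conkey_demo('foo'))
# True True False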
def getBagTags(bagInfoPath):
"""
    Read the bag-info file and return its tags parsed from ANVL.
"""
try:
bagInfoString = open(bagInfoPath, "r").read().decode('utf-8')
except UnicodeDecodeError:
bagInfoString = open(bagInfoPath, "r").read().decode('iso-8859-1')
bagTags = anvl.readANVLString(bagInfoString)
    return bagTags | Read the bag-info file and return its tags parsed from ANVL.
def retention_period(self, value):
"""Set the retention period for items in the bucket.
:type value: int
:param value:
number of seconds to retain items after upload or release from
event-based lock.
:raises ValueError: if the bucket's retention policy is locked.
"""
policy = self._properties.setdefault("retentionPolicy", {})
if value is not None:
policy["retentionPeriod"] = str(value)
else:
policy = None
self._patch_property("retentionPolicy", policy) | Set the retention period for items in the bucket.
:type value: int
:param value:
number of seconds to retain items after upload or release from
event-based lock.
:raises ValueError: if the bucket's retention policy is locked. |
def matchingAnalyseIndexes(self, tokenJson):
'''Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate and returns a list of analyse indexes that correspond
to tokenJson[ANALYSIS] elements that are matching all the rules.
An empty list is returned if none of the analyses match (all the rules),
or (!) if none of the rules are describing the ANALYSIS part of the
token;
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token;
'''
matchingResults = self.matchingAnalyses(tokenJson)
if matchingResults:
indexes = [ tokenJson[ANALYSIS].index(analysis) for analysis in matchingResults ]
return indexes
return matchingResults | Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate and returns a list of analyse indexes that correspond
to tokenJson[ANALYSIS] elements that are matching all the rules.
An empty list is returned if none of the analyses match (all the rules),
or (!) if none of the rules are describing the ANALYSIS part of the
token;
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token; |
def form(value):
"""
Format numbers in a nice way.
>>> form(0)
'0'
>>> form(0.0)
'0.0'
>>> form(0.0001)
'1.000E-04'
>>> form(1003.4)
'1,003'
>>> form(103.4)
'103'
>>> form(9.3)
'9.30000'
>>> form(-1.2)
'-1.2'
"""
if isinstance(value, FLOAT + INT):
if value <= 0:
return str(value)
elif value < .001:
return '%.3E' % value
elif value < 10 and isinstance(value, FLOAT):
return '%.5f' % value
elif value > 1000:
return '{:,d}'.format(int(round(value)))
elif numpy.isnan(value):
return 'NaN'
else: # in the range 10-1000
return str(int(value))
elif isinstance(value, bytes):
return decode(value)
elif isinstance(value, str):
return value
elif isinstance(value, numpy.object_):
return str(value)
elif hasattr(value, '__len__') and len(value) > 1:
return ' '.join(map(form, value))
return str(value) | Format numbers in a nice way.
>>> form(0)
'0'
>>> form(0.0)
'0.0'
>>> form(0.0001)
'1.000E-04'
>>> form(1003.4)
'1,003'
>>> form(103.4)
'103'
>>> form(9.3)
'9.30000'
>>> form(-1.2)
'-1.2' |
def _visible(self, element):
"""Used to filter text elements that have invisible text on the page.
"""
if element.name in self._disallowed_names:
return False
elif re.match(u'<!--.*-->', six.text_type(element.extract())):
return False
return True | Used to filter text elements that have invisible text on the page. |
def putResult(self, result):
"""Register the *result* by putting it on all the output tubes."""
self._lock_prev_output.acquire()
for tube in self._tubes_result_output:
tube.put((result, 0))
self._lock_next_output.release() | Register the *result* by putting it on all the output tubes. |
def _random_subprocessors(self):
"""Produces an iterator of subprocessors. If there are fewer than
self._proc_limit subprocessors to consider (by knocking out a
minimal subset of working qubits incident to broken couplers),
we work exhaustively. Otherwise, we generate a random set of
``self._proc_limit`` subprocessors.
If the total number of possibilities is rather small, then we
deliberately pick a random minimum subset to avoid coincidences.
Otherwise, we give up on minimum, satisfy ourselves with minimal,
and randomly generate subprocessors with :func:`self._random_subprocessor`.
OUTPUT:
an iterator of eden_processor instances.
"""
if self._processors is not None:
return (p for p in self._processors)
elif 2**len(self._evil) <= 8 * self._proc_limit:
deletions = self._compute_all_deletions()
if len(deletions) > self._proc_limit:
deletions = sample(deletions, self._proc_limit)
return (self._subprocessor(d) for d in deletions)
else:
return (self._random_subprocessor() for i in range(self._proc_limit)) | Produces an iterator of subprocessors. If there are fewer than
self._proc_limit subprocessors to consider (by knocking out a
minimal subset of working qubits incident to broken couplers),
we work exhaustively. Otherwise, we generate a random set of
``self._proc_limit`` subprocessors.
If the total number of possibilities is rather small, then we
deliberately pick a random minimum subset to avoid coincidences.
Otherwise, we give up on minimum, satisfy ourselves with minimal,
and randomly generate subprocessors with :func:`self._random_subprocessor`.
OUTPUT:
an iterator of eden_processor instances. |
def add_child(self, child):
""" Add a child node """
if not isinstance(child, DependencyNode):
raise TypeError('"child" must be a DependencyNode')
self._children.append(child) | Add a child node |
def register_up(self):
"""Called by WorkerThread objects to register themselves.
Acquire the condition variable for the WorkerThread objects.
Increment the running-thread count. If we are the last thread to
start, set status to 'up'. This allows startall() to complete
if it was called with wait=True.
"""
with self.regcond:
self.runningcount += 1
tid = thread.get_ident()
self.tids.append(tid)
self.logger.debug("register_up: (%d) count is %d" %
(tid, self.runningcount))
if self.runningcount == self.numthreads:
self.status = 'up'
self.regcond.notify() | Called by WorkerThread objects to register themselves.
Acquire the condition variable for the WorkerThread objects.
Increment the running-thread count. If we are the last thread to
start, set status to 'up'. This allows startall() to complete
if it was called with wait=True. |
def add_obograph_digraph(self, og, node_type=None, predicates=None, xref_graph=None, logical_definitions=None,
property_chain_axioms=None,
parse_meta=True,
**args):
"""
Converts a single obograph to Digraph edges and adds to an existing networkx DiGraph
"""
digraph = self.digraph
logging.info("NODES: {}".format(len(og['nodes'])))
# if client passes an xref_graph we must parse metadata
if xref_graph is not None:
parse_meta = True
for n in og['nodes']:
is_obsolete = 'is_obsolete' in n and n['is_obsolete'] == 'true'
if is_obsolete:
continue
if node_type is not None and ('type' not in n or n['type'] != node_type):
continue
id = self.contract_uri(n['id'])
digraph.add_node(id, **n)
if 'lbl' in n:
digraph.node[id]['label'] = n['lbl']
if parse_meta and 'meta' in n:
if n['meta'] is None:
n['meta'] = {}
meta = self.transform_meta(n['meta'])
if xref_graph is not None and 'xrefs' in meta:
for x in meta['xrefs']:
xref_graph.add_edge(self.contract_uri(x['val']), id, source=id)
logging.info("EDGES: {}".format(len(og['edges'])))
for e in og['edges']:
sub = self.contract_uri(e['sub'])
obj = self.contract_uri(e['obj'])
pred = self.contract_uri(e['pred'])
pred = map_legacy_pred(pred)
if pred == 'is_a':
pred = 'subClassOf'
if predicates is None or pred in predicates:
digraph.add_edge(obj, sub, pred=pred)
if 'equivalentNodesSets' in og:
nslist = og['equivalentNodesSets']
logging.info("CLIQUES: {}".format(len(nslist)))
for ns in nslist:
equivNodeIds = ns['nodeIds']
for i in ns['nodeIds']:
ix = self.contract_uri(i)
for j in ns['nodeIds']:
if i != j:
jx = self.contract_uri(j)
digraph.add_edge(ix, jx, pred='equivalentTo')
if logical_definitions is not None and 'logicalDefinitionAxioms' in og:
for a in og['logicalDefinitionAxioms']:
ld = LogicalDefinition(self.contract_uri(a['definedClassId']),
[self.contract_uri(x) for x in a['genusIds']],
[(self.contract_uri(x['propertyId']),
self.contract_uri(x['fillerId'])) for x in a['restrictions'] if x is not None])
logical_definitions.append(ld)
if property_chain_axioms is not None and 'propertyChainAxioms' in og:
for a in og['propertyChainAxioms']:
pca = PropertyChainAxiom(predicate_id=self.contract_uri(a['predicateId']),
chain_predicate_ids=[self.contract_uri(x) for x in a['chainPredicateIds']])
property_chain_axioms.append(pca) | Converts a single obograph to Digraph edges and adds to an existing networkx DiGraph |
def _send(self, event):
"""Generic function for sending commands to Alarm.com
:param event: Event command to send to alarm.com
"""
_LOGGER.debug('Sending %s to Alarm.com', event)
        response = None
        try:
with async_timeout.timeout(10, loop=self._loop):
response = yield from self._websession.post(
self.ALARMDOTCOM_URL + '{}/main.aspx'.format(
self._login_info['sessionkey']),
data={
self.VIEWSTATE: '',
self.VIEWSTATEENCRYPTED: '',
self.EVENTVALIDATION:
self.COMMAND_LIST[event]['eventvalidation'],
self.COMMAND_LIST[event]['command']: event},
headers={'User-Agent': 'Mozilla/5.0 '
'(Windows NT 6.1; '
'WOW64; rv:40.0) '
'Gecko/20100101 '
'Firefox/40.1'}
)
_LOGGER.debug(
'Response from Alarm.com %s', response.status)
text = yield from response.text()
tree = BeautifulSoup(text, 'html.parser')
try:
message = tree.select(
'#{}'.format(self.MESSAGE_CONTROL))[0].get_text()
if 'command' in message:
_LOGGER.debug(message)
# Update alarm.com status after calling state change.
yield from self.async_update()
except IndexError:
# May have been logged out
yield from self.async_login()
if event == 'Disarm':
yield from self.async_alarm_disarm()
elif event == 'Arm+Stay':
yield from self.async_alarm_arm_away()
elif event == 'Arm+Away':
yield from self.async_alarm_arm_away()
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error('Error while trying to disarm Alarm.com system')
finally:
if response is not None:
yield from response.release() | Generic function for sending commands to Alarm.com
:param event: Event command to send to alarm.com |
def elem_to_container(elem, container=dict, **options):
"""
Convert XML ElementTree Element to a collection of container objects.
    Elements are transformed to a node under special tagged nodes (attrs, text
    and children) that record what kind of element each was; however, in
    special cases like the following, these nodes are attached to the
    parent node directly for later convenience.
- There is only text element
- There are only children elements each has unique keys among all
:param elem: ET Element object or None
    :param container: callable to make a container object
:param options: Keyword options
- nspaces: A namespaces dict, {uri: prefix} or None
- attrs, text, children: Tags for special nodes to keep XML info
    - merge_attrs: Merge attributes into the children nodes; the
      information that they were attributes is lost after the transformation.
"""
dic = container()
if elem is None:
return dic
elem.tag = _tweak_ns(elem.tag, **options) # {ns}tag -> ns_prefix:tag
subdic = dic[elem.tag] = container()
options["container"] = container
if elem.text:
_process_elem_text(elem, dic, subdic, **options)
if elem.attrib:
_process_elem_attrs(elem, dic, subdic, **options)
if len(elem):
_process_children_elems(elem, dic, subdic, **options)
elif not elem.text and not elem.attrib: # ex. <tag/>.
dic[elem.tag] = None
return dic | Convert XML ElementTree Element to a collection of container objects.
    Elements are transformed to a node under special tagged nodes (attrs, text
    and children) that record what kind of element each was; however, in
    special cases like the following, these nodes are attached to the
    parent node directly for later convenience.
- There is only text element
- There are only children elements each has unique keys among all
:param elem: ET Element object or None
    :param container: callable to make a container object
:param options: Keyword options
- nspaces: A namespaces dict, {uri: prefix} or None
- attrs, text, children: Tags for special nodes to keep XML info
    - merge_attrs: Merge attributes into the children nodes; the
      information that they were attributes is lost after the transformation.
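A hedged usage sketch: the special tag names passed as options are assumptions, and the call presumes the module's private helpers (_tweak_ns and the _process_* functions) are importable alongside this function.

import xml.etree.ElementTree as ET

root = ET.fromstring('<config><name>demo</name><opt flag="1"/></config>')
data = elem_to_container(root, container=dict, nspaces=None,
                         attrs='@attrs', text='@text', children='@children',
                         merge_attrs=False)
print(data)   # nested dicts keyed by tag, with attrs/text/children nodes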
def headers(self):
"""
        Get http headers of the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
        return result | Get http headers of the http/https request.
@return: A dictionary of header/values.
@rtype: dict |
def _init_metadata(self):
"""stub"""
QuestionFilesFormRecord._init_metadata(self)
FirstAngleProjectionFormRecord._init_metadata(self)
super(MultiChoiceOrthoQuestionFormRecord, self)._init_metadata() | stub |
def _decorate_namespace_property(bases: List[type], namespace: MutableMapping[str, Any], key: str) -> None:
"""Collect contracts for all getters/setters/deleters corresponding to ``key`` and decorate them."""
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
value = namespace[key]
assert isinstance(value, property)
fget = value.fget # type: Optional[Callable[..., Any]]
fset = value.fset # type: Optional[Callable[..., Any]]
fdel = value.fdel # type: Optional[Callable[..., Any]]
for func in [value.fget, value.fset, value.fdel]:
func = cast(Callable[..., Any], func)
if func is None:
continue
# Collect the preconditions and postconditions from bases
base_preconditions = [] # type: List[List[Contract]]
base_snapshots = [] # type: List[Snapshot]
base_postconditions = [] # type: List[Contract]
bases_have_func = False
for base in bases:
if hasattr(base, key):
base_property = getattr(base, key)
assert isinstance(base_property, property), \
"Expected base {} to have {} as property, but got: {}".format(base, key, base_property)
if func == value.fget:
base_func = getattr(base, key).fget
elif func == value.fset:
base_func = getattr(base, key).fset
elif func == value.fdel:
base_func = getattr(base, key).fdel
else:
raise NotImplementedError("Unhandled case: func neither value.fget, value.fset nor value.fdel")
if base_func is None:
continue
bases_have_func = True
# Check if there is a checker function in the base class
base_contract_checker = icontract._checkers.find_checker(func=base_func)
# Ignore functions which don't have preconditions or postconditions
if base_contract_checker is not None:
base_preconditions.extend(base_contract_checker.__preconditions__) # type: ignore
base_snapshots.extend(base_contract_checker.__postcondition_snapshots__) # type: ignore
base_postconditions.extend(base_contract_checker.__postconditions__) # type: ignore
# Add preconditions and postconditions of the function
preconditions = [] # type: List[List[Contract]]
snapshots = [] # type: List[Snapshot]
postconditions = [] # type: List[Contract]
contract_checker = icontract._checkers.find_checker(func=func)
if contract_checker is not None:
preconditions = contract_checker.__preconditions__ # type: ignore
snapshots = contract_checker.__postcondition_snapshots__
postconditions = contract_checker.__postconditions__ # type: ignore
preconditions = _collapse_preconditions(
base_preconditions=base_preconditions,
bases_have_func=bases_have_func,
preconditions=preconditions,
func=func)
snapshots = _collapse_snapshots(base_snapshots=base_snapshots, snapshots=snapshots)
postconditions = _collapse_postconditions(
base_postconditions=base_postconditions, postconditions=postconditions)
if preconditions or postconditions:
if contract_checker is None:
contract_checker = icontract._checkers.decorate_with_checker(func=func)
# Replace the function with the function decorated with contract checks
if func == value.fget:
fget = contract_checker
elif func == value.fset:
fset = contract_checker
elif func == value.fdel:
fdel = contract_checker
else:
raise NotImplementedError("Unhandled case: func neither fget, fset nor fdel")
# Override the preconditions and postconditions
contract_checker.__preconditions__ = preconditions # type: ignore
contract_checker.__postcondition_snapshots__ = snapshots # type: ignore
contract_checker.__postconditions__ = postconditions # type: ignore
if fget != value.fget or fset != value.fset or fdel != value.fdel:
namespace[key] = property(fget=fget, fset=fset, fdel=fdel) | Collect contracts for all getters/setters/deleters corresponding to ``key`` and decorate them. |
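
A hedged usage sketch of what this machinery enables, assuming icontract's public DBC base class and require decorator: a precondition declared on a base-class property setter is re-applied to the overriding setter in a subclass, even though the override carries no decorator of its own.

import icontract

class Account(icontract.DBC):
    def __init__(self) -> None:
        self._balance = 0

    @property
    def balance(self) -> int:
        return self._balance

    @balance.setter
    @icontract.require(lambda value: value >= 0)
    def balance(self, value: int) -> None:
        self._balance = value

class AuditedAccount(Account):
    @property
    def balance(self) -> int:
        return self._balance

    @balance.setter
    def balance(self, value: int) -> None:
        # No decorator here; the precondition from Account is inherited.
        self._balance = value

acc = AuditedAccount()
acc.balance = 10     # passes the inherited precondition
# acc.balance = -1   # would raise icontract.ViolationError
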
def register(cls, range_mixin):
"""
Decorator for registering range set mixins for global use. This works
the same as :meth:`~spans.settypes.MetaRangeSet.add`
        :param range_mixin: A :class:`~spans.types.Range` mixin class to
            register a decorated range set mixin class for
:return: A decorator to use on a range set mixin class
"""
def decorator(range_set_mixin):
cls.add(range_mixin, range_set_mixin)
return range_set_mixin
return decorator | Decorator for registering range set mixins for global use. This works
the same as :meth:`~spans.settypes.MetaRangeSet.add`
        :param range_mixin: A :class:`~spans.types.Range` mixin class to
            register a decorated range set mixin class for
:return: A decorator to use on a range set mixin class |
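
A hedged sketch of the intended use; the mixin names below are hypothetical, and the decorator is assumed to be reachable on spans.settypes.MetaRangeSet as the docstring suggests.

from spans.settypes import MetaRangeSet

class DiscreteRangeMixin(object):
    """Hypothetical mixin applied to a Range subclass."""

@MetaRangeSet.register(DiscreteRangeMixin)
class DiscreteRangeSetMixin(object):
    """Range-set counterpart, now registered for any set whose
    range type uses DiscreteRangeMixin."""
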
def get_parent(self):
"""Get Parent.
Fetch parent product if it exists.
        Use `parent_asin` to check if a parent exists before fetching.
:return:
An instance of :class:`~.AmazonProduct` representing the
parent product.
"""
if not self.parent:
parent = self._safe_get_element('ParentASIN')
if parent:
self.parent = self.api.lookup(ItemId=parent)
return self.parent | Get Parent.
Fetch parent product if it exists.
        Use `parent_asin` to check if a parent exists before fetching.
:return:
An instance of :class:`~.AmazonProduct` representing the
parent product. |
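
A hedged usage sketch; the credentials and ASIN are placeholders, and AmazonAPI.lookup is assumed to return an AmazonProduct instance.

from amazon.api import AmazonAPI

amazon = AmazonAPI('ACCESS_KEY', 'SECRET_KEY', 'ASSOCIATE_TAG')
product = amazon.lookup(ItemId='B00EXAMPLE')   # placeholder ASIN
parent = product.get_parent()
if parent is not None:
    print(parent.title)
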
def load(self):
"""Load the state from the JSON file in the config dir."""
if not op.exists(self.path):
logger.debug("The GUI state file `%s` doesn't exist.", self.path)
# TODO: create the default state.
return
assert op.exists(self.path)
logger.debug("Load the GUI state from `%s`.", self.path)
self.update(_bunchify(_load_json(self.path))) | Load the state from the JSON file in the config dir. |
def line_iterator_to_intermediary(line_iterator):
""" Parse an iterator of str (one string per line) to the intermediary syntax"""
current_table = None
tables = []
relations = []
errors = []
for line_nb, line, raw_line in filter_lines_from_comments(line_iterator):
try:
new_obj = parse_line(line)
current_table, tables, relations = update_models(new_obj, current_table, tables, relations)
except ParsingException as e:
e.line_nb = line_nb
e.line = raw_line
errors.append(e)
if len(errors) != 0:
        msg = 'ERAlchemy couldn\'t complete the generation due to the following {} errors'.format(len(errors))
        raise ParsingException(msg + '\n\n' + '\n\n'.join(e.traceback for e in errors))
return tables, relations | Parse an iterator of str (one string per line) to the intermediary syntax |
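
A hedged usage sketch; the markup below follows ERAlchemy's er text syntax as I understand it (table names in brackets, a leading * for key columns, and a cardinality arrow for relations), so treat the exact notation as an assumption.

lines = [
    '[person]',
    '    *id {label:"int"}',
    '    name {label:"varchar"}',
    '[contact]',
    '    *id {label:"int"}',
    'person *--1 contact',
]
tables, relations = line_iterator_to_intermediary(lines)
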
def remove_class(self, ioclass):
"""Remove VNXIOClass instance from policy."""
current_ioclasses = self.ioclasses
new_ioclasses = filter(lambda x: x.name != ioclass.name,
current_ioclasses)
self.modify(new_ioclasses=new_ioclasses) | Remove VNXIOClass instance from policy. |
def surface_to_image(surface):
"""Renders current buffer surface to IPython image"""
from IPython.display import Image
buf = BytesIO()
surface.write_to_png(buf)
data = buf.getvalue()
buf.close()
return Image(data=data) | Renders current buffer surface to IPython image |
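
A hedged usage sketch; pycairo is used here for brevity, though the surface could equally come from cairocffi, whose ImageSurface API is equivalent.

import cairo  # pycairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
ctx = cairo.Context(surface)
ctx.set_source_rgb(0.2, 0.4, 0.8)
ctx.rectangle(10, 10, 80, 80)
ctx.fill()
img = surface_to_image(surface)  # display(img) renders it inline in IPython
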
def tagmask(self, tags):
"""
        :returns: a boolean array with True where the asset has the given tags
"""
mask = numpy.zeros(len(tags), bool)
for t, tag in enumerate(tags):
tagname, tagvalue = tag.split('=')
mask[t] = self.tagvalue(tagname) == tagvalue
        return mask | :returns: a boolean array with True where the asset has the given tags
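
A hedged usage sketch; `asset` stands for a hypothetical instance of the class defining tagmask()/tagvalue(), and the tag names and values are placeholders.

mask = asset.tagmask(['taxonomy=RC', 'occupancy=RES'])
# mask has one boolean entry per requested tag:
# mask[0] is True iff asset.tagvalue('taxonomy') == 'RC'
# mask[1] is True iff asset.tagvalue('occupancy') == 'RES'
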
def get(self, endpoint, params=None):
"""Send an HTTP GET request to QuadrigaCX.
:param endpoint: API endpoint.
:type endpoint: str | unicode
:param params: URL parameters.
:type params: dict
:return: Response body from QuadrigaCX.
:rtype: dict
:raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
"""
response = self._session.get(
url=self._url + endpoint,
params=params,
timeout=self._timeout
)
return self._handle_response(response) | Send an HTTP GET request to QuadrigaCX.
:param endpoint: API endpoint.
:type endpoint: str | unicode
:param params: URL parameters.
:type params: dict
:return: Response body from QuadrigaCX.
:rtype: dict
:raise quadriga.exceptions.RequestError: If HTTP OK was not returned. |
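
A hedged sketch of how this helper would typically be built upon from inside the same client class; the /ticker endpoint and its book parameter mirror QuadrigaCX's public REST API.

def get_ticker(self, book='btc_cad'):
    # Hypothetical convenience wrapper around get(); returns the
    # latest trading summary for the given order book.
    return self.get('/ticker', params={'book': book})
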
def clearness_index(ghi, solar_zenith, extra_radiation, min_cos_zenith=0.065,
max_clearness_index=2.0):
"""
Calculate the clearness index.
The clearness index is the ratio of global to extraterrestrial
irradiance on a horizontal plane.
Parameters
----------
ghi : numeric
Global horizontal irradiance in W/m^2.
solar_zenith : numeric
True (not refraction-corrected) solar zenith angle in decimal
degrees.
extra_radiation : numeric
Irradiance incident at the top of the atmosphere
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_clearness_index : numeric, default 2.0
Maximum value of the clearness index. The default, 2.0, allows
for over-irradiance events typically seen in sub-hourly data.
NREL's SRRL Fortran code used 0.82 for hourly data.
Returns
-------
kt : numeric
Clearness index
References
----------
.. [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly
Global Horizontal to Direct Normal Insolation", Technical
Report No. SERI/TR-215-3087, Golden, CO: Solar Energy Research
Institute, 1987.
"""
cos_zenith = tools.cosd(solar_zenith)
I0h = extra_radiation * np.maximum(cos_zenith, min_cos_zenith)
# consider adding
# with np.errstate(invalid='ignore', divide='ignore'):
# to kt calculation, but perhaps it's good to allow these
# warnings to the users that override min_cos_zenith
kt = ghi / I0h
kt = np.maximum(kt, 0)
kt = np.minimum(kt, max_clearness_index)
return kt | Calculate the clearness index.
The clearness index is the ratio of global to extraterrestrial
irradiance on a horizontal plane.
Parameters
----------
ghi : numeric
Global horizontal irradiance in W/m^2.
solar_zenith : numeric
True (not refraction-corrected) solar zenith angle in decimal
degrees.
extra_radiation : numeric
Irradiance incident at the top of the atmosphere
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_clearness_index : numeric, default 2.0
Maximum value of the clearness index. The default, 2.0, allows
for over-irradiance events typically seen in sub-hourly data.
NREL's SRRL Fortran code used 0.82 for hourly data.
Returns
-------
kt : numeric
Clearness index
References
----------
.. [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly
Global Horizontal to Direct Normal Insolation", Technical
Report No. SERI/TR-215-3087, Golden, CO: Solar Energy Research
Institute, 1987. |
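
A hedged end-to-end example, assuming the function is exposed as pvlib.irradiance.clearness_index and using pvlib's solar position and extraterrestrial irradiance helpers; the GHI values are made up.

import pandas as pd
import pvlib

times = pd.date_range('2019-06-01 06:00', periods=4, freq='3H', tz='Etc/GMT+7')
location = pvlib.location.Location(32.2, -110.9)         # roughly Tucson, AZ
solpos = location.get_solarposition(times)
dni_extra = pvlib.irradiance.get_extra_radiation(times)
ghi = pd.Series([50., 400., 900., 600.], index=times)    # made-up GHI in W/m^2
kt = pvlib.irradiance.clearness_index(ghi, solpos['zenith'], dni_extra)
print(kt)
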
def calculate_manual_reading(basic_data: BasicMeterData) -> Reading:
""" Calculate the interval between two manual readings """
t_start = basic_data.previous_register_read_datetime
t_end = basic_data.current_register_read_datetime
read_start = basic_data.previous_register_read
read_end = basic_data.current_register_read
value = basic_data.quantity
uom = basic_data.uom
quality_method = basic_data.current_quality_method
return Reading(t_start, t_end, value, uom, quality_method, "", "",
read_start, read_end) | Calculate the interval between two manual readings |
def one(iterable, cmp=None):
"""
Return the object in the given iterable that evaluates to True.
If the given iterable has more than one object that evaluates to True,
or if there is no object that fulfills such condition, return False.
If a callable ``cmp`` is given, it's used to evaluate each element.
>>> one((True, False, False))
True
>>> one((True, False, True))
False
>>> one((0, 0, 'a'))
'a'
>>> one((0, False, None))
False
>>> one((True, True))
False
>>> bool(one(('', 1)))
True
>>> one((10, 20, 30, 42), lambda i: i > 40)
42
"""
the_one = False
for i in iterable:
if cmp(i) if cmp else i:
if the_one:
return False
the_one = i
return the_one | Return the object in the given iterable that evaluates to True.
If the given iterable has more than one object that evaluates to True,
or if there is no object that fulfills such condition, return False.
If a callable ``cmp`` is given, it's used to evaluate each element.
>>> one((True, False, False))
True
>>> one((True, False, True))
False
>>> one((0, 0, 'a'))
'a'
>>> one((0, False, None))
False
>>> one((True, True))
False
>>> bool(one(('', 1)))
True
>>> one((10, 20, 30, 42), lambda i: i > 40)
42 |