content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
import numpy as np
def ky_att(xs, b, Mach, k0, Att=-20):
"""
Returns the spanwise gust wavenumber 'ky_att' with response at 'xs' attenuated by 'Att' decibels
Parameters
----------
xs : float
Chordwise coordinate of reference point, defined in interval (-b, +b].
b : float
Airfoil semi chord.
Mach : float
Mean flow Mach number.
k0 : float
Acoustic wavenumber 'k0'. Can be obtained from the
temporal frequency 'f' [in Hz] and the speed of sound 'c0' [in m/s]
as 'k0 = 2*pi*f/c0'.
Att : float, optional
Level of attenuation of the surface pressure at point 'xs', in decibels.
Defaults to -20 dB.
Returns
-------
ky_att : float
Subcritical gust spanwise wavenumber 'ky_att' such that the aerofoil
response at point 'xs' is 'Att' dB reduced.
"""
beta = np.sqrt(1-Mach**2)
# critical gust spanwise wavenumber
ky_crit = k0/beta
term1 = -(beta**2)*np.log(10**(Att/20))/(k0*(xs + b))
return ky_crit*np.sqrt(term1**2 + 1) | 78d62081d0849d035953a694bbb7a0fcf956f76b | 6,044 |
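# Usage sketch for 'ky_att' (all values below are illustrative assumptions,
# not from the source): a 0.1 m semichord aerofoil in a Mach 0.3 flow,
# probed at 1 kHz with c0 = 340 m/s, asking for -20 dB at the trailing edge.
b = 0.1
k0 = 2 * np.pi * 1000 / 340            # acoustic wavenumber
ky = ky_att(xs=b, b=b, Mach=0.3, k0=k0)  # spanwise wavenumber for -20 dB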
from typing import List, Optional
Strings = List[str]  # assumed alias; not defined in the original snippet
def has_multiline_items(strings: Optional[Strings]) -> bool:
"""Check whether one of the items in the list has multiple lines."""
return any(is_multiline(item) for item in strings) if strings else False | 75dd6ce7d7152a200ff12c53104ff839a21d28f4 | 6,045 |
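# 'is_multiline' is not defined in the snippet above; a minimal assumed
# helper so the example runs:
def is_multiline(item: str) -> bool:
    return "\n" in item

assert has_multiline_items(["a", "b\nc"]) is True
assert has_multiline_items(None) is False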
from typing import Any, Dict, Optional, Tuple
import inspect
DictStrAny = Dict[str, Any]  # assumed alias; not defined in the original snippet
def eval_ctx(
    layer: int = 0, globals_: Optional[DictStrAny] = None, locals_: Optional[DictStrAny] = None
) -> Tuple[DictStrAny, DictStrAny]:
    """Get the global and local variables of a calling context.
    Args:
        layer (int, optional): Stack depth above the caller. Defaults to 0.
        globals_ (Optional[DictStrAny], optional): Extra globals to merge in. Defaults to None.
        locals_ (Optional[DictStrAny], optional): Extra locals to merge in. Defaults to None.
    Returns:
        Tuple[DictStrAny, DictStrAny]: The global and local variable dictionaries.
    """
    frame = inspect.stack()[layer + 1].frame  # +1 skips eval_ctx's own frame
global_dict, local_dict = frame.f_globals, frame.f_locals
global_dict.update(globals_ or {})
local_dict.update(locals_ or {})
return global_dict, local_dict | 81b782596bcc29f1be4432cc1b95230ac952bf2b | 6,046 |
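# Usage sketch: with layer=0 the captured frame is the caller's own, so
# names local to the caller become visible to eval().
def caller():
    secret = 42
    g_vars, l_vars = eval_ctx()  # layer=0 captures this frame
    return eval("secret + 1", g_vars, l_vars)

assert caller() == 43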
def extract_vcalendar(allriscontainer):
"""Return a list of committee meetings extracted from html content."""
vcalendar = {
'vevents': findall_events(allriscontainer),
}
if vcalendar.get('vevents'):
base_url = allriscontainer.base_url
vcalendar['url'] = find_calendar_url(base_url)
vcalendar['uid'] = find_calendar_uid(base_url)
vcalendar['borough'] = find_calendar_borough(base_url)
vcalendar['committee'] = find_calendar_committee(allriscontainer)
vcalendar['name'] = '{}: {}'.format(
vcalendar['borough'],
vcalendar['committee']
)
return vcalendar | f792ae3d8826d37b2fba874524ec78ac502fb1f0 | 6,048 |
def rnn_helper(inp,
length,
cell_type=None,
direction="forward",
name=None,
reuse=None,
*args,
**kwargs):
"""Adds ops for a recurrent neural network layer.
This function calls an actual implementation of a recurrent neural network
based on `cell_type`.
There are three modes depending on the value of `direction`:
forward: Adds a forward RNN.
backward: Adds a backward RNN.
bidirectional: Adds both forward and backward RNNs and creates a
bidirectional RNN.
Args:
inp: A 3-D tensor of shape [`batch_size`, `max_length`, `feature_dim`].
length: A 1-D tensor of shape [`batch_size`] and type int64. Each element
represents the length of the corresponding sequence in `inp`.
cell_type: Cell type of RNN. Currently can only be "lstm".
direction: One of "forward", "backward", "bidirectional".
name: Name of the op.
*args: Other arguments to the layer.
    **kwargs: Keyword arguments to the layer.
Returns:
A 3-D tensor of shape [`batch_size`, `max_length`, `num_nodes`].
"""
assert cell_type is not None
rnn_func = None
if cell_type == "lstm":
rnn_func = lstm_layer
assert rnn_func is not None
assert direction in ["forward", "backward", "bidirectional"]
with tf.variable_scope(name, reuse=reuse):
if direction in ["forward", "bidirectional"]:
forward = rnn_func(
inp=inp,
length=length,
backward=False,
name="forward",
reuse=reuse,
*args,
**kwargs)
if isinstance(forward, tuple):
# lstm_layer returns a tuple (output, memory). We only need the first
# element.
forward = forward[0]
if direction in ["backward", "bidirectional"]:
backward = rnn_func(
inp=inp,
length=length,
backward=True,
name="backward",
reuse=reuse,
*args,
**kwargs)
if isinstance(backward, tuple):
# lstm_layer returns a tuple (output, memory). We only need the first
# element.
backward = backward[0]
if direction == "forward":
out = forward
elif direction == "backward":
out = backward
else:
out = tf.concat(axis=2, values=[forward, backward])
return out | d6d457a10bd921560a76bc54a083271c82b144ec | 6,049 |
import numpy as np
def get_data(dataset):
"""
:return: encodings array of (2048, n)
labels list of (n)
"""
query = "SELECT * FROM embeddings WHERE label IS NOT NULL"
cursor, connection = db_actions.connect(dataset)
cursor.execute(query)
result_list = cursor.fetchall()
encodings = np.zeros((2048, len(result_list)))
labels = []
for i in range(len(result_list)):
encodings[:, i] = result_list[i][0]
labels.append(result_list[i][1].encode())
encodings = np.nan_to_num(encodings)
labels = [x.decode('utf-8') for x in labels]
return encodings.astype('float32'), labels | 9f23631c6e263f99bab976e1225adbb448323783 | 6,050 |
def read_hdr(name, order='C'):
    """Read array dimensions from a .hdr file."""
    # get dims from .hdr
    with open(name + ".hdr", "r") as h:
        h.readline()  # skip first line
        line = h.readline()
    dims = [int(i) for i in line.split()]
    if order == 'C':
        dims.reverse()
    return dims | 57daadfdf2342e1e7ef221cc94f2e8f70c504944 | 6,051 |
def IsTouchDevice(dev):
"""Check if a device is a touch device.
Args:
dev: evdev.InputDevice
Returns:
True if dev is a touch device.
"""
keycaps = dev.capabilities().get(evdev.ecodes.EV_KEY, [])
return evdev.ecodes.BTN_TOUCH in keycaps | 6fd36c4921f3ee4bf37c6ce8bcaf435680fc82d5 | 6,052 |
def load_users():
"""
Loads users csv
:return:
"""
    with open(USERS, "r") as file:
        # Build a dictionary keyed by hash() of each user tuple. Tuples are
        # immutable, so the same row always hashes to the same key, which
        # makes lookups repeatable across the file.
users = {}
for user in file:
user = user.strip().split(",")
user_tuple = create_user(*user[:5], int(user[5]))
users[hash(user_tuple)] = user_tuple
return users | 255745d36b5b995dfd9a8c0b13a154a87ab6f25e | 6,053 |
def clustering_consistency_check(G):
""" Check consistency of a community detection algorithm by running it a number of times.
"""
Hun = G.to_undirected()
Hun = nx.convert_node_labels_to_integers(Hun,label_attribute='skeletonname')
WHa = np.zeros((len(Hun.nodes()),len(Hun.nodes())))
for i in range(100):
partition = community.best_partition(Hun, randomize=None, resolution=1.0)
for com in set(partition.values()) :
list_nodes = [nodes for nodes in partition.keys()
if partition[nodes] == com]
list_nodes = np.array(list_nodes)
WHa[np.ix_(list_nodes,list_nodes)] += 1
print('Iteration:', i)
return WHa | 917bb7a23b651821389edbcc62c81fbe4baf3d08 | 6,055 |
def l2_normalize_rows(frame):
"""
L_2-normalize the rows of this DataFrame, so their lengths in Euclidean
distance are all 1. This enables cosine similarities to be computed as
dot-products between these rows.
Rows of zeroes will be normalized to zeroes, and frames with no rows will
be returned as-is.
"""
if frame.shape[0] == 0:
return frame
index = frame.index
return pd.DataFrame(
data=normalize(frame, norm='l2', copy=False, axis=1), index=index
) | 889c2f4473fdab4661fecdceb778aae1bb62652d | 6,056 |
import socket
def canonical_ipv4_address(ip_addr):
"""Return the IPv4 address in a canonical format"""
return socket.inet_ntoa(socket.inet_aton(ip_addr)) | edacc70ccc3eef12030c4c597c257775d3ed5fa4 | 6,057 |
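# Example: inet_aton accepts shorthand dotted forms, so this round trip
# normalizes them to the full four-octet notation.
assert canonical_ipv4_address("127.1") == "127.0.0.1"
assert canonical_ipv4_address("192.168.0.1") == "192.168.0.1"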
def _build_dynatree(site, expanded):
"""Returns a dynatree hash representation of our pages and menu
hierarchy."""
subtree = _pages_subtree(site.doc_root, site.default_language, True, 1,
expanded)
subtree['activate'] = True
pages_node = {
'title': 'Pages',
'key': 'system:pages',
'expand': True,
'icon': 'fatcow/folders_explorer.png',
'children': [subtree, ],
}
language = site.default_language
menus = []
for menu in Menu.objects.filter(site=site):
items = []
for item in menu.first_level.all():
items.append(_menuitem_subtree(item, language, True, 1, expanded))
menus.append({
'title': menu.name,
'key': 'menu:%d' % menu.id,
'expand': True,
'icon': 'fatcow/folders.png',
'children':items,
})
menus_node = {
'title': 'Menus',
'key': 'system:menus',
'expand': True,
'icon': 'fatcow/folders_explorer.png',
'children': menus,
}
tags = []
for tag in Tag.objects.filter(site=site):
title = tag.display_text(language)
if not title:
title = '<i>None</i>'
tags.append({
'title': title,
'key':'tag:%d' % tag.id,
'icon': 'fatcow/document_tag.png',
'expand': False,
})
tags_node = {
'title':'Tags',
'key':'system:tags',
'expand':False,
'icon': 'fatcow/folders_explorer.png',
'children': tags,
}
tree = [pages_node, menus_node, tags_node]
return tree | 38dd222ed5cde6b4d6bff4a632c6150666580b92 | 6,058 |
def aggregator(df, groupbycols):
"""
Aggregates flowbyactivity or flowbysector df by given groupbycols
:param df: Either flowbyactivity or flowbysector
:param groupbycols: Either flowbyactivity or flowbysector columns
    :return: the aggregated dataframe
"""
# tmp replace null values with empty cells
df = replace_NoneType_with_empty_cells(df)
# drop columns with flowamount = 0
df = df[df['FlowAmount'] != 0]
# list of column headers, that if exist in df, should be aggregated using the weighted avg fxn
possible_column_headers = ('Spread', 'Min', 'Max', 'DataReliability', 'TemporalCorrelation',
'GeographicalCorrelation', 'TechnologicalCorrelation',
'DataCollection')
# list of column headers that do exist in the df being aggregated
column_headers = [e for e in possible_column_headers if e in df.columns.values.tolist()]
df_dfg = df.groupby(groupbycols).agg({'FlowAmount': ['sum']})
# run through other columns creating weighted average
for e in column_headers:
df_dfg[e] = weighted_average(df, e, 'FlowAmount', groupbycols)
df_dfg = df_dfg.reset_index()
df_dfg.columns = df_dfg.columns.droplevel(level=1)
# if datatypes are strings, ensure that Null values remain NoneType
df_dfg = replace_strings_with_NoneType(df_dfg)
return df_dfg | f8333087efc4a48d70aa6e3d727f73a7d03c8252 | 6,060 |
def unpack(X):
""" Unpack a comma separated list of values into a flat list """
return flatten([x.split(",") for x in list(X)]) | 1033fd5bdcd292a130c08a8f9819bf66a38fccac | 6,061 |
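# 'flatten' is not defined in the snippet above; a minimal assumed one-level
# flatten so the example runs:
def flatten(list_of_lists):
    return [item for sub in list_of_lists for item in sub]

assert unpack(["a,b", "c"]) == ["a", "b", "c"]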
def doize(tock=0.0, **opts):
"""
Decorator that returns Doist compatible decorated generator function.
Usage:
@doize
def f():
pass
Parameters:
tock is default tock attribute of doized f
opts is dictionary of remaining parameters that becomes .opts attribute
of doized f
"""
def decorator(f):
# must create copy not wrapper so inspect.isgeneratorfunction works
# result of decoration
g = helping.copy_func(f)
g.tock = tock # default tock attributes
g.done = None # default done state
g.opts = dict(opts) # default opts attribute
return g
return decorator | 0c4a4220546b8c0cbc980c10de0476c9fc6c7995 | 6,062 |
def make_chained_transformation(tran_fns, *args, **kwargs):
"""Returns a dataset transformation function that applies a list of
transformations sequentially.
Args:
tran_fns (list): A list of dataset transformation.
*args: Extra arguments for each of the transformation function.
**kwargs: Extra keyword arguments for each of the transformation
function.
Returns:
A transformation function to be used in
:tf_main:`tf.data.Dataset.map <data/Dataset#map>`.
"""
def _chained_fn(data):
for tran_fns_i in tran_fns:
data = tran_fns_i(data, *args, **kwargs)
return data
return _chained_fn | 5f24e030df74a0617e633ca8f8d4a3954674b001 | 6,064 |
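# Usage sketch with plain callables; the chaining itself does not need
# TensorFlow, the returned function just threads data through each step.
double = lambda d: d * 2
add_one = lambda d: d + 1
chained = make_chained_transformation([double, add_one])
assert chained(3) == 7  # (3 * 2) + 1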
def configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
elif FLAGS.optimizer == "adamweightdecay":
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=FLAGS.adam_beta1,
beta_2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
else:
raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
return optimizer | bf7dd03c4133675d58428a054cc16e7be41e88b4 | 6,065 |
import functools
def train_and_evaluate(config, workdir):
"""Runs a training and evaluation loop.
Args:
config: Configuration to use.
workdir: Working directory for checkpoints and TF summaries. If this
contains checkpoint training will be resumed from the latest checkpoint.
Returns:
Training state.
"""
rng = jax.random.PRNGKey(config.seed)
rng, data_rng = jax.random.split(rng)
# Make sure config defines num_epochs and num_train_steps appropriately.
utils.check_epochs_and_steps(config)
# Check that perturbed-topk is selection method.
assert config.selection_method == "perturbed-topk", (
"ntsnet only supports perturbed-topk as selection method. Got: {}".format(
config.selection_method))
train_preprocessing_fn, eval_preprocessing_fn = data.parse_preprocessing_strings(
config.get("train_preprocess_str", ""),
config.get("eval_preprocess_str", ""))
assert config.batch_size % jax.local_device_count() == 0, (
f"Batch size ({config.batch_size}) should be divisible by number of "
f"devices ({jax.local_device_count()}).")
per_device_batch_size = config.batch_size // jax.local_device_count()
train_ds, eval_ds, num_classes = data.get_dataset(
config.dataset,
per_device_batch_size,
data_rng,
train_preprocessing_fn=train_preprocessing_fn,
eval_preprocessing_fn=eval_preprocessing_fn,
**config.get("data", {}))
module = AttentionNet.partial(config=config, num_classes=num_classes)
optimizer = create_optimizer(config)
loss_fn = functools.partial(ntsnet_loss, config=config)
train_metrics_dict = {
"train_loss": loss_fn,
"train_loss_raw": cross_entropy_raw_logits,
"train_loss_concat": cross_entropy_concat_logits,
"train_loss_part": cross_entropy_part_logits,
"train_accuracy": accuracy,
"train_rpn_scores_entropy": rpn_scores_entropy,
}
eval_metrics_dict = {
"eval_loss": loss_fn,
"eval_loss_raw": cross_entropy_raw_logits,
"eval_loss_concat": cross_entropy_concat_logits,
"eval_loss_part": cross_entropy_part_logits,
"eval_accuracy": accuracy,
"eval_rpn_scores_entropy": rpn_scores_entropy,
}
# Enables relevant statistics aggregator.
stats_aggregators = []
def add_image_prefix(image_aggregator):
def aggregator(stats):
d = image_aggregator(stats)
return {f"image_{k}": v for k, v in d.items()}
return aggregator
if config.get("log_images", True):
@add_image_prefix
def plot_patches(stats):
d = {
"part_imgs": (stats["part_imgs"] + 1.0) / 2.0,
"x": (stats["x"] + 1.0) / 2.0
}
for i, sc in enumerate(stats["scores"]):
d[f"scores_{i}"] = sc
return d
stats_aggregators.append(plot_patches)
stats_aggregators.append(lambda x: {"sigma": x["sigma"]})
state = classification_lib.training_loop(
module=module,
rng=rng,
train_ds=train_ds,
eval_ds=eval_ds,
loss_fn=loss_fn,
optimizer=optimizer,
train_metrics_dict=train_metrics_dict,
eval_metrics_dict=eval_metrics_dict,
stats_aggregators=stats_aggregators,
config=config,
workdir=workdir)
return state | 87f1dba561563acc0033663a30f105fe4056d235 | 6,066 |
def increment(i,k):
""" this is a helper function for a summation of the type :math:`\sum_{0 \leq k \leq i}`,
where i and k are multi-indices.
Parameters
----------
i: numpy.ndarray
integer array, i.size = N
k: numpy.ndarray
integer array, k.size = N
    Returns
    -------
    k: numpy.ndarray
        the same array, incremented in place
Example
-------
k = [1,0,1]
i = [2,0,2]
increment(i, k) # changes k to [1,0,2]
increment(i, k) # changes k to [2,0,0]
increment(i, k) # changes k to [2,0,1]
"""
carryover = 1
if len(k) != len(i):
raise ValueError('size of i and k do not match up')
for n in range(len(k))[::-1]:
if i[n] == 0:
continue
        tmp = k[n] + carryover
        carryover = tmp // (i[n]+1)
        k[n] = tmp % (i[n]+1)
if carryover == 0:
break
return k | 1ac8ef592376fbfa0d04cdd4b1c6b29ad3ed9fbd | 6,067 |
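# Usage sketch: starting from k = 0, repeated calls enumerate every
# multi-index 0 <= k <= i exactly once.
import numpy as np
i = np.array([1, 1])
k = np.zeros_like(i)
seen = [k.copy()]
for _ in range(np.prod(i + 1) - 1):
    increment(i, k)
    seen.append(k.copy())
# seen now holds [0,0], [0,1], [1,0], [1,1]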
def sample_lopt(key: chex.PRNGKey) -> cfgobject.CFGObject:
"""Sample a small lopt model."""
lf = cfgobject.LogFeature
rng = hk.PRNGSequence(key)
task_family_cfg = para_image_mlp.sample_image_mlp(next(rng))
lopt_name = parametric_utils.choice(
next(rng), [
"LearnableAdam", "LearnableSGDM", "LearnableSGD", "MLPLOpt",
"AdafacMLPLOpt"
])
kwargs = {}
if lopt_name in ["MLPLOpt", "AdafacMLPLOpt"]:
kwargs["hidden_size"] = lf(parametric_utils.log_int(next(rng), 2, 512))
kwargs["hidden_layers"] = parametric_utils.log_int(next(rng), 1, 4)
kwargs["exp_mult"] = lf(parametric_utils.log_float(next(rng), 1e-5, 1))
kwargs["step_mult"] = lf(parametric_utils.log_float(next(rng), 1e-5, 1))
lopt_cfg = cfgobject.CFGObject(lopt_name, kwargs)
num_steps = lf(parametric_utils.log_int(next(rng), 1, 100))
outer_bs = lf(parametric_utils.log_int(next(rng), 1, 8))
return cfgobject.CFGObject(
"ParametricLOpt", {
"lopt": lopt_cfg,
"task_family": task_family_cfg,
"num_steps": num_steps,
"outer_batch_size": outer_bs,
}) | b52a7640532ed8ce7760474edbd9832d93e7bdc3 | 6,068 |
import numpy
import time
def gen_df_groupby_usecase(method_name, groupby_params=None, method_params=''):
"""Generate df groupby method use case"""
groupby_params = {} if groupby_params is None else groupby_params
groupby_params = get_groupby_params(**groupby_params)
func_text = groupby_usecase_tmpl.format(**{
'method_name': method_name,
'groupby_params': groupby_params,
'method_params': method_params
})
global_vars = {'np': numpy, 'time': time}
loc_vars = {}
exec(func_text, global_vars, loc_vars)
_df_groupby_usecase = loc_vars[f'df_groupby_{method_name}_usecase']
return _df_groupby_usecase | 3a4f5745744299db354c17198d3175ad8b7ce4e4 | 6,069 |
import csv
def merge_csvfiles(options):
""" Think of this as a 'join' across options.mergefiles on equal values of
the column options.timestamp. This function takes each file in
options.mergefiles, reads them, and combines their columns in
options.output. The only common column should be options.timestamp. The
results are undefined if the mergefiles share other column names.
Args:
options.mergefiles - list of csv filenames
options.output - filename of merged csv file from this operation
Returns:
bool - True if success
Raises:
AssertionError - if merging encounters an error.
"""
records = {}
all_header_names = []
records_list = []
# collect all header fields from mergefiles
for filename in options.mergefiles:
records = read_csvfile(filename, True)
records_list.append(records)
all_header_names += records.fieldnames
    # eliminate duplicate header names
    all_header_names = sorted(set(all_header_names))
    with open(options.output, 'w') as output_fd:
        writer = csv.DictWriter(output_fd, all_header_names)
        writer.writeheader()
        try:
            # read all values until StopIteration is reached.
            while True:
                merge_list = [next(records) for records in records_list]
                merge_dict = merge_rows(merge_list, options)
                writer.writerow(merge_dict)
        except StopIteration:
            pass
    return True | 171b448c2b49584ce5a601f7d8789d7198fdf935 | 6,071 |
import html
def row_component(cards):
"""
Creates a horizontal row used to contain cards.
The card and row_component work together to create a
layout that stretches and shrinks when the user changes the size of the window,
or accesses the dashboard from a mobile device.
See https://developer.mozilla.org/en-US/docs/Learn/CSS/CSS_layout for more information.
"""
return html.Div(
cards, className="govuk-list card-container", style={"alignItems": "stretch"}
) | baa9f86bcac786a94802d003b1abcc75686e08d8 | 6,072 |
def recCopyElement(oldelement):
"""Generates a copy of an xml element and recursively of all
child elements.
:param oldelement: an instance of lxml.etree._Element
:returns: a copy of the "oldelement"
.. warning::
doesn't copy ``.text`` or ``.tail`` of xml elements
"""
    newelement = ETREE.Element(oldelement.tag, oldelement.attrib)
    # iterate over the element directly; .getchildren() was removed in Python 3.9
    for childelement in oldelement:
        newelement.append(recCopyElement(childelement))
    return newelement | 981f0c5ccdeacc1d82ebbde2de6f51298e82fa14 | 6,073 |
import hashlib
def KETAMA(key):
"""
MD5-based hashing algorithm used in consistent hashing scheme
to compensate for servers added/removed from memcached pool.
"""
    d = hashlib.md5(key).digest()  # key must be bytes
    c = _signed_int32
    # indexing a bytes object yields ints in Python 3, so ord() is not needed
    h = c((d[3] & 0xff) << 24) | c((d[2] & 0xff) << 16) | \
        c((d[1] & 0xff) << 8) | c(d[0] & 0xff)
    return h | 6baec2ea79a166389625b19c56cbcd3734e819b7 | 6,075 |
import calendar
def add_months(dt, months):
    """
    Add (or subtract) a number of months to a date.
    """
    month = dt.month - 1 + months
    year = dt.year + month // 12
    month = month % 12 + 1
    day = min(dt.day, calendar.monthrange(year, month)[1])
    return dt.replace(year=year, month=month, day=day) | 5770c1b61e53fc692f3b13efef203d2f5d544b80 | 6,076 |
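# Usage sketch: the day is clamped to the length of the target month,
# in both directions.
import datetime
d = datetime.date(2020, 1, 31)
assert add_months(d, 1) == datetime.date(2020, 2, 29)    # leap year
assert add_months(d, -2) == datetime.date(2019, 11, 30)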
import numpy as np
def _decomposer_interp(fp, x=None, xp=None):
"""Do the actual interpolation for multiprocessing"""
return np.interp(x, xp, fp) | eef6debf668c62f4d817a0b3697019d0bd4007c9 | 6,077 |
import tensorflow as tf
from nn4omtf import utils
import numpy as np
def create_nn(x, x_shape, is_training):
"""
Args:
x: input hits array
x_shape: input tensor shape for single event
is_training: placeholder for indicating train or valid/test phase
Note: Only code in `create_nn` function scope will be exctracted and saved
in model directory. It's important to provide all necessary imports
within.
"""
arr = [0, 5, 10, 15, 20, 25, 30]
out_sz = 2 * len(arr) + 1
in_sz = np.prod(x_shape)
hidden_layers = [128, 64, 64]
x = tf.reshape(x, [-1, in_sz])
for sz in hidden_layers:
# Pass is_training to setup batch normalization on these layers
x = utils.mk_fc_layer(x, sz, act_fn=tf.nn.relu, is_training=is_training)
logits = utils.mk_fc_layer(x, out_sz, is_training=is_training)
return logits, arr | 8c7a4ce128e434e964b951ca6fe65722c9936be9 | 6,078 |
def generate_outlier_bounds_iqr(df, column, multiplier=1.5):
"""
Takes in a dataframe, the column name, and can specify a multiplier (default=1.5). Returns the upper and lower bounds for the
values in that column that signify outliers.
"""
q1 = df[column].quantile(.25)
q3 = df[column].quantile(.75)
iqr = q3 - q1
upper = q3 + (multiplier * iqr)
lower = q1 - (multiplier * iqr)
return upper, lower | 7f096d5f5cf2417cbc161713715a39560efd140a | 6,080 |
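# Usage sketch with an assumed 'value' column: keep only the rows that fall
# inside the IQR fences.
import pandas as pd
df = pd.DataFrame({"value": [1, 2, 3, 4, 100]})
upper, lower = generate_outlier_bounds_iqr(df, "value")
filtered = df[(df["value"] >= lower) & (df["value"] <= upper)]  # drops 100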
import random
def generate_data(Type):
    """
    Randomly generate the data payload carried in a CAN frame.
    :param Type: the type of data to generate
    :return: a list of 8 generated byte strings, e.g. ['88', '77', '55', '44', '22', '11', '33', '44']
    """
    data = []
    if Type == 1:
        # feedback frame: voltage info for battery cells 1-24
        standard_vol = 35
        offset = random.randint(0, 15)
        max_vol = standard_vol + offset
        min_vol = standard_vol - offset // 2
        data.append('44')
        data.append(str(max_vol))
        data.append('44')
        data.append(str(min_vol))
        offset = random.randint(0, 15)
        max_vol = standard_vol + offset
        min_vol = standard_vol - offset // 2
        data.append('44')
        data.append(str(max_vol))
        data.append('44')
        data.append(str(min_vol))
    elif Type == 2:
        # feedback frame: temperature info for battery cells 1-8
        standard_temp = 45
        offset = random.randint(0, 20)
        max_temp = standard_temp + offset
        min_temp = standard_temp - offset - 5
        data.append(str(max_temp))
        data.append('6c')
        data.append(str(min_temp))
        data.append('6c')
        offset = random.randint(0, 20)
        max_temp = standard_temp + offset
        min_temp = standard_temp - offset - 5
        data.append(str(max_temp))
        data.append('6c')
        data.append(str(min_temp))
        data.append('6c')
    elif Type == 3:
        # feedback frame: highest/lowest cell voltage and temperature
        standard_vol = 35
        standard_temp = 45
        vol_offset = random.randint(0, 15)
        temp_offset = random.randint(0, 20)
        max_temp = standard_temp + temp_offset
        min_temp = standard_temp - temp_offset - 5
        max_vol = standard_vol + vol_offset
        min_vol = standard_vol - vol_offset // 2
        data.append('44')
        data.append(str(max_vol))
        data.append('44')
        data.append(str(min_vol))
        data.append(str(max_temp))
        data.append('5c')
        data.append(str(min_temp))
        data.append('5c')
    elif Type == 4:
        # periodic frame: system voltage info
        standard_vol = 55
        offset = random.randint(0, 10)
        # '+' assumed here; the original '*' could yield a max_vol of 0
        max_vol = standard_vol + offset * 10
        min_vol = standard_vol - offset - 5
        data.append('c5')
        data.append(str(max_vol))
        data.append('f2')
        data.append(str(min_vol))
        data.append('ed')
        for i in range(3):
            data.append(str(standard_vol + 5 * i))
    elif Type == 5:
        pass
    else:
        pass
    return data | 3a920be4b7ef5c5c3e258b3e3c79bc028004179a | 6,081 |
def counting_sort(array):
"""
SORTING FUNCTION USING COUNTING SORT ALGORITHM
ARG array = LIST(ARRAY) OF NUMBERS
"""
## counter lists has elements for every
maximum = max(array)
counter = [0]*(maximum+1)
for i in range(len(array)):
counter[array[i]] += 1
for i in range(1, maximum + 1):
counter[i] = counter[i] + counter[i-1]
result = [0]*len(array)
for i in range(len(array)):
result[counter[array[i]] -1] = array[i]
counter[array[i]] -= 1
return result | 986e2f9277fa71dcd9897ac409653009c651c49f | 6,082 |
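# Usage example (values must be non-negative integers, since they index the
# counter list):
assert counting_sort([4, 2, 2, 8, 3, 3, 1]) == [1, 2, 2, 3, 3, 4, 8]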
import math
from PIL import ImageColor
def indexedcolor(i, num, npersat=15, lightness=60):
"""Returns an rgb color triplet for a given index, with a finite max 'num'.
Thus if you need 10 colors and want to get color #5, you would call this with (5, 10).
The colors are "repeatable".
"""
nsats = int(math.ceil(num/float(npersat)))
sat = 100 - int((i//npersat)*(100/nsats))
l = lightness
nhues = int(math.ceil(num/float(nsats)))
hue = (i % nhues) * (360//nhues)
return ImageColor.getrgb('hsl(%d,%d%%,%d%%)' % (hue, sat, l)) | 418a875bc8ae50ce21f9667f46718863ba0f55e3 | 6,083 |
def make_customer_satisfaction(branch_index='A'):
"""Create average customer satisfaction heat map"""
customer_satisfaction = make_heat_map(branch_index, 'mean(Rating)', 'Average Satisfaction')
return customer_satisfaction | b891b74a8942da7c212ba7112ffb865deb52aec2 | 6,084 |
import pefile
def extract_infos(fpath):
"""Extract information about file"""
try:
pe = pefile.PE(fpath)
except pefile.PEFormatError:
return {}
res = {}
res['Machine'] = pe.FILE_HEADER.Machine
res['SizeOfOptionalHeader'] = pe.FILE_HEADER.SizeOfOptionalHeader
res['Characteristics'] = pe.FILE_HEADER.Characteristics
res['MajorLinkerVersion'] = pe.OPTIONAL_HEADER.MajorLinkerVersion
res['MinorLinkerVersion'] = pe.OPTIONAL_HEADER.MinorLinkerVersion
res['SizeOfCode'] = pe.OPTIONAL_HEADER.SizeOfCode
res['SizeOfInitializedData'] = pe.OPTIONAL_HEADER.SizeOfInitializedData
res['SizeOfUninitializedData'] = pe.OPTIONAL_HEADER.SizeOfUninitializedData
res['AddressOfEntryPoint'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
res['BaseOfCode'] = pe.OPTIONAL_HEADER.BaseOfCode
try:
res['BaseOfData'] = pe.OPTIONAL_HEADER.BaseOfData
except AttributeError:
res['BaseOfData'] = 0
res['ImageBase'] = pe.OPTIONAL_HEADER.ImageBase
res['SectionAlignment'] = pe.OPTIONAL_HEADER.SectionAlignment
res['FileAlignment'] = pe.OPTIONAL_HEADER.FileAlignment
res['MajorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MajorOperatingSystemVersion
res['MinorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MinorOperatingSystemVersion
res['MajorImageVersion'] = pe.OPTIONAL_HEADER.MajorImageVersion
res['MinorImageVersion'] = pe.OPTIONAL_HEADER.MinorImageVersion
res['MajorSubsystemVersion'] = pe.OPTIONAL_HEADER.MajorSubsystemVersion
res['MinorSubsystemVersion'] = pe.OPTIONAL_HEADER.MinorSubsystemVersion
res['SizeOfImage'] = pe.OPTIONAL_HEADER.SizeOfImage
res['SizeOfHeaders'] = pe.OPTIONAL_HEADER.SizeOfHeaders
res['CheckSum'] = pe.OPTIONAL_HEADER.CheckSum
res['Subsystem'] = pe.OPTIONAL_HEADER.Subsystem
res['DllCharacteristics'] = pe.OPTIONAL_HEADER.DllCharacteristics
res['SizeOfStackReserve'] = pe.OPTIONAL_HEADER.SizeOfStackReserve
res['SizeOfStackCommit'] = pe.OPTIONAL_HEADER.SizeOfStackCommit
res['SizeOfHeapReserve'] = pe.OPTIONAL_HEADER.SizeOfHeapReserve
res['SizeOfHeapCommit'] = pe.OPTIONAL_HEADER.SizeOfHeapCommit
res['LoaderFlags'] = pe.OPTIONAL_HEADER.LoaderFlags
res['NumberOfRvaAndSizes'] = pe.OPTIONAL_HEADER.NumberOfRvaAndSizes
# Sections
res['SectionsNb'] = len(pe.sections)
entropy = list(map(lambda x: x.get_entropy(), pe.sections))
res['SectionsMeanEntropy'] = sum(entropy) / float(len(entropy))
res['SectionsMinEntropy'] = min(entropy)
res['SectionsMaxEntropy'] = max(entropy)
raw_sizes = list(map(lambda x: x.SizeOfRawData, pe.sections))
res['SectionsMeanRawsize'] = sum(raw_sizes) / float(len(raw_sizes))
res['SectionsMinRawsize'] = min(raw_sizes)
res['SectionsMaxRawsize'] = max(raw_sizes)
virtual_sizes = list(map(lambda x: x.Misc_VirtualSize, pe.sections))
res['SectionsMeanVirtualsize'] = sum(
virtual_sizes) / float(len(virtual_sizes))
res['SectionsMinVirtualsize'] = min(virtual_sizes)
res['SectionMaxVirtualsize'] = max(virtual_sizes)
# Imports
try:
res['ImportsNbDLL'] = len(pe.DIRECTORY_ENTRY_IMPORT)
imports = sum([x.imports for x in pe.DIRECTORY_ENTRY_IMPORT], [])
res['ImportsNb'] = len(imports)
res['ImportsNbOrdinal'] = len(
list(filter(lambda x: x.name is None, imports)))
except AttributeError:
res['ImportsNbDLL'] = 0
res['ImportsNb'] = 0
res['ImportsNbOrdinal'] = 0
# Exports
try:
res['ExportNb'] = len(pe.DIRECTORY_ENTRY_EXPORT.symbols)
except AttributeError:
# No export
res['ExportNb'] = 0
# Resources
resources = get_resources(pe)
res['ResourcesNb'] = len(resources)
if len(resources) > 0:
entropy = list(map(lambda x: x[0], resources))
res['ResourcesMeanEntropy'] = sum(entropy) / float(len(entropy))
res['ResourcesMinEntropy'] = min(entropy)
res['ResourcesMaxEntropy'] = max(entropy)
sizes = list(map(lambda x: x[1], resources))
res['ResourcesMeanSize'] = sum(sizes) / float(len(sizes))
res['ResourcesMinSize'] = min(sizes)
res['ResourcesMaxSize'] = max(sizes)
else:
res['ResourcesNb'] = 0
res['ResourcesMeanEntropy'] = 0
res['ResourcesMinEntropy'] = 0
res['ResourcesMaxEntropy'] = 0
res['ResourcesMeanSize'] = 0
res['ResourcesMinSize'] = 0
res['ResourcesMaxSize'] = 0
# Load configuration size
try:
res['LoadConfigurationSize'] = pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.Size
except AttributeError:
res['LoadConfigurationSize'] = 0
# Version configuration size
try:
version_infos = get_version_info(pe)
res['VersionInformationSize'] = len(version_infos.keys())
except AttributeError:
res['VersionInformationSize'] = 0
return res | f7f3cbef72f7b9d05c25e2aabde33c7a814d05bd | 6,085 |
def calibrate_eye_in_hand(calibration_inputs):
"""Perform eye-in-hand calibration.
Args:
calibration_inputs: List of HandEyeInput
Returns:
A HandEyeOutput instance containing the eye-in-hand transform
"""
return HandEyeOutput(
_zivid.calibration.calibrate_eye_in_hand(
[
calibration_input._HandEyeInput__impl # pylint: disable=protected-access
for calibration_input in calibration_inputs
]
)
) | d8bc7b8cfe821809c441d3151297edf7f8267803 | 6,086 |
from typing import Optional
import numpy as np
def get_intersect(A: np.ndarray, B: np.ndarray, C: np.ndarray, D: np.ndarray) -> Optional[np.ndarray]:
    """
    Get the intersection of segments [A, B] and [C, D]. Returns None if the segments don't cross.
:param A: Point of the first segment
:param B: Point of the first segment
:param C: Point of the second segment
:param D: Point of the second segment
:return: The intersection if any, otherwise None.
"""
det = (B[0] - A[0]) * (C[1] - D[1]) - (C[0] - D[0]) * (B[1] - A[1])
if det == 0:
# Parallel
return None
else:
t1 = ((C[0] - A[0]) * (C[1] - D[1]) - (C[0] - D[0]) * (C[1] - A[1])) / det
t2 = ((B[0] - A[0]) * (C[1] - A[1]) - (C[0] - A[0]) * (B[1] - A[1])) / det
if t1 > 1 or t1 < 0 or t2 > 1 or t2 < 0:
# not intersect
return None
else:
xi = A[0] + t1 * (B[0] - A[0])
yi = A[1] + t1 * (B[1] - A[1])
return np.array([xi, yi]) | 1c3fab6d189f218e9f5f7e6648a46a9e53683366 | 6,087 |
from typing import Callable
import numpy as np
from scipy import ndimage
def _make_vector_laplace_scipy_nd(bcs: Boundaries) -> Callable:
""" make a vector Laplacian using the scipy module
This only supports uniform discretizations.
Args:
bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
|Arg_boundary_conditions|
Returns:
A function that can be applied to an array of values
"""
scaling = bcs._uniform_discretization**-2
args = bcs._scipy_border_mode
dim = bcs.grid.dim
shape_out = (dim,) + bcs.grid.shape
def vector_laplace(arr, out=None):
""" apply vector Laplacian operator to array `arr` """
if out is None:
out = np.empty(shape_out)
for i in range(dim):
ndimage.laplace(arr[i], output=out[i], **args)
return out * scaling
return vector_laplace | 3cda36d53755c84fcb47259ade64752610aeffbe | 6,088 |
def dot_to_dict(values):
"""Convert dot notation to a dict. For example: ["token.pos", "token._.xyz"]
become {"token": {"pos": True, "_": {"xyz": True }}}.
values (iterable): The values to convert.
RETURNS (dict): The converted values.
"""
result = {}
for value in values:
path = result
parts = value.lower().split(".")
for i, item in enumerate(parts):
is_last = i == len(parts) - 1
path = path.setdefault(item, True if is_last else {})
return result | a2c56a01b179d27eabc728d6ff2ec979885d5feb | 6,089 |
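# Example mirroring the docstring:
assert dot_to_dict(["token.pos", "token._.xyz"]) == {
    "token": {"pos": True, "_": {"xyz": True}}
}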
from matplotlib.patches import ArrowStyle, ConnectionStyle, FancyArrowPatch
def _draw_edges(G, pos, nodes, ax):
"""Draw the edges of a (small) networkx graph.
Params:
G (nx.classes.*) a networkx graph.
pos (dict) returned by nx.layout methods.
nodes (dict) of Circle patches.
ax (AxesSubplot) mpl axe.
Return:
(dict) of Circle patches.
"""
pointer = ArrowStyle.Fancy(head_width=10, head_length=15)
curved_edge = ConnectionStyle('arc3', rad=.2)
arrow_kwargs = {'arrowstyle': pointer,
'antialiased': True,
'connectionstyle': curved_edge,
'edgecolor': None,
'facecolor': None,
'linewidth': None}
edges = {}
for i, (a, b, attr) in enumerate(G.edges.data()):
arrow_kwargs['edgecolor'] = attr['color']
arrow_kwargs['facecolor'] = attr['color']
arrow_kwargs['linewidth'] = 1.0
edge = FancyArrowPatch(pos[a], pos[b],
patchA=nodes[a], patchB=nodes[b],
shrinkA=5, shrinkB=5,
**arrow_kwargs)
ax.add_patch(edge)
edges[(a, b)] = edge
return edges | 28a207a190a7066656518de7c8e8626b2f534146 | 6,090 |
import numpy as np
def benjamini_hochberg_stepup(p_vals):
"""
Given a list of p-values, apply FDR correction and return the q values.
"""
# sort the p_values, but keep the index listed
index = [i[0] for i in sorted(enumerate(p_vals), key=lambda x:x[1])]
# keep the p_values sorted
p_vals = sorted(p_vals)
q_vals = [None]*len(p_vals) # initialize an empty list
prev_q = 0
# BH Step Up begins here.
for i, p in enumerate(p_vals):
q = len(p_vals)/(i+1)*p # calculate the q_value for the current point
q = min(q, 1) # if q >1, make it == 1
q = max(q, prev_q) # preserve monotonicity
q_vals[i] = q # store the q_value
prev_q = q # update the previous q_value
    # prevent the lowest q value from going to zero
    q_vals = np.asarray(q_vals)  # as an array so the boolean masking below works
    if np.sum(q_vals == 0) > 0:
        # set the min q-value to 10x less than the smallest non-zero value
        q_vals[np.where(q_vals == 0)] = np.min(q_vals[np.where(q_vals != 0)])/10
    # return q_vals and the index so we can match up each q-value to its index
    return q_vals, index | 7cff2e8d28cda37c4271935ef2e6fb48441137c3 | 6,091 |
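# Usage sketch: scatter the sorted q-values back to the input ordering
# using the returned index.
import numpy as np
p_vals = [0.01, 0.04, 0.03, 0.005]
q_vals, index = benjamini_hochberg_stepup(p_vals)
q_by_input = np.empty(len(p_vals))
q_by_input[index] = q_vals  # q_by_input[i] corresponds to p_vals[i]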
def remove_transcription_site(rna, foci, nuc_mask, ndim):
"""Distinguish RNA molecules detected in a transcription site from the
rest.
A transcription site is defined as as a foci detected within the nucleus.
Parameters
----------
rna : np.ndarray, np.int64
Coordinates of the detected RNAs with shape (nb_spots, 4) or
(nb_spots, 3). One coordinate per dimension (zyx or yx coordinates)
plus the index of the foci assigned to the RNA. If no foci was
assigned, value is -1.
foci : np.ndarray, np.int64
Array with shape (nb_foci, 5) or (nb_foci, 4). One coordinate per
dimension for the foci centroid (zyx or yx coordinates),
the number of RNAs detected in the foci and its index.
nuc_mask : np.ndarray, bool
Binary mask of the nuclei region with shape (y, x).
ndim : int
Number of spatial dimensions to consider (2 or 3).
Returns
-------
rna_out_ts : np.ndarray, np.int64
Coordinates of the detected RNAs with shape (nb_spots, 4) or
(nb_spots, 3). One coordinate per dimension (zyx or yx coordinates)
plus the index of the foci assigned to the RNA. If no foci was
assigned, value is -1. RNAs from transcription sites are removed.
foci : np.ndarray, np.int64
Array with shape (nb_foci, 5) or (nb_foci, 4). One coordinate per
dimension for the foci centroid (zyx or yx coordinates),
the number of RNAs detected in the foci and its index.
ts : np.ndarray, np.int64
Array with shape (nb_ts, 5) or (nb_ts, 4). One coordinate per
dimension for the transcription site centroid (zyx or yx coordinates),
the number of RNAs detected in the transcription site and its index.
"""
# check parameters
check_array(rna,
ndim=2,
dtype=np.int64)
# discriminate foci from transcription sites
ts, foci = identify_objects_in_region(
nuc_mask, foci, ndim)
# filter out rna from transcription sites
rna_in_ts = ts[:, ndim + 1]
mask_rna_in_ts = np.isin(rna[:, ndim], rna_in_ts)
rna_out_ts = rna[~mask_rna_in_ts]
return rna_out_ts, foci, ts | 3f6fe083cb85dbf2f7bc237e750be57f13398889 | 6,092 |
def hexagonal_numbers(length: int) -> list[int]:
"""
    :param length: max number of elements
    :type length: int
:return: Hexagonal numbers as a list
Tests:
>>> hexagonal_numbers(10)
[0, 1, 6, 15, 28, 45, 66, 91, 120, 153]
>>> hexagonal_numbers(5)
[0, 1, 6, 15, 28]
>>> hexagonal_numbers(0)
Traceback (most recent call last):
...
ValueError: Length must be a positive integer.
"""
if length <= 0 or not isinstance(length, int):
raise ValueError("Length must be a positive integer.")
return [n * (2 * n - 1) for n in range(length)] | 632e60505cb17536a17b20305a51656261e469f5 | 6,093 |
def get_free_remote_port(node: Node) -> int:
"""Returns a free remote port.
Uses a Python snippet to determine a free port by binding a socket
to port 0 and immediately releasing it.
:param node: Node to find a port on.
"""
output = node.run("python -c 'import socket; s=socket.socket();"
" s.bind((str(), 0)); print(s.getsockname()[1]);"
" s.close()'")
return int(output) | 4cdb0f62909abae1af8470611f63fcc9f5495095 | 6,094 |
from typing import Tuple
from typing import List
from tqdm import tqdm
def read_conll_data(data_file_path: str) -> Tuple[List[Sentence], List[DependencyTree]]:
"""
Reads Sentences and Trees from a CONLL formatted data file.
Parameters
----------
data_file_path : ``str``
Path to data to be read.
"""
sentences: List[Sentence] = []
trees: List[DependencyTree] = []
with open(data_file_path, 'r') as file:
sentence_tokens = []
tree = DependencyTree()
for line in tqdm(file):
line = line.strip()
array = line.split('\t')
if len(array) < 10:
if sentence_tokens:
trees.append(tree)
sentences.append(sentence_tokens)
tree = DependencyTree()
sentence_tokens = []
else:
word = array[1]
pos = array[4]
head = int(array[6])
dep_type = array[7]
token = Token(word=word, pos=pos,
head=head, dep_type=dep_type)
sentence_tokens.append(token)
tree.add(head, dep_type)
if not sentences:
raise Exception(f"No sentences read from {data_file_path}. "
f"Make sure you have not replaced tabs with spaces "
f"in conll formatted file by mistake.")
return sentences, trees | 6bee76277fb6a15d03c5c80a5d083920a4412222 | 6,095 |
from typing import Optional
def get_algo_meta(name: str) -> Optional[AlgoMeta]:
"""
Get meta information of a built-in or registered algorithm.
Return None if not found.
"""
for algo in get_all_algo_meta():
if algo.name == name:
return algo
return None | 3a568356d56d26192a1e38be6ec5dd57b52a9bba | 6,096 |
def read_gbt_target(sdfitsfile, objectname, verbose=False):
"""
Give an object name, get all observations of that object as an 'obsblock'
"""
bintable = _get_bintable(sdfitsfile)
whobject = bintable.data['OBJECT'] == objectname
if verbose:
print("Number of individual scans for Object %s: %i" % (objectname,whobject.sum()))
calON = bintable.data['CAL'] == 'T'
# HACK: apparently bintable.data can sometimes treat itself as scalar...
if np.isscalar(calON):
calON = np.array([(val in ['T',True]) for val in bintable.data['CAL']])
n_nods = np.unique(bintable.data['PROCSIZE'])
blocks = {}
for sampler in np.unique(bintable.data[whobject]['SAMPLER']):
whsampler = bintable.data['SAMPLER'] == sampler
nods = np.unique(bintable.data['PROCSEQN'][whsampler*whobject])
for nod in nods:
whnod = bintable.data['PROCSEQN'] == nod
for onoff in ('ON','OFF'):
                calOK = calON ^ (onoff == 'OFF')  # invert the cal mask for 'OFF'
whOK = (whobject*whsampler*calOK*whnod)
if whOK.sum() == 0:
continue
if verbose:
print("Number of spectra for sampler %s, nod %i, cal%s: %i" % (sampler,nod,onoff,whOK.sum()))
crvals = bintable.data[whOK]['CRVAL1']
if len(crvals) > 1:
maxdiff = np.diff(crvals).max()
else:
maxdiff = 0
freqres = np.max(bintable.data[whOK]['FREQRES'])
if maxdiff < freqres:
splist = [read_gbt_scan(bintable,ii) for ii in np.where(whOK)[0]]
blocks[sampler+onoff+str(nod)] = pyspeckit.ObsBlock(splist,force=True)
blocks[sampler+onoff+str(nod)]._arithmetic_threshold = np.diff(blocks[sampler+onoff+str(nod)].xarr).min() / 5.
else:
print("Maximum frequency difference > frequency resolution: %f > %f" % (maxdiff, freqres))
return blocks | 1215fdccee50f0ab5d135a5cccf0d02da09410e2 | 6,098 |
def regression_target(label_name=None,
weight_column_name=None,
target_dimension=1):
"""Creates a _TargetColumn for linear regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
target_dimension: dimension of the target for multilabels.
Returns:
An instance of _TargetColumn
"""
return _RegressionTargetColumn(loss_fn=_mean_squared_loss,
label_name=label_name,
weight_column_name=weight_column_name,
target_dimension=target_dimension) | 064954b58b57caeb654ed30f31b9560ab01d7c42 | 6,099 |
import numpy as np
from scipy.stats import norm
def rdoublegauss(mu1, mu2, sigma1, sigma2, ratio, size=None):
"""random variable from double gaussian"""
r1 = ratio / (1. + ratio)
r2 = 1 - r1
R = np.asarray(np.random.random(size))
Rshape = R.shape
    R = np.atleast_1d(R)
mask1 = (R < r1)
mask2 = ~mask1
N1 = mask1.sum()
N2 = R.size - N1
R[mask1] = norm(mu1, sigma1).rvs(N1)
R[mask2] = norm(mu2, sigma2).rvs(N2)
return R.reshape(Rshape) | 5286d31985656d2f38c4e6b126d2f6d0915c82cb | 6,100 |
def check_add_role(store, id, name):
""" Checks if role exist and then adds record if it doesn't """
role = store.find_role(name)
    if role is None:
return store.create_role(id=id, name=name)
else:
return role | c8680158cc005bf7a278951774b9fe0a733fc8c6 | 6,101 |
from pathlib import Path
def delta_path(base_path: Path, item_path: Path, new_base_path: Path) -> Path:
"""
Removes a base path from an item, and appends result to a new path
:param base_path: The :py:class:`pathlib.Path` to be removed from `item_path`
:param item_path: The :py:class:`pathlib.Path` to be delta-ed
:param new_base_path: The new base :py:class:`pathlib.Path` for `item_path`.
:raises ValueError: If base_path is not a sub-path of item_path.
:return: The new combined path.
"""
path_stub = item_path.relative_to(base_path)
new_item_path = new_base_path / path_stub
return new_item_path | ec531a011e36f053a8092525faae2047f5f66ccc | 6,103 |
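# Example re-rooting a file path:
from pathlib import Path
new_path = delta_path(Path("/data/in"), Path("/data/in/a/b.txt"), Path("/data/out"))
assert new_path == Path("/data/out/a/b.txt")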
import asyncio
async def async_validate_config(hass, config):
"""Validate config."""
automations = list(
filter(
lambda x: x is not None,
await asyncio.gather(
*(
_try_async_validate_config_item(hass, p_config, config)
for _, p_config in config_per_platform(config, DOMAIN)
)
),
)
)
# Create a copy of the configuration with all config for current
# component removed and add validated config back in.
config = config_without_domain(config, DOMAIN)
config[DOMAIN] = automations
return config | 7f77a4c008a5fcb8d275bb2e7f65005d9e1c49b5 | 6,104 |
def _fwd6(y, dt): # pragma: no cover
"""Compute the first derivative of a uniformly-spaced-in-time array with a
sixth-order forward difference scheme.
Parameters
----------
y : (7,...) ndarray
Data to differentiate. The derivative is taken along the first axis.
Returns
-------
dy0 : float or (...) ndarray
Approximate derivative of y at the first entry, i.e., dy[0] / dt.
"""
return (-147*y[0] + 360*y[1] - 450*y[2] + 400*y[3] - 225*y[4] \
+ 72*y[5] - 10*y[6]) / (60*dt) | 0d7321b3615fab6d6e065917ec94479ada0ee70c | 6,106 |
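# Sanity check: the sixth-order scheme is exact for low-order polynomials,
# so for y = t**2 the derivative at t = 0 comes out (numerically) zero.
import numpy as np
dt = 0.1
y = (np.arange(7) * dt) ** 2
assert abs(_fwd6(y, dt)) < 1e-9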
def minimize_newton_cg(nrgs, x0, num_params):
"""
Minimzes a structure using a Newton-CG method. This requires a
hopefully fully invertible analytic Hessian that will be used
to minimize geometries.
Parameters
----------
nrgs: [list of functionals]
Energy functions used to compute the energy, hessian, and mixed partials.
x0: np.array
Structure of the molecule to be minimized.
num_params: int
total number of parameters of the model. (ytz): this should be refactored out.
"""
assert x0.shape[1] == 3
N = x0.shape[0]
def hessian(conf):
conf = conf.reshape((N,3))
hess = None
for e in nrgs:
_, _, test_hessians, _ = e.total_derivative(conf, num_params)
if hess is None:
hess = test_hessians
else:
hess += test_hessians
return hess.reshape((N*3, N*3))
def gradient(conf):
conf = conf.reshape((N,3))
grads = np.zeros_like(conf)
for e in nrgs:
_, test_grads, _, _ = e.total_derivative(conf, num_params)
grads += test_grads
return grads.reshape(-1)
def energy(conf):
conf = conf.reshape((N,3))
nrg = 0
for e in nrgs:
test_nrg, _, _, _ = e.total_derivative(conf, num_params)
nrg += test_nrg
return nrg
res = minimize(
energy,
x0.reshape(-1),
# method='Newton-CG',
method='L-BFGS-B',
jac=gradient,
# hess=hessian,
# options={'xtol': 1e-8, 'disp': False}
)
# print("before and after")
# print(x0)
# print(np.array(res.x).reshape((N,3)))
return res.x.reshape((N,3))
# print(energy(x0), gradient(x0), hessian(x0).shape) | 46ddd6b2004579ef07170ef578859c7119ed4e13 | 6,107 |
def currency(price, currency):
"""
Returns price in currency format
"""
price = float(price)
price *= float(currency.exchange_rate)
try:
return currency.display_format.format(price)
except Exception as e:
        raise ImproperlyConfigured('Invalid currency format string: "%s" for currency "%s". %s' % (currency.display_format, currency.name, e)) | 2204993f5f51c62669395de40dc14d16f110c4b4 | 6,108 |
def project_point(x, R, T, f, c, k, p):
"""
Args
x: Nx3 points in world coordinates
R: 3x3 Camera rotation matrix
T: 3x1 Camera translation parameters
f: 2x1 Camera focal length
c: 2x1 Camera center
k: 3x1 Camera radial distortion coefficients
p: 2x1 Camera tangential distortion coefficients
Returns
ypixel.T: Nx2 points in pixel space
depth: N points
"""
if 'aist' in config.DATASET.TEST_DATASET:
xcam = R.dot(x.T) + T # [B, 3, PJ]
else:
xcam = R.dot(x.T - T) # [B, 3, PJ]
y = xcam[:2] / (xcam[2] + 1e-5)
# === add camera distortion
r = np.sum(y ** 2, axis=0)
d = 1 + k[0] * r + k[1] * r * r + k[2] * r * r * r
u = y[0, :] * d + 2 * p[0] * y[0, :] * y[1, :] + p[1] * (r + 2 * y[0, :] * y[0, :])
v = y[1, :] * d + 2 * p[1] * y[0, :] * y[1, :] + p[0] * (r + 2 * y[1, :] * y[1, :])
y[0, :] = u
y[1, :] = v
ypixel = np.multiply(f, y) + c
depth = xcam[2]
return ypixel.T, depth | 5b6cce136ac6753fcdefcde01db9636357687ab2 | 6,109 |
def sum_to_scalar(*args):
"""Adding losses/nmsks together that were evaluated in parallel"""
new_args = list()
for arg in args:
new_args.append({k: v.sum() for (k, v) in arg.items()})
return new_args | a4264911962c7bf3432735f8872522e193ceec8f | 6,110 |
import numpy as np
from scipy import linalg
def inv(h_array: np.ndarray) -> np.ndarray:
"""
Calculate pinvh of PSD array. Note pinvh performs poorly
if input matrix is far from being Hermitian, so use pinv2
instead in this case.
Parameters:
----------
h_array : input matrix, assume to be Hermitian
Returns:
----------
h_inv : pseudo inverse of h_array.
"""
    if np.allclose(h_array, h_array.conj().T):
h_inv = linalg.pinvh(h_array)
else:
h_inv = linalg.pinv2(h_array)
return h_inv | c3305878b3f2dfdaabe6a245d8063b1039e19bc2 | 6,111 |
import datetime
def update_risk_cavs(connection):
"""Parse cavs from html to markdown.
Args:
connection: SQLAlchemy connection.
Returns:
ids of risks for which cavs where updated.
"""
cavs_data = connection.execute(
sa.text("""
SELECT cav.id, cav.attribute_value, cav.attributable_id
FROM custom_attribute_values AS cav
JOIN custom_attribute_definitions AS cad
ON cad.id = cav.custom_attribute_id
WHERE cad.definition_type = "risk"
AND attribute_value REGEXP :reg_exp
"""),
reg_exp=REGEX_HTML
).fetchall()
risks_ids = {data[2] for data in cavs_data}
cavs_ids = {data[0] for data in cavs_data}
cavs_table = sa.sql.table(
'custom_attribute_values',
sa.Column('id', sa.Integer()),
sa.Column('attribute_value', sa.Text, nullable=False),
sa.Column('updated_at', sa.DateTime, nullable=False),
)
for cav_id, attribute_value, _ in cavs_data:
op.execute(cavs_table.update().values(
attribute_value=parse_html(attribute_value),
updated_at=datetime.datetime.utcnow(),
).where(cavs_table.c.id == cav_id))
utils.add_to_objects_without_revisions_bulk(
connection, cavs_ids, "CustomAttributeValue", "modified",
)
return risks_ids | 8af9ef613259915573ca1efc699278c0c2a6a4e4 | 6,112 |
def prefix_to_number(prefix):
"""Return the number of the prefix."""
if prefix in PREFIXES:
return PREFIXES[prefix]
raise ValueError(f'prefix "{prefix}" not found in list of prefixes') | e0a3822aa615d79a1ff0d5c7405097e055573ed0 | 6,113 |
def is_response_going_to_be_used(request, spider):
"""Check whether the request's response is going to be used."""
callback = get_callback(request, spider)
if is_callback_using_response(callback):
return True
for provider in discover_callback_providers(callback):
if is_provider_using_response(provider):
return True
return False | 4cd908dbebfd6089a25bf5168937b2a4f02f23ee | 6,114 |
def eval_market1501(distmat, q_vids, g_vids, q_camids, g_camids, max_rank=50):
"""Evaluation with Market1501 metrics
Key: for each query identity, its gallery images from the same camera view are discarded.
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print(f"Note: number of gallery samples is quite small, got {num_g}")
indices = np.argsort(distmat, axis=1)
matches = (g_vids[indices] == q_vids[:, np.newaxis]).astype(np.int32)
# compute cmc curve for each query
all_cmc = []
all_ap = []
num_valid_q = 0. # number of valid query
for q_idx in range(num_q):
# get query vid and camid
q_vid = q_vids[q_idx]
q_camid = q_camids[q_idx]
# remove gallery samples that have the same vid and camid with query
order = indices[q_idx]
remove = (g_vids[order] == q_vid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
# compute cmc curve
# binary vector, positions with value 1 are correct matches
orig_cmc = matches[q_idx][keep]
if not np.any(orig_cmc):
# this condition is true when query identity does not appear in gallery
continue
cmc = orig_cmc.cumsum()
cmc[cmc > 1] = 1
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.
# compute average precision
# https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
num_rel = orig_cmc.sum()
tmp_cmc = orig_cmc.cumsum()
tmp_cmc = [x / (i+1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
ap_ = tmp_cmc.sum() / num_rel
all_ap.append(ap_)
assert num_valid_q > 0, "Error: all query identities do not appear in gallery"
all_cmc = np.array(all_cmc, dtype=np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
map_ = np.mean(all_ap)
return all_cmc, map_ | 5387ee7fe7cac90406ac91619844e8e1fd814d88 | 6,115 |
def pytype_raise():
"""A pytest.raises wrapper for catching TypeErrors.
Parameters
----------
match : str, default=None
Regular expression to match exception error text against.
Returns
-------
RaisesContext
pytest context manager for catching exception-raising blocks.
"""
def _pytype_raise(match=None):
return pytest.raises(TypeError, match=match)
_pytype_raise.__doc__ = pyvalue_raise.__doc__
return _pytype_raise | ec5c7a56a8a3fb9028fb0ec72ac814061def467d | 6,117 |
def lift_split_buffers(lines):
"""Lift the split buffers in the program
For each module, if we find any split buffers with the name "buf_data_split",
we will lift them out of the for loops and put them in the variable declaration
section at the beginning of the module.
Parameters
----------
lines:
contains the codelines of the program
"""
code_len = len(lines)
for pos in range(code_len):
line = lines[pos]
if line.find("variable=buf_data_split") != -1:
# Search for the variable declaration section
decl_pos = -1
prev_pos = pos - 1
while prev_pos >= 0:
prev_line = lines[prev_pos]
if prev_line.find("Variable Declaration") != -1:
decl_pos = prev_pos
break
prev_pos -= 1
# Move the two code lines at [pos - 1] and [pos] to [decl_pos] and
# [decl_pos + 1]
indent = lines[decl_pos].find("/*")
line1 = " " * indent + lines[pos - 1].lstrip()
line2 = " " * indent + lines[pos].lstrip()
del lines[pos - 1]
del lines[pos - 1]
lines.insert(decl_pos, line1)
lines.insert(decl_pos + 1, line2)
return lines | 78919247b241dc29de84594b097c75d5e7ae1f03 | 6,118 |
import scipy
def peak_finder(
df_run,
cd,
windowlength,
polyorder,
datatype,
lenmax,
peak_thresh):
"""Determines the index of each peak in a dQdV curve
V_series = Pandas series of voltage data
dQdV_series = Pandas series of differential capacity data
cd = either 'c' for charge and 'd' for discharge.
Output:
i = list of indexes for each found peak"""
(cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col,
char_cap_col, charge_or_discharge) = col_variables(datatype)
V_series = df_run[volt_col]
# this makes the peak finding smoothing independent of any smoothing that
# has already occured.
dQdV_series = df_run['Smoothed_dQ/dV']
sigx, sigy = cd_dataframe(V_series, dQdV_series, cd)
# the below is to make sure the window length ends up an odd number - even
# though we are basing it on the length of the df
wl = lenmax / 20
wlint = int(round(wl))
if wlint % 2 == 0:
windowlength_new = wlint + 1
else:
windowlength_new = wlint
if len(sigy) > windowlength_new and windowlength_new > polyorder:
# has to be larger than 69 so that windowlength > 3 - necessary for sav
# golay function
sigy_smooth = scipy.signal.savgol_filter(
sigy, windowlength_new, polyorder)
else:
sigy_smooth = sigy
peak_thresh_ft = float(peak_thresh)
i = peakutils.indexes(
sigy_smooth,
thres=peak_thresh_ft,
min_dist=lenmax / 50)
if i is not None and len(i) > 0:
sigx_volts = list(sigx[i])
peak_heights = list(sigy[i])
else:
sigx_volts = []
peak_heights = []
return i, sigx_volts, peak_heights | 370e019354579ab7b9a4eedef514dbde84801950 | 6,119 |
def make_box(world, x_dim, y_dim, z_dim, mass=0.5):
"""Makes a new axis-aligned box centered at the origin with
dimensions width x depth x height. The box is a RigidObject
with automatically determined inertia.
"""
boxgeom = Geometry3D()
boxgeom.loadFile("data/objects/cube.tri")
# box is centered at the origin
boxgeom.transform([x_dim, 0, 0, 0, y_dim, 0, 0, 0, z_dim], [-x_dim * 0.5, -y_dim * 0.5, -z_dim * 0.5])
print "Making a box a rigid object"
bmass = Mass()
bmass.setMass(mass)
bmass.setCom([0, 0, 0])
bmass.setInertia([x_dim / 12, y_dim / 12, z_dim / 12])
box = world.makeRigidObject("box")
box.geometry().set(boxgeom)
box.appearance().setColor(0.6, 0.3, 0.2, 1.0)
box.setMass(bmass)
cparams = box.getContactParameters()
cparams.kFriction = 1.5
cparams.kStiffness = 100000
cparams.kDamping = 30000
cparams.kRestitution = 0.5
return box | f3257a8339542c55d96bd752bad1d0c69c6370e0 | 6,120 |
def LikelihoodRedshiftMeasure( measure='', data=[], scenario=False, measureable=False):
"""
returns likelihood functions of redshift for observed data of measure,
can be used to obtain estimate and deviation
Parameters
----------
measure : string
indicate which measure is probed
data : array-like
        1D array containing the extragalactic component of observed values
scenario : dictionary
list of models combined to one scenario
    measureable : boolean
        if True, restrict the likelihoods to the measureable range
"""
if not measure:
exit( "you must provide a measure. Try: 'DM', 'RM', 'tau'" )
    if not scenario:
        exit( "requires scenario with telescope and population" )
## prepare scenario for increasing redshift
tmp = scenario.copy()
tmp.population = False
tmp.telescope = False
## container for likelihoods and deviation at incrasing redshift
Ps = np.zeros( [len(DMs),len(redshift_bins)] )
devs= Ps.copy()
## for each redshift
for iz, z in enumerate( redshift_bins ):
tmp.redshift = z
L = GetLikelihood( measure, tmp )
if measureable:
L.Measureable()
        Ps[:,iz], devs[:,iz] = L.Likelihoods( data, density=True ) ### use probability density to compare the same value at different redshifts; otherwise influenced by different binning
Ls = []
    for P, dev in zip( Ps, devs ):
L = LikelihoodFunction( P=P, x=redshift_range, dev=dev )
Ls.append(L)
return Ls | 55d414bb0adb00fe549485f2e3682d15b761b7a4 | 6,123 |
def plural_suffix(count: int) -> str:
""""s" when count is not one"""
suffix = ''
if count != 1:
suffix = 's'
return suffix | 950002d57560d06e93e08647ff17d885688bca87 | 6,124 |
def _pr_exists(user, namespace, repo, idx):
""" Utility method checking if a given PR exists. """
repo_obj = pagure.lib.query.get_authorized_project(
flask.g.session, project_name=repo, user=user, namespace=namespace
)
if not repo_obj:
return False
pr_obj = pagure.lib.query.search_pull_requests(
flask.g.session, project_id=repo_obj.id, requestid=idx
)
if not pr_obj:
return False
return pr_obj | 2e68b6d4282f6f3ca4d9645c78579e3df3889494 | 6,125 |
import csv
def readData(filename):
"""
Read in our data from a CSV file and create a dictionary of records,
where the key is a unique record ID and each value is dict
"""
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for row in reader:
clean_row = [(k, preProcess(v)) for (k, v) in row.items()]
row_id = int(row['activity_nr'])
data_d[row_id] = dict(clean_row)
return data_d | 57dcb39dac9568024ae4be07bc0921c941d6fae3 | 6,126 |
def _get_client(app):
"""Returns a client instance for an App.
If the App already has a client associated with it, simply returns
it. Otherwise creates a new client, and adds it to the App before
returning it.
Args:
app: A Firebase App instance (or ``None`` to use the default App).
Returns:
Client: A client for the specified App instance.
Raises:
ValueError: If the app argument is invalid.
"""
return _utils.get_app_service(app, _AUTH_ATTRIBUTE, Client) | de96140ed7c15a4aa390f08a76fe7de0074730db | 6,127 |
def get_job_config_build_for_branch(**kwargs):
"""pass kwargs to JobConfig constructor"""
return JobConfig(
type=JobType.copr_build,
trigger=JobConfigTriggerType.commit,
branch="build-branch",
scratch=True,
**kwargs,
) | 0c16a16bce6a1f05ca8daf764dd2de80147c90c4 | 6,128 |
import yaml
def get_connection_string_from_config_file(cfg_src, db_cfg_key):
"""
Gets connection parameters from specified section in
a configuration file.
"""
# reading complete configuration
with open(cfg_src, 'r') as yml_file:
cfg = yaml.safe_load(yml_file)
# looking for specified connection name
    for connection_cfg in cfg['connections']:
        if db_cfg_key not in connection_cfg:
            continue
        db_cfg = connection_cfg[db_cfg_key]
# reading distinct configuration parameters
try:
db_engine = db_cfg['db_engine']
user = db_cfg['user']
password = db_cfg['password']
host = db_cfg['host']
port = db_cfg['port']
database = db_cfg['database']
except KeyError as e:
print(
"Unable to retrieve parameter '%s' "
"from configuration file." % e.args[0])
return
except Exception:
print("Unable to read configuration file")
return
# setting up connection string
conn_string = "{0}://{1}:{2}@{3}:{4}/{5}".format(
db_engine, user, password, host, port, database)
return conn_string | e2245f8e9124d36e5a373f1891590046c10a38fd | 6,129 |
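# A minimal usage sketch for the helper above; the YAML layout below and the
# 'analytics_db' key are illustrative assumptions, not part of the original.
#
#   connections:
#     - analytics_db:
#         db_engine: postgresql
#         user: reporter
#         password: secret
#         host: localhost
#         port: 5432
#         database: analytics
#
# conn_string = get_connection_string_from_config_file('db.yml', 'analytics_db')
# -> 'postgresql://reporter:secret@localhost:5432/analytics'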
import numpy as np
from typing import Tuple
from typing import Sequence
def _decomp_0_matrices(
kak: 'cirq.KakDecomposition',
atol: float = 1e-8,
) -> Tuple[Sequence[Tuple[np.ndarray, np.ndarray]], complex]:
"""Returns the single-qubit matrices for the 0-SQRT_ISWAP decomposition.
Assumes canonical x, y, z and (x, y, z) = (0, 0, 0) within tolerance.
"""
# Pairs of single-qubit unitaries, SQRT_ISWAP between each is implied
# Only a single pair of single-qubit unitaries is returned here so
# _decomp_to_operations will not insert any sqrt-iSWAP gates in between
return [
(
kak.single_qubit_operations_after[0] @ kak.single_qubit_operations_before[0],
kak.single_qubit_operations_after[1] @ kak.single_qubit_operations_before[1],
)
], kak.global_phase | b84d65cc7076b5d294cbf7f4f6a3c3ddff7ef7d2 | 6,130 |
import math
def concave(x, m):
"""Shape function."""
assert shape_args_ok(x, m)
result = 1.0
for i in range(1, len(x) - m + 1):
result *= math.sin(x[i - 1] * math.pi / 2.0)
if m != 1:
result *= math.cos(x[len(x) - m] * math.pi / 2.0)
return correct_to_01(result) | 70020efb06f35e169041491724bd6ddc7c7a9a35 | 6,131 |
import numpy as np


def norm_img(img):
    """
    Normalize an image per channel to zero mean and unit variance.
    :param img: (H, W, C)
    :return:
    norm_img: (H, W, C)
    """
height, width, channel = img.shape
img = np.reshape(img, (height * width, channel)) # (height * width, channel)
mean = np.mean(img, axis=0, keepdims=True) # (1, channel)
center = img - mean # (height * width, channel)
var = np.sum(np.power(center, 2), axis=0, keepdims=True) / (height * width) # (1, channel)
std = np.sqrt(var) # (1, channel)
_norm_img = center / std # (height * width, channel)
_norm_img = np.reshape(_norm_img, (height, width, channel))
return _norm_img | a794ec4e096faa0efbfc9c993d9292a54f6573cc | 6,133 |
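# A minimal self-contained check of norm_img (the random image below is
# illustrative): after normalization every channel has ~zero mean and
# ~unit standard deviation.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_img = rng.uniform(0, 255, size=(4, 5, 3))
    flat = norm_img(demo_img).reshape(-1, 3)
    print(flat.mean(axis=0))  # ~[0. 0. 0.]
    print(flat.std(axis=0))   # ~[1. 1. 1.]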
import numpy as np


def convert_examples_to_features(examples, use_label):
"""Loads a data file into a list of `InputBatch`s."""
features = []
line_tags = []
for (ex_index, example) in enumerate(examples):
if use_label:
labels = example.labels
else:
labels = ['O'] * len(example.units)
samples = []
context, tokens, predict_mask, label_ids = [], [], [], []
for i, w in enumerate(example.units):
if w == '[MASK]':
sub_words = ['[MASK]']
else:
sub_words = tokenizer.tokenize(w)
if not sub_words:
sub_words = ['[UNK]']
tokens.extend(sub_words)
predict_mask.append(1)
predict_mask.extend([0] * (len(sub_words) - 1))
label_ids.append(label_map[labels[i]])
label_ids.extend([0] * (len(sub_words) - 1))
while len(context) + len(tokens) >= max_seq_length - 2:
l = max_seq_length - len(context) - 2
samples.append(
[['[CLS]'] + context + tokens[:l] + ['[SEP]'], [0] * (len(context) + 1) + predict_mask[:l] + [0],
[0] * (len(context) + 1) + label_ids[:l] + [0]])
if not context:
line_tags.append(1)
else:
line_tags.append(0)
context = tokens[max(0, l - max_seq_length // 2):l]
            tokens, predict_mask, label_ids = (
                tokens[l:], predict_mask[l:], label_ids[l:])
if sum(predict_mask):
samples.append([['[CLS]'] + context + tokens + ['[SEP]'], [0] * (len(
context) + 1) + predict_mask + [0], [0] * (len(context) + 1) + label_ids + [0]])
if not context:
line_tags.append(1)
else:
line_tags.append(0)
for s in samples:
input_ids = tokenizer.convert_tokens_to_ids(s[0])
input_mask = [1] * len(input_ids)
padding_length = max_seq_length - len(input_ids)
zero_padding = [0] * padding_length
input_ids += zero_padding
input_mask += zero_padding
predict_mask = s[1] + zero_padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(predict_mask) == max_seq_length
if use_label:
label_ids = s[2] + zero_padding
assert len(label_ids) == max_seq_length
one_hot_labels = np.eye(
len(label_map), dtype=np.float32)[label_ids]
else:
one_hot_labels = None
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask,
predict_mask=predict_mask, one_hot_labels=one_hot_labels))
assert len(examples) == sum(line_tags), logger.error(
'{} != {}'.format(len(examples), sum(line_tags)))
return features, line_tags | 7720a79b7404e0d4cc340ae5ea78084b64115f92 | 6,135 |
import tensorflow as tf


def broadcast_to_rank(t, rank, axis = -1):
"""Appends dimensions to tf.Tensor `t` at axis `axis` to match rank `rank`."""
rank_t = t.shape.rank # Assumes ranks are known at compile time (static).
for _ in range(rank - rank_t):
t = tf.expand_dims(t, axis=axis)
return t | 8a57a1d71f92aefc6015481b358b65f565af1b00 | 6,136 |
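# A quick usage sketch of broadcast_to_rank: singleton dimensions are appended
# (default) or prepended until the requested rank is reached.
if __name__ == "__main__":
    t = tf.zeros([2, 3])
    print(broadcast_to_rank(t, 4).shape)          # (2, 3, 1, 1)
    print(broadcast_to_rank(t, 4, axis=0).shape)  # (1, 1, 2, 3)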
from functools import wraps


def operator(func):
"""
Help decorator to rewrite a function so that
it returns another function from it.
"""
@wraps(func)
def wrapper(*args, **kwargs):
def operator(stream):
return func(stream, *args, **kwargs)
return operator
return wrapper | cd2873954ee9dff003d2481d296c5be8740675c8 | 6,137 |
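# A small self-contained demo of the decorator above: the decorated function
# becomes a factory that captures its extra arguments and returns a callable
# expecting only the stream, which makes pipeline stages easy to compose.
@operator
def scale(stream, factor):
    return [x * factor for x in stream]


if __name__ == "__main__":
    double = scale(2)         # returns an operator waiting for a stream
    print(double([1, 2, 3]))  # [2, 4, 6]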
def json(body, charset="utf-8", **kwargs):
"""Takes JSON formatted data, converting it into native Python objects"""
return json_converter.loads(text(body, charset=charset)) | e2cabfca983abb96018f51ea3c09826e033227bb | 6,138 |
def read_corpus(file_path, encoding=ENCODING, **kwargs):
"""
Create a Linguistica object with a corpus data file.
:param file_path: path of input corpus file
:param encoding: encoding of the file at *file_path*. Default: ``'utf8'``
:param kwargs: keyword arguments for parameters and their values.
"""
return Lexicon(file_path=file_path, wordlist_file=False, encoding=encoding,
**kwargs) | 28f8303e0b94e8df9b6d9a33aca14fa62b15f6e8 | 6,139 |
import random
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
pageRanks = {page: 0 for page in corpus}
# Randomly select a page to start
currPage = random.choice(list(corpus.keys()))
for _ in range(n):
pageRanks[currPage] += 1
model = transition_model(corpus, currPage, damping_factor)
currPage = random.choice(list(model.keys()))
return {page: rank / n for page, rank in pageRanks.items()} | 5c9f66aaf72c8330c2ee0fcd2402bf613c4eb9b7 | 6,140 |
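# A usage sketch for sample_pagerank; the corpus below is illustrative and the
# transition_model helper referenced above is assumed to be defined alongside.
# corpus = {"1.html": {"2.html"}, "2.html": {"1.html", "3.html"}, "3.html": {"2.html"}}
# ranks = sample_pagerank(corpus, damping_factor=0.85, n=10000)
# assert abs(sum(ranks.values()) - 1.0) < 1e-9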
import networkx
def compute_participants(matches, challonge_data):
"""Compute series participants.
Iterate all matches and players to create a graph.
Apply connected components algorithm to resolve distinct
participant groups over all matches.
Sort participant groups by number of wins to correlate
with Challonge participant data (which also includes number
of wins).
Note that edge cases exist that are not covered. For example,
teams sometimes field a 1v1 player for a single match. If neither
player in the 1v1 match takes part in any other matches,
the players can't be placed in a participant group and their win
is not counted. There are two consequences:
1. Not counting a win may make the number of wins between
participants even, in which case we don't know which
participant group won the series.
2. Not grouping a player means the participant player list
will be incomplete.
"""
graph = networkx.DiGraph()
win_id = 0
platform_ids = []
name_to_user = {}
for match in matches:
# Record a win
win_id += 1
graph.add_node(win_id, type='win')
# Record platform ID
platform_ids.append(match['platform_id'])
# Add node for each player
for player in match['players']:
name_to_user[player['name']] = player['user_id']
graph.add_node(player['name'], type='player')
# Can happen for incomplete matches
if match['winning_team'] is None:
continue
# Connect winning players to recorded win
for player in match['winning_team']['players']:
graph.add_edge(player['name'], win_id)
# Connect all players on the same team
for team in match['teams']:
for i in team['players']:
for j in team['players']:
graph.add_edge(i['name'], j['name'])
mgz_data = [{
'wins': len([node for node in g if graph.nodes[node]['type'] == 'win']),
'players': [node for node in g if graph.nodes[node]['type'] == 'player']
} for g in networkx.weakly_connected_components(graph)]
return [{
'user_ids': [name_to_user[n] for n in mgz['players']],
'winner': challonge['winner'],
'name': challonge['name'],
'score': challonge['score'],
'platform_id': platform_ids[0]
} for mgz, challonge in zip(
sorted(mgz_data, key=lambda k: -1 * k['wins']),
sorted(challonge_data, key=lambda k: -1 * k['score'] if k['score'] else 0)
)] | a715773d5edd3b4d6852096c665070e64bef1165 | 6,142 |
def write_haiku(word_array, is_ipv6):
"""Return the beautiful haiku"""
# String to place in schema to show word slot.
octct = 'OCTET'
schema = get_schema(is_ipv6, octct)
# Replace each instance of 'octet' in the schema with a word from
# the encoded word array.
for i in range(len(word_array)):
for j in range(len(schema)):
if schema[j] == octct:
schema[j] = word_array[i]
break
# Capitalize appropriate words.
schema = capitalize_haiku(schema)
haiku = ''.join(schema)
return haiku | b51dc7cd1cca642eb135c48952bbc2ca74faf5e1 | 6,143 |
import pandas as pd


def import_data():
"""
Utility function to imoprt summary tsv ready for usage in PyMol
"""
col_types = {
'sift_score': float, 'sift_median': float, 'total_energy': float,
'interaction_energy': float, 'diff_interaction_energy': float,
'diff_interface_residues': float, 'freq': float
}
return pd.read_csv('data/output/summary.tsv', sep='\t', index_col=False,
dtype=col_types, low_memory=False) | 1b116d74ecba83658d05ea5dbda66b15175f3fdb | 6,144 |
import datetime
def get_current_datetime():
"""
Get the current datetime.
Note: This function is intended to be mocked in testing
Return:
time(datetime.datetime): current datetime
"""
return datetime.datetime.now(current_app.config['TIMEZONE']) | 6e7986eb6029e9c2be66019d7e9f35a79580c742 | 6,145 |
def adapt_all(iterable, to_cls):
"""
Returns a list of items from adapting each item in iterable to `cls`
If `iterable` is `None`, an empty list will be returned.
"""
if iterable is None:
return []
return [adapt(obj, to_cls) for obj in iterable] | a7c4d0adcce144223929081f47512f9d673efb28 | 6,146 |
import torch
def log_sum_exp_vb(vec, m_size):
"""
calculate log of exp sum
args:
vec (batch_size, vanishing_dim, hidden_dim) : input tensor
m_size : hidden_dim
return:
batch_size, hidden_dim
"""
_, idx = torch.max(vec, 1) # B * 1 * M
max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M
    return max_score.view(-1, m_size) + torch.log(
        torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1, m_size) | 87c99f9ab9a9c114792a2c895284a8743682fc06 | 6,148
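# A minimal numerical check of the function above against a naive log-sum-exp;
# subtracting the per-batch maximum keeps the computation stable for large inputs.
if __name__ == "__main__":
    vec = torch.randn(2, 5, 3)  # (batch_size, vanishing_dim, hidden_dim)
    out = log_sum_exp_vb(vec, m_size=3)
    naive = torch.log(torch.exp(vec).sum(dim=1))
    print(torch.allclose(out, naive, atol=1e-6))  # True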
import numpy as np


def C_fun_gen(fractions, speciesindices, y, time):
"""
Calculate the distribution of carbon functional groups as a percent of
total carbon.
Parameters
----------
fractions : list
The lumped phases that you want to include (as specified
in MW['species'][1], options are any subset of
['g','s','lt','t','char','H20','CO','CO2'] or ['initial']
for the case when you want to determine the initial
distribution before pyrolysis)
speciesindices : dict
dictionary from `load_results()` where species names are
keys and values are the index in `y` that corresponds to
that species
y : numpy array
a matrix with the concentrations of each species in the
kinetic scheme for every time in `t` (mol/L)
time : int
the index of the timepoint that you want the results for
Returns
-------
C_fun : numpy array
the distribution of carbon functional groups as a percent of total
carbon. The order of the elements in the array is:
carbonyl, aromatic C-O, aromatic C-C, aromatic C-H, aliphatic C-O,
aromatic methoxyl, aliphatic C-C
"""
C_fun = np.zeros(7)
ind = speciesindices
    for species in MW:
        if fractions == ['initial']:
            time = 0
            if y[time, speciesindices[species]] != 0:
                # moles of each functional group/L
                # (order as listed in the Returns section of the docstring)
                for k in range(7):
                    C_fun[k] += y[time, ind[species]] * MW[species][4][k]
        else:
            if MW[species][1] in set(fractions):
                for k in range(7):
                    C_fun[k] += y[time, ind[species]] * MW[species][4][k]
C_fun /= C_fun.sum()
return C_fun | 28704b470fd919d998fcd8704b125827226fe151 | 6,149 |
import numpy as np


def _sigmoid(x):
"""
Sigmoid function that smoothly limits values between 0.0 and 1.0
:param x: Numpy array with float values that are to be limited.
:return: Numpy array with float values between 0.0 and 1.0
"""
return 1.0 / (1.0 + np.exp(-x)) | 770875ba82df9d4ac8eb6d403527cf0fb62d3990 | 6,151 |
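# A quick self-contained check: inputs are squashed smoothly into (0, 1),
# with zero mapping to exactly 0.5.
if __name__ == "__main__":
    print(_sigmoid(np.array([-10.0, 0.0, 10.0])))  # ~[4.54e-05 0.5 0.9999546]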
from typing import Dict
def inherit_n_genes_prob(n, n_father, n_mother, mutation_prob) -> float:
    """Returns the conditional probability that a child inherits n genes,
    given that the father has n_father genes and the mother has n_mother
    genes, taking into account the probability of mutations."""
# Probabily distributions:
# key 0 or False: probability of not inheriting the gene from parent
# key 1 or True: probability of inheriting the gene from parent
probs_f: Dict[bool, float] = p_not_p(prob_inherit(n_father, mutation_prob))
probs_m: Dict[bool, float] = p_not_p(prob_inherit(n_mother, mutation_prob))
return (
# Prob to not inherit at all
probs_f[0] * probs_m[0] if n == 0
# Prob to inherit from one parent only
else probs_f[1] * probs_m[0] + probs_f[0] * probs_m[1] if n == 1
# Prob to inherit from both parents
else probs_f[1] * probs_m[1]
) | 0481244db107f6623aa109212e74be8b719f5bb8 | 6,152 |
async def get_metrics_address_counts_summary():
"""
Latest summary of address counts.
"""
qry = f"""
select col
, latest
, diff_1d
, diff_1w
, diff_4w
, diff_6m
, diff_1y
from mtr.address_counts_by_minimal_balance_change_summary;
"""
async with CONNECTION_POOL.acquire() as conn:
rows = await conn.fetch(qry)
return [dict(r) for r in rows] | c22d6c3442833743559c42e4be59a25ab073c03b | 6,153 |
from typing import Dict
from typing import Any
async def processor(db, document: Dict[str, Any]) -> Dict[str, Any]:
"""
Process a history document before it is returned to the client.
:param db: the application object
:param document: the document to process
:return: the processed document
"""
return await apply_transforms(
virtool.utils.base_processor(document),
[AttachUserTransform(db, ignore_errors=True)],
) | 89de3dd255923b3eca6444ee4410980e857aa8e1 | 6,154 |
import jax.numpy as jnp


def _unit_scale_traindata(X, xmins, xmaxs):
"""If xmax > xmin, unit-scale the training data, else do nothing
Parameters
----------
x : ndarray of shape (m, n)
xmins : ndarray of shape (n, )
xmaxs : ndarray of shape (n, )
Returns
-------
result : ndarray of shape (m, n)
Notes
-----
Training data must fit inside a rectangular box aligned with each dimension
"""
X = jnp.atleast_2d(X)
xmins = jnp.atleast_1d(xmins)
xmaxs = jnp.atleast_1d(xmaxs)
msk = xmins == xmaxs
norm = jnp.where(msk, 1.0, xmaxs - xmins)
offset = jnp.where(msk, 0.0, xmins)
return (X - offset) / norm | 2778c7a9d7b6e23775df2354b92057e6a5511dc5 | 6,155 |
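# A small runnable check: each feature is mapped to [0, 1] inside its
# [xmin, xmax] box, while constant features (xmin == xmax) pass through
# unchanged thanks to the where-guards above.
if __name__ == "__main__":
    X = jnp.array([[0.0, 5.0], [2.0, 5.0], [4.0, 5.0]])
    print(_unit_scale_traindata(X, xmins=jnp.array([0.0, 5.0]),
                                xmaxs=jnp.array([4.0, 5.0])))
    # [[0.  5. ] [0.5 5. ] [1.  5. ]]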
import tensorflow as tf


def extractive_explanations(
dataset,
prefix='explain sentiment',
input_feature='review',
output_classes=('negative', 'positive'),
drop_explanations=False
):
"""Preprocessor to handle extractive rationale prediction datasets.
The preprocessor expects a dataset with the provided 'input_feature', a label,
and a list of evidences. E.g. the movie rationale dataset consists of the
following features.
{
review: 'This is a bad movie. Skip it.'
label: 0,
evidences: ['bad movie', 'Skip it']
}
The example will be transformed to the following format by the preprocessor:
{
inputs: 'explain sentiment review: This is a bad movie. Skip it.'
targets: 'NEG because bad movie explanation: Skip it'
}
Args:
dataset: a tf.data.Dataset to process.
prefix: str, prefix to prepend to the inputs.
input_feature: str, feature name in input dataset.
output_classes: list of output classes in the input dataset. Defaults to
['negative', 'positive'] for the movie reviews dataset.
drop_explanations: bool, whether or not to drop explanations.
Returns:
a tf.data.Dataset
"""
if output_classes is None:
output_classes = ['negative', 'positive']
def my_fn(x):
"""Helper function to transform a rationale dataset to inputs/targets."""
input_label = tf.strings.join([input_feature, ':'], separator='')
inputs = tf.strings.join(
[prefix, input_label, x[input_feature]], separator=' ')
class_label = tf.gather(output_classes, x['label'])
if drop_explanations:
targets = class_label
else:
targets = _explanation_targets(class_label, x['evidences'])
return {'inputs': inputs, 'targets': targets}
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) | c1549279cbb676ee45287afe99f1f94410c27b62 | 6,158 |
def corr_weighted_kendalltau(top_list_prev, top_list, use_fast=True):
"""Compute weighted Kendall's Tau correlation (based on custom implementation!).
NOTE: Lists are DataFrame columns AND they must be sorted according to their value!!!"""
# it is irrelevant whether we compute kendall for ranks or scores.
list_a, list_b = proc_corr(top_list_prev, top_list)
if len(list_a) != len(list_b):
raise RuntimeError("The length of 'list_a' and 'list_b' must be the same!")
if use_fast:
return [fast_weighted_kendall(list_a, list_b)[1]]
else:
rank_list_a = tiedrank(list_a)
rank_list_b = tiedrank(list_b)
return [computeWKendall(rank_list_a,rank_list_b,ranked_input=True)[1]] | 35b473040508561798831343d770acabd97cb76e | 6,159 |
import datetime
import random
def generate_processes_by_exposure(exposure):
""" Creates a simulated process based on an exposure.
Arguments:
exposure {object} -- Exposure model
Raises:
ValueError -- returns when there is no processing
with a respective exposure.
Returns:
object -- Process model
"""
flavor = exposure.flavor
process = qlf_models.get_last_process_by_flavor(
flavor, jobs_isnull=False)
if not process:
raise ValueError(
'There is no process with {} flavor.'.format(flavor)
)
process.exposure_id = exposure.exposure_id
process.id = None
tdate = datetime.datetime.now()
tdate += datetime.timedelta(minutes=random.randint(1, 5))
process.end = tdate
process.save()
return process | a3a335184fbf9c51e47210ac22fd4d4e8a8a6aa4 | 6,160 |
import copy

import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedKFold


def cross_val_confusion(classifier, X, y, cv=5):
"""
Evaluate confusion matrix and score from each fold of cross validation
Parameters:
----------
classifier: classifier object
The object used to fit the data.
X[ndarray]: shape=(n_sample, n_feature)
y[ndarray]: shape=(n_sample,)
    cv[int]: the number of folds of the cross validation (default: 5)
Returns:
-------
conf_ms[list]: confusion matrices of the folds
accuracies[list]: accuracies of the folds
"""
assert getattr(classifier, "_estimator_type", None) == "classifier", \
"Estimator must be a classifier!"
# calculate CV metrics
conf_ms = []
accuracies = []
classifier = copy.deepcopy(classifier)
skf = StratifiedKFold(n_splits=cv)
for train_indices, test_indices in skf.split(X, y):
# fit and prediction
classifier.fit(X[train_indices], y[train_indices])
y_preds = classifier.predict(X[test_indices])
# calculate confusion matrix and accuracy
conf_m = confusion_matrix(y[test_indices], y_preds)
acc = np.sum(conf_m.diagonal()) / np.sum(conf_m)
# collection
conf_ms.append(conf_m)
accuracies.append(acc)
return conf_ms, accuracies | bbdbed0bc18b7ac201f2933e9cff10eab19d5a75 | 6,161 |
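# A minimal usage sketch on a toy dataset; the dataset and estimator choices
# below are illustrative, not part of the original.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    X, y = load_iris(return_X_y=True)
    conf_ms, accs = cross_val_confusion(
        LogisticRegression(max_iter=1000), X, y, cv=5)
    print(len(conf_ms), [round(a, 3) for a in accs])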
import asyncio
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload Synology DSM sensors."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
entry_data = hass.data[DOMAIN][entry.unique_id]
entry_data[UNDO_UPDATE_LISTENER]()
await entry_data[SYNO_API].async_unload()
hass.data[DOMAIN].pop(entry.unique_id)
return unload_ok | 876aceeaa113a6275a60328f6f00c0d0c4c0f2e1 | 6,162 |
import pathlib
def confirm_control_contains(trestle_dir: pathlib.Path, control_id: str, part_label: str, seek_str: str) -> bool:
"""Confirm the text is present in the control markdown in the correct part."""
control_dir = trestle_dir / ssp_name / control_id.split('-')[0]
md_file = control_dir / f'{control_id}.md'
responses, _ = ControlIOReader.read_all_implementation_prose_and_header(md_file)
if part_label not in responses:
return False
prose = '\n'.join(responses[part_label])
return seek_str in prose | b78cd7a7ef435fcee483d98fe2199ba90c905833 | 6,164 |