content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
import numpy as np
def truth_seed_box(true_params, init_range, az_ind=4, zen_ind=5):
"""generate initial box limits from the true params
Parameters
----------
true_params : np.ndarray
init_range : np.ndarray
Returns
-------
np.ndarray
shape is (n_params, 2); returned energy limits are in units of log energy
"""
n_params = len(true_params)
true_params = np.copy(true_params[:, np.newaxis])
# clip true energies between 0.3 GeV and 1000 GeV
true_params[-2:] = true_params[-2:].clip(0.3, 1000)
limits = np.empty((n_params, 2), np.float32)
limits[:-2] = true_params[:-2] + init_range[:-2]
limits[-2:] = np.log10(true_params[-2:]) + init_range[-2:]
limits[az_ind] = limits[az_ind].clip(0, 2 * np.pi)
limits[zen_ind] = limits[zen_ind].clip(0, np.pi)
return limits | 3c3087702be4b91589f7e75f5c7e2f18776a658a | 3,489 |
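# Added usage sketch (not part of the original snippet): a hypothetical
# 8-parameter event where indices 4/5 are azimuth/zenith and the last two
# entries are energies in GeV, matching the defaults above.
import numpy as np
true_params = np.array([0.0, 0.0, 0.0, 100.0, 3.0, 1.5, 5.0, 20.0])
init_range = np.array([[-50.0, 50.0]] * 6 + [[-0.5, 0.5]] * 2)
box = truth_seed_box(true_params, init_range)
# box[:-2] brackets the true values directly; box[-2:] brackets log10(energy);
# the azimuth row is clipped to [0, 2*pi] and the zenith row to [0, pi].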
def summary(task):
"""Given an ImportTask, produce a short string identifying the
object.
"""
if task.is_album:
return u'{0} - {1}'.format(task.cur_artist, task.cur_album)
else:
return u'{0} - {1}'.format(task.item.artist, task.item.title) | 87387c47e90998c270f6f8f2f63ceacebd4cdc78 | 3,491 |
def ds_tc_resnet_model_params(use_tf_fft=False):
"""Generate parameters for ds_tc_resnet model."""
# model parameters
model_name = 'ds_tc_resnet'
params = model_params.HOTWORD_MODEL_PARAMS[model_name]
params.causal_data_frame_padding = 1 # causal padding on DataFrame
params.clip_duration_ms = 160
params.use_tf_fft = use_tf_fft
params.mel_non_zero_only = not use_tf_fft
params.feature_type = 'mfcc_tf'
params.window_size_ms = 5.0
params.window_stride_ms = 2.0
params.wanted_words = 'a,b,c'
params.ds_padding = "'causal','causal','causal','causal'"
params.ds_filters = '4,4,4,2'
params.ds_repeat = '1,1,1,1'
params.ds_residual = '0,1,1,1' # no residuals on strided layers
params.ds_kernel_size = '3,3,3,1'
params.ds_dilation = '1,1,1,1'
params.ds_stride = '2,1,1,1' # streaming conv with stride
params.ds_pool = '1,2,1,1' # streaming conv with pool
params.ds_filter_separable = '1,1,1,1'
# convert ms to samples and compute labels count
params = model_flags.update_flags(params)
# compute total stride
pools = model_utils.parse(params.ds_pool)
strides = model_utils.parse(params.ds_stride)
time_stride = [1]
for pool in pools:
if pool > 1:
time_stride.append(pool)
for stride in strides:
if stride > 1:
time_stride.append(stride)
total_stride = np.prod(time_stride)
# override input data shape for streaming model with stride/pool
params.data_stride = total_stride
params.data_shape = (total_stride * params.window_stride_samples,)
# set desired number of frames in model
frames_number = 16
frames_per_call = total_stride
frames_number = (frames_number // frames_per_call) * frames_per_call
# number of input audio samples required to produce one output frame
framing_stride = max(
params.window_stride_samples,
max(0, params.window_size_samples -
params.window_stride_samples))
signal_size = framing_stride * frames_number
# desired number of samples in the input data to train non streaming model
params.desired_samples = signal_size
params.batch_size = 1
return params | b018fa56efd67d8378496d5b4b1975580fc92f89 | 3,492 |
def expected_calibration_error_evaluator(test_data: pd.DataFrame,
prediction_column: str = "prediction",
target_column: str = "target",
eval_name: str = None,
n_bins: int = 100,
bin_choice: str = "count") -> EvalReturnType:
"""
Computes the expected calibration error (ECE), given true label and prediction scores.
See "On Calibration of Modern Neural Networks"(https://arxiv.org/abs/1706.04599) for more information.
The ECE is the distance between the actual observed frequency and the predicted probabilities,
for a given choice of bins.
Perfect calibration results in a score of 0.
For example, if for the bin [0, 0.1] we have the three data points:
1. prediction: 0.1, actual: 0
2. prediction: 0.05, actual: 1
3. prediction: 0.0, actual 0
Then the predicted average is (0.1 + 0.05 + 0.00)/3 = 0.05, and the empirical frequency is (0 + 1 + 0)/3 = 1/3.
Therefore, the distance for this bin is::
|1/3 - 0.05| ~= 0.28.
Graphical intuition::
Actuals (empirical frequency between 0 and 1)
| *
| *
| *
______ Predictions (probabilities between 0 and 1)
Parameters
----------
test_data : Pandas' DataFrame
A Pandas' DataFrame with target and prediction scores.
prediction_column : String
The name of the column in `test_data` with the prediction scores.
target_column : String
The name of the column in `test_data` with the binary target.
eval_name : String, optional (default=None)
The name of the evaluator as it will appear in the logs.
n_bins: Int (default=100)
The number of bins.
This is a trade-off between the number of points in each bin and the probability range they span.
You want a small enough range that still contains a significant number of points for the distance to work.
bin_choice: String (default="count")
Two possibilities:
"count" for equally populated bins (e.g. uses `pandas.qcut` for the bins)
"prob" for equally spaced probabilities (e.g. uses `pandas.cut` for the bins),
with distance weighed by the number of samples in each bin.
Returns
-------
log: dict
A log-like dictionary with the expected calibration error.
"""
if eval_name is None:
eval_name = "expected_calibration_error_evaluator__" + target_column
if bin_choice == "count":
bins = pd.qcut(test_data[prediction_column], q=n_bins)
elif bin_choice == "prob":
bins = pd.cut(test_data[prediction_column], bins=n_bins)
else:
raise AttributeError("Invalid bin_choice")
metric_df = pd.DataFrame({"bins": bins,
"predictions": test_data[prediction_column],
"actuals": test_data[target_column]})
agg_df = metric_df.groupby("bins").agg({"bins": "count", "predictions": "mean", "actuals": "mean"})
sample_weight = None
if bin_choice == "prob":
sample_weight = agg_df["bins"].values
distance = mean_absolute_error(agg_df["actuals"].values, agg_df["predictions"].values, sample_weight=sample_weight)
return {eval_name: distance} | 0f34a5c0883325324b11fd9b97a8a55250574392 | 3,493 |
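# Added usage sketch (hypothetical data; assumes pandas, numpy and the module's
# own import of mean_absolute_error are available). Predictions drawn uniformly
# and targets sampled from them are well calibrated by construction, so the ECE
# should come out close to zero.
import numpy as np
import pandas as pd
rng = np.random.default_rng(0)
preds = rng.uniform(size=1000)
df = pd.DataFrame({"prediction": preds, "target": rng.binomial(1, preds)})
log = expected_calibration_error_evaluator(df, n_bins=10)
# -> {"expected_calibration_error_evaluator__target": <value close to 0>}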
def format_bytes(size):
"""
Takes a byte size (int) and returns a formatted, human-interpretable string
"""
# 2**10 = 1024
power = 2 ** 10
n = 0
power_labels = {0: " bytes", 1: "KB", 2: "MB", 3: "GB", 4: "TB"}
while size >= power:
size /= power
n += 1
return str(round(size, 2)) + power_labels[n] | 332b9d43c044da92ef7a9b16e57cfa7d552de12f | 3,494 |
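# Added quick checks (values verified by hand; note that only the "bytes" label
# carries a separating space):
assert format_bytes(512) == "512 bytes"
assert format_bytes(2048) == "2.0KB"
assert format_bytes(5 * 2**30) == "5.0GB"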
def load_input_data(filenames, Ag_class):
"""
Load the files specified in filenames.
Parameters
---
filenames: a list of names that specify the files to
be loaded.
Ag_class: classification of sequences from MiXCR txt file
(i.e., antigen binder = 1, non-binder = 0)
"""
# Combine the non-binding sequence data sets.
# Non-binding data sets include Ab+ data and Ag-
# sorted data for all 3 libraries
l_data = []
for file in filenames:
l_data.append(
mixcr_input('data/' + file, Ag_class, seq_len=15)
)
mHER_H3 = pd.concat(l_data)
# Drop duplicate sequences
mHER_H3 = mHER_H3.drop_duplicates(subset='AASeq')
# Remove 'CAR/CSR' motif and last two amino acids
mHER_H3['AASeq'] = [x[3:-2] for x in mHER_H3['AASeq']]
# Shuffle sequences and reset index
mHER_H3 = mHER_H3.sample(frac=1).reset_index(drop=True)
return mHER_H3 | 9ae9cc814f150168ca1703b1a4af54bc440b4425 | 3,495 |
from typing import Optional
def get_maximum_value(
inclusive: Optional[Edge] = None,
exclusive: Optional[Edge] = None,
ignore_unlimited: bool = False,
) -> Result[Boundary, TestplatesError]:
"""
Gets maximum boundary.
:param inclusive: inclusive boundary value or None
:param exclusive: exclusive boundary value or None
:param ignore_unlimited: indicates whether to ignore unlimited values or not
"""
return get_value_boundary(
MAXIMUM_EXTREMUM,
inclusive=inclusive,
exclusive=exclusive,
ignore_unlimited=ignore_unlimited,
) | 3ef7557ed3f7f353e0765a92fb008449409039a8 | 3,496 |
from typing import List
def build_graph(order: int, edges: List[List[int]]) -> List[List[int]]:
"""Builds an adjacency list from the edges of an undirected graph."""
adj = [[] for _ in range(order)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
return adj | 86bdd0d4314777ff59078b1c0f639e9439f0ac08 | 3,497 |
import torch
import math
def construct_scheduler(
optimizer,
cfg: OmegaConf,
):
"""
Creates a learning rate scheduler for a given model
:param optimizer: the optimizer to be used
:param cfg: OmegaConf config; scheduler settings are read from cfg.train.scheduler and cfg.train.scheduler_params
:return: scheduler
"""
# Unpack values from cfg.train.scheduler_params
scheduler_type = cfg.train.scheduler
decay_factor = cfg.train.scheduler_params.decay_factor
decay_steps = cfg.train.scheduler_params.decay_steps
patience = cfg.train.scheduler_params.patience
warmup_epochs = cfg.train.scheduler_params.warmup_epochs
warmup = warmup_epochs != -1
if scheduler_type == "multistep":
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=decay_steps,
gamma=1.0 / decay_factor,
)
elif scheduler_type == "plateau":
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
mode="max",
factor=1.0 / decay_factor,
patience=patience,
verbose=True,
# threshold_mode="rel",
# min_lr=2.5e-4,
)
elif scheduler_type == "exponential":
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer,
gamma=decay_factor,
last_epoch=-1,
)
elif scheduler_type == "cosine":
size_dataset = DATASET_SIZES[cfg.dataset]
if warmup:
# If warmup is used, then we need to subtract this from T_max.
T_max = (cfg.train.epochs - warmup_epochs) * math.ceil(
size_dataset / float(cfg.train.batch_size)
) # - warmup epochs
else:
T_max = cfg.train.epochs * math.ceil(
size_dataset / float(cfg.train.batch_size)
)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer,
T_max=T_max,
eta_min=1e-6,
)
else:
lr_scheduler = None
print(
f"WARNING! No scheduler will be used. cfg.train.scheduler = {scheduler_type}"
)
if warmup and lr_scheduler is not None:
size_dataset = DATASET_SIZES[cfg.dataset]
lr_scheduler = ckconv.nn.LinearWarmUp_LRScheduler(
optimizer=optimizer,
lr_scheduler=lr_scheduler,
warmup_iterations=warmup_epochs
* math.ceil(size_dataset / float(cfg.train.batch_size)),
)
return lr_scheduler | d0ed907aa3582978cb3adf5eada895366dc1282f | 3,498 |
import numpy
def GenerateSerialGraph(num_samples, block_size):
""" Generates a (consistent) serial graph. """
N = num_samples
num_blocks = N // block_size
if N % block_size != 0:
err = "num_samples(%d) must be a multiple of block_size (%d)" % (num_samples, block_size)
raise Exception(err)
if num_blocks < 2:
err = "the number of blocks %d should be at least 2 (%d/%d)" % (num_blocks, num_samples, block_size)
raise Exception(err)
node_weights = numpy.ones(N) * 2.0
node_weights[:block_size] = 1.0
node_weights[-block_size:] = 1.0
edge_weights = {}
w = 1.0
for block in range(num_blocks - 1):
for i in range(block_size):
for j in range(block_size):
edge_weights[(i + block * block_size, j + (block + 1) * block_size)] = w
edge_weights[(j + (block + 1) * block_size, i + block * block_size)] = w # Loops are simply overwritten
return node_weights, edge_weights | 7348c08051aa0b7ec51f79f1f6f2097ab5857ef8 | 3,499 |
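# Added worked example (not part of the original snippet): 4 samples in blocks
# of 2. The first and last blocks get node weight 1.0 (2.0 for middle blocks),
# and every node in block k is linked in both directions to every node in
# block k+1 with weight 1.0.
node_w, edge_w = GenerateSerialGraph(num_samples=4, block_size=2)
# node_w -> array([1., 1., 1., 1.])
# edge_w -> {(0, 2): 1.0, (2, 0): 1.0, (0, 3): 1.0, (3, 0): 1.0,
#            (1, 2): 1.0, (2, 1): 1.0, (1, 3): 1.0, (3, 1): 1.0}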
def load_ref_system():
""" Returns alpha-d-rhamnopyranose as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C -0.8728 1.4263 -0.3270
O -1.5909 0.3677 0.2833
C -1.1433 -0.9887 0.0086
C 0.3390 -1.0821 0.4414
O 0.8751 -2.3755 0.1209
C 1.1868 -0.1193 -0.4147
C 0.6705 1.3308 -0.3098
O 1.0480 1.9270 0.9344
O 2.5224 -0.0436 0.1069
C -2.0793 -1.8494 0.8365
O -1.2047 1.4329 -1.7148
H -1.2520 -1.1765 -1.0826
H 0.4676 -0.8772 1.5266
H 1.2377 -0.4682 -1.4721
H 1.1005 1.9572 -1.1305
H -1.2644 2.3269 0.1977
H -2.1732 1.3552 -1.8431
H 1.9510 1.6015 1.1977
H 2.8904 -0.9551 0.1994
H 0.5074 -3.0553 0.7225
H -2.0383 -1.5878 1.9031
H -3.1236 -1.6900 0.5276
H -1.8591 -2.9160 0.7258
""") | ba8d952cb777fac9e9d445c0865027c2fe25fbe4 | 3,500 |
def find_python_root_dir(possibles):
"""Find a python root in a list of dirs
If all dirs have the same name, and one of them has setup.py
then it is probably common Python project tree, like
/path/to/projects/cd
/path/to/projects/cd/cd
Or, if all dirs are the same,
except that one has an egg suffix, like
/path/to/dotsite/dotsite
/path/to/dotsite/dotsite.egg-info
then ignore the egg
"""
names = {_.basename() for _ in possibles}
if len(names) == 1:
for possible in possibles:
setup = possible / "setup.py"
if setup.isfile():
return possible
eggless = {paths.path(p.replace(".egg-info", "")) for p in possibles}
if len(eggless) == 1:
return eggless.pop()
return None | 849d9f967899f1cdba7f9579891afbb7fb0580dc | 3,502 |
import time
def findEndpoint():
"""
scroll to bottom to get the last number
"""
print("Fetching school count")
clickToOpen()
# get scroller
scrollbar = driver.find_elements_by_class_name("scrollbar-inner")[1]
driver.execute_script("arguments[0].scrollBy(0,2);", scrollbar)
inner = driver.find_elements_by_class_name("scroll-bar")
time.sleep(2)
top = float(inner[1].get_attribute("style").split("top: ")[-1].replace("px;", ""))
# scroll until the scrollbar handle reaches the bottom (top offset >= 159)
while top < 159:
driver.execute_script("arguments[0].scrollBy(0,200);", scrollbar)
time.sleep(0.3)
top = float(inner[1].get_attribute("style").split("top: ")[-1].replace("px;", ""))
time.sleep(2)
# get point-inset
vis = driver.find_element_by_class_name("visibleGroup")
children = vis.find_elements_by_xpath(".//div[@class='slicerItemContainer']")
last = children[-1].get_attribute("aria-posinset")
print(f"School count: {last}")
time.sleep(1)
return int(last) | 7f979df78ff7dbebc4819ee4f09c86d9d8c243c0 | 3,504 |
def _shake_shake_block(x, output_filters, stride, is_training):
"""Builds a full shake-shake sub layer."""
batch_size = tf.shape(x)[0]
# Generate random numbers for scaling the branches
rand_forward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(2)
]
rand_backward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(2)
]
# Normalize so that all sum to 1
total_forward = tf.add_n(rand_forward)
total_backward = tf.add_n(rand_backward)
rand_forward = [samp / total_forward for samp in rand_forward]
rand_backward = [samp / total_backward for samp in rand_backward]
zipped_rand = zip(rand_forward, rand_backward)
branches = []
for branch, (r_forward, r_backward) in enumerate(zipped_rand):
with tf.variable_scope('branch_{}'.format(branch)):
b = _shake_shake_branch(x, output_filters, stride, r_forward, r_backward,
is_training)
branches.append(b)
res = _shake_shake_skip_connection(x, output_filters, stride)
return res + tf.add_n(branches) | 29170e867ab8a01277adbbd49f60960408ceb28b | 3,505 |
def get_all_paged_events(decision, conn, domain, task_list, identity, maximum_page_size):
"""
Given a poll_for_decision_task response, check if there is a nextPageToken
and if so, recursively poll for all workflow events, and assemble a final
decision response to return
"""
# First check if there is no nextPageToken, if there is none
# return the decision, nothing to page
next_page_token = None
try:
next_page_token = decision["nextPageToken"]
except KeyError:
next_page_token = None
if next_page_token is None:
return decision
# Continue, we have a nextPageToken. Assemble a full array of events by continually polling
all_events = decision["events"]
while next_page_token is not None:
try:
next_page_token = decision["nextPageToken"]
if next_page_token is not None:
decision = conn.poll_for_decision_task(domain, task_list,
identity, maximum_page_size,
next_page_token)
for event in decision["events"]:
all_events.append(event)
except KeyError:
next_page_token = None
# Finally, reset the original decision response with the full set of events
decision["events"] = all_events
return decision | 5125c5bbff547a02496443477aee88165ece9d80 | 3,506 |
def get_pools():
""" gets json of pools. schema follows
#{
# "kind": "tm:ltm:pool:poolcollectionstate",
# "selfLink": "https://localhost/mgmt/tm/ltm/pool?ver=11.5.3",
# "items": [
# {
# "kind": "tm:ltm:pool:poolstate",
# "name": "mypoolname",
# "partition": "mypartition",
# "fullPath": "/mypartition/mypoolname",
# "generation": 1,
# "selfLink": "https://localhost/mgmt/tm/ltm/pool/~mypartition~mypoolname?ver=11.5.3",
# "allowNat": "yes",
# "allowSnat": "yes",
# "ignorePersistedWeight": "disabled",
# "ipTosToClient": "pass-through",
# "ipTosToServer": "pass-through",
# "linkQosToClient": "pass-through",
# "linkQosToServer": "pass-through",
# "loadBalancingMode": "round-robin",
# "minActiveMembers": 0,
# "minUpMembers": 0,
# "minUpMembersAction": "failover",
# "minUpMembersChecking": "disabled",
# "monitor": "/Common/gateway_icmp ",
# "queueDepthLimit": 0,
# "queueOnConnectionLimit": "disabled",
# "queueTimeLimit": 0,
# "reselectTries": 0,
# "slowRampTime": 10,
# "membersReference": {
# "link": "url-for-rest-request-for-pool-members",
# "isSubcollection": true
# }
# }
## ,(repeated as needed for additional pools)
# ]
#}
"""
global f5rest_url
return (get_f5json(f5rest_url + 'ltm/pool')) | 36baa4d556a69a9498357b1a5534433fc8732683 | 3,507 |
def set_runner_properties(timestep, infguard=False, profile_nodenet=False, profile_world=False, log_levels={}, log_file=None):
"""Sets the speed of the nodenet calculation in ms.
Argument:
timestep: sets the calculation speed.
"""
if log_file:
if not tools.is_file_writeable(log_file):
return False, "Can not write to specified log file."
logger.set_logfile(log_file)
runner_config['log_file'] = log_file
if log_levels:
set_logging_levels(log_levels)
runner_config['runner_timestep'] = timestep
runner_config['runner_infguard'] = bool(infguard)
runner_config['profile_nodenet'] = bool(profile_nodenet)
runner_config['profile_world'] = bool(profile_world)
runner['timestep'] = timestep
return True, "" | a2ddcd6b33304cef4c03a55a78a691fa9b5d1112 | 3,508 |
def chunk_sum(vec, chunksize):
"""Computes the sums of chunks of points in a vector.
"""
Nchunks = len(vec)//chunksize
end = Nchunks*chunksize
arr = np.reshape(vec[:end], [Nchunks, chunksize])
sums = np.sum(arr, 1)
return sums | 036bfe2d2277d90339c2bcacc1c0943a96a52ece | 3,509 |
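# Added example (not part of the original snippet): points beyond a whole
# number of chunks are silently dropped.
import numpy as np
chunk_sum(np.arange(7), 3)  # -> array([ 3, 12]); the leftover element 6 is ignored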
def get_feature_definitions(df, feature_group):
"""
Get datatypes from pandas DataFrame and map them
to Feature Store datatypes.
:param df: pandas.DataFrame
:param feature_group: FeatureGroup
:return: list
"""
# Dtype int_, int8, int16, int32, int64, uint8, uint16, uint32
# and uint64 are mapped to Integral feature type.
# Dtype float_, float16, float32 and float64
# are mapped to Fractional feature type.
# string dtype is mapped to String feature type.
# Our schema of our data that we expect
# _after_ SageMaker Processing
feature_definitions = []
for column in df.columns:
feature_type = feature_group._DTYPE_TO_FEATURE_DEFINITION_CLS_MAP.get(
str(df[column].dtype), None
)
feature_definitions.append(
FeatureDefinition(column, feature_type)
) # you can alternatively define your own schema
return feature_definitions | 75573b3e8ed1f666b68dde2067b5b14655c341de | 3,511 |
def draw_2d_wp_basis(shape, keys, fmt='k', plot_kwargs={}, ax=None,
label_levels=0):
"""Plot a 2D representation of a WaveletPacket2D basis."""
coords, centers = _2d_wp_basis_coords(shape, keys)
if ax is None:
fig, ax = plt.subplots(1, 1)
else:
fig = ax.get_figure()
for coord in coords:
ax.plot(coord[0], coord[1], fmt)
ax.set_axis_off()
ax.axis('square')
if label_levels > 0:
for key, c in centers.items():
if len(key) <= label_levels:
ax.text(c[0], c[1], key,
horizontalalignment='center',
verticalalignment='center')
return fig, ax | 5e731fa02c833f36c0cde833bfdb5d4b30706519 | 3,512 |
import numpy
def build_normalizer(signal, sample_weight=None):
"""Prepares normalization function for some set of values
transforms it to uniform distribution from [0, 1]. Example of usage:
>>> normalizer = build_normalizer(signal)
>>> pylab.hist(normalizer(background))
>>> # this one should be uniform in [0,1]
>>> pylab.hist(normalizer(signal))
:param numpy.array signal: shape = [n_samples] with floats
:param numpy.array sample_weight: shape = [n_samples], non-negative weights associated to events.
"""
sample_weight = check_sample_weight(signal, sample_weight)
assert numpy.all(sample_weight >= 0.), 'sample weight must be non-negative'
sorter = numpy.argsort(signal)
signal, sample_weight = signal[sorter], sample_weight[sorter]
predictions = numpy.cumsum(sample_weight) / numpy.sum(sample_weight)
def normalizing_function(data):
return numpy.interp(data, signal, predictions)
return normalizing_function | b4227364899468ca2017d9bedd87aff4bf35d9c6 | 3,513 |
from typing import Callable
from typing import Tuple
def get_phased_trajectory(init_state: np.ndarray,
update_fn: Callable) -> Tuple[np.ndarray, HashableNdArray]:
"""
evolve an initial state until it reaches a limit cycle
Parameters
----------
init_state
update_fn
Returns
-------
trajectory, phase-point pair
"""
state = init_state
trajectory = list()
trajectory_set = set() # set lookup should be faster
# compute state by state until we have a repeat
hashable_state = HashableNdArray(state)
while hashable_state not in trajectory_set:
trajectory.append(hashable_state)
trajectory_set.add(hashable_state)
state = update_fn(state)
hashable_state = HashableNdArray(state)
# separate trajectory into in-bound and limit-cycle parts
repeated_state = HashableNdArray(state)
repeated_state_index = trajectory.index(repeated_state)
limit_cycle = trajectory[repeated_state_index:]
# find state in limit cycle with smallest hash (i.e. smallest lexicographic
# ordering if there is no integer overflow)
# this is our phase fixing point
cycle_min_index: int = 0
cycle_min: int = hash(limit_cycle[0])
for idx in range(1, len(limit_cycle)):
nxt_hash: int = hash(limit_cycle[idx])
if nxt_hash < cycle_min:
cycle_min_index = idx
cycle_min = nxt_hash
# get trajectory with phase
phase_idx: int = len(trajectory) - len(limit_cycle) + cycle_min_index
phased_trajectory = np.array(
[hashable.array for hashable in trajectory[:phase_idx]], dtype=np.int64
)
return phased_trajectory, trajectory[phase_idx] | 603aa9f3e626d51132b70b87321453cfacba579a | 3,515 |
import logging
import time
def exponential_backoff(func):
"""
Retries a Boto3 call up to 5 times if request rate limits are hit.
The time waited between retries increases exponentially. If rate limits are
hit 5 times, exponential_backoff raises a
:py:class:sceptre.exceptions.RetryLimitExceededError().
:param func: a function that uses boto calls
:type func: func
:returns: The decorated function.
:rtype: func
:raises: sceptre.exceptions.RetryLimitExceededError
"""
logger = logging.getLogger(__name__)
@wraps(func)
def decorated(*args, **kwargs):
max_retries = 5
attempts = 0
while attempts < max_retries:
try:
return func(*args, **kwargs)
except ClientError as e:
if e.response["Error"]["Code"] == "Throttling":
logger.error("Request limit exceeded, pausing...")
time.sleep(2 ** attempts)
attempts += 1
else:
raise e
raise RetryLimitExceededError(
"Exceeded request limit {0} times. Aborting.".format(max_retries)
)
return decorated | 63db7514b88eb64ca0ee5d626be5a59f7d40d7d6 | 3,516 |
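# Added usage sketch (hypothetical wrapper; assumes a boto3 CloudFormation
# client is in scope). Throttling errors are retried with pauses of 1, 2, 4, 8
# and 16 seconds before RetryLimitExceededError is raised; any other
# ClientError propagates immediately.
@exponential_backoff
def describe_stacks(client, name):
    return client.describe_stacks(StackName=name)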
def post_config_adobe_granite_saml_authentication_handler(key_store_password=None, key_store_password_type_hint=None, service_ranking=None, service_ranking_type_hint=None, idp_http_redirect=None, idp_http_redirect_type_hint=None, create_user=None, create_user_type_hint=None, default_redirect_url=None, default_redirect_url_type_hint=None, user_id_attribute=None, user_id_attribute_type_hint=None, default_groups=None, default_groups_type_hint=None, idp_cert_alias=None, idp_cert_alias_type_hint=None, add_group_memberships=None, add_group_memberships_type_hint=None, path=None, path_type_hint=None, synchronize_attributes=None, synchronize_attributes_type_hint=None, clock_tolerance=None, clock_tolerance_type_hint=None, group_membership_attribute=None, group_membership_attribute_type_hint=None, idp_url=None, idp_url_type_hint=None, logout_url=None, logout_url_type_hint=None, service_provider_entity_id=None, service_provider_entity_id_type_hint=None, assertion_consumer_service_url=None, assertion_consumer_service_url_type_hint=None, handle_logout=None, handle_logout_type_hint=None, sp_private_key_alias=None, sp_private_key_alias_type_hint=None, use_encryption=None, use_encryption_type_hint=None, name_id_format=None, name_id_format_type_hint=None, digest_method=None, digest_method_type_hint=None, signature_method=None, signature_method_type_hint=None, user_intermediate_path=None, user_intermediate_path_type_hint=None): # noqa: E501
"""post_config_adobe_granite_saml_authentication_handler
# noqa: E501
:param key_store_password:
:type key_store_password: str
:param key_store_password_type_hint:
:type key_store_password_type_hint: str
:param service_ranking:
:type service_ranking: int
:param service_ranking_type_hint:
:type service_ranking_type_hint: str
:param idp_http_redirect:
:type idp_http_redirect: bool
:param idp_http_redirect_type_hint:
:type idp_http_redirect_type_hint: str
:param create_user:
:type create_user: bool
:param create_user_type_hint:
:type create_user_type_hint: str
:param default_redirect_url:
:type default_redirect_url: str
:param default_redirect_url_type_hint:
:type default_redirect_url_type_hint: str
:param user_id_attribute:
:type user_id_attribute: str
:param user_id_attribute_type_hint:
:type user_id_attribute_type_hint: str
:param default_groups:
:type default_groups: List[str]
:param default_groups_type_hint:
:type default_groups_type_hint: str
:param idp_cert_alias:
:type idp_cert_alias: str
:param idp_cert_alias_type_hint:
:type idp_cert_alias_type_hint: str
:param add_group_memberships:
:type add_group_memberships: bool
:param add_group_memberships_type_hint:
:type add_group_memberships_type_hint: str
:param path:
:type path: List[str]
:param path_type_hint:
:type path_type_hint: str
:param synchronize_attributes:
:type synchronize_attributes: List[str]
:param synchronize_attributes_type_hint:
:type synchronize_attributes_type_hint: str
:param clock_tolerance:
:type clock_tolerance: int
:param clock_tolerance_type_hint:
:type clock_tolerance_type_hint: str
:param group_membership_attribute:
:type group_membership_attribute: str
:param group_membership_attribute_type_hint:
:type group_membership_attribute_type_hint: str
:param idp_url:
:type idp_url: str
:param idp_url_type_hint:
:type idp_url_type_hint: str
:param logout_url:
:type logout_url: str
:param logout_url_type_hint:
:type logout_url_type_hint: str
:param service_provider_entity_id:
:type service_provider_entity_id: str
:param service_provider_entity_id_type_hint:
:type service_provider_entity_id_type_hint: str
:param assertion_consumer_service_url:
:type assertion_consumer_service_url: str
:param assertion_consumer_service_url_type_hint:
:type assertion_consumer_service_url_type_hint: str
:param handle_logout:
:type handle_logout: bool
:param handle_logout_type_hint:
:type handle_logout_type_hint: str
:param sp_private_key_alias:
:type sp_private_key_alias: str
:param sp_private_key_alias_type_hint:
:type sp_private_key_alias_type_hint: str
:param use_encryption:
:type use_encryption: bool
:param use_encryption_type_hint:
:type use_encryption_type_hint: str
:param name_id_format:
:type name_id_format: str
:param name_id_format_type_hint:
:type name_id_format_type_hint: str
:param digest_method:
:type digest_method: str
:param digest_method_type_hint:
:type digest_method_type_hint: str
:param signature_method:
:type signature_method: str
:param signature_method_type_hint:
:type signature_method_type_hint: str
:param user_intermediate_path:
:type user_intermediate_path: str
:param user_intermediate_path_type_hint:
:type user_intermediate_path_type_hint: str
:rtype: None
"""
return 'do some magic!' | b6b082929904123f96c044753995ff1d19cb9cbf | 3,517 |
import json
def _pcheck(block):
""" Helper for multiprocesses: check a block of logs
Args:
block List[List[str], int]: lines, block_id
Returns:
list: (global line number, list of error messages) pairs, one per checked log line
"""
results = []
lines, block_id = block
for li, line in enumerate(lines):
json_line = json.loads(line)
result = [
"%s: %s" % (e.error_type, e.message)
for e in [
validate_normalized(json_line),
check_timestamp_digits(json_line["timestamp"])
if "timestamp" in json_line
else None,
check_time(json_line),
]
if e
]
global_line_number = block_id * BLOCK_SIZE + li
results.append((global_line_number, result))
return results | 3a581e3440079d5f31b00c86d84abee3eca0a396 | 3,519 |
import sqlite3
def fetch_exon_locations(database):
""" Queries the database to create a dictionary mapping exon IDs to
the chromosome, start, end, and strand of the exon """
conn = sqlite3.connect(database)
cursor = conn.cursor()
query = """
SELECT
e.edge_ID,
loc1.chromosome,
MIN(loc1.position,loc2.position),
MAX(loc1.position,loc2.position),
e.strand
FROM edge e
LEFT JOIN location loc1 ON e.v1 = loc1.location_ID
LEFT JOIN location loc2 ON e.v2 = loc2.location_ID
WHERE e.edge_type = 'exon';"""
cursor.execute(query)
exon_location_tuples = cursor.fetchall()
# Create dictionary
exon_locations = {}
for loc_tuple in exon_location_tuples:
exon_ID = loc_tuple[0]
exon_locations[exon_ID] = loc_tuple[1:]
conn.close()
return exon_locations | 54cdd3ffa2ccd10bd777f9130caaae992ac3d451 | 3,520 |
from django import http
def add(request):
"""Displays/processes a form to create a collection."""
data = {}
if request.method == 'POST':
form = forms.CollectionForm(
request.POST, request.FILES,
initial=initial_data_from_request(request))
aform = forms.AddonsForm(request.POST)
if form.is_valid():
collection = form.save(default_locale=request.LANG)
collection.save()
if aform.is_valid():
aform.save(collection)
collection_message(request, collection, 'add')
statsd.incr('collections.created')
log.info('Created collection %s' % collection.id)
return http.HttpResponseRedirect(collection.get_url_path())
else:
data['addons'] = Addon.objects.filter(pk__in=aform.clean_addon())
data['comments'] = aform.clean_addon_comment()
else:
form = forms.CollectionForm()
data.update(form=form, filter=get_filter(request))
return render_cat(request, 'bandwagon/add.html', data) | 04491d0465e09057a2e6c3cc7b80f41ff84314ec | 3,521 |
def get_camera_pose_cpp():
"""
Returns camera pose
"""
rospy.wait_for_service('/asr_robot_model_services/GetCameraPose', timeout=5)
pose = rospy.ServiceProxy('/asr_robot_model_services/GetCameraPose',GetPose)
return pose().pose | b52be4a613d1de482387f6643ea12181f84f6cf4 | 3,522 |
import copy
def _convert_model_from_bytearray_to_object(model_bytearray):
"""Converts a tflite model from a bytearray into a parsable object."""
model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)
model_object = schema_fb.ModelT.InitFromObj(model_object)
model_object = copy.deepcopy(model_object)
return model_object | 9579b58438571b41e8451a13d5e292e142cd6b57 | 3,523 |
import requests
def get_service_mapping():
""" Get mapping dict of service types
Returns:
A mapping dict which maps service types names to their ids
"""
# Get all Service types:
all_service_type = requests.get(base_url + 'services/v2/service_types', headers=headers3).json()
# Make Dict of service names and ids
service_name_to_id = {service_type['attributes']['name']:service_type['id'] for service_type in all_service_type['data']}
return service_name_to_id | 8a8adb8fff086b323dd95012c1a901918afa2bc2 | 3,524 |
def get_recently_viewed(request):
""" get settings.PRODUCTS_PER_ROW most recently viewed products for current customer """
t_id = tracking_id(request)
views = ProductView.objects.filter(tracking_id=t_id).values(
'product_id').order_by('-date')[0:PRODUCTS_PER_ROW]
product_ids = [v['product_id'] for v in views]
return Product.active.filter(id__in=product_ids) | 048187566446294285c7c8fe48b3dafba5efdcc1 | 3,525 |
import re
def chapter_by_url(url):
"""Helper function that iterates through the chapter scrapers defined in
cu2.scrapers.__init__ and returns an initialized chapter object when it
matches the URL regex.
"""
for Chapter in chapter_scrapers:
if re.match(Chapter.url_re, url):
return Chapter.from_url(url) | 83112ab2b68faf7d48a7a8fab3cd359ff442550d | 3,526 |
def get_model_name(part_num):
"""
Get the device name for a given model (part number).
:param part_num: part number to look up
:return: matching model name, or None if no match is found
"""
models = current_config.MODELS
for model_name, model_part_num in models.items():
if model_part_num == part_num:
return model_name | 843489174c844e05752f499f250a30b157ae386e | 3,527 |
def run_get_pk(process, *args, **inputs):
"""Run the process with the supplied inputs in a local runner that will block until the process is completed.
:param process: the process class or process function to run
:param inputs: the inputs to be passed to the process
:return: tuple of the outputs of the process and process node pk
"""
if isinstance(process, Process):
runner = process.runner
else:
runner = manager.get_manager().get_runner()
return runner.run_get_pk(process, *args, **inputs) | 782c3b49b90347a48495f2b7b48e34c2418d31a1 | 3,528 |
def _random_mask(target_tokens, noise_probability=None, target_length=None):
""" target_length其实是mask_length"""
unk = 3
target_masks = get_base_mask(target_tokens)
if target_length is None:
target_length = target_masks.sum(1).float()
if noise_probability is None:
# sample from [0,1]
target_length = target_length * target_length.clone().uniform_()  # length to be masked
else:
target_length = target_length * noise_probability
target_length = target_length + 1 # make sure to mask at least one token.
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(~target_masks, 2.0)
_, target_rank = target_score.sort(1)
target_cutoff = new_arange(target_rank) < target_length[:, None].long()
prev_target_tokens = target_tokens.masked_fill(
target_cutoff.scatter(1, target_rank, target_cutoff), unk)
return prev_target_tokens | 89e603c7a5bfd4fd6fa1d6a36cea3e429e7bd6b7 | 3,529 |
def calculate(cart):
"""Return the total shipping cost for the cart. """
total = 0
for line in cart.get_lines():
total += line.item.shipping_cost * line.quantity
return total | 4b6d9bd94ce3a5748f0d94ab4b23dab993b430e4 | 3,531 |
def arr_to_rgb(arr, rgb=(0, 0, 0), alpha=1, invert=False, ax=None):
"""
Turn `arr` into an RGBA image: the array (rescaled to [0, 1]) becomes the alpha mask
and `rgb` gives the colour, as floats (0..1, 0..1, 0..1) or a matplotlib colour string.
"""
# arr should be scaled to 1
img = np.asarray(arr, dtype=np.float64)
img = img - np.nanmin(img)
img = img / np.nanmax(img)
im2 = np.zeros(img.shape + (4,))
if isinstance(rgb, str):
rgb = mpl.colors.to_rgb(rgb)
if invert:
img = 1 - img
im2[:, :, 3] = img * alpha
r, g, b = rgb
im2[:, :, 0] = r
im2[:, :, 1] = g
im2[:, :, 2] = b
# if ax is None:
# ax = plt.gca()
# plt.sca(ax)
# plt.imshow(im2)
return im2 | 73a19cfed712d93cbbaa69454ca2df780bc523fa | 3,532 |
def parameters(number, size, v=3):
"""
sets item parameters of items and puts in list
:param number: number of items
:param size: characteristic size of the items
:param v: velocity
:return: list with items
"""
param = []
for i in range(number):
angle = randint(0, int(2 * pi * 100))
param.append({
'x': randint(100, screen_width - 100),
'y': randint(100, screen_height - 100),
'vx': v * cos(angle / 100),
'vy': v * sin(angle / 100),
'r': randint(40, 255),
'g': randint(40, 255),
'b': randint(40, 255),
's': size
})
return param | 1b2ad225359c53ce4dd3feb9d356b1637c76c79c | 3,533 |
def _validate_args_for_toeplitz_ops(c_or_cr, b, check_finite, keep_b_shape,
enforce_square=True):
"""Validate arguments and format inputs for toeplitz functions
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
keep_b_shape : bool
Whether to convert a (M,) dimensional b into a (M, 1) dimensional
matrix.
enforce_square : bool, optional
If True (default), this verifies that the Toeplitz matrix is square.
Returns
-------
r : array
1d array corresponding to the first row of the Toeplitz matrix.
c: array
1d array corresponding to the first column of the Toeplitz matrix.
b: array
(M,), (M, 1) or (M, K) dimensional array, post validation,
corresponding to ``b``.
dtype: numpy datatype
``dtype`` stores the datatype of ``r``, ``c`` and ``b``. If any of
``r``, ``c`` or ``b`` are complex, ``dtype`` is ``np.complex128``,
otherwise, it is ``np.double``.
b_shape: tuple
Shape of ``b`` after passing it through ``_asarray_validated``.
"""
if isinstance(c_or_cr, tuple):
c, r = c_or_cr
c = _asarray_validated(c, check_finite=check_finite).ravel()
r = _asarray_validated(r, check_finite=check_finite).ravel()
else:
c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
r = c.conjugate()
if b is None:
raise ValueError('`b` must be an array, not None.')
b = _asarray_validated(b, check_finite=check_finite)
b_shape = b.shape
is_not_square = r.shape[0] != c.shape[0]
if (enforce_square and is_not_square) or b.shape[0] != r.shape[0]:
raise ValueError('Incompatible dimensions.')
is_cmplx = np.iscomplexobj(r) or np.iscomplexobj(c) or np.iscomplexobj(b)
dtype = np.complex128 if is_cmplx else np.double
r, c, b = (np.asarray(i, dtype=dtype) for i in (r, c, b))
if b.ndim == 1 and not keep_b_shape:
b = b.reshape(-1, 1)
elif b.ndim != 1:
b = b.reshape(b.shape[0], -1)
return r, c, b, dtype, b_shape | 977a40e279d3a0000cc03b181e65182e2fe325cc | 3,534 |
def get_application_id():
"""Returns the app id from the app_identity service."""
return app_identity.get_application_id() | f23f6ac7473f81af6c2b40807dd20f8f602bbd96 | 3,536 |
def prior(X, ls, kernel_func=rbf,
ridge_factor=1e-3, name=None):
"""Defines Gaussian Process prior with kernel_func.
Args:
X: (np.ndarray of float32) input training features.
with dimension (N, D).
kernel_func: (function) kernel function for the gaussian process.
Default to rbf.
ls: (float32) length scale parameter.
ridge_factor: (float32) ridge factor to stabilize Cholesky decomposition.
name: (str) name of the random variable
Returns:
(ed.RandomVariable) A random variable representing the Gaussian Process,
dimension (N,)
"""
X = tf.convert_to_tensor(X, dtype=tf.float32)
N, _ = X.shape.as_list()
K_mat = kernel_func(X, ls=ls, ridge_factor=ridge_factor)
return ed.MultivariateNormalTriL(loc=tf.zeros(N, dtype=tf.float32),
scale_tril=tf.cholesky(K_mat),
name=name) | 6ca9d150924473a0b78baa8da4e0734b7719e615 | 3,537 |
def multiclass_eval(y_hat: FloatTensor, y: IntTensor) -> int:
"""
Returns number correct: how often the predicted class (argmax) matches gold.
Arguments:
y_hat: 2d (N x C): guesses for each class
y: 2d (N x C): onehot representation of class labels
Returns:
number correct
"""
# max(dim) returns both values and indices. compare best indices from
# predictions and gold (which are just onehot)
_, pred_idxes = y_hat.max(1)
_, gold_idxes = y.max(1)
return (pred_idxes == gold_idxes).sum() | 6385d1113a0c29191a8951a30d8219278511451d | 3,539 |
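# Added example (not part of the original snippet): 3 samples, 2 classes,
# with only the second prediction wrong.
import torch
y_hat = torch.tensor([[0.9, 0.1], [0.4, 0.6], [0.2, 0.8]])
y = torch.tensor([[1, 0], [1, 0], [0, 1]])
multiclass_eval(y_hat, y)  # -> tensor(2)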
def extract止めないでお姉さま(item):
"""
Parser for '止めないで、お姉さま…'
"""
badwords = [
'subs',
]
if any([bad in item['tags'] for bad in badwords]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False | 1ee52b649fbf24452285c44538b1bfc0fe3cb0c8 | 3,541 |
import urllib
import json
def GetExpID(startRow=0,numRows=2000,totalRows = -1):
"""
Queries the Allen Institute's Mouse Brain Atlas API for all gene expression data available for download.
Returns:
--------
info : list(int)
SectionDataSetIDs of all experiments whose download did not fail
(SectionDataSet: see "http://help.brain-map.org/display/api/Data+Model").
These IDs are needed to specify the download target.
"""
rows = []
GeneNames = []
SectionDataSetID = []
info = list()
done = False
while not done:
r = "&start_row={0}&num_rows={1}".format(startRow,numRows)
pagedUrl = API_DATA_PATH + "query.json?criteria=model::SectionDataSet,rma::criteria,products%5Bid$eq5%5D,rma::include,specimen(stereotaxic_injections(primary_injection_structure,structures))" + r
source = urllib.request.urlopen(pagedUrl).read()
response = json.loads(source)
rows += response['msg']
for x in response['msg']:
if x['failed'] == False :
print(x['id'])
info.append(x['id'])
if totalRows < 0:
totalRows = int(response['total_rows'])
startRow += len(response['msg'])
if startRow >= totalRows:
done = True
return info | f361e94b5d2c61bc80b182c2ee63a734d7e8dc4e | 3,542 |
from typing import Counter
def _add_biotype_attribute(gene_content):
"""
Add `biotype` attribute to all intervals in gene_content.
Parameters
----------
gene_content_ : dict
Intervals in gene separated by transcript id.
Returns
-------
dict
Same gene_content_ object with added `biotype` attributes.
"""
gene_content = gene_content.copy()
# Determine gene biotype:
gbiotype = _get_biotype(gene_content['gene'])
# List to keep track of all possible biotypes in gene:
gene_biotypes = [gbiotype] if gbiotype else []
for transcript_id, transcript_intervals in gene_content.items():
if transcript_id == 'gene':
continue
first_exon = [i for i in transcript_intervals if i[2] in ['CDS', 'ncRNA']][0]
biotype = _get_biotype(first_exon)
gene_biotypes.append(biotype)
new_intervals = []
for interval in transcript_intervals:
new_intervals.append(_add_biotype_value(interval, biotype))
gene_content[transcript_id] = new_intervals
# Finally, make also gene biotype: a list of all biotypes in gene,
# sorted by frequency. Additionally, another sorting is added to sort
# by alphabet if counts are equal.
biotype = ', '.join([i[0] for i in sorted(
sorted(Counter(gene_biotypes).items()), key=lambda x: x[1], reverse=True)])
gene_content['gene'] = _add_biotype_value(gene_content['gene'], biotype)
return gene_content | 21e09ebd4048acf4223855c23b819352aac013db | 3,543 |
def random_solution(W, D, n):
"""
We generate a solution of size n
"""
sol = np.random.permutation(n) + 1
fitness_sol = fitness(W=W, D=D, sol=sol)
return [fitness_sol, sol] | be95c1f08591e90e0e808ea7938763bb7fecd1da | 3,544 |
def format_perm(model, action):
"""
Format a permission string "app.verb_model" for the model and the
requested action (add, change, delete).
"""
return '{meta.app_label}.{action}_{meta.model_name}'.format(
meta=model._meta, action=action) | 12f532e28f685c2a38a638de63928f07039d44c8 | 3,545 |
def verify_password(plain_password: str, hashed_password: str) -> bool:
"""Verify plain password and hashed password.
Args:
plain_password (str): Plain text password.
hashed_password (str): Hashed password.
Returns:
bool: Returns true if secret is verified against given hash.
"""
return pwd_context.verify(plain_password, hashed_password) | adb7ac87516a02298468858216481d9ddad1ce13 | 3,546 |
from typing import List
from typing import Dict
def render_siecle2(tpl: str, parts: List[str], data: Dict[str, str]) -> str:
"""
>>> render_siecle2("siècle2", ["1"], defaultdict(str))
'I<sup>er</sup>'
>>> render_siecle2("siècle2", ["I"], defaultdict(str))
'I<sup>er</sup>'
>>> render_siecle2("siècle2", ["i"], defaultdict(str))
'I<sup>er</sup>'
>>> render_siecle2("siècle2", ["18"], defaultdict(str))
'XVIII<sup>e</sup>'
>>> render_siecle2("siècle2", ["XVIII"], defaultdict(str))
'XVIII<sup>e</sup>'
>>> render_siecle2("siècle2", ["xviii"], defaultdict(str))
'XVIII<sup>e</sup>'
"""
number = parts[0]
number = int_to_roman(int(number)) if number.isnumeric() else number.upper()
suffix = "er" if number == "I" else "e"
return f"{number}{superscript(suffix)}" | d0abd2703aebf3a05c0cc893c6678ed6683bb31a | 3,547 |
def get_interactions(request):
"""Function to get the interactions for a molecule"""
dist_dict = {"SingleAtomAcceptor_SingleAtomDonor": {"dist": 4.0}, # H-bonding
"SingleAtomAcceptor_WeakDonor": {"dist": 3.0}, # Weak H-bond
"Halogen_SingleAtomAcceptor": {"dist": 4.0}, # Halogen bonding
"AcidicGroup_BasicGroup": {"dist": 4.0}, # Acid-base
"Arom5_Arom6": {"dist": 5.5},"Arom6_Arom6": {"dist": 5.5},"Arom5_Arom5": {"dist": 5.5},# Aromatic-aromatic interactions
"Arom6_Carbonyl": {"dist": 4.5}, "Arom5_Carbonyl": {"dist": 4.5},# Carbonyl-aromatic interactions - CARBONLY from PROTEIN ONLY!!!!
"Hydrophobe_Hydrophobe": {"dist": 4.5}}#Hydrophobic interactions
mol_pk = request.GET['obs_id']
my_dist = request.GET['dist']
host = request.get_host()
mol = Molecule.objects.get(pk=mol_pk)
out_l = []
prot = mol.prot_id
# Get the interactions
interactions = ProbeBit.objects.filter(prot_id=prot, mol_id=mol, dist__lte=my_dist)
i = -1
for my_int in interactions:
if my_int.type not in dist_dict:
continue
if my_int.dist > dist_dict[my_int.type]["dist"]:
continue
print "HERE"
i += 1
out_l.append({})
f = my_int.probe_source_id
out_l[i]["url_1"] = "http://"+host+'/WONKA/show_point/?x_com='+str(f.x_com)+'&y_com='+str(f.y_com)+'&z_com='+str(f.z_com)
f = my_int.probe_dest_id
out_l[i]["url_2"] = "http://"+host+'/WONKA/show_point/?x_com='+str(f.x_com)+'&y_com='+str(f.y_com)+'&z_com='+str(f.z_com)
out_l[i]["dist"] = my_int.dist
out_l[i]["type"] = my_int.type
out_l[i]["angle_1"] = my_int.angle_1
out_l[i]["angle_2"] = my_int.angle_2
return HttpResponse(json.dumps(out_l)) | 86d4444b1c4a9251e8be91f453e1489234edf087 | 3,548 |
import json
def sendmail_action():
"""Send an email to the address entered in the sendmail form."""
if not MSGRAPHAPI.loggedin:
redirect("/sendmail")
email_body = json.dumps(
{
"Message": {
"Subject": request.query.subject,
"Body": {"ContentType": "HTML", "Content": request.query.body},
"ToRecipients": [{"EmailAddress": {"Address": request.query.to}}],
},
"SaveToSentItems": "true",
}
)
# send the email
response = MSGRAPHAPI.post(endpoint="me/microsoft.graph.sendMail", data=email_body)
# refresh the sendmail page, showing result (status_code) for this action
return template(
"sendmail.tpl",
dict(
fullname=MSGRAPHAPI.loggedin_name,
email=MSGRAPHAPI.loggedin_email,
status_code=response.status_code,
),
) | ed4f31ee22005e9ea1b3e24ca92b33be3980ebe2 | 3,549 |
import math
def normal_to_angle(x, y):
"""
Take two normal vectors and return the angle that they give.
:type x: float
:param x: x normal
:type y: float
:param y: y normal
:rtype: float
:return: angle created by the two normals
"""
return math.atan2(y, x) * 180 / math.pi | c6f5b5e2952858cd3592b4e0849806b0ccd5de78 | 3,551 |
def glDeleteFramebuffersEXT( baseOperation, n, framebuffers=None ):
"""glDeleteFramebuffersEXT( framebuffers ) -> None
"""
if framebuffers is None:
framebuffers = arrays.GLuintArray.asArray( n )
n = arrays.GLuintArray.arraySize( framebuffers )
return baseOperation( n, framebuffers ) | 8c14fa4ce55c6fe995c01822776b17728e132987 | 3,552 |
def outline(image, mask, color):
"""
Give a color to the outline of the mask
Args:
image: an image
mask: a label
color: a RGB color for outline
Return:
image: the image which is drawn outline
"""
mask = np.round(mask)
yy, xx = np.nonzero(mask)
for y, x in zip(yy, xx):
if 0.0 < np.mean(mask[max(0, y - 1) : y + 2, max(0, x - 1) : x + 2]) < 1.0:
image[max(0, y) : y + 1, max(0, x) : x + 1] = color
return image | dc66410053e2326965c591a9d3b566e477133295 | 3,554 |
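# Added minimal sketch (not part of the original snippet): draw a red outline
# of a square mask onto a blank RGB image. Only border pixels of the 3x3 block
# (where the local 3x3 mean is strictly between 0 and 1) are coloured.
import numpy as np
image = np.zeros((6, 6, 3))
mask = np.zeros((6, 6))
mask[2:5, 2:5] = 1.0
outlined = outline(image, mask, color=(1.0, 0.0, 0.0))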
import socket
def is_open_port(port):
"""
Check if port is open
:param port: port number to be checked
:type port: int
:return: is port open
:rtype: bool
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
except socket.error:
return False
return True | df81cc942f39d00bbdb8cb11628666a117c9788f | 3,556 |
def stopping_fn_from_metric(metric_name: str):
"""
Returns a stopping function for ignite.handlers.EarlyStopping using the given metric name.
"""
def stopping_fn(engine: Engine):
return engine.state.metrics[metric_name]
return stopping_fn | c6b47fd417134eb72c017a4cd55b8dfc995ea0c5 | 3,557 |
def build_attribute_set(items, attr_name):
"""Build a set off of a particular attribute of a list of
objects. Adds 'None' to the set if one or more of the
objects in items is missing the attribute specified by
attr_name.
"""
attribute_set = set()
for item in items:
attribute_set.add(getattr(item, attr_name, None))
return attribute_set | 2cee5922463188a4a8d7db79d6be003e197b577f | 3,559 |
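# Added example (hypothetical objects): None marks items lacking the attribute.
from types import SimpleNamespace
items = [SimpleNamespace(colour="red"), SimpleNamespace(colour="blue"), SimpleNamespace()]
build_attribute_set(items, "colour")  # -> {'red', 'blue', None}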
def potential_fn(q):
"""
- log density for the normal distribution
"""
return 0.5 * np.sum(((q['z'] - true_mean) / true_std) ** 2) | f2b4ff07b7188494c9901c85f3b920e9d273a3f4 | 3,560 |
import yaml
def patch_rbac(rbac_v1: RbacAuthorizationV1Api, yaml_manifest) -> RBACAuthorization:
"""
Patch a clusterrole and a binding.
:param rbac_v1: RbacAuthorizationV1Api
:param yaml_manifest: an absolute path to yaml manifest
:return: RBACAuthorization
"""
with open(yaml_manifest) as f:
docs = yaml.safe_load_all(f)
role_name = ""
binding_name = ""
for dep in docs:
if dep["kind"] == "ClusterRole":
print("Patch the cluster role")
role_name = dep["metadata"]["name"]
rbac_v1.patch_cluster_role(role_name, dep)
print(f"Patched the role '{role_name}'")
elif dep["kind"] == "ClusterRoleBinding":
print("Patch the binding")
binding_name = dep["metadata"]["name"]
rbac_v1.patch_cluster_role_binding(binding_name, dep)
print(f"Patched the binding '{binding_name}'")
return RBACAuthorization(role_name, binding_name) | 1830cc26dc79674bfb628556e34572ddc6d6e89e | 3,562 |
def _shp_to_boundary_gdf(shp_file_path):
""".shpかshpが格納された.zipを指定してgdfを作成する
Args:
shp_file_path (Path): 変換対象のshpファイルを格納するディレクトリのパス文字列
Returns:
gpd.GeoDataFrame: shpを変換したGeoDataFrame
"""
s2g = ShapeToGeoPandas(str(shp_file_path.resolve()))
gdf = s2g.gdf
necessary_columns = [
"KEY_CODE",
"PREF",
"CITY",
"PREF_NAME",
"CITY_NAME",
"geometry"]
geo_d = GdfDissolve(gdf, necessary_columns)
geo_d.join_columns("AREA_CODE", "PREF", "CITY")
geo_d.dissolve_poly("AREA_CODE")
boundary_gdf = geo_d.new_gdf
geojson_obj = geojson_str_to_obj(df_to_geojson(boundary_gdf))
write_geojson(geojson_obj, "./created/", "boundary.geojson")
output_csv_from_df(boundary_gdf, "./created/", "boundary.csv")
return boundary_gdf | 4e1aef42f6bb16f184f0669b37dc7720851bee14 | 3,563 |
import random
def insert_site(site, seq, offset=None):
"""Inserts a sequence (represeting a site) into a larger sequence (which
is a sequence object rather than a series of letters."""
# inputs:
# site The site to be inserted
# offsets the offset where the site is to be inserted
# seq The sequence into which the specified site is to
# be implanted
# get sequence info
name = seq.getName()
seq_data = seq.getSeq()
assert ((offset == None) or ((offset >= 0) and \
(offset <= (len(seq_data) - len(site)))))
# select a random offset if none given:
if (offset == None):
# insert signal in a random position, from 0 up to m (= l - w)
offset = random.randint(0,(len(seq_data) - len(site)))
# insert the signal
signal_seq = seq_data[:offset]+str(site)+seq_data[(offset + len(site)):]
# create a modified sequence object to return
new_seq = sequence.Seq(name, signal_seq)
return new_seq | 88a4df8e2ab094337a27d0e2d84c2078dfed5cc4 | 3,564 |
def sex2bpzmags(f, ef, zp=0., sn_min=1., m_lim=None):
"""
This function converts a pair of flux, error flux measurements from SExtractor
into a pair of magnitude, magnitude error which conform to BPZ input standards:
- Nondetections are characterized as mag=99, errormag=m_1sigma
- Objects with absurd flux/flux error combinations or very large errors are
characterized as mag=-99 errormag=0.
"""
nondetected = less_equal(f, 0.) * greater(
ef, 0) #Flux <=0, meaningful phot. error
nonobserved = less_equal(ef, 0.) #Negative errors
#Clip the flux values to avoid overflows
f = clip(f, 1e-100, 1e10)
ef = clip(ef, 1e-100, 1e10)
nonobserved += equal(ef, 1e10)
nondetected += less_equal(
old_div(f, ef),
sn_min) #Less than sn_min sigma detections: consider non-detections
detected = logical_not(nondetected + nonobserved)
m = zeros(len(f)) * 1.
em = zeros(len(ef)) * 1.
m = where(detected, -2.5 * log10(f) + zp, m)
m = where(nondetected, 99., m)
m = where(nonobserved, -99., m)
em = where(detected, 2.5 * log10(1. + old_div(ef, f)), em)
if not m_lim:
em = where(nondetected, -2.5 * log10(ef) + zp, em)
else:
em = where(nondetected, m_lim, em)
em = where(nonobserved, 0., em)
return m, em | ec95566f680326cd550626495a6983d0221ae29b | 3,565 |
def get_elk_command(line):
"""Return the 2 character command in the message."""
if len(line) < 4:
return ""
return line[2:4] | 550eda4e04f57ae740bfd294f9ec3b243e17d279 | 3,566 |
def safe_div(a, b):
"""
Safe division operation. When b is equal to zero, this function returns 0.
Otherwise it returns result of a divided by non-zero b.
:param a: number a
:param b: number b
:return: a divided by b or zero
"""
if b == 0:
return 0
return a / b | 68e5bccbe812315b9a1d27a1fa06d26d5339d6fd | 3,567 |
def decrypt_story():
"""
Using the methods you created in this problem set,
decrypt the story given by the function getStoryString().
Use the functions getStoryString and loadWords to get the
raw data you need.
returns: string - story in plain text
"""
story = CiphertextMessage(get_story_string())
return story.decrypt_message() | af636efedf5e95847c4a7dd52031cfc3dfa094a0 | 3,568 |
def shouldAvoidDirectory(root, dirsToAvoid):
"""
Given a directory (root, of type string) and a set of directory
paths to avoid (dirsToAvoid, of type set of strings), return a boolean value
describing whether the file is in that directory to avoid.
"""
subPaths = root.split('/')
for i, subPath in enumerate(subPaths):
dir = '/'.join(subPaths[:i+1])
if dir in dirsToAvoid:
return True
return False | afc92111f57031eb1e2ba797d80ea4abc2a7ccd0 | 3,569 |
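# Added example (not part of the original snippet): every cumulative prefix of
# the path is tested against dirsToAvoid.
shouldAvoidDirectory("src/vendor/lib", {"src/vendor"})  # -> True ('src/vendor' matches)
shouldAvoidDirectory("src/main/lib", {"src/vendor"})    # -> False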
def atom_explicit_hydrogen_valences(xgr):
""" explicit hydrogen valences, by atom
"""
return dict_.transform_values(atom_explicit_hydrogen_keys(xgr), len) | e409b82606f113373086ca52689baeb117d3a8ea | 3,570 |
def change_account_type(user_id):
"""Change a user's account type."""
if current_user.id == user_id:
flash('You cannot change the type of your own account. Please ask '
'another administrator to do this.', 'error')
return redirect(url_for('admin.user_info', user_id=user_id))
user = User.query.get(user_id)
if user is None:
abort(404)
form = ChangeAccountTypeForm()
if form.validate_on_submit():
user.role = form.role.data
db.session.add(user)
db.session.commit()
flash('Role for user {} successfully changed to {}.'.format(
user.full_name(), user.role.name), 'form-success')
return render_template('admin/manage_user.html', user=user, form=form) | 3c13b4ea1d3b080a4c925d576eee29c61346363a | 3,571 |
import re
def test_runner(filtered_tests, args):
"""
Driver function for the unit tests.
Prints information about the tests being run, executes the setup and
teardown commands and the command under test itself. Also determines
success/failure based on the information in the test case and generates
TAP output accordingly.
"""
testlist = filtered_tests
tcount = len(testlist)
index = 1
tap = str(index) + ".." + str(tcount) + "\n"
for tidx in testlist:
result = True
tresult = ""
if "flower" in tidx["category"] and args.device == None:
continue
print("Test " + tidx["id"] + ": " + tidx["name"])
prepare_env(tidx["setup"])
(p, procout) = exec_cmd(tidx["cmdUnderTest"])
exit_code = p.returncode
if (exit_code != int(tidx["expExitCode"])):
result = False
print("exit:", exit_code, int(tidx["expExitCode"]))
print(procout)
else:
match_pattern = re.compile(str(tidx["matchPattern"]), re.DOTALL)
(p, procout) = exec_cmd(tidx["verifyCmd"])
match_index = re.findall(match_pattern, procout)
if len(match_index) != int(tidx["matchCount"]):
result = False
if result == True:
tresult += "ok "
else:
tresult += "not ok "
tap += tresult + str(index) + " " + tidx["id"] + " " + tidx["name"] + "\n"
if result == False:
tap += procout
prepare_env(tidx["teardown"])
index += 1
return tap | 39e1af33a563dd9915ec6f107c8dd1f199528cab | 3,572 |
def get_all_infos_about_argument(db_argument: Argument, main_page, db_user, lang) -> dict:
"""
Returns a bunch of information about the given argument
:param db_argument: The argument
:param main_page: url of the application
:param db_user: User
:param lang: Language
:rtype: dict
:return: dictionary with many information or an error
"""
_t = Translator(lang.ui_locales)
return_dict = dict()
db_votes = DBDiscussionSession.query(ClickedArgument).filter(ClickedArgument.argument_uid == db_argument.uid,
ClickedArgument.is_valid == True,
ClickedStatement.is_up_vote == True).all()
db_author = DBDiscussionSession.query(User).get(db_argument.author_uid)
return_dict['vote_count'] = str(len(db_votes))
return_dict['author'] = db_author.global_nickname
return_dict['author_url'] = main_page + '/user/' + str(db_author.uid)
return_dict['gravatar'] = get_profile_picture(db_author)
return_dict['timestamp'] = sql_timestamp_pretty_print(db_argument.timestamp, db_argument.lang)
text = get_text_for_argument_uid(db_argument.uid)
return_dict['text'] = start_with_capital(text)
supporters = []
gravatars = dict()
public_page = dict()
for vote in db_votes:
db_author = DBDiscussionSession.query(User).get(vote.author_uid)
name = db_author.global_nickname
if db_user.nickname == db_author.nickname:
name += ' (' + _t.get(_.itsYou) + ')'
supporters.append(name)
gravatars[name] = get_profile_picture(db_author)
public_page[name] = main_page + '/user/' + str(db_author.uid)
return_dict['supporter'] = supporters
return_dict['gravatars'] = gravatars
return_dict['public_page'] = public_page
return return_dict | 56c03e8d11d23c3726b54b2d2be954ecefba786b | 3,573 |
def average_proxy(ray, method, proxy_type):
""" method to average proxy over the raypath.
Simple method is direct average of the proxy: $\sum proxy(r) / \sum dr$.
Other methods could be: $1/(\sum 1 / proxy)$ (better for computing \delta t)
"""
total_proxy = 0.
    if not hasattr(method, "evaluation"):
        method.evaluation = None  # in case the attribute was not defined.
if method.evaluation == "inverse":
for _, point in enumerate(ray):
_proxy = method.proxy_singlepoint(point, proxy_type)[proxy_type]
total_proxy += 1. / _proxy
number = len(ray)
proxy = 1. / total_proxy / float(number)
else:
for j, point in enumerate(ray):
_proxy = method.proxy_singlepoint(point, proxy_type)[proxy_type]
total_proxy += _proxy
number = len(ray)
proxy = total_proxy / float(number)
return proxy | 912925339f6a2087020781e898de102c3ee4f0d6 | 3,574 |
import datetime
def after(base=None, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
            total_months = _base.month - 1 + _val
            _base = _base.replace(year=_base.year + total_months // 12,
                                  month=total_months % 12 + 1)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base | 80f3ccb2d45f247a93a0d69bf0a55bc9264ee7de | 3,575 |
import string
from itertools import compress
def open_lib(ifile):
"""Opens lib with name ifile and returns stationmeta, arraydim, rlengths, heights, sectors, data."""
    with open(ifile, 'r') as f:
        lines = f.readlines()
        lines = [x.strip() for x in lines]
        data = {}
        lines = [x for x in lines if x.strip()]
        data["meta"] = lines[0]
        printable = set(string.printable)
        data["meta"] = ''.join(ch for ch in data["meta"] if ch in printable)
data["dim"] = np.array(lines[1].split()).astype(int)
data["R"] = np.array(lines[2].split()).astype(float) #[m]
data["H"] = np.array(lines[3].split()).astype(float) #[m]
data["sect"] = int(data["dim"][2])
data_block = lines[4:]
# frequencies
data["f"] = convert_to_np(data_block[::len(data["H"])*2+1],data["sect"])
# create masks for A, k value
mask = np.ones(len(data_block), dtype=bool)
mask[::len(data["H"])*2+1] = False
AK = convert_to_np(list(compress(data_block,mask)),data["sect"])
data["A"] = AK[::2]
data["k"] = AK[1::2]
return data | 22fb842112f1558ab086058d997e08cb416e9a19 | 3,576 |
def convert_examples_to_features(examples, intent_label_list, slot_label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, intent_label_list, slot_label_list,
max_seq_length, tokenizer)
features.append(feature)
return features | 078f6e23e3a2f42a12851763a3424d938569d25b | 3,577 |
from bs4 import BeautifulSoup
def get_blb_links(driver):
"""takes (driver) and returns list of links to scrape"""
homepage = "https://www.bloomberg.com/europe"
rootpage = "https://www.bloomberg.com"
driver.get(homepage)
ssm = driver.find_elements_by_class_name("single-story-module")[0].get_attribute(
"outerHTML"
)
spm_1 = driver.find_elements_by_class_name("story-package-module")[0].get_attribute(
"outerHTML"
)
spm_2 = driver.find_elements_by_class_name("story-package-module")[1].get_attribute(
"outerHTML"
)
oped = driver.find_elements_by_class_name("story-package-module")[2].get_attribute(
"outerHTML"
)
soup = BeautifulSoup(ssm + spm_1 + spm_2 + oped, "lxml")
links = [
rootpage + link.get("href")
for link in soup.findAll("a")
if "/news/" in link.get("href")
]
links = list(dict.fromkeys(links))
return links | f2ecf967aa6e755b51e43450239b5606013cb9bf | 3,578 |
def randomProfile(freq,psd):
"""
Generate a random profile from an input PSD.
freq should be in standard fft.fftfreq format
psd should be symmetric as with a real signal
sqrt(sum(psd)) will equal RMS of profile
"""
amp = np.sqrt(psd)*len(freq)
ph = randomizePh(amp)
f = amp*ph
sig = np.fft.ifft(f)
return np.real(sig) | 54477fc37bb81c24c0bde3637112a18613201565 | 3,579 |
def getP(W, diagRegularize = False):
"""
    Turn a similarity matrix into a probability matrix,
with each row sum normalized to 1
:param W: (MxM) Similarity matrix
:param diagRegularize: Whether or not to regularize
the diagonal of this matrix
:returns P: (MxM) Probability matrix
"""
if diagRegularize:
P = 0.5*np.eye(W.shape[0])
WNoDiag = np.array(W)
np.fill_diagonal(WNoDiag, 0)
RowSum = np.sum(WNoDiag, 1)
RowSum[RowSum == 0] = 1
P = P + 0.5*WNoDiag/RowSum[:, None]
return P
else:
RowSum = np.sum(W, 1)
RowSum[RowSum == 0] = 1
P = W/RowSum[:, None]
return P | 1c5c7da2e86b5c800660acaf825507914cd630ce | 3,580 |
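A small sketch of how getP normalizes a similarity matrix; the 2x2 matrix is an arbitrary example and numpy is assumed to be imported as np:

W = np.array([[1.0, 1.0],
              [2.0, 2.0]])
P = getP(W)
print(P)               # each row of W divided by its row sum
print(P.sum(axis=1))   # [1. 1.] -- every row now sums to one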
def get_structure_index(structure_pattern,stream_index):
"""
Translates the stream index into a sequence of structure indices identifying an item in a hierarchy whose structure is specified by the provided structure pattern.
>>> get_structure_index('...',1)
[1]
>>> get_structure_index('.[.].',1)
[1, 0]
>>> get_structure_index('.[[...],..].',1)
[1, 0, 0]
>>> get_structure_index('.[[...]...].',2)
[1, 0, 1]
>>> get_structure_index('.[[...]...].',3)
[1, 0, 2]
>>> get_structure_index('.[[...]...].',4)
[1, 1]
>>> get_structure_index('.[[...]...].',5)
[1, 2]
>>> get_structure_index('.[[...]...].',6)
[1, 3]
>>> get_structure_index('.[[...]...].',7)
[2]
"""
structure_index = [0]
current_stream_index = 0
for p in structure_pattern:
if p == '[':
structure_index.append(0)
elif p == '.':
if current_stream_index == stream_index:
return structure_index
structure_index[-1] += 1
current_stream_index += 1
elif p == ']':
structure_index.pop(-1)
structure_index[-1] += 1
else:
raise Exception('Invalid character in structure pattern: %s' % repr(p))
raise Exception('Provided stream index does not exist in the provided structure pattern') | 8f1def101aa2ec63d1ea69382db9641bf0f51380 | 3,581 |
import numpy as np
import torch
def n_knapsack(n_knapsacks=5,
n_items=100, # Should be divisible by n_knapsack
n_weights_per_items=500,
use_constraints=False,
method='trust-constr',
backend='tf'
):
"""
Here we solve a continuous relaxation of the multiknapsack problem.
"""
# Let's emulate the multiknapsack problem with random weights
    weights_ = np.random.random((n_weights_per_items, n_items))
# We create knapsacks with attribution of the items to knapsacks [0,1,2,3,4] as:
# [0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4]
capacity_knapsacks = weights_.reshape(
(n_weights_per_items, -1, n_knapsacks)).sum(-2)
if backend == 'tf':
weights_ = tf.constant(weights_, tf.float32)
capacity_knapsacks_ = tf.constant(capacity_knapsacks, tf.float32)
def func(W):
# We use softmax to impose the constraint that the attribution of items to knapsacks should sum to one
if use_constraints:
W = tf.nn.softmax(W, 1)
# We add a penalty only when the weights attribution sums higher than the knapsacks capacity.
res = tf.nn.relu(weights_@W-capacity_knapsacks_)
res = tf.reduce_mean(res**2)
return res
dev = None
else:
dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
weights_ = torch.tensor(weights_, dtype=torch.float32, device=dev)
capacity_knapsacks_ = torch.tensor(
capacity_knapsacks, dtype=torch.float32, device=dev)
def func(W):
# We use softmax to impose the constraint that the attribution of items to knapsacks should sum to one
if use_constraints:
W = torch.nn.functional.softmax(W, 1)
# We add a penalty only when the weights attribution sums higher than the knapsacks capacity.
res = torch.nn.functional.relu(weights_@W-capacity_knapsacks_)
res = (res**2).mean()
return res
if use_constraints:
if backend == 'tf':
def eq_fun(W):
return tf.reduce_sum(W, 1)-1
else:
def eq_fun(W):
return W.sum(1)-1
constraints = {
'type': 'eq',
'fun': eq_fun,
'lb': 0,
'ub': 0,
'use_autograd': False
}
else:
constraints = None
Winit = np.zeros((n_items, n_knapsacks))
res = minimize(func, Winit, tol=1e-8,
constraints=constraints,
bounds=(0, None),
method=method,
torch_device=dev,
backend=backend)
return res | ed4068bad29aff385a86286eab224da3c1beefa5 | 3,582 |
def has_url(account: Accounts) -> bool:
"""Return True if the account's note or fields seem to contain a URL."""
if account.note and "http" in account.note.lower():
return True
if "http" in str(account.fields).lower():
return True
return False | d858ef1bedcac064bbc9a5c20de02b1438c5cdad | 3,583 |
def conv2x2(in_planes, out_planes, stride=1, groups=1, dilation=1, padding=0):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=2,
stride=stride,
padding=padding,
groups=groups,
bias=False,
dilation=dilation) | a96112cd56817940292f92b5417584f18b7d2fb7 | 3,584 |
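A hedged usage sketch for conv2x2, assuming torch and torch.nn (as nn) are imported; with kernel_size=2, stride 1 and no padding, a 16x16 input shrinks to 15x15:

conv = conv2x2(in_planes=3, out_planes=8)
x = torch.randn(1, 3, 16, 16)
y = conv(x)
print(y.shape)  # torch.Size([1, 8, 15, 15])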
def face_normals(P,T,normalize=True):
"""Computes normal vectors to triangles (faces).
Args:
P: n*3 float array
T: m*3 int array
normalize: Whether or not to normalize to unit vectors. If False, then the magnitude of each vector is twice the area of the corresponding triangle. Default is True
Returns:
A Numpy array of size (num_tri,3) containing the face normal vectors.
"""
P1 = P[T[:,0],:]
P2 = P[T[:,1],:]
P3 = P[T[:,2],:]
N = np.cross(P2-P1,P3-P1)
if normalize:
        N = (N.T / np.linalg.norm(N, axis=1)).T
return N | 379c82624aeb1a772befb91acee1b46a7ff7f937 | 3,585 |
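A quick check of face_normals on a single right triangle in the xy-plane (numpy assumed imported as np); its unit normal points along +z:

P = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0]])
T = np.array([[0, 1, 2]])
print(face_normals(P, T))                   # [[0. 0. 1.]]
print(face_normals(P, T, normalize=False))  # magnitude = 2 * triangle area = 1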
import itertools
def transform_pairwise(X, y):
"""Transforms data into pairs with balanced labels for ranking
Transforms a n-class ranking problem into a two-class classification
problem. Subclasses implementing particular strategies for choosing
pairs should override this method.
    In this method, all pairs are chosen, except for those that have the
same target value. The output is an array of balanced classes, i.e.
there are the same number of -1 as +1
Reference: "Large Margin Rank Boundaries for Ordinal Regression",
R. Herbrich, T. Graepel, K. Obermayer.
Authors: Fabian Pedregosa <[email protected]>
Alexandre Gramfort <[email protected]>
Args:
X: (np.array), shape (n_samples, n_features)
The data
y: (np.array), shape (n_samples,) or (n_samples, 2)
Target labels. If it's a 2D array, the second column represents
the grouping of samples, i.e., samples with different groups will
not be considered.
Returns:
        X_trans: (np.array), shape (k, n_features)
            Data as pairs, where k = (n_samples * (n_samples-1)) / 2 if grouping
values were not passed. If grouping variables exist, then returns
values computed for each group.
y_trans: (np.array), shape (k,)
Output class labels, where classes have values {-1, +1}
If y was shape (n_samples, 2), then returns (k, 2) with groups on
the second dimension.
"""
X_new, y_new, y_group = [], [], []
y_ndim = y.ndim
if y_ndim == 1:
y = np.c_[y, np.ones(y.shape[0])]
comb = itertools.combinations(range(X.shape[0]), 2)
for k, (i, j) in enumerate(comb):
if y[i, 0] == y[j, 0] or y[i, 1] != y[j, 1]:
# skip if same target or different group
continue
X_new.append(X[i] - X[j])
y_new.append(np.sign(y[i, 0] - y[j, 0]))
y_group.append(y[i, 1])
# output balanced classes
if y_new[-1] != (-1) ** k:
y_new[-1] = -y_new[-1]
X_new[-1] = -X_new[-1]
if y_ndim == 1:
return np.asarray(X_new), np.asarray(y_new).ravel()
elif y_ndim == 2:
return np.asarray(X_new), np.vstack((np.asarray(y_new), np.asarray(y_group))).T | d4c376fe1a594f6baecb78aeaa11e90bffcde8a5 | 3,586 |
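An illustrative call to transform_pairwise on a toy ranking problem (numpy assumed imported as np); three samples with distinct targets yield three pairwise differences with balanced labels:

X = np.array([[1.0], [2.0], [3.0]])
y = np.array([1, 2, 3])
X_trans, y_trans = transform_pairwise(X, y)
print(X_trans.shape)  # (3, 1)
print(y_trans)        # [ 1. -1.  1.]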
def format_project_title(rank: int, project_id: str, status: str) -> str:
"""Formats a project title for display in Slack.
Args:
rank: The rank of in the list. Will be prepended to the title.
project_id: The project ID.
status: The status of the project. This is used to determine which
emoji is used to prefix the title string.
Returns:
A formatted title string.
"""
project_link = generate_gcp_project_link(project_id)
if status == SETTINGS.STATUS_WARNING:
return f':warning: *{rank}. {project_link}*'
return f':white_check_mark: *{rank}. {project_link}*' | 35a8b7dd2c3e8afd3975ddf97056a81d631e3576 | 3,587 |
def fake_3dimage_vis():
"""
:return: a Nifti1Image (3D) in RAS+ space
Following characteristics:
- shape[LR] = 7
- shape[PA] = 8
- shape[IS] = 9
Visual thing using voxel art...
"""
shape = (7,8,9)
data = np.zeros(shape, dtype=np.float32, order="F")
# "L"
indices =np.array([
(0,1,6),
(0,1,5),
(0,1,4),
(0,1,3),
(0,1,2),
(0,1,1),
(0,2,1),
(0,3,1),
(0,4,1),
(0,5,1),
]).T
data[indices[0], indices[1], indices[2]] = 7
# "P"
indices =np.array([
(1,0,6),
(1,0,5),
(1,0,4),
(1,0,3),
(1,0,2),
(1,0,1),
(2,0,6),
(3,0,5),
(3,0,4),
(2,0,3),
(1,0,2),
]).T
data[indices[0], indices[1], indices[2]] = 9
# "I"
indices =np.array([
(3,1,0),
(2,1,0),
(1,1,0),
(4,1,0),
(5,1,0),
(3,1,0),
(3,2,0),
(3,3,0),
(3,4,0),
(3,5,0),
(3,6,0),
(3,7,0),
(2,7,0),
(1,7,0),
(4,7,0),
(5,7,0),
]).T
data[indices[0], indices[1], indices[2]] = 9
affine = np.eye(4)
return nibabel.nifti1.Nifti1Image(data, affine) | 65d766d04a6a85e5cafcdc0d7c62e2f3974caa5b | 3,588 |
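A short sketch of how the fixture above might be used in a test (nibabel and numpy assumed imported as in the snippet):

img = fake_3dimage_vis()
assert img.get_fdata().shape == (7, 8, 9)
assert np.allclose(img.affine, np.eye(4))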
import atexit
def watch_dependencies(dependency, func, time_execution=15000, registry=None, app=current_app):
"""
Register dependencies metrics up
"""
if not registry:
registry = app.extensions.get("registry", CollectorRegistry())
app.extensions["registry"] = registry
# pylint: disable=invalid-name
DEPENDENCY_UP = Gauge(
'dependency_up',
'records if a dependency is up or down. 1 for up, 0 for down',
["name"],
registry=registry
)
    def register_dependency():
        DEPENDENCY_UP.labels(dependency).set(func())
    scheduler = BackgroundScheduler()
    scheduler.add_job(
        func=register_dependency,
trigger="interval",
seconds=time_execution/1000,
max_instances=1,
name='dependency',
misfire_grace_time=2,
replace_existing=True
)
scheduler.start()
# Shut down the scheduler when exiting the app
atexit.register(scheduler.shutdown)
return scheduler | f502f3e1e6beba5169160459bc0887462ac6662c | 3,589 |
def view_cache_key(func, args, kwargs, extra=None):
"""
Calculate cache key for view func.
Use url instead of not properly serializable request argument.
"""
if hasattr(args[0], 'build_absolute_uri'):
uri = args[0].build_absolute_uri()
else:
uri = args[0]
return 'v:' + func_cache_key(func, args[1:], kwargs, extra=(uri, extra)) | 85139dc6a77d3758b6e691b565361b390c643e91 | 3,590 |
def get_filter(sampling_freq, f_pass, f_stop, taps):
"""Get FIR filter coefficients using the Remez exchange algorithm.
    Args:
        sampling_freq (float): Sampling frequency.
        f_pass (float): Passband edge (same units as sampling_freq).
        f_stop (float): Stopband edge (same units as sampling_freq).
        taps (int): Number of taps or coefficients in the resulting filter.
Returns:
(numpy.ndarray): Computed filter coefficients.
"""
return ffd.remez(taps, [0, f_pass/sampling_freq, f_stop/sampling_freq, 0.5], [0, 1]) | a0fb303ad74ee6e60dd0521af5e00b4b1d1d42e0 | 3,591 |
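A hedged usage sketch for get_filter, assuming ffd refers to scipy.signal (e.g. imported as "import scipy.signal as ffd"); the specific frequencies are arbitrary:

coeffs = get_filter(sampling_freq=8000.0, f_pass=1200.0, f_stop=1500.0, taps=64)
print(len(coeffs))  # 64 filter coefficients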
def errorcode_from_error(e):
"""
Get the error code from a particular error/exception caused by PostgreSQL.
"""
return e.orig.pgcode | 981b447d540e949834c10f4a05fb21769091b104 | 3,592 |
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : SlotW28
A SlotW28 object
Returns
-------
point_dict: dict
A dict of the slot point coordinates
"""
Rbo = self.get_Rbo()
# alpha is the angle to rotate Z0 so ||Z1,Z8|| = W0
alpha = float(arcsin(self.W0 / (2 * Rbo)))
slot_pitch = 2 * pi / self.Zs
# comp point coordinate (in complex)
Z0 = Rbo * exp(1j * 0)
Z8 = Z0 * exp(-1j * alpha)
if self.is_outwards():
Z7 = Z8 + self.H0
# Rotation to get the tooth on X axis
Z7 = Z7 * exp(1j * slot_pitch / 2)
Z8 = Z8 * exp(1j * slot_pitch / 2)
# Z7 = x7 + 1j*y7
# Z6 = x + 1j * W3/2
# C2,Z6 _|_ Z6,Z5 => Re(C2) = Re(Z6)
# ||Z6,zc2|| = R1 => Zc2 = x + 1j*(W3/2+R1)
# ||Z7,zc2||² = R1² => (x7-x)²+ (y7-(W3/2+R1))² = R1²
# x² - 2*x7 x + (x7²+(y7-(W3/2+R1))²-R1²) = 0
# D = 4*x7² - 4*(x7²+(y7-(W3/2+R1))²-R1²) = -4((y7-(W3/2+R1))²-R1²)
# x = x7 + sqrt(-4((y7-(W3/2+R1))²-R1²))/2
Z6 = (
Z7.real
+ sqrt(-4 * ((Z7.imag - (self.W3 / 2.0 + self.R1)) ** 2 - self.R1 ** 2)) / 2
+ 1j * self.W3 / 2.0
)
Z5 = Z6 + self.H3
rot_sign = 1
else: # inward slot
Z7 = Z8 - self.H0
# Rotation to get the tooth on X axis
Z7 = Z7 * exp(1j * slot_pitch / 2)
Z8 = Z8 * exp(1j * slot_pitch / 2)
Z6 = (
Z7.real
- sqrt(-4 * ((Z7.imag - (self.W3 / 2.0 + self.R1)) ** 2 - self.R1 ** 2)) / 2
+ 1j * self.W3 / 2.0
)
Z5 = Z6 - self.H3
rot_sign = -1
# Tooth ref to slot
Z1, Z2, Z3, Z4 = (
Z8 * exp(-1j * slot_pitch / 2),
Z7 * exp(-1j * slot_pitch / 2),
Z6 * exp(-1j * slot_pitch / 2),
Z5 * exp(-1j * slot_pitch / 2),
)
point_dict = dict()
point_dict["Z1"] = Z1
point_dict["Z2"] = Z2
point_dict["Z3"] = Z3
point_dict["Z4"] = Z4
# symetry
point_dict["Z5"] = Z4.conjugate()
point_dict["Z6"] = Z3.conjugate()
point_dict["Z7"] = Z2.conjugate()
point_dict["Z8"] = Z1.conjugate()
# Center
A = Arc1(Z2, Z3, rot_sign * self.R1, self.is_outwards())
point_dict["Zc1"] = A.get_center()
point_dict["Zc2"] = (point_dict["Z4"] + point_dict["Z5"]) / 2
point_dict["Zc3"] = point_dict["Zc1"].conjugate()
return point_dict | 0938c9ecba5e9fc1ed76e804e8eab6f9bd97f253 | 3,593 |
from typing import IO
def save_expected_plot(series: pd.Series, colour="C0") -> IO:
"""Return an image of the plot with the given `series` and `colour`."""
fig, ax = plt.subplots()
ax.add_line(mpl_lines.Line2D(series.index, series.values, color=colour))
return _save_fig(fig, ax) | 6c898a622f983cebd46fb368a40e2a492bddb37f | 3,594 |
def GRU_architecture(
GRU_layers,
GRU_neurons,
Dense_layers,
Dense_neurons,
add_Dropout,
Dropout_rate,
data_shape,
):
"""
Parameters
----------
GRU_layers : int
Number of GRU layers.
GRU_neurons : list
List with the numbers of GRU cells in each GRU layer.
Dense_layers : int
Number of Dense layers after GRU layers.
Dense_neurons : list
List with the numbers of neurons in each fully-connecred layer.
add_Dropout : bool
Specifies whether dropout regularization should be applied.
Dropout_rate : float
Dropout rate - the number between 0 and 1.
data_shape : tuple
Shape of the training data.
Returns
-------
model : keras.engine.training.Model
Model with the specified architecture.
"""
# data_shape[1] - lag, data_shape[2] - number of signals
input_layer = Input((data_shape[1], data_shape[2]))
# If there is only one GRU layer, than return_sequences should be false
if GRU_layers == 1:
layers_gru = GRU(
GRU_neurons[0],
activation="tanh",
recurrent_activation="tanh",
use_bias=True,
return_sequences=False,
)(input_layer)
    # For many GRU layers return_sequences should be True, to connect layers with each other
else:
layers_gru = input_layer
# Adding GRU layers
for grul in range(0, GRU_layers - 1):
layers_gru = GRU(
GRU_neurons[grul],
activation="tanh",
recurrent_activation="tanh",
use_bias=True,
return_sequences=True,
)(layers_gru)
# Adding Dropout
if add_Dropout:
layers_gru = Dropout(Dropout_rate)(layers_gru)
# Adding last GRU layer
layers_gru = GRU(
GRU_neurons[-1],
activation="tanh",
recurrent_activation="tanh",
use_bias=True,
return_sequences=False,
)(layers_gru)
# Adding Dropout
if add_Dropout:
layers_gru = Dropout(Dropout_rate)(layers_gru)
# Adding Dense layers if asked
for densel in range(Dense_layers):
layers_gru = Dense(Dense_neurons[densel], activation="relu")(layers_gru)
# Adding Dropout
if add_Dropout:
layers_gru = Dropout(Dropout_rate)(layers_gru)
# Adding output layer
output = Dense(1, activation="linear")(layers_gru)
model = Model(inputs=input_layer, outputs=output)
return model | 77c2aaed6a82f2f4aedade73649d843ff8afd2e2 | 3,595 |
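An illustrative call to GRU_architecture, assuming the Keras pieces used above (Input, GRU, Dense, Dropout, Model) are imported; the sizes are arbitrary:

model = GRU_architecture(
    GRU_layers=2,
    GRU_neurons=[16, 8],
    Dense_layers=1,
    Dense_neurons=[4],
    add_Dropout=True,
    Dropout_rate=0.2,
    data_shape=(None, 10, 3),  # (batch, lag, number of signals)
)
model.summary()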
def create_lock(name):
"""Creates a file in the /locks folder by the given name"""
lock_path = get_lock_path(name)
if not check_lock(lock_path):
return touch_file(lock_path)
else:
return False | 1f5e4d43a85a01aaba19295a72cdee20fd7adb1b | 3,596 |
def gen_rigid_tform_rot(image, spacing, angle):
"""
generate a SimpleElastix transformation parameter Map to rotate image by angle
Parameters
----------
image : sitk.Image
SimpleITK image that will be rotated
spacing : float
Physical spacing of the SimpleITK image
angle : float
angle of rotation in degrees, rotates counter-clockwise if positive
Returns
-------
SimpleITK.ParameterMap of rotation transformation (EulerTransform)
"""
tform = BASE_RIG_TFORM.copy()
image.SetSpacing((spacing, spacing))
bound_w, bound_h = compute_rot_bound(image, angle=angle)
rot_cent_pt = image.TransformContinuousIndexToPhysicalPoint(
((bound_w - 1) / 2, (bound_h - 1) / 2)
)
c_x, c_y = (image.GetSize()[0] - 1) / 2, (image.GetSize()[1] - 1) / 2
c_x_phy, c_y_phy = image.TransformContinuousIndexToPhysicalPoint(
(c_x, c_y)
)
t_x = rot_cent_pt[0] - c_x_phy
t_y = rot_cent_pt[1] - c_y_phy
tform["Spacing"] = [str(spacing), str(spacing)]
tform["Size"] = [str(int(np.ceil(bound_w))), str(int(np.ceil(bound_h)))]
tform["CenterOfRotationPoint"] = [str(rot_cent_pt[0]), str(rot_cent_pt[1])]
tform["TransformParameters"] = [
str(np.radians(angle)),
str(-1 * t_x),
str(-1 * t_y),
]
return tform | 5cadbd59340328d1af5e4ec2c5c0c47e69e19013 | 3,597 |
def get_param_response(param_name, dict_data, num=0, default=None):
"""
    :param param_name: the parameter to extract from the API response
    :param dict_data: the API response
    :param num: when the response contains a list, the index of the element to take
    :param default: value returned if extraction fails
    :return: the extracted parameter value
"""
if isinstance(dict_data, dict):
for k, v in dict_data.items():
if k == param_name:
return v
else:
if isinstance(v, dict):
ret = get_param_response(param_name, v)
if ret is not default:
return ret
if isinstance(v, list):
if num:
try:
if isinstance(v[num], dict):
ret = get_param_response(param_name, v[num])
if ret is not default:
return ret
except IndexError:
return {'error': ErrorCode.index_error}
else:
for i in v:
if isinstance(i, dict):
ret = get_param_response(param_name, i)
if ret is not default:
return ret
if isinstance(v, str):
try:
value = eval(v)
ret = get_param_response(param_name, value)
if ret is not default:
return ret
except Exception:
pass
elif isinstance(dict_data, list):
for content in dict_data:
ret = get_param_response(param_name, content)
if ret is not default:
return ret
return default | b9ffa5dfe6e9707771a812dad1d8d9284acbc2e7 | 3,598 |
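A small self-contained example of get_param_response walking a nested response; the payload is made up:

response = {"code": 0, "data": {"items": [{"id": 7, "name": "alpha"}]}}
print(get_param_response("name", response))  # 'alpha'
print(get_param_response("id", response))    # 7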
def _strict_conv1d(x, h):
"""Return x * h for rank 1 tensors x and h."""
with ops.name_scope('strict_conv1d', values=[x, h]):
x = array_ops.reshape(x, (1, -1, 1, 1))
h = array_ops.reshape(h, (-1, 1, 1, 1))
result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
return array_ops.reshape(result, [-1]) | dabe247f97c285ed5d250d7d8d99dae5795a8fec | 3,599 |
def timed_zip_map_agent(func, in_streams, out_stream,
call_streams=None, name=None):
"""
Parameters
----------
in_streams: list of Stream
The list of input streams of the agent.
Each input stream is timed, i.e. the elements
are pairs (timestamp, value)
out_stream: Stream
The single output stream of the agent.
The output_stream is also timed.
call_streams: list of Stream
The list of call_streams. A new value in any stream in this
list causes a state transition of this agent.
name: Str
Name of the agent created by this function.
Returns
-------
Agent.
The agent created by this function.
Notes
-----
Each stream in in_streams must be a stream of tuples or lists
or NumPy arrays where element[0] is a time and where time is
a total order. Each stream in in_stream must be strictly
monotonically increasing in time.
out_stream merges the in_streams in order of time. An element
of out_stream is a list where element[0] is a time T and
element[1] is a list consisting of all elements of in in_streams
that have time T.
"""
# Check types of arguments
check_list_of_streams_type(list_of_streams=in_streams,
agent_name=name, parameter_name='in_streams')
check_stream_type(name, 'out_stream', out_stream)
check_list_of_streams_type(list_of_streams=call_streams,
agent_name=name, parameter_name='call_streams')
num_in_streams = len(in_streams)
indices = range(num_in_streams)
# The transition function for this agent.
def transition(in_lists, state):
# Check the types of in_lists
check_in_lists_type(name, in_lists, num_in_streams)
# input_lists is the list of lists that this agent can operate on
# in this transition.
input_lists = [in_list.list[in_list.start:in_list.stop]
for in_list in in_lists]
# pointers is a list where pointers[i] is a pointer into the i-th
# input lists
pointers = [0 for i in indices]
# stops is a list where pointers[i] must not exceed stops[i].
stops = [len(input_lists[i]) for i in indices]
# output_list is the single output list for this agent.
output_list = []
while all(pointers[i] < stops[i] for i in indices):
# slice is a list with one element per input stream.
# slice[i] is the value pointed to by pointers[i].
slice = [input_lists[i][pointers[i]] for i in indices]
# slice[i][0] is the time field for slice[i].
# earliest_time is the earliest time pointed to by pointers.
earliest_time = min(slice[i][0] for i in indices)
# slice[i][1:] is the list of fields other than the time
# field for slice[i].
# next_output_value is a list with one element for
# each input stream.
            # next_output_value[i] is None if the time
            # for slice[i] is later than the earliest time. If the time
            # for slice[i] is the earliest time, then next_output_value[i]
            # is the non-time field slice[i][1].
next_output_value = [slice[i][1] if slice[i][0] == earliest_time
else None for i in indices]
            # increment pointers for the indexes where the time was the
# earliest time.
pointers = [pointers[i]+1 if slice[i][0] == earliest_time
else pointers[i] for i in indices]
# Make next_output a list consisting of a time: the earliest time
# followed by a sequence of lists, one for each input stream.
# Each list in this sequence consists of the non-time fields.
next_output = [earliest_time]
next_output.append(next_output_value)
next_output = func(next_output)
# output_list has an element for each time in the input list.
output_list.append(next_output)
# Return: (1) output_lists, the list of outputs, one per
# output stream. This agent has a single output stream
# and so output_lists = [output_list]
# (2) the new state; the state is irrelevant for this
# agent because all it does is merge streams.
# (3) the new starting pointer into this stream for
# this agent. Since this agent has read
# pointers[i] number of elements in the i-th input
# stream, move the starting pointer for the i-th input
# stream forward by pointers[i].
return [output_list], state, [in_lists[i].start+pointers[i] for i in indices]
# Finished transition
# Create agent
state = None
# Create agent with the following parameters:
# 1. list of input streams.
# 2. list of output streams. This agent has a single output stream and so
# out_streams is [out_stream].
# 3. transition function
# 4. new state (irrelevant for this agent), so state is None
# 5. list of calling streams
# 6. Agent name
return Agent(in_streams, [out_stream], transition, state, call_streams, name) | 0c23ff84ed72ee25b04bf118d57203c7831702e8 | 3,600 |
def get_repository_ids_requiring_prior_install( trans, tsr_ids, repository_dependencies ):
"""
Inspect the received repository_dependencies and determine if the encoded id of each required repository is in the received tsr_ids. If so,
    then determine whether that required repository should be installed prior to its dependent repository. Return a list of encoded repository
ids, each of which is contained in the received list of tsr_ids, and whose associated repositories must be installed prior to the dependent
repository associated with the received repository_dependencies.
"""
prior_install_ids = []
if repository_dependencies:
for key, rd_tups in repository_dependencies.items():
if key in [ 'description', 'root_key' ]:
continue
for rd_tup in rd_tups:
tool_shed, name, owner, changeset_revision, prior_installation_required = suc.parse_repository_dependency_tuple( rd_tup )
if asbool( prior_installation_required ):
repository = suc.get_repository_for_dependency_relationship( trans.app, tool_shed, name, owner, changeset_revision )
if repository:
encoded_repository_id = trans.security.encode_id( repository.id )
if encoded_repository_id in tsr_ids:
prior_install_ids.append( encoded_repository_id )
return prior_install_ids | 035e7f358b8af7d4915b255b988f1fabc56605c1 | 3,601 |
from typing import Iterable
from typing import Tuple
from typing import List
def get_words_and_spaces(
words: Iterable[str], text: str
) -> Tuple[List[str], List[bool]]:
"""Given a list of words and a text, reconstruct the original tokens and
return a list of words and spaces that can be used to create a Doc. This
can help recover destructive tokenization that didn't preserve any
whitespace information.
words (Iterable[str]): The words.
text (str): The original text.
RETURNS (Tuple[List[str], List[bool]]): The words and spaces.
"""
if "".join("".join(words).split()) != "".join(text.split()):
raise ValueError(Errors.E194.format(text=text, words=words))
text_words = []
text_spaces = []
text_pos = 0
# normalize words to remove all whitespace tokens
norm_words = [word for word in words if not word.isspace()]
# align words with text
for word in norm_words:
try:
word_start = text[text_pos:].index(word)
except ValueError:
raise ValueError(Errors.E194.format(text=text, words=words)) from None
if word_start > 0:
text_words.append(text[text_pos : text_pos + word_start])
text_spaces.append(False)
text_pos += word_start
text_words.append(word)
text_spaces.append(False)
text_pos += len(word)
if text_pos < len(text) and text[text_pos] == " ":
text_spaces[-1] = True
text_pos += 1
if text_pos < len(text):
text_words.append(text[text_pos:])
text_spaces.append(False)
return (text_words, text_spaces) | 205c48435018a99433fce298a6035de902774810 | 3,602 |
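A brief example of get_words_and_spaces recovering whitespace information (this mirrors spaCy's helper, so Errors is only needed on the mismatch path):

words = ["Hello", "world", "!"]
text = "Hello world!"
print(get_words_and_spaces(words, text))
# (['Hello', 'world', '!'], [True, False, False])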
from typing import List
import ast
def parse_names(source: str) -> List['Name']:
"""Parse names from source."""
tree = ast.parse(source)
visitor = ImportTrackerVisitor()
visitor.visit(tree)
return sum([split_access(a) for a in visitor.accessed], []) | 42f622052bc8acd5cdec187cd16695cec3b86154 | 3,603 |