content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M)
---|---|---|
from sklearn.covariance import ledoit_wolf
def mvw_ledoit_wolf(prices,
weight_bounds=(0.,1.),
rf = 0.,
options = None):
"""
Calculates the mean-variance weights given a DataFrame of returns.
Wraps mean_var_weights with ledoit_wolf covariance calculation method
Args:
* prices (DataFrame): Prices for multiple securities.
        * weight_bounds ((low, high)): Weight limits for optimization.
* rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
* options (dict): options for minimizing, e.g. {'maxiter': 10000 }
Returns:
Series {col_name: weight}
"""
r = prices.to_returns().dropna()
    covar = ledoit_wolf(r)[0]
    # the docstring says this wraps mean_var_weights; assumed signature: (covar, weight_bounds, rf, options)
    return mean_var_weights(covar, weight_bounds, rf, options) | 086f6430d189fd12509d56ce4a96a351a178979b | 3,651,100 |
import tensorflow as tf
def _PadLabels3d(logits, labels):
"""Pads or slices 3-d labels to match logits.
Covers the case of 2-d softmax output, when labels is [batch, height, width]
and logits is [batch, height, width, onehot]
Args:
logits: 4-d Pre-softmax fully-connected output.
labels: 3-d, but not necessarily matching in size.
Returns:
labels: Resized by padding or clipping to match logits.
"""
logits_shape = shapes.tensor_shape(logits)
labels_shape = shapes.tensor_shape(labels)
labels = tf.reshape(labels, [-1, labels_shape[2]])
labels = _PadLabels2d(logits_shape[2], labels)
labels = tf.reshape(labels, [labels_shape[0], -1])
labels = _PadLabels2d(logits_shape[1] * logits_shape[2], labels)
return tf.reshape(labels, [labels_shape[0], logits_shape[1], logits_shape[2]]) | 223f7dfea9ebc970e62dbe71e2f27dfb5c9f161d | 3,651,101 |
def intx():
"""Returns the default int type, as a string.
(e.g. 'int16', 'int32', 'int64').
# Returns
String, the current default int type.
"""
return _INTX | 57661ef00953e07228ff81abc93ec22c216797ff | 3,651,102 |
import json
from flask import Response
def dev_end_hardware_script() -> Response:
"""Designate the end of a hardware script in flask log.
Can be invoked by: curl http://localhost:4567/development/end_hardware_script
"""
return Response(json.dumps({}), mimetype="application/json") | 714b448642180753e639992f2d101841074aeefd | 3,651,103 |
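# Usage sketch for dev_end_hardware_script above (an illustration, not from the source):
# registering the view on a Flask app so the endpoint from the docstring's curl example responds.
from flask import Flask

app = Flask(__name__)
app.add_url_rule("/development/end_hardware_script", view_func=dev_end_hardware_script)
# app.run(port=4567) would serve it locally, matching the port used in the docstring.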
def _init_train(opt):
"""Common initilization stuff for all training process."""
ArgumentParser.validate_prepare_opts(opt)
if opt.train_from:
# Load checkpoint if we resume from a previous training.
checkpoint = load_checkpoint(ckpt_path=opt.train_from)
fields = load_fields(opt.save_data, checkpoint)
transforms_cls = get_transforms_cls(opt._all_transform)
if (hasattr(checkpoint["opt"], '_all_transform') and
len(opt._all_transform.symmetric_difference(
checkpoint["opt"]._all_transform)) != 0):
_msg = "configured transforms is different from checkpoint:"
new_transf = opt._all_transform.difference(
checkpoint["opt"]._all_transform)
old_transf = checkpoint["opt"]._all_transform.difference(
opt._all_transform)
if len(new_transf) != 0:
_msg += f" +{new_transf}"
if len(old_transf) != 0:
_msg += f" -{old_transf}."
logger.warning(_msg)
if opt.update_vocab:
logger.info("Updating checkpoint vocabulary with new vocabulary")
fields, transforms_cls = prepare_fields_transforms(opt)
else:
checkpoint = None
        # Data preprocessing/preparation stage: the goal is to convert the data into torchtext.field format
fields, transforms_cls = prepare_fields_transforms(opt)
# Report src and tgt vocab sizes
for side in ['src', 'tgt']:
f = fields[side]
try:
f_iter = iter(f)
except TypeError:
f_iter = [(side, f)]
for sn, sf in f_iter:
if sf.use_vocab:
logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))
return checkpoint, fields, transforms_cls | bb2a043d1a59f996b303aabf9db724ced3505dbf | 3,651,104 |
import os
import fnmatch
import docopt
def main(wf):
"""Run workflow script."""
opts = docopt.docopt(__doc__, argv=wf.args, version=wf.version)
if opts['list']:
return list_actions(opts)
dry_run = opts['--nothing']
log.info('=' * 50)
log.debug('opts=%r', opts)
log.info('looking for workflows using an outdated version '
'of Alfred-Workflow...')
# subprocess.call(['open', '-a', 'Console', wf.logfile])
root = get_workflow_directory()
if not root:
log.critical('could not find your workflow directory')
print('ERROR: could not find workflow directory')
return 1
log.info('workflow directory: %r', root)
blacklisted = load_blacklist()
updated = 0
failed = 0
# loop through subdirectories of workflow directory
# 1. ignore symlinks
# 2. ignore files
# 3. ignore blacklisted workflows
# 4. identify AW workflows
# 5. check version of AW the workflow has
# 6. if AW is outdated, backup the existing copy and replace
# it with an up-to-date version of AW
for dn in os.listdir(root):
p = os.path.join(root, dn)
if os.path.islink(p):
log.info('ignoring symlink: %s', dn)
continue
if not os.path.isdir(p):
log.debug('ignoring non-directory: %s', dn)
continue
try:
info = get_workflow_info(p)
except Exception as err:
log.error('could not read workflow: %s: %s', dn, err)
continue
if not info or not info.aw.dir:
log.debug('not an AW workflow: %s', dn)
continue
if info.id == wf.bundleid:
log.debug('ignoring self')
continue
ok = True
for pat in blacklisted:
            if fnmatch.fnmatch(info.id, pat):
log.debug('blacklisted: "%s" matches "%s"', info.id, pat)
log.info('skipping blacklisted workflow: %s', dn)
ok = False
break
if not ok:
continue
log.info('')
log.info('found AW workflow: %s', dn)
log.info(' name: %s', info.name)
log.info(' bundle ID: %s', info.id)
log.info(' AW version: %s', info.aw.version)
if info.aw.version >= MIN_VERSION:
log.info('[OK] workflow "%s" has current version of '
'Alfred-Workflow', info.name)
log.info('')
continue
log.info('[!!] workflow "%s" is using outdated version '
'(%s) of Alfred-Workflow', info.name, info.aw.version)
if not dry_run:
try:
update_workflow(info)
except Exception as err:
failed += 1
log.error('failed to update workflow "%s" (%s): %s',
info.name, info.aw.dir, err, exc_info=True)
log.info('')
continue
log.info('')
updated += 1
if dry_run:
log.info('[DONE] would update %d workflow(s) with a newer version of '
'Alfred-Workflow', updated)
print('Would update {} workflow(s)'.format(updated))
return
else:
if failed:
log.info('[DONE] failed to update %d/%d workflow(s) with a '
'newer version of Alfred-Workflow',
failed, failed + updated)
print('ERROR: Failed to update {}/{} workflow(s)'.format(
failed, failed + updated))
return 1
else:
log.info('[DONE] updated %d workflow(s) with a newer version of '
'Alfred-Workflow', updated)
print('Updated {} workflow(s)'.format(updated))
return | 7d62c3e498374097eaf232bc8195da908a370dbd | 3,651,105 |
import ibmsecurity.utilities.tools
def compare(isamAppliance1, isamAppliance2):
"""
Compare Update Servers between two appliances
"""
ret_obj1 = get_all(isamAppliance1)
ret_obj2 = get_all(isamAppliance2)
for obj in ret_obj1['data']:
del obj['uuid']
for obj in ret_obj2['data']:
del obj['uuid']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['uuid']) | e29025ca0af897f10b3b8498f8def86841b76c97 | 3,651,106 |
import collections
import warnings
import numpy as np
import pandas as pd
from packaging import version
import gisutils
from gisutils import get_authority_crs
from rasterio import Affine, features
from fiona.crs import from_epsg, to_string
def rasterize(feature, grid, id_column=None,
include_ids=None,
crs=None, epsg=None, proj4=None,
dtype=np.float32, **kwargs):
"""Rasterize a feature onto the model grid, using
the rasterio.features.rasterize method. Features are intersected
if they contain the cell center.
Parameters
----------
feature : str (shapefile path), list of shapely objects,
or dataframe with geometry column
id_column : str
Column with unique integer identifying each feature; values
from this column will be assigned to the output raster.
grid : grid.StructuredGrid instance
crs : obj
A Python int, dict, str, or pyproj.crs.CRS instance
passed to :meth:`pyproj.crs.CRS.from_user_input`
Can be any of:
- PROJ string
- Dictionary of PROJ parameters
- PROJ keyword arguments for parameters
- JSON string with PROJ parameters
- CRS WKT string
- An authority string [i.e. 'epsg:4326']
- An EPSG integer code [i.e. 4326]
- A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
- An object with a `to_wkt` method.
- A :class:`pyproj.crs.CRS` class
dtype : dtype
Datatype for the output array
**kwargs : keyword arguments to rasterio.features.rasterize()
https://rasterio.readthedocs.io/en/stable/api/rasterio.features.html
Returns
-------
2D numpy array with intersected values
"""
    try:
        from rasterio import features, Affine  # guard for the optional rasterio dependency
    except ImportError:
print('This method requires rasterio.')
return
if epsg is not None:
warnings.warn("The epsg argument is deprecated. Use crs instead, "
"which requires gisutils >= 0.2",
DeprecationWarning)
if proj4 is not None:
warnings.warn("The epsg argument is deprecated. Use crs instead, "
"which requires gisutils >= 0.2",
DeprecationWarning)
if crs is not None:
if version.parse(gisutils.__version__) < version.parse('0.2.0'):
raise ValueError("The crs argument requires gisutils >= 0.2")
crs = get_authority_crs(crs)
trans = grid.transform
kwargs = {}
if isinstance(feature, str):
proj4 = get_proj_str(feature)
kwargs = {'dest_crs': grid.crs}
kwargs = get_input_arguments(kwargs, shp2df)
df = shp2df(feature, **kwargs)
elif isinstance(feature, pd.DataFrame):
df = feature.copy()
elif isinstance(feature, collections.Iterable):
# list of shapefiles
if isinstance(feature[0], str):
proj4 = get_proj_str(feature[0])
kwargs = {'dest_crs': grid.crs}
kwargs = get_input_arguments(kwargs, shp2df)
df = shp2df(feature, **kwargs)
else:
df = pd.DataFrame({'geometry': feature})
elif not isinstance(feature, collections.Iterable):
df = pd.DataFrame({'geometry': [feature]})
else:
print('unrecognized feature input')
return
# handle shapefiles in different CRS than model grid
if 'dest_crs' not in kwargs:
reproject = False
# todo: consolidate rasterize reprojection to just use crs
if crs is not None:
if crs != grid.crs:
df['geometry'] = project(df.geometry.values, crs, grid.crs)
if proj4 is not None:
if proj4 != grid.proj_str:
reproject = True
elif epsg is not None and grid.epsg is not None:
if epsg != grid.epsg:
reproject = True
proj4 = to_string(from_epsg(epsg))
if reproject:
df['geometry'] = project(df.geometry.values, proj4, grid.proj_str)
# subset to include_ids
if id_column is not None and include_ids is not None:
df = df.loc[df[id_column].isin(include_ids)].copy()
# create list of GeoJSON features, with unique value for each feature
if id_column is None:
numbers = range(1, len(df)+1)
# if IDs are strings, get a number for each one
# pd.DataFrame.unique() generally preserves order
    elif df[id_column].dtype == object:
unique_values = df[id_column].unique()
values = dict(zip(unique_values, range(1, len(unique_values) + 1)))
numbers = [values[n] for n in df[id_column]]
else:
numbers = df[id_column].tolist()
geoms = list(zip(df.geometry, numbers))
result = features.rasterize(geoms,
out_shape=(grid.nrow, grid.ncol),
transform=trans)
assert result.sum(axis=(0, 1)) != 0, "Nothing was intersected!"
return result.astype(dtype) | 474fd8dc871d6d2b64eb459f2c026be764f6a48d | 3,651,107 |
import random
def get_random():
"""
    Retrieves the current issue of XKCD, chooses a random issue between 1 and the current issue number, and returns a JSON object.
    Returns None if a requests error occurs.
"""
return get_issue(random.randint(1, int(get_current()["num"]))) | 10fbf75681901722510b0b9fbb2de298eb80b45e | 3,651,108 |
def get_fasta_readlengths(fasta_file):
"""
Get a sorted list of contig lengths
:return: (tuple)
"""
lens = []
with open_fasta_reader(fasta_file) as f:
for record in f:
lens.append(len(record.sequence))
lens.sort()
return lens | 769cf5af50ba684c107a1312d2aeaab2721a29c6 | 3,651,109 |
import numpy as np
from numpy import unravel_index
def postprocess(p, gt, width_and_height, p_binary, false_positives=False, false_negatives=False):
"""
This function does matching and then postprocessing of p's and gt's
:param p: the objects given from rcnn
:param gt: the objects we get from the ground truth
:param width_and_height: the width and height of the image
    :return: info_image: a list which contains the postprocessed p, rectangles for p, postprocessed gt, rectangles
             for gt, width and height
"""
len_p = len(p)
len_gt = len(gt)
elements_in_p = [i for i in xrange(len_p)]
elements_in_gt = [i for i in xrange(len_gt)]
matching_table = create_matching_table(p, gt)
max_number_of_matches = min(matching_table.shape[0], matching_table.shape[1])
new_p = []
new_gt = []
new_rects_p = []
new_rects_gt = []
new_p_binary = []
new_gt_binary = []
threshold = 0.5
# on this part we create the real matches between p and gt
for _ in xrange(max_number_of_matches):
best_match = unravel_index(matching_table.argmax(), matching_table.shape)
if matching_table[best_match[0], best_match[1]] > threshold: # check if it is a different value from 0
matching_table[best_match[0], :] = 0.
matching_table[:, best_match[1]] = 0.
new_p.append(p[best_match[0], :21])
new_p_binary.append(p_binary[best_match[0]])
new_gt_binary.append(np.array([1., 0.]))
new_rects_p.append(p[best_match[0], 21:])
new_gt.append(gt[best_match[1], :21])
new_rects_gt.append(gt[best_match[1], 21:])
elements_in_p.remove(best_match[0])
elements_in_gt.remove(best_match[1])
# here we add the matches of false positives by inserting background class on the given rectangles on the ground
# truth
if false_positives:
for element in elements_in_p:
new_p.append(p[element, :21])
new_p_binary.append(p_binary[element])
new_rects_p.append(p[element, 21:])
new_gt.append(create_background_peak_array())
new_gt_binary.append(np.array([0., 1.])) # 0 - not background; 1 - background
new_rects_gt.append(p[element, 21:])
# here we deal with false negatives, by adding them as r-cnn outputs equal to the ground truth
if false_negatives:
for element in elements_in_gt:
new_p.append(gt[element, :21])
new_p_binary.append(np.array([1., 0.]))
new_rects_p.append(gt[element, 21:])
new_gt.append(gt[element, :21])
new_gt_binary.append((np.array([1., 0.])))
new_rects_gt.append(gt[element, 21:])
# convert all the lists to numpy arrays
new_p = np.asarray(new_p)
new_rects_p = np.asarray(new_rects_p)
new_gt = np.asarray(new_gt)
new_rects_gt = np.asarray(new_rects_gt)
# add all the postprocessed information to a list
info_image = [new_p, new_gt, new_rects_p, new_rects_gt, width_and_height, new_p_binary, new_gt_binary]
return info_image | dd83de4547f7c1461b64fcd2dfa4c3df54aefd10 | 3,651,110 |
from csb.bio.structure import TorsionAngles
import numpy
def deg(x):
"""
Convert an array of torsion angles in radians to torsion degrees
ranging from -180 to 180.
@param x: array of angles
@type x: numpy array
@rtype: numpy array
"""
func = numpy.vectorize(TorsionAngles.deg)
return func(x) | 95e37a0c644df1562e417c1ad61e4788bd46c279 | 3,651,111 |
import timeit
def run_median_trial():
"""Generate table for Median Trial."""
tbl = DataTable([10,15,15],['N', 'median_time', 'sort_median'])
trials = [2**k+1 for k in range(8,20)]
for n in trials:
t_med = 1000*min(timeit.repeat(stmt='assert(linear_median(a) == {}//2)'.format(n),
setup='''
import random
from ch01.challenge import linear_median
a = list(range({}))
random.shuffle(a)
'''.format(n), repeat=10, number=5))/5
t_sort = 1000*min(timeit.repeat(stmt='assert(median_from_sorted_list(a) == {0}//2)'.format(n),
setup='''
import random
from ch01.challenge import median_from_sorted_list
a = list(range({}))
random.shuffle(a)
'''.format(n), repeat=10, number=5))/5
tbl.row([n, t_med, t_sort])
return tbl | ed4c5ebe8bd6259c4adc45c4b023cc5bb96a1055 | 3,651,112 |
import numpy as np
def regroup(X, N):
"""
Regroups the rows and columns of X such that rows/cols
that are N apart in X, are adjeacent in Y. If N is a
2 element vector, N[0] is used for rows and N[1] is used
for columns.
Parameters:
X: m by n matrix to be regrouped.
N: Integer or two element vector.
Returns:
Y: Regrouped matrix.
"""
m, n = X.shape
if isinstance(N, int):
N = [N, N]
if m % N[0] != 0 or n % N[1] != 0:
raise ValueError('X dimensions need to be multiple\
of elements in N')
    row_ind = np.ravel(
        [[i + k for i in np.arange(0, m, N[0])] for k in range(N[0])])  # rows span m, not n
col_ind = np.ravel(
[[i + k for i in np.arange(0, n, N[1])] for k in range(N[1])])
Y = X[row_ind, :]
Y = Y[:, col_ind]
return Y | 7ad92b878cb6a55820ef9ad92c68e934184d725d | 3,651,113 |
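# Example for regroup above (illustrative, not from the source): with N=2 on a 4x4
# matrix, rows/columns that are 2 apart become adjacent (index order [0, 2, 1, 3]).
import numpy as np

X = np.arange(16).reshape(4, 4)
print(regroup(X, 2))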
from sklearn import decomposition
def return_estimators(n_components):
"""Returns all of the estimators that can be used to generate models.
A larger selection of possible estimators have been commented out, but
could be uncommented."""
estimators = [
('PCArandom',
decomposition.PCA(n_components=n_components, svd_solver='randomized',
whiten=True))
]
# estimators = [
# ('PCArandom',
# decomposition.PCA(n_components=n_components,
# svd_solver='randomized',
# whiten=True)),
# ('PCAfull',
# decomposition.PCA(n_components=n_components,
# svd_solver='full',
# whiten=True)),
# ('PCAarpack',
# decomposition.PCA(n_components=n_components,
# svd_solver='arpack',
# whiten=True)),
# ('PCAauto',
# decomposition.PCA(n_components=n_components,
# svd_solver='auto',
# whiten=True))
# ]
return estimators | 680aa1d50c4e2db0e4d3df9e60749350df437bb8 | 3,651,114 |
import numpy as np
def _check_type_picks(picks):
"""helper to guarantee type integrity of picks"""
err_msg = 'picks must be None, a list or an array of integers'
if picks is None:
pass
elif isinstance(picks, list):
if not all(isinstance(i, int) for i in picks):
raise ValueError(err_msg)
picks = np.array(picks)
elif isinstance(picks, np.ndarray):
if not picks.dtype.kind == 'i':
raise ValueError(err_msg)
else:
raise ValueError(err_msg)
return picks | 79493f75db8e57f32a6369ad18900e0632d2bc18 | 3,651,115 |
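# Example for _check_type_picks above (illustrative, not from the source).
import numpy as np

print(_check_type_picks([0, 2, 5]))         # list of ints -> array([0, 2, 5])
print(_check_type_picks(None))              # None passes through unchanged
print(_check_type_picks(np.array([1, 3])))  # integer ndarray is returned as-is
# _check_type_picks('bad') would raise ValueError('picks must be None, a list or an array of integers')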
from inspect import cleandoc
def get_test_standard_scaler_str():
"""
Get a pandas projection code str
"""
test_code = cleandoc("""
standard_scaler = StandardScaler()
encoded_data = standard_scaler.fit_transform(df)
""")
return test_code | fd6e1daa7e0dddb603437e5b35c283a11e68ec00 | 3,651,116 |
from typing import List
from typing import Tuple
import re
def add_command(
command_list: List[Tuple[re.Pattern, callable]], func: callable, command_str: str
) -> List[Tuple[re.Pattern, callable]]:
"""Add a function and the command pattern to the command list.
Args:
func: Function it will be called
command_str: command string that specifies the pattern
"""
command_pattern = build_command_pattern(command_str)
command_list.append((command_pattern, func))
return command_list | f8076e4a6b37722591eae04a67feb1c25e606b84 | 3,651,117 |
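# Dispatch sketch for add_command above (illustrative, not from the source):
# build_command_pattern is not shown in the snippet, so a pre-compiled pattern is
# registered directly to show how the (pattern, func) pairs would route an input string.
def greet(name: str) -> None:
    print(f"hello, {name}")

command_list: List[Tuple[re.Pattern, callable]] = [(re.compile(r"^greet (?P<name>\w+)$"), greet)]

def dispatch(commands: List[Tuple[re.Pattern, callable]], text: str) -> bool:
    for pattern, func in commands:
        match = pattern.match(text)
        if match:
            func(**match.groupdict())
            return True
    return False

dispatch(command_list, "greet world")  # prints "hello, world"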
def get_clusters_and_critical_nodes(G, k, rho_star, phi_in):
"""
The implementation of the main body of the partitioning Algorithm.
The main while-loop of the algorithm is executed as long as a refinement is still possible.
:param phi_in: An algorithm parameter used to lower bound the inner conductance of each cluster
:param rho_star: A technical parameter of the algorithm
:param G: A networkx graph
:param k: The (supposed) number of clusters
:return: a list containing an l-wise partitioning of the nodes of G, for some l <= k
"""
# A list of vertices in the graph G
vertices = list(G.nodes())
# Initially the graph contains one cluster P_1 = V with core set core_1 = P_1.
P_1 = vertices[:]
core_1 = P_1[:]
# num_clusters is the variable denoting the current number of clusters
num_clusters = 1
# clusters is a list storing the current cluster structure of G (i.e. P_1, ..., P_l)
clusters = [P_1]
# core_sets is a list containing the current core_subsets of each cluster.
# (i.e. core_1, ..., core_(num_clusters) with core_i being a subset of P_i)
core_sets = [core_1]
# A list of lists, where each element grouped_critical_nodes[i] is a list of critical nodes from the tree T_i of
# cluster clusters[i]
grouped_critical_nodes = []
# The main loop of the algorithm. We continue as long as an update is possible
overall_update_is_found = True
while overall_update_is_found:
# At the beginning of the loop there is no update found
overall_update_is_found = False
# The main loop of the Partition Algorithm. We continue as long as a GT_update is possible
GT_update_is_found = True
while GT_update_is_found:
# First we check if a GT_update is possible
GT_update_is_found, index_cluster_to_update = check_if_GT_update_is_possible(G, clusters, core_sets,
phi_in)
if GT_update_is_found:
GT_update_is_done = False
# Notation of the corresponding sets of vertices
P_i = clusters[index_cluster_to_update]
core_i = core_sets[index_cluster_to_update]
S = cheeger_cut.cheeger_cut(G.subgraph(P_i))
S_complement = diff(vertices, S)
S_plus = intersect(S, core_i)
S_plus_bar = intersect(S_complement, core_i)
S_minus = intersect(diff(P_i, core_i), S)
S_minus_bar = intersect(diff(P_i, core_i), S_complement)
# Without loss of generality we assume vol(S_plus) < vol(core_i) / 2
if vol(G, S_plus) > vol(G, S_plus_bar):
S_plus, S_plus_bar = S_plus_bar, S_plus
S_minus, S_minus_bar = S_minus_bar, S_minus
# First "if" in the algorithm
if is_first_if_condition_satisfied(G, S_plus, S_plus_bar, k, num_clusters, rho_star):
make_new_cluster_with_subset_T_bar_of_core_i(
S_plus, S_plus_bar, clusters, core_sets, index_cluster_to_update)
num_clusters += 1
# A sanity check update
num_clusters = min(num_clusters, k)
GT_update_is_done = True
# Second "if" in the algorithm
if not GT_update_is_done and is_second_if_condition_satisfied(G, S_plus, S_plus_bar, core_i, k):
update_core_to_subset_T_or_T_bar(G, S_plus, S_plus_bar, core_sets, index_cluster_to_update)
GT_update_is_done = True
# Third "if" in the algorithm
if not GT_update_is_done and is_third_if_condition_satisfied(G, S_minus, k, num_clusters, rho_star):
make_new_cluster_with_subset_T_of_P_i(S_minus, clusters, core_sets, index_cluster_to_update)
num_clusters += 1
# A sanity check update
num_clusters = min(num_clusters, k)
GT_update_is_done = True
# At this point only a refinement of the partition is possible
if not GT_update_is_done:
# If there is a cluster P_j s.t. w(P_i - core_i -> P_i) < w(P_i - core_i -> P_j),
# then merge (P_i - core_i) with argmax_(P_j){w(P_i - core_i -> P_j)}
P_i_minus_core_i = diff(P_i, core_i)
# Find the index j of argmax_(P_j){w(P_i - core_i -> P_j)}.
best_cluster_index = find_cluster_P_j_that_maximises_weight_from_T_to_P_j(G, P_i_minus_core_i,
clusters)
# Forth "if" in the algorithm.
if best_cluster_index != index_cluster_to_update:
move_subset_T_from_P_i_to_P_j(P_i_minus_core_i, clusters, index_cluster_to_update,
best_cluster_index)
GT_update_is_done = True
if not GT_update_is_done:
# If there is a cluster P_j s.t. w(S_minus -> P_i) < w(S_minus -> P_j),
# then merge S_minus with argmax_(P_j){w(S_minus -> P_j)}
# Find the index j of argmax_(P_j){w(S_minus -> P_j)}.
best_cluster_index = find_cluster_P_j_that_maximises_weight_from_T_to_P_j(G, S_minus, clusters)
# Fifth "if" in the algorithm
if best_cluster_index != index_cluster_to_update:
move_subset_T_from_P_i_to_P_j(S_minus, clusters, index_cluster_to_update,
best_cluster_index)
GT_update_is_done = True
if not GT_update_is_done:
raise Exception('No GT_update performed in iteration')
grouped_critical_nodes = []
# Check if critical nodes need refinements
for i in range(len(clusters)):
# Get the list of critical nodes in the degree based construction of the graph G_i = G[P_i]
P_i = clusters[i]
core_i = core_sets[i]
G_i = G.subgraph(P_i)
T_i = tree.Tree()
T_i.make_tree(G_i, "degree")
critical_nodes_of_T_i = T_i.get_critical_nodes()
grouped_critical_nodes = grouped_critical_nodes + [critical_nodes_of_T_i]
for node in critical_nodes_of_T_i:
# Notation
N = node.vertices
N_complement = diff(vertices, N)
N_plus = intersect(N, core_i)
N_plus_bar = intersect(N_complement, core_i)
N_minus = intersect(diff(P_i, core_i), N)
N_minus_bar = intersect(diff(P_i, core_i), N_complement)
# Sixth "if" of the algorithm, first "if" of the refinement of the nodes,
if is_sixth_if_condition_satisfied(G, N_plus, N_plus_bar, k, num_clusters, rho_star):
make_new_cluster_with_subset_T_bar_of_core_i(
N_plus, N_plus_bar, clusters, core_sets, i)
num_clusters += 1
# A sanity check update
num_clusters = min(num_clusters, k)
overall_update_is_found = True
break
# Seventh "if" of the algorithm, second if of the refinement of the nodes
if not overall_update_is_found and is_seventh_if_condition_satisfied(G, N_plus, core_i, k):
update_core_to_subset_T_or_T_bar(G, N_plus, N_plus_bar, core_sets, i)
overall_update_is_found = True
break
# We attempt to move N_minus to the cluster P_j that maximises w(N_minus -> P_j)
if not overall_update_is_found and vol(G, N_minus) <= vol(G, P_i) / 2:
# Find the index j of argmax_(P_j){w(N_minus -> P_j)}.
# If best_cluster_index = i, then the eighth "if" is not satisfied
best_cluster_index = find_cluster_P_j_that_maximises_weight_from_T_to_P_j(G, N_minus, clusters)
# Eighth "if" of the algorithm, third if of the refinement of the nodes.
if weight(G, N_minus, P_i) < weight(G, N_minus, clusters[best_cluster_index]):
move_subset_T_from_P_i_to_P_j(N_minus, clusters, i,
best_cluster_index)
overall_update_is_found = True
break
if overall_update_is_found:
break
return clusters, grouped_critical_nodes | e7374c9cad30a87477ee5b9ce4d0a0e9cb7de041 | 3,651,118 |
def get_edges_out_for_vertex(edges: list, vertex: int) -> list:
"""Get a sublist of edges that have the specified vertex as first element
:param edges: edges of the graph
:param vertex: vertex of which we want to find the corresponding edges
:return: selected edges
"""
return [e for e in edges if e[0] == vertex] | 21485073df1c754e7c8e2b7dd9cafef284e601e7 | 3,651,119 |
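# Example for get_edges_out_for_vertex above (illustrative, not from the source):
edges = [(0, 1), (1, 2), (1, 3), (2, 3)]
print(get_edges_out_for_vertex(edges, 1))  # -> [(1, 2), (1, 3)]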
import matplotlib.pyplot as plt
import numpy as np
def pellet_plot_multi_unaligned(FEDs, shade_dark, lights_on,
lights_off,**kwargs):
"""
    FED3 Viz: Plot cumulative pellet retrieval for multiple FEDs, keeping the
x-axis to show absolute time.
Parameters
----------
FEDs : list of FED3_File objects
FED3 files (loaded by load.FED3_File)
shade_dark : bool
Whether to shade lights-off periods
lights_on : int
Integer between 0 and 23 denoting the start of the light cycle.
lights_off : int
Integer between 0 and 23 denoting the end of the light cycle.
**kwargs :
ax : matplotlib.axes.Axes
Axes to plot on, a new Figure and Axes are
created if not passed
date_filter : array
A two-element array of datetimes (start, end) used to filter
the data
**kwargs also allows FED3 Viz to pass all settings to all functions.
Returns
-------
fig : matplotlib.figure.Figure
"""
if not isinstance(FEDs, list):
FEDs = [FEDs]
for file in FEDs:
assert isinstance(file, FED3_File),'Non FED3_File passed to pellet_plot_multi()'
if 'ax' not in kwargs:
fig, ax = plt.subplots(figsize=(7,3.5), dpi=150)
else:
ax = kwargs['ax']
min_date = np.datetime64('2100')
max_date = np.datetime64('1970')
for file in FEDs:
df = file.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
x = df.index
y = df['Pellet_Count']
ax.plot(x, y, label=file.filename, alpha=.6, lw=1)
if max(x) > max_date:
max_date = max(x)
if min(x) < min_date:
min_date = min(x)
ax.set_xlabel('Time (h)')
date_format_x(ax, min_date, max_date)
ax.set_ylabel('Cumulative Pellets')
title = ('Pellets Retrieved for Multiple FEDs')
ax.set_title(title)
if shade_dark:
shade_darkness(ax, min_date, max_date,
lights_on=lights_on,
lights_off=lights_off)
if len(FEDs) < 10:
ax.legend(bbox_to_anchor=(1,1), loc='upper left')
plt.tight_layout()
return fig if 'ax' not in kwargs else None | 3601e8ecff20a3d7978f7261ebaa5236d662a25e | 3,651,120 |
import time
import numpy as np
def sync_via_mrmsdtw(f_chroma1: np.ndarray,
f_chroma2: np.ndarray,
f_DLNCO1: np.ndarray = None,
f_DLNCO2: np.ndarray = None,
input_feature_rate: float = 50,
step_sizes: np.ndarray = np.array([[1, 0], [0, 1], [1, 1]], np.int32),
step_weights: np.ndarray = np.array([1.0, 1.0, 1.0], np.float64),
threshold_rec: int = 10000, win_len_smooth: np.ndarray = np.array([201, 101, 21, 1]),
downsamp_smooth: np.ndarray = np.array([50, 25, 5, 1]),
verbose: bool = False,
dtw_implementation: str = 'synctoolbox',
normalize_chroma: bool = True,
chroma_norm_ord: int = 2,
chroma_norm_threshold: float = 0.001):
"""Compute memory-restricted multi-scale DTW (MrMsDTW) using chroma and (optionally) DLNCO features.
MrMsDTW is performed on multiple levels that get progressively finer, with rectangular constraint
regions defined by the alignment found on the previous, coarser level.
If DLNCO features are provided, these are used on the finest level in addition to chroma
to provide higher synchronization accuracy.
Parameters
----------
f_chroma1 : np.ndarray [shape=(12, N)]
Chroma feature matrix of the first sequence
f_chroma2 : np.ndarray [shape=(12, M)]
Chroma feature matrix of the second sequence
f_DLNCO1 : np.ndarray [shape=(12, N)]
DLNCO feature matrix of the first sequence (optional, default: None)
f_DLNCO2 : np.ndarray [shape=(12, M)]
DLNCO feature matrix of the second sequence (optional, default: None)
input_feature_rate: float
Input feature rate of the chroma features (default: 50)
step_sizes: np.ndarray
DTW step sizes (default: np.array([[1, 0], [0, 1], [1, 1]]))
step_weights: np.ndarray
DTW step weights (np.array([1.0, 1.0, 1.0]))
threshold_rec: int
Defines the maximum area that is spanned by the rectangle of two
consecutive elements in the alignment (default: 10000)
win_len_smooth : np.ndarray
Window lengths for chroma feature smoothing (default: np.array([201, 101, 21, 1]))
downsamp_smooth : np.ndarray
Downsampling factors (default: np.array([50, 25, 5, 1]))
verbose : bool
Set `True` for visualization (default: False)
dtw_implementation : str
DTW implementation, librosa or synctoolbox (default: synctoolbox)
normalize_chroma : bool
Set `True` to normalize input chroma features after each downsampling
and smoothing operation.
chroma_norm_ord: int
Order of chroma normalization, relevant if ``normalize_chroma`` is True.
(default: 2)
chroma_norm_threshold: float
If the norm falls below threshold for a feature vector, then the
normalized feature vector is set to be the unit vector. Relevant, if
``normalize_chroma`` is True (default: 0.001)
Returns
-------
alignment : np.ndarray [shape=(2, T)]
Resulting warping path
"""
# If DLNCO features are given as input, high resolution MrMsDTW is activated.
high_res = False
if f_DLNCO1 is not None and f_DLNCO2 is not None:
high_res = True
if high_res and (f_chroma1.shape[1] != f_DLNCO1.shape[1] or f_chroma2.shape[1] != f_DLNCO2.shape[1]):
raise ValueError('Chroma and DLNCO features must be of the same length.')
if downsamp_smooth[-1] != 1 or win_len_smooth[-1] != 1:
raise ValueError('The downsampling factor of the last iteration must be equal to 1, i.e.'
'at the last iteration, it is computed at the input feature rate!')
num_iterations = win_len_smooth.shape[0]
cost_matrix_size_old = tuple()
feature_rate_old = input_feature_rate / downsamp_smooth[0]
alignment = None
total_computation_time = 0.0
for it in range(num_iterations):
tic1 = time.perf_counter()
# Smooth and downsample given raw features
f_chroma1_cur, _ = smooth_downsample_feature(f_chroma1,
input_feature_rate=input_feature_rate,
win_len_smooth=win_len_smooth[it],
downsamp_smooth=downsamp_smooth[it])
f_chroma2_cur, feature_rate_new = smooth_downsample_feature(f_chroma2,
input_feature_rate=input_feature_rate,
win_len_smooth=win_len_smooth[it],
downsamp_smooth=downsamp_smooth[it])
if normalize_chroma:
f_chroma1_cur = normalize_feature(f_chroma1_cur,
norm_ord=chroma_norm_ord,
threshold=chroma_norm_threshold)
f_chroma2_cur = normalize_feature(f_chroma2_cur,
norm_ord=chroma_norm_ord,
threshold=chroma_norm_threshold)
# Project path onto new resolution
cost_matrix_size_new = (f_chroma1_cur.shape[1], f_chroma2_cur.shape[1])
if alignment is None:
# Initialize the alignment with the start and end frames of the feature sequence
anchors = np.array([[0, f_chroma1_cur.shape[1] - 1], [0, f_chroma2_cur.shape[1] - 1]])
else:
projected_alignment = project_alignment_on_a_new_feature_rate(alignment=alignment,
feature_rate_old=feature_rate_old,
feature_rate_new=feature_rate_new,
cost_matrix_size_old=cost_matrix_size_old,
cost_matrix_size_new=cost_matrix_size_new)
anchors = derive_anchors_from_projected_alignment(projected_alignment=projected_alignment,
threshold=threshold_rec)
# Cost matrix and warping path computation
if high_res and it == num_iterations - 1:
# Compute cost considering chroma and pitch onset features and alignment only in the last iteration,
# where the features are at the finest level.
cost_matrices_step1 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
f_DLNCO1=f_DLNCO1,
f_DLNCO2=f_DLNCO2,
anchors=anchors)
else:
cost_matrices_step1 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
anchors=anchors)
wp_list = compute_warping_paths_from_cost_matrices(cost_matrices_step1,
step_sizes=step_sizes,
step_weights=step_weights,
implementation=dtw_implementation)
# Concatenate warping paths
wp = build_path_from_warping_paths(warping_paths=wp_list,
anchors=anchors)
anchors_step1 = None
wp_step1 = None
num_rows_step1 = 0
num_cols_step1 = 0
ax = None
toc1 = time.perf_counter()
if verbose and cost_matrices_step1 is not None:
anchors_step1 = np.array(anchors, copy=True)
wp_step1 = np.array(wp, copy=True)
num_rows_step1, num_cols_step1 = np.sum(np.array([dtw_mat.shape for dtw_mat in cost_matrices_step1], int),
axis=0)
fig, ax = sync_visualize_step1(cost_matrices_step1,
num_rows_step1,
num_cols_step1,
anchors,
wp)
tic2 = time.perf_counter()
# Compute neighboring anchors and refine alignment using local path between neighboring anchors
anchor_indices_in_warping_path = find_anchor_indices_in_warping_path(wp, anchors=anchors)
# Compute neighboring anchors for refinement
neighboring_anchors, neighboring_anchor_indices = \
derive_neighboring_anchors(wp, anchor_indices=anchor_indices_in_warping_path)
if neighboring_anchor_indices.shape[0] > 1 \
and it == num_iterations - 1 and high_res:
cost_matrices_step2 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
f_DLNCO1=f_DLNCO1,
f_DLNCO2=f_DLNCO2,
anchors=neighboring_anchors)
else:
cost_matrices_step2 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
anchors=neighboring_anchors)
wp_list_refine = compute_warping_paths_from_cost_matrices(cost_matrices=cost_matrices_step2,
step_sizes=step_sizes,
step_weights=step_weights,
implementation=dtw_implementation)
wp = __refine_wp(wp, anchors, wp_list_refine, neighboring_anchors, neighboring_anchor_indices)
toc2 = time.perf_counter()
computation_time_it = toc2 - tic2 + toc1 - tic1
total_computation_time += computation_time_it
alignment = wp
feature_rate_old = feature_rate_new
cost_matrix_size_old = cost_matrix_size_new
if verbose and cost_matrices_step2 is not None:
sync_visualize_step2(ax,
cost_matrices_step2,
wp,
wp_step1,
num_rows_step1,
num_cols_step1,
anchors_step1,
neighboring_anchors)
print('Level {} computation time: {:.2f} seconds'.format(it, computation_time_it))
if verbose:
print('Computation time of MrMsDTW: {:.2f} seconds'.format(total_computation_time))
return alignment | 00dac7bdde14597e0daece958e65761ec01d1494 | 3,651,121 |
import numpy as np
def simulate_beta_binomial(
K, D, sigma2, theta, mu=0, invlink=logistic, seed=None):
"""Simulates from binomial Gaussian process with Beta latent noise.
Args:
K: Cell-state kernel, for example as generated by create_linear_kernel
or create_rbf_kernel.
D: Array of total counts.
sigma2: Kernel variance component.
theta: Dispersion parameter. If zero, sample from a regular Binomial
distribution instead.
mu: Optional fixed effects on a logit scale. Defaults to zero, which
corresponds to a binomial mean of 0.5.
invlink: Inverse link function. Defaults to invlogit.
seed: Random seed.
Returns:
List with alternative counts, latent rates as well as sampled binomial
means.
"""
D = atleast_2d_column(D)
n, p = D.shape
rng = np.random.default_rng(seed)
if sigma2 == 0:
latent = mu * np.ones((n, p))
else:
mu = mu * np.ones((n, 1))
latent = _sample_normal(p, mu, sigma2*K, rng)
beta_mean = invlink(latent)
if theta > 0:
binomial_mean = rng.beta(a=beta_mean / theta, b=(1-beta_mean) / theta)
else:
binomial_mean = beta_mean
a = rng.binomial(n=D, p=binomial_mean)
return {'A': a, 'beta_mean': beta_mean, 'binomial_mean': binomial_mean} | de4648af70a6b35c7b7f5edc2c151a98db6d7603 | 3,651,122 |
def convert_to_floats(tsi):
"""
    A helper function that takes all of the fields of a TaxSaveInputs model
    and converts them to floats, or lists of floats
"""
def numberfy_one(x):
if isinstance(x, float):
return x
else:
return float(x)
def numberfy(x):
if isinstance(x, list):
return [numberfy_one(i) for i in x]
else:
return numberfy_one(x)
attrs = vars(tsi)
return {k: numberfy(v) for k, v in list(attrs.items()) if v} | a6f93f402c547435fa9fe611481084215f52f13b | 3,651,123 |
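# Example for convert_to_floats above (illustrative, not from the source): the function
# only relies on vars(), so any attribute-bearing object stands in for a TaxSaveInputs model here.
from types import SimpleNamespace

tsi = SimpleNamespace(rate="0.35", brackets=["1000", 2000.0], note=None)
print(convert_to_floats(tsi))  # -> {'rate': 0.35, 'brackets': [1000.0, 2000.0]} (falsy fields dropped)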
from rdkit.Chem import Descriptors
def properties_filter(mol):
    """
    Filters a molecule by its properties: logP, MW, HBA, HBD, TPSA and NRB must fall within the allowed ranges
    """
#frag = Chem.rdmolops.GetMolFrags(mol) # remove '.'
#if len(frag) > 1:
#return False
MW_s = Descriptors.MolWt(mol) # MW
if MW_s < 250 or MW_s > 750:
return False
ALOGP_s = Descriptors.MolLogP(mol) # ALOGP
if ALOGP_s < -2 or ALOGP_s > 7:
return False
HBA_s = 0
for hba in Acceptors: # HBA
if mol.HasSubstructMatch(hba):
matches = mol.GetSubstructMatches(hba)
HBA_s += len(matches)
HBD_s = Descriptors.NumHDonors(mol) # HBD
if HBA_s + HBD_s >= 10:
return False
TPSA_s = Descriptors.TPSA(mol) # TPSA
if TPSA_s >= 150:
return False
NRB_s = Descriptors.NumRotatableBonds(mol) # NRB
if NRB_s >= 10:
return False
return True | bc124620baddb828b4c5cb82e0b0374bdb51bad7 | 3,651,124 |
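# Usage sketch for properties_filter above (assumptions: RDKit is installed, and the
# module-level Acceptors list of SMARTS queries, which the snippet never defines, is
# available; the single [O,N] pattern below is only a crude placeholder for it).
from rdkit import Chem

Acceptors = [Chem.MolFromSmarts("[O,N]")]
mol = Chem.MolFromSmiles("CC(=O)Nc1ccc(O)cc1")  # paracetamol
print(properties_filter(mol))  # False: molecular weight (~151) is below the 250 cutoff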
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b('basicConstraints'), False, b('CA:true'))
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 512)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b("20000101000000Z"))
cacert.set_notAfter(b("20200101000000Z"))
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 512)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b("20000101000000Z"))
icert.set_notAfter(b("20200101000000Z"))
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 512)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b("20000101000000Z"))
scert.set_notAfter(b("20200101000000Z"))
scert.add_extensions([
X509Extension(b('basicConstraints'), True, b('CA:false'))])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)] | 156a61e8159b1826def8fa33d5c5965add2c7f2e | 3,651,125 |
def build_job_spec_name(file_name, version="develop"):
"""
:param file_name:
:param version:
:return: str, ex. job-hello_world:develop
"""
name = file_name.split('.')[-1]
job_name = 'job-%s:%s' % (name, version)
return job_name | 55a45052852e6b24cb4370f7efe5c213da83e423 | 3,651,126 |
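# Example for build_job_spec_name above (illustrative, not from the source): the last
# dot-separated component of the file name becomes the job name.
print(build_job_spec_name("specs.hello_world"))          # -> 'job-hello_world:develop'
print(build_job_spec_name("specs.hello_world", "v1.2"))  # -> 'job-hello_world:v1.2'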
import torch
from kornia.morphology import dilation, erosion
def draw_mask(im: torch.Tensor, mask: torch.Tensor, t=0.2, color=(255, 255, 255), visualize_instances=True):
"""
Visualize mask where mask = 0.
Supports multiple instances.
mask shape: [N, C, H, W], where C is different instances in same image.
"""
assert len(mask.shape) in (3, 4), mask.shape
mask = mask.view(-1, *mask.shape[-3:])
im = im.view(-1, *im.shape[-3:])
assert im.dtype == torch.uint8, im.dtype
assert 0 <= t <= 1
if not visualize_instances:
mask = mask.any(dim=1, keepdim=True)
mask = mask.float()
kernel = torch.ones((3, 3), dtype=mask.dtype, device=mask.device)
outer_border = dilation(mask, kernel).logical_xor(mask)
outer_border = outer_border.any(dim=1, keepdim=True).repeat(1, 3, 1, 1) > 0
inner_border = erosion(mask, kernel).logical_xor(mask)
inner_border = inner_border.any(dim=1, keepdim=True).repeat(1, 3, 1, 1) > 0
mask = (mask == 0).any(dim=1, keepdim=True).repeat(1, 3, 1, 1)
color = torch.tensor(color).to(im.device).byte().view(1, 3, 1, 1)#.repeat(1, *im.shape[1:])
color = color.repeat(im.shape[0], 1, *im.shape[-2:])
im[mask] = (im[mask] * (1-t) + t * color[mask]).byte()
im[outer_border] = 255
im[inner_border] = 0
return im | 45d12dbc695755f0231ca2a8d0f8d1cdf2f423ff | 3,651,127 |
from flask import render_template
def view_about():
"""
shows the about page
:return:
:rtype:
"""
return render_template('about.html', title="About Flask AWS Template") | a364842c165864aba34605f3ffdd8c1d412015e8 | 3,651,128 |
import numpy
def viterbi(observed_values,
transition_probabilities,
emission_probabilities,
initial_distribution,
file_name,
log=True):
"""Calculates the viterbi-path for a given hidden-markov-model, heavily
    inspired by Abhisek Jana's blog post "Implement Viterbi Algorithm in Hidden
    Markov Model using Python and R", published on February 21, 2019.
    The blog post as well as the original source code can be found under http://www.adeveloperdiary.com/data-science/machine-learning/implement-viterbi-algorithm-in-hidden-markov-model-using-python-and-r/ #noqa
Args:
observed_values (np.array): visible part of the hidden-markov-model
transition_probabilities (np.array): transition probabilities for the
hidden part of the hidden-markov-model
emission_probabilities (np.array): transition probabilities for the
visible part of the hidden-markov-model
initial_distribution (np.array): probabilities for the initial status
log (bool) = True: The results are calculated using the logarithmic
projection
Returns:
(np.array): the viterbi-path for the given hidden-markov-model
"""
# Amount of steps
epochs = observed_values.shape[0]
# Amount of states
states = transition_probabilities.shape[0]
    # Highest probability to end in a specific state
omega = numpy.zeros((epochs, states), dtype=numpy.longdouble)
prev = numpy.zeros((epochs - 1, states), dtype=numpy.longdouble)
    # Two-dimensional array which holds all forward probabilities for every
    # state and epoch
    forward_probs = numpy.zeros((epochs, states), dtype=numpy.longdouble)
    # Two-dimensional array which holds all backward probabilities for every
    # state and epoch
    backward_probs = numpy.zeros((epochs, states), dtype=numpy.longdouble)
    # Since we start at the back of the list we need to init it with a one,
    # instead of a zero
    backward_probs[epochs - 1] = numpy.ones((states))
    # Two-dimensional array which holds all posteriori probabilities for every
    # state and epoch
    posteriori_probs = numpy.zeros((epochs, states), dtype=numpy.longdouble)
# Calculation of the probability for the observed initial state
if log:
omega[0, :] = numpy.log(initial_distribution * emission_probabilities[:, observed_values[0]-1]) #noqa
else:
omega[0, :] = initial_distribution * emission_probabilities[:, observed_values[0]-1] #noqa
forward_probs[0, :] = initial_distribution * emission_probabilities[:, observed_values[0]-1] #noqa
for epoch in range(1, epochs):
for state in range(1, -1, -1):
# Calculate the probability of obtaining the observed value for
# each possible transition.
if log:
probability = omega[epoch - 1] + \
numpy.log(transition_probabilities[:, state]) + \
numpy.log(emission_probabilities[state, observed_values[epoch]-1]) #noqa
else:
probability = omega[epoch - 1] * \
transition_probabilities[:, state] * \
emission_probabilities[state, observed_values[epoch]-1]
# This is our most probable state given previous state at epoch
prev[epoch - 1, state] = numpy.argmax(probability)
# save probability of the most probable state
omega[epoch, state] = numpy.max(probability)
            # Calculate forward probabilities for Posteriori-Decoding
            # The sum of the equations is calculated with matrix
            # multiplication (.dot), since that way a generic implementation
            # is provided!
if not log:
forward_probs[epoch, state] = emission_probabilities[state, observed_values[epoch]-1] * forward_probs[epoch - 1].dot(transition_probabilities[:, state]) #noqa
# Path Array
path = numpy.zeros(epochs)
# Find the most probable last hidden state
last_state = numpy.argmax(omega[epochs - 1, :]).astype(int)
# Start building the path
path[0] = last_state
# Start backtracking
backtrack_index = 1
for i in range(epochs - 2, -1, -1):
# Calculate the next hidden state based on its successor
next_hidden = prev[i, last_state]
# Add state to the path
path[backtrack_index] = next_hidden
# Save state for the next backtracking step
last_state = next_hidden.astype(int)
backtrack_index += 1
        # Posteriori-Decoding, calculate backward probabilities.
        # The sum of the equations is calculated with matrix
        # multiplication (.dot), since that way a generic implementation is
        # provided!
        # The results are at this point in the reversed order, since we started
        # to calculate them from the end!
if not log:
for state in range(states):
backward_probs[i, state] = (backward_probs[i+1]*emission_probabilities[:, observed_values[i]-1]).dot(transition_probabilities[state, :]) #noqa
# Flip the path array since we were backtracking
path = numpy.flip(path, axis=0)
# Convert numeric values to actual hidden states
result = ""
for element in path:
if element == 0:
result = result + "F"
else:
result = result + "L"
    # Posteriori-Decoding, calculate posteriori probabilities.
if not log:
# Flip the backward probability's to provide the probability's in
# the correct order
backward_probs = numpy.flip(backward_probs, axis=0)
increase = 1
for i in range(epochs):
# A counter to manage the constant multiplication used
if(i % 20 == 0):
# increase the multiplication factor
increase *= numpy.longdouble(10**5)
# Calculate the posteriori probability based on the given algorithm
posteriori_probs[i, :] = ((forward_probs[i, :]*increase) * (backward_probs[i, :]*increase)) / (numpy.max(omega[epochs-1, :])*increase) #noqa
# Remove the constant factor and override the current posteriori
# probability, to give a correct value
posteriori_probs[i, :] = posteriori_probs[i, :] / increase
numpy.savetxt("results\\posteriori-decoding"+file_name, posteriori_probs) #noqa
dirName = "results\\viterbi-Path"+file_name
text_file = open(dirName, "w")
text_file.write(result)
text_file.close()
return result | b063e5c5bbf566afb0f16175d9d229bef7a953f1 | 3,651,129 |
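# Usage sketch for viterbi above (illustrative, not from the source): the classic
# "dishonest casino" setup with a fair (F) and a loaded (L) die. Observed symbols are
# 1-based die faces, matching the emission_probabilities[:, observed_values[...] - 1]
# indexing; note the function also writes its result to a "results\\..." path as a side effect.
import numpy

observed = numpy.array([1, 2, 6, 6, 6, 3, 6, 6, 6, 5])
transition = numpy.array([[0.95, 0.05],    # F -> F, F -> L
                          [0.10, 0.90]])   # L -> F, L -> L
emission = numpy.array([[1 / 6.] * 6,              # fair die: uniform over faces 1-6
                        [0.1] * 5 + [0.5]])        # loaded die: face 6 favoured
initial = numpy.array([0.5, 0.5])

path = viterbi(observed, transition, emission, initial, "demo.txt", log=True)
print(path)  # a string of 'F'/'L' states, e.g. mostly 'L' over the run of sixes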
def extract_psf_fitting_names(psf):
"""
Determine the names of the x coordinate, y coordinate, and flux from
a model. Returns (xname, yname, fluxname)
"""
if hasattr(psf, 'xname'):
xname = psf.xname
elif 'x_0' in psf.param_names:
xname = 'x_0'
else:
raise ValueError('Could not determine x coordinate name for '
'psf_photometry.')
if hasattr(psf, 'yname'):
yname = psf.yname
elif 'y_0' in psf.param_names:
yname = 'y_0'
else:
raise ValueError('Could not determine y coordinate name for '
'psf_photometry.')
if hasattr(psf, 'fluxname'):
fluxname = psf.fluxname
elif 'flux' in psf.param_names:
fluxname = 'flux'
else:
raise ValueError('Could not determine flux name for psf_photometry.')
return xname, yname, fluxname | cee108dd1f97e506b60ba621c7f08efa7b5c33d7 | 3,651,130 |
def parseargs(p):
"""
Add arguments and `func` to `p`.
:param p: ArgumentParser
:return: ArgumentParser
"""
# TODO: Implement --date, --time and -t
p.set_defaults(func=func)
p.description = (
"Update the access and modification times of each "
+ "FILE to the current time. A FILE argument that does "
+ "not exist is created empty. A FILE argument string "
+ "of - is handled specially and causes touch to"
)
p.add_argument('FILE', nargs='*')
p.add_argument(
"-a", action="store_true", dest="accessonly", help="change only the access time"
)
p.add_argument(
"-c",
"--no-create",
action="store_true",
dest="nocreate",
help="do not create any files",
)
p.add_argument(
"-f", action="store_true", dest="thisoptionshouldbeignored", help="(ignored)"
)
p.add_argument(
"-m",
action="store_true",
dest="modonly",
help="change only the modification time",
)
p.add_argument(
"-r",
"--reference",
dest="reference",
help="use this file's times instead of current time",
)
return p | b6689761da04ebf3ac7e1b9682b4291c5dd4e9c1 | 3,651,131 |
def config_check_conformance(cookie, dn):
""" Auto-generated UCS XML API Method. """
method = ExternalMethod("ConfigCheckConformance")
method.cookie = cookie
method.dn = dn
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request | 598fbd665dcf18a35104400bf7debfc64347c3b5 | 3,651,132 |
def get_dist_to_port(geotiff):
"""
Extract "truth" dist_to_port from geotiff
"""
with Geotiff(geotiff) as tif:
dist_to_port = tif.values
return dist_to_port | 1a77c2ac905eea2d1796529297168dac394b4bdb | 3,651,133 |
import inspect
def build_dataset_exporter(
dataset_type, strip_none=True, warn_unused=True, **kwargs
):
"""Builds the :class:`DatasetExporter` instance for the given parameters.
Args:
dataset_type: the :class:`fiftyone.types.dataset_types.Dataset` type
strip_none (True): whether to exclude None-valued items from ``kwargs``
warn_unused (True): whether to issue warnings for any non-None unused
parameters encountered
**kwargs: keyword arguments to pass to the dataset exporter's
constructor via ``DatasetExporter(**kwargs)``
Returns:
a tuple of:
- the :class:`DatasetExporter` instance
- a dict of unused keyword arguments
"""
if dataset_type is None:
raise ValueError(
"You must provide a `dataset_type` in order to build a dataset "
"exporter"
)
if inspect.isclass(dataset_type):
dataset_type = dataset_type()
dataset_exporter_cls = dataset_type.get_dataset_exporter_cls()
if strip_none:
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs, unused_kwargs = fou.extract_kwargs_for_class(
dataset_exporter_cls, kwargs
)
try:
dataset_exporter = dataset_exporter_cls(**kwargs)
except Exception as e:
raise ValueError(
"Failed to construct exporter of type %s using the provided "
"parameters. See above for the error. You may need to supply "
"additional mandatory arguments. Please consult the documentation "
"of %s to learn more"
% (dataset_exporter_cls, dataset_exporter_cls)
) from e
if warn_unused:
for key, value in unused_kwargs.items():
if value is not None:
logger.warning(
"Ignoring unsupported parameter '%s' for exporter type %s",
key,
dataset_exporter_cls,
)
return dataset_exporter, unused_kwargs | 6a21c90ee2a9c297ad86515f5078221459b1fb01 | 3,651,134 |
def conditions(x):
"""
This function will check whether the constraints that apply to
our optimization are met or not.
"""
if ( (10/x[0]) > 66.0 ):
return False
elif ( (10/x[0] + 12/x[1]) > 88.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2]) > 107.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3]) > 128.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4]) > 157.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5]) > 192.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6]) > 222.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7]) > 242.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7] + 16/x[8]) > 268.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7] + 16/x[8] + 8/x[9]) > 292.0 ):
return False
return True | 263fdc3fd07aa656982401f71071fcd684b8625f | 3,651,135 |
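# Example for conditions above (illustrative, not from the source): x holds ten rates,
# and the cumulative sums of k/x[i] must stay below every successive limit.
print(conditions([1.0] * 10))  # True  (all cumulative sums stay under their bounds)
print(conditions([0.1] * 10))  # False (10 / 0.1 = 100 already exceeds the first bound of 66.0)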
import scipy
from scipy import stats
from numpy import isscalar
from typing import Mapping
from typing import OrderedDict
import logging
def load_reco_param(source):
"""Load reco parameterisation (energy-dependent) from file or dictionary.
Parameters
----------
source : string or mapping
Source of the parameterization. If string, treat as file path or
resource location and load from the file; this must yield a mapping. If
`source` is a mapping, it is used directly. See notes below on format.
Returns
-------
reco_params : OrderedDict
Keys are stringified flavintgroups and values are dicts of strings
representing the different reco dimensions and lists of distribution
properties. These latter have a 'fraction', a 'dist' and a 'kwargs' key.
The former two hold callables, while the latter holds a dict of
key-callable pairs ('loc', 'scale'), which can be evaluated at the desired
energies and passed into the respective `scipy.stats` distribution.
The distributions for a given dimension will be superimposed according
to their relative weights to form the reco kernels (via integration)
when called with energy values (parameterisations are functions of
energy only!).
Notes
-----
The mapping passed via `source` or loaded therefrom must have the format:
{
<flavintgroup_string>:
{
<dimension_string>:[
{
"dist": dist_id,
"fraction": val,
"kwargs": {
"loc": val,
"scale": val,
...
}
},
...
]
},
<flavintgroup_string>:
...
}
`flavintgroup_string`s must be parsable by
pisa.utils.flavInt.NuFlavIntGroup. Note that the `transform_groups` defined
in a pipeline config file using this must match the groupings defined
above.
`dimension_string`s denote the observables/dimensions whose reco error
distribution is parameterised (`"energy"` or `"coszen"`).
`dist_id` needs to be a string identifying a probability distribution/statistical
function provided by `scipy.stats`. No implicit assumptions about the
distribution will be made if the `"dist"` key is missing.
`"fraction"` holds the relative weight of the distribution. For a given
dimension, the sum of all fractions present must be 1.
Valid kwargs for distributions must at least include `"loc"` and `"scale"` -
these will be passed into the respective `scipy.stats` function.
`val`s can be one of the following:
- Callable with one argument
- String such that `eval(val)` yields a callable with one argument
"""
if not (source is None or isinstance(source, (basestring, Mapping))):
raise TypeError('`source` must be string, mapping, or None')
if isinstance(source, basestring):
orig_dict = from_file(source)
elif isinstance(source, Mapping):
orig_dict = source
else:
raise TypeError('Cannot load reco parameterizations from a %s'
% type(source))
valid_dimensions = ('coszen', 'energy')
required_keys = ('dist', 'fraction', 'kwargs')
# Build dict of parameterizations (each a callable) per flavintgroup
reco_params = OrderedDict()
for flavint_key, dim_dict in orig_dict.iteritems():
flavintgroup = NuFlavIntGroup(flavint_key)
reco_params[flavintgroup] = {}
for dimension in dim_dict.iterkeys():
dim_dist_list = []
if not isinstance(dimension, basestring):
raise TypeError("The dimension needs to be given as a string!"
" Allowed: %s."%valid_dimensions)
if dimension not in valid_dimensions:
raise ValueError("Dimension '%s' not recognised!"%dimension)
for dist_dict in dim_dict[dimension]:
dist_spec_dict = {}
# allow reading in even if kwargs not present - computation of
# transform will fail because "loc" and "scale" hard-coded
# requirement
for required in required_keys:
if required not in dist_dict:
raise ValueError("Found distribution property dict "
"without required '%s' key for "
"%s - %s!"
%(required, flavintgroup, dimension))
for k in dist_dict.iterkeys():
if k not in required_keys:
logging.warn("Unrecognised key in distribution"
" property dict: '%s'"%k)
dist_spec = dist_dict['dist']
if not isinstance(dist_spec, basestring):
raise TypeError(" The resolution function needs to be"
" given as a string!")
if not dist_spec:
raise ValueError("Empty string found for resolution"
" function!")
try:
dist = getattr(stats, dist_spec.lower())
except AttributeError:
try:
sp_ver_str = scipy.__version__
except:
sp_ver_str = "N/A"
raise AttributeError("'%s' is not a valid distribution"
" from scipy.stats (your scipy"
" version: '%s')."
%(dist_spec.lower(), sp_ver_str))
logging.debug("Found %s - %s resolution function: '%s'"
%(flavintgroup, dimension, dist.name))
dist_spec_dict['dist'] = dist
frac = dist_dict['fraction']
if isinstance(frac, basestring):
frac_func = eval(frac)
elif callable(frac):
frac_func = frac
else:
raise TypeError(
"Expected 'fraction' to be either a string"
" that can be interpreted by eval or a callable."
" Got '%s'." % type(frac)
)
dist_spec_dict['fraction'] = frac_func
kwargs = dist_dict['kwargs']
if not isinstance(kwargs, dict):
raise TypeError(
"'kwargs' must hold a dictionary. Got '%s' instead."
% type(kwargs)
)
dist_spec_dict['kwargs'] = kwargs
for kwarg, kwarg_spec in kwargs.iteritems():
if isinstance(kwarg_spec, basestring):
kwarg_eval = eval(kwarg_spec)
elif callable(kwarg_spec) or isscalar(kwarg_spec):
kwarg_eval = kwarg_spec
else:
raise TypeError(
"Expected kwarg '%s' spec to be either a string"
" that can be interpreted by eval, a callable or"
" a scalar. Got '%s'." % type(kwarg_spec)
)
dist_spec_dict['kwargs'][kwarg] = kwarg_eval
dim_dist_list.append(dist_spec_dict)
reco_params[flavintgroup][dimension] = dim_dist_list
return reco_params | 9d707f3403e0225223b6fe081158d31476b8281c | 3,651,136 |
def get_commit_ancestors_graph(refenv, starting_commit):
"""returns a DAG of all commits starting at some hash pointing to the repo root.
Parameters
----------
refenv : lmdb.Environment
lmdb environment where the commit refs are stored
starting_commit : string
commit hash to start creating the DAG from
Returns
-------
dict
a dictionary where each key is a commit hash encountered along the way,
        and its value is a list containing either one or two elements which
identify the child commits of that parent hash.
"""
parent_commit = starting_commit
commit_graph = {}
    seen = set([starting_commit])
more_work = []
end_commit = False
if parent_commit == '':
end_commit = True
while end_commit is not True:
childCommit = get_commit_ancestors(refenv, parent_commit)
if ((childCommit.master_ancestor == '') or (childCommit.master_ancestor in seen)):
end_commit = True
commit_graph[parent_commit] = [childCommit.master_ancestor]
if len(more_work) != 0:
master_commit = more_work.pop(0)
end_commit = False
else:
continue
elif childCommit.is_merge_commit is True:
master_commit = childCommit.master_ancestor
dev_commit = childCommit.dev_ancestor
more_work.append(dev_commit)
commit_graph[parent_commit] = [master_commit, dev_commit]
seen.add(master_commit)
seen.add(dev_commit)
else:
master_commit = childCommit.master_ancestor
commit_graph[parent_commit] = [master_commit]
seen.add(master_commit)
parent_commit = master_commit
return commit_graph | 078819cf0291a5e4e1e8ad4ea409f475c0df93fd | 3,651,137 |
def is_verification_handshake(rjson):
"""
Determines if the request is the Slack application APIs verification handshake
:rtype: bool
"""
# Check body contains the right keys
for x in ['token', 'challenge', 'type']:
if x not in rjson:
return False
# Check type is correct
if rjson['type'] != "url_verification":
return False
# Note: no need to check the token, we check the request is signed
# before this code is ever run.
# It's a verification request
log.info("Received URL verification handshake request")
return True | 1ceccd9ca578bd09e9629cd59e565bc523502030 | 3,651,138 |
def template_node(scope_key):
""" Create and return a new template node.
Parameters
----------
scope_key : object
The key for the local scope in the local storage maps.
Returns
-------
result : TemplateNode
A new compiler template node.
"""
node = TemplateNode()
node.scope_key = scope_key
return node | 4cd9721dd9f9f91cb84326391630274b8f5764a7 | 3,651,139 |
def GetAutoResult(chroot_path, buildbucket_id):
"""Returns the conversion of the result of 'cros buildresult'."""
# Calls 'cros buildresult' to get the status of the tryjob.
build_result = GetStatusFromCrosBuildResult(chroot_path, buildbucket_id)
# The string returned by 'cros buildresult' might not be in the mapping.
if build_result not in builder_status_mapping:
raise ValueError(
'"cros buildresult" return value is invalid: %s' % build_result)
return builder_status_mapping[build_result] | 705fbc011c11fa67d0b61f130a3b6f024a6dcd44 | 3,651,140 |
def rft(x):
"""
Real Fourier Transform
"""
# XXX figure out what exactly this is doing...
s = x.shape[-1]
xp = np.zeros(x.shape,dtype="complex64")
    xp[...,1:s//2] = x[...,1:-1:2]+x[...,2::2]*1.j
    xp[...,0] = x[...,0]/2.
    xp[...,s//2] = x[...,-1]/2.
return np.array(nmr_reorder(np.fft.fft(2*xp,axis=-1).real),dtype="float32") | 3a65f0a0059df4c74b223f3284e996b82d7ebf02 | 3,651,141 |
def yam_path(manifestsdir):
"""Bundletracker manifest."""
return join(manifestsdir, 'yam.json') | 5d1b5162bd8285d8e33c822a3b5edcc996452719 | 3,651,142 |
def single_from(iterable):
"""Check that an iterable contains one unique value, and return it."""
unique_vals = set(iterable)
if len(unique_vals) != 1:
raise ValueError('multiple unique values found')
return unique_vals.pop() | c8fb8864083195ad913ff1ddf0114b5a50068902 | 3,651,143 |
import requests
def vthash(filehash: str):
"""Returns the analysis data class for a file in VirusTotal's database"""
endpoint_path = f'/files/{filehash}'
endpoint = f"{api_base_url}{endpoint_path}"
r = requests.get(endpoint, headers=header)
if r.status_code == 404 and r.json()['error']['code'] == 'NotFoundError':
return None
elif r.status_code == 200:
return analysisdata(r) | bf4f334ad7a35e1141f9e00a44544fdd0709b411 | 3,651,144 |
def prod(x, axis=None, keepdims=False):
"""
product of all element in the array
Parameters
----------
x : tensor_like
input array
axis : int, tuple of ints
axis or axes along which a product is performed
keepdims : bool
keep dimensionality or not
Returns
-------
product : tensor_like
product of all element
"""
return Product(axis=axis, keepdims=keepdims).forward(x) | 8962e7b6abd16c9354f076c0c6d718b82fe44223 | 3,651,145 |
from typing import List
import difflib
def menu(queue: List[str] = None):
"""Fred Menu"""
fred_controller = FredController(queue)
an_input = "HELP_ME"
while True:
# There is a command in the queue
if fred_controller.queue and len(fred_controller.queue) > 0:
# If the command is quitting the menu we want to return in here
if fred_controller.queue[0] in ("q", "..", "quit"):
print("")
if len(fred_controller.queue) > 1:
return fred_controller.queue[1:]
return []
# Consume 1 element from the queue
an_input = fred_controller.queue[0]
fred_controller.queue = fred_controller.queue[1:]
# Print the current location because this was an instruction and we want user to know what was the action
if an_input and an_input.split(" ")[0] in fred_controller.CHOICES_COMMANDS:
print(f"{get_flair()} /economy/fred/ $ {an_input}")
# Get input command from user
else:
# Display help menu when entering on this menu from a level above
if an_input == "HELP_ME":
fred_controller.print_help()
# Get input from user using auto-completion
if session and gtff.USE_PROMPT_TOOLKIT and fred_controller.completer:
an_input = session.prompt(
f"{get_flair()} /economy/fred/ $ ",
completer=fred_controller.completer,
search_ignore_case=True,
)
# Get input from user without auto-completion
else:
an_input = input(f"{get_flair()} /economy/fred/ $ ")
try:
# Process the input command
fred_controller.queue = fred_controller.switch(an_input)
except SystemExit:
print(
f"\nThe command '{an_input}' doesn't exist on the /economy/fred menu.",
end="",
)
similar_cmd = difflib.get_close_matches(
an_input.split(" ")[0] if " " in an_input else an_input,
fred_controller.CHOICES,
n=1,
cutoff=0.7,
)
if similar_cmd:
if " " in an_input:
candidate_input = (
f"{similar_cmd[0]} {' '.join(an_input.split(' ')[1:])}"
)
if candidate_input == an_input:
an_input = ""
fred_controller.queue = []
print("\n")
continue
an_input = candidate_input
else:
an_input = similar_cmd[0]
print(f" Replacing by '{an_input}'.")
fred_controller.queue.insert(0, an_input)
else:
print("\n") | b8133dd748f0a48099359b6503edee6c9f875fb6 | 3,651,146 |
def generic_repr(name, obj, deferred):
"""
Generic pretty printer for NDTable and NDArray.
Output is of the form::
Array(3, int32)
values := [Numpy(ptr=60597776, dtype=int64, shape=(3,))];
metadata := [contigious]
layout := Identity;
[1 2 3]
"""
if deferred:
if _show_details:
header = "%s\n" % (name)
header += " datashape := %s \n" % str(obj._datashape)
header += " metadata := %s \n" % obj._metadata
else:
header = ''
else:
if _show_details:
header = "%s\n" % (name)
header += " datashape := %s \n" % str(obj._datashape)
header += " values := %s \n" % list(obj.space)
header += " metadata := %s \n" % obj._metadata
header += " layout := %s \n" % obj._layout.desc
else:
header = ''
# Show the data below
fullrepr = header + generic_str(obj, deferred)
return fullrepr | c9de29b792d943420b02455752f01a9c12fcf66c | 3,651,147 |
def build_model(X, y, ann_hidden_dim, num_passes=20000):
"""
    :param X: input features, shape (n_samples, ann_input_dim)
    :param y: integer class labels, shape (n_samples,)
    :param ann_hidden_dim: Number of nodes in the hidden layer
:param num_passes: Number of passes through the training data for gradient descent
:return: returns the parameters of artificial neural network for prediction using forward propagation of the parameters
"""
model = {}
# Initialize the parameters to random values.
np.random.seed(0)
w1 = np.random.randn(ann_input_dim, ann_hidden_dim) / np.sqrt(ann_input_dim)
c1 = np.zeros((1, ann_hidden_dim))
w2 = np.random.randn(ann_hidden_dim, ann_output_dim) / np.sqrt(ann_hidden_dim)
c2 = np.zeros((1, ann_output_dim))
# Batch gradient descent
for i in range(0, num_passes):
# Forward propagation
z1 = X.dot(w1) + c1
a1 = np.tanh(z1)
z2 = a1.dot(w2) + c2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Back propagation
delta3 = probs
delta3[range(len(X)), y] -= 1
dw2 = (a1.T).dot(delta3)
dc2 = np.sum(delta3, axis=0, keepdims=True)
delta2 = delta3.dot(w2.T) * (1 - np.power(a1, 2))
dw1 = np.dot(X.T, delta2)
dc1 = np.sum(delta2, axis=0)
# Add regularization terms (c1 and c2 don't have regularization terms)
dw2 += REG_LAMBDA * w2
dw1 += REG_LAMBDA * w1
# Gradient descent parameter update
w1 += -EPSILON * dw1
c1 += -EPSILON * dc1
w2 += -EPSILON * dw2
c2 += -EPSILON * dc2
# Assign new parameters to the model
model = {'w1': w1, 'c1': c1, 'w2': w2, 'c2': c2}
return model | bccdf828050af8a6ff5943eb84b574756f9f54ab | 3,651,148 |
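# Illustrative usage (editor's sketch, not part of the original source): assumes
# the module-level constants ann_input_dim=2, ann_output_dim=2, EPSILON and
# REG_LAMBDA are defined as in the original module, and that numpy is imported as np.
# X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
# y = np.array([0, 1, 1, 0])
# model = build_model(X, y, ann_hidden_dim=4, num_passes=5000)
# model['w1'].shape == (2, 4); model['w2'].shape == (4, 2)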
def g_square_dis(dm, x, y, s):
"""G square test for discrete data.
Args:
dm: the data matrix to be used (as a numpy.ndarray).
x: the first node (as an integer).
y: the second node (as an integer).
        s: the set of neighbouring nodes of x and y (as a set()).
            (The levels of each column are computed internally from dm.)
Returns:
p_val: the p-value of conditional independence.
"""
levels = np.amax(dm, axis=0) + 1
def _calculate_tlog(x, y, s, dof, levels, dm):
prod_levels = np.prod(list(map(lambda x: levels[x], s)))
nijk = np.zeros((levels[x], levels[y], prod_levels))
s_size = len(s)
z = []
for z_index in range(s_size):
z.append(s.pop())
pass
for row_index in range(dm.shape[0]):
i = dm[row_index, x]
j = dm[row_index, y]
k = []
k_index = 0
for s_index in range(s_size):
if s_index == 0:
k_index += dm[row_index, z[s_index]]
else:
lprod = np.prod(list(map(lambda x: levels[x], z[:s_index])))
k_index += (dm[row_index, z[s_index]] * lprod)
pass
pass
nijk[i, j, k_index] += 1
pass
nik = np.ndarray((levels[x], prod_levels))
njk = np.ndarray((levels[y], prod_levels))
for k_index in range(prod_levels):
nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)
njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)
pass
nk = njk.sum(axis = 0)
tlog = np.zeros((levels[x], levels[y], prod_levels))
tlog.fill(np.nan)
for k in range(prod_levels):
tx = np.array([nik[:, k]]).T
ty = np.array([njk[:, k]])
tdijk = tx.dot(ty)
tlog[:, :, k] = nijk[:, :, k] * nk[k] / tdijk
pass
return (nijk, tlog)
row_size = dm.shape[0]
s_size = len(s)
dof = ((levels[x] - 1) * (levels[y] - 1)
* np.prod(list(map(lambda x: levels[x], s))))
row_size_required = 10 * dof
nijk = None
if s_size < 5:
if s_size == 0:
nijk = np.zeros((levels[x], levels[y]))
for row_index in range(row_size):
i = dm[row_index, x]
j = dm[row_index, y]
nijk[i, j] += 1
pass
tx = np.array([nijk.sum(axis = 1)]).T
ty = np.array([nijk.sum(axis = 0)])
tdij = tx.dot(ty)
tlog = nijk * row_size / tdij
pass
if s_size > 0:
nijk, tlog = _calculate_tlog(x, y, s, dof, levels, dm)
pass
pass
else:
nijk = np.zeros((levels[x], levels[y], 1))
i = dm[0, x]
j = dm[0, y]
k = []
for z in s:
k.append(dm[:, z])
pass
k = np.array(k).T
parents_count = 1
parents_val = np.array([k[0, :]])
nijk[i, j, parents_count - 1] = 1
for it_sample in range(1, row_size):
is_new = True
i = dm[it_sample, x]
j = dm[it_sample, y]
tcomp = parents_val[:parents_count, :] == k[it_sample, :]
for it_parents in range(parents_count):
if np.all(tcomp[it_parents, :]):
nijk[i, j, it_parents] += 1
is_new = False
break
pass
if is_new is True:
parents_count += 1
parents_val = np.r_[parents_val, [k[it_sample, :]]]
nnijk = np.zeros((levels[x], levels[y], parents_count))
for p in range(parents_count - 1):
nnijk[:, :, p] = nijk[:, :, p]
pass
nnijk[i, j, parents_count - 1] = 1
nijk = nnijk
pass
pass
nik = np.ndarray((levels[x], parents_count))
njk = np.ndarray((levels[y], parents_count))
for k_index in range(parents_count):
nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)
njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)
pass
nk = njk.sum(axis = 0)
tlog = np.zeros((levels[x], levels[y], parents_count))
tlog.fill(np.nan)
for k in range(parents_count):
tx = np.array([nik[:, k]]).T
ty = np.array([njk[:, k]])
tdijk = tx.dot(ty)
tlog[:, :, k] = nijk[:, :, k] * nk[k] / tdijk
pass
pass
log_tlog = np.log(tlog)
G2 = np.nansum(2 * nijk * log_tlog)
if dof == 0:
p_val = 1
else:
p_val = chi2.sf(G2, dof)
if s_size == 0:
nijk = nijk.reshape((nijk.shape[0], nijk.shape[1], 1))
log_tlog = log_tlog.reshape((log_tlog.shape[0], log_tlog.shape[1], 1))
return G2, p_val, nijk, log_tlog | 2f0f0b44a919177c0f5775a34e0493c62720a21d | 3,651,149 |
def start(name):
"""
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
"""
cmd = "/usr/sbin/svcadm enable -s -t {0}".format(name)
retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
if not retcode:
return True
if retcode == 3:
# Return code 3 means there was a problem with the service
# A common case is being in the 'maintenance' state
# Attempt a clear and try one more time
clear_cmd = "/usr/sbin/svcadm clear {0}".format(name)
__salt__["cmd.retcode"](clear_cmd, python_shell=False)
return not __salt__["cmd.retcode"](cmd, python_shell=False)
return False | 607b559281c6b13002d7237b8c4409533074d0bc | 3,651,150 |
from typing import Dict
def line_coloring(num_vertices) -> Dict:
"""
Creates an edge coloring of the line graph, corresponding to the optimal
line swap strategy, given as a dictionary where the keys
correspond to the different colors and the values are lists of edges (where edges
are specified as tuples). The graph coloring consists of one color for all even-numbered
edges and one color for all odd-numbered edges.
Args:
num_vertices: The number of vertices in the line graph
Returns:
Graph coloring as a dictionary of edge lists
"""
line_coloring = {}
for i in range(num_vertices - 1):
line_coloring[(i, i + 1)] = i % 2
line_coloring[(i + 1, i)] = i % 2
return line_coloring | 423e626ecbf4f48e0a192241375484a077fbe0b2 | 3,651,151 |
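# Illustrative usage (editor's sketch, not part of the original source): both
# directions of each edge share a color, and colors alternate 0/1 along the line.
assert line_coloring(3) == {(0, 1): 0, (1, 0): 0, (1, 2): 1, (2, 1): 1}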
def flatten_outputs(predictions, number_of_classes):
"""Flatten the prediction batch except the prediction dimensions"""
logits_permuted = predictions.permute(0, 2, 3, 1)
logits_permuted_cont = logits_permuted.contiguous()
outputs_flatten = logits_permuted_cont.view(-1, number_of_classes)
return outputs_flatten
# outputs_flatten = torch.tensor(predictions | c58fb965443a5402e9bec32afaebe9376c74653f | 3,651,152 |
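# Illustrative usage (editor's sketch, not part of the original source; assumes
# `import torch` at module level): a batch of 2 predictions with 21 classes over
# a 4x4 grid flattens to one row per pixel.
# flatten_outputs(torch.zeros(2, 21, 4, 4), 21).shape -> torch.Size([32, 21])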
def get_r_vals(cell_obj):
"""Get radial distances for inner and outer membranes for the cell object"""
r_i = cell_obj.coords.calc_rc(cell_obj.data.data_dict['storm_inner']['x'],
cell_obj.data.data_dict['storm_inner']['y'])
r_o = cell_obj.coords.calc_rc(cell_obj.data.data_dict['storm_outer']['x'],
cell_obj.data.data_dict['storm_outer']['y'])
return r_i, r_o | d51c926791845006dfe9a97cbd9c82c041ea701b | 3,651,153 |
def get_all_migrations(ctxt, inactive=0):
"""Get all non-deleted source hypervisors.
Pass true as argument if you want deleted sources returned also.
"""
return db.migration_get_all(ctxt, inactive) | c8e8ae084ca42d560e79412e4ff56d79059055a6 | 3,651,154 |
def extract(input_data: str) -> tuple:
"""take input data and return the appropriate data structure"""
rules = input_data.split('\n')
graph = dict()
reverse_graph = dict()
for rule in rules:
container, contents = rule.split('contain')
container = ' '.join(container.split()[:2])
content_graph = dict()
for content in contents.split(','):
if content == " no other bags.":
break
parts = content.split()
amount = int(parts[0])
color = ' '.join(parts[1:3])
content_graph[color] = amount
if color in reverse_graph.keys():
reverse_graph[color].append(container)
else:
reverse_graph[color] = [container]
graph[container] = content_graph
return (graph, reverse_graph) | f71cdc23fdfaf6ef0d054c0c68e513db66289c12 | 3,651,155 |
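# Illustrative usage (editor's sketch, not part of the original source):
graph, reverse_graph = extract(
    "light red bags contain 1 bright white bag, 2 muted yellow bags.")
assert graph == {'light red': {'bright white': 1, 'muted yellow': 2}}
assert reverse_graph == {'bright white': ['light red'], 'muted yellow': ['light red']}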
def get_total_indemnity(date_of_joining, to_date):
"""To Calculate the total Indemnity of an employee based on employee's Joining date.
Args:
date_of_joining ([date]): Employee's Joining Date
        to_date ([date]): up until date
Returns:
total_allocation: Total Indemnity Allocation calculated from joining date till 'to_date'.
"""
#get no. of year and days employee has worked.
total_working_year = relativedelta(to_date, date_of_joining ).years
total_working_days = (to_date - date_of_joining).days
#reason: Any no. of days after completing 5 years as different calculation.
five_year_in_days = 5*365
# up until 5 years of working year, the monthly calculation takes "15 days" salary in to consideration.
if total_working_year < 5 or (total_working_year == 5 and total_working_days == 5*365):
#15 days salary is divided over a year and that becomes each day's allocation.
return 15 / 365 * total_working_days
elif total_working_year >= 5 and total_working_days > 5*365:
#calculation takes 15 days salary for 5 years and 30 days salary after 5 years
return (15 / 365 * five_year_in_days) + (30 / 365 * (total_working_days-five_year_in_days)) | 1b09d0dc7971ab4c3d63c303a93f64da924dcfa4 | 3,651,156 |
import os
def run_species_phylogeny_iqtree(roary_folder, collection_dir, threads=8, overwrite=False, timing_log=None):
"""
Run iqtree to create phylogeny tree from core gene alignment. If the list of samples has
not changed, and none of the samples has changed, the existing tree will be kept unless
overwrite is set to True
Parameters
----------
    roary_folder: str
        path to the roary output folder that contains the core gene alignment
collection_dir: str
working directory of the collection
threads: int
number of threads to use
overwrite: bool
whether to overwrite existing result even if input did not change
timing_log: str
file to log timing
    Returns
    -------
    str
        path to the phylogeny output folder
"""
phylogeny_folder = os.path.join(collection_dir, 'phylogeny')
if not os.path.exists(phylogeny_folder):
os.makedirs(phylogeny_folder)
#report['phylogeny'] = phylogeny_folder
phylogeny_file = os.path.join(phylogeny_folder, 'core_gene_alignment.treefile')
if os.path.isfile(phylogeny_file) and (not overwrite):
logger.info('phylogeny tree exists and input has not changed, skip phylogeny analysis')
return phylogeny_folder
aln_file = os.path.join(phylogeny_folder, 'core_gene_alignment.aln.gz')
if not os.path.isfile(aln_file):
        aln_file = os.path.join(roary_folder, 'core_gene_alignment.aln.gz')
cmd = 'iqtree -s {alignment} --prefix {prefix} -B 1000 -T {threads} -czb -keep-ident'.format(
alignment=aln_file, prefix=phylogeny_folder+'/core_gene_alignment', threads=threads)
ret = run_command(cmd, timing_log)
if ret != 0:
raise Exception('iqtree fail to create phylogeny tree from core gene alignment!')
return phylogeny_folder | a5a6cd8e77cc3622264f4827753a2260e38d9f70 | 3,651,157 |
def api_2_gamma_oil(value):
"""
converts density in API(American Petroleum Institute gravity) to gamma_oil (oil relative density by water)
:param value: density in API(American Petroleum Institute gravity)
:return: oil relative density by water
"""
    return 141.5 / (value + 131.5)
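# Worked example (editor's note, based on the standard relation SG = 141.5 / (API + 131.5)):
# api_2_gamma_oil(10.0) -> 1.0 (an API gravity of 10 corresponds to the density of water)
# api_2_gamma_oil(30.0) -> 141.5 / 161.5 ~ 0.876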
def compute_metrics(pred, label):
"""Compute metrics like True/False Positive, True/False Negative.`
MUST HAVE ONLY 2 CLASSES: BACKGROUND, OBJECT.
Args:
pred (numpy.ndarray): Prediction, one-hot encoded. Shape: [2, H, W], dtype: uint8
label (numpy.ndarray): Ground Truth, one-hot encoded. Shape: [H, W], dtype: uint8
Returns:
float: IOU, TP, TN, FP, FN
"""
    if len(pred.shape) > 2:
        raise ValueError("pred should have shape [H, W], got: {}".format(pred.shape))
if len(label.shape) > 2:
raise ValueError("label should have shape [H, W], got: {}".format(label.shape))
total_pixels = pred.shape[0] * pred.shape[1]
tp = np.sum(np.logical_and(pred == 1, label > 0))
tn = np.sum(np.logical_and(pred == 0, label == 0))
fp = np.sum(np.logical_and(pred == 1, label == 0))
fn = np.sum(np.logical_and(pred == 0, label > 0))
if (tp + tn + fp + fn) != total_pixels:
raise ValueError('The number of total pixels ({}) and sum of tp,fp,tn,fn ({}) is not equal'.format(
total_pixels, (tp + tn + fp + fn)))
iou = tp / (tp + fp + fn)
_tp = tp / np.sum(label == 1)
tp_rate = (tp / (tp + fn)) * 100
fp_rate = (fp / (fp + tn)) * 100
tn_rate = (tn / (tn + fp)) * 100
fn_rate = (fn / (fn + tp)) * 100
return iou, tp_rate, tn_rate, fp_rate, fn_rate | be8415c997197c06a5998671ffe09e70c6d3719c | 3,651,159 |
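# Illustrative usage (editor's sketch, not part of the original source; assumes
# numpy imported as np): a perfect prediction on a 2x2 binary mask gives
# IOU 1.0 and 100% TP/TN rates.
# pred = np.array([[1, 0], [0, 1]], dtype=np.uint8)
# compute_metrics(pred, pred.copy()) -> (1.0, 100.0, 100.0, 0.0, 0.0)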
import jinja2
def expand_template(template, variables, imports, raw_imports=None):
"""Expand a template."""
if raw_imports is None:
raw_imports = imports
env = jinja2.Environment(loader=OneFileLoader(template))
template = env.get_template(template)
return template.render(imports=imports, variables=variables, raw_imports=raw_imports) | c5ebe1610a6e2fa9e0b18afa7d23652c1f7c25ba | 3,651,160 |
from typing import Any
from operator import truth
def __contains__(container: Any, item: Any, /) -> bool:
"""Check if the first item contains the second item: `b in a`."""
container_type = type(container)
try:
contains_method = debuiltins._mro_getattr(container_type, "__contains__")
except AttributeError:
# Cheating until `for` is unravelled (and thus iterators).
return debuiltins.any(x is item or x == item for x in container)
else:
if contains_method is None:
raise TypeError(f"{container_type.__name__!r} object is not a container")
is_contained = contains_method(container, item)
return truth(is_contained) | b58a5f400895df472f83a5e2410dff9cd112fc91 | 3,651,161 |
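# Illustrative usage (editor's sketch, not part of the original source;
# `debuiltins` is this project's companion module providing _mro_getattr and any):
# __contains__([1, 2, 3], 2) -> True   (dispatches to list.__contains__)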
def generate_search_url(request_type):
"""Given a request type, generate a query URL for kitsu.io."""
url = BASE_URL_KITSUIO.format(request_type)
return url | 9508d909fb8eb018770b2191f7d62ccb3881f285 | 3,651,162 |
from typing import Callable
def register_magic(func: Callable[[Expr], Expr]):
"""
Make a magic command more like Julia's macro system.
Instead of using string, you can register a magic that uses Expr as the
input and return a modified Expr. It is usually easier and safer to
execute metaprogramming this way.
Parameters
----------
func : Callable[[Expr], Expr]
Function that will used as a magic command.
Returns
-------
Callable
Registered function itself.
Examples
--------
.. code-block:: python
@register_magic
def print_code(expr):
print(expr)
return expr
The ``print_code`` magic is registered as an ipython magic.
.. code-block:: python
%print_code a = 1
.. code-block:: python
%%print_code
def func(a):
return a + 1
"""
@register_line_cell_magic
@needs_local_scope
@wraps(func)
def _ipy_magic(line: str, cell: str = None, local_ns=None):
if cell is None:
cell = line
block = parse(cell)
block_out = func(block)
return block_out.eval(local_ns, local_ns)
return func | 06d93f8a48758dc39679af396c10a54927e3696e | 3,651,163 |
import os
def flowcellDirFastqToBwaBamFlow(self, taskPrefix="", dependencies=set()) :
"""
Takes as input 'flowcellFastqDir' pointing to the CASAVA 1.8 flowcell
project/sample fastq directory structure. For each project/sample,
the fastqs are aligned using BWA, sorted and merged into a single
BAM file. The bam output is placed in a parallel project/sample
directory structure below 'flowcellBamDir'
params:
samtoolsBin
flowcellFastqDir
flowcellBamDir
calls:
FastqPairToBwaBamFlow
supplies:
bamFile
fastq1File
fastq2File
"""
#
# 1. separate fastqs into matching pairs:
#
fqs = {}
fqDigger = FileDigger(".fastq.gz", ["Project_", "Sample_"])
for (project, sample, fqPath) in fqDigger.getNextFile(self.params.flowcellFastqDir) :
if (self.params.sampleNameList != None) and \
(len(self.params.sampleNameList) != 0) and \
(sample not in self.params.sampleNameList) : continue
fqFile = os.path.basename(fqPath)
w = (fqFile.split(".")[0]).split("_")
if len(w) != 5 :
raise Exception("Unexpected fastq filename format: '%s'" % (fqPath))
(sample2, index, lane, read, num) = w
if sample != sample2 :
raise Exception("Fastq name sample disagrees with directory sample: '%s;" % (fqPath))
key = (project, sample, index, lane, num)
if key not in fqs : fqs[key] = [None, None]
readNo = int(read[1])
if fqs[key][readNo - 1] != None :
            raise Exception("Unresolvable repeated fastq file pattern in sample: '%s'" % (fqPath))
fqs[key][readNo - 1] = fqPath
ensureDir(self.params.flowcellBamDir)
#
# 2. run all fastq pairs through BWA:
#
nextWait = set()
for key in fqs.keys() :
(project, sample, index, lane, num) = key
sampleBamDir = os.path.join(self.params.flowcellBamDir, "Project_" + project, "Sample_" + sample)
ensureDir(sampleBamDir)
keytag = "_".join(key)
self.params.bamFile = os.path.join(sampleBamDir, keytag + ".bam")
self.params.fastq1File = fqs[key][0]
self.params.fastq2File = fqs[key][1]
nextWait.add(self.addWorkflowTask(preJoin(taskPrefix, keytag), FastqPairToBwaBamFlow(self.params), dependencies=dependencies))
return nextWait | 6f18083fc2c9e4a260e87c332d40d9322f2c7bc1 | 3,651,164 |
def ValidatePregnum(resp):
"""Validate pregnum in the respondent file.
resp: respondent DataFrame
"""
# read the pregnancy frame
preg = nsfg.ReadFemPreg()
# make the map from caseid to list of pregnancy indices
preg_map = nsfg.MakePregMap(preg)
# iterate through the respondent pregnum series
for index, pregnum in resp.pregnum.items():
caseid = resp.caseid[index]
indices = preg_map[caseid]
# check that pregnum from the respondent file equals
# the number of records in the pregnancy file
if len(indices) != pregnum:
print(caseid, len(indices), pregnum)
return False
return True | a51f3af130cbad4a5cd3d3c9707788f783302000 | 3,651,165 |
def is_super_admin(view, view_args, view_kwargs, *args, **kwargs):
"""
Permission function for things allowed exclusively to super admin.
Do not use this if the resource is also accessible by a normal admin, use the is_admin decorator instead.
:return:
"""
user = current_user
if not user.is_super_admin:
return ForbiddenError({'source': ''}, 'Super admin access is required').respond()
return view(*view_args, **view_kwargs) | 503550fcd52e62053d42a3059aba298009d3eb01 | 3,651,166 |
def normalize_depth(val, min_v, max_v):
"""
    Normalize depth values to 0-255; closer distances get higher values
    (similar to a stereo vision disparity map).
"""
return (((max_v - val) / (max_v - min_v)) * 255).astype(np.uint8) | 431cda7af30ef1127c60069b6958ef4d8234eaae | 3,651,167 |
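# Illustrative usage (editor's sketch, not part of the original source; assumes
# numpy imported as np): nearer points map to larger values, as in a disparity map.
# normalize_depth(np.array([0.0, 0.5, 1.0]), 0.0, 1.0) -> array([255, 127, 0], dtype=uint8)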
def parse_iori_block(block):
"""Turn IORI data blocks into `IoriData` objects.
Convert rotation from Quaternion format to Euler angles.
Parameters
----------
block: list of KVLItem
A list of KVLItem corresponding to a IORI data block.
Returns
-------
iori_data: IoriData
A IoriData object holding the IORI information of a block.
"""
block_dict = {
s.key: s for s in block
}
data = block_dict['IORI'].value * 1.0 / block_dict["SCAL"].value
rotation = np.array([R.from_quat(q).as_euler('zyx', degrees=True) for q in data])
z, y, x = rotation.T
return IoriData(
cts = block_dict['STMP'].value,
z = z,
y = y,
x = x,
) | b9ad59677e51c30b2bec51a0503fc2718cde0f7d | 3,651,168 |
def ungap_all(align):
"""
Removes all gaps (``-`` symbols) from all sequences of the :class:`~data.Align`
instance *align* and returns the resulting ~data.Container instance.
"""
result = data.Container()
for n,s,g in align:
result.append(n, s.translate(None, '-'), g)
return result | 511b6aeb7fc262b733a97b5180a23c7f044fea06 | 3,651,169 |
def expandBcv(bcv):
"""If the bcv is an interval, expand if.
"""
if len(bcv) == 6:
return bcv
else:
return "-".join(splitBcv(bcv)) | abfb1bf31acca579fecb526d571b32cefa7ecd61 | 3,651,170 |
import os
def get_starting_dir_abs_path() -> str:
"""
Returns the absolute path to the starting directory of the project. Starting directory is used for example for
turning relative paths (from Settings) into absolute paths (those paths are relative to the starting directory).
"""
if _starting_dir is None:
dir_path = os.getenv("QF_STARTING_DIRECTORY")
if dir_path is None:
raise KeyError("Starting directory wasn't set. Use set_starting_dir_abs_path() function "
"or set the environment variable QF_STARTING_DIRECTORY to the proper value")
else:
return dir_path
else:
return _starting_dir | af91f10a2dd9af8ba010f75fe295acb2406e9372 | 3,651,171 |
def cluster_profile_platform(cluster_profile):
"""Translate from steps.cluster_profile to workflow.as slugs."""
if cluster_profile == 'azure4':
return 'azure'
if cluster_profile == 'packet':
return 'metal'
return cluster_profile | 0a01f566562002fe43c3acbb00d5efcc09d25314 | 3,651,172 |
def get_price_lambda_star_lp_1_cvxpy(w: np.ndarray, c_plus: np.ndarray, psi_plus: np.ndarray) \
-> float:
"""
Computes lambda_star based on dual program of the projection of w_star.
:param w: current state in workload space.
:param c_plus: vector normal to the level set in the monotone region 'right above' the face.
:param psi_plus: vector normal to the closest face.
:return: lambda_star: price of random oscillations along the closest face.
"""
assert not StrategicIdlingHedging._is_w_inside_artificial_monotone_region(w, psi_plus)
num_wl = w.shape[0]
lambda_var = cvx.Variable(1)
v_dagger_var = cvx.Variable((num_wl, 1))
objective = cvx.Maximize(v_dagger_var.T @ w)
constraints = [c_plus - v_dagger_var - lambda_var * psi_plus == 0,
v_dagger_var >= 0]
prob = cvx.Problem(objective, constraints)
_ = prob.solve(solver=cvx.SCS, eps=1e-8)
lambda_star = lambda_var.value[0]
if prob.status != 'optimal':
lambda_star = None
return lambda_star | 0a1a658cd86a0253fe3caf8a5e162393926b351a | 3,651,173 |
import typing
import random
def _get_nodes(
network: typing.Union[NetworkIdentifier, Network],
sample_size: typing.Optional[int],
predicate: typing.Callable,
) -> typing.List[Node]:
"""Decaches domain objects: Node.
"""
nodeset = [i for i in get_nodes(network) if predicate(i)]
if sample_size is not None:
sample_size = min(sample_size, len(nodeset))
return nodeset if sample_size is None else random.sample(nodeset, sample_size) | f3be401c2fd0adf58f10b679d254ff2075f4546b | 3,651,174 |
def cdl_key():
"""Four-class system (grain, forage, vegetable, orchard. Plus 5: non-ag/undefined"""
key = {1: ('Corn', 1),
2: ('Cotton', 1),
3: ('Rice', 1),
4: ('Sorghum', 1),
5: ('Soybeans', 1),
6: ('Sunflower', 1),
7: ('', 5),
8: ('', 5),
9: ('', 5),
10: ('Peanuts', 1),
11: ('Tobacco', 2),
12: ('Sweet Corn', 1),
13: ('Pop or Orn Corn', 1),
14: ('Mint', 2),
15: ('', 5),
16: ('', 5),
17: ('', 5),
18: ('', 5),
19: ('', 5),
20: ('', 5),
21: ('Barley', 1),
22: ('Durum Wheat', 1),
23: ('Spring Wheat', 1),
24: ('Winter Wheat', 1),
25: ('Other Small Grains', 1),
26: ('Dbl Crop WinWht/Soybeans', 1),
27: ('Rye', 1),
28: ('Oats', 1),
29: ('Millet', 1),
30: ('Speltz', 1),
31: ('Canola', 1),
32: ('Flaxseed', 1),
33: ('Safflower', 1),
34: ('Rape Seed', 1),
35: ('Mustard', 1),
36: ('Alfalfa', 3),
37: ('Other Hay/Non Alfalfa', 3),
38: ('Camelina', 1),
39: ('Buckwheat', 1),
40: ('', 5),
41: ('Sugarbeets', 2),
42: ('Dry Beans', 2),
43: ('Potatoes', 2),
44: ('Other Crops', 2),
45: ('Sugarcane', 2),
46: ('Sweet Potatoes', 2),
47: ('Misc Vegs & Fruits', 2),
48: ('Watermelons', 2),
49: ('Onions', 2),
50: ('Cucumbers', 2),
51: ('Chick Peas', 2),
52: ('Lentils', 2),
53: ('Peas', 2),
54: ('Tomatoes', 2),
55: ('Caneberries', 2),
56: ('Hops', 2),
57: ('Herbs', 2),
58: ('Clover/Wildflowers', 3),
59: ('Sod/Grass Seed', 3),
60: ('Switchgrass', 3),
61: ('Fallow/Idle Cropland', 3),
62: ('Pasture/Grass', 3),
63: ('Forest', 5),
64: ('Shrubland', 5),
65: ('Barren', 5),
66: ('Cherries', 4),
67: ('Peaches', 4),
68: ('Apples', 4),
69: ('Grapes', 4),
70: ('Christmas Trees', 4),
71: ('Other Tree Crops', 4),
72: ('Citrus', 4),
73: ('', 5),
74: ('Pecans', 4),
75: ('Almonds', 4),
76: ('Walnuts', 4),
77: ('Pears', 4),
78: ('', 5),
79: ('', 5),
80: ('', 5),
81: ('Clouds/No Data', 5),
82: ('Developed', 5),
83: ('Water', 5),
84: ('', 5),
85: ('', 5),
86: ('', 5),
87: ('Wetlands', 5),
88: ('Nonag/Undefined', 5),
89: ('', 5),
90: ('', 5),
91: ('', 5),
92: ('Aquaculture', 5),
93: ('', 5),
94: ('', 5),
95: ('', 5),
96: ('', 5),
97: ('', 5),
98: ('', 5),
99: ('', 5),
100: ('', 5),
101: ('', 5),
102: ('', 5),
103: ('', 5),
104: ('', 5),
105: ('', 5),
106: ('', 5),
107: ('', 5),
108: ('', 5),
109: ('', 5),
110: ('', 5),
111: ('Open Water', 5),
112: ('Perennial Ice/Snow', 5),
113: ('', 5),
114: ('', 5),
115: ('', 5),
116: ('', 5),
117: ('', 5),
118: ('', 5),
119: ('', 5),
120: ('', 5),
121: ('Developed/Open Space', 5),
122: ('Developed/Low Intensity', 5),
123: ('Developed/Med Intensity', 5),
124: ('Developed/High Intensity', 5),
125: ('', 5),
126: ('', 5),
127: ('', 5),
128: ('', 5),
129: ('', 5),
130: ('', 5),
131: ('Barren', 5),
132: ('', 5),
133: ('', 5),
134: ('', 5),
135: ('', 5),
136: ('', 5),
137: ('', 5),
138: ('', 5),
139: ('', 5),
140: ('', 5),
141: ('Deciduous Forest', 5),
142: ('Evergreen Forest', 5),
143: ('Mixed Forest', 5),
144: ('', 5),
145: ('', 5),
146: ('', 5),
147: ('', 5),
148: ('', 5),
149: ('', 5),
150: ('', 5),
151: ('', 5),
152: ('Shrubland', 5),
153: ('', 5),
154: ('', 5),
155: ('', 5),
156: ('', 5),
157: ('', 5),
158: ('', 5),
159: ('', 5),
160: ('', 5),
161: ('', 5),
162: ('', 5),
163: ('', 5),
164: ('', 5),
165: ('', 5),
166: ('', 5),
167: ('', 5),
168: ('', 5),
169: ('', 5),
170: ('', 5),
171: ('', 5),
172: ('', 5),
173: ('', 5),
174: ('', 5),
175: ('', 5),
176: ('Grassland/Pasture', 5),
177: ('', 5),
178: ('', 5),
179: ('', 5),
180: ('', 5),
181: ('', 5),
182: ('', 5),
183: ('', 5),
184: ('', 5),
185: ('', 5),
186: ('', 5),
187: ('', 5),
188: ('', 5),
189: ('', 5),
190: ('Woody Wetlands', 5),
191: ('', 5),
192: ('', 5),
193: ('', 5),
194: ('', 5),
195: ('Herbaceous Wetlands', 5),
196: ('', 5),
197: ('', 5),
198: ('', 5),
199: ('', 5),
200: ('', 5),
201: ('', 5),
202: ('', 5),
203: ('', 5),
204: ('Pistachios', 4),
205: ('Triticale', 1),
206: ('Carrots', 2),
207: ('Asparagus', 2),
208: ('Garlic', 2),
209: ('Cantaloupes', 2),
210: ('Prunes', 2),
211: ('Olives', 2),
212: ('Oranges', 3),
213: ('Honeydew Melons', 2),
214: ('Broccoli', 2),
215: ('Avocados', 2),
216: ('Peppers', 2),
217: ('Pomegranates', 4),
218: ('Nectarines', 4),
219: ('Greens', 2),
220: ('Plums', 4),
221: ('Strawberries', 2),
222: ('Squash', 2),
223: ('Apricots', 4),
224: ('Vetch', 3),
225: ('Dbl Crop WinWht/Corn', 1),
226: ('Dbl Crop Oats/Corn', 1),
227: ('Lettuce', 2),
228: ('', 1),
229: ('Pumpkins', 2),
230: ('Dbl Crop Lettuce/Durum Wht', 2),
231: ('Dbl Crop Lettuce/Cantaloupe', 2),
232: ('Dbl Crop Lettuce/Cotton', 2),
233: ('Dbl Crop Lettuce/Barley', 2),
234: ('Dbl Crop Durum Wht/Sorghum', 1),
235: ('Dbl Crop Barley/Sorghum', 1),
236: ('Dbl Crop WinWht/Sorghum', 1),
237: ('Dbl Crop Barley/Corn', 1),
238: ('Dbl Crop WinWht/Cotton', 1),
239: ('Dbl Crop Soybeans/Cotton', 1),
240: ('Dbl Crop Soybeans/Oats', 1),
241: ('Dbl Crop Corn/Soybeans', 1),
242: ('Blueberries', 2),
243: ('Cabbage', 2),
244: ('Cauliflower', 2),
245: ('Celery', 2),
246: ('Radishes', 2),
247: ('Turnips', 2),
248: ('Eggplants', 2),
249: ('Gourds', 2),
250: ('Cranberries', 2),
251: ('', 5),
252: ('', 5),
253: ('', 5),
254: ('Dbl Crop Barley/Soybeans', 1),
255: ('', 5)}
return key | 634a35d2962695dd0ef1b38a0c353498ca3dea89 | 3,651,175 |
def colmeta(colname, infile=None, name=None, units=None, ucd=None, desc=None,
outfile=None):
"""
Modifies the metadata of one or more columns. Some or all of the name,
units, ucd, utype and description of the column(s),
identified by "colname" can be set by using some or all of the listed flags.
Typically, "colname" will simply be the name of a single column.
:param colname: string, name of the column to change meta data for
:param infile: string, the location and file name for the input file, if
not defined will return the STILTS command string
:param outfile: string, the location and file name for the output file,
if not defined will default to infile
:param name: string, new name for the column
:param units: string, new unit for the column
:param ucd: string, new UCD for the column
:param desc: string, new description for the column
:return:
"""
cmdstr = "colmeta "
if name is None and units is None and ucd is None and desc is None:
return 0
if name is not None:
cmdstr += '-name {0} '.format(__checkq__(str(name)))
if units is not None:
cmdstr += '-units {0} '.format(__checkq__(str(units)))
if ucd is not None:
cmdstr += '-ucd {0} '.format(__checkq__(str(ucd)))
if desc is not None:
cmdstr += '-desc {0} '.format(__checkq__(str(desc)))
cmdstr += '{0}'.format(colname)
if infile is None:
return cmdstr
if outfile is not None:
tpipe(cmdstr, infile=infile, outfile=outfile)
else:
tpipe(cmdstr, infile=infile, outfile=infile) | 15fc5b53e4ebd3563b00ef771a707d2ad2473ad7 | 3,651,176 |
def get_confusion_matrix_chart(cm, title):
"""Plot custom confusion matrix chart."""
source = pd.DataFrame([[0, 0, cm['TN']],
[0, 1, cm['FP']],
[1, 0, cm['FN']],
[1, 1, cm['TP']],
], columns=["actual values", "predicted values", "count"])
base = alt.Chart(source).encode(
y='actual values:O',
x='predicted values:O',
).properties(
width=200,
height=200,
title=title,
)
rects = base.mark_rect().encode(
color='count:Q',
)
text = base.mark_text(
align='center',
baseline='middle',
color='black',
size=12,
dx=0,
).encode(
text='count:Q',
)
return rects + text | 28884c46a51f3baf51dc5a6f3c0396a5c8f24e10 | 3,651,177 |
def get_ppo_plus_eco_params(scenario):
"""Returns the param for the 'ppo_plus_eco' method."""
assert scenario in DMLAB_SCENARIOS, (
'Non-DMLab scenarios not supported as of today by PPO+ECO method')
if scenario == 'noreward' or scenario == 'norewardnofire':
return md(get_common_params(scenario), {
'action_set': '' if scenario == 'noreward' else 'nofire',
'_gin.create_single_env.run_oracle_before_monitor': True,
'_gin.CuriosityEnvWrapper.scale_task_reward': 0.0,
'_gin.create_environments.scale_task_reward_for_eval': 0,
'_gin.create_environments.scale_surrogate_reward_for_eval': 1,
'_gin.OracleExplorationReward.reward_grid_size': 30,
'r_checkpoint': '',
'_gin.CuriosityEnvWrapper.scale_surrogate_reward':
0.03017241379310345,
'_gin.train.ent_coef': 0.002053525026457146,
'_gin.create_environments.online_r_training': True,
'_gin.RNetworkTrainer.observation_history_size': 60000,
'_gin.RNetworkTrainer.training_interval': -1,
'_gin.CuriosityEnvWrapper.exploration_reward_min_step': 60000,
'_gin.RNetworkTrainer.num_epochs': 10,
})
else:
return md(get_common_params(scenario), {
'action_set': '',
'r_checkpoint': '',
'_gin.EpisodicMemory.capacity': 200,
'_gin.similarity_to_memory.similarity_aggregation': 'percentile',
'_gin.EpisodicMemory.replacement': 'random',
'_gin.CuriosityEnvWrapper.scale_task_reward': 1.0,
'_gin.CuriosityEnvWrapper.scale_surrogate_reward':
0.03017241379310345,
'_gin.train.ent_coef': 0.002053525026457146,
'_gin.create_environments.online_r_training': True,
'_gin.RNetworkTrainer.observation_history_size': 60000,
'_gin.RNetworkTrainer.training_interval': -1,
'_gin.CuriosityEnvWrapper.exploration_reward_min_step': 60000,
'_gin.RNetworkTrainer.num_epochs': 10,
}) | 26bb3db0cf14eceea86cd659332c9bbc0195ab9b | 3,651,178 |
def field_display(name):
"""
Works with Django's get_FOO_display mechanism for fields with choices set. Given
the name of a field, returns a producer that calls get_<name>_display.
"""
return qs.include_fields(name), producers.method(f"get_{name}_display") | 7fbc17dddfa398934496099f605f6cee97a802ad | 3,651,179 |
import time
def set_trace(response):
"""
Set a header containing the request duration and push detailed trace to the MQ
:param response:
:return:
"""
if TRACE_PERFORMANCE:
req_time = int((time.time() - g.request_start) * 1000)
trace = {
"duration": req_time,
"depth": g.request_depth,
"method": g.request_method,
"url": g.request_url,
"uuid": g.request_uuid,
"sequence": g.request_seq_this,
"responseCode": response.status_code,
"dbTime": g.db_time
}
if g.first_request:
trace["totalRequestCount"] = g.request_seq_next
trace_publisher.push('trace', trace)
flask_logger.debug(f'request trace: {req_time} ms ({g.request_method} {g.request_url})')
response.headers.add('x-trace-request-time', str(req_time))
response.headers.add('x-trace-seq-next', str(g.request_seq_next))
return response | 1b7067daaf9fd3b72cf9b2db9a78b33b64bf8fb9 | 3,651,180 |
from typing import Dict
from typing import List
def extract_attachments(payload: Dict) -> List[Image]:
"""
Extract images from attachments.
There could be other attachments, but currently we only extract images.
"""
attachments = []
for item in payload.get('attachment', []):
# noinspection PyProtectedMember
if item.get("type") in ("Document", "Image") and item.get("mediaType") in Image._valid_media_types:
if item.get('pyfed:inlineImage', False):
# Skip this image as it's indicated to be inline in content and source already
continue
attachments.append(
ActivitypubImage(
url=item.get('url'),
name=item.get('name') or "",
media_type=item.get("mediaType"),
)
)
return attachments | afb9d959e680c51fc327d6c7e5f5e74fdc5db5e6 | 3,651,181 |
from ...model_zoo import get_model
def yolo3_mobilenet1_0_custom(
classes,
transfer=None,
pretrained_base=True,
pretrained=False,
norm_layer=BatchNorm, norm_kwargs=None,
**kwargs):
"""YOLO3 multi-scale with mobilenet base network on custom dataset.
Parameters
----------
classes : iterable of str
Names of custom foreground classes. `len(classes)` is the number of foreground classes.
transfer : str or None
If not `None`, will try to reuse pre-trained weights from yolo networks trained on other
datasets.
pretrained_base : boolean
Whether fetch and load pretrained weights for base network.
norm_layer : object
Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
norm_kwargs : dict
Additional `norm_layer` arguments, for example `num_devices=4`
for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
Returns
-------
mxnet.gluon.HybridBlock
Fully hybrid yolo3 network.
"""
if transfer is None:
base_net = get_mobilenet(multiplier=1,
pretrained=pretrained_base,
norm_layer=norm_layer, norm_kwargs=norm_kwargs,
**kwargs)
stages = [base_net.features[:33],
base_net.features[33:69],
base_net.features[69:-2]]
anchors = [
[10, 13, 16, 30, 33, 23],
[30, 61, 62, 45, 59, 119],
[116, 90, 156, 198, 373, 326]]
strides = [8, 16, 32]
net = get_yolov3(
'mobilenet1.0', stages, [512, 256, 128], anchors, strides, classes, 'voc',
pretrained=pretrained, norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs)
else:
net = get_model(
'yolo3_mobilenet1.0_' +
str(transfer),
pretrained=True,
**kwargs)
net.reset_class(classes)
return net | 2da86fe66538e3cd9a21c456c00312a217ab5ca0 | 3,651,182 |
from difflib import ndiff
def calculate_levenshtein_distance(str_1, str_2):
"""
The Levenshtein distance is a string metric for measuring the difference between two sequences.
It is calculated as the minimum number of single-character edits necessary to transform one string into another
"""
distance = 0
buffer_removed = buffer_added = 0
for x in ndiff(str_1, str_2):
code = x[0]
# Code ? is ignored as it does not translate to any modification
if code == ' ':
distance += max(buffer_removed, buffer_added)
buffer_removed = buffer_added = 0
elif code == '-':
buffer_removed += 1
elif code == '+':
buffer_added += 1
distance += max(buffer_removed, buffer_added)
return distance | 949d54fbcbd2169aa06cedc7341e98c12412d03c | 3,651,183 |
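# Illustrative usage (editor's sketch, not part of the original source): the
# classic kitten -> sitting example needs two substitutions and one insertion.
assert calculate_levenshtein_distance("kitten", "sitting") == 3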
import datetime
def make_datetime(value, *, format_=DATETIME_FORMAT):
"""
>>> make_datetime('2001-12-31T23:59:59')
datetime.datetime(2001, 12, 31, 23, 59, 59)
"""
return datetime.datetime.strptime(value, format_) | 5c6d79ae0ddc9f4c47592a90ed3232f556df0a49 | 3,651,184 |
import inspect
def named_struct_dict(typename, field_names=None, default=None, fixed=False, *, structdict_module=__name__,
base_dict=None, sorted_repr=None, verbose=False, rename=False, module=None, qualname_prefix=None,
frame_depth=1):
"""Returns a new subclass of StructDict with all fields as properties."""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if fixed:
mixin_type = NamedFixedStructDictMixin.__name__
else:
mixin_type = NamedStructDictMixin.__name__
if inspect.isclass(base_dict):
base_dict = base_dict.__name__
if base_dict is None:
base_dict = 'dict'
elif base_dict not in ('dict', 'OrderedDict', 'SortedDict'):
raise NotImplementedError(f"base_dict: {base_dict} is not supported.")
if sorted_repr is None:
sorted_repr = True if base_dict in ('dict',) else False
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names)) if field_names else []
typename = str(typename)
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f"_{index}"
seen.add(name)
for name in [typename, structdict_module] + field_names:
if type(name) is not str:
raise TypeError('Type names, field names and structdict_module must be strings')
if name is not structdict_module and not name.isidentifier():
raise ValueError(f"Type names and field names must be valid identifiers: {name!r}")
if _iskeyword(name):
raise ValueError(f"Type names and field names cannot be a keyword: {name!r}")
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError(f"Field names cannot start with an underscore: {name!r}")
if name in seen:
raise ValueError(f"Encountered duplicate field name: {name!r}")
seen.add(name)
default_val = "None" if default is None else 'default_val'
# Fill-in the class template
class_definition = _struct_prop_dict_class_template.format(
structdict_module=structdict_module,
mixin_type=mixin_type,
base_dict=base_dict,
typename=typename,
field_names=tuple(field_names),
kwargs_map=(", ".join([f"{field_name}={default_val}" for field_name in field_names]).replace("'", "")) + (
"," if field_names else ""),
kwargs_eq_map=(", ".join([f"{field_name}={field_name}" for field_name in field_names]).replace("'", "")) + (
"," if field_names else ""),
sorted_repr=sorted_repr
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__=f"struct_prop_dict_{typename}")
namespace.update(default_val=default)
exec(class_definition, namespace)
result = namespace[typename]
result._source = class_definition
if verbose:
print(result._source)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named structdict is created. Bypass this step in environments where
# _sys._getframe is not defined (Jython for example) or _sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
try:
frame = _sys._getframe(frame_depth)
except (AttributeError, ValueError):
pass
else:
if module is None:
module = frame.f_globals.get('__name__', '__main__')
if qualname_prefix is None:
qualname_prefix = frame.f_locals.get('__qualname__', '')
if module is not None:
result.__module__ = module
if qualname_prefix:
result.__qualname__ = f'{qualname_prefix}.' + result.__qualname__
return result | 465ac4783697b749c092d96fa8af498e67f15d51 | 3,651,185 |
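# Illustrative usage (editor's sketch, not part of the original source; assumes
# the structdict module with NamedStructDictMixin and the class template used
# above is importable as in the original package):
# Point = named_struct_dict('Point', 'x y', default=0)
# p = Point(x=1)   # p.y == 0, p['x'] == 1, and p behaves like the chosen base dict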
def PytorchONNXRuntimeModel(model, input_sample=None, onnxruntime_session_options=None):
"""
Create a ONNX Runtime model from pytorch.
:param model: 1. Pytorch model to be converted to ONNXRuntime for inference
2. Path to ONNXRuntime saved model.
:param input_sample: A set of inputs for trace, defaults to None if you have trace before or
model is a LightningModule with any dataloader attached,
defaults to None.
:param onnxruntime_session_options: A session option for onnxruntime accelerator.
:return: A PytorchONNXRuntimeModel instance
"""
    # Import here so the name below resolves to the backing class rather than
    # this wrapper function (a module-level import would be shadowed by the def).
    from .pytorch.pytorch_onnxruntime_model import PytorchONNXRuntimeModel
    return PytorchONNXRuntimeModel(model, input_sample,
                                   onnxruntime_session_options=onnxruntime_session_options)
import os
def get_model_python_path():
"""
Returns the python path for a model
"""
return os.path.dirname(__file__) | 5ddd66f8b0c37b8a84eab614c4e3efd6efe9d9ef | 3,651,187 |
def intensity_variance(mask: np.ndarray, image: np.ndarray) -> float:
"""Returns variance of all intensity values in region of interest."""
return np.var(image[mask]) | e967b4cd3c3a896fba785d8c9e5f8bf07daa620d | 3,651,188 |
def permute_array(arr, axis=0):
"""Permute array along a certain axis
Args:
arr: numpy array
axis: axis along which to permute the array
"""
if axis == 0:
return np.random.permutation(arr)
else:
return np.random.permutation(arr.swapaxes(0, axis)).swapaxes(0, axis) | ce5f6d571062f36888d22836579332034f4fe924 | 3,651,189 |
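# Illustrative usage (editor's sketch, not part of the original source; assumes
# numpy imported as np): axis=0 shuffles rows, axis=1 shuffles the columns.
# arr = np.arange(6).reshape(2, 3)
# permute_array(arr, axis=1)  # e.g. array([[2, 0, 1], [5, 3, 4]]) -- column order is random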
import os
import errno
def convertGMLToGeoJSON(config, outputDir, gmlFilepath, layerName, t_srs='EPSG:4326',
flip_gml_coords=False):
""" Convert a GML file to a shapefile. Will silently exit if GeoJSON already exists
@param config A Python ConfigParser containing the section 'GDAL/OGR' and option 'PATH_OF_OGR2OGR'
@param outputDir String representing the absolute/relative path of the directory into which GeoJSON should be written
@param gmlFilepath String representing the absolute path of the GML file to convert
@param layerName String representing the name of the layer contained in the GML file to write to a GeoJSON
@param t_srs String representing the spatial reference system of the output GeoJSON, of the form 'EPSG:XXXX'
@return String representing the name of the GeoJSON written
@exception Exception if the conversion failed.
"""
pathToOgrCmd = config.get('GDAL/OGR', 'PATH_OF_OGR2OGR')
if not os.path.isdir(outputDir):
raise IOError(errno.ENOTDIR, "Output directory %s is not a directory" % (outputDir,))
if not os.access(outputDir, os.W_OK):
raise IOError(errno.EACCES, "Not allowed to write to output directory %s" % (outputDir,))
outputDir = os.path.abspath(outputDir)
geojsonFilename = "%s.geojson" % (layerName,)
geojsonFilepath = os.path.join(outputDir, geojsonFilename)
if not os.path.exists(geojsonFilepath):
# Need to flip coordinates in GML as SSURGO WFS now returns coordinates in lat, lon order
# rather than lon, lat order that OGR expects. For more information, see:
# http://trac.osgeo.org/gdal/wiki/FAQVector#HowdoIflipcoordinateswhentheyarenotintheexpectedorder
if flip_gml_coords and t_srs =='EPSG:4326':
ogrCommand = "%s -f 'GeoJSON' -nln %s -s_srs '+proj=latlong +datum=WGS84 +axis=neu +wktext' -t_srs %s %s %s" % (pathToOgrCmd, layerName, t_srs, geojsonFilepath, gmlFilepath)
else:
ogrCommand = "%s -f 'GeoJSON' -nln %s -t_srs %s %s %s" % (pathToOgrCmd, layerName, t_srs, geojsonFilepath, gmlFilepath)
returnCode = os.system(ogrCommand)
if returnCode != 0:
raise Exception("GML to GeoJSON command %s returned %d" % (ogrCommand, returnCode))
return geojsonFilename | 70ee0676d13a647d42a39313d5be1545042f73c7 | 3,651,190 |
def dsmatch(name, dataset, fn):
"""
Fuzzy search best matching object for string name in dataset.
Args:
name (str): String to look for
dataset (list): List of objects to search for
fn (function): Function to obtain a string from a element of the dataset
Returns:
First element with the maximun fuzzy ratio.
"""
max_ratio = 0
matching = None
for e in dataset:
if fuzz and name:
ratio = fuzz.token_sort_ratio(normalize(name), normalize(fn(e)))
if ratio > max_ratio:
max_ratio = ratio
matching = e
elif normalize(name) == normalize(fn(e)):
matching = e
break
return matching | 0835c0da3773eedab95c78e1b4f7f28abde0d8fd | 3,651,191 |
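# Illustrative usage (editor's sketch, not part of the original source; `fuzz`
# is the optional fuzzywuzzy/rapidfuzz module and `normalize` the module's own
# string-normalization helper):
# cities = [{"name": "New York"}, {"name": "Newark"}]
# dsmatch("new york", cities, lambda c: c["name"]) -> {"name": "New York"}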
import os
import re
def _generate_flame_clip_name(item, publish_fields):
"""
Generates a name which will be displayed in the dropdown in Flame.
:param item: The publish item being processed.
:param publish_fields: Publish fields
:returns: name string
"""
# this implementation generates names on the following form:
#
# Comp, scene.nk (output background), v023
# Comp, Nuke, v023
# Lighting CBBs, final.nk, v034
#
# (depending on what pieces are available in context and names, names
# may vary)
context = item.context
name = ""
# If we have template fields passed in, then we'll try to extract
# some information from them. If we don't, then we fall back on
# some defaults worked out below.
publish_fields = publish_fields or dict()
# the shot will already be implied by the clip inside Flame (the clip
# file which we are updating is a per-shot file. But if the context
# contains a task or a step, we can display that:
if context.task:
name += "%s, " % context.task["name"].capitalize()
elif context.step:
name += "%s, " % context.step["name"].capitalize()
# If we have a channel set for the write node or a name for the scene,
# add those. If we don't have a name from the template fields, then we
# fall back on the file sequence's basename without the extension or
# frame number on the end (if possible).
default_name, _ = os.path.splitext(
os.path.basename(item.properties["sequence_paths"][0])
)
# Strips numbers off the end of the file name, plus any underscore or
# . characters right before it.
#
# foo.1234 -> foo
# foo1234 -> foo
# foo_1234 -> foo
default_name = re.sub(r"[._]*\d+$", "", default_name)
rp_name = publish_fields.get("name", default_name,)
rp_channel = publish_fields.get("channel")
if rp_name and rp_channel:
name += "%s.nk (output %s), " % (rp_name, rp_channel)
elif not rp_name:
name += "Nuke output %s, " % rp_channel
elif not rp_channel:
name += "%s.nk, " % rp_name
else:
name += "Nuke, "
# Do our best to get a usable version number. If we have data extracted
# using a template, we use that. If we don't, then we can look to see
# if this publish item came with a clip PublishedFile, in which case
# we use the version_number field from that entity +1, as a new version
# of that published clip will be created as part of this update process,
# and that is what we want to associate ourselves with here.
version = publish_fields.get("version")
if version is None and "flame_clip_publish" in item.properties:
version = item.properties["flame_clip_publish"]["version_number"] + 1
version = version or 0
name += "v%03d" % version
return name | 847956c6897a873145c78adbcf6530f0a47a9259 | 3,651,192 |
def f(q):
"""Constraint map for the origami."""
return 0.5 * (np.array([
q[0] ** 2,
(q[1] - q[0]) ** 2 + q[2] ** 2 + q[3] ** 2,
(q[4] - q[1]) ** 2 + (q[5] - q[2]) ** 2 + (q[6] - q[3]) ** 2,
q[4] ** 2 + q[5] ** 2 + q[6] ** 2,
q[7] ** 2 + q[8] ** 2 + q[9] ** 2,
(q[7] - q[1]) ** 2 + (q[8] - q[2]) ** 2 + (q[9] - q[3]) ** 2,
(q[7] - q[4]) ** 2 + (q[8] - q[5]) ** 2 + (q[9] - q[6]) ** 2,
q[10] ** 2 + q[11] ** 2,
(q[10] - q[0]) ** 2 + q[11] ** 2,
(q[10] - q[1]) ** 2 + (q[11] - q[2]) ** 2 + q[3] ** 2,
(q[10] - q[7]) ** 2 + (q[11] - q[8]) ** 2 + q[9] ** 2,
]) - lengths2) / (lengths) | 77c3617a76cb2e184b1f22404f1db8be8212a4c9 | 3,651,193 |
def resize(clip, newsize=None, height=None, width=None):
"""
Returns a video clip that is a resized version of the clip.
Parameters
------------
newsize:
Can be either
- ``(height,width)`` in pixels or a float representing
- A scaling factor, like 0.5
- A function of time returning one of these.
width:
width of the new clip in pixel. The height is then computed so
that the width/height ratio is conserved.
height:
height of the new clip in pixel. The width is then computed so
that the width/height ratio is conserved.
Examples
----------
>>> myClip.resize( (460,720) ) # New resolution: (460,720)
    >>> myClip.resize(0.6) # width and height multiplied by 0.6
>>> myClip.resize(width=800) # height computed automatically.
>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
"""
w, h = clip.size
if newsize != None:
def trans_newsize(ns):
if isinstance(ns, (int, float)):
return [ns * w, ns * h]
else:
return ns
if hasattr(newsize, "__call__"):
newsize2 = lambda t : trans_newsize(newsize(t))
if clip.ismask:
fun = lambda gf,t: (1.0*resizer((255 * gf(t))
.astype('uint8'),
newsize2(t))/255)
else:
fun = lambda gf,t: resizer(gf(t).astype('uint8'),
newsize2(t))
return clip.fl(fun, keep_duration=True, apply_to='mask')
else:
newsize = trans_newsize(newsize)
elif height != None:
newsize = [w * height / h, height]
elif width != None:
newsize = [width, h * width / w]
if clip.ismask:
fl = lambda pic: 1.0*resizer((255 * pic).astype('uint8'),
newsize)/255
else:
fl = lambda pic: resizer(pic.astype('uint8'), newsize)
return clip.fl_image(fl, apply_to='mask') | 5a8541e1320d37bd47aa35978794d849af358cb6 | 3,651,194 |
def calc_rt_pytmm(pol, omega, kx, n, d):
"""API-compatible wrapper around pytmm
"""
vec_omega = omega.numpy()
vec_lambda = C0/vec_omega*2*np.pi
vec_n = n.numpy()
vec_d = d.numpy()
vec_d = np.append(np.inf, vec_d)
vec_d = np.append(vec_d, np.inf)
vec_kx = kx.numpy().reshape([-1,1])
vec_k0 = 2 * np.pi / vec_lambda.reshape([1,-1])
vec_theta = np.arcsin(vec_kx / vec_k0)
r = np.zeros((len(kx), len(omega)), dtype=np.complex64)
t = np.zeros((len(kx), len(omega)), dtype=np.complex64)
for i, theta in enumerate(vec_theta):
for j, lam in enumerate(vec_lambda):
out = coh_tmm(pol, vec_n, vec_d, theta[j], lam)
r[i, j] = out['r']
t[i, j] = out['t']
t = tf.constant(t)
r = tf.constant(r)
return tf.constant(t), tf.constant(r) | def2fb22d2e72a873794838601bc74a7c65cb9c3 | 3,651,195 |
def statistic_bbox(dic, dic_im):
""" Statistic number of bbox of seed and image-level data for each class
Parameters
----------
dic: seed roidb dictionary
dic_im: image-level roidb dictionary
Returns
-------
num_bbox: list for number of 20 class's bbox
num_bbox_im: list for number of 20 class's bbox
"""
num_bbox = [0] * 20
num_bbox_im = [0] * 20
for d in dic:
for c in d['gt_classes']:
num_bbox[c-1] += 1
for d in dic_im:
for c in d['gt_classes']:
num_bbox_im[c-1] += 1
print("Statistic for seed data bbox: ", num_bbox)
print("Statistic for image-level data bbox: ", num_bbox_im)
return num_bbox, num_bbox_im | 782314baeab7fbec36c9ea56bcec57d5a508a918 | 3,651,196 |
def github_youtube_config_files():
"""
Function that returns a list of pyGithub files with youtube config channel data
Returns:
A list of pyGithub contentFile objects
"""
if settings.GITHUB_ACCESS_TOKEN:
github_client = github.Github(settings.GITHUB_ACCESS_TOKEN)
else:
github_client = github.Github()
repo = github_client.get_repo(CONFIG_FILE_REPO)
return repo.get_contents(CONFIG_FILE_FOLDER, ref=settings.OPEN_VIDEO_DATA_BRANCH) | 166ca3653173feee7513097c9313ebb5ab3b4d17 | 3,651,197 |
def reverse_uint(uint,num_bits=None):
"""
This function takes an unsigned integer and reverses all of its bits.
num_bits is number of bits to assume are present in the unsigned integer.
If num_bits is not specified, the minimum number of bits needed to represent the unsigned integer is assumed.
    If num_bits is specified, it must be at least the minimum number of bits needed to represent the unsigned integer.
>>> reverse_uint(3,8)
192
>>> bin(192)
'0b11000000'
"""
if not isinstance(uint,int):
raise Exception('input must be an integer, not %s' % repr(type(uint)))
if uint < 0:
raise Exception('input must be non-negative: %s' % repr(uint))
    if num_bits is not None and min_bits_uint(uint) > num_bits:
raise Exception('Input uint must be storable in at most num_bits (%d) number of bits, but requires %d bits' % (num_bits,min_bits_uint(uint)))
result = 0
extracted_bits = 0
while (num_bits is not None and extracted_bits < num_bits) or uint != 0:
uint,rem = divmod(uint,2)
result = (result<<1) | rem
extracted_bits += 1
return result | a3197aa3f199a5677a15e053c0455c0216d07827 | 3,651,198 |
def min_by_tail(lhs, ctx):
"""Element ↓
(any) -> min(a, key=lambda x: x[-1])
"""
lhs = iterable(lhs, ctx=ctx)
if len(lhs) == 0:
return []
else:
return min_by(lhs, key=tail, cmp=less_than, ctx=ctx) | 88fce303e6ff95f89e57ebd05c575810238497ea | 3,651,199 |