content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import numpy as np
from scipy import stats
def percentiles(a, pcts, axis=None):
"""Like scoreatpercentile but can take and return array of percentiles.
Parameters
----------
a : array
data
pcts : sequence of percentile values
percentile or percentiles to find score at
axis : int or None
if not None, computes scores over this axis
Returns
-------
scores: array
array of scores at requested percentiles
first dimension is length of object passed to ``pcts``
"""
scores = []
try:
n = len(pcts)
except TypeError:
pcts = [pcts]
n = 0
for i, p in enumerate(pcts):
if axis is None:
score = stats.scoreatpercentile(a.ravel(), p)
else:
score = np.apply_along_axis(stats.scoreatpercentile, axis, a, p)
scores.append(score)
scores = np.asarray(scores)
if not n:
scores = scores.squeeze()
return scores | 0e7217ec3e36a361a6747729543cd694912a2874 | 3,657,400 |
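A minimal usage sketch (assuming the percentiles() function above is in scope together with numpy and scipy.stats, as imported above):

import numpy as np

data = np.random.default_rng(0).normal(size=(100, 5))
# A single percentile over the flattened array gives a single score.
median = percentiles(data, 50)
# Several percentiles along axis 0: the first dimension has length len(pcts).
quartiles = percentiles(data, [25, 50, 75], axis=0)
print(median, quartiles.shape)  # quartiles.shape == (3, 5)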
import json
def single_request(gh,kname='CVE exploit',page=1,per_page=50):
"""
解析单页仓库数据,获取CVE和exp标记
:return cve_list:list, cve id in each page by searching github.com
"""
cve=dict()
url="https://api.github.com/search/repositories?q={key_name}&sort=updated&order=desc&page={page}&per_page={per_page}".format(key_name=kname,page=page,per_page=per_page)
r=gh.call_to_the_api(url)
if r:
content=r.text
js=json.loads(content)
items=js['items']
total_count=js['total_count']
cve_add=single_parser(gh,items)
if cve_add:
cve={**cve,**cve_add}
return total_count,cve
else:
return False,False | 5fdd3fe28f0e973fb9d854e20b8ce77ed109d3c6 | 3,657,401 |
def ownerOf(tokenId: bytes) -> UInt160:
"""
Get the owner of the specified token.
The parameter tokenId SHOULD be a valid NFT. If not, this method SHOULD throw an exception.
:param tokenId: the token for which to check the ownership
:type tokenId: ByteString
:return: the owner of the specified token.
:raise AssertionError: raised if `tokenId` is not a valid NFT.
"""
owner = get_owner_of(tokenId)
debug(['ownerOf: ', owner])
return owner | 68f164a377f59614f6c1287b97a53ca14937800f | 3,657,402 |
def stuff_context(sites, rup, dists):
"""
Function to fill a rupture context with the contents of all of the
other contexts.
Args:
sites (SiteCollection): A SiteCollection object.
rup (RuptureContext): A RuptureContext object.
dists (DistanceContext): A DistanceContext object.
Returns:
RuptureContext: A new RuptureContext whose attributes are all of
the elements of the three inputs.
"""
ctx = RuptureContext()
for name in [name for name in vars(sites) if not name.startswith("__")]:
setattr(ctx, name, getattr(sites, name))
for name in [name for name in vars(rup) if not name.startswith("__")]:
setattr(ctx, name, getattr(rup, name))
for name in [name for name in vars(dists) if not name.startswith("__")]:
setattr(ctx, name, getattr(dists, name))
return ctx | 9c197a41414a875942a6df22c03899c3e936967f | 3,657,403 |
def number_to_float(value):
"""The INDI spec allows a number of different number formats, given any, this returns a float
:param value: A number string of a float, integer or sexagesimal
:type value: String
:return: The number as a float
:rtype: Float
"""
# negative is True, if the value is negative
negative = value.startswith("-")
if negative:
value = value.lstrip("-")
# Is the number provided in sexagesimal form?
if value == "":
parts = [0, 0, 0]
elif " " in value:
parts = value.split(" ")
elif ":" in value:
parts = value.split(":")
elif ";" in value:
parts = value.split(";")
else:
# not sexagesimal
parts = [value, "0", "0"]
# Any missing parts should have zero
if len(parts) == 2:
# assume seconds are missing, set to zero
parts.append("0")
assert len(parts) == 3
number_strings = list(x if x else "0" for x in parts)
# convert strings to integers or floats
number_list = []
for part in number_strings:
try:
num = int(part)
except ValueError:
num = float(part)
number_list.append(num)
    floatvalue = number_list[0] + (number_list[1]/60) + (number_list[2]/3600)
if negative:
floatvalue = -1 * floatvalue
return floatvalue | 8b754a32848b3e697e0f82dbee4a1c35c560f1be | 3,657,404 |
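A minimal usage sketch (assuming number_to_float() above is in scope, with the seconds term divided by 3600 as corrected above). Sexagesimal strings are split on spaces, colons or semicolons, and missing parts default to zero:

print(number_to_float("-10:30"))    # -10.5  (30 minutes = 0.5)
print(number_to_float("12 30 36"))  # 12.51  (12 + 30/60 + 36/3600)
print(number_to_float("3.5"))       # 3.5    (plain numbers pass through unchanged)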
def spg_line_search_step_length(current_step_length, delta, f_old, f_new,
sigma_one=0.1, sigma_two=0.9):
"""Return next step length for line search."""
step_length_tmp = (-0.5 * current_step_length ** 2 * delta /
(f_new - f_old - current_step_length * delta))
next_step_length = 0
if sigma_one <= step_length_tmp <= sigma_two * current_step_length:
next_step_length = step_length_tmp
else:
next_step_length = 0.5 * current_step_length
return next_step_length | 844cccdfe1ec3f9c2c287384284ceb2ac3530e8e | 3,657,405 |
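A worked example of the safeguarded quadratic interpolation (assuming the function above is in scope): with current_step_length=1.0, delta=-4.0, f_old=10.0 and f_new=9.0, the trial step is (-0.5 * 1 * -4) / (9 - 10 + 4) = 2/3, which lies inside [sigma_one, sigma_two * current_step_length] = [0.1, 0.9] and is therefore accepted:

step = spg_line_search_step_length(1.0, -4.0, 10.0, 9.0)
print(step)  # approximately 0.6667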
def group_by_iter(metrics_dict):
"""
Restructure our metrics dictionary to have the last list store all the trials' values \
for a given iteration, instead of all the iterations' values for a given trial.
:param metrics_dict: data for an experiment (output of parse_party_data)
:type metrics_dict: `dict[list[list[np.array]]]`
:return: A new, reorganized dict
:rtype: `dict[list[list[np.array]]]`
"""
# TODO: more pythonic, pandas-thonic, or numpy-thonic way of doing this?
metrics_gbi = {}
# look into the metrics...
for (metric_key, metric_llist) in metrics_dict.items():
metrics_gbi[metric_key] = []
# ... for each party...
for (party_idx, metric_for_party) in enumerate(metric_llist):
metrics_gbi[metric_key] += [[]]
# ... for each trial...
for metric_for_trial in metric_for_party:
# ... and finally for each iter.
for (iter_idx, iter_val) in enumerate(metric_for_trial):
if len(metrics_gbi[metric_key][party_idx]) <= iter_idx:
metrics_gbi[metric_key][party_idx] += [[]]
metrics_gbi[metric_key][party_idx][iter_idx] += [iter_val]
return metrics_gbi | 469fb0df4d7acb63a982c0aab629031dbb345be6 | 3,657,406 |
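A minimal usage sketch (assuming group_by_iter() above is in scope, plus numpy). Two parties with two trials each and three iterations per trial:

import numpy as np

metrics = {'loss': [[np.array([3, 2, 1]), np.array([4, 3, 2])],   # party 0
                    [np.array([5, 4, 3]), np.array([6, 5, 4])]]}  # party 1
gbi = group_by_iter(metrics)
# gbi['loss'][0][0] now holds both trials' values for party 0 at iteration 0,
# i.e. the values 3 and 4.
print(gbi['loss'][0][0])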
def calc_pv_invest(area, kw_to_area=0.125, method='EuPD'):
"""
Calculate PV investment cost in Euro
Parameters
----------
area : float
Photovoltaic area
kw_to_area : float , optional
Ratio of peak power to area (default: 0.125)
For instance, 0.125 means 0.125 kWp / m2 area
(http://www.solaranlagen-portal.com/photovoltaik/leistung)
method : str, optional
Method to calculate cost (default: 'EuPD')
Options:
- 'sap':
Based on: Solaranlagenportal
http://www.solaranlagen-portal.com/photovoltaik/kosten
- 'EuPD':
Based on: EuPD Research, Photovoltaik-Preismonitor Deutschland: German PV
ModulePriceMonitor.
Returns
-------
pv_invest : float
Investcost into PV system in Euro
"""
assert method in ['sap', 'EuPD'], 'Unknown method'
assert area > 0, 'Area has to be larger than zero.'
assert kw_to_area > 0, 'kWp / area ratio has to be larger than zero.'
if method == 'sap':
kw_peak = area * kw_to_area # kW peak load
# kw_peak * (spec_price + spec_install_cost) + inverter cost
pv_invest = kw_peak * (1100 + 120) + 2000
if method == 'EuPD':
kw_peak = area * kw_to_area # kW peak load
# kw_peak * (spec_cost) + inverter cost
pv_invest = kw_peak * 1400 + 2000
return pv_invest | 2de9ee05580bc9d41522272a06cd97aaf3f5bc55 | 3,657,407 |
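A worked example (assuming calc_pv_invest() above is in scope): 40 m2 of PV at the default 0.125 kWp/m2 gives 5 kWp, so the 'EuPD' method yields 5 * 1400 + 2000 = 9000 Euro and the 'sap' method 5 * (1100 + 120) + 2000 = 8100 Euro:

print(calc_pv_invest(area=40))                 # 9000.0
print(calc_pv_invest(area=40, method='sap'))   # 8100.0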
def procrustes_2d(x, y, n_restart=10, scale=True):
"""Align two sets of coordinates using an affine transformation.
Attempts to find the affine transformation (composed of a rotation
matrix `r` and a transformation vector `t`) for `y` such that
    `y_affine` closely matches `x`. Closeness is measured using mean absolute error (MAE).
y_affine = np.matmul(y, r) + t
This algorithm only works with 2D coordinates (i.e., n_dim=2).
Arguments:
x: The first set of points.
shape = (n_point, n_dim)
y: The second set of points.
shape = (n_point, n_dim)
n_restart (optional): A scalar indicating the number of
restarts for the optimization routine.
scale (optional): Boolean indicating if scaling is permitted
in the affine transformation.
Returns:
r: A rotation matrix.
shape=(n_dim, n_dim)
t: A transformation vector.
shape=(1, n_dim)
"""
n_dim = 2
def assemble_r_t(params):
# Assemble valid rotation matrix.
s = params[3] * np.eye(n_dim)
r = rotation_matrix(params[2])
r = np.matmul(s, r)
f = np.array([[np.sign(params[4]), 0], [0, np.sign(params[5])]])
r = np.matmul(f, r)
# Assemble translation vector.
t = np.array([params[0], params[1]])
t = np.expand_dims(t, axis=0)
return r, t
# In order to avoid impossible rotation matrices, perform optimization
# on rotation components separately (theta, scaling, mirror).
def objective_fn(params, x, y):
r, t = assemble_r_t(params)
# Apply affine transformation.
y_affine = np.matmul(y, r) + t
# loss = np.mean(np.sum((x - y_affine)**2, axis=1)) TODO
# Loss is defined as MAE, since MSE chases outliers and can result
        # in ridiculous solutions.
loss = np.mean(np.sum(np.abs(x - y_affine), axis=1))
return loss
    # t_0, t_1, theta, scaling, flip_x, flip_y
    params_best = np.array((0., 0., 0., 1., 1., 1.))
loss_best = np.inf
for _ in range(n_restart):
(x0, y0) = np.random.rand(2) - .5
theta0 = 2 * np.pi * np.random.rand(1)
if scale:
s0 = np.random.rand(1) + .5
s_bnds = (0., None)
else:
s0 = 1
s_bnds = (1., 1.)
# Perform a flip on some restarts.
if np.random.rand(1) < .5:
fx0 = -.1
else:
fx0 = .1
if np.random.rand(1) < .5:
fy0 = -.1
else:
fy0 = .1
params0 = np.array((x0, y0, theta0, s0, fx0, fy0))
bnds = (
(None, None),
(None, None),
(0., 2*np.pi),
s_bnds,
(-.1, .1),
(-.1, .1)
)
res = minimize(objective_fn, params0, args=(x, y), bounds=bnds)
params_candidate = res.x
loss_candidate = res.fun
if loss_candidate < loss_best:
loss_best = loss_candidate
params_best = params_candidate
r, t = assemble_r_t(params_best)
return r, t | f315f475af58419ac95896e8ce25f68f7197294d | 3,657,408 |
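A standalone numpy sketch (illustrative names only) of the parameterization used above: each candidate transform composes a rotation R(theta), a scaling S and an axis flip F, applied to the points as y_affine = y @ (F S R) + t:

import numpy as np

theta, s = np.pi / 4, 2.0
R = np.array([[np.cos(theta), np.sin(theta)],
              [-np.sin(theta), np.cos(theta)]])
S = s * np.eye(2)
F = np.diag([1.0, -1.0])           # mirror the second axis
t = np.array([[0.5, -1.0]])
y = np.array([[1.0, 0.0], [0.0, 1.0]])
print(y @ (F @ S @ R) + t)         # the transformed points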
def samps2ms(samples: float, sr: int) -> float:
"""samples to milliseconds given a sampling rate"""
return (samples / sr) * 1000.0 | 49e07ee02984bf0e9a0a54715ef6b6e5a3c87798 | 3,657,409 |
def nice_year(dt, lang=None, bc=False):
"""Format a datetime to a pronounceable year.
For example, generate 'nineteen-hundred and eighty-four' for year 1984
Args:
dt (datetime): date to format (assumes already in local timezone)
lang (string): the language to use, use Mycroft default language if
not provided
        bc (bool) put B.C. after the year (python does not support dates
B.C. in datetime)
Returns:
(str): The formatted year string
"""
return lingua_franca.format.nice_year(dt, lang, bc) | 641195195023ecca030f6cd8d12ff9a3fc9c989c | 3,657,410 |
def get_results(job_id):
"""
Get the result of the job based on its id
"""
try:
job = Job.fetch(job_id, connection=conn)
if job.is_finished:
return jsonify({
"status": "finished",
"data": job.result
}), 200
elif job.is_failed:
return jsonify({
"status": "failed"
}), 200
else:
return jsonify({
"status": "in-progress"
}), 200
except NoSuchJobError:
return jsonify({
"msg": "job id does not exist"
}), 404 | ada9042cd4d7961415ec274a68631f6e9af81fad | 3,657,411 |
def get_clean_dict(obj: HikaruBase) -> dict:
"""
Turns an instance of a HikaruBase into a dict without values of None
This function returns a Python dict object that represents the hierarchy
    of objects starting at ``obj`` and recursing into any nested objects.
The returned dict **does not** include any key/value pairs where the value
of the key is None or empty.
If you wish to instead have a dict with all key/value pairs even when
there is no useful value then you should use the dataclasses module's
``asdict()`` function on obj.
    :param obj: an instance of a HikaruBase subclass (from any api_version_group)
:return: a dict representation of the obj instance, but if any value
in the dict was originally None, that key:value is removed from the
returned dict, hence it is a minimal representation
:raises TypeError: if the supplied obj is not a HikaruBase (dataclass),
or if obj is not an instance of a HikaruBase subclass
"""
if not isinstance(obj, HikaruBase):
raise TypeError("obj must be a kind of HikaruBase")
initial_dict = asdict(obj)
clean_dict = _clean_dict(initial_dict)
return clean_dict | 3daca47b6d8c42fca8856221f39b635791eb0fce | 3,657,412 |
def generate_html_frieze(type, value):
"""
Gets the data to be able to generate the frieze.
Calls the function to actually generate HTML.
Input:
- Type (session or dataset) of the second input
- A SQLAlchemy DB session or a dataset (list of mappings)
Output:
- The HTML to be displayed
"""
if type == "session":
session = value
mappings = list(get_all_mappings(session))
elif type == "dataset":
mappings = value
holes_raw = calc_all_holes("dataset", mappings)
holes = []
for hole in holes_raw:
holes.append(
{
"devices_id": -1000,
"id": -1000,
"iova": None,
"phys_addr": hole[0],
"size": hole[1],
}
)
for hole in holes:
hole["devices_id"] = -1
try:
mappings = add_device_info(mappings, session)
    except Exception:
session = create_session()
mappings = add_device_info(mappings, session)
mappings_as_dict = []
for m in mappings:
mappings_as_dict.append(m.__dict__)
memory_state = sorted(
mappings_as_dict + holes, key=lambda mapping: mapping["phys_addr"]
)
memory_state = unify_common_space(memory_state)
html_frieze = create_html_from_memory_state(memory_state, session)
return html_frieze | ddf914d9d710e60af48a6dc687a9e3961ab0cf94 | 3,657,413 |
import os
def get_folder_name(path, prefix=''):
"""
Look at the current path and change the name of the experiment
if it is repeated
Args:
path (string): folder path
prefix (string): prefix to add
Returns:
string: unique path to save the experiment
"""
if prefix == '':
prefix = path.split('/')[-1]
path = '/'.join(path.split('/')[:-1])
folders = [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]
if prefix not in folders:
path = os.path.join(path, prefix)
elif not os.path.isdir(os.path.join(path, '{}_0'.format(prefix))):
path = os.path.join(path, '{}_0'.format(prefix))
else:
n = sorted([int(f.split('_')[-1]) for f in folders if '_' in f[-2:]])[-1]
path = os.path.join(path, '{}_{}'.format(prefix, n+1))
return path | 126648fbe460581272eedafc8599a3af1ded07e4 | 3,657,414 |
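A minimal usage sketch on a POSIX path (assuming get_folder_name() above is in scope):

import os
import tempfile

base = tempfile.mkdtemp()
exp = os.path.join(base, 'experiment')
print(get_folder_name(exp))              # .../experiment   (name not taken yet)
os.makedirs(os.path.join(base, 'experiment'))
print(get_folder_name(exp))              # .../experiment_0 (name already taken)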
from typing import Optional
import re
def instantiate_model(model_to_train: str,
dataset_directory: str,
performance_directory: str,
gpu: Optional[bool] = None):
"""
A function to create the instance of the imported Class,
Classifier.
Args:
model_to_train (str): name of the pretrained model to train
        dataset_directory (str): Directory containing the data
        performance_directory (str): The directory where the generated text, checkpoints
        and model_stats will be saved.
gpu (bool): Boolean indicating availability of a GPU
Returns:
        Classifier: the instantiated classifier model.
"""
file = get_latest_exp(performance_directory)
if file is not None:
filename = re.findall('\\\\([^\\\\]+)\.txt', file)
exp_no = int((re.findall('_([0-9]+)', filename[0]))[0])
exp_no += 1
else:
exp_no = 1
Model = Classifier(exp_no, model_to_train, dataset_directory, performance_directory, gpu=gpu)
return Model | 8053053b5e77f1c74404826e7335b05bece8b99f | 3,657,415 |
def generate_hmac_key():
"""
Generates a key for use in the :func:`~securitylib.advanced_crypto.hmac` function.
:returns: :class:`str` -- The generated key, in byte string.
"""
return generate_secret_key(HMAC_KEY_MINIMUM_LENGTH) | 877cf9fbe56b6715f1744839ce83ac1abf9d7da8 | 3,657,416 |
import argparse
def get_args():
"""! Command line parser for Utterance level classification Leave
one speaker out schema pipeline -- Find Best Models"""
parser = argparse.ArgumentParser(
description='Utterance level classification Leave one '
'speaker out schema pipeline -- Find Best Models' )
parser.add_argument('-i', '--input_features_paths', nargs='+',
help='File paths of the features you want to '
'concatenate and the classify')
args = parser.parse_args()
return args | 4a349da4fe3b570dae359937ed80494075bf26ea | 3,657,417 |
def uscensus(location, **kwargs):
"""US Census Provider
Params
------
:param location: Your search location you want geocoded.
:param benchmark: (default=4) Use the following:
> Public_AR_Current or 4
> Public_AR_ACSYYYY or 8
> Public_AR_Census2010 or 9
:param vintage: (default=4) Use the following:
> Current_Current or 4
> Census2010_Current or 410
> ACS2013_Current or 413
> ACS2014_Current or 414
> ACS2015_Current or 415
> Current_ACS2015 or 8
> Census2010_ACS2015 or 810
> ACS2013_ACS2015 or 813
> ACS2014_ACS2015 or 814
> ACS2015_ACS2015 or 815
> Census2010_Census2010 or 910
> Census2000_Census2010 or 900
:param method: (default=geocode) Use the following:
> geocode
> reverse
API Reference
-------------
https://geocoding.geo.census.gov/geocoder/Geocoding_Services_API.pdf
"""
return get(location, provider='uscensus', **kwargs) | bd73acb87f27e3f14d0b1e22ebd06b91fcec9d85 | 3,657,418 |
def getAllItemsWithName(name, cataloglist):
"""Searches the catalogs in a list for all items matching a given name.
Returns:
list of pkginfo items; sorted with newest version first. No precedence
is given to catalog order.
"""
def compare_item_versions(a, b):
"""Internal comparison function for use with sorting"""
return cmp(munkicommon.MunkiLooseVersion(b['version']),
munkicommon.MunkiLooseVersion(a['version']))
itemlist = []
# we'll throw away any included version info
name = nameAndVersion(name)[0]
munkicommon.display_debug1('Looking for all items matching: %s...' % name)
for catalogname in cataloglist:
if not catalogname in CATALOG.keys():
# in case catalogname refers to a non-existent catalog...
continue
# is name in the catalog name table?
if name in CATALOG[catalogname]['named']:
versionsmatchingname = CATALOG[catalogname]['named'][name]
for vers in versionsmatchingname.keys():
if vers != 'latest':
indexlist = CATALOG[catalogname]['named'][name][vers]
for index in indexlist:
thisitem = CATALOG[catalogname]['items'][index]
if not thisitem in itemlist:
munkicommon.display_debug1(
'Adding item %s, version %s from catalog %s...' %
(name, thisitem['version'], catalogname))
itemlist.append(thisitem)
if itemlist:
# sort so latest version is first
itemlist.sort(compare_item_versions)
return itemlist | 0babcd8363918d15d835fb67a37600b960adb942 | 3,657,419 |
def reco_source_position_sky(cog_x, cog_y, disp_dx, disp_dy, focal_length, pointing_alt, pointing_az):
"""
Compute the reconstructed source position in the sky
Parameters
----------
cog_x: `astropy.units.Quantity`
cog_y: `astropy.units.Quantity`
    disp_dx: `astropy.units.Quantity`
    disp_dy: `astropy.units.Quantity`
focal_length: `astropy.units.Quantity`
pointing_alt: `astropy.units.Quantity`
pointing_az: `astropy.units.Quantity`
Returns
    -------
    Reconstructed source position in the sky (horizontal frame)
    """
src_x, src_y = disp_to_pos(disp_dx, disp_dy, cog_x, cog_y)
return camera_to_sky(src_x, src_y, focal_length, pointing_alt, pointing_az) | 14b7fee325bc8a571a13d257f046cd0e7bf838db | 3,657,420 |
def segment_annotations(table, num, length, step=None):
""" Generate a segmented annotation table by stepping across the audio files, using a fixed
step size (step) and fixed selection window size (length).
Args:
table: pandas DataFrame
Annotation table.
num: int
Number of segments
length: float
Selection length in seconds.
step: float
Selection step size in seconds. If None, the step size is set
equal to the selection length.
Returns:
df: pandas DataFrame
Annotations table
"""
if step is None:
step = length
segs = []
for n in range(num):
# select annotations that overlap with segment
t1 = n * step
t2 = t1 + length
a = table[(table.start < t2) & (table.end > t1)].copy()
if len(a) > 0:
# shift and crop annotations
a['start'] = a['start'].apply(lambda x: max(0, x - t1))
a['end'] = a['end'].apply(lambda x: min(length, x - t1))
a['sel_id'] = n #map to segment
segs.append(a)
df = pd.concat(segs)
df.set_index(keys=['sel_id'], inplace=True, append=True)
df = df.swaplevel()
df = df.sort_index()
return df | 4b1bb8298113b43716fcd5f7d2a27b244f63829c | 3,657,421 |
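A minimal usage sketch (assuming segment_annotations() above is in scope, plus pandas):

import pandas as pd

table = pd.DataFrame({'start': [1.0, 4.5], 'end': [2.0, 6.0], 'label': ['a', 'b']})
segs = segment_annotations(table, num=3, length=3.0)  # step defaults to length
# Segment 0 covers [0, 3) s and keeps annotation 'a' shifted to 1.0-2.0;
# segment 1 covers [3, 6) s and keeps 'b' cropped to 1.5-3.0;
# segment 2 has no overlapping annotation and is dropped.
print(segs)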
def get_vdw_style(vdw_styles, cut_styles, cutoffs):
"""Get the VDW_Style section of the input file
Parameters
----------
vdw_styles : list
list of vdw_style for each box, one entry per box
cut_styles : list
list of cutoff_style for each box, one entry per box. For a
box with vdw_style == 'none', the cutoff style is None
cutoffs : list
list with cutoffs for each box, one entry per box For a
box with vdw_style == 'none', the cutoff is None
"""
assert len(vdw_styles) == len(cut_styles)
assert len(vdw_styles) == len(cutoffs)
valid_vdw_styles = ["lj", "none"]
valid_cut_styles = {vstyle: [] for vstyle in valid_vdw_styles}
valid_cut_styles["lj"].append("cut")
valid_cut_styles["lj"].append("cut_tail")
valid_cut_styles["lj"].append("cut_switch")
valid_cut_styles["lj"].append("cut_shift")
valid_cut_styles["none"].append(None)
for vdw_style in vdw_styles:
if vdw_style not in valid_vdw_styles:
raise ValueError(
"Unsupported vdw_style: {}. Supported options "
"include {}".format(vdw_style, vdw_styles)
)
for cut_style, vdw_style in zip(cut_styles, vdw_styles):
if cut_style not in valid_cut_styles[vdw_style]:
raise ValueError(
"Unsupported cutoff style: {}. Supported "
"options for the selected vdw_style ({}) include "
"{}".format(cut_style, vdw_style, valid_cut_styles[vdw_style])
)
for cut_style, cutoff in zip(cut_styles, cutoffs):
if cut_style == "cut_switch":
if not isinstance(cutoff, np.ndarray) or len(cutoff) != 2:
raise ValueError(
'Style "cut_switch" requires an inner '
"and outer cutoff. Use the "
"cutoffs=[inner_cut,outer_cut] "
"kwargs option."
)
inp_data = """
# VDW_Style"""
for vdw_style, cut_style, cutoff in zip(vdw_styles, cut_styles, cutoffs):
if vdw_style == "none":
inp_data += """
{vdw_style}""".format(
vdw_style=vdw_style
)
else:
if cut_style == "cut_switch":
inner_cutoff = cutoff[0]
outer_cutoff = cutoff[1]
inp_data += """
{vdw_style} {cut_style} {inner_cutoff} {outer_cutoff}""".format(
vdw_style=vdw_style,
cut_style=cut_style,
inner_cutoff=inner_cutoff,
outer_cutoff=outer_cutoff,
)
else:
inp_data += """
{vdw_style} {cut_style} {cutoff}""".format(
vdw_style=vdw_style, cut_style=cut_style, cutoff=cutoff
)
inp_data += """
!------------------------------------------------------------------------------
"""
return inp_data | 5cd0825d73e11c4fcb8ecce0526493414842697c | 3,657,422 |
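A minimal usage sketch (assuming get_vdw_style() above is in scope, plus numpy):

import numpy as np

# One box with an LJ potential, tail corrections and a 12 Angstrom cutoff:
print(get_vdw_style(['lj'], ['cut_tail'], [12.0]))
# 'cut_switch' expects an inner and an outer cutoff as a two-element array:
print(get_vdw_style(['lj'], ['cut_switch'], [np.array([10.0, 12.0])]))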
import ruamel.yaml
def load_yaml_file(file):
    """
    Loads a yaml file from file system.
    @param file Path to file to be loaded.
    """
    try:
        with open(file, 'r') as yaml_file:
            kwargs = ruamel.yaml.round_trip_load(yaml_file, preserve_quotes=True)
            return kwargs
    except OSError as e:
        print("error")
        return str(e) | 7fe74fefcb5dd7068bc0040528670c68e926eaf5 | 3,657,423 |
def freduce(x, axis=None):
"""
Reduces a spectrum to positive frequencies only
Works on the last dimension (contiguous in c-stored array)
:param x: numpy.ndarray
:param axis: axis along which to perform reduction (last axis by default)
:return: numpy.ndarray
"""
if axis is None:
axis = x.ndim - 1
siz = list(x.shape)
siz[axis] = int(np.floor(siz[axis] / 2 + 1))
return np.take(x, np.arange(0, siz[axis]), axis=axis) | 8d13e66a18ef950422af49a68012605cf0d03947 | 3,657,424 |
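A minimal usage sketch (assuming freduce() above is in scope, plus numpy):

import numpy as np

x = np.random.default_rng(0).normal(size=(4, 16))
X = np.fft.fft(x, axis=-1)
Xr = freduce(X)      # keeps only the 16 // 2 + 1 = 9 positive-frequency bins
print(Xr.shape)      # (4, 9)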
import os
def init_pretraining_params(exe,
pretraining_params_path,
main_program):
"""load params of pretrained model, NOT including moment, learning_rate"""
assert os.path.exists(pretraining_params_path
), "[%s] cann't be found." % pretraining_params_path
def _existed_params(var):
if not isinstance(var, fluid.framework.Parameter):
return False
return os.path.exists(os.path.join(pretraining_params_path, var.name))
fluid.io.load_vars(
exe,
pretraining_params_path,
main_program=main_program,
predicate=_existed_params)
print("Load pretraining parameters from {}.".format(
pretraining_params_path)) | 16d9a3fc2c73613047348f5b6234f5150ca9ef6c | 3,657,425 |
import json
def sort_shipping_methods(request):
"""Sorts shipping methods after drag 'n drop.
"""
shipping_methods = request.POST.get("objs", "").split('&')
assert (isinstance(shipping_methods, list))
if len(shipping_methods) > 0:
priority = 10
for sm_str in shipping_methods:
sm_id = sm_str.split('=')[1]
sm_obj = ShippingMethod.objects.get(pk=sm_id)
sm_obj.priority = priority
sm_obj.save()
priority = priority + 10
result = json.dumps({
"message": _(u"The shipping methods have been sorted."),
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json') | 307ecef020ac296982a7006ce1392cb807461546 | 3,657,426 |
def appendRecordData(record_df, record):
"""
Args:
record_df (pd.DataFrame):
record (vcf.model._Record):
Returns:
(pd.DataFrame): record_df with an additional row of record (SNP) data.
"""
# Alternate allele bases
    if len(record.ALT) == 0:
        alt0, alt1 = np.nan, np.nan
    elif len(record.ALT) == 1:
        alt0, alt1 = record.ALT[0], np.nan
    else:
        alt0, alt1 = record.ALT[0], record.ALT[1]
varIdentifier = pd.Series(record.ID, name="varIdentifier")
df = pd.DataFrame(
data = {"refBase": record.REF, "altAllele0": alt0,
"altAllele1": alt1},
index = varIdentifier)
record_df = record_df.append(df, ignore_index=False)
return record_df | 0904b317e1925743ed9449e1fcb53aaafa2ffc81 | 3,657,427 |
def get_removed_channels_from_file(fn):
"""
Load a list of removed channels from a file.
Raises
------
* NotImplementedError if the file format isn't supported.
Parameters
----------
fn : str
Filename
Returns
-------
to_remove : list of str
List of channels to remove.
"""
assert isinstance(fn, str)
if fn.endswith('.mat'):
# try:
data = loadmat(fn)
# except: for old .mat files in hdf5 format...
assert('CHANNAMES' in data), f"{fn} must contain CHANNAMES!"
assert('CHANACTIVE' in data), f"{fn} must contain CHANACTIVE!"
channel_active = data['CHANACTIVE'].flatten()
channel_names = np.array(
[str(i[0]) for i in data['CHANNAMES'].flatten()],
)
idx = np.argwhere(channel_active == 0).flatten()
return channel_names[idx].tolist()
else:
raise NotImplementedError(f"Cannot load file: {fn}") | ac3cbeb83c7f1305adf343ce26be3f70f8ae48e8 | 3,657,428 |
def invertHomogeneous(M, range_space_homogeneous=False, A_property=None):
""" Return the inverse transformation of a homogeneous matrix.
A homogenous matrix :math:`M` represents the transformation :math:`y = A x + b`
in homogeneous coordinates. More precisely,
    .. math::
M \tilde{x} = \left[ \begin{matrix}
A & b \\
\end{matrix} \right]
\left[ \begin{matrix}
x \\
1
\end{matrix} \right]
Its inverse is the homogeneous matrix that represents the transformation
:math:`x = A^{-1} ( y - b )`.
Parameters
----------
M : numpy array of float, shape (num_dims, num_dims + 1) or (num_dims + 1, num_dims + 1)
Matrix representing an affine transformation in homogeneous coordinates.
if ``M.shape == (num_dims + 1, num_dims + 1)``, its last row is :math:`[0 1]`
so that its output is also in homogeneous coordinates.
range_space_homogeneous : bool, optional
If True, the output has an extra row :math:`[ 0 1 ]` appended to the bottom
so that its range space is also expressed in homogeneous coordinates.
A_property : {'diag', 'ortho'}, optional
Special property of the submatrix `A` that could make inversion easier.
If no argument is given, this function just calls `m.np.linalg.pinv`.
Returns
-------
M_inverse : numpy array of float, shape (num_dims, num_dims + 1) or (num_dims + 1, num_dims + 1)
Inverse transformation corresponding to input `M`.
"""
if A_property is None:
invert = m.np.pinv
    elif A_property == 'diag':
        def invert(x):
            return m.np.diag(1 / m.np.diag(x))
elif A_property == 'ortho':
invert = m.np.transpose
else:
err_str = f"Can't parse keyword argument 'A_property={A_property}'"
raise ValueError(err_str)
A, b = fromHomogeneous(M)
A_inverse = invert(A)
b_inverse = -A_inverse @ b
M_inverse = homogeneousMatrix(
A_inverse, b_inverse,
range_space_homogeneous=range_space_homogeneous
)
return M_inverse | ea9039c935c82686291145652f762eb79404e417 | 3,657,429 |
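A standalone numpy sketch of the same identity: the inverse of y = A x + b is x = A^{-1}(y - b), assembled back into homogeneous form:

import numpy as np

A = np.array([[0.0, -1.0], [1.0, 0.0]])       # 90 degree rotation
b = np.array([2.0, 3.0])
A_inv = np.linalg.pinv(A)
M_inv = np.column_stack([A_inv, -A_inv @ b])  # homogeneous inverse, shape (2, 3)
x = np.array([1.0, 1.0])
y = A @ x + b
print(M_inv @ np.append(y, 1.0))              # recovers x -> [1. 1.]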
import string
import os
def rename(level_folder: str) -> int:
"""Rename a custom level folder to the correct name."""
prefix = load_info(level_folder)[PREFIX].strip()
suffix = load_info(level_folder)[SUFFIX].strip()
prefix = prefix.translate(str.maketrans('', '', string.punctuation))
suffix = suffix.translate(str.maketrans('', '', string.punctuation))
new_name = f'{prefix} {CONNECT} {suffix}'.strip()
if new_name != level_folder or FORCED:
os.rename(MAIN_FOLDER + f'/{level_folder}',
MAIN_FOLDER + f'/{new_name}')
print(f"'{level_folder}' is renamed to '{new_name}'.")
return 1
return 0 | 0a35edefd9e27fdbdf1d758142d3cfe8b8907bd4 | 3,657,430 |
import requests
def show_department(department_id):
"""
Returns rendered template to show department with its employees.
:param department_id: department id
:return: rendered template to show department with its employees
"""
url = f'{HOST}api/department/{department_id}'
department = requests.get(url).json()
return render_template('department.html', department=department) | 170318ea40a4f7355fab77f2aeaaad682b9fab2f | 3,657,431 |
def archive_scan():
"""
Returns converted to a dictionary of functions to apply to parameters of archive_scan.py
"""
# Dictionary of default values setter, type converters and other applied functions
d_applied_functions = {
'favor': [bool_converter, favor_default],
'cnn': [bool_converter],
'gpd': [bool_converter],
'model-name': [apply_default_model_name],
'weights': [apply_default_weights],
'features-number': [int_converter],
'waveform-duration': [float_converter],
'start': [utc_datetime_converter, start_date_default],
'end': [utc_datetime_converter, end_date_default],
'database': [database_filler],
'threshold': [threshold_converter],
'batch-size': [int_converter],
'frequency': [float_converter],
'trace-size': [float_converter, trace_size_converter],
'shift': [int_converter],
'generate-s-files': [string_trimmer],
'detections-for-event': [int_converter],
'generate-waveforms': [string_trimmer],
'register-events': [string_trimmer],
'no-filter': [bool_converter],
'no-detrend': [bool_converter],
'trace-normalization': [bool_converter],
'wavetool-waveforms': [bool_converter],
'detection-stations': [bool_converter],
'plot-positives': [bool_converter],
'silence-wavetool': [bool_converter],
'plot-positives-original': [bool_converter],
'print-scores': [bool_converter],
'print-precision': [int_converter],
'combine-events-range': [float_converter],
'time': [bool_converter],
'cpu': [bool_converter],
'print-files': [bool_converter],
'channel-order': [channel_order_converter],
'print-params': [bool_converter],
}
return d_applied_functions | 71aa3d2c17e880a152529de09b0614dfd619e7da | 3,657,432 |
def esOperador(o):
""""retorna true si 'o' es un operador"""
return o == "+" or o == "-" or o == "/" or o == "*" | 7e1088b641dee7cad2594159c4a34cf979362458 | 3,657,433 |
def valid_identity(identity):
"""Determines whether or not the provided identity is a valid value."""
valid = (identity == "homer") or (identity == "sherlock")
return valid | 9865d19802b596d1d5fdce6ff8d236678da29ee6 | 3,657,434 |
def is_align_flow(*args):
"""
is_align_flow(ea) -> bool
"""
return _ida_nalt.is_align_flow(*args) | 40aa1fb7d86083bc3ace94c6913eb9b4b5ab200e | 3,657,435 |
import time
def avro_rdd(ctx, sqlContext, hdir, date=None, verbose=None):
"""
Parse avro-snappy files on HDFS
:returns: a Spark RDD object
"""
    if date is None:
date = time.strftime("year=%Y/month=%-m/day=%-d", time.gmtime(time.time()-60*60*24))
path = '%s/%s' % (hdir, date)
elif len(str(date)) == 8: # YYYYMMDD
ddd = dt.strptime(str(date), "%Y%m%d")
date = time.strftime("year=%Y/month=%-m/day=%-d", ddd.utctimetuple())
path = '%s/%s' % (hdir, date)
else:
path = hdir
if date:
path = '%s/%s' % (hdir, date)
print("### hdir", path, type(path))
if isinstance(path, list):
afiles = path
else:
# get avro files from HDFS
afiles = avro_files(path, verbose=verbose)
print("### avro_files", afiles)
# define newAPIHadoopFile parameters, java classes
aformat="org.apache.avro.mapreduce.AvroKeyInputFormat"
akey="org.apache.avro.mapred.AvroKey"
awrite="org.apache.hadoop.io.NullWritable"
aconv="org.apache.spark.examples.pythonconverters.AvroWrapperToJavaConverter"
rdd = []
# load data from HDFS
if len(afiles) == 0:
rdd = ctx.emptyRDD()
else:
rdd = ctx.union([ctx.newAPIHadoopFile(f, aformat, akey, awrite, aconv) for f in afiles])
# the records are stored as [(dict, None), (dict, None)], therefore we take first element
# and assign them to new rdd
avro_rdd = rdd.map(lambda x: x[0])
records = avro_rdd.take(1) # take function will return list of records
if verbose:
print("### avro records", records, type(records))
return avro_rdd | caa923e4b6186e106a59764cbb61f908858acd70 | 3,657,436 |
import random
def generate_gesture_trace(position):
"""
生成手势验证码轨迹
:param position:
:return:
"""
x = []
y = []
for i in position:
x.append(int(i.split(',')[0]))
y.append(int(i.split(',')[1]))
trace_x = []
trace_y = []
for _ in range(0, 2):
tepx = [x[_], x[_ + 1], x[_ + 2]]
tepy = [y[_], y[_ + 1], y[_ + 2]]
[a, b, c] = get_func(tepx, tepy)
if _ == 0:
for i in range(x[0], x[1]):
trace_x.append(i)
trace_y.append(a * i * i + b * i + c)
for i in range(x[1], x[2]):
trace_x.append(i)
if random.randint(1, 5) == 1:
trace_y.append((((float)(y[2] - y[1])) / (x[2] - x[1])) * (i - x[1]) + y[1] + random.randint(-1, 1))
else:
trace_y.append((((float)(y[2] - y[1])) / (x[2] - x[1])) * (i - x[1]) + y[1])
else:
for i in range(x[2], x[3]):
trace_x.append(i)
trace_y.append(a * i * i + b * i + c)
trace_x = [int(i) for i in trace_x]
trace_y = [int(i) for i in trace_y]
last_trace_x = []
last_trace_y = []
plot_line(trace_x, trace_y, [0, 280], [0, 158])
xx = 0
while xx < len(trace_x) - 1:
last_trace_x.append(trace_x[xx])
last_trace_y.append(trace_y[xx])
xx += random.randint(1, 4)
last_trace_x.append(trace_x[-1])
last_trace_y.append(trace_y[-1])
timestamp_list = []
timestamp = random.randint(180, 220)
for i in range(len(last_trace_x)):
t = random.randint(5, 10)
timestamp += t
timestamp_list.append(timestamp)
i += 1
trace = [{
'p': ','.join([str(last_trace_x[0]), str(last_trace_y[0])]),
't': 1
}]
for i in range(len(last_trace_x)):
trace.append({
'p': ','.join([str(last_trace_x[i]), str(last_trace_y[i])]),
't': timestamp_list[i]
})
trace.append({
'p': ','.join([str(last_trace_x[-1]), str(last_trace_y[-1])]),
't': timestamp_list[-1] + random.randint(50, 100)
})
return x[3] - x[0], trace | 3281cf9e99175190e2855ac98593f67473703c77 | 3,657,437 |
import argparse
def parse_arguments():
"""parse_arguments"""
parser = argparse.ArgumentParser(description="MindSpore Tensorflow weight transfer")
parser.add_argument("--pretrained", default=None, type=str)
parser.add_argument("--name", default="imagenet22k", choices=["imagenet22k",])
args = parser.parse_args()
return args | 08b57fffe7f95a96f19e35839ca137f9382573ba | 3,657,438 |
def mad_daub_noise_est(x, c=0.6744):
""" Estimate the statistical dispersion of the noise with Median Absolute
Deviation on the first order detail coefficients of the 1d-Daubechies
wavelets transform.
"""
try:
_, cD = pywt.wavedec(x, pywt.Wavelet('db3'), level=1)
except ValueError:
cD = pywt.wavedec(x, pywt.Wavelet('db3'), level=0)
return mad(cD, c=c) | 3811d490e344cd4029e5b7f018823ad02c27e3dd | 3,657,439 |
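A minimal usage sketch (assuming mad_daub_noise_est() above is in scope, together with numpy, pywt and the mad() helper it relies on):

import numpy as np

signal = np.sin(np.linspace(0, 10, 2048))
noisy = signal + 0.1 * np.random.default_rng(0).normal(size=2048)
print(mad_daub_noise_est(noisy))  # roughly 0.1, the injected noise level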
import unicodedata
import re
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
From Django's "django/template/defaultfilters.py".
Copied from: https://github.com/django/django/blob/a6b3938afc0204093b5356ade2be30b461a698c5/django/utils/text.py#L394
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value.lower()).strip()
return re.sub(r'[-\s]+', '-', value) | 3fc85ffec7faa3b4df2d1556dfd7b1d7c3e9920e | 3,657,440 |
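A minimal usage sketch (assuming slugify() above is in scope):

print(slugify("Héllo, Wörld!"))                      # 'hello-world'
print(slugify("Héllo, Wörld!", allow_unicode=True))  # 'héllo-wörld'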
import json
def get_categories() -> dict:
""" :return: dictionary with a hirachy of all categories """
with open("../src/categories.json", "r", encoding="utf-8") as f:
return json.load(f) | 90a442840550f3251137b2f9ff8fb5581d8d49e5 | 3,657,441 |
import os
def check_file_integrity(indir, outdir):
""" Parse file in dir and check integrity """
dic_files={}
dic_param={}
dic_integ={}
for f in os.listdir(indir):
path= os.path.join(indir, f)
#if os.path.isdir(path)==True:
# print (str(f) + "is a dir" )
#elif os.path.isfile(path):
if os.path.isfile(path):
#dic_param['size']=Path(path).stat().st_size
dic_param['size']=os.path.getsize(path)
md5hasher = FileHash('md5')
dic_param['md5']= md5hasher.hash_file(path)
dic_files[f]=dic_param
#print( f + " : It is a normal file")
#Reinitialize dict
dic_param={}
#else:
# print(f + "It is a special file (socket, FIFO, device file)" )
#print (dic_files)
return dic_files | 48a8bf3d506925b1b41408c3305002365b38413d | 3,657,442 |
def get_ax(rows=1, cols=1, size=16):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Adjust the size attribute to control how big to render images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
### LOAD VALIDATION SET | 97acc81878076c030287840a0bbacccbde0e50a8 | 3,657,443 |
import argparse
def build_arg_parser():
"""
Builds an argparse object to handle command-line arguments passed in.
"""
parser = argparse.ArgumentParser(description="Loads an ontology file in " +
"OBO file format into a Neo4j graph database.")
parser.add_argument('-i', '--input_obo_file', required=True,
help="The input OBO file")
parser.add_argument('-s', '--neo4j_server_address', required=True,
help="The address to the Neo4j server. Must include port number")
    parser.add_argument('-t', '--typedefs', default="is_a", help="Typedefs " +
        "that are present in this ontology. These will be used to define " +
"the types of relationships supported in the input ontology")
parser.add_argument('-r', '--root_node', required=True, action="append",
default=[], help='DOID\'s for any root nodes in this ontology')
args = parser.parse_args()
return args | 8b9da4a060138b5487f346452c9e3ca85e55b801 | 3,657,444 |
def createSynthModel():
"""Return the modeling mesh, the porosity distribution and the
parametric mesh for inversion.
"""
# Create the synthetic model
world = mt.createCircle(boundaryMarker=-1, segments=64)
tri = mt.createPolygon([[-0.8, -0], [-0.5, -0.7], [0.7, 0.5]],
isClosed=True, area=0.0015)
c1 = mt.createCircle(radius=0.2, pos=[-0.2, 0.5], segments=32,
area=0.0025, marker=3)
c2 = mt.createCircle(radius=0.2, pos=[0.32, -0.3], segments=32,
area=0.0025, marker=3)
poly = mt.mergePLC([world, tri, c1, c2])
poly.addRegionMarker([0.0, 0, 0], 1, area=0.0015)
poly.addRegionMarker([-0.9, 0, 0], 2, area=0.0015)
c = mt.createCircle(radius=0.99, segments=16, start=np.pi, end=np.pi*3)
[poly.createNode(p.pos(), -99) for p in c.nodes()]
mesh = pg.meshtools.createMesh(poly, q=34.4, smooth=[1, 10])
mesh.scale(1.0/5.0)
mesh.rotate([0., 0., 3.1415/3])
mesh.rotate([0., 0., 3.1415])
petro = pg.solver.parseArgToArray([[1, 0.9], [2, 0.6], [3, 0.3]],
mesh.cellCount(), mesh)
# Create the parametric mesh that only reflect the domain geometry
world = mt.createCircle(boundaryMarker=-1, segments=32, area=0.0051)
paraMesh = pg.meshtools.createMesh(world, q=34.0, smooth=[1, 10])
paraMesh.scale(1.0/5.0)
return mesh, paraMesh, petro | aa63ce6c8b633530efb17add4d902da30c62689c | 3,657,445 |
def edits_dir():
"""
Return the directory for the editable files (used by the
website).
"""
return _mkifnotexists("") | eb882c04e3269496a610103908453a73e4a7ae5f | 3,657,446 |
def get_expected(stage, test_block_config, sessions):
"""Get expected responses for each type of request
Though only 1 request can be made, it can cause multiple responses.
Because we need to subcribe to MQTT topics, which might be formatted from
keys from included files, the 'expected'/'response' needs to be formatted
BEFORE running the request.
Args:
stage (dict): test stage
sessions (dict): all available sessions
Returns:
dict: mapping of request type: expected response dict
"""
plugins = load_plugins(test_block_config)
expected = {}
for p in plugins:
if p.plugin.response_block_name in stage:
logger.debug("Getting expected response for %s", p.name)
plugin_expected = p.plugin.get_expected_from_request(stage, test_block_config, sessions[p.name])
expected[p.name] = plugin_expected
return expected | 67c6704b8a70f7eac1301ebcd3caa2bb60e69dba | 3,657,447 |
def convolve_hrf(X, onsets, durations, n_vol, tr, ops=100):
"""
Convolve each X's column iteratively with HRF and align with the timeline of BOLD signal
parameters:
----------
X[array]: [n_event, n_sample]
onsets[array_like]: in sec. size = n_event
durations[array_like]: in sec. size = n_event
n_vol[int]: the number of volumes of BOLD signal
tr[float]: repeat time in second
ops[int]: oversampling number per second
Returns:
---------
X_hrfed[array]: the result after convolution and alignment
"""
assert np.ndim(X) == 2, 'X must be a 2D array'
assert X.shape[0] == len(onsets) and X.shape[0] == len(durations), \
'The length of onsets and durations should be matched with the number of events.'
assert ops in (10, 100, 1000), 'Oversampling rate must be one of the (10, 100, 1000)!'
# unify the precision
decimals = int(np.log10(ops))
onsets = np.round(np.asarray(onsets), decimals=decimals)
durations = np.round(np.asarray(durations), decimals=decimals)
tr = np.round(tr, decimals=decimals)
n_clipped = 0 # the number of clipped time points earlier than the start point of response
onset_min = onsets.min()
if onset_min > 0:
# The earliest event's onset is later than the start point of response.
# We supplement it with zero-value event to align with the response.
X = np.insert(X, 0, np.zeros(X.shape[1]), 0)
onsets = np.insert(onsets, 0, 0, 0)
durations = np.insert(durations, 0, onset_min, 0)
onset_min = 0
elif onset_min < 0:
print("The earliest event's onset is earlier than the start point of response.\n"
"We clip the earlier time points after hrf_convolution to align with the response.")
n_clipped = int(-onset_min * ops)
# do convolution in batches for trade-off between speed and memory
batch_size = int(100000 / ops)
bat_indices = np.arange(0, X.shape[-1], batch_size)
bat_indices = np.r_[bat_indices, X.shape[-1]]
vol_t = (np.arange(n_vol) * tr * ops).astype(int) # compute volume acquisition timing
n_time_point = int(((onsets + durations).max()-onset_min) * ops)
X_hrfed = np.zeros([n_vol, 0])
for idx, bat_idx in enumerate(bat_indices[:-1]):
X_bat = X[:, bat_idx:bat_indices[idx+1]]
# generate X raw time course
X_tc = np.zeros((n_time_point, X_bat.shape[-1]), dtype=np.float32)
for i, onset in enumerate(onsets):
onset_start = int(onset * ops)
onset_end = int(onset_start + durations[i] * ops)
X_tc[onset_start:onset_end, :] = X_bat[i, :]
# generate hrf kernel
hrf = spm_hrf(tr, oversampling=tr*ops)
hrf = hrf[:, np.newaxis]
# convolve X raw time course with hrf kernal
X_tc_hrfed = convolve(X_tc, hrf, method='fft')
X_tc_hrfed = X_tc_hrfed[n_clipped:, :]
# downsample to volume timing
X_hrfed = np.c_[X_hrfed, X_tc_hrfed[vol_t, :]]
print('hrf convolution: sample {0} to {1} finished'.format(bat_idx+1, bat_indices[idx+1]))
return X_hrfed | d035b47ffafe0ac3d7e1446d4d36dc2f707363bd | 3,657,448 |
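A minimal usage sketch (assuming convolve_hrf() above is in scope together with its dependencies: numpy, scipy.signal's convolve and an spm_hrf() implementation such as nilearn's):

import numpy as np

X = np.array([[1.0], [1.0], [1.0]])   # one regressor with three events
onsets = [0.0, 20.0, 40.0]            # seconds
durations = [2.0, 2.0, 2.0]           # seconds
X_hrfed = convolve_hrf(X, onsets, durations, n_vol=30, tr=2.0, ops=100)
print(X_hrfed.shape)                  # (30, 1), aligned to the 30 volumes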
def flatten(x, params):
"""
Plain ol' 2D flatten
:param x: input tensor
:param params: {dict} hyperparams (sub-selection)
:return: output tensor
"""
return layers.Flatten()(x) | 6db829641681ab48f75b23894f9a4a3250250cec | 3,657,449 |
def xml_unescape(text):
""" Do the inverse of `xml_escape`.
Parameters
----------
text: str
The text to be escaped.
Returns
-------
escaped_text: str
"""
return unescape(text, xml_unescape_table) | 2e53d8bc617ad70fd22bb5dd82cd34db366b80a4 | 3,657,450 |
def tseb_pt(T_air, T_rad, u, p, z, Rs_1, Rs24, vza, zs,
aleafv, aleafn, aleafl, adeadv, adeadn, adeadl,
albedo, ndvi, lai, clump, hc, time, t_rise, t_end,
leaf_width, a_PT_in=1.32, iterations=35):
"""Priestley-Taylor TSEB
Calculates the Priestley Taylor TSEB fluxes using a single observation of
composite radiometric temperature and using resistances in series.
Parameters
----------
T_air : ee.Image
Air temperature (Kelvin).
T_rad : ee.Image
Radiometric composite temperature (Kelvin).
u : ee.Image
Wind speed above the canopy (m s-1).
p : ee.Image
Atmospheric pressure (kPa)
z : ee.Image
Elevation (m)
Rs_1 : ee.Image
Overpass insolation (w m-2)
Rs24 : ee.Image
Daily insolation (w m-2)
vza : float
View Zenith Angle (radians).
zs : ee.Image
Solar Zenith Angle (radians).
aleafv : ee.Image
aleafn : ee.Image
aleafl : ee.Image
adeadv : ee.Image
adeadn : ee.Image
adeadl : ee.Image
albedo : ee.Image
ndvi : ee.Image
Normalized Difference Vegetation Index
lai : ee.Image
Effective Leaf Area Index (m2 m-2).
clump : ee.Image
hc : ee.Image
Canopy height (m).
time
t_rise : ee.Image
t_end : ee.Image
leaf_width : ee.Image
Average/effective leaf width (m)
a_PT_in : float, optional
Priestley Taylor coefficient for canopy potential transpiration
(the default is 1.32).
iterations: int, optional
Number of iterations of main calculation
(the default is 35)
Returns
-------
ET : ee.Image
Evapotranspiration (mm).
References
----------
.. [Norman1995] J.M. Norman, W.P. Kustas, & K.S. Humes (1995),
Source approach for estimating soil and vegetation energy fluxes in
observations of directional radiometric surface temperature,
Agricultural and Forest Meteorology,
Volume 77, Issues 3-4, Pages 263-293,
http://dx.doi.org/10.1016/0168-1923(95)02265-Y.
.. [Kustas1999] W.P. Kustas, & J.M. Norman (1999), Evaluation of soil
and vegetation heat flux predictions using a simple two-source
model with radiometric temperatures for partial canopy cover,
Agricultural and Forest Meteorology, Volume 94, Issue 1, Pages 13-29,
http://dx.doi.org/10.1016/S0168-1923(99)00005-2.
"""
# print('\nINPUTS')
# print('T_rad: {:20.14f}'.format(float(utils.image_value(T_rad).values()[0])))
# print('T_air: {:20.14f}'.format(float(utils.image_value(T_air).values()[0])))
# print('u: {:20.14f}'.format(float(utils.image_value(u).values()[0])))
# print('Rs_1: {:20.14f}'.format(float(utils.image_value(Rs_1).values()[0])))
# print('Rs24: {:20.14f}'.format(float(utils.image_value(Rs24).values()[0])))
# # print('vza: {:20.14f}'.format(float(utils.image_value(vza).values()[0])))
# print('zs: {:20.14f}'.format(float(utils.image_value(zs).values()[0])))
# print('albedo: {:20.14f}'.format(float(utils.image_value(albedo).values()[0])))
# print('ndvi: {:20.14f}'.format(float(utils.image_value(ndvi).values()[0])))
# print('lai: {:20.14f}'.format(float(utils.image_value(lai).values()[0])))
# print('clump: {:20.14f}'.format(float(utils.image_value(clump).values()[0])))
# print('hc: {:20.14f}'.format(float(utils.image_value(hc).values()[0])))
# print('time: {:20.14f}'.format(float(utils.image_value(time).values()[0])))
# print('t_rise: {:20.14f}'.format(float(utils.image_value(t_rise).values()[0])))
# print('t_end: {:20.14f}'.format(float(utils.image_value(t_end).values()[0])))
# ************************************************************************
# Correct Clumping Factor
f_green = 1.
# LAI for leaf spherical distribution
F = lai.expression('lai * clump', {'lai': lai, 'clump': clump})
# Fraction cover at nadir (view=0)
fc = F.expression('1.0 - exp(-0.5 * F)', {'F': F}) \
.clamp(0.01, 0.9)
# LAI relative to canopy projection only
lai_c = lai.expression('lai / fc', {'lai': lai, 'fc': fc})
# Houborg modification (according to Anderson et al. 2005)
fc_q = lai \
.expression('1 - (exp(-0.5 * F / cos(vza)))', {'F': F, 'vza': vza}) \
.clamp(0.05, 0.90)
# Brutsaert (1982)
z0m = hc.expression('hc * 0.123', {'hc': hc})
# CGM - add(0) is to mimic numpy copy, check if needed
z0h = z0m.add(0)
d_0 = hc.expression('hc * (2.0 / 3.0)', {'hc': hc})
# Correction of roughness parameters for bare soils (F < 0.1)
d_0 = d_0.where(F.lte(0.1), 0.00001)
z0m = z0m.where(F.lte(0.1), 0.01)
z0h = z0h.where(F.lte(0.1), 0.0001)
# Correction of roughness parameters for water bodies
# (NDVI < 0 and albedo < 0.05)
water_mask = ndvi.lte(0).And(albedo.lte(0.05))
d_0 = d_0.where(water_mask, 0.00001)
z0m = z0m.where(water_mask, 0.00035)
z0h = z0h.where(water_mask, 0.00035)
# Check to avoid division by 0 in the next computations
z0h = z0h.where(z0h.eq(0), 0.001)
z0m = z0m.where(z0m.eq(0), 0.01)
# DEADBEEF
# z_u = ee.Number(50.0)
# z_t = ee.Number(50.0)
z_u = ee.Image.constant(50.0)
z_t = ee.Image.constant(50.0)
# z_u = lai.multiply(0).add(50)
# z_t = lai.multiply(0).add(50)
# Parameters for In-Canopy Wind Speed Extinction
leaf = lai.expression(
'(0.28 * (F ** (0.66667)) * (hc ** (0.33333)) * '
'(leaf_width ** (-0.33333)))',
{'F': F, 'hc': hc, 'leaf_width': leaf_width})
leaf_c = lai.expression(
'(0.28 * (lai_c ** (0.66667)) * (hc ** (0.33333)) * '
'(leaf_width ** (-0.33333)))',
{'lai_c': lai_c, 'hc': hc, 'leaf_width': leaf_width})
leaf_s = lai.expression(
'(0.28 * (0.1 ** (0.66667)) * (hc ** (0.33333)) * '
'(leaf_width ** (-0.33333)))',
{'hc': hc, 'leaf_width': leaf_width})
# ************************************************************************
# Atmospheric Parameters
# Saturation vapour pressure [kPa] (FAO56 3-8)
e_s = T_air.expression(
'0.6108 * exp((17.27 * (T_air - 273.16)) / ((T_air - 273.16) + 237.3))',
{'T_air': T_air})
# Slope of the saturation vapor pressure [kPa] (FAO56 3-9)
Ss = T_air.expression(
'4098. * e_s / (((T_air - 273.16) + 237.3) ** 2)',
{'e_s': e_s, 'T_air': T_air})
# Latent heat of vaporization (~2.45 at 20 C) [MJ kg-1] (FAO56 3-1)
lambda1 = T_air.expression(
'(2.501 - (2.361e-3 * (T_air - 273.16)))',
{'T_air': T_air})
# Psychrometric constant [kPa C-1] (FAO56 3-10)
g = p.expression('1.615E-3 * p / lambda1', {'p': p, 'lambda1': lambda1})
# ************************************************************************
# Initialization of
a_PT = albedo.multiply(0).add(a_PT_in)
# a_PT = ee.Image.constant(a_PT_in)
# a_PT = mask.multiply(a_PT)
# CGM - This was also being computed inside albedo_separation function below
# Commented out from here for now.
# e_atm = T_air.expression(
# '1.0 - (0.2811 * (exp(-0.0003523 * ((T_air - 273.16) ** 2))))',
# {'T_air': T_air})
Rs_c, Rs_s, albedo_c, albedo_s = tseb_utils.albedo_separation(
albedo, Rs_1, F, fc, aleafv, aleafn, aleafl, adeadv, adeadn, adeadl, zs)
# CGM - Moved emissivity calculation to separate function.
# I removed the Rs0 check.
e_atm = tseb_utils.emissivity(T_air)
# p = T_air.expression(
# '101.3 * (((T_air - (0.0065 * z)) / T_air) ** 5.26)',
# {'T_air': T_air, 'z': z})
# Density of air? (kg m-3)
r_air = T_air.expression(
'101.3 * (((T_air - (0.0065 * z)) / T_air) ** 5.26) / 1.01 / T_air / 0.287',
{'T_air': T_air, 'z': z})
cp = ee.Number(1004.16)
# cp = ee.Image.constant(1004.16)
# Assume neutral conditions on first iteration (use T_air for Ts and Tc)
# CGM - Using lai for F to match Python code
u_attr = tseb_utils.compute_u_attr(
u=u, d0=d_0, z0m=z0m, z_u=z_u, fm=0)
r_ah = tseb_utils.compute_r_ah(
u_attr=u_attr, d0=d_0, z0h=z0h, z_t=z_t, fh=0)
# CGM - Why is this function is passing "lai" to "F"?
r_s = tseb_utils.compute_r_s(
u_attr=u_attr, T_s=T_air, T_c=T_air, hc=hc, F=lai, d0=d_0, z0m=z0m,
leaf=leaf, leaf_s=leaf_s, fm_h=0)
r_x = tseb_utils.compute_r_x(
u_attr=u_attr, hc=hc, F=lai, d0=d_0, z0m=z0m, xl=leaf_width,
leaf_c=leaf_c, fm_h=0)
# r_ah, r_s, r_x, u_attr = tseb_utils.compute_resistance(
# u, T_air, T_air, hc, lai, d_0, z0m, z0h, z_u, z_t, leaf_width, leaf,
# leaf_s, leaf_c, 0, 0, 0)
T_c = T_air
# DEADBEEF - In IDL, this calculation is in C, not K?
T_s = lai.expression(
'((T_rad - 273.16) - (fc_q * (T_c - 273.16))) / (1 - fc_q) + 273.16',
{'T_rad': T_rad, 'T_c': T_c, 'fc_q': fc_q})
# T_s = lai.expression(
# '(T_rad - (fc_q * T_c)) / (1 - fc_q)',
# {'T_rad': T_rad, 'T_c': T_c, 'fc_q': fc_q})
# CGM - Initialize to match T_air shape
# This doesn't seem to do anything, commenting out for now
# H_iter = T_air.multiply(0).add(200.16)
EF_s = T_air.multiply(0)
# print('\nF: {:20.14f}'.format(float(utils.image_value(F).values()[0])))
# print('fc: {:20.14f}'.format(float(utils.image_value(fc).values()[0])))
# print('lai_c: {:20.14f}'.format(float(utils.image_value(lai_c).values()[0])))
# print('fc_q: {:20.14f}'.format(float(utils.image_value(fc_q).values()[0])))
# print('z0h: {:20.14f}'.format(float(utils.image_value(z0h).values()[0])))
# print('z0m: {:20.14f}'.format(float(utils.image_value(z0m).values()[0])))
# print('leaf: {:20.14f}'.format(float(utils.image_value(leaf).values()[0])))
# print('leaf_c: {:20.14f}'.format(float(utils.image_value(leaf_c).values()[0])))
# print('leaf_s: {:20.14f}'.format(float(utils.image_value(leaf_s).values()[0])))
# print('e_s: {:20.14f}'.format(float(utils.image_value(e_s).values()[0])))
# print('Ss: {:20.14f}'.format(float(utils.image_value(Ss).values()[0])))
# print('lambda1: {:20.14f}'.format(float(utils.image_value(lambda1).values()[0])))
# print('p: {:20.14f}'.format(float(utils.image_value(p).values()[0])))
# print('z: {:20.14f}'.format(float(utils.image_value(z).values()[0])))
# print('g: {:20.14f}'.format(float(utils.image_value(g).values()[0])))
# print('a_PT: {:20.14f}'.format(float(utils.image_value(a_PT).values()[0])))
# print('Rs_c: {:20.14f}'.format(float(utils.image_value(Rs_c).values()[0])))
# print('Rs_s: {:20.14f}'.format(float(utils.image_value(Rs_s).values()[0])))
# print('albedo_c: {:20.14f}'.format(float(utils.image_value(albedo_c).values()[0])))
# print('albedo_s: {:20.14f}'.format(float(utils.image_value(albedo_s).values()[0])))
# print('e_atm: {:20.14f}'.format(float(utils.image_value(e_atm).values()[0])))
# print('r_air: {:20.14f}'.format(float(utils.image_value(r_air).values()[0])))
# print('cp: {:20.14f}'.format(float(cp.getInfo())))
# print('d_0: {:20.14f}'.format(float(utils.image_value(d_0).values()[0])))
# print('z0m: {:20.14f}'.format(float(utils.image_value(z0m).values()[0])))
# print('z0h: {:20.14f}'.format(float(utils.image_value(z0h).values()[0])))
# print('u_attr: {:20.14f}'.format(float(utils.image_value(u_attr).values()[0])))
# print('r_ah: {:20.14f}'.format(float(utils.image_value(r_ah).values()[0])))
# print('r_s: {:20.14f}'.format(float(utils.image_value(r_s).values()[0])))
# print('r_x: {:20.14f}'.format(float(utils.image_value(r_x).values()[0])))
# print('T_c: {:20.14f}'.format(float(utils.image_value(T_c).values()[0])))
# print('T_s: {:20.14f}'.format(float(utils.image_value(T_s).values()[0])))
# print('EF_s: {:20.14f}'.format(float(utils.image_value(EF_s).values()[0])))
# print('Iterations: {}'.format(iterations))
# ************************************************************************
# Start Loop for Stability Correction and Water Stress
def iter_func(n, prev):
# Extract inputs from previous iteration
a_PT_iter = ee.Image(ee.Dictionary(prev).get('a_PT'))
EF_s_iter = ee.Image(ee.Dictionary(prev).get('EF_s'))
r_ah_iter = ee.Image(ee.Dictionary(prev).get('r_ah'))
r_s_iter = ee.Image(ee.Dictionary(prev).get('r_s'))
r_x_iter = ee.Image(ee.Dictionary(prev).get('r_x'))
T_c_iter = ee.Image(ee.Dictionary(prev).get('T_c'))
T_s_iter = ee.Image(ee.Dictionary(prev).get('T_s'))
u_attr_iter = ee.Image(ee.Dictionary(prev).get('u_attr'))
Rn_c = tseb_utils.compute_Rn_c(
albedo_c, T_air, T_c_iter, T_s_iter, e_atm, Rs_c, F)
Rn_s = tseb_utils.compute_Rn_s(
albedo_s, T_air, T_c_iter, T_s_iter, e_atm, Rs_s, F)
Rn = Rn_c.add(Rn_s)
# Rn_s, Rn_c, Rn = tseb_utils.compute_Rn(
# albedo_c, albedo_s, T_air, T_c_iter, T_s_iter, e_atm, Rs_c, Rs_s, F)
G = tseb_utils.compute_G0(
Rn, Rn_s, albedo, ndvi, t_rise, t_end, time, EF_s_iter)
LE_c = albedo \
.expression(
'f_green * (a_PT * Ss / (Ss + g)) * Rn_c',
{'f_green': f_green, 'a_PT': a_PT_iter, 'Ss': Ss, 'g': g,
'Rn_c': Rn_c}) \
.max(0)
H_c = albedo.expression(
'Rn_c - LE_c', {'Rn_c': Rn_c, 'LE_c': LE_c})
T_c_iter = tseb_utils.temp_separation_tc(
H_c, fc_q, T_air, T_rad, r_ah_iter, r_s_iter, r_x_iter, r_air, cp)
T_s_iter = tseb_utils.temp_separation_ts(T_c_iter, fc_q, T_air, T_rad)
T_ac = tseb_utils.temp_separation_tac(
T_c_iter, T_s_iter, fc_q, T_air, r_ah_iter, r_s_iter, r_x_iter)
# T_c_iter, T_s_iter, T_ac = tseb_utils.temp_separation(
# H_c, fc_q, T_air, T_rad, r_ah_iter, r_s_iter, r_x_iter, r_air, cp)
H_s = albedo.expression(
'r_air * cp * (T_s - T_ac) / r_s',
{'r_air': r_air, 'cp': cp, 'T_s': T_s_iter, 'T_ac': T_ac, 'r_s': r_s_iter})
H_c = albedo.expression(
'r_air * cp * (T_c - T_ac) / r_x',
{'r_air': r_air, 'cp': cp, 'T_c': T_c_iter, 'T_ac': T_ac, 'r_x': r_x_iter})
H = albedo.expression('H_s + H_c', {'H_s': H_s, 'H_c': H_c})
LE_s = albedo.expression(
'Rn_s - G - H_s', {'Rn_s': Rn_s, 'G': G, 'H_s': H_s})
LE_c = albedo.expression('Rn_c - H_c', {'Rn_c': Rn_c, 'H_c': H_c})
# CGM - Is there a reason this isn't up with the H calculation?
H = H.where(H.eq(0), 10.0)
# CGM - This wont doing anything at this position in the code.
# Commenting out for now.
# r_ah_iter = r_ah_iter.where(r_ah_iter.eq(0), 10.0)
# CGM - This doesn't seem to do anything, commenting out for now
# mask_iter = H_iter.divide(H).lte(1.05).And(H_iter.divide(H).gte(0.95))
# chk_iter = np.sum(mask_iter) / np.size(mask_iter)
fh = tseb_utils.compute_stability_fh(
H, T_rad, u_attr_iter, r_air, z_t, d_0, cp)
fm = tseb_utils.compute_stability_fm(
H, T_rad, u_attr_iter, r_air, z_u, d_0, z0m, cp)
fm_h = tseb_utils.compute_stability_fm_h(
H, T_rad, u_attr_iter, r_air, hc, d_0, z0m, cp)
# CGM - z0h is not used in this function, should it be?
# fm, fh, fm_h = tseb_utils.compute_stability(
# H, T_rad, r_air, cp, u_attr, z_u, z_t, hc, d_0, z0m, z0h)
u_attr_iter = tseb_utils.compute_u_attr(
u=u, d0=d_0, z0m=z0m, z_u=z_u, fm=fm)
r_ah_iter = tseb_utils.compute_r_ah(
u_attr=u_attr_iter, d0=d_0, z0h=z0h, z_t=z_t, fh=fh)
r_s_iter = tseb_utils.compute_r_s(
u_attr=u_attr_iter, T_s=T_s_iter, T_c=T_c_iter, hc=hc, F=lai,
d0=d_0, z0m=z0m, leaf=leaf, leaf_s=leaf_s, fm_h=fm_h)
        # CGM - Why is this function passing "lai" to "F"?
r_x_iter = tseb_utils.compute_r_x(
u_attr=u_attr_iter, hc=hc, F=lai, d0=d_0, z0m=z0m, xl=leaf_width,
leaf_c=leaf_c, fm_h=fm_h)
# r_ah_iter, r_s_iter, r_x_iter, u_attr_iter = tseb_utils.compute_resistance(
# u, T_s_iter, T_c_iter, hc, lai, d_0, z0m, z0h, z_u, z_t,
# leaf_width, leaf, leaf_s, leaf_c, fm, fh, fm_h)
a_PT_iter = a_PT_iter \
.where(LE_s.lte(0), a_PT_iter.subtract(0.05)) \
.where(a_PT_iter.lte(0), 0.01)
den_s = albedo.expression('Rn_s - G', {'Rn_s': Rn_s, 'G': G})
den_s = den_s.updateMask(den_s.neq(0))
# den_s[den_s == 0.] = np.nan
EF_s_iter = albedo.expression(
'LE_s / den_s', {'LE_s': LE_s, 'den_s': den_s})
return ee.Dictionary({
'a_PT': a_PT_iter, 'EF_s': EF_s_iter, 'G': G,
'H_c': H_c, 'H_s': H_s, 'LE_c': LE_c, 'LE_s': LE_s,
'Rn_c': Rn_c, 'Rn_s': Rn_s,
'r_ah': r_ah_iter, 'r_s': r_s_iter, 'r_x': r_x_iter,
'T_ac': T_ac, 'T_c': T_c_iter, 'T_s': T_s_iter,
'u_attr': u_attr_iter})
# Iterate the function n times
# CGM - Iteration count is an input to the function
input_images = ee.Dictionary({
'a_PT': a_PT, 'EF_s': EF_s, 'G': ee.Image(0),
'H_c': ee.Image(0), 'H_s': ee.Image(0),
'LE_c': ee.Image(0), 'LE_s': ee.Image(0),
'Rn_c': ee.Image(0), 'Rn_s': ee.Image(0),
'r_ah': r_ah, 'r_s': r_s, 'r_x': r_x,
'T_ac': ee.Image(0), 'T_c': T_c, 'T_s': T_s, 'u_attr': u_attr
})
iter_output = ee.Dictionary(
ee.List.sequence(1, iterations).iterate(iter_func, input_images))
# Unpack the iteration output
a_PT = ee.Image(iter_output.get('a_PT'))
Rn_c = ee.Image(iter_output.get('Rn_c'))
Rn_s = ee.Image(iter_output.get('Rn_s'))
G = ee.Image(iter_output.get('G'))
H_c = ee.Image(iter_output.get('H_c'))
H_s = ee.Image(iter_output.get('H_s'))
LE_c = ee.Image(iter_output.get('LE_c'))
LE_s = ee.Image(iter_output.get('LE_s'))
# T_ac = ee.Image(iter_output.get('T_ac'))
# T_c = ee.Image(iter_output.get('T_c'))
# T_s = ee.Image(iter_output.get('T_s'))
# r_ah = ee.Image(iter_output.get('r_ah'))
# r_s = ee.Image(iter_output.get('r_s'))
# r_x = ee.Image(iter_output.get('r_x'))
# print('\na_PT: {:20.14f}'.format(utils.image_value(a_PT).values()[0]))
# print('Rn_c: {:20.14f}'.format(utils.image_value(Rn_c).values()[0]))
# print('Rn_s: {:20.14f}'.format(utils.image_value(Rn_s).values()[0]))
# print('G: {:20.14f}'.format(utils.image_value(G).values()[0]))
# print('H_c: {:20.14f}'.format(utils.image_value(H_c).values()[0]))
# print('H_s: {:20.14f}'.format(utils.image_value(H_s).values()[0]))
# print('LE_c: {:20.14f}'.format(utils.image_value(LE_c).values()[0]))
# print('LE_s: {:20.14f}'.format(utils.image_value(LE_s).values()[0]))
# print('r_ah: {:20.14f}'.format(utils.image_value(r_ah).values()[0]))
# print('r_s: {:20.14f}'.format(utils.image_value(r_s).values()[0]))
# print('r_x: {:20.14f}'.format(utils.image_value(r_x).values()[0]))
# print('T_ac: {:20.14f}'.format(utils.image_value(T_ac).values()[0]))
# print('T_c: {:20.14f}'.format(utils.image_value(T_c).values()[0]))
# print('T_s: {:20.14f}'.format(utils.image_value(T_s).values()[0]))
# ************************************************************************
# Check Energy Balance Closure
ind = a_PT.lte(0.01)
LE_s = LE_s.where(ind, 1.0)
LE_c = LE_c.where(ind, 1.0)
G = G.where(ind, Rn_s.subtract(H_s))
ind = LE_s.gt(Rn_s)
LE_s = LE_s.where(ind, Rn_s)
H_s = H_s.where(ind, Rn_s.subtract(G).subtract(LE_s))
# CGM - Check order of operations
ind = LE_c.gt(Rn_c.add(100))
# CGM - Not used below since LE_c is recomputed
LE_c = LE_c.where(ind, Rn_c.add(100))
H_c = H_c.where(ind, -100)
LE_s = albedo.expression(
'Rn_s - G - H_s', {'Rn_s': Rn_s, 'G': G, 'H_s': H_s})
LE_c = albedo.expression('Rn_c - H_c', {'Rn_c': Rn_c, 'H_c': H_c})
# The latent heat of vaporization is 2.45 MJ kg-1
# Assume Rs24 is still in W m-2 day-1 and convert to MJ kg-1
# CGM - Leaving out scaling value for now
ET = albedo \
.expression(
'((LE_c + LE_s) / Rs_1) * (Rs24 / 2.45) * scaling',
{'LE_c': LE_c, 'LE_s': LE_s, 'Rs_1': Rs_1,
'Rs24': Rs24.multiply(0.0864 / 24.0),
'scaling': 1}) \
.max(0.01)
# print('\nRn_c: {:20.14f}'.format(utils.image_value(Rn_c).values()[0]))
# print('Rn_s: {:20.14f}'.format(utils.image_value(Rn_s).values()[0]))
# print('G: {:20.14f}'.format(utils.image_value(G).values()[0]))
# print('H_c: {:20.14f}'.format(utils.image_value(H_c).values()[0]))
# print('H_s: {:20.14f}'.format(utils.image_value(H_s).values()[0]))
# print('LE_c: {:20.14f}'.format(utils.image_value(LE_c).values()[0]))
# print('LE_s: {:20.14f}'.format(utils.image_value(LE_s).values()[0]))
# print('\nET: {:20.14f}'.format(utils.image_value(ET).values()[0]))
return ET | 6851b00f27b1819e79ce7ed625074c37ac35298f | 3,657,451 |
def GetPrivateIpv6GoogleAccessTypeMapper(messages, hidden=False):
"""Returns a mapper from text options to the PrivateIpv6GoogleAccess enum.
Args:
messages: The message module.
hidden: Whether the flag should be hidden in the choice_arg
"""
help_text = """
Sets the type of private access to Google services over IPv6.
PRIVATE_IPV6_GOOGLE_ACCESS_TYPE must be one of:
bidirectional
Allows Google services to initiate connections to GKE pods in this
cluster. This is not intended for common use, and requires previous
integration with Google services.
disabled
Default value. Disables private access to Google services over IPv6.
outbound-only
Allows GKE pods to make fast, secure requests to Google services
over IPv6. This is the most common use of private IPv6 access.
$ gcloud alpha container clusters create \
--private-ipv6-google-access-type=disabled
$ gcloud alpha container clusters create \
--private-ipv6-google-access-type=outbound-only
$ gcloud alpha container clusters create \
--private-ipv6-google-access-type=bidirectional
"""
return arg_utils.ChoiceEnumMapper(
'--private-ipv6-google-access-type',
messages.NetworkConfig.PrivateIpv6GoogleAccessValueValuesEnum,
_GetPrivateIPv6CustomMappings(),
hidden=hidden,
help_str=help_text) | 9aa87977be9d0888d572c70d07535c9ec0b9d8f4 | 3,657,452 |
def calc_director(moi):
""" Calculate the director from a moment of inertia.
The director is the dominant eigenvector of the MOI tensor
Parameters:
-----------
moi : list
3x3 array; MOItensor
Returns:
--------
director : list
3 element list of director vector
"""
w, v = np.linalg.eig(moi)
director = v[:, np.argmin(w)]
return director | 28f8b3446f83759704d426653dc8f7812e71e900 | 3,657,453 |
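# Usage sketch for calc_director above (illustrative only, assumes numpy is
# importable): for a diagonal moment-of-inertia tensor the director is the
# axis with the smallest moment.
import numpy as np

moi = np.diag([1.0, 2.0, 3.0])
director = calc_director(moi)
print(director)  # -> [1. 0. 0.], the eigenvector of the smallest eigenvalue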
from scipy.linalg import solve_triangular
def _solve_upper_triangular(A, b):
""" Solves Ax=b when A is upper triangular. """
return solve_triangular(A, b, lower=False) | 5c33d5d10922172a133a478bdfdcb8cf7cd83120 | 3,657,454 |
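# Minimal check for _solve_upper_triangular above (assumes scipy is available):
# with A upper triangular, back-substitution gives x2 = 6/3 = 2 and x1 = (5 - 2)/2 = 1.5.
import numpy as np

A = np.array([[2.0, 1.0],
              [0.0, 3.0]])
b = np.array([5.0, 6.0])
x = _solve_upper_triangular(A, b)
print(x)  # -> [1.5 2. ]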
def check_create_account_key(key):
"""
Returns the user_id if the reset key is valid (matches a user_id and that
user does not already have an account). Otherwise returns None.
"""
query = sqlalchemy.text("""
SELECT user_id
FROM members
WHERE create_account_key = :k
AND user_id NOT IN (SELECT user_id FROM users)
""")
result = flask.g.db.execute(query, k=key).first()
if result is not None:
return result['user_id']
else:
return None | b02a710d443410b5b60c31a030d056f3282a5747 | 3,657,455 |
import os
def find_gaia_files_hp(nside, pixlist, neighbors=True):
"""Find full paths to Gaia healpix files in a set of HEALPixels.
Parameters
----------
nside : :class:`int`
(NESTED) HEALPixel nside.
pixlist : :class:`list` or `int`
A set of HEALPixels at `nside`.
neighbors : :class:`bool`, optional, defaults to ``True``
Also return files corresponding to all neighbors that touch the
pixels in `pixlist` to prevent edge effects (e.g. a Gaia source
is 1 arcsec outside of `pixlist` and so in an adjacent pixel).
Returns
-------
:class:`list`
A list of all Gaia files that need to be read in to account for
objects in the passed list of pixels.
Notes
-----
- The environment variable $GAIA_DIR must be set.
"""
# ADM the resolution at which the healpix files are stored.
filenside = _get_gaia_nside()
# ADM check that the GAIA_DIR is set and retrieve it.
gaiadir = _get_gaia_dir()
hpxdir = os.path.join(gaiadir, 'healpix')
# ADM work with pixlist as an array.
pixlist = np.atleast_1d(pixlist)
# ADM determine the pixels that touch the passed pixlist.
pixnum = nside2nside(nside, filenside, pixlist)
# ADM if neighbors was sent, then retrieve all pixels that touch each
# ADM pixel covered by the provided locations, to prevent edge effects...
if neighbors:
pixnum = add_hp_neighbors(filenside, pixnum)
# ADM reformat in the Gaia healpix format used by desitarget.
gaiafiles = [os.path.join(hpxdir, 'healpix-{:05d}.fits'.format(pn)) for pn in pixnum]
return gaiafiles | fec2bc3b73df17617802ef88d586a6b895d2af32 | 3,657,456 |
def _crc16(data, start = _CRC16_START) :
"""Compute CRC16 for bytes/bytearray/memoryview data"""
crc = start
for b in data :
crc ^= b << 8
for _ in range(8) :
crc = ((crc << 1) & 0xFFFF) ^ _CRC16_POLY if crc & 0x8000 else (crc << 1)
return crc | e6e33471601d3126ac7873b61e23f843349e8e90 | 3,657,457 |
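# Illustrative check for _crc16 above. The module constants are not shown in this
# snippet, so the values below are assumptions: with CRC-16/CCITT parameters
# (_CRC16_POLY = 0x1021, _CRC16_START = 0xFFFF), defined before the function, the
# well-known check value for b"123456789" is 0x29B1.
_CRC16_POLY = 0x1021   # assumed polynomial
_CRC16_START = 0xFFFF  # assumed initial value
print(hex(_crc16(b"123456789")))  # -> 0x29b1 under these assumed parameters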
import json
def load_json():
"""Load the translation dictionary."""
try:
with open(JSON_FILENAME, "r", encoding="utf8") as file:
known_names = json.load(file)
if "version" in known_names:
if known_names.get("version") < JSON_VERSION:
print("Unkown version: {}, current version: {}".format(
known_names.get("version"), JSON_VERSION))
raise Exception(
"Version mismatch. Backup the file and recreate.")
else:
print("No version number found")
known_names = {}
except FileNotFoundError:
known_names = {}
return known_names | d263411d0c0aae7bba30f92c5af22dd7ff596542 | 3,657,458 |
def get_username() -> str:
"""
Prompts the user to enter a username and then returns it
:return: The username entered by the user
"""
while True:
print("Please enter your username (without spaces)")
username = input().strip()
if ' ' not in username:
return username | 1a18a229908b86c32a0822c068b5b9081cc9fdc3 | 3,657,459 |
def condition(f):
"""
Decorator for conditions
"""
@wraps(f)
def try_execute(*args, **kwargs):
try:
res, m = f(*args, **kwargs)
m.conditions_results.append(res)
return m
except Exception as e:
raise ConditionError(e)
return try_execute | fb05645861c7aa234f894cc8eee3689e1f1293c9 | 3,657,460 |
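# Minimal usage sketch for the condition decorator above (the class and function
# names here are hypothetical, and the decorator's own imports such as
# functools.wraps and ConditionError are assumed to be available): the wrapped
# function must return a (result, measurement) pair, where the measurement object
# carries a conditions_results list.
class Measurement:
    def __init__(self):
        self.conditions_results = []

@condition
def is_positive(value, m):
    return value > 0, m

m = is_positive(5, Measurement())
print(m.conditions_results)  # -> [True]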
def get_spatial_anomalies(
coarse_obs_path, fine_obs_rechunked_path, variable, connection_string
) -> xr.Dataset:
"""Calculate the seasonal cycle (12 timesteps) spatial anomaly associated
with aggregating the fine_obs to a given coarsened scale and then reinterpolating
it back to the original spatial resolution. The outputs of this function are
dependent on three parameters:
* a grid (as opposed to a specific GCM since some GCMs run on the same grid)
* the time period which fine_obs (and by construct coarse_obs) cover
* the variable
Parameters
----------
    coarse_obs_path : str
        Path to the observations coarsened to a GCM resolution (chunked along time).
    fine_obs_rechunked_path : str
        Path to the observations at their original spatial resolution (chunked along time).
    variable : str
        The variable included in the dataset.
    connection_string : str
        Connection string passed through to the regridding step.
Returns
-------
seasonal_cycle_spatial_anomalies : xr.Dataset
Spatial anomaly for each month (i.e. of shape (nlat, nlon, 12))
"""
# interpolate coarse_obs back to the original scale
[coarse_obs, fine_obs_rechunked] = load_paths([coarse_obs_path, fine_obs_rechunked_path])
obs_interpolated, _ = regrid_dataset(
ds=coarse_obs,
ds_path=coarse_obs_path,
target_grid_ds=fine_obs_rechunked.isel(time=0),
variable=variable,
connection_string=connection_string,
)
# use rechunked fine_obs from coarsening step above because that is in map chunks so it
# will play nice with the interpolated obs
schema_maps_chunks.validate(fine_obs_rechunked[variable])
# calculate difference between interpolated obs and the original obs
spatial_anomalies = obs_interpolated - fine_obs_rechunked
# calculate seasonal cycle (12 time points)
seasonal_cycle_spatial_anomalies = spatial_anomalies.groupby("time.month").mean()
return seasonal_cycle_spatial_anomalies | 54dc830e9eb6b7440abf5857141ab369d8d45358 | 3,657,461 |
def get_pip_package_name(provider_package_id: str) -> str:
"""
Returns PIP package name for the package id.
:param provider_package_id: id of the package
:return: the name of pip package
"""
return "apache-airflow-providers-" + provider_package_id.replace(".", "-") | e7aafbdfb0e296e60fedfcf7e4970d750e4f3ffa | 3,657,462 |
import numpy
def func_asymmetry_f_b(z, flag_z: bool = False):
"""Function F_b(z) for asymmetry factor.
"""
f_a , dder_f_a = func_asymmetry_f_a(z, flag_z=flag_z)
res = 2*(2*numpy.square(z)-3)*f_a
dder = {}
if flag_z:
dder["z"] = 8 * z * f_a + 2*(2*numpy.square(z)-3)*dder_f_a["z"]
return res, dder | 5fc157856c379267c12137551f0eb5e6c4ddd3aa | 3,657,463 |
def parse_args():
"""Command-line argument parser for generating scenes."""
# New parser
parser = ArgumentParser(description='Monte Carlo rendering generator')
# Rendering parameters
parser.add_argument('-t', '--tungsten', help='tungsten renderer full path', default='tungsten', type=str)
parser.add_argument('-d', '--scene-path', help='scene root path', type=str)
parser.add_argument('-r', '--resolution', help='image resolution (w, h)', nargs='+', type=int)
parser.add_argument('-s', '--spp', help='sample per pixel', default=16, type=int)
parser.add_argument('-n', '--nb-renders', help='number of renders', default=10, type=int)
parser.add_argument('--hdr-buffers', help='save buffers as hdr images', action='store_true')
parser.add_argument('--hdr-targets', help='save targets as hdr images', action='store_true')
parser.add_argument('-o', '--output-dir', help='output directory', default='../../data/renders', type=str)
return parser.parse_args() | 4fad89d60f5446f9dbd66f4624a43b9436ee97a5 | 3,657,464 |
def unique_id(token_id):
"""Return a unique ID for a token.
The returned value is useful as the primary key of a database table,
memcache store, or other lookup table.
    :returns: Given a PKI token, returns its hashed value. Otherwise, returns
the passed-in value (such as a UUID token ID or an existing
hash).
"""
return cms.cms_hash_token(token_id) | 9526e483f617728b4a9307bd10097c78ec361ad0 | 3,657,465 |
def encode_aval_types(df_param: pd.DataFrame, df_ret: pd.DataFrame, df_var: pd.DataFrame,
df_aval_types: pd.DataFrame):
"""
    It encodes the types of parameters, return values, and variables according to visible type hints
"""
types = df_aval_types['Types'].tolist()
def trans_aval_type(x):
for i, t in enumerate(types):
if x in t:
return i
return len(types) - 1
# If the arg type doesn't exist in top_n available types, we insert n + 1 into the vector as it represents the other type.
df_param['param_aval_enc'] = df_param['arg_type'].progress_apply(trans_aval_type)
df_ret['ret_aval_enc'] = df_ret['return_type'].progress_apply(trans_aval_type)
df_var['var_aval_enc'] = df_var['var_type'].progress_apply(trans_aval_type)
    return df_param, df_ret, df_var | a68ff812f69c264534daf16935d88f528ba35464 | 3,657,466 |
def first(iterable, default=None):
"""
Returns the first item or a default value
>>> first(x for x in [1, 2, 3] if x % 2 == 0)
2
>>> first((x for x in [1, 2, 3] if x > 42), -1)
-1
"""
return next(iter(iterable), default) | 6907e63934967c332eea9cedb5e0ee767a88fe8f | 3,657,467 |
def generate_uuid_from_wf_data(wf_data: np.ndarray, decimals: int = 12) -> str:
"""
Creates a unique identifier from the waveform data, using a hash. Identical arrays
yield identical strings within the same process.
Parameters
----------
wf_data:
The data to generate the unique id for.
decimals:
The number of decimal places to consider.
Returns
-------
:
A unique identifier.
"""
waveform_hash = hash(wf_data.round(decimals=decimals).tobytes())
return str(waveform_hash) | e12a6a8807d68181f0e04bf7446cf5e381cab3f9 | 3,657,468 |
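# Quick illustration for generate_uuid_from_wf_data above: identical waveform data
# yields identical identifiers within the same process.
import numpy as np

wf_a = np.array([0.1, 0.2, 0.3])
wf_b = wf_a.copy()
print(generate_uuid_from_wf_data(wf_a) == generate_uuid_from_wf_data(wf_b))  # -> True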
def aggregate(table, key, aggregation=None, value=None, presorted=False,
buffersize=None, tempdir=None, cache=True):
"""Group rows under the given key then apply aggregation functions.
E.g.::
>>> import petl as etl
>>>
>>> table1 = [['foo', 'bar', 'baz'],
... ['a', 3, True],
... ['a', 7, False],
... ['b', 2, True],
... ['b', 2, False],
... ['b', 9, False],
... ['c', 4, True]]
>>> # aggregate whole rows
... table2 = etl.aggregate(table1, 'foo', len)
>>> table2
+-----+-------+
| foo | value |
+=====+=======+
| 'a' | 2 |
+-----+-------+
| 'b' | 3 |
+-----+-------+
| 'c' | 1 |
+-----+-------+
>>> # aggregate single field
... table3 = etl.aggregate(table1, 'foo', sum, 'bar')
>>> table3
+-----+-------+
| foo | value |
+=====+=======+
| 'a' | 10 |
+-----+-------+
| 'b' | 13 |
+-----+-------+
| 'c' | 4 |
+-----+-------+
>>> # alternative signature using keyword args
... table4 = etl.aggregate(table1, key=('foo', 'bar'),
... aggregation=list, value=('bar', 'baz'))
>>> table4
+-----+-----+-------------------------+
| foo | bar | value |
+=====+=====+=========================+
| 'a' | 3 | [(3, True)] |
+-----+-----+-------------------------+
| 'a' | 7 | [(7, False)] |
+-----+-----+-------------------------+
| 'b' | 2 | [(2, True), (2, False)] |
+-----+-----+-------------------------+
| 'b' | 9 | [(9, False)] |
+-----+-----+-------------------------+
| 'c' | 4 | [(4, True)] |
+-----+-----+-------------------------+
>>> # aggregate multiple fields
... from collections import OrderedDict
>>> import petl as etl
>>>
>>> aggregation = OrderedDict()
>>> aggregation['count'] = len
>>> aggregation['minbar'] = 'bar', min
>>> aggregation['maxbar'] = 'bar', max
>>> aggregation['sumbar'] = 'bar', sum
>>> # default aggregation function is list
... aggregation['listbar'] = 'bar'
>>> aggregation['listbarbaz'] = ('bar', 'baz'), list
>>> aggregation['bars'] = 'bar', etl.strjoin(', ')
>>> table5 = etl.aggregate(table1, 'foo', aggregation)
>>> table5
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
| foo | count | minbar | maxbar | sumbar | listbar | listbarbaz | bars |
+=====+=======+========+========+========+===========+=====================================+===========+
| 'a' | 2 | 3 | 7 | 10 | [3, 7] | [(3, True), (7, False)] | '3, 7' |
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
| 'b' | 3 | 2 | 9 | 13 | [2, 2, 9] | [(2, True), (2, False), (9, False)] | '2, 2, 9' |
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
| 'c' | 1 | 4 | 4 | 4 | [4] | [(4, True)] | '4' |
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
If `presorted` is True, it is assumed that the data are already sorted by
the given key, and the `buffersize`, `tempdir` and `cache` arguments are
ignored. Otherwise, the data are sorted, see also the discussion of the
`buffersize`, `tempdir` and `cache` arguments under the
:func:`petl.transform.sorts.sort` function.
"""
if callable(aggregation):
return SimpleAggregateView(table, key, aggregation=aggregation,
value=value, presorted=presorted,
buffersize=buffersize, tempdir=tempdir,
cache=cache)
elif aggregation is None or isinstance(aggregation, (list, tuple, dict)):
# ignore value arg
return MultiAggregateView(table, key, aggregation=aggregation,
presorted=presorted, buffersize=buffersize,
tempdir=tempdir, cache=cache)
else:
raise ArgumentError('expected aggregation is callable, list, tuple, dict '
'or None') | 22d857001d0dcadaed82a197101125e5ca922e07 | 3,657,469 |
def show_mpls_bypass_lsp_name_extensive_rpc(self, show_lsp_input_info=None, api_timeout=''):
"""
This is an auto-generated method for the PySwitchLib.
**Supported Versions**:
* SLXOS: 17r.1.01a, 17r.2.00, 17s.1.02
**Child Instance Keyword Argument Tuple(s)**:
:type show_lsp_input_info: lsp, lsp_wide, lsp_detail, lsp_extensive, lsp_debug, lsp_name, lsp_name_extensive, lsp_name_debug, bypass_lsp, bypass_lsp_wide, bypass_lsp_detail, bypass_lsp_extensive, bypass_lsp_debug, bypass_lsp_name, bypass_lsp_name_extensive, bypass_lsp_name_debug, bypass_lsp_static, bypass_lsp_static_wide, bypass_lsp_static_detail, bypass_lsp_static_extensive, bypass_lsp_static_debug, bypass_lsp_static_name, bypass_lsp_static_name_extensive, bypass_lsp_static_name_debug, bypass_lsp_dynamic, bypass_lsp_dynamic_wide, bypass_lsp_dynamic_detail, bypass_lsp_dynamic_extensive, bypass_lsp_dynamic_debug, bypass_lsp_dynamic_name, bypass_lsp_dynamic_name_extensive, bypass_lsp_dynamic_name_debug, lsp_input_lsp_name, lsp_input_bypass, lsp_input_dynamic, lsp_input_brief, lsp_input_wide, lsp_input_detail, lsp_input_extensive, lsp_input_debug, lsp_input_one, lsp_input_all, lsp_input_more
:param show_lsp_input_info: Keyword argument tuple.
:type lsp: unicode
:param lsp: **show_lsp_input_info** tuple argument.
:type lsp_wide: YANGBool
:param lsp_wide: **show_lsp_input_info** tuple argument.
:type lsp_detail: YANGBool
:param lsp_detail: **show_lsp_input_info** tuple argument.
:type lsp_extensive: YANGBool
:param lsp_extensive: **show_lsp_input_info** tuple argument.
:type lsp_debug: YANGBool
:param lsp_debug: **show_lsp_input_info** tuple argument.
:type lsp_name: unicode
:param lsp_name: **show_lsp_input_info** tuple argument.
:type lsp_name_extensive: YANGBool
:param lsp_name_extensive: **show_lsp_input_info** tuple argument.
:type lsp_name_debug: YANGBool
:param lsp_name_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp: unicode
:param bypass_lsp: **show_lsp_input_info** tuple argument.
:type bypass_lsp_wide: YANGBool
:param bypass_lsp_wide: **show_lsp_input_info** tuple argument.
:type bypass_lsp_detail: YANGBool
:param bypass_lsp_detail: **show_lsp_input_info** tuple argument.
:type bypass_lsp_extensive: YANGBool
:param bypass_lsp_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_debug: YANGBool
:param bypass_lsp_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp_name: unicode
:param bypass_lsp_name: **show_lsp_input_info** tuple argument.
:type bypass_lsp_name_extensive: YANGBool
:param bypass_lsp_name_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_name_debug: YANGBool
:param bypass_lsp_name_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static: unicode
:param bypass_lsp_static: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_wide: YANGBool
:param bypass_lsp_static_wide: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_detail: YANGBool
:param bypass_lsp_static_detail: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_extensive: YANGBool
:param bypass_lsp_static_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_debug: YANGBool
:param bypass_lsp_static_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_name: unicode
:param bypass_lsp_static_name: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_name_extensive: YANGBool
:param bypass_lsp_static_name_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_static_name_debug: YANGBool
:param bypass_lsp_static_name_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic: unicode
:param bypass_lsp_dynamic: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_wide: YANGBool
:param bypass_lsp_dynamic_wide: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_detail: YANGBool
:param bypass_lsp_dynamic_detail: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_extensive: YANGBool
:param bypass_lsp_dynamic_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_debug: YANGBool
:param bypass_lsp_dynamic_debug: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_name: unicode
:param bypass_lsp_dynamic_name: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_name_extensive: YANGBool
:param bypass_lsp_dynamic_name_extensive: **show_lsp_input_info** tuple argument.
:type bypass_lsp_dynamic_name_debug: YANGBool
:param bypass_lsp_dynamic_name_debug: **show_lsp_input_info** tuple argument.
:type lsp_input_lsp_name: unicode
:param lsp_input_lsp_name: **show_lsp_input_info** tuple argument.
:type lsp_input_bypass: YANGBool
:param lsp_input_bypass: **show_lsp_input_info** tuple argument.
:type lsp_input_dynamic: YANGBool
:param lsp_input_dynamic: **show_lsp_input_info** tuple argument.
:type lsp_input_brief: YANGBool
:param lsp_input_brief: **show_lsp_input_info** tuple argument.
:type lsp_input_wide: YANGBool
:param lsp_input_wide: **show_lsp_input_info** tuple argument.
:type lsp_input_detail: YANGBool
:param lsp_input_detail: **show_lsp_input_info** tuple argument.
:type lsp_input_extensive: YANGBool
:param lsp_input_extensive: **show_lsp_input_info** tuple argument.
:type lsp_input_debug: YANGBool
:param lsp_input_debug: **show_lsp_input_info** tuple argument.
:type lsp_input_one: YANGBool
:param lsp_input_one: **show_lsp_input_info** tuple argument.
:type lsp_input_all: YANGBool
:param lsp_input_all: **show_lsp_input_info** tuple argument.
:type lsp_input_more: YANGBool
:param lsp_input_more: **show_lsp_input_info** tuple argument.
:type api_timeout: long or tuple(long, long)
:param api_timeout: Timeout for connection and response in seconds. If a tuple is specified, then the first value is for the connection timeout and the second value is for the response timeout.
:rtype: (*bool, list*)
:returns: Returns a tuple.
#. **api_success** (*bool*) - The success or failure of the API.
#. **details** (*list*) - List of REST request/response dictionaries, keyed by the asset's ip address.
:raises ConnectionError: If requests module connection or response timeout occurs.
:raises UnsupportedOSError: If firmware version installed on asset is not supported.
:raises RestInterfaceError: If requests module does not get a successful response from the rest URI.
:raises ValueError: If the argument value does not meet type requirements or value restrictions.
"""
operation_type = 'rpc'
compositions_list = []
bindings_list = [('pybind.slxos.v17r_1_01a.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', 'pybind.slxos.v17r_1_01a.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17r_1_01a.brocade_mpls_rpc'), ('pybind.slxos.v17r_2_00.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', 'pybind.slxos.v17r_2_00.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17r_2_00.brocade_mpls_rpc'), ('pybind.slxos.v17s_1_02.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', 'pybind.slxos.v17s_1_02.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17s_1_02.brocade_mpls_rpc')]
composed_child_list = [('pybind.slxos.v17s_1_02.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', u'show_lsp_input_info'), ('pybind.slxos.v17r_1_01a.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', u'show_lsp_input_info'), ('pybind.slxos.v17r_2_00.brocade_mpls_rpc.show_mpls_bypass_lsp_name_extensive.input', u'show_lsp_input_info')]
compositions_keyval_list = []
bindings_keyval = {'kwargs_key_name': '', 'keyval': '', 'extra_keyval': ''}
composed_child_leafval_list = [{'leafval': 'lsp, lsp_wide, lsp_detail, lsp_extensive, lsp_debug, lsp_name, lsp_name_extensive, lsp_name_debug, bypass_lsp, bypass_lsp_wide, bypass_lsp_detail, bypass_lsp_extensive, bypass_lsp_debug, bypass_lsp_name, bypass_lsp_name_extensive, bypass_lsp_name_debug, bypass_lsp_static, bypass_lsp_static_wide, bypass_lsp_static_detail, bypass_lsp_static_extensive, bypass_lsp_static_debug, bypass_lsp_static_name, bypass_lsp_static_name_extensive, bypass_lsp_static_name_debug, bypass_lsp_dynamic, bypass_lsp_dynamic_wide, bypass_lsp_dynamic_detail, bypass_lsp_dynamic_extensive, bypass_lsp_dynamic_debug, bypass_lsp_dynamic_name, bypass_lsp_dynamic_name_extensive, bypass_lsp_dynamic_name_debug, lsp_input_lsp_name, lsp_input_bypass, lsp_input_dynamic, lsp_input_brief, lsp_input_wide, lsp_input_detail, lsp_input_extensive, lsp_input_debug, lsp_input_one, lsp_input_all, lsp_input_more'}, {'leafval': 'lsp, lsp_wide, lsp_detail, lsp_extensive, lsp_debug, lsp_name, lsp_name_extensive, lsp_name_debug, bypass_lsp, bypass_lsp_wide, bypass_lsp_detail, bypass_lsp_extensive, bypass_lsp_debug, bypass_lsp_name, bypass_lsp_name_extensive, bypass_lsp_name_debug, bypass_lsp_static, bypass_lsp_static_wide, bypass_lsp_static_detail, bypass_lsp_static_extensive, bypass_lsp_static_debug, bypass_lsp_static_name, bypass_lsp_static_name_extensive, bypass_lsp_static_name_debug, bypass_lsp_dynamic, bypass_lsp_dynamic_wide, bypass_lsp_dynamic_detail, bypass_lsp_dynamic_extensive, bypass_lsp_dynamic_debug, bypass_lsp_dynamic_name, bypass_lsp_dynamic_name_extensive, bypass_lsp_dynamic_name_debug, lsp_input_lsp_name, lsp_input_bypass, lsp_input_dynamic, lsp_input_brief, lsp_input_wide, lsp_input_detail, lsp_input_extensive, lsp_input_debug, lsp_input_one, lsp_input_all, lsp_input_more'}, {'leafval': 'lsp, lsp_wide, lsp_detail, lsp_extensive, lsp_debug, lsp_name, lsp_name_extensive, lsp_name_debug, bypass_lsp, bypass_lsp_wide, bypass_lsp_detail, bypass_lsp_extensive, bypass_lsp_debug, bypass_lsp_name, bypass_lsp_name_extensive, bypass_lsp_name_debug, bypass_lsp_static, bypass_lsp_static_wide, bypass_lsp_static_detail, bypass_lsp_static_extensive, bypass_lsp_static_debug, bypass_lsp_static_name, bypass_lsp_static_name_extensive, bypass_lsp_static_name_debug, bypass_lsp_dynamic, bypass_lsp_dynamic_wide, bypass_lsp_dynamic_detail, bypass_lsp_dynamic_extensive, bypass_lsp_dynamic_debug, bypass_lsp_dynamic_name, bypass_lsp_dynamic_name_extensive, bypass_lsp_dynamic_name_debug, lsp_input_lsp_name, lsp_input_bypass, lsp_input_dynamic, lsp_input_brief, lsp_input_wide, lsp_input_detail, lsp_input_extensive, lsp_input_debug, lsp_input_one, lsp_input_all, lsp_input_more'}]
leafval_map = {}
rest_leaf_name = ''
choices_kwargs_map = {}
leaf_os_support_map = {}
self._api_validation(choices_kwargs_map=choices_kwargs_map, leaf_os_support_map=leaf_os_support_map, show_lsp_input_info=show_lsp_input_info)
pybind_object = self._get_pybind_object(operation_type=operation_type, compositions_list=compositions_list, bindings_list=bindings_list, composed_child_list=composed_child_list, compositions_keyval_list=compositions_keyval_list, bindings_keyval=bindings_keyval, composed_child_leafval_list=composed_child_leafval_list, leafval_map=leafval_map, show_lsp_input_info=show_lsp_input_info)
return self._rpc_worker(operation_type=operation_type, pybind_object=pybind_object, resource_depth=1, timeout=api_timeout) | 4819c0f1f9cdd4eb46440cd51656cce5a93b3005 | 3,657,470 |
def most_similar(sen, voting_dict):
"""
Input: the last name of a senator, and a dictionary mapping senator names
to lists representing their voting records.
Output: the last name of the senator whose political mindset is most
like the input senator (excluding, of course, the input senator
him/herself). Resolve ties arbitrarily.
Example:
>>> vd = {'Klein': [1,1,1], 'Fox-Epstein': [1,-1,0], 'Ravella': [-1,0,0]}
>>> most_similar('Klein', vd)
'Fox-Epstein'
Note that you can (and are encouraged to) re-use you policy_compare procedure.
"""
most_sim = -1000
most_sim_senator = ""
for key, val in voting_dict.items():
if key != sen:
cmp = policy_compare(sen, key, voting_dict)
if most_sim < cmp:
most_sim = cmp
most_sim_senator = key
return most_sim_senator | 6889d08af21d4007fa01dbe4946748aef0d9e3e6 | 3,657,471 |
def fixed_ro_bci_edge(ascentlat, lat_fixed_ro_ann,
zero_bounds_guess_range=np.arange(0.1, 90, 5)):
"""Numerically solve fixed-Ro, 2-layer BCI model of HC edge."""
def _solver(lat_a, lat_h):
# Reasonable to start guess at the average of the two given latitudes.
init_guess = 0.5 * (lat_a + lat_h)
return brentq_solver_sweep_param(
_fixed_ro_bci_edge,
lat_a,
init_guess,
zero_bounds_guess_range,
funcargs=(lat_h,),
)
return xr.apply_ufunc(_solver, ascentlat, lat_fixed_ro_ann,
vectorize=True, dask="parallelized") | 544c1747450cac52d161aa267a6332d4902798d1 | 3,657,472 |
def fresh_jwt_required(fn):
"""
A decorator to protect a Flask endpoint.
If you decorate an endpoint with this, it will ensure that the requester
has a valid and fresh access token before allowing the endpoint to be
called.
See also: :func:`~flask_jwt_extended.jwt_required`
"""
@wraps(fn)
def wrapper(*args, **kwargs):
jwt_data = _decode_jwt_from_request(request_type='access')
ctx_stack.top.jwt = jwt_data
if not jwt_data['fresh']:
raise FreshTokenRequired('Fresh token required')
if not verify_token_claims(jwt_data[config.user_claims_key]):
raise UserClaimsVerificationError('User claims verification failed')
_load_user(jwt_data[config.identity_claim_key])
return fn(*args, **kwargs)
return wrapper | e5f30192c68018a419bb086522217ce86b27e6f6 | 3,657,473 |
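# Typical usage sketch for fresh_jwt_required above (the route and view names are
# hypothetical, and a configured Flask app with flask_jwt_extended is assumed):
# protect an endpoint so only fresh access tokens reach it.
# @app.route("/change-password", methods=["POST"])
# @fresh_jwt_required
# def change_password():
#     ...  # only runs when a valid *and* fresh access token was presented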
import random
def random_small_number():
"""
    Randomly generate a decimal number (float).
    :return: the random decimal number
"""
return random.random() | 45143c2c78dc72e21cbbe0a9c10babd00100be77 | 3,657,474 |
def get_sample(df, col_name, n=100, seed=42):
"""Get a sample from a column of a dataframe.
It drops any numpy.nan entries before sampling. The sampling
is performed without replacement.
Example of numpydoc for those who haven't seen yet.
Parameters
----------
df : pandas.DataFrame
Source dataframe.
col_name : str
Name of the column to be sampled.
n : int
Sample size. Default is 100.
seed : int
Random seed. Default is 42.
Returns
-------
pandas.Series
Sample of size n from dataframe's column.
"""
np.random.seed(seed)
random_idx = np.random.choice(df[col_name].dropna().index, size=n, replace=False)
return df.loc[random_idx, col_name] | a4fb8e1bbc7c11026b54b2ec341b85310596de13 | 3,657,475 |
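# Usage sketch for get_sample above: draw 3 distinct non-NaN values from one column.
import numpy as np
import pandas as pd

df = pd.DataFrame({"height": [1.7, 1.8, np.nan, 1.6, 1.9, 1.75]})
sample = get_sample(df, "height", n=3, seed=0)
print(len(sample))  # -> 3, sampled without replacement from the non-NaN rows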
def gen_mail_content(content, addr_from):
"""
    Generate a new mail message with DKIM added, based on the given mail body.
    @param content: string, the mail body content
    @return str_mail: the new mail message with DKIM added
"""
try:
domain = addr_from.split('@')[-1]
dkim_info = get_dkim_info(domain)
if dkim_info:
content = repalce_mail(content, addr_from)
selector, private = dkim_info
private = private.replace('\r\n', '\n')
dkim_sig = dkim.sign(content, selector, domain, private, include_headers=['From', 'To', 'Subject', 'Date'])
dk_sig = domainkeys(dkim_sig + content, selector, domain, private, include_heads=['From', 'To', 'Subject'])
return dk_sig + dkim_sig + content
else:
return content
except Exception, e:
print >>sys.stderr, e
print >>sys.stderr, traceback.format_exc()
return content | 90ad569f8f69b7fa39edab799b41522fddc3ce97 | 3,657,476 |
import warnings
def autocov(ary, axis=-1):
"""Compute autocovariance estimates for every lag for the input array.
Parameters
----------
    ary : Numpy array
        An array containing MCMC samples
    axis : int
        The axis along which to compute the autocovariance (defaults to the last axis)
Returns
-------
acov: Numpy array same size as the input array
"""
axis = axis if axis > 0 else len(ary.shape) + axis
n = ary.shape[axis]
m = next_fast_len(2 * n)
ary = ary - ary.mean(axis, keepdims=True)
# added to silence tuple warning for a submodule
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ifft_ary = np.fft.rfft(ary, n=m, axis=axis)
ifft_ary *= np.conjugate(ifft_ary)
shape = tuple(
slice(None) if dim_len != axis else slice(0, n) for dim_len, _ in enumerate(ary.shape)
)
cov = np.fft.irfft(ifft_ary, n=m, axis=axis)[shape]
cov /= n
return cov | e17dcfcbdee37022a5ab98561287f891acfefaf6 | 3,657,477 |
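# Small sanity check for autocov above (assumes numpy and scipy's next_fast_len
# are importable in this module): the lag-0 autocovariance equals the biased variance.
import numpy as np

x = np.random.default_rng(0).normal(size=1000)
acov = autocov(x)
print(np.isclose(acov[0], x.var()))  # -> True (x.var() uses ddof=0 by default)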
import argparse
def get_args(argv=None):
"""Parses given arguments and returns argparse.Namespace object."""
prsr = argparse.ArgumentParser(
description="Perform a conformational search on given molecules."
)
group = prsr.add_mutually_exclusive_group(required=True)
group.add_argument(
'-m', '--molecules', nargs='+',
help='One or more files with molecule specification.'
)
group.add_argument(
'-d', '--directory', help='Directory with .mol files.'
)
prsr.add_argument(
'-o', '--output_dir', default='.\\confsearch', help='Output directory.'
)
prsr.add_argument(
'-n', '--num_confs', type=int, default=10,
help='Number of cnformers to generate.'
)
prsr.add_argument(
'-r', '--rms_tresh', type=float, default=1,
help='Maximum RMSD of conformers.'
)
prsr.add_argument(
'-e', '--energy_window', type=float, default=5,
help='Maximum energy difference from lowest-energy conformer '
'in kcal/mol.'
)
prsr.add_argument(
'-c', '--max_cycles', type=int, default=10,
help='Maximum number of energy minimization cycles.'
)
prsr.add_argument(
'-f', '--fixed', type=int, nargs='+', default=(),
help='Indices (starting at 1) of atoms fixed during molecule embedding.'
)
prsr.add_argument(
'-x', '--constraints',
help='File with constraints specified in format '
'"kind a [b [c [d]] rel min] max [const]", one for line. `kind` '
'should be one of: P (position), D (distance), A (angle), T '
'(torsion). Number of required atoms indices depends on `kind` '
'given and should be 1, 2, 3 or 4 respectively. Atoms indices '
'start at 1. `rel` should be 0 or 1 and specifies if `min` and '
'`max` values should be treated as absolute values or relative '
'to current value. `min` and `max` should be floats, representing '
'minimum and maximum value of constrained property in relevant '
'units (angstroms or degrees). `rel` and `min` should be omitted '
'if `kind` is P. `const` is force constant for given constraint, '
'should be integer or float, defaults to 1e5.'
)
prsr.add_argument(
'-V', '--verbose', action='store_true',
help='Sets logging level to INFO.'
)
prsr.add_argument(
'-D', '--debug', action='store_true',
help='Sets logging level to DEBUG.'
)
return prsr.parse_args(argv) | 424ac2f714a30e31bce79a31b950e4ade06c8eab | 3,657,478 |
def _optimize_rule_mip(
set_opt_model_func,
profile,
committeesize,
resolute,
max_num_of_committees,
solver_id,
name="None",
committeescorefct=None,
):
"""Compute rules, which are given in the form of an optimization problem, using Python MIP.
Parameters
----------
set_opt_model_func : callable
sets constraints and objective and adds additional variables, see examples below for its
signature
profile : abcvoting.preferences.Profile
approval sets of voters
committeesize : int
number of chosen alternatives
resolute : bool
max_num_of_committees : int
maximum number of committees this method returns, value can be None
solver_id : str
name : str
name of the model, used for error messages
committeescorefct : callable
a function used to compute the score of a committee
Returns
-------
committees : list of sets
a list of winning committees,
each of them represented as set of integers from `0` to `num_cand`
"""
maxscore = None
committees = []
if solver_id not in ["gurobi", "cbc"]:
raise ValueError(f"Solver {solver_id} not known in Python MIP.")
while True:
model = mip.Model(solver_name=solver_id)
# note: verbose = 1 causes issues with unittests, seems as if output is printed too late
# and anyway the output does not seem to be very helpful
model.verbose = 0
# `in_committee` is a binary variable indicating whether `cand` is in the committee
in_committee = [
model.add_var(var_type=mip.BINARY, name=f"cand{cand}_in_committee")
for cand in profile.candidates
]
set_opt_model_func(
model,
profile,
in_committee,
committeesize,
)
# find a new committee that has not been found yet by excluding previously found committees
for committee in committees:
model += mip.xsum(in_committee[cand] for cand in committee) <= committeesize - 1
# emphasis is optimality:
# activates procedures that produce improved lower bounds, focusing in pruning the search
# tree even if the production of the first feasible solutions is delayed.
model.emphasis = 2
model.opt_tol = ACCURACY
model.max_mip_gap = ACCURACY
model.integer_tol = ACCURACY
status = model.optimize()
if status not in [mip.OptimizationStatus.OPTIMAL, mip.OptimizationStatus.INFEASIBLE]:
raise RuntimeError(
f"Python MIP returned an unexpected status code: {status}"
f"Warning: solutions may be incomplete or not optimal (model {name})."
)
elif status == mip.OptimizationStatus.INFEASIBLE:
if len(committees) == 0:
# we are in the first round of searching for committees
# and Gurobi didn't find any
raise RuntimeError("Python MIP found no solution (INFEASIBLE) (model {name})")
break
committee = set(
cand
for cand in profile.candidates
if in_committee[cand].x >= 0.9
# this should be >= 1 - ACCURACY, but apparently it is not necessarily the case that
# integers are only ACCURACY apart from either 0 or 1
)
if len(committee) != committeesize:
raise RuntimeError(
"_optimize_rule_mip produced a committee with "
"fewer than `committeesize` members (model {name})."
)
if committeescorefct is None:
objective_value = model.objective_value # numeric value from MIP
else:
objective_value = committeescorefct(profile, committee) # exact value
if maxscore is None:
maxscore = objective_value
elif (committeescorefct is not None and objective_value > maxscore) or (
committeescorefct is None and objective_value > maxscore + CMP_ACCURACY
):
raise RuntimeError(
"Python MIP found a solution better than a previous optimum. This "
f"should not happen (previous optimal score: {maxscore}, "
f"new optimal score: {objective_value}, model {name})."
)
elif (committeescorefct is not None and objective_value < maxscore) or (
committeescorefct is None and objective_value < maxscore - CMP_ACCURACY
):
# no longer optimal
break
committees.append(committee)
if resolute:
break
if max_num_of_committees is not None and len(committees) >= max_num_of_committees:
return committees
return committees | 41c3ace270be4dcb4321e4eeedb23d125e6766c3 | 3,657,479 |
import itertools
def Zuo_fig_3_18(verbose=True):
"""
Input for Figure 3.18 in Zuo and Spence \"Advanced TEM\", 2017
This input acts as an example as well as a reference
Returns:
dictionary: tags is the dictionary of all input and output paramter needed to reproduce that figure.
"""
# INPUT
# Create Silicon structure (Could be produced with Silicon routine)
if verbose:
print('Sample Input for Figure 3.18 in Zuo and Spence \"Advanced TEM\", 2017')
tags = {'crystal_name': 'Silicon'}
if verbose:
print('tags[\'crystal\'] = ', tags['crystal_name'])
a = 0.514 # nm
tags['lattice_parameter_nm'] = a
if verbose:
print('tags[\'lattice_parameter_nm\'] =', tags['lattice_parameter_nm'])
tags['unit_cell'] = [[a, 0, 0], [0, a, 0], [0, 0, a]]
if verbose:
print('tags[\'unit_cell\'] =', tags['unit_cell'])
tags['elements'] = list(itertools.repeat('Si', 8))
if verbose:
print('tags[\'elements\'] =', tags['elements'])
base = [(0., 0., 0.), (0.5, 0.0, 0.5), (0.5, 0.5, 0.), (0., 0.5, 0.5)]
tags['base'] = np.array(base + (np.array(base) + (.25, .25, .25)).tolist())
if verbose:
print('tags[\'base\'] =', tags['base'])
# Define Experimental Conditions
tags['convergence_angle_mrad'] = 7
tags['acceleration_voltage_V'] = 101.6*1000.0 # V
if verbose:
print('tags[\'acceleration_voltage_V\'] =', tags['acceleration_voltage_V'])
tags['convergence_angle_mrad'] = 7.1 # mrad; 0 is parallel illumination
if verbose:
print('tags[\'convergence_angle_mrad\'] =', tags['convergence_angle_mrad'])
    tags['zone_hkl'] = np.array([-2, 2, 1])  # incident nearest zone axis: defines Laue Zones!!!!
if verbose:
print('tags[\'zone_hkl\'] =', tags['zone_hkl'])
tags['mistilt'] = np.array([0, 0, 0]) # mistilt in degrees
if verbose:
print('tags[\'mistilt\'] =', tags['mistilt'])
# Define Simulation Parameters
tags['Sg_max'] = .2 # 1/nm maximum allowed excitation error
if verbose:
print('tags[\'Sg_max\'] =', tags['Sg_max'])
tags['hkl_max'] = 9 # Highest evaluated Miller indices
if verbose:
print('tags[\'hkl_max\'] =', tags['hkl_max'])
print('##################')
print('# Output Options #')
print('##################')
# Output options
tags['background'] = 'black' # 'white' 'grey'
if verbose:
print('tags[\'background\'] =', tags['background'], '# \'white\', \'grey\' ')
tags['color map'] = 'plasma'
if verbose:
print('tags[\'color map\'] =', tags['color map'], '#,\'cubehelix\',\'Greys\',\'jet\' ')
tags['plot HOLZ'] = 1
if verbose:
print('tags[\'plot HOLZ\'] =', tags['plot HOLZ'])
tags['plot HOLZ excess'] = 1
if verbose:
print('tags[\'plot HOLZ excess\'] =', tags['plot HOLZ excess'])
tags['plot Kikuchi'] = 1
if verbose:
print('tags[\'plot Kikuchi\'] =', tags['plot Kikuchi'])
tags['plot reflections'] = 1
if verbose:
print('tags[\'plot reflections\'] =', tags['plot reflections'])
tags['label HOLZ'] = 0
if verbose:
print('tags[\'label HOLZ\'] =', tags['label HOLZ'])
tags['label Kikuchi'] = 0
if verbose:
print('tags[\'label Kikuchi\'] =', tags['label Kikuchi'])
tags['label reflections'] = 0
if verbose:
print('tags[\'label reflections\'] =', tags['label reflections'])
tags['label color'] = 'black'
if verbose:
print('tags[\'label color\'] =', tags['label color'])
tags['label size'] = 10
if verbose:
print('tags[\'label size\'] =', tags['label size'])
tags['color Laue Zones'] = ['red', 'blue', 'green', 'blue', 'green'] # , 'green', 'red'] #for OLZ give a sequence
if verbose:
print('tags[\'color Laue Zones\'] =', tags['color Laue Zones'], ' #[\'red\', \'blue\', \'lightblue\']')
tags['color Kikuchi'] = 'green'
if verbose:
print('tags[\'color Kikuchi\'] =', tags['color Kikuchi'])
tags['linewidth HOLZ'] = -1 # -1: linewidth according to intensity (structure factor F^2
if verbose:
print('tags[\'linewidth HOLZ\'] =', tags['linewidth HOLZ'], '# -1: linewidth according to intensity '
'(structure factor F^2)')
tags['linewidth Kikuchi'] = -1 # -1: linewidth according to intensity (structure factor F^2
if verbose:
print('tags[\'linewidth Kikuchi\'] =', tags['linewidth Kikuchi'], '# -1: linewidth according to intensity '
'(structure factor F^2)')
tags['color reflections'] = 'intensity' # 'Laue Zone'
if verbose:
print('tags[\'color reflections\'] =', tags['color reflections'], '#\'Laue Zone\' ')
tags['color zero'] = 'white' # 'None', 'white', 'blue'
if verbose:
print('tags[\'color zero\'] =', tags['color zero'], '#\'None\', \'white\', \'blue\' ')
tags['color ring zero'] = 'None' # 'Red' #'white' #, 'None'
if verbose:
print('tags[\'color ring zero\'] =', tags['color ring zero'], '#\'None\', \'white\', \'Red\' ')
print('########################')
print('# End of Example Input #')
print('########################\n\n')
return tags | 560272c4c28c8e0628403573dd76c0573ae9d937 | 3,657,480 |
def subscribe_feed(feed_link: str, title: str, parser: str, conn: Conn) -> str:
"""Return the feed_id if nothing wrong."""
feed_id = new_feed_id(conn)
conn.execute(
stmt.Insert_feed,
dict(
id=feed_id,
feed_link=feed_link,
website="",
title=title,
author_name="",
updated=arrow.now().format(RFC3339),
notes="",
parser=parser,
),
)
return feed_id | 88a49ebaa4f766bfb228dc3ba271e8c98d50da99 | 3,657,481 |
import os
def run(fname):
"""
Create a new C file and H file corresponding to the filename "fname",
and add them to the corresponding include.am.
This function operates on paths relative to the top-level tor directory.
"""
# Make sure we're in the top-level tor directory,
# which contains the src directory
if not os.path.isdir("src"):
raise RuntimeError("Could not find './src/'. "
"Run this script from the top-level tor source "
"directory.")
# And it looks like a tor/src directory
if not os.path.isfile("src/include.am"):
raise RuntimeError("Could not find './src/include.am'. "
"Run this script from the top-level tor source "
"directory.")
# Make the file name relative to the top-level tor directory
tor_fname = tordir_file(fname)
# And check that we're adding files to the "src" directory,
# with canonical paths
if tor_fname[:4] != "src/":
raise ValueError("Requested file path '{}' canonicalized to '{}', "
"but the canonical path did not start with 'src/'. "
"Please add files to the src directory."
.format(fname, tor_fname))
c_tor_fname = makeext(tor_fname, "c")
h_tor_fname = makeext(tor_fname, "h")
if os.path.exists(c_tor_fname):
print("{} already exists".format(c_tor_fname))
return 1
if os.path.exists(h_tor_fname):
print("{} already exists".format(h_tor_fname))
return 1
with open(c_tor_fname, 'w') as f:
f.write(instantiate_template(C_FILE_TEMPLATE, c_tor_fname))
with open(h_tor_fname, 'w') as f:
f.write(instantiate_template(HEADER_TEMPLATE, h_tor_fname))
iam = get_include_am_location(c_tor_fname)
if iam is None or not os.path.exists(iam):
print("Made files successfully but couldn't identify include.am for {}"
.format(c_tor_fname))
return 1
amfile = ParsedAutomake()
cur_chunk = AutomakeChunk()
with open(iam) as f:
for line in f:
if cur_chunk.addLine(line):
amfile.addChunk(cur_chunk)
cur_chunk = AutomakeChunk()
amfile.addChunk(cur_chunk)
amfile.add_file(c_tor_fname, "sources")
amfile.add_file(h_tor_fname, "headers")
with open(iam+".tmp", 'w') as f:
amfile.dump(f)
os.rename(iam+".tmp", iam) | c20130f6f46b4f47996d9a817f93099484b284fd | 3,657,482 |
def process_grid(procstatus, dscfg, radar_list=None):
"""
Puts the radar data in a regular grid
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : string. Dataset keyword
The data type where we want to extract the point measurement
gridconfig : dictionary. Dataset keyword
Dictionary containing some or all of this keywords:
xmin, xmax, ymin, ymax, zmin, zmax : floats
minimum and maximum horizontal distance from grid origin [km]
and minimum and maximum vertical distance from grid origin [m]
Defaults -40, 40, -40, 40, 0., 10000.
hres, vres : floats
horizontal and vertical grid resolution [m]
Defaults 1000., 500.
latorig, lonorig, altorig : floats
latitude and longitude of grid origin [deg] and altitude of
grid origin [m MSL]
Defaults the latitude, longitude and altitude of the radar
wfunc : str. Dataset keyword
the weighting function used to combine the radar gates close to a
grid point. Possible values BARNES, BARNES2, CRESSMAN, NEAREST
Default NEAREST
roif_func : str. Dataset keyword
the function used to compute the region of interest.
Possible values: dist_beam, constant
roi : float. Dataset keyword
the (minimum) radius of the region of interest in m. Default half
the largest resolution
beamwidth : float. Dataset keyword
the radar antenna beamwidth [deg]. If None that of the key
radar_beam_width_h in attribute instrument_parameters of the radar
object will be used. If the key or the attribute are not present
a default 1 deg value will be used
beam_spacing : float. Dataset keyword
the beam spacing, i.e. the ray angle resolution [deg]. If None,
that of the attribute ray_angle_res of the radar object will be
used. If the attribute is None a default 1 deg value will be used
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the gridded data
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
field_names_aux = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_names_aux.append(get_fieldname_pyart(datatype))
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
# keep only fields present in radar object
field_names = []
nfields_available = 0
for field_name in field_names_aux:
if field_name not in radar.fields:
warn('Field name '+field_name+' not available in radar object')
continue
field_names.append(field_name)
nfields_available += 1
if nfields_available == 0:
warn("Fields not available in radar data")
return None, None
# default parameters
xmin = -40.
xmax = 40.
ymin = -40.
ymax = 40.
zmin = 0.
zmax = 10000.
hres = 1000.
vres = 500.
lat = float(radar.latitude['data'])
lon = float(radar.longitude['data'])
alt = float(radar.altitude['data'])
if 'gridConfig' in dscfg:
if 'xmin' in dscfg['gridConfig']:
xmin = dscfg['gridConfig']['xmin']
if 'xmax' in dscfg['gridConfig']:
xmax = dscfg['gridConfig']['xmax']
if 'ymin' in dscfg['gridConfig']:
ymin = dscfg['gridConfig']['ymin']
if 'ymax' in dscfg['gridConfig']:
ymax = dscfg['gridConfig']['ymax']
if 'zmin' in dscfg['gridConfig']:
zmin = dscfg['gridConfig']['zmin']
if 'zmax' in dscfg['gridConfig']:
zmax = dscfg['gridConfig']['zmax']
if 'hres' in dscfg['gridConfig']:
hres = dscfg['gridConfig']['hres']
if 'vres' in dscfg['gridConfig']:
vres = dscfg['gridConfig']['vres']
if 'latorig' in dscfg['gridConfig']:
lat = dscfg['gridConfig']['latorig']
if 'lonorig' in dscfg['gridConfig']:
lon = dscfg['gridConfig']['lonorig']
if 'altorig' in dscfg['gridConfig']:
alt = dscfg['gridConfig']['altorig']
wfunc = dscfg.get('wfunc', 'NEAREST')
roi_func = dscfg.get('roi_func', 'dist_beam')
# number of grid points in cappi
nz = int((zmax-zmin)/vres)+1
ny = int((ymax-ymin)*1000./hres)+1
nx = int((xmax-xmin)*1000./hres)+1
min_radius = dscfg.get('roi', np.max([vres, hres])/2.)
# parameters to determine the gates to use for each grid point
beamwidth = dscfg.get('beamwidth', None)
beam_spacing = dscfg.get('beam_spacing', None)
if beamwidth is None:
if (radar.instrument_parameters is not None and
'radar_beam_width_h' in radar.instrument_parameters):
beamwidth = radar.instrument_parameters[
'radar_beam_width_h']['data'][0]
else:
warn('Unknown radar beamwidth. Default 1 deg will be used')
beamwidth = 1
if beam_spacing is None:
if radar.ray_angle_res is not None:
beam_spacing = radar.ray_angle_res['data'][0]
else:
warn('Unknown beam spacing. Default 1 deg will be used')
beam_spacing = 1
# cartesian mapping
grid = pyart.map.grid_from_radars(
(radar,), gridding_algo='map_to_grid',
weighting_function=wfunc,
roi_func=roi_func, h_factor=1.0, nb=beamwidth, bsp=beam_spacing,
min_radius=min_radius, constant_roi=min_radius,
grid_shape=(nz, ny, nx),
grid_limits=((zmin, zmax), (ymin*1000., ymax*1000.),
(xmin*1000., xmax*1000.)),
grid_origin=(lat, lon), grid_origin_alt=alt,
fields=field_names)
new_dataset = {'radar_out': grid}
return new_dataset, ind_rad | b414fb327d3658cc6f9ba1296ec1226b5d2a7ff6 | 3,657,483 |
def float_to_16(value):
""" convert float value into fixed exponent (8) number
returns 16 bit integer, as value * 256
"""
value = int(round(value*0x100,0))
return value & 0xffff | 0a587e4505c9c19b0cbdd2f94c8a964f2a5a3ccd | 3,657,484 |
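# Worked examples for float_to_16 above: values are scaled by 256 and wrapped to 16 bits.
print(float_to_16(1.5))   # -> 384   (1.5 * 256)
print(float_to_16(-1.0))  # -> 65280 (-256 wrapped to 0xFF00)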
def create_keras_one_layer_dense_model(*,
input_size,
output_size,
verbose=False,
**kwargs
):
"""
Notes:
https://www.tensorflow.org/tutorials/keras/save_and_load
"""
# ...................................................
# Create model
model = Sequential()
#.. add fully connected layer
model.add(Dense(
        input_dim=input_size,  # e.g. 784 pixels for a flattened image
units=output_size,
activation=kwargs["out_activation"],
kernel_regularizer=tf.keras.regularizers.l2(0.001),
kernel_initializer=initializers.VarianceScaling(scale=1.0, seed=0)
))
# Print network summary
if verbose==True:
print(model.summary())
else:
pass
# ...................................................
    # Define Loss Function and Training Operation
""" # [option]: Use only default values,
model.compile( optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['acc'])
"""
model.compile(
optimizer= kwargs["optimizer"],
loss= losses.sparse_categorical_crossentropy,
metrics= kwargs["metrics"] # even one arg must be in the list
)
return model | d7d34d9981aac318bca5838c42ed7f844c27cfda | 3,657,485 |
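# Usage sketch for create_keras_one_layer_dense_model above, with illustrative
# keyword arguments (e.g. a 784-feature input and 10 classes). The kwargs names
# mirror those read inside the function, and the Keras/TensorFlow names it uses
# (Sequential, Dense, initializers, losses, tf) are assumed to be imported.
model = create_keras_one_layer_dense_model(
    input_size=784,
    output_size=10,
    verbose=False,
    out_activation="softmax",
    optimizer="sgd",
    metrics=["acc"],
)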
def API_encrypt(key, in_text, formatting:str = "Base64", nonce_type:str = "Hybrid"):
""" Returns: Input Text 147 Encrypted with Input Key. """
try:
# Ensure an Appropriate Encoding Argument is Provided.
try: encoding = FORMATS[formatting]
except: raise ValueError("Invalid Encoding Argument")
# Generate Nonce Integer Based on Input Argument.
nonce = gen_nonce(nonce_type)
# Encode Text into Specified Encoding and Remove any Padding.
encoded_text = convert_input(in_text, "encode", encoding)
# Encode Key into Decimal Number (Base10).
dec_key = key_convert(key, encoding)
# Substitute Down Input Text.
shifted_text = substitution(dec_key, nonce, encoded_text, encoding, "encrypt", "normal")
# Randomly join Shifted Text and Nonce into one Text.
full_text = pair_function(shifted_text, dec_key, encoding, nonce)
# Substitute Up Input Text.
return substitution(dec_key + 135, 147, full_text, encoding, "encrypt", "reverse")
except: raise ValueError(f"Encryption with Key: {key} Failed for Input: {in_text}") | d7336197ba1d32d89c8dc0c098bfbc20c795168d | 3,657,486 |
def convert_log_dict_to_np(logs):
"""
Take in logs and return params
"""
# Init params
n_samples_after_warmup = len(logs)
n_grid = logs[0]['u'].shape[-1]
u = np.zeros((n_samples_after_warmup, n_grid))
Y = np.zeros((n_samples_after_warmup, n_grid))
k = np.zeros((n_samples_after_warmup, n_grid))
kl_trunc_errs = np.empty((n_samples_after_warmup,1))
n_stoch_disc = logs[0]['coefs'].shape[-1] # e.g., n_alpha_indices for PCE, or kl_dim for KL-E
coefs = np.empty((n_samples_after_warmup, n_grid, n_stoch_disc))
stoch_dim = logs[0]['rand_insts'].shape[-1]
rand_insts = np.empty((n_samples_after_warmup, stoch_dim))
# Copy logs into params
for n, log in enumerate(logs):
k[n,:] = log['rand_param']
Y[n,:] = log['Y']
u[n,:] = log['u']
kl_trunc_errs[n,0] = log['kl_trunc_err']
coefs[n,:,:] = log['coefs']
rand_insts[n,:] = log['rand_insts']
return k, Y, u, kl_trunc_errs, coefs, rand_insts | 1962fa563ee5d741f7f1ec6453b7fd5693efeca2 | 3,657,487 |
import os
def get_parent_dir():
"""Returns the root directory of the project."""
return os.path.abspath(os.path.join(os.getcwd(), os.pardir)) | f6c0a43cf2a38f507736f09429b7ca7012739559 | 3,657,488 |
from pathlib import Path
from typing import Callable
def map_links_in_markdownfile(
filepath: Path,
func: Callable[[Link], None]
) -> bool:
"""Dosyadaki tüm linkler için verilen fonksiyonu uygular
Arguments:
filepath {Path} -- Dosya yolu objesi
func {Callable[[Link], None]} -- Link alan ve değiştiren fonksiyon
Returns:
bool -- Değişim olduysa True
"""
content = filesystem.read_file(filepath)
content = map_links_in_string(content, func)
return filesystem.write_to_file(filepath, content) | 4f6aa7ee5ecb7aed1df8551a69161305601d0489 | 3,657,489 |
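# Hypothetical usage sketch (not part of the original entry) — assumes the
# Link objects passed to the callback expose a mutable `path` attribute,
# which is not shown in this snippet.
def make_relative(link):
    link.path = link.path.lstrip("/")

changed = map_links_in_markdownfile(Path("README.md"), make_relative)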
def half_cell_t_2d_triangular_precursor(p, t):
"""Creates a precursor to horizontal transmissibility for prism grids (see notes).
arguments:
p (numpy float array of shape (N, 2 or 3)): the xy(&z) locations of cell vertices
t (numpy int array of shape (M, 3)): the triangulation of p for which the transmissibility
precursor is required
returns:
a pair of numpy float arrays, each of shape (M, 3) being the normal length and flow length
relevant for flow across the face opposite each vertex as defined by t
notes:
this function acts as a precursor to the equivalent of the half cell transmissibility
functions but for prism grids; for a resqpy VerticalPrismGrid, the triangulation can
be shared by many layers with this function only needing to be called once; the first
of the returned values (normal length) is the length of the triangle edge, in xy, when
projected onto the normal of the flow direction; multiplying the normal length by a cell
height will yield the area needed for transmissibility calculations; the second of the
       returned values (flow length) is the distance from the triangle centre to the midpoint of
       the edge and can be used as the distance term for a half cell transmissibility; this
function does not account for dip, it only handles the geometric aspects of half
cell transmissibility in the xy plane
"""
assert p.ndim == 2 and p.shape[1] in [2, 3]
assert t.ndim == 2 and t.shape[1] == 3
# centre points of triangles, in xy
centres = np.mean(p[t], axis = 1)[:, :2]
# midpoints of edges of triangles, in xy
edge_midpoints = np.empty(tuple(list(t.shape) + [2]), dtype = float)
edge_midpoints[:, 0, :] = 0.5 * (p[t[:, 1]] + p[t[:, 2]])[:, :2]
edge_midpoints[:, 1, :] = 0.5 * (p[t[:, 2]] + p[t[:, 0]])[:, :2]
edge_midpoints[:, 2, :] = 0.5 * (p[t[:, 0]] + p[t[:, 1]])[:, :2]
# triangle edge vectors, projected in xy
edge_vectors = np.empty(edge_midpoints.shape, dtype = float)
edge_vectors[:, 0] = (p[t[:, 2]] - p[t[:, 1]])[:, :2]
edge_vectors[:, 1] = (p[t[:, 0]] - p[t[:, 2]])[:, :2]
edge_vectors[:, 2] = (p[t[:, 1]] - p[t[:, 0]])[:, :2]
# vectors from triangle centres to mid points of edges (3 per triangle), in xy plane
cem_vectors = edge_midpoints - centres.reshape((-1, 1, 2))
cem_lengths = vec.naive_lengths(cem_vectors)
# unit length vectors normal to cem_vectors, in the xy plane
normal_vectors = np.zeros(edge_midpoints.shape)
normal_vectors[:, :, 0] = cem_vectors[:, :, 1]
normal_vectors[:, :, 1] = -cem_vectors[:, :, 0]
normal_vectors = vec.unit_vectors(normal_vectors)
# edge lengths projected onto normal vectors (length perpendicular to nominal flow direction)
normal_lengths = np.abs(vec.dot_products(edge_vectors, normal_vectors))
# return normal (cross-sectional) lengths and nominal flow direction lengths
assert normal_lengths.shape == t.shape and cem_lengths.shape == t.shape
return normal_lengths, cem_lengths | 555f8b260f7c5b3f2e215378655710533ee344d5 | 3,657,490 |
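# Hypothetical usage sketch (not part of the original entry) — assumes numpy
# (np) and resqpy's vector utilities (vec) are imported as the snippet itself
# expects; the triangulation is a single right-angled triangle with unit legs.
p = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
t = np.array([[0, 1, 2]])
normal_lengths, flow_lengths = half_cell_t_2d_triangular_precursor(p, t)
assert normal_lengths.shape == (1, 3) and flow_lengths.shape == (1, 3)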
import numpy as np
def count_datavolume(sim_dict):
"""
Extract from the given input the amount of time and the memory you need to
process each simulation through the JWST pipeline
    :param dict sim_dict: Each key represents a set of simulations (a CAR activity for instance);
                          each value is a list of simulations, each simulation being a dict with detailed info
    :return: Return (mem, time) where mem and time are dictionaries with the same keys as the input dict.
    :rtype: Memory is in GB, Time is in hours
"""
mem_volume = {} # Total memory required in GB
time_volume = {} # Pipeline estimated run time in s
for (car, sim_list) in sim_dict.items():
memory = []
times = []
for sim in sim_list:
if "detector" in sim.keys():
if sim["detector"] in ["IMAGER", "ALL"]:
tmp = {
"integrations": sim["ima_integrations"],
"frames": sim["ima_frames"],
"exposures": sim["exposures"],
"subarray": sim["subarray"],
"NDither": sim["NDither"],
}
(ram, time, nb_exps) = get_prediction(tmp)
memory.extend([ram] * nb_exps) # For each exposure we have one identical file to analyse
times.extend([time] * nb_exps) # For each exposure we have one identical file to analyse
if sim["detector"] in ["ALL", "MRS"]:
tmp = {
"integrations": sim["LW_integrations"],
"frames": sim["LW_frames"],
"exposures": sim["exposures"],
"subarray": "FULL",
"NDither": sim["NDither"],
}
(ram, time, nb_exps) = get_prediction(tmp)
memory.extend([ram] * nb_exps) # For each exposure we have one identical file to analyse
times.extend([time] * nb_exps) # For each exposure we have one identical file to analyse
tmp = {
"integrations": sim["SW_integrations"],
"frames": sim["SW_frames"],
"exposures": sim["exposures"],
"subarray": "FULL",
"NDither": sim["NDither"],
}
(ram, time, nb_exps) = get_prediction(tmp)
memory.extend([ram] * nb_exps) # For each exposure we have one identical file to analyse
times.extend([time] * nb_exps) # For each exposure we have one identical file to analyse
else:
(ram, time, nb_exps) = get_prediction(sim)
memory.extend([ram] * nb_exps) # For each exposure we have one identical file to analyse
times.extend([time] * nb_exps) # For each exposure we have one identical file to analyse
mem_volume[car] = np.array(memory)
time_volume[car] = np.array(times)
return mem_volume, time_volume | dabe86d64be0342486d1680ee8e5a1cb72162550 | 3,657,491 |
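# Hypothetical usage sketch (not part of the original entry): one CAR with a
# single imager simulation; get_prediction() must be available in scope and
# the numbers below are arbitrary placeholders.
sims = {
    "CAR-007": [{
        "detector": "IMAGER",
        "ima_integrations": 10, "ima_frames": 50,
        "exposures": 2, "subarray": "FULL", "NDither": 4,
    }],
}
mem_volume, time_volume = count_datavolume(sims)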
import inspect
import sys
def get_exception_class_by_code(code):
"""Gets exception with the corresponding error code,
otherwise returns UnknownError
:param code: error code
:type code: int
:return: Return Exception class associated with the specified API error.
"""
code = int(code)
module_classes = inspect.getmembers(sys.modules[__name__], inspect.isclass)
exception_classes = (h[1] for h in module_classes
if is_valid_error_cls(h[1]))
exception_cls = None
for e_c in exception_classes:
if e_c.error_code == code:
exception_cls = e_c
break
if exception_cls is None:
exception_cls = UnknownError
return exception_cls | 36d42291737a7a18f7ffea07c1d5ffc71a82e8b1 | 3,657,492 |
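# Hypothetical usage sketch (not part of the original entry) — assumes the
# error classes in this module carry an integer `error_code` attribute; the
# code value used here is arbitrary.
exc_cls = get_exception_class_by_code("1001")  # falls back to UnknownError if no class matches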
def context():
"""Return an instance of the JIRA tool context."""
return dict() | e24e859add22eef279b650f28dce4f6732c346b8 | 3,657,493 |
def dist_to_group(idx: int, group_type: str, lst):
"""
A version of group_count that allows for sorting with solo agents
Sometimes entities don't have immediately adjacent neighbors.
In that case, the value represents the distance to any neighbor, e.g
-1 means that an entity one to the left or right has a neighbor of that type.
Args:
idx (int):index in the list
group_type (str):group type we care about matching
        lst (list): the full sequence of entities being scanned
"""
    my_count = group_count(idx, group_type, lst)
if my_count > 0:
return my_count
adjacent_counts = []
l_neighbor_count = dist_to_group(idx-1, group_type, lst) if idx > 0 else None
r_neighbor_count = dist_to_group(idx+1, group_type, lst) if idx < len(lst)-1 else None
for neighbor_count in (l_neighbor_count, r_neighbor_count):
        if neighbor_count is not None and neighbor_count != 0:
            if neighbor_count < 0:  # The neighbor doesn't have any next directly to it either
                adjacent_counts.append(neighbor_count - 1)
            else:  # The neighbor does have one next to it!
                adjacent_counts.append(neighbor_count)
return max(adjacent_counts) | 74ae510de4145f097fbf9daf406a6156933bae20 | 3,657,494 |
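# Hypothetical usage sketch (not part of the original entry) — group_count()
# is defined elsewhere, so the return value here depends on its semantics.
lineup = ["wolf", "sheep", "sheep", "wolf"]
dist_or_count = dist_to_group(1, "sheep", lineup)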
from typing import AnyStr, Dict, List
import sqlalchemy as sa
from sqlalchemy import bindparam
def get_nodes_rating(start: AnyStr,
end: AnyStr,
tenant_id: AnyStr,
namespaces: List[AnyStr]) -> List[Dict]:
"""
Get the rating by node.
:start (AnyStr) A timestamp, as a string, to represent the starting time.
:end (AnyStr) A timestamp, as a string, to represent the ending time.
:tenant_id (AnyStr) A string representing the tenant, only used by decorators.
:namespaces (List[AnyStr]) A list of namespaces accessible by the tenant.
Return the results of the query as a list of dictionary.
"""
qry = sa.text("""
SELECT frame_begin,
sum(frame_price) as frame_price,
node
FROM frames
WHERE frame_begin >= :start
AND frame_end <= :end
AND namespace != 'unspecified'
AND pod != 'unspecified'
AND namespace IN :namespaces
GROUP BY frame_begin, node
ORDER BY frame_begin, node
""").bindparams(bindparam('namespaces', expanding=True))
params = {
'start': start,
'end': end,
'tenant_id': tenant_id,
'namespaces': namespaces
}
return process_query(qry, params) | b75d35fc195b8317ed8b84ab42ce07339f2f1bf3 | 3,657,495 |
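# Hypothetical usage sketch (not part of the original entry) — process_query()
# and the 'frames' table come from the surrounding application; the values
# below are arbitrary placeholders.
rows = get_nodes_rating(
    start="2024-01-01T00:00:00Z",
    end="2024-01-02T00:00:00Z",
    tenant_id="tenant-a",
    namespaces=["default", "team-a"],
)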
def f(OPL,R):
""" Restoration function calculated from optical path length (OPL)
and from rational function parameter (R). The rational is multiplied
along all optical path.
"""
x = 1
for ii in range(len(OPL)):
x = x * (OPL[ii] + R[ii][2]) / (R[ii][0] * OPL[ii] + R[ii][1])
return x | 5b64b232646768d2068b114d112a8da749c84706 | 3,657,496 |
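# Hypothetical usage sketch for f (not part of the original entry): two path
# segments with arbitrary (a, b, c) parameters per segment, so each factor
# is (OPL_i + c_i) / (a_i * OPL_i + b_i).
OPL = [1.0, 2.0]
R = [(1.0, 0.5, 0.25), (2.0, 1.0, 0.0)]
restoration = f(OPL, R)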
def _str_conv(number, rounded=False):
"""
Convenience tool to convert a number, either float or int into a string.
If the int or float is None, returns empty string.
>>> print(_str_conv(12.3))
12.3
>>> print(_str_conv(12.34546, rounded=1))
12.3
>>> print(_str_conv(None))
<BLANKLINE>
>>> print(_str_conv(1123040))
11.2e5
"""
if not number:
return str(' ')
if not rounded and isinstance(number, (float, int)):
if number < 100000:
string = str(number)
else:
exponant = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
divisor = 10 ** exponant
string = '{0:.1f}'.format(number / divisor) + 'e' + str(exponant)
elif rounded == 2 and isinstance(number, (float, int)):
if number < 100000:
string = '{0:.2f}'.format(number)
else:
exponant = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
divisor = 10 ** exponant
string = '{0:.2f}'.format(number / divisor) + 'e' + str(exponant)
elif rounded == 1 and isinstance(number, (float, int)):
if number < 100000:
string = '{0:.1f}'.format(number)
else:
exponant = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
divisor = 10 ** exponant
string = '{0:.1f}'.format(number / divisor) + 'e' + str(exponant)
else:
return str(number)
return string | d352e8f0956b821a25513bf4a4eecfae5a6a7dcd | 3,657,497 |
import tensorflow as tf  # TF1.x graph-mode API (placeholders, summaries)
def build_eval_graph(input_fn, model_fn, hparams):
"""Build the evaluation computation graph."""
dataset = input_fn(None)
batch = dataset.make_one_shot_iterator().get_next()
batch_holder = {
"transform":
tf.placeholder(
tf.float32,
[1, 1, hparams.n_parts, hparams.n_dims + 1, hparams.n_dims + 1]),
"joint":
tf.placeholder(tf.float32, [1, 1, hparams.n_parts, hparams.n_dims]),
"point":
tf.placeholder(tf.float32, [1, 1, None, hparams.n_dims]),
"label":
tf.placeholder(tf.float32, [1, 1, None, 1]),
}
latent_holder, latent, occ = model_fn(batch_holder, None, None, "gen_mesh")
# Eval Summary
iou_holder = tf.placeholder(tf.float32, [])
best_holder = tf.placeholder(tf.float32, [])
tf.summary.scalar("IoU", iou_holder)
tf.summary.scalar("Best_IoU", best_holder)
return {
"batch_holder": batch_holder,
"latent_holder": latent_holder,
"latent": latent,
"occ": occ,
"batch": batch,
"iou_holder": iou_holder,
"best_holder": best_holder,
"merged_summary": tf.summary.merge_all(),
} | 3f3d1425d08e964de68e99ea0c6cb4397975427a | 3,657,498 |
def _encodeLength(length):
"""
    Encode a length value as DER length octets.
    Args:
        length: non-negative integer; encoded in short form when below the
            long-form threshold, otherwise with a long-form prefix byte.
"""
assert length >= 0
if length < hex160:
return chr(length)
s = ("%x" % length).encode()
if len(s) % 2:
s = "0" + s
s = BinaryAscii.binaryFromHex(s)
lengthLen = len(s)
return chr(hex160 | lengthLen) + str(s) | fd85d5faf85da6920e4a0704118e41901f327d9c | 3,657,499 |
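# Hypothetical usage sketch (not part of the original entry) — hex160 is a
# module-level constant defined elsewhere (the DER long-form length marker),
# so only the short-form call carries a concrete expectation here.
assert _encodeLength(5) == chr(5)   # short form: a single byte
long_form = _encodeLength(1000)     # long form: marker byte followed by length bytes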