content (string, 22–815k chars) | id (int64, 0–4.91M) |
---|---|
def geopad(lon, lat, data, /, nlon=1, nlat=0):
"""
Return array padded circularly along longitude and over the poles for finite
difference methods.
"""
# Pad over longitude seams
if nlon > 0:
pad = ((nlon, nlon),) + (data.ndim - 1) * ((0, 0),)
data = np.pad(data, pad, mode='wrap')
lon = np.pad(lon, nlon, mode='wrap') # should be vector
# Pad over poles
if nlat > 0:
if (data.shape[0] % 2) == 1:
raise ValueError(
'Data must have even number of longitudes '
'if you wish to pad over the poles.'
)
append = np.roll( # descending in lat
np.flip(data, axis=1), data.shape[0] // 2, axis=0
)
data = np.concatenate(
(
append[:, -nlat:, ...], # -87.5, -88.5, -89.5 (crossover)
data, # -89.5, -88.5, -87.5, ..., 87.5, 88.5, 89.5 (crossover)
append[:, :nlat, ...], # 89.5, 88.5, 87.5
),
axis=1,
)
lat = np.pad(lat, nlat, mode='symmetric')
        lat[:nlat] = -180 - lat[:nlat]  # mirror below the south pole (monotonic ascent)
lat[-nlat:] = 180 - lat[-nlat:]
return lon, lat, data | 5,357,600 |
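# A minimal usage sketch for geopad on a hypothetical 1-degree grid with data
# laid out as (nlon, nlat); only the padded shapes are illustrated here.
import numpy as np

lon = np.arange(0.5, 360.5, 1.0)             # 360 longitudes
lat = np.arange(-89.5, 90.5, 1.0)            # 180 latitudes
data = np.random.rand(lon.size, lat.size)    # (360, 180) field

plon, plat, pdata = geopad(lon, lat, data, nlon=2, nlat=2)
print(pdata.shape)  # (364, 184): 2 wrap columns per side, 2 pole rows per side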
def test_diffusion_constant():
"""Ensure the diffusion constant is giving a reasonable result."""
known_diffusion = 1e-3
offset = 1e-4
time = np.arange(10000)
msd = time*known_diffusion + offset
diff, diff_err = relaxation.diffusion_constant(time, msd)
assert np.isclose(diff, known_diffusion)
assert np.isclose(diff_err, 0) | 5,357,601 |
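# relaxation.diffusion_constant is external to this snippet; a minimal sketch,
# assuming it simply returns the slope of a linear MSD-vs-time fit together
# with the standard error of that slope (which is what the test expects):
import numpy as np

def diffusion_constant(time, msd):
    """Return (slope, slope_stderr) of a linear fit msd = slope * time + offset."""
    coeffs, cov = np.polyfit(time, msd, deg=1, cov=True)
    return coeffs[0], np.sqrt(cov[0, 0])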
def SL_EAKF(N,loc_rad,taper='GC',ordr='rand',infl=1.0,rot=False,**kwargs):
"""
Serial, covariance-localized EAKF.
Ref: Karspeck, Alicia R., and Jeffrey L. Anderson. (2007):
"Experimental implementation of an ensemble adjustment filter..."
Used without localization, this should be equivalent
(full ensemble equality) to the EnKF 'Serial'.
"""
def assimilator(stats,twin,xx,yy):
f,h,chrono,X0 = twin.f, twin.h, twin.t, twin.X0
N1 = N-1
R = h.noise
Rm12 = h.noise.C.sym_sqrt_inv
E = X0.sample(N)
stats.assess(0,E=E)
for k,kObs,t,dt in progbar(chrono.forecast_range):
E = f(E,t-dt,dt)
E = add_noise(E, dt, f.noise, kwargs)
if kObs is not None:
stats.assess(k,kObs,'f',E=E)
y = yy[kObs]
inds = serial_inds(ordr, y, R, anom(E)[0])
locf_at = h.loc_f(loc_rad, 'y2x', t, taper)
for i,j in enumerate(inds):
hE = h(E,t)
hx = mean(hE,0)
Y = hE - hx
mu = mean(E ,0)
A = E-mu
# Update j-th component of observed ensemble
Yj = Rm12[j,:] @ Y.T
dyj = Rm12[j,:] @ (y - hx)
#
skk = Yj@Yj # N1 * prior var
su = 1/( 1/skk + 1/N1 ) # N1 * KG
alpha = (N1/(N1+skk))**(0.5) # update contraction factor
#
dy2 = su*dyj/N1 # mean update
Y2 = alpha*Yj # anomaly update
if skk<1e-9: continue
# Update state (regress update from observation space)
# Localized
local, coeffs = locf_at(j)
if len(local) == 0: continue
Regression = (A[:,local]*coeffs).T @ Yj/np.sum(Yj**2)
mu[ local] += Regression*dy2
A[:,local] += np.outer(Y2 - Yj, Regression)
# Without localization:
#Regression = A.T @ Yj/np.sum(Yj**2)
#mu += Regression*dy2
#A += np.outer(Y2 - Yj, Regression)
E = mu + A
E = post_process(E,infl,rot)
stats.assess(k,kObs,E=E)
return assimilator | 5,357,602 |
def compose_test_module_skeleton(module_file):
"""
Writes a pytest file based on the given module.
Args:
module_file (str): path to python module. e.g. "example_module.py"
"""
module = str(inspect.getmodulename(module_file))
test_module_file = f"test_{module}.py"
exec(f"import {module}")
class_members = inspect.getmembers(sys.modules[module], inspect.isclass)
skeleton = (
"""
import pytest
"""
)
for class_member in class_members:
method_members = inspect.getmembers(
class_member[1], predicate=inspect.isfunction) # predicate=inspect.ismethod
with open(test_module_file, "w") as f:
for method in method_members:
method_name = method[0]
method_signature = inspect.signature(method[1])
args = [arg for arg in method_signature.parameters.keys() if arg != 'self']
if args:
params_dict_str = params_function_str = ""
for i in range(len(args)):
if i < len(args)-1:
params_dict_str_end_format = ",\n\t\t\t\t\t"
params_function_str_end_format = ", "
else:
params_dict_str_end_format = params_function_str_end_format = ""
params_dict_str += f"'{args[i]}': 3{params_dict_str_end_format}"
params_function_str += f"params['{args[i]}']{params_function_str_end_format}"
skeleton += compose_test_class_skeleton(module, class_member[0], method_name, params_dict_str, params_function_str)
f.write(skeleton) | 5,357,603 |
def CalculateNMaxNCharge(mol):
"""
#################################################################
Most negative charge on N atoms
-->QNmin
Usage:
result=CalculateNMaxNCharge(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
return _CalculateElementMaxNCharge(mol,AtomicNum=7) | 5,357,604 |
def cnn_5l4(image, **kwargs):
"""
    :param image: (TensorFlow Tensor) Image input placeholder
    :param kwargs: (dict) Extra keyword parameters for the convolutional layers of the CNN
:return: (TensorFlow Tensor) The CNN output layer
"""
activ = tf.nn.relu
layer_1 = activ(conv(image, 'c1', n_filters=222, filter_size=4, stride=1, pad='SAME', init_scale=np.sqrt(2), **kwargs))
layer_2 = activ(conv(layer_1, 'c2', n_filters=222, filter_size=2, stride=1, pad='SAME', init_scale=np.sqrt(2), **kwargs))
layer_3 = activ(conv(layer_2, 'c3', n_filters=222, filter_size=2, stride=1, pad='SAME', init_scale=np.sqrt(2), **kwargs))
layer_4 = activ(conv(layer_3, 'c4', n_filters=222, filter_size=2, stride=1, pad='SAME', init_scale=np.sqrt(2), **kwargs))
layer_5 = activ(conv(layer_4, 'c5', n_filters=222, filter_size=2, stride=1, pad='SAME', init_scale=np.sqrt(2), **kwargs))
layer_lin = conv_to_fc(layer_5)
return layer_lin | 5,357,605 |
def purge_yaml(data):
"""Checks and converts data in basic types."""
basic_types = [int, float, text_type, list]
for key, value in data.items():
if isinstance(value, dict):
purge_yaml(value)
elif isinstance(value, date):
data[key] = value.isoformat()
elif value and not any([isinstance(value, type_) for type_ in basic_types]):
raise Exception(
"!!!Warning!!! '{}' not recognized. [{}]->[{}]".format(
type(value), key, value)
) | 5,357,606 |
def read_dns_data(dns_fn):
"""
Read data in from a DNS file
:param str dns_fn: The filename of the DNS
"""
fed = open(dns_fn, 'r')
begin_data = False
dns_data = {}
for line in fed.readlines():
if begin_data:
if "t = " in line:
tc = float(line[3:])
dns_data.update({ tc:{'N':np.empty((0, 3)), 'MP':np.empty((0, 3))} })
else:
data = [s.replace(',', '') for s in line.split()]
typ = data[0]
pos = np.array([float(data[i]) for i in range(2, 5)])
dns_data[tc][typ] = np.vstack([dns_data[tc][typ], pos])
if (line.strip() == "BEGIN DATA"):
begin_data = True
fed.close()
return dns_data | 5,357,607 |
def arithmetic_mean(iterable):
"""Zero-length-safe arithmetic mean."""
values = np.asarray(iterable)
if not values.size:
return 0
return values.mean() | 5,357,608 |
def play_process(url):
""" Create and return process to read audio from url and send to analog output"""
return FfmpegProcess(f'ffmpeg -i {url} -f alsa default') | 5,357,609 |
def lint_all_views():
"""Mimic a modification of all views, which will trigger a relint."""
for window in sublime.windows():
for view in window.views():
if view.buffer_id() in persist.view_linters:
hit(view) | 5,357,610 |
def iterable_to_wikitext(
items: Iterable[object], *, prefix: str = "\n* "
) -> str:
"""
Convert iterable to wikitext.
Pages are converted to links.
All other objects use their string representation.
:param items: Items to iterate
:param prefix: Prefix for each item when there is more than one item
"""
if not items:
return ""
if len(list(items)) == 1:
prefix = ""
text = ""
for item in items:
if isinstance(item, BasePage):
item = item.title(as_link=True, textlink=True)
text += f"{prefix}{item}"
return text | 5,357,611 |
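# A small usage sketch for iterable_to_wikitext with plain strings (assuming
# the module's BasePage import from pywikibot is available for the isinstance
# check), showing the single-item vs. multi-item prefix behaviour.
print(iterable_to_wikitext(["only item"]))        # -> 'only item'
print(iterable_to_wikitext(["first", "second"]))  # -> '\n* first\n* second'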
def ComputeRelativeRisk(first_pmf, other_pmf):
"""Computes relative risks for two PMFs.
first_pmf: Pmf object
other_pmf: Pmf object
"""
    print('Risks:')
    funcs = [ProbEarly, ProbOnTime, ProbLate]
    risks = {}
    for func in funcs:
        for pmf in [first_pmf, other_pmf]:
            prob = func(pmf)
            risks[func.__name__, pmf.name] = prob
            print(func.__name__, pmf.name, prob)
    print()
    print('Risk ratios (first babies / others):')
    for func in funcs:
        try:
            ratio = (risks[func.__name__, 'first babies'] /
                     risks[func.__name__, 'others'])
            print(func.__name__, ratio)
        except ZeroDivisionError:
            pass | 5,357,612 |
def plot_timeSeries(df, col_name, divide=None, xlabel="Days", line=True, title="Time series values", figsize=(9,9)):
"""
Plot a column of the given time series DataFrame.
Parameters
----------
df: pd.DataFrame
DataFrame indexed by days (i.e. the index is a pd.DatetimeIndex).
col_name: str
Indicates the specified column to plot.
divide: str
Indicates if and how to divide the plotted values.
It can either be None, "year", "month" or "season". (The meteorological seasons are considered, and not the
astronomical ones).
That division is simply made graphically using different colors.
xlabel: str
Label to put on the x axis.
line: bool
Indicates whether to connect the points with a line.
title: str
Title of the plot.
figsize: tuple
Dimensions of the plot.
Returns
----------
matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
"""
fig, ax = plt.subplots(figsize=figsize)
if not divide:
ax.plot(df.index, df[col_name], 'o:' if line else 'o')
else:
groups = group_days_by(df.index, criterion=divide)
color = None
for group in groups:
if divide=="season":
colors = {"Winter":"blue", "Spring":"green", "Summer":"yellow", "Fall":"red"}
color = colors[group[0]]
elif divide=="month":
colors = {"January":"b",
"February":"g",
"March":"r",
"April":"c",
"May":"m",
"June":"y",
"July":"k",
"August":"peru",
"September":"crimson",
"October":"orange",
"November":"darkgreen",
"December":"olivedrab"}
color = colors[group[0]]
ax.plot(group[1], df.loc[group[1],col_name], 'o:' if line else 'o', color=color , label=group[0])
ax.set_xlabel(xlabel)
ax.set_ylabel(col_name)
ax.set_title(title)
ax.grid()
if divide:
ax.legend()
return ax | 5,357,613 |
def save_bedtools(cluster_regions, clusters, assigned_dir):
"""
    Given cluster regions, sorts and saves all BedTool objects to files in
    assigned_dir and returns the updated cluster_regions dict.
    :param cluster_regions: dict mapping region name -> {'real': BedTool, 'rand': {n: BedTool}}
    :return: the same cluster_regions dict with sorted, saved BedTools
"""
for region in cluster_regions:
output_file = "%s.%s.real.BED" % (clusters, region)
cluster_regions[region]['real'] = cluster_regions[region]['real'].sort().saveas(os.path.join(assigned_dir, output_file))
if "rand" not in cluster_regions[region]:
continue
for n_rand in cluster_regions[region]['rand']:
output_file = "%s.%s.rand.%s.BED" % (clusters, region, n_rand)
cluster_regions[region]['rand'][n_rand] = cluster_regions[region]['rand'][n_rand].sort().saveas(os.path.join(assigned_dir, output_file))
return cluster_regions | 5,357,614 |
def is_bv(a):
"""Return `True` if `a` is a Z3 bit-vector expression.
>>> b = BitVec('b', 32)
>>> is_bv(b)
True
>>> is_bv(b + 10)
True
>>> is_bv(Int('x'))
False
"""
return isinstance(a, BitVecRef) | 5,357,615 |
def bgsub_1D(raw_data, energy_axis, edge, **kwargs):
"""
Full background subtraction function for the 1D case-
Optional LBA, log fitting, LCPL, and exponential fitting.
For more information on non-linear fitting function, see information at https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
Inputs:
raw_data - 1D spectrum
energy_axis - corresponding energy axis
edge - edge parameters defined by KEM convention
    **kwargs:
        fit - choose the type of background fit, default == 'pl' == Power law. Can also use 'exp' == Exponential, 'lin' == Linear, 'lcpl' == LCPL.
        log - Boolean, if True, log transform data and fit using QR factorization, default == False.
        nstd - Standard deviation spread of r error from non-linear power law fitting. Default == 100.
        ftol - default 1e-8, relative error desired in the sum of squares.
        gtol - default 1e-8, orthogonality desired between the function vector and the columns of the Jacobian.
        xtol - default 1e-8, relative error desired in the approximate solution.
        maxfev - default 50000; only change if you are consistently catching runtime errors and loosening gtol/ftol is not producing a good enough fit.
method - default is 'trf', see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares for description of methods
Note: may need stricter tolerances on ftol/gtol for noisier data. Anecdotally, a stricter gtol (as low as 1e-8) has a larger effect on the quality of the bgsub.
Outputs:
bg_1D - background spectrum
"""
fit_start_ch = eVtoCh(edge[0], energy_axis)
fit_end_ch = eVtoCh(edge[1], energy_axis)
zdim = len(raw_data)
ewin = energy_axis[fit_start_ch:fit_end_ch]
esub = energy_axis[fit_start_ch:]
bg_1D = np.zeros_like(raw_data)
fy = np.zeros((1,zdim))
fy[0,:] = raw_data
## Either fast fitting -> log fitting, Or slow fitting -> non-linear fitting
if 'log' in kwargs.keys():
log = kwargs['log']
else:
log = False
## Fitting parameters for non-linear curve fitting if non-log based fitting
if 'ftol' in kwargs.keys():
ftol = kwargs['ftol']
else:
ftol = 1e-8
if 'gtol' in kwargs.keys():
gtol = kwargs['gtol']
else:
gtol = 1e-8
if 'xtol' in kwargs.keys():
xtol = kwargs['xtol']
else:
xtol = 1e-8
if 'maxfev' in kwargs.keys():
maxfev = kwargs['maxfev']
else:
maxfev = 50000
if 'method' in kwargs.keys():
method = kwargs['method']
else:
method = 'trf'
    ## Determine if fitting is power law or exponential
if 'fit' in kwargs.keys():
fit = kwargs['fit']
if fit == 'exp':
fitfunc = exponential
bounds = ([0, 0], [np.inf, np.inf])
elif fit == 'pl':
fitfunc = powerlaw
elif fit == 'lcpl':
fitfunc = lcpowerlaw
elif fit == 'lin':
fitfunc = linear
else:
            print("Unrecognized fitting function; please use 'pl' for power law, 'exp' for exponential, 'lin' for linear, or 'lcpl' for LCPL.")
else:
fitfunc = powerlaw
## If fast fitting linear background, find fit using qr factorization
if fitfunc==linear:
Blin = fy[:,fit_start_ch:fit_end_ch]
Alin = np.zeros((len(ewin),2))
Alin[:,0] = np.ones(len(ewin))
Alin[:,1] = ewin
Xlin = qrnorm(Alin,Blin.T)
Elin = np.zeros((len(esub),2))
Elin[:,0] = np.ones(len(esub))
Elin[:,1] = esub
bgndLINline = np.dot(Xlin.T,Elin.T)
bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - bgndLINline
## If fast log fitting and powerlaw, find fit using qr factorization
elif log & (fitfunc==powerlaw):
Blog = fy[:,fit_start_ch:fit_end_ch]
Alog = np.zeros((len(ewin),2))
Alog[:,0] = np.ones(len(ewin))
Alog[:,1] = np.log(ewin)
Xlog = qrnorm(Alog,np.log(abs(Blog.T)))
Elog = np.zeros((len(esub),2))
Elog[:,0] = np.ones(len(esub))
Elog[:,1] = np.log(esub)
bgndPLline = np.exp(np.dot(Xlog.T,Elog.T))
bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - bgndPLline
## If fast log fitting and exponential, find fit using qr factorization
elif log & (fitfunc==exponential):
Bexp = fy[:,fit_start_ch:fit_end_ch]
Aexp = np.zeros((len(ewin),2))
Aexp[:,0] = np.ones(len(ewin))
Aexp[:,1] = ewin
Xexp = qrnorm(Aexp,np.log(abs(Bexp.T)))
Eexp = np.zeros((len(esub),2))
Eexp[:,0] = np.ones(len(esub))
Eexp[:,1] = esub
bgndEXPline = np.exp(np.dot(Xexp.T,Eexp.T))
bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - bgndEXPline
## Power law non-linear curve fitting using scipy.optimize.curve_fit
elif ~log & (fitfunc==powerlaw):
popt_pl,pcov_pl=curve_fit(powerlaw, ewin, raw_data[fit_start_ch:fit_end_ch],maxfev=maxfev,method=method,
verbose = 0, ftol=ftol, gtol=gtol, xtol=xtol)
c,r = popt_pl
bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - powerlaw(energy_axis[fit_start_ch:],c,r)
## Exponential non-linear curve fitting using scipy.optimize.curve_fit
elif ~log & (fitfunc==exponential):
popt_exp,pcov_exp=curve_fit(exponential, ewin, raw_data[fit_start_ch:fit_end_ch],maxfev=maxfev,method=method,
verbose = 0,p0=[0,0], ftol=ftol, gtol=gtol, xtol=xtol)
a,b = popt_exp
bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - exponential(energy_axis[fit_start_ch:],a,b)
## LCPL non-linear curve fitting using scipy.optimize.curve_fit
elif fitfunc==lcpowerlaw:
if 'nstd' in kwargs.keys():
nstd = kwargs['nstd']
else:
nstd = 100
popt_pl,pcov_pl=curve_fit(powerlaw, ewin, raw_data[fit_start_ch:fit_end_ch],maxfev=maxfev,method=method,
verbose = 0, ftol=ftol, gtol=gtol, xtol=xtol)
c,r = popt_pl
perr = np.sqrt(np.diag(pcov_pl))
rstd = perr[1]
popt_lcpl,pcov_lcpl=curve_fit(lcpowerlaw, ewin, raw_data[fit_start_ch:fit_end_ch],maxfev=maxfev,method=method,
verbose = 0,p0=[c/2,r-nstd*rstd,c/2,r+nstd*rstd], ftol=ftol, gtol=gtol, xtol=xtol)
c1,r1,c2,r2 = popt_lcpl
bg_1D[fit_start_ch:] = raw_data[fit_start_ch:] - lcpowerlaw(energy_axis[fit_start_ch:],c1,r1,c2,r2)
return bg_1D | 5,357,616 |
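# bgsub_1D depends on helpers defined elsewhere in the module (eVtoCh, qrnorm
# and the fit models). The model functions below are a sketch of one plausible
# form, chosen only to match the parameter counts used in the curve_fit calls
# above; the exact conventions (signs, normalisation) are assumptions.
import numpy as np

def powerlaw(E, c, r):
    """Power-law background c * E**(-r)."""
    return c * E ** (-r)

def exponential(E, a, b):
    """Exponential background a * exp(-b * E)."""
    return a * np.exp(-b * E)

def linear(E, m, b):
    """Straight-line background m * E + b."""
    return m * E + b

def lcpowerlaw(E, c1, r1, c2, r2):
    """Linear combination of two power laws (LCPL)."""
    return c1 * E ** (-r1) + c2 * E ** (-r2)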
def about_garble():
"""
about_garble
Returns one of several strings for the about page
"""
garble = ["leverage agile frameworks to provide a robust synopsis for high level overviews.",
"iterate approaches to corporate strategy and foster collaborative thinking to further the overall value proposition.",
"organically grow the holistic world view of disruptive innovation via workplace change management and empowerment.",
"bring to the table win-win survival strategies to ensure proactive and progressive competitive domination.",
"ensure the end of the day advancement, a new normal that has evolved from epistemic management approaches and is on the runway towards a streamlined cloud solution.",
"provide user generated content in real-time will have multiple touchpoints for offshoring."]
return garble[random.randint(0, len(garble) - 1)] | 5,357,617 |
def parse_term_5_elems(expr_list, idx):
"""
Try to parse a terminal node from five elements of {expr_list}, starting
from {idx}.
Return the new expression list on success, None on error.
"""
    # The only 5-element node is pk_h
if expr_list[idx : idx + 2] != [OP_DUP, OP_HASH160]:
return
if not isinstance(expr_list[idx + 2], bytes):
return
if len(expr_list[idx + 2]) != 20:
return
if expr_list[idx + 3 : idx + 5] != [OP_EQUAL, OP_VERIFY]:
return
node = Node().construct_pk_h(expr_list[idx + 2])
expr_list[idx : idx + 5] = [node]
return expr_list | 5,357,618 |
def display_clusters():
"""
Method to display the clusters
"""
offset = int(request.args.get('offset', '0'))
limit = int(request.args.get('limit', '50'))
clusters_id_sorted = sorted(clusters, key=lambda x : -len(clusters[x]))
batches = chunks(range(len(clusters_id_sorted)), size=limit)
return render_template('clusters.html',
offset=offset, limit=limit, batches=batches,
ordered_list=clusters_id_sorted[offset:offset+limit+1],
idx_to_path=idx_to_path,
clusters=clusters) | 5,357,619 |
def densify_sampled_item_predictions(tf_sample_predictions_serial, tf_n_sampled_items, tf_n_users):
"""
    Turns the serial predictions of the sample items into a dense matrix of shape [ n_users, n_sampled_items ]
:param tf_sample_predictions_serial:
:param tf_n_sampled_items:
:param tf_n_users:
:return:
"""
densified_shape = tf.cast(tf.stack([tf_n_users, tf_n_sampled_items]), tf.int32)
densified_predictions = tf.reshape(tf_sample_predictions_serial, shape=densified_shape)
return densified_predictions | 5,357,620 |
def get_market_book(symbols=None, **kwargs):
"""
Top-level function to obtain Book data for a symbol or list of symbols
Parameters
----------
symbols: str or list, default None
A symbol or list of symbols
kwargs:
Additional Request Parameters (see base class)
"""
return Book(symbols, **kwargs).fetch() | 5,357,621 |
def ndarange(*args, shape: tuple = None, **kwargs):
"""Generate arange arrays of arbitrary dimensions."""
arr = np.array([np.arange(*args[i], **kwargs) for i in range(len(args))])
return arr.reshape(shape) if shape is not None else arr.T | 5,357,622 |
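# Usage sketch for ndarange: each positional argument is a tuple of np.arange
# arguments, so two ranges give a (length, 2) array by default (the transpose),
# or any shape requested explicitly.
import numpy as np

print(ndarange((0, 3), (10, 13)))
# [[ 0 10]
#  [ 1 11]
#  [ 2 12]]
print(ndarange((0, 4), shape=(2, 2)))
# [[0 1]
#  [2 3]]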
def runningSum(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
    Straightforward O(n^2) approach; see the linear alternative sketched below.
"""
sum = 0
runningSum = [0] * len(nums)
for i in range(len(nums)):
for j in range(i+1):
runningSum[i] += nums[j]
return runningSum | 5,357,623 |
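# The nested loops above are O(n^2); an equivalent O(n) prefix sum can be
# written with itertools.accumulate, shown here as an optional alternative.
from itertools import accumulate

def running_sum_linear(nums):
    """Return the running (prefix) sums of nums in a single pass."""
    return list(accumulate(nums))

print(running_sum_linear([1, 2, 3, 4]))  # -> [1, 3, 6, 10]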
def setup_vmedia_for_boot(task, boot_iso, parameters=None):
"""Sets up the node to boot from the given ISO image.
This method attaches the given boot_iso on the node and passes
the required parameters to it via virtual floppy image.
:param task: a TaskManager instance containing the node to act on.
:param boot_iso: a bootable ISO image to attach to. Should be either
of below:
* A Swift object - It should be of format 'swift:<object-name>'.
It is assumed that the image object is present in
CONF.ilo.swift_ilo_container;
* A Glance image - It should be format 'glance://<glance-image-uuid>'
or just <glance-image-uuid>;
* An HTTP(S) URL.
:param parameters: the parameters to pass in the virtual floppy image
in a dictionary. This is optional.
:raises: ImageCreationFailed, if it failed while creating the floppy image.
:raises: SwiftOperationError, if any operation with Swift fails.
:raises: IloOperationError, if attaching virtual media failed.
"""
LOG.info(_LI("Setting up node %s to boot from virtual media"),
task.node.uuid)
if parameters:
floppy_image_temp_url = _prepare_floppy_image(task, parameters)
attach_vmedia(task.node, 'FLOPPY', floppy_image_temp_url)
boot_iso_url = None
parsed_ref = urlparse.urlparse(boot_iso)
if parsed_ref.scheme == 'swift':
swift_api = swift.SwiftAPI()
container = CONF.ilo.swift_ilo_container
object_name = parsed_ref.path
timeout = CONF.ilo.swift_object_expiry_timeout
boot_iso_url = swift_api.get_temp_url(
container, object_name, timeout)
elif service_utils.is_glance_image(boot_iso):
boot_iso_url = (
images.get_temp_url_for_glance_image(task.context, boot_iso))
attach_vmedia(task.node, 'CDROM', boot_iso_url or boot_iso) | 5,357,624 |
def _select_features_1run(df, target, problem_type="regression", verbose=0):
"""
One feature selection run.
Inputs:
- df: nxp pandas DataFrame with n data points and p features; to avoid overfitting, only provide data belonging
to the n training data points. The variables have to be scaled to have 0 mean and unit variance.
- target: n dimensional array with targets corresponding to the data points in df
- problem_type: str, either "regression" or "classification" (default: "regression")
- verbose: verbosity level (int; default: 0)
Returns:
- good_cols: list of column names for df with which a prediction model can be trained
"""
if df.shape[0] <= 1:
raise ValueError("n_samples = {}".format(df.shape[0]))
# initial selection of too few but (hopefully) relevant features
if problem_type == "regression":
model = lm.LassoLarsCV(cv=5, eps=1e-8)
elif problem_type == "classification":
model = lm.LogisticRegressionCV(cv=5, penalty="l1", solver="saga", class_weight="balanced")
else:
print("[featsel] WARNING: Unknown problem_type %r - not performing feature selection!" % problem_type)
return []
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# TODO: remove if sklearn least_angle issue is fixed
try:
model.fit(df, target)
except ValueError:
# try once more with shuffled data, if it still doesn't work, give up
rand_idx = np.random.permutation(df.shape[0])
model.fit(df.iloc[rand_idx], target[rand_idx])
# model.fit(df, target)
if problem_type == "regression":
coefs = np.abs(model.coef_)
else:
# model.coefs_ is n_classes x n_features, but we need n_features
coefs = np.max(np.abs(model.coef_), axis=0)
# weight threshold: select at most 0.2*n_train initial features
thr = sorted(coefs, reverse=True)[min(df.shape[1]-1, df.shape[0]//5)]
initial_cols = list(df.columns[coefs > thr])
# noise filter
initial_cols = _noise_filtering(df[initial_cols].to_numpy(), target, initial_cols, problem_type)
good_cols = set(initial_cols)
if verbose > 0:
print("[featsel]\t %i initial features." % len(initial_cols))
# add noise features
X_w_noise = _add_noise_features(df[initial_cols].to_numpy())
# go through all remaining features in splits of n_feat <= 0.5*n_train
other_cols = list(np.random.permutation(list(set(df.columns).difference(initial_cols))))
if other_cols:
n_splits = int(np.ceil(len(other_cols)/max(10, 0.5*df.shape[0]-len(initial_cols))))
split_size = int(np.ceil(len(other_cols)/n_splits))
for i in range(n_splits):
current_cols = other_cols[i*split_size:min(len(other_cols), (i+1)*split_size)]
X = np.hstack([df[current_cols].to_numpy(), X_w_noise])
if problem_type == "regression":
model = lm.LassoLarsCV(cv=5, eps=1e-8)
else:
model = lm.LogisticRegressionCV(cv=5, penalty="l1", solver="saga", class_weight="balanced")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# TODO: remove if sklearn least_angle issue is fixed
try:
model.fit(X, target)
except ValueError:
rand_idx = np.random.permutation(X.shape[0])
model.fit(X[rand_idx], target[rand_idx])
# model.fit(X, target)
current_cols.extend(initial_cols)
if problem_type == "regression":
coefs = np.abs(model.coef_)
else:
# model.coefs_ is n_classes x n_features, but we need n_features
coefs = np.max(np.abs(model.coef_), axis=0)
weights = dict(zip(current_cols, coefs[:len(current_cols)]))
# only include features that are more important than our known noise features
noise_w_thr = np.max(coefs[len(current_cols):])
good_cols.update([c for c in weights if abs(weights[c]) > noise_w_thr])
if verbose > 0:
print("[featsel]\t Split %2i/%i: %3i candidate features identified." % (i+1, n_splits, len(good_cols)), end="\r")
# noise filtering on the combination of features
good_cols = list(good_cols)
good_cols = _noise_filtering(df[good_cols].to_numpy(), target, good_cols, problem_type)
if verbose > 0:
print("\n[featsel]\t Selected %3i features after noise filtering." % len(good_cols))
return good_cols | 5,357,625 |
def main():
"""Console script for github_terminal."""
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-v",
"--verbose",
action="store_true",
help="Show verbose information")
group.add_argument("-q",
"--quiet",
action="store_true",
help="Display less information")
parser.add_argument(
'category',
help='Use the task you want to create like issue, pr, repo ',
choices=["issue", "pr", "repo"])
parser.add_argument(
'action',
help='Use the action to perform in the category.',
choices=["create", "list", "edit", "delete", "close", "status"])
parser.add_argument("-t",
"--title",
help="Title of issue or PR or name of repository")
parser.add_argument("-d",
"--description",
help="Description of issue or PR or repo.")
parser.add_argument("-c", "--config", help="Configuration file to use.")
parser.add_argument("-T",
"--token",
help="Personal access token for github.")
parser.add_argument("-u", "--username", help="Username of the user")
parser.add_argument("-a",
"--assignee",
help="Filter by assignee or set assignee")
parser.add_argument("-b",
"--base",
help="Filter by base branch the pull request are being merged to (ONLY FOR PR AND REPO)")
parser.add_argument("-A", "--author", help="Filter by or set author")
parser.add_argument("-l",
"--label",
help="Filter or set label separated by comma")
parser.add_argument("-L", "--limit", help="Maximum number to fetch")
parser.add_argument("-s", "--state", help="Filter by state")
parser.add_argument(
"-S",
"--since",
help="List issues that have been updated at or after the given date."
" (You can also use value like 2 weeks ago)")
parser.add_argument("-r",
"--repo",
help="Repository to perform action on.")
args = parser.parse_args()
category_specific_action = handle_category_action(args)
category_specific_action(args)
return 0 | 5,357,626 |
def async_add_defaults(hass: HomeAssistant, config_entry: ConfigEntry):
"""Populate default options."""
host: str = config_entry.data[CONF_HOST]
imported_options: dict = hass.data[DOMAIN].get(f"imported_options_{host}", {})
options = {
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_CONSIDER_HOME: DEFAULT_CONSIDER_HOME,
CONF_INTERFACES: [DEFAULT_INTERFACE],
CONF_TRY_HOTSPOT: True,
CONF_INCLUDE_ARP: True,
CONF_INCLUDE_ASSOCIATED: True,
**imported_options,
**config_entry.options,
}
if options.keys() - config_entry.options.keys():
hass.config_entries.async_update_entry(config_entry, options=options) | 5,357,627 |
def edit_recovery(request, recovery_id):
"""This view is used to edit/update existing tag recoveries."""
clip_codes = sorted(list(CLIP_CODE_CHOICES), key=lambda x: x[0])
tag_types = sorted(list(TAG_TYPE_CHOICES), key=lambda x: x[0])
tag_origin = sorted(list(TAG_ORIGIN_CHOICES), key=lambda x: x[0])
tag_colours = sorted(list(TAG_COLOUR_CHOICES), key=lambda x: x[0])
tag_position = sorted(list(TAG_POSITION_CHOICES), key=lambda x: x[0])
recovery = get_object_or_404(Recovery, id=recovery_id)
report = recovery.report
form = RecoveryForm(
report_id=report.id, instance=recovery, data=request.POST or None
)
if request.method == "POST":
if form.is_valid():
recovery = form.save(report)
return redirect("tfat:recovery_detail", recovery_id=recovery.id)
return render(
request,
"tfat/recovery_form.html",
{
"form": form,
"action": "edit",
"clip_codes": clip_codes,
"tag_types": tag_types,
"tag_origin": tag_origin,
"tag_colours": tag_colours,
"tag_position": tag_position,
},
) | 5,357,628 |
def bio2output(text_dir, input_dir, output_dir, output_template, do_copy_text, file_suffix='ann'):
"""
    we expect the input as a directory of BIO files ending with a .txt suffix
    we expect each BIO file to contain the offset info (start and end position of each word) and tag info;
    original words are not required
    convert the BIO formatted files to brat formatted .ann files
    the output directory will not contain the .txt files unless do_copy_text is set
"""
t_input, p_input, p_output = __prepare_path(text_dir, input_dir, output_dir)
for ifn in p_input.glob("*.txt"):
try:
ifn_stem = ifn.stem.split(".")[0]
doc_text_file = t_input / "{}.txt".format(ifn_stem)
ofn = p_output / "{}.{}".format(ifn_stem, file_suffix)
sents = load_bio_file_into_sents(ifn, do_lower=False)
doc_text = read_from_file(doc_text_file)
entities = tag2entity(sents)
output_entities = []
for idx, entity in enumerate(entities):
ann_text, offset_s, offset_e, sem_tag = entity
offset_s, offset_e = int(offset_s), int(offset_e)
# we need to use original text not the ann text here
# you can use ann_text for debugging
raw_entity_text = doc_text[offset_s:offset_e]
if "\n" in raw_entity_text:
idx = raw_entity_text.index("\n")
offset_s = "{} {};{}".format(offset_s, offset_s+idx, offset_s+idx+1)
raw_entity_text = raw_entity_text.replace("\n", " ")
if file_suffix == "ann":
formatted_output = output_template.format("T{}".format(idx+1), sem_tag, offset_s, offset_e, raw_entity_text)
elif file_suffix == "xml":
formatted_output = output_template.format(a=idx+1, b=raw_entity_text, c=offset_s, d=offset_e-offset_s, e=sem_tag)
else:
formatted_output = None
print('formatted output is None due to unknown formatter code')
output_entities.append(formatted_output)
if do_copy_text:
new_text_file = p_output / "{}.txt".format(ifn_stem)
shutil.copy2(doc_text_file.as_posix(), new_text_file.as_posix())
with open(ofn, "w") as f:
formatted_output = "\n".join(output_entities)
if file_suffix == "xml":
formatted_output = BIOC_HEADER.format(ifn.stem) + formatted_output + BIOC_END
f.write(formatted_output)
f.write("\n")
except Exception as ex:
traceback.print_exc() | 5,357,629 |
def e(string, *args):
"""Function which formats error messages."""
return string.format(*[pformat(arg) for arg in args]) | 5,357,630 |
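# Usage sketch for e(), assuming the module's `from pprint import pformat`
# import is in place: positional arguments are pretty-printed before being
# substituted into the format string, which keeps dicts and lists readable.
print(e("unexpected config value: {}", {"retries": 3, "timeout": 1.5}))
# -> unexpected config value: {'retries': 3, 'timeout': 1.5}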
def membership_ending_task(user):
"""
:return: Next task that will end the membership of the user
"""
task = (UserTask.q
.filter_by(user_id=user.id,
status=TaskStatus.OPEN,
type=TaskType.USER_MOVE_OUT)
# Casting jsonb -> bool directly is only supported since PG v11
.filter(UserTask.parameters_json['end_membership'].cast(String).cast(Boolean) == True)
.order_by(UserTask.due.asc())).first()
return task | 5,357,631 |
def hmsstr_to_rad(hmsstr):
"""Convert HH:MM:SS.SS sexigesimal string to radians.
"""
hmsstr = np.atleast_1d(hmsstr)
hours = np.zeros(hmsstr.size)
for i,s in enumerate(hmsstr):
# parse string using regular expressions
match = hms_re.match(s)
if match is None:
warnings.warn("Input is not a valid sexigesimal string: %s" % s)
hours[i] = np.nan
continue
d = match.groupdict(0) # default value is 0
# Check sign of hms string
if d['sign'] == '-':
sign = -1
else:
sign = 1
hour = float(d['hour']) + \
float(d['min'])/60.0 + \
float(d['sec'])/3600.0
hours[i] = sign*hour
return hour_to_rad(hours) | 5,357,632 |
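# hmsstr_to_rad assumes a module-level regular expression hms_re and a helper
# hour_to_rad, neither shown above; the sketch below is one plausible form,
# matching the group names ('sign', 'hour', 'min', 'sec') used by the parser.
import re
import numpy as np

hms_re = re.compile(
    r'^\s*(?P<sign>[-+])?(?P<hour>\d+)'
    r'(?::(?P<min>\d+)(?::(?P<sec>\d+(?:\.\d*)?))?)?\s*$'
)

def hour_to_rad(hours):
    """Convert hours of right ascension to radians (24 h = 2*pi rad)."""
    return np.asarray(hours) * np.pi / 12.0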
def find_optimum_transformations(init_trans, s_pts, t_pts, template_spacing,
e_func, temp_tree, errfunc):
"""
Vary the initial transformation by a translation of up to three times the
grid spacing and compute the transformation with the smallest least square
error.
Parameters:
-----------
init_trans : 4-D transformation matrix
Initial guess of the transformation matrix from the subject brain to
the template brain.
s_pts :
Vertex coordinates in the subject brain.
t_pts :
Vertex coordinates in the template brain.
template_spacing : float
Grid spacing of the vertices in the template brain.
e_func : str
Error function to use. Either 'balltree' or 'euclidian'.
temp_tree :
BallTree(t_pts) if e_func is 'balltree'.
errfunc :
The error function for the computation of the least squares error.
Returns:
--------
poss_trans : list of 4-D transformation matrices
List of one transformation matrix for each variation of the intial
transformation with the smallest least squares error.
"""
# template spacing in meters
tsm = template_spacing / 1e3
# Try different initial translations in space to avoid local minima
# No label should require a translation by more than 3 times the grid spacing (tsm)
auto_match_iters = np.array([[0., 0., 0.],
[0., 0., tsm], [0., 0., tsm * 2], [0., 0., tsm * 3],
[tsm, 0., 0.], [tsm * 2, 0., 0.], [tsm * 3, 0., 0.],
[0., tsm, 0.], [0., tsm * 2, 0.], [0., tsm * 3, 0.],
[0., 0., -tsm], [0., 0., -tsm * 2], [0., 0., -tsm * 3],
[-tsm, 0., 0.], [-tsm * 2, 0., 0.], [-tsm * 3, 0., 0.],
[0., -tsm, 0.], [0., -tsm * 2, 0.], [0., -tsm * 3, 0.]])
# possible translation matrices
poss_trans = []
for p, ami in enumerate(auto_match_iters):
# vary the initial translation value by adding ami
tx, ty, tz = init_trans[0, 3] + ami[0], init_trans[1, 3] + ami[1], init_trans[2, 3] + ami[2]
sx, sy, sz = init_trans[0, 0], init_trans[1, 1], init_trans[2, 2]
rx, ry, rz = 0, 0, 0
# starting point for finding the transformation matrix trans which
# minimizes the error between np.dot(s_pts, trans) and t_pts
x0 = np.array([tx, ty, tz, rx, ry, rz])
def error(x):
tx_, ty_, tz_, rx_, ry_, rz_ = x
trans0 = np.zeros([4, 4])
trans0[:3, :3] = rotation3d(rx_, ry_, rz_) * [sx, sy, sz]
trans0[0, 3] = tx_
trans0[1, 3] = ty_
trans0[2, 3] = tz_
# rotate and scale
estim = np.dot(s_pts, trans0[:3, :3].T)
# translate
estim += trans0[:3, 3]
if e_func == 'balltree':
err = errfunc(estim[:, :3], temp_tree)
else:
# e_func == 'euclidean'
err = errfunc(estim[:, :3], t_pts)
return err
est, _, info, msg, _ = leastsq(error, x0, full_output=True)
est = np.concatenate((est, (init_trans[0, 0],
init_trans[1, 1],
init_trans[2, 2])
))
trans = _trans_from_est(est)
poss_trans.append(trans)
return poss_trans | 5,357,633 |
def export_excel(filename, data: list or dict, columns: list, **kwargs):
"""导出excel文件"""
df = pd.DataFrame(data=data, columns=columns)
file_path = os.path.join(os.path.join(base_dir, "export_files"), filename)
df.to_excel(file_path, **kwargs)
print(f"===== Finished in saving Excel file: {file_path} =====") | 5,357,634 |
def execute_transaction(query):
"""Execute Transaction"""
return Neo4jHelper.run_single_query(query) | 5,357,635 |
def generate_linear_constraints(points, verbose=False):
""" Given point coordinates, generate angle constraints. """
from scipy.linalg import null_space
from angle_set import create_theta, get_n_linear, perturbe_points
N, d = points.shape
num_samples = get_n_linear(N) * 2
if verbose:
print('N={}, generating {}'.format(N, num_samples))
M = int(N * (N - 1) * (N - 2) / 2)
thetas = np.empty((num_samples, M + 1))
for i in range(num_samples):
points_pert = perturbe_points(points, magnitude=0.0001)
theta, __ = create_theta(points_pert)
thetas[i, :-1] = theta
thetas[i, -1] = -1
CT = null_space(thetas)
A = CT[:-1, :].T
b = CT[-1, :]
return A, b | 5,357,636 |
def add_node_to_parent(node, parent_node):
"""
Add given object under the given parent preserving its local transformations
:param node: str
:param parent_node: str
"""
return maya.cmds.parent(node, parent_node, add=True, s=True) | 5,357,637 |
def coerce(from_, to, **to_kwargs):
"""
A preprocessing decorator that coerces inputs of a given type by passing
them to a callable.
Parameters
----------
from : type or tuple or types
Inputs types on which to call ``to``.
to : function
Coercion function to call on inputs.
**to_kwargs
Additional keywords to forward to every call to ``to``.
Examples
--------
>>> @preprocess(x=coerce(float, int), y=coerce(float, int))
... def floordiff(x, y):
... return x - y
...
>>> floordiff(3.2, 2.5)
1
>>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
... def add_binary_strings(x, y):
... return bin(x + y)[2:]
...
>>> add_binary_strings('101', '001')
'110'
"""
def preprocessor(func, argname, arg):
if isinstance(arg, from_):
return to(arg, **to_kwargs)
return arg
return preprocessor | 5,357,638 |
def get_module_config_filename():
"""Returns the path of the module configuration file (e.g. 'app.yaml').
Returns:
The path of the module configuration file.
Raises:
KeyError: The MODULE_YAML_PATH environment variable is not set.
"""
module_yaml_path = os.environ['MODULE_YAML_PATH']
logging.info('Using module_yaml_path from env: %s', module_yaml_path)
return module_yaml_path | 5,357,639 |
def Binary(value):
"""construct an object capable of holding a binary (long) string value."""
return value | 5,357,640 |
def _get_domain_session(token, domain_name=None):
"""
Return v3 session for token
"""
domain_name = domain_name or 'default'
auth = v3.Token(auth_url=get_auth_url(),
domain_id=domain_name,
token=token)
return session.Session(auth=auth, user_agent=USER_AGENT,
verify=verify_https()) | 5,357,641 |
def open_invoice_as_email(inv: Invoice):
""" Opens E-Mail windows to send the invoice """
recipients = []
if inv.payer.email != "":
recipients.append(inv.payer.email)
accounting_company = Company(config.CONSTANTS["COMPANY_NAME_ACCOUNTING"])
if accounting_company.email != "":
recipients.append(accounting_company.email)
popup_email(recipients=recipients,
subject="Fatura " + inv.serial,
attachment=inv.file_path) | 5,357,642 |
def load_model(name: str, root: str = "") -> Tuple[Model, Any]:
"""Load the trained model (structure, weights) and vectorizer from files."""
json_file, h5_file, vec_file = (
os.path.join(root, "{}.{}".format(name, ext)) for ext in ("json", "h5", "pkl")
)
with open(json_file) as fp:
model = model_from_json(fp.read()) # type: Model
model.load_weights(h5_file)
with open(vec_file, "rb") as bfp: # type: BinaryIO
vectorizer = pickle.load(bfp)
logging.info("Model loaded from {}".format(root + "/"))
return model, vectorizer | 5,357,643 |
def fix_troposphere_references(template):
""""Tranverse the troposphere ``template`` looking missing references.
Fix them by adding a new parameter for those references."""
def _fix_references(value):
if isinstance(value, troposphere.Ref):
name = value.data['Ref']
if name not in (list(template.parameters.keys()) + list(template.resources.keys())) and not name.startswith('AWS::'):
template.add_parameter(
troposphere.Parameter(
name,
Type=getattr(value, '_type', 'String'),
)
)
elif isinstance(value, troposphere.Join):
for v in value.data['Fn::Join'][1]:
_fix_references(v)
elif isinstance(value, troposphere.BaseAWSObject):
for _, v in six.iteritems(value.properties):
_fix_references(v)
for _, resource in six.iteritems(template.resources):
for _, value in six.iteritems(resource.properties):
_fix_references(value)
return template | 5,357,644 |
def parse_config_to_dict(cfg_file, section):
""" Reads config file and returns a dict of parameters.
Args:
cfg_file: <String> path to the configuration ini-file
section: <String> section of the configuration file to read
Returns:
cfg: <dict> configuration parameters of 'section' as a dict
"""
cfg = configparser.ConfigParser()
cfg.read(cfg_file)
if cfg.has_section(section):
return dict(cfg.items(section))
else:
print("Section '%s' not found in file %s!" % (section, cfg_file))
return None | 5,357,645 |
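# Usage sketch for parse_config_to_dict with a hypothetical settings.ini
# containing:
#
#   [paths]
#   data_dir = /tmp/data
#   n_workers = 4
#
# The call returns {'data_dir': '/tmp/data', 'n_workers': '4'}; note that
# configparser always yields string values.
cfg = parse_config_to_dict("settings.ini", "paths")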
def cnn_net(data,
dict_dim,
emb_dim=128,
hid_dim=128,
hid_dim2=96,
class_dim=2,
win_size=3):
"""
Conv net
"""
# embedding layer
emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
# convolution layer
conv_3 = fluid.nets.sequence_conv_pool(
input=emb,
num_filters=hid_dim,
filter_size=win_size,
act="tanh",
pool_type="max")
# full connect layer
fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2)
# softmax layer
prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act="softmax")
return prediction, fc_1 | 5,357,646 |
def get_commands(servo):
"""Get specific flash commands for the build target.
Each board needs specific commands including the voltage for Vref, to turn
on and turn off the SPI flash. The get_*_commands() functions provide a
board-specific set of commands for these tasks. The voltage for this board
needs to be set to 1.8 V.
Args:
servo (servo_lib.Servo): The servo connected to the target DUT.
Returns:
list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]
        dut_control*=2d arrays formatted like [["cmd1", "arg1", "arg2"],
["cmd2", "arg3", "arg4"]]
where cmd1 will be run before cmd2
flashrom_cmd=command to flash via flashrom
futility_cmd=command to flash via futility
"""
dut_control_on = []
dut_control_off = []
# TODO: Add the supported servo cases and their commands.
    if servo:
        programmer = ''
    else:
        raise Exception('Servo %r not supported' % servo)
flashrom_cmd = ['flashrom', '-p', programmer, '-w']
futility_cmd = ['futility', 'update', '-p', programmer, '-i']
return [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] | 5,357,647 |
def test_random_deviation_profile_count(game, _):
"""Test dev profile count"""
rest = game.random_restriction()
devs = restrict.deviation_profiles(game, rest)
assert devs.shape[0] == restrict.num_deviation_profiles(game, rest), \
"num_deviation_profiles didn't return correct number"
assert np.sum(devs > 0) == restrict.num_deviation_payoffs(game, rest), \
"num_deviation_profiles didn't return correct number"
assert np.all(np.sum(devs * ~rest, 1) == 1)
count = 0
for r_ind in range(game.num_roles):
r_devs = restrict.deviation_profiles(game, rest, r_ind)
assert np.all(np.sum(r_devs * ~rest, 1) == 1)
count += r_devs.shape[0]
assert count == restrict.num_deviation_profiles(game, rest) | 5,357,648 |
def start_linux(user, password, url, personal, branch, remote,
mvngoals, mvnargs, jdk):
"""
Start a custom linux build
"""
props = dict_as_properties({'project-default-jdk': "%{}%".format(jdk),
'maven-goals': mvngoals,
'maven-args': mvnargs})
data = request_xml(_NEO4JLINUX_ID, personal, branch, remote, props)
send_request(user, password, url, data) | 5,357,649 |
def FormatRow(Cn, Row, COLSP):
"""
"""
fRow = ""
for i, c in enumerate(Row):
sc = str(c)
lcn = len(Cn[i])
sc = sc[ 0 : min(len(sc), lcn+COLSP-2) ]
fRow += sc + " "*(COLSP+lcn-len(sc))
return fRow | 5,357,650 |
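# Usage sketch for FormatRow: Cn holds the column names (their lengths set
# the base column widths), Row holds the values and COLSP adds extra spacing;
# values longer than a column are truncated.
columns = ["name", "count"]
print(FormatRow(columns, ["alpha", 42], COLSP=4))
# 'alpha' is padded to len("name") + 4 = 8 characters, '42' to len("count") + 4 = 9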
def handle_release(pin, evt):
"""
Clears the last tone/light when a button
is released.
"""
if pin > 4:
return False
pin -= 1
explorerhat.light[pin].off()
tone.power_off() | 5,357,651 |
def makeRoute(start : str, end : str) -> List[str]:
"""Find the shortest route between two systems.
:param str start: string name of the starting system. Must exist in bbData.builtInSystemObjs
:param str end: string name of the target system. Must exist in bbData.builtInSystemObjs
:return: list of string system names where the first element is start, the last element is end,
and all intermediary systems are adjacent
:rtype: list[str]
"""
return bbAStar(start, end, bbData.builtInSystemObjs) | 5,357,652 |
def list_composers():
""" GET all composers """
r = requests.get(url = COMPOSERS_ENDPOINT)
result_text = r.text
print("")
print("result: " + result_text) | 5,357,653 |
def main():
"""The main program.
"""
parser = \
argparse.ArgumentParser(description='Deep-learning based classifiers')
parser.add_argument('--evaluate', action='store_true', default=False,
help='evaluate the classifier on the given datasource')
parser.add_argument('--top', type=int, default=None,
help='evaluate top-n accuracy of classifier')
parser.add_argument('--scores', '--no-scores', dest='scores',
action=ToolboxArgparse.NegateAction,
nargs=0, default=None,
help='output classification scores '
'(in case of soft classifier)')
parser.add_argument('--classifier-info', action='store_true',
default=False,
help='output additional information on the network')
parser.add_argument('--densenet', action='store_true', default=False,
help='use densenet as classifier')
ToolboxArgparse.add_arguments(parser)
NetworkArgparse.prepare(parser)
parser.add_argument('image', metavar='IMAGE', nargs='*',
help='images to classify')
args = parser.parse_args()
ToolboxArgparse.process_arguments(parser, args)
if args.densenet:
# FIXME[hack]: densenet should be properly integrated into the toolbox
import dltb.thirdparty.tensorflow
from experiments.densenet import DenseNet
classifier = DenseNet()
else:
classifier = NetworkArgparse.network(parser, args)
if classifier is None:
print("No classifier was specified.")
return
if args.classifier_info:
print(f"{type(classifier).__name__} is an ImageClassifier:",
isinstance(classifier, ImageClassifier))
print(f"{type(classifier).__name__} is a SoftClassifier:",
isinstance(classifier, SoftClassifier))
print(f"{type(classifier).__name__} is a Network:",
isinstance(classifier, Network))
if args.evaluate:
#
# Evaluate classifier on a (labeled) dataset
#
evaluator = Evaluator(classifier)
terminal = Terminal()
imagenet = ImageNet()
imagenet.prepare()
evaluator.evaluate(imagenet, top=args.top, terminal=terminal)
else:
#
# Classify data given as command line arguments
#
if args.scores is None:
args.scores = isinstance(classifier, SoftClassifier)
elif args.scores and not isinstance(classifier, SoftClassifier):
args.scores = False
LOG.warning("Not reporting scores as %s is not a soft classifier",
classifier)
if args.top is not None and not isinstance(classifier, SoftClassifier):
args.top = None
LOG.warning("Not listing top classes as %s is not a "
"soft classifier", classifier)
for filename in args.image:
if args.top is None:
if args.scores:
label, score = \
classifier.classify(filename, confidence=True)
print(f"classify('{filename}', confidence=True): "
f"{label['text'], score}")
else:
label = classifier.classify(filename)
print(f"classify('{filename}'): {label['text']}")
else:
if args.scores:
labels, scores = \
classifier.classify(filename, top=args.top,
confidence=True)
print(f"classify('{filename}', top={args.top}, "
f"scores={args.scores}): ")
for i, (label, score) in enumerate(zip(labels, scores)):
print(f"({i+1}) {label['text']} ({score*100:.2f}%)")
else:
labels = classifier.classify(filename, top=args.top)
print(f"classify('{filename}', top=args.top): "
f"{[label['text'] for label in labels]}")
# else:
# scores = classifier.class_scores(filename)
# print(f"class_scores('{filename}': {scores.shape}") | 5,357,654 |
def update_node_categories(
target_graph: BaseGraph,
clique_graph: nx.MultiDiGraph,
clique: List,
category_mapping: Optional[Dict[str, str]],
strict: bool = True,
) -> List:
"""
For a given clique, get category for each node in clique and validate against Biolink Model,
mapping to Biolink Model category where needed.
For example, If a node has ``biolink:Gene`` as its category, then this method adds all of its ancestors.
Parameters
----------
target_graph: kgx.graph.base_graph.BaseGraph
The original graph
clique_graph: networkx.Graph
The clique graph
clique: List
A list of nodes from a clique
category_mapping: Optional[Dict[str, str]]
Mapping for non-Biolink Model categories to Biolink Model categories
strict: bool
Whether or not to merge nodes in a clique that have conflicting node categories
Returns
-------
List
The clique
"""
updated_clique_graph_properties = {}
updated_target_graph_properties = {}
for node in clique:
# For each node in a clique, get its category property
data = clique_graph.nodes()[node]
if 'category' in data:
categories = data['category']
else:
categories = get_category_from_equivalence(target_graph, clique_graph, node, data)
# differentiate between valid and invalid categories
(
valid_biolink_categories,
invalid_biolink_categories,
invalid_categories,
) = check_all_categories(categories)
log.debug(
f"valid biolink categories: {valid_biolink_categories} invalid biolink categories: {invalid_biolink_categories} invalid_categories: {invalid_categories}"
)
# extend categories to have the longest list of ancestors
extended_categories: List = []
for x in valid_biolink_categories:
ancestors = get_biolink_ancestors(x)
if len(ancestors) > len(extended_categories):
extended_categories.extend(ancestors)
log.debug(f"Extended categories: {extended_categories}")
clique_graph_update_dict: Dict = {'category': list(extended_categories)}
target_graph_update_dict: Dict = {}
if invalid_biolink_categories:
if strict:
clique_graph_update_dict['_excluded_from_clique'] = True
target_graph_update_dict['_excluded_from_clique'] = True
clique_graph_update_dict['invalid_biolink_category'] = invalid_biolink_categories
target_graph_update_dict['invalid_biolink_category'] = invalid_biolink_categories
if invalid_categories:
clique_graph_update_dict['_invalid_category'] = invalid_categories
target_graph_update_dict['_invalid_category'] = invalid_categories
updated_clique_graph_properties[node] = clique_graph_update_dict
updated_target_graph_properties[node] = target_graph_update_dict
nx.set_node_attributes(clique_graph, updated_clique_graph_properties)
target_graph.set_node_attributes(target_graph, updated_target_graph_properties)
return clique | 5,357,655 |
def _configSpecial_OrthoOpts_zcentre(
target, parser, shortArg, longArg, helpText):
"""Configures the ``zcentre`` option for the ``OrthoOpts`` class. """
parser.add_argument(
shortArg, longArg, metavar=('X', 'Y'),
type=float, nargs=2, help=helpText) | 5,357,656 |
def norm_error(series):
"""Normalize time series.
"""
# return series
new_series = deepcopy(series)
new_series[:,0] = series[:,0] - np.mean(series[:,0])
return 2*(new_series)/max(abs(new_series[:,0])) | 5,357,657 |
def f1d(x):
"""Non-linear function for simulation"""
return(1.7*(1/(1+np.exp(-(x-0.5)*20))+0.75*x)) | 5,357,658 |
def get_field_map(src, flds):
"""
Returns a field map for an arcpy data itme from a list or dictionary.
Useful for operations such as renaming columns merging feature classes.
Parameters:
-----------
    src: str, arcpy data item or arcpy.mp layer or table
Source data item containing the desired fields.
flds: dict <str: str>
Mapping between old (keys) and new field names (values).
Returns:
--------
arcpy.FieldMappings
"""
mappings = arcpy.FieldMappings()
if isinstance(flds, list):
flds = {n: n for n in flds}
for old_name, new_name in flds.items():
fm = arcpy.FieldMap()
fm.addInputField(src, old_name)
out_f = fm.outputField
out_f.name = new_name
out_f.aliasName = new_name
fm.outputField = out_f
fm.outputField.name = new_name
mappings.addFieldMap(fm)
return mappings | 5,357,659 |
def b32qlc_decode(value):
"""
Decodes a value in qlc encoding to bytes using base32 algorithm
with a custom alphabet: '13456789abcdefghijkmnopqrstuwxyz'
:param value: the value to decode
:type: bytes
:return: decoded value
:rtype: bytes
>>> b32qlc_decode(b'fxop4ya=')
b'okay'
"""
return b32decode(value.translate(QLC_DECODE_TRANS)) | 5,357,660 |
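# b32qlc_decode relies on a module-level QLC_DECODE_TRANS table; a plausible
# sketch (an assumption, not the library's actual definition) maps the custom
# qlc alphabet onto the standard RFC 4648 base32 alphabet so the stdlib
# b32decode can be reused.
from base64 import b32decode

QLC_ALPHABET = b'13456789abcdefghijkmnopqrstuwxyz'
B32_ALPHABET = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
QLC_DECODE_TRANS = bytes.maketrans(QLC_ALPHABET, B32_ALPHABET)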
def createPreProcessingLayers():
"""
Creates a model with the initial pre-processing layers.
"""
model = Sequential()
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((50, 20), (0, 0))))
return model | 5,357,661 |
def get_random_atoms(a=2.0, sc_size=2, numbers=[6, 8],
set_seed: int = None):
"""Create a random structure."""
if set_seed:
np.random.seed(set_seed)
cell = np.eye(3) * a
positions = np.array([[0, 0, 0], [a/2, a/2, a/2]])
unit_cell = Atoms(cell=cell, positions=positions, numbers=numbers,
pbc=True)
multiplier = np.identity(3) * sc_size
atoms = make_supercell(unit_cell, multiplier)
atoms.positions += (2 * np.random.rand(len(atoms), 3) - 1) * 0.1
flare_atoms = FLARE_Atoms.from_ase_atoms(atoms)
return flare_atoms | 5,357,662 |
def pocsense(kspace, sensitivities, i=None, r=None, l=None, g=None, o=None, m=None):
"""
Perform POCSENSE reconstruction.
:param kspace array:
:param sensitivities array:
:param i int: max. number of iterations
:param r float: regularization parameter
:param l int: toggle l1-wavelet or l2 regularization
:param g bool: ()
:param o float: ()
:param m float: ()
"""
usage_string = "pocsense [-i d] [-r f] [-l d] kspace sensitivities output"
cmd_str = f'{BART_PATH} '
cmd_str += 'pocsense '
flag_str = ''
opt_args = f''
multituples = []
if i is not None:
flag_str += f'-i {i} '
if r is not None:
flag_str += f'-r {r} '
if l is not None:
flag_str += f'-l {l} '
if g is not None:
flag_str += f'-g '
if o is not None:
flag_str += f'-o {o} '
if m is not None:
flag_str += f'-m {m} '
cmd_str += flag_str + opt_args + ' '
cmd_str += f"{' '.join([' '.join([str(x) for x in arg]) for arg in zip(*multituples)]).strip()} {NAME}kspace {NAME}sensitivities {NAME}output "
cfl.writecfl(NAME + 'kspace', kspace)
cfl.writecfl(NAME + 'sensitivities', sensitivities)
if DEBUG:
print(cmd_str)
os.system(cmd_str)
outputs = cfl.readcfl(NAME + 'output')
return outputs | 5,357,663 |
def vgg16_bn(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
return model | 5,357,664 |
def dump_yaml(content: dict, filepath: str):
"""Dump the content into filepath."""
with open(filepath, "w") as file:
file.write(yaml.dump(content)) | 5,357,665 |
def tau(x, cval):
"""Robust estimators of location and scale, with breakdown points of 50%.
Also referred to as: Tau measure of location by Yohai and Zamar
Source: Yohai and Zamar JASA, vol 83 (1988), pp 406-413 and
Maronna and Zamar Technometrics, vol 44 (2002), pp. 307-317"""
med = median(x)
mad = median(numpy.abs(x - med))
zscore = 0.675 # Z-score of the 75th percentile of the normal distribution
s = zscore * mad
wnom = 0
wden = 0
for i in range(len(x)):
y = (x[i] - med) / s
temp = (1 - (y / cval)**2)**2
        if abs(y) <= cval:  # weight only points within the cutoff
wnom += temp * x[i]
wden += temp
return wnom / wden | 5,357,666 |
def shn_gis_location_represent(id, showlink=True):
""" Represent a location given its id """
table = db.gis_location
try:
location = db(table.id == id).select(table.id,
table.name,
table.level,
table.parent,
table.lat,
table.lon,
cache=(cache.ram, 60),
limitby=(0, 1)).first()
return shn_gis_location_represent_row(location, showlink)
except:
try:
# "Invalid" => data consistency wrong
represent = location.id
except:
represent = NONE
return represent | 5,357,667 |
def de_pearson_dataframe(df, genes, pair_by='type', gtex=True, tcga=True):
"""
PearsonR scores of gene differential expression between tumor and normal types.
1. Calculate log2FC of genes for TCGA tumor samples with matching TCGA normal types
2. Compare log2fc to tumor type compared to all other normal types
3. Calculate PearsonR and save
:param pd.DataFrame df: Exp/TPM dataframe containing "type"/"tissue/tumor/label" metadata columns
:param list genes: Genes to use in differential expression calculation
:param str pair_by: How to pair tumors/normals. Either by "type" or "tissue"
:param bool gtex: If True, includes GTEx in normal set
:param bool tcga: If True, includes TCGA in normal set
:return: PearsonR dataframe
:rtype: pd.DataFrame
"""
# Subset by Tumor/Normal
tumor = df[df.label == 'tcga-tumor']
tcga_n = df[df.label == 'tcga-normal']
# Determine normal comparison group based on options
if gtex and tcga:
normal = df[df.tumor == 'no']
elif gtex:
normal = df[df.label == 'gtex']
else:
normal = tcga_n
# Identify tumor types with paired tcga-normal
tum_types = [x for x in sorted(tumor[pair_by].unique())
if x in sorted(df[df.label == 'tcga-normal'][pair_by].unique())]
norm_types = []
# For all paired tumor_types, calculate l2fc, then PearsonR of l2fc to all normal tumor types
pearson_l2fc = defaultdict(list)
for tum_type in tum_types:
# First calculate TCGA tumor/normal prior for comparison
t_med = tumor[tumor[pair_by] == tum_type][genes].median()
n_med = tcga_n[tcga_n[pair_by] == tum_type][genes].median()
prior_l2fc = log2fc(t_med, n_med)
# For every normal type, calculate pearsonR correlation
for (norm_type, label), _ in normal.groupby(pair_by).label.value_counts().iteritems():
if tum_type == norm_type:
l2fc = prior_l2fc
else:
n_med = normal[normal[pair_by] == norm_type][genes].median()
l2fc = log2fc(t_med, n_med)
# Calculate PearsonR of l2fc and comparison tissue/type
pearson_r = round(pearsonr(prior_l2fc, l2fc)[0], 2)
pearson_l2fc[tum_type[:20]].append(pearson_r)
norm_label = '{}_{}'.format(label, norm_type[:20])
if norm_label not in norm_types:
norm_types.append(norm_label)
return pd.DataFrame(pearson_l2fc, index=norm_types) | 5,357,668 |
def chunks(list_, n):
"""
Yield successive n-sized chunks from list_.
Based on https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
"""
for offset in range(0, len(list_), n):
yield list_[offset:offset + n] | 5,357,669 |
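A quick illustration of chunks:

list(chunks([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]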
def load_YUV_as_dic_tensor(path_img):
"""
    Construct a dict with 3 entries ('y', 'u', 'v'), each of which
    is a tensor loaded from path_img + '_' + key + '.png'.
    ! Return a dictionary of 3D tensors (i.e. without a dummy batch index)
"""
dic_res = {}
key = ['y', 'u', 'v']
for k in key:
img = Image.open(path_img + '_' + k + '.png')
        # check if image mode is correct: it should be a one-channel
        # uint8 image (i.e. mode L)
if img.mode != 'L':
img = img.convert('L')
dic_res[k] = to_tensor(img)
return dic_res | 5,357,670 |
def tensor_log10(t1, out_format, dtype=None):
"""
Takes the log base 10 of each input in the tensor.
Note that this is applied to all elements in the tensor not just non-zeros.
Warnings
---------
The log10 of 0 is undefined and is performed on every element in the tensor regardless of sparsity.
Parameters
------------
t1: tensor, array_like
input tensor or array_like object
out_format: format, mode_format, optional
* If a :class:`format` is specified, the result tensor is stored in the format out_format.
        * If a :class:`mode_format` is specified, the result tensor has all of its dimensions
            stored in the :class:`mode_format` passed in.
dtype: Datatype
The datatype of the output tensor.
Examples
----------
>>> import pytaco as pt
>>> pt.tensor_log10([10, 100], out_format=pt.compressed, dtype=pt.float32).to_array()
array([1., 2.], dtype=float32)
Returns
--------
log10: tensor
The element wise log10 of the input tensor.
"""
t1 = as_tensor(t1, copy=False)
cast_val = _cm.max_type(_cm.float32, t1.dtype)
f = lambda x: _cm.log10(_cm.cast(x, cast_val))
return _compute_unary_elt_eise_op(f, t1, out_format, dtype) | 5,357,671 |
def threadVideoGet(source=0):
"""
Dedicated thread for grabbing video frames with VideoGet object.
Main thread shows video frames.
"""
video_getter = VideoGet(source).start()
cps = CountsPerSec().start()
while True:
if (cv2.waitKey(1) == ord("q")) or video_getter.stopped:
video_getter.stop()
break
frame = video_getter.frame
frame = putIterationsPerSec(frame, cps.countsPerSec())
cv2.imshow("Video", frame)
cps.increment() | 5,357,672 |
def main():
"""Convert YAML specifications to database DDL."""
parser = cmd_parser("Generate SQL statements to update a PostgreSQL "
"database to match the schema specified in a "
"YAML-formatted file(s)", __version__)
parser.add_argument('-m', '--multiple-files', action='store_true',
help='input from multiple files (metadata directory)')
parser.add_argument('spec', nargs='?', type=FileType('r'),
default=sys.stdin, help='YAML specification')
parser.add_argument('-1', '--single-transaction', action='store_true',
dest='onetrans', help="wrap commands in BEGIN/COMMIT")
parser.add_argument('-u', '--update', action='store_true',
help="apply changes to database (implies -1)")
parser.add_argument('--revert', action='store_true',
help="generate SQL to revert changes")
parser.add_argument('--quote-reserved', action='store_true',
help="quote SQL reserved words")
parser.add_argument('-n', '--schema', metavar='SCHEMA', dest='schemas',
action='append', default=[],
help="process only named schema(s) (default all)")
cfg = parse_args(parser)
output = cfg['files']['output']
options = cfg['options']
db = Database(cfg)
if options.multiple_files:
inmap = db.map_from_dir()
else:
inmap = yaml.safe_load(options.spec)
stmts = db.diff_map(inmap)
if stmts:
fd = output or sys.stdout
if options.onetrans or options.update:
print("BEGIN;", file=fd)
for stmt in stmts:
if isinstance(stmt, tuple):
outstmt = "".join(stmt) + '\n'
else:
outstmt = "%s;\n" % stmt
if PY2:
outstmt = outstmt.encode('utf-8')
print(outstmt, file=fd)
if options.onetrans or options.update:
print("COMMIT;", file=fd)
if options.update:
try:
for stmt in stmts:
if isinstance(stmt, tuple):
# expected format: (\copy, table, from, path, csv)
db.dbconn.copy_from(stmt[3], stmt[1])
else:
db.dbconn.execute(stmt)
except:
db.dbconn.rollback()
raise
else:
db.dbconn.commit()
print("Changes applied", file=sys.stderr)
if output:
output.close() | 5,357,673 |
def test_score_scaling(sequences):
"""
Scaling the substitution scores and gap penalties by a constant
factor should not influence the obtained E-values.
Test this by aligning real sequences with a standard and scaled
scoring scheme and comparing the calculated E-values of these
alignments.
"""
SCALING_FACTOR = 1000
GAP_PENALTY = (-12, -1)
SEQ_LENGTH = 300
matrix = align.SubstitutionMatrix.std_protein_matrix()
np.random.seed(0)
std_estimator = align.EValueEstimator.from_samples(
seq.ProteinSequence.alphabet, matrix, GAP_PENALTY,
BACKGROUND
)
scores = [
align.align_optimal(
sequences[i], sequences[i+1], matrix, GAP_PENALTY, local=True,
max_number=1
)[0].score for i in range(9)
]
std_log_evalues = std_estimator.log_evalue(
scores, SEQ_LENGTH, SEQ_LENGTH
)
scaled_matrix = align.SubstitutionMatrix(
seq.ProteinSequence.alphabet,
seq.ProteinSequence.alphabet,
matrix.score_matrix() * SCALING_FACTOR
)
scaled_gap_penalty = (
GAP_PENALTY[0] * SCALING_FACTOR,
GAP_PENALTY[1] * SCALING_FACTOR
)
scaled_estimator = align.EValueEstimator.from_samples(
seq.ProteinSequence.alphabet, scaled_matrix, scaled_gap_penalty,
BACKGROUND
)
scores = [
align.align_optimal(
sequences[i], sequences[i+1], scaled_matrix, scaled_gap_penalty,
local=True, max_number=1
)[0].score for i in range(9)
]
scaled_log_evalues = scaled_estimator.log_evalue(
scores, SEQ_LENGTH, SEQ_LENGTH
)
# Due to relatively low sample size, expect rather large deviation
assert std_log_evalues.tolist() \
== pytest.approx(scaled_log_evalues.tolist(), rel=0.2) | 5,357,674 |
def networkedge_polygon_intersection(
edge_shapefile,
hazard_shapefile,
output_shapefile,
edge_id_column,
polygon_id_column,
edge_length_column,
crs={"init": "epsg:4326"},
):
"""Intersect network edges and hazards and write results to shapefiles
Parameters
----------
edge_shapefile
Shapefile of network LineStrings
hazard_shapefile
Shapefile of hazard Polygons
output_shapefile
String name of edge-hazard shapefile for storing results
Outputs
-------
output_shapefile
- edge_id - String name of intersecting edge ID
- length - Float length of intersection of edge LineString and hazard Polygon
- geometry - Shapely LineString geometry of intersection of edge LineString and hazard Polygon
"""
print(
"* Starting {} and {} intersections".format(
edge_shapefile, hazard_shapefile
)
)
    line_gpd = gpd.read_file(edge_shapefile)
    line_gpd = line_gpd.to_crs(crs)  # to_crs returns a new GeoDataFrame; reassign to keep the reprojection
    poly_gpd = gpd.read_file(hazard_shapefile)
    poly_gpd = poly_gpd.to_crs(crs)
if polygon_id_column is None:
polygon_id_column = "ID"
poly_gpd["ID"] = poly_gpd.index.values.tolist()
if len(line_gpd.index) > 0 and len(poly_gpd.index) > 0:
line_gpd.columns = map(str.lower, line_gpd.columns)
poly_gpd.columns = map(str.lower, poly_gpd.columns)
line_bounding_box = line_gpd.total_bounds
line_bounding_box_coord = list(
itertools.product(
[line_bounding_box[0], line_bounding_box[2]],
[line_bounding_box[1], line_bounding_box[3]],
)
)
line_bounding_box_geom = Polygon(line_bounding_box_coord)
line_bounding_box_gpd = gpd.GeoDataFrame(
pd.DataFrame([[1], [line_bounding_box_geom]]).T, crs=crs
)
line_bounding_box_gpd.columns = ["ID", "geometry"]
poly_bounding_box = poly_gpd.total_bounds
poly_bounding_box_coord = list(
itertools.product(
[poly_bounding_box[0], poly_bounding_box[2]],
[poly_bounding_box[1], poly_bounding_box[3]],
)
)
poly_bounding_box_geom = Polygon(poly_bounding_box_coord)
poly_bounding_box_gpd = gpd.GeoDataFrame(
pd.DataFrame([[1], [poly_bounding_box_geom]]).T, crs=crs
)
poly_bounding_box_gpd.columns = ["ID", "geometry"]
poly_sindex = poly_bounding_box_gpd.sindex
selected_polys = poly_bounding_box_gpd.iloc[
list(
poly_sindex.intersection(
line_bounding_box_gpd.geometry.iloc[0].bounds
)
)
]
if len(selected_polys.index) > 0:
data = []
poly_sindex = poly_gpd.sindex
for lines in line_gpd.itertuples():
intersected_polys = poly_gpd.iloc[
list(poly_sindex.intersection(lines.geometry.bounds))
]
for poly in intersected_polys.itertuples():
if (
(lines.geometry.intersects(poly.geometry) is True)
and (poly.geometry.is_valid is True)
and (lines.geometry.is_valid is True)
):
if line_length(lines.geometry) > 1e-3:
geom = lines.geometry.intersection(poly.geometry)
if crs == {"init": "epsg:4326"}:
data.append(
{
edge_id_column: getattr(
lines, edge_id_column
),
polygon_id_column: getattr(
poly, polygon_id_column
),
edge_length_column: 1000.0
* line_length(geom),
"geometry": geom,
}
)
else:
data.append(
{
edge_id_column: getattr(
lines, edge_id_column
),
polygon_id_column: getattr(
poly, polygon_id_column
),
edge_length_column: 1000.0
* geom.length,
"geometry": geom,
}
)
else:
data.append(
{
edge_id_column: getattr(
lines, edge_id_column
),
polygon_id_column: getattr(
poly, polygon_id_column
),
edge_length_column: 0,
"geometry": lines.geometry,
}
)
if data:
intersections_data = gpd.GeoDataFrame(
data,
columns=[edge_id_column, edge_length_column, "geometry"],
crs=crs,
)
intersections_data.to_file(output_shapefile, driver="GPKG")
del intersections_data
del line_gpd, poly_gpd | 5,357,675 |
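A hypothetical invocation sketch; the file paths and column names below are placeholders for illustration, not values from the original project.

networkedge_polygon_intersection(
    edge_shapefile="roads.shp",            # LineString network edges
    hazard_shapefile="flood_extent.shp",   # hazard Polygons
    output_shapefile="road_flood.gpkg",    # written with the GPKG driver
    edge_id_column="edge_id",
    polygon_id_column="hazard_id",
    edge_length_column="length_km",
)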
def get_file_phenomena_i(index):
"""
Return file phenomena depending on the value of index.
"""
if index <= 99:
return [phen[0]]
elif index >= 100 and index <= 199:
return [phen[1]]
elif index >= 200 and index <= 299:
return [phen[2]]
elif index >= 300 and index <= 399:
return [phen[3]]
elif index >= 400 and index <= 499:
return phen[0:2]
elif index >= 500 and index <= 599:
return phen[0:3]
elif index >= 600 and index <= 699:
tmp_l = phen[0:2]
tmp_l.append(phen[3])
return tmp_l | 5,357,676 |
def resource_cache_map(resource_id, flush=True):
"""cache resource info"""
if flush:
map_resources(resource_ids=[resource_id, ])
if resource_id not in CDNRESOURCE:
        raise InvalidArgument('Resource does not exist')
return CDNRESOURCE[resource_id] | 5,357,677 |
def _preprocess_html(table_html):
"""Parses HTML with bs4 and fixes some glitches."""
table_html = table_html.replace("<br />", "<br /> ")
table = bs4.BeautifulSoup(table_html, "html5lib")
table = table.find("table")
# Delete hidden style annotations.
for tag in table.find_all(attrs={"style": "display:none"}):
tag.decompose()
# Make sure "rowspan" is not set to an illegal value.
for tag in table.find_all("td"):
for attr in list(tag.attrs):
if attr == "rowspan":
tag.attrs[attr] = ""
return table | 5,357,678 |
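A small usage sketch (assumes bs4 with the html5lib parser is installed, as the function requires):

html = (
    '<table>'
    '<tr><td rowspan="3">a</td><td style="display:none">hidden</td></tr>'
    '<tr><td>b<br /></td></tr>'
    '</table>'
)
table = _preprocess_html(html)
print(table.get_text(" ", strip=True))  # hidden cell dropped, rowspan cleared -> "a b"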
def parse_campus_hours(data_json, eatery_model):
"""Parses a Cornell Dining json dictionary.
    Returns 1) a list of (CampusEateryHour, unparsed menu) tuples for the corresponding CampusEatery object
    and 2) a list of the dining items the eatery serves.
Args:
data_json (dict): a valid dictionary from the Cornell Dining json
eatery_model (CampusEatery): the CampusEatery object to which to link the hours.
"""
eatery_hours_and_menus = []
dining_items = []
for eatery in data_json["data"]["eateries"]:
eatery_slug = eatery.get("slug", "")
if eatery_model.slug == eatery_slug:
dining_items = get_trillium_menu() if eatery_slug == TRILLIUM_SLUG else parse_dining_items(eatery)
hours_list = eatery["operatingHours"]
for hours in hours_list:
new_date = hours.get("date", "")
hours_events = hours["events"]
if hours_events:
for event in hours_events:
start, end = format_time(event.get("start", ""), event.get("end", ""), new_date)
eatery_hour = CampusEateryHour(
eatery_id=eatery_model.id,
date=new_date,
event_description=event.get("descr", ""),
event_summary=event.get("calSummary", ""),
end_time=end,
start_time=start,
)
eatery_hours_and_menus.append((eatery_hour, event.get("menu", [])))
else:
eatery_hour = CampusEateryHour(
eatery_id=eatery_model.id,
date=new_date,
event_description=None,
event_summary=None,
end_time=None,
start_time=None,
)
eatery_hours_and_menus.append((eatery_hour, []))
return eatery_hours_and_menus, dining_items | 5,357,679 |
def _change_relationships(edge: Dict) -> Tuple[bool, bool]:
"""Validate relationship."""
if 'increases' in edge[1]['relation'] or edge[1]['relation'] == 'positive_correlation':
return True, True
elif 'decreases' in edge[1]['relation'] or edge[1]['relation'] == 'negative_correlation':
return True, False
return False, False | 5,357,680 |
def extract_behaviour_sync(sync, chmap=None, display=False, tmax=np.inf):
"""
Extract wheel positions and times from sync fronts dictionary
:param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for all 16 chans
:param chmap: dictionary containing channel index. Default to constant.
chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
:param display: bool or matplotlib axes: show the full session sync pulses display
defaults to False
:return: trials dictionary
"""
bpod = _get_sync_fronts(sync, chmap['bpod'], tmax=tmax)
if bpod.times.size == 0:
raise err.SyncBpodFpgaException('No Bpod event found in FPGA. No behaviour extraction. '
'Check channel maps.')
frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'], tmax=tmax)
audio = _get_sync_fronts(sync, chmap['audio'], tmax=tmax)
# extract events from the fronts for each trace
t_trial_start, t_valve_open, t_iti_in = _assign_events_bpod(
bpod['times'], bpod['polarities'])
t_ready_tone_in, t_error_tone_in = _assign_events_audio(
audio['times'], audio['polarities'])
trials = Bunch({
'goCue_times': _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
'errorCue_times': _assign_events_to_trial(t_trial_start, t_error_tone_in),
'valveOpen_times': _assign_events_to_trial(t_trial_start, t_valve_open),
'stimFreeze_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take=-2),
'stimOn_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take='first'),
'stimOff_times': _assign_events_to_trial(t_trial_start, frame2ttl['times']),
'itiIn_times': _assign_events_to_trial(t_trial_start, t_iti_in)
})
# feedback times are valve open on good trials and error tone in on error trials
trials['feedback_times'] = np.copy(trials['valveOpen_times'])
ind_err = np.isnan(trials['valveOpen_times'])
trials['feedback_times'][ind_err] = trials['errorCue_times'][ind_err]
trials['intervals'] = np.c_[t_trial_start, trials['itiIn_times']]
if display:
width = 0.5
ymax = 5
if isinstance(display, bool):
plt.figure("Ephys FPGA Sync")
ax = plt.gca()
else:
ax = display
r0 = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
plots.squares(bpod['times'], bpod['polarities'] * 0.4 + 1, ax=ax, color='k')
plots.squares(frame2ttl['times'], frame2ttl['polarities'] * 0.4 + 2, ax=ax, color='k')
plots.squares(audio['times'], audio['polarities'] * 0.4 + 3, ax=ax, color='k')
plots.squares(r0['times'], r0['polarities'] * 0.4 + 4, ax=ax, color='k')
plots.vertical_lines(t_ready_tone_in, ymin=0, ymax=ymax,
ax=ax, label='goCue_times', color='b', linewidth=width)
plots.vertical_lines(t_trial_start, ymin=0, ymax=ymax,
ax=ax, label='start_trial', color='m', linewidth=width)
plots.vertical_lines(t_error_tone_in, ymin=0, ymax=ymax,
ax=ax, label='error tone', color='r', linewidth=width)
plots.vertical_lines(t_valve_open, ymin=0, ymax=ymax,
ax=ax, label='valveOpen_times', color='g', linewidth=width)
plots.vertical_lines(trials['stimFreeze_times'], ymin=0, ymax=ymax,
ax=ax, label='stimFreeze_times', color='y', linewidth=width)
plots.vertical_lines(trials['stimOff_times'], ymin=0, ymax=ymax,
ax=ax, label='stim off', color='c', linewidth=width)
plots.vertical_lines(trials['stimOn_times'], ymin=0, ymax=ymax,
ax=ax, label='stimOn_times', color='tab:orange', linewidth=width)
c = _get_sync_fronts(sync, chmap['left_camera'])
plots.squares(c['times'], c['polarities'] * 0.4 + 5, ax=ax, color='k')
c = _get_sync_fronts(sync, chmap['right_camera'])
plots.squares(c['times'], c['polarities'] * 0.4 + 6, ax=ax, color='k')
c = _get_sync_fronts(sync, chmap['body_camera'])
plots.squares(c['times'], c['polarities'] * 0.4 + 7, ax=ax, color='k')
ax.legend()
ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
ax.set_yticks([0, 1, 2, 3, 4, 5])
ax.set_ylim([0, 5])
return trials | 5,357,681 |
def download_and_load_model(model_files) -> RecursiveScriptModule:
"""
    Downloads the model from Google Drive and loads it with torch.jit.load. In production the
    download is cached in /tmp, since Heroku only exposes /tmp for writable storage; when the app
    is not running in production, the model is expected to already be in local storage and is
    loaded from there directly.
Args:
model_files: the dict containing the model information
Returns:
(RecursiveScriptModule): the loaded torch.jit model
"""
if "PRODUCTION" in os.environ:
logger.info(
f"=> Downloading Model {model_files['model_file']} from {model_files['model_url']}"
)
# heroku gives you `/tmp` to store files, which can be cached
model_path: Path = Path("/tmp") / f"{model_files['model_file']}.pt"
if not model_path.exists():
gdown.cached_download(url=model_files["model_url"], path=model_path)
logger.info(f"=> Loading {model_files['model_file']} from download_cache")
model: RecursiveScriptModule = torch.jit.load(str(model_path))
else:
logger.info(f"=> Loading {model_files['model_file']} from Local")
model = torch.jit.load(
str((Path("models") / (model_files["model_file"] + ".pt")))
)
return model | 5,357,682 |
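The model_files dict is expected to carry at least "model_file" and "model_url" keys; a hypothetical sketch (the name and Drive URL below are placeholders):

model_files = {
    "model_file": "my_scripted_model",
    "model_url": "https://drive.google.com/uc?id=<drive-file-id>",
}
model = download_and_load_model(model_files)
model.eval()  # TorchScript module ready for inference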
def is_attr_defined(attrs,dic):
"""
Check if the sequence of attributes is defined in dictionary 'dic'.
Valid 'attrs' sequence syntax:
<attr> Return True if single attrbiute is defined.
<attr1>,<attr2>,... Return True if one or more attributes are defined.
<attr1>+<attr2>+... Return True if all the attributes are defined.
"""
if OR in attrs:
for a in attrs.split(OR):
if dic.get(a.strip()) is not None:
return True
else: return False
elif AND in attrs:
for a in attrs.split(AND):
if dic.get(a.strip()) is None:
return False
else: return True
else:
return dic.get(attrs.strip()) is not None | 5,357,683 |
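Judging from the docstring, OR and AND are module-level separator constants for ',' and '+'; under that assumption:

dic = {'name': 'alice', 'email': None, 'age': 30}
is_attr_defined('name', dic)       # True  - single attribute defined
is_attr_defined('email,age', dic)  # True  - at least one of them is defined
is_attr_defined('email+age', dic)  # False - all must be defined, but email is None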
def c_str_repr(str_):
"""Returns representation of string in C (without quotes)"""
def byte_to_repr(char_):
"""Converts byte to C code string representation"""
char_val = ord(char_)
        # Escape quotes/backslashes directly and map CR/LF to their C escape sequences
        if char_ in ['"', '\\']:
            return '\\' + chr(char_val)
        elif char_ == '\r':
            return '\\r'
        elif char_ == '\n':
            return '\\n'
elif (ord(' ') <= char_val <= ord('^') or char_val == ord('_') or
ord('a') <= char_val <= ord('~')):
return chr(char_val)
else:
return '\\x%02x' % char_val
return '"%s"' % ''.join((byte_to_repr(x) for x in str_)) | 5,357,684 |
def _check_signature(signature, template):
"""
Check that the given `Signature` is valid.
"""
pick = _LockPick()
template.format_map(pick)
path_vars = {name for name, _ in _get_parameters(Path, signature)}
path_vars_diff = pick.keys - path_vars
if path_vars_diff:
raise FurnishError(
"missing Path parameters: {}".format(path_vars_diff))
for type_ in [Body, Json]:
if len(list(_get_parameters(type_, signature))) > 1:
raise FurnishError(
"multiple parameters annotated as {}".format(type_.__name__)) | 5,357,685 |
def hour_paths_for_range(hours_path, start, end):
"""Generate a list of hour paths to check when looking for segments between start and end."""
# truncate start and end to the hour
def truncate(dt):
return dt.replace(microsecond=0, second=0, minute=0)
current = truncate(start)
end = truncate(end)
# Begin in the hour prior to start, as there may be a segment that starts in that hour
# but contains the start time, eg. if the start time is 01:00:01 and there's a segment
# at 00:59:59 which goes for 3 seconds.
# Checking the entire hour when in most cases it won't be needed is wasteful, but it's also
# pretty quick and the complexity of only checking this case when needed just isn't worth it.
current -= datetime.timedelta(hours=1)
while current <= end:
yield os.path.join(hours_path, current.strftime("%Y-%m-%dT%H"))
current += datetime.timedelta(hours=1) | 5,357,686 |
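A short sketch of the generated paths (assumes the module imports os and datetime, which the function uses):

import datetime

start = datetime.datetime(2023, 5, 1, 1, 0, 1)
end = datetime.datetime(2023, 5, 1, 2, 30, 0)
list(hour_paths_for_range("/segments", start, end))
# -> ['/segments/2023-05-01T00', '/segments/2023-05-01T01', '/segments/2023-05-01T02']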
def getActiveTeamAndID():
"""Returns the Team ID and CyTeam for the active player."""
return getActiveTeamID(), getActiveTeam() | 5,357,687 |
def is_nitf(
file_name: Union[str, BinaryIO],
return_version=False) -> Union[bool, Tuple[bool, Optional[str]]]:
"""
Test whether the given input is a NITF 2.0 or 2.1 file.
Parameters
----------
file_name : str|BinaryIO
return_version : bool
Returns
-------
is_nitf_file: bool
Is the file a NITF file, based solely on checking initial bytes.
nitf_version: None|str
        Only returned if `return_version=True`. Will be `None` in the event that
`is_nitf_file=False`.
"""
header = _fetch_initial_bytes(file_name, 9)
if header is None:
if return_version:
return False, None
else:
return False
ihead = header[:4]
vers = header[4:]
if ihead == b'NITF':
try:
vers = vers.decode('utf-8')
return (True, vers) if return_version else True
except ValueError:
pass
return (False, None) if return_version else False | 5,357,688 |
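Usage sketch; "scene.ntf" is a placeholder path:

ok, version = is_nitf("scene.ntf", return_version=True)
if ok:
    print("NITF version field:", version)  # e.g. "02.10" for NITF 2.1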
def main():
"""A simple main for testing via command line."""
parser = argparse.ArgumentParser(
        description='A manual test for ros-pull-request-builder access '
                    'to a GitHub repo.')
parser.add_argument('user', type=str)
parser.add_argument('repo', type=str)
parser.add_argument('--callback-url', type=str,
default='http://build.ros.org/ghprbhook/')
parser.add_argument('--hook-user', type=str,
default='ros-pull-request-builder')
parser.add_argument('--password-env', type=str,
default='ROSGHPRB_TOKEN')
args = parser.parse_args()
password = os.getenv(args.password_env)
if not password:
parser.error(
            'OAUTH Token with hook and organization read access '
            'required in ROSGHPRB_TOKEN environment variable')
errors = []
result = check_hooks_on_repo(
args.user,
args.repo,
errors,
args.hook_user,
args.callback_url,
password)
if errors:
print('Errors detected:', file=sys.stderr)
for e in errors:
print(e, file=sys.stderr)
if result:
return 0
return 1 | 5,357,689 |
def get_xlsx_filename() -> str:
"""
Get the name of the excel file. Example filename:
kesasetelihakemukset_2021-01-01_23-59-59.xlsx
"""
local_datetime_now_as_str = timezone.localtime(timezone.now()).strftime(
"%Y-%m-%d_%H-%M-%S"
)
filename = f"kesasetelihakemukset_{local_datetime_now_as_str}.xlsx"
return filename | 5,357,690 |
def retrieved_secret(secret_name):
"""retrieved_secret"""
log_level = environ.get("APP_LOG_LEVEL", logging.INFO)
logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level)
if (
"tenant_id" in environ.keys()
and "client_id" in environ.keys()
and "client_secret" in environ.keys()
):
tenant_id = environ["tenant_id"]
client_id = environ["client_id"]
client_secret = environ["client_secret"]
credential = ClientSecretCredential(tenant_id, client_id, client_secret)
else:
credential = DefaultAzureCredential()
kv_uri = environ["KEY_VAULT_NAME"]
client = SecretClient(vault_url=kv_uri, credential=credential)
secret = client.get_secret(secret_name)
if hasattr(secret, "name") and hasattr(secret, "value"):
logging.info("\t'SecretName:'\t'%s'", secret.name)
logging.info("\t'SecretValue:'\t'%s'", secret.value) | 5,357,691 |
def get_content_directory() -> Path:
"""
Get the path of the markdown `content` directory.
"""
return get_base_directory() / "content" | 5,357,692 |
def mag_inc(x, y, z):
"""
Given *x* (north intensity), *y* (east intensity), and *z*
(vertical intensity) all in [nT], return the magnetic inclincation
angle [deg].
"""
h = math.sqrt(x**2 + y**2)
return math.degrees(math.atan2(z, h)) | 5,357,693 |
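A quick worked example: with x=20000 nT, y=0 and z=45000 nT, the horizontal intensity is 20000 nT, so the inclination is atan2(45000, 20000), about 66.0 degrees.

round(mag_inc(20000.0, 0.0, 45000.0), 1)  # -> 66.0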
def bootstrap():
""" initialize remote host environment (virtualenv, deploy, update) """
# Require a valid env.root value
require('root', provided_by=('pro'))
# Create env.root directory
run('mkdir -p %(root)s' % env)
create_virtualenv()
deploy()
update_requirements() | 5,357,694 |
def rgb_to_rgba(image, alpha_val):
"""
Convert an image from RGB to RGBA.
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}")
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError(f"Input size must have a shape of (*, 3, H, W).Got {image.shape}")
if not isinstance(alpha_val, (float, torch.Tensor)):
raise TypeError(f"alpha_val type is not a float or torch.Tensor. Got {type(alpha_val)}")
# add one channel
r, g, b = torch.chunk(image, image.shape[-3], dim=-3)
    if isinstance(alpha_val, float):
        a = torch.full_like(r, fill_value=float(alpha_val))
    else:
        # alpha_val is a tensor; broadcast it to the shape of a single channel
        a = alpha_val.expand_as(r)
return torch.cat([r, g, b, a], dim=-3) | 5,357,695 |
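A minimal sketch (requires torch):

import torch

rgb = torch.rand(3, 4, 5)      # (C, H, W) image
rgba = rgb_to_rgba(rgb, 0.5)   # constant alpha channel appended
print(rgba.shape)              # torch.Size([4, 4, 5])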
def pdpc_decision(csv, download, corpus, action, root, extras, extra_corpus, verbose):
"""
Scripts to scrape all decisions of the Personal Data Protection Commission of Singapore.
Accepts the following actions.
"all" Does all the actions (scraping the website, saving a csv, downloading all files and creating a corpus).
"corpus" After downloading all the decisions from the website, converts them into text files.
"csv" Save the items gathered by the scraper as a csv file.
"files" Downloads all the decisions from the PDPC website into a folder.
"""
start_time = time.time()
if verbose:
logging.basicConfig(level='INFO')
options = Options(csv_path=csv, download_folder=download, corpus_folder=corpus, action=action, root=root,
extras=extras, extra_corpus=extra_corpus)
logger.info(f'Options: {options}')
if options['root']:
os.chdir(root)
scrape_results = Scraper.scrape()
if (action == 'all') or (action == 'files'):
download_files(options, scrape_results)
if (action == 'all') or (action == 'corpus'):
create_corpus(options, scrape_results)
if extras and ((action == 'all') or (action == 'csv')):
scraper_extras(scrape_results, options)
if (action == 'all') or (action == 'csv'):
save_scrape_results_to_csv(options, scrape_results)
diff = time.time() - start_time
logger.info('Finished. This took {}s.'.format(diff)) | 5,357,696 |
def has_labels(dataset_dir, filename=LABELS_FILENAME):
"""Specifies whether or not the dataset directory
contains a label map file.
Args:
dataset_dir: The directory in which the labels file is found.
filename: The filename where the class names are written.
Returns:
`True` if the labels file exists and `False` otherwise.
"""
return tf.io.gfile.exists(os.path.join(dataset_dir, filename)) | 5,357,697 |
def get(identifier):
"""get the activation function"""
if identifier is None:
return linear
if callable(identifier):
return identifier
if isinstance(identifier, str):
activations = {
"relu": relu,
"sigmoid": sigmoid,
"tanh": tanh,
"linear": linear,
}
return activations[identifier] | 5,357,698 |
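The lookup assumes relu, sigmoid, tanh and linear are defined at module level; given that:

act = get("tanh")       # returns the module-level tanh function
get(act) is act         # True: callables are passed through unchanged
get(None) is linear     # True: None maps to the linear activation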
def df_add_column_codelines(self, key):
"""Generate code lines to add new column to DF"""
func_lines = df_set_column_index_codelines(self) # provide res_index = ...
results = []
for i, col in enumerate(self.columns):
col_loc = self.column_loc[col]
type_id, col_id = col_loc.type_id, col_loc.col_id
res_data = f'res_data_{i}'
func_lines += [
f' data_{i} = self._data[{type_id}][{col_id}]',
f' {res_data} = pandas.Series(data_{i}, index=res_index, name="{col}")',
]
results.append((col, res_data))
res_data = 'new_res_data'
literal_key = key.literal_value
func_lines += [f' {res_data} = pandas.Series(value, index=res_index, name="{literal_key}")']
results.append((literal_key, res_data))
data = ', '.join(f'"{col}": {data}' for col, data in results)
func_lines += [f' return pandas.DataFrame({{{data}}}, index=res_index)']
return func_lines | 5,357,699 |