content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import numpy as np
import scipy.linalg
def memory_kernel_logspace(dt, coeffs, dim_x, noDirac=False):
"""
Return the value of the estimated memory kernel
Parameters
----------
dt: Timestep
coeffs : Coefficients for diffusion and friction
dim_x: Dimension of visible variables
noDirac: Remove the dirac at time zero
Returns
-------
timespan : array-like, shape (n_samples, )
Array of time to evaluate memory kernel
kernel_evaluated : array-like, shape (n_samples, dim_x,dim_x)
Array of values of the kernel at time provided
"""
Avv = coeffs["A"][:dim_x, :dim_x]
Ahv = coeffs["A"][dim_x:, :dim_x]
Avh = coeffs["A"][:dim_x, dim_x:]
Ahh = coeffs["A"][dim_x:, dim_x:]
eigs = np.linalg.eigvals(Ahh)
Kernel = np.zeros((150, dim_x, dim_x))
final_time = 25 / np.min(np.abs(np.real(eigs)))
times = np.logspace(np.log10(dt), np.log10(final_time), num=150)
for n, t in enumerate(times):
Kernel[n, :, :] = -np.matmul(Avh, np.matmul(scipy.linalg.expm(-1 * t * Ahh), Ahv))
if not noDirac:
Kernel[0, :, :] = Kernel[0, :, :] + Avv
return times, Kernel | 21e6aed08bebd91f359efa216ab1331cf9ace310 | 3,654,276 |
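Reading directly from the code above, the evaluated kernel is K(t) = -Avh · expm(-t · Ahh) · Ahv, with the instantaneous (Dirac) friction Avv added to the first sample unless noDirac is set. A minimal usage sketch follows; the coefficient matrix below is made up purely for illustration and is not from the original source.
import numpy as np
coeffs = {"A": np.array([[1.0, 0.5],
                         [0.3, 2.0]])}  # hypothetical: 1 visible + 1 hidden variable
times, kernel = memory_kernel_logspace(dt=1e-3, coeffs=coeffs, dim_x=1)
print(times.shape, kernel.shape)  # (150,) (150, 1, 1)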
import numpy as np
def _is_constant(x, atol=1e-7, positive=None):
"""
True if x is a constant array, within atol
"""
x = np.asarray(x)
return (np.max(np.abs(x - x[0])) < atol and
(np.all((x > 0) == positive) if positive is not None else True)) | 0b272dd843adbd4eaa4ebbe31efe6420de05a6dd | 3,654,277 |
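A short usage sketch of _is_constant (values chosen for illustration):
print(_is_constant([2.0, 2.0, 2.0 + 1e-9]))       # True: spread is below atol
print(_is_constant([3.0, 3.0], positive=True))    # True: constant and strictly positive
print(_is_constant([-3.0, -3.0], positive=True))  # False: constant but not positive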
def estimate_M(X, estimator, B, ratio):
"""Estimating M with Block or incomplete U-statistics estimator
:param B: Block size
:param ratio: size of incomplete U-statistics estimator
"""
p = X.shape[1]
x_bw = util.meddistance(X, subsample = 1000)**2
kx = kernel.KGauss(x_bw)
if estimator == 'inc':
hsic_M = hsic.HSIC_Inc(kx, kx, ratio = ratio)
else: # 'block'
hsic_M = hsic.HSIC_Block(kx, kx, bsize = B)
M_true = np.zeros((p, p))
for i in range(p):
for j in range(i+1):
M_true[i, j] = np.mean(hsic_M.estimates(X[:, i, np.newaxis], X[:, j, np.newaxis]))
M_true[j, i] = M_true[i, j]
M = nearestPD(M_true) # positive definite approximation
return M_true, M | 656b83eac9e522b1feb20a4b5b56649b9553ecb0 | 3,654,278 |
def query_yes_no(question, default="yes"):
"""Queries user for confimration"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
console.print(question + escape(prompt))
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
console.print("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n") | 58e9bba831155ca9f4d4879a5e960949757b0562 | 3,654,279 |
import base64
import binascii
def decode(password, encoded, notice):
"""
:type password: str
:type encoded: str
"""
dec = []
try:
encoded_bytes = base64.urlsafe_b64decode(encoded.encode()).decode()
except binascii.Error:
notice("Invalid input '{}'".format(encoded))
return
for i in range(len(encoded_bytes)):
key_c = password[i % len(password)]
dec_c = chr((256 + ord(encoded_bytes[i]) - ord(key_c)) % 256)
dec.append(dec_c)
return "".join(dec) | 5cf82bfbbe7eee458914113f648dadbe7b15dee8 | 3,654,280 |
from functools import reduce
def replace(data, replacements):
""" Allows to performs several string substitutions.
This function performs several string substitutions on the initial ``data`` string using a list
of 2-tuples (old, new) defining substitutions and returns the resulting string.
"""
return reduce(lambda a, kv: a.replace(*kv), replacements, data) | 37b2ad5b9b6d50d81a8c1bcded9890de3c840722 | 3,654,282 |
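A quick usage sketch; note the substitutions are applied left to right, so order can matter:
print(replace("Hello world", [("Hello", "Goodbye"), ("world", "moon")]))  # -> Goodbye moon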
def fake_kafka() -> FakeKafka:
"""Fixture for fake kafka."""
return FakeKafka() | 35fdcf2030dda1cab2be1820549f67dc246cf88f | 3,654,283 |
from typing import Union
import operator
def rr20(prec: pd.Series) -> Union[float, int]:
"""Function for count of heavy precipitation (days where rr greater equal 20mm)
Args:
prec (pd.Series): value array of precipitation
Returns:
np.nan or number: the count of heavy precipitation days
"""
assert isinstance(prec, pd.Series)
op = operator.ge
num = 20.0
return number_of(prec, num, op) | 4686eccac5be53b4a888d8bf0649c72e65d81bdb | 3,654,284 |
def get_neg_label(cls_label: np.ndarray, num_neg: int) -> np.ndarray:
"""Generate random negative samples.
:param cls_label: Class labels including only positive samples.
:param num_neg: Number of negative samples.
:return: Label with original positive samples (marked by 1), negative
samples (marked by -1), and ignored samples (marked by 0)
"""
seq_len, num_scales = cls_label.shape
cls_label = cls_label.copy().reshape(-1)
cls_label[cls_label < 0] = 0 # reset negative samples
neg_idx, = np.where(cls_label == 0)
np.random.shuffle(neg_idx)
neg_idx = neg_idx[:num_neg]
cls_label[neg_idx] = -1
cls_label = np.reshape(cls_label, (seq_len, num_scales))
return cls_label | 3cd0ad5c1973eff969330f014c405f39092b733b | 3,654,285 |
def G12(x, a):
"""
Eqs 20, 24, 25 of Khangulyan et al (2014)
"""
alpha, a, beta, b = a
pi26 = np.pi ** 2 / 6.0
G = (pi26 + x) * np.exp(-x)
tmp = 1 + b * x ** beta
g = 1.0 / (a * x ** alpha / tmp + 1.0)
return G * g | 6b84d5f5978a9faf8c9d77a2b9351f73f5717f48 | 3,654,286 |
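For reference, the expression the function evaluates (read directly from the code, with alpha, a, beta, b unpacked from the second argument) is:
$$G(x) = \left(\tfrac{\pi^2}{6} + x\right) e^{-x}, \qquad g(x) = \frac{1}{\dfrac{a\,x^{\alpha}}{1 + b\,x^{\beta}} + 1}, \qquad G_{12}(x) = G(x)\, g(x).$$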
def binomial(n, k):
""" binomial coefficient """
if k < 0 or k > n:
return 0
if k == 0 or k == n:
return 1
num = 1
den = 1
for i in range(1, min(k, n - k) + 1): # take advantage of symmetry
num *= (n + 1 - i)
den *= i
c = num // den
return c | 78910202202f749f8e154b074a55f6a5ddf91f64 | 3,654,287 |
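Usage sketch showing the symmetry shortcut (C(n, k) = C(n, n-k), so the loop runs min(k, n-k) times):
print(binomial(5, 2))    # 10
print(binomial(50, 48))  # 1225, computed via C(50, 2)
print(binomial(4, 7))    # 0, k out of range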
def pagination(page):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator = page.paginator
page_num = page.number
#pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if False: #not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy pagination.
if paginator.num_pages <= 10:
page_range = range(1, paginator.num_pages + 1)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(1, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(1, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages + 1))
else:
page_range.extend(range(page_num + 1, paginator.num_pages + 1))
#need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'paginator': paginator,
'page_obj': page,
'page': page.number,
#'pagination_required': pagination_required,
#'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
#'ALL_VAR': ALL_VAR,
'1': 1,
'is_paginated': True,
} | 60d90adfbeceab9d159652b641e60da8fa995954 | 3,654,288 |
def bubbleSort(arr):
"""
>>> bubbleSort([90, 54, 34, 25, 23, 12, 11])
[11, 12, 23, 25, 34, 54, 90]
"""
n = len(arr)
for i in range(n-1):
for j in range(0, n-i-1):
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
return arr | 28bc9d505ef44a4b403c0f91a971cccf74644c5a | 3,654,289 |
def generate_kronik_feats(fn):
"""Generates features from a Kronik output file"""
header = get_tsv_header(fn)
return generate_split_tsv_lines(fn, header) | 8b98f346ef5d833e0bfb876a7985c8bb3ced905c | 3,654,290 |
def delete_product(uuid: str, db: Session = Depends(auth)):
"""Delete a registered product."""
if product := repo.get_product_by_uuid(db=db, uuid=uuid):
if product.taken:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Cannot delete products already taken.",
)
repo.delete_product(db=db, product=product)
return {
"deleted": True,
"product": product,
}
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="No product found for the code specified.",
) | 97aa45eec0ae98a58984f8ca97d584b5a715cba6 | 3,654,292 |
import functools
def CreateMnemonicsC(mnemonicsIds):
""" Create the opcodes arrays for C header files. """
opsEnum = "typedef enum {\n\tI_UNDEFINED = 0, "
pos = 0
l2 = sorted(mnemonicsIds.keys())
for i in l2:
s = "I_%s = %d" % (i.replace(" ", "_").replace(",", ""), mnemonicsIds[i])
if i != l2[-1]:
s += ","
pos += len(s)
if pos >= 70:
s += "\n\t"
pos = 0
elif i != l2[-1]:
s += " "
opsEnum += s
opsEnum += "\n} _InstructionType;"
# Mnemonics are sorted by insertion order. (Pseudo mnemonics depend on this!)
# NOTE: EXTRA BACKSLASHES FOR RE.SUB !!!
s = "const unsigned char _MNEMONICS[] =\n\"\\\\x09\" \"UNDEFINED\\\\0\" "
l = list(zip(mnemonicsIds.keys(), mnemonicsIds.values()))
l = sorted(l, key=functools.cmp_to_key(lambda x, y: x[1] - y[1]))
for i in l:
s += "\"\\\\x%02x\" \"%s\\\\0\" " % (len(i[0]), i[0])
if len(s) - s.rfind("\n") >= 76:
s += "\\\\\n"
s = s[:-1] + ";" # Ignore last space.
# Return enum & mnemonics.
return (opsEnum, s) | a20a01fbefc1175c24144753264edc938258cdca | 3,654,293 |
import math
def create_windows(c_main, origin, J=None, I=None, depth=None, width=None):
"""
Create windows based on contour and windowing parameters. The first
window (at arc length = 0) is placed at the spline origin.
Note: to define the windows, this function uses pseudo-radial and
pseudo-angular coordinates. The pseudo-radial coordinate is based
on the distance transform of the rasterized version of the continuous
spline that defines the contour of the cell. The pseudo-angular coordinate
for layer j is based on the distance transform of the discrete contour of
layer j. So there is a bit of an inconsistency between continuous and
discrete contours.
Parameters
----------
c_main: 2d array
A rasterized version of the contour, as obtained
by spline_to_param_image.
origin: tuple
(y, x) coordinates of the origin of the curve.
J: int
Number of window layers.
I: list of int
Vector of dimension J specifying the number of windows per layer.
depth: int
Desired depth of the windows.
width: int
Desired width of the windows.
Returns
-------
w: 3d list
w[i][j][0] and w[i][j][1] are 1d arrays representing
lists of x,y indices of pixels belonging to window in i'th layer
in j'th window
J: int
number of layers (calculated if not provided as input)
I: list of int
number of windows per layer (calculated if not provided as input)
"""
origin = [origin[1], origin[0]]
# Compute the distance transform of the main contour
D_main = distance_transform_edt(-1 == c_main)
# Compute the mask corresponding to the main contour
mask_main = binary_fill_holes(
-1 < c_main
) # Maybe not necessary? Can't we just use the segmented mask here?
# Divide the radial coordinate into J layers with specified depth
Dmax = np.amax(D_main * mask_main)
if J is None:
J = int(math.ceil(Dmax / depth))
b = np.linspace(
0, Dmax, J + 1
) # Boundaries of the layers in terms of distances to the main contour
if I is None:
compute_num_win = True
I = []
else:
compute_num_win = False
w = []
for j in range(J):
w.append([])
# The mask containing the interior of the cell starting from
# the j-th layer
mask = (b[j] <= D_main) * mask_main
# Extract the contour of the mask
# We must fix certain frames where multiple contours are returned.
# So we choose the longest contour. Some pixels may be lost in the process,
# i.e., the windows may not cover the entire cell.
clist = find_contours(mask, 0, fully_connected="high")
cvec = np.asarray(
clist[np.argmax([cel.shape[0] for cel in clist])], dtype=int
)
# An alternative fix using OpenCV's findContours routine---doesn't solve the problem
# contours, hierarchy = cv.findContours(np.asarray(mask, dtype=np.uint8), cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
# cvec = np.asarray(contours[np.argmax([cel.shape[0] for cel in contours])], dtype=np.int)
# cvec = cvec.reshape((cvec.shape[0], cvec.shape[2]))
# cvec = cvec[::-1, [1,0]] # Sort boundary pixels in clockwise direction and switch (x, y) coordinates
# Lvec = compute_discrete_arc_length(cvec)
# c = create_arc_length_image(mask.shape, cvec, Lvec)
# plt.figure()
# plt.imshow(c, 'gray', vmin=-Lvec[-1], vmax=Lvec[-1])
# plt.plot(origin[1], origin[0], 'or')
# # plt.show()
# Adjust the origin of the contour:
# on the discrete contour cvec, find the closest point to the origin,
# then apply a circular shift to cvec to make this point the first one.
n0 = np.argmin(np.linalg.norm(cvec - origin, axis=1))
cvec = np.roll(cvec, -n0, axis=0)
# Compute the discrete arc length along the contour
Lvec = compute_discrete_arc_length(cvec)
# Create an image of the contour where the intensity is the arc length
arc = create_arc_length_image(mask.shape, cvec, Lvec)
# Compute the feature transform of this image:
# for each pixel position, we get the coordinates of the closest pixel on the contour
F = distance_transform_edt(
-1 == arc, return_distances=False, return_indices=True
)
# Fill array with arc lengths of closest points on the contour
# L = np.zeros(c.shape)
# for u in range(c.shape[0]):
# for v in range(c.shape[1]):
# L[u, v] = c[F[0, u, v], F[1, u, v]]
# gridx, gridy = np.meshgrid(range(c.shape[1]), range(c.shape[0]))
# L = c[F[0,:,:][gridy, gridx], F[1,:,:][gridy, gridx]]
L = arc[F[0, :, :], F[1, :, :]]
# Create sampling windows for the j-th layer
if compute_num_win:
I.append(int(math.ceil(Lvec[-1] / width)))
w_borders = np.linspace(0, Lvec[-1], I[j] + 1)
for i in range(I[j]):
# w[-1].append(np.where(mask & (s1[i] <= L) & (L < s1[i+1]) & (b[0] <= D) & (D < b[1])))
w[-1].append(
np.where(
mask
& (w_borders[i] <= L)
& (L < w_borders[i + 1])
& (b[j] <= D_main)
& (D_main < b[j + 1])
)
)
# plt.figure()
# plt.imshow(w[j][i])
# plt.show()
# # Compute positions on the contour that will be used for the displacement estimation
# if j == 0:
# t = define_contour_positions(Lvec, I[0], cvec, c_main)
return w, J, I | c5e3989b8f8f0f558cdc057b6f3bb9901c4363cf | 3,654,294 |
from bs4 import BeautifulSoup
def extractsms(htmlsms) :
"""
extractsms -- extract SMS messages from BeautifulSoup tree of Google Voice SMS HTML.
Output is a list of dictionaries, one per message.
"""
msgitems = [] # accum message items here
# Extract all conversations by searching for a DIV with an ID at top level.
tree = BeautifulSoup(htmlsms, "html.parser") # parse HTML into tree
conversations = tree.findAll("div",attrs={"id" : True},recursive=False)
for conversation in conversations :
# For each conversation, extract each row, which is one SMS message.
rows = conversation.findAll(attrs={"class" : "gc-message-sms-row"})
for row in rows : # for all rows
# For each row, which is one message, extract all the fields.
msgitem = {"id" : conversation["id"]} # tag this message with conversation ID
spans = row.findAll("span",attrs={"class" : True}, recursive=False)
for span in spans : # for all spans in row
cl = span["class"].replace('gc-message-sms-', '')
msgitem[cl] = (" ".join(span.findAll(text=True))).strip() # put text in dict
msgitems.append(msgitem) # add msg dictionary to list
return msgitems | e31a66ae5ee56faf4eab131044c395fcd8de3a2a | 3,654,295 |
def load_ch_wubi_dict(dict_path=e2p.E2P_CH_WUBI_PATH):
"""Load Chinese to Wubi Dictionary.
Parameters
---------
dict_path : str
the absolute path to chinese2wubi dictionary.
In default, it's E2P_CH_WUBI_PATH.
Returns
-------
dict : Dictionary
a mapping between Chinese to Wubi Code
"""
return load_dict(dict_path) | e9297968b5dc4d1811659084e03ef0b2156c8a00 | 3,654,296 |
def middle_flow(middle_inputs: Tensor) -> Tensor:
"""
Middle flow
Implements the second of the three broad parts of the model
:param middle_inputs: middle_inputs: Tensor output generate by the Entry Flow,
having shape [*, new_rows, new_cols, 728]
:return: Output tensor of shape [*, new_rows, new_cols, 728]
"""
# Block 4 - Conv B (Green)
middle_outputs = middle_inputs
for _ in range(8):
res = middle_outputs
for _ in range(3):
middle_outputs = separable_convolutional_unit(middle_outputs, 728)
middle_outputs = Add()([res, middle_outputs])
return middle_outputs | 80fedffbb6da2f3e0b99a931d66d593bf627bdbe | 3,654,297 |
def feature_extraction(sample_index, labels, baf, lrr, rawcopy_pred, data_shape, margin=10000, pad_val=-2):
"""
Extract features at sample index
:param sample_index: sample index
:param labels: break point labels
:param baf: b-allele frequency values
:param lrr: log r ratio values
:param rawcopy_pred: rawcop predictions
:param data_shape: shape of the data
:param margin: margin to use
:param pad_val: padding value for windows appearing on start or end of data sequence
:return:
"""
window_size = margin * 4
if sample_index < margin * 2:
running_idx = margin * 2 - sample_index
running_idx2 = margin * 2 + sample_index
if running_idx2 >= len(baf):
running_idx2 = len(baf) - 1
ix = range(sample_index, sample_index + margin)
baf_ix = range(0, running_idx2)
baf_ = baf[baf_ix]
baf = np.pad(baf_, (running_idx, 0), 'constant', constant_values=pad_val)
lrr_ = lrr[baf_ix]
lrr = np.pad(lrr_, (running_idx, 0), 'constant', constant_values=pad_val)
elif sample_index + margin * 2 > data_shape[0]:
running_idx = sample_index - margin * 2
ix = range(sample_index - margin, data_shape[0])
baf_ix = range(running_idx, data_shape[0])
baf_ = baf[baf_ix]
baf = np.pad(baf_, (0, running_idx), 'constant', constant_values=pad_val)
lrr_ = lrr[baf_ix]
lrr = np.pad(lrr_, (0, running_idx), 'constant', constant_values=pad_val)
else:
ix = range(sample_index - margin, sample_index + margin)
baf_ix = range(sample_index - margin * 2, sample_index + margin * 2)
baf = baf[baf_ix]
lrr = lrr[baf_ix]
label = []
for l in labels[baf_ix]:
if label == []:
label.append(l)
elif l != label[-1]:
label.append(l)
rc_pred = []
for l in rawcopy_pred[baf_ix]:
if rc_pred == []:
rc_pred.append(l)
elif l != rc_pred[-1]:
rc_pred.append(l)
assert baf.shape[0] == window_size
assert lrr.shape[0] == window_size
feat = np.vstack((baf, lrr))
return feat, rc_pred, label, ix | 2b70229d3e4021d4a0cce9bf7dce2222956e299d | 3,654,298 |
def get_filename(file_fullpath):
"""
Returns the filename without the full path
:param file_fullpath:
:return: Returns the filename
"""
filename = file_fullpath.split("/")[-1].split(".")[0]
return filename | 903cb26c89d1d18c9ebafe1a468c7fa66c51f119 | 3,654,299 |
def create_and_assign_household(humans_with_same_house, housetype, conf, city, allocated_humans):
"""
Creates a residence and allocates humans in `humans_with_same_house` to the same.
Args:
humans_with_same_house (list): a list of `Human` objects which are to be allocated to the same residence of type `type`.
housetype (HouseType): type of allocation
conf (dict): yaml configuration of the experiment
city (covid19sim.location.City): simulator's city object
allocated_humans (list): a list of humans that have been allocated a household
Returns:
allocated_humans (list): a list of humans that have been allocated a household
"""
assert all(human not in allocated_humans for human in humans_with_same_house), f"reassigning household to human"
res = Household(
env=city.env,
rng=np.random.RandomState(city.rng.randint(2 ** 16)),
conf=conf,
name=f"HOUSEHOLD:{len(city.households)}",
location_type="HOUSEHOLD",
lat=city.rng.randint(*city.x_range),
lon=city.rng.randint(*city.y_range),
area=None,
capacity=None,
)
for human in humans_with_same_house:
allocated_humans = _assign_household(human, res, allocated_humans)
res.allocation_type = housetype
city.households.add(res)
return allocated_humans | 594830aec1c820de94f7277499239f19e51ba0de | 3,654,300 |
import torch
def make_positions(tensor, padding_idx):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (
torch.cumsum(mask, dim=1).type_as(mask) * mask
).long() + padding_idx | f86f5485ddd3400161d9e233ad66cc492fd6d277 | 3,654,302 |
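A small usage sketch with padding_idx = 1; real tokens get positions 2, 3, ... while padding positions stay at padding_idx:
tokens = torch.tensor([[5, 6, 7, 1, 1],
                       [8, 1, 1, 1, 1]])
print(make_positions(tokens, padding_idx=1))
# tensor([[2, 3, 4, 1, 1],
#         [2, 1, 1, 1, 1]])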
import click
def init():
"""Top level command handler."""
@click.command()
@click.option('--policy-servers', type=cli.LIST,
required=True,
help='Warpgate policy servers')
@click.option('--service-principal', type=str,
default='host',
help='Warpgate service principal.')
@click.option('--policy', type=str, required=True,
envvar='WARPGATE_POLICY',
help='Warpgate policy to use')
@click.option('--tun-dev', type=str, required=True,
help='Device to use when establishing tunnels.')
@click.option('--tun-addr', type=str, required=False,
help='Local IP address to use when establishing tunnels.')
def warpgate(policy_servers, service_principal, policy, tun_dev, tun_addr):
"""Run warpgate connection manager.
"""
_LOGGER.info(
'Launch client => %s, tunnel: %s[%s], policy: %s, principal: %s',
policy_servers,
tun_dev, tun_addr,
policy,
service_principal,
)
# Never exits
client.run_client(
policy_servers, service_principal, policy,
tun_dev, tun_addr
)
return warpgate | fcadaa48fead63b10431bf509f4f4398216be564 | 3,654,303 |
def load(file):
"""unpickle an object from a file"""
pik = Unpickler(file)
pik._main = _main_module
obj = pik.load()
if type(obj).__module__ == getattr(_main_module, '__name__', '__main__'):
# point obj class to main
try: obj.__class__ = getattr(pik._main, type(obj).__name__)
except (AttributeError,TypeError): pass # defined in a file
#_main_module.__dict__.update(obj.__dict__) #XXX: should update globals ?
return obj | 22050da1c2ff891180ce9581a1cf2c6f1cf9e0b9 | 3,654,304 |
def setup(app):
"""Set up the Sphinx extension."""
app.add_config_value(
name="doctr_versions_menu_conf", default={}, rebuild="html",
)
app.connect('builder-inited', ext.add_versions_menu_js_file)
app.connect('build-finished', ext.cleanup)
return {
"version": __version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
} | 01173da317d1058811b01842be8492265ac0a62b | 3,654,306 |
import click
def get_help_recursive(group, ctx, commands):
"""
Returns help for arbitrarily nested subcommands of the given click.Group.
"""
try:
command_name = commands.pop(0)
group = group.get_command(ctx, command_name)
if not group:
raise click.ClickException('Invalid command: {}'.format(command_name))
except IndexError:
# end of subcommand chain
return group.get_help(ctx)
except AttributeError:
# group is actually a command with no children
return group.get_help(ctx)
return get_help_recursive(group, ctx, commands) | 412f0cb9e9aa1f19caf4a4a5db95c8040a0d2f36 | 3,654,308 |
def clump_tracker(fprefix, param=None, directory=None, nsmooth=32, verbose=True):
"""
Finds and tracks clumps over a simulation with multiple time steps and
calculates various physical properties of the clumps.
Runs all the steps necessary to find/track clumps, these are:
get_fnames
pFind_clumps
pClump_properties
pLink2
multilink
build_clumps
If the iord property is not found, the linking will only work if the number
of particles remains constant through the simulation
**ARGUMENTS**
fprefix : str
Prefix of the simulation outputs
param : str (recommended)
Filename of a .param file for the simulation
directory : str (optional)
Directory to search through. Default is current working directory
nsmooth : int (optional)
Number of nearest neighbors used for particle smoothing in the
simulation. This is used in the definition of a density threshold
for clump finding.
verbose : bool (optional)
Verbosity flag. Default is True
**RETURNS**
clump_list : list
A list containing dictionaries for all clumps found in the simulation
See clump_properties for a list of the properties calculated for clumps
"""
# Get a list of all snapshot files
fnames = get_fnames(fprefix, directory)
nfiles = len(fnames)
# Run the clump (halo) finder
if verbose: print "\n\nRunning clump finder on {} files\n\n".format(nfiles)
clumpnum_list = pFind_clumps(fnames, nsmooth, param, verbose=verbose)
nclumps = np.zeros(nfiles, dtype=int)
for i, clumpnums in enumerate(clumpnum_list):
nclumps[i] = clumpnums.max()
if nclumps.max() <= 0:
if verbose: print('No clumps found')
return []
# Calculate the physical properties of the clumps
if verbose: print "\n\nCalculating the physical of properties of clumps\n\n"
properties = pClump_properties(fnames, clumpnum_list)
# Link clumps on consecutive time-steps
if verbose: print "\n\nLinking Clumps\n\n"
link_list = pLink2(properties)
# Link on multiple time-steps
multilink_list = multilink(link_list)
# Build the clumps
clump_list = build_clumps(multilink_list, properties, fnames, param)
return clump_list | bc72ae48e152ada388aa2421290d41d9865fa439 | 3,654,309 |
def OptimizeGraph(config_proto,
metagraph,
verbose=True,
graph_id=b'graph_to_optimize',
cluster=None,
strip_default_attributes=False):
"""Optimize the provided metagraph.
For best results, the signature_def field in `metagraph` should be populated
with information about input (feed) and output (fetch) tensors.
Args:
config_proto: a ConfigProto protobuf.
metagraph: a MetagraphDef protobuf.
verbose: whether to log optimization results.
graph_id: a string identifying this graph.
cluster: a grappler cluster object representing hardware resources
available to run this graph.
strip_default_attributes: whether graph node attributes having default
values should be removed after all the optimization passes. This
option is useful if the resulting graph will be executed by an older
process that might not know some of the recently added attributes.
"""
if not isinstance(config_proto, config_pb2.ConfigProto):
raise TypeError('Argument `config_proto` should be a tf.ConfigProto, '
f'received type: {type(config_proto).__name__}')
if cluster is not None:
out_graph = tf_opt.TF_OptimizeGraph(cluster.tf_cluster,
config_proto.SerializeToString(),
metagraph.SerializeToString(), verbose,
graph_id, strip_default_attributes)
else:
# Currently Grappler assumes no more than 1 sessions alive globally.
# See comments on SingleMachine::Provision(), hence we use the following
# lock to prevent concurrent access to the following code.
with _OPTIMIZE_GRAPH_CLUSTER_LOCK:
cluster = gcluster.Cluster()
try:
out_graph = tf_opt.TF_OptimizeGraph(cluster.tf_cluster,
config_proto.SerializeToString(),
metagraph.SerializeToString(),
verbose, graph_id,
strip_default_attributes)
finally:
# Force the cleanup instead of waiting on python GC to cleanup the
# temporary cluster we've created. Otherwise subsequent calls might
# not have a clean slate because GC may not have run yet.
cluster.Shutdown()
return graph_pb2.GraphDef().FromString(out_graph) | 0d1fc74ffe6c16da953b9ac711534b125afb82d6 | 3,654,310 |
def parse_imei(msg):
"""Parse an IMEI (in BCD format) into ASCII format."""
imei = ''
for octet in msg[1:]:
imei += imei_parse_nibble(ord(octet) & 0x0f)
imei += imei_parse_nibble(ord(octet) >> 4)
return imei | 664d9472b51dd806b28b2b2ecee1047307e4e15a | 3,654,312 |
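imei_parse_nibble is not included in this snippet. A plausible helper, shown only as an assumption about what it does (map BCD digit nibbles to ASCII and drop filler nibbles), could look like:
def imei_parse_nibble(nibble):
    """Hypothetical helper: BCD digits 0-9 become '0'-'9'; filler nibbles (0xA-0xF) yield ''."""
    if nibble < 10:
        return chr(nibble + ord('0'))
    return ''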
def get_blender_frame_time(skeleton, frame_id, rate, time_scale, actor_id):
"""Goes from multi-actor integer frame_id to modded blender float time."""
# stays within video frame limits
frame_id2 = skeleton.mod_frame_id(frame_id=frame_id) # type: int
time_ = skeleton.get_time(frame_id)
if actor_id > 0:
time_ = frame_id2 / rate
print('time is {} for {} ({}), orig time: {}, rate: {}, '
'time_scale: {}'
.format(time_, frame_id, frame_id2,
skeleton.get_time(frame_id), rate, time_scale))
frame_time = time_ * time_scale
return frame_time | ca8ab45dbbb1b28b05894b9dd92529245441c60b | 3,654,313 |
from ..plots.wx_symbols import wx_code_to_numeric
from datetime import datetime
import contextlib
def parse_metar(metar_text, year, month, station_metadata=station_info):
"""Parse a METAR report in text form into a list of named tuples.
Parameters
----------
metar_text : str
The METAR report
station_metadata : dict
Mapping of station identifiers to station metadata
year : int
Reported year of observation for constructing 'date_time'
month : int
Reported month of observation for constructing 'date_time'
Returns
-------
metar : namedtuple
Named tuple of parsed METAR fields
Notes
-----
Returned data has named tuples with the following attributes:
* 'station_id': Station Identifier (ex. KLOT)
* 'latitude': Latitude of the observation, measured in degrees
* 'longitude': Longitude of the observation, measured in degrees
* 'elevation': Elevation of the observation above sea level, measured in meters
* 'date_time': Date and time of the observation, datetime object
* 'wind_direction': Direction the wind is coming from, measured in degrees
* 'wind_speed': Wind speed, measured in knots
* 'wind_gust': Wind gust, measured in knots
* 'current_wx1': Current weather (1 of 3)
* 'current_wx2': Current weather (2 of 3)
* 'current_wx3': Current weather (3 of 3)
* 'skyc1': Sky cover (ex. FEW)
* 'skylev1': Height of sky cover 1, measured in feet
* 'skyc2': Sky cover (ex. OVC)
* 'skylev2': Height of sky cover 2, measured in feet
* 'skyc3': Sky cover (ex. FEW)
* 'skylev3': Height of sky cover 3, measured in feet
* 'skyc4': Sky cover (ex. CLR)
* 'skylev4:': Height of sky cover 4, measured in feet
* 'cloudcover': Cloud coverage measured in oktas, taken from maximum of sky cover values
* 'temperature': Temperature, measured in degrees Celsius
* 'dewpoint': Dewpoint, measured in degrees Celsius
* 'altimeter': Altimeter value, measured in inches of mercury
* 'current_wx1_symbol': Current weather symbol (1 of 3), WMO integer code from [WMO306]_
Attachment IV
* 'current_wx2_symbol': Current weather symbol (2 of 3), WMO integer code from [WMO306]_
Attachment IV
* 'current_wx3_symbol': Current weather symbol (3 of 3), WMO integer code from [WMO306]_
Attachment IV
* 'visibility': Visibility distance, measured in meters
* 'remarks': Remarks (unparsed) in the report
"""
# Decode the data using the parser (built using Canopy) the parser utilizes a grammar
# file which follows the format structure dictated by the WMO Handbook, but has the
# flexibility to decode the METAR text when there are missing or incorrectly
# encoded values
tree = parse(metar_text)
# Station ID which is used to find the latitude, longitude, and elevation
station_id = tree.siteid.text.strip()
# Extract the latitude and longitude values from 'master' dictionary
try:
info = station_metadata[station_id]
lat = info.latitude
lon = info.longitude
elev = info.altitude
except KeyError:
lat = np.nan
lon = np.nan
elev = np.nan
# Set the datetime, day, and time_utc
try:
day_time_utc = tree.datetime.text.strip()
day = int(day_time_utc[0:2])
hour = int(day_time_utc[2:4])
minute = int(day_time_utc[4:6])
date_time = datetime(year, month, day, hour, minute)
except ValueError:
date_time = np.nan
# Set the wind values
wind_units = 'kts'
try:
# If there are missing wind values, set wind speed and wind direction to nan
if ('/' in tree.wind.text) or (tree.wind.text == 'KT') or (tree.wind.text == ''):
wind_dir = np.nan
wind_spd = np.nan
# If the wind direction is variable, set wind direction to nan but keep the wind speed
else:
wind_spd = float(tree.wind.wind_spd.text)
if 'MPS' in tree.wind.text:
wind_units = 'm/s'
wind_spd = units.Quantity(wind_spd, wind_units).m_as('knots')
if (tree.wind.wind_dir.text == 'VRB') or (tree.wind.wind_dir.text == 'VAR'):
wind_dir = np.nan
else:
wind_dir = int(tree.wind.wind_dir.text)
# If there are any errors, return nan
except ValueError:
wind_dir = np.nan
wind_spd = np.nan
# Parse out the wind gust field
if 'G' in tree.wind.text:
wind_gust = units.Quantity(float(tree.wind.gust.text.strip()[1:]),
wind_units).m_as('knots')
else:
wind_gust = np.nan
# Handle visibility
try:
if tree.vis.text.endswith('SM'):
visibility = 0
# Strip off the SM and any whitespace around the value and any leading 'M'
vis_str = tree.vis.text[:-2].strip().lstrip('M')
# Case of e.g. 1 1/4SM
if ' ' in vis_str:
whole, vis_str = vis_str.split(maxsplit=1)
visibility += int(whole)
# Handle fraction regardless
if '/' in vis_str:
num, denom = vis_str.split('/', maxsplit=1)
visibility += int(num) / int(denom)
else: # Should be getting all cases of whole number without fraction
visibility += int(vis_str)
visibility = units.Quantity(visibility, 'miles').m_as('meter')
# CAVOK means vis is "at least 10km" and no significant clouds or weather
elif 'CAVOK' in tree.vis.text:
visibility = 10000
elif not tree.vis.text or tree.vis.text.strip() == '////':
visibility = np.nan
else:
# Only worry about the first 4 characters (digits) and ignore possible 'NDV'
visibility = int(tree.vis.text.strip()[:4])
# If there are any errors, return nan
except ValueError:
visibility = np.nan
# Set the weather symbols
# If the weather symbol is missing, set values to nan
current_wx = []
current_wx_symbol = []
if tree.curwx.text.strip() not in ('', '//', 'NSW'):
current_wx = tree.curwx.text.strip().split()
# Handle having e.g. '+' and 'TSRA' parsed into separate items
if current_wx[0] in ('-', '+') and current_wx[1]:
current_wx[0] += current_wx[1]
current_wx.pop(1)
current_wx_symbol = wx_code_to_numeric(current_wx).tolist()
while len(current_wx) < 3:
current_wx.append(np.nan)
while len(current_wx_symbol) < 3:
current_wx_symbol.append(0)
# Set the sky conditions
skyc = [np.nan] * 4
skylev = [np.nan] * 4
if tree.skyc.text[1:3] == 'VV':
skyc[0] = 'VV'
level = tree.skyc.text.strip()[2:5]
skylev[0] = np.nan if '/' in level else 100 * int(level)
else:
for ind, part in enumerate(tree.skyc.text.strip().split(maxsplit=3)):
cover = part[:3]
level = part[3:6] # Strips off any ending text like in FEW017CB
if '/' not in cover:
skyc[ind] = cover
if level and '/' not in level:
with contextlib.suppress(ValueError):
skylev[ind] = float(level) * 100
# Set the cloud cover variable (measured in oktas)
if 'OVC' in tree.skyc.text or 'VV' in tree.skyc.text:
cloudcover = 8
elif 'BKN' in tree.skyc.text:
cloudcover = 6
elif 'SCT' in tree.skyc.text:
cloudcover = 4
elif 'FEW' in tree.skyc.text:
cloudcover = 2
elif ('SKC' in tree.skyc.text or 'NCD' in tree.skyc.text or 'NSC' in tree.skyc.text
or 'CLR' in tree.skyc.text or 'CAVOK' in tree.vis.text):
cloudcover = 0
else:
cloudcover = 10
# Set the temperature and dewpoint
temp = np.nan
dewp = np.nan
if tree.temp_dewp.text and tree.temp_dewp.text != ' MM/MM':
with contextlib.suppress(ValueError):
temp = float(tree.temp_dewp.temp.text[-2:])
if 'M' in tree.temp_dewp.temp.text:
temp *= -1
with contextlib.suppress(ValueError):
dewp = float(tree.temp_dewp.dewp.text[-2:])
if 'M' in tree.temp_dewp.dewp.text:
dewp *= -1
# Set the altimeter value and sea level pressure
if tree.altim.text:
val = float(tree.altim.text.strip()[1:5])
altim = val / 100 if val > 1100 else units.Quantity(val, 'hPa').m_as('inHg')
else:
altim = np.nan
# Strip off extraneous stuff off the remarks section
remarks = tree.remarks.text.lstrip().rstrip('= ')
if remarks.startswith('RMK'):
remarks = remarks[3:].strip()
# Returns a named tuple with all the relevant variables
return Metar(station_id, lat, lon, elev, date_time, wind_dir, wind_spd, wind_gust,
visibility, current_wx[0], current_wx[1], current_wx[2], skyc[0], skylev[0],
skyc[1], skylev[1], skyc[2], skylev[2], skyc[3], skylev[3], cloudcover, temp,
dewp, altim, current_wx_symbol[0], current_wx_symbol[1], current_wx_symbol[2],
remarks) | 3660aeda77343c1bb21729b6b0d36ce597c5ca0d | 3,654,314 |
def update_facemap_material(self, context):
""" Assign the updated material to all faces belonging to active facemap
"""
set_material_for_active_facemap(self.material, context)
return None | 61e5f05cd059ca7646609f4d65f0bb86aaaebc8a | 3,654,315 |
def calculate_accuracy(y_true, y_pred):
"""Calculates the accuracy of the model.
Arguments:
y_true {numpy.array} -- the true labels corresponding to each input
y_pred {numpy.array} -- the model's predictions
Returns:
accuracy {str} -- the accuracy of the model (%)
"""
correctpred, total = 0, 0
for index in range(len(y_pred)):
if(y_pred[index] == y_true[index]):
correctpred = correctpred + 1
total = total+1
return 'accuracy='+str((correctpred*100)/total) | 1ea14f8e4f50d13e2ae557aeec466c5372b99171 | 3,654,316 |
def resolve_diff_args(args):
"""Resolve ambiguity of path vs base/remote for git:
Cases:
- No args: Use defaults
- One arg: Either base or path, check with is_gitref.
- Two args or more: Check if first two are base/remote by is_gitref
"""
base = args.base
remote = args.remote
paths = getattr(args, 'paths', None)
if not paths:
paths = None
if remote is None and paths is None:
# One arg only:
if not is_gitref(base):
paths = base
base = 'HEAD'
# Two or more args:
elif paths is None:
# Two exactly
# - Two files (not git-mode, do nothing)
# - Base gitref one file (remote=None, path = file)
# - Base gitref remote gitref (do nothing)
if is_gitref(base) and not is_gitref(remote):
paths = remote
remote = None
elif base and remote:
# Three or more
if not is_gitref(base):
paths = [base, remote] + paths
base = remote = None
elif is_gitref(base) and not is_gitref(remote):
paths = [remote] + paths
remote = None
return base, remote, paths | 6260d69bffd8a4a4d35471c5710c9a86324f9549 | 3,654,317 |
def get_coco_metrics_from_gt_and_det(groundtruth_dict, detection_boxes_list, category=''):
"""
Get COCO metrics given dictionary of groundtruth dictionary and the list of
detections.
"""
coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(detection_boxes_list)
box_evaluator = coco_tools.COCOEvalWrapper(coco_wrapped_groundtruth, coco_wrapped_detections, agnostic_mode=False)
box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(
include_metrics_per_category=False,
all_metrics_per_category=False,
super_categories=None
)
box_metrics.update(box_per_category_ap)
box_metrics = {'DetectionBoxes_'+ category + key: value
for key, value in iter(box_metrics.items())}
return box_metrics | fbf6ca237f43c74ebe37772006c856f3a1850683 | 3,654,318 |
def createDataset(dataPath,dStr,sigScale=1):
"""
dStr from ["20K", "1M", "10M"]
"""
print("Loading D1B dataset...")
ft1_d = loadD1B(dataPath,dStr,w=40)
if dStr=="20K":
ft1_d = ft1_d[:10000,:]
print("Running PCA on D1B")
pcaD1B = PCA(n_components=ft1_d.shape[1],random_state=0)
ft1_d = pcaD1B.fit_transform(ft1_d)
print("Loading FAS dataset")
ft1_f, ft2_f, gt_f, pos1_f, pos2_f = loadFAS(dataPath)
if dStr=="20K":
ft1_f = ft1_f[:10000,:]
ft2_f = ft2_f[:10000,:]
print("Running PCA on FAS")
pcaFAS = PCA(n_components=ft1_d.shape[1],random_state=0)
ft1_f = pcaFAS.fit_transform(ft1_f)
ft2_f = pcaFAS.transform(ft2_f)
print("Re-scaling Variance of D1B using FAS data")
ft1_d = np.std(ft1_f,axis=0)*ft1_d/np.std(ft1_d,axis=0)
print("Computing a new version of D1B to be used as a query traverse")
ftDiff = calcChange(dataPath)
noiseVar = np.var(ftDiff,axis=0)
noiseMean = np.mean(ftDiff,axis=0)
print("\t Incorporating the 'change' from FAS along with some noise")
ft1_n = addNoiseToFt(ft1_d,noiseMean,noiseVar,sigScale)
print("Concatenating the two datasets")
ft1 = np.concatenate([ft1_d,ft1_f],axis=0)
ft2 = np.concatenate([ft1_n,ft2_f],axis=0)
del ft1_d, ft1_n, ft1_f, ft2_f
return ft1, ft2 | 02cf1b4a5708abf6d7e3fee323c5fb096fdbbffb | 3,654,319 |
def generate_interblock_leader():
"""Generates the leader between normal blocks"""
return b'\x55' * 0x2 | 99878b67a31a4169bc73ad9b9b249a981a22177f | 3,654,320 |
import itertools
import warnings
def discover_handlers(entrypoint_group_name="databroker.handlers", skip_failures=True):
"""
Discover handlers via entrypoints.
Parameters
----------
entrypoint_group_name: str
Default is 'databroker.handlers', the "official" databroker entrypoint
for handlers.
skip_failures: boolean
True by default. Errors loading a handler class are converted to
warnings if this is True.
Returns
-------
handler_registry: dict
A suitable default handler registry
"""
group = entrypoints.get_group_named(entrypoint_group_name)
group_all = entrypoints.get_group_all(entrypoint_group_name)
if len(group_all) != len(group):
# There are some name collisions. Let's go digging for them.
for name, matches in itertools.groupby(group_all, lambda ep: ep.name):
matches = list(matches)
if len(matches) != 1:
winner = group[name]
warnings.warn(
f"There are {len(matches)} entrypoints for the "
f"databroker handler spec {name!r}. "
f"They are {matches}. The match {winner} has won the race."
)
handler_registry = {}
for name, entrypoint in group.items():
try:
handler_class = entrypoint.load()
except Exception as exc:
if skip_failures:
warnings.warn(
f"Skipping {entrypoint!r} which failed to load. "
f"Exception: {exc!r}"
)
continue
else:
raise
handler_registry[name] = handler_class
return handler_registry | d6b4b5c2071833503689abf474d5ebbc928c30c8 | 3,654,321 |
def create_highway_layer(highway_type,
num_layer,
unit_dim,
window_size,
activation,
dropout,
num_gpus,
default_gpu_id,
regularizer,
random_seed,
trainable):
"""create highway layer"""
scope = "highway/{0}".format(highway_type)
if highway_type == "highway":
highway_layer = StackedHighway(num_layer=num_layer, unit_dim=unit_dim,
activation=activation, dropout=dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id,
regularizer=regularizer, random_seed=random_seed, trainable=trainable)
elif highway_type == "conv_highway":
highway_layer = StackedHighway(num_layer=num_layer, num_filter=unit_dim, window_size=window_size,
activation=activation, dropout=dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id,
regularizer=regularizer, random_seed=random_seed, trainable=trainable)
else:
raise ValueError("unsupported dense type {0}".format(highway_type))
return highway_layer | 3bb1aafe9935f81683dfb036c91ec52da808932f | 3,654,322 |
def compute_metrics(y_true, y_predicted, y_prob = None):
"""compute metrics for the prredicted labels against ground truth
@args:
y_true: the ground truth label
y_predicted: the predicted label
y_prob: probability of the predicted label
@returns:
various metrics: F1-score, AUC of ROC, brier-score, also plots AUC
"""
# plot AUC
if y_prob is not None:
fpr, tpr, _ = roc_curve(y_true, y_prob)
auc = roc_auc_score(y_true, y_prob)
plt.plot(fpr, tpr, label="data 1, auc=" + str(auc))
plt.legend(loc=4)
plt.show()
# brier = brier_score_loss((y_true, y_prob))
# F1 score and brier score
f1 = f1_score(y_true, y_predicted)
# classification report
plot_classification_report(classification_report(y_true, y_predicted))
return f1 | e31264fa05ad02bcc73de0746df12dcccb1889fd | 3,654,323 |
def session_store(decoy: Decoy) -> SessionStore:
"""Get a mock SessionStore interface."""
return decoy.mock(cls=SessionStore) | 92518d32c7195f8fe6a6f3e44640cb2a5accb28b | 3,654,324 |
from typing import Dict
from typing import Any
from typing import List
def extract_values(obj: Dict[str, Any], key: str, val: Any) -> List[Dict[str, Any]]:
"""
Pull all values of specified key from nested JSON.
Args:
obj (dict): Dictionary to be searched
key (str): tuple of key and value.
value (any): value, which can be any type
Returns:
list of matched key-value pairs
"""
return [elem for elem in extract(obj, key, val)] | 368203a85ded379d6c4042dc90e803611bf810d9 | 3,654,326 |
def createMeshPatches(ax, mesh, rasterized=False, verbose=True):
"""Utility function to create 2d mesh patches within a given ax."""
if not mesh:
pg.error("drawMeshBoundaries(ax, mesh): invalid mesh:", mesh)
return
if mesh.nodeCount() < 2:
pg.error("drawMeshBoundaries(ax, mesh): to few nodes:", mesh)
return
pg.tic()
polys = [_createCellPolygon(c) for c in mesh.cells()]
patches = mpl.collections.PolyCollection(polys, picker=True,
rasterized=rasterized)
if verbose:
pg.info("Creation of mesh patches took = ", pg.toc())
return patches | 977de081b20e0ab0709887213b53f5318b1ff5f0 | 3,654,327 |
def get_url_name(url_):
"""从url_中获取名字"""
raw_res = url_.split('/', -1)[-1]
raw_res = raw_res.split('.', 1)[0]
res = raw_res[-15:]
return res | a8f3b8dbc4a53e839b3047604e71ffaf36c00767 | 3,654,328 |
def check_uuid_in_db(uuid_to_validate, uuid_type):
"""
A helper function to validate whether a UUID exists within our db.
"""
uuid_in_db = None
if uuid_type.name == "SESSION":
uuid_in_db = Sessions.query.filter_by(session_uuid=uuid_to_validate).first()
elif uuid_type.name == "QUIZ":
uuid_in_db = Scores.query.filter_by(quiz_uuid=uuid_to_validate).first()
elif uuid_type.name == "USER":
uuid_in_db = Users.query.filter_by(user_uuid=uuid_to_validate).first()
if not uuid_in_db:
raise DatabaseError(message=f"{uuid_type.name}_UUID is not in the db.")
return uuid_in_db | b151e7b7b393daf9647f308dea6fddd5eec3cb92 | 3,654,329 |
def delete(uuid):
""" Deletes stored entities and time them.
Args:
uuid: A str, unique identifier, a part of the keynames of entities.
Returns:
A tuple of two lists. A list of float times to delete
all entities, and a list of errors. A zero value signifies
a failure.
"""
timings = []
errors = []
for index in range(0, constants.NUM_SAMPLES):
entity = None
try:
entity = TestModel.get_by_key_name(key_names=uuid + str(index))
if not entity:
raise Exception("Unable to first fetch entity.")
except Exception as exception:
logging.exception(exception)
errors.append(str(exception))
total_time = 0
timings.append(total_time)
logging.error("Left over entity with keyname {0}".\
format(uuid + str(index)))
continue
start = time.time()
try:
entity.delete()
total_time = time.time() - start
except Exception as exception:
logging.exception(exception)
errors.append(str(exception))
total_time = 0
timings.append(total_time * constants.SECONDS_TO_MILLI)
return (timings, errors) | c0f9b42829dd8bd0963ea3a9b904d1aec0c50368 | 3,654,330 |
def remove_prefix(string, prefix):
"""
This function removes the given prefix from a string, if the string does
indeed begin with the prefix; otherwise, it returns the string
unmodified.
"""
if string.startswith(prefix):
return string[len(prefix):]
else:
return string | 73cffca0e9938ea48f3781c7821fcbcf56e0cf25 | 3,654,331 |
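Usage sketch:
print(remove_prefix("feature/login", "feature/"))  # login
print(remove_prefix("main", "feature/"))           # main (unchanged)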
import torch
def action_probs_to_action(probs):
""" Takes output of controller and converts to action in format [0,0,0,0] """
forward = probs[:, 0:2]; camera=probs[:, 2:5]; jump=probs[:,5:7];
action = [torch.distributions.Categorical(p).sample().detach().item() for p in [forward,camera,jump]]
action.append(0) # not allowing any motion along side dimension
return action | 00395569cd3fb7696bd0aa050f6fbcd6641d3741 | 3,654,332 |
from typing import Tuple
from typing import List
from typing import Set
def search_for_subject(subject: Synset, num_urls: int, subscription_key: str, custom_config: str,
host: str, path: str) -> Tuple[List[Tuple[str, str, str]], str, str]:
"""Perform the search phase for one particular subject."""
query = get_search_query(subject)
logger.info(f"Subject {subject.name()} - Search query: `{query}`")
urls: Set[str] = set()
results: List[Tuple[str, str, str]] = []
wiki_links: List[str] = []
offset = 0
step = 0
while len(urls) < num_urls:
search_result_json = bing_search(search_query=query,
count=SEARCH_BATCH_SIZE,
offset=offset,
subscription_key=subscription_key,
custom_config=custom_config,
host=host,
path=path)
try:
for url, title, snippet in parse_content_from_search_result(search_result_json):
if url not in urls:
urls.add(url)
results.append((url, title, snippet))
if url.startswith(EN_WIKIPEDIA_PREFIX):
wiki_links.append(url)
if len(urls) >= num_urls:
break
except Exception:
break
offset += SEARCH_BATCH_SIZE
step += 1
if step >= MAX_SEARCH_STEP:
break
if subject.name() in MANUAL_WN2WP:
logger.info("Detected manual WordNet-Wikipedia linking")
wiki = EN_WIKIPEDIA_PREFIX + quote_plus(MANUAL_WN2WP[subject.name()]["wikipedia"]).capitalize()
wiki_map_source = MANUAL_WN2WP[subject.name()]["source"]
else:
if len(wiki_links) == 0:
wiki_links = search_wiki(subject, subscription_key, custom_config, host, path)
wiki = wiki_links[0]
for w in wiki_links:
w = unquote_plus(w)
if "List_" in w:
continue
if "(disambiguation)" in w:
continue
if "Category:" in w:
continue
if "Template:" in w:
continue
wiki = w
break
wiki_map_source = "BING"
# Add Wikipedia article
if wiki.lower() not in set(url.lower() for url in urls):
results[-1] = (wiki, "{} - Wikipedia".format(wiki[(wiki.rindex("/") + 1):]).capitalize(), "")
return results, wiki, wiki_map_source | fde60dc857f5623e8aae9a7a52621d4386034fb5 | 3,654,334 |
def get_kwargs(class_name: str) -> Kwargs:
"""Returns the specific kwargs for each field `class_name`"""
default_kwargs = get_default_kwargs()
class_kwargs = get_setting("COMMON_KWARGS", {})
use_kwargs = class_kwargs.get(class_name, default_kwargs)
return use_kwargs | 8b1ee7448792e2740053edf51528c99f3e2b5698 | 3,654,335 |
def minute_info(x):
"""
separates the minutes from time stamp. Returns minute of time.
"""
n2 = x.minute
return n2/60 | c166bb8f759a5eed1b45b2dd8f228206357deb28 | 3,654,336 |
from bs4 import BeautifulSoup
def remove_html_tags(text):
"""Removes HTML Tags from texts and replaces special spaces with regular spaces"""
text = BeautifulSoup(text, 'html.parser').get_text()
text = text.replace(u'\xa0', ' ')
return text | 7f31a18d81ebc80b202ac697eb7b19fe206aed95 | 3,654,337 |
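Usage sketch; &nbsp; entities become non-breaking spaces, which the second step normalizes:
print(remove_html_tags("<p>Price:&nbsp;<b>5&nbsp;EUR</b></p>"))  # Price: 5 EUR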
def patchy(target, source=None):
""" If source is not supplied, auto updates cannot be applied """
if isinstance(target, str):
target = resolve(target)
if isinstance(source, str):
source = resolve(source)
if isinstance(target, ModuleType):
return PatchModule(target, source)
elif isinstance(target, type) and source:
return PatchClass(target, source) | eece41abbc040fd306ae9b2813ae6f3e089cee82 | 3,654,338 |
def _handle_special_addresses(lion):
"""
When there are special address codes/names, ensure that there is a duplicate
row with the special name and code as the primary.
Note: Only for special address type 'P' - addressable place names
"""
special = lion[
(lion['special_address_type'].isin(['P', 'B', 'G'])) &
(lion['street'] != lion['special_address_street_name'])
].drop(columns=['street', 'street_code'])
special['street'] = special['special_address_street_name']
special['street_code'] = special['special_address_street_code']
special['special_address_street_code'] = ""
special['special_address_street_name'] = ""
lion = pd.concat([lion, special], sort=True).reset_index(drop=True)
return lion | c8079ef0cba6e96940ed13b74c87a1bd49416376 | 3,654,339 |
def get_local():
"""Construct a local population."""
pop = CosmicPopulation.simple(SIZE, generate=True)
survey = Survey('perfect')
surv_pop = SurveyPopulation(pop, survey)
return surv_pop.frbs.s_peak | 2ab081ffbd79c991c8a3d6ec7097a09407e5fe8a | 3,654,340 |
def calculate_y_pos(x, centre):
"""Calculates the y-coordinate on a parabolic curve, given x."""
centre = 80
y = 1 / centre * (x - centre) ** 2 + sun_radius
return int(y) | e57501c9e83bc26491266c9237f3e3b722ccacef | 3,654,342 |
def extract_flowlines(gdb_path, target_crs, extra_flowline_cols=[]):
"""
Extracts flowlines data from NHDPlusHR data product.
Extract flowlines from NHDPlusHR data product, joins to VAA table,
and filters out coastlines.
Extracts joins between flowlines, and filters out coastlines.
Parameters
----------
gdb_path : str
path to the NHD HUC4 Geodatabase
target_crs: GeoPandas CRS object
target CRS to project NHD to for analysis, like length calculations.
Must be a planar projection.
extra_cols: list
List of extra field names to extract from NHDFlowline layer
Returns
-------
tuple of (GeoDataFrame, DataFrame)
(flowlines, joins)
"""
### Read in flowline data and convert to data frame
print("Reading flowlines")
flowline_cols = FLOWLINE_COLS + extra_flowline_cols
df = read_dataframe(
gdb_path, layer="NHDFlowline", force_2d=True, columns=[flowline_cols]
)
print("Read {:,} flowlines".format(len(df)))
# Index on NHDPlusID for easy joins to other NHD data
df.NHDPlusID = df.NHDPlusID.astype("uint64")
df = df.set_index(["NHDPlusID"], drop=False)
# convert MultiLineStrings to LineStrings (all have a single linestring)
df.geometry = pg.get_geometry(df.geometry.values.data, 0)
### Read in VAA and convert to data frame
# NOTE: not all records in Flowlines have corresponding records in VAA
# we drop those that do not since we need these fields.
print("Reading VAA table and joining...")
vaa_df = read_dataframe(gdb_path, layer="NHDPlusFlowlineVAA", columns=[VAA_COLS])
vaa_df.NHDPlusID = vaa_df.NHDPlusID.astype("uint64")
vaa_df = vaa_df.set_index(["NHDPlusID"])
df = df.join(vaa_df, how="inner")
print("{:,} features after join to VAA".format(len(df)))
# Simplify data types for smaller files and faster IO
df.FType = df.FType.astype("uint16")
df.FCode = df.FCode.astype("uint16")
df.StreamOrde = df.StreamOrde.astype("uint8")
df.Slope = df.Slope.astype("float32")
df.MinElevSmo = df.MinElevSmo.astype("float32")
df.MaxElevSmo = df.MaxElevSmo.astype("float32")
### Read in flowline joins
print("Reading flowline joins")
join_df = gp.read_file(gdb_path, layer="NHDPlusFlow")[
["FromNHDPID", "ToNHDPID"]
].rename(columns={"FromNHDPID": "upstream", "ToNHDPID": "downstream"})
join_df.upstream = join_df.upstream.astype("uint64")
join_df.downstream = join_df.downstream.astype("uint64")
### Label loops for easier removal later
# WARNING: loops may be very problematic from a network processing standpoint.
# Include with caution.
print("Identifying loops")
df["loop"] = (df.StreamOrde != df.StreamCalc) | (df.FlowDir.isnull())
idx = df.loc[df.loop].index
join_df["loop"] = join_df.upstream.isin(idx) | join_df.downstream.isin(idx)
### Filter out coastlines and update joins
# WARNING: we tried filtering out pipelines (FType == 428). It doesn't work properly;
# there are many that go through dams and are thus needed to calculate
# network connectivity and gain of removing a dam.
print("Filtering out coastlines...")
coastline_idx = df.loc[df.FType == 566].index
df = df.loc[~df.index.isin(coastline_idx)].copy()
# remove any joins that have coastlines as upstream
# these are themselves coastline segments
join_df = join_df.loc[~join_df.upstream.isin(coastline_idx)].copy()
# set the downstream to 0 for any that join coastlines
# this will enable us to mark these as downstream terminals in
# the network analysis later
join_df.loc[join_df.downstream.isin(coastline_idx), "downstream"] = 0
# drop any duplicates (above operation sets some joins to upstream and downstream of 0)
join_df = join_df.drop_duplicates()
print("{:,} features after removing coastlines".format(len(df)))
### Add calculated fields
# Set our internal master IDs to the original index of the file we start from
# Assume that we can always fit into a uint32, which is ~400 million records
# and probably bigger than anything we could ever read in
df["lineID"] = df.index.values.astype("uint32") + 1
join_df = (
join_df.join(df.lineID.rename("upstream_id"), on="upstream")
.join(df.lineID.rename("downstream_id"), on="downstream")
.fillna(0)
)
for col in ("upstream", "downstream"):
join_df[col] = join_df[col].astype("uint64")
for col in ("upstream_id", "downstream_id"):
join_df[col] = join_df[col].astype("uint32")
### Calculate size classes
print("Calculating size class")
drainage = df.TotDASqKm
df.loc[drainage < 10, "sizeclass"] = "1a"
df.loc[(drainage >= 10) & (drainage < 100), "sizeclass"] = "1b"
df.loc[(drainage >= 100) & (drainage < 518), "sizeclass"] = "2"
df.loc[(drainage >= 518) & (drainage < 2590), "sizeclass"] = "3a"
df.loc[(drainage >= 2590) & (drainage < 10000), "sizeclass"] = "3b"
df.loc[(drainage >= 10000) & (drainage < 25000), "sizeclass"] = "4"
df.loc[drainage >= 25000, "sizeclass"] = "5"
print("projecting to target projection")
df = df.to_crs(target_crs)
# Calculate length and sinuosity
print("Calculating length and sinuosity")
df["length"] = df.geometry.length.astype("float32")
df["sinuosity"] = df.geometry.apply(calculate_sinuosity).astype("float32")
# set join types to make it easier to track
join_df["type"] = "internal" # set default
join_df.loc[join_df.upstream == 0, "type"] = "origin"
join_df.loc[join_df.downstream == 0, "type"] = "terminal"
join_df.loc[(join_df.upstream != 0) & (join_df.upstream_id == 0), "type"] = "huc_in"
# drop columns not useful for later processing steps
df = df.drop(columns=["FlowDir", "StreamCalc"])
return df, join_df | 8e0f0fec59441a3370b958452a2e4674f1e0ee34 | 3,654,343 |
def split_str_to_list(input_str, split_char=","):
"""Split a string into a list of elements.
Args:
input_str (str): The string to split
split_char (str, optional): The character to split the string by. Defaults
to ",".
Returns:
(list): The string split into a list
"""
# Split a string into a list using `,` char
split_str = input_str.split(split_char)
# For each element in split_str, strip leading/trailing whitespace
for i, element in enumerate(split_str):
split_str[i] = element.strip()
return split_str | 2b13868aed1869310a1398886f6777ddceb6c777 | 3,654,345 |
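Usage sketch:
print(split_str_to_list("eth0, eth1 ,eth2"))         # ['eth0', 'eth1', 'eth2']
print(split_str_to_list("a| b |c", split_char="|"))  # ['a', 'b', 'c']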
def generate_password(length):
"""
This will create a random password for the user
Args:
length - the user's preferred length for the password
Return:
It will return a random password of user's preferred length
"""
return Password.generate_pass(length) | 76fd4e06364b4cbfeffb389cb959f5d22f0acc71 | 3,654,346 |
def export_csv(obj, file_name, point_type='evalpts', **kwargs):
""" Exports control points or evaluated points as a CSV file.
:param obj: a curve or a surface object
:type obj: abstract.Curve, abstract.Surface
:param file_name: output file name
:type file_name: str
:param point_type: ``ctrlpts`` for control points or ``evalpts`` for evaluated points
:type point_type: str
:raises IOError: an error occurred writing the file
"""
if not isinstance(obj, (abstract.Curve, abstract.Surface)):
raise ValueError("Input object should be a curve or a surface")
# Pick correct points from the object
if point_type == 'ctrlpts':
points = obj.ctrlpts
elif point_type == 'evalpts' or point_type == 'curvepts' or point_type == 'surfpts':
points = obj.evalpts
else:
raise ValueError("Please choose a valid point type option. Possible types: ctrlpts, evalpts")
# Prepare CSV header
dim = len(points[0])
line = "dim "
for i in range(dim-1):
line += str(i + 1) + ", dim "
line += str(dim) + "\n"
# Prepare values
for pt in points:
line += ",".join([str(p) for p in pt]) + "\n"
# Write to file
return exch.write_file(file_name, line) | a42f13a5af94344f0ef9c6b9b8aca62067dfd77f | 3,654,347 |
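A minimal usage sketch for the exporter above, assuming the surrounding module is geomdl (which provides the `abstract` and `exch` helpers it relies on); the curve below is illustrative only:

from geomdl import BSpline

# build a small clamped cubic B-spline curve and export its evaluated points
crv = BSpline.Curve()
crv.degree = 3
crv.ctrlpts = [[0, 0, 0], [1, 2, 0], [2, 2, 0], [3, 0, 0]]
crv.knotvector = [0, 0, 0, 0, 1, 1, 1, 1]
export_csv(crv, "curve_evalpts.csv", point_type="evalpts")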
import re
def formatRFC822Headers(headers):
""" Convert the key-value pairs in 'headers' to valid RFC822-style
headers, including adding leading whitespace to elements which
contain newlines in order to preserve continuation-line semantics.
"""
munged = []
linesplit = re.compile(r'[\n\r]+?')
for key, value in headers:
vallines = linesplit.split(value)
while vallines:
if vallines[-1].rstrip() == '':
vallines = vallines[:-1]
else:
break
munged.append('%s: %s' % (key, '\r\n '.join(vallines)))
return '\r\n'.join(munged) | 4c7dd97c9079daf144acf83241ebe9f025020611 | 3,654,348 |
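A small usage sketch (header values are made up); multi-line values come back folded with a leading space, as RFC822 continuation lines:

pairs = [("Title", "My Document"), ("Description", "line one\nline two")]
print(formatRFC822Headers(pairs))
# Title: My Document
# Description: line one
#  line two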
def first_fixation_duration(trial: Trial, region_number: int) -> RegionMeasure:
"""
The duration of the first fixation in a region during first pass reading
(i.e., before the reader fixates areas beyond the region).
If this region is skipped during first pass, this measure is None.
::
fp_fixations = get_first_pass_fixations(trial, region_number)
if length of fp_fixations is 0:
return None
else:
return duration of first fixation in fp_fixations
"""
region = region_exists(trial, region_number)
fp_fixations = get_fp_fixations(trial, region_number)
if not fp_fixations:
return save_measure(trial, region, "first_fixation_duration", None, None)
return save_measure(
trial,
region,
"first_fixation_duration",
fp_fixations[0].duration(),
[fp_fixations[0]],
) | cdb1435f382d277bb3a116e2d268a566b17692a4 | 3,654,349 |
def find_in_path(input_data, path):
"""Finds values at the path in input_data.
:param input_data: dict or list
:param path: the path of the values example: b.*.name
:result: list of found data
"""
result = find(input_data, path.split('.'))
return [value for _, value in result if value] | 6529486013966df264fc3f84a17a8f858a37190c | 3,654,350 |
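A usage sketch, assuming the `find` helper used above supports `*` as a wildcard over list elements (the sample data is invented):

data = {"b": [{"name": "alpha"}, {"name": "beta"}, {"name": None}]}
print(find_in_path(data, "b.*.name"))  # expected: ['alpha', 'beta'] (falsy values are dropped)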
def post_test_check(duthost, up_bgp_neighbors):
"""Post-checks the status of critical processes and state of BGP sessions.
Args:
duthost: Host DUT.
        up_bgp_neighbors: A list of BGP neighbors expected to be in the established state.
Return:
This function will return True if all critical processes are running and
all BGP sessions are established. Otherwise it will return False.
"""
return check_all_critical_processes_status(duthost) and duthost.check_bgp_session_state(up_bgp_neighbors, "established") | 6ce585abbfbdb2b8a1f858ce54f4cd837c84bbda | 3,654,351 |
def fill_with_mode(filename, column):
"""
Fill the missing values(NaN) in a column with the mode of that column
Args:
filename: Name of the CSV file.
column: Name of the column to fill
Returns:
df: Pandas DataFrame object.
(Representing entire data and where 'column' does not contain NaN values)
(Filled with above mentioned rules)
"""
df=pd.read_csv(filename)
mode = df[column].mode()
df[column] = df[column].fillna(mode[0])
return df | 6b9dc4b0530c21b0a43776b05ce0d8620f75dd30 | 3,654,352 |
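A small end-to-end sketch using a throwaway CSV (file name and values are illustrative):

import pandas as pd

pd.DataFrame({"color": ["red", "blue", None, "red"]}).to_csv("sample.csv", index=False)
filled = fill_with_mode("sample.csv", "color")
print(filled["color"].tolist())  # ['red', 'blue', 'red', 'red']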
def get_model_spec(
model_zoo,
model_def,
model_params,
dataset_fn,
loss,
optimizer,
eval_metrics_fn,
prediction_outputs_processor,
):
"""Get the model spec items in a tuple.
The model spec tuple contains the following items in order:
* The model object instantiated with parameters specified
in `model_params`,
* The `dataset_fn`,
* The `loss`,
* The `optimizer`,
* The `eval_metrics_fn`,
* The `prediction_outputs_processor`. Note that it will print
warning if it's not inherited from `BasePredictionOutputsProcessor`.
"""
model_def_module_file = get_module_file_path(model_zoo, model_def)
default_module = load_module(model_def_module_file).__dict__
model = load_model_from_module(model_def, default_module, model_params)
prediction_outputs_processor = _get_spec_value(
prediction_outputs_processor, model_zoo, default_module
)
if prediction_outputs_processor and not isinstance(
prediction_outputs_processor, BasePredictionOutputsProcessor
):
logger.warning(
"prediction_outputs_processor is not "
"inherited from BasePredictionOutputsProcessor. "
"Prediction outputs may not be processed correctly."
)
return (
model,
_get_spec_value(dataset_fn, model_zoo, default_module, required=True),
_get_spec_value(loss, model_zoo, default_module, required=True),
_get_spec_value(optimizer, model_zoo, default_module, required=True),
_get_spec_value(
eval_metrics_fn, model_zoo, default_module, required=True
),
prediction_outputs_processor,
) | 427cf6f2705f32a493fdd8c16cc57d337b528a2f | 3,654,354 |
def clean_meta(unclean_list):
"""
cleans raw_vcf_header_list for downstream processing
    :return: list of [key, value] lists parsed from the raw VCF meta lines
"""
clean_list = []
for i in unclean_list:
if "=<" in i:
i = i.rstrip(">")
i = i.replace("##", "")
ii = i.split("=<", 1)
else:
i = i.replace("##", "")
ii = i.split("=", 1)
clean_list.append(ii)
return clean_list | 03dcbcad57b129fd6ff379f3fb3181c91f8f4106 | 3,654,355 |
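A usage sketch with a couple of hand-written VCF meta lines:

raw = [
    '##fileformat=VCFv4.2',
    '##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">',
]
print(clean_meta(raw))
# [['fileformat', 'VCFv4.2'], ['INFO', 'ID=DP,Number=1,Type=Integer,Description="Total Depth"']]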
import itertools
def generate_result_table(models, data_info): # per idx (gene/transcript)
"""
Generate a table containing learned model parameters and statistic tests.
Parameters
----------
models
Learned models for individual genomic positions of a gene.
group_labels
Labels of samples.
data_inf
Dict
Returns
-------
table
List of tuples.
"""
###
condition_names,run_names = get_ordered_condition_run_names(data_info) # information from the config file used for modelling.
###
###
table = []
for key, (model,prefiltering) in models.items():
idx, position, kmer = key
mu = model.nodes['mu_tau'].expected() # K
sigma2 = 1./model.nodes['mu_tau'].expected(var='gamma') # K
var_mu = model.nodes['mu_tau'].variance(var='normal') # K
# mu = model.nodes['y'].params['mean']
# sigma2 = model.nodes['y'].params['variance']
w = model.nodes['w'].expected() # GK
N = model.nodes['y'].params['N'].round() # GK
N0 = N[:, 0].squeeze()
N1 = N[:, 1].squeeze()
w0 = w[:, 0].squeeze()
coverage = np.sum(model.nodes['y'].params['N'], axis=-1) # GK => G # n_reads per group
p_overlap, list_cdf_at_intersections = stats.calc_prob_overlapping(mu, sigma2)
model_group_names = model.nodes['x'].params['group_names'] #condition_names if pooling, run_names otherwise.
### Cluster assignment ###
conf_mu = [calculate_confidence_cluster_assignment(mu[0],model.kmer_signal),calculate_confidence_cluster_assignment(mu[1],model.kmer_signal)]
cluster_idx = {}
if conf_mu[0] > conf_mu[1]:
cluster_idx['unmod'] = 0
cluster_idx['mod'] = 1
else:
cluster_idx['unmod'] = 1
cluster_idx['mod'] = 0
mu_assigned = [mu[cluster_idx['unmod']],mu[cluster_idx['mod']]]
sigma2_assigned = [sigma2[cluster_idx['unmod']],sigma2[cluster_idx['mod']]]
conf_mu = [conf_mu[cluster_idx['unmod']],conf_mu[cluster_idx['mod']]]
w_mod = w[:,cluster_idx['mod']]
mod_assignment = [['higher','lower'][(mu[0]<mu[1])^cluster_idx['mod']]]
### calculate stats_pairwise
stats_pairwise = []
for cond1, cond2 in itertools.combinations(condition_names, 2):
if model.method['pooling']:
cond1, cond2 = [cond1], [cond2]
else:
cond1, cond2 = list(data_info[cond1].keys()), list(data_info[cond2].keys())
if any(r in model_group_names for r in cond1) and any(r in model_group_names for r in cond2):
w_cond1 = w[np.isin(model_group_names, cond1), cluster_idx['mod']].flatten()
w_cond2 = w[np.isin(model_group_names, cond2), cluster_idx['mod']].flatten()
n_cond1 = coverage[np.isin(model_group_names, cond1)]
n_cond2 = coverage[np.isin(model_group_names, cond2)]
z_score, p_ws = stats.z_test(w_cond1, w_cond2, n_cond1, n_cond2) # two=tailed
w_mod_mean_diff = np.mean(w_cond1)-np.mean(w_cond2)
stats_pairwise += [w_mod_mean_diff, p_ws, z_score]
else:
stats_pairwise += [None, None, None]
if len(condition_names) > 2:
### calculate stats_one_vs_all
stats_one_vs_all = []
for cond in condition_names:
if model.method['pooling']:
cond = [cond]
else:
cond = list(data_info[cond].keys())
if any(r in model_group_names for r in cond):
w_cond1 = w[np.isin(model_group_names, cond), cluster_idx['mod']].flatten()
w_cond2 = w[~np.isin(model_group_names, cond), cluster_idx['mod']].flatten()
n_cond1 = coverage[np.isin(model_group_names, cond)]
n_cond2 = coverage[~np.isin(model_group_names, cond)]
z_score, p_ws = stats.z_test(w_cond1, w_cond2, n_cond1, n_cond2)
w_mod_mean_diff = np.mean(w_cond1)-np.mean(w_cond2)
stats_one_vs_all += [w_mod_mean_diff, p_ws, z_score]
else:
stats_one_vs_all += [None, None, None]
###
w_mod_ordered, coverage_ordered = [], [] # ordered by conditon_names or run_names based on headers.
if model.method['pooling']:
names = condition_names
else:
names = run_names
for name in names:
if name in model_group_names:
w_mod_ordered += list(w_mod[np.isin(model_group_names, name)])
coverage_ordered += list(coverage[np.isin(model_group_names, name)])
else:
w_mod_ordered += [None]
coverage_ordered += [None]
###
### prepare values to write
row = [idx, position, kmer]
row += stats_pairwise
if len(condition_names) > 2:
row += stats_one_vs_all
# row += [p_overlap]
# row += list_cdf_at_intersections
row += list(w_mod_ordered)
row += list(coverage_ordered)
row += mu_assigned + sigma2_assigned + conf_mu + mod_assignment
if prefiltering is not None:
row += [prefiltering[model.method['prefiltering']['method']]]
### Filtering those positions with a nearly single distribution.
cdf_threshold = 0.1
x_x1, y_x1, x_x2, y_x2 = list_cdf_at_intersections
is_not_inside = ((y_x1 < cdf_threshold) & (x_x1 < cdf_threshold)) | ((y_x2 < cdf_threshold) & (x_x2 < cdf_threshold)) | (( (1-y_x1) < cdf_threshold) & ((1-x_x1) < cdf_threshold)) | (( (1-y_x2) < cdf_threshold) & ((1-x_x2) < cdf_threshold))
if (p_overlap <= 0.5) and (is_not_inside):
table += [tuple(row)]
return table | 455cbe41c2114e3a81ac186b2adf07753041d753 | 3,654,356 |
def get_href_kind(href, domain):
"""Return kind of href (internal or external)"""
if is_internal_href(href, domain):
kind = 'internal'
else:
kind = 'external'
return kind | e63b3e28d0f6f776338da827f61b0c5709dfe990 | 3,654,357 |
def check_mark(value):
"""Helper method to create an html formatted entry for the flags in tables."""
return format_html('✓') if value == 1 else '' | 07430e1b5be180b01dd8dd045db01ac4ee9ca6ee | 3,654,359 |
def military_to_english_time(time, fmt="{0}:{1:02d}{2}"):
""" assumes 08:33:55 and 22:33:42 type times
will return 8:33am and 10:33pm
    (note: we floor the minutes)
"""
ret_val = time
try:
h, m = split_time(time)
ampm = "am"
if h >= 12:
ampm = "pm"
if h >= 24:
ampm = "am"
h = h % 12
if h == 0:
h = 12
ret_val = fmt.format(h, m, ampm)
except:
pass
return ret_val | 880f42354c407a7fae5ba2685b38a10260bc9f58 | 3,654,361 |
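Example conversions matching the behaviour described in the docstring (split_time is assumed to return the hour and minute as ints; on any parse failure the input is returned unchanged):

print(military_to_english_time("08:33:55"))  # 8:33am
print(military_to_english_time("22:33:42"))  # 10:33pm
print(military_to_english_time("garbage"))   # garbage (returned as-is)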
def parse_ssh_config(text):
"""
Parse an ssh-config output into a Python dict.
Because Windows doesn't have grep, lol.
"""
try:
lines = text.split('\n')
lists = [l.split(' ') for l in lines]
        # materialize the filter so the elements can be indexed below (filter() is lazy on Python 3)
        lists = [list(filter(None, l)) for l in lists]
tuples = [(l[0], ''.join(l[1:]).strip().strip('\r')) for l in lists]
return dict(tuples)
except IndexError:
raise Exception("Malformed input") | 7441c39e5ca9127871316d98a6fe195ed1da6522 | 3,654,362 |
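A usage sketch with output in the style of `vagrant ssh-config` (values invented):

sample = "HostName 127.0.0.1\nUser vagrant\nPort 2222"
cfg = parse_ssh_config(sample)
print(cfg["HostName"], cfg["Port"])  # 127.0.0.1 2222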
import re
def snake_case(string: str) -> str:
"""Convert upper camelcase to snake case."""
return re.sub(r"(?<!^)(?=[A-Z])", "_", string).lower() | fe8592bcfa1f2233a07308741de5f912fd7055b3 | 3,654,363 |
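For example (note that runs of capitals are split letter by letter):

print(snake_case("CamelCase"))        # camel_case
print(snake_case("HTTPServerError"))  # h_t_t_p_server_error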
import tempfile
import atexit
def create_tempdir(suffix='', prefix='tmp', directory=None, delete=True):
"""Create a tempdir and return the path.
This function registers the new temporary directory
for deletion with the atexit module.
"""
tempd = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=directory)
if delete:
atexit.register(_cleanup_tempdir, tempd)
return tempd | f0c9b6b3a9198d1e552e5fce838113239021a4fd | 3,654,365 |
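A short usage sketch:

workdir = create_tempdir(prefix="build-")   # removed automatically at interpreter exit
keepdir = create_tempdir(delete=False)      # caller is responsible for cleanup
print(workdir, keepdir)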
import binascii
async def get_transactor_key(request):
"""Get transactor key out of request."""
id_dict = deserialize_api_key(
request.app.config.SECRET_KEY, extract_request_token(request)
)
next_id = id_dict.get("id")
auth_data = await get_auth_by_next_id(next_id)
encrypted_private_key = auth_data.get("encrypted_private_key")
private_key = decrypt_private_key(
request.app.config.AES_KEY, next_id, encrypted_private_key
)
hex_private_key = binascii.hexlify(private_key)
return Key(hex_private_key), next_id | a1766e70ad076eaeb7d19509aeffbb729869df51 | 3,654,366 |
def _get_plot_aeff_exact_to_ground_energy(parsed_ncsd_out_files):
"""Returns a list of plots in the form
(xdata, ydata, const_list, const_dict),
where A=Aeff is xdata, and ground energy is ydata
"""
a_aeff_to_ground_state_energy = get_a_aeff_to_ground_state_energy_map(
parsed_ncsd_out_files=parsed_ncsd_out_files)
a_to_ground_state_energy = dict()
for a_aeff, e in a_aeff_to_ground_state_energy.items():
if a_aeff[0] == a_aeff[1]:
a_to_ground_state_energy[a_aeff[0]] = e
return map_to_arrays(a_to_ground_state_energy) + (list(), dict()) | e4224d43808e9ef0f43bc32041ef567138853bdb | 3,654,367 |
def get_twitter_auth():
"""Setup Twitter connection
return: API object"""
parameters = set_parameters.take_auth_data()
twitter_access_token = parameters['twitter_access_token']
twitter_secret_token = parameters['twitter_secret_token']
twitter_api_key = parameters['twitter_api_key']
twitter_secret_key = parameters['twitter_secret_key']
auth = OAuthHandler(twitter_api_key, twitter_secret_key)
auth.set_access_token(twitter_access_token, twitter_secret_token)
return auth | 1bb6ef2660adf25935f844c29e7e1dae3e674937 | 3,654,368 |
import re
import logging
def pre_process_string_data(item: dict):
"""
remove extra whitespaces, linebreaks, quotes from strings
:param item: dictionary with data for analysis
:return: cleaned item
"""
try:
result_item = {key: item[key] for key in KEYS + ['_id']}
for prop in result_item:
if type(result_item[prop]) is str and prop != '_id':
                # chain the clean-up steps on result_item[prop] so each builds on the previous one
                result_item[prop] = re.sub(' +', ' ', result_item[prop])
                result_item[prop] = re.sub('\n', ' ', result_item[prop])
                result_item[prop] = result_item[prop].strip().strip('"').strip("'").lower().strip()
return result_item
except KeyError:
logging.warning("Wrong formed entity with id %s", item['_id'])
return None | 32c4218c0e02580ea90a75f117d8b822239ee6d1 | 3,654,369 |
def remove_cmds_from_title(title):
"""
Função que remove os comandos colocados nos títulos
apenas por uma questão de objetividade no título
"""
arr = title.split()
output = " ".join(list(filter(lambda x: x[0] != "!", arr)))
return output | bfaa96aa578455f977549b737a8492afa80e1e7c | 3,654,370 |
def load_config(file_path):
"""Loads the config file into a config-namedtuple
Parameters:
input (pathlib.Path):
takes a Path object for the config file. It does not correct any
relative path issues.
Returns:
(namedtuple -- config):
Contains two sub-structures (run, plot) that will return a
dictionary of configuration options. You can get your desired
config-dictionary via `config.run` or `config.plot`.
"""
with open(file_path) as f:
return config(**loads(f.read())) | 82664fa4e27fd60ae56c435b3deb45cb7535bc17 | 3,654,371 |
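A usage sketch, assuming `loads` is json.loads and `config` is a namedtuple with `run` and `plot` fields, as the docstring implies; the file name is hypothetical:

from pathlib import Path

cfg = load_config(Path("settings.json"))
run_options = cfg.run    # dict of run-time options
plot_options = cfg.plot  # dict of plotting options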
def parse_version_number(raw_version_number):
# type: (str) -> Tuple[int, int, int]
"""
Parse a valid "INT.INT.INT" string, or raise an
Exception. Exceptions are handled by caller and
mean invalid version number.
"""
converted_version_number = [int(part) for part in raw_version_number.split(".")]
if len(converted_version_number) != 3:
        raise ValueError(
            "Invalid version number %r, parsed as %r"
            % (raw_version_number, converted_version_number)
        )
# Make mypy happy
version_number = (
converted_version_number[0],
converted_version_number[1],
converted_version_number[2],
)
return version_number | a899d29790ce03d28e7acb11c87f38890501d462 | 3,654,372 |
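For example:

print(parse_version_number("1.25.3"))  # (1, 25, 3)
parse_version_number("1.25")           # raises ValueError: not exactly three parts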
def get_error_directory_does_not_exists(dir_kind):
"""dir kind = [dir, file ,url]"""
return f"Error: Directory with {dir_kind} does not exist:" | 171fb09ab341daf2810612f2cc7c077b5326f347 | 3,654,373 |
def var_text(vname, iotype, variable):
"""
Extract info from variable for vname of iotype
and return info as HTML string.
"""
if iotype == 'read':
txt = '<p><i>Input Variable Name:</i> <b>{}</b>'.format(vname)
if 'required' in variable:
txt += '<br><b><i>Required Input Variable</i></b>'
else:
txt = '<p><i>Output Variable Name:</i> <b>{}</b>'.format(vname)
txt += '<br><i>Description:</i> {}'.format(variable['desc'])
txt += '<br><i>Datatype:</i> {}'.format(variable['type'])
if iotype == 'read':
txt += '<br><i>Availability:</i> {}'.format(variable['availability'])
txt += '<br><i>IRS Form Location:</i>'
formdict = variable['form']
for yrange in sorted(formdict.keys()):
txt += '<br>{}: {}'.format(yrange, formdict[yrange])
txt += '</p>'
return txt | 04fdb1727c8eb783f7fb2c0324852e80673e8b77 | 3,654,374 |
def line_search_reset(binary_img, left_lane, right_line):
"""
#---------------------
# After applying calibration, thresholding, and a perspective transform to a road image,
# I have a binary image where the lane lines stand out clearly.
# However, I still need to decide explicitly which pixels are part of the lines
# and which belong to the left line and which belong to the right line.
#
# This lane line search is done using histogram and sliding window
#
# The sliding window implementation is based on lecture videos.
#
# This function searches lines from scratch, i.e. without using info from previous lines.
# However, the search is not entirely a blind search, since I am using histogram information.
#
# Use Cases:
# - Use this function on the first frame
# - Use when lines are lost or not detected in previous frames
#
"""
# I first take a histogram along all the columns in the lower half of the image
histogram = np.sum(binary_img[int(binary_img.shape[0] / 2):, :], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_img, binary_img, binary_img)) * 255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0] / 2)
leftX_base = np.argmax(histogram[:midpoint])
rightX_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
num_windows = 9
# Set height of windows
window_height = np.int(binary_img.shape[0] / num_windows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
current_leftX = leftX_base
current_rightX = rightX_base
# Set minimum number of pixels found to recenter window
min_num_pixel = 50
# Create empty lists to receive left and right lane pixel indices
win_left_lane = []
win_right_lane = []
window_margin = left_lane.window_margin
# Step through the windows one by one
for window in range(num_windows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_img.shape[0] - (window + 1) * window_height
win_y_high = binary_img.shape[0] - window * window_height
win_leftx_min = current_leftX - window_margin
win_leftx_max = current_leftX + window_margin
win_rightx_min = current_rightX - window_margin
win_rightx_max = current_rightX + window_margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_leftx_min, win_y_low), (win_leftx_max, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_rightx_min, win_y_low), (win_rightx_max, win_y_high), (0, 255, 0), 2)
# Identify the nonzero pixels in x and y within the window
left_window_inds = ((nonzeroy >= win_y_low) & (nonzeroy <= win_y_high) & (nonzerox >= win_leftx_min) & (
nonzerox <= win_leftx_max)).nonzero()[0]
right_window_inds = ((nonzeroy >= win_y_low) & (nonzeroy <= win_y_high) & (nonzerox >= win_rightx_min) & (
nonzerox <= win_rightx_max)).nonzero()[0]
# Append these indices to the lists
win_left_lane.append(left_window_inds)
win_right_lane.append(right_window_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(left_window_inds) > min_num_pixel:
current_leftX = np.int(np.mean(nonzerox[left_window_inds]))
if len(right_window_inds) > min_num_pixel:
current_rightX = np.int(np.mean(nonzerox[right_window_inds]))
# Concatenate the arrays of indices
win_left_lane = np.concatenate(win_left_lane)
win_right_lane = np.concatenate(win_right_lane)
# Extract left and right line pixel positions
    leftx = nonzerox[win_left_lane]
lefty = nonzeroy[win_left_lane]
rightx = nonzerox[win_right_lane]
righty = nonzeroy[win_right_lane]
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
left_lane.current_fit = left_fit
right_line.current_fit = right_fit
# Generate x and y values for plotting
ploty = np.linspace(0, binary_img.shape[0] - 1, binary_img.shape[0])
# ax^2 + bx + c
left_plotx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_plotx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
left_lane.prevx.append(left_plotx)
right_line.prevx.append(right_plotx)
if len(left_lane.prevx) > 10:
left_avg_line = smoothing(left_lane.prevx, 10)
left_avg_fit = np.polyfit(ploty, left_avg_line, 2)
left_fit_plotx = left_avg_fit[0] * ploty ** 2 + left_avg_fit[1] * ploty + left_avg_fit[2]
left_lane.current_fit = left_avg_fit
left_lane.allx, left_lane.ally = left_fit_plotx, ploty
else:
left_lane.current_fit = left_fit
left_lane.allx, left_lane.ally = left_plotx, ploty
if len(right_line.prevx) > 10:
right_avg_line = smoothing(right_line.prevx, 10)
right_avg_fit = np.polyfit(ploty, right_avg_line, 2)
right_fit_plotx = right_avg_fit[0] * ploty ** 2 + right_avg_fit[1] * ploty + right_avg_fit[2]
right_line.current_fit = right_avg_fit
right_line.allx, right_line.ally = right_fit_plotx, ploty
else:
right_line.current_fit = right_fit
right_line.allx, right_line.ally = right_plotx, ploty
left_lane.startx, right_line.startx = left_lane.allx[len(left_lane.allx)-1], right_line.allx[len(right_line.allx)-1]
left_lane.endx, right_line.endx = left_lane.allx[0], right_line.allx[0]
# Set detected=True for both lines
left_lane.detected, right_line.detected = True, True
measure_curvature(left_lane, right_line)
return out_img | d810c111bcf5731f7c4486c77863c3505d8400a8 | 3,654,375 |
def get_primary_language(current_site=None):
"""Fetch the first language of the current site settings."""
current_site = current_site or Site.objects.get_current()
return get_languages()[current_site.id][0]['code'] | c4d71c30424bb753de353e325a012efb9265a01b | 3,654,376 |
def get_Theta_ref_cnd_H(Theta_sur_f_hex_H):
"""(23)
Args:
        Theta_sur_f_hex_H: surface temperature of the indoor-unit heat exchanger during heating (℃)
    Returns:
        refrigerant condensing temperature during heating (℃)
"""
Theta_ref_cnd_H = Theta_sur_f_hex_H
if Theta_ref_cnd_H > 65:
Theta_ref_cnd_H = 65
return Theta_ref_cnd_H | deccaa524aebda2a7457da53b44c517287a190a4 | 3,654,377 |
def hpat_pandas_series_shape(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.shape
Examples
--------
.. literalinclude:: ../../../examples/series/series_shape.py
:language: python
:lines: 27-
:caption: Return a tuple of the shape of the underlying data.
:name: ex_series_shape
.. command-output:: python ./series/series_shape.py
:cwd: ../../../examples
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series attribute :attr:`pandas.Series.shape` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shape1
"""
_func_name = 'Attribute shape.'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
def hpat_pandas_series_shape_impl(self):
return self._data.shape
return hpat_pandas_series_shape_impl | 6c27e6276caecaea18650398678d04623ddcc653 | 3,654,379 |
async def port_utilization_range(
port_id: str, direction: str, limit: int, start: str, granularity: int, end=None,
):
"""Get port utilization by date range."""
async with Influx("telegraf", granularity=granularity) as db:
q = (
db.SELECT(f"derivative(max(bytes{direction.title()}), 1s) * 8")
.FROM("interfaces")
.BETWEEN(start, end)
.WHERE(port_id=port_id)
.GROUP("port_id", "participant_id")
.FILL("none")
.LIMIT(limit)
)
return await q.query() | 2d2ac7ad32ee279f88d662bd8f099ccee0407b66 | 3,654,380 |
def composer_includes(context):
"""
Include the composer JS and CSS files in a page if the user has permission.
"""
if context.get('can_compose_permission', False):
url = settings.STATIC_URL
url += '' if url[-1] == '/' else '/'
js = '<script type="text/javascript" src="%sjs/composer.min.js"></script>' % url
css = '<link rel="stylesheet" type="text/css" href="%scss/composer.css">' % url
return js + css
return '' | 7c0a89a5ce1e1fe5838e8022fe568347420ffb0f | 3,654,381 |
def craft(crafter, recipe_name, *inputs, raise_exception=False, **kwargs):
"""
Access function. Craft a given recipe from a source recipe module. A
recipe module is a Python module containing recipe classes. Note that this
requires `settings.CRAFT_RECIPE_MODULES` to be added to a list of one or
more python-paths to modules holding Recipe-classes.
Args:
crafter (Object): The one doing the crafting.
recipe_name (str): The `CraftRecipe.name` to use. This uses fuzzy-matching
if the result is unique.
*inputs: Suitable ingredients and/or tools (Objects) to use in the crafting.
raise_exception (bool, optional): If crafting failed for whatever
reason, raise `CraftingError`. The user will still be informed by the
recipe.
**kwargs: Optional kwargs to pass into the recipe (will passed into
recipe.craft).
Returns:
list: Crafted objects, if any.
Raises:
CraftingError: If `raise_exception` is True and crafting failed to
produce an output. KeyError: If `recipe_name` failed to find a
matching recipe class (or the hit was not precise enough.)
Notes:
If no recipe_module is given, will look for a list `settings.CRAFT_RECIPE_MODULES` and
lastly fall back to the example module `"evennia.contrib."`
"""
# delayed loading/caching of recipes
_load_recipes()
RecipeClass = _RECIPE_CLASSES.get(recipe_name, None)
if not RecipeClass:
# try a startswith fuzzy match
matches = [key for key in _RECIPE_CLASSES if key.startswith(recipe_name)]
if not matches:
# try in-match
matches = [key for key in _RECIPE_CLASSES if recipe_name in key]
if len(matches) == 1:
            # map the matched key back to its recipe class (matches holds key strings)
            RecipeClass = _RECIPE_CLASSES[matches[0]]
if not RecipeClass:
raise KeyError(
f"No recipe in settings.CRAFT_RECIPE_MODULES has a name matching {recipe_name}"
)
recipe = RecipeClass(crafter, *inputs, **kwargs)
return recipe.craft(raise_exception=raise_exception) | 860b839123394f2ba210b4cfdcb40a57595701a3 | 3,654,382 |
from typing import Iterable
from typing import Union
from typing import List
from typing import Any
from typing import Dict
import collections
def load_data(
data,
*,
keys: Iterable[Union[str, int]] = (0,),
unique_keys: bool = False,
multiple_values: bool = False,
unique_values: bool = False,
**kwargs,
) -> Union[List[Any], Dict[Any, Union[Any, List[Any]]]]:
"""Load data.
If no values are provided, then return a list from keys.
If values are provided, then return a dictionary of keys/values.
Args:
data (str): File or buffer.
See Pandas 'filepath_or_buffer' option from 'read_csv()'.
Kwargs:
keys (Iterable[str|int]): Columns to use as dictionary keys.
Multiple keys are stored as tuples in same order as given.
If str, then it corresponds to 'headers' names.
If int, then it corresponds to column indices.
unique_keys (bool): Control if keys can be repeated or not.
Only applies if 'values' is None.
multiple_values (bool): Specify if values consist of single or multiple
elements. For multi-value case, values are placed in an iterable
container. For single-value case, the value is used as-is.
Only applies if 'values' is not None.
unique_values (bool): Control if values can be repeated or not.
Only applies if 'multiple_values' is True.
Kwargs: Options forwarded to 'iload_data()'.
"""
if kwargs.get('values') is None:
if unique_keys:
# NOTE: Convert to a list because JSON does not serializes sets.
_data = list(set(iload_data(data, keys=keys, **kwargs)))
else:
_data = list(iload_data(data, keys=keys, **kwargs))
elif multiple_values:
if unique_values:
_data = collections.defaultdict(list)
for k, v in iload_data(data, keys=keys, **kwargs):
if v not in _data[k]:
_data[k].append(v)
else:
_data = collections.defaultdict(list)
for k, v in iload_data(data, keys=keys, **kwargs):
_data[k].append(v)
else:
# Consider the value of the first appearance of a key.
_data = {}
for k, v in iload_data(data, keys=keys, **kwargs):
if k not in _data:
_data[k] = v
return _data | ad3a5f74a0bbbfbf3de62f691be5b27b63fa9949 | 3,654,383 |
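A usage sketch of the three return shapes, assuming a CSV with 'id' and 'name' columns and that the underlying `iload_data` accepts a `values` keyword as used above (file and column names are illustrative):

# list of keys only
ids = load_data("records.csv", keys=("id",), unique_keys=True)

# single-value mapping: the first value seen per key wins
id_to_name = load_data("records.csv", keys=("id",), values=("name",))

# multi-value mapping with duplicate values removed
id_to_names = load_data(
    "records.csv", keys=("id",), values=("name",),
    multiple_values=True, unique_values=True,
)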
def get_avg_wind_speed(data):
"""this function gets the average wind speeds for each point in the fetched data"""
wind_speed_history = []
for point in data:
this_point_wind_speed = []
for year_reading in point:
hourly = []
for hour in year_reading['weather'][0]['hourly']:
hourly.append(float(hour['windspeedKmph']))
this_point_wind_speed.append(float(np.average(hourly)))
wind_speed_history.append(np.flip(this_point_wind_speed))
return wind_speed_history | fdeeb64f495343893ffc98997de2bad5748591c2 | 3,654,384 |
from typing import List
def get_uris_of_class(repository: str, endpoint: str, sparql_file: str, class_name: str, endpoint_type: str,
limit: int = 1000) -> List[URIRef]:
"""
Returns the list of uris of type class_name
:param repository: The repository containing the RDF data
:param endpoint: The SPARQL endpoint
:param sparql_file: The file containing the SPARQL query
:param class_name: The class_name to search
:param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
:param limit: The sparql query limit
:return: The list of uris of type class_name
"""
uri_list = []
uris_of_class_sparql_query = open(sparql_file).read()
uris_of_class_template = Template(uris_of_class_sparql_query).substitute(class_name=class_name)
uris_of_class_template = Template(uris_of_class_template + " limit $limit offset $offset ")
for uri in get_sparql_results(uris_of_class_template, "uri", endpoint, repository,
endpoint_type, limit):
uri_list.append(uri)
if len(uri_list) % 1000 == 0:
print(len(uri_list))
return uri_list | 7b5cf86d286afd00d40e202e98661be3668364c3 | 3,654,385 |
def nspath_eval(xpath: str) -> str:
"""
Return an etree friendly xpath based expanding namespace
into namespace URIs
:param xpath: xpath string with namespace prefixes
:returns: etree friendly xpath
"""
out = []
for chunks in xpath.split('/'):
namespace, element = chunks.split(':')
out.append('{{{}}}{}'.format(NAMESPACES[namespace], element))
return '/'.join(out) | 6e5e558da8d00d57ee1857bce2b8c99d05386c73 | 3,654,386 |
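For example, given a module-level NAMESPACES mapping such as the one below (the URIs shown are the standard ones for these prefixes):

NAMESPACES = {
    'csw': 'http://www.opengis.net/cat/csw/2.0.2',
    'dc': 'http://purl.org/dc/elements/1.1/',
}

print(nspath_eval('csw:Record/dc:title'))
# {http://www.opengis.net/cat/csw/2.0.2}Record/{http://purl.org/dc/elements/1.1/}title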
def basic_streamalert_config():
"""Generate basic StreamAlert configuration dictionary."""
return {
'global': {
'account': {
'aws_account_id': '123456789123',
'kms_key_alias': 'stream_alert_secrets',
'prefix': 'unit-testing',
'region': 'us-west-2'
},
'terraform': {
'tfstate_bucket': 'unit-testing.streamalert.terraform.state',
'tfstate_s3_key': 'stream_alert_state/terraform.tfstate',
'tfvars': 'terraform.tfvars'
},
'infrastructure': {
'monitoring': {
'create_sns_topic': True,
'metric_alarms': {
'rule_processor': {
'Aggregate Unit Testing Failed Parses Alarm': {
'alarm_description': '',
'comparison_operator': 'GreaterThanOrEqualToThreshold',
'evaluation_periods': 1,
'metric_name': 'RuleProcessor-FailedParses',
'period': 300,
'statistic': 'Sum',
'threshold': 1.0
}
}
}
}
}
},
'lambda': {
'alert_processor_config': {
'handler': 'stream_alert.alert_processor.main.handler',
'source_bucket': 'unit-testing.streamalert.source',
'source_current_hash': '<auto_generated>',
'source_object_key': '<auto_generated>',
'third_party_libraries': []
},
'rule_processor_config': {
'handler': 'stream_alert.rule_processor.main.handler',
'source_bucket': 'unit-testing.streamalert.source',
'source_current_hash': '<auto_generated>',
'source_object_key': '<auto_generated>',
'third_party_libraries': [
'jsonpath_rw',
'netaddr'
]
},
'athena_partition_refresh_config': {
'current_version': '$LATEST',
'enable_metrics': False,
'enabled': True,
'handler': 'main.handler',
'memory': 128,
'partitioning': {
'firehose': {},
'normal': {
'unit-testing.streamalerts': 'alerts'
}
},
'source_bucket': 'unit-testing.streamalert.source',
'source_current_hash': '<auto_generated>',
'source_object_key': '<auto_generated>',
'third_party_libraries': [
'backoff'
],
'timeout': 60
},
},
'clusters': {
'prod': {
'id': 'prod',
'modules': {
'cloudwatch_monitoring': {
'enabled': True
},
'kinesis': {
'firehose': {
'enabled': True,
's3_bucket_suffix': 'streamalert.results'
},
'streams': {
'retention': 24,
'shards': 1
}
},
'kinesis_events': {
'enabled': True
},
'stream_alert': {
'alert_processor': {
'current_version': '$LATEST',
'memory': 128,
'timeout': 10
},
'rule_processor': {
'current_version': '$LATEST',
"enable_metrics": True,
'memory': 128,
'metric_alarms': {
'Prod Unit Testing Failed Parses Alarm': {
'alarm_description': '',
'comparison_operator': 'GreaterThanOrEqualToThreshold',
'evaluation_periods': 1,
'metric_name': 'RuleProcessor-FailedParses-PROD',
'period': 300,
'statistic': 'Sum',
'threshold': 1.0
}
},
'timeout': 10
}
}
},
'outputs': {
'kinesis': [
'username',
'access_key_id',
'secret_key'
]
},
'region': 'us-east-1'
}
}
} | 8e766fa73c9043888c6531659bccc57fcb1a88ea | 3,654,387 |
def _read_elastic_moduli(outfilename):
"""
Read elastic modulus matrix from a completed GULP job
:param outfilename: Path of the stdout from the GULP job
:type outfilename: str
:returns: 6x6 Elastic modulus matrix in GPa
"""
outfile = open(outfilename,'r')
moduli_array = []
while True:
oneline = outfile.readline()
if not oneline: # break at EOF
break
if 'Elastic Constant Matrix' in oneline:
moduli = np.zeros((6,6))
dummyline = outfile.readline()
dummyline = outfile.readline()
dummyline = outfile.readline()
dummyline = outfile.readline()
for i in range(6):
modline = outfile.readline().strip()
e1, e2, e3, e4, e5, e6 = modline[3:13], modline[13:23], modline[23:33], modline[33:43], modline[43:53], modline[53:63]
modarray = [e1,e2,e3,e4,e5,e6]
float_modarray = []
# Handle errors
for element in modarray:
if element[0] == "*":
float_modarray.append(0.0)
else:
float_modarray.append(float(element))
moduli[i,:] = float_modarray
moduli_array.append(moduli)
outfile.close()
return moduli_array | d09672135bed16aa651bbe5befe526e21763fc1b | 3,654,388 |
def predict_koopman(lam, w, v, x0, ncp, g, h, u=None):
"""Predict the future dynamics of the system given an initial value `x0`. Result is returned
as a matrix where rows correspond to states and columns to time.
Args:
lam (tf.Tensor): Koopman eigenvalues.
w (tf.Tensor): Left eigenvectors.
v (tf.Tensor): Right eigenvectors.
x0 (tf.Tensor): Initial value of the system.
        ncp (int): Number of time steps to predict.
g (Net): Encoder network.
h (Net): Decoder network.
u (tf.Tensor): Input signal.
Returns:
tuple: Prediction of the states of the system for N time steps into the future,
prediction of the observables of the system for N time steps into the future.
"""
# Precompute some constants for more efficient computations
wH = tf.linalg.adjoint(w)
norm_vec = 1/tf.math.reduce_sum(tf.math.multiply(tf.math.conj(w),v), axis=0)
# Store each time step in a list
res_x = tf.TensorArray(x0.dtype,size=ncp+1)
res_gx = tf.TensorArray(w.dtype,size=ncp+1)
res_x = res_x.write(0,x0)
res_gx = res_gx.write(0,tf.cast(tf.squeeze(g(tf.expand_dims(x0,0)),axis=[0]), w.dtype))
# Initiate time stepping
xk = x0
if u is not None:
for k in range(1,ncp+1):
xk = tf.concat([tf.expand_dims(xk[:-1],0),tf.reshape(u[k-1],[1,-1])],axis=1)
xk, gxk = one_step_pred(lam, wH, v, norm_vec, xk, g, h)
res_x = res_x.write(k,xk)
res_gx = res_gx.write(k,gxk)
else:
for k in range(1,ncp+1):
xk = tf.expand_dims(xk,0)
xk, gxk = one_step_pred(lam, wH, v, norm_vec, xk, g, h)
res_x = res_x.write(k,xk)
res_gx = res_gx.write(k,gxk)
return res_x.stack(), res_gx.stack() | 8509a96a5566f69ac238827538591ff9fcf34269 | 3,654,389 |
def handle_registration():
""" Show the registration form or handles the registration
of a user, if the email or username is taken, take them back to the
registration form
- Upon successful login, take to the homepage
"""
form = RegisterForm()
email = form.email.data
username = form.username.data
# If there is a user with this email already
if User.query.filter_by(email=email).first():
form.email.errors = ["This email is already being used"]
# Check if there is a user with this username already
if User.query.filter_by(username=username).first():
form.username.errors = ["This username is already being used"]
if form.email.errors or form.username.errors:
return render_template('login_register/register.html', form=form)
if form.validate_on_submit():
pwd = form.password.data
f_name = form.first_name.data
l_name = form.last_name.data
user = User.register(username=username,
pwd=pwd,
email=email,
f_name=f_name,
l_name=l_name)
db.session.add(user)
db.session.commit()
login_user(user)
        flash('Successfully logged in!', "success")
        # on successful registration, redirect to the homepage
return redirect(url_for("homepage.index"))
else:
return render_template("login_register/register.html", form=form) | 27ce2a38202ea5873c53bc53fd5d2843515177cf | 3,654,390 |