content | sha1 | id
---|---|---
stringlengths 35 to 762k | stringlengths 40 to 40 | int64 0 to 3.66M
def SamAng(Tth,Gangls,Sangl,IFCoup):
"""Compute sample orientation angles vs laboratory coord. system
:param Tth: Signed theta
:param Gangls: Sample goniometer angles phi,chi,omega,azmuth
:param Sangl: Sample angle zeros om-0, chi-0, phi-0
:param IFCoup: True if omega & 2-theta coupled in CW scan
:returns:
psi,gam: Sample odf angles
dPSdA,dGMdA: Angle zero derivatives
"""
if IFCoup:
GSomeg = sind(Gangls[2]+Tth)
GComeg = cosd(Gangls[2]+Tth)
else:
GSomeg = sind(Gangls[2])
GComeg = cosd(Gangls[2])
GSTth = sind(Tth)
GCTth = cosd(Tth)
GSazm = sind(Gangls[3])
GCazm = cosd(Gangls[3])
GSchi = sind(Gangls[1])
GCchi = cosd(Gangls[1])
GSphi = sind(Gangls[0]+Sangl[2])
GCphi = cosd(Gangls[0]+Sangl[2])
SSomeg = sind(Sangl[0])
SComeg = cosd(Sangl[0])
SSchi = sind(Sangl[1])
SCchi = cosd(Sangl[1])
AT = -GSTth*GComeg+GCTth*GCazm*GSomeg
BT = GSTth*GSomeg+GCTth*GCazm*GComeg
CT = -GCTth*GSazm*GSchi
DT = -GCTth*GSazm*GCchi
BC1 = -AT*GSphi+(CT+BT*GCchi)*GCphi
BC2 = DT-BT*GSchi
BC3 = AT*GCphi+(CT+BT*GCchi)*GSphi
BC = BC1*SComeg*SCchi+BC2*SComeg*SSchi-BC3*SSomeg
psi = acosd(BC)
BD = 1.0-BC**2
C = np.where(BD>1.e-6,rpd/np.sqrt(BD),0.)
dPSdA = [-C*(-BC1*SSomeg*SCchi-BC2*SSomeg*SSchi-BC3*SComeg),
-C*(-BC1*SComeg*SSchi+BC2*SComeg*SCchi),
-C*(-BC1*SSomeg-BC3*SComeg*SCchi)]
BA = -BC1*SSchi+BC2*SCchi
BB = BC1*SSomeg*SCchi+BC2*SSomeg*SSchi+BC3*SComeg
gam = atan2d(BB,BA)
BD = (BA**2+BB**2)/rpd
dBAdO = 0
dBAdC = -BC1*SCchi-BC2*SSchi
dBAdF = BC3*SSchi
dBBdO = BC1*SComeg*SCchi+BC2*SComeg*SSchi-BC3*SSomeg
dBBdC = -BC1*SSomeg*SSchi+BC2*SSomeg*SCchi
dBBdF = BC1*SComeg-BC3*SSomeg*SCchi
dGMdA = np.where(BD > 1.e-6,[(BA*dBBdO-BB*dBAdO)/BD,(BA*dBBdC-BB*dBAdC)/BD, \
(BA*dBBdF-BB*dBAdF)/BD],[np.zeros_like(BD),np.zeros_like(BD),np.zeros_like(BD)])
return psi,gam,dPSdA,dGMdA | 7b7efb995e9a3a2c659e5392c5c2b5b836b2750f | 3,650,696 |
def debye_C_V(T,thetaD,natoms):
"""
    Returns the heat capacity at constant volume, C_V, of the Debye model at a
given temperature, T, in meV/atom/K.
"""
C_V = 4*debye_func(thetaD/T)-3*(thetaD/T)/(sp.exp(thetaD/T)-1.)
C_V = 3*natoms*BOLTZCONST*C_V
return C_V | 08e708f9c9ce16ed5c54e6f1bc920dd4be81e31e | 3,650,697 |
def setup(clip=True, flip=True):
"""
Project specific data import and setup function
:param clip: bool - use clipping
:param flip: bool - use flipping
:return: data as pandas dataframe, List[LAICPMSData obj]
"""
# calibration
# Zn: y = 0.0395 kcps/(µg/g)* x + 1.308 kcps
# use inverse calibration function to get conc from counts; transformation m = 1/m and b = -1 * b/m
calibration_functions = {
'Zn:64': lambda x: 1/0.0395 * x - 1.308/0.0395,
}
# data files
filenames = ["../data/LA_Data_C1SA1.csv",
"../data/LA_Data_C2SA1.csv",
"../data/LA_Data_C3SA1.csv",
"../data/LA_Data_C4SA1.csv",
"../data/LA_Data_C1SB1.csv",
"../data/LA_Data_C2SB1.csv",
"../data/LA_Data_C3SB1.csv",
"../data/LA_Data_C4SB1.csv",
"../data/LA_Data_C1SC1.csv",
"../data/LA_Data_C2SC1.csv",
"../data/LA_Data_C3SC1.csv",
"../data/LA_Data_C4SC1.csv"]
# short sample names
smpl_names = ["A_1",
"A_2",
"A_3",
"A_4",
"B_1",
"B_2",
"B_3",
"B_4",
"C_1",
"C_2",
"C_3",
"C_4"]
# list on how to flip the data to get matching orientations, h = horizontally, v = vertically
if flip:
flip_list = [
'h',
'v',
'h',
'h',
'h',
'h',
'v',
'v',
'v',
'h',
'h',
'h'
]
else:
flip_list = ['no' for i in range(0, len(filenames))]
# clip data to windows of defined size
# main reason is comparability & tissue folds
if clip:
#100 px x 150 px
clip_list = [
(70,170,30,180),
(70,170,30,180),
(50,150,30,180),
(60,160,50,200),
(30,130,30,180),
(40,140,30,180),
(40,140,30,180),
(40,140,30,180),
(60,160,20,170),
(60,160,20,170),
(60,160,20,170),
(60,160,20,170),
]
else:
clip_list = [None for i in range(0, len(filenames))]
ms_data = []
data = []
# here the data gets processed into LAICPMSData objects - one per file
# data contains all Zn:64 data - masked/segmented based on P:31 content
for smpl, filename, clip, flip in zip(smpl_names, filenames, clip_list, flip_list):
curr_ms_data = LAICPMSData(filename=filename, clip_data_around_center=clip, flip=flip, pixel_dimensions=(15,15))
# only assign directly if you know what you are doing!
curr_ms_data._calibration_functions = calibration_functions
ms_data.append(curr_ms_data)
data.append(curr_ms_data.get_masked_data(element_list=['Zn:64'], discriminator='P:31', only_on_tissue=True))
data[-1]['sample'] = [smpl for i in range(0, len(data[-1]))]
return pd.concat(data, ignore_index=True), ms_data | 2f62f124512daff76b59e3e93a2b50de6d5ca0be | 3,650,698 |
def extract_file_type(file_location:str) -> str:
"""
    A function to return the type (extension) of a file
    -> file_location: str = location of a file as a string, ex : "C:\\abc\\abc\\file.xyz"
    ----
    => str: string of the file type, ex : "xyz"
"""
if not isinstance(file_location,str):
raise TypeError("file_location must be a string")
try:
return file_location.rsplit(".", 1)[1]
except IndexError:
raise ValueError(f"Invalid File Location : '{file_location}'") | 091930e1cd285822a6be402eb47ce0457e40b0db | 3,650,700 |
from re import compile as re_compile
def re_subm(pat, repl, string):
"""
Like re.sub, but returns the replacement _and_ the match object.
>>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
>>> t
'foooooolish'
>>> m.groups()
('oooooo',)
"""
class re_subm_proxy:
def __init__(self):
self.match = None
def __call__(self, match):
self.match = match
return ''
compiled_pat = re_compile(pat)
proxy = re_subm_proxy()
compiled_pat.sub(proxy.__call__, string)
return compiled_pat.sub(repl, string), proxy.match | 7ad761906ff43f8e552d9b96cc042490c45830c0 | 3,650,701 |
def search_view(request):
"""Get user's saved keywords from the database if they exist and render search page."""
if request.method == 'GET':
try:
query = request.dbsession.query(Keyword)
user_keywords = query.filter(Association.user_id == request.authenticated_userid, Association.keyword_id == Keyword.keyword)
except KeyError:
return HTTPFound
# except DBAPIError:
# raise DBAPIError(DB_ERR_MSG, content_type='text/plain', status=500)
keywords = [keyword.keyword for keyword in user_keywords]
if len(keywords) < 1:
return{'message': 'You do not have any keywords saved. Add one!'}
return{'keywords': user_keywords} | 5b8208704143d0ce5aaf4c379a5bea8c01d2d4b1 | 3,650,702 |
def upload_object(
self, key, body, metadata=None, acl=None, content_type="application/json"
):
"""Upload an arbitrary object to an S3 bucket.
Parameters
----------
    key : `str`
The Object's key identifier.
body : `str` or `bytes`
Object data
metadata : `dict`
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : `str`, optional
A pre-canned access control list. See
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#bucket
Default is `None`, meaning that no ACL is applied to the object.
content_type : `str`, optional
The object's content type. Default is 'application/json'
Returns
-------
S3 URI of the uploaded object: `str`
The location of the S3 object uploaded in the form:
s3://<S3_BUCKET>/<key>
    Notes
    -----
Boto3 will check these environment variables for credentials:
AWS_ACCESS_KEY_ID
The access key for your AWS account.
AWS_SECRET_ACCESS_KEY
The secret key for your AWS account.
"""
s3 = boto3.resource("s3")
object = s3.Object(S3_BUCKET, key)
args = {}
if metadata is not None:
args["Metadata"] = metadata
if acl is not None:
args["ACL"] = acl
if content_type is not None:
args["ContentType"] = content_type
self.update_state(state="STARTED")
object.put(Body=body, **args)
s3_uri = get_s3_uri(key)
return s3_uri | 3f1425544d8f29fa414995a316869480877464df | 3,650,703 |
def min_ui_count(proteins):
"""
Counts the minimum number of unique identifier peptides across all proteins
in a set
input:
proteins: list of protein sequences as strings ['protein_seq', ...]
output:
minimum number of unique identifier peptides across all proteins in
a set
"""
temp = []
for p in peptides_per_protein(unique_identifiers(proteins)):
temp.append(len(p[1]))
if len(proteins) > len(temp):
return 0
else:
return min(temp) | b5bc4300ce79bf680896c6902ea721d8e73316aa | 3,650,704 |
from functools import wraps
def check_normalize_py(method):
    """A wrapper that wraps a parameter checker around the original function (normalize operation written in Python)."""
@wraps(method)
def new_method(self, *args, **kwargs):
[mean, std], _ = parse_user_args(method, *args, **kwargs)
check_normalize_py_param(mean, std)
return method(self, *args, **kwargs)
return new_method | acee5b58192336499939de722a8ee20778112d14 | 3,650,705 |
def GetOS(build_id, builder_name, step_name, partial_match=False):
# pylint:disable=unused-argument
"""Returns the operating system in the step_metadata.
Args:
build_id (int): Build id of the build.
builder_name (str): Builder name of the build.
    step_name (str): The original step name used to get the step metadata.
    partial_match (bool): Whether to allow a partial match of the step name.
Returns:
The operating system if it exists, otherwise, None.
"""
step_metadata = GetStepMetadata(build_id, step_name, partial_match)
return step_metadata.get('dimensions',
{}).get('os') if step_metadata else None | 3bdbbabf551d29b0c5bd868dcc56df8fe77109ba | 3,650,706 |
from niftynet.application.label_driven_registration import SUPPORTED_INPUT
def __add_registration_args(parser):
"""
keywords defined for image registration
:param parser:
:return:
"""
parser.add_argument(
"--label_normalisation",
metavar='',
help="whether to map unique labels in the training set to "
"consecutive integers (the smallest label will be mapped to 0)",
type=str2boolean,
default=False)
parser = add_input_name_args(parser, SUPPORTED_INPUT)
return parser | a2b27e27d37e90a83769a86cda2dab12b1966724 | 3,650,707 |
from typing import Optional
def get_catalog_item(catalog_id: Optional[str] = None,
catalog_item_id: Optional[str] = None,
location: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCatalogItemResult:
"""
Gets a specific catalog item.
"""
__args__ = dict()
__args__['catalogId'] = catalog_id
__args__['catalogItemId'] = catalog_item_id
__args__['location'] = location
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:recommendationengine/v1beta1:getCatalogItem', __args__, opts=opts, typ=GetCatalogItemResult).value
return AwaitableGetCatalogItemResult(
category_hierarchies=__ret__.category_hierarchies,
description=__ret__.description,
item_attributes=__ret__.item_attributes,
item_group_id=__ret__.item_group_id,
product_metadata=__ret__.product_metadata,
tags=__ret__.tags,
title=__ret__.title) | b52b48b90d774d00dec62eb4aa34259aa5bbaf2e | 3,650,708 |
def get_ext_suffix() -> str:
"""Get the extension suffix"""
return get_config_vars()["EXT_SUFFIX"] | e66ff0e8de776e7ae9b17a754738376dd0105bfe | 3,650,709 |
import pandas as pd
def confusion_matrix(y_true, y_pred):
"""
Args:
y_true: pd.Series or array or list, ground truth (correct) labels.
y_pred: pd.Series or array or list, predicted labels, as returned by a classifier.
Returns:
Confusion matrix.
"""
t = pd.DataFrame({'actual':y_true, 'predict':y_pred})
t = pd.crosstab(t.predict, t.actual)
return t | fdbd1dae9354fc7358595c8a659c80eae19dc812 | 3,650,711 |
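A short usage sketch of the crosstab-based confusion matrix, assuming the function above is in scope; the labels are invented for illustration:

```python
# Hypothetical ground-truth and predicted labels
y_true = ['cat', 'dog', 'cat', 'dog', 'cat']
y_pred = ['cat', 'cat', 'cat', 'dog', 'dog']

print(confusion_matrix(y_true, y_pred))
# actual   cat  dog
# predict
# cat        2    1
# dog        1    1
```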
import torch
def encode_boxes(boxes, im_shape, encode=True, dim_position=64, wave_length=1000, normalize=False, quantify=-1):
""" modified from PositionalEmbedding in:
Args:
boxes: [bs, num_nodes, 4] or [num_nodes, 4]
im_shape: 2D tensor, [bs, 2] or [2], the size of image is represented as [width, height]
encode: bool, whether to encode the box
dim_position: int, the dimension for position embedding
wave_length: the wave length for the position embedding
normalize: bool, whether to normalize the embedded features
quantify: int, if it is > 0, it will be used to quantify the position of objects
"""
batch = boxes.dim() > 2
if not batch:
boxes = boxes.unsqueeze(dim=0)
im_shape = im_shape.unsqueeze(dim=0)
if quantify > 1:
boxes = boxes // quantify
# in this case, the last 2 dims of input data is num_samples and 4.
    # we compute the pairwise relative position embeddings for each box
if boxes.dim() == 3: # [bs, num_sample, 4]
# in this case, the boxes should be tlbr: [x1, y1, x2, y2]
device = boxes.device
bs, num_sample, pos_dim = boxes.size(0), boxes.size(1), boxes.size(2) # pos_dim should be 4
x_min, y_min, x_max, y_max = torch.chunk(boxes, 4, dim=2) # each has the size [bs, num_sample, 1]
# handle some invalid box
x_max[x_max<x_min] = x_min[x_max<x_min]
y_max[y_max<y_min] = y_min[y_max<y_min]
cx_a = (x_min + x_max) * 0.5 # [bs, num_sample_a, 1]
cy_a = (y_min + y_max) * 0.5 # [bs, num_sample_a, 1]
w_a = (x_max - x_min) + 1. # [bs, num_sample_a, 1]
h_a = (y_max - y_min) + 1. # [bs, num_sample_a, 1]
cx_b = cx_a.view(bs, 1, num_sample) # [bs, 1, num_sample_b]
cy_b = cy_a.view(bs, 1, num_sample) # [bs, 1, num_sample_b]
w_b = w_a.view(bs, 1, num_sample) # [bs, 1, num_sample_b]
h_b = h_a.view(bs, 1, num_sample) # [bs, 1, num_sample_b]
delta_x = ((cx_b - cx_a) / w_a).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
delta_y = ((cy_b - cy_a) / h_a).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
delta_w = torch.log(w_b / w_a).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
delta_h = torch.log(h_b / h_a).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
relative_pos = torch.cat((delta_x, delta_y, delta_w, delta_h), dim=-1) # [bs, num_sample_a, num_sample_b, 4]
# if im_shape is not None:
im_shape = im_shape.unsqueeze(dim=-1) # [bs, 2, 1]
im_width, im_height = torch.chunk(im_shape, 2, dim=1) # each has the size [bs, 1, 1]
x = ((cx_b - cx_a) / im_width).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
y = ((cy_b - cy_a) / im_height).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
# w = ((w_b + w_a) / (2 * im_width)).unsqueeze(dim=-1) - 0.5 # [bs, num_sample_a, num_sample_b, 1]
# h = ((h_b + h_a) / (2 * im_height)).unsqueeze(dim=-1) - 0.5 # [bs, num_sample_a. num_sample_b, 1]
w = ((w_b - w_a) / im_width).unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 1]
h = ((h_b - h_a) / im_height).unsqueeze(dim=-1) # [bs, num_sample_a. num_sample_b, 1]
relative_pos = torch.cat((relative_pos, x, y, w, h), dim=-1) # [bs, num_sample_a, num_sample_b, 8]
if not encode:
embedding = relative_pos
else:
position_mat = relative_pos # [bs, num_sample_a, num_sample_b, 8]
pos_dim = position_mat.size(-1)
feat_range = torch.arange(dim_position / (2*pos_dim)).to(device) # [self.dim_position / 16]
dim_mat = feat_range / (dim_position / (2*pos_dim))
dim_mat = 1. / (torch.pow(wave_length, dim_mat)) # [self.dim_position / 16]
dim_mat = dim_mat.view(1, 1, 1, 1, -1) # [1, 1, 1, 1, self.dim_position / 16]
# position_mat = position_mat.view(bs, num_sample, num_sample, pos_dim, -1) # [bs, num_sample_a, num_sample_b, 4, 1]
position_mat = position_mat.unsqueeze(dim=-1) # [bs, num_sample_a, num_sample_b, 8, 1]
position_mat = 100. * position_mat # [bs, num_sample_a, num_sample_b, 8, 1]
mul_mat = position_mat * dim_mat # [bs, num_sample_a, num_sample_b, 8, dim_position / 16]
mul_mat = mul_mat.view(bs, num_sample, num_sample, -1) # [bs, num_sample_a, num_sample_b, dim_position / 2]
sin_mat = torch.sin(mul_mat)# [bs, num_sample_a, num_sample_b, dim_position / 2]
cos_mat = torch.cos(mul_mat)# [bs, num_sample_a, num_sample_b, dim_position / 2]
embedding = torch.cat((sin_mat, cos_mat), -1)# [bs, num_sample_a, num_sample_b, dim_position]
if normalize:
embedding = embedding / torch.clamp(torch.norm(embedding, dim=-1, p=2, keepdim=True), 1e-6)
else:
raise ValueError("Invalid input of boxes.")
if not batch: # 2D tensor, [num_boxes, 4]
embedding = embedding.squeeze(dim=0)
return relative_pos, embedding | 7bc8e2d858391c862538626ea7f3dcc291f807f6 | 3,650,713 |
from typing import List
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
def plot_t1(times: List[float], contrast: List[float], fname: str = None) -> Figure:
"""
Plot T1 relaxation figure along with laser delay time intervals
    :param times: relaxation delay times, unit: ns
:param contrast: contrast, range between 0 and 1
:param fname: if assigned, a '.png' file will be saved
:return: a matplotlib Figure instance
"""
fig = plt.figure()
plt.plot(times, contrast, 'o-')
plt.title('T1 Relaxometry')
plt.xlabel('Relaxation time (ns)')
plt.ylabel('Contrast $N_{sig}/N_{ref}$')
if fname is not None:
plt.savefig(fname, dpi=350)
return fig | fc5461fd2957b30624156e23693e5cd71788fa92 | 3,650,715 |
from pathlib import Path
import re
def inspect_project(dirpath=None):
"""Fetch various information about an already-initialized project"""
if dirpath is None:
dirpath = Path()
else:
dirpath = Path(dirpath)
def exists(*fname):
return Path(dirpath, *fname).exists()
if not exists("pyproject.toml"):
raise InvalidProjectError("Project is missing pyproject.toml file")
if not exists("setup.cfg"):
raise InvalidProjectError("Project is missing setup.cfg file")
if not exists("src"):
raise InvalidProjectError("Project does not have src/ layout")
cfg = read_configuration(str(dirpath / "setup.cfg"))
env = {
"name": cfg["metadata"]["name"],
"short_description": cfg["metadata"]["description"],
"author": cfg["metadata"]["author"],
"author_email": cfg["metadata"]["author_email"],
"python_requires": util.sort_specifier(cfg["options"]["python_requires"]),
"install_requires": cfg["options"].get("install_requires", []),
# Until <https://github.com/pypa/setuptools/issues/2575> is fixed, we
# have to determine versions via read_version() instead of
# read_configuration().
# "version": cfg["metadata"].get("version"),
"keywords": cfg["metadata"].get("keywords", []),
"supports_pypy3": False,
"default_branch": get_default_branch(dirpath),
}
# if env["version"] is None:
# raise InvalidProjectError("Cannot determine project version")
if cfg["options"].get("packages"):
env["is_flat_module"] = False
env["import_name"] = cfg["options"]["packages"][0]
env["version"] = read_version(
(dirpath / "src" / env["import_name"] / "__init__.py").resolve()
)
else:
env["is_flat_module"] = True
env["import_name"] = cfg["options"]["py_modules"][0]
env["version"] = read_version(
(dirpath / "src" / (env["import_name"] + ".py")).resolve()
)
env["python_versions"] = []
for clsfr in cfg["metadata"]["classifiers"]:
m = re.fullmatch(r"Programming Language :: Python :: (\d+\.\d+)", clsfr)
if m:
env["python_versions"].append(m.group(1))
if clsfr == "Programming Language :: Python :: Implementation :: PyPy":
env["supports_pypy3"] = True
env["commands"] = {}
try:
commands = cfg["options"]["entry_points"]["console_scripts"]
except KeyError:
pass
else:
for cmd in commands:
k, v = re.split(r"\s*=\s*", cmd, maxsplit=1)
env["commands"][k] = v
m = re.fullmatch(
r"https://github.com/([^/]+)/([^/]+)",
cfg["metadata"]["url"],
)
assert m, "Project URL is not a GitHub URL"
env["github_user"] = m.group(1)
env["repo_name"] = m.group(2)
if "Documentation" in cfg["metadata"]["project_urls"]:
m = re.fullmatch(
r"https?://([-a-zA-Z0-9]+)\.(?:readthedocs|rtfd)\.io",
cfg["metadata"]["project_urls"]["Documentation"],
)
assert m, "Documentation URL is not a Read the Docs URL"
env["rtfd_name"] = m.group(1)
else:
env["rtfd_name"] = env["name"]
toxcfg = ConfigParser(interpolation=None)
toxcfg.read(str(dirpath / "tox.ini")) # No-op when tox.ini doesn't exist
env["has_tests"] = toxcfg.has_section("testenv")
env["has_doctests"] = False
for pyfile in (dirpath / "src").rglob("*.py"):
if re.search(r"^\s*>>>\s+", pyfile.read_text(), flags=re.M):
env["has_doctests"] = True
break
env["has_typing"] = exists("src", env["import_name"], "py.typed")
env["has_ci"] = exists(".github", "workflows", "test.yml")
env["has_docs"] = exists("docs", "index.rst")
env["codecov_user"] = env["github_user"]
try:
with (dirpath / "README.rst").open(encoding="utf-8") as fp:
rdme = Readme.parse(fp)
except FileNotFoundError:
env["has_pypi"] = False
else:
for badge in rdme.badges:
m = re.fullmatch(
r"https://codecov\.io/gh/([^/]+)/[^/]+/branch/.+" r"/graph/badge\.svg",
badge.href,
)
if m:
env["codecov_user"] = m.group(1)
env["has_pypi"] = any(link["label"] == "PyPI" for link in rdme.header_links)
with (dirpath / "LICENSE").open(encoding="utf-8") as fp:
for line in fp:
m = re.match(r"^Copyright \(c\) (\d[-,\d\s]+\d) \w+", line)
if m:
env["copyright_years"] = list(intspan(m.group(1)))
break
else:
raise InvalidProjectError("Copyright years not found in LICENSE")
env["extra_testenvs"] = parse_extra_testenvs(
dirpath / ".github" / "workflows" / "test.yml"
)
return env | 2b4e503ea801f765997e4dd6b4ad1c6dee28acda | 3,650,717 |
def current_user(request):
"""
Returning the current user with data use of token
"""
serializer = UserSerializer(request.user)
return Response(serializer.data) | 301a3e3caabd2cafebf143bbee7ad81a6957ea3b | 3,650,718 |
import numpy
def getPercentileLevels(h, frac=[0.5, 0.65, 0.95, 0.975]):
"""
Return image levels that corresponds to given percentiles values
Uses the cumulative distribution of the sorted image density values
Hence this works also for any nd-arrays
inputs:
h array
outputs:
res array containing level values
keywords:
frac sample fractions (percentiles)
could be scalar or iterable
default: 50%, 65%, 95%, and 97.5%
"""
if getattr(frac, '__iter__', False):
return numpy.asarray( [getPercentileLevels(h, fk) for fk in frac])
assert( (frac >= 0.) & (frac <1.)), "Expecting a sample fraction in 'frac' and got %f" %frac
# flatten the array to a 1d list
val = h.ravel()
# inplace sort
val.sort()
#reverse order
rval = val[::-1]
#cumulative values
cval = rval.cumsum()
    #retrieve the largest index up to the fraction of the sample we want
ind = numpy.where(cval <= cval[-1]*float(frac))[0].max()
res = rval[ind]
del val, cval, ind, rval
return res | 126d16ab9358d9ec6e72dc653037d9235baef139 | 3,650,719 |
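A quick usage sketch, assuming `getPercentileLevels` above is importable; the random image is purely illustrative:

```python
import numpy as np

rng = np.random.default_rng(0)
img = np.abs(rng.normal(size=(100, 100)))  # fake density image

# Pass a copy: the function sorts a raveled view of its input in place
levels = getPercentileLevels(img.copy(), frac=[0.5, 0.9])
print(levels)  # two density levels enclosing ~50% and ~90% of the cumulative signal
```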
def _ParseSparse(data):
"""Concat sparse tensors together.
Args:
data: A dict of name -> Tensor.
Returns:
A single sparse tensor and a 1-D input spec Tensor.
Raises:
NotImplementedError: Combining dense and sparse tensors is not
supported.
ValueError: If data contains non-string Tensors.
"""
for k in sorted(data.keys()):
if not isinstance(data[k], sparse_tensor.SparseTensor):
raise NotImplementedError(
'Features should be either all sparse or all dense. Use a '
'feature engineering function to convert some of them.')
data_spec = [
constants.DATA_CATEGORICAL if data[data.keys()[0]].dtype == dtypes.string
else constants.DATA_FLOAT
]
return sparse_ops.sparse_concat(1, data.values()), data_spec | 9860442770c52b2ee029531cb18e3dbf8429bfb1 | 3,650,720 |
def get_spectra2(X, E_in, p_dict, outputs=None):
"""
Calls get_Efield() to get Electric field, then use Jones matrices
to calculate experimentally useful quantities.
Alias for the get_spectra2 method in libs.spectra.
Inputs:
detuning_range [ numpy 1D array ]
The independent variable and defines the detuning points over which to calculate. Values in MHz
E_in [ numpy 1/2D array ]
Defines the input electric field vector in the xyz basis. The z-axis is always the direction of propagation (independent of the magnetic field axis), and therefore the electric field should be a plane wave in the x,y plane. The array passed to this method should be in one of two formats:
(1) A 1D array of (Ex,Ey,Ez) which is the input electric field for all detuning values;
or
(2) A 2D array with dimensions (3,len(detuning_range)) - i.e. each detuning has a different electric field associated with it - which will happen on propagation through a birefringent/dichroic medium
p_dict [ dictionary ]
Dictionary containing all parameters (the order of parameters is therefore not important)
Dictionary keys:
Key DataType Unit Description
--- --------- ---- -----------
Elem str -- The chosen alkali element.
Dline str -- Specifies which D-line transition to calculate for (D1 or D2)
# Experimental parameters
Bfield float Gauss Magnitude of the applied magnetic field
T float Celsius Temperature used to calculate atomic number density
GammaBuf float MHz Extra lorentzian broadening (usually from buffer gas
but can be any extra homogeneous broadening)
shift float MHz A global frequency shift of the atomic resonance frequencies
DoppTemp float Celsius Temperature linked to the Doppler width (used for
independent Doppler width and number density)
Constrain bool -- If True, overides the DoppTemp value and sets it to T
# Elemental abundancies, where applicable
rb85frac float % percentage of rubidium-85 atoms
K40frac float % percentage of potassium-40 atoms
K41frac float % percentage of potassium-41 atoms
lcell float m length of the vapour cell
theta0 float degrees Linear polarisation angle w.r.t. to the x-axis
Pol float % Percentage of probe beam that drives sigma minus (50% = linear polarisation)
NOTE: If keys are missing from p_dict, default values contained in p_dict_defaults will be loaded.
outputs: an iterable (list,tuple...) of strings that defines which spectra are returned, and in which order.
If not specified, defaults to None, in which case a default set of outputs is returned, which are:
S0, S1, S2, S3, Ix, Iy, I_P45, I_M45, alphaPlus, alphaMinus, alphaZ
Returns:
A list of output arrays as defined by the 'outputs' keyword argument.
Example usage:
To calculate the room temperature absorption of a 75 mm long Cs reference cell in an applied magnetic field of 100 G aligned along the direction of propagation (Faraday geometry), between -10 and +10 GHz, with an input electric field aligned along the x-axis:
detuning_range = np.linspace(-10,10,1000)*1e3 # GHz to MHz conversion
E_in = np.array([1,0,0])
p_dict = {'Elem':'Cs', 'Dline':'D2', 'Bfield':100, 'T':21, 'lcell':75e-3}
[Transmission] = calculate(detuning_range,E_in,p_dict,outputs=['S0'])
"""
# get some parameters from p dictionary
# need in try/except or equiv.
if 'Elem' in list(p_dict.keys()):
Elem = p_dict['Elem']
else:
Elem = p_dict_defaults['Elem']
if 'Dline' in list(p_dict.keys()):
Dline = p_dict['Dline']
else:
Dline = p_dict_defaults['Dline']
if 'shift' in list(p_dict.keys()):
shift = p_dict['shift']
else:
shift = p_dict_defaults['shift']
if 'lcell' in list(p_dict.keys()):
lcell = p_dict['lcell']
else:
lcell = p_dict_defaults['lcell']
if 'theta0' in list(p_dict.keys()):
theta0 = p_dict['theta0']
else:
theta0 = p_dict_defaults['theta0']
if 'Pol' in list(p_dict.keys()):
Pol = p_dict['Pol']
else:
Pol = p_dict_defaults['Pol']
# get wavenumber
exec('transition = AC.'+Elem+Dline+'Transition')
wavenumber = transition.wavevectorMagnitude
# Calculate Susceptibility
ChiPlus, ChiMinus, ChiZ = calc_chi(X, p_dict)
Chi = [ChiPlus, ChiMinus, ChiZ]
# Complex refractive index
nPlus = sqrt(1.0+ChiPlus) #Complex refractive index driving sigma plus transitions
nMinus = sqrt(1.0+ChiMinus) #Complex refractive index driving sigma minus transitions
nZ = sqrt(1.0+ChiZ) # Complex index driving pi transitions
# convert (if necessary) detuning axis X to np array
if type(X) in (int, float, int):
X = np.array([X])
else:
X = np.array(X)
# Calculate E_field
E_out, R = get_Efield(X, E_in, Chi, p_dict)
#print 'Output E field (Z): \n', E_out[2]
## Apply Jones matrices
# Transmission - total intensity - just E_out**2 / E_in**2
E_in = np.array(E_in)
if E_in.shape == (3,):
E_in = np.array([np.ones(len(X))*E_in[0],np.ones(len(X))*E_in[1],np.ones(len(X))*E_in[2]])
# normalised by input intensity
I_in = (E_in * E_in.conjugate()).sum(axis=0)
S0 = (E_out * E_out.conjugate()).sum(axis=0) / I_in
Iz = (E_out[2] * E_out[2].conjugate()).real / I_in
Transmission = S0
## Some quantities from Faraday geometry don't make sense when B and k not aligned, but leave them here for historical reasons
TransLeft = exp(-2.0*nPlus.imag*wavenumber*lcell)
TransRight = exp(-2.0*nMinus.imag*wavenumber*lcell)
# Faraday rotation angle (including incident linear polarisation angle)
phiPlus = wavenumber*nPlus.real*lcell
phiMinus = wavenumber*nMinus.real*lcell
phi = (phiMinus-phiPlus)/2.0
##
#Stokes parameters
#S1#
Ex = np.array(JM.HorizPol_xy * E_out[:2])
Ix = (Ex * Ex.conjugate()).sum(axis=0) / I_in
Ey = np.array(JM.VertPol_xy * E_out[:2])
Iy = (Ey * Ey.conjugate()).sum(axis=0) / I_in
S1 = Ix - Iy
#S2#
E_P45 = np.array(JM.LPol_P45_xy * E_out[:2])
E_M45 = np.array(JM.LPol_M45_xy * E_out[:2])
I_P45 = (E_P45 * E_P45.conjugate()).sum(axis=0) / I_in
I_M45 = (E_M45 * E_M45.conjugate()).sum(axis=0) / I_in
S2 = I_P45 - I_M45
#S3#
# change to circular basis
E_out_lrz = BC.xyz_to_lrz(E_out)
El = np.array(JM.CPol_L_lr * E_out_lrz[:2])
Er = np.array(JM.CPol_R_lr * E_out_lrz[:2])
Il = (El * El.conjugate()).sum(axis=0) / I_in
Ir = (Er * Er.conjugate()).sum(axis=0) / I_in
S3 = Ir - Il
Ir = Ir.real
Il = Il.real
Ix = Ix.real
Iy = Iy.real
## (Real part) refractive indices
#nMinus = nPlus.real
#nPlus = nMinus.real
## Absorption coefficients - again not a physically relevant quantity anymore since propagation is not as simple as k * Im(Chi) * L in a non-Faraday geometry
alphaPlus = 2.0*nMinus.imag*wavenumber
alphaMinus = 2.0*nPlus.imag*wavenumber
alphaZ = 2.0*nZ.imag*wavenumber
# Refractive/Group indices for left/right/z also no longer make any sense
#d = (array(X)-shift) #Linear detuning
#dnWRTv = derivative(d,nMinus.real)
#GIPlus = nMinus.real + (X + transition.v0*1.0e-6)*dnWRTv
#dnWRTv = derivative(d,nPlus.real)
#GIMinus = nPlus.real + (X + transition.v0*1.0e-6)*dnWRTv
if (outputs == None) or ('All' in outputs):
# Default - return 'all' outputs (as used by GUI)
return S0.real,S1.real,S2.real,S3.real,Ix.real,Iy.real,I_P45.real,I_M45.real,alphaPlus,alphaMinus,alphaZ
else:
# Return the variable names mentioned in the outputs list of strings
# the strings in outputs must exactly match the local variable names here!
return [locals()[output_str] for output_str in outputs] | e55c9a428876e6cb41f92775d46f6c9411913e7d | 3,650,721 |
import operator
def calculate_seat_district(district_deputy_number, parties, votes):
"""
Calculate seats for each party in list of parties for a district
Params:
- district_deputy_number: the number of seats for this district
- parties: list of parties
- votes: list of votes for each party in this district
Assume that parties and votes parameters have the same size
Return:
- A tuple represents number of seats for each party. This tuple has same size with parameter 'parties'
"""
party_count = len(parties)
# Each party has been initially allocated 0 seat
# Initialize a list with initial value is 0
# For example, if party_count = 5
# seats will be seats = [0, 0, 0, 0, 0]
seats = [0] * party_count
# N value for each party
# N= V/(s + 1)
# Init N as a copy of votes list
N = votes[:]
while sum(seats) < district_deputy_number:
# Get the maximum value in list of N value and the index of that maximum value
# Note: this below line uses the Python's builtin operator
max_index, max_value = max(enumerate(N), key=operator.itemgetter(1))
# Update the seats list
# increase the seat of the party that has maximum by 1
seats[max_index] += 1
        # Update the largest N with its new value
        # using the formula: N = V/(s + 1)
N[max_index] = votes[max_index] / (seats[max_index] + 1)
# return as tuple
# Note: It can be returned as list, however, the tuple is better because it's immutable
return tuple(seats) | 035a167c623d14857dcefe01e4304523959857a6 | 3,650,724 |
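A small illustration of the highest-averages allocation above, assuming `calculate_seat_district` is in scope; the party names and vote counts are invented:

```python
# Hypothetical district: 5 seats, three parties
parties = ['A', 'B', 'C']
votes = [34000, 25000, 11000]

seats = calculate_seat_district(5, parties, votes)
print(dict(zip(parties, seats)))  # {'A': 3, 'B': 2, 'C': 0}
```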
import unittest
def dispatch():
"""Run all dispatch tests"""
suite = ServiceTestSuite()
suite.addTest(unittest.makeSuite(TestCase, 'test_dispatch'))
return suite | c0d5235bcc22429f8ed36756c4f531f014316d04 | 3,650,725 |
import numpy as np
import torch
def get_mask(height, width, grid_size = 10):
"""
Get the location based on the image size corresponding to relu_4_2
and relu_5_1 layer for a desired grid size.
"""
print(height, width)
x_jump = int(width/grid_size)
y_jump = int(height/grid_size)
x_idx = np.linspace(int(x_jump/2),int(width - x_jump/2), grid_size, dtype = np.int32)
y_idx = np.linspace(int(y_jump/2), int(height - y_jump/2), grid_size, dtype = np.int32)
f_mask = torch.zeros((height//(2**4),width//2**4)).byte()
u_mask = torch.zeros((height//(2**3),width//2**3)).byte()
for i in x_idx:
for j in y_idx:
f_mask[j//(2**4),i//(2**4)] = 1
u_mask[j//(2**3),i//(2**3)] = 1
return(u_mask, f_mask) | a63b1bf2944309f3e5720d06bb5906a059e09ea7 | 3,650,726 |
import numpy as np
def arrivalTimes2TimeTraceBH(arrivalTimes, binLength):
"""
Convert a list of arrivalTimes to an intensity time trace I(t)
===========================================================================
Input Meaning
---------------------------------------------------------------------------
arrivalTimes Variable with the arrivalTimes for each detector
element [of ns]
binLength Duration of each bin [in ns]
===========================================================================
Output Meaning
---------------------------------------------------------------------------
data Vector with intensity trace vs time in bins of binLength
===========================================================================
"""
# calculate for each photon in which bin it should be
photonBins = np.int64(arrivalTimes / binLength)
# number of photon bins
Nbins = np.max(photonBins) + 1
# create output vector
data = np.zeros(Nbins, 'int16')
for i in range(len(photonBins)):
data[photonBins[i]] += 1
return data | 0db7d8471bb5b918c66a57ace1b84871012448a4 | 3,650,727 |
import datetime
import pytz
import tzlocal
def datetime_to_ts(dt):
"""
convert naive or aware datetime instance to timestamp in second
Args:
dt(datetime): datetime instance
Returns:
int: timestamp in second.
"""
epoch_dt = datetime.datetime.fromtimestamp(0, tz=pytz.utc)
if not hasattr(dt, 'tzinfo') or dt.tzinfo is None:
local_tz = tzlocal.get_localzone()
dt = local_tz.localize(dt)
delta = dt - epoch_dt
ts = delta.total_seconds()
return ts | 4fd3f0e0a4a051980a6bfe3e21a3ceb4ceca6b7e | 3,650,728 |
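A usage sketch, assuming `datetime_to_ts` above is in scope and the `tzlocal` package is available for the naive-datetime branch:

```python
import datetime
import pytz

aware = datetime.datetime(2021, 1, 1, tzinfo=pytz.utc)
print(datetime_to_ts(aware))   # 1609459200.0

naive = datetime.datetime(2021, 1, 1)
print(datetime_to_ts(naive))   # timestamp of midnight 2021-01-01 in the local timezone
```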
import pandas as pd
def load_csv(source_filepath, start_date = None, end_date = None,
timestamp_index = 0,
column_map = {"bidopen": "bidopen",
"bidclose": "bidclose",
"bidhigh": "bidhigh",
"bidlow": "bidlow",
"askopen": "askopen",
"askclose": "askclose",
"askhigh": "askhigh",
"asklow": "asklow",
"volume": "tickqty"}):
"""Loads a csv price data file
Args:
source_filepath (string): the full or relative filepath for csv input file
start_date (string): optional filter for price data
end_date (string): optional filter for price data
timestamp_index (int): column index for the start of bar timestamp
column_map (dict): maps the csv file columns (value) to the predefined
column names (key)
Returns:
Pandas.DataFrame: returns the price data
"""
#read the csv file
df = pd.read_csv(source_filepath, sep=",", index_col=timestamp_index, parse_dates=True)
#convert the columns to the correct data type
df["askopen"] = df[column_map["askopen"]].astype('float32')
df["askclose"] = df[column_map["askclose"]].astype('float32')
df["askhigh"] = df[column_map["askhigh"]].astype('float32')
df["asklow"] = df[column_map["asklow"]].astype('float32')
df["bidopen"] = df[column_map["bidopen"]].astype('float32')
df["bidclose"] = df[column_map["bidclose"]].astype('float32')
df["bidhigh"] = df[column_map["bidhigh"]].astype('float32')
df["bidlow"] = df[column_map["bidlow"]].astype('float32')
df["volume"] = df[column_map["volume"]].astype('int32')
#reorder the columns in the correct order
cols = ["askopen", "askclose", "askhigh", "asklow", "bidopen", "bidclose",
"bidhigh", "bidlow", "volume"]
df = df[cols]
#filter on dates if required
if start_date is not None:
df = df[df.index >= start_date]
if end_date is not None:
df = df[df.index <= end_date]
return df | c5256dddcea94854c30c6b95d96557cb81003e80 | 3,650,729 |
import numpy as np
def create_kernel(dim0, dim1):
"""Create a two-dimensional LPF kernel, with a half-Hamming window along
the first dimension and a Gaussian along the second.
Parameters
----------
dim0 : int
Half-Hamming window length.
dim1 : int
Gaussian window length.
Returns
-------
kernel : np.ndarray
The 2d LPF kernel.
"""
dim0_weights = np.hamming(dim0 * 2 + 1)[:dim0]
dim1_weights = gaussian(dim1, dim1 * 0.25, True)
kernel = dim0_weights[:, np.newaxis] * dim1_weights[np.newaxis, :]
return kernel / kernel.sum() | f19cd7a840fd1562b0c95f614415d3471e1cb4d4 | 3,650,730 |
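The `gaussian` helper is not part of the snippet; the sketch below assumes a symmetric, optionally normalized Gaussian window with that call signature (the helper is an assumption, not the original implementation) and that `create_kernel` is defined in the same module:

```python
import numpy as np

def gaussian(n, sigma, normalized=True):
    # Assumed helper: length-n Gaussian window centred on the middle sample
    x = np.arange(n) - (n - 1) / 2.0
    w = np.exp(-0.5 * (x / sigma) ** 2)
    return w / w.sum() if normalized else w

kernel = create_kernel(5, 9)
print(kernel.shape)  # (5, 9)
print(kernel.sum())  # ~1.0: the kernel is normalized to unit sum
```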
import cupy as cp
def generate_fractal_noise_3d(
shape, res, octaves=1, persistence=0.5, lacunarity=2,
tileable=(False, False, False), interpolant=interpolant
):
"""Generate a 3D numpy array of fractal noise.
Args:
shape: The shape of the generated array (tuple of three ints).
This must be a multiple of lacunarity**(octaves-1)*res.
res: The number of periods of noise to generate along each
axis (tuple of three ints). Note shape must be a multiple of
(lacunarity**(octaves-1)*res).
octaves: The number of octaves in the noise. Defaults to 1.
persistence: The scaling factor between two octaves.
lacunarity: The frequency factor between two octaves.
tileable: If the noise should be tileable along each axis
(tuple of three bools). Defaults to (False, False, False).
interpolant: The, interpolation function, defaults to
t*t*t*(t*(t*6 - 15) + 10).
Returns:
A numpy array of fractal noise and of shape shape generated by
combining several octaves of perlin noise.
Raises:
ValueError: If shape is not a multiple of
(lacunarity**(octaves-1)*res).
"""
noise = cp.zeros(shape)
frequency = 1
amplitude = 1
for _ in range(octaves):
noise += amplitude * generate_perlin_noise_3d(
shape,
(frequency*res[0], frequency*res[1], frequency*res[2]),
tileable,
interpolant
)
frequency *= lacunarity
amplitude *= persistence
return noise | 3c20dd9c7de6e53d074926bdf87278f92f5162f4 | 3,650,731 |
def getDeltaBetweenPosition(data_outVTK, wall_outVTK, cord_choice, x_p0, x_p1, Npts, Uinf=None, Rhoinf=None):
"""
Compute the boundary layer thickness for *Npts* equally distributed between the 2 position
defined thanks to *x_p0*, *x_p1* and *cord_choice*. See the documentation of the function
getDeltaAtPosition() for more information.
Parameters
----------
data_outVTK : VTK output object from a VTK reader
This field data MUST fulfill 2 conditions:
- contain the field "U_AVG",
- represent a 2D field data, as we want to extract a 1D velocity
profile from it.
wall_outVTK : VTK output object from a VTK reader
This field is supposed to describe a curve with 1D cell type, like in getLineTangentialVector()
cord_choice : integer, 0, 1 or 2
Gives the axis that is going to be used to define the point where the velocity
profile is extracted. Convention :
- 0 = x axis
- 1 = y axis
- 2 = z axis
x_p0, x_p1 : float between 0 and 1
gives the bound of the line where to compute the BL thickness.
Npts: integer
Number of points where to compute delta wanted.
Uinf : float
(optional) Free stream velocity
Rhoinf : float
(optional) Free stream density
Returns
-------
pos : vector of tuple(3)
the coordinates of the points where delta have been computed.
delta : vector of floats
The BL thickness at the *Npts* different points.
deltaS : vector of floats
The compressible BL displacement thickness at the *Npts* different points.
theta : vector of floats
The compressible BL momentum thickness at the *Npts* different points.
"""
# function display
    print('---- DAEPy::getDeltaBetweenPosition ----')
delta = np.zeros(Npts)
deltaS = np.zeros(Npts)
theta = np.zeros(Npts)
pos = np.zeros((Npts, 3))
for i in range(Npts):
x_p_temp = x_p0 + (x_p1-x_p0)/(Npts-1)*i
[pos[i,:], delta[i], deltaS[i], theta[i]] = getDeltaAtPosition(data_outVTK, wall_outVTK, cord_choice, x_p_temp, Uinf, Rhoinf)
return [pos, delta, deltaS, theta] | a765dbecf1d34c8693c73ecc5250d22bf04793fa | 3,650,732 |
def get_clean_url(url):
""" Get a url without the language part, if i18n urls are defined
:param url: a string with the url to clean
:return: a string with the cleaned url
"""
url = url.strip('/')
url = '/' if not url else url
return '/'.join(url.split('/')[1:]) | 9e5d396086d6cc5169c26f6d1645dafd23a3b8d7 | 3,650,733 |
def get_fields_from_url():
"""Returns a list of fields defined in the url as expected by the RESTful standard"""
return request.args.get('fields', '').split(",") | 7e6833f10f8b32a71667402ca3f1c9f5b8648b56 | 3,650,734 |
import collections
def get_triples_processed(dcids, limit=_MAX_LIMIT):
"""
Generate the GetTriple query and send the request.
The response is processed into as triples strings. This API is used by the
pv tree tool.
"""
url = API_ROOT + API_ENDPOINTS['get_triples']
payload = send_request(url, req_json={'dcids': dcids, 'limit': limit})
# Create a map from dcid to list of triples.
results = collections.defaultdict(list)
for dcid in dcids:
# Make sure each dcid is mapped to an empty list.
results[dcid]
# Add triples as appropriate
for t in payload[dcid]:
if 'objectId' in t:
results[dcid].append(
(t['subjectId'], t['predicate'], t['objectId']))
elif 'objectValue' in t:
results[dcid].append(
(t['subjectId'], t['predicate'], t['objectValue']))
return dict(results) | de143106e4f6efad89a223b5e83146bd2de170d0 | 3,650,735 |
from datetime import datetime
def cid_to_date(cid):
"""Converts a cid to date string YYYY-MM-DD
Parameters
----------
cid : int
A cid as it is generated by the function ``utils.create_cid()``
Returns
-------
str
A string formated date (e.g. YYYY-MM-DD, 2018-10-01)
"""
return datetime.utcfromtimestamp(
cid/10000000.0
).strftime("%Y-%m-%d") | ab919f9cfd5c56f6fb6b65cbae8731687fc42faf | 3,650,736 |
from math import ceil
import requests
from lxml import html
def get_listings(max_pages=10):
"""Returns the listings from the first max_pages of craigslist."""
page = requests.get(URL)
tree = html.fromstring(page.content)
listing_xpath = '//li[@class="result-row"]'
listings = tree.xpath(listing_xpath)
# Get total number of listings
default_lpp = 120 # Default number of listings on each page
num_listings = int(tree.xpath('//span[@class="totalcount"]/text()')[0])
total_pages = ceil(num_listings / default_lpp)
# Get next pages
for i in range(min(max_pages - 1, total_pages)):
next_page = requests.get(URL + "&s=%s" % (default_lpp * i))
        next_tree = html.fromstring(next_page.content)
        next_listings = next_tree.xpath(listing_xpath)
listings.extend(next_listings)
return listings | a01aadf7e1701735fe3757b92da09c6a12848c73 | 3,650,737 |
import random
def remove_edge_stochastic_function(G, parameter, prob_func, prob_func_kws={}, random_seed=None, copy=True):
"""
    Receives a graph and a removal probability p, where p is a function of the chosen edge parameter.
    Returns a degraded graph.
"""
if random_seed is not None:
random.seed(random_seed)
if copy:
G_ = G.copy()
else:
G_ = G
lst = [G.edges[n][parameter] for n in G.edges]
vmax, vmin = max(lst), min(lst)
prob_func_kws['vmax'] = vmax
prob_func_kws['vmin'] = vmin
lst=None
for edge in list(G.edges):
p = prob_func(G.edges[edge][parameter], **prob_func_kws)
if random.random()<=p:
G_.remove_edge(*edge)
return(G_) | e4f49a6e512f7ad0e86c0138bf76affa330ba7a5 | 3,650,738 |
def get_sh_type(sh_type):
"""Get the section header type."""
if sh_type == 0:
return 'SHT_NULL'
elif sh_type == 1:
return 'SHT_PROGBITS'
elif sh_type == 2:
return 'SHT_SYMTAB'
elif sh_type == 3:
return 'SHT_STRTAB'
elif sh_type == 4:
return 'SHT_RELA'
elif sh_type == 5:
return 'SHT_HASH'
elif sh_type == 6:
return 'SHT_DYNAMIC'
elif sh_type == 7:
return 'SHT_NOTE'
elif sh_type == 8:
return 'SHT_NOBITS'
elif sh_type == 9:
return 'SHT_REL'
elif sh_type == 10:
return 'SHT_SHLIB'
elif sh_type == 11:
return 'SHT_DYNSYM'
elif sh_type == 14:
return 'SHT_INIT_ARRAY'
elif sh_type == 15:
return 'SHT_FINI_ARRAY'
elif sh_type == 16:
return 'SHT_PREINIT_ARRAY'
elif sh_type == 17:
return 'SHT_GROUP'
elif sh_type == 18:
return 'SHT_SYMTAB_SHNDX'
elif sh_type == 19:
return 'SHT_NUM'
elif sh_type == 1610612736:
return 'SHT_LOOS'
else:
print('Unable to match {} to a sh_type.'.format(sh_type))
raise ValueError | 0d95e651cc817f48178e45373b822be2eb32fbaf | 3,650,739 |
def show_warn_message(text:str, *args:str) -> str:
"""
Show a warning message.
"""
return _base("showWarningMessage", text, *args) | 2b1851aade6ff8952c27b1f63d687ae0e95ce4c2 | 3,650,740 |
def read_gmpe_file(resid_file, period):
"""
Reads the gmpe residuals file and returns all the data
"""
gmpe_data = []
# Read residuals file and get information we need
input_file = open(resid_file, 'r')
# Look over header and figure out which column contains the period
# we need to plot
header = input_file.readline()
header = header.strip()
items = header.split()
index = -1
for idx, item in enumerate(items):
try:
val = float(item)
if val == period:
# Found period, save index
index = idx
break
except:
pass
if index < 0:
# If we don't have this period, nothing to do
print("Residuals file %s does not have data for period %f" %
(resid_file, period))
# Close input file
input_file.close()
# Return empty sets
return gmpe_data
# Read the rest of the file
# Index #2 has station name
# Index #7 has distance
for line in input_file:
items = line.split()
stat = items[2]
dist = items[7]
value = items[index]
gmpe_data.append((stat, dist, value))
# Done reading the file
input_file.close()
return gmpe_data | c7cb325f6c40cc23ae8fa017a6ef924fa7df2c4e | 3,650,741 |
def get_types(name=None):
"""Retrieves the list of device types in the system.
Note that we actually use the "GET device-families" endpoint, as this returns a complete list in one request.
"""
all_types = []
all_families = get_families(name=None, includeTypes=True)
for family in all_families:
all_types.extend(family["types"])
return all_types | dad6ff4fd938aa63e8c42dbf0e70ede0786bd458 | 3,650,743 |
import json
def read_socket(sock, buf_len, echo=True):
""" Read data from socket and return it in JSON format """
reply = sock.recv(buf_len).decode()
try:
ret = json.loads(reply)
except json.JSONDecodeError:
print("Error in reply: ", reply)
sock.close()
raise
if echo:
print(json.dumps(ret))
return ret | 07d7100ed8c1c9d22307ce293e10b2a0cd5849c6 | 3,650,744 |
import xarray as xr
def temperature_source_function(
rho,
district,
norm,
density: xr.DataArray,
etemp: xr.DataArray,
itemp: xr.DataArray,
density_source: xr.DataArray,
source_strength,
source_centre=0.3,
source_width=0.3,
):
"""
Smooth-step core power injection, mimicking Ohmic power deposition
The temperature source takes into account the power from the density source, to give a constant-power source
"""
core_source = core_temperature_source_function(
rho, district, norm, source_strength, source_centre, source_width
)
total_source = xr.DataArray(
core_source / density - density_source * (etemp + itemp) / density
).assign_attrs(norm=norm.Te0 / norm.tau_0)
core_source = xr.DataArray(core_source / density).assign_attrs(
norm=norm.Te0 / norm.tau_0
)
annular_sink = xr.DataArray(
-density_source * (etemp + itemp) / density
).assign_attrs(norm=norm.Te0 / norm.tau_0)
return total_source, core_source, annular_sink | ba7836ce3f9d2e523dca4a6bf33999a116f20920 | 3,650,745 |
from pathlib import Path
import requests
def mock_dbt_cloud_response(
monkeypatch: MonkeyPatch,
dbt_manifest_file: Path,
dbt_run_results_file: Path,
) -> None:
"""
Mock the dbt cloud response.
Parameters
----------
monkeypatch : MonkeyPatch
The monkey patch fixture.
dbt_manifest_file : Path
The path to the manifest file.
dbt_run_results_file : Path
The path to the run results file.
"""
def get(url: str, headers: dict | None = None):
"""Mock the requests.get method."""
response = requests.Response()
if "manifest.json" in url:
file = dbt_manifest_file
elif "run_results.json" in url:
file = dbt_run_results_file
else:
raise ValueError(f"Unrecognized url: {url}")
with file.open("rb") as f:
response._content = f.read()
response.status_code = requests.codes.ok
return response
monkeypatch.setattr(requests, "get", get) | 1ae3a38c36a2f1468287ed587ee7495e40ceee74 | 3,650,746 |
async def model_copy(request, model_id):
""" route for copy item per row """
request_params = {elem: request.form[elem][0] for elem in request.form}
base_obj_id = utils.extract_obj_id_from_query(request_params["_id"])
try:
new_obj_key = await create_object_copy(
model_id, base_obj_id, cfg.models[model_id]
)
message = f"Object with {base_obj_id} key was copied as {new_obj_key}"
flash_message = (message, "success")
log_history_event(request, message, new_obj_key)
except asyncpg.exceptions.UniqueViolationError as e:
flash_message = (
f"Duplicate in Unique column Error during copy: {e.args}. \n"
f"Try to rename existed id or add manual.",
"error",
)
except asyncpg.exceptions.ForeignKeyViolationError as e:
flash_message = (e.args, "error")
return await model_view_table(request, model_id, flash_message) | 2fd7ed81d10d40cd58c859e3b60de315404d9f4d | 3,650,747 |
import numpy as np
def load_pr(fname):
"""Loads predicted tracks in tabular format."""
try:
data = np.loadtxt(fname, delimiter=',', dtype=np.float64, ndmin=2)
except (ValueError, IndexError):
# Try using whitespace delim (default).
data = np.loadtxt(fname, delimiter=None, dtype=np.float64, ndmin=2)
# If category is not -1, then filter by pedestrian.
_, num_cols = data.shape
if CATEGORY_COLUMN < num_cols and not np.all(data[:, CATEGORY_COLUMN] == -1):
data = data[data[:, CATEGORY_COLUMN] == POSITIVE_CATEGORY, :]
return data | 63fd8422adb170ccc2e97d32b9be04efe86fa72d | 3,650,748 |
import numpy as np
def build_affine(rotation, scale, origin):
"""
Compute affine matrix given rotation, scaling, and origin.
Parameters
----------
rotation : np.array
rotation
scale : np.array
        scale factor
    origin : np.array
        origin (translation component) of the affine
Returns
-------
aff : np.array [4x4]
affine matrix
"""
aff = np.zeros((4, 4))
aff[0:3, 0:3] = rotation
aff[:, 3] = np.append(origin, 1).T
aff[0:3, 0:3] = np.dot(aff[0:3, 0:3], np.diag(scale))
return aff | 927016b37f3c1ea2c7b3720cb7b31f246784258a | 3,650,749 |
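A brief usage sketch, assuming `build_affine` above is in scope; the voxel size and origin are made up:

```python
import numpy as np

rotation = np.eye(3)                       # identity: no rotation
scale = np.array([2.0, 2.0, 2.0])          # 2 mm isotropic voxels (illustrative)
origin = np.array([-90.0, -126.0, -72.0])  # illustrative origin

aff = build_affine(rotation, scale, origin)
print(aff)
# [[   2.    0.    0.  -90.]
#  [   0.    2.    0. -126.]
#  [   0.    0.    2.  -72.]
#  [   0.    0.    0.    1.]]
```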
from pandas import concat
def sample_per_group(data, group_by, ratio = None, n = None):
"""
:type data: DataFrame
:type group_by: list of str
:type ratio: float
    :type n: int
:return:
"""
# group the data
data = data.copy()
"""
:type data: DataFrame
"""
data['__order1'] = data.index
grouped = data.groupby(by = group_by)
training_sets = []
test_sets = []
for name, group in grouped:
if n is None:
num_rows = round(group.shape[0] * ratio)
else:
num_rows = round(min(n, group.shape[0]))
shuffled = shuffle(data=group).reset_index(drop=True)
new_training_set = shuffled.iloc[:num_rows, ]
if num_rows>group.shape[0]:
# if all of the data ends up in training set, test set should be empty instead of an error
new_test_set = shuffled[shuffled.index!=shuffled.index]
else:
new_test_set = shuffled.iloc[(num_rows + 1):, ]
training_sets.append(new_training_set)
test_sets.append(new_test_set)
training = concat(training_sets)
"""
:type training: DataFrame
"""
test = concat(test_sets)
"""
:type test: DataFrame
"""
training.reset_index(drop=True, inplace=True)
test.reset_index(drop=True, inplace=True)
training.index = training['__order1'].values
test.index = test['__order1'].values
training.sort_index(inplace=True)
test.sort_index(inplace=True)
training.drop(axis=1, labels = '__order1', inplace=True)
test.drop(axis=1, labels='__order1', inplace=True)
return training, test | ca92f79263f428aeafb07ac9efb4db65145a3113 | 3,650,750 |
def get_base64_column(metadata_df: pd.DataFrame) -> pd.DataFrame:
"""
Get accession json base64 str
:return:
"""
# Get accession json object as base64 string
metadata_df['accession_json_base64_str'] = metadata_df[METADATA_PAYLOAD_COLUMNS].\
apply(lambda x: b64encode(bytes(x.to_json(), encoding='utf-8')).decode("ascii"),
axis="columns")
return metadata_df | a68f9d735a4ac1612cb4fea2e83cac903e8111c8 | 3,650,751 |
def bible_studies_view(request):
"""Bible studies view."""
auth = False
try:
auth = request.cookies['auth_tkt']
auth_tools = request.dbsession.query(
MyModel
).filter(MyModel.category == 'admin').all()
except KeyError:
auth_tools = []
query = request.dbsession.query(MyModel)
content = query.filter(MyModel.page == 'ministries').all()
main_menu = query.filter(MyModel.subcategory == 'base').all()
submenu = [item for item in content if item.title == 'menu_place_holder']
topimg = [item for item in content if item.category == 'topimg']
main = [item for item in content if item.category == 'bible_studies']
return {
'auth': auth,
'main_menu': main_menu,
'submenu': submenu,
'topimg': topimg[0],
'main': main,
'auth_tools': auth_tools,
} | c69a5291cbcb8267c0606fef2cae620878cca97b | 3,650,752 |
def get_num_channels(inputs):
""" Get number of channels in one tensor. """
return inputs.shape[1] | 6fb42e60714dc81f03b29ad87b73b41027056472 | 3,650,753 |
import numpy as np
def single_face_marker():
"""
Face marker with a single value.
"""
return np.zeros((2, 3)).astype(int) | 2d126b031a1809a8d95df00ccada79ba6191fa61 | 3,650,754 |
import numpy as np
def skew(arr, angle, dx=None, dy=None, fwd=True, fill_min=True):
"""
Skew the origin of successive lines by a specified angle
A skew with angle of 30 degrees causes the following transformation:
+-----------+ +---------------+
| | |000/ /|
| input | |00/ output /0|
| image | |0/ image /00|
| | |/ /000|
+-----------+ +---------------+
Calling skew with fwd=False will return the output image
back to the input image.
Skew angle must be between -45 and 45 degrees
Arguments:
arr: array to skew
angle: angle between -45 and 45 to skew by
dx: spacing of the array in the x (sample) direction
(if dx=dy, or dx or dy not supplied, spacing is ignored)
dy: spacing of the array in the y (line) direction
fwd: add skew to image if True, unskew image if False
fill_min: While IPW skew says it fills with zeros, the output
image is filled with the minimum value
Returns:
skewed array
"""
if angle == 0:
return arr
if angle > 45 or angle < -45:
raise ValueError('skew angle must be between -45 and 45 degrees')
if dx is None or dy is None:
dx = 1.0
dy = 1.0
nlines, nsamps = arr.shape
if angle >= 0.0:
negflag = False
else:
negflag = True
angle = -angle
# unequal dx/dy equivalent to changing skew angle
slope = np.tan(angle * np.pi / 180.0) * (dy/dx)
max_skew = int((nlines - 1) * slope + 0.5)
o_nsamps = nsamps
if fwd:
o_nsamps += max_skew
else:
o_nsamps -= max_skew
b = np.zeros((nlines, o_nsamps))
if fill_min:
b += np.min(arr)
# if skewing, first fill output array with original array
if fwd:
b[0:nlines, 0:nsamps] = arr
o = np.arange(nlines)
# positive skew angle means shifts decrease with increasing row index
if not negflag:
o = nlines - o - 1
offset = (o * slope + 0.5).astype(int)
if not fwd: # offset values are negative shifts if unskewing
offset *= -1
if fwd:
b = custom_roll(b, offset)
else:
# assignment indexing added to ensure array shape match
b[:, :] = custom_roll(arr, offset)[:, :o_nsamps]
"""
for line in range(nlines):
o = line if negflag else nlines - line - 1
offset = int(o * slope + 0.5)
if fwd:
b[line, offset:offset+nsamps] = arr[line, :]
else:
b[line, :] = arr[line, offset:offset+o_nsamps]
"""
return b | e25bb8632c6e86c84ab5e5a5bae56fd6f24e7c0d | 3,650,755 |
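A round-trip sketch of the skew/unskew behaviour described in the docstring, assuming `skew` and its external `custom_roll` row-shift helper are importable:

```python
import numpy as np

arr = np.arange(12, dtype=float).reshape(3, 4)

skewed = skew(arr, 30.0)                  # forward skew widens the array by max_skew columns
restored = skew(skewed, 30.0, fwd=False)  # unskew back to the original footprint

print(skewed.shape)                       # (3, 5)
print(np.allclose(restored, arr))         # expected True
```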
def websocket_call(configuration, _method, url, **kwargs):
"""An internal function to be called in api-client when a websocket
connection is required. method, url, and kwargs are the parameters of
apiClient.request method."""
url = get_websocket_url(url, kwargs.get("query_params"))
headers = kwargs.get("headers")
_request_timeout = kwargs.get("_request_timeout", 60)
_preload_content = kwargs.get("_preload_content", True)
capture_all = kwargs.get("capture_all", True)
try:
client = WSClient(configuration, url, headers, capture_all)
if not _preload_content:
return client
client.run_forever(timeout=_request_timeout)
return WSResponse('%s' % ''.join(client.read_all()))
except (Exception, KeyboardInterrupt, SystemExit) as e:
raise ApiException(status=0, reason=str(e)) | add109d9caa80b74cb28754792033577e7b70ef3 | 3,650,756 |
import numpy as np
def compute_metrics(feats, pids, camids, num_query):
""" Compute CMC and mAP metrics """
# query
qf = feats[:num_query]
q_pids = np.asarray(pids[:num_query])
q_camids = np.asarray(camids[:num_query])
# gallery
gf = feats[num_query:]
g_pids = np.asarray(pids[num_query:])
g_camids = np.asarray(camids[num_query:])
m, n = qf.shape[0], gf.shape[0]
distmat = np.power(qf, 2).sum(axis=1, keepdims=True).repeat(n, axis=1) + \
np.power(gf, 2).sum(axis=1, keepdims=True).repeat(m, axis=1).T
distmat = 1 * distmat - 2 * np.dot(qf, gf.transpose())
cmc, m_ap = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
return cmc, m_ap | 4bd35b6e4c7ede5a54685822feeb6264e3fd7275 | 3,650,757 |
from datetime import datetime
def parse_date(string_date: str) -> datetime.datetime:
"""
Parses input string of format 'MMM-yyyy' to datetime.
:param str string_date: Date in string format 'MMM-yyyy'
:return: datetime.datetime: parsed datetime
"""
return datetime.datetime.strptime(string_date, '%b-%Y') | 2d2b3b5332ca354e1600a25495dadd1dced31479 | 3,650,758 |
import math
def round_vzeros(v,d=10) :
"""Returns input vector with rounded to zero components
which precision less than requested number of digits.
"""
prec = pow(10,-d)
vx = v[0] if math.fabs(v[0]) > prec else 0.0
vy = v[1] if math.fabs(v[1]) > prec else 0.0
vz = v[2] if math.fabs(v[2]) > prec else 0.0
return vx,vy,vz | aa16175bf1176383ef255460767502104be2566e | 3,650,759 |
def euclidean_distance_loss(params, params_prev):
"""
Euclidean distance loss
https://en.wikipedia.org/wiki/Euclidean_distance
:param params: the current model parameters
:param params_prev: previous model parameters
:return: float
"""
return K.sqrt(K.sum(K.square(params - params_prev), axis=-1)) | 5c5366a7c60faa783ab55f85d758b7d40ff5627f | 3,650,760 |
import re
import shlex
def read_cloudflare_api_file(prog, file, state):
"""Read the input file for Cloudflare login details.
Args:
prog (State): modified if errors encountered in opening or reading
the file.
file (str): the file to read.
state (ConfigState): to record config file syntax errors.
Returns:
list(str): returns a list of Cloudflare login parameters
(email and key) where a line in the file 'X = Y' is converted
to: 'X:Y'. No checks on the input to any parameters (i.e. 'Y')
are done here: only the list is constructed. If ANY errors
are encountered, 'None' is returned.
"""
try:
with open(str(file), "r") as f:
raw = f.read().splitlines()
except FileNotFoundError as ex:
prog.log.error(
"cloudflare API file '{}' not found".format(ex.filename))
return None
except OSError as ex:
prog.log.error(
"reading cloudflare API file '{}' failed: {}".format(
ex.filename, ex.strerror.lower()))
return None
allowed_params = {'dns_cloudflare_email': 'email',
'dns_cloudflare_api_key': 'key'}
errors = False
ret = []
linepos = 0
for l in raw:
linepos += 1
match = re.match(r'^\s*(#.*)?$', l)
if match:
continue
match = re.match(
r'\s*(?P<param>\w+)\s*=\s*(?P<input>[^#]*)(\s*|\s#.*)$', l)
if match:
param = match.group('param')
try:
inputs = shlex.split(match.group('input'))
except ValueError:
state.add_error(prog, "cloudflare API file '{}' has malformed expression on line {}".format(file, linepos))
errors = True
continue
if param in allowed_params:
if len(inputs) != 1:
state.add_error(prog, "cloudflare API file '{}': malformed '{}' command on line {}".format(file, param, linepos))
errors = True
continue
ret += [ '{}:{}'.format(allowed_params[param], inputs[0]) ]
continue
state.add_error(prog, "cloudflare API file '{}': unrecognized command on line {}: '{}'".format(file, linepos, param))
errors = True
continue
state.add_error(prog, "cloudflare API file '{}' has malformed expression on line {}".format(file, linepos))
errors = True
if errors:
return None
return ret | 39d5fe28f348e9e3285f55cff22f025a86f41715 | 3,650,761 |
import hmac
from base64 import b64encode
from hashlib import sha1
def hash_msg(key, msg):
    """Return the base64-encoded HMAC-SHA1 digest of msg keyed with key"""
return b64encode(hmac.new(key, msg, sha1).digest()) | 88ad5f5a2a2cad8657440b72d086c541d357ed05 | 3,650,762 |
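# --- Illustrative usage sketch (added; not part of the original record) ---
# HMAC-SHA1 digests are 20 bytes, so the base64 form is always 28 characters.
token = hash_msg(b'secret-key', b'payload')
print(len(token))  # 28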
import numpy as np
def insert_pattern(base, pattern, offset=None): #optional!
    """
    Takes a base simulation field and places a given pattern with an offset
    onto it. When offset is None, the pattern is placed at the middle.
    Parameters
    ----------
    base : numpy.ndarray
        The base simulation field. Can already hold objects.
    pattern : numpy.ndarray
        The pattern to be placed. Should fit onto the simulation field
    offset : (offset_x, offset_y), optional
        offset in the x and y directions, from the middle. The default is None.
    Raises
    ------
    ValueError
        Error is raised, if the pattern does not fit onto the simulation field.
    Returns
    -------
    field : numpy.ndarray
        The simulation field with the pattern placed.
    """
    pattern = np.array(pattern)
    new_field_config = base
    if offset is None:
        offset = (0, 0)
    # new position, measured from the middle of the field and shifted by the offset
    pos = (int(new_field_config.shape[0] / 2) + offset[0],
           int(new_field_config.shape[1] / 2) + offset[1])
    # raise if the pattern would spill over the edge of the field
    if (pos[0] + pattern.shape[0] > new_field_config.shape[0] or
            pos[1] + pattern.shape[1] > new_field_config.shape[1]):
        raise ValueError("pattern does not fit onto the simulation field")
    # overwrite new_field_config with the pattern at the desired position
    new_field_config[pos[0]:pos[0] + pattern.shape[0], pos[1]:pos[1] + pattern.shape[1]] = pattern
    return new_field_config | ba72d5c37f9ba06cd5bd3b3803147b873bc2c742 | 3,650,763
def get_specific_label_dfs(raw_df, label_loc):
"""
Purpose: Split the instances of data in raw_df based on specific labels/classes
and load them to a dictionary structured -> label : Pandas Dataframe
Params: 1. raw_df (Pandas Dataframe):
- The df containing data
2. label_loc (String):
- The location where the output labels are stored in 1. raw_df
Returns: A dictionary structured -> label : Pandas Dataframe
"""
labels = list(raw_df[label_loc].unique())
# a list of dataframes storing only instances of data belonging to one specific class/label
label_dataframes = {}
for label in labels:
label_dataframes[label] = raw_df.loc[raw_df[label_loc] == label]
return label_dataframes | 756f03f845da64f6fd5534fb786966edb8610a13 | 3,650,764 |
def run(_):
"""
Meant for running/parallelizing training data preparation
:param _: Not used
:return: Runs prep() function
"""
return prep() | 05f2c78a681d1c432ad6b55e8b43bea44fc6242c | 3,650,765 |
def cached_property_named(name, kls=_internal_jit_attr, use_cls_setattr=False):
"""
variation of `cached_property`, just with the ability to explicitly set the attribute name
Primarily of use for when the functor it's wrapping has a generic name (
`functools.partial` instances for example).
Example Usage:
>>> from snakeoil.klass import cached_property_named
>>> class foo:
...
... @cached_property_named("attr")
... def attr(self):
... print("invoked")
... return 1
>>>
>>> obj = foo()
>>> print(obj.attr)
invoked
1
>>> print(obj.attr)
1
"""
    return post_curry(kls, name, use_singleton=False, use_cls_setattr=use_cls_setattr) | b132cf46f07ac6aa6e6806c81b4e4a4e75ff9b4a | 3,650,766
def remix(tracks, n_tracks=50, n_steps=60):
"""Return new tracks generated by remixing given tracks"""
time_step = int(
np.round(next(dt for dt in sorted(tracks["Time"].diff()) if dt > 0) * 60)
)
print(
"Generating {} steps from {} steps {}s apart.".format(
n_tracks * n_steps, len(tracks), time_step
)
)
velocities_only = tracks[tracks["Turning Angle"].isnull()]["Velocity"].dropna()
velo_and_turn = tracks[tracks["Plane Angle"].isnull()][
["Velocity", "Turning Angle"]
].dropna()
remaining_data = tracks[["Velocity", "Turning Angle", "Plane Angle"]].dropna()
new_tracks = pd.DataFrame()
for i in range(n_tracks):
track_data = velo_and_turn.sample()
track_data = track_data.append(
remaining_data.sample(n_steps - 2, replace=True)
)
track_data = track_data.append(
pd.DataFrame(
{
"Velocity": velocities_only.sample()
}
)
)
new_track = silly_steps(track_data, time_step)
new_track["Track_ID"] = i
new_tracks = new_tracks.append(new_track)
if "Condition" in tracks.columns:
new_tracks["Condition"] = tracks["Condition"].iloc[0] + " Remixed"
else:
new_tracks["Condition"] = "Remixed"
return new_tracks.reset_index() | 0546af43785231fbe9c813dd54cdbbd2cbd12874 | 3,650,767 |
from datetime import datetime
import logging
def prohibition_served_recently(**args) -> tuple:
"""
Returns TRUE if the prohibition was served within the previous 3 days;
otherwise returns FALSE
"""
date_served_string = args.get('date_of_service')
config = args.get('config')
delay_days = int(config.DAYS_TO_DELAY_FOR_VIPS_DATA_ENTRY)
# Note: we have to rely on the date_served as submitted by the user -- not the date in VIPS
# Check to see if enough time has elapsed to enter the prohibition into VIPS
today = args.get('today_date')
date_served = helper.localize_timezone(datetime.strptime(date_served_string, '%Y-%m-%d'))
very_recently_served = (today - date_served).days < delay_days
if very_recently_served:
return True, args
error = 'prohibition not served within the past {} days'.format(delay_days)
args['error_string'] = error
logging.info(error)
print("date_served: {}, very_recently_served: {}".format(date_served, very_recently_served))
return False, args | fbdc32a37fd9c7cc975927309181b1ee46a4b4f2 | 3,650,768 |
import json
def project_api(request):
"""
    API endpoint for creating a project
"""
if not request.user.has_perm('home_application.can_add_project'):
return render(request, '403.html')
if request.method == 'POST':
groupId=request.POST.get('group-id','')
projectName=request.POST.get('project-name','')
domain=request.POST.get('domain','')
domainId=request.POST.get('domain-id',1)
if domainId == '1':
main_domain='.a.com.cn'
elif domainId == '2':
main_domain='.b.com'
else:
main_domain=''
new_domain=domain+main_domain
kwargs={
'groupId':groupId,
'projectName':projectName,
'domain':new_domain,
}
try:
create_project=QuerySet(**kwargs).creat_project()
            msg={'resultCode':u'200','data':create_project,'info': u'Created successfully'}
except Exception:
            msg={'resultCode':'60001','data':'','info': u'Database operation error, check for duplicate entries'}
return HttpResponse(json.dumps(msg))
    msg={'resultCode':u'60003','data':u'','info':u'Unsupported request'}
return HttpResponse(json.dumps(msg)) | 1850bf158638c7fcd8c9816fd0832818d67e27d7 | 3,650,769 |
def impute_missing_values(model,
observed_time_series,
parameter_samples,
include_observation_noise=False):
"""Runs posterior inference to impute the missing values in a time series.
This method computes the posterior marginals `p(latent state | observations)`,
given the time series at observed timesteps (a missingness mask should
be specified using `tfp.sts.MaskedTimeSeries`). It pushes this posterior back
through the observation model to impute a predictive distribution on the
observed time series. At unobserved steps, this is an imputed value; at other
steps it is interpreted as the model's estimate of the underlying noise-free
series.
Args:
model: `tfp.sts.Sum` instance defining an additive STS model.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries` including a
mask `Tensor` to encode the locations of missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
include_observation_noise: If `False`, the imputed uncertainties
represent the model's estimate of the noise-free time series at each
timestep. If `True`, they represent the model's estimate of the range of
values that could be *observed* at each timestep, including any i.i.d.
observation noise.
Default value: `False`.
Returns:
imputed_series_dist: a `tfd.MixtureSameFamily` instance with event shape
[num_timesteps] and batch shape `concat([sample_shape,
model.batch_shape])`, with `num_posterior_draws` mixture components.
#### Example
To specify a time series with missing values, use `tfp.sts.MaskedTimeSeries`:
```python
time_series_with_nans = [-1., 1., np.nan, 2.4, np.nan, 5]
observed_time_series = tfp.sts.MaskedTimeSeries(
time_series=time_series_with_nans,
is_missing=tf.math.is_nan(time_series_with_nans))
```
Masked time series can be passed to `tfp.sts` methods in place of a
`observed_time_series` `Tensor`:
```python
# Build model using observed time series to set heuristic priors.
linear_trend_model = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series)
model = tfp.sts.Sum([linear_trend_model],
observed_time_series=observed_time_series)
# Fit model to data
parameter_samples, _ = tfp.sts.fit_with_hmc(model, observed_time_series)
```
After fitting a model, `impute_missing_values` will return a distribution
```python
# Impute missing values
imputed_series_distribution = tfp.sts.impute_missing_values(
model, observed_time_series)
print('imputed means and stddevs: ',
imputed_series_distribution.mean(),
imputed_series_distribution.stddev())
```
"""
with tf.name_scope('impute_missing_values'):
[
observed_time_series,
mask
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run smoothing over the training timesteps to extract the
# predictive means and variances.
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
lgssm = model.make_state_space_model(
num_timesteps=num_timesteps, param_vals=parameter_samples)
posterior_means, posterior_covs = lgssm.posterior_marginals(
observed_time_series, mask=mask)
observation_means, observation_covs = lgssm.latents_to_observations(
latent_means=posterior_means,
latent_covs=posterior_covs)
if not include_observation_noise:
# Extract just the variance of observation noise by pushing forward
# zero-variance latents.
_, observation_noise_covs = lgssm.latents_to_observations(
latent_means=posterior_means,
latent_covs=tf.zeros_like(posterior_covs))
# Subtract out the observation noise that was added in the original
# pushforward. Note that this could cause numerical issues if the
# observation noise is very large. If this becomes an issue we could
# avoid the subtraction by plumbing `include_observation_noise` through
# `lgssm.latents_to_observations`.
observation_covs -= observation_noise_covs
# Squeeze dims to convert from LGSSM's event shape `[num_timesteps, 1]`
# to a scalar time series.
return sts_util.mix_over_posterior_draws(
means=observation_means[..., 0],
variances=observation_covs[..., 0, 0]) | 622546c31a10527aa0429c300e059aee69f2bb96 | 3,650,770 |
def grad_z(y, z, axis=0):
"""
Compute the vertical gradient
"z" can be an array same size as y, or vector along the first axis of "y"
Takes the derivative along the dimension specified by axis(=0)
"""
Nz = z.shape[0]
# Reshape the y variable
y = y.swapaxes(0, axis)
#assert y.shape[0] == Nz
z = z.swapaxes(0, axis)
#assert z.shape == (Nz,) or z.shape == y.shape
dy_dz = np.zeros_like(y)
# Second-order accurate for mid-points
ymid = 0.5*(y[1:,...]+y[0:-1,...])
zmid = 0.5*(z[1:,...]+z[0:-1,...])
dzmid = zmid[1:,...] - zmid[0:-1,...]
dzmidi = 1./dzmid
dy_dz[1:-1, ...] = (ymid[1:,...] - ymid[0:-1,...])*\
dzmidi[:,...]
# First-order accurate for top and bottom cells
dy_dz[0,...] = (y[1,...] - y[0,...])*dzmidi[0,...]
dy_dz[-1,...] = (y[-1,...] - y[-2,...])*dzmidi[-1,...]
return dy_dz.swapaxes(axis, 0) | 8558110580476a509735ebdc8db011806c4266fa | 3,650,771 |
def wang_ryzin_reg(h, Xi, x):
"""
    A version of the Wang-Ryzin kernel for nonparametric regression.
Suggested by Li and Racine in [1] ch.4
"""
return h ** abs(Xi - x) | f87c15df408c9307c82a7bc0ab7bb700cac71f41 | 3,650,772 |
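# --- Illustrative worked example (added; not part of the original record) ---
# The kernel is h ** |Xi - x|: with bandwidth h = 0.5 and |Xi - x| = 2 it
# evaluates to 0.25, decaying geometrically with the ordered-category distance.
print(wang_ryzin_reg(0.5, 3, 5))  # 0.25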
def get_all_input_values(corpus_weights):
"""
Returns all relevant input values
"""
azerty = get_azerty()
letters = get_letters()
characters = get_characters()
keyslots = get_keyslots()
similarity_c_c = get_character_similarities()
similarity_c_l = get_character_letter_similarities()
distance_level_0, distance_level_1 = get_distances()
# read in single probabilities
p_single, p_bigram = get_probabilities(corpus_weights)
ergonomics = get_ergonomics()
performance = get_performance()
return azerty, \
characters, \
keyslots, \
letters, \
p_single, p_bigram, \
performance, \
similarity_c_c, similarity_c_l, \
distance_level_0, distance_level_1, \
ergonomics | f7b1054f34a11cde66c0506f1fb38d663ffc3645 | 3,650,773 |
def bulk_rename(doctype, rows=None, via_console = False):
"""Bulk rename documents
:param doctype: DocType to be renamed
:param rows: list of documents as `((oldname, newname), ..)`"""
if not rows:
frappe.throw(_("Please select a valid csv file with data"))
if not via_console:
max_rows = 500
if len(rows) > max_rows:
frappe.throw(_("Maximum {0} rows allowed").format(max_rows))
rename_log = []
for row in rows:
# if row has some content
if len(row) > 1 and row[0] and row[1]:
try:
if rename_doc(doctype, row[0], row[1], rebuild_search=False):
msg = _("Successful: {0} to {1}").format(row[0], row[1])
frappe.db.commit()
else:
msg = _("Ignored: {0} to {1}").format(row[0], row[1])
except Exception as e:
msg = _("** Failed: {0} to {1}: {2}").format(row[0], row[1], repr(e))
frappe.db.rollback()
if via_console:
print(msg)
else:
rename_log.append(msg)
frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype', doctype=doctype)
if not via_console:
return rename_log | c495a70ff7becf076c42ded40ab6c4497e047c9c | 3,650,774 |
import numpy as np
def mag_to_flux_AB(mag, mag_err):
    """Calculate flux in erg s-1 cm-2 Hz-1 from an AB magnitude."""
    flux = 10 ** (-.4 * (mag + 48.6))
    flux_err = abs(-.4 * flux * np.log(10) * mag_err)
    return flux, flux_err | 1d56802fe2803d3ed2fa50c3e6ce06ebcb3add01 | 3,650,775
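# --- Illustrative worked example (added; not part of the original record) ---
# mag = 0 recovers the AB zero point: 10 ** (-0.4 * 48.6) ~= 3.63e-20
# erg s-1 cm-2 Hz-1 (i.e. 3631 Jy).
flux, flux_err = mag_to_flux_AB(0.0, 0.01)
print(flux)  # ~3.63e-20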
def django_admin_navtree(request, context):
"""show menu"""
if request and request.user.is_staff:
coop_cms_navtrees = context.get('coop_cms_navtrees', None) or []
tree_class = get_navtree_class()
admin_tree_name = "{0}_{1}".format(get_model_app(tree_class), get_model_name(tree_class))
if len(coop_cms_navtrees) == 1:
tree = coop_cms_navtrees[0]
url = reverse('admin:{0}_change'.format(admin_tree_name), args=[tree.id])
label = _('Navigation tree')
else:
url = reverse('admin:{0}_changelist'.format(admin_tree_name))
label = _('Navigation trees')
return make_link(
url, label, 'leaf',
classes=['icon', 'alert_on_click']
) | 5ba3f27c09146e72cb4893529985e818295f63c0 | 3,650,776 |
from collections import Counter
import pandas as pd
def get_pos_tags(student_comment: pd.Series) -> pd.DataFrame:
    """Get the POS (part of speech) tags for each of the words in the student
    comments
    Keyword arguments
    student_comment -- a pandas Series of spacy.tokens.doc.Doc objects
    """
# Count how many of each pos tags are in each comment
pos_tags = student_comment.apply(lambda x: Counter([token.pos_ for token in x]))
# Expand the list column into several columns
pos_tags_df = pos_tags.apply(pd.Series).fillna(0)
return pos_tags_df | f04cc91a7a41d65a69ee6ef8d730b98f2ad2ed6c | 3,650,777 |
def get_axis(array, axis, slice_num):
"""Returns a fixed axis"""
slice_list = [slice(None)] * array.ndim
slice_list[axis] = slice_num
slice_data = array[tuple(slice_list)].T # transpose for proper orientation
return slice_data | 558d4f8f8725c752c225e6958881fc18eeeab35e | 3,650,779 |
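# --- Illustrative usage sketch (added; not part of the original record) ---
# Fixing axis 0 at index 1 of a (2, 3, 4) volume returns that slice transposed,
# so the result has shape (4, 3).
import numpy as np
arr = np.arange(24).reshape(2, 3, 4)
print(get_axis(arr, 0, 1).shape)  # (4, 3)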
import pandas as pd
def expand_name_df(df,old_col,new_col):
"""Takes a dataframe df with an API JSON object with nested elements in old_col,
extracts the name, and saves it in a new dataframe column called new_col
Parameters
----------
df : dataframe
old_col : str
new_col : str
Returns
-------
df : dataframe
"""
def expand_name(nested_name):
"""Takes an API JSON object with nested elements and extracts the name
Parameters
----------
nested_name : JSON API object
Returns
-------
object_name : str
"""
if pd.isnull(nested_name):
object_name = 'Likely Missing'
else:
object_name = nested_name['name']
return object_name
df[new_col] = df[old_col].apply(expand_name)
return df | d39209f71719afa0301e15d95f31d98b7949f6b3 | 3,650,781 |
def image_rpms_remove_if_exists(rpmlist):
"""
`image.rpms_remove_if_exists(["baz"])` removes `baz.rpm` if exists.
Note that removals may only be applied against the parent layer -- if your
current layer includes features both removing and installing the same
package, this will cause a build failure.
"""
return _build_rpm_feature(
rpmlist,
"remove_if_exists",
needs_version_set = False,
) | b0de424627b5010e8d6d351280418425cde7981a | 3,650,782 |
import itertools
def expand_set(mySet):
""" pass in a set of genome coords, and it will 'expand' the indels
within the set by adding +/- 3 bp copies for each one """
returnSet = []
for entry in mySet:
l0 = []
l1 = []
try:
sub0 = entry.split('-')[0] # split on `-`
sub1 = entry.split('-')[1] # this guy is good
sub00 = sub0.split(':')[1] # split on :, need to get rid of chrom
chrom = sub0.split(':')[0]
if sub00 != sub1: # got an indel
sub00_1 = int(sub00) + 1
sub00_2 = int(sub00) + 2
sub00_3 = int(sub00) + 3
sub00_4 = int(sub00) - 1
sub00_5 = int(sub00) - 2
sub00_6 = int(sub00) - 3
l0.extend((sub00_1, sub00_2, sub00_3, sub00_4, sub00_5, sub00_6))
try:
sub1_1 = int(sub1) + 1
sub1_2 = int(sub1) + 2
sub1_3 = int(sub1) + 3
sub1_4 = int(sub1) - 1
sub1_5 = int(sub1) - 2
sub1_6 = int(sub1) - 3
l1.extend((sub1_1, sub1_2, sub1_3, sub1_4, sub1_5, sub1_6))
except ValueError:
continue
coord_combos = list(itertools.product(l0, l1))
for pair in coord_combos:
toAdd = chrom + ':' + str(pair[0]) + '-' + str(pair[1])
returnSet.append(toAdd)
else:
returnSet.append(entry)
except IndexError:
continue
return returnSet | 4ccbff705654b5f5b89c59bb13df9fad6cba42db | 3,650,783 |
def trapezoid(t, depth, bigT, littleT):
"""Trapezoid shape for model
INPUT:
t - [float] vector of independent values to evaluate
trapezoid model
depth - [float] depth of trapezoid
bigT - [float] full trapezoid duration
littleT - [float] 'ingress/egress' duration
OUTPUT:
output - [float] vector of trapezoid model values
"""
output = np.full_like(t, 1.0)
t = np.abs(t)
output = np.where(t <= bigT/2.0 - littleT/2.0, 1.0 - depth, output)
output = np.where(np.logical_and(t > bigT/2.0 - littleT/2.0, \
t < bigT/2.0 + littleT/2.0), \
1.0 - depth + ((depth/littleT)* \
(t-bigT/2.0 + littleT/2.0)), output)
return output | 18021c6aff07931d6086d394a77f5ab8ce460f78 | 3,650,784 |
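# --- Illustrative usage sketch (added; not part of the original record) ---
# Inside |t| <= bigT/2 - littleT/2 the model sits at 1 - depth; beyond
# |t| >= bigT/2 + littleT/2 it returns to the 1.0 baseline.
import numpy as np
t = np.array([-1.0, 0.0, 1.0])
print(trapezoid(t, depth=0.01, bigT=1.0, littleT=0.1))  # [1.   0.99 1.  ]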
import re
def clean_username(username=''):
""" Simple helper method to ensure a username is compatible with our system requirements. """
return ('_').join(re.findall(r'[a-zA-Z0-9\-]+', username))[:USERNAME_MAX_LENGTH] | 98e563eaa04ce98b4f4d71b0b6229ca62324011a | 3,650,785 |
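# --- Illustrative usage sketch (added; not part of the original record) ---
# Runs of characters outside [a-zA-Z0-9-] become single underscores; the result
# is then truncated to the module-level USERNAME_MAX_LENGTH (assumed >= ~20 here).
print(clean_username('user.name@example.com'))  # user_name_example_com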
import ast
def hy_compile(tree, module_name, root=ast.Module, get_expr=False):
"""
Compile a HyObject tree into a Python AST Module.
    If `get_expr` is True, return a tuple (module, last_expression), where
    `last_expression` is an `ast.Expression` wrapping the final expression in the tree.
"""
body = []
expr = None
if not isinstance(tree, HyObject):
tree = wrap_value(tree)
if not isinstance(tree, HyObject):
raise HyCompileError("`tree` must be a HyObject or capable of "
"being promoted to one")
spoof_positions(tree)
compiler = HyASTCompiler(module_name)
result = compiler.compile(tree)
expr = result.force_expr
if not get_expr:
result += result.expr_as_stmt()
module_docstring = None
if (PY37 and result.stmts and
isinstance(result.stmts[0], ast.Expr) and
isinstance(result.stmts[0].value, ast.Str)):
module_docstring = result.stmts.pop(0).value.s
body = compiler.imports_as_stmts(tree) + result.stmts
ret = root(body=body, docstring=(
None if module_docstring is None else module_docstring))
if get_expr:
expr = ast.Expression(body=expr)
ret = (ret, expr)
return ret | 0aea27067aae9d517ada9a7936b2ad29506346a5 | 3,650,786 |
def user_dss_clients(dss_clients, dss_target):
"""
    Fixture that narrows down the dss clients to only the ones that are relevant for the current DSS target.
    Args:
        dss_clients (fixture): All the instantiated dss clients for each user and dss target
dss_target (fixture): The considered DSS target for the test to be executed
Returns:
A dict of dss client instances for the current DSS target and each of its specified users.
"""
return dss_clients[dss_target] | 7d418b49b68d7349a089046837f3c8351c0dcc67 | 3,650,788 |
def build_log(x: np.ndarray) -> np.ndarray:
"""
Logarithmic expansion.
:param x: features
:return: augmented features
"""
expanded = np.ones((x.shape[0], 1))
expanded = np.hstack((expanded, np.nan_to_num(np.log(x))))
return expanded | 7d46b9e7ca4dd7af99da97e3d1faa67f18894fe5 | 3,650,789 |
def all_multibert_finetune_glue(m:Manager, task_name:str='MRPC')->BertGlue:
""" Finetune milti-lingual base-BERT on GLUE dataset
Ref. https://github.com/google-research/bert/blob/master/multilingual.md
"""
refbert=all_fetch_multibert(m)
refglue=all_fetchglue(m)
vocab=mklens(refbert).bert_vocab.refpath
glueref=glue_tfrecords(m, task_name, bert_vocab=vocab,
lower_case=mklens(refbert).cased.val==False, refdataset=refglue)
return bert_finetune_glue(m,refbert,glueref) | 18785c804e9539922cc8fdae2517e2c9221f5d13 | 3,650,790 |
from typing import Union
import hashlib
def hash_eth2(data: Union[bytes, bytearray]) -> Hash32:
"""
Return SHA-256 hashed result.
Note: this API is currently under active research/development so is subject to change
without a major version bump.
Note: it's a placeholder and we aim to migrate to a S[T/N]ARK-friendly hash function in
a future Ethereum 2.0 deployment phase.
"""
return Hash32(hashlib.sha256(data).digest()) | fa43689abac95e54e984f67c623c2242d19bb52c | 3,650,791 |
import zipfile
def read_data(data_dir="../main/datasets/", data_file=DATA_FILE):
"""Returns the data, in order infos, items, orders"""
    with zipfile.ZipFile(data_dir + data_file) as z:
dfs = []
for name in ["infos", "items", "orders"]:
dfs.append(pd.read_csv(z.open(f"1.0v/{name}.csv"), sep="|"))
return dfs | c85f730bfd10fe2f3830ed9d29443b2f99c8deb6 | 3,650,792 |
def get_sprints(root_project_id, rally_number=None):
"""Get list of sprint projects.
Args:
root_project_id: Synapse Project ID with admin annotations,
including the sprint table ID.
rally_number: An integer rally number. If None, return sprints
from all rallies.
Returns:
A Pandas data frame of sprint information from the
Sprint Synapse table.
"""
syn = Synapse().client()
root_project = syn.get(root_project_id)
table_id = root_project.annotations.sprintTableId[0]
tbl = syn.tableQuery("select * from %s" % (table_id, ))
data_frame = tbl.asDataFrame()
if rally_number:
data_frame = data_frame[data_frame.rally == rally_number]
return data_frame | 530ea32640ccfa44b3493eef470a226631e3dd34 | 3,650,793 |
def out_of_bounds(maze: Array, x: int, y: int):
""" Return true if x, y is out of bounds """
w, h = maze.shape
is_x_out = (x < 0) + (x >= w)
is_y_out = (y < 0) + (y >= h)
return is_x_out + is_y_out | e1572dedc76ad979d5053b966b313a31cf22b257 | 3,650,795 |
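# --- Illustrative usage sketch (added; not part of the original record) ---
# The boolean sums make the return value truthy (non-zero) outside the grid
# and 0 inside it.
import numpy as np
maze = np.zeros((5, 5))
print(out_of_bounds(maze, 5, 2))  # 1 (outside)
print(out_of_bounds(maze, 2, 3))  # 0 (inside)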
import json
def cache_set(apollo_client, name, val):
"""
    Save the value to Redis under the given name
:return:
"""
r = redis_handler(apollo_client)
try:
res = r.set(name=name, value=json.dumps(val))
except Exception as e:
logger.error("Storage {} to cache failed!{}".format(name, e.__str__()))
return None
else:
logger.info("{} values {}".format(name, val))
return res | d849ab7d0d65a445530dc61d427571d2bdd97a76 | 3,650,796 |
def guess_pyramid(data):
"""If shape of arrays along first axis is strictly decreasing.
"""
# If the data has ndim and is not one-dimensional then cannot be pyramid
if hasattr(data, 'ndim') and data.ndim > 1:
return False
size = np.array([np.prod(d.shape, dtype=np.uint64) for d in data])
if len(size) > 1:
return np.all(size[:-1] > size[1:])
else:
return False | f63dffe0c0ac5b36b752a7aeaa8baf4b4cc8480a | 3,650,797 |
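# --- Illustrative usage sketch (added; not part of the original record) ---
# A list of arrays with strictly decreasing sizes is treated as a pyramid;
# a single multi-dimensional array is not.
import numpy as np
print(guess_pyramid([np.zeros((8, 8)), np.zeros((4, 4)), np.zeros((2, 2))]))  # True
print(guess_pyramid(np.zeros((8, 8))))  # False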
def create_menu(menu_items, parent=None):
"""
Create the navigation nodes based on a passed list of dicts
"""
nodes = []
for menu_dict in menu_items:
try:
label = menu_dict['label']
except KeyError:
raise ImproperlyConfigured(
"No label specified for menu item in dashboard")
children = menu_dict.get('children', [])
if children:
node = Node(label=label, icon=menu_dict.get('icon', None),
access_fn=menu_dict.get('access_fn', None))
create_menu(children, parent=node)
else:
node = Node(label=label, icon=menu_dict.get('icon', None),
url_name=menu_dict.get('url_name', None),
url_kwargs=menu_dict.get('url_kwargs', None),
url_args=menu_dict.get('url_args', None),
access_fn=menu_dict.get('access_fn', None))
if parent is None:
nodes.append(node)
else:
parent.add_child(node)
return nodes | 0f54399e2101d6e3e4eff094041c92ea7d8eb069 | 3,650,798 |
def get_tg_ids(db):
"""Obtain a list of recognized Telegram user IDs.
Args:
db: Database connector
Returns:
Query results for later iteration
"""
return db.query(QUERY_TG_IDS) | 0b4a2fccbd42024f53a970d2e1cf52481480230d | 3,650,799 |
import json
def solve_with_log(board, out_fname):
"""Wrapper for solve: write log to out_fname"""
log = []
ret = solve(board, log)
with open(out_fname, 'w') as f:
f.write(json.dumps({'model': log}, indent=4))
return ret | c550980f252df724d68f9eb22159463e361997bc | 3,650,800 |
def discrepancy(sample, bounds=None):
"""Discrepancy.
Compute the centered discrepancy on a given sample.
It is a measure of the uniformity of the points in the parameter space.
The lower the value is, the better the coverage of the parameter space is.
Parameters
----------
sample : array_like (n_samples, k_vars)
The sample to compute the discrepancy from.
bounds : tuple or array_like ([min, k_vars], [max, k_vars])
Desired range of transformed data. The transformation apply the bounds
on the sample and not the theoretical space, unit cube. Thus min and
max values of the sample will coincide with the bounds.
Returns
-------
discrepancy : float
Centered discrepancy.
References
----------
[1] Fang et al. "Design and modeling for computer experiments",
Computer Science and Data Analysis Series Science and Data Analysis
Series, 2006.
"""
sample = np.asarray(sample)
n_sample, dim = sample.shape
# Sample scaling from bounds to unit hypercube
if bounds is not None:
min_ = bounds.min(axis=0)
max_ = bounds.max(axis=0)
sample = (sample - min_) / (max_ - min_)
abs_ = abs(sample - 0.5)
disc1 = np.sum(np.prod(1 + 0.5 * abs_ - 0.5 * abs_ ** 2, axis=1))
prod_arr = 1
for i in range(dim):
s0 = sample[:, i]
prod_arr *= (1 +
0.5 * abs(s0[:, None] - 0.5) + 0.5 * abs(s0 - 0.5) -
0.5 * abs(s0[:, None] - s0))
disc2 = prod_arr.sum()
c2 = ((13.0 / 12.0) ** dim - 2.0 / n_sample * disc1 +
1.0 / (n_sample ** 2) * disc2)
return c2 | f54cf5efa3cf12410d5522971983d41ea767767f | 3,650,801 |
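# --- Illustrative usage sketch (added; not part of the original record) ---
# A 2x2 grid design in the unit square; the result is a scalar, and lower
# values indicate a more uniform covering of the parameter space.
import numpy as np
sample = np.array([[0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]])
print(discrepancy(sample))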
def rz(psi, r):
"""
Wrapper for ERFA function ``eraRz``.
Parameters
----------
psi : double array
r : double array
Returns
-------
r : double array
Notes
-----
The ERFA documentation is below.
- - - - - -
e r a R z
- - - - - -
Rotate an r-matrix about the z-axis.
Given:
psi double angle (radians)
Given and returned:
r double[3][3] r-matrix, rotated
Notes:
1) Calling this function with positive psi incorporates in the
supplied r-matrix r an additional rotation, about the z-axis,
anticlockwise as seen looking towards the origin from positive z.
2) The additional rotation can be represented by this matrix:
( + cos(psi) + sin(psi) 0 )
( )
( - sin(psi) + cos(psi) 0 )
( )
( 0 0 1 )
Copyright (C) 2013-2017, NumFOCUS Foundation.
Derived, with permission, from the SOFA library. See notes at end of file.
"""
r = ufunc.rz(psi, r, r)
return r | 1ea4e9322ba187e91d3b976d74d416ae99a74ee6 | 3,650,802 |
import locale
def _get_ticklabels(band_type, kHz, separator):
"""
Return a list with all tick labels for octave or third octave bands cases.
"""
if separator is None:
separator = locale.localeconv()['decimal_point']
if band_type == 'octave':
if kHz is True:
ticklabels = TICKS_OCTAVE_KHZ
else:
ticklabels = TICKS_OCTAVE
else:
if kHz is True:
ticklabels = TICKS_THIRD_OCTAVE_KHZ
else:
ticklabels = TICKS_THIRD_OCTAVE
ticklabels = _set_separator(ticklabels, separator)
return ticklabels | 95ebdc670a23fdb8561a431e863901df6734fdb9 | 3,650,803 |
def SpearmanP(predicted, observed):
"""abstracts out p from stats.spearmanr"""
if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
return np.asarray([np.nan])
coef, p = stats.spearmanr(np.squeeze(predicted).astype(float), np.squeeze(observed).astype(float))
return p | 41986483ea3d466d94af5c86cedee62165d81d98 | 3,650,804 |
def get_zebra_route_type_by_name(route_type='BGP'):
"""
Returns the constant value for Zebra route type named "ZEBRA_ROUTE_*"
from its name.
See "ZEBRA_ROUTE_*" constants in "ryu.lib.packet.zebra" module.
:param route_type: Route type name (e.g., Kernel, BGP).
:return: Constant value for Zebra route type.
"""
return getattr(zebra, "ZEBRA_ROUTE_%s" % route_type.upper()) | 8cdc3a8384f71c4c04172a8c37f51e3789929e42 | 3,650,805 |
def preprocess(arr):
"""Preprocess image array with simple normalization.
Arguments:
----------
arr (np.array): image array
Returns:
--------
arr (np.array): preprocessed image array
"""
arr = arr / 255.0
arr = arr * 2.0 - 1.0
return arr | 3bccf2f4433c4da62954db4f25f5e9bfabc03c3a | 3,650,806 |
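# --- Illustrative usage sketch (added; not part of the original record) ---
# Pixel values 0, 127.5 and 255 map to -1, 0 and 1 respectively.
import numpy as np
print(preprocess(np.array([0.0, 127.5, 255.0])))  # [-1.  0.  1.]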
def remove_const(type):
"""removes const from the type definition
If type is not const type, it will be returned as is
"""
nake_type = remove_alias(type)
if not is_const(nake_type):
return type
else:
return nake_type.base | b00d7cca79222d5ac2b6a12019b73a8169df96b7 | 3,650,807 |