content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def isnum(txt):
"""Return True if @param txt, is float"""
try:
float(txt)
return True
except TypeError:
return False
except ValueError:
return False | c4f3aa3769810439d02312d0b616bd18c45a3da7 | 4,307 |
def xhr(func):
"""A decorator to check for CSRF on POST/PUT/DELETE using a <form>
element and JS to execute automatically (see #40 for a proof-of-concept).
When an attacker uses a <form> to downvote a comment, the browser *should*
add a `Content-Type: ...` header with three possible values:
* application/x-www-form-urlencoded
* multipart/form-data
* text/plain
If the header is not sent or requests `application/json`, the request is
not forged (XHR is restricted by CORS separately).
"""
def dec(self, env, req, *args, **kwargs):
if req.content_type and not req.content_type.startswith("application/json"):
raise Forbidden("CSRF")
return func(self, env, req, *args, **kwargs)
return dec | 9a721d5eaa90be92cc05aa565b7ceb57fd209c7d | 4,308 |
def updateStyle(style, **kwargs):
"""Update a copy of a dict or the dict for the given style"""
if not isinstance(style, dict):
style = getStyle(style) # look up the style by name
style = style.copy()
style.update(**kwargs)
return style | 57db589ff5b1b7d3e6eb3874826f77f5572c2aa7 | 4,309 |
def lift2(f, a, b):
"""Apply f => (a -> b -> c) -> f a -> f b -> f c"""
return a.map(f).apply_to(b) | 6b54cacf23ac4acb9bf9620cd259aa6f9630bbc3 | 4,310 |
def project_version(file_path=settings.PROJECT_VERSION_FILE):
"""Project version."""
try:
with open(file_path) as file_obj:
version = file_obj.read()
return parse_version(version)
except Exception:
pass
return None | 4dd4bc6d9ae4570fa1278a709d45d12e8a7a2f8e | 4,311 |
import numpy
def readBinary(fileName):
"""Read a binary FIXSRC file."""
with FIXSRC(fileName, "rb", numpy.zeros((0, 0, 0, 0))) as fs:
fs.readWrite()
return fs.fixSrc | 053c21a5cbc5b9c5b31eed94cfbea171d1a37d5e | 4,312 |
def short_whitelist(whitelist):
"""A condensed version of the whitelist."""
for x in ["guid-4", "guid-5"]:
whitelist.remove(x)
return whitelist | e0de5f4f8c86df301af03c9362b095f330bffc14 | 4,313 |
def extract_paths(actions):
"""
<Purpose>
Given a list of actions, it extracts all the absolute and relative paths
from all the actions.
<Arguments>
actions: A list of actions from a parsed trace
<Returns>
absolute_paths: a list with all absolute paths extracted from the actions
relative_paths: a list with all relative paths extracted from the actions
"""
absolute_paths = []
relative_paths = []
actions_with_path = ['open', 'creat', 'statfs', 'access', 'stat',
'link', 'unlink', 'chdir', 'rmdir', 'mkdir']
for action in actions:
# get the name of the syscall and remove the "_syscall" part at the end.
action_name = action[0][:action[0].find("_syscall")]
# we only care about actions containing paths
if action_name not in actions_with_path:
continue
# we only care about paths that exist
action_result = action[2]
if action_result == (-1, 'ENOENT'):
continue
path = action[1][0]
if path.startswith("/"):
if path not in absolute_paths:
absolute_paths.append(path)
else:
if path not in relative_paths:
relative_paths.append(path)
# get the second path of link
if action_name == "link":
path = action[1][1]
if path.startswith("/"):
if path not in absolute_paths:
absolute_paths.append(path)
else:
if path not in relative_paths:
relative_paths.append(path)
return absolute_paths, relative_paths | 94aaf8fef1a7c6d3efd8b04c980c9e87ee7ab4ff | 4,314 |
def str2int(s):
"""converts a string to an integer with the same bit pattern (little endian!!!)"""
r = 0
for c in s:
r <<= 8
r += ord(c)
return r | 0dd190b8711e29e12be8cc85a641d9a68251205b | 4,315 |
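A small check of str2int (illustrative only; assumes the function above is in scope). Note that the first character of the string ends up in the most significant byte of the result:

# "A" is 0x41 and "B" is 0x42, so "AB" packs to 0x4142.
assert str2int("AB") == 0x4142   # 16706
assert str2int("") == 0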
def load_operations_from_docstring(docstring):
"""Return a dictionary of OpenAPI operations parsed from a
docstring.
"""
doc_data = load_yaml_from_docstring(docstring)
return {
key: val for key, val in iteritems(doc_data)
if key in PATH_KEYS or key.startswith('x-')
} | 6199a7e8b0c1cdb67f043e656d0797906fbf8bae | 4,316 |
import numpy as np
import torch
def inference_model(model, img):
"""Inference image(s) with the classifier.
Args:
model (nn.Module): The loaded segmentor.
img (str/ndarray): The image filename or loaded image.
Returns:
result (list of dict): The segmentation results that contains: ...
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
if isinstance(img, str):
if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile':
cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile'))
data = dict(img_info=dict(filename=img), img_prefix=None)
else:
if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile':
cfg.data.test.pipeline.pop(0)
data = dict(img=img)
test_pipeline = Compose(cfg.data.test.pipeline)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
# forward the model
with torch.no_grad():
scores = model(return_loss=False, **data)
model_out = scores[0]
m_shape = model_out.shape
mask = np.zeros([m_shape[0], m_shape[1], 3], dtype=np.uint8)
for i in range(3):
mask[:, :, i] = model_out
ann = sly.Annotation.from_img_path(img)
for idx, class_name in enumerate(model.CLASSES, 1): # curr_col2cls.items():
mask_bools = np.all(mask == idx, axis=2) # exact match (3-channel img & rgb color)
if mask_bools.sum() == 0:
# raise
continue
bitmap = sly.Bitmap(data=mask_bools)
obj_class = g.meta.get_obj_class(class_name)
# obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Bitmap)
ann = ann.add_label(sly.Label(bitmap, obj_class))
# clear used pixels in mask to check missing colors, see below
return ann.to_json() | 814ab13018245e51e5c32e43f2c6c67020d4e7dd | 4,317 |
import numpy as np
def run_egad(go, nw, **kwargs):
"""EGAD running function
Wrapper to lower level functions for EGAD
EGAD measures modularity of gene lists in co-expression networks.
This was translated from the MATLAB version, which does tiled Cross Validation
The useful kwargs are:
int - nFold : Number of CV folds to do, default is 3,
int - {min,max}_count : limits for number of terms in each gene list, these are exclusive values
Arguments:
go {pd.DataFrame} -- dataframe of genes x terms of values [0,1], where 1 is included in gene lists
nw {pd.DataFrame} -- dataframe of co-expression network, genes x genes
**kwargs
Returns:
pd.DataFrame -- dataframe of terms x metrics where the metrics are
['AUC', 'AVG_NODE_DEGREE', 'DEGREE_NULL_AUC', 'P_Value']
"""
assert nw.shape[0] == nw.shape[1] , 'Network is not square'
assert np.all(nw.index == nw.columns) , 'Network index and columns are not in the same order'
nw_mask = nw.isna().sum(axis=1) != nw.shape[1]
nw = nw.loc[nw_mask, nw_mask].astype(float)
np.fill_diagonal(nw.values, 1)
return _runNV(go, nw, **kwargs) | 816a4c71830b0c576d045c3e413327b8927a7a5e | 4,318 |
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
def generic_plot(xy_curves, title, save_path, x_label=None, y_label=None, formatter=None, use_legend=True, use_grid=True, close=True, grid_spacing=20, yaxis_sci=False):
"""
:param xy_curves: iterable of curve objects exposing x, y, style, label and optionally color
:param title: plot title
:param x_label: optional x-axis label
:param y_label: optional y-axis label
:param formatter: optional x-axis tick formatter (applied when not None)
:param save_path: path to save the figure to, or None to skip saving
:param use_legend: whether to draw a legend
:param use_grid: whether to draw a grid
:return: the matplotlib figure
"""
fig, ax = plt.subplots()
plt.title(title)
plt.grid(use_grid)
for curve in xy_curves:
if curve.color is not None:
ax.plot(curve.x, curve.y, curve.style, label=curve.label, color=curve.color)
else:
ax.plot(curve.x, curve.y, curve.style, label=curve.label)
if formatter is not None:
ax.xaxis.set_major_formatter(plt.FuncFormatter(format_xtick))
ax.xaxis.set_major_locator(MultipleLocator(grid_spacing))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2e'))
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
#ax.margins(0.05)
if use_legend:
ax.legend()
"""if yaxis_sci:
ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0), useOffset=None)"""
if save_path is not None:
plt.savefig(save_path, bbox_inches='tight', transparent=True)
if close:
plt.close('all')
return fig | f598578bfb6d63f9e9575223ecc25cdc9ace8082 | 4,320 |
from pathlib import Path
import io
def dump(module, path, **kwargs):
"""Serialize *module* as PVL text to the provided *path*.
:param module: a ``PVLModule`` or ``dict``-like object to serialize.
:param path: an :class:`os.PathLike`
:param ``**kwargs``: the keyword arguments to pass to :func:`dumps()`.
If *path* is an :class:`os.PathLike`, it will attempt to be opened
and the serialized module will be written into that file via
the :func:`pathlib.Path.write_text()` function, and will return
what that function returns.
If *path* is not an :class:`os.PathLike`, it will be assumed to be an
already-opened file object, and ``.write()`` will be applied
on that object to write the serialized module, and will return
what that function returns.
"""
try:
p = Path(path)
return p.write_text(dumps(module, **kwargs))
except TypeError:
# Not an os.PathLike, maybe it is an already-opened file object
try:
if isinstance(path, io.TextIOBase):
return path.write(dumps(module, **kwargs))
else:
return path.write(dumps(module, **kwargs).encode())
except AttributeError:
# Not a path, not an already-opened file.
raise TypeError(
"Expected an os.PathLike or an already-opened "
"file object for writing, but got neither."
) | 3c0d145883c4787ba0a69a3006ed5680dfba952b | 4,321 |
def get_user_labels(client: Client, *_):
""" Returns all user Labels
Args:
client: Client
"""
labels = client.get_user_labels_request()
contents = []
for label in labels:
contents.append({
'Label': label
})
context = {
'Exabeam.UserLabel(val.Label && val.Label === obj.Label)': contents
}
human_readable = tableToMarkdown('Exabeam User Labels', contents)
return human_readable, context, labels | a0ed0f71d2f39ef32a6fcdfd7586017e67957a6d | 4,322 |
def hdf5_sample(sample, request):
"""Fixture which provides the filename of a HDF5 tight-binding model."""
return sample(request.param) | 7d3320f5e4ce84bfa2cf0dbc1b5f321b2e3f6df8 | 4,323 |
import numpy as np
def get_graph_embedding_features(fn='taxi_all.txt'):
"""
Get graph embedding vector, which is generated from LINE
"""
ge = []
with open(fn, 'r') as fin:
fin.readline()
for line in fin:
ls = line.strip().split(" ")
ge.append([float(i) for i in ls])
ge = np.array(ge)
ge = ge[np.argsort(ge[:,0])]
return ge[:,1:] | 5710714ad3dea46ee64cf9fcacdfbdfec37c8c1c | 4,324 |
def sub_0_tron(D, Obj, W0, eta=1e0, C=1.0, rtol=5e-2, atol=1e-4,
verbose=False):
"""Solve the Sub_0 problem with tron+cg is in lelm-imf."""
W, f_call = W0.copy(), (f_valp, f_grad, f_hess)
tron(f_call, W.reshape(-1), n_iterations=5, rtol=rtol, atol=atol,
args=(Obj, D, eta, C), verbose=verbose)
return W | 54b4ae6f884da13fe538da6c7d8cb38fc05e2e46 | 4,325 |
def find_flavor_name(nova_connection: NovaConnection, flavor_id: str):
"""
Find all flavor name from nova_connection with the id flavor_id
:param nova_connection: NovaConnection
:param flavor_id: str flavor id to find
:return: list of flavors name
"""
flavor_list = []
for flavor in nova_connection.connection.flavors.list():
flavor_info = dict(flavor.to_dict())
if 'id' in flavor_info and 'name' in flavor_info and flavor_info['id'] == flavor_id:
flavor_list.append(flavor_info['name'])
return flavor_list | c7252ad2b1f2f0676c4b2fed0304f6ddfe64f97f | 4,326 |
def __format_focal_length_tuple(_tuple):
"""format FocalLenght tuple to short printable string
we ignore the position after the decimal point
because it is usually not very essential for focal length
"""
if (isinstance(_tuple,tuple)):
numerator = _tuple[0]
divisor = _tuple[1]
else:
numerator=_tuple.numerator
divisor=_tuple.denominator
if numerator == 0:
return get_zero_value_ersatz()
if numerator % 10 == 0 and divisor % 10 == 0:
# example: change 110/10 -> 11
numerator = numerator // 10
divisor = divisor // 10
if divisor == 1:
# example: change 8/1 to 8mm
_string = f"{numerator}mm"
else:
# example: 524/10 -> 52mm
# we ignore the position after the decimal point
# because it is usually not very essential for focal length
_string = f"{numerator//divisor}mm"
return _string | c27feee47f07558a822acaf57cd6a8a8a1a61c3f | 4,328 |
import json
def submit_resume_file(request):
""" Submit resume """
resume_file = request.FILES['json_file']
# print('resume file=%s' % resume_file)
file_content = resume_file.read()
data = json.loads(file_content.decode('utf-8'))
response = create_resume(data, request.user)
return response | 59f359d3c9c915f8ec228d3fb6ef17cc15ebac77 | 4,329 |
def inner_E_vals(vec):
"""
Returns a list of the centered terms (x_i - mean) of the expectation, without
dividing by the length or by the length minus one. This is meant to be used in
conjunction with an inner product of two inner_E_vals() lists to
compute variance or covariance.
"""
out = [None] * len(vec)
dm = data_mean(vec)
for i, item in enumerate(vec):
out[i] = item - dm
return(out) | a470674f899341c2ded1bdc682586274daa76ff0 | 4,330 |
def count_infected(pop):
"""
counts number of infected
"""
return sum(p.is_infected() for p in pop) | 03b3b96994cf4156dcbe352b9dafdd027de82d41 | 4,331 |
def esum(expr_iterable):
"""
Expression sum
:param expr_iterable: iterable of Expression objects to sum
:return:
"""
var_dict = {}
constant = 0
for expr in expr_iterable:
for (var_name, coef) in expr.var_dict.items():
if var_name not in var_dict:
var_dict[var_name] = coef
else:
var_dict[var_name] += coef
constant += expr.constant
return Expression.from_var_dict(
var_dict,
constant
) | e63d574c4442843febf66e0326780d6ffb3ba647 | 4,332 |
def _prompt(func, prompt):
"""Prompts user for data. This is for testing."""
return func(prompt) | c5e8964e5b3a3d0a222e167341a44d8953d1e0c1 | 4,333 |
def to_percent__xy(x, y):
"""
Format x / y as a percentage with 2 decimal places.
:param x:
:param y:
:return:
"""
return '{:.2%}'.format(x / y) | 3d5cfcde6f1dbd65b99a4081790e03efb669ee02 | 4,336 |
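A worked example for to_percent__xy (illustrative only; assumes the function above is in scope):

# 1 out of 4 formats as "25.00%".
assert to_percent__xy(1, 4) == '25.00%'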
def generic_constructor(value, name=None, strict=False, allow_downcast=None):
"""SharedVariable Constructor"""
return SharedVariable(type=generic, value=value, name=name, strict=strict,
allow_downcast=allow_downcast) | e4d168449099154ce936d49c18dfc1754a774115 | 4,337 |
import torch
def u_scheme(tree, neighbours):
"""Calculates the u-:ref:`scheme <presolve>`.
"""
unique_neighbours = torch.sort(neighbours, 1, descending=True).values
unique_neighbours[:, 1:][unique_neighbours[:, 1:] == unique_neighbours[:, :-1]] = -1
pairs = torch.stack([tree.id[:, None].expand_as(neighbours), unique_neighbours], -1)
pairs = pairs[(pairs >= 0).all(-1) & tree.terminal[pairs].all(-1)]
partner_is_larger = tree.depths[pairs[:, 0]] > tree.depths[pairs[:, 1]]
smaller_partners = torch.flip(pairs[partner_is_larger], (1,))
pairs = torch.cat([pairs, smaller_partners])
return ragged.from_pairs(pairs, len(tree.id), len(tree.id)) | 4b0727f6bbaa8121435b347100751928e6f0a348 | 4,338 |
def find_and_open_file(f):
"""
Looks in open windows for `f` and focuses the related view.
Opens file if not found. Returns associated view in both cases.
"""
for w in sublime.windows():
for v in w.views():
if normpath(f) == v.file_name():
w.focus_view(v)
return v
return sublime.active_window().open_file(f) | f300b76c9c4f4cf50e34490996d0f57feeb01728 | 4,339 |
def stochastic_fit(input_data: object) -> FitParams:
"""
Acquire parameters for the stochastic input signals.
"""
params = FitParams(0.000036906289210966747, 0.014081285145600045)
return params | b138b1b434c9a4270c6915d67d6fdca3434a59a5 | 4,340 |
from typing import Dict
from typing import Tuple
from typing import List
def sort_features_by_normalization(
normalization_parameters: Dict[int, NormalizationParameters]
) -> Tuple[List[int], List[int]]:
"""
Helper function to return a sorted list from a normalization map.
Also returns the starting index for each feature type"""
# Sort features by feature type
sorted_features: List[int] = []
feature_starts: List[int] = []
assert isinstance(
list(normalization_parameters.keys())[0], str
), "Normalization Parameters need to be str"
for feature_type in identify_types.FEATURE_TYPES:
feature_starts.append(len(sorted_features))
for feature in sorted(normalization_parameters.keys()):
norm = normalization_parameters[feature]
if norm.feature_type == feature_type:
sorted_features.append(feature)
return sorted_features, feature_starts | 7beca199dc71e43fcf9f5c8870ee3f450a116e86 | 4,341 |
def block_sort():
"""
Placeholder: to be implemented following https://en.wikipedia.org/wiki/Block_sort
:return: None
"""
return None | 69143373200b0dfc560404c12d1500988869a688 | 4,342 |
import numpy as np
def prep_ground_truth(paths, box_data, qgt):
"""adds dbidx column to box data, sets dbidx in qgt and sorts qgt by dbidx
"""
orig_box_data = box_data
orig_qgt = qgt
path2idx = dict(zip(paths, range(len(paths))))
mapfun = lambda x : path2idx.get(x,-1)
box_data = box_data.assign(dbidx=box_data.file_path.map(mapfun).astype('int'))
box_data = box_data[box_data.dbidx >= 0].reset_index(drop=True)
new_ids = qgt.index.map(mapfun)
qgt = qgt[new_ids >= 0]
qgt = qgt.set_index(new_ids[new_ids >= 0])
qgt = qgt.sort_index()
## Add entries for files with no labels...
qgt = qgt.reindex(np.arange(len(paths))) # na values will be ignored...
assert len(paths) == qgt.shape[0], 'every path should be in the ground truth'
return box_data, qgt | 6c01fb121933d5fdf235948136ffc73e08e7d6ee | 4,343 |
def top_1_pct_share(df, col, w=None):
"""Calculates top 1% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 1%.
"""
return top_x_pct_share(df, col, 0.01, w) | 9fca3e9fdd1c69bda3a96111ca110791adb729be | 4,344 |
def containsdupevalues(structure) -> bool or None:
"""Returns True if the passed dict has duplicate items/values, False otherwise. If the passed structure is not a dict, returns None."""
if isinstance(structure, dict):
# fast check for dupe keys
rev_dict = {}
for key, value in structure.items():
rev_dict.setdefault(value, set()).add(key)
dupes = list(filter(lambda x: len(x) > 1, rev_dict.values()))
if dupes:
return True
else:
return False
return None | 4d3c72e71740e69a13889cef816fc4c00ead5790 | 4,345 |
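Illustrative checks for containsdupevalues (assumes the function above is in scope):

assert containsdupevalues({'a': 1, 'b': 1}) is True    # two keys share the value 1
assert containsdupevalues({'a': 1, 'b': 2}) is False   # all values are distinct
assert containsdupevalues([1, 2, 3]) is None           # not a dict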
import re
def only_letters(answer):
"""Checks if the string contains alpha-numeric characters
Args:
answer (string):
Returns:
bool:
"""
match = re.match("^[a-z0-9]*$", answer)
return bool(match) | 32c8905294f6794f09bb7ea81ed7dd4b6bab6dc5 | 4,346 |
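Illustrative checks for only_letters (assumes the function and its re import above are in scope; note that the empty string also matches the pattern):

assert only_letters("abc123") is True
assert only_letters("Hello!") is False   # uppercase and punctuation are rejected
assert only_letters("") is True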
def is_finally_visible_func(*args):
"""
is_finally_visible_func(pfn) -> bool
Is the function visible (even after considering 'SCF_SHHID_FUNC')?
@param pfn (C++: func_t *)
"""
return _ida_funcs.is_finally_visible_func(*args) | 468687af0bafb42887f8e43453a4e6c641abde5e | 4,347 |
def _losetup_list():
"""
List all the loopback devices on the system.
:returns: A ``list`` of
2-tuple(FilePath(device_file), FilePath(backing_file))
"""
output = check_output(
["losetup", "--all"]
).decode('utf8')
return _losetup_list_parse(output) | 00ad4bdb76e22f44da50b35a296d43c5678698ce | 4,348 |
import numpy as np
def gaussian_product_center(a,A,b,B):
"""
Return the Gaussian product center (a*A + b*B)/(a + b) for exponents a, b
and centers A, B.
"""
A = np.array(A)
B = np.array(B)
return (a*A+b*B)/(a+b) | a52828d72f99bef8f666d1dbd33ee8d748e0b543 | 4,349 |
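A worked example for gaussian_product_center (illustrative only; assumes the function and the numpy import above are in scope). With equal exponents the product center is the midpoint of the two centers:

center = gaussian_product_center(1.0, [0.0, 0.0, 0.0], 1.0, [2.0, 0.0, 0.0])
assert np.allclose(center, [1.0, 0.0, 0.0])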
from yaml import YAMLError
from ruamel.yaml import YAML  # assumed source of YAML(typ=..., pure=...) used below
def read_yaml_file(yaml_path):
"""Loads a YAML file.
:param yaml_path: the path to the yaml file.
:return: YAML file parsed content.
"""
if is_file(yaml_path):
try:
file_content = sudo_read(yaml_path)
yaml = YAML(typ='safe', pure=True)
return yaml.safe_load(file_content)
except YAMLError as e:
raise YAMLError('Failed to load yaml file {0}, due to {1}'
''.format(yaml_path, str(e)))
return None | 7739f55b4b872392ddad4d5184fa718d8c1daa5e | 4,350 |
def _InUse(resource):
"""All the secret names (local names & remote aliases) in use.
Args:
resource: Revision
Returns:
List of local names and remote aliases.
"""
return ([
source.secretName
for source in resource.template.volumes.secrets.values()
] + [
source.secretKeyRef.name
for source in resource.template.env_vars.secrets.values()
]) | cf13ccf1d0fffcd64ac8b3a40ac19fdb2b1d12c5 | 4,351 |
def filter_dwnmut(gene_data):
"""Removes the variants upstream to Frameshift/StopGain mutation.
Args:
- gene_data(dictionary): gene_transcript wise variants where
there is at least one Frameshift/Stopgain
mutation.
Returns:
- flt_data(dictionary): gene_transcript wise variants where there
is at least one Frameshift/StopGain mutation
and at least one downstream coding exonic
variant.
"""
rfgene = Refgene()
flt_gene_data = {}
for gene_info, val in gene_data.items():
trans_id = gene_info[1]
strand = rfgene.get_strand(trans_id)
if not strand:
continue
for e in val:
t = {}
variants = list(e.keys())  # materialize so the list can be sorted in place
if strand == '+':
variants.sort()
elif strand == '-':
variants.sort(reverse=True)
size = 0
mut_type = ''
flag = False
for var in variants:
if flag == False and e[var][0] == 'StopGain':
mut_type = 'StopGain'
t[tuple(list(var) + ['#'])] = e[var]
flag = True
elif flag == False and e[var][0].startswith('FrameShift'):
if e[var][0][10:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][10:] == 'Delete':
size -= len(var[3]) - 1
t[tuple(list(var) + ['#'])] = e[var]
flag = True
elif flag == True:
if mut_type == 'StopGain':
t[var] = e[var]
elif e[var][0].startswith('FrameShift'):
if e[var][0][10:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][10:] == 'Delete':
size -= len(var[3]) - 1
t[var] = e[var]
if size == 0 or divmod(size, 3)[1] == 0:
flag = False
elif e[var][0].startswith('NonFrameShift'):
if e[var][0][13:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][13:] == 'Delete':
size -= len(var[3]) - 1
t[var] = e[var]
if size == 0 or divmod(size, 3)[1] == 0:
flag = False
else:
t[var] = e[var]
if len(t) > 1:
key = tuple(list(gene_info) + [strand])
if key not in flt_gene_data:
flt_gene_data[key] = [t]
else:
if t != flt_gene_data[key][0]:
flt_gene_data[key].append(t)
return flt_gene_data | 9573e5cbd0ed3f96f8e7f47fa395476cc7bd513b | 4,352 |
def format_scwgbs_file(file_path):
"""
Format a scwgbs file to a more usable manner
:param file_path: The path of the file to format
:type file_path: str
:return: A dict where each key is a chr and the value is an array with all the scwgbs reads
:rtype: dict
"""
chr_dict = extract_cols(file_path)
chr_dict = combine_strands(chr_dict)
return chr_dict | eaf2925e3f634138ba8eb7bf1f61189d30f86d7c | 4,353 |
import torch
from torch.nn.functional import mse_loss
def ls_generator_loss(scores_fake):
"""
Computes the Least-Squares GAN loss for the generator.
Inputs:
- scores_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
Outputs:
- loss: A PyTorch Tensor containing the loss.
"""
loss = None
####################################
# YOUR CODE HERE #
####################################
labels = torch.ones_like(scores_fake)
loss = 1/2 * mse_loss(scores_fake, labels, reduction = 'mean')
########## END ##########
return loss | 6b6d1b94e13de514e56fe83764869b8c2948a40a | 4,354 |
import numpy as np
def rmsd(predicted, reference):
"""
Calculate root-mean-square deviation (RMSD) between two variables.
Calculates the root-mean-square deviation between two variables
PREDICTED and REFERENCE. The RMSD is calculated using the
formula:
RMSD^2 = sum_(n=1)^N [(p_n - r_n)^2]/N
where p is the predicted values, r is the reference values, and
N is the total number of values in p & r. Note that p & r must
have the same number of values.
Input:
PREDICTED : predicted values
REFERENCE : reference values
Output:
R : root-mean-square deviation (RMSD)
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
[email protected]
Created on Dec 9, 2016
"""
# Check that dimensions of predicted and reference fields match
utils.check_arrays(predicted, reference)
# Calculate the RMSE
r = np.sqrt(np.sum(np.square(predicted - reference)) / len(predicted))
return r | 5903c0a900b6f66bddd640f9c65146a08e0b768d | 4,355 |
from typing import List
import numpy as np
from scipy.stats import rankdata
def _get_ranks_for_sequence(logits: np.ndarray,
labels: np.ndarray) -> List[float]:
"""Returns ranks for a sequence.
Args:
logits: Logits of a single sequence, dim = (num_tokens, vocab_size).
labels: Target labels of a single sequence, dim = (num_tokens, 1).
Returns:
An array of ranks for tokens in the sequence, dim = (num_tokens, 1).
"""
sequence_ranks = []
for logit, label in zip(logits, labels.astype(int)):
rank = rankdata(-logit, method='min')[label] - 1.0
sequence_ranks.append(rank)
return sequence_ranks | 59cb646f5f4f498f3bfe1c3c01672ce61e124428 | 4,356 |
import numpy as np
import torch
import time
def eval_model_on_grid(model, bbox, tx, voxel_grid_size, cell_vox_min=None, cell_vox_max=None, print_message=True):
"""
Evaluate the trained model (output of fit_model_to_pointcloud) on a voxel grid.
:param model: The trained model returned from fit_model_to_pointcloud
:param bbox: The bounding box defining the region of space on which to evaluate the model
(represented as the pair (origin, size))
:param tx: An affine transformation which transforms points in world coordinates to model
coordinates before evaluating the model (the second return value of fit_model_to_grid).
The transformation is represented as a tuple (t, s) where t is a translation and s is scale.
:param voxel_grid_size: The size of the voxel grid on which to reconstruct
:param cell_vox_min: If not None, reconstruct on the subset of the voxel grid starting at these indices.
:param cell_vox_max: If not None, reconstruct on the subset of the voxel grid ending at these indices.
:param print_message: If true, print status messages to stdout.
:return: A tensor representing the model evaluated on a grid.
"""
bbox_origin, bbox_size = bbox
voxel_size = bbox_size / voxel_grid_size # size of a single voxel cell
if cell_vox_min is None:
cell_vox_min = torch.tensor([0, 0, 0], dtype=torch.int32)
if cell_vox_max is None:
cell_vox_max = voxel_grid_size
if print_message:
print(f"Evaluating model on grid of size {[_.item() for _ in (cell_vox_max - cell_vox_min)]}.")
eval_start_time = time.time()
xmin = bbox_origin + (cell_vox_min + 0.5) * voxel_size
xmax = bbox_origin + (cell_vox_max - 0.5) * voxel_size
xmin = affine_transform_pointcloud(xmin.unsqueeze(0), tx).squeeze()
xmax = affine_transform_pointcloud(xmax.unsqueeze(0), tx).squeeze()
xmin, xmax = xmin.numpy(), xmax.numpy()
cell_vox_size = (cell_vox_max - cell_vox_min).numpy()
xgrid = np.stack([_.ravel() for _ in np.mgrid[xmin[0]:xmax[0]:cell_vox_size[0] * 1j,
xmin[1]:xmax[1]:cell_vox_size[1] * 1j,
xmin[2]:xmax[2]:cell_vox_size[2] * 1j]], axis=-1)
xgrid = torch.from_numpy(xgrid).to(model.alpha_.dtype)
xgrid = torch.cat([xgrid, torch.ones(xgrid.shape[0], 1).to(xgrid)], dim=-1).to(model.alpha_.dtype)
ygrid = model.predict(xgrid).reshape(tuple(cell_vox_size.astype(np.int))).detach().cpu()
if print_message:
print(f"Evaluated model in {time.time() - eval_start_time}s.")
return ygrid | 7486d27d11c250cccd1c9de5fc65b8f8f773f906 | 4,357 |
import torch
from tqdm import tqdm
def beam_search(model, test_data_src, beam_size, max_decoding_time_step):
""" Run beam search to construct hypotheses for a list of src-language sentences.
@param model : Model
@param test_data_src (List[List[str]]): List of sentences (words) in source language, from test set.
@param beam_size (int): beam_size (# of hypotheses to hold for a translation at every step)
@param max_decoding_time_step (int): maximum sentence length that Beam search can produce
@returns hypotheses (List[List[Hypothesis]]): List of Hypothesis translations for every source sentence.
"""
model.eval()
hypotheses = []
with torch.no_grad():
for src_sent in tqdm(test_data_src, desc='Decoding'):
example_hyps = model.beam_search(src_sent, beam_size=beam_size, max_decoding_time_step=max_decoding_time_step)
hypotheses.append(example_hyps)
return hypotheses | ff5a52a336defa4f647a6eb8d7d39c12aa13b9be | 4,359 |
def filter_months(c, months):
"""Filters the collection by matching its date-time index with the specified months."""
indices = find_all_in(get_months(c), get_months(months))
return take_at(c, indices) | c635c80fb007f49c2ef9238830374d0465b488b3 | 4,360 |
import itertools
import numpy as np
def kmode_fisher(ks,mus,param_list,dPgg,dPgv,dPvv,fPgg,fPgv,fPvv,Ngg,Nvv, \
verbose=False):
"""
Fisher matrix for fields g(k,mu) and v(k,mu).
Returns F[g+v] and F[g]
dPgg, dPgv, dPvv are dictionaries of derivatives.
fPgg, fPgv, fPvv are fiducial powers.
"""
# Populate Fisher matrix
num_params = len(param_list)
param_combs = itertools.combinations_with_replacement(param_list,2)
Fisher = np.zeros((num_params,num_params))
FisherG = np.zeros((num_params,num_params))
for param1,param2 in param_combs:
i = param_list.index(param1)
j = param_list.index(param2)
if verbose: print("Calculating Fisher for ",param1,param2)
integral = 0.
integralG = 0.
dCov1 = np.array([[dPgg[param1],dPgv[param1]],
[dPgv[param1],dPvv[param1]]])
dCov2 = np.array([[dPgg[param2],dPgv[param2]],
[dPgv[param2],dPvv[param2]]])
# fiducial covariance built from the fPgg/fPgv/fPvv inputs
Cov = np.array([[fPgg+Ngg,fPgv],
[fPgv,fPvv+Nvv]])
# Integrate over mu and k
for mu_id,mu in enumerate(mus[:-1]):
dmu = mus[mu_id+1]-mus[mu_id]
for k_id,k in enumerate(ks[:-1]):
dk = ks[k_id+1]-ks[k_id]
dC1 = dCov1[:,:,mu_id,k_id]
dC2 = dCov2[:,:,mu_id,k_id]
Cinv = np.linalg.inv(Cov[:,:,mu_id,k_id])
CinvG = 1./Cov[0,0,mu_id,k_id]
trace = np.trace(np.dot(np.dot(dC1,Cinv),np.dot(dC2,Cinv)))
traceG = dC1[0,0]*dC2[0,0]*CinvG**2.
pref = (k**2.)*dk*V/(2.*np.pi)**2./2.*dmu
integral += pref*trace
integralG += pref*traceG
Fisher[i,j] = integral
if j!=i: Fisher[j,i] = integral
FisherG[i,j] = integralG
if j!=i: FisherG[j,i] = integralG
return stats.FisherMatrix(Fisher,param_list), \
stats.FisherMatrix(FisherG,param_list) | e27699d51ce65b284d1289d5e6ae4472ae6fa63e | 4,361 |
def detect_device(model):
"""
Tries to determine the best-matching device for the given model
"""
model = model.lower()
# Try matching based on prefix, this is helpful to map e.g.
# FY2350H to FY2300
for device in wavedef.SUPPORTED_DEVICES:
if device[:4] == model[:4]:
return device
raise wavedef.UnsupportedDeviceError(
"Unable to autodetect device '%s'. "
"Use FYGen(device_name='fy2300') with one of the supported devices, "
"beware that the waveforms might not match up."
"Supported devices: %s"
% (
model,
', '.join(wavedef.SUPPORTED_DEVICES)
)
) | 6b58b5a8dc67a1f30e499c31b545b70ead908aaf | 4,362 |
import re
def re_identify_image_metadata(filename, image_names_pattern):
"""
Apply a regular expression to the *filename* and return metadata
:param filename:
:param image_names_pattern:
:return: a list with metadata derived from the image filename
"""
match = re.match(image_names_pattern, filename)
return None if match is None else match.groups() | 1730620682f2457537e3f59360d998b251f5067f | 4,363 |
import numpy as np
def PSF_Moffat(alpha,beta,x,y):
""" Compute the PSF of the instrument with a Moffat function
Parameters
-----------
alpha: float
radial parameter
beta: float
power indice of the function
x: float
position along the x axis
y: float
position along the y axis
Returns:
---------
psf: array
psf of the instrument
"""
psf = (beta-1.)/(np.pi*alpha*alpha) * (1.+(x*x+y*y)/(alpha*alpha))**(-beta)
return psf | c70fe8582c27518ab521d3d663da87e1354f0668 | 4,365 |
def _tf_range_for_stmt(iter_,
extra_test,
body,
get_state,
set_state,
init_vars,
basic_symbol_names,
composite_symbol_names,
opts):
"""Overload of for_stmt that iterates over a TF range (and elides it)."""
_disallow_undefs_into_loop(*init_vars)
start, limit, delta = iter_.op.inputs
def while_body(iterate, *loop_vars):
new_vars = body(iterate, *loop_vars)
loop_vars = (iterate + delta,)
if new_vars:
loop_vars += new_vars
return loop_vars
def while_cond(iterate, *loop_vars):
"""Cond function for `tf.while_loop`."""
main_test = math_ops.logical_or(
math_ops.logical_and(delta >= 0, iterate < limit),
math_ops.logical_and(delta < 0, iterate > limit))
if extra_test is not None:
return control_flow_ops.cond(
main_test,
lambda: extra_test(*loop_vars),
lambda: False,
)
return main_test
opts['maximum_iterations'] = math_ops.cast(
misc.get_range_len(start, limit, delta), dtypes.int32)
results = _tf_while_stmt(
while_cond,
while_body,
get_state,
set_state,
(start,) + init_vars,
('<internal iterate>',) + basic_symbol_names,
composite_symbol_names,
opts,
)
# Note: the iteration index is not returned by the while loop, however
# if a symbol with the same name exists outside the loop, it will be captured
# by the loop variables and ultimately updated correctly.
if isinstance(results, (tuple, list)):
assert len(results) >= 1 # Has at least the iterate.
if len(results) > 1:
results = results[1:]
else:
results = ()
return results | 37e21af2fbf5bd910743c220e872c45e08131e97 | 4,366 |
import importlib
def _import_class(module_and_class_name: str) -> type:
"""Import class from a module, e.g. 'text_recognizer.models.MLP'"""
module_name, class_name = module_and_class_name.rsplit(".", 1) # splits into 2 elements at "."
module = importlib.import_module(module_name)
class_ = getattr(module, class_name) # gives us model.class_name attribute (ex: jacques = Person(), jacques.age -> 28)
return class_ | c5666b6393c89bf9cb32a7a3351d3a8706ffd631 | 4,367 |
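Illustrative usage of _import_class with a standard-library class (assumes the function above is in scope):

ordered_dict_cls = _import_class("collections.OrderedDict")
assert ordered_dict_cls.__name__ == "OrderedDict"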
def mark_task(func):
"""Mark function as a defacto task (for documenting purpose)"""
func._is_task = True
return func | 9f0156fff2a2a6dcb64e79420022b78d1c254490 | 4,368 |
from operator import getitem
import math
def dict_diff(left: Map, right: Map) -> t.List[t.Dict]:
"""Get the difference between 2 dict-like objects
Args:
left (Map): The left dict-like object
right (Map): The right dict-like object
The value returned is a list of dictionaries with keys ["path", "left", "right"]
which contain the query path and the differences between the left and right mapping.
If a key is missing in either mapping, it will be indicated as a "None".
`math.nan` (not-a-number) is used for default values in the comparison because of
the property: `math.nan != math.nan`. Simple None cannot be used, since it would
not handle keys that both have a value of None. In general, this function might
report false-positives for keys that contain the math.nan (or np.nan) value simply
due to this property. There is no workaround available.
"""
left_paths = set(get_valid_access_paths(left, _leaf_only=True, _use_lists=False))
right_paths = set(get_valid_access_paths(right, _leaf_only=True, _use_lists=False))
return list(
{
"path": path,
"left": getitem(left, path, math.nan),
"right": getitem(right, path, math.nan),
}
for path in left_paths.union(right_paths)
if getitem(left, path, math.nan) != getitem(right, path, math.nan)
) | 23f7aa611230879099590b696f9484aa9881a34b | 4,369 |
def make_slack_message_divider() -> dict:
"""Generates a simple divider for a Slack message.
Returns:
The generated divider.
"""
return {'type': 'divider'} | 9d0243c091065056a29d9fa05c62fadde5dcf6f6 | 4,370 |
def get_history(filename: str, extension: int = 0) -> str:
"""
Returns the HISTOR header lines.
Args:
filename: image filename.
extension: image extension number.
Returns:
string containing all HISTORY lines.
"""
filename = azcam.utils.make_image_filename(filename)
hdr = pyfits.getheader(filename, extension)
history = hdr["HISTORY"]
return str(history) | 495b5d85a9313c081cf5bea837440dc51e9f8d6e | 4,372 |
def product_detail(request, product_id):
""" A view to show one product's details """
product = get_object_or_404(Product, pk=product_id)
review_form = ReviewForm()
reviews = Review.objects.filter(product_id=product_id).order_by('-created_at')
context = {
'product': product,
'review_form': review_form,
'reviews': reviews,
}
return render(request, 'products/product_detail.html', context) | 5e83dd11b2cfb4e43186c584424e96e35f52333a | 4,373 |
import json
def deserialize_response_content(response):
"""Convert utf-8 encoded string to a dict.
Since the response is encoded in utf-8, it gets decoded to regular python
string that will be a json string. That gets converted to python
dictionary.
Note: Do not use this method to process non-json response.content
:param requests.models.Response response: object that includes attributes
status code and content
:return: response content as decoded dictionary
:rtype: dict
"""
if response.content:
decoded = response.content.decode("utf-8")
if len(decoded) > 0:
return json.loads(decoded)
return {} | ff5494e38f7a6f5b49b4e84e1e8a2ee1633d3872 | 4,374 |
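A minimal sketch for deserialize_response_content (illustrative only). The function only reads response.content, so a simple stand-in object is enough here; no real HTTP call or requests.Response is needed:

from types import SimpleNamespace

fake_response = SimpleNamespace(content=b'{"ok": true}')
assert deserialize_response_content(fake_response) == {"ok": True}

empty_response = SimpleNamespace(content=b'')
assert deserialize_response_content(empty_response) == {}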
def _remove_header_create_bad_object(remove, client=None):
""" Create a new bucket, add an object without a header. This should cause a failure
"""
bucket_name = get_new_bucket()
if client == None:
client = get_client()
key_name = 'foo'
# remove custom headers before PutObject call
def remove_header(**kwargs):
if (remove in kwargs['params']['headers']):
del kwargs['params']['headers'][remove]
client.meta.events.register('before-call.s3.PutObject', remove_header)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')
return e | bb1f6fb6ca61c7c3137ec4dbf35c2c3500c0e82d | 4,375 |
def SpawnObjectsTab():
"""This function creates a layout containing the object spawning functionality.
Returns:
str : The reference to the layout.
"""
### Create main Layout for the tab
mainTab = cmds.columnLayout(adjustableColumn=True, columnAttach=('both', 20))
cmds.separator(height=10, style="none")
cmds.text(label="Asset Gallery:", align="left")
### Asset Name Text Field
cmds.separator(height=10, style="none")
SpawnObjectsTab.UserField = cmds.textFieldButtonGrp(placeholderText="Write Asset's Name", buttonLabel="Save Asset", buttonCommand=lambda: saveAsset(),
ann="Assign a name for the asset that will be used in the outliner and in the directory hierarchy.")
### Asset Gallery Layout
cmds.separator(height=10, style="none")
cmds.scrollLayout(childResizable=True, height=305, width=455, backgroundColor=(.2,.2,.2))
global objectScroll
objectScroll = cmds.gridLayout(cellWidthHeight=(150,150), autoGrow=True)
populateGallery() # Creates Icons
cmds.setParent(mainTab) # Exit scroll layout
### Choose between Arnold StandIn and Assembly Reference
cmds.separator(height=10, style="none")
global loadMethodRadio
cmds.rowLayout(numberOfColumns=3, adjustableColumn=3)
loadMethodRadio = cmds.radioCollection()
cmds.radioButton("standin", label="Load as Arnold StandIn", select=True,
ann="Arnold standIns bring materials. Render in Arnold to see them.") # Radio button for StandIn
cmds.separator(width=20, style="none")
cmds.radioButton("assembly", label="Load as Assembly Reference",
ann="Assembly references can change their representation mode.") # Radio button for Assembly
cmds.setParent(mainTab)
### Choose how to set the location of the object
cmds.separator(height=10)
cmds.text(label="Spawning method:", align="left")
cmds.separator(height=5, style="none")
cmds.rowLayout(numberOfColumns=4, adjustableColumn=4, columnAttach4=("both","both","both","both"), columnOffset4=(10,10,10,10))
global placingRadio
placingRadio = cmds.radioCollection()
# Create only one copy
cmds.radioButton("single", label="Single Object", select=True,
onCommand=lambda x: cmds.columnLayout(randomControlLayout, edit=True, enable=False),
offCommand=lambda x: cmds.columnLayout(randomControlLayout, edit=True, enable=True),
ann="Create one single object. MMC and drag to scene does the same.")
# Create copies along a curve
cmds.radioButton("curve", label="Along Curve",
ann="Spawn assets along a previously created curve")
# Create copies between a range in world space
cmds.radioButton("range", label="In Range",
onCommand=lambda x: cmds.columnLayout(rangeLayout, edit=True, visible=True),
offCommand=lambda x: cmds.columnLayout(rangeLayout, edit=True, visible=False),
ann="Creates objects in a defined range of coordinates.")
# Create copies on a mesh's surface
cmds.radioButton("mesh", label="On Mesh",
ann="Locate assets on the surface of a selected mesh.")
cmds.setParent(mainTab)
### Randomization parameters
cmds.separator(height=10, style="none")
randomControlLayout = cmds.columnLayout(enable=False)
# How many copies
SpawnObjectsTab.BuildingAmount = cmds.intSliderGrp(label="Building Number", field=True, value=10, min=2, max=50, fieldMaxValue=200)
# Deviation from original rotation
SpawnObjectsTab.RandomRotation = cmds.floatSliderGrp(label="Random Rotation", field=True, value=15, min=0, max=360)
# Deviation from orignal scale
SpawnObjectsTab.RandomScale = cmds.floatSliderGrp(label="Random Scale", field=True, value=0, min=0, max=10)
cmds.setParent(mainTab)
### Range spawning parameters
rangeLayout = cmds.columnLayout(visible=False)
# Min x, y and z coordinates
SpawnObjectsTab.MinimumField = cmds.floatFieldGrp(label="Minimum Range: ", numberOfFields=3)
# Max x, y and z coordinates
SpawnObjectsTab.MaximumField = cmds.floatFieldGrp(label="Maximum Range: ", numberOfFields=3)
cmds.setParent(mainTab)
### Finalize
cmds.separator(height=10, style="none")
cmds.button(label='Load Selected Objects', command=lambda x: choosePlacement(x))
cmds.setParent('..') # Exit column layout
return mainTab | 8381058ce0d7607f81fcdc6ba3e9f03c1495c719 | 4,376 |
import math
import numpy as np
def getShdomDirections(Y_shdom, X_shdom, fov=math.pi/2):
"""Calculate the (SHDOM) direction of each pixel.
Directions are calculated in SHDOM convention where the direction is
of the photons.
"""
PHI_shdom = np.pi + np.arctan2(Y_shdom, X_shdom)
PSI_shdom = -np.pi + fov * np.sqrt(X_shdom**2 + Y_shdom**2)
return PHI_shdom, PSI_shdom | e564f5a9988a6a3a0f14b319ea553dd6bfd7d75a | 4,378 |
def twos_comp(val, bits):
"""returns the 2's complement of int value val with n bits
- https://stackoverflow.com/questions/1604464/twos-complement-in-python"""
if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255
val = val - (1 << bits) # compute negative value
return val & ((2 ** bits) - 1) # return positive value as is | 53bc689f9dd0cf1dff6cd3e073608d1827a3dad9 | 4,379 |
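Illustrative checks for twos_comp (assumes the function above is in scope); the result is the n-bit two's-complement bit pattern of val:

assert twos_comp(-1, 8) == 0xFF   # -1 -> 11111111
assert twos_comp(-2, 8) == 0xFE
assert twos_comp(5, 8) == 5       # non-negative in-range values pass through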
def get_batch(source, i, cnf):
"""
Gets a batch shifted over by shift length
"""
seq_len = min(cnf.batch_size, len(source) - cnf.forecast_window - i)
data = source[i : i + seq_len]
target = source[
i + cnf.forecast_window : i + cnf.forecast_window + seq_len
].reshape(-1)
return data, target | 8f4bbfca44bed498dc22d52706389378bf03f7e0 | 4,380 |
def verify_df(df, constraints_path, epsilon=None, type_checking=None,
repair=True, report='all', **kwargs):
"""
Verify that (i.e. check whether) the Pandas DataFrame provided
satisfies the constraints in the JSON ``.tdda`` file provided.
Mandatory Inputs:
*df*:
A Pandas DataFrame, to be checked.
*constraints_path*:
The path to a JSON ``.tdda`` file (possibly
generated by the discover_df function, below)
containing constraints to be checked.
Or, alternatively, an in-memory dictionary
containing the structured contents of a ``.tdda``
file.
Optional Inputs:
*epsilon*:
When checking minimum and maximum values
for numeric fields, this provides a
tolerance. The tolerance is a proportion
of the constraint value by which the
constraint can be exceeded without causing
a constraint violation to be issued.
For example, with epsilon set to 0.01 (i.e. 1%),
values can be up to 1% larger than a max constraint
without generating constraint failure,
and minimum values can be up to 1% smaller
that the minimum constraint value without
generating a constraint failure. (These
are modified, as appropriate, for negative
values.)
If not specified, an *epsilon* of 0 is used,
so there is no tolerance.
NOTE: A consequence of the fact that these
are proportionate is that min/max values
of zero do not have any tolerance, i.e.
the wrong sign always generates a failure.
*type_checking*:
``strict`` or ``sloppy``.
Because Pandas silently, routinely and
automatically "promotes" integer and boolean
columns to reals and objects respectively
if they contain nulls, strict type checking
can be problematical in Pandas. For this reason,
``type_checking`` defaults to ``sloppy``, meaning
that type changes that could plausibly be
attributed to Pandas type promotion will not
generate constraint values.
If this is set to strict, a Pandas ``float``
column ``c`` will only be allowed to satisfy a
an ``int`` type constraint if::
c.dropnulls().astype(int) == c.dropnulls()
Similarly, Object fields will satisfy a
``bool`` constraint only if::
c.dropnulls().astype(bool) == c.dropnulls()
*repair*:
A boolean to specify whether to try to use the
information in the constraints to attempt to
repair potentially-incorrect type inferrences
made when constructing the dataframe. When the
dataframe has been loaded from a .csv file, this
can often be useful (but should not be used with
dataframes that have come from a more reliable
source).
*report*:
``all`` or ``fields``.
This controls the behaviour of the
:py:meth:`~tdda.constraints.pd.constraints.PandasVerification.__str__` method on
the resulting :py:class:`~tdda.constraints.pd.constraints.PandasVerification`
object (but not its content).
The default is ``all``, which means that
all fields are shown, together with the
verification status of each constraint
for that field.
If report is set to ``fields``, only fields for
which at least one constraint failed are shown.
Returns:
:py:class:`~tdda.constraints.pd.constraints.PandasVerification` object.
This object has attributes:
- *passes* --- Number of passing constriants
- *failures* --- Number of failing constraints
It also has a :py:meth:`~tdda.constraints.pd.constraints.PandasVerification.to_frame()` method for
converting the results of the verification to a Pandas DataFrame,
and a :py:meth:`~tdda.constraints.pd.constraints.PandasVerification.__str__` method to print
both the detailed and summary results of the verification.
Example usage::
import pandas as pd
from tdda.constraints import verify_df
df = pd.DataFrame({'a': [0, 1, 2, 10, np.NaN],
'b': ['one', 'one', 'two', 'three', np.NaN]})
v = verify_df(df, 'example_constraints.tdda')
print('Constraints passing: %d\\n' % v.passes)
print('Constraints failing: %d\\n' % v.failures)
print(str(v))
print(v.to_frame())
See *simple_verification.py* in the :ref:`constraint_examples`
for a slightly fuller example.
"""
pdv = PandasConstraintVerifier(df, epsilon=epsilon,
type_checking=type_checking)
if isinstance(constraints_path, dict):
constraints = DatasetConstraints()
constraints.initialize_from_dict(native_definite(constraints_path))
else:
constraints = DatasetConstraints(loadpath=constraints_path)
if repair:
pdv.repair_field_types(constraints)
return pdv.verify(constraints,
VerificationClass=PandasVerification,
report=report, **kwargs) | 03daa527e9edb61d57a960c335ba574930baf130 | 4,381 |
def logical_and(image1, image2):
"""Logical AND between two videos. At least one of the videos must have
mode "1".
.. code-block:: python
out = ((image1 and image2) % MAX)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_and(image2.im)) | 040ec91a09f0ce7251e3e40f95a087e6d1f81b87 | 4,382 |
def predict(endpoint_id: str, instance: object) -> object:
"""Send a prediction request to a uCAIP model endpoint
Args:
endpoint_id (str): ID of the uCAIP endpoint
instance (object): The prediction instance, should match the input format that the endpoint expects
Returns:
object: Prediction results from the model
"""
return UCAIPService.get().predict_tables(endpoint_id, instance) | 9b7801d23e9aed0fc8292cd1b0a7f9621c313797 | 4,383 |
async def resolve_address(ipaddr, *args, **kwargs):
"""Use a resolver to run a reverse query for PTR records.
See ``dns.asyncresolver.Resolver.resolve_address`` for more
information on the parameters.
"""
return await get_default_resolver().resolve_address(ipaddr, *args, **kwargs) | b5fbd218ba3e8e1d1a51873a4c12cfc304bfc2fd | 4,384 |
def _stringify(item):
"""
Private function which wraps all items in quotes to prevent paths from
being broken up. It will also unpack lists into strings
:param item: Item to stringify.
:return: string
"""
if isinstance(item, (list, tuple)):
return '"' + '" "'.join(item) + '"'
if isinstance(item, str) and len(item) == 0:
return None
return '"%s"' % item | 7187b33dccce66cb81b53ed8e8c395b74e125633 | 4,385 |
def _get_tree_filter(attrs, vecvars):
"""
Pull attributes and input/output vector variables out of a tree System.
Parameters
----------
attrs : list of str
Names of attributes (may contain dots).
vecvars : list of str
Names of variables contained in the input or output vectors.
Returns
-------
function
A function that takes a System and returns a list of name value pairs.
"""
def _finder(system):
found = []
for attr in attrs:
parts = attr.split('.') # allow attrs with dots
try:
obj = system
for p in parts:
obj = getattr(obj, p)
found.append((attr, obj))
except AttributeError:
pass
for var in vecvars:
if var in system._outputs:
found.append((var, system._outputs[var]))
elif var in system._inputs:
found.append((var, system._inputs[var]))
return found
return _finder | fb96025a075cfc3c56011f937d901d1b87be4f03 | 4,386 |
import numpy as np
def gradient2(Y,x,sum_p):
"""
Description
-----------
Used to calculate the gradients of the beta values (excluding the first).
Parameters
----------
Y: label (0 or 1)
x: flux value
sum_p: sum of all beta values (see 'param_sum' function)
Returns
-------
num/denom: gradient value
"""
if Y == 1:
num = -x * np.exp(-sum_p)
denom = 1 + np.exp(-sum_p)
elif Y == 0:
num = x
denom = 1 + np.exp(-sum_p)
return num/denom | 985b0e72419127291b2ddf99e81a129693e7e8fe | 4,387 |
def takeBlock2(aList, row_list, col_list):
"""
Take the sublist given by the rows in row_list and the columns in col_list from
a doubly nested list.
The convention for the row and column indices is the same as in slicing.
"""
result = []
for row in row_list:
result.append([aList[row][column] for column in col_list])
return result | 4f891fd0ce8b3bcca88f2f8f6f572ce4253d1d46 | 4,388 |
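A worked example for takeBlock2 (illustrative only; assumes the function above is in scope): rows 0 and 2, columns 1 and 2 of a 3x3 list:

matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert takeBlock2(matrix, [0, 2], [1, 2]) == [[2, 3], [8, 9]]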
def param(key, desired_type=None):
"""Return a decorator to parse a JSON request value."""
def decorator(view_func):
"""The actual decorator"""
@wraps(view_func)
def inner(*args, **kwargs):
data = request.get_json() # May raise a 400
try:
value = data[key]
except (KeyError, TypeError):
abort(400, "Missing JSON value '{0}'.".format(key))
if desired_type and not isinstance(value, desired_type):
# For the error message
if desired_type == text_type:
type_name = 'string'
else:
type_name = desired_type.__name__
abort(400, ("Expected '{0}' to be type {1}."
.format(key, type_name)))
# Success, pass through to view function
kwargs[key] = value
return view_func(*args, **kwargs)
return inner
return decorator | 1dca83eb24df9623ddae270ebe1f06461c372af0 | 4,389 |
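A hypothetical usage sketch for the param decorator (illustrative only; it assumes a Flask application plus the names used by the decorator above -- request, abort, wraps, text_type -- are importable in this module; the route and view names are made up):

from flask import Flask

app = Flask(__name__)

@app.route("/echo", methods=["POST"])
@param("message", text_type)
def echo(message):
    # `message` has been extracted from the JSON body and type-checked.
    return {"message": message}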
def blockchain_key_seed(request):
""" Private key template for the nodes in the private blockchain, allows
different keys to be used for each test to avoid collisions.
"""
# Using the test name as part of the template to force the keys to be
# different accross tests, otherwise the data directories would be the same
# and collisions would happen
return escape_for_format(request.node.name) + "cluster:{}" | 05a940c5a18f816b4bba3fb65e354a5dac2ce1cd | 4,392 |
import numpy as np
def wls_simple(X, y, yerr):
"""
weighted least squares: (X.T*W*X)*beta = X.T*W*y
solution: beta = (X.T*X)^-1 * X.T *y
Note
----
wls solves single problems (n_problems=1)
BUT! is able to solve multiple-template (same error) problems
Parameters
----------
X: predictors
(n_obs, n_term) array
yerr: error of response
(n_obs, ) weight matrix
y: response
(n_obs, n_problems) array
Return
------
beta: coefficients
(n_term, ) array
"""
yerr = yerr.reshape(-1, 1) # column vector
yerr = np.where((yerr > 0) & np.isfinite(yerr), yerr, 1e5)
X_ = X / yerr
y_ = y / yerr
beta = ols(np.matmul(X_.T, X_), np.matmul(X_.T, y_))
return beta | d428eb22dee7b587788e065a7dd883992f183ef7 | 4,393 |
import re
import copy
def _filter(dict_packages, expression):
"""Filter the dict_packages with expression.
Returns:
dict(rst): Filtered dict with that matches the expression.
"""
expression_list = ['(' + item + ')' for item in expression.split(',')]
expression_str = '|'.join(expression_list)
compiled_exp = re.compile('(?i:^(' + expression_str + ')$)')
cp_dict_packages = copy.deepcopy(dict_packages)
for key in dict_packages.keys():
match = re.search(compiled_exp, key)
if not match:
del cp_dict_packages[key]
return cp_dict_packages | 866bca2847b9d3c8220319f4b394f932931fc076 | 4,394 |
import pandas as pd
def multi_index_tsv_to_dataframe(filepath, sep="\t", header_rows=None):
"""
Loads a multi-header tsv file into a :py:class:`pd.DataFrame`.
Parameters
----------
filepath : `str`
Path pointing to the tsv file.
sep : `str`, optional, default: '\t'
Character to use as the delimiter.
header_rows : `list`, optional, default: None
0-based indicies corresponding to the row locations to use as the
multi-index column names in the dataframe. Example:
condition E3 E3
value pvalue_raw z
_sy 8.6e-05 3.92
p.Ala16Arg 0.0 3.76
The *header_rows* for this instance will be [0, 1]
If not supplied, `header_rows` will be inferred from the file.
Returns
-------
:py:class:`~pd.DataFrame`
A :py:class:`pd.MultiIndex` dataframe.
"""
if header_rows is None:
header_rows = infer_multiindex_header_rows(filepath)
if header_rows == [0] or not header_rows:
return pd.read_table(filepath, index_col=0, sep=sep)
else:
try:
return pd.read_table(filepath, index_col=0, sep=sep, header=header_rows)
except IndexError:
return pd.read_table(filepath, index_col=0, sep=sep) | 9ae5816aed2bfacd05d4130ccc1598c037b9b353 | 4,395 |
def generate_summoner_tab_summoner(db, profile, ss):
"""
:type db: darkarisulolstats.lolstats.database.Database
"""
summoner = {}
for s in ss:
raw_summoner = db.summoners.get(s)
if "profileIconPath" not in summoner:
summoner["profileIconPath"] = data.DataDragon.get_profile_icon_path(raw_summoner["profileIconId"])
summoner["name"] = profile
if "level" not in summoner:
summoner["level"] = raw_summoner["summonerLevel"]
else:
summoner["level"] += raw_summoner["summonerLevel"]
summoner["Playtime"] = 0
raw_matches = db.preprocessed.get(profile, "matchlists")
for raw_match in raw_matches:
summoner["Playtime"] += raw_match["duration"]
return summoner | 0bee2b48c71910ed273925a7cae1c4539b411401 | 4,396 |
def preserve_quotes (s):
"""
Removes HTML tags around greentext.
"""
return quot_pattern.sub(get_first_group, s) | a87ac4ee7fdb0e0c879047066e805f2c9382c599 | 4,397 |
def with_whitespace_inside(expr):
""" Returns an expression that allows for whitespace inside, but not
outside the expression.
"""
return Combine(OneOrMore(expr | White(' ', max=1) + expr)) | 9306ffb73277d249062ffca45ded9d0bd9a45e3c | 4,399 |
def similarity(item, user, sim_dict):
"""
similarity between an item and a user (a set of items)
"""
if user not in sim_dict or item not in sim_dict[user]:
return 0
else:
return sim_dict[user][item] | e63eec781f7ed9fa72d21d1119daf9ce89d39b1b | 4,400 |
import requests
def get_totalt_result(req_url):
"""[summary]
This gets all the results in INT from the specified query
Args:
req_url ([STR]): [The request query that decides the data]
"""
r = requests.get(req_url, headers=headers)
json = r.json()
return json['result']['totalHits'] | 8c6ff54fbb285fc765afd8bb5e5ba3195ec624d0 | 4,401 |
import numpy as np
def lorentz(x, FWHM, x0=0):
"""
Returns Lorentzian lineshape.
"""
return FWHM/2/np.pi*((x-x0)**2+(FWHM/2)**2)**-1 | 92c21b7b99b600b2dc622d2ea09b0bd3e39f8047 | 4,402 |
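A quick numeric check for lorentz (illustrative only; assumes the function and the numpy import above are in scope). At the line centre the area-normalised Lorentzian peaks at 2/(pi*FWHM):

assert np.isclose(lorentz(0.0, 2.0), 1.0 / np.pi)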
def count_collision(strMap: list[str], right: int, down: int) -> int:
"""Read the map and count how many tree would be encountered if someone start from the top left corner"""
mapWidth = len(strMap[0]) # All lines are assumed to have same width
xCoord, yCoord = right % mapWidth, down
count = 0
while yCoord < len(strMap):
if strMap[yCoord][xCoord] == TREE:
count += 1
xCoord = (xCoord + right) % mapWidth
yCoord += down
return count | b233224a407493757ba7976ede22499b21afe068 | 4,403 |
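A minimal sketch for count_collision (illustrative only). TREE is not defined in the snippet above, so '#' is assumed here; note that the walk starts at (right, down), not at the top-left corner itself:

TREE = '#'
tiny_map = ["...", ".#.", "..."]
# First visited cell is (x=1, y=1), which holds the single tree.
assert count_collision(tiny_map, right=1, down=1) == 1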
def svn_config_find_group(*args):
"""svn_config_find_group(svn_config_t cfg, char key, char master_section, apr_pool_t pool) -> char"""
return _core.svn_config_find_group(*args) | 3974798a82f2dfb77957e8de1c4814725d82c3a9 | 4,404 |
def _database_exists():
"""Checks for existence of database"""
_require_environment()
database = _get_database_name()
with settings(hide('warnings'), warn_only=True):
result = run(MYSQL_PREFIX % "\"SHOW DATABASES LIKE '%(NAME)s';\"" % database)
if database['NAME'] in result:
return True
else:
print('Database %(NAME)s does not exist' % database)
return False | 94cbffb4d7e62d6c9fcae7d0966c6d595ddf7907 | 4,405 |
def EncoderDecoder(d_model, d_ff, n_heads, dropout, layer_idx, mode,
ff_activation):
"""Transformer encoder-decoder layer.
The input is a triple (decoder_input, mask, encoder) where the mask is
created from the original source to prevent attending to the padding part
of the encoder.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
layer_idx: which layer are we at (for bookkeeping)
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
the layer, returning a triple (decoder_activations, mask, encoder).
"""
decoder_self_attention = [ # vecs_d pmask vecs_e
tl.LayerNorm(), # vecs_d ..... ......
tl.BasicCausalAttention(
d_model, n_heads=n_heads, dropout=dropout, mode=mode),
tl.Dropout(rate=dropout, mode=mode), # vecs_d ..... ......
]
decoder_to_encoder_attention = [ # vecs_d masks vecs_e
tl.LayerNorm(), # vecs_d masks vecs_e
tl.Parallel([], [], tl.Dup()), # ______ _____ vecs_e vecs_e
tl.Parallel([], tl.Swap()), # ______ vecs_e masks ......
tl.Parallel([], tl.Dup()), # ______ vecs_e vecs_e ..... ......
tl.AttentionQKV( # (q k v masks ... --> vecs_d masks ...)
d_model, n_heads=n_heads, dropout=dropout, mode=mode),
tl.Dropout(rate=dropout, mode=mode), # vecs_d mask vecs_e
]
feed_forward = [
FeedForward(d_model, d_ff, dropout, layer_idx, mode, ff_activation),
]
return tl.Serial( # vecs_d masks vecs_e
tl.Residual(decoder_self_attention), # vecs_d masks vecs_e
tl.Residual(decoder_to_encoder_attention), # vecs_d masks vecs_e
tl.Residual(feed_forward), # vecs_d masks vecs_e
) | 8dd8032d2b0b270f49f21adedb22474565827801 | 4,406 |
def group_normalize(strokes):
    """ normalize a multistroke drawing """
    # Flatten every stroke into one long stroke so the bounding box spans the
    # whole drawing (operator.concat takes two arguments and cannot be applied
    # to a list of strokes directly).
    long_stroke = Stroke([p for s in strokes for p in s.x],
                         [p for s in strokes for p in s.y])
x_min = min(long_stroke.x)
x_max = max(long_stroke.x)
y_min = min(long_stroke.y)
y_max = max(long_stroke.y)
x_range = float(x_max-x_min)
y_range = float(y_max-y_min)
normalized_strokes = []
for stroke in strokes:
x = ((np.array(stroke.x) - x_min)/x_range).tolist()
y = ((np.array(stroke.y) - y_min)/y_range).tolist()
normalized_strokes.append(Stroke(x,y))
return normalized_strokes | ec7a44573b2334b69d3d878bd381a0ced1fdd304 | 4,407 |
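A hypothetical usage sketch: the real Stroke type is not shown in the snippet, so a namedtuple with x and y lists stands in for it, and numpy is assumed to be imported as np:

import numpy as np
from collections import namedtuple

Stroke = namedtuple("Stroke", ["x", "y"])
strokes = [Stroke([0, 2], [0, 1]), Stroke([1, 4], [2, 3])]
for s in group_normalize(strokes):
    print(s.x, s.y)
# [0.0, 0.5] [0.0, 0.333...]
# [0.25, 1.0] [0.666..., 1.0]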
def _get_sa_bracket(myimt, saset):
"""
For a given SA IMT, look through the input SAs and return a tuple of
    a) a pair of IMT strings representing the periods bracketing the given
    period; or b) the single IMT representing the first or last period in
    the input list if the given period is off the end of the list.
    Args:
        myimt (str): The SA IMT whose period is searched for in the input list.
saset (list): A list of SA IMTs.
Returns:
tuple: One or two strings representing the IMTs closest to or
bracketing the input IMT.
"""
if not len(saset):
return ()
#
# Stick the target IMT into a copy of the list of SAs, then sort
# the list by period.
#
ss = saset.copy()
ss.append(myimt)
tmplist = sorted(ss, key=_get_period_from_imt)
nimt = len(tmplist)
#
# Get the index of the target IMT in the sorted list
#
myix = tmplist.index(myimt)
#
# If the target IMT is off the end of the list, return the
# appropriate endpoint; else return the pair of IMTs that
# bracket the target.
#
if myix == 0:
return (tmplist[1], )
elif myix == nimt - 1:
return (tmplist[-2], )
else:
return (tmplist[myix - 1], tmplist[myix + 1]) | 2588fb1a45a008ec81770f69b5f6ee815f1f2511 | 4,408 |
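A hypothetical sketch: the _get_period_from_imt helper is not shown here, so IMT strings of the form "SA(0.3)" are assumed and the period is parsed directly by a stand-in:

def _get_period_from_imt(imt):
    # Hypothetical stand-in for the real helper used by _get_sa_bracket.
    return float(imt[3:-1])

saset = ["SA(0.1)", "SA(0.3)", "SA(1.0)"]
print(_get_sa_bracket("SA(0.5)", saset))   # -> ('SA(0.3)', 'SA(1.0)')
print(_get_sa_bracket("SA(0.05)", saset))  # -> ('SA(0.1)',)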
def fb83(A, B, eta=1., nu=None):
"""
Generates the FB8 distribution using the orthogonal vectors A and B
where A = gamma1*kappa and B = gamma2*beta (gamma3 is inferred)
    A may not have length zero, but may be arbitrarily close to zero
B may have length zero however. If so, then an arbitrary value for gamma2
(orthogonal to gamma1) is chosen
"""
kappa = norm(A)
beta = norm(B)
gamma1 = A / kappa
if beta == 0.0:
gamma2 = __generate_arbitrary_orthogonal_unit_vector(gamma1)
else:
gamma2 = B / beta
theta, phi, psi = FB8Distribution.gammas_to_spherical_coordinates(
gamma1, gamma2)
gamma1, gamma2, gamma3 = FB8Distribution.spherical_coordinates_to_gammas(
theta, phi, psi)
return FB8Distribution(gamma1, gamma2, gamma3, kappa, beta, eta, nu) | 1162c99eb3964512d935db8e5ce19bfc6eb5b391 | 4,409 |
def replicate(pta, ptac, p0, coefficients=False):
"""Create a replicated residuals conditioned on the data.
Here pta is standard marginalized-likelihood PTA, and
ptac is a hierarchical-likelihood version of pta with
coefficients=True for all GPs. This function:
- calls utils.get_coefficients(pta, p0) to get a realization
of the GP coefficients conditioned on the data and on the
hyperparameters in p0;
- calls ptac.get_delay() to compute the resulting realized
GPs at the toas;
- adds measurement noise (including ECORR) consistent with
the hyperparameters.
To use this (pending further development), you need to set
combine=False on the pta/ptac GPs, and method='sparse' on
the ptac EcorrKernelNoise.
Returns a list of replicated residuals, one list element
per pulsar."""
# GP delays
if not coefficients:
p0 = get_coefficients(pta, p0)
ds = ptac.get_delay(params=p0)
# note: the proper way to cache the Nmat computation is to give
# a `sample` method to csc_matrix_alt and ndarray_alt, which
# would then save the factorization in the instance
nmats = ptac.get_ndiag(params=p0)
for d, nmat in zip(ds, nmats):
if isinstance(nmat, sps.csc_matrix):
# add EFAC/EQUAD/ECORR noise
# use xx' = I => (Lx)(Lx)' = LL' with LL' = PNP'
# hence N[P[:, np.newaxis], P[np.newaxis, :]] = LL'
# see https://scikit-sparse.readthedocs.io/en/latest/cholmod.html
ch = cholesky(nmat)
d[ch.P()] += ch.L() @ np.random.randn(len(d))
elif isinstance(nmat, np.ndarray):
# diagonal case, nmat will be ndarray_alt instance
d += np.sqrt(nmat) * np.random.randn(len(d))
else:
raise NotImplementedError(
"Cannot take Nmat factor; " "you may need to set the EcorrKernelNoise to 'sparse'."
)
return ds | addab624fb2314a004a7454ca3a3199539baabf9 | 4,410 |
def load_danube() -> pd.DataFrame:
"""
The danube dataset contains ranks of base flow observations from the Global River Discharge
project of the Oak Ridge National Laboratory Distributed Active Archive Center (ORNL DAAC),
a NASA data center. The measurements are monthly average flow rate for two stations situated
at Scharding (Austria) on the Inn river and at Nagymaros (Hungary) on the Danube.
The data have been pre-processed to remove any time trend. Specifically, Bacigal et al. (2011)
extracted the raw data, and obtain the fast Fourier transformed centered observations. The
negative spectrum is retained and a linear time series model with 12 seasonal components is
fitted. Residuals are then extracted and AR model fitted to the series, the selection being
done based on the AIC criterion with imposed maximum order of 3 and the number of autoregressive
components may differ for each series.
This data frame contains the following columns:
inn:
A numeric vector containing the rank of pre-whitened level observations of the Inn river
at Nagyramos.
donau:
A numeric vector containing the rank of prewhitened level observations of the Donau river
at Scharding.
"""
return _load_file('danube.csv') | f1ae04e37e69acf1fa805953f58c96633136be09 | 4,411 |
def get_index(grid_mids, values):
"""get the index of a value in an array
Args:
grid_mids: array of grid centers
value: array of values
Returns:
indices
"""
diff = np.diff(grid_mids)
diff = np.concatenate((diff, diff[-1:]))
edges = np.concatenate((grid_mids-diff/2, grid_mids[-1:]+diff[-1:]/2))
#print(edges)
ind = np.digitize(np.array(values), edges)-1
ind[ind > grid_mids.shape[0]-1] = grid_mids.shape[0]-1
return ind | b8277e84ddaae5c951ad032f3a738d1f9c02feac | 4,412 |
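A usage sketch: values are binned by edges halfway between grid centres, and anything beyond the last edge is clamped to the final bin.

import numpy as np

grid_mids = np.array([0.5, 1.5, 2.5, 3.5])
print(get_index(grid_mids, [0.4, 1.9, 99.0]))  # -> [0 1 3]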
def validate_incoming_edges(graphs, param=None):
"""
    If a node of a certain type has more than a threshold of incoming
    edges, mark the possible stitch as a bad stitch.
"""
param = param or {}
res = {}
i = 0
for candidate in graphs:
res[i] = 'ok'
for node, values in candidate.nodes(data=True):
if values[stitcher.TYPE_ATTR] not in list(param.keys()):
continue
tmp = param[values[stitcher.TYPE_ATTR]]
if len(candidate.in_edges(node)) >= tmp:
                res[i] = 'node ' + str(node) + ' has too many edges: ' + \
str(len(candidate.in_edges(node)))
i += 1
return res | 7872ad52c942d986725d7dc1e089ad91850b5c71 | 4,413 |
def face_area(bounding_box, correction):
"""
    Expand the face bounding box to a square roughly twice the detected size;
    face detectors crop very tightly, which is not useful when you want the
    whole head.
    Arguments: original bounding box (x1, y1, x2, y2), horizontal correction value
Returns: 4-element list - bounding box for expanded area (ints)
"""
x_1, y_1, x_2, y_2 = bounding_box
x_1 = x_1 + correction
x_2 = x_2 + correction
x_center = int(x_1 + (x_2 - x_1) / 2)
y_center = int(y_1 + (y_2 - y_1) / 2)
factor = 2
square_factor = int(max(x_2 - x_1, y_2 - y_1) * factor / 2)
x_1p = x_center - square_factor
y_1p = y_center - square_factor
x_2p = x_1p + square_factor * 2
y_2p = y_1p + square_factor * 2
return [x_1p, y_1p, x_2p, y_2p] | b4c47b01989acb706e9c959cd29902b6045a7fad | 4,414 |
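A usage sketch: a tight 80x100 detector box becomes a 200x200 square (twice the longer side) around the same centre.

box = [100, 120, 180, 220]            # x1, y1, x2, y2 from a face detector
print(face_area(box, correction=0))   # -> [40, 70, 240, 270]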
def ut_to_dt(ut):
"""Converts a universal time in days to a dynamical time in days."""
# As at July 2020, TAI is 37 sec ahead of UTC, TDT is 32.184 seconds ahead of TAI.
return ut + 69.184/SEC_IN_DAY | 1f9af7758c53d32494013280b401393e5723d358 | 4,415 |
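A quick check, assuming the module-level constant SEC_IN_DAY = 86400 that the snippet uses:

SEC_IN_DAY = 86400.0
jd_ut = 2459050.5  # an arbitrary Julian date expressed in universal time
print((ut_to_dt(jd_ut) - jd_ut) * SEC_IN_DAY)  # -> ~69.184 seconds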
from pathlib import Path
def _read_group_h5(filename: Path, groupname: str) -> ndarray:
"""Return group content.
Args:
filename: path of hdf5 file.
groupname: name of group to read.
Returns:
content of group.
"""
try:
with h5py.File(filename, 'r') as h5f:
data = h5f[groupname][()]
except OSError as err:
# h5py doesn't always include the filename in its error messages
err.args += (filename,)
raise
return data | 3febc0c0322ca9d9a7f30b4d9cf94ce32f5d3109 | 4,416 |
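A round-trip sketch: write a small dataset with h5py, then read it back through the helper.

import h5py
import numpy as np
from pathlib import Path

path = Path("example.h5")
with h5py.File(path, "w") as h5f:
    h5f["velocity"] = np.arange(5.0)
print(_read_group_h5(path, "velocity"))  # -> [0. 1. 2. 3. 4.]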
def _clip_bbox(min_y, min_x, max_y, max_x):
"""Clip bounding box coordinates between 0 and 1.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
Returns:
Clipped coordinate values between 0 and 1.
"""
min_y = tf.clip_by_value(min_y, 0.0, 1.0)
min_x = tf.clip_by_value(min_x, 0.0, 1.0)
max_y = tf.clip_by_value(max_y, 0.0, 1.0)
max_x = tf.clip_by_value(max_x, 0.0, 1.0)
return min_y, min_x, max_y, max_x | bb5b2ed23626e26004ae87bdd7fc03b2d177f38f | 4,417 |
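A usage sketch in TensorFlow 2.x eager mode (tf is assumed to be the tensorflow import used by the snippet):

import tensorflow as tf

coords = _clip_bbox(tf.constant(-0.1), tf.constant(0.2),
                    tf.constant(1.3), tf.constant(0.9))
print([round(float(c), 3) for c in coords])  # -> [0.0, 0.2, 1.0, 0.9]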