| content | sha1 | id |
|---|---|---|
| stringlengths 35–762k | stringlengths 40–40 | int64 0–3.66M |
def shimenreservoir_operation_rule_lower_limit():
"""
Real Name: ShiMenReservoir Operation Rule Lower Limit
    Original Eqn: WITH LOOKUP ( Date, ([(1,190)-(366,250)],(1,240),(32,240),(152,220),(182,220),(244,225),(335,240),(365,240) ))
Units: m
Limits: (None, None)
Type: component
"""
return functions.lookup(date(), [1, 32, 152, 182, 244, 335, 365],
[240, 240, 220, 220, 225, 240, 240]) | 72830cd13bb411afe67398750f33c75a3a5bfba3 | 3,652,000 |
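# Usage sketch (illustrative): PySD lookup tables are piecewise-linear, so the
# call above should behave like numpy.interp over the (day-of-year, level)
# pairs; treating functions.lookup as equivalent to np.interp is an assumption.
import numpy as np
np.interp(200, [1, 32, 152, 182, 244, 335, 365],
          [240, 240, 220, 220, 225, 240, 240])  # -> ~221.45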
def pre_process(dd, df, dataset_len, batch_size):
"""Partition one dataframe to multiple small dataframes based on a given batch size."""
df = dd.str2ascii(df, dataset_len)
prev_chunk_offset = 0
partitioned_dfs = []
while prev_chunk_offset < dataset_len:
curr_chunk_offset = prev_chunk_offset + batch_size
chunk = df.iloc[prev_chunk_offset:curr_chunk_offset:1]
partitioned_dfs.append(chunk)
prev_chunk_offset = curr_chunk_offset
return partitioned_dfs | a0a19916d60476430bdaf27f85f31620f2b5ae2a | 3,652,001 |
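# Usage sketch (illustrative; `dd` is assumed to expose the str2ascii helper
# used above): with batch_size=2, a 5-row frame yields chunks of 2, 2, and 1.
# chunks = pre_process(dd, df, dataset_len=len(df), batch_size=2)
# [len(c) for c in chunks]  # -> [2, 2, 1]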
from datetime import datetime
import re
def fromisoformat(s):
"""
Hacky way to recover a datetime from an isoformat() string
Python 3.7 implements datetime.fromisoformat() which is the proper way
There are many other 3rd party modules out there, but should be good enough for testing
"""
return datetime(*map(int, re.findall('\d+', s))) | 7db362222f9da28f43eab5363336e0ca09b65960 | 3,652,002 |
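# Usage sketch (illustrative): round-trips a datetime through isoformat().
dt = datetime(2021, 3, 14, 15, 9, 26)
assert fromisoformat(dt.isoformat()) == dt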
import numpy as np
def non_repeat(a, decimals=12):
    """
    Return matrix `a` with duplicate rows removed, preserving the original row order.
    Rows are rounded to `decimals` places before comparison.
    """
    a = np.ascontiguousarray(a)
    a = np.around(a, decimals=int(decimals))
    # View each row as one structured element so np.unique compares whole rows.
    _, index = np.unique(a.view([('', a.dtype)] * a.shape[1]), return_index=True)
    index = sorted(index)
    return a[index] | 312ce49fe275649c745ee22c79a08c0a2c1b798b | 3,652,003 |
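# Usage sketch (illustrative): duplicate rows are dropped, first occurrences kept.
print(non_repeat(np.array([[1.0, 2.0], [1.0, 2.0], [3.0, 4.0]])))
# -> [[1. 2.]
#     [3. 4.]]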
import numpy as np
def softmax_with_cross_entropy(predictions, target_index):
"""
Computes softmax and cross-entropy loss for model predictions,
including the gradient
Arguments:
predictions, np array, shape is either (N) or (batch_size, N) -
classifier output
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss, single value - cross-entropy loss
    dprediction, np array same shape as predictions - gradient of the loss with respect to predictions
"""
is_batch = predictions.ndim == 2
probs = softmax(predictions)
loss = cross_entropy_loss(probs, target_index)
dprediction = probs
if is_batch:
batch_size = target_index.size
i = np.arange(batch_size)
dprediction[i, target_index] -= 1
dprediction /= batch_size
else:
dprediction[target_index] -= 1
return loss, dprediction | 9683da852dae92a5dec1f4353f4d93b2243fd30d | 3,652,004 |
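# Minimal sketches of the two helpers the function above assumes; the real
# implementations live elsewhere in the source assignment, so treat these as
# illustrative stand-ins with the same contract.
def softmax(predictions):
    # Shift by the row-wise max for numerical stability before exponentiating.
    shifted = predictions - np.max(predictions, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)

def cross_entropy_loss(probs, target_index):
    if probs.ndim == 2:
        rows = np.arange(target_index.size)
        # Mean negative log-likelihood over the batch; matches the /batch_size
        # in the gradient above.
        return -np.mean(np.log(probs[rows, target_index]))
    return -np.log(probs[target_index])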
import re
def scraper_main_olx(url):
""" Reads pages with offers from OLX and provides URLS to said offers. """
def __create_url_olx(offs_ids, prefix="https://www.olx.pl"):
""" Method creates an olx offer link from parts read from a main page. """
return [
"/".join([
prefix,
"oferta",
"CID3-ID" + o_id + ".html"
])
for o_id in offs_ids
]
# Loading the page
page = get_page(url)
    # Reading the offers' ids: the id is the part of the class attribute after
    # the last underscore, minus its two-character prefix.
    offers_ids = [
        re.search("[^_]*$", off.attrib["class"]).group()[2:]
        for off in page.element("table[id=offers_table] table[summary=Ogłoszenie]")
    ]
return {
"url": url,
"offers_urls": __create_url_olx(offers_ids)
} | 4f209dd800124c3b59db31029141e4d37f98e7d8 | 3,652,005 |
import sys
from hashlib import sha1
def process_sha1(dataset_infos):
    """Computes the SHA-1 hash of a dataset. Discards the dataset if its SHA-1 hash is already in the database.
    N.B.: a dataset for which the SHA-1 hash is not in the database represents a new dataset version.
    :param dataset_infos: A DatasetInfos containing the path to the dataset needing a SHA-1 hash verification,
    and the previous SHA-1 hashes.
    :return: The DatasetInfos, with its SHA-1 hash set when the hash is not already in the database.
    """
    if not isinstance(dataset_infos, DatasetInfos):
        raise TypeError("dataset_infos must be a valid DatasetInfos instance.")
sha1_hash = sha1()
path_to_dataset = dataset_infos.zip_path
previous_sha1_hashes = dataset_infos.previous_sha1_hashes
try:
with open(path_to_dataset, "rb") as f:
while data := f.read(DATA_CHUNK_BYTE_SIZE):
sha1_hash.update(data)
except OSError:
pass
sha1_hash = sha1_hash.hexdigest()
if sha1_hash not in previous_sha1_hashes:
dataset_infos.sha1_hash = sha1_hash
else:
print(
f"SHA-1 hash {sha1_hash} already exists for {path_to_dataset}, dataset discarded\n",
file=sys.stderr,
)
return dataset_infos | 159d5b9b9a43c42b2b5c3fb5af2caa42728a6659 | 3,652,006 |
import torch
from typing import List
from typing import Tuple
def accuracy(
        output: torch.Tensor,
        target: torch.Tensor,
        topk: Tuple[int, ...] = (1,)) -> List[float]:
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)  # top-k class indices per sample
        pred = pred.t()  # shape: (maxk, batch_size)
        correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res | 73dd8a03e729fa89cea7abf535779dd45897113d | 3,652,007 |
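# Usage sketch (illustrative): top-1 and top-5 accuracy on random logits.
logits = torch.randn(8, 10)
labels = torch.randint(0, 10, (8,))
top1, top5 = accuracy(logits, labels, topk=(1, 5))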
def _make_unique(key, val):
"""
Make a tuple of key, value that is guaranteed hashable and should be unique per value
:param key: Key of tuple
:param val: Value of tuple
:return: Unique key tuple
"""
if type(val).__hash__ is None:
val = str(val)
return key, val | 65d746276f635c129aa0a5aeb9b9f467453c0b2a | 3,652,008 |
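# Usage sketch (illustrative): unhashable values fall back to their string form.
_make_unique("ids", [1, 2])  # -> ('ids', '[1, 2]')
_make_unique("id", 7)        # -> ('id', 7)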
def replace_caps(x):
"""Replace all Capitalized tokens in `x` by their lower version and add `TK_MAJ` before."""
res = []
for t in x:
if t == '': continue
if t[0].isupper():
if len(t) == 1 and t[0] == 'I':
res.append(TK_MAJ)
if len(t) > 1 and (t[1:].islower() or (t[1] == "’" or t[1] == "'")):
res.append(TK_MAJ)
res.append(t.lower())
return res | 72519c264a97b60b05d430fc86dce1069e3718a7 | 3,652,009 |
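# Usage sketch (illustrative): TK_MAJ is the capitalization marker defined by
# the surrounding tokenizer; the value used here is an assumption.
TK_MAJ = 'xxmaj'
print(replace_caps(['Hello', 'WORLD', 'I', 'am']))
# -> ['xxmaj', 'hello', 'world', 'xxmaj', 'i', 'am']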
def computes_ts_coverage(k, outputs, two_symbols):
""" Computes the input coverage by Two Symbol schematas.
Args:
k (int): the number of inputs.
    outputs (list): the list of transition outputs.
two_symbols (list): The final list of Two Symbol permutable schematas. This is returned by `find_two_symbols`.
Returns:
coverage (dict): a dictionary of coverage where keys are inputs states and values are lists of the Two Symbols covering that input.
"""
ts_coverage = {}
for statenum in range(2**k):
binstate = statenum_to_binstate(statenum, base=k)
ts_coverage[binstate] = covering_twosymbols = []
        output = int(outputs[statenum])
        if output == 2:
            # Output 2 encodes "either": the transition may go to 0 or 1.
            output = [0, 1]
        else:
            output = [output]
for t in output:
for implicant, permut_indxs, same_symbols_indxs in two_symbols[t]:
if __ts_covers(implicant, permut_indxs, binstate):
covering_twosymbols.append((implicant, permut_indxs, same_symbols_indxs))
#
return ts_coverage | 741718bb78ffc6840bb004eb80f096dc30d4df79 | 3,652,010 |
def create_measurements(nh, nv, offset, measurement_type):
"""Creates necessary measurement details for a given type on a given lattice.
Given the lattice size, whether odd or even pairs are being measured,
and the measurement type, this function returns a namedtuple
with the pairs of qubits to be measured, the circuit preparation
function and the measurement_type to be passed to the analysis
function.
The measurement_type can be:
"onsite", "horiz", "vert", "vert0", "vert1"
Args:
nh -- number of horizontal sites
nv -- number of vertical sites
offset -- offset taking care of odd vs even pairing
measurement_type -- onsite, horizontal or vertical measurement
Returns:
Measurements namedtuple with measurement
(pairs, preparation circuit, analysis type)
"""
    n = nh * nv
    if measurement_type == "onsite":
        pairs = [(i, i+n) for i in range(n)]
        prep = None
    elif measurement_type == "horiz":
        pairs = [(i+j, i+j+1) for i in range(0, 2*n, nh) for j in range(offset, nh-1, 2)]
        prep = prepH
    elif measurement_type == "vert":
        pairst = [(i*nh+j, (i+1)*nh+j) for i in range(offset, nv-1, 2) for j in range(nh)]
        pairst += [(i*nh+j+n, (i+1)*nh+j+n) for i in range(offset, nv-1, 2) for j in range(nh)]
        pairs = [(map_site_to_JW(nh, nv, site1), map_site_to_JW(nh, nv, site2))
                 for (site1, site2) in pairst]
        prep = prepV
    elif measurement_type == "vert0":
        pairs = [(i+j, i+j+1) for i in range(0, 2*n, n) for j in range(1, n-1, 2)]
        prep = prepV
    elif measurement_type == "vert1":
        pairs = [(i+j, i+j+1) for i in range(0, 2*n, n) for j in range(1, n-1, 2)]
        prep = prepV2wrap(nh, nv)
    else:
        raise ValueError(f"Unknown measurement_type: {measurement_type!r}")
print(f"Prepped {measurement_type}, pairs={pairs}")
return Measurements(pairs=pairs, prep=prep, analysis=measurement_type) | ff4dbe1ee49a0db41c30fc9ba8fc6ab94c314c48 | 3,652,011 |
def headline(
in_string,
surround = False,
width = 72,
nr_spaces = 2,
spacesym = ' ',
char = '=',
border = None,
uppercase = True,
):
"""return in_string capitalized, spaced and sandwiched:
============================== T E S T ===============================
Parameters are the following:
* char (one-letter string, default='='):
changes the character the title is put between.
* surround (boolean, default=False):
adds additional lines above and under in_string:
====================================================
==================== T E S T =====================
====================================================
* width (int, default=72):
defines the width of each line.
    * nr_spaces (int, default=2):
        defines the number of spaces between in_string and the
        char as indicated in ..====__T I T L E__====.. .
    * spacesym (one-letter string, default=' '):
        instead of using a whitespace to separate the 'title' letters,
        any other character can be used, e.g. '_'.
    * border (either string or list/tuple of two strings; defaults to char):
        If this is a single character string, it will be used at the left
        and right end of the headline.
        If this is a multiple character string, it will be used at the left
        and mirrored at the right. This way you can easily introduce additional
        space if you prefer and use, for example, C-style inline comments
        with border="/*".
        If this is not enough for you, the left and right borders can be given
        separately, like in border=("<!--", "-->")
* uppercase (boolean, default=True):
if True, headline will capitalize the letters given by in_string.
if False, in_string will be used as it is given.
"""
if isinstance(border, tuple) or isinstance(border, list):
left_border = border[0]
right_border = border[1]
else:
if border is None:
border = char
left_border = border
right_border = border[::-1]
nr_sym_spaces = len(left_border + right_border)
headline_text = spacesym.join(
l.upper() if uppercase else l for l in in_string
)
headline_text_sandwiched = '{:{}^{}}'.format(
headline_text,
spacesym,
2 * (len(in_string) + nr_spaces) - 1
)
headline_without_sym = '{:{}^{}}'.format(
headline_text_sandwiched,
char,
width - nr_sym_spaces
)
headline_full = '{1}{0}{2}'.format(
headline_without_sym,
left_border,
right_border
)
if surround:
line = '{1}{0}{2}'.format(
(width - nr_sym_spaces) * char,
left_border,
right_border
)
output = line + '\n' + headline_full + '\n' + line
else:
output = headline_full
return output | 1848d91bbf6c9d2216338f35433a26bcd3854664 | 3,652,012 |
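# Usage sketch (illustrative): both calls print 72-character banner lines.
print(headline('test'))
print(headline('test', surround=True, border='/*'))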
import itertools
import unicodedata
def rainbow_cmd(bot, trigger):
"""Make text colored. Options are "rainbow", "usa", "commie", and "spooky"."""
text = clean(trigger.group(2) or '')
scheme = trigger.group(1).lower()
if not text:
try:
msg = SCHEME_ERRORS[scheme]
except KeyError:
msg = "How did you do that?!"
bot.reply(msg)
return module.NOLIMIT
try:
colors = COLOR_SCHEMES[scheme]
except KeyError:
# not possible to reach this at time of writing, but who knows?
# mistakes happen when updating stuff that needs to be changed in parallel
bot.reply("I don't know what color sequence to use for '{}'!".format(scheme))
return module.NOLIMIT
color_cycle = itertools.cycle(colors)
bot.say(
''.join(
char if unicodedata.category(char) == 'Zs'
else formatting.color(char, next(color_cycle))
for char in text
)
) | 292e55511b40c3c265e7ba87164cf179e54c16a6 | 3,652,013 |
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
errors='ignore', separator='&', cls=None):
"""Parse a querystring and return it as :class:`MultiDict`. Per default
only values are decoded into unicode strings. If `decode_keys` is set to
`True` the same will happen for keys.
Per default a missing value for a key will default to an empty key. If
you don't want that behavior you can set `include_empty` to `False`.
Per default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
In previous versions ";" and "&" could be used for url decoding.
This changed in 0.5 where only "&" is supported. If you want to
use ";" instead a different `separator` can be provided.
The `cls` parameter was added.
:param s: a string with the query string to decode.
:param charset: the charset of the query string.
:param decode_keys: set to `True` if you want the keys to be decoded
as well.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
"""
if cls is None:
cls = MultiDict
result = []
for pair in str(s).split(separator):
if not pair:
continue
if '=' in pair:
key, value = pair.split('=', 1)
else:
key = pair
value = ''
key = _unquote_plus(key)
if decode_keys:
key = _decode_unicode(key, charset, errors)
result.append((key, url_unquote_plus(value, charset, errors)))
return cls(result) | 2b5b9598639ef600900dd1cb50c8ec6de892feff | 3,652,014 |
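# Usage sketch (illustrative): duplicate keys are preserved in the MultiDict.
# url_decode('a=1&b=&b=2') -> MultiDict([('a', '1'), ('b', ''), ('b', '2')])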
from typing import Type
from yaml import FullLoader
def special_loader(as_type: type) -> Type[FullLoader]:
"""Construct new loader class supporting current class structure"""
class TypedLoader(FullLoader): # pylint: disable=too-many-ancestors
"""Custom loader with typed resolver"""
...
_add_path_resolvers(as_type, TypedLoader) # we need to add resolver only to the root typed item
return TypedLoader | e60c96284334fc57cc32af557a86433bb5302526 | 3,652,015 |
def try_(func, *args, **kwargs):
"""Try to call a function and return `_default` if it fails
Note: be careful that in order to have a fallback, you can supply
the keyword argument `_default`. If you supply anything other
than a keyword arg, it will result in it being passed to the wrapped
function and could cause unexpected behavior including always failing
with default value of None.
"""
_default_val = kwargs.pop("_default", None)
try:
return func(*args, **kwargs)
except Exception: # pylint: disable=broad-except
return _default_val | 206b25bd2e345d9cd6423e2cbc2706c274f36c89 | 3,652,016 |
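# Usage sketch (illustrative):
try_(int, "42")                # -> 42
try_(int, "oops", _default=0)  # -> 0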
def _create_course_and_cohort_with_user_role(course_is_cohorted, user, role_name):
"""
Creates a course with the value of `course_is_cohorted`, plus `always_cohort_inline_discussions`
set to True (which is no longer the default value). Then 1) enrolls the user in that course,
2) creates a cohort that the user is placed in, and 3) adds the user to the given role.
Returns: a tuple of the created course and the created cohort
"""
cohort_course = CourseFactory.create(
cohort_config={"cohorted": course_is_cohorted, "always_cohort_inline_discussions": True}
)
CourseEnrollmentFactory.create(user=user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[user])
_assign_role_to_user(user=user, course_id=cohort_course.id, role=role_name)
    return cohort_course, cohort | 6f55d10d4b1dfa27c067298862e89a558c5618a1 | 3,652,017 |
def relative_vorticity(
u, v, wrap=None, one_sided_at_boundary=False, radius=6371229.0, cyclic=None
):
"""Calculate the relative vorticity using centred finite
differences.
The relative vorticity of wind defined on a Cartesian domain (such
as a plane projection) is defined as
ζcartesian = δv/δx − δu/δy
where x and y are points on along the 'X' and 'Y' Cartesian
dimensions respectively; and u and v denote the 'X' and 'Y'
components of the horizontal winds.
    If the wind field is defined on a spherical
latitude-longitude domain then a correction factor is included:
ζspherical = δv/δx − δu/δy + (u/a)tan(ϕ)
where u and v denote the longitudinal and latitudinal components
of the horizontal wind field; a is the radius of the Earth; and ϕ
is the latitude at each point.
The relative vorticity is calculated using centred finite
differences (see the *one_sided_at_boundary* parameter).
The grid may be global or limited area. If missing values are
present then missing values will be returned at points where the
centred finite difference could not be calculated. The boundary
conditions may be cyclic in longitude. The non-cyclic boundaries
may either be filled with missing values or calculated with
off-centre finite differences.
Reference: H.B. Bluestein, Synoptic-Dynamic Meteorology in
Midlatitudes, 1992, Oxford Univ. Press p113-114
:Parameters:
u: `Field`
A field containing the x-wind. Must be on the same grid as
the y-wind.
v: `Field`
A field containing the y-wind. Must be on the same grid as
the x-wind.
radius: optional
The radius of the sphere when the winds are on a spherical
polar coordinate domain. May be any numeric scalar object
that can be converted to a `Data` object (which includes
numpy array and `Data` objects). By default *radius* has a
value of 6371229.0 metres, representing the Earth's
radius. If units are not specified then units of metres
are assumed.
*Parameter example:*
Five equivalent ways to set a radius of 6371200 metres:
``radius=6371200``, ``radius=numpy.array(6371200)``,
``radius=cf.Data(6371200)``, ``radius=cf.Data(6371200,
'm')``, ``radius=cf.Data(6371.2, 'km')``.
wrap: `bool`, optional
Whether the longitude is cyclic or not. By default this is
autodetected.
one_sided_at_boundary: `bool`, optional
If True then if the field is not cyclic off-centre finite
differences are calculated at the boundaries, otherwise
missing values are used at the boundaries.
:Returns:
`Field`
The relative vorticity calculated with centred finite
differences.
"""
if cyclic:
_DEPRECATION_ERROR_FUNCTION_KWARGS(
"relative_vorticity",
{"cyclic": cyclic},
"Use the 'wrap' keyword instead",
) # pragma: no cover
# Get the standard names of u and v
u_std_name = u.get_property("standard_name", None)
v_std_name = v.get_property("standard_name", None)
# Copy u and v
u = u.copy()
v = v.copy()
# Get the X and Y coordinates
(u_x_key, u_y_key), (u_x, u_y) = get_cartesian_coords(u, "u", ("X", "Y"))
(v_x_key, v_y_key), (v_x, v_y) = get_cartesian_coords(v, "v", ("X", "Y"))
if not u_x.equals(v_x) or not u_y.equals(v_y):
raise ValueError("u and v must be on the same grid.")
# Check for lat/long
is_latlong = (u_x.Units.islongitude and u_y.Units.islatitude) or (
u_x.units == "degrees" and u_y.units == "degrees"
)
# Check for cyclicity
if wrap is None:
if is_latlong:
wrap = u.iscyclic(u_x_key)
else:
wrap = False
# Find the relative vorticity
if is_latlong:
# Save the units of the X and Y coordinates
x_units = u_x.Units
y_units = u_y.Units
# Change the units of the lat/longs to radians
radians = Units("radians")
u_x.Units = radians
u_y.Units = radians
v_x.Units = radians
v_y.Units = radians
# Find cos and tan of latitude
cos_lat = u_y.cos()
tan_lat = u_y.tan()
# Reshape for broadcasting
u_shape = [1] * u.ndim
u_y_index = u.get_data_axes().index(u_y_key)
u_shape[u_y_index] = u_y.size
v_shape = [1] * v.ndim
v_y_index = v.get_data_axes().index(v_y_key)
v_shape[v_y_index] = v_y.size
# Calculate the correction term
corr = u.copy()
corr *= tan_lat.array.reshape(u_shape)
# Calculate the derivatives
v.derivative(
v_x_key,
wrap=wrap,
one_sided_at_boundary=one_sided_at_boundary,
inplace=True,
)
v.data /= cos_lat.array.reshape(v_shape)
u.derivative(
u_y_key, one_sided_at_boundary=one_sided_at_boundary, inplace=True
)
radius = Data.asdata(radius).squeeze()
radius.dtype = float
if radius.size != 1:
raise ValueError(f"Multiple radii: radius={radius!r}")
if not radius.Units:
radius.override_units(Units("metres"), inplace=True)
elif not radius.Units.equivalent(Units("metres")):
raise ValueError(f"Invalid units for radius: {radius.Units!r}")
# Calculate the relative vorticity. Do v-(u-corr) rather than
# v-u+corr to be nice with coordinate reference corner cases.
rv = v - (u - corr)
rv.data /= radius
# Convert the units of latitude and longitude to canonical units
rv.dimension_coordinate("X").Units = x_units
rv.dimension_coordinate("Y").Units = y_units
else:
v.derivative(
v_x_key, one_sided_at_boundary=one_sided_at_boundary, inplace=True
)
u.derivative(
u_y_key, one_sided_at_boundary=one_sided_at_boundary, inplace=True
)
rv = v - u
# Convert the units of relative vorticity to canonical units
rv.Units = Units("s-1")
# Set the standard name if appropriate and delete the long_name
if (u_std_name == "eastward_wind" and v_std_name == "northward_wind") or (
u_std_name == "x_wind" and v_std_name == "y_wind"
):
rv.standard_name = "atmosphere_relative_vorticity"
else:
rv.del_property("standard_name", None)
rv.del_property("long_name", None)
return rv | 6134a44594cd84174f44f00a57df2f7284c4a7e5 | 3,652,018 |
import typing as _typing
import torch
import torch_geometric
def coalesce(
edge_index: torch.Tensor,
edge_attr: _typing.Union[
torch.Tensor, _typing.Iterable[torch.Tensor], None
] = None,
num_nodes: _typing.Optional[int] = ...,
is_sorted: bool = False,
sort_by_row: bool = True
) -> _typing.Union[
torch.Tensor, _typing.Tuple[torch.Tensor, torch.Tensor],
_typing.Tuple[torch.Tensor, _typing.Iterable[torch.Tensor]]
]:
"""
Row-wise sorts :obj:`edge_index` and removes its duplicated entries.
Duplicate entries in :obj:`edge_attr` are directly removed, instead of merged.
Args:
edge_index (LongTensor): The edge indices.
edge_attr (Tensor or List[Tensor], optional): Edge weights or multi-
dimensional edge features.
If given as a list, will re-shuffle and remove duplicates for all
its entries. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
is_sorted (bool, optional): If set to :obj:`True`, will expect
:obj:`edge_index` to be already sorted row-wise.
sort_by_row (bool, optional): If set to :obj:`False`, will sort
:obj:`edge_index` column-wise.
:rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else
(:class:`LongTensor`, :obj:`Tensor` or :obj:`Iterable[Tensor]]`)
"""
if not isinstance(num_nodes, int):
num_nodes = None
try:
return torch_geometric.utils.coalesce(
edge_index, edge_attr, num_nodes,
is_sorted=is_sorted,
sort_by_row=sort_by_row
)
    except (AttributeError, ModuleNotFoundError):
        # Older torch_geometric releases lack utils.coalesce; fall back to the
        # local implementation.
return __coalesce(
edge_index, edge_attr, num_nodes,
is_sorted=is_sorted,
sort_by_row=sort_by_row
) | 00006971c06fc599edb6b3ff12b2e0a7700dd136 | 3,652,019 |
def get_label_names(l_json):
"""
Get names of all the labels in given json
:param l_json: list of labels jsons
:type l_json: list
:returns: list of labels names
:rtype: list
"""
    return [j['name'] for j in l_json] | bab12bedc8b5001b94d6c5f02264b1ebf4ab0e99 | 3,652,020 |
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
from ...niworkflows.engine.workflows import LiterateWorkflow as Workflow
from ...niworkflows.interfaces.utility import KeySelect
from ...smriprep.workflows.outputs import _bids_relative
from ...niworkflows.interfaces.space import SpaceDataSource
# DerivativesDataSink and DEFAULT_MEMORY_MIN_GB come from package-level
# modules not shown in this snippet.
def init_asl_derivatives_wf(
bids_root,
metadata,
output_dir,
spaces,
scorescrub=False,
basil=False,
name='asl_derivatives_wf',
):
"""
Set up a battery of datasinks to store derivatives in the right location.
Parameters
----------
bids_root : :obj:`str`
Original BIDS dataset path.
metadata : :obj:`dict`
Metadata dictionary associated to the ASL run.
output_dir : :obj:`str`
Where derivatives should be written out to.
spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
A container for storing, organizing, and parsing spatial normalizations. Composed of
:py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
(e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
(e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
would lead to resampling on a 2mm resolution of the space).
name : :obj:`str`
This workflow's identifier (default: ``func_derivatives_wf``).
"""
nonstd_spaces = set(spaces.get_nonstandard())
workflow = Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=[
'asl_mask_std', 'asl_mask_t1', 'asl_std',
'asl_std_ref', 'asl_t1', 'asl_t1_ref', 'asl_native', 'asl_native_ref',
'asl_mask_native','confounds', 'confounds_metadata', 'source_file',
'template', 'spatial_reference', 'cbf', 'meancbf', 'score', 'avgscore',
'scrub', 'basil', 'pv', 'cbf_t1', 'meancbf_t1', 'att_t1', 'score_t1', 'avgscore_t1',
'scrub_t1', 'basil_t1', 'pv_t1', 'cbf_std', 'meancbf_std', 'score_std',
'avgscore_std', 'scrub_std', 'basil_std', 'pv_std','att','att_std','qc_file',
'cbf_hvoxf', 'score_hvoxf', 'scrub_hvoxf', 'basil_hvoxf', 'pvc_hvoxf',
'cbf_sc207', 'score_sc207', 'scrub_sc207', 'basil_sc207', 'pvc_sc207',
'cbf_sc217', 'score_sc217', 'scrub_sc217', 'basil_sc217', 'pvc_sc217',
'cbf_sc407', 'score_sc407', 'scrub_sc407', 'basil_sc407', 'pvc_sc407',
'cbf_sc417', 'score_sc417', 'scrub_sc417', 'basil_sc417', 'pvc_sc417'
]),
name='inputnode')
raw_sources = pe.Node(niu.Function(function=_bids_relative), name='raw_sources')
raw_sources.inputs.bids_root = bids_root
ds_confounds = pe.Node(DerivativesDataSink(
base_directory=output_dir, desc='confounds', suffix='regressors'),
name="ds_confounds", run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, raw_sources, [('source_file', 'in_files')]),
(inputnode, ds_confounds, [('source_file', 'source_file'),
('confounds', 'in_file'),
('confounds_metadata', 'meta_dict')]),
])
qcfile = pe.Node(
DerivativesDataSink(base_directory=output_dir,
desc='quality_control',
suffix='cbf', compress=False),
name='qcfile', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, qcfile, [('source_file', 'source_file'),
('qc_file', 'in_file')]),
])
cbf_hvoxf = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='HavardOxford', suffix='mean_cbf',
compress=False),
name='cbf_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
cbf_sc207 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7',
suffix='mean_cbf', compress=False),
name='cbf_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
cbf_sc217 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17',
suffix='mean_cbf', compress=False),
name='cbf_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
cbf_sc407 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7', suffix='mean_cbf',
compress=False),
name='cbf_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
cbf_sc417 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17',
suffix='mean_cbf', compress=False),
name='cbf_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, cbf_hvoxf, [('source_file', 'source_file'),
('cbf_hvoxf', 'in_file')]),
(inputnode, cbf_sc207, [('source_file', 'source_file'),
('cbf_sc207', 'in_file')]),
(inputnode, cbf_sc217, [('source_file', 'source_file'),
('cbf_sc217', 'in_file')]),
(inputnode, cbf_sc407, [('source_file', 'source_file'),
('cbf_sc407', 'in_file')]),
(inputnode, cbf_sc417, [('source_file', 'source_file'),
('cbf_sc417', 'in_file')]),
])
if scorescrub:
score_hvoxf = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='HavardOxford',
suffix='mean_score', compress=False),
name='score_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrub_hvoxf = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='HavardOxford',
suffix='mean_scrub', compress=False),
name='scrub_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
score_sc207 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7',
suffix='mean_score', compress=False),
name='score_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrub_sc207 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7',
suffix='mean_scrub', compress=False),
name='scrub_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
score_sc217 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17',
suffix='mean_score', compress=False),
name='score_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrub_sc217 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17',
suffix='mean_scrub', compress=False),
name='scrub_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
score_sc407 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7',
suffix='mean_score', compress=False),
name='score_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrub_sc407 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7',
suffix='mean_scrub', compress=False),
name='scrub_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
score_sc417 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17',
suffix='mean_score', compress=False),
name='score_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrub_sc417 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17',
suffix='mean_scrub', compress=False),
name='scrub_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([ (inputnode, score_hvoxf, [('source_file', 'source_file'),
('score_hvoxf', 'in_file')]),
(inputnode, scrub_hvoxf, [('source_file', 'source_file'),
('scrub_hvoxf', 'in_file')]),
(inputnode, score_sc217, [('source_file', 'source_file'),
('score_sc217', 'in_file')]),
(inputnode, score_sc207, [('source_file', 'source_file'),
('score_sc207', 'in_file')]),
(inputnode, scrub_sc207, [('source_file', 'source_file'),
('scrub_sc207', 'in_file')]),
(inputnode, scrub_sc217, [('source_file', 'source_file'),
('scrub_sc217', 'in_file')]),
(inputnode, score_sc417, [('source_file', 'source_file'),
('score_sc417', 'in_file')]),
(inputnode, scrub_sc417, [('source_file', 'source_file'),
('scrub_sc417', 'in_file')]),
(inputnode, score_sc407, [('source_file', 'source_file'),
('score_sc407', 'in_file')]),
(inputnode, scrub_sc407, [('source_file', 'source_file'),
('scrub_sc407', 'in_file')]),
])
if basil:
basil_hvoxf = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='HavardOxford',
suffix='mean_basil', compress=False),
name='basil_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvc_hvoxf = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='HavardOxford', suffix='mean_pvc',
compress=False),
name='pvc_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
basil_sc207 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7',
suffix='mean_basil', compress=False),
name='basil_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvc_sc207 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7', suffix='mean_pvc',
compress=False),
name='pvc_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
basil_sc217 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17',
suffix='mean_basil', compress=False),
name='basil_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvc_sc217 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17',
suffix='mean_pvc', compress=False),
name='pvc_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
basil_sc407 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7',
suffix='mean_basil', compress=False),
name='basil_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvc_sc407 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7', suffix='mean_pvc',
compress=False),
name='pvc_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
basil_sc417 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17',
suffix='mean_basil', compress=False),
name='basil_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvc_sc417 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17',
suffix='mean_pvc', compress=False),
name='pvc_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, basil_hvoxf, [('source_file', 'source_file'),
('basil_hvoxf', 'in_file')]),
(inputnode, pvc_hvoxf, [('source_file', 'source_file'),
('pvc_hvoxf', 'in_file')]),
(inputnode, basil_sc207, [('source_file', 'source_file'),
('basil_sc207', 'in_file')]),
(inputnode, pvc_sc207, [('source_file', 'source_file'),
('pvc_sc207', 'in_file')]),
(inputnode, basil_sc217, [('source_file', 'source_file'),
('basil_sc217', 'in_file')]),
(inputnode, pvc_sc217, [('source_file', 'source_file'),
('pvc_sc217', 'in_file')]),
(inputnode, basil_sc407, [('source_file', 'source_file'),
('basil_sc407', 'in_file')]),
(inputnode, pvc_sc407, [('source_file', 'source_file'),
                                      ('pvc_sc407', 'in_file')]),
(inputnode, basil_sc417, [('source_file', 'source_file'),
('basil_sc417', 'in_file')]),
(inputnode, pvc_sc417, [('source_file', 'source_file'),
('pvc_sc417', 'in_file')]),
])
if nonstd_spaces.intersection(('func', 'run', 'asl','sbref')):
ds_asl_native = pe.Node(
DerivativesDataSink(
base_directory=output_dir, desc='preproc', compress=True, SkullStripped=False,
RepetitionTime=metadata.get('RepetitionTime'), TaskName=metadata.get('TaskName')),
name='ds_asl_native', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_native_ref = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='aslref', compress=True,
dismiss_entities=("echo",)),
name='ds_asl_native_ref', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_mask_native = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='brain', suffix='mask',
compress=True, dismiss_entities=("echo",)),
name='ds_asl_mask_native', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
cbfnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='cbf', compress=True),
name='cbfnative', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
meancbfnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', compress=True),
name='meancbfnative', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, ds_asl_native, [('source_file', 'source_file'),
('asl_native', 'in_file')]),
(inputnode, ds_asl_native_ref, [('source_file', 'source_file'),
('asl_native_ref', 'in_file')]),
(inputnode, ds_asl_mask_native, [('source_file', 'source_file'),
('asl_mask_native', 'in_file')]),
(inputnode, cbfnative, [('source_file', 'source_file'),
('cbf', 'in_file')]),
(inputnode, meancbfnative, [('source_file', 'source_file'),
('meancbf', 'in_file')]),
])
if scorescrub:
scorenative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='score', suffix='cbf',
compress=True),
name='scorenative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
meanscorenative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='score', suffix='mean_cbf',
compress=True),
name='meanscorenative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrubnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='scrub', suffix='cbf',
compress=True),
name='scrubnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, scorenative, [('source_file', 'source_file'),
('score', 'in_file')]),
(inputnode, meanscorenative, [('source_file', 'source_file'),
('avgscore', 'in_file')]),
(inputnode, scrubnative, [('source_file', 'source_file'),
('scrub', 'in_file')]),
])
if basil:
basilnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='basil', suffix='cbf',
compress=True),
name='basilnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='pvc', suffix='cbf',
compress=True),
name='pvcnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
attnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='bat', suffix='cbf',
compress=True),
name='attcnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
            workflow.connect([
                (inputnode, basilnative, [('source_file', 'source_file'),
                                          ('basil', 'in_file')]),
                (inputnode, pvnative, [('source_file', 'source_file'),
                                       ('pv', 'in_file')]),
                (inputnode, attnative, [('source_file', 'source_file'),
                                        ('att', 'in_file')]),
            ])
        # RawSources applies to the native-space mask regardless of BASIL,
        # matching the T1w and standard-space sections below.
        workflow.connect([
            (raw_sources, ds_asl_mask_native, [('out', 'RawSources')]),
        ])
# Resample to T1w space
if nonstd_spaces.intersection(('T1w', 'anat')):
ds_asl_t1 = pe.Node(
DerivativesDataSink(
base_directory=output_dir, space='T1w', desc='preproc', compress=True,
SkullStripped=False, RepetitionTime=metadata.get('RepetitionTime'),
TaskName=metadata.get('TaskName'), dismiss_entities=("echo",)),
name='ds_asl_t1', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_t1_ref = pe.Node(
DerivativesDataSink(base_directory=output_dir, space='T1w', suffix='aslref',
compress=True, dismiss_entities=("echo",)),
name='ds_asl_t1_ref', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_mask_t1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, space='T1w', desc='brain',
suffix='mask', compress=True, dismiss_entities=("echo",)),
name='ds_asl_mask_t1', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
cbfnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='cbf', space='T1w',
compress=True),
name='cbfnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
meancbfnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', space='T1w',
compress=True),
name='meancbfnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, ds_asl_t1, [('source_file', 'source_file'),
('asl_t1', 'in_file')]),
(inputnode, ds_asl_t1_ref, [('source_file', 'source_file'),
('asl_t1_ref', 'in_file')]),
(inputnode, ds_asl_mask_t1, [('source_file', 'source_file'),
('asl_mask_t1', 'in_file')]),
(inputnode, cbfnativet1, [('source_file', 'source_file'),
('cbf_t1', 'in_file')]),
(inputnode, meancbfnativet1, [('source_file', 'source_file'),
('meancbf_t1', 'in_file')]),
])
if scorescrub:
scorenativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='score', suffix='cbf',
space='T1w', compress=True),
name='scorenativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
meanscorenativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', desc='score',
space='T1w', compress=True),
name='meanscorenativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrubnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='scrub', suffix='cbf',
space='T1w', compress=True),
name='scrubnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, scorenativet1, [('source_file', 'source_file'),
('score_t1', 'in_file')]),
(inputnode, meanscorenativet1, [('source_file', 'source_file'),
('avgscore_t1', 'in_file')]),
(inputnode, scrubnativet1, [('source_file', 'source_file'),
('scrub_t1', 'in_file')]),
])
if basil:
basilnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='basil', suffix='cbf',
space='T1w', compress=True),
name='basilnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='pvc', suffix='cbf',
space='T1w', compress=True),
name='pvcnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
attnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='bat', suffix='cbf',
space='T1w', compress=True),
name='attnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, basilnativet1, [('source_file', 'source_file'),
('basil_t1', 'in_file')]),
(inputnode, pvnativet1, [('source_file', 'source_file'),
('pv_t1', 'in_file')]),
(inputnode, attnativet1, [('source_file', 'source_file'),
('att_t1', 'in_file')]),
])
workflow.connect([
(raw_sources, ds_asl_mask_t1, [('out', 'RawSources')]),
])
if getattr(spaces, '_cached') is None:
return workflow
# Store resamplings in standard spaces when listed in --output-spaces
if spaces.cached.references:
spacesource = pe.Node(SpaceDataSource(),
name='spacesource', run_without_submitting=True)
spacesource.iterables = ('in_tuple', [
(s.fullname, s.spec) for s in spaces.cached.get_standard(dim=(3,))
])
out_names = ['template', 'asl_std', 'asl_std_ref', 'asl_mask_std',
'cbf_std', 'meancbf_std']
if scorescrub:
out_names = out_names + ['score_std', 'avgscore_std', 'scrub_std']
if basil:
out_names = out_names + ['basil_std', 'pv_std','att_std']
select_std = pe.Node(KeySelect(
fields=out_names),
name='select_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_std = pe.Node(
DerivativesDataSink(
base_directory=output_dir, desc='preproc', compress=True, SkullStripped=False,
RepetitionTime=metadata.get('RepetitionTime'), TaskName=metadata.get('TaskName'),
dismiss_entities=("echo",)),
name='ds_asl_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_std_ref = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='aslref', compress=True,
dismiss_entities=("echo",)),
name='ds_asl_std_ref', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_mask_std = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='brain', suffix='mask',
compress=True, dismiss_entities=("echo",)),
name='ds_asl_mask_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
cbfstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='cbf', compress=True),
name='cbfstd', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
meancbfstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', compress=True),
name='meancbfstd', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, ds_asl_std, [('source_file', 'source_file')]),
(inputnode, ds_asl_std_ref, [('source_file', 'source_file')]),
(inputnode, ds_asl_mask_std, [('source_file', 'source_file')]),
(inputnode, cbfstd, [('source_file', 'source_file')]),
(inputnode, meancbfstd, [('source_file', 'source_file')]),
(inputnode, select_std, [('asl_std', 'asl_std'),
('asl_std_ref', 'asl_std_ref'),
('asl_mask_std', 'asl_mask_std'),
('cbf_std', 'cbf_std'),
('meancbf_std', 'meancbf_std'),
('template', 'template'),
('spatial_reference', 'keys')]),
(spacesource, select_std, [('uid', 'key')]),
(select_std, ds_asl_std, [('asl_std', 'in_file')]),
(spacesource, ds_asl_std, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, ds_asl_std_ref, [('asl_std_ref', 'in_file')]),
(spacesource, ds_asl_std_ref, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, ds_asl_mask_std, [('asl_mask_std', 'in_file')]),
(spacesource, ds_asl_mask_std, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, cbfstd, [('cbf_std', 'in_file')]),
(spacesource, cbfstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, meancbfstd, [('meancbf_std', 'in_file')]),
(spacesource, meancbfstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(raw_sources, ds_asl_mask_std, [('out', 'RawSources')]),
])
if scorescrub:
scorestd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='score', suffix='cbf',
compress=True),
name='scorestd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
meanscorestd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='score', suffix='mean_cbf',
compress=True),
name='meanscorestd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrubstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='scrub', suffix='cbf',
compress=True),
name='scrubstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, scorestd, [('source_file', 'source_file')]),
(inputnode, meanscorestd, [('source_file', 'source_file')]),
(inputnode, scrubstd, [('source_file', 'source_file')]),
(inputnode, select_std, [
('score_std', 'score_std'),
('avgscore_std', 'avgscore_std'),
('scrub_std', 'scrub_std')]),
(select_std, scorestd, [('score_std', 'in_file')]),
(spacesource, scorestd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, meanscorestd, [('avgscore_std', 'in_file')]),
(spacesource, meanscorestd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, scrubstd, [('scrub_std', 'in_file')]),
(spacesource, scrubstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
])
if basil:
basilstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='basil', suffix='cbf',
compress=True),
name='basilstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='pvc', suffix='cbf',
compress=True),
name='pvcstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
attstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='bat', suffix='cbf',
compress=True),
name='attstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, basilstd, [('source_file', 'source_file')]),
(inputnode, pvstd, [('source_file', 'source_file')]),
(inputnode, attstd, [('source_file', 'source_file')]),
(inputnode, select_std, [
('basil_std', 'basil_std'),
('pv_std', 'pv_std'),
('att_std', 'att_std')]),
(select_std, basilstd, [('basil_std', 'in_file')]),
(spacesource, basilstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, pvstd, [('pv_std', 'in_file')]),
(spacesource, pvstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, attstd, [('att_std', 'in_file')]),
(spacesource, attstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
])
return workflow | c5a5425dd38fd1b451b41a687a44c8edbb3d24b0 | 3,652,021 |
from collections import defaultdict
def makehash(w=dict):
    """autovivification like hash in perl
    http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python
    call it on a hash like h = makehash()
    then assign directly:
    h[1][2] = 3
    useful ONLY for a 2-level hash
    """
    # return defaultdict(makehash)  # fully recursive variant
    return defaultdict(w) | 5c772c07de9231c40053b29c545e25b611dd3b6e | 3,652,022 |
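# Usage sketch (illustrative): missing first-level keys are created on demand.
h = makehash()
h['a']['b'] = 3
h['a']  # -> {'b': 3}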
def sample_parameters(kmodel,
tmodel,
individual,
param_sampler,
scaling_parameters,
only_stable=True,
):
"""
Run sampling on first order model
"""
solution_raw = individual.data.data
# Load fluxes and concentrations
fluxes = load_fluxes(solution_raw, tmodel, kmodel,
density=scaling_parameters.DENSITY,
ratio_gdw_gww=scaling_parameters.GDW_GWW_RATIO,
concentration_scaling=scaling_parameters.CONCENTRATION_SCALING,
time_scaling=scaling_parameters.TIME_SCALING)
concentrations = load_concentrations(solution_raw, tmodel, kmodel,
concentration_scaling=scaling_parameters.CONCENTRATION_SCALING)
# Fetch equilibrium constants
load_equilibrium_constants(solution_raw, tmodel, kmodel,
concentration_scaling=scaling_parameters.CONCENTRATION_SCALING,
in_place=True)
    parameter_population_lam_mu, lamda_max, lamda_min = param_sampler.sample(
        kmodel,
        fluxes,
        concentrations,
        only_stable=only_stable,
        min_max_eigenvalues=True)
return parameter_population_lam_mu, lamda_max, lamda_min | cc48f170c58c090844dbbf0e72aa2bc9f2a1598b | 3,652,023 |
import yaml
def load_yaml(fpath):
""" load settings from a yaml file and return them as a dictionary """
    with open(fpath, 'r') as f:
        # yaml.load without an explicit Loader is deprecated and unsafe;
        # safe_load restricts parsing to plain YAML tags.
        settings = yaml.safe_load(f)
    return settings | bd9c19407c39e190f2d7fd734d118dbb4e9378ab | 3,652,024 |
import statistics
def recommendation(agent, other_agent, resource_id, scale, logger, discovery, recency_limit):
"""
Get recommendations on other agent of third agents and average them to one recommendation value.
:param agent: The agent which calculates the popularity.
:type agent: str
:param other_agent: The other agent for which the popularity value is calculated.
:type other_agent: str
:param resource_id: The URI of the evaluated resource.
:type resource_id: str
:param scale: The Scale object to be used by the agent.
:type scale: Scale
:param logger: The logger object to be used by the agent.
:type logger: BasicLogger
:param discovery: Addresses of all agents within the scenario.
:type discovery: dict
:param recency_limit: A datetime object which is used for "forgetting" old history entries
:type recency_limit: datetime
:return: The Recommendation trust value.
:rtype: float or int
"""
agents_to_ask = []
for third_agent in discovery:
if third_agent != agent and third_agent != other_agent:
            combined = get_combined_direct_experience_for_agent(
                agent, third_agent, logger, recency_limit, scale)
            if combined is not None and combined >= scale.minimum_to_trust_others():
agents_to_ask.append(third_agent)
recommendations = ask_for_recommendations(
agent, resource_id, agents_to_ask, scale, logger, discovery, recency_limit)
return statistics.median(recommendations) if len(recommendations) > 0 else None | 0670ec3d388dc008f2c5315907fac11f80aa7ebe | 3,652,025 |
import time
import numpy
import pandas
import numpy.testing
import mhctools
def do_predictions_mhctools(work_item_dicts, constant_data=None):
"""
Each tuple of work items consists of:
(work_item_num, peptides, alleles)
"""
# This may run on the cluster in a way that misses all top level imports,
# so we have to re-import everything here.
if constant_data is None:
constant_data = GLOBAL_DATA
cols = constant_data['cols']
predictor_name = constant_data['args'].predictor
results = []
for (i, d) in enumerate(work_item_dicts):
work_item_num = d['work_item_num']
peptides = d['peptides']
alleles = d['alleles']
print("Processing work item", i + 1, "of", len(work_item_dicts))
result = {}
results.append((work_item_num, result))
if predictor_name == "netmhcpan4-ba":
predictor = mhctools.NetMHCpan4(
alleles=alleles,
program_name="netMHCpan-4.0",
mode="binding_affinity")
elif predictor_name == "netmhcpan4-el":
predictor = mhctools.NetMHCpan4(
alleles=alleles,
program_name="netMHCpan-4.0",
mode="elution_score")
elif predictor_name == "mixmhcpred":
# Empirically determine supported alleles.
mixmhcpred_usable_alleles = []
unusable_alleles = []
for allele in alleles:
predictor = mhctools.MixMHCpred(alleles=[allele])
# We use inf not nan to indicate unsupported alleles since
# we use nan to indicate incomplete results that still need
# to execute.
empty_results = pandas.Series(index=peptides,
dtype=numpy.float16)
empty_results[:] = float('-inf')
try:
predictor.predict_peptides_dataframe(["PEPTIDESS"])
mixmhcpred_usable_alleles.append(allele)
except ValueError:
unusable_alleles.append(allele)
for col in cols:
result["%s %s" % (allele, col)] = empty_results.values
print("MixMHCpred usable alleles: ", *mixmhcpred_usable_alleles)
print("MixMHCpred unusable alleles: ", *unusable_alleles)
predictor = mhctools.MixMHCpred(alleles=mixmhcpred_usable_alleles)
assert mixmhcpred_usable_alleles, mixmhcpred_usable_alleles
else:
raise ValueError("Unsupported", predictor_name)
start = time.time()
df = predictor.predict_peptides_dataframe(peptides)
print("Predicted for %d peptides x %d alleles in %0.2f sec." % (
len(peptides), len(alleles), (time.time() - start)))
for (allele, sub_df) in df.groupby("allele"):
for col in cols:
result["%s %s" % (allele, col)] = (
sub_df[col].values.astype(
constant_data['args'].result_dtype))
return results | c42270f3b31b984973e9e668902ac2018f38b25f | 3,652,026 |
def inceptionresnetv2(**kwargs):
"""
InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_inceptionresnetv2(model_name="inceptionresnetv2", bn_epsilon=1e-3, **kwargs) | 7c11c147d01b6551fa1b65cb5d24497efc2a3d3b | 3,652,027 |
import sys
def convertStringToSysEncoding(strng):
"""
Convert a string to the current platform file system encoding.
Returns the new encoded string.
:Args:
strng: string
String to convert.
"""
if type(strng) not in [bytes_t, unicode_t]:
strng = strng.decode("utf-8")
strng = strng.encode(sys.getfilesystemencoding())
return strng | e7287b32577bdb0f832352e073982d1b074f1b07 | 3,652,028 |
def _n_pow_i(a, b, n):
    """
    return (a + b*i)**n as a pair (real, imag)
    """
x = a
y = b
for i in range(1, n):
x1 = (x*a) - (y*b)
y1 = (y*a) + (x*b)
x = x1
y = y1
return x, y | 35b00c7bc76aaf19a5acdf012e63c9c0c50e5d1d | 3,652,029 |
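# Usage sketch (illustrative): (1 + 1j)**4 == -4 + 0j.
_n_pow_i(1, 1, 4)  # -> (-4, 0)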
def IsNameBased(link):
"""Finds whether the link is name based or not
:param str link:
:return:
True if link is name-based; otherwise, False.
:rtype: boolean
"""
if not link:
return False
# trimming the leading "/"
if link.startswith("/") and len(link) > 1:
link = link[1:]
# Splitting the link(separated by "/") into parts
parts = link.split("/")
# First part should be "dbs"
if not (parts and parts[0].lower() == "dbs"):
return False
# The second part is the database id(ResourceID or Name) and cannot be empty
if len(parts) < 2 or not parts[1]:
return False
# Either ResourceID or database name
databaseID = parts[1]
# Length of databaseID(in case of ResourceID) is always 8
if len(databaseID) != 8:
return True
return not IsValidBase64String(str(databaseID)) | e887fd6cd02c7ef71cbafa825014e1fca2c9d4d1 | 3,652,030 |
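# Usage sketch (illustrative): database names longer than 8 characters are
# always treated as name-based links.
print(IsNameBased("/dbs/mydatabase/colls/mycollection"))  # -> True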
def register_submit(class_name, fire) -> None:
"""
    Register a submit handler on a form.
    :param class_name: class name of the form
    :param fire: function that will be fired on form submit
:return: None
"""
def submit_handler(event) -> None:
"""
Handle form submit and fire handler
:param event: Default html form object
:return: None
"""
event.preventDefault()
fire()
    form = window.jQuery('.' + class_name)
    if form.length == 1:
        form.on('submit', submit_handler) | f2f8b2b067a282b073d6cc13825aedc3509c8077 | 3,652,031 |
from typing import Any
def compile(obj: Any) -> Definition:
"""Extract a definition from a JSON-like object representation."""
return ConcreteValue(obj) | 5e82471be599e77739e485468571bee296bfca71 | 3,652,032 |
import tensorflow as tf
from tensorflow.python.ops import variable_scope
def policy_network(vocab_embed_variable, document_placeholder, label_placeholder):
"""Build the policy core network.
Args:
vocab_embed_variable: [vocab_size, FLAGS.wordembed_size], embeddings without PAD and UNK
document_placeholder: [None,(FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length +
FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.max_sent_length]
label_placeholder: Gold label [None, FLAGS.max_doc_length, FLAGS.target_label_size], only used during cross entropy training of JP's model.
Returns:
Outputs of sentence extractor and logits without softmax
"""
with tf.variable_scope('PolicyNetwork') as scope:
### Full Word embedding Lookup Variable
# PADDING embedding non-trainable
pad_embed_variable = variable_on_cpu("pad_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=False)
# UNK embedding trainable
unk_embed_variable = variable_on_cpu("unk_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=True)
# Get fullvocab_embed_variable
fullvocab_embed_variable = tf.concat(0, [pad_embed_variable, unk_embed_variable, vocab_embed_variable])
# print(fullvocab_embed_variable)
### Lookup layer
with tf.variable_scope('Lookup') as scope:
document_placeholder_flat = tf.reshape(document_placeholder, [-1])
document_word_embedding = tf.nn.embedding_lookup(fullvocab_embed_variable, document_placeholder_flat, name="Lookup")
document_word_embedding = tf.reshape(document_word_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length +
FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length),
FLAGS.max_sent_length, FLAGS.wordembed_size])
# print(document_word_embedding)
### Convolution Layer
with tf.variable_scope('ConvLayer') as scope:
document_word_embedding = tf.reshape(document_word_embedding, [-1, FLAGS.max_sent_length, FLAGS.wordembed_size])
document_sent_embedding = conv1d_layer_sentence_representation(document_word_embedding) # [None, sentembed_size]
document_sent_embedding = tf.reshape(document_sent_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length +
FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.sentembed_size])
# print(document_sent_embedding)
### Reshape Tensor to List [-1, (max_doc_length+max_title_length+max_image_length), sentembed_size] -> List of [-1, sentembed_size]
with variable_scope.variable_scope("ReshapeDoc_TensorToList"):
document_sent_embedding = reshape_tensor2list(document_sent_embedding, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length +
FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.sentembed_size)
# print(document_sent_embedding)
# document_sents_enc
document_sents_enc = document_sent_embedding[:FLAGS.max_doc_length]
if FLAGS.doc_encoder_reverse:
document_sents_enc = document_sents_enc[::-1]
# document_sents_ext
document_sents_ext = document_sent_embedding[:FLAGS.max_doc_length]
# document_sents_titimg
document_sents_titimg = document_sent_embedding[FLAGS.max_doc_length:]
### Document Encoder
with tf.variable_scope('DocEnc') as scope:
encoder_outputs, encoder_state = simple_rnn(document_sents_enc)
### Sentence Label Extractor
with tf.variable_scope('SentExt') as scope:
if (FLAGS.attend_encoder) and (len(document_sents_titimg) != 0):
                # Multiple decoders
                print("Multiple decoders are not implemented yet.")
exit(0)
# # Decoder to attend captions
# attendtitimg_extractor_output, _ = simple_attentional_rnn(document_sents_ext, document_sents_titimg, initial_state=encoder_state)
# # Attend previous decoder
# logits = sentence_extractor_seqrnn_docatt(document_sents_ext, attendtitimg_extractor_output, encoder_state, label_placeholder)
elif (not FLAGS.attend_encoder) and (len(document_sents_titimg) != 0):
# Attend only titimages during decoding
extractor_output, logits = sentence_extractor_nonseqrnn_titimgatt(document_sents_ext, encoder_state, document_sents_titimg)
elif (FLAGS.attend_encoder) and (len(document_sents_titimg) == 0):
# JP model: attend encoder
                extractor_output, logits = sentence_extractor_seqrnn_docatt(document_sents_ext, encoder_outputs, encoder_state, label_placeholder)
else:
# Attend nothing
extractor_output, logits = sentence_extractor_nonseqrnn_noatt(document_sents_ext, encoder_state)
# print(extractor_output)
# print(logits)
return extractor_output, logits | d59cf6d1d99fca7c654087d8fc720b64e419bced | 3,652,033 |
def get_feature(file_path: str):
""" Read and parse given feature file"""
print('Reading feature file ', file_path)
    with open(file_path, "r") as file_obj:  # close the file after reading
        stream = file_obj.read()
    parser = Parser()
    return parser.parse(TokenScanner(stream)) | e30e78afdb205aa2c26e3831ca7b0091579866a3 | 3,652,034
import numpy as np
import cv2

def hough_lines_draw(img, outfile, peaks, rhos, thetas):
"""
Returns the image with hough lines drawn.
Args
- img: Image on which lines will be drawn
- outfile: The output file. The file will be saved.
- peaks: peaks returned by hough_peaks
- rhos: array of rhos used in Hough Space
- thetas: array of thetas used in Hough Space
Returns
    - img: after drawing lines on it.
"""
for peak in peaks:
rho = rhos[peak[0]]
theta = thetas[peak[1]] * np.pi / 180.0
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(img, (x1,y1),(x2,y2),(0,0,255),2)
cv2.imwrite(outfile, img)
return img | f1731adb7d90a69dc50721c03f9e2ab01b7e2078 | 3,652,035 |
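# Hedged usage sketch (illustrative values, assumes cv2 is available): draw
# a single vertical line at x = 50 (theta = 0 deg, rho = 50) and save it.
blank = np.zeros((100, 100, 3), dtype=np.uint8)
rhos = np.arange(-141, 142)
thetas = np.arange(-90, 90)
peak = (np.where(rhos == 50)[0][0], np.where(thetas == 0)[0][0])
hough_lines_draw(blank, "hough_demo.png", [peak], rhos, thetas)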
def cg_file_h(tmpdir):
"""Get render config."""
return {
'cg_file': str(tmpdir.join('muti_layer_test.hip'))
} | caedb2324953e4ca90ebffdf80be60fed1b8026d | 3,652,036 |
from ._groupbyuntil import group_by_until_
from typing import Optional
from typing import Callable
from typing import Any
def group_by_until(
key_mapper: Mapper[_T, _TKey],
element_mapper: Optional[Mapper[_T, _TValue]],
duration_mapper: Callable[[GroupedObservable[_TKey, _TValue]], Observable[Any]],
subject_mapper: Optional[Callable[[], Subject[_TValue]]] = None,
) -> Callable[[Observable[_T]], Observable[GroupedObservable[_TKey, _TValue]]]:
"""Groups the elements of an observable sequence according to a
specified key mapper function. A duration mapper function is used
to control the lifetime of groups. When a group expires, it
receives an OnCompleted notification. When a new element with the
same key value as a reclaimed group occurs, the group will be
reborn with a new lifetime request.
.. marble::
:alt: group_by_until
--1--2--a--3--b--c-|
[ group_by_until() ]
-+-----+-----------|
+a-----b--c-|
+1--2-----3-------|
Examples:
>>> group_by_until(lambda x: x.id, None, lambda : reactivex.never())
>>> group_by_until(
lambda x: x.id, lambda x: x.name, lambda grp: reactivex.never()
)
>>> group_by_until(
lambda x: x.id,
lambda x: x.name,
lambda grp: reactivex.never(),
lambda: ReplaySubject()
)
Args:
key_mapper: A function to extract the key for each element.
element_mapper: A function to map each source element to an element in
an observable group.
duration_mapper: A function to signal the expiration of a group.
subject_mapper: A function that returns a subject used to initiate
a grouped observable. Default mapper returns a Subject object.
Returns:
An operator function that takes an observable source and
returns a sequence of observable groups, each of which
corresponds to a unique key value, containing all elements that
share that same key value. If a group's lifetime expires, a new
group with the same key value can be created once an element
with such a key value is encountered.
"""
return group_by_until_(key_mapper, element_mapper, duration_mapper, subject_mapper) | c4f54140dadbd0d043400a35f9be9f978460ae3c | 3,652,037 |
def GetFilesystemSize(options, image_type, layout_filename, num):
"""Returns the filesystem size of a given partition for a given layout type.
If no filesystem size is specified, returns the partition size.
Args:
options: Flags passed to the script
image_type: Type of image eg base/test/dev/factory_install
layout_filename: Path to partition configuration file
num: Number of the partition you want to read from
Returns:
Size of selected partition filesystem in bytes
"""
partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
partition = GetPartitionByNumber(partitions, num)
if 'fs_bytes' in partition:
return partition['fs_bytes']
else:
return partition['bytes'] | 1ea542366a11f9a00b648b5158282a4b5e39f633 | 3,652,038 |
import numpy as np
from scipy import optimize

def match_pairs(obj_match, params):
""" Matches objects into pairs given a disparity matrix and removes
bad matches. Bad matches have a disparity greater than the maximum
threshold. """
# Create a list of sets, where the i-th set will store the objects
# from image1 that have merged with objects in image2
# Maybe faster to use a 2D array?
obj_merge = np.zeros(obj_match.shape, dtype=bool)
# Determine optimal pairs
pairs = optimize.linear_sum_assignment(obj_match)
for id1 in pairs[0]:
if obj_match[id1, pairs[1][id1]] > params['MAX_DISPARITY']:
# Set to -1 if object has died (or merged)
pairs[1][id1] = -1
# Find the closest object in image2 to object with id1
id2 = np.argmin(obj_match[id1])
# If this object was in the search radius of object id1,
# add object id1 to obj_merge[id2].
if obj_match[id1, id2] < LARGE_NUM:
obj_merge[id1, id2] = True
pairs = pairs[1] + 1 # ids in current_objects are 1-indexed
return pairs, obj_merge | 42939faca3cc2a61e8dde1b00818da593aa89c7a | 3,652,039 |
import numpy as np

def spike_train_convolution(spike_times, interval, dt, sigma):
"""
Needed for Schreiber reliability measure
"""
N = int(np.floor((interval[1]-interval[0])/dt)+1)
x = np.linspace(interval[0], interval[1], N)
s = np.zeros(N)
for spike in spike_times:
s = s + gaussian(x, spike, sigma)
return s | 0dbd2ac6a3cc016ecb0ab7209256d1544b6acfd1 | 3,652,040 |
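# The snippet relies on a module-level `gaussian`; a minimal assumed sketch
# (an unnormalized Gaussian bump, which is what the usage above implies):
def gaussian(x, mu, sigma):
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2)

# Hedged usage: two spikes at 0.2 s and 0.7 s smoothed with sigma = 50 ms.
s = spike_train_convolution([0.2, 0.7], (0.0, 1.0), dt=0.001, sigma=0.05)
assert len(s) == 1001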
import os
def touch(file):
"""
update a file's access/modifications times
Attempts to update the access/modifications times on a file. If the file
does not exist, it will be created. This utility call operates in the same
fashion as the ``touch`` system command.
An example when using in the context of script helpers is as follows:
.. code-block:: python
if releng_touch('my-file'):
print('file was created')
else:
print('file was not created')
Args:
file: the file
Returns:
``True`` if the file was created/updated; ``False`` if the file could
not be created/updated
"""
try:
parent_dir = os.path.dirname(file)
if parent_dir and not os.path.isdir(parent_dir):
ensure_dir_exists(parent_dir)
with open(file, 'ab'):
os.utime(file, None)
return True
except OSError:
return False | 0e37f6e1924dc06a05ad54b9412ef6210a4f7feb | 3,652,041 |
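# Hedged usage sketch (mirrors the docstring example):
if touch('my-file'):
    print('file was created/updated')
else:
    print('file was not created/updated')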
def interpolate_peak(spectrum: list, peak: int) -> float:
""" Uses quadratic interpolation of spectral peaks to get a better estimate of the peak.
Args:
- spectrum: the frequency bin to analyze.
- peak: the location of the estimated peak in the spectrum list.
Based off: https://ccrma.stanford.edu/~jos/sasp/Quadratic_Interpolation_Spectral_Peaks.html
"""
prev_neighbour = spectrum[peak-1]
next_neighbour = spectrum[peak+1]
peak_value = spectrum[peak]
    # the referenced formula includes a factor of 1/2:
    # p = 0.5 * (alpha - gamma) / (alpha - 2*beta + gamma)
    estimated_peak = 0.5 * (next_neighbour - prev_neighbour) / (
        2 * peak_value - prev_neighbour - next_neighbour) + peak
return abs(estimated_peak) | 0e74057908e7839438325da9adafdf385012ce17 | 3,652,042 |
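# Hedged numeric check (added for illustration): symmetric neighbours leave
# the estimate on the sample; a heavier right neighbour pulls it rightwards.
assert interpolate_peak([1.0, 3.0, 1.0], 1) == 1.0
assert interpolate_peak([1.0, 3.0, 2.0], 1) > 1.0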
def _check_trunk_switchport(
dut, check, expd_status: SwitchportTrunkExpectation, msrd_status: dict
) -> tr.CheckResultsCollection:
"""
This function validates a trunk switchport against the expected values.
These checks include matching on the native-vlan and trunk-allowed-vlans.
"""
results = list()
device = dut.device
e_nvl_id = expd_status.native_vlan.vlan_id if expd_status.native_vlan else None
m_nvl_id = msrd_status["trunkingNativeVlanId"]
if e_nvl_id and (e_nvl_id != m_nvl_id):
results.append(
tr.CheckFailFieldMismatch(
device=device,
check=check,
field="native_vlan",
expected=e_nvl_id,
measurement=m_nvl_id,
)
)
# EOS stores this as a CSV string, with ranges, for example:
# 14,16,25-26,29
e_tr_allowed_vids = sorted(
[vlan.vlan_id for vlan in expd_status.trunk_allowed_vlans]
)
    # convert the list of vlan-ids to a range string for string comparison
# purposes.
e_tr_alwd_vstr = range_string(e_tr_allowed_vids)
m_tr_alwd_vstr = msrd_status["trunkAllowedVlans"]
    # if there are no expected allowed vlans on this trunk, then set the expected
# value to "NONE" since that is what EOS reports in this case.
if not e_tr_alwd_vstr:
e_tr_alwd_vstr = "NONE"
if e_tr_alwd_vstr != m_tr_alwd_vstr:
results.append(
tr.CheckFailFieldMismatch(
device=device,
check=check,
field="trunk_allowed_vlans",
expected=e_tr_alwd_vstr,
measurement=m_tr_alwd_vstr,
)
)
return results | a739ae5897c4627ea78d27a07f831e528318f052 | 3,652,043 |
from zipfile import ZipFile, BadZipfile
from zlib import error as zlib_error

def is_valid_compressed(file):
    """Check that a zip archive is valid (not corrupt)."""
try:
archive = ZipFile(file, 'r')
try:
corrupt = archive.testzip()
except zlib_error:
corrupt = True
archive.close()
except BadZipfile:
corrupt = True
return not corrupt | 261a4fcdfa1117aa749b00805e323f21a04d0f57 | 3,652,044 |
import numpy as np

K0 = 273.15  # Celsius zero point in Kelvin (assumed module-level constant)

def Krsol_SP_pt(SP,pt):
"""
Krsol_SP_pt solubility of Kr in seawater
==========================================================================
USAGE:
Krsol = sol.Krsol_SP_pt(SP,pt)
DESCRIPTION:
Calculates the krypton, Kr, concentration expected at equilibrium with
air at an Absolute Pressure of 101325 Pa (sea pressure of 0 dbar)
including saturated water vapor. This function uses the solubility
coefficients derived from the data of Weiss (1971).
Note that this algorithm has not been approved by IOC and is not work
from SCOR/IAPSO Working Group 127. It is included in the GSW
Oceanographic Toolbox as it seems to be oceanographic best practice.
INPUT:
SP = Practical Salinity (PSS-78) [ unitless ]
pt = potential temperature (ITS-90) referenced [ deg C ]
to one standard atmosphere (0 dbar).
SP & pt need to have the same dimensions.
OUTPUT:
Krsol = solubility of krypton in micro-moles per kg [ umol/kg ]
AUTHOR: Roberta Hamme, Paul Barker and Trevor McDougall
[ [email protected] ]
REFERENCES:
IOC, SCOR and IAPSO, 2010: The international thermodynamic equation of
seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. Available from http://www.TEOS-10.org
Weiss, R.F. and T.K. Kyser, 1978: Solubility of Krypton in Water and
Seawater. J. Chem. Thermodynamics, 23, 69-72.
The software is available from http://www.TEOS-10.org
==========================================================================
"""
x = SP # Note that salinity argument is Practical Salinity, this is
    # because the major ionic components of seawater related to Cl
# are what affect the solubility of non-electrolytes in seawater.
    pt68 = pt * 1.00024 # pt68 is the potential temperature in degrees C on
# the 1968 International Practical Temperature Scale IPTS-68.
y = pt68 + K0
y_100 = y * 1e-2
# Table 2 (Weiss and Kyser, 1978)
a = (-112.6840, 153.5817, 74.4690, -10.0189)
b = (-0.011213, -0.001844, 0.0011201)
Krsol_mL = np.exp(a[0] + a[1] * 100/y + a[2] * np.log(y_100) + a[3] * \
y_100 + x * (b[0] + y_100 * (b[1] + b[2] * y_100)))
# mL/kg to umol/kg for Kr (1/22.3511e-3)
#Molar volume at STP (Dymond and Smith, 1980).
Krsol = Krsol_mL * 4.474052731185490e1
return Krsol | 3402fdf5756ca9a54938211e67a57de1326bcc7f | 3,652,045 |
def get_node_element(tree_element, tag, key=None):
"""
FIXME: This is an ugly function that should be refactored. It wqs written to create the same
function for getting either an attribute or a subelement for an element.
:param tree_element: Element object from the ElementTree package
:param tag: subelement of the tree_element
:param key: key for value to be returned from the 'branch' subelements
:return: either text from the element or value from the attribute
"""
if key:
branch_elements = tree_element.findall('branch')
for each_element in branch_elements:
if key in each_element.attrib.keys():
return each_element.attrib[key]
raise BranchElementError(tree_element.find('name').text, key)
if tree_element.find(tag) is not None:
text = tree_element.find(tag).text
return text
else:
raise NoAttributeTypeName(tree_element.find('name').text, tag) | 2fdd641575a9928aa53389a9bf644698b97cbee8 | 3,652,046 |
def find_title(item):
"""Title of the video"""
title = item['snippet']['title']
return title | 9c6f64e02d959d46cfd1e4536f5faf7ec0c281bd | 3,652,047 |
import hashlib
def calc_fingerprint(text):
"""Return a hex string that fingerprints `text`."""
return hashlib.sha1(text).hexdigest() | 8be154e4e32ae9412a73e73397f0e0198ae9c862 | 3,652,048 |
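# Hedged usage (well-known SHA-1 test vector):
assert calc_fingerprint("hello") == "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d"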
from typing import List
from typing import Any
from typing import Tuple
import torch
from torch import Tensor
def yolo_collate_fn(
batch: List[Any],
) -> Tuple[Tensor, Tuple[Tensor, Tensor, List[Tuple[Tensor, Tensor]]]]:
"""
Collate function to be used for creating a DataLoader with values for Yolo model
input.
:param batch: a batch of data points and annotations transformed by
bounding_box_and_labels_to_yolo_fmt
:return: the batch stacked as tensors for all values except for the
original annotations
"""
images = []
targets = []
annotations = []
for idx, (image, (target, annotation)) in enumerate(batch):
images.append(image.unsqueeze(0))
img_label = torch.ones(target.size(0), 1) * idx
targets.append(torch.cat((img_label, target), 1))
annotations.append(annotation)
images = torch.cat(images, 0)
targets = torch.cat(targets, 0)
return images, (targets, annotations) | 599d4e9bbb91cf6d79225024cbcd9690cb55f8e6 | 3,652,049 |
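# Hedged usage sketch (illustrative shapes): two images with one target box
# each; after collation every target row gains a leading image-index column.
batch = [
    (torch.zeros(3, 32, 32), (torch.tensor([[0.0, 0.5, 0.5, 0.2, 0.2]]), "ann0")),
    (torch.zeros(3, 32, 32), (torch.tensor([[1.0, 0.4, 0.4, 0.1, 0.1]]), "ann1")),
]
images, (targets, annotations) = yolo_collate_fn(batch)
assert images.shape == (2, 3, 32, 32) and targets.shape == (2, 6)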
def delete_category(category_id):
"""Delete a category."""
category = session.query(Category).filter_by(id=category_id).first()
if 'username' not in login_session:
flash("Please log in to continue.")
return redirect(url_for('login'))
if not exists_category(category_id):
flash("We are unable to process your request right now.")
return redirect(url_for('home'))
# If the logged in user does not have authorisation to
# edit the category, redirect to homepage.
if login_session['user_id'] != category.user_id:
flash("We are unable to process your request right now.")
return redirect(url_for('home'))
if request.method == 'POST':
session.delete(category)
session.commit()
flash("Category successfully deleted!")
return redirect(url_for('home'))
else:
return render_template("delete_category.html", category=category) | 979aabe5b6d7730c9f75a714266d6aad61e1cd41 | 3,652,050 |
from typing import List
def get_all_users_of(fx_module: GraphModule, index: int) -> List[int]:
"""Given the graph(fx_module) and an index, return a list of all node indexes that use this node"""
graph = fx_module.graph
current_node = graph.nodes[index]
user_indexes: List[int] = []
"""if the node A is in node B's args, then B is the user of A
go through all the nodes, if the input node in any node's args,
then that node is the input node's user
"""
for i, n in enumerate(graph.nodes):
if find_use(n.args, current_node) or find_use(n.kwargs, current_node):
user_indexes.append(i)
return user_indexes | e3fc32aa7baf549bbfe4a2fb7558aa7bfb3d84b0 | 3,652,051 |
from operator import and_
def insert_from(
table_name, into_table_name, column_names=None, join_columns=None, create_if_not_exists=False, engine=None
):
"""
Inserts records from one table into another
:param table_name: the name of the table from which to insert records
:param into_table_name: the name of the table into which the records will go
:param column_names: an optional reduced list of column names to specify for insertion
:param join_columns: one or more column names that constitute unique records, not to be inserted
:param create_if_not_exists: if True, create into_table_name if it doesn't exist, otherwise exit with warning
:param engine: an optional sqlalchemy.engine to use in the UPDATE query
"""
both_tables = get_tables(engine=engine)
from_table = both_tables.get(table_name)
into_table = both_tables.get(into_table_name)
validate_table_name(from_table, table_name)
if not table_exists(into_table):
if not create_if_not_exists:
raise ValueError(f"No table named {into_table_name} to insert into")
return select_from(table_name, into_table_name, column_names, engine=engine)
# Validate parameters for excluding unique records
if isinstance(join_columns, str):
join_columns = [c.strip() for c in join_columns.split(",")]
if join_columns:
validate_columns_in(
from_table, join_columns,
empty_table=table_name,
message=f"Join columns missing in source table {table_name}"
)
validate_columns_in(
into_table, join_columns,
empty_table=into_table_name,
message=f"Join columns missing in target table {into_table_name}"
)
# Prepare column names to be inserted
log_message = f"insert_from: populating {into_table_name} from {table_name}"
from_cols = from_table.columns
into_cols = into_table.columns
if isinstance(column_names, str):
column_names = column_names.split(",")
if column_names is None or "*" in column_names:
log_message += f", with all columns in {table_name}"
insert_cols = from_cols
else:
log_message += f", with specified columns in {table_name}"
insert_cols = [c for c in from_cols if c.name in column_names]
if not insert_cols:
logger.warning("insert_from: no columns to insert")
return
elif column_names and len(column_names) > len(insert_cols):
target_cols = set(c.name for c in insert_cols)
ignore_cols = ", ".join(set(column_names).difference(target_cols))
logger.warning(f"insert_from: ignoring columns: {ignore_cols}")
# Prepare query with specified columns and filtering
if not join_columns:
insert_vals = Select(insert_cols).select_from(from_table)
else:
log_message += f", excluding those matching: {join_columns}"
# Exclude records matching specified columns via outer join
insert_from = from_table.outerjoin(
into_table, and_(*[from_cols[col] == into_cols[col] for col in join_columns])
)
insert_vals = (
Select(insert_cols)
.select_from(insert_from)
.where(and_(*[into_cols[col].is_(None) for col in join_columns]))
)
logger.info(log_message)
insert_from = Insert(into_table).from_select(names=[c.name for c in insert_cols], select=insert_vals)
with from_table.bind.connect() as conn:
conn.execute(insert_from.execution_options(autocommit=True)) | 8c013bdaeb1c16e1a487c4a90c0554e9b673f4d9 | 3,652,052 |
from PyQt5.QtGui import QColor, QTextCharFormat, QFont  # Qt binding assumed

def format(color, style=''):
"""Return a QTextCharFormat with the given attributes.
"""
_color = QColor()
_color.setNamedColor(color)
_format = QTextCharFormat()
_format.setForeground(_color)
if 'bold' in style:
_format.setFontWeight(QFont.Bold)
if 'italic' in style:
_format.setFontItalic(True)
return _format | bd526cab85bd8909904af0c6e32b22d29c1de561 | 3,652,053 |
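# Hedged usage sketch (a typical highlighter style table built on this
# helper; the token names and colors are illustrative):
STYLES = {
    'keyword': format('blue', 'bold'),
    'operator': format('red'),
    'comment': format('darkGreen', 'italic'),
}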
import numpy as np

def array_to_mincvolume(filename, array, like,
volumeType=None, dtype=None, labels=None,
write=True, close=False):
"""
Create a mincVolume from a data array.
Create a mincVolume from a data array, using coordinate system information from another volume.
Parameters
----------
    filename : str
A path to the new MINC volume.
array : array_like
Input array to convert to mincVolume.
like : mincVolume or str
Either an existing mincVolume object, or a path to one on disk.
volumeType : str, optional
MINC type. The default is None.
If no value is given (default), then volumeType will be set as ushort if the dtype
is a subtype of np.integer, otherwise volumeType will be set as double.
dtype : np.dtype, optional
Datatype for the mincVolume data array. The default is None.
If no value is given (default), the dtype of array is used.
labels : bool, optional
Does the output mincVolume represent integer labels? The default is None.
If no value is given (default), then labels will be set as True if the dtype
is a subtype of np.integer, otherwise labels will be set as False.
write : bool, optional
Should the mincVolume be written to disk? Default is True.
close : bool, optional
Should the mincVolume be closed? Default is False.
Returns
-------
outvol : mincVolume
An object of mincVolume type.
"""
if dtype is None:
dtype = array.dtype
if labels is None:
if np.issubdtype(array.dtype, np.integer):
labels = True
else:
labels = False
if volumeType is None:
if np.issubdtype(array.dtype, np.integer):
volumeType='ushort'
else:
volumeType='double'
if like.__class__ == mincVolume:
outvol = volumeFromData(outputFilename=filename,
data=array,
dimnames=like.getDimensionNames(),
starts=like.getStarts(),
steps=like.getSeparations(),
volumeType=volumeType,
dtype=dtype,
labels=labels,
x_dir_cosines=[i for i in like._x_direction_cosines],
y_dir_cosines=[i for i in like._y_direction_cosines],
z_dir_cosines=[i for i in like._z_direction_cosines],
)
# Set dimnames and starts
outvol.starts = like.getStarts()
outvol.dimnames = like.getDimensionNames()
else:
outvol = volumeLikeFile(likeFilename=like, outputFilename=filename,
dtype=dtype, volumeType=volumeType, labels=labels)
outvol.data = array
# Finish
if write:
outvol.writeFile()
if close:
outvol.closeVolume()
return(outvol) | 16074668c1143091322969a501b23203378ca169 | 3,652,054 |
import random
def getRandomPipe():
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
] | a5789a090ff7ab88b5cf6cbf4ad8e0943ea9ccdf | 3,652,055 |
from typing import Optional
def get_spot_market_price(facility: Optional[str] = None,
plan: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSpotMarketPriceResult:
"""
Use this data source to get Packet Spot Market Price.
## Example Usage
```python
import pulumi
import pulumi_packet as packet
example = packet.get_spot_market_price(facility="ewr1",
plan="c1.small.x86")
```
:param str facility: Name of the facility.
:param str plan: Name of the plan.
"""
__args__ = dict()
__args__['facility'] = facility
__args__['plan'] = plan
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('packet:index/getSpotMarketPrice:getSpotMarketPrice', __args__, opts=opts, typ=GetSpotMarketPriceResult).value
return AwaitableGetSpotMarketPriceResult(
facility=__ret__.facility,
id=__ret__.id,
plan=__ret__.plan,
price=__ret__.price) | 912f07ced8a12ba4df7992c8cbba2576673a893f | 3,652,056 |
def _get_cluster_id(emr, clusterName: str) -> str:  # emr: a boto3 EMR client
"""
Returns the id of a running cluster with given cluster name.
"""
clusters = emr.list_clusters()["Clusters"]
# choose the correct cluster
clusters = [c for c in clusters if c["Name"] == clusterName and c["Status"]["State"] in ["WAITING", "RUNNING"]]
if not clusters:
logger.info("No valid clusters")
raise Exception("cannot find running cluster: " + clusterName)
# take the first relevant cluster
return clusters[0]["Id"] | 9fba31d05157411d8fddde7502e174433859898f | 3,652,057 |
def seed_student(request, i):
"""Returns the properties for a new student entity.
"""
gsoc2009 = Program.get_by_key_name('google/gsoc2009')
user = User.get_by_key_name('user_%d' % i)
if not gsoc2009:
raise Error('Run seed_db first')
if not user:
raise Error('Run seed_many for at least %d users first.' % i)
properties = {
'key_name':'google/gsoc2009/student_%d' % i,
'link_id': 'student_%d' % i,
'scope_path': 'google/gsoc2009',
'scope': gsoc2009,
'user' : user,
'given_name': 'Student %d' % i,
'surname': 'Last Name',
'name_on_documents': 'Test Example',
'email': '[email protected]',
'res_street': 'Some Street',
'res_city': 'Some City',
'res_state': 'Some State',
'res_country': 'United States',
'res_postalcode': '12345',
'phone': '1-555-BANANA',
'birth_date': db.DateProperty.now(),
'agreed_to_tos': True,
'school_name': 'School %d' % i,
'school_country': 'United States',
'major': 'Computer Science',
'degree': 'Undergraduate',
'expected_graduation': 2012,
'program_knowledge': 'Knowledge %d' % i,
'school': None,
'can_we_contact_you': True,
}
return properties | 01f1923b4d1e5af74c6bbad2649f04be62f29c6f | 3,652,058 |
from typing import List
import numpy as np
from scipy.interpolate import interp1d
def apply(effect: List[float], signal: List[float]):
"""Given effect interpolated to length of given signal.
Args:
effect: effect to interpolate to signal length.
signal: length of which effect is interpolated to.
"""
max_len = max(len(effect), len(signal))
# Signal indices to effect indices.
i = interp1d(
np.linspace(0, len(signal) - 1, max_len),
np.linspace(0, len(effect) - 1, max_len),
)(np.arange(len(signal)))
# print(
# f"i[0:10] = {i[0:10]}, np.arange(len(effect))[0:10] = {np.arange(len(effect))[0:10]}, effect[0:10] = {effect[0:10]}"
# )
# Effect indices to effect.
return interp1d(np.arange(len(effect)), effect)(i) | 11bd4938c997cbef445493274fa3ee7447f1821e | 3,652,059 |
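# Hedged usage sketch: stretch a 4-sample ramp onto an 8-sample signal.
effect = [0.0, 1.0, 2.0, 3.0]
signal = list(range(8))
stretched = apply(effect, signal)
assert len(stretched) == 8 and np.isclose(stretched[-1], 3.0)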
def cli_cosmosdb_sql_trigger_update(client,
resource_group_name,
account_name,
database_name,
container_name,
trigger_name,
trigger_body=None,
trigger_type=None,
trigger_operation=None):
"""Updates an Azure Cosmos DB SQL trigger """
logger.debug('reading SQL trigger')
sql_trigger = client.get_sql_trigger(resource_group_name, account_name, database_name, container_name, trigger_name)
sql_trigger_resource = SqlTriggerResource(id=trigger_name)
sql_trigger_resource.body = sql_trigger.resource.body
sql_trigger_resource.trigger_operation = sql_trigger.resource.trigger_operation
sql_trigger_resource.trigger_type = sql_trigger.resource.trigger_type
if _populate_sql_trigger_definition(sql_trigger_resource,
trigger_body,
trigger_operation,
trigger_type):
logger.debug('replacing SQL trigger')
sql_trigger_create_update_resource = SqlTriggerCreateUpdateParameters(
resource=sql_trigger_resource,
options={})
return client.create_update_sql_trigger(resource_group_name,
account_name,
database_name,
container_name,
trigger_name,
sql_trigger_create_update_resource) | bee6e060aecde084c9690eaeb316bdac7ae12b31 | 3,652,060 |
from typing import List
import numpy as np
from sklearn.linear_model import LogisticRegression
def evaluate_features(features: np.ndarray, labels: np.ndarray, train_frac: float = 0.8) -> List[int]:
"""
Evaluates the marginal impact of each feature in the given array (by retraining).
Args:
features: A [N, T, D] array of input features for each sequence element
labels: A [N] array of labels per instance
Returns:
An (ordered) list of feature indices
"""
# For feasibility purposes, we start with the first feature
result: List[int] = [0]
remaining_idx = list(range(1, features.shape[1]))
split_point = int(features.shape[0] * train_frac)
train_features = features[0:split_point, :, :]
test_features = features[split_point:, :, :]
train_labels = labels[0:split_point]
test_labels = labels[split_point:]
train_samples = train_features.shape[0]
test_samples = test_features.shape[0]
while len(remaining_idx) > 0:
best_accuracy = 0.0
best_idx = None
for feature_idx in remaining_idx:
feature_indices = result + [feature_idx]
X_train = train_features[:, feature_indices, :].reshape(train_samples, -1)
X_test = test_features[:, feature_indices, :].reshape(test_samples, -1)
clf = LogisticRegression(max_iter=500)
clf.fit(X_train, train_labels)
accuracy = clf.score(X_test, test_labels)
if accuracy > best_accuracy:
best_accuracy = accuracy
best_idx = feature_idx
result.append(best_idx)
remaining_idx.pop(remaining_idx.index(best_idx))
print(best_accuracy)
print(result)
return result | 88b9d7cab4723934f16ab59c43e41d5a4140daa5 | 3,652,061 |
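# Hedged usage sketch (synthetic data; keep dimensions small, since the
# greedy search retrains a classifier for every remaining feature):
rng = np.random.RandomState(0)
features = rng.normal(size=(200, 4, 2))       # [N, T, D]
labels = (features[:, 1, 0] > 0).astype(int)  # depends mostly on feature 1
order = evaluate_features(features, labels)
assert sorted(order) == [0, 1, 2, 3]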
import six
def pad_for_tpu(shapes_dict, hparams, max_length):
"""Pads unknown features' dimensions for TPU."""
padded_shapes = {}
def get_filler(specified_max_length):
if not specified_max_length:
return max_length
return min(specified_max_length, max_length)
inputs_none_filler = get_filler(hparams.max_input_seq_length)
targets_none_filler = get_filler(hparams.max_target_seq_length)
def pad_one_shape(shape, none_filler):
return [
(dim if dim is not None else none_filler) for dim in shape.as_list()
]
for key, shape in six.iteritems(shapes_dict):
if key == "inputs":
padded_shapes[key] = pad_one_shape(shape, inputs_none_filler)
elif key == "targets":
padded_shapes[key] = pad_one_shape(shape, targets_none_filler)
else:
padded_shapes[key] = pad_one_shape(shape, max_length)
return padded_shapes | b72e1463fad9740c8a265b795c4b3c5a45e42a9a | 3,652,062 |
from typing import Union
from typing import Tuple
from typing import List
def _get_child_query_node_and_out_name(
ast: Union[FieldNode, InlineFragmentNode],
child_type_name: str,
child_field_name: str,
name_assigner: IntermediateOutNameAssigner,
) -> Tuple[SubQueryNode, str]:
"""Create a query node out of ast, return node and unique out_name on field with input name.
Create a new document out of the input AST, that has the same structure as the input. For
instance, if the input AST can be represented by
out_Human {
name
}
where out_Human is a vertex field going to type Human, the resulting document will be
{
Human {
name
}
}
If the input AST starts with a type coercion, the resulting document will start with the
coerced type, rather than the original union or interface type.
The output child_node will be wrapped around this new DocumentNode. In addition, if no field
of child_field_name currently exists, such a field will be added. If there is no @output
directive on this field, a new @output directive will be added.
Args:
ast: Representing the AST that we're using to build a child node.
It is not modified by this function.
child_type_name: Name of the type to which this cross schema field leads.
child_field_name: str. If no field of this name currently exists as a part of the root
selections of the input AST, a new field will be created in the AST
contained in the output child query node
name_assigner: Object used to generate and keep track of names of newly created
@output directives.
Returns:
Tuple containing:
- The child sub query node wrapping around the input AST.
- The out_name of the @output directive uniquely identifying the field used for
stitching in this sub query node.
"""
# Get type and selections of child AST, taking into account type coercions
child_selection_set = ast.selection_set
if child_selection_set is None:
raise AssertionError("Invalid AST. child_selection_set cannot be None.")
type_coercion = try_get_inline_fragment(child_selection_set.selections)
if type_coercion is not None:
child_type_name = type_coercion.type_condition.name.value
child_selection_set = type_coercion.selection_set
child_selections: List[SelectionNode] = []
for child_selection in child_selection_set.selections:
if not isinstance(child_selection, FieldNode):
raise AssertionError(
"Expected child_selection to be of type FieldNode, but was of "
f"type {type(child_selection)}."
)
child_selections.append(child_selection)
# Get existing field with name in child
existing_child_property_field = try_get_ast_by_name_and_type(
child_selections, child_field_name, FieldNode
)
# Validate that existing_child_property_field is None or FieldNode.
# It should be impossible for this to *not* be the case, but check so that mypy is happy.
if not (
existing_child_property_field is None
or isinstance(existing_child_property_field, FieldNode)
):
raise AssertionError(
"Unreachable code reached! existing_child_property_field should be None or of type "
f"FieldNode, but was type {type(existing_child_property_field)}."
)
child_property_field = _get_property_field(
existing_child_property_field, child_field_name, None
)
# Add @output if needed, record out_name
child_property_field, child_output_name = _get_out_name_optionally_add_output(
child_property_field, name_assigner
)
# Get new child_selections by replacing or adding in new property field
child_property_fields_map, child_vertex_fields = _split_selections_property_and_vertex(
child_selections
)
child_property_fields_map[child_field_name] = child_property_field
child_selections = _get_selections_from_property_and_vertex_fields(
child_property_fields_map, child_vertex_fields
)
# Wrap around
# NOTE: if child_type_name does not actually exist as a root field (not all types are
# required to have a corresponding root vertex field), then this query will be invalid.
child_query_ast = _get_query_document(child_type_name, child_selections)
child_query_node = SubQueryNode(child_query_ast)
return child_query_node, child_output_name | c99e2a1aa7ea56600203e1550dca6a0a59eed094 | 3,652,063 |
def has_balanced_parens(exp: str) -> bool:
"""
Checks if the parentheses in the given expression `exp` are balanced,
that is, if each opening parenthesis is matched by a corresponding
closing parenthesis.
**Example:**
::
>>> has_balanced_parens("(((a * b) + c)")
False
:param exp: The expression to check.
:return: `True` if the parentheses are balanced, `False` otherwise.
"""
# Use a stack to determine if the expression is balanced.
# Ref: https://youtu.be/HJOnJU77EUs?t=75 [1:15 - 2:47]
paren_stack = []
for e in exp:
if e == '(':
paren_stack.append(e)
elif e == ')':
try:
paren_stack.pop()
except IndexError:
return False
return len(paren_stack) == 0 | f76c7cafcf6aadd0c2cb947f0c49d23835a9f6e4 | 3,652,064 |
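# Hedged quick checks (complementing the docstring example):
assert has_balanced_parens("((a * b) + c)")
assert not has_balanced_parens("(((a * b) + c)")
assert not has_balanced_parens(")(")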
def _is_binary(c):
"""Ensures character is a binary digit."""
return c in '01' | b763a5a8ba591b100fea64a589dcb0aea9fbcf53 | 3,652,065 |
import os
import re
def get_interface_ib_name(hosts, interface, verbose=True):
"""Get the InfiniBand name of this network interface on each host.
Args:
hosts (NodeSet): hosts on which to detect the InfiniBand name
interface (str): interface for which to obtain the InfiniBand name
verbose (bool, optional): display command details. Defaults to True.
Returns:
dict: a dictionary of InfiniBand name keys and NodeSet values on which they were detected
"""
net_path = os.path.join(os.path.sep, "sys", "class", "net")
command = f"ls -1 {os.path.join(net_path, interface, 'device', 'infiniband')}"
task = run_task(hosts, command, verbose=verbose)
if verbose:
display_task(task)
    # Populate a dictionary of IB names with a NodeSet of hosts on which it was detected
ib_names = {}
results = dict(task.iter_retcodes())
if 0 in results:
for output, nodelist in task.iter_buffers(results[0]):
ib_name_list = []
for line in output:
match = re.findall(r"([A-Za-z0-9;_+]+)", line.decode("utf-8"))
if len(match) == 1:
ib_name_list.append(match[0])
if ib_name_list:
ib_names[",".join(ib_name_list)] = NodeSet.fromlist(nodelist)
return ib_names | 41aa431fda790ddfe427363163107f50d60f20e0 | 3,652,066 |
import os
import sys
def _get_script(args_file):
"""compiled contents of script or error out"""
DEFAULT_SCRIPT = 'build.jfdi'
script_path = None
if args_file != None:
script_path = args_file
elif os.path.exists(DEFAULT_SCRIPT):
script_path = DEFAULT_SCRIPT
if script_path == None or not os.path.exists(script_path):
fatal_msg = "Build file not found\n"
fatal_msg += "\nIf this is your first run, use %s init\n" \
% sys.argv[0]
fatal_msg += "%s --help for detailed help.\n\n" \
% sys.argv[0]
_fatal_error(fatal_msg)
with open(script_path) as f:
script = f.read()
try:
pycode = compile(script, script_path, mode='exec')
except SyntaxError as ex:
msg = "SyntaxError in (%s, line %d):\n\t%s\n" \
% (ex.filename, ex.lineno, ex.text)
_fatal_error(msg)
return pycode | 26defa7db2166cb379575463e8e1e2dddae8b6b1 | 3,652,067 |
from ml import cv
import logging
from pathlib import Path
import sys
import numpy as np
import torch as th
def render(img,
result,
classes=None,
score_thr=None,
show=True,
wait_time=0,
path=None):
"""Visualize the detection on the image and optionally save to a file.
Args:
img(BGR): CV2 BGR.
result(Tensor[K, 6] or List[(tid, Tensor[6]))+]): detection result in xyxysc
classes(list[str] or tuple[str]): A list of trained class names
score_thr(float): The threshold to visualize the bboxes and masks.
tracking(bool): Whether the results are tracking
wait_time (int): Value of waitKey param for display
path(str, optional): path to save the rendered image
"""
labels = colors = None
img = np.ascontiguousarray(img)
if th.is_tensor(result):
# Detection only
if score_thr:
result = result[result[:, 4] >= score_thr]
labels = [classes[c.int()] for c in result[:, 5]] if classes else [f"[{int(c)}]" for c in result[:, 5]]
colors = [COLORS91[c.int()] for c in result[:, 5]]
logging.debug(f"Drawing detection: {result} with labels={labels}")
cv.drawBoxes(img, result[:, :4], labels=labels, scores=result[:, 4], colors=colors)
elif result:
# Detection with tracking [(tid, xyxysc)*]
tids, boxes = list(zip(*result))
result = th.stack(boxes)
if score_thr:
result = result[result[:, 4] >= score_thr]
if classes:
labels = [f"{classes[c.int()]}[{tid}]" for tid, c in zip(tids, result[:, 5])]
else:
labels = [f"[{int(c)}][{tid}]" for tid, c in zip(tids, result[:, 5])]
colors = [cv.rgb(tid, integral=True) for tid in tids]
cv.drawBoxes(img, result[:, :4], labels=labels, scores=result[:, 4], colors=colors)
# logging.info(f"Tracks labels={labels}")
# logging.info(f"Colors colors={colors}")
else:
logging.warning(f"No RoIs to render")
path = path and Path(path) or None
if sys.x_available() and show:
cv.imshow(img, title=str(path) or '')
if path:
cv.save(img, path)
return img | 4c8cbc005cca4e41bd74eb13cc973fe40186e83e | 3,652,068 |
import logging
import pkgutil
def check_python_import(package_or_module):
"""
Checks if a python package or module is importable.
Arguments:
package_or_module -- the package or module name to check
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking python import '%s'...", package_or_module)
loader = pkgutil.get_loader(package_or_module)
found = loader is not None
if found:
logger.debug("Python %s '%s' found: %r",
"package" if loader.is_package(package_or_module)
else "module", package_or_module, loader.get_filename())
else:
logger.debug("Python import '%s' not found", package_or_module)
return found | e6371d3bcb08efed2dfbdbfc1b4d30409e0f10ba | 3,652,069 |
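# Hedged usage sketch: a stdlib module is importable, a made-up name is not.
assert check_python_import("os")
assert not check_python_import("no_such_pkg_xyz")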
def read_frame_positions(lmp_trj):
""" Read stream positions in trajectory file corresponding to
time-step and atom-data.
"""
ts_pos, data_pos = [], []
with open(lmp_trj, 'r') as fid:
while True:
line = fid.readline()
if not line:
break
if line.startswith('ITEM: TIMESTEP'):
ts_pos.append(fid.tell())
elif line.startswith('ITEM: ATOMS id'):
data_pos.append(fid.tell())
return ts_pos, data_pos | c168f08577e38758bf3d9d42bae8379125d7fc33 | 3,652,070 |
async def async_setup_entry(hass, config_entry):
"""Set up Enedis as config entry."""
hass.data.setdefault(DOMAIN, {})
pdl = config_entry.data.get(CONF_PDL)
token = config_entry.data.get(CONF_TOKEN)
session = async_create_clientsession(hass)
enedis = EnedisGateway(pdl=pdl, token=token, session=session)
coordinator = EnedisDataUpdateCoordinator(hass, config_entry, enedis)
await coordinator.async_config_entry_first_refresh()
if coordinator.data is None:
return False
undo_listener = config_entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][config_entry.entry_id] = {
COORDINATOR: coordinator,
CONF_PDL: pdl,
UNDO_LISTENER: undo_listener,
}
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
async def async_reload_history(call) -> None:
await coordinator.async_load_datas_history(call)
hass.services.async_register(
DOMAIN, "reload_history", async_reload_history, schema=vol.Schema({})
)
return True | 93ee0360c509088b75935f6b94bf7d918658e86b | 3,652,071 |
import requests
def get_file_list(prefix):
""" Get file list from http prefix """
print("Fetching file list from", prefix)
k = requests.get(prefix)
if not k.ok:
raise Exception("Unable to get http directory listing")
parser = HRefParser()
parser.feed(k.content.decode())
k.close()
return parser.href_list | ca559a20e6f35f31a07e25f7f2a9dbc5db450cc0 | 3,652,072 |
import torch.nn as nn
from torch.utils.data import DataLoader

def train_model(model: nn.Module, trainDataLoader: DataLoader, testDataLoader: DataLoader, epochs: int, optimizer, lossFunction, metric, device) -> dict:
"""
Training model function: it will train the model for a number of epochs, with the corresponding optimizer.
It will return the corresponding losses and metrics in a dictionary.
"""
# Send model to the corresponding device
model.to(device)
# Creating loss dictionary
losses = {
'training_batchs': [],
'training_average': [],
'testing_average': [],
'metric_average': []
}
# Iterating over number of epochs
for epoch in range(epochs):
print(f'Starting epoch {epoch + 1}')
# Training
epoch_loss = training_epoch(
            model, trainDataLoader, testDataLoader, lossFunction, optimizer, metric, device)
# Updating loss dictionary
for key, loss in epoch_loss.items():
try:
losses[key].extend(loss)
except:
losses[key].append(loss)
# print training stats after epoch
print(f'Results for epoch {epoch + 1}')
print('------------------------------')
print(f'Training loss average: {epoch_loss["training_average"]}')
print(f'Test loss average: {epoch_loss["testing_average"]}')
print(f'Metric average: {epoch_loss["metric_average"]}')
return losses | bd971d4d5063ad83188e3093f46a6dba86ac995b | 3,652,073 |
import builtins
def _has_profile():
"""Check whether we have kernprof & kernprof has given us global 'profile'
object."""
return kernprof is not None and hasattr(builtins, 'profile') | 3cbb4a0539efbadcea22e2d39ee520e14d7c6da3 | 3,652,074 |
import requests
import re
from bs4 import BeautifulSoup as bs
def get_video_info(url):
"""
adapted from https://www.thepythoncode.com/article/get-youtube-data-python
Function takes a YouTube URL and extracts the different parts of the video:
title, view number, description, date-published, likes, dislikes, channel name,
channel url, and channel subscribers. Returned as python dictionary.
"""
# TODO: This works for most videos however there are videos that come up
# that have video info but are reported missing
# adapted from https://www.thepythoncode.com/article/get-youtube-data-python
# Starts the process of scraping the video information
try:
# requests URL
content = requests.get(url)
# create beautiful soup object to parse HTML
soup = bs(content.content, "html.parser")
# initialize the result
result = {}
# video title
try:
result['title'] = soup.find("span", attrs={"class": "watch-title"}).text.strip()
except:
result['title'] = "Not Found (Perhaps Hidden)"
# try-catch for finding video views using the HTML 'watch-view-count'
try:
# video views (converted to integer)
result['views'] = int(
soup.find("div", attrs={"class": "watch-view-count"}).text[:-6].replace(",", ""))
except:
try:
# Tries to find the views using the 'stat view-count'
result['views'] = int(
soup.find("span", attrs={"class": "stat view-count"}).text[:-6].replace(",", "").replace("views",
""))
except:
# If views can't be found
result['views'] = "Not Found (Perhaps Hidden)"
# video description
try:
result['description'] = soup.find("p", attrs={"id": "eow-description"}).text
except:
result['description'] = "Not Found (Perhaps Hidden)"
# date published
try:
result['date_published'] = soup.find("strong", attrs={"class": "watch-time-text"}).text.replace(
"Published on ", "").replace("Premiered ", "")
except:
result['date_published'] = "Not Found (Perhaps Hidden)"
# try-catch for finding the likes and dislikes
try:
# number of likes as integer
result['likes'] = int(soup.find("button", attrs={"title": "I like this"}).text.replace(",", ""))
# number of dislikes as integer
result['dislikes'] = int(
soup.find("button", attrs={"title": "I dislike this"}).text.replace(",", ""))
except:
try:
# This took me so long to figure out. If you can find a better way PLEASE let me know
# Saves FULL html file into a variable
video_html = soup.prettify()
# pattern to extract html code that has the like count
pattern_like = re.compile(r'\\"likeCount\\":[0-9]+[0-9]')
# pattern to extract numbers our of like count
pattern_like2 = re.compile(r'[0-9]+[0-9]')
# Finds the html code with likecount
matches_in_html_like = pattern_like.findall(video_html)
# Extracts the numbers from the html code
cleaned_html_number_like = int((pattern_like2.findall(''.join(matches_in_html_like)))[0])
result['likes'] = cleaned_html_number_like
pattern_dislike = re.compile(r'\\"dislikeCount\\":[0-9]+[0-9]')
# pattern to extract numbers our of like count
pattern_dislike2 = re.compile(r'[0-9]+[0-9]')
# Finds the html code with dislikeCount
matches_in_html_dislike = pattern_dislike.findall(video_html)
# Extracts the numbers from the html code
cleaned_html_number_dislike = int((pattern_dislike2.findall(''.join(matches_in_html_dislike)))[0])
result['dislikes'] = cleaned_html_number_dislike
except:
result['likes'] = "Not Found (Perhaps Hidden)"
result['dislikes'] = "Not Found (Perhaps Hidden)"
# channel details
try:
channel_tag = soup.find("div", attrs={"class": "yt-user-info"}).find("a")
except:
channel_tag = "Not Found (Perhaps Hidden)"
# channel name
try:
channel_name = channel_tag.text
except:
channel_name = "Not Found (Perhaps Hidden)"
# channel URL
try:
channel_url = f"https://www.youtube.com{channel_tag['href']}"
except:
channel_url = "Not Found (Perhaps Hidden)"
# try-catch for subscription count (youtube user can hide these)
try:
channel_subscribers = soup.find("span", attrs={"class": "yt-subscriber-count"}).text.strip()
except:
channel_subscribers = "Not Found (Perhaps Hidden)"
result['channel'] = {'name': channel_name, 'url': channel_url, 'subscribers': channel_subscribers}
# return the result
print("Video Information Found.")
return result
    # If none of the video information can be found, fall back to a placeholder
    except:
        # Returns a dictionary indicating that no video information was found
print("No Video Information Found.")
result = {'title': "No Video Information Found",
'views': "No Video Information Found",
'description': "No Video Information Found",
'date_published': "No Video Information Found",
'likes': "No Video Information Found",
'dislikes': "No Video Information Found"}
channel_tag = 'No Video Information Found'
channel_name = 'No Video Information Found'
channel_url = 'No Video Information Found'
channel_subscribers = 'No Video Information Found'
result['channel'] = {'name': channel_name, 'url': channel_url, 'subscribers': channel_subscribers}
return result | 40e11098b49d676e6b17ca0cc0a6770ab83a996f | 3,652,075 |
from collections import OrderedDict, defaultdict, namedtuple
from six import iteritems
def routing_tree_to_tables(routes, net_keys):
"""Convert a set of
:py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip
set of routing tables.
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same data structure produced by routers in the
:py:mod:`~rig.place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
"""
# Pairs of inbound and outbound routes.
InOutPair = namedtuple("InOutPair", "ins, outs")
# {(x, y): {(key, mask): _InOutPair}}
route_sets = defaultdict(OrderedDict)
for net, routing_tree in iteritems(routes):
key, mask = net_keys[net]
# The direction is the Links entry which describes the direction in
# which we last moved to reach the node (or None for the root).
for direction, (x, y), out_directions in routing_tree.traverse():
# Determine the in_direction
in_direction = direction
if in_direction is not None:
in_direction = direction.opposite
# Add a routing entry
if (key, mask) in route_sets[(x, y)]:
# If there is an existing route set raise an error if the out
# directions are not equivalent.
if route_sets[(x, y)][(key, mask)].outs != out_directions:
raise MultisourceRouteError(key, mask, (x, y))
# Otherwise, add the input directions as this represents a
# merge of the routes.
route_sets[(x, y)][(key, mask)].ins.add(in_direction)
else:
# Otherwise create a new route set
route_sets[(x, y)][(key, mask)] = \
InOutPair({in_direction}, set(out_directions))
# Construct the routing tables from the route sets
routing_tables = defaultdict(list)
for (x, y), routes in iteritems(route_sets):
for (key, mask), route in iteritems(routes):
# Add the route
routing_tables[(x, y)].append(
RoutingTableEntry(route.outs, key, mask, route.ins)
)
return routing_tables | 50384fa0f834f6311cea3b2901b6723ca3fab3c7 | 3,652,076 |
def extract_response_objects(image_file, mask_file, stim_file, input_dict):
"""inputs are file names for aligned images, binary mask, and unprocessed stimulus file
outputs a list of response objects"""
# read files
I = read_tifs(image_file)
mask = read_tifs(mask_file)
labels = segment_ROIs(mask)
print('number of ROIs = ' + str(np.max(labels)))
# process stimulus file
stim_data, stim_data_OG, header = count_frames(stim_file)
if (len(I)) != int(stim_data[-1][-1]):
print("number of images does not match stimulus file")
print('stimulus frames = ' + str(int(stim_data[-1][-1])))
print('image frames = ' + str(len(I)))
# stim_data = fix_dropped_frames(len(I),float(input_dict['time_interval']),stim_data,stim_data_OG,int(input_dict['gt_index']))
    # get frames, relative time, stimulus type, and stimulus state from stim data
fr, rt, st = parse_stim_file(stim_data,
rt_index=int(input_dict['rt_index']),
st_index=input_dict['st_index'])
ss = define_stim_state(rt, float(input_dict['on_time']),
float(input_dict['off_time']))
    # measure fluorescence intensities in each ROI
responses, num, labels = measure_multiple_ROIs(I, mask)
# load response objects
response_objects = []
for r, n in zip(responses, num):
ro = ResponseClassSimple.Response(F=r, stim_time=rt, stim_state=ss,
ROI_num=n, stim_type=st)
ro.sample_name = input_dict['sample_name']
ro.reporter_name = input_dict['reporter_name']
ro.driver_name = input_dict['driver_name']
ro.stimulus_name = input_dict['stimulus_name']
ro.time_interval = float(input_dict['time_interval'])
response_objects.append(ro)
return response_objects, stim_data, header, labels | 95b7a5e831d9ab0703c51d41966b36babf52b24d | 3,652,077 |
import torch
from torch.nn.modules.utils import _pair
def get_top_diff_loc(imgs, ref_imgs, crop_size, grid_size, device, topk=10):
"""Randomly get a crop bounding box."""
assert imgs.shape == ref_imgs.shape
batches = imgs.size(0)
img_size = imgs.shape[2:]
crop_size = _pair(crop_size)
grid_size = _pair(grid_size)
stride_h = (img_size[0] - crop_size[0]) // (grid_size[0] - 1)
stride_w = (img_size[1] - crop_size[1]) // (grid_size[1] - 1)
diff_imgs = imgs - ref_imgs
diff_list = []
for i in range(grid_size[0]):
for j in range(grid_size[1]):
crop_diff = diff_imgs[:, :,
i * stride_h:i * stride_h + crop_size[0],
j * stride_w:j * stride_w + crop_size[1]]
diff_list.append(crop_diff.abs().sum(dim=(1, 2, 3)))
# [batches, grid_size**2]
diff_sum = torch.stack(diff_list, dim=1)
diff_topk_idx = torch.argsort(diff_sum, dim=1, descending=True)[:, :topk]
select_idx = diff_topk_idx
idx_i = select_idx // grid_size[1]
idx_j = select_idx % grid_size[1]
crop_y1, crop_y2 = idx_i * stride_h, idx_i * stride_h + crop_size[0]
crop_x1, crop_x2 = idx_j * stride_w, idx_j * stride_w + crop_size[1]
center = torch.stack([(crop_x1 + crop_x2) * 0.5,
(crop_y1 + crop_y2) * 0.5],
dim=-1).float()
return center | 2e35cc56a484432dd1c1ef05f38e01079414eecb | 3,652,078 |
import json
def decode(file):
"""
    This function creates a dictionary out of a given file thanks to pre-existing json functions.
    :param file: The file to decode.
    :return: The corresponding Python dictionary or None if something went wrong (i.e: the given file \
    is invalid).
"""
    # JSON to dictionary
tmp_res = None
try:
with open(file, "r") as f:
tmp_res = json.load(f)
except Exception as e:
print(e)
return None
# Gets the type of problem handled here
problem_type = ProblemType.identify_problem(tmp_res)
res = {}
# Gets the field's limits + the bottom left and top right points of the field
res["field_limits"] = tmp_res["field_limits"]
res["bottom_left"] = Point(res["field_limits"][0][0], res["field_limits"][1][0])
res["top_right"] = Point(res["field_limits"][0][1], res["field_limits"][1][1])
# Gets the list of goals
res["goals"] = []
for goal in tmp_res["goals"]:
posts = goal["posts"]
direction = goal["direction"]
post1 = Point(posts[0][0], posts[0][1])
post2 = Point(posts[1][0], posts[1][1])
direction = Vector(direction[0], -direction[1])
goal = Goal(post1, post2, direction)
res["goals"].append(goal)
# Gets the list of opponents
res["opponents"] = []
for opponent in tmp_res["opponents"]:
res["opponents"].append(Opponent(Point(opponent[0], opponent[1])))
# Gets the radius of the robots
res["radius"] = tmp_res["robot_radius"]
    # Gets theta and pos steps for opponents' shots and defenders' positions respectively
res["theta_step"] = tmp_res["theta_step"]
res["pos_step"] = tmp_res["pos_step"]
# Gets the list of defenders if the problem is initial positions
if problem_type == ProblemType.INITIAL_POS:
res["defenders"] = []
for defender in tmp_res["defenders"]:
res["defenders"].append(Defender(Point(defender[0], defender[1]), res["radius"]))
# Gets the min dist if the problem is min dist
if problem_type == ProblemType.MIN_DIST:
res["min_dist"] = tmp_res["min_dist"]
# Gets the goalkeeper area if the problem is goal keeper
if problem_type == ProblemType.GOAL_KEEPER:
res["goalkeeper_area"] = tmp_res["goalkeeper_area"]
res["gk_bottom_left"] = Point(res["goalkeeper_area"][0][0], res["goalkeeper_area"][1][0])
res["gk_top_right"] = Point(res["goalkeeper_area"][0][1], res["goalkeeper_area"][1][1])
if problem_type == ProblemType.MAX_SPEED:
res["ball_max_speed"] = tmp_res["ball_max_speed"]
res["robot_max_speed"] = tmp_res["robot_max_speed"]
return (res, problem_type) | bfd0671f9e6bb06faa02a3179c1a5e18a607882c | 3,652,079 |
import numpy as _np

def kron_compact(x):
"""Calculate the unique terms of the Kronecker product x ⊗ x.
Parameters
----------
x : (n,) or (n,k) ndarray
If two-dimensional, the product is computed column-wise (Khatri-Rao).
Returns
-------
x ⊗ x : (n(n+1)/2,) or (n(n+1)/2,k) ndarray
The "compact" Kronecker product of x with itself.
"""
if x.ndim not in (1,2):
raise ValueError("x must be one- or two-dimensional")
return _np.concatenate([x[i]*x[:i+1] for i in range(x.shape[0])], axis=0) | 55c2c89fa7eb9f7c2c1a3a296798b022c158c399 | 3,652,080 |
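# Hedged usage sketch: for n = 3 the compact product has n(n+1)/2 = 6 terms.
x = _np.array([1.0, 2.0, 3.0])
assert kron_compact(x).shape == (6,)
assert _np.allclose(kron_compact(x), [1.0, 2.0, 4.0, 3.0, 6.0, 9.0])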
def record_speech_sequentially(min_sound_lvl=0.01, speech_timeout_secs=1.):
"""Records audio in sequential audio files.
Args:
min_sound_lvl: The minimum sound level as measured by root mean square
speech_timeout_secs: Timeout of audio after that duration of silence as measured by min_sound_lvl
Returns:
The recorded audio samples.
"""
samples = []
i = 0
while True:
cmd = input("> ").encode()
if cmd == KeyInput.QUIT.value:
return samples
elif cmd == KeyInput.REDO.value:
print("Index now at {}.".format(i))
i = max(i - 1, 0)
try:
samples.pop()
except IndexError:
pass
continue
with AudioSnippetGenerator() as generator:
timeout_len = int(speech_timeout_secs * generator.sr / generator.chunk_size)
active_count = timeout_len
curr_snippet = None
for audio in generator:
if curr_snippet:
curr_snippet.append(audio)
else:
curr_snippet = audio
if audio.amplitude_rms() < min_sound_lvl:
active_count -= 1
else:
active_count = timeout_len
print("Time left: {:<10}".format(active_count), end="\r")
if active_count == 0:
i += 1
samples.append(curr_snippet)
print("Recorded #{:<10}".format(i))
break | f726f90575cf49a7de0608473f16a12f2a80d3cf | 3,652,081 |
def home():
"""
    Display "Hello World" on a localhost website.
"""
return 'Hello World' | f65a035d679878cfd897c9ea9c79fc41cf76db95 | 3,652,082 |
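# --- Editorial usage sketch (not part of the record above); Flask is an
# --- assumption, since the record does not name the web framework.
from flask import Flask
app = Flask(__name__)
app.add_url_rule('/', 'home', home)  # GET / now returns 'Hello World'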
def build_cinder(args):
"""Build the cinder client object."""
(os_username, os_password,
os_user_domain_name,
os_auth_url,
os_auth_type,
os_region_name,
os_project_name,
os_project_id,
os_project_domain_name,
os_project_domain_id,
     os_user_domain_id,
     os_user_id,
) = (
args.os_username, args.os_password,
args.os_user_domain_name,
args.os_auth_url,
args.os_auth_type,
args.os_region_name,
args.os_project_name,
args.os_project_id,
args.os_project_domain_name,
args.os_project_domain_id,
        args.os_user_domain_id,
        args.os_user_id,
)
    # Built as `session_args` so the function's own `args` parameter is not shadowed.
    session_args = {
"os_auth_url": os_auth_url,
"os_username": os_username,
"os_password": os_password,
"os_user_domain_name": os_user_domain_name,
"os_user_domain_id": os_user_domain_id,
"os_user_id": os_user_id,
"os_project_id": os_project_id,
"os_project_name": os_project_name,
"os_project_domain_name": os_project_domain_name,
"os_project_domain_id": os_project_domain_id
}
    session = get_keystone_session(**session_args)
LOG.info(f"{session}")
client_args = dict(
region_name=os_region_name,
service_type='volumev3',
service_name='',
os_endpoint='',
endpoint_type='publicURL',
insecure=False,
cacert=None,
auth_plugin=None,
http_log_debug=True,
session=session
)
    # Force this to version 3.70 of the Cinder API.
    api_version = 3.70
c = cinder.Client(api_version,
os_username,
os_password,
os_project_name,
os_auth_url,
**client_args,
)
return c | ca5ec1cc864524f39d2913b8f9b4c7c8bb0e8306 | 3,652,083 |
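# --- Editorial usage sketch (not part of the record above); `args` is assumed
# --- to be an argparse.Namespace with the usual OpenStack credential flags,
# --- and the values below are placeholders.
import argparse
ns = argparse.Namespace(
    os_username="admin", os_password="secret",
    os_user_domain_name="Default", os_user_domain_id=None, os_user_id=None,
    os_auth_url="http://keystone:5000/v3", os_auth_type="password",
    os_region_name="RegionOne",
    os_project_name="admin", os_project_id=None,
    os_project_domain_name="Default", os_project_domain_id=None,
)
client = build_cinder(ns)  # needs a reachable Keystone/Cinder deployment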
import numpy as np

def selecaoEscalar(Mcorr, criterios, N=0, a1=0.5, a2=0.5):
""" Performs a scalar feature selection which orders all features individually,
from the best to the worst to separate the classes.
INPUTS
- Mcorr: Correlation matrix of all features.
- criterios:
- N: Number of best features to be returned.
- a1: Weigth for criterios.
- a2: Weight for Mcorr.
OUTPUTS
- ordem: Tuple with the order of features.
- M: Tuple with criteria for each feature.
"""
L = Mcorr.shape[0]
if len(criterios.shape) != 1:
criterios = criterios[0]
if N==0 or N > len(criterios):
N = len(criterios)
        print('You either did not specify N or gave a number greater than the number of features.')
        print('The function will return all {} features.'.format(N))
Mcorr = abs(Mcorr)
ordem = []
M = []
ordem.append(int(np.where(criterios == max(criterios))[0]))
M.append(criterios[int(ordem[0])])
Mcorr[:, int(ordem[0])] = 1
fator = np.zeros(N)
for n in range(1, N):
index = np.linspace(0, L-1, L)
fator = np.sum(Mcorr[tuple(ordem), :], axis=0)
MK = a1*criterios - a2*fator/n
MK = np.delete(MK, ordem)
index = np.delete(index, ordem)
M.append(max(MK))
ordem.append(int(index[int(np.where(MK == max(MK))[0])]))
ordem = tuple(ordem)
M = tuple(M)
return ordem, M | 713a7c8543cefdef8f4a35dd970d326fb49229a1 | 3,652,084 |
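# --- Editorial usage sketch (not part of the record above): rank features by a
# --- separability score while penalising correlation with already-chosen ones.
X = np.random.rand(200, 5)
Mcorr = np.corrcoef(X, rowvar=False)   # (5, 5) feature-correlation matrix
criterios = np.random.rand(5)          # e.g. one Fisher score per feature
ordem, M = selecaoEscalar(Mcorr, criterios, N=3)
print(ordem, M)                        # the 3 best feature indices and their scores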
def sum_by_letter(list_of_dicts, letter):
"""
    :param list_of_dicts: A list of dictionaries, each with 'letter' and 'number' keys.
    :param letter: The letter whose associated 'number' values should be summed.
    :return: The sum of the 'number' values of all matching entries.
    """
total = 0
for d in list_of_dicts:
if d['letter'] == letter:
total += d['number']
return total | bffc5990eaa9e352d60d86d40b8a8b7070fd00c0 | 3,652,085 |
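# --- Editorial usage sketch (not part of the record above):
records = [{'letter': 'a', 'number': 1},
           {'letter': 'b', 'number': 2},
           {'letter': 'a', 'number': 3}]
sum_by_letter(records, 'a')  # -> 4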
def gate_settle(gate):
""" Return gate settle times """
return 0 | f452a343550c4f7be2133119c89dc386665921c4 | 3,652,086 |
import psutil
import subprocess
def parse_csr_domains(csr_pem=None, csr_pem_filepath=None, submitted_domain_names=None):
"""
    Checks the names found in the CSR against `submitted_domain_names`.
    This routine will use crypto/certbot if available;
    if not, openssl is used via subprocesses.
    `submitted_domain_names` should be all lowercase.
"""
log.info("parse_csr_domains >")
if openssl_crypto and certbot_crypto_util:
load_func = openssl_crypto.load_certificate_request
found_domains = certbot_crypto_util._get_names_from_cert_or_req(
csr_pem, load_func, typ=openssl_crypto.FILETYPE_PEM
)
else:
log.debug(".parse_csr_domains > openssl fallback")
# fallback onto OpenSSL
# openssl req -in MYCSR -noout -text
with psutil.Popen(
[openssl_path, "req", "-in", csr_pem_filepath, "-noout", "-text"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as proc:
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("Error loading {0}: {1}".format(csr_pem_filepath, err))
if PY3:
out = out.decode("utf8")
# parse the sans first, then add the commonname
found_domains = san_domains_from_text(out)
# note the conditional whitespace before/after CN
common_name = RE_openssl_x509_subject.search(out)
if common_name is not None:
found_domains.insert(0, common_name.group(1))
# ensure our CERT matches our submitted_domain_names
if submitted_domain_names is not None:
for domain in found_domains:
if domain not in submitted_domain_names:
raise ValueError("domain %s not in submitted_domain_names" % domain)
for domain in submitted_domain_names:
if domain not in found_domains:
raise ValueError("domain %s not in found_domains" % domain)
return sorted(found_domains) | b09fcc35d70c0f08641dceda4a0201fa8c94c7d8 | 3,652,087 |
def fov_gc(lons, lats):
"""Field of view great circle.
    Parameters
----------
lons: [float]
Field of view longitudes (degE).
lats: [float]
Field of view latitudes (degN).
    Returns
------
geojson.Feature
GeoJSON field of view polygon.
"""
return geo_polygon(lons, lats, 'Limb', 'Limb field of view', 'blue') | 3013648c04e5626c51995288afd6e441d3afef30 | 3,652,088 |
import logging
def return_covid_data() -> tuple[dict, dict]:
"""A function that acts as a getter method, allowing for functions in main
to get the national and local COVID data and then display the values on
the dashboard.
Returns:
        tuple: (england_data, local_data). A tuple of the national (England)
            and local COVID data; returning both at once removes the need for
            excessive API calls, since the current data can be retrieved
            without another API call.
"""
logging.debug("Entering and exiting the return_covid_data function.")
logging.info(f"{(england_data, local_data)} returned")
return (england_data, local_data) | 723e93d4a878d5a9d8a28dd90cefe83bc2f00be4 | 3,652,089 |
import ietf.sync.rfceditor
from ietf.doc.templatetags.mail_filters import std_level_prompt
def request_publication(request, name):
"""Request publication by RFC Editor for a document which hasn't
been through the IESG ballot process."""
class PublicationForm(forms.Form):
subject = forms.CharField(max_length=200, required=True)
body = forms.CharField(widget=forms.Textarea, required=True, strip=False)
doc = get_object_or_404(Document, type="draft", name=name, stream__in=("iab", "ise", "irtf"))
if not is_authorized_in_doc_stream(request.user, doc):
permission_denied(request, "You do not have the necessary permissions to view this page.")
consensus_event = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
m = Message()
m.frm = request.user.person.formatted_email()
(m.to, m.cc) = gather_address_lists('pubreq_rfced',doc=doc).as_strings()
m.by = request.user.person
next_state = State.objects.get(used=True, type="draft-stream-%s" % doc.stream.slug, slug="rfc-edit")
if request.method == 'POST' and not request.POST.get("reset"):
form = PublicationForm(request.POST)
if form.is_valid():
events = []
# start by notifying the RFC Editor
response, error = ietf.sync.rfceditor.post_approved_draft(settings.RFC_EDITOR_SYNC_NOTIFICATION_URL, doc.name)
if error:
return render(request, 'doc/draft/rfceditor_post_approved_draft_failed.html',
dict(name=doc.name,
response=response,
error=error))
m.subject = form.cleaned_data["subject"]
m.body = form.cleaned_data["body"]
m.save()
if doc.group.acronym != "none":
m.related_groups.set([doc.group])
m.related_docs.set([doc])
send_mail_message(request, m)
# IANA copy
(m.to, m.cc) = gather_address_lists('pubreq_rfced_iana',doc=doc).as_strings()
send_mail_message(request, m, extra=extra_automation_headers(doc))
e = DocEvent(doc=doc, type="requested_publication", rev=doc.rev, by=request.user.person)
e.desc = "Sent request for publication to the RFC Editor"
e.save()
events.append(e)
# change state
prev_state = doc.get_state(next_state.type_id)
if next_state != prev_state:
doc.set_state(next_state)
e = add_state_change_event(doc, request.user.person, prev_state, next_state)
if e:
events.append(e)
doc.save_with_history(events)
return redirect('ietf.doc.views_doc.document_main', name=doc.name)
else:
if doc.intended_std_level_id in ("std", "ds", "ps", "bcp"):
action = "Protocol Action"
else:
action = "Document Action"
subject = "%s: '%s' to %s (%s-%s.txt)" % (action, doc.title, std_level_prompt(doc), doc.name, doc.rev)
body = generate_publication_request(request, doc)
form = PublicationForm(initial=dict(subject=subject,
body=body))
return render(request, 'doc/draft/request_publication.html',
dict(form=form,
doc=doc,
message=m,
next_state=next_state,
consensus_filled_in=(
True if (doc.stream_id and doc.stream_id=='ietf')
else (consensus_event != None and consensus_event.consensus != None)),
),
) | d6377b08c5eae6740e98a154d991ba268ed37815 | 3,652,090 |
def strip_trailing_characters(unstripped_string, tail):
"""
Strip the tail from a string.
:param unstripped_string: The string to strip. Ex: "leading"
:param tail: The trail to remove. Ex: "ing"
:return: The stripped string. Ex: "lead"
"""
if unstripped_string.endswith(str(tail)):
        # Slice the tail off the end of the string.
        return unstripped_string[:-len(tail)]
else:
return unstripped_string | dbd09fe9a58b0fb3072a680a9c7ac701257ebfcd | 3,652,091 |
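# --- Editorial usage sketch (not part of the record above), matching the docstring example:
strip_trailing_characters("leading", "ing")  # -> "lead"
strip_trailing_characters("leading", "xyz")  # -> "leading" (tail absent, unchanged)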
def is_prime(x):
""" Prove if number is prime """
if x == 0 or x == 1:
return 0
for i in range(2, x//2 +1):
if x % i == 0:
return 0
return 1 | 63980c49b9ea05458ecafe874073805df50ce1d0 | 3,652,092 |
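# --- Editorial usage sketch (not part of the record above):
[n for n in range(20) if is_prime(n)]  # -> [2, 3, 5, 7, 11, 13, 17, 19]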
import pickle
def obj_to_str(obj, encoding='utf8') -> str:
"""
Examples:
>>> d = dict(a=1, b=2)
>>> assert isinstance(obj_to_str(d), str)
"""
b = pickle.dumps(obj)
return bytes_to_str(b, encoding=encoding) | 76c87052596aefcbd15a5135379ff2a3512bed77 | 3,652,093 |
from pathlib import Path
from os import listdir, path
def deploy_sqlfiles(engine: Engine, directory: str, message: str, display_output: bool = False, scripting_variables: dict = None) -> bool:
"""Run every SQL script file found in given directory and print the executed file names.
If any file in directory cannot be deployed after multiple tries, raise an exeption and
list failed files to user.
Parameters
----------
engine
SQL Alchemy engine.
directory
Path of directory holding the SQL script files.
message
Message passed to OperationManager.
display_output
Indicator to print script output.
    scripting_variables
        Variables passed to the SQL scripts.
Raises
------
ValueError
        If engine is not an instance of sqlalchemy.engine.Engine.
RuntimeError
If any of the files in given directory fail to deploy after multiple tries.
"""
with OperationManager(message):
if isinstance(engine, dict):
raise ValueError(
"First parameter of function 'deploy_sqlfiles' should be instance of sqlalchemy engine. Check your custom actions!")
if not Path(directory).is_dir():
logger.warning("Directory not found: " + directory)
return False
files = [path.join(directory, f)
for f in listdir(directory) if f.endswith('.sql')]
failed = sql_file_loop(deploy_sql_from_file, engine,
display_output, scripting_variables, file_list=files, max_loop=len(files))
if len(failed) > 0:
error_msg = "Failed to deploy the following files:\n{}".format(
'\n'.join(failed.keys()))
error_msg = error_msg + '\nSee log for error details.'
for fail_object, fail_messages in failed.items():
logger.debug(f'----- Error for object {fail_object} -----')
logger.debug(''.join(fail_messages))
raise RuntimeError(error_msg)
return True | e26fa2f77069b4bbd5415c7e6c6c2aa5f002839a | 3,652,094 |
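# --- Editorial usage sketch (not part of the record above); the connection URL
# --- and directory are placeholders, and OperationManager / sql_file_loop /
# --- deploy_sql_from_file come from the host package.
from sqlalchemy import create_engine
engine = create_engine("mssql+pyodbc://user:pass@server/db?driver=ODBC+Driver+17+for+SQL+Server")
deploy_sqlfiles(engine, "./database/procedures", "Deploying stored procedures",
                display_output=True, scripting_variables={"env": "dev"})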
import numpy as np

def sample_ellipsoid(p0, covmat, size=1):
"""
Produce an ellipsoid of walkers around an initial parameter value,
according to a covariance matrix.
:param p0: The initial parameter value.
:param covmat:
The covariance matrix. Must be symmetric-positive definite or
it will raise the exception numpy.linalg.LinAlgError
:param size: The number of samples to produce.
"""
return np.random.multivariate_normal(
np.atleast_1d(p0), np.atleast_2d(covmat), size=size
) | a09448f29920a7758a549ede80608c8c4dd9892a | 3,652,095 |
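# --- Editorial usage sketch (not part of the record above):
walkers = sample_ellipsoid([0.0, 0.0], np.eye(2) * 0.01, size=100)
walkers.shape  # -> (100, 2): one row per walker around p0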
import tensorflow as tf

def avg_pool_2d(x, size=(2, 2), stride=(2, 2), name='avg_pooling', padding='VALID'):
"""
Average pooling 2D Wrapper
:param x: (tf.tensor) The input to the layer (N,H,W,C).
    :param size: (tuple) The size of the pooling window.
    :param stride: (tuple) The stride of the pooling window.
    :param name: (string) Scope name.
    :param padding: (string) Padding scheme, 'VALID' or 'SAME'.
    :return: The output is the same input but halved in both width and height (N,H/2,W/2,C) with the defaults.
"""
size_x, size_y = size
stride_x, stride_y = stride
return tf.nn.avg_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding=padding, name=name) | b39dfed959f43346c48d13b7e41601999c1b7f8b | 3,652,096 |
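# --- Editorial usage sketch (not part of the record above; TF 2.x eager mode assumed):
x = tf.random.normal([1, 4, 4, 3])
avg_pool_2d(x).shape  # -> TensorShape([1, 2, 2, 3])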
import logging
def patch_base_handler(BaseHandler, log=None):
"""Patch HubAuthenticated into a base handler class
so anything inheriting from BaseHandler uses Hub authentication.
This works *even after* subclasses have imported and inherited from BaseHandler.
.. versionadded: 1.5
Made available as an importable utility
"""
if log is None:
log = logging.getLogger()
if HubAuthenticatedHandler not in BaseHandler.__bases__:
new_bases = (HubAuthenticatedHandler,) + BaseHandler.__bases__
log.info(
"Patching auth into {mod}.{name}({old_bases}) -> {name}({new_bases})".format(
mod=BaseHandler.__module__,
name=BaseHandler.__name__,
old_bases=', '.join(
_nice_cls_repr(cls) for cls in BaseHandler.__bases__
),
new_bases=', '.join(_nice_cls_repr(cls) for cls in new_bases),
)
)
BaseHandler.__bases__ = new_bases
# We've now inserted our class as a parent of BaseHandler,
# but we also need to ensure BaseHandler *itself* doesn't
# override the public tornado API methods we have inserted.
# If they are defined in BaseHandler, explicitly replace them with our methods.
for name in ("get_current_user", "get_login_url"):
if name in BaseHandler.__dict__:
log.debug(
f"Overriding {BaseHandler}.{name} with HubAuthenticatedHandler.{name}"
)
method = getattr(HubAuthenticatedHandler, name)
setattr(BaseHandler, name, method)
return BaseHandler | 132cc3151793e0c033cdf0506fe45b33ddcf2ad6 | 3,652,097 |
from django.contrib.auth import get_user_model
def get_username_field() -> str:
"""Get custom username field.
Returns:
str: username field.
"""
user_model = get_user_model()
return getattr(user_model, "USERNAME_FIELD", "username") | 45dfe6888d8c69e012b98a0edd1b639b7bf56af7 | 3,652,098 |
def get_edited_file_name():
"""
Gets the current open file in xcode
"""
script = '''
tell application "Xcode"
set last_word_in_main_window to (word -1 of (get name of window 1))
set current_document to document 1 whose name ends with last_word_in_main_window
set current_document_path to path of current_document
return current_document_path
end tell
'''
val = run_script(script, [])
if len(val) > 0:
debug_log("Currently editing " + val + " in Xcode, we'll try to use that.")
else:
error_log("Failed to get current edited document in Xcode! Is Xcode running, and is a source file open?")
return val | f919f8475dd553a6b907df1d42f2c48b9d91b81d | 3,652,099 |