content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M) |
---|---|---|
from typing import Union
from typing import Tuple
from typing import Dict
from typing import Any
import io
def format_result(result: Union[Pose, PackedPose]) -> Tuple[str, Dict[Any, Any]]:
"""
    :param result: Pose or PackedPose object.
:return: tuple of (pdb_string, metadata)
Given a `Pose` or `PackedPose` object, return a tuple containing
the pdb string and a scores dictionary.
"""
_pdbstring = io.to_pdbstring(result)
_scores_dict = io.to_dict(result)
_scores_dict.pop("pickled_pose", None)
return (_pdbstring, _scores_dict) | 15b3c6ce32a3a5ab860d045ba8679e0299f122f6 | 3,126 |
def str_array(listString):
    """
    Because the way that Python prints an array is different from CPLEX,
    this function produces the proper CPLEX writing of Arrays
    :param listString: A list of values
    :type listString: List[]
    :returns: The String printing of the array, in CPLEX format
    :rtype: String
    """
    ret = "{"
    for i in range(0, len(listString) - 1):
        ret = ret + "\"" + listString[i] + "\","
    ret = ret + "\"" + listString[-1] + "\"}"
    return ret | 46737bb05f310387d69be6516ebd90afd3d91b08 | 3,127 |
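A minimal usage sketch of str_array (assuming the last-element indexing fix above), showing the CPLEX-style set literal it produces:

print(str_array(["a", "b", "c"]))  # -> {"a","b","c"}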
import h5py
def read_gene_annos(phenoFile):
"""Read in gene-based metadata from an HDF5 file
Args:
phenoFile (str): filename for the relevant HDF5 file
Returns:
dictionary with feature annotations
"""
fpheno = h5py.File(phenoFile,'r')
# Feature annotations:
geneAnn = {}
for key in fpheno['gene_info'].keys():
geneAnn[key] = fpheno['gene_info'][key][:]
fpheno.close()
return geneAnn | 86e1b26a5600e1d52a3beb127ef8e7c3ac41721a | 3,128 |
from typing import Optional
import binascii
def hex_xformat_decode(s: str) -> Optional[bytes]:
"""
Reverse :func:`hex_xformat_encode`.
The parameter is a hex-encoded BLOB like
.. code-block:: none
"X'CDE7A24B1A9DBA3148BCB7A0B9DA5BB6A424486C'"
Original purpose and notes:
- SPECIAL HANDLING for BLOBs: a string like ``X'01FF'`` means a hex-encoded
BLOB. Titanium is rubbish at BLOBs, so we encode them as special string
literals.
- SQLite uses this notation: https://sqlite.org/lang_expr.html
- Strip off the start and end and convert it to a byte array:
http://stackoverflow.com/questions/5649407
"""
if len(s) < 3 or not s.startswith("X'") or not s.endswith("'"):
return None
return binascii.unhexlify(s[2:-1]) | 8f868d4bbd5b6843632f9d3420fe239f688ffe15 | 3,129 |
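A short, hedged usage sketch of hex_xformat_decode: a well-formed X'...' literal decodes to raw bytes, anything else returns None.

print(hex_xformat_decode("X'01FF'"))  # b'\x01\xff'
print(hex_xformat_decode("01FF"))     # None (missing the X'...' wrapper)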
import numpy as np
def threshold(data, direction):
    """
    Find a suitable threshold value which maximizes explained variance of the data projected onto direction.
    NOTE: the chosen hyperplane would be described mathematically as $ x \cdot direction = threshold $.
    """
projected_data = np.inner(data, direction)
sorted_x = np.sort(projected_data)
best_sep_index = explained_variance_list(sorted_x).argmax()
return (sorted_x[best_sep_index] + sorted_x[best_sep_index + 1]) / 2 | 7fdab7f87c3c2e6d937da146ce5a27074ea92f52 | 3,130 |
def StrType_any(*x):
""" Ignores all parameters to return a StrType """
return StrType() | d1faac14a91cd6149811a553113b25f34d5d4a54 | 3,131 |
def height(tree):
"""Return the height of tree."""
if tree.is_empty():
return 0
else:
        return 1 + max(height(tree.left_child()),
                       height(tree.right_child())) | a469216fc13ed99acfb1bab8db7e031acc759f90 | 3,133 |
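A minimal sketch of how height might be exercised; the Node class below is hypothetical, written only to satisfy the is_empty/left_child/right_child interface the function assumes.

class Node:
    """Hypothetical binary-tree node exposing the interface height() expects."""
    def __init__(self, value=None, left=None, right=None):
        self.value, self.left, self.right = value, left, right
    def is_empty(self):
        return self.value is None
    def left_child(self):
        # Return an empty node when there is no child, so height() bottoms out at 0
        return self.left if self.left is not None else Node()
    def right_child(self):
        return self.right if self.right is not None else Node()

tree = Node(1, Node(2, Node(4)), Node(3))
print(height(tree))  # 3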
def applyTelluric(model, tell_alpha=1.0, airmass=1.5, pwv=0.5):
"""
Apply the telluric model on the science model.
Parameters
----------
model : model object
BT Settl model
    tell_alpha : float
telluric scaling factor (the power on the flux)
Returns
-------
model : model object
        BT Settl model multiplied by the corresponding telluric model
"""
# read in a telluric model
wavelow = model.wave[0] - 10
wavehigh = model.wave[-1] + 10
#telluric_model = smart.getTelluric(wavelow=wavelow, wavehigh=wavehigh, alpha=alpha, airmass=airmass)
telluric_model = smart.Model()
telluric_model.wave, telluric_model.flux = smart.InterpTelluricModel(wavelow=wavelow, wavehigh=wavehigh, airmass=airmass, pwv=pwv)
# apply the telluric alpha parameter
telluric_model.flux = telluric_model.flux**(tell_alpha)
#if len(model.wave) > len(telluric_model.wave):
# print("The model has a higher resolution ({}) than the telluric model ({})."\
# .format(len(model.wave),len(telluric_model.wave)))
# model.flux = np.array(smart.integralResample(xh=model.wave,
# yh=model.flux, xl=telluric_model.wave))
# model.wave = telluric_model.wave
# model.flux *= telluric_model.flux
#elif len(model.wave) < len(telluric_model.wave):
## This should be always true
telluric_model.flux = np.array(smart.integralResample(xh=telluric_model.wave, yh=telluric_model.flux, xl=model.wave))
telluric_model.wave = model.wave
model.flux *= telluric_model.flux
#elif len(model.wave) == len(telluric_model.wave):
# model.flux *= telluric_model.flux
return model | 7eaf7cafe1f8b5f4c273858f289d2c1c3865680b | 3,134 |
def max_power_rule(mod, g, tmp):
"""
**Constraint Name**: DAC_Max_Power_Constraint
**Enforced Over**: DAC_OPR_TMPS
Power consumption cannot exceed capacity.
"""
return (
mod.DAC_Consume_Power_MW[g, tmp]
<= mod.Capacity_MW[g, mod.period[tmp]] * mod.Availability_Derate[g, tmp]
) | 2c1845253524a8383f2256a7d67a8231c2a69485 | 3,135 |
def check_archs(
copied_libs, # type: Mapping[Text, Mapping[Text, Text]]
require_archs=(), # type: Union[Text, Iterable[Text]]
stop_fast=False, # type: bool
):
# type: (...) -> Set[Union[Tuple[Text, FrozenSet[Text]], Tuple[Text, Text, FrozenSet[Text]]]] # noqa: E501
"""Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info``, or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``, or the string "universal2",
corresponding to ``['x86_64', 'arm64']``.
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
"""
if isinstance(require_archs, str):
require_archs = _ARCH_LOOKUP.get(require_archs, [require_archs])
require_archs_set = frozenset(require_archs)
bads = (
[]
) # type: List[Union[Tuple[Text, FrozenSet[Text]], Tuple[Text, Text, FrozenSet[Text]]]] # noqa: E501
for depended_lib, dep_dict in copied_libs.items():
depended_archs = get_archs(depended_lib)
for depending_lib, install_name in dep_dict.items():
depending_archs = get_archs(depending_lib)
all_required = depending_archs | require_archs_set
all_missing = all_required.difference(depended_archs)
if len(all_missing) == 0:
continue
required_missing = require_archs_set.difference(depended_archs)
if len(required_missing):
bads.append((depending_lib, required_missing))
else:
bads.append((depended_lib, depending_lib, all_missing))
if stop_fast:
return set(bads)
return set(bads) | d500e0b89ca3edd4e76630a628d9e4d970fadbf1 | 3,137 |
def create_data_table(headers, columns, match_tol=20) -> pd.DataFrame:
"""Based on headers and column data, create the data table."""
    # Store the top y-coordinates of all of the row headers
header_tops = np.array([h.top for h in headers])
# Set up the grid: nrows by ncols
nrows = len(headers)
ncols = len(columns) + 1
# Initialize the grid
grid = np.empty((nrows, ncols), dtype=object)
grid[:, :] = "" # Default value
# Add in the headers
grid[:, 0] = [h.text for h in headers]
# Loop over each column
for col_num, xval in enumerate(columns):
col = columns[xval]
word_tops = np.array([w.top for w in col])
# Find closest row header
for row_num, h in enumerate(headers):
            # Find closest word to this row header
word_diff = np.abs(word_tops - h.top)
word_diff[word_diff > match_tol] = np.nan
# Make sure the row header is vertically close enough
if np.isnan(word_diff).sum() < len(word_diff):
# Get the matching word for this row header
notnull = ~np.isnan(word_diff)
order = np.argsort(word_diff[notnull])
for word_index in np.where(notnull)[0][order]:
word = col[word_index]
# IMPORTANT: make sure this is the closest row header
# Sometimes words will match to more than one header
header_diff = np.abs(header_tops - word.top)
header_index = np.argmin(header_diff)
closest_header = headers[header_index]
if closest_header == h:
grid[row_num, col_num + 1] = col[word_index].text
break
return pd.DataFrame(grid) | 56b1cb21afa7813138d03b56849b594e18664348 | 3,138 |
def interp2d_vis(model, model_lsts, model_freqs, data_lsts, data_freqs, flags=None,
kind='cubic', flag_extrapolate=True, medfilt_flagged=True, medfilt_window=(3, 7),
fill_value=None):
"""
Interpolate complex visibility model onto the time & frequency basis of
a data visibility. See below for notes on flag propagation if flags is provided.
Parameters:
-----------
model : type=DataContainer, holds complex visibility for model
keys are antenna-pair + pol tuples, values are 2d complex visibility
with shape (Ntimes, Nfreqs).
model_lsts : 1D array of the model time axis, dtype=float, shape=(Ntimes,)
model_freqs : 1D array of the model freq axis, dtype=float, shape=(Nfreqs,)
data_lsts : 1D array of the data time axis, dtype=float, shape=(Ntimes,)
data_freqs : 1D array of the data freq axis, dtype=float, shape=(Nfreqs,)
flags : type=DataContainer, dictionary containing model flags. Can also contain model wgts
as floats and will convert to booleans appropriately.
kind : type=str, kind of interpolation, options=['linear', 'cubic', 'quintic']
medfilt_flagged : type=bool, if True, before interpolation, replace flagged pixels with output from
a median filter centered on each flagged pixel.
medfilt_window : type=tuple, extent of window for median filter across the (time, freq) axes.
Even numbers are rounded down to odd number.
flag_extrapolate : type=bool, flag extrapolated data_lsts if True.
fill_value : type=float, if fill_value is None, extrapolated points are extrapolated
else they are filled with fill_value.
Output: (new_model, new_flags)
-------
new_model : interpolated model, type=DataContainer
new_flags : flags associated with interpolated model, type=DataContainer
Notes:
------
If the data has flagged pixels, it is recommended to turn medfilt_flagged to True. This runs a median
filter on the flagged pixels and replaces their values with the results, but they remain flagged.
This happens *before* interpolation. This means that interpolation near flagged pixels
    isn't significantly biased by their presence.
In general, if flags are fed, flags are propagated if a flagged pixel is a nearest neighbor
of an interpolated pixel.
"""
# make flags
new_model = odict()
new_flags = odict()
# get nearest neighbor points
freq_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_freqs - x)), data_freqs)))
time_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_lsts - x)), data_lsts)))
freq_nn, time_nn = np.meshgrid(freq_nn, time_nn)
# get model indices meshgrid
mod_F, mod_L = np.meshgrid(np.arange(len(model_freqs)), np.arange(len(model_lsts)))
# raise warning on flags
if flags is not None and medfilt_flagged is False:
print("Warning: flags are fed, but medfilt_flagged=False. \n"
"This may cause weird behavior of interpolated points near flagged data.")
# ensure flags are booleans
if flags is not None:
if np.issubdtype(flags[list(flags.keys())[0]].dtype, np.floating):
            flags = DataContainer(odict(list(map(lambda k: (k, ~flags[k].astype(bool)), flags.keys()))))
# loop over keys
for i, k in enumerate(list(model.keys())):
# get model array
m = model[k]
# get real and imag separately
real = np.real(m)
imag = np.imag(m)
# median filter flagged data if desired
if medfilt_flagged and flags is not None:
# get extent of window along freq and time
f_ext = int((medfilt_window[1] - 1) / 2.)
t_ext = int((medfilt_window[0] - 1) / 2.)
# set flagged data to nan
real[flags[k]] *= np.nan
imag[flags[k]] *= np.nan
# get flagged indices
f_indices = mod_F[flags[k]]
l_indices = mod_L[flags[k]]
# construct fill arrays
            real_fill = np.empty(len(f_indices), float)
            imag_fill = np.empty(len(f_indices), float)
# iterate over flagged data and replace w/ medfilt
for j, (find, tind) in enumerate(zip(f_indices, l_indices)):
tlow, thi = tind - t_ext, tind + t_ext + 1
flow, fhi = find - f_ext, find + f_ext + 1
ll = 0
while True:
# iterate until window has non-flagged data in it
# with a max of 10 iterations
if tlow < 0:
tlow = 0
if flow < 0:
flow = 0
r_med = np.nanmedian(real[tlow:thi, flow:fhi])
i_med = np.nanmedian(imag[tlow:thi, flow:fhi])
tlow -= 2
thi += 2
flow -= 2
fhi += 2
ll += 1
if not (np.isnan(r_med) or np.isnan(i_med)):
break
if ll > 10:
break
real_fill[j] = r_med
imag_fill[j] = i_med
# fill real and imag
real[l_indices, f_indices] = real_fill
imag[l_indices, f_indices] = imag_fill
# flag residual nans
resid_nans = np.isnan(real) + np.isnan(imag)
flags[k] += resid_nans
# replace residual nans
real[resid_nans] = 0.0
imag[resid_nans] = 0.0
# propagate flags to nearest neighbor
if flags is not None:
f = flags[k][time_nn, freq_nn]
# check f is boolean type
if np.issubdtype(f.dtype, np.floating):
                f = ~(f.astype(bool))
else:
f = np.zeros_like(real, bool)
# interpolate
interp_real = interpolate.interp2d(model_freqs, model_lsts, real, kind=kind, copy=False, bounds_error=False, fill_value=fill_value)(data_freqs, data_lsts)
interp_imag = interpolate.interp2d(model_freqs, model_lsts, imag, kind=kind, copy=False, bounds_error=False, fill_value=fill_value)(data_freqs, data_lsts)
# flag extrapolation if desired
if flag_extrapolate:
time_extrap = np.where((data_lsts > model_lsts.max() + 1e-6) | (data_lsts < model_lsts.min() - 1e-6))
freq_extrap = np.where((data_freqs > model_freqs.max() + 1e-6) | (data_freqs < model_freqs.min() - 1e-6))
f[time_extrap, :] = True
f[:, freq_extrap] = True
# rejoin
new_model[k] = interp_real + 1j * interp_imag
new_flags[k] = f
return DataContainer(new_model), DataContainer(new_flags) | 6ac4fad738691f470e36252fc7544e857c8fdca0 | 3,139 |
def eps_divide(n, d, eps=K.epsilon()):
""" perform division using eps """
return (n + eps) / (d + eps) | 2457e5fc4458521b4098cfb144b7ff07e163ba9c | 3,140 |
import requests
def get_mc_uuid(username):
"""Gets the Minecraft UUID for a username"""
url = f"https://api.mojang.com/users/profiles/minecraft/{username}"
res = requests.get(url)
if res.status_code == 204:
raise ValueError("Users must have a valid MC username")
else:
return res.json().get("id") | fceeb1d9eb096cd3e29f74d389c7c851422ec022 | 3,141 |
def api(default=None, api=None, **kwargs):
"""Returns the api instance in which this API function is being ran"""
return api or default | 3d636408914e2888f4dc512aff3f729512849ddf | 3,143 |
def parse_json(data):
    """Parses the PurpleAir JSON file, returning a Sensors protobuf."""
channel_a = []
channel_b = {}
for result in data["results"]:
if "ParentID" in result:
channel_b[result["ParentID"]] = result
else:
channel_a.append(result)
sensors = list(_parse_results(channel_a, channel_b))
return model_pb2.Sensors(sensors=sensors) | 11ded094b71d6557cc7c1c7ed489cdcbfe881e0b | 3,144 |
import logging
import traceback
def asynchronize_tornado_handler(handler_class):
"""
A helper function to turn a blocking handler into an async call
    :param handler_class: a tornado RequestHandler which should be made asynchronous
:return: a class which does the same work on a threadpool
"""
class AsyncTornadoHelper(handler_class):
"""
A hollow wrapper class which runs requests asynchronously on a threadpool
"""
def _do_work_and_report_error(self, work):
try:
# call the "real" method from the handler_class
work()
except HTTPError as ex:
# request handler threw uncaught error
logging.error(traceback.format_exc())
# send http errors to client
self.write(str(ex))
self.set_status(ex.status_code)
except Exception:
# request handler threw uncaught error
logging.error(traceback.format_exc())
# send 500 error to client. Do not pass on error message
self.write("500 Internal Server Error \n")
self.set_status(500)
finally:
# finished needs to be reported from main tornado thread
tornado.ioloop.IOLoop.instance().add_callback(
# report finished to main tornado thread:
lambda: self.finish()
)
@asynchronous
def get(self, path=None):
# bind the "real" method from the handler_class to run in another thread
blocking_method = lambda: self._do_work_and_report_error(
lambda: handler_class.get(self, path))
# launch in another thread
REQUEST_HANDLER_THREAD_POOL.run_as_asynch(blocking_method)
@asynchronous
def put(self, path=None):
# bind the "real" method from the handler_class to run in another thread
blocking_method = lambda: self._do_work_and_report_error(
lambda: handler_class.put(self, path))
# launch in another thread
REQUEST_HANDLER_THREAD_POOL.run_as_asynch(blocking_method)
@asynchronous
def post(self, path=None):
# bind the "real" method from the handler_class to run in another thread
blocking_method = lambda: self._do_work_and_report_error(
lambda: handler_class.post(self, path))
# launch in another thread
REQUEST_HANDLER_THREAD_POOL.run_as_asynch(blocking_method)
# return the wrapped class instead of the original for Tornado to run asynchronously
return AsyncTornadoHelper | 0e7d3b46b199cdf1aa1a31a19ed3d54f0abbce16 | 3,145 |
def convert_single_example(ex_index, example: InputExample,
tokenizer, label_map, dict_builder=None):
"""Converts a single `InputExample` into a single `InputFeatures`."""
# label_map = {"B": 0, "M": 1, "E": 2, "S": 3}
# tokens_raw = tokenizer.tokenize(example.text)
tokens_raw = list(example.text)
labels_raw = example.labels
# Account for [CLS] and [SEP] with "- 2"
# The convention in BERT is:
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
label_ids = []
for token, label in zip(tokens_raw, labels_raw):
tokens.append(token)
label_ids.append(label_map[label])
input_ids = tokenizer.convert_tokens_to_ids(tokens)
if dict_builder is None:
input_dicts = np.zeros_like(tokens_raw, dtype=np.int64)
else:
input_dicts = dict_builder.extract(tokens)
seq_length = len(tokens)
assert seq_length == len(input_ids)
assert seq_length == len(input_dicts)
assert seq_length == len(label_ids)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
if ex_index < 1:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % example.guid)
tf.logging.info("tokens: %s" % " ".join(
[utils.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_dicts: %s" % " ".join([str(x) for x in input_dicts]))
tf.logging.info("labels: %s" % " ".join([str(x) for x in example.labels]))
tf.logging.info("labels_ids: %s" % " ".join([str(x) for x in label_ids]))
feature = InputFeatures(
input_ids=input_ids,
input_dicts=input_dicts,
label_ids=label_ids,
seq_length=seq_length)
return feature | 3dca77aa191f821c9785c8431c4637e47582a588 | 3,146 |
async def converter_self_interaction_target(client, interaction_event):
"""
Internal converter for returning the received interaction event's target. Applicable for context application
commands.
This function is a coroutine.
Parameters
----------
client : ``Client``
The client who received the respective ``InteractionEvent``.
interaction_event : ``InteractionEvent``
The received application command interaction.
Returns
-------
target : `None` or ``DiscordEntity``
The resolved entity if any.
"""
if interaction_event.type is not INTERACTION_TYPE_APPLICATION_COMMAND:
return None
return interaction_event.interaction.target | 66975897b9f8a7d0b224f80d1827af3ea07eb51d | 3,148 |
from typing import List
def publication_pages(publication_index_page) -> List[PublicationPage]:
"""Fixture providing 10 PublicationPage objects attached to publication_index_page"""
rv = []
for _ in range(0, 10):
p = _create_publication_page(
f"Test Publication Page {_}", publication_index_page
)
rv.append(p)
return rv | 5d9b75bcbdc5c9485cc83ddc2befeb946f447227 | 3,149 |
from typing import List
def rp_completion(
rp2_metnet,
sink,
rp2paths_compounds,
rp2paths_pathways,
cache: rrCache = None,
upper_flux_bound: float = default_upper_flux_bound,
lower_flux_bound: float = default_lower_flux_bound,
max_subpaths_filter: int = default_max_subpaths_filter,
logger: Logger = getLogger(__name__)
) -> List[rpPathway]:
"""Process to the completion of metabolic pathways
generated by RetroPath2.0 and rp2paths.
(1) rp2paths generates a sets of master pathways which
each of them is a set of chemical transformations.
(2) Each chemical transformation refers to one or
multiple reaction rule.
(3) Each reaction rule comes from one or multiple
template (original) chemical reaction
The completion consists in:
1. exploring all possible metabolic pathways through
steps (2) and (3)
2. putting back chemical species removed during reaction
rules building process
The completion is done for all master pathways of step (1).
Parameters
----------
rp2_metnet: str
Path to the file containing the metabolic network
sink: str
Path to the file containing the list of
species in the sink
rp2paths_compounds: str
Path to the file containing the chemical
species involved in master metabolic pathways
rp2paths_pathways: str
Path to the file containing the master metabolic
pathways
cache: rrCache, optional
Cache that contains reaction rules data
upper_flux_bound: float, optional
Upper flux bound for all new reactions created
(default: default_upper_flux_bound from Args file),
lower_flux_bound: float, optional
Lower flux bound for all new reactions created
(default: default_lower_flux_bound from Args file),
max_subpaths_filter: int, optional
Number of pathways (best) kept per master pathway
(default: 10)
logger: Logger, optional
Returns
-------
List of rpPathway objects
"""
if cache is None:
cache = rrCache(
attrs=[
'rr_reactions',
'template_reactions',
'cid_strc',
'deprecatedCompID_compid',
]
)
## READ
__rp2paths_compounds_in_cache(
infile=rp2paths_compounds,
cache=cache,
logger=logger
)
pathways, transfos = __read_pathways(
infile=rp2paths_pathways,
logger=logger
)
ec_numbers = __read_rp2_metnet(
infile=rp2_metnet,
logger=logger
)
sink_molecules = __read_sink(
infile=sink,
logger=logger
)
# COMPLETE TRANSFORMATIONS
full_transfos = __complete_transformations(
transfos=transfos,
ec_numbers=ec_numbers,
cache=cache,
logger=logger
)
# GENERATE THE COMBINATORY OF SUB-PATHWAYS
# Build pathways over:
# - multiple reaction rules per transformation (TRS) and
# - multiple template reactions per reaction rule
pathway_combinatorics = __build_pathway_combinatorics(
full_transfos,
pathways,
cache=cache,
logger=logger
)
# BUILD + RANK SUB-PATHWAYS
all_pathways = __build_all_pathways(
pathways=pathway_combinatorics,
transfos=full_transfos,
sink_molecules=sink_molecules,
rr_reactions=cache.get('rr_reactions'),
compounds_cache=cache.get('cid_strc'),
max_subpaths_filter=max_subpaths_filter,
# compartment_id=compartment_id,
lower_flux_bound=lower_flux_bound,
upper_flux_bound=upper_flux_bound,
logger=logger
)
# # Return flat list of overall topX pathways
# return sum(
# [
# pathways
# for pathways in all_pathways.values()
# ], [])[:max_subpaths_filter]
return all_pathways
# for sub_pathways in all_pathways.values():
# for sub_pathway in sub_pathways:
# print(sub_pathway)
# from chemlite import Pathway
# print(all_pathways)
# for sub_pathways in all_pathways.values():
# for i in range(len(sub_pathways)):
# for j in range(i+1, len(sub_pathways)):
# if sub_pathways[i] == sub_pathways[j]:
# print(f'Equality between {sub_pathways[i].get_id()} and {sub_pathways[j].get_id()}')
# print()
# print(Pathway._to_dict(all_pathways[1][0])) | ac7539d1d8f0f9388c9d6bef570362d62ec90414 | 3,150 |
import numpy as np
import torch
def wrap(func, *args, unsqueeze=False):
"""
Wrap a torch function so it can be called with NumPy arrays.
Input and return types are seamlessly converted.
"""
args = list(args)
for i, arg in enumerate(args):
if type(arg) == np.ndarray:
args[i] = torch.from_numpy(arg)
if unsqueeze:
args[i] = args[i].unsqueeze(0)
result = func(*args)
if isinstance(result, tuple):
result = list(result)
for i, res in enumerate(result):
if type(res) == torch.Tensor:
if unsqueeze:
res = res.squeeze(0)
result[i] = res.numpy()
return tuple(result)
elif type(result) == torch.Tensor:
if unsqueeze:
result = result.squeeze(0)
result = result.numpy()
return result
else:
return result | 5a5491e2b911235d7bf858b19d0d32a9e8da20e6 | 3,151 |
def STOCHF(data, fastk_period=5, fastd_period=3, fastd_ma_type=0):
"""
Stochastic %F
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:param int fastk_period: period used for K fast indicator calculation
:param int fastd_period: period used for D fast indicator calculation
:param int fastd_ma_type: fast D moving average type (0 simple, 1 exponential)
:return pd.Series: with indicator data calculation results
"""
fn = Function('STOCHF')
return fn(data, fastk_period=fastk_period, fastd_period=fastd_period,
fastd_matype=fastd_ma_type) | 3412a6832f54b2dfbaff7eb25de0f6644d914934 | 3,152 |
def playerid_reverse_lookup(player_ids, key_type=None):
"""Retrieve a table of player information given a list of player ids
:param player_ids: list of player ids
:type player_ids: list
:param key_type: name of the key type being looked up (one of "mlbam", "retro", "bbref", or "fangraphs")
:type key_type: str
:rtype: :class:`pandas.core.frame.DataFrame`
"""
key_types = ('mlbam', 'retro', 'bbref', 'fangraphs', )
if not key_type:
key_type = key_types[0] # default is "mlbam" if key_type not provided
elif key_type not in key_types:
raise ValueError(
'[Key Type: {}] Invalid; Key Type must be one of "{}"'.format(key_type, '", "'.join(key_types))
)
table = get_lookup_table()
key = 'key_{}'.format(key_type)
results = table[table[key].isin(player_ids)]
results = results.reset_index().drop('index', 1)
return results | e5bbe46567d1c8e517020d9cb9f551249ea8f515 | 3,153 |
def get_delta_z(z, rest_ghz, ghz=None):
"""
Take a measured GHz value, and calculates the restframe GHz value based on the given z of the matched galaxy
    :param z:
    :param rest_ghz:
    :param ghz:
:return:
"""
    # First step is to convert to nm from rest frame GHz
set_zs = []
for key, values in transitions.items():
if values[0] - 0.3 <= z <= values[1] + 0.3:
sghz = values[2] * u.GHz # Gets the GHz of the CO line
rest_ghz /= (z+1)
set_z = np.round((sghz - rest_ghz)/ rest_ghz, 3) # (Freq_emitted - Freq_obs)/ Freq_obs = z
set_z = z - set_z
rest_ghz *= (z+1)
print("Z: {} Set Z: {}".format(z, set_z))
set_zs.append((key, set_z))
set_z = np.min([np.abs(i[1]) for i in set_zs])
print(set_zs)
print(set_z)
for element in set_zs:
if np.isclose(np.abs(element[1]),set_z):
return element[1], element[0] | acb0069c56fb34aeaba302368131400f3c35d643 | 3,154 |
def hist_orientation(qval, dt):
"""
provided with quats, and time spent* in the direction defined by quat
produces grouped by ra, dec and roll quaternions and corresponding time, spent in quats
    params: qval a set of quats stored in scipy.spatial.transform.Rotation class
    params: dt corresponding to the set of quaternions, set of time intervals duration (which sc spent in the direction defined by quaternion)
    return: exptime, qval - histogrammed set of quaternions with corresponding times
"""
oruniq, uidx, invidx = hist_quat(qval)
exptime = np.zeros(uidx.size, np.double)
np.add.at(exptime, invidx, dt)
return exptime, qval[uidx] | bbdecc58a9a3dc248d68b73018cd5f1d803ddbfd | 3,155 |
def proclamadelcaucacom_story(soup):
"""
Function to pull the information we want from Proclamadelcauca.com stories
:param soup: BeautifulSoup object, ready to parse
"""
hold_dict = {}
#text
try:
article_body = soup.find('div', attrs={"class": "single-entradaContent"})
maintext = [para.text.strip() for para in article_body.find_all('p')]
hold_dict['maintext'] = '\n '.join(maintext).strip()
except:
article_body = None
return hold_dict | e8bcf0faaa7731b71e7b9db33e454b422b3285bc | 3,157 |
def asarray_fft(x, inverse):
"""Recursive implementation of the 1D Cooley-Tukey FFT using np asarray
to prevent copying.
Parameters:
x (array): the discrete amplitudes to transform.
inverse (bool): perform the inverse fft if true.
Returns:
x (array): the amplitudes of the original signal.
OR
X (complex number array): the phase and amplitude of the transformation.
"""
coef = 1 if inverse else -1
N = x.shape[0]
# validating input array
if np.log2(N) % 1 > 0:
raise ValueError('array size must be a power of 2')
# 32 was arbitrarily chosen as "good enough"
elif N <= 32:
return dft(x, inverse)
# perform DFT on all N <= 32 sub-arrays
else:
even_terms = asarray_fft(x[::2], inverse)
odd_terms = asarray_fft(x[1::2], inverse)
exp_array = np.exp(coef * 2j * np.pi * np.arange(N) / N)
return np.concatenate([even_terms + exp_array[:(N >> 1)] * odd_terms,
even_terms + exp_array[(N >> 1):] * odd_terms]) | 87f86f8529f5c54535d9a188c454f762f96a7a58 | 3,158 |
import jinja2
def wrap_locale_context(func):
"""Wraps the func with the current locale."""
@jinja2.contextfilter
def _locale_filter(ctx, value, *args, **kwargs):
doc = ctx['doc']
if not kwargs.get('locale', None):
kwargs['locale'] = str(doc.locale)
return func(value, *args, **kwargs)
return _locale_filter | a2f720e9ed38eb2bf0f546ab392f295d969f7ab7 | 3,160 |
from numpy import loadtxt
from scipy.interpolate import UnivariateSpline
def mu_Xe(keV=12):
"""Returns inverse 1/e penetration depth [mm-1 atm-1] of Xe given the
x-ray energy in keV. The transmission through a 3-mm thick slab of Xe at
6.17 atm (76 psi) was calculated every 100 eV over an energy range
spanning 5-17 keV using:
http://henke.lbl.gov/optical_constants/filter2.html
This result was then converted to mu and saved as a tab-delimited text
file. The returned result is calculated using a univariate spline and
should be valid over the range 5-17 keV."""
E_mu = loadtxt('mu_Xe.txt',dtype=float,delimiter='\t')
us_mu = UnivariateSpline(E_mu[:,0],E_mu[:,1],s=0)
return us_mu(1000*keV) | 5f106871e7517ef910739b74a13d2139e03ed480 | 3,161 |
import hashlib
def file_md5(input_file):
"""
:param input_file: Path to input file.
:type input_file: str
:return: Returns the encoded data in the inputted file in hexadecimal format.
"""
with open(input_file, 'rb') as f:
data = f.read()
return hashlib.md5(data).hexdigest() | 4a7ea12e3b5e0429787eb65e651852e49b40ecf7 | 3,162 |
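A quick usage sketch (the file name is illustrative only); the MD5 of the bytes b"hello" is a well-known value:

with open("example.txt", "wb") as f:
    f.write(b"hello")
print(file_md5("example.txt"))  # 5d41402abc4b2a76b9719d911017c592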
from typing import Dict
def message_args() -> Dict[str, str]:
"""A formatted message."""
return {"subject": "Test message", "message": "This is a test message"} | 4d25d5c9f54aa0997f2e619f90eb6632717cf0d3 | 3,164 |
def to_drive_type(value):
"""Convert value to DriveType enum."""
if isinstance(value, DriveType):
return value.value
sanitized = str(value).upper().strip().replace(" ", "_")
try:
return DriveType[sanitized].value
except KeyError as err:
raise ValueError(f"Unknown drive type: {value}") from err | 10183ac3ad15c2e01d9abf262f097dc6b366e7ab | 3,165 |
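A minimal sketch of the kind of enum to_drive_type expects; DriveType and its members here are hypothetical stand-ins, used only to show how free-form input is normalized to an enum member name.

from enum import Enum

class DriveType(Enum):
    """Hypothetical stand-in for the real DriveType enum."""
    HARD_DISK = "hdd"
    SOLID_STATE = "ssd"

print(to_drive_type(" solid state "))      # "ssd" (normalized to SOLID_STATE)
print(to_drive_type(DriveType.HARD_DISK))  # "hdd" (already an enum member)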
def upload_authorized_key(host, port, filepath):
"""UPLOAD (key) upload_authorized_key"""
params = {'method': 'upload_authorized_key'}
files = [('key', filepath, file_get_contents(filepath))]
return _check(https.client.https_post(host, port, '/', params, files=files)) | 68ada5d834ff77c4ee1b09815a6b094c30a42c1b | 3,166 |
def thermalize_cutoff(localEnergies, smoothing_window, tol):
"""Return position where system is thermalized
according to some tolerance tol, based on the derivative
of the smoothed local energies
"""
mean = np.mean(localEnergies)
smoothLocalEnergies = smoothing(localEnergies, smoothing_window)
check_slope = derivative(smoothLocalEnergies) < tol
cutoff = np.where(check_slope)[0][0]
return cutoff | d72904596ab88298232e9c2ed0fac151e3e66a71 | 3,167 |
def annualize_metric(metric: float, holding_periods: int = 1) -> float:
"""
Annualize metric of arbitrary periodicity
:param metric: Metric to analyze
:param holding_periods:
:return: Annualized metric
"""
days_per_year = 365
trans_ratio = days_per_year / holding_periods
return (1 + metric) ** trans_ratio - 1 | 0c84816f29255d49e0f2420b17abba66e2387c99 | 3,168 |
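A worked example as a sketch: a 1% return held for 30 days annualizes to roughly 12.9%.

print(round(annualize_metric(0.01, holding_periods=30), 4))  # (1.01 ** (365 / 30)) - 1 ~ 0.1287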
def get_latest_tag():
"""
Find the value of the latest tag for the Adafruit CircuitPython library
bundle.
:return: The most recent tag value for the project.
"""
global LATEST_BUNDLE_VERSION # pylint: disable=global-statement
if LATEST_BUNDLE_VERSION == "":
LATEST_BUNDLE_VERSION = get_latest_release_from_url(
"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/releases/latest"
)
return LATEST_BUNDLE_VERSION | 2195d2cde7e2ff67a110b1a1af2aa8cebad52294 | 3,170 |
def read_gold_conll2003(gold_file):
"""
Reads in the gold annotation from a file in CoNLL 2003 format.
Returns:
- gold: a String list containing one sequence tag per token.
E.g. [B-Kochschritt, L-Kochschritt, U-Zutat, O]
- lines: a list list containing the original line split at "\t"
"""
gold = []
lines = []
with open(gold_file, encoding="utf-8") as f:
for line in f:
if line == "\n":
continue
line = line.strip().split("\t")
gold.append(line[3])
lines.append(line)
return gold, lines | 1e11513c85428d20e83d54cc2fa2d42ddd903341 | 3,172 |
import functools
def translate_nova_exception(method):
    """Transforms a nova exception but keeps its traceback intact."""
@functools.wraps(method)
def wrapper(self, ctx, *args, **kwargs):
try:
res = method(self, ctx, *args, **kwargs)
except (nova_exceptions.ConnectionRefused,
keystone_exceptions.ConnectionError) as exc:
err_msg = encodeutils.exception_to_unicode(exc)
_reraise(exception.NovaConnectionFailed(reason=err_msg))
except (keystone_exceptions.BadRequest,
                nova_exceptions.BadRequest) as exc:
err_msg = encodeutils.exception_to_unicode(exc)
_reraise(exception.BadRequest(reason=err_msg))
except (keystone_exceptions.Forbidden,
nova_exceptions.Forbidden):
_reraise(exception.NotAuthorized())
except (keystone_exceptions.NotFound,
nova_exceptions.NotFound) as exc:
err_msg = encodeutils.exception_to_unicode(exc)
_reraise(exception.InstanceNotFound(reason=err_msg))
return res
return wrapper | 186b7f944b03073c758b4af5f4ccfcaa80e8f5e8 | 3,173 |
def _update_form(form):
""" """
if not form.text():
return form.setStyleSheet(error_css)
return form.setStyleSheet(success_css) | 94e241a98aa6c8305965d4149f4d60e28843aea7 | 3,174 |
import torch
from torch_adapter import TorchAdapter
from openvino.inference_engine import IEPlugin
from openvino_adapter import OpenvinoAdapter
import importlib
def create_adapter(openvino, cpu_only, force_torch, use_myriad):
"""Create the best adapter based on constraints passed as CLI arguments."""
if use_myriad:
openvino = True
if cpu_only:
raise Exception("Cannot run with both cpu-only and Myriad options")
if force_torch and openvino:
raise Exception("Cannot run with both Torch and OpenVINO")
if not openvino:
if importlib.util.find_spec("torch") is None:
logger.info("Could not find Torch")
openvino = True
elif not cpu_only:
if torch.cuda.is_available():
logger.info("Detected GPU / CUDA support")
return TorchAdapter(False, DEFAULT_STYLE)
else:
logger.info("Failed to detect GPU / CUDA support")
if not force_torch:
if importlib.util.find_spec("openvino") is None:
logger.info("Could not find Openvino")
if openvino:
raise Exception("No suitable engine")
else:
if not cpu_only and not use_myriad:
try:
IEPlugin("GPU")
logger.info("Detected iGPU / clDNN support")
except RuntimeError:
logger.info("Failed to detect iGPU / clDNN support")
cpu_only = True
logger.info("Using OpenVINO")
logger.info("CPU Only: %s", cpu_only)
logger.info("Use Myriad: %s", use_myriad)
adapter = OpenvinoAdapter(cpu_only, DEFAULT_STYLE,
use_myriad=use_myriad)
return adapter
logger.info("Using Torch with CPU")
return TorchAdapter(True, DEFAULT_STYLE) | 4fd0fc51c7758d32a1eac4d86d1b5dc6b90d20b7 | 3,175 |
def _make_ecg(inst, start, stop, reject_by_annotation=False, verbose=None):
"""Create ECG signal from cross channel average."""
if not any(c in inst for c in ['mag', 'grad']):
raise ValueError('Unable to generate artificial ECG channel')
for ch in ['mag', 'grad']:
if ch in inst:
break
logger.info('Reconstructing ECG signal from {}'
.format({'mag': 'Magnetometers',
'grad': 'Gradiometers'}[ch]))
picks = pick_types(inst.info, meg=ch, eeg=False, ref_meg=False)
if isinstance(inst, BaseRaw):
reject_by_annotation = 'omit' if reject_by_annotation else None
ecg, times = inst.get_data(picks, start, stop, reject_by_annotation,
True)
elif isinstance(inst, BaseEpochs):
ecg = np.hstack(inst.copy().crop(start, stop).get_data())
times = inst.times
elif isinstance(inst, Evoked):
ecg = inst.data
times = inst.times
return ecg.mean(0, keepdims=True), times | 27d1ef6da9c9d491de4b9806c85528f1226b2c3d | 3,176 |
def lorentzian(freq, freq0, area, hwhm, phase, offset, drift):
"""
Lorentzian line-shape function
Parameters
----------
freq : float or float array
The frequencies for which the function is evaluated
freq0 : float
The center frequency of the function
area : float
hwhm: float
Half-width at half-max
"""
oo2pi = 1/(2*np.pi)
df = freq - freq0
absorptive = oo2pi * area * np.ones(freq.shape[0])*(hwhm / (df**2 + hwhm**2))
dispersive = oo2pi * area * df/(df**2 + hwhm**2)
return (absorptive * np.cos(phase) + dispersive * np.sin(phase) + offset +
drift * freq) | 2f9b2ede75773c2100941e16fd14210b1a85a453 | 3,177 |
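A hedged usage sketch of lorentzian, evaluating the purely absorptive case (phase, offset and drift all zero) on a small grid; at freq0 the peak height should be area / (2 * pi * hwhm). It assumes np is NumPy, as in the snippet above.

import numpy as np

freq = np.linspace(-5, 5, 1001)
spec = lorentzian(freq, freq0=0.0, area=1.0, hwhm=0.5, phase=0.0, offset=0.0, drift=0.0)
print(spec.max())  # ~0.3183 = 1 / (2 * pi * 0.5)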
def findTilt(root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
return findTilt_helper(root)[1] | 1338a704f754678f88dedaf5a968aa3bfe4ff17f | 3,178 |
def generate_report():
"""
Get pylint analization report and write it to file
"""
files = get_files_to_check()
dir_path = create_report_dir()
file_path = create_report_file(dir_path)
config_opts = get_config_opts()
pylint_opts = '--load-plugins pylint_flask' + config_opts
pylint_stdout, pylint_stderr = epylint.py_run(files + ' ' + pylint_opts, return_std=True)
with open(file_path, 'w+') as report:
report.write(pylint_stdout.getvalue())
report.write(pylint_stderr.getvalue())
return True | 655345a128847285712f683274637201ee264010 | 3,179 |
def make_trampoline(func_name):
""" Create a main function that calls another function """
mod = ir.Module('main')
main = ir.Procedure('main')
mod.add_function(main)
entry = ir.Block('entry')
main.add_block(entry)
main.entry = entry
entry.add_instruction(ir.ProcedureCall(func_name, []))
entry.add_instruction(ir.Exit())
return mod | 1dcaf61cbadde4fdd8e94958658ce8b1b69612f1 | 3,181 |
def model_fields(dbo, baseuri=None):
"""Extract known fields from a BQ object, while removing any known
from C{excluded_fields}
@rtype: dict
@return fields to be rendered in XML
"""
attrs = {}
try:
dbo_fields = dbo.xmlfields
except AttributeError:
# This occurs when the object is a fake DB objects
# The dictionary is sufficient
dbo_fields= dbo.__dict__
for fn in dbo_fields:
fn = mapping_fields.get(fn, fn)
# Skip when map is None
if fn is None:
continue
# Map is callable, then call
if callable(fn):
fn, attr_val = fn(dbo, fn, baseuri)
else:
attr_val = getattr(dbo, fn, None)
# Put value in attribute dictionary
if attr_val is not None and attr_val!='':
if isinstance(attr_val, basestring):
attrs[fn] = attr_val
else:
attrs[fn] = str(attr_val) #unicode(attr_val,'utf-8')
return attrs | 59a07057dccb116cc4753a4973a3128ccc79c558 | 3,183 |
def get_bsj(seq, bsj):
"""Return transformed sequence of given BSJ"""
return seq[bsj:] + seq[:bsj] | d1320e5e3257ae22ca982ae4dcafbd4c6def9777 | 3,184 |
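A one-line sketch of the rotation get_bsj performs:

print(get_bsj("ABCDEF", 2))  # "CDEFAB"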
from typing import Dict
import warnings
import math
def sample(problem: Dict, N: int, calc_second_order: bool = True,
skip_values: int = 0):
"""Generates model inputs using Saltelli's extension of the Sobol' sequence.
Returns a NumPy matrix containing the model inputs using Saltelli's sampling
scheme. Saltelli's scheme extends the Sobol' sequence in a way to reduce
the error rates in the resulting sensitivity index calculations. If
`calc_second_order` is False, the resulting matrix has ``N * (D + 2)``
rows, where ``D`` is the number of parameters. If `calc_second_order` is True,
the resulting matrix has ``N * (2D + 2)`` rows. These model inputs are
intended to be used with :func:`SALib.analyze.sobol.analyze`.
If `skip_values` is > 0, raises a UserWarning in cases where sample sizes may
be sub-optimal. The convergence properties of the Sobol' sequence requires
``N < skip_values`` and that both `N` and `skip_values` are base 2
(e.g., ``N = 2^n``). See discussion in [4] for context and information.
If skipping values, one recommendation is that the largest possible `n` such that
``(2^n)-1 <= N`` is skipped (see [5]).
Parameters
----------
problem : dict
The problem definition
N : int
The number of samples to generate.
Must be an exponent of 2 and < `skip_values`.
calc_second_order : bool
Calculate second-order sensitivities (default True)
skip_values : int
Number of points in Sobol' sequence to skip, ideally a value of base 2
(default 0, see Owen [3] and Discussion [4])
References
----------
.. [1] Saltelli, A., 2002.
Making best use of model evaluations to compute sensitivity indices.
Computer Physics Communications 145, 280–297.
https://doi.org/10.1016/S0010-4655(02)00280-1
.. [2] Sobol', I.M., 2001.
Global sensitivity indices for nonlinear mathematical models and
their Monte Carlo estimates.
Mathematics and Computers in Simulation,
The Second IMACS Seminar on Monte Carlo Methods 55, 271–280.
https://doi.org/10.1016/S0378-4754(00)00270-6
.. [3] Owen, A. B., 2020.
On dropping the first Sobol' point.
arXiv:2008.08051 [cs, math, stat].
Available at: http://arxiv.org/abs/2008.08051 (Accessed: 20 April 2021).
.. [4] Discussion: https://github.com/scipy/scipy/pull/10844
https://github.com/scipy/scipy/pull/10844#issuecomment-673029539
.. [5] Johnson, S. G.
Sobol.jl: The Sobol module for Julia
https://github.com/stevengj/Sobol.jl
"""
# bit-shift test to check if `N` == 2**n
if not ((N & (N-1) == 0) and (N != 0 and N-1 != 0)):
msg = f"""
Convergence properties of the Sobol' sequence is only valid if
`N` ({N}) is equal to `2^n`.
"""
warnings.warn(msg)
if skip_values > 0:
M = skip_values
if not ((M & (M-1) == 0) and (M != 0 and M-1 != 0)):
msg = f"""
Convergence properties of the Sobol' sequence is only valid if
`skip_values` ({M}) is equal to `2^m`.
"""
warnings.warn(msg)
n_exp = int(math.log(N, 2))
m_exp = int(math.log(M, 2))
if n_exp >= m_exp:
msg = f"Convergence may not be valid as 2^{n_exp} ({N}) is >= 2^{m_exp} ({M})."
warnings.warn(msg)
D = problem['num_vars']
groups = _check_groups(problem)
if not groups:
Dg = problem['num_vars']
else:
G, group_names = compute_groups_matrix(groups)
Dg = len(set(group_names))
# Create base sequence - could be any type of sampling
base_sequence = sobol_sequence.sample(N + skip_values, 2 * D)
if calc_second_order:
saltelli_sequence = np.zeros([(2 * Dg + 2) * N, D])
else:
saltelli_sequence = np.zeros([(Dg + 2) * N, D])
index = 0
for i in range(skip_values, N + skip_values):
# Copy matrix "A"
for j in range(D):
saltelli_sequence[index, j] = base_sequence[i, j]
index += 1
# Cross-sample elements of "B" into "A"
for k in range(Dg):
for j in range(D):
if (not groups and j == k) or (groups and group_names[k] == groups[j]):
saltelli_sequence[index, j] = base_sequence[i, j + D]
else:
saltelli_sequence[index, j] = base_sequence[i, j]
index += 1
# Cross-sample elements of "A" into "B"
# Only needed if you're doing second-order indices (true by default)
if calc_second_order:
for k in range(Dg):
for j in range(D):
if (not groups and j == k) or (groups and group_names[k] == groups[j]):
saltelli_sequence[index, j] = base_sequence[i, j]
else:
saltelli_sequence[index, j] = base_sequence[i, j + D]
index += 1
# Copy matrix "B"
for j in range(D):
saltelli_sequence[index, j] = base_sequence[i, j + D]
index += 1
saltelli_sequence = scale_samples(saltelli_sequence, problem)
return saltelli_sequence | a3a356fd037b879c71cb6dc2e4751350857302e8 | 3,185 |
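A hedged usage sketch of the problem dictionary this sample function expects and the resulting sample shape (names and bounds below are illustrative, and the SALib helpers the function relies on are assumed to be importable); with calc_second_order=True the output has N * (2D + 2) rows.

problem = {
    'num_vars': 3,
    'names': ['x1', 'x2', 'x3'],
    'bounds': [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
}
X = sample(problem, N=8, calc_second_order=True)
print(X.shape)  # (64, 3): 8 * (2 * 3 + 2) rows, one column per variable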
def standardize(table, option):
"""
standardize
Z = (X - mean) / (standard deviation)
"""
if option == 'table':
mean = np.mean(table)
std = np.std(table)
t = []
for row in table:
t_row = []
if option != 'table':
mean = np.mean(row)
std = np.std(row)
for i in row:
if std == 0:
t_row.append(0)
else:
t_row.append((i - mean)/std)
t.append(t_row)
return t | 337ec0d22340ca74e54236e1cb39829eab8ad89b | 3,186 |
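A short usage sketch of standardize; passing any option other than 'table' triggers per-row standardization, so each row ends up with zero mean and unit variance (np is assumed to be NumPy, as in the snippet).

import numpy as np

table = [[1, 2, 3], [10, 20, 30]]
print(np.round(standardize(table, 'row'), 3))  # both rows -> [-1.225, 0., 1.225]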
def raw_input_nonblock():
"""
return result of raw_input if has keyboard input, otherwise return None
"""
if _IS_OS_WIN32:
return _raw_input_nonblock_win32()
else:
raise NotImplementedError('Unsupported os.') | 90cc9febcaa4866334b69b19809565795a07de49 | 3,187 |
def get_batch_hard(draw_batch_size,hard_batchs_size,semihard_batchs_size,easy_batchs_size,norm_batchs_size,network,dataset,nb_classes, margin):
"""
Create batch of APN "hard" triplets
Arguments:
draw_batch_size -- integer : number of initial randomly taken samples
    hard_batchs_size -- integer : select the number of hardest samples to keep
    norm_batchs_size -- integer : number of random samples to add
Returns:
triplets -- list containing 3 tensors A,P,N of shape (hard_batchs_size+norm_batchs_size,w,h,c)
"""
X = dataset
m, w, h = X[0].shape # c removed
#Step 1 : pick a random batch to study
studybatch = get_batch_random(draw_batch_size,dataset, nb_classes)
#Step 2 : compute the loss with current network : d(A,P)-d(A,N). The alpha parameter here is omited here since we want only to order them
studybatchloss = np.zeros((draw_batch_size))
#Compute embeddings for anchors, positive and negatives
#print('*',studybatch[0][:,:,:].shape)
A = network.predict(studybatch[0][:,:,:])
P = network.predict(studybatch[1][:,:,:])
N = network.predict(studybatch[2][:,:,:])
#Compute d(A,P)-d(A,N) # HARD
studybatchloss = np.sqrt(np.sum(np.square(A-P),axis=1)) - np.sqrt(np.sum(np.square(A-N),axis=1))
#Sort by distance (high distance first) and take the hardest
selection = np.argsort(studybatchloss)[::-1][:hard_batchs_size]
#Compute d(A,N)-d(A,P) # EASY
studybatchloss = -np.sqrt(np.sum(np.square(A-P),axis=1)) + np.sqrt(np.sum(np.square(A-N),axis=1))
#Sort by distance (high distance first) and take the EASIEST
selection1 = np.argsort(studybatchloss)[::-1][:easy_batchs_size] #
#Compute d(A,N)-d(A,P) SEMI-HARD
semihard_index1 = np.squeeze(np.where(np.sqrt(np.sum(np.square(A-P),axis=1)) + margin > np.sqrt(np.sum(np.square(A-N),axis=1))))
semihard_index2 = np.squeeze(np.where(np.sqrt(np.sum(np.square(A-P),axis=1)) < np.sqrt(np.sum(np.square(A-N),axis=1))))
semihard_index = np.intersect1d(semihard_index1,semihard_index2)
selection2 = semihard_index[:semihard_batchs_size] #
selection = np.append(selection,selection1) #Hard & Easy
selection = np.append(selection,selection2) #Hard & Easy & SemiHard
#Draw other random samples from the batch
selection2 = np.random.choice(np.delete(np.arange(draw_batch_size),selection),norm_batchs_size,replace=False)
selection = np.append(selection,selection2) #Hard & Easy & SemiHard & Random
triplets = [studybatch[0][selection,:,:], studybatch[1][selection,:,:], studybatch[2][selection,:,:]]
return triplets | da6dc7f69354b0b74b59717140c6c46826925050 | 3,188 |
import math
def sine(
start, end, freq, amp: Numeric = 1, n_periods: Numeric = 1
) -> TimeSerie:
"""
Generate a sine TimeSerie.
"""
index = pd.date_range(start=start, end=end, freq=freq)
return TimeSerie(
index=index,
y_values=np.sin(
np.linspace(0, 2 * math.pi * n_periods, num=len(index))
)
* amp,
) | df4254f9fafcb61f0bcf492edf1847d89f4debb0 | 3,189 |
def get_from_address(sending_profile, template_from_address):
"""Get campaign from address."""
# Get template display name
if "<" in template_from_address:
template_display = template_from_address.split("<")[0].strip()
else:
template_display = None
# Get template sender
template_sender = template_from_address.split("@")[0].split("<")[-1]
# Get sending profile domain
if type(sending_profile) is dict:
sp_from = sending_profile["from_address"]
else:
sp_from = sending_profile.from_address
sp_domain = sp_from.split("<")[-1].split("@")[1].replace(">", "")
# Generate from address
if template_display:
from_address = f"{template_display} <{template_sender}@{sp_domain}>"
else:
from_address = f"{template_sender}@{sp_domain}"
return from_address | 8617d2b793b76456cb7d1a17168f27fd1d548e6d | 3,190 |
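A hedged usage sketch with an illustrative sending profile and template address: the display name and local part come from the template, while the domain is taken from the sending profile.

sending_profile = {"from_address": "Ops <ops@mail.example.org>"}
template_from = "Jane Doe <jane.doe@corp.test>"
print(get_from_address(sending_profile, template_from))
# Jane Doe <jane.doe@mail.example.org>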
import ctypes
def is_dwm_compositing_enabled():
"""Is Desktop Window Manager compositing (Aero) enabled.
"""
enabled = ctypes.c_bool()
try:
DwmIsCompositionEnabled = ctypes.windll.dwmapi.DwmIsCompositionEnabled
except (AttributeError, WindowsError):
# dwmapi or DwmIsCompositionEnabled is not present
return False
rval = DwmIsCompositionEnabled(ctypes.byref(enabled))
return rval == 0 and enabled.value | 9b31b3ef62d626008d2b6c6ef59446be79da89f6 | 3,191 |
def fgsm(x, y_true, y_hat, epsilon=0.075):
"""Calculates the fast gradient sign method adversarial attack
Following the FGSM algorithm, determines the gradient of the cost function
wrt the input, then perturbs all the input in the direction that will cause
the greatest error, with small magnitude.
"""
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=y_true, logits=y_hat)
grad, = tf.gradients(loss, x)
scaled_grad = epsilon * tf.sign(grad)
return tf.stop_gradient(x + scaled_grad) | a71d2042ea1f5efa0a3f6409836da52bf323aa5c | 3,192 |
def tour_delete(request,id):
""" delete tour depending on id """
success_message, error_message = None, None
form = TourForm()
tour = get_object_or_404(Tour, id=id)
tours = Tour.objects.all()
if request.method=="POST":
tour.delete()
success_message = "deleted tour"
else:
error_message = "to delete tour"
context = {
'form': form,
'tours': tours,
'success_message': success_message,
'error_message': error_message,
'user_info': Employee.objects.get(employee_id=request.user.username),
'cart': Cart.objects.filter(created_by__employee_id=request.user.username).count,
}
return render(request, 'employee/tour_add.html', context) | c42e355734444d858555ad627f202f73161cbedf | 3,193 |
import random
def d3():
"""Simulate the roll of a 3 sided die"""
return random.randint(1, 3) | c2eac44bb36b7e35c66894bce3467f568a735ca1 | 3,194 |
def _search_qr(model, identifier, session):
"""Search the database using a Query/Retrieve *Identifier* query.
Parameters
----------
model : pydicom.uid.UID
Either *Patient Root Query Retrieve Information Model* or *Study Root
Query Retrieve Information Model* for C-FIND, C-GET or C-MOVE.
identifier : pydicom.dataset.Dataset
The request's *Identifier* dataset.
session : sqlalchemy.orm.session.Session
The session we are using to query the database.
Returns
-------
list of db.Instance
The Instances that match the query.
"""
# Will raise InvalidIdentifier if check failed
_check_identifier(identifier, model)
if model in _PATIENT_ROOT:
attr = _PATIENT_ROOT[model]
else:
attr = _STUDY_ROOT[model]
# Hierarchical search method: C.4.1.3.1.1
query = None
for level, keywords in attr.items():
# Keywords at current level that are in the identifier
keywords = [kw for kw in keywords if kw in identifier]
# Create query dataset for only the current level and run it
ds = Dataset()
[setattr(ds, kw, getattr(identifier, kw)) for kw in keywords]
query = build_query(ds, session, query)
if level == identifier.QueryRetrieveLevel:
break
return query.all() | 29fe8831b1e44a381202b48212ff7c40c4c8d7fd | 3,197 |
def find_max_value(binary_tree):
"""This function takes a binary tree and returns the largest value of all the nodes in that tree
with O(N) space and O(1) time using breadth first traversal while keeping track of the largest value thus far
in the traversal
"""
    root_node = [binary_tree.root]
    output = []
    largest_value = 0
    # helper function
    def is_Null(current_value):
        """this is a helper function to check if the value of all nodes in breadth first traversal have null values
        which means we have gone off the bottom depth of the tree and returns a boolean"""
        return current_value is None
    def _walk(input_list):
        """This is the recursive function in our breadth first traversal which implements a queue without the queue class
        this function returns the value of each node until all node values are returned; the base case is when all
        values of the nodes are null, which means we have gone off the bottom depth of the tree
        """
        nonlocal largest_value
        counter = 0
        newNodes = []
        while counter < len(input_list):
            node = input_list[counter]
            if node is not None:
                if node.value > largest_value:
                    largest_value = node.value
                    print('new value: ', node.value)
                output.append(node)
                newNodes.append(node.left)
                newNodes.append(node.right)
                print('newNodes: ', len(newNodes), '\n', newNodes)
            counter += 1
        if not all(is_Null(n) for n in newNodes):
            _walk(newNodes)
    _walk(root_node)
    return 'largest value ' + str(largest_value) | a797ea1598195cfcfe1abf00d73562c59617ad9b | 3,199 |
def failed_jobs(username, root_wf_id, wf_id):
"""
Get a list of all failed jobs of the latest instance for a given workflow.
"""
dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)
args = __get_datatables_args()
total_count, filtered_count, failed_jobs_list = dashboard.get_failed_jobs(
wf_id, **args
)
for job in failed_jobs_list:
job.exec_job_id = '<a href="' + url_for(
'.job',
root_wf_id=root_wf_id,
wf_id=wf_id,
job_id=job.job_id,
job_instance_id=job.job_instance_id
) + '">' + job.exec_job_id + '</a>'
job.stdout = '<a target="_blank" href="' + url_for(
'.stdout',
root_wf_id=root_wf_id,
wf_id=wf_id,
job_id=job.job_id,
job_instance_id=job.job_instance_id
) + '">Application Stdout/Stderr</a>'
job.stderr = '<a target="_blank" href="' + url_for(
'.stderr',
root_wf_id=root_wf_id,
wf_id=wf_id,
job_id=job.job_id,
job_instance_id=job.job_instance_id
) + '">Condor Stderr/Pegasus Lite Log</a>'
return render_template(
'workflow/jobs_failed.xhr.json',
count=total_count,
filtered=filtered_count,
jobs=failed_jobs_list,
table_args=args
) | 5d755f6e84f7c406174fb1a7bdb8de64a6e0c049 | 3,200 |
def get_vm_types(resources):
"""
Get all vm_types for a list of heat resources, do note that
some of the values retrieved may be invalid
"""
vm_types = []
for v in resources.values():
vm_types.extend(list(get_vm_types_for_resource(v)))
return set(vm_types) | f13e8860d5b25cd03360e859527d61a06d103f53 | 3,201 |
def interface_names(obj):
"""
Return: a list of interface names to which `obj' is conformant.
The list begins with `obj' itself if it is an interface.
Names are returned in depth-first order, left to right.
"""
return [o.__name__ for o in interfaces(obj)] | 99aad05e14daeb13ed7f19599684acc8b324df84 | 3,203 |
import six
def add_basic(token):
"""For use with Authorization headers, add "Basic "."""
if token:
return (u"Basic " if isinstance(token, six.text_type) else b"Basic ") + token
else:
return token | cd579e77e243fdfba0853a87087e45cf58bcc6f2 | 3,204 |
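A short sketch of add_basic; the token below is just the base64 of "user:pass", used for illustration.

import base64

token = base64.b64encode(b"user:pass").decode()  # 'dXNlcjpwYXNz'
print(add_basic(token))  # 'Basic dXNlcjpwYXNz'
print(add_basic(""))     # '' (falsy tokens pass through unchanged)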
import json
import requests
def updateUser(token, leaderboard=None, showUsername=None, username=None):
"""
Update user account information.
Parameters-
token: Authentication token.
leaderboard: True to show user's profit on leaderboard.
showUsername: True to show the username on LN Marktes public data.
username: username to display.
"""
headers = {
'content-type': "application/json",
'accept': "application/json",
'authorization': f"Bearer {token}",
}
payloadDict = dict()
if showUsername is not None:
payloadDict['show_username'] = showUsername
if leaderboard is not None:
payloadDict['show_leaderboard'] = leaderboard
if username is not None:
payloadDict['username'] = username
payload = json.dumps(payloadDict)
userInfo = requests.put(
APIUrls.lnapi+APIUrls.userUrl,
data=payload,
headers=headers,
)
if userInfo.status_code == 200:
return userInfo.json()
else:
raise RuntimeError(
'Unable to update user information:\n'
f'{userInfo.text}'
) | 9b953256b85327411445729b574cf9b79750e735 | 3,205 |
def init_validator(required, cls, *additional_validators):
"""
Create an attrs validator based on the cls provided and required setting.
:param bool required: whether the field is required in a given model.
:param cls: the expected class type of object value.
:return: attrs validator chained correctly (e.g. optional(instance_of))
"""
validator = validators.instance_of(cls)
if additional_validators:
additional_validators = list(additional_validators)
additional_validators.append(validator)
validator = composite(*additional_validators)
return validator if required else validators.optional(validator) | 924b5ff6e77d38c989eef187498f774a2322ed48 | 3,206 |
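# A minimal usage sketch of init_validator with attrs; the Point class is
# hypothetical, and it assumes `validators` above refers to attr.validators
# (only the no-extra-validators path is exercised here).
import attr

@attr.s
class Point:
    x = attr.ib(validator=init_validator(True, float))                  # required float
    y = attr.ib(default=None, validator=init_validator(False, float))   # optional float

Point(x=1.0)           # passes validation
Point(x=1.0, y=2.5)    # passes validation; y may also be omitted/None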
def ShiftRight(x, **unused_kwargs):
"""Layer to shift the tensor to the right by padding on axis 1."""
if not isinstance(x, (list, tuple)): # non-chunked inputs
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[1] = (1, 0) # Padding on axis=1
padded = np.pad(x, pad_widths, mode='constant')
return padded[:, :-1]
# Handling chunked inputs. Recall that the list of chunks represents a big
# sequence (the concatenation of the chunks). We want to shift that sequence,
# so we put a 0 in the beginning of the first chunk and the last element of
# that chunk is used as the new first element of the next chunk, and so on.
padded = []
last_value = np.zeros_like(x[0][:, -1])
for chunk in x:
padded_chunk = np.concatenate([last_value[:, np.newaxis], chunk], axis=1)
last_value = chunk[:, -1]
padded.append(padded_chunk[:, :-1])
return padded | ec5265b5937e3e90e2c3267b6501b008ac7090e5 | 3,208 |
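# A quick check of ShiftRight on a plain 2-D array; the original layer was
# written against a numpy-compatible backend, so standard numpy is assumed here.
import numpy as np

x = np.array([[1, 2, 3],
              [4, 5, 6]])
shifted = ShiftRight(x)
# Each sequence is shifted right by one step and zero-padded on the left:
# [[0, 1, 2],
#  [0, 4, 5]]
assert (shifted == np.array([[0, 1, 2], [0, 4, 5]])).all()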
def empty_record():
"""Create an empty record."""
record = dump_empty(Marc21RecordSchema)
record["metadata"] = "<record> <leader>00000nam a2200000zca4500</leader></record>"
record["is_published"] = False
record["files"] = {"enabled": True}
return record | 7797e1bf0ade98a2400daff1f7937b7af2da280d | 3,210 |
def illuminanceToPhotonPixelRate(illuminance,
objective_numerical_aperture=1.0,
illumination_wavelength=0.55e-6,
camera_pixel_size=6.5e-6,
objective_magnification=1,
system_magnification=1,
sample_quantum_yield=1.,
**kwargs):
"""
Function which converts source illuminance and microscope parameters to
photons / px / s.
Based heavily on the publication:
"When Does Computational Imaging Improve Performance?,"
O. Cossairt, M. Gupta and S.K. Nayar,
IEEE Transactions on Image Processing,
Vol. 22, No. 2, pp. 447–458, Aug. 2012.
However, this function implements the same result for
microscopy, replacing f/# with NA, removing reflectance,
and including magnification.
    Args:
        illuminance: Photometric source illuminance, lux
        objective_numerical_aperture: Numerical aperture of the objective
        illumination_wavelength: Illumination wavelength, m
        camera_pixel_size: Pixel size of the detector, m
        objective_magnification: Magnification of the objective
        system_magnification: Additional magnification of the relay/system optics
        sample_quantum_yield: Quantum yield of the sample
    Returns:
        Photon rate at the camera, photons / pixel / s.
    """
    # Conversion factor from radiometric to photometric coordinates
# https://www.thorlabs.de/catalogPages/506.pdf
K = 1 / 680
# Planck's constant
# h_bar = 6.626176e-34
h_bar = 1.054572e-34
# Speed of light
c = 2.9979e8
# Constant term
const = K * illumination_wavelength / h_bar / c
# Calculate photon_pixel_rate
photon_pixel_rate = sample_quantum_yield * const * (objective_numerical_aperture ** 2) * illuminance * (camera_pixel_size / (system_magnification * objective_magnification)) ** 2
# Return
return photon_pixel_rate | cbbb2f6bdce7592f997b7ab3784c15beb2b846b1 | 3,211 |
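# A hedged example call with typical office-lighting illuminance (~500 lux) and
# a low-magnification objective; the numeric result is not asserted here, the
# snippet only illustrates the calling convention and the output units.
rate = illuminanceToPhotonPixelRate(
    illuminance=500,                     # lux
    objective_numerical_aperture=0.25,
    objective_magnification=10,
)
print('photon rate: %.3e photons / px / s' % rate)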
def stop_tuning(step):
""" stop tuning the current step method """
if hasattr(step, 'tune'):
step.tune = False
elif hasattr(step, 'methods'):
step.methods = [stop_tuning(s) for s in step.methods]
return step | 45e02b8d3ec86ceda97de69bbc730aa62affb06d | 3,212 |
import json
def assemble_english():
"""Assemble each statement into """
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
sentences = {}
for st in stmts:
enga = EnglishAssembler()
enga.add_statements([st])
model_str = enga.make_model()
sentences[st.uuid] = model_str
res = {'sentences': sentences}
return res | 24c267e2763198056e275feda1e81ef1bf280bdb | 3,213 |
def schema_class(classname, schema, schemarepr=None, basename='SchemaBase'):
"""Generate code for a schema class
Parameters
----------
classname : string
The name of the class to generate
schema : dict
The dictionary defining the schema class
basename : string (default: "SchemaBase")
The name of the base class to use in the class definition
schemarepr : CodeSnippet or object, optional
An object whose repr will be used in the place of the explicit schema.
This can be useful, for example, when the generated code should reference
a predefined schema object. The user must ensure that the schema within
the evaluated code is identical to the schema used to generate the code.
"""
return SCHEMA_CLASS_TEMPLATE.format(
classname=classname,
basename=basename,
schema=schema if schemarepr is None else schemarepr,
docstring=docstring(classname, schema, indent=4),
init_code=init_code(classname, schema, indent=4)
) | 2f497de54205e5c180805d77638c9ffe342f76c8 | 3,214 |
import requests
def orthology_events(ids='R-HSA-6799198,R-HSA-168256,R-HSA-168249', species='49633'):
"""
Reactome uses the set of manually curated human reactions to computationally infer reactions in
twenty evolutionarily divergent eukaryotic species for which high-quality whole-genome sequence
data are available, and hence a comprehensive and high-quality set of protein predictions exists.
Thus, this method retrieves the orthologies for any given set of events or entities in the specified species.
:param ids: The events identifiers for which the orthology is requested
:param species: The species id for which the orthology is requested
:return: Json dictionary object of the orthologies of a given set of events or entities
"""
headers = {
'accept': 'application/json',
'content-type': 'text/plain',
}
data = ids
url = 'https://reactome.org/ContentService/data/orthologies/ids/species/%s' % species
    try:
        response = requests.post(url=url, headers=headers, data=data)
    except ConnectionError as e:
        print(e)
        return None  # avoid referencing `response` when the request never completed
    if response.status_code == 200:
        return response.json()
    else:
        print('Status code returned a value of %s' % response.status_code) | 8a75e2bc9af34358164492d1d2b2d8154d4e696e | 3,215
def judge(name):
"""
Return some sort of score for automatically ranking names based on all the
features we can extract so far.
I guess we'll just add the scores * weights up for now.
"""
score = 0
for scoreID, scorer, weight in weights:
subscore = scorer(name)
score += subscore * weight
name.scores[scoreID] = subscore
name.score = score
return score | 34811f49fc8fe6c88ef31978702cacddbacd5314 | 3,216 |
import re
def parse_year(inp, option='raise'):
"""
Attempt to parse a year out of a string.
Parameters
----------
inp : str
String from which year is to be parsed
option : str
Return option:
- "bool" will return True if year is found, else False.
- Return year int / raise a RuntimeError otherwise
Returns
-------
out : int | bool
Year int parsed from inp,
or boolean T/F (if found and option is bool).
Examples
--------
>>> year_str = "NSRDB_2018.h5"
>>> parse_year(year_str)
2018
>>> year_str = "NSRDB_2018.h5"
>>> parse_year(year_str, option='bool')
True
>>> year_str = "NSRDB_TMY.h5"
>>> parse_year(year_str)
RuntimeError: Cannot parse year from NSRDB_TMY.h5
>>> year_str = "NSRDB_TMY.h5"
>>> parse_year(year_str, option='bool')
False
"""
# char leading year cannot be 0-9
# char trailing year can be end of str or not 0-9
regex = r".*[^0-9]([1-2][0-9]{3})($|[^0-9])"
match = re.match(regex, inp)
if match:
out = int(match.group(1))
if 'bool' in option:
out = True
else:
if 'bool' in option:
out = False
else:
raise RuntimeError('Cannot parse year from {}'.format(inp))
return out | a91efb0614e7d0ad6753118f9b4efe8c3b40b4e2 | 3,217 |
def retry_import(e, **kwargs):
"""
When an exception occurs during channel/content import, if
* there is an Internet connection error or timeout error,
      or an HTTPError whose status code is one of the RETRY_STATUS_CODE values,
      return True to retry the file transfer
* the file does not exist on the server or disk, skip the file and return False.
This only applies to content import not channel import.
* otherwise, raise the exception.
return value:
* True - needs retry.
* False - file is skipped. Does not need retry.
"""
skip_404 = kwargs.pop("skip_404")
if (
isinstance(e, ConnectionError)
or isinstance(e, Timeout)
or isinstance(e, ChunkedEncodingError)
or (isinstance(e, HTTPError) and e.response.status_code in RETRY_STATUS_CODE)
or (isinstance(e, SSLERROR) and "decryption failed or bad record mac" in str(e))
):
return True
elif skip_404 and (
(isinstance(e, HTTPError) and e.response.status_code == 404)
or (isinstance(e, OSError) and e.errno == 2)
):
return False
else:
raise e | b125841dff7154352b2031013ffa7058242e4974 | 3,218 |
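# A hedged sketch of the retry loop retry_import is meant to sit inside; the
# `transfer` callable and `file_is_optional` flag are placeholders rather than
# the real content-import code.
def download_with_retries(transfer, file_is_optional=True, max_attempts=3):
    for _ in range(max_attempts):
        try:
            return transfer()
        except Exception as e:
            if retry_import(e, skip_404=file_is_optional):
                continue   # transient network failure: try again
            return None    # missing file (404 / ENOENT): skip it
    return None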
def cvCloneMat(*args):
"""cvCloneMat(CvMat mat) -> CvMat"""
return _cv.cvCloneMat(*args) | 84dc4f59d29580477b7ded41f26d95011b2804b3 | 3,219 |
from typing import Callable, List
import time
def run_episode(kwargs) -> List[Trajectory]:
"""
Runs a single episode and collects the trajectories of each agent
"""
total_controller_time = 0
env_dict: Callable = kwargs.get("env_dict")
obs_builder = kwargs.get("obs_builder")
controller_creator: Callable = kwargs.get("controller_creator")
episode_id: int = kwargs.get("episode_id")
max_episode_length: int = kwargs.get("max_episode_length", 1000)
render: bool = kwargs.get("render", False)
# Create and Start Environment
_env = load_env(env_dict, obs_builder_object=obs_builder)
obs, info = _env.reset(regenerate_rail=False, regenerate_schedule=True, )
score = 0
_trajectories = [Trajectory() for _ in _env.get_agent_handles()]
# Create and Start Controller
controller: AbstractController = controller_creator()
start = time.time()
controller.start_of_round(obs=obs, env=_env)
total_controller_time += time.time() - start
if render:
env_renderer = RenderTool(_env)
env_renderer.reset()
for step in range(max_episode_length):
start = time.time()
action_dict, processed_obs = controller.act(observation=obs)
total_controller_time += time.time() - start
next_obs, all_rewards, done, info = _env.step(action_dict)
if render:
env_renderer.render_env(show=True, show_observations=True, show_predictions=False)
# Save actions and rewards for each agent
[_trajectories[agent_handle].add_row(
state=processed_obs[agent_handle],
action=action_dict[agent_handle],
reward=all_rewards[agent_handle],
done=done[agent_handle])
for agent_handle in _env.get_agent_handles()]
score += sum(all_rewards)
obs = next_obs.copy()
if done['__all__']:
break
if render:
env_renderer.close_window()
# print(f"\nController took a total time of: {total_controller_time} seconds", flush=True)
return _trajectories | 91b64a8df57e1fc47ffecc184b0473d633b545c4 | 3,220 |
def get_data(request: Request):
"""
Get the data page.
Parameters
----------
request : Request
The request object.
Returns
-------
HTMLResponse
The data page.
"""
return templates.TemplateResponse("data.html", {"request": request}) | 4a44df5122f9db9009a769d4d9bec99d924fb0f7 | 3,222 |
def remove_last_measurements(dag_circuit, perform_remove=True):
"""Removes all measurements that occur as the last operation
on a given qubit for a DAG circuit. Measurements that are followed by
additional gates are untouched.
    This operation is done in-place on the input DAG circuit if perform_remove=True.
Parameters:
dag_circuit (qiskit.dagcircuit._dagcircuit.DAGCircuit): DAG circuit.
perform_remove (bool): Whether to perform removal, or just return node list.
Returns:
list: List of all measurements that were removed.
"""
removed_meas = []
try:
meas_nodes = dag_circuit.get_named_nodes('measure')
except DAGCircuitError:
return removed_meas
for idx in meas_nodes:
_, succ_map = dag_circuit._make_pred_succ_maps(idx)
if len(succ_map) == 2:
            # All successors of the measurement are outputs, one for qubit and one for cbit
# (As opposed to more gates being applied), and it is safe to remove the
# measurement node and add it back after the swap mapper is done.
removed_meas.append(dag_circuit.multi_graph.node[idx])
if perform_remove:
dag_circuit._remove_op_node(idx)
return removed_meas | 858a16c33de67f835cf32535f2e69f9c144d6e25 | 3,223 |
def J(*args, **kwargs):
"""Wrapper around jsonify that sets the Content-Type of the response to
application/vnd.api+json.
"""
response = jsonify(*args, **kwargs)
response.mimetype = "application/vnd.api+json"
return response | 714537b180cab60b7ad614018fa551020aeee292 | 3,225 |
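# A hedged sketch of using J inside a Flask view so the response carries the
# JSON:API content type; the app and route are illustrative, and `jsonify` is
# assumed to be imported in the module where J is defined.
from flask import Flask

app = Flask(__name__)

@app.route("/articles")
def list_articles():
    return J(data=[{"type": "articles", "id": "1"}])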
def is_gzipped(filename):
""" Returns True if the target filename looks like a GZIP'd file.
"""
with open(filename, 'rb') as fh:
return fh.read(2) == b'\x1f\x8b' | b1afb5b9cddc91fbc304392171f04f4b018fa929 | 3,227 |
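# A self-contained check of is_gzipped: write a real gzip stream to a temporary
# file and confirm the magic-number test recognises it.
import gzip
import tempfile

with tempfile.NamedTemporaryFile(suffix=".gz", delete=False) as tmp:
    tmp_path = tmp.name
with gzip.open(tmp_path, "wb") as gz:
    gz.write(b"hello")
assert is_gzipped(tmp_path)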
def tag_helper(tag, items, locked=True, remove=False):
""" Simple tag helper for editing a object. """
if not isinstance(items, list):
items = [items]
data = {}
if not remove:
for i, item in enumerate(items):
tagname = '%s[%s].tag.tag' % (tag, i)
data[tagname] = item
if remove:
tagname = '%s[].tag.tag-' % tag
data[tagname] = ','.join(items)
data['%s.locked' % tag] = 1 if locked else 0
return data | 27500df099824fff1d93afbe7649d42141ffa9c1 | 3,229 |
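# Example of the flat dict tag_helper produces for an edit call; the tag name
# "genre" and the values are arbitrary.
params = tag_helper("genre", ["Comedy", "Drama"])
# {'genre[0].tag.tag': 'Comedy', 'genre[1].tag.tag': 'Drama', 'genre.locked': 1}
removal = tag_helper("genre", "Comedy", remove=True)
# {'genre[].tag.tag-': 'Comedy', 'genre.locked': 1}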
def get_keys_from_file(csv):
"""Extract the credentials from a csv file."""
lines = tuple(open(csv, 'r'))
creds = lines[1]
access = creds.split(',')[2]
secret = creds.split(',')[3]
return access, secret | eccf56c52dd82656bf85fef618133f86fd9276e6 | 3,230 |
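# Illustration of the CSV layout this helper assumes (an AWS-style credentials
# export where columns 2 and 3 hold the access and secret keys); the file
# contents below are made up.
import tempfile

csv_text = (
    "User name,Password,Access key ID,Secret access key,Console login link\n"
    "alice,,AKIAEXAMPLE,wJalrEXAMPLEKEY,https://example.invalid\n"
)
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as tmp:
    tmp.write(csv_text)
    creds_path = tmp.name
access, secret = get_keys_from_file(creds_path)
assert (access, secret) == ("AKIAEXAMPLE", "wJalrEXAMPLEKEY")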
import pkg_resources
import json
def fix_model(project, models, invert=False):
"""Fix model name where file attribute is different from values accepted by facets
>>> fix_model('CMIP5', ['CESM1(BGC)', 'CESM1-BGC'])
['CESM1(BGC)', 'CESM1(BGC)']
>>> fix_model('CMIP5', ['CESM1(BGC)', 'CESM1-BGC'], invert=True)
['CESM1-BGC', 'CESM1-BGC']
Args:
project (str): data project
models (list) models to convert
invert (bool): Invert the conversion (so go from ``CESM1(BGC)`` to ``CESM1-BGC``)
"""
project = project.upper().split('-')[0]
if project in ['CMIP5', 'CORDEX']:
mfile = pkg_resources.resource_filename(__name__, 'data/'+project+'_model_fix.json')
with open(mfile, 'r') as f:
mdict = json.loads(f.read())
if invert:
mfix = {v: k for k, v in mdict.items()}
else:
mfix = mdict
return [mfix[m] if m in mfix.keys() else m for m in models] | b1e91ba7305a75ed376948d92607b1ab97bc93f2 | 3,231 |
from typing import Union
from typing import List
from typing import Tuple
def _get_choices(choices: Union[str, List]) -> List[Tuple[str, str]]:
"""Returns list of choices, used for the ChoiceFields"""
result = [('', '')]
if isinstance(choices, str):
result.append((choices, choices))
else:
for choice in choices:
result.append((choice, choice))
return result | 249a068571b0ddca858cd8dcc4f2f7af25689b9d | 3,235 |
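# Expected behaviour of _get_choices: a lone string becomes one choice, a list
# becomes several, and a blank option is always prepended (handy for optional
# Django ChoiceFields).
assert _get_choices("red") == [('', ''), ('red', 'red')]
assert _get_choices(["red", "blue"]) == [('', ''), ('red', 'red'), ('blue', 'blue')]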
def invalid_file():
"""Create an invalid filename string."""
return "/tmp/INVALID.FILE" | 9a249f3ef9445cb78bc962e46ef524360bb44bdb | 3,236 |
def get_model(app_label, model_name):
"""
    Fetches a Django model using the app registry.
    All other methods to access models might raise an exception about
    the registry not being ready yet.
    This doesn't require that an app with the given app label exists,
    which makes it safe to call when the registry is being populated.
    Raises LookupError if the model isn't found.
"""
try:
return apps.get_model(app_label, model_name)
except AppRegistryNotReady:
if apps.apps_ready and not apps.models_ready:
# if this function is called while `apps.populate()` is
            # loading models, ensure that the module that defines
            # the target model has been imported and try looking the
            # model up in the app registry. This effectively emulates
# `from path.to.app.models import Model` where we use
# `Model = get_model('app', 'Model')` instead
app_config = apps.get_app_config(app_label)
# `app_config.import_models()` cannot be used here because
            # it would interfere with `apps.populate()`
import_module("%s.%s" % (app_config.name, MODELS_MODULE_NAME))
# In order to account for case-insensitivity of model_name,
# look up the model through a private API of the app registry.
return apps.get_registered_model(app_label, model_name)
else:
# This must be a different case (e.g. the model really doesn't
# exist). We just re-raise the exception.
raise | dd3ba70f2220ba09d256ae58b418cd3401f129e6 | 3,237 |
def affaires_view(request):
"""
Return all affaires
"""
# Check connected
if not check_connected(request):
raise exc.HTTPForbidden()
query = request.dbsession.query(VAffaire).order_by(VAffaire.id.desc()).all()
return Utils.serialize_many(query) | c44e703680034230121c426e7496746355b8ee4b | 3,238 |
from typing import Union
def metric_try_to_float(s: str) -> Union[float, str]:
"""
Try to convert input string to float value.
Return float value on success or input value on failure.
"""
v = s
try:
if "%" in v:
v = v[:-1]
return float(v)
except ValueError:
return str(s) | 6b0121469d35bc6af04d4808721c3ee06955d02e | 3,239 |
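# Expected behaviour of metric_try_to_float: percentages and plain numbers are
# converted, anything non-numeric falls back to the original string.
assert metric_try_to_float("95%") == 95.0
assert metric_try_to_float("0.5") == 0.5
assert metric_try_to_float("n/a") == "n/a"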
def _table_difference(left: TableExpr, right: TableExpr):
"""
Form the table set difference of two table expressions having identical
schemas. A set difference returns only the rows present in the left table
that are not present in the right table
Parameters
----------
left : TableExpr
right : TableExpr
Returns
-------
difference : TableExpr
"""
return ops.Difference(left, right).to_expr() | aae66ddb29d30a0bc95750d62ce87c16773d3d63 | 3,241 |
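# A hedged usage sketch with ibis: two tables that share a schema, where the
# result keeps only rows of `left` that never appear in `right`. The table
# names and columns are made up and nothing is executed against a backend.
import ibis

left = ibis.table([("id", "int64"), ("name", "string")], name="left_t")
right = ibis.table([("id", "int64"), ("name", "string")], name="right_t")
diff = _table_difference(left, right)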
def parse_args():
"""
    It parses the command-line arguments (read from sys.argv).
Returns
-------
parsed_args : argparse.Namespace
It contains the command-line arguments that are supplied by the user
"""
parser = ap.ArgumentParser(description="Encoding algorithm.")
parser.add_argument("docking_program", type=str,
help="Path to folder containing the PDB files.")
parser.add_argument("output", type=str,
help="Path to the output file.")
parser.add_argument("-c","--n_proc", type=int,
                        help='Number of processors.', default=1)
parser.add_argument("--chain", type=str,
help='Chain ID from the ligand protein.', default = 'B')
parser.add_argument("--score", type=str,
help='Path to normalized scoring file to add in the ' +
'encoding.')
parsed_args = parser.parse_args()
return parsed_args | 7849b9a1422e959be9e5b2504dc7d42c2475572d | 3,243 |
def parse_row(row, entity_dict, span_capture_list, previous_entity):
""" updates the entity dict and span capture list based on row contents """
bio_tag, entity = parse_tag(row.tag)
if bio_tag == 'B':
# update with previous entity, if applicable
entity_dict, span_capture_list, previous_entity = update_entity_dict(entity_dict, span_capture_list, previous_entity)
# start collecting new entity
span_capture_list = [row.word]
previous_entity = entity
elif bio_tag == 'I':
# continue collecting entity
span_capture_list.append(row.word)
else:
# update with previous entity, if applicable
entity_dict, span_capture_list, previous_entity = update_entity_dict(entity_dict, span_capture_list, previous_entity)
previous_entity = None
return entity_dict, span_capture_list, previous_entity | a7d49b6e4dbe747c65688c01652f1d413314b407 | 3,244 |
def _is_fn_init(
tokens: list[Token] | Token,
errors_handler: ErrorsHandler,
path: str,
namehandler: NameHandler,
i: int = 0
):
""" "fn" <fn-name> "("<arg>*")" (":" <returned-type>)? <code-body>"""
tokens = extract_tokens_with_code_body(tokens, i)
if tokens is None or not is_kw(tokens[0], 'fn'):
return False
has_type_annotation = len(tokens) >= 4 and is_op(tokens[3], '->')
if len(tokens) < 4 or not is_base_name(tokens[1]) or tokens[2].type != TokenTypes.PARENTHESIS \
or not _is_code_body(tokens[-1]) or (
has_type_annotation and not _is_type_expression(tokens[:-1], errors_handler, path, namehandler, 4)
) or (not has_type_annotation and len(tokens) != 4):
errors_handler.final_push_segment(
path,
'SyntaxError: invalid syntax',
tokens[-1],
fill=True
)
return False
args_tokens = tokens[2].value
if args_tokens:
if args_tokens[0].type == TokenTypes.TUPLE:
has_default_argument = False
for arg_tokens in args_tokens[0].value:
if not arg_tokens:
break
if not _is_setvalue_expression(arg_tokens, errors_handler, path, namehandler, init_type='let'):
errors_handler.final_push_segment(
path,
'SyntaxError: invalid syntax',
arg_tokens[0],
fill=True
)
return False
if DummyToken(TokenTypes.OP, '=') in arg_tokens:
has_default_argument = True
elif has_default_argument:
errors_handler.final_push_segment(
path,
'SyntaxError: non-default argument follows default argument',
arg_tokens[0],
fill=True
)
return False
elif not _is_setvalue_expression(args_tokens, errors_handler, path, namehandler, init_type='let'):
return False
return True | 2e65dbe9e7976f7e13215fbf04bd40f08da7e16e | 3,245 |
import asyncio
from typing import cast
async def http_connect(address: str, port: int) -> HttpConnection:
"""Open connection to a remote host."""
loop = asyncio.get_event_loop()
_, connection = await loop.create_connection(HttpConnection, address, port)
return cast(HttpConnection, connection) | 2d98815b17f6d0e03763b643a052737f6931a33f | 3,246 |
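# A minimal usage sketch: the address and port are placeholders, and
# HttpConnection is assumed to be the protocol class defined elsewhere in this
# module. Uncomment the last line to actually run it inside an event loop.
async def _demo_http_connect():
    connection = await http_connect("192.168.1.10", 7000)
    return connection

# asyncio.run(_demo_http_connect())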
def make_parallel_transformer_config() -> t5_architecture.EncoderDecoder:
"""Returns an EncoderDecoder with parallel=True."""
dtype = jnp.bfloat16
num_attn_heads = 8
make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
make_layer_norm = layer_norm.T5LayerNorm
def _make_encoder_layer(shared_relative_position_bias):
assert shared_relative_position_bias is None
return t5_architecture.EncoderLayer(
attention=make_attention1(num_attn_heads, dtype),
mlp=make_mlp1(dtype),
dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
relative_position_bias_factory=(
lambda: _make_relative_position_bias(num_attn_heads, dtype)),
parallel=True,
)
def _make_decoder_layer(shared_relative_position_bias):
assert shared_relative_position_bias is None
return t5_architecture.DecoderLayer(
self_attention=make_attention1(num_attn_heads, dtype),
encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
mlp=make_mlp1(dtype),
dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
relative_position_bias_factory=(
lambda: _make_relative_position_bias(num_attn_heads, dtype)),
parallel=True,
)
def _make_encoder(shared_token_embedder):
assert shared_token_embedder is None
return t5_architecture.Encoder(
num_layers=3,
token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
layer_factory=_make_encoder_layer,
input_dropout_factory=make_dropout,
output_dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
dtype=dtype,
)
def _make_decoder(shared_token_embedder):
assert shared_token_embedder is None
return t5_architecture.Decoder(
num_layers=2,
token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
layer_factory=_make_decoder_layer,
dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
output_logits_factory=None,
dtype=dtype,
)
return t5_architecture.EncoderDecoder(
shared_token_embedder_factory=lambda: None,
encoder_factory=_make_encoder,
decoder_factory=_make_decoder,
) | f61c02d66075fb71fbecd58e8c369a6ba406c15f | 3,247 |
def get_device_mapping(embedding_sizes, num_gpus, data_parallel_bottom_mlp,
experimental_columnwise_split, num_numerical_features):
"""Get device mappings for hybrid parallelism
Bottom MLP running on device 0. Embeddings will be distributed across among all the devices.
Optimal solution for partitioning set of N embedding tables into K devices to minimize maximal subset sum
is an NP-hard problem. Additionally, embedding tables distribution should be nearly uniform due to the performance
constraints. Therefore, suboptimal greedy approach with max bucket size is used.
    Args:
        embedding_sizes (Sequence[int]): embedding table sizes
        num_gpus (int): number of GPUs to distribute over. Default 8.
        data_parallel_bottom_mlp (bool): run the bottom MLP data-parallel on every GPU
        experimental_columnwise_split (bool): place every embedding table on every GPU
            (columnwise split)
        num_numerical_features (int): number of numerical (dense) input features
    Returns:
        device_mapping (MultiGpuMetadata): bottom MLP ranks, embedding table ids per rank,
            and feature counts per rank
"""
if num_numerical_features == 0:
bottom_mlp_ranks = []
elif data_parallel_bottom_mlp:
bottom_mlp_ranks = list(range(num_gpus))
else:
bottom_mlp_ranks = [0]
if experimental_columnwise_split:
gpu_buckets = num_gpus * [list(range(len(embedding_sizes)))]
vectors_per_gpu = [len(bucket) for bucket in gpu_buckets]
if num_numerical_features > 0:
vectors_per_gpu[0] += 1 # count bottom mlp
return MultiGpuMetadata(bottom_mlp_ranks=bottom_mlp_ranks,
rank_to_categorical_ids=gpu_buckets,
rank_to_feature_count=vectors_per_gpu)
if num_gpus > 4 and not data_parallel_bottom_mlp and num_numerical_features > 0:
# for higher no. of GPUs, make sure the one with bottom mlp has no embeddings
gpu_buckets = distribute_to_buckets(embedding_sizes, num_gpus - 1) # leave one device out for the bottom MLP
gpu_buckets.insert(0, [])
else:
gpu_buckets = distribute_to_buckets(embedding_sizes, num_gpus)
vectors_per_gpu = [len(bucket) for bucket in gpu_buckets]
if not data_parallel_bottom_mlp:
for rank in bottom_mlp_ranks:
vectors_per_gpu[rank] += 1 # count bottom mlp
return MultiGpuMetadata(bottom_mlp_ranks=bottom_mlp_ranks,
rank_to_categorical_ids=gpu_buckets,
rank_to_feature_count=vectors_per_gpu) | 2265831d87d8f48c4b87ca020c7f56293cb62647 | 3,248 |
def _generate_relative_positions_embeddings(length, depth,
max_relative_position, name):
"""Generates tensor of size [length, length, depth]."""
with tf.variable_scope(name):
relative_positions_matrix = _generate_relative_positions_matrix(
length, max_relative_position)
vocab_size = max_relative_position * 2 + 1
# Generates embedding for each relative position of dimension depth.
embeddings_table = tf.get_variable("embeddings", [vocab_size, depth])
embeddings = tf.gather(embeddings_table, relative_positions_matrix)
return embeddings | 7c69705cf5cc161144181b09a377f66d863b12ae | 3,249 |
def continTapDetector(
fs: int, x=[], y=[], z=[], side='right',
):
"""
Detect the moments of finger-raising and -lowering
during a fingertapping task.
Function detects the axis with most variation and then
first detects several large/small pos/neg peaks, then
the function determines sample-wise in which part of a
movement or tap the acc-timeseries is, and defines the
exact moments of finger-raising, finger-lowering, and
the in between stopping moments.
Input:
- x, y, z (arr): all three one-dimensional data-
arrays containing one acc-axis each. Exact
labeling x/y/z is not important. Should have equal
lengths. Typically timeseries from one run.
- fs (int): corresponding sample frequency
- side (string): side where acc-data origin from
    Return:
        - tapi (list of lists): for every detected tap, the sample indices of
        [finger-raise start, fastest raise, raise end,
        finger-lowering start, fastest lowering, lowering end]
        (np.nan where a moment was not detected)
        - tapTimes (list of lists): the same moments as timestamps
        (in seconds from array-start), with undetected moments omitted
        - endPeaks (array): sample indices of the detected end-of-tap
        (impact) peaks
"""
    # input sanity checks
    if x != [] and y != []:
        assert len(x) == len(y), 'Arrays X and Y should have equal lengths'
    if x != [] and z != []:
        assert len(x) == len(z), 'Arrays X and Z should have equal lengths'
    if z != [] and y != []:
        assert len(y) == len(z), 'Arrays Y and Z should have equal lengths'
    assert side in ['left', 'right'], 'Side should be left or right'
ax_arrs = []
for ax in [x, y, z]:
if ax != []: ax_arrs.append(ax)
# Find axis with most variation
maxVar = np.argmax([variation(arr) for arr in ax_arrs])
# maxRMS = np.argmax([sum(arr) for arr in ax_arrays])
sig = ax_arrs[maxVar] # acc-signal to use
# check data for pos/neg and order of magn
sig = check_PosNeg_and_Order(sig, fs)
# add differential of signal
sigdf = np.diff(sig)
# timestamps from start (in sec)
timeStamps = np.arange(0, len(sig), 1 / fs)
# Thresholds for movement detection
posThr = np.mean(sig)
negThr = -np.mean(sig)
# Find peaks to help movement detection
peaksettings = {
'peak_dist': 0.1,
'cutoff_time': .25,
}
# find relevant positive peaks
posPeaks = find_peaks(
sig,
height=(posThr, np.max(sig)),
distance=fs * .05, # settings[task]['peak_dist']
)[0]
# select Pos-peaks with surrounding >> Pos and Neg Diff
endPeaks = [np.logical_or(
any(sigdf[i -3:i + 3] < np.percentile(sig, 10)),
any(sigdf[i -3:i + 3] > np.percentile(sig, 90))
) for i in posPeaks]
endPeaks = posPeaks[endPeaks]
# delete endPeaks from posPeaks
for i in endPeaks:
idel = np.where(posPeaks == i)
posPeaks = np.delete(posPeaks, idel)
# delete endPeaks which are too close after each other
# by starting with std False before np.diff, the diff-
# scores represent the distance to the previous peak
tooclose = endPeaks[np.append(
np.array(False), np.diff(endPeaks) < (fs / 6))]
for p in tooclose:
i = np.where(endPeaks == p)
endPeaks = np.delete(endPeaks, i)
posPeaks = np.append(posPeaks, p)
# double check endPeaks with np.diff
hop = 3
endP2 = []
for n in np.arange(hop, sig.shape[0]):
if np.logical_and(
any(np.diff(sig)[n - hop:n] > np.percentile(sig, 90)),
any(np.diff(sig)[n- hop:n] < np.percentile(sig, 10))
): # if diff is above extremes within hop-distance
endP2.append(n)
endP2 = list(compress(endP2, np.diff(endP2) > hop))
for p2 in endP2: # add to endPeaks if not containing
if min(abs(p2 - endPeaks)) > 5:
endPeaks = np.append(endPeaks, p2)
smallNeg = find_peaks(
-1 * sig, # convert pos/neg for negative peaks
height=(-.5e-7, abs(np.min(sig)) * .5),
distance=fs * peaksettings['peak_dist'] * .5,
prominence=abs(np.min(sig)) * .05,
# wlen=40,
)[0]
# largeNeg = find_peaks(
# -1 * sig,
# height=abs(np.min(sig)) * .4,
# # first value is min, second is max
# distance=fs * peaksettings['peak_dist'],
# # prominence=np.min(yEpoch) * .1,
# # wlen=40,
# )[0]
# Lists to store collected indices and timestamps
tapi = [] # list to store indices of tap
movei = [] # list to store indices of other move
resti = [] # list to store indices of rest
resttemp = [] # temp-list to collect rest-indices [1st, Last]
starttemp = [np.nan] * 6 # for during detection process
# [startUP, fastestUp, stopUP,
# startDown, fastestDown, stopDown]
tempi = starttemp.copy() # to start process
state = 'lowRest'
# Sample-wise movement detection
for n, y in enumerate(sig[:-1]):
if state == 'otherMov':
# PM LEAVE OUT OTHER-MOV-STATE
if n in endPeaks: # during other Move: end Tap
tempi[-1] = n # finish and store index list
if (tempi[-1] - tempi[0]) > fs * .1:
movei.append(tempi) # save if long enough
state='lowRest'
tempi = starttemp.copy() # after end: start lowRest
continue
try:
next10 = sum([negThr < Y < posThr for Y in sig[range(n, n + int(fs * .2)
)]])
if next10 > (fs * .2) * .8:
# End 'other move' if 8 / 10 next samples are inactive
tempi[-1] = n # END of OTHER MOVE
if (tempi[-1] - tempi[0]) > fs * .1:
movei.append(tempi)
tempi = starttemp.copy() # after end: start lowRest
state = 'lowRest'
except IndexError: # prevent indexerror out of range for next10
# print('end of timeseries')
continue
elif state == 'lowRest':
if np.logical_and(
y > posThr, # if value is over pos-threshold
sigdf[n] > np.percentile(sigdf, 75) # AND diff is over Thr
# any([Y in posPeaks for Y in range(n, n + int(fs * .2))]) # USED IN PAUSED
):
if resttemp: # close and store active rest period
resttemp.append(n) # Add second and last rest-ind
if (resttemp[1] - resttemp[0]) > fs: # if rest > 1 sec
resti.append(resttemp) # add finished rest-indices
resttemp = [] # reset resttemp list
state='upAcc1'
tempi[0] = n # START TIME Tap-UP
# print('save start UP', n)
# elif np.logical_or(
# np.logical_or(n in posPeaks, n in smallNeg[0]),
# ~ (negThr < y < posThr)
# ):
# if resttemp: # close and store active rest period
# resttemp.append(n) # Add second and last rest-ind
# if (resttemp[1] - resttemp[0]) > fs: # if rest > 1 sec
# resti.append(resttemp) # add finished rest-indices
# resttemp = [] # reset resttemp list
# state = 'otherMov'
# tempi.append(n) # START TIME Othermovement
elif n in endPeaks: # during lowRest, endPeak found
resttemp.append(n) # Add second and last rest-ind
if (resttemp[1] - resttemp[0]) > fs: # if rest > 1 sec
resti.append(resttemp) # add finished rest-indices
resttemp = [] # reset resttemp list
state='lowRest'
tempi = starttemp.copy() # after end: start lowRest
continue
else: # lowRest stays lowRest
if not resttemp: # if rest-temp list is empty
resttemp.append(n) # start of rest period
elif state == 'upAcc1':
if n in posPeaks:
state='upAcc2'
# acc getting less, veloc still increasing
# print('acc-peakUP detected', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state == 'upAcc2':
if y < 0: # crossing zero-line, start of decelleration
tempi[1] = n # save n as FASTEST MOMENT UP
state='upDec1'
# print('fastest point UP', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state=='upDec1':
if n in smallNeg:
state='upDec2'
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state == 'upDec2':
if np.logical_or(y > 0, sigdf[n] < 0):
# if acc is pos, or goes into acceleration
# phase of down movement
state='highRest' # end of UP-decell
tempi[2]= n # END OF UP !!!
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state == 'highRest':
if np.logical_and(
y < negThr,
sigdf[n] < 0 #np.percentile(sigdf, 25)
# from highRest: LOWERING starts when acc
# gets below negative-threshold AND when
# differential is negative
):
state='downAcc1'
tempi[3] = n # START OF LOWERING
# print('LOWERING START', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
# elif state == 'downAcc1':
# if n in largeNeg[0]:
# state='downAcc2'
# elif n - tempi[2] > (fs * peaksettings[task]['cutoff_time']):
# # if down-move takes > defined cutoff time
# state = 'otherMov' # reset to start-state
# movei.append(tempi) # newly added
# tempi = [] # newly added
# elif state == 'downAcc2':
elif state == 'downAcc1':
if np.logical_and(
y > 0,
sigdf[n] > 0
):
# if acceleration gets positive again and keeps
# one increasing (sigdf) downwards acceleration
# is finished -> ADD FASTEST DOWNW MOMENT
state='downDec1'
tempi[4] = n
# print('fastest DOWN @', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
# elif n - tempi[2] > (fs * peaksettings[task]['cutoff_time']):
# # if down-move takes > defined cutoff time
# state = 'otherMov' # reset to start-state
# movei.append(tempi) # newly added
# tempi = [] # newly added
elif state == 'downDec1':
if n in endPeaks:
state = 'downDec2'
elif state=='downDec2':
if np.logical_or(
y < 0,
sigdf[n] < 0
): # after large pos-peak, before around impact
# artefectual peaks
state='lowRest'
tempi[5] = n
# store current indices
tapi.append(tempi)
tempi = starttemp.copy() # restart w/ 6*nan
# drop first tap due to starting time
tapi = tapi[1:]
# convert detected indices-lists into timestamps
tapTimes = [] # list to store timeStamps of tap
# moveTimes = [] # alternative list for movements
# restTimes = [] # list to sore rest-timestamps
for tap in tapi: tapTimes.append(
[timeStamps[I] for I in tap if I is not np.nan]
)
# for tap in movei: moveTimes.append(timeStamps[tap])
# for tap in resti: restTimes.append(timeStamps[tap])
return tapi, tapTimes, endPeaks | 742a7f5590e8ad76e521efe5d1c293c43d71de0b | 3,250 |
def parallelize(df, func):
""" Split data into max core partitions and execute func in parallel.
https://www.machinelearningplus.com/python/parallel-processing-python/
Parameters
----------
df : pandas Dataframe
func : any functions
Returns
-------
data : pandas Dataframe
Returned dataframe of func.
"""
cores = cpu_count()
data_split = np.array_split(df, cores)
pool = Pool(cores)
    data = pd.concat(pool.map(func, data_split), ignore_index=True)
pool.close()
pool.join()
return data | dc9c085ada6ffa26675bd9c4a218cc06807f9511 | 3,251 |
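# A hedged usage sketch: applying a per-chunk cleaning function across all CPU
# cores. The DataFrame and clean_chunk below are illustrative; the callable
# passed to parallelize must accept and return a DataFrame and be defined at
# module level so multiprocessing can pickle it.
import pandas as pd

def clean_chunk(chunk):
    chunk["value"] = chunk["value"].fillna(0)
    return chunk

if __name__ == "__main__":
    df = pd.DataFrame({"value": [1.0, None, 3.0, None]})
    cleaned = parallelize(df, clean_chunk)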