import math
def get_weak_model(op, diff_type, nonzero2nonzero_weight, zero2zero_weight=0,
zero2nonzero_weight=math.inf, nonzero2zero_weight=math.inf, precision=0):
"""Return the weak model of the given bit-vector operation ``op``.
Given the `Operation` ``op``, return the
`WeakModel` of ``op`` for the `Difference` type ``diff_type``
with given class attributes ``nonzero2nonzero_weight``,
``zero2zero_weight``,
``zero2nonzero_weight``, ``nonzero2zero_weight`` and
``precision`` (see `WeakModel`).
The returned model is a subclass of `WeakModel` and `OpModel`.
.. note::
To link the returned model ``MyModel`` to ``op``
such that ``MyModel`` is used in ``propagate``,
set the ``xor_model`` or ``rx_model`` attribute of ``op``
to ``MyModel`` (e.g., ``op.xor_model = MyModel``).
See also `differential.difference.XorDiff.propagate`
or `differential.difference.RXDiff.propagate`.
::
>>> from cascada.bitvector.core import Constant, Variable
>>> from cascada.bitvector.secondaryop import LutOperation
>>> from cascada.differential.difference import XorDiff
>>> from cascada.differential.opmodel import get_weak_model
>>> class MyLut(LutOperation): pass # a 2-bit function
>>> import decimal
>>> XorWeakModelMyLut = get_weak_model(MyLut, XorDiff, decimal.Decimal(1.5), precision=1)
>>> alpha, beta = XorDiff(Variable("a", 2)), XorDiff(Variable("b", 2))
>>> f = XorWeakModelMyLut(alpha)
>>> print(f.vrepr())
XorWeakModelMyLut(XorDiff(Variable('a', width=2)))
>>> f.validity_constraint(beta)
(((a == 0b00) & (b == 0b00)) == 0b1) | ((~(a == 0b00) & ~(b == 0b00)) == 0b1)
>>> f.bv_weight(beta)
Ite(((a == 0b00) & (b == 0b00)) == 0b1, 0b00, 0b11)
>>> f.max_weight(), f.weight_width(), f.error(), f.num_frac_bits()
(3, 2, 0, 1)
"""
assert issubclass(op, operation.Operation)
if diff_type == difference.XorDiff:
prefix = "Xor"
assert zero2zero_weight == 0
# for XOR differentials with Pr. 1, an input property propagates to a unique output property
assert zero2nonzero_weight == math.inf
elif diff_type == difference.RXDiff:
prefix = "RX"
else:
raise ValueError(f"invalid diff_type {diff_type}")
_op, _diff_type = op, diff_type
_zero2zero_weight = zero2zero_weight
_nonzero2nonzero_weight = nonzero2nonzero_weight
_zero2nonzero_weight, _nonzero2zero_weight = zero2nonzero_weight, nonzero2zero_weight
_precision = precision
class MyWeakModel(abstractproperty.opmodel.WeakModel, OpModel):
op, diff_type = _op, _diff_type
zero2zero_weight = _zero2zero_weight
nonzero2nonzero_weight = _nonzero2nonzero_weight
zero2nonzero_weight = _zero2nonzero_weight
nonzero2zero_weight = _nonzero2zero_weight
precision = _precision
# def error(self): # maximum weight of a differential with n-bit input is n
# return sum(p.val.width for p in self.input_prop)
MyWeakModel.__name__ = f"{prefix}{abstractproperty.opmodel.WeakModel.__name__}{op.__name__}"
return MyWeakModel
def get_temp():
"""
Return the temperature that was read.
"""
return sensor.t
import pandas as pd
def load_clean_yield_data(yield_data_filepath):
"""
Cleans the yield data by making sure any Nan values in the columns we care about
are removed
"""
important_columns = ["Year", "State ANSI", "County ANSI", "Value"]
yield_data = pd.read_csv(yield_data_filepath).dropna(
subset=important_columns, how="any"
)
return yield_data
from collections import defaultdict
def create_element_mapping(repnames_bedfile):
"""Create a mapping of the element names to their classes and families"""
elem_key = defaultdict(lambda : defaultdict(str))
with open(repnames_bedfile, "r") as bed:
for line in bed:
l = line.strip().split("\t")
name = l[3]
class_ = l[4]
family = l[5]
elem_key[name]["class"] = class_
elem_key[name]["family"] = family
return elem_key
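# Usage sketch (hypothetical file and row; assumes the six-column repnames
# BED layout read above: chrom, start, end, name, class, family):
with open("repnames_demo.bed", "w") as fh:
    fh.write("chr1\t100\t200\tAluY\tSINE\tAlu\n")
demo_key = create_element_mapping("repnames_demo.bed")
assert demo_key["AluY"]["class"] == "SINE"
assert demo_key["AluY"]["family"] == "Alu"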
def _get_CRABI_iterators(captcha_dataframe,
train_indices,
validation_indices,
batch_size,
image_height,
image_width,
character_length,
categories):
"""
(HELPER FUNCTION)
Args:
captcha_dataframe (pandas.DataFrame): the dataset for training
train_indices (numpy.ndarray): indices of the CAPTCHA dataset used for training data
validation_indices (numpy.ndarray): indices of the CAPTCHA dataset used for validation data
batch_size (int): number of samples to process before the model is updated
image_height (int): height (in pixels) of expected input CAPTCHA image
image_width (int): width (in pixels) of expected input CAPTCHA image
character_length (int): number of characters in expected input CAPTCHA image
categories (int): number of possible characters in expected input
CAPTCHA image, specifying category count in the output layer
('10' for digits 0-9, '26' for alphabet, '36' for alphanumeric)
Returns:
pair of generator objects -> (training_set_iterator, validation_set_iterator)
"""
training_set_iterator = generate_CRABI_preprocessed_images(captcha_dataframe,
train_indices,
for_training=True,
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
categories=categories)
validation_set_iterator = generate_CRABI_preprocessed_images(captcha_dataframe,
validation_indices,
for_training=True,
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
categories=categories)
return training_set_iterator, validation_set_iterator
import uuid
def run(request, context):
"""Creates a template.
Args:
request (orchestrate_pb2.CreateTemplateRequest): Request payload.
context: Context.
Returns:
A orchestrate_pb2.CreateTemplate with the status of the request.
"""
template = request.template
print('Orchestrate.CreateTemplate name={name} project={project}'.format(
name=template.name,
project=template.project,
))
request_id = uuid.uuid4().hex
try:
# Make sure data is valid before creating individual sizes - don't want to
# clean-up half-way or leave incomplete template families.
for size in template.sizes:
validate_metadata(template, size)
# Data checks out. let's create all template sizes.
for size in template.sizes:
create_template_size(template, size)
return orchestrate_pb2.CreateTemplateResponse(
status='CREATED',
request_id=str(request_id),
)
except errors.HttpError as exception:
if exception.resp.status == 409:
message = 'A template with name {name} already exists.'.format(
name=template.name)
raise OrchestrateTemplateCreationError(message)
else:
raise
# imports assumed from the photutils/astropy context this helper comes from
import numpy as np
from astropy.modeling import models
from astropy.modeling.fitting import LevMarLSQFitter
from photutils.morphology import data_properties
def fit_2dgaussian(data, error=None, mask=None):
"""
Fit a 2D Gaussian to a 2D image.
Parameters
----------
data : array_like
The 2D array of the image.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
result : `~astropy.modeling.functional_models.Gaussian2D` instance
The best-fitting Gaussian 2D model.
"""
if error is not None:
weights = 1.0 / error
else:
weights = None
if mask is not None:
if weights is None:
weights = np.ones_like(data)
# down-weight masked pixels
weights[mask] = 1.e-20
props = data_properties(data, mask=mask)
init_amplitude = np.ptp(data)
g_init = models.Gaussian2D(
init_amplitude, props.xcentroid.value, props.ycentroid.value,
props.semimajor_axis_sigma.value, props.semiminor_axis_sigma.value,
props.orientation.value)
fitter = LevMarLSQFitter()
y, x = np.indices(data.shape)
gfit = fitter(g_init, x, y, data, weights=weights)
return gfit
import roman
def to_roman(number):
"""
Converts an arabic number within range from 1 to 4999 to the
corresponding roman number. Returns None on error conditions.
"""
try:
return roman.toRoman(number)
except (roman.NotIntegerError, roman.OutOfRangeError):
return None
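# Quick check (the `roman` package provides toRoman and the two exception
# classes caught above):
assert to_roman(1987) == "MCMLXXXVII"
assert to_roman(0) is None  # out of range -> None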
def GDAL_like(filename, fileout=""):
"""
GDAL_like
"""
BSx, BSy, Mb, Nb, M, N = 0,0, 0,0, 0,0
dataset1 = gdal.Open(filename, gdal.GA_ReadOnly)
dataset2 = None
if dataset1:
band1 = dataset1.GetRasterBand(1)
M, N = int(dataset1.RasterYSize), int(dataset1.RasterXSize)
B = dataset1.RasterCount
BSx, BSy = band1.GetBlockSize()
Nb = int(N / BSx) + (0 if N % BSx == 0 else 1)
Mb = int(M / BSy) + (0 if M % BSy == 0 else 1)
CO = ["BIGTIFF=YES"]
options = dataset1.GetMetadata("IMAGE_STRUCTURE")
if BSy > 1:
CO += ["TILED=YES", "BLOCKXSIZE=%d" % BSx, "BLOCKYSIZE=%d" % BSy]
for key in options:
if key == "COMPRESSION":
CO.append("COMPRESS=" + options[key])
else:
CO.append(key + "=" + options[key])
driver = gdal.GetDriverByName("GTiff")
fileout = fileout if fileout else forceext(filename, "copy.tif")
dataset2 = driver.Create(fileout, N, M, B, band1.DataType, CO)
dataset2.SetProjection(dataset1.GetProjection())
dataset2.SetGeoTransform(dataset1.GetGeoTransform())
for j in range(1, B + 1):
band1 = dataset1.GetRasterBand(j)
band2 = dataset2.GetRasterBand(j)
if band1.GetNoDataValue() is not None:
band2.SetNoDataValue(band1.GetNoDataValue())
else:
band2.SetNoDataValue(np.nan)
dataset1 = None
return (dataset2, BSx, BSy, Mb, Nb, M, N)
def take_attendance(methodcnt):
"""global setup_bool
if (setup_bool == False or methodcnt == False):
print ("in if statement")
setup_bool = True
else:"""
print ("checking in - F.R.")
react_with_sound(attendance_final)
client.CheckIn()
return 2
def feature_selection(data, features):
"""
Choose which features to use for training.
:param data: preprocessed dataset
:param features: list of features to use
:return: data with selected features
"""
return data[features]
def parse_docstring(docstring, line=0, filename='<string>', logger=None,
format_name=None, options=None):
# type: (str, int, Any, Optional[logging.Logger], Optional[str], Any) -> Tuple[OrderedDict[str, Arg], Optional[Arg]]
"""
Parse the passed docstring.
The OrderedDict holding parsed parameters may be sparse.
Parameters
----------
docstring : str
line : int
start line of the docstring
logger : Optional[logging.Logger]
format_name : Optional[str]
Returns
-------
params : OrderedDict[str, Arg]
results : Optional[Arg]
"""
if format_name is None or format_name == 'auto':
format_cls = guess_format(docstring)
if format_cls is None:
format_cls = RestFormat
else:
format_cls = format_map[format_name]
format = format_cls(line, filename=filename, logger=logger,
options=options)
return format.parse(docstring)
def srCyrillicToLatin(cyrillic_text):
"""
Return a conversion of the given string from cyrillic to latin, using
'digraph' letters (this means that e.g. "nj" is encoded as one character). Unknown
letters remain unchanged.
CAVEAT: this will ONLY change letters from the cyrillic subset of Unicode.
For instance, the plain ASCII letter "C" (code point 0x0043) will NOT be converted
to "S", as opposed to the cyrillic letter "C" (code point 0x0421), which WILL be converted.
If you are sure that your cyrillic string does not contain latin portions (e.g. quoted text,
company names), you can "normalize" it to cyrillic by using srNormalizeToCyrillic first.
"""
return __translate_string(cyrillic_text, __cyrillic_to_latin)
def from_phone(func=None):
"""来自手机的消息(给自己发的) FriendMsg"""
if func is None:
return from_phone
async def inner(ctx):
assert isinstance(ctx, FriendMsg)
if ctx.MsgType == MsgTypes.PhoneMsg:
return await func(ctx)
return None
return inner
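# Usage sketch (hypothetical handler; FriendMsg/MsgTypes come from the
# surrounding bot framework):
@from_phone
async def on_phone_message(ctx):
    print(ctx.MsgType)  # only reached for messages sent from one's own phone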
import pathlib
def create_scan_message():
"""Creates a dummy message of type v3.asset.file to be used by the agent for testing purposes.
The file used is the EICAR Anti-Virus Test File.
"""
file_content = (pathlib.Path(__file__).parents[0] / 'files/malicious_dummy.com').read_bytes()
selector = 'v3.asset.file'
msg_data = {'content': file_content, 'path': 'some/dummy/path'}
return message.Message.from_data(selector, data=msg_data)
import numpy as np
def is_permutation_matrix(m):
"""
Test whether a numpy array is a `permutation matrix`_.
.. _permutation_matrix: https://en.wikipedia.org/wiki/Permutation_matrix
Args:
m (mp.matrix): The matrix.
Returns:
(bool): True | False.
"""
m = np.asanyarray(m)
return (m.ndim == 2 and m.shape[0] == m.shape[1] and
(m.sum(axis=0) == 1).all() and
(m.sum(axis=1) == 1).all() and
((m == 1) | (m == 0)).all())
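# Example (using the numpy import above):
p = np.array([[0, 1, 0],
              [0, 0, 1],
              [1, 0, 0]])
assert is_permutation_matrix(p)
assert not is_permutation_matrix(np.ones((3, 3)))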
def create_security_role(connection, body, error_msg=None):
"""Create a new security role.
Args:
connection: MicroStrategy REST API connection object
body: JSON-formatted definition of the dataset. Generated by
`utils.formjson()`.
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
Complete HTTP response object.
"""
return connection.session.post(
url=f'{connection.base_url}/api/securityRoles',
headers={'X-MSTR-ProjectID': None},
json=body,
)
def dbm_to_w(dbm):
"""Convert dBm to W."""
return 10 ** (dbm / 10.) * sc.milli
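# Worked example (sc is assumed to be scipy.constants, so sc.milli == 1e-3):
# dbm_to_w(30) == 10**(30/10) * 1e-3 == 1.0 W, and dbm_to_w(0) == 1 mW.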
import numpy as np
from numpy.linalg import inv
def lml(alpha, beta, Phi, Y):
"""
4 marks
:param alpha: float
:param beta: float
:param Phi: array of shape (N, M)
:param Y: array of shape (N, 1)
:return: the log marginal likelihood, a scalar
"""
N = len(Phi)
M = len(Phi[0])
part1 = (-N*0.5)*np.log(2*np.pi)
wholePhi = np.dot(np.dot(Phi, alpha*np.identity(M)), Phi.T)
wholeBeta = beta*np.identity(N)
part2 = - 0.5*np.log(np.linalg.det(wholePhi + wholeBeta))
part3 = -0.5*np.dot(np.dot(Y.T, inv((wholePhi + wholeBeta))), Y)
logFunc = part1 + part2 + part3
return logFunc[0][0]
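# The three parts sum to log N(Y | 0, alpha*Phi*Phi^T + beta*I), the log
# marginal likelihood of linear regression with a Gaussian weight prior.
# Minimal smoke test (shapes only; values are random):
Phi_demo = np.random.randn(10, 3)
Y_demo = np.random.randn(10, 1)
print(lml(1.0, 0.1, Phi_demo, Y_demo))  # a scalar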
def balance_set(X, Y, adr_labels_size, nonadr_labels_size):
"""balances the set by doing up- and down -sampling to converge into the same class size
# Arguments
X - set samples
Y - set labels
adr_labels_size - ADR_MENTION_CLASS size
nonadr_labels_size - NON_ADR_MENTION_CLASS size
# Returns
new_X - new balanced samples
new_Y - new labels corresponding to new_X
"""
print("Performing Class Balancing...")
adr_samples_needed = nonadr_labels_size - adr_labels_size
new_X = []
new_Y = []
adr_labels_size = 0
nonadr_labels_size = 0
for index, example in enumerate(X):
if adr_samples_needed > 0:
if Y[index] == ADR_MENTION_CLASS_LABEL:
new_X.append(example) # add original 'ADR' sample
new_Y.append(ADR_MENTION_CLASS_LABEL)
new_X.append(example) # add duplicate 'ADR' sample to perform Over-Sampling
new_Y.append(ADR_MENTION_CLASS_LABEL)
adr_labels_size += 2
adr_samples_needed -= 1
else:
# we don't add original 'No ADR Mention' sample to perform Under-Sampling
adr_samples_needed -= 1
else:
if Y[index] == ADR_MENTION_CLASS_LABEL:
adr_labels_size += 1
else:
nonadr_labels_size += 1
new_X.append(example) # add original sample
new_Y.append(Y[index]) # add original label
print(" Updated dataset size: {}".format(len(new_X)))
print(" {} class size: {}".format(ADR_MENTION_CLASS_NAME, adr_labels_size))
print(" {} class size: {}".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))
return new_X, new_Y
from pathlib import Path
def load_det_lcia(result_dir, method, act_code, det_lcia_dict=None):
"""Return precalculated deterministic LCIA score"""
result_dir = Path(_check_result_dir(result_dir))
method = _check_method(method)
if not det_lcia_dict:
det_lcia_dict = _get_det_lcia_dict(result_dir, method)
if not act_code in det_lcia_dict:
raise ValueError("No deterministic result for activity with code {} "
"in deterministic LCIA dictionary".format(
act_code
))
return det_lcia_dict[act_code]
def get_geometry(location, geolevel):
"""
Get geometry of a single location code/name
"""
if not utils.is_number(location) and location != "BR":
assert geolevel, "You need to specify which geographic level this location is"
location = ibgetools.ibge_encode(location, geolevel)
if location == -1:
return shapely.geometry.Polygon([])
url = build_url(location)
geojson = get_geojson(url)
features = utils.get_features(geojson)
return shapely.geometry.shape(features[0]["geometry"])
def render_to_string(template, context={}, processors=None):
"""
A function for template rendering adding useful variables to context
automatically, according to the CONTEXT_PROCESSORS settings.
"""
if processors is None:
processors = ()
else:
processors = tuple(processors)
for processor in get_standard_processors() + processors:
context.update(processor(get_request()))
template = local.app.jinja2_env.get_template(template)
return template.render(context)
def find_node_names(structure):
""" Return the names of the nodes for the structure """
# Look through all of the items in the structure for names
# Check through each of the lists and sub-lists
names=set()
for i in xrange(len(structure)):
if isinstance(structure[i],basestring):
# do not return joins
if not structure[i] in [AND_DELIMITER, OR_DELIMITER, " "]:
names.add(structure[i])
elif isinstance(structure[i], list):
names.update(find_node_names(structure[i]))
return names
def handle_question():
"""Save response and redirect to next question."""
# get the response choice
choice = request.form['answer']
# add this response to the session
responses = session[RESPONSES_KEY]
responses.append(choice)
session[RESPONSES_KEY] = responses
if (len(responses) == len(survey.questions)):
# They've answered all the questions! Thank them.
return redirect("/complete")
else:
return redirect(f"/questions/{len(responses)}")
from typing import Callable
def makeNotePlayer(seq: Sequencer, out: PortInfo
) -> Callable[[int, bool], None]:
"""Returns a callable object that plays midi notes on a port."""
def playNote(note: int, enabled: bool) -> None:
if enabled:
seq.sendEvent(NoteOn(0, 0, note, 127), out)
else:
seq.sendEvent(NoteOff(0, 0, note, 0), out)
return playNote
import base64
def file_to_attachment(filename):
"""
Convert a file to attachment
"""
with open(filename, 'rb') as _file:
return {'_name':filename,
'content':base64.b64encode(_file.read())
}
import numpy as np
def diff_mean(rolling_window, axis=-1):
"""For M5 purposes, used on an object generated by the
rolling_window function. Returns the mean of the first
difference of a window of sales."""
return np.diff(rolling_window, axis=axis).mean(axis=axis)
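# Example (using the numpy import above): a window [1, 3, 6, 10] has first
# differences [2, 3, 4], so the mean difference is 3.0:
assert diff_mean(np.array([1, 3, 6, 10])) == 3.0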
import numpy
def noiseFraction(truth_h5, measured_h5, tolerance):
"""
Return the fraction of measured localizations that are greater than
tolerance pixels from the nearest truth localization.
Note: This will return 0 if there are no measured localizations.
truth_h5 - A saH5Py.SAH5Py object with the ground truth localizations.
measured_h5 - A saH5Py.SAH5Py object with the found localizations.
tolerance - The search radius in pixels.
"""
if (measured_h5.getNLocalizations() == 0):
return [0, truth_h5.getNLocalizations()]
noise_locs = 0
total_locs = 0
for i in range(truth_h5.getMovieLength()):
t_locs = truth_h5.getLocalizationsInFrame(i)
m_locs = measured_h5.getLocalizationsInFrame(i)
if bool(t_locs) and bool(m_locs):
dist = iaUtilsC.peakToPeakDistAndIndex(t_locs['x'], t_locs['y'],
m_locs['x'], m_locs['y'],
max_distance = tolerance)[0]
noise_locs += numpy.count_nonzero((dist < 0.0))
total_locs += dist.size
elif bool(t_locs):
total_locs += t_locs['x'].size
return [noise_locs, total_locs]
def compute_rigid_flow(depth, pose, intrinsics, reverse_pose=False):
"""Compute the rigid flow from target image plane to source image
Args:
depth: depth map of the target image [batch, height_t, width_t]
pose: target to source (or source to target if reverse_pose=True)
camera transformation matrix [batch, 6], in the order of
tx, ty, tz, rx, ry, rz;
intrinsics: camera intrinsics [batch, 3, 3]
Returns:
Rigid flow from target image to source image [batch, height_t, width_t, 2]
"""
with tf.variable_scope('compute_rigid_flow'):
batch, height, width = depth.get_shape().as_list()
# Convert pose vector to matrix
pose = pose_vec2mat(pose)
if reverse_pose:
pose = tf.matrix_inverse(pose)
# Construct pixel grid coordinates
pixel_coords = meshgrid(batch, height, width)
tgt_pixel_coords = tf.transpose(pixel_coords[:,:2,:,:], [0, 2, 3, 1])
# Convert pixel coordinates to the camera frame
cam_coords = pixel2cam(depth, pixel_coords, intrinsics)
# Construct a 4x4 intrinsic matrix
filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
filler = tf.tile(filler, [batch, 1, 1])
intrinsics = tf.concat([intrinsics, tf.zeros([batch, 3, 1])], axis=2)
intrinsics = tf.concat([intrinsics, filler], axis=1)
# Get a 4x4 transformation matrix from 'target' camera frame to 'source'
# pixel frame.
proj_tgt_cam_to_src_pixel = tf.matmul(intrinsics, pose)
src_pixel_coords = cam2pixel(cam_coords, proj_tgt_cam_to_src_pixel)
rigid_flow = src_pixel_coords - tgt_pixel_coords
return rigid_flow
def get_versions(script_name):
""" 返回指定名称脚本含有的所有版本。"""
versions = repository.get(script_name, None)
if not versions:
return None
return sorted(versions, reverse=True)
from typing import Any
def linear_search_while(lst: list, value: Any) -> int:
"""Return the index of the first occurrence of value in lst, or return
-1 if value is not in lst.
>>> linear_search_while([2, 5, 1, -3], 5)
1
>>> linear_search_while([2, 4, 2], 2)
0
>>> linear_search_while([2, 5, 1, -3], 4)
-1
>>> linear_search_while([], 5)
-1
"""
i = 0 # The index of the next item in lst to examine.
# Keep going until we reach the end of lst or until we find value.
while i != len(lst) and lst[i] != value:
i = i + 1
# If we fell off the end of the list, we didn't find value.
if i == len(lst):
return -1
else:
return i
def get_pagerduty_secret_name():
"""
Get name of the PagerDuty secret for currently used addon.
Returns:
string: name of the secret
"""
return config.DEPLOYMENT["addon_name"] + constants.MANAGED_PAGERDUTY_SECRET_SUFFIX
from typing import Dict
def check_docs(
doc_path: str, recurse: bool = True, max_threads: int = 10, delay: float = 0
) -> Dict[str, Dict[str, UrlResult]]:
"""
Check multiple HTML files in `doc_path`.
Parameters
----------
doc_path : str
Path
recurse: bool
If True, recurse subfolders, default is True
max_threads: int, optional
The maximum number of async threads to run
delay: float, optional
Seconds delay between requests
Returns
-------
Dict[str, Dict[str, UrlResult]]
Dictionary of pages checked. Results for each page
is a dictionary of checked links for the page.
"""
page_results: Dict[str, Dict[str, UrlResult]] = defaultdict(dict)
link_results: Dict[str, UrlResult] = {}
links_to_check = _get_links_from_files(doc_path, recurse)
print(f"Checking links {len(links_to_check)}...")
checked_links = check_uris(links_to_check, max_threads, delay)
print("\ndone")
for result in checked_links:
link_results[result.url] = result
src_pages = links_to_check[result.url]
for src_page in src_pages:
page_results[src_page][result.url] = result
_print_url_results(page_results)
return page_results
import numpy as np
def timeParser(dstr):
"""
parse clock time string into array
"""
hh, mm, ss = dstr.split(':')
return np.array([hh, mm, ss]).astype(int)
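# Quick check (using the numpy import above):
assert (timeParser("12:34:56") == np.array([12, 34, 56])).all()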
import struct
def get_43_ai_core_data(input_file=None):
"""Function for getting datas from aicore: ov/cnt/total_cyc/ov_cyc/pmu_cnt/stream_id."""
result_data = []
with open(input_file, 'rb') as ai_core_file:
while True:
line_ = ai_core_file.read(128)
if line_:
if not line_.strip():
continue
else:
break
format_ = "BBHHHIIqqqqqqqqqqIIIIIIII"
result_ = [hex(i) for i in struct.unpack(format_, line_)]
byte01 = bin(int(result_[0].replace('0x', ''), 16)).replace('0b', '').zfill(8)
ov = byte01[-4]
cnt = byte01[0:4]
total_cyc = int(result_[7].replace('0x', ''), 16)
ov_cyc = int(result_[8].replace('0x', ''), 16)
pmu_cnt = tuple(int(i.replace('0x', ''), 16) for i in result_[9:17])
stream_id = int(result_[17].replace('0x', ''), 16)
result_data.append((ov, cnt, total_cyc, ov_cyc, stream_id, pmu_cnt))
return result_data
from typing import Type
def get_configuration_class_with_attributes(
klass: Type[AlgorithmConfiguration],
) -> Type[AlgorithmConfiguration]:
"""Get AlgorithmConfiguration with set attributes.
Args:
klass: a class to be used to extract attributes from.
Returns:
a class with the attributes set.
"""
configuration_class = deepcopy(AlgorithmConfiguration)
setattr(configuration_class, "algorithm_type", klass.algorithm_type)
setattr(configuration_class, "algorithm_name", klass.algorithm_name)
setattr(configuration_class, "algorithm_application", klass.__name__)
setattr(configuration_class, "algorithm_version", klass.algorithm_version)
return configuration_class
def get_token_symbol(token_address: str):
"""
Gets the token symbol
If not have the external method `symbol` to get the score symbol,
it will raise JSONRPCException.
"""
call = CallBuilder()\
.from_(wallet.get_address())\
.to(token_address)\
.method("symbol")\
.build()
return icon_service.call(call)
import numpy as np
def iceil(x):
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such
that `i >= x`. It is often denoted as :math:`\lceil x \rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {numpy.ndarray, scalar}
The ceiling of each element in `x`, with `int` dtype.
"""
return np.ceil(x).astype(int)
from typing import Optional
import yaml
def get_repo_version(filename: str, repo: str) -> Optional[str]:
"""Return the version (i.e., rev) of a repo
Args:
filename (str): .pre-commit-config.yaml
repo (str): repo URL
Returns:
Optional[str]: the version of the repo
"""
with open(filename, "r") as stream:
pre_commit_data = yaml.safe_load(stream)
pre_config_repo = next(
(item for item in pre_commit_data["repos"] if item["repo"] == repo), None
)
if pre_config_repo:
return pre_config_repo["rev"]
return None
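# Usage sketch (hypothetical file name; the YAML layout matches what the
# function reads - a top-level "repos" list with "repo" and "rev" keys):
with open("demo-pre-commit.yaml", "w") as fh:
    fh.write("repos:\n  - repo: https://github.com/psf/black\n    rev: 22.3.0\n")
assert get_repo_version("demo-pre-commit.yaml",
                        "https://github.com/psf/black") == "22.3.0"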
from typing import Iterator
from typing import Tuple
import torch
def plot_grad_flow(named_parameters: Iterator[Tuple[str, torch.nn.Parameter]]) -> plt.Figure:
"""
Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backward() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow
"""
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if p.requires_grad and ("bias" not in n):
layers.append(n.replace('.weight', ''))
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
fig, ax = plt.subplots()
ax.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
ax.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
ax.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
ax.set_xticks(range(0, len(ave_grads), 1))
ax.set_xticklabels(layers, rotation=45)
ax.set_xlim(left=0, right=len(ave_grads))
ax.set_ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions
ax.set_xlabel("Layers")
ax.set_ylabel("average gradient")
ax.set_title("Gradient flow")
ax.grid(True)
ax.legend([Line2D([0], [0], color="c", lw=4),
Line2D([0], [0], color="b", lw=4),
Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
return fig
from typing import Dict
from typing import Any
def mk_cli_context_settings(
mk_db: CliCtxDbBase.MkFnT,
) -> Dict[str, Any]:
"""Create initial click context parameters for this cli application.
This is currently used as input for autocompletion.
Example:
`@click.group(context_settings=mk_cli_context_settings())`
See `init_cli_ctx` which depends on this.
"""
obj_d = mk_cli_db_obj_d(mk_db)
return dict(
obj=obj_d,
# It it also possible to customize cli default values from here.
# <https://click.palletsprojects.com/en/7.x/commands/#overriding-defaults>
# default_map
)
import requests
import ast
def get_file_action(header: 'dict[str,str]') -> str:
"""Gets action file form main repo
Args:
header (dict[str,str]): Header with auth token
Raises:
get_aciton_file_e: Raised when no action file was collected
Returns:
str: The content of the action file
"""
response = requests.get("https://api.github.com/repos/vovsike/ImageBuilderAPIScript/contents/action_raw.yaml", headers=header)
try:
response.raise_for_status()
except HTTPError as get_aciton_file_e:
print("Error getting action file")
raise get_aciton_file_e
content = ast.literal_eval(response.content.decode("utf-8")).get("content")
return content
from typing import Union
from typing import Callable
import warnings
def sensor(raw_input_shape: StandardizedTensorShape, f: SensorFunction = None,
sensor_id: str = None, history: int = None) \
-> Union[Callable[[SensorFunction], SensorLambda], SensorLambda]:
"""Decorator for creating sensors from functions.
Usage:
@sensor((5, 8))
def my_sensor(env, frame):
sensor_reading = np.random.uniform(0, 1, (5, 8))
return sensor_reading
kernel.add_module(my_sensor)
"""
if f is None:
kwargs = {}
if sensor_id is not None:
kwargs.update(sensor_id=sensor_id)
return partial(sensor, raw_input_shape, **kwargs)
if sensor_id is None:
sensor_id = get_default_sensor_id(f)
if sensor_id in _SENSOR_MAP:
sensor_obj = _SENSOR_MAP[sensor_id]
if isinstance(sensor_obj, SensorHistory):
wrapped = sensor_obj.wrapped
else:
wrapped = sensor_obj
if wrapped.f != f or wrapped.raw_input_shape != raw_input_shape:
warnings.warn("Redefining sensor %s with function %s and shape %s.\n"
"Original function: %s\nOriginal shape: %s" %
(sensor_id, f, raw_input_shape, wrapped.f, wrapped.raw_input_shape))
else:
return sensor_obj
sensor_obj = wraps(f)(SensorLambda(sensor_id, raw_input_shape, f))
if history is not None:
sensor_obj = SensorHistory(sensor_obj, history)
_SENSOR_MAP[sensor_id] = sensor_obj
return sensor_obj
def collection(collection, _pod=None):
"""Retrieves a collection from the pod."""
return _pod.get_collection(collection)
def appif(cfg):
"""
Return interface belonging to application
"""
return get_interface_of_network(appnet(cfg)['name'])
def f(x):
"""
Try and have the NN approximate the
xor function.
"""
if x[0] == x[1]:
return 0.
else:
return 1.
import pandas
import json
def dataframe_to_list(df: pandas.DataFrame) -> list:
"""
Use caution with datetime columns, as they may not be de/serialized as desired
"""
return json.loads(df.to_json(orient="records"))
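# Example:
df_demo = pandas.DataFrame({"a": [1, 2], "b": ["x", "y"]})
assert dataframe_to_list(df_demo) == [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]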
def decimal_to_binary(integer,nbits=8,grouped=0):
"""Converts integer to binary string of length nbits, sign bit and
then m.s.b. on the left. Negative numbers are twos-complements, i.e.,
bitwise complement + 1."""
# Just remember that minus sign and ignore it
if integer < 0:
negative = True
integer = abs(integer+1)
else:
negative = False
# build up the string
result = ''
# part of number left to process
remaining_integer = integer
while (remaining_integer > 0) & (nbits > 0):
lsb = remaining_integer % 2
if negative:
lsb = 1-lsb
result = ''.join((str(lsb),result))
remaining_integer = remaining_integer >> 1
nbits -= 1
while nbits > 0:
if negative:
result = ''.join(('1',result))
else:
result = ''.join(('0',result))
nbits -= 1
if grouped:
temp = result
result = ""
for bit in range(len(temp)):
if bit and (bit % grouped) == 0:
result += ' '
result += temp[bit]
return result
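# Worked examples: 5 -> '00000101'; -5 is its 8-bit two's complement
# (the function stores |x + 1| and flips each bit); grouped=4 adds spaces:
assert decimal_to_binary(5) == '00000101'
assert decimal_to_binary(-5) == '11111011'
assert decimal_to_binary(5, grouped=4) == '0000 0101'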
def line_integrals(state, uloc, vloc, kind="same"):
"""
calculate line integrals along all islands
Arguments:
kind: 'same' calculates only line integral contributions of an island with itself,
while 'full' calculates all possible pairings between all islands.
"""
vs = state.variables
nisle = state.dimensions["isle"]
ipx, ipy = runtime_state.proc_idx
if ipx == 0:
i = slice(1, -2)
ip1 = slice(2, -1)
else:
i = slice(2, -2)
ip1 = slice(3, -1)
if ipy == 0:
j = slice(1, -2)
jp1 = slice(2, -1)
else:
j = slice(2, -2)
jp1 = slice(3, -1)
east = (
vloc[i, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis]
+ uloc[i, jp1, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, jp1, npx.newaxis]
)
west = (
-vloc[ip1, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis]
- uloc[i, j, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, j, npx.newaxis]
)
north = (
vloc[i, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis]
- uloc[i, j, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, j, npx.newaxis]
)
south = (
-vloc[ip1, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis]
+ uloc[i, jp1, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, jp1, npx.newaxis]
)
if kind == "same":
east = npx.sum(east * vs.line_dir_east_mask[i, j], axis=(0, 1))
west = npx.sum(west * vs.line_dir_west_mask[i, j], axis=(0, 1))
north = npx.sum(north * vs.line_dir_north_mask[i, j], axis=(0, 1))
south = npx.sum(south * vs.line_dir_south_mask[i, j], axis=(0, 1))
return global_sum(east + west + north + south)
elif kind == "full":
isle_int = npx.empty((nisle, nisle))
def loop_body(isle, isle_int):
east_isle = npx.sum(
east[..., isle, npx.newaxis] * vs.line_dir_east_mask[i, j],
axis=(0, 1),
)
west_isle = npx.sum(
west[..., isle, npx.newaxis] * vs.line_dir_west_mask[i, j],
axis=(0, 1),
)
north_isle = npx.sum(
north[..., isle, npx.newaxis] * vs.line_dir_north_mask[i, j],
axis=(0, 1),
)
south_isle = npx.sum(
south[..., isle, npx.newaxis] * vs.line_dir_south_mask[i, j],
axis=(0, 1),
)
isle_int = update(isle_int, at[:, isle], east_isle + west_isle + north_isle + south_isle)
return isle_int
isle_int = for_loop(0, nisle, loop_body, isle_int)
return global_sum(isle_int)
else:
raise ValueError('"kind" argument must be "same" or "full"')
def _BBANDS(kwargs):
"""
Bollinger Bands
Technical parameters
--------------------
Uses a 21-day period and 2 standard deviations.
"""
df = kwargs.get('df')
limit_start = kwargs.get('limit_start')
limit_end = kwargs.get('limit_end')
ndays = 21
inds = indicators(
'BBANDS', df, timeperiod=ndays).loc[limit_start:limit_end, :]
traces = []
for c in inds.columns:
name = 'price_{}_{}'.format(c, ndays)
trace = go.Scatter(
x=np.arange(inds.shape[0]),
y=inds[c],
name=name,
)
traces.append(trace)
return traces
from typing import Optional
from typing import Iterable
from typing import Tuple
def make_colors(color: OpColor, fill_color: OpColor, colors: Optional[Iterable[OpColor]]) -> Tuple[OpColor, ...]:
"""Creates final colors tuple."""
if colors is None:
return conform_color(color), conform_color(fill_color), *DEFAULT_COLORS[2:]
colors = [conform_color(c) for c, _ in zip(colors, range(len(DEFAULT_COLORS)))]
colors.extend(DEFAULT_COLORS[len(colors):])
return tuple(colors)
def print_qa(questions,
answers_gt,
answers_gt_original,
answers_pred,
era,
similarity=dirac,
path=''):
"""
In:
questions - list of questions
answers_gt - list of answers (after modifications like truncation)
answers_gt_original - list of answers (before modifications)
answers_pred - list of predicted answers
era - current era
similarity - measure of similarity between gt_original and prediction;
by default the dirac measure
path - path for the output (if empty then stdout is used);
by default an empty path
Out:
the similarity score
"""
if len(questions) != len(answers_gt):
raise AssertionError('Different questions and answers_gt lengths.')
if len(questions) != len(answers_pred):
raise AssertionError('Different questions and answers_pred lengths.')
output = ['-' * 50, 'Era {0}'.format(era)]
score = 0.0
for k, q in list(enumerate(questions)):
a_gt = answers_gt[k]
a_gt_original = answers_gt_original[k]
a_p = answers_pred[k]
score += similarity(a_p, a_gt_original)
if isinstance(q[0], unicode_fn):
tmp = unicode_fn('question: {0}\nanswer: {1}\nanswer_original: {2}\nprediction: {3}\n')
else:
tmp = 'question: {0}\nanswer: {1}\nanswer_original: {2}\nprediction: {3}\n'
output.append(tmp.format(q, a_gt, a_gt_original, a_p))
score = (score / len(questions)) * 100.0
output.append('Score: {0}'.format(score))
if path == '':
print('%s' % '\n'.join(map(str, output)))
else:
list2file(path, output)
return score
def recast_to_supercell(z, z_min, z_max):
"""Gets the position of the particle at ``z`` within the simulation
supercell with boundaries ``z_min`` and ``z_max``. If the particle is
outside the supercell, it returns the position of its closest image.
:param z:
:param z_min:
:param z_max:
:return:
"""
sc_size = (z_max - z_min)
return z_min + (z - z_min) % sc_size
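# Worked example: with a supercell [0, 10), z = 12 wraps to 2 and z = -3
# wraps to 7, since Python's % already maps into [0, sc_size):
assert recast_to_supercell(12, 0, 10) == 2
assert recast_to_supercell(-3, 0, 10) == 7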
from typing import List
from typing import Optional
import io
import csv
async def get_pedigree(
internal_family_ids: List[int] = Query(None),
response_type: ContentType = ContentType.JSON,
replace_with_participant_external_ids: bool = True,
replace_with_family_external_ids: bool = True,
include_header: bool = True,
empty_participant_value: Optional[str] = None,
connection: Connection = get_project_readonly_connection,
include_participants_not_in_families: bool = False,
):
"""
Generate tab-separated Pedigree file for ALL families
unless internal_family_ids is specified.
Allow replacement of internal participant and family IDs
with their external counterparts.
"""
family_layer = FamilyLayer(connection)
assert connection.project
pedigree_dicts = await family_layer.get_pedigree(
project=connection.project,
family_ids=internal_family_ids,
replace_with_participant_external_ids=replace_with_participant_external_ids,
replace_with_family_external_ids=replace_with_family_external_ids,
empty_participant_value=empty_participant_value,
include_participants_not_in_families=include_participants_not_in_families,
)
if response_type in (ContentType.CSV, ContentType.TSV):
delim = '\t' if response_type == ContentType.TSV else ','
output = io.StringIO()
writer = csv.writer(output, delimiter=delim)
if include_header:
writer.writerow(PedRow.row_header())
keys = [
'family_id',
'individual_id',
'paternal_id',
'maternal_id',
'sex',
'affected',
]
pedigree_rows = [[(row[k] or '') for k in keys] for row in pedigree_dicts]
writer.writerows(pedigree_rows)
basefn = f'{connection.project}-{date.today().isoformat()}'
if internal_family_ids:
basefn += '-'.join(str(fm) for fm in internal_family_ids)
extension = 'ped' if response_type == ContentType.TSV else 'csv'
return StreamingResponse(
iter(output.getvalue()),
media_type=f'text/{response_type}',
headers={'Content-Disposition': f'filename={basefn}.{extension}'},
)
return pedigree_dicts
def lang_string_set_to_xml(obj: model.LangStringSet, tag: str) -> etree.Element:
"""
serialization of objects of class LangStringSet to XML
:param obj: object of class LangStringSet
:param tag: tag name of the returned XML element (incl. namespace)
:return: serialized ElementTree object
"""
et_lss = _generate_element(name=tag)
for language in obj:
et_lss.append(_generate_element(name=NS_AAS + "langString",
text=obj[language],
attributes={"lang": language}))
return et_lss
def grasp_from_contacts(contact1,contact2):
"""Helper: if you have two contacts, this returns an AntipodalGrasp"""
d = vectorops.unit(vectorops.sub(contact2.x,contact1.x))
grasp = AntipodalGrasp(vectorops.interpolate(contact1.x,contact2.x,0.5),d)
grasp.finger_width = vectorops.distance(contact1.x,contact2.x)
grasp.contact1 = contact1
grasp.contact2 = contact2
return grasp
def boolean_dumper(dumper, value):
"""
Dump booleans as yes or no strings.
"""
value = u'yes' if value else u'no'
style = None
return dumper.represent_scalar(u'tag:yaml.org,2002:bool', value, style=style)
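# Usage sketch: register the representer on PyYAML so booleans serialize
# as yes/no:
import yaml
yaml.add_representer(bool, boolean_dumper)
print(yaml.dump({"enabled": True}))  # -> "enabled: yes"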
def preprocess_data_4_catboost(data_df, output_path=None):
"""
Preprocess data for gradient boosting techniques, specifically with the
catboost library. Since this uses the preprocessing built into catboost,
the steps differ slightly from the generic pipeline.
"""
"""
train_data = Pool(
data=FeaturesData(
num_feature_data=np.array([[1, 4, 5, 6],
[4, 5, 6, 7],
[30, 40, 50, 60]],
dtype=np.float32),
cat_feature_data=np.array([[b"a", b"b"],
[b"a", b"b"],
[b"c", b"d"]],
dtype=object)
),
label=[1, 1, -1]
)
"""
new_df_w_labels = data_df.copy()
for idx, odds_string in data_df.ODDS.iteritems():
# skip data quality errors and abnormalities
if not isinstance(odds_string, str):
continue
divied_list = _preprocess_odds_string(odds_string)
for school_or_perc in divied_list:
if school_or_perc in SCHOOLS_REVERSED.keys():
school_idx = divied_list.index(school_or_perc)
# the percent is always the next index after the school
perc = divied_list[school_idx + 1]
# print "School: {};Odds: {}".format(school_or_perc,perc)
# use the standardized name
standard_school_name = SCHOOLS_REVERSED[school_or_perc]
# insert the specific name value for the correct row
new_df_w_labels.at[idx, standard_school_name] = _parse_str_nums(perc)
new_df_w_labels = _reduce_majors_dimensionality(new_df_w_labels)
# drop unused columns
data_after_drop = new_df_w_labels.drop(['ODDS', 'INTERNATIONAL', 'JOBTITLE'], axis=1, inplace=False)
# change categorical data into numeric
categorical_cols = ['UNIVERSITY', 'MAJOR', 'GENDER', 'RACE']
# a dataframe of ONLY the features
features_only_df = data_after_drop.drop(TARGET_LABELS, axis=1, inplace=False)
# determine the columns that are features by subtracting from labels
feature_cols = set(data_after_drop.columns) - set(TARGET_LABELS)
# a dataframe with ONLY labels
labels = data_after_drop.drop(feature_cols, axis=1, inplace=False)
multi_data_set_dict = {}
for school in labels.columns:
df_for_school = features_only_df.join(pd.DataFrame({school: labels[school]}))
# a holder dictionary that contains the features numpy ndarray for features and numpy ndarray for school label
school_dict = {}
# drop the NaNs from the dataset in any feature column or label. otherwise model training will fail
df_for_school.dropna(inplace=True)
# store the features as a numpy ndarray to be fed directly to model training
numerical_features_np_array = df_for_school.drop([school] + categorical_cols, axis=1, inplace=False).values
categorical_features_np_array = df_for_school[categorical_cols].values
# store the labels for a particular school as a numpy ndarray to be fed directly to model training
labels_as_list = df_for_school.drop(feature_cols, axis=1, inplace=False)[school].tolist()
datasetpool = Pool(
data=FeaturesData(
num_feature_data=np.array(numerical_features_np_array,
dtype=np.float32),
cat_feature_data=np.array(categorical_features_np_array,
dtype=object)
),
label=labels_as_list
)
multi_data_set_dict[school] = datasetpool
return multi_data_set_dict
def conv_current_to_electrons_second(current):
"""
Convert a current in Amps to a number of
electrons per second.
"""
return int(current / const.electron_charge)
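# Worked example (assuming const.electron_charge ~ 1.602e-19 C): a 1 pA
# current is about 1e-12 / 1.602e-19 ~ 6.2e6 electrons per second.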
import json
def get_users():
"""
Use urllib3 to make a REST call to get list of Okta
Users for a given Okta Application
"""
request_url = f"{OKTA_URL}/apps/{OKTA_APP_ID}/users"
okta_users_request = HTTP.request(
'GET',
request_url,
headers={'Content-Type': 'application/json', 'Authorization': OKTA_AUTH},
retries=False,
)
LOGGER.info(f"Retrieved Okta Users Information from {request_url}")
users = json.loads(okta_users_request.data.decode('utf-8'))
return users
def n_sample_per_class_train_set(df, n_samples=3, class_column="category"):
"""
returns a subset of the provided df that contains n_samples instances of each class
:param df: panda dataframe that contains hidden_reps with class labels
:param n_samples: number of samples per class
:param class_column: column with class labels in the df
:return: subset of the original df that contains maximum n_samples instances of each class
"""
assert class_column in df.columns
classes = list(set(df[class_column]))
class_count_dict = dict([(c, 0) for c in classes])
selection_array = []
for i, c in zip(df.index, df[class_column]):
if class_count_dict[c] >= n_samples:
selection_array.append(False)
continue
else:
selection_array.append(True)
class_count_dict[c] += 1
print(len(class_count_dict), len(selection_array))
assert len(selection_array) == len(df.index)
return df.copy()[selection_array]
def clip_count(cand_d, ref_ds):
"""Count the clip count for each ngram considering all references."""
count = 0
for m in cand_d.keys():
m_w = cand_d[m]
m_max = 0
for ref in ref_ds:
if m in ref:
m_max = max(m_max, ref[m])
m_w = min(m_w, m_max)
count += m_w
return count
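# Worked example (the clipped n-gram count used in BLEU): 'the' appears 3
# times in the candidate but at most 2 times in any reference, so it clips:
cand_demo = {"the": 3, "cat": 1}
refs_demo = [{"the": 2}, {"the": 1, "cat": 1}]
assert clip_count(cand_demo, refs_demo) == 3  # min(3, 2) + min(1, 1)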
def container_wrapper(directive, literal_node, caption, classes):
"""adapted from
https://github.com/sphinx-doc/sphinx/blob/master/sphinx/directives/code.py
"""
container_node = docutils.nodes.container(
'', literal_block=True, classes=classes) # ['literal-block-wrapper']
parsed = docutils.nodes.Element()
directive.state.nested_parse(StringList([caption], source=''),
directive.content_offset, parsed)
if isinstance(parsed[0], docutils.nodes.system_message):
msg = 'Invalid caption: %s' % parsed[0].astext()
raise ValueError(msg)
elif isinstance(parsed[0], docutils.nodes.Element):
caption_node = docutils.nodes.caption(parsed[0].rawsource, '',
*parsed[0].children)
caption_node.source = literal_node.source
caption_node.line = literal_node.line
container_node += caption_node
container_node += literal_node
return container_node
else:
raise RuntimeError
def get_current_and_next_quarters(request, num):
"""
Returns the current and next num uw_sws.models.Term objects in a list
for the current quarter referred to in the user session. Returns the
next num - 1 quarters along with the current one.
"""
term = get_current_quarter(request)
quarters = [term]
for x in range(1, num):
term = get_term_after(term)
quarters.append(term)
return quarters
from typing import Tuple
from typing import Dict
def _create_metadata_from_dat_df(
csv_df: pd.DataFrame,
) -> Tuple[Dict[int, tuple], Pitch]:
"""Creates meta information from the CSV file as parsed by pd.read_csv().
Parameters
----------
csv_df: DataFrame
Containing all data from the positions CSV file as DataFrame.
Returns
-------
periods: Dict[int, Tuple[int, int]]
Dictionary with start and end frames:
``periods[segment] = (startframe, endframe)``.
pitch: Pitch
Playing Pitch object.
"""
# create pitch
pi_len = csv_df["pitch_dimension_long_side"].values[0]
pi_wid = csv_df["pitch_dimension_short_side"].values[0]
pitch = Pitch.from_template(
"statsperform",
length=pi_len,
width=pi_wid,
sport="football",
)
# create periods for segments, coded as jumps in the frame sequence
periods = {}
frame_values = csv_df["frame_count"].unique()
seg_idx = np.where(np.diff(frame_values, prepend=frame_values[0]) > 1)
seg_idx = np.insert(seg_idx, 0, 0)
seg_idx = np.append(seg_idx, len(frame_values))
for segment in range(len(seg_idx) - 1):
start = int(frame_values[seg_idx[segment]])
end = int(frame_values[seg_idx[segment + 1] - 1])
periods[segment] = (start, end)
return periods, pitch
def main():
"""
Main function used in script, primarily used as a handle
to get the output into stdout.
"""
# There are no args, but parse them just so help works
print(process_files_json(), end="")
return None
def expand(doc, doc_url="param://", params=None):
"""
ASSUMING YOU ALREADY PULLED THE doc FROM doc_url, YOU CAN STILL USE THE
EXPANDING FEATURE
USE mo_json_config.expand({}) TO ASSUME CURRENT WORKING DIRECTORY
:param doc: THE DATA STRUCTURE FROM JSON SOURCE
:param doc_url: THE URL THIS doc CAME FROM (DEFAULT USES params AS A DOCUMENT SOURCE)
:param params: EXTRA PARAMETERS NOT FOUND IN THE doc_url PARAMETERS (WILL SUPERSEDE PARAMETERS FROM doc_url)
:return: EXPANDED JSON-SERIALIZABLE STRUCTURE
"""
if doc_url.find("://") == -1:
Log.error("{{url}} must have a protocol (eg http://) declared", url=doc_url)
url = URL(doc_url)
url.query = set_default(url.query, params)
phase1 = _replace_ref(doc, url) # BLANK URL ONLY WORKS IF url IS ABSOLUTE
phase2 = _replace_locals(phase1, [phase1])
return wrap(phase2)
def get_model_creator(hparams):
"""Get the right model class depending on configuration."""
if hparams.architecture == 'peng':
model_creator = model.Model
"""vanilla lstm, seq2seq"""
return model_creator
import unittest
def extra():
"""Tests faint.extra. That is, less central faint code, possibly
requiring extensions (e.g. tesseract or GraphViz dot).
"""
return unittest.defaultTestLoader.discover("py_tests/test_extra",
top_level_dir="py_tests/")
def cmd_te_activate(abs_filename):
"""最前面に持ってくる(テキストエディタ向け)
ファイルが含まれるVisual Studioを探し出して最前面に持ってくる。
abs_filename- ファイル名の絶対パス
(Ex.) c:/project/my_app/src/main.cpp
"""
return _te_main2(cmd_activate,abs_filename)
def search(search_domain, fmt=None):
"""Handle redirect from form submit."""
domain = tools.parse_post_data(search_domain)
if domain is None:
return handle_invalid_domain(search_domain)
if fmt is None:
if features.enable_async_search():
return flask.redirect('/search?ed={}'.format(search_domain))
else:
return html_render(domain)
elif fmt == 'json':
return json_render(domain)
elif fmt == 'csv':
return csv_render(domain)
else:
flask.abort(400, 'Unknown export format: {}'.format(fmt))
def get_graph_from_particle_positions(
particle_positions, box_lengths, cutoff_distance, store_positions=False
):
"""Returns a networkx graph of connections between neighboring particles
Args:
particle_positions (ndarray or dataframe): Shape
(`n_particles`, `n_dimensions`). Each of the `n_particles`
rows is a length `n_dimensions` particle position vector.
Positions must be in range [0, `box_lengths[d]`) for each
dimension `d`.
box_lengths (ndarray): Shape (`n_dimensions`,) array of box lengths for
each box dimension.
cutoff_distance (float): Maximum length between particle pairs to
consider them connected
store_positions (bool, optional): If True, store position vector data
within each node in the graph. Defaults to False.
Returns:
networkx Graph: Graph of connections between all particle pairs with
distance below cutoff_distance
"""
distances = pairwise_distances(particle_positions, box_lengths)
graph = get_within_cutoff_graph(distances, cutoff_distance)
if store_positions is True:
for particle_id, particle_position in zip(
graph.nodes, particle_positions
):
for i, x_i in enumerate(particle_position):
graph.nodes[particle_id][f"x{i}"] = x_i
return graph
import numpy as np
def cca(x,y):
""" canonical correlation analysis cca
wx, wy, r = cca(x,y) returns wx, wy two matrices which columns [:,i] correspond to the canonical weights (normalized eigenvectors) and a vector r containing the canonical correlations, all sorted in decreasing order. cca assumes as input matrices x,y of size l*m (time*nvar), and l*n, that are centered (no mean along 1st axis) within the function. cca returns an error if either x,y are not full rank."""
mx = x.shape[1]
my = y.shape[1]
l = x.shape[0] #needs to be the same for y
if l != y.shape[0]:
raise ValueError('Time dimension is not same length for x,y')
xrank = np.linalg.matrix_rank(x)
yrank = np.linalg.matrix_rank(y)
if mx > xrank:
raise ValueError('Matrix x is not full rank.')
if my > yrank:
raise ValueError("Matrix y is not full rank.")
#no mean
x = x - np.outer(x.mean(axis=0),np.ones(l)).transpose()
y = y - np.outer(y.mean(axis=0),np.ones(l)).transpose()
#covariance estimators
Sxy = np.dot(x.transpose(),y) / l
Sxx = np.dot(x.transpose(),x) / l
Syy = np.dot(y.transpose(),y) / l
B1 = np.dot(np.linalg.inv(Sxx),Sxy)
B2 = np.dot(np.linalg.inv(Syy),Sxy.transpose())
evalx, eigvx = np.linalg.eig(np.dot(B1,B2))
evaly, eigvy = np.linalg.eig(np.dot(B2,B1))
#normalize eigenvectors
eigvx = eigvx / np.outer(np.ones((mx,1)),np.sqrt((eigvx**2).sum(axis=0)))
eigvy = eigvy / np.outer(np.ones((my,1)),np.sqrt((eigvy**2).sum(axis=0)))
# eigenvalues should be the same in evalx and evaly
rx = np.sqrt(abs(evalx)) #correlation
ry = np.sqrt(abs(evaly))
#sort
ordargx = np.argsort(rx)[-1:-mx-1:-1] #decreasing order
ordargy = np.argsort(ry)[-1:-mx-1:-1]
rx = rx[ordargx]
ry = ry[ordargy]
eigvx = eigvx[:,ordargx]
eigvy = eigvy[:,ordargy]
if mx >= my:
r = rx
else:
r = ry
return eigvx, eigvy, r
def train_models(vae, emulator, em_lr, vae_lr, signal_train, dataset, val_dataset,
epochs, vae_lr_factor, em_lr_factor, vae_min_lr, em_min_lr, vae_lr_patience, em_lr_patience,
lr_max_factor, es_patience, es_max_factor):
"""
Function that train the models simultaneously
:param vae: Keras model object, the VAE
:param emulator: Keras model object, the emulator
:param em_lr: float, initial emulator learning rate
:param vae_lr: float, initial VAE learning rate
:param signal_train: numpy array of training signals
:param dataset: batches from training dataset
:param val_dataset: batches from validation dataset
:param epochs: max number of epochs to train for, early stopping may stop it before
:param vae_lr_factor: factor * old LR (learning rate) is the new LR for the VAE
:param em_lr_factor: factor * old LR (learning rate) is the new LR for the emulator
:param vae_min_lr: minimum allowed LR for VAE
:param em_min_lr: minimum allowed LR for emulator
:param vae_lr_patience: max number of epochs loss has not decreased for the VAE before reducing LR
:param em_lr_patience: max number of epochs loss has not decreased for the emulator before reducing LR
:param lr_max_factor: max_factor * current loss is the max acceptable loss, a larger loss means that the counter
is added to, when it reaches the 'patience', the LR is reduced
:param es_patience: max number of epochs loss has not decreased before early stopping
:param es_max_factor: max_factor * current loss is the max acceptable loss, a larger loss for either the VAE or the
emulator means that the counter is added to, when it reaches the 'patience', early stopping is applied
:return tuple, four lists of losses as they change with epoch for the VAE (training loss and validation loss)
and emulator (training and validation) in that order
"""
# initialize lists of training losses and validation losses
vae_loss = []
vae_loss_val = []
em_loss = []
em_loss_val = []
# Did the model loss plateau?
plateau_vae = False
plateau_em = False
vae_reduced_lr = 0 # epochs since last time lr was reduced
em_reduced_lr = 0 # epochs since last time lr was reduced
# compile the models
compile_VAE(vae, vae_lr)
compile_emulator(emulator, em_lr, signal_train)
@tf.function
def run_train_step(batch):
"""
Function that trains the VAE and emulator for one batch. Returns the losses
for that specific batch.
"""
params = batch[0]
signal = batch[1]
amp_raw = batch[2] # amplitudes, raw because we need to reshape
amplitudes = tf.expand_dims(amp_raw, axis=1) # reshape amplitudes
signal_amplitudes = tf.concat((signal, amplitudes), axis=1) # both signal and amplitude
with tf.GradientTape() as tape:
vae_pred = vae(signal) # apply VAE to signal
vae_batch_loss = vae.losses # get the loss
# back-propagate losses for the VAE
vae_gradients = tape.gradient(vae_batch_loss, vae.trainable_weights)
vae.optimizer.apply_gradients(zip(vae_gradients, vae.trainable_weights))
# same procedure for emulator
with tf.GradientTape() as tape:
em_pred = emulator(params)
loss_function = em_loss_fcn(signal_train)
em_batch_loss = loss_function(signal_amplitudes, em_pred)
em_gradients = tape.gradient(em_batch_loss, emulator.trainable_weights)
emulator.optimizer.apply_gradients(zip(em_gradients, emulator.trainable_weights))
return vae_batch_loss, em_batch_loss
# the training loop
for i in range(epochs):
epoch = int(i + 1)
print("\nEpoch {}/{}".format(epoch, epochs))
        # reduce lr if necessary (at most once every 5 epochs)
if plateau_vae and vae_reduced_lr >= 5:
reduce_lr(vae, vae_lr_factor, vae_min_lr)
vae_reduced_lr = 0
if plateau_em and em_reduced_lr >= 5:
reduce_lr(emulator, em_lr_factor, em_min_lr)
em_reduced_lr = 0
vae_batch_losses = []
val_vae_batch_losses = []
em_batch_losses = []
val_em_batch_losses = []
# loop through the batches and train the models on each batch
for batch in dataset:
vae_batch_loss, em_batch_loss = run_train_step(batch)
vae_batch_losses.append(vae_batch_loss) # append VAE train loss for this batch
em_batch_losses.append(em_batch_loss) # append emulator train loss for this batch
# loop through the validation batches, we are not training on them but
# just evaluating and tracking the performance
for batch in val_dataset:
param_val = batch[0]
signal_val = batch[1]
amp_val = tf.expand_dims(batch[2], axis=1)
val_signal_amplitudes = tf.concat((signal_val, amp_val), axis=1)
val_em_batch_loss = emulator.test_on_batch(param_val, val_signal_amplitudes)
val_vae_batch_loss = vae.test_on_batch(signal_val, signal_val)
val_vae_batch_losses.append(val_vae_batch_loss)
val_em_batch_losses.append(val_em_batch_loss)
vae_loss_epoch = K.mean(tf.convert_to_tensor(vae_batch_losses)) # average VAE train loss over this epoch
em_loss_epoch = K.mean(tf.convert_to_tensor(em_batch_losses)) # average emulator train loss
print('VAE train loss: {:.4f}'.format(vae_loss_epoch))
print('Emulator train loss: {:.4f}'.format(em_loss_epoch))
        # in case a loss is NaN
        # this is unusual, but not a big deal; just restart the training
        # (otherwise the loss just stays NaN)
if np.isnan(vae_loss_epoch) or np.isnan(em_loss_epoch):
print("Loss is NaN, restart training")
break
# save each epoch loss to a list with all epochs
vae_loss.append(vae_loss_epoch)
em_loss.append(em_loss_epoch)
        vae_loss_epoch_val = np.mean(val_vae_batch_losses)  # average VAE validation loss over this epoch
        em_loss_epoch_val = np.mean(val_em_batch_losses)  # average emulator validation loss
vae_loss_val.append(vae_loss_epoch_val)
em_loss_val.append(em_loss_epoch_val)
print('VAE val loss: {:.4f}'.format(vae_loss_epoch_val))
print('Emulator val loss: {:.4f}'.format(em_loss_epoch_val))
# save weights
if epoch == 1: # save first epoch
vae.save('checkpoints/best_vae')
emulator.save('checkpoints/best_em')
        elif em_loss_val[-1] < np.min(em_loss_val[:-1]):  # best validation loss so far
vae.save('checkpoints/best_vae')
emulator.save('checkpoints/best_em')
# early stopping?
keep_going = early_stop(es_patience, es_max_factor, vae_loss_val, em_loss_val)
if not keep_going:
break
# check if loss stopped decreasing
plateau_vae = plateau_check("vae", vae_lr_patience, lr_max_factor, vae_loss_val, em_loss_val)
plateau_em = plateau_check("emulator", em_lr_patience, lr_max_factor, vae_loss_val, em_loss_val)
vae_reduced_lr += 1
em_reduced_lr += 1
return vae_loss, vae_loss_val, em_loss, em_loss_val | a3301f178aade90cb5a5b441ccf7607e2f13c776 | 2,753 |
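A minimal sketch of how train_models might be driven. The vae, emulator,
signal_train and the batched tf.data datasets are assumed to have been built
elsewhere, and the hyperparameter values are illustrative only.

losses = train_models(
    vae, emulator, em_lr=1e-3, vae_lr=1e-3,
    signal_train=signal_train, dataset=train_batches, val_dataset=val_batches,
    epochs=200,
    vae_lr_factor=0.5, em_lr_factor=0.5,
    vae_min_lr=1e-6, em_min_lr=1e-6,
    vae_lr_patience=5, em_lr_patience=5,
    lr_max_factor=1.05,
    es_patience=15, es_max_factor=1.1,
)
vae_loss, vae_loss_val, em_loss, em_loss_val = losses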
def get_next_term(cfg):
"""
Gets the next term to be added.
Args:
cfg: Expression config
"""
term = {}
if np.random.choice(['quantity', 'number'], p=[cfg.ratio, 1 - cfg.ratio]) == 'quantity':
idx = np.random.choice(range(len(cfg.quants)))
if cfg.reuse:
term['expression'] = cfg.quants[idx]
term['numerical'] = cfg.vals[idx]
term['estimation_difficulty'] = cfg.diffs[idx]
term['quantity_ids'] = [cfg.quantity_ids[idx]]
term['categories'] = [cfg.categories[idx]]
else:
term['expression'] = cfg.quants.pop(idx)
term['numerical'] = cfg.vals.pop(idx)
term['estimation_difficulty'] = cfg.diffs.pop(idx)
term['quantity_ids'] = [cfg.quantity_ids.pop(idx)]
term['categories'] = [cfg.categories.pop(idx)]
else:
if len(cfg.numbers) != 200:
# Where we're not using the default uniform sampling over numbers
idx = int(np.random.lognormal(3, 8) + abs(np.random.normal(0, 50))) + 1
term['expression'] = str(idx)
term['numerical'] = str(idx)
term['estimation_difficulty'] = 0
term['quantity_ids'] = []
term['categories'] = []
else:
idx = np.random.choice(range(len(cfg.numbers)))
term['expression'] = str(idx)
term['numerical'] = str(idx)
term['estimation_difficulty'] = 0
term['quantity_ids'] = []
term['categories'] = []
return term | edaf22a93ce1a0c51f4193c3ea022202c8bbaaef | 2,754 |
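Hedged usage sketch: cfg is assumed to be a simple namespace carrying the
fields get_next_term reads; the concrete values are invented for illustration.

from types import SimpleNamespace
import numpy as np

cfg = SimpleNamespace(
    ratio=0.5, reuse=True,
    quants=["the height of a door"], vals=["2"], diffs=[0.1],
    quantity_ids=["q0"], categories=["length"],
    numbers=list(range(200)),  # default uniform sampling over numbers
)
term = get_next_term(cfg)
print(term["expression"], term["numerical"])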
def demand_share_per_timestep_constraint_rule(backend_model, group_name, carrier, timestep, what):
"""
Enforces shares of demand of a carrier to be met by the given groups
of technologies at the given locations, in each timestep.
The share is relative to ``demand`` technologies only.
.. container:: scrolling-wrapper
.. math::
\\sum_{loc::tech::carrier \\in given\\_group} carrier_{prod}(loc::tech::carrier, timestep) \\leq
            share \\times \\sum_{loc::tech::carrier \\in loc\\_techs\\_demand \\in given\\_locations}
carrier_{con}(loc::tech::carrier, timestep) for timestep \\in timesteps
"""
share = get_param(backend_model, 'group_demand_share_per_timestep_{}'.format(what), (carrier, group_name))
if share is None:
return return_noconstraint('demand_share_per_timestep', group_name)
else:
lhs_loc_tech_carriers, rhs_loc_tech_carriers = get_demand_share_lhs_and_rhs_loc_tech_carriers(
backend_model, group_name, carrier
)
lhs = sum(
backend_model.carrier_prod[loc_tech_carrier, timestep]
for loc_tech_carrier in lhs_loc_tech_carriers
)
rhs = share * -1 * sum(
backend_model.carrier_con[loc_tech_carrier, timestep]
for loc_tech_carrier in rhs_loc_tech_carriers
)
return equalizer(lhs, rhs, what) | 65cfc120a9a7a5f26b4057a21b7a38a32a335955 | 2,755 |
def b2str(data):
"""Convert bytes into string type."""
try:
return data.decode("utf-8")
except UnicodeDecodeError:
pass
try:
return data.decode("utf-8-sig")
except UnicodeDecodeError:
pass
try:
return data.decode("ascii")
except UnicodeDecodeError:
return data.decode("latin-1") | 05cbe6c8072e1bf24cc9ba7f8c8447d0fa7cbf7f | 2,756 |
import scipy as sp  # imports assumed by this snippet; `fs` (fontsize) is expected as a module-level constant
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns

def plotalphaerror(alphaarr, errorarr, errorlagarr):
    """ This will plot the error with respect to the alpha parameter for the
constraint.
"""
sns.set_style('whitegrid')
sns.set_context('notebook')
Nlag=errorlagarr.shape[-1]
nlagplot=4.
nrows=1+int(sp.ceil(float(Nlag)/(2*nlagplot)))
fig=plt.figure(figsize=(8,4*nrows),facecolor='w')
gs=gridspec.GridSpec(nrows,2)
axmain=plt.subplot(gs[0,:])
axlist=[plt.subplot(gs[int(sp.floor(float(i)/2.)+1),int(sp.mod(i,2))]) for i in range(2*(nrows-1))]
axmain.plot(alphaarr,errorarr)
axmain.set_xscale('log')
axmain.set_yscale('log')
axmain.set_title('Error From All Lags Added',fontsize=fs)
axmain.set_ylabel('Error',fontsize=fs)
axmain.set_xlabel(r'$\gamma$',fontsize=fs)
for iaxn,iax in enumerate(axlist):
strlist=[]
handlist=[]
for ilag in range(int(nlagplot)):
curlag=int(iaxn*nlagplot+ilag)
if curlag>=Nlag:
break
handlist.append(iax.plot(alphaarr,errorlagarr[:,curlag])[0])
strlist.append('Lag {0}'.format(curlag))
iax.set_xscale('log')
iax.set_yscale('log')
iax.set_title('Error From Lags',fontsize=fs)
iax.set_ylabel('Error',fontsize=fs)
iax.set_xlabel(r'$\gamma$',fontsize=fs)
iax.legend(handlist,strlist,loc='upper right',fontsize='large')
plt.tight_layout()
    return (fig, axlist, axmain) | e87c771212a3e39b1f4d7a1a74fc18c1d2e85f87 | 2,757 |
def fill_space(space, dim, size, minval, maxval, factor):
"""Fill a dim-dimensional discrete space of ℕ^{size} with
some random hyperplane with values ranging from minval to
maxval. Returns a ℕ^{size} array. Changes space in-place."""
offsets=[np.array([0]*dim)]
return ndim_diamond_square_rec(space, dim, size, offsets, minval, maxval, factor) | 7744cb465438b40019f3edae9db04143b16d19b1 | 2,758 |
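A hedged usage sketch, assuming ndim_diamond_square_rec is importable from the
same module; the grid shape and value range are arbitrary choices.

import numpy as np

space = np.zeros((33, 33))
heights = fill_space(space, dim=2, size=33, minval=0, maxval=10, factor=0.5)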
def fracorder_lowshelving_eastty(w1, w2, G1, G2, rB=None):
"""
Parameters
----------
w1: float
Lower corner frequency.
w2: float
Upper corner frequency.
G1: float
Target level at lower corner frequency in dB.
G2: float
Target level at upper corner frequency in dB.
rB: float
Gain per octave.
Returns
-------
z: array_like
Complex zeros in the Laplace domain.
p: array_like
Complex poles in the Laplace domain.
k: float
Gain.
"""
Gd = G1 - G2
n_eff = effective_order(w1, w2, Gd, rB)
n_int, n_frac = np.divmod(n_eff, 1)
n_int = int(n_int)
z = np.array([])
p = np.array([])
# Second-order sections (complex conjugate pole/zero pairs)
if n_int > 0:
alpha = complex_zp_angles(n_int, n_frac)
alpha = np.concatenate((alpha, -alpha))
z = w1 * np.exp(1j * alpha)
p = w2 * np.exp(1j * alpha)
# First-order section (real pole/zero)
if n_eff % 2 != 0:
s_lower, s_upper = real_zp(n_int, n_frac, w1, w2)
if n_int % 2 == 0:
z_real = s_lower
p_real = s_upper
elif n_int % 2 == 1:
z_real = s_upper
p_real = s_lower
z = np.append(z, z_real)
p = np.append(p, p_real)
return z, p, 1 | 379a87b024ff993c0abf0b75a482a8ab66a67546 | 2,759 |
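Illustrative use, assuming effective_order, complex_zp_angles and real_zp are
available: design a shelf between 100 Hz and 1 kHz with a 6 dB level
difference, then convert the zpk triple with scipy.

import numpy as np
from scipy.signal import zpk2tf

w1, w2 = 2 * np.pi * 100, 2 * np.pi * 1000
z, p, k = fracorder_lowshelving_eastty(w1, w2, G1=6, G2=0)
b, a = zpk2tf(z, p, k)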
def get_cookie_date(date):
"""
Return a date string in a format suitable for cookies (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Date)
:param date: datetime object
:return: date string in cookie format
"""
return date.strftime("%a, %d %b %Y %H:%M:%S GMT") | f2b4d6decab72cf1f25754bc7e290f62eae92156 | 2,760 |
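Example: format an expiry timestamp for a Set-Cookie header.

from datetime import datetime, timedelta, timezone

expires = datetime.now(timezone.utc) + timedelta(days=30)
print(get_cookie_date(expires))  # e.g. "Fri, 03 Oct 2025 12:00:00 GMT"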
def fill_block_with_call(newblock, callee, label_next, inputs, outputs):
"""Fill *newblock* to call *callee* with arguments listed in *inputs*.
The returned values are unwraped into variables in *outputs*.
The block would then jump to *label_next*.
"""
scope = newblock.scope
loc = newblock.loc
fn = ir.Const(value=callee, loc=loc)
fnvar = scope.make_temp(loc=loc)
newblock.append(ir.Assign(target=fnvar, value=fn, loc=loc))
# call
args = [scope.get_exact(name) for name in inputs]
callexpr = ir.Expr.call(func=fnvar, args=args, kws=(), loc=loc)
callres = scope.make_temp(loc=loc)
newblock.append(ir.Assign(target=callres, value=callexpr, loc=loc))
# unpack return value
for i, out in enumerate(outputs):
target = scope.get_exact(out)
getitem = ir.Expr.static_getitem(value=callres, index=i,
index_var=None, loc=loc)
newblock.append(ir.Assign(target=target, value=getitem, loc=loc))
# jump to next block
newblock.append(ir.Jump(target=label_next, loc=loc))
return newblock | 76e9edbeca59a75d9854e6ffa2d02658da7511ab | 2,761 |
def data_context_service_interface_pointuuid_media_channel_service_interface_point_spec_mc_pool_available_spectrumupper_frequencylower_frequency_frequency_constraint_get(uuid, upper_frequency, lower_frequency): # noqa: E501
"""data_context_service_interface_pointuuid_media_channel_service_interface_point_spec_mc_pool_available_spectrumupper_frequencylower_frequency_frequency_constraint_get
returns tapi.photonic.media.FrequencyConstraint # noqa: E501
:param uuid: Id of service-interface-point
:type uuid: str
:param upper_frequency: Id of available-spectrum
:type upper_frequency: int
:param lower_frequency: Id of available-spectrum
:type lower_frequency: int
:rtype: TapiPhotonicMediaFrequencyConstraint
"""
return 'do some magic!' | 412f0b48a050e9201e6d52450a04a6bef0f4a0f3 | 2,763 |
import pyglet

def image_to_string(filename):
"""Generate a string representation of the image at the given path, for embedding in code."""
image = pyglet.image.load(filename)
data = image.get_data('LA', 16)
s = ''
    for x in data:
        # iterating over bytes yields ints in Python 3, so ord() is not needed
        s += "\\x%02x" % x
return s | 19dea26d51dd29449759c5e1a3b4c9fc098702f3 | 2,764 |
def mpf_connectome(
mc, num_sampled, max_depth, args_dict, clt_start=10, sr=0.01, mean_estimate=False
):
"""Perform mpf statistical calculations on the mouse connectome."""
args_dict["max_depth"] = max_depth
args_dict["total_samples"] = num_sampled[0]
args_dict["static_verbose"] = False
args_dict["clt_start"] = clt_start
args_dict["mean_estimate"] = mean_estimate
if max_depth > 1:
sr = None
if mean_estimate is True:
sr = None
cp = CombProb(
mc.num_a,
num_sampled[0],
mc.num_senders,
mc.num_b,
num_sampled[1],
MatrixConnectivity.static_expected_connections,
verbose=True,
subsample_rate=sr,
**args_dict
)
result = {
"expected": cp.expected_connections(),
"total": cp.get_all_prob(),
"each_expected": {k: cp.expected_total(k) for k in range(num_sampled[0] + 1)},
}
return result | 6ae8ddb7c3355ddbf072a5bf97f57fe4e13b500e | 2,765 |
def valuedict(keys, value, default):
"""
Build value dictionary from a list of keys and a value.
Parameters
----------
keys: list
The list of keys
value: {dict, int, float, str, None}
A value or the already formed dictionary
default: {int, float, str}
A default value to set if no value
Returns
-------
dict
A dictionary
Notes
-----
This standalone and generic function is only required by plotters.
"""
if isinstance(value, dict):
return {key: value.get(key, default) for key in keys}
else:
return dict.fromkeys(keys, value or default) | 44283bac3be75c3569e87a890f507f7cff4161b6 | 2,766 |
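Examples: a scalar value is broadcast to every key, while a dict is completed
key by key with the default.

assert valuedict(["a", "b"], 2.0, 1.0) == {"a": 2.0, "b": 2.0}
assert valuedict(["a", "b"], {"a": 3.0}, 1.0) == {"a": 3.0, "b": 1.0}
assert valuedict(["a", "b"], None, 1.0) == {"a": 1.0, "b": 1.0}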
async def chunks(request):
"""A handler that sends chunks at a slow pace.
The browser will download the page over the range of 2 seconds,
but only displays it when done. This e.g. allows streaming large
files without using large amounts of memory.
"""
async def iter():
yield "<html><head></head><body>"
yield "Here are some chunks dripping in:<br>"
for i in range(20):
await asgineer.sleep(0.1)
yield "CHUNK <br>"
yield "</body></html>"
return 200, {"content-type": "text/html"}, iter() | 590fe83c5ee53c603b973063e0e077ab87a220ae | 2,767 |
def get_zero_ranges(*args):
"""
get_zero_ranges(zranges, range) -> bool
Return set of ranges with zero initialized bytes. The returned set
includes only big zero initialized ranges (at least >1KB). Some zero
initialized byte ranges may be not included. Only zero bytes that use
the sparse storage method (STT_MM) are reported.
@param zranges: pointer to the return value. cannot be NULL (C++:
rangeset_t *)
@param range: the range of addresses to verify. can be NULL - means
all ranges (C++: const range_t *)
@return: true if the result is a non-empty set
"""
return _ida_bytes.get_zero_ranges(*args) | bd95dbb237ca0b2934e8653b1198d10d25abc553 | 2,769 |
def fista_step(L, Wd, X, alpha, last_Z):
"""
Calculates the next sparse code for the FISTA algorithm
Dimension notation:
B - Number of samples. Usually number of patches in image times batch size
K - Number of atoms in dictionary
d - Dimensionality of atoms in dictionary
:param X: Input - Signal to find sparse coding against. Dimensions: d X B
:param Wd: Dictionary - Tensor of atoms we want to get a sparse linear combination of. Dimensions: d X K
:param alpha: Float. Sparsity weight
:param L: Float. Largest eigenvalue in Wd
:param last_Z: Sparse code from previous step. Dimensions: K x B
:return: Z: linear coefficients for Sparse Code solution. Dimensions: K x B
"""
quantization_distance = Wd.mm(last_Z) - X.to(Wd.device)
normalized_dictionary = Wd.t() / L
normalized_quantization_projection = normalized_dictionary.mm(quantization_distance)
cur_Z = last_Z - normalized_quantization_projection
cur_Z = shrink_function(cur_Z, alpha / L)
return cur_Z | 6e237a01e631d08efcc425068c646b792d984cdd | 2,770 |
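A hedged driver sketch that iterates fista_step to convergence. The momentum
(t_k) combination below is the standard FISTA recipe but lives outside
fista_step itself; shapes are illustrative and shrink_function is assumed to
come from the same module.

import torch

d, K, B = 64, 128, 32
Wd = torch.randn(d, K)
X = torch.randn(d, B)
L = torch.linalg.matrix_norm(Wd, ord=2) ** 2  # largest eigenvalue of Wd^T Wd
Z = torch.zeros(K, B)
Y, t = Z.clone(), 1.0
for _ in range(100):
    Z_next = fista_step(L, Wd, X, alpha=0.1, last_Z=Y)
    t_next = (1 + (1 + 4 * t ** 2) ** 0.5) / 2
    Y = Z_next + ((t - 1) / t_next) * (Z_next - Z)
    Z, t = Z_next, t_next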
def get_and_validate_certs_for_replacement(
default_cert_location,
default_key_location,
default_ca_location,
new_cert_location,
new_key_location,
new_ca_location):
"""Validates the new certificates for replacement.
This function validates the new specified certificates for replacement,
based on the new certificates specified and the current ones. E.g. if
    only a new certificate and key were specified, then it will validate them
with the current CA.
"""
cert_filename, key_filename = get_cert_and_key_filenames(
new_cert_location, new_key_location,
default_cert_location, default_key_location)
ca_filename = get_ca_filename(new_ca_location, default_ca_location)
validate_certificates(cert_filename, key_filename, ca_filename)
return cert_filename, key_filename, ca_filename | 9a4a3b46609fc1e5e7cc525b84397b9adba86b32 | 2,771 |
def build_model(cfg):
"""
    Build the whole model, as defined by `cfg.model.name`.
"""
name = cfg.model.name
return META_ARCH_REGISTRY.get(name)(cfg) | b106eca0f110007cb852dce9760e5e0ee08940a8 | 2,772 |
def download_n_parse_3k(url):
"""
    Downloads and parses the article, returning its body text.
Args:
url: The article's URL
"""
article3k = Article(url)
try:
article3k.download()
article3k.parse()
except Exception:
print(f"Download or Parse:\t{url}")
return
return article3k.text | fa63fc7c03b63c5e08004d61488074be538c714b | 2,773 |
def crop_to_square(img, target_size=None):
"""
Takes numpy array img and converts it to a square by trimming
:param img: np.array representing image
:param target_size: optionally specify target size. If None, will return min(l, w) x min(l, w)
:return: np.array
"""
l, w = img.shape
img_copy = img.copy()
if l > w:
delta = l - w
cropped_img = img_copy[delta // 2: -delta + delta // 2, :]
elif l < w:
delta = w - l
cropped_img = img_copy[:, delta // 2: -delta + delta // 2]
else:
cropped_img = img_copy
if target_size:
current_size = cropped_img.shape[0] # should be a square
center = max(target_size, current_size) // 2
offset_min = center - min(target_size, current_size) // 2
offset_max = offset_min + min(target_size, current_size)
if target_size > current_size:
new_image = np.zeros((target_size, target_size))
new_image[offset_min:offset_max, offset_min:offset_max] = cropped_img
cropped_img = new_image.copy()
else:
cropped_img = cropped_img[offset_min:offset_max, offset_min:offset_max]
return np.asarray(cropped_img, dtype=np.float32) | 2ad566c7d0a0c719ff207bc06d33e70208a7a03f | 2,774 |
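Quick shape checks: a 10x6 image is trimmed to 6x6, then padded out to a
larger target or cropped down to a smaller one.

import numpy as np

img = np.arange(60, dtype=np.float32).reshape(10, 6)
assert crop_to_square(img).shape == (6, 6)
assert crop_to_square(img, target_size=8).shape == (8, 8)
assert crop_to_square(img, target_size=4).shape == (4, 4)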
def reboot(name, path=None):
"""
Reboot a container.
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt 'minion' lxc.reboot myvm
"""
ret = {"result": True, "changes": {}, "comment": "{0} rebooted".format(name)}
does_exist = exists(name, path=path)
if does_exist and (state(name, path=path) == "running"):
try:
stop(name, path=path)
except (SaltInvocationError, CommandExecutionError) as exc:
ret["comment"] = "Unable to stop container: {0}".format(exc)
ret["result"] = False
return ret
if does_exist and (state(name, path=path) != "running"):
try:
start(name, path=path)
except (SaltInvocationError, CommandExecutionError) as exc:
ret["comment"] = "Unable to stop container: {0}".format(exc)
ret["result"] = False
return ret
ret["changes"][name] = "rebooted"
return ret | 2519f9ad5434dbb9ff0a48f5280483829584ebb1 | 2,775 |
import networkx
def find(domain):
""" Finds connected domains within a domain.
A domain is defined to be a connected region of lattice
points, subject to periodic boundary conditions.
Parameters
----------
domain : :py:class:`~fieldkit.mesh.Domain`
The set of nodes to seek connected domains in.
Returns
-------
tuple
A tuple of all :py:class:`~fieldkit.mesh.Domain` objects
identified within the `domain`. At most, there is only
one domain returned, but many can be identified if the points
in the `domain` are highly disconnected.
Notes
-----
The connected domains are determined using a graph-based approach,
which requires the `networkx` package. Performance is generally good,
but the algorithm may struggle for large numbers of nodes or domains.
"""
comps = networkx.connected_components(domain.graph)
return tuple([Domain(domain.mesh,list(c)) for c in comps]) | 3ea2128f84104686be88d359cda3df2554013f41 | 2,776 |
def url_to_license(url):
"""Given a URL, return the license as a license/version tuple"""
(scheme, netloc, path, *remainder) = urlparse(url)
path_parts = path.split('/')
if len(path_parts) < 4:
raise LicenseException("Did not get 4 path segments, probably not a CC license URL")
license = path_parts[2].upper() # First is '', because it starts with a leading /
version = path_parts[3]
# Handle the PD licenses as special-cases
if license == 'ZERO':
license = 'CC0'
version = '1.0'
if license == 'MARK':
license = 'PDM'
version = '1.0'
if license not in LICENSE_LIST:
raise LicenseException("License fragment %s was not a valid license", license)
return (license, version) | e6ae2d67f1dbd02c0fe0885231dbac4ae112b0d3 | 2,777 |
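Examples (assuming LICENSE_LIST contains the usual CC identifiers): a standard
license URL parses to a (license, version) tuple, and the public-domain tools
map to their CC0/PDM shorthand.

assert url_to_license("https://creativecommons.org/licenses/by-sa/4.0/") == ("BY-SA", "4.0")
assert url_to_license("https://creativecommons.org/publicdomain/zero/1.0/") == ("CC0", "1.0")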
def dsystem_dt(request):
"""Test systems for test_discrete"""
# SISO state space systems with either fixed or unspecified sampling times
sys = rss(3, 1, 1)
# MIMO state space systems with either fixed or unspecified sampling times
A = [[-3., 4., 2.], [-1., -3., 0.], [2., 5., 3.]]
B = [[1., 4.], [-3., -3.], [-2., 1.]]
C = [[4., 2., -3.], [1., 4., 3.]]
D = [[-2., 4.], [0., 1.]]
dt = request.param
systems = {'sssiso': StateSpace(sys.A, sys.B, sys.C, sys.D, dt),
'ssmimo': StateSpace(A, B, C, D, dt),
'tf': TransferFunction([2, 1], [2, 1, 1], dt)}
return systems | faaf22165fc147955b69b1d983fbc37dafb34772 | 2,778 |
import types
def update_attributes(dsFolder: types.GirderModel, data: dict):
"""Upsert or delete attributes"""
crud.verify_dataset(dsFolder)
validated: AttributeUpdateArgs = crud.get_validated_model(AttributeUpdateArgs, **data)
attributes_dict = fromMeta(dsFolder, 'attributes', {})
for attribute_id in validated.delete:
attributes_dict.pop(str(attribute_id), None)
for attribute in validated.upsert:
attributes_dict[str(attribute.key)] = attribute.dict(exclude_none=True)
    upserted_len = len(validated.upsert)
    deleted_len = len(validated.delete)
if upserted_len or deleted_len:
update_metadata(dsFolder, {'attributes': attributes_dict})
return {
"updated": upserted_len,
"deleted": deleted_len,
} | d58dfecf68822d4b45688ea16ec39e97e999d458 | 2,779 |
def machado_et_al_2009_matrix_protanomaly(severity):
"""Retrieve a matrix for simulating anomalous color vision.
:param cvd_type: One of "protanomaly", "deuteranomaly", or "tritanomaly".
:param severity: A value between 0 and 100.
:returns: A 3x3 CVD simulation matrix as computed by Machado et al
(2009).
These matrices were downloaded from:
http://www.inf.ufrgs.br/~oliveira/pubs_files/CVD_Simulation/CVD_Simulation.html
which is supplementary data from :cite:`Machado-CVD`.
If severity is a multiple of 10, then simply returns the matrix from that
webpage. For other severities, performs linear interpolation.
"""
MACHADO_ET_AL_MATRIX_protanomaly = np.array(
(
(
[1.000000, 0.000000, -0.000000],
[0.000000, 1.000000, 0.000000],
[-0.000000, -0.000000, 1.000000],
),
(
[0.856167, 0.182038, -0.038205],
[0.029342, 0.955115, 0.015544],
[-0.002880, -0.001563, 1.004443],
),
(
[0.734766, 0.334872, -0.069637],
[0.051840, 0.919198, 0.028963],
[-0.004928, -0.004209, 1.009137],
),
(
[0.630323, 0.465641, -0.095964],
[0.069181, 0.890046, 0.040773],
[-0.006308, -0.007724, 1.014032],
),
(
[0.539009, 0.579343, -0.118352],
[0.082546, 0.866121, 0.051332],
[-0.007136, -0.011959, 1.019095],
),
(
[0.458064, 0.679578, -0.137642],
[0.092785, 0.846313, 0.060902],
[-0.007494, -0.016807, 1.024301],
),
(
[0.385450, 0.769005, -0.154455],
[0.100526, 0.829802, 0.069673],
[-0.007442, -0.022190, 1.029632],
),
(
[0.319627, 0.849633, -0.169261],
[0.106241, 0.815969, 0.077790],
[-0.007025, -0.028051, 1.035076],
),
(
[0.259411, 0.923008, -0.182420],
[0.110296, 0.804340, 0.085364],
[-0.006276, -0.034346, 1.040622],
),
(
[0.203876, 0.990338, -0.194214],
[0.112975, 0.794542, 0.092483],
[-0.005222, -0.041043, 1.046265],
),
(
[0.152286, 1.052583, -0.204868],
[0.114503, 0.786281, 0.099216],
[-0.003882, -0.048116, 1.051998],
),
),
dtype=np.float64,
)
assert 0 <= severity <= 100
fraction = severity % 10
low = int(severity - fraction) // 10
high = low + 1
# assert low <= severity <= high
low_matrix = MACHADO_ET_AL_MATRIX_protanomaly[low]
if severity == 100:
# Don't try interpolating between 100 and 110, there is no 110...
return low_matrix
high_matrix = MACHADO_ET_AL_MATRIX_protanomaly[high]
return (1 - fraction / 10.0) * low_matrix + fraction / 10.0 * high_matrix | a99a07a7f447fc741ee1a4bd239bda5ed8079e6b | 2,780 |
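Severities between the tabulated steps are linear blends of the bracketing
matrices, so severity 55 mixes the 50 and 60 entries equally:

import numpy as np

m50 = machado_et_al_2009_matrix_protanomaly(50)
m60 = machado_et_al_2009_matrix_protanomaly(60)
m55 = machado_et_al_2009_matrix_protanomaly(55)
assert np.allclose(m55, 0.5 * (m50 + m60))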