content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
import pandas as pd
def _reorder_for_qbb_experiment(df: pd.DataFrame) -> pd.DataFrame:
"""By default the entries are ordered alphabetically. We want SPOTA, EPOpt, PPO"""
print("Changed the order")
return df.iloc[[2, 0, 1]] | beccd22a765eb526ed855fd34dde4a05e2b394f2 | 3,657,098 |
def get_field(self, *args, is_squeeze=False, node=None, is_rthetaz=False):
"""Get the value of variables stored in Solution.
Parameters
----------
    self : SolutionData
        a SolutionData object
    *args: list of strings
        List of axes requested by the user, their units and values (optional)
    is_squeeze : bool
        if True, squeeze singleton axes out of the returned field (default False)
    node, is_rthetaz :
        accepted for API compatibility; unused in this implementation
Returns
-------
field: array
an array of field values
"""
axname, _ = self.get_axes_list()
symbol = self.field.symbol
if len(args) == 0:
field_dict = self.field.get_along(tuple(axname), is_squeeze=is_squeeze)
else:
field_dict = self.field.get_along(*args, is_squeeze=is_squeeze)
field = field_dict[symbol]
return field | e93455cbc4b306762336fd13603342e9d92badd1 | 3,657,099 |
def handle_session_event(event: EventData) -> core_pb2.SessionEvent:
"""
    Convert session event data into a SessionEvent message
:param event: event data
:return: session event
"""
event_time = event.time
if event_time is not None:
event_time = float(event_time)
return core_pb2.SessionEvent(
node_id=event.node,
event=event.event_type.value,
name=event.name,
data=event.data,
time=event_time,
) | ddaa78a889c23326f52595d4a7fb71c1813eb971 | 3,657,101 |
def bump_patch(version):
"""Raise the patch part of the version
:param: version string
:return: the raised version string
:rtype: str
"""
verinfo = parse(version)
return format_version(verinfo['major'], verinfo['minor'],
verinfo['patch'] + 1) | 350e53788b0851138eb0d0248250bebd7e357e10 | 3,657,103 |
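A hedged sketch of the `parse` and `format_version` helpers that `bump_patch` assumes; the names match the legacy `semver` package API, but this minimal standalone version (plain MAJOR.MINOR.PATCH strings only) is an illustration, not the snippet's actual dependency:

import re

def parse(version):
    """Parse 'MAJOR.MINOR.PATCH' into a dict of ints."""
    match = re.match(r"^(\d+)\.(\d+)\.(\d+)$", version)
    if match is None:
        raise ValueError("invalid version string: %r" % version)
    major, minor, patch = (int(g) for g in match.groups())
    return {"major": major, "minor": minor, "patch": patch}

def format_version(major, minor, patch):
    """Render the three components back into a version string."""
    return "%d.%d.%d" % (major, minor, patch)

# bump_patch("1.4.2") -> "1.4.3"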
def _extract_bike_location(bike, lon_abbrev='lon'):
"""
Standardize the bike location data from GBFS. Some have extra fields,
and some are missing fields.
Arguments:
bike (dict[str, str]): A GBFS bike object as it appears in free_bike_status.json
lon_abbrev (str): The abbreviation used for `longitude`
Returns:
dict[str, str]: A normalized GBFS bike object
"""
output = {key: bike.get(key) for key in ['bike_id', 'lat', 'is_reserved', 'is_disabled']}
output['lon'] = bike.get(lon_abbrev)
return output | a20929a85c993a59b82b552fcfee81b1f818648d | 3,657,104 |
def clean_word(word):
"""Return word in lowercase stripped of whitespace"""
return word.strip().lower() | ce57fa95ec111ee18c8a00c2076c686bc0abfe5c | 3,657,105 |
def get_batch_size(tracks):
"""
If tracks is a track-major list of possibly None tracks, get the batch size
"""
return get_shape(tracks)[0] | 677f26a0f42d4e745d77ff6abc1867ce857ea208 | 3,657,106 |
def find_edges_from_wires(body: TopoDS_Shape) -> set[TopoDS_Edge]:
"""Return set of edges from Wires."""
edge_set = set()
for wire in TopologyExplorer(body, ignore_orientation=False).wires():
for edge in WireExplorer(wire).ordered_edges():
edge_set.add(edge)
return edge_set | 89d8d848d98c32e925f955da623a3e1018245f75 | 3,657,107 |
def getSentB(text2: str, offsetB: int, nextPoint: int, sentLength: int):
"""
    alignSentences auxiliary function to get the sentences of the original text.
"""
posB = text2[offsetB+sentLength:].find('.')
sentLength += posB+1
sentB = text2[offsetB:offsetB+sentLength]
nextPoint = offsetB + sentLength
return sentB, nextPoint, sentLength | 54914a3c1d85464c0e5a4267538a73693e3df238 | 3,657,109 |
def get_mapping_fcost_local(interface, bus_def):
"""
coarse cost function to cheaply estimate local (subset of ports)
interface match to bus_def
"""
cost = _get_mapping_fcost_base(interface, bus_def, penalize_umap=False)
name_cost = _get_name_fcost2(interface, bus_def)
cost.nc = name_cost
return cost | c945e89174fea0c131f35ad4688c5539a55c3eda | 3,657,110 |
import base64
from urllib.parse import quote
def base64_image(image: bytes, mime_type: str) -> str:
    """Encode the image for a URL using base64
Args:
image: the image
mime_type: the mime type
Returns:
A string starting with "data:{mime_type};base64,"
"""
base64_data = base64.b64encode(image)
image_data = quote(base64_data)
return f"data:{mime_type};base64,{image_data}" | 3079c73137959fea1d16ceb64251870500ae30a5 | 3,657,111 |
import math
import six
import numpy
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
Generate prior boxes for SSD(Single Shot MultiBox Detector)
algorithm. The details of this algorithm, please refer the
section 2.2 of SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs(list|tuple): The list of input Variables, the format
of all Variables is NCHW.
image(Variable): The input image data of PriorBoxOp,
the layout is NCHW.
base_size(int): the base_size is used to get min_size
and max_size according to min_ratio and max_ratio.
num_classes(int): The number of classes.
aspect_ratios(list|tuple): the aspect ratios of generated prior
boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <=2`,
min_sizes must be set up, and the length of min_sizes
should equal to the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <=2`,
max_sizes must be set up, and the length of min_sizes
should equal to the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
        flip(bool): Whether to flip aspect ratios. Default: True.
        clip(bool): Whether to clip out-of-boundary boxes. Default: False.
        kernel_size(int): The kernel size of conv2d. Default: 1.
        pad(int|list|tuple): The padding of conv2d. Default: 0.
        stride(int|list|tuple): The stride of conv2d. Default: 1.
        name(str): Name of the prior box layer. Default: None.
        min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of the
            convolution layer that follows and does not affect the final
            detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc: The predicted boxes' location of the inputs. The layout
is [N, H*W*Priors, 4]. where Priors is the number of predicted
boxes each position of each input.
mbox_conf: The predicted boxes' confidence of the inputs. The layout
is [N, H*W*Priors, C]. where Priors is the number of predicted boxes
each position of each input and C is the number of Classes.
boxes: the output prior boxes of PriorBox. The layout is [num_priors, 4].
num_priors is the total box count of each position of inputs.
variances: the expanded variances of PriorBox. The layout is
[num_priors, 4]. num_priors is the total box count of each position of inputs
Examples:
.. code-block:: python
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv5],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps:
_is_list_or_tuple_and_equal(
steps, num_layer,
            'steps should be list or tuple, and the length of inputs and '
            'steps should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
compile_shape = [
mbox_loc.shape[0], cpt.floor_division(
mbox_loc.shape[1] * mbox_loc.shape[2] * mbox_loc.shape[3], 4), 4
]
run_shape = tensor.assign(numpy.array([0, -1, 4]).astype("int32"))
mbox_loc_flatten = nn.reshape(
mbox_loc, shape=compile_shape, actual_shape=run_shape)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
new_shape = [0, -1, num_classes]
compile_shape = [
conf_loc.shape[0],
cpt.floor_division(conf_loc.shape[1] * conf_loc.shape[2] *
conf_loc.shape[3], num_classes), num_classes
]
run_shape = tensor.assign(
numpy.array([0, -1, num_classes]).astype("int32"))
conf_loc_flatten = nn.reshape(
conf_loc, shape=compile_shape, actual_shape=run_shape)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var | e3fabec0dd64fec9caea929e0bf4c04848d22df6 | 3,657,112 |
from operator import invert
import numpy
def expandMask(img, shrink=False, step=1):
    """Grow or shrink a mask by a pixel."""
if shrink:
img = invert(img)
img = jitterSum(img.data, step) > 0
img = Image(data = img.astype(numpy.uint8)*255)
if shrink:
img = invert(img)
return img | 4853a0c42856cc34a5b9b58533d335c0ac858345 | 3,657,113 |
def isHeader(line):
"""
tests to see if 'line' is in the event file
header
"""
if containsAny(line, 'EVF Filename:', 'Generation Time:', 'Start_time:',
'End_time:', 'events in list)', '#', 'Include:',
'Init_value:'):
return True
elif len(line) < 3:
return True
else:
return False | 548d0273b174c16e7ab874fe8a94d4ec7e87703b | 3,657,114 |
import requests
def redirect_page(source_url, destination_url):
"""returns False is current page is not 200"""
def _check_redirect(full_url):
print('Getting ' + full_url)
response = requests.get(full_url, allow_redirects=False)
if response.status_code == 200:
print("Was 200")
return True
elif response.status_code == 404:
print("Was 404")
return False
elif response.status_code == 301:
print("Was 301")
return False
else:
raise Exception("UNEXPECTED STATUS CODE {} FOR {}".format(
response.status_code, full_url))
full_source_url = 'https://www.gov.uk' + source_url
full_destination_url = 'https://www.gov.uk' + destination_url
return _check_redirect(full_source_url) and _check_redirect(
full_destination_url) | 8caa9db41948f44cc015ca51f179ff318eb22ada | 3,657,115 |
def WrapWithQuotes(text, quote='"'):
""" Wrap the supplied text with quotes
Args:
text: Input text to wrap
        quote: Quote character to use for wrapping (default = '"')
Returns:
Supplied text wrapped in quote char
"""
if not text.startswith(quote):
text = quote + text
if not text.endswith(quote):
text = text + quote
return text | f4f7b83d60e3ea928e3502b9d19ca4c8d52914b9 | 3,657,117 |
def login_aws_via_idp(session, username, password, entity_id):
""" Get a SAML assertion and set of AWS roles which can be assumed with the SAML assertion. """
logger.info("Looking up your IdP")
idp_url, idp_form = get_idp_login_form(
session, username, password, entity_id)
logger.info("Logging in to %s", idp_url)
idp_response = session.post(idp_url, data=idp_form)
idp_response.raise_for_status()
logger.info("Parsing response and presenting assertion to CILogon")
cilogon_url, payload = parse_idp_login_response(idp_response.text)
scimma_saml_proxy_response = session.post(cilogon_url, data=payload)
scimma_saml_proxy_response.raise_for_status()
logger.info("Login complete, extracting credentials")
assertion = parse_scimma_sample_response(scimma_saml_proxy_response.text)
roles = parse_scimma_aws_assertion(assertion)
return assertion, roles | 586250b66771275b5282ae0e22d40298550164e2 | 3,657,119 |
import numpy as np
def fit_linreg(x, y, intercept=True):
"""Simple linear regression: y = kx + b.
Arguments
---------
x: :class:`~numpy.ndarray`
A vector of independent variables.
y: :class:`~numpy.ndarray`
A vector of dependent variables.
intercept: bool
If using steady state assumption for fitting, then:
True -- the linear regression is performed with an unfixed intercept;
        False -- the linear regression is performed with a fixed zero intercept.
Returns
-------
k: float
The estimated slope.
b: float
The estimated intercept.
"""
mask = np.logical_and(~np.isnan(x), ~np.isnan(y))
xx = x[mask]
yy = y[mask]
ym = np.mean(yy)
xm = np.mean(xx)
if intercept:
cov = np.mean(xx * yy) - xm * ym
var_x = np.mean(xx * xx) - xm * xm
k = cov / var_x
b = ym - k * xm
else:
k = np.mean(yy) / np.mean(xx)
b = 0
return k, b | 18248eb0ece96dfda5fbc2d94a591f98570feddd | 3,657,120 |
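A quick sanity check of `fit_linreg` on noiseless synthetic data (numpy as np, which the function already assumes):

x = np.arange(10.0)
y = 3.0 * x + 2.0
k, b = fit_linreg(x, y)                            # k == 3.0, b == 2.0
k0, b0 = fit_linreg(x, 3.0 * x, intercept=False)   # k0 == 3.0, b0 == 0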
import torch
import torch.nn.functional as F
def entropy(x, input_as_probabilities):
"""
Helper function to compute the entropy over the batch
input: batch w/ shape [b, num_classes]
output: entropy value [is ideally -log(num_classes)]
"""
if input_as_probabilities:
x_ = torch.clamp(x, min = 1e-8)
b = x_ * torch.log(x_)
else:
b = F.softmax(x, dim = 1) * F.log_softmax(x, dim = 1)
if len(b.size()) == 2: # Sample-wise entropy
return -b.sum(dim = 1).mean()
elif len(b.size()) == 1: # Distribution-wise entropy
return - b.sum()
else:
raise ValueError('Input tensor is %d-Dimensional' %(len(b.size()))) | 9cf9f5ecd59ffe068bbf8f25da62ac3c5c2eedb6 | 3,657,121 |
from typing import Callable
def find_function_in_object(o: object, function_name: str) -> Callable:
"""Finds a callable object matching given function name in given object.
Args:
o: Any object.
function_name: Name of attribute within o.
Returns:
Callable object with name <function_name> in object <o>.
Raises:
        LookupError: if <function_name> is not a callable object in <o>.
"""
try:
function_handle = getattr(o, function_name)
if not hasattr(function_handle, "__call__"):
raise LookupError(
f"Resolved object {function_name} in object {o} is not a function."
)
else:
return function_handle
except AttributeError:
raise LookupError(f"Cannot find function {function_name} in object {o}.") | c3b6ad12f42d005f643bc8a657f728613bd0e93c | 3,657,122 |
async def refresh(db: AsyncSession, schema: RefreshToken):
"""
Refresh token
:param db: DB
:type db: AsyncSession
:param schema: Refresh token
:type schema: RefreshToken
:return: Access token
:rtype: dict
:raise HTTPException 400: User not found
"""
username = verify_refresh_token(schema.refresh_token)
if not await user_crud.exists(db, username=username):
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='User not found')
user = await user_crud.get(db, username=username)
return create_token(user.id, username) | f20cde1c44ef515c18318c45af9df4bb360c85e6 | 3,657,123 |
import tensorflow as tf
def gumbel_softmax(logits, temperature, hard=False):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
    be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, temperature)
if hard:
y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
y = tf.stop_gradient(y_hard - y) + y
return y | 7612ef322acf77f8c2fdf1963b6d15934f84b416 | 3,657,124 |
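A hedged sketch of the `gumbel_softmax_sample` helper the snippet calls, following the standard TF1-style formulation (consistent with the deprecated `keep_dims` argument used above); this is an assumption, not the snippet's own code:

import tensorflow as tf

def sample_gumbel(shape, eps=1e-20):
    """Sample from Gumbel(0, 1) via inverse transform of uniforms."""
    u = tf.random_uniform(shape, minval=0, maxval=1)
    return -tf.log(-tf.log(u + eps) + eps)

def gumbel_softmax_sample(logits, temperature):
    """Add Gumbel noise to the logits and take a temperature-scaled softmax."""
    y = logits + sample_gumbel(tf.shape(logits))
    return tf.nn.softmax(y / temperature)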
import numpy as np
def build_custom_Theta(
data,
data_description=[],
add_constant_term=True,
):
"""
builds a matrix Theta(U) from a predefined set of terms
This is used when we subsample and take all the derivatives point by point or if there is an
extra input to put in.
input:
data: column 0 is U
        data_description: description of candidate terms in Theta
returns:
Theta = Theta(U,Q)
descr = description of what all the columns in Theta are
"""
if len(data) > 0:
n, m = data.shape
    # Start Theta as an empty (n, 0) matrix; candidate columns are appended below.
Theta = np.array([], dtype=np.complex64).reshape((n, 0))
descr = []
# Add "u"-part into Theta
if len(data_description) > 0:
Theta = np.hstack([Theta, data])
descr += data_description
return Theta, descr | 451c306124e94d5f04d436c98ede6af232a6458e | 3,657,126 |
import pathlib
from types import ModuleType
from typing import Optional
import importlib
def load(plugin: pathlib.Path) -> Optional[ModuleType]:
"""Load a specific cemu plugin
Args:
plugin (pathlib.Path): the path of the plugin to load
Returns:
Optional[ModuleType]: the loaded plugin module on success, None if there's no plugin, or it is invalid
"""
try:
if plugin.is_file():
mod = importlib.import_module(f"cemu.plugins.{plugin.stem}")
elif plugin.is_dir():
mod = importlib.import_module(f"cemu.plugins.{plugin.name}")
else:
raise ImportError("invalid format")
except ImportError as ie:
error(f"Failed to import '{plugin}' - reason: {str(ie)}")
return None
if not hasattr(mod, "register"):
error(f"Plugin '{plugin.stem}' has no `register` method")
return None
return mod | eac265743ba9a58842cf7e97a1b961234ea3b17b | 3,657,127 |
import traceback
import sqlalchemy
from sqlalchemy.orm import sessionmaker
def getDatabaseConnection(databaseString):
"""Attempt connection to the database"""
sqlsession = None
try:
sqlengine = sqlalchemy.create_engine(databaseString)
SQLSession = sessionmaker(bind=sqlengine)
sqlsession = SQLSession()
print("Connection to " + databaseString + " successfull")
except Exception as e:
print(traceback.format_exc())
print("Error in connection to the database")
return sqlsession | 8199838e24c6828977d5fe6a7f2af20f755f25f6 | 3,657,129 |
def prepare_multiple_configs(conf):
""" This function uses workload_1 as a base, and then duplicates its configuration for all
other workloads 2,3... while leaving properties already defined in subsequent workloads (2,3..)
unchanged.
"""
keys_starting_with_workload = []
    for k, _ in conf.items():
if k.startswith("workload"):
keys_starting_with_workload.append(k)
for k in keys_starting_with_workload:
if k != "workload_1":
merge_dicts(dst_dic=conf[k], src_dic=conf["workload_1"], overwrite=False)
return conf, keys_starting_with_workload | 760adf50bbca9dd160375ed8d506a33618d39a94 | 3,657,130 |
import numpy as np
from sklearn.preprocessing import StandardScaler
def undo_coefficient_scaling(clf=None, coefficients=None, intercept=0.0, scaler=None):
"""
given coefficients and data for scaled data, returns coefficients and intercept for unnormalized data
w = w_scaled / sigma
b = b_scaled - (w_scaled / sigma).dot(mu) = b_scaled - w.dot(mu)
    :param clf: sklearn linear classifier
    :param coefficients: vector of coefficients
    :param intercept: scalar for the intercept function
    :param scaler: sklearn StandardScaler, or None if the data was not scaled
:return: coefficients and intercept for unnormalized data
"""
if coefficients is None:
assert clf is not None
assert intercept == 0.0
assert hasattr(clf, 'coef_')
coefficients = clf.coef_
intercept = clf.intercept_ if hasattr(clf, 'intercept_') else 0.0
if scaler is None:
w = np.array(coefficients)
b = float(intercept)
else:
        assert isinstance(scaler, StandardScaler)
x_shift = np.array(scaler.mean_)
x_scale = np.sqrt(scaler.var_)
w = coefficients / x_scale
b = intercept - np.dot(w, x_shift)
w = np.array(w).flatten()
b = float(b)
return w, b | cee60338386bdc87cb50e4b54af43517135fba46 | 3,657,131 |
import copy
def reduce(snail_nr):
"""Returns a fully reduced version of the given snail number."""
new_snail_nr = copy.deepcopy(snail_nr)
# print("Start:")
# print(snail_nr)
while True:
# print("\nNew reduction phase...")
if explode_in_place(new_snail_nr):
# print("Exploded:", new_snail_nr)
continue
# else:
# print("No explode.")
if split_in_place(new_snail_nr):
# print("Split:", new_snail_nr)
continue
# else:
# print("No split.")
break
# print(new_snail_nr)
return new_snail_nr | 1facd7a7bbc73794ff2519ef0894ec9536c18690 | 3,657,132 |
def load_image_embedding_model(input_repr, content_type, embedding_size):
"""
Returns a model with the given characteristics. Loads the model
if the model has not been loaded yet.
Parameters
----------
input_repr : "linear", "mel128", or "mel256"
Spectrogram representation used for audio model.
content_type : "music" or "env"
Type of content used to train embedding.
embedding_size : 8192 or 512
Embedding dimensionality.
Returns
-------
model : tf.keras.Model
Model object.
"""
model_path = get_image_embedding_model_path(input_repr, content_type)
return load_image_embedding_model_from_path(model_path, embedding_size) | f78d458e2cd000206d3fcc35c166ede43e84e8fd | 3,657,133 |
import numpy as np
from pixell import sharp  # assumed source of the `sharp` module used below
def prepare_alm(alm=None, ainfo=None, lmax=None, pre=(), dtype=np.float64):
"""Set up alm and ainfo based on which ones of them are available."""
if alm is None:
if ainfo is None:
if lmax is None:
raise ValueError("prepare_alm needs either alm, ainfo or lmax to be specified")
ainfo = sharp.alm_info(lmax)
alm = np.zeros(pre+(ainfo.nelem,), dtype=np.result_type(dtype,0j))
else:
ainfo = sharp.alm_info(nalm=alm.shape[-1])
return alm, ainfo | 21406a6b3df7e63eeb05998c8940e525021b62ce | 3,657,134 |
from typing import Any
def increment_occurance_dict(d: dict, k: Any) -> None:
"""
    Increment occurrence dict, updates in-place so nothing is returned.
"""
try:
d[k] += 1
except KeyError:
d[k] = 1
return None | 725b437494f4c647848c54a3d13b4e974fa7f0e8 | 3,657,135 |
import numpy as np
import scipy.spatial.distance
def closest_line(query_lines, metric='cosine'):
"""Compute the distance to, and parameters for, the closest line to each
line in query_lines.
Args:
- query_lines: Array of lines to compute closest matches for, shape
(n_lines, width, height, 1)
- metric: String to pass to scipy.spatial.distance.cdist to choose
which distance metric to use
Returns:
- min_dist, starts, ends: Arrays of shape (n_lines,) denoting the
distance to the nearest ``true'' line and the start and end points.
"""
h, w = query_lines.shape[1:-1]
# Construct 10000 lines with these dimensions
angles = np.linspace(0, 2*np.pi - 2*np.pi/10000, 10000)
all_lines = np.array(
[(data.draw_line(angle, h, w)) for angle in angles])
# Produce vectorized versions of both for use with scipy.spatial
flat_query = query_lines.reshape(query_lines.shape[0], -1)
flat_all = all_lines.reshape(all_lines.shape[0], -1)
# Compute pairwise distance matrix of query lines with all valid lines
distances = scipy.spatial.distance.cdist(flat_query, flat_all, metric)
min_dist_idx = np.argmin(distances, axis=-1)
min_dist = distances[np.arange(distances.shape[0]), min_dist_idx]
angles = np.array([angles[n] for n in min_dist_idx])
return min_dist, angles | 187cb6f8266ddf7bd0347fb233fb02a7ea4cbad3 | 3,657,137 |
def deref_vtk(obj):
"""Dereferences the VTK object from the object if possible."""
if isinstance(obj, TVTKBase):
return obj._vtk_obj
else:
return obj | 1ba46f83a389983df3c35f011c94836f12fdd905 | 3,657,138 |
def order_assignee_factory(team):
"""
Creates a :class:`datahub.omis.order.models.OrderAssignee` instance related to ``team``
"""
adviser = Advisor.objects.create(
first_name='John',
last_name='Doe',
email=f'{uuid4()}@example.com',
)
order_assignee = OrderAssignee.objects.create(
order=Order.objects.create(
company=Company.objects.create(),
contact=Contact.objects.create(primary=True),
primary_market=Country.objects.create(),
),
adviser=adviser,
created_by=adviser)
order_assignee.team = team
order_assignee.save()
return order_assignee | fe39d16a105ff01be63614e76dcf001b5ca4171f | 3,657,139 |
def is_bool(space, w_obj):
""" Finds out whether a variable is a boolean"""
return space.wrap(w_obj.tp == space.tp_bool) | 39b62ec08ebbdd4d7505e558ad4901ca67afc12d | 3,657,140 |
def air_density(t_f, elevation):
"""Eq 20, page 25"""
return (1.293 - 1.525e-4 * elevation + 6.379e-9 * elevation ** 2) / (
1 + 0.00367 * t_f
) | d5677c755fc52e1ae8cc5293d4ed5c9a4debb71d | 3,657,143 |
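A worked example of the formula, assuming `t_f` is the film temperature in °C and `elevation` is in metres:

# at sea level and t_f = 25 °C:
# air_density(25, 0) == 1.293 / (1 + 0.00367 * 25) ≈ 1.184  [kg/m^3]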
def _strip_after_new_lines(s):
"""Removes leading and trailing whitespaces in all but first line."""
lines = s.splitlines()
if len(lines) > 1:
lines = [lines[0]] + [l.lstrip() for l in lines[1:]]
return '\n'.join(lines) | 247cee0f34ab1e742069e05c8c00095cd24d80bc | 3,657,144 |
def make_connection(request):
"""
Create a StreamSplitRoutine from a MockConnection and a container, return topics 'A' and 'B' as well as the routine
"""
def generate(*, max_items_send: int):
return MockConnection(max_items_send=max_items_send)
yield generate | a0a4adbdf6fb7487d27f9e81c8f4bb5af49fae58 | 3,657,145 |
import copy
def my_browse(*args, **kwargs):
""" Creates and starts an ObjectBrowser with modified summary column.
"""
attribute_columns = copy.deepcopy(DEFAULT_ATTR_COLS)
summary_column = [col for col in attribute_columns if col.name == 'summary'][0]
summary_column.data_fn = my_summary
return browse(*args, attribute_columns = attribute_columns, **kwargs) | 3f5e681112bf5dd7a56a3259e188a1c5773f2cf5 | 3,657,146 |
import psutil
def cpu_min_frequency():
"""
    Returns the processor minimum frequency, in MHz (float)
"""
return psutil.cpu_freq().min | de4312ccd95e46d6d157bdb1a08d48fe5924942f | 3,657,147 |
def log_error(message: str) -> str:
"""error log"""
return message | dbd86c39bc504dbac8d308e124c73310df21f372 | 3,657,148 |
import datetime
from sqlalchemy import and_, or_
from sqlalchemy.sql.functions import coalesce
def exclude_preservation_pending(q):
"""
Transform query to exclude MuseumObject entries which are pending
preservation
"""
now = datetime.datetime.now(datetime.timezone.utc)
preservation_boundary = now - PRESERVATION_DELAY
update_boundary = now - UPDATE_DELAY
return (
q.outerjoin(
MuseumPackage,
MuseumPackage.id == MuseumObject.latest_package_id
)
.filter(
# If any of the four conditions is true, the object will not
# be preserved and are thus included in this query:
or_(
# 1. Is metadata information still incomplete?
MuseumObject.metadata_hash == None,
MuseumObject.attachment_metadata_hash == None,
# 2. Is the object frozen?
MuseumObject.frozen,
# 3. The object hasn't been preserved, but it has been less
# than a month passed since the creation of the object?
and_(
MuseumObject.latest_package_id == None,
coalesce(
MuseumObject.created_date, datetime.datetime.min
) > preservation_boundary
),
# 4. Has the object entered preservation before, but...
and_(
MuseumObject.latest_package_id != None,
# ...the package wasn't cancelled, and either...
MuseumPackage.cancelled == False,
or_(
# ...modification date hasn't changed?
coalesce(
MuseumPackage.object_modified_date,
datetime.datetime.min
) == coalesce(
MuseumObject.modified_date,
datetime.datetime.min
),
# ...modification date has changed, but it's been
# less than a month?
coalesce(
MuseumPackage.object_modified_date,
datetime.datetime.min
) > update_boundary,
# ...metadata hashes haven't changed, indicating no
# change has happened?
and_(
MuseumPackage.metadata_hash
== MuseumObject.metadata_hash,
MuseumPackage.attachment_metadata_hash
== MuseumObject.attachment_metadata_hash
)
)
)
)
)
) | a43eefeaaac16ac872ae02bd522873966e5f21e2 | 3,657,149 |
from datetime import date, datetime
def naturalday(value, format=None):
"""
For date values that are tomorrow, today or yesterday compared to
present day returns representing string. Otherwise, returns a string
formatted according to settings.DATE_FORMAT.
"""
value = localtime(value)
try:
tzinfo = getattr(value, 'tzinfo', None)
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't a date object.
return value
except ValueError:
# Date arguments out of range.
return value
today = datetime.now(tzinfo).date()
delta = value - today
if delta.days > 7:
return date_format(value, format)
elif delta.days > 2:
if value.weekday() == 0:
return _('Next Monday')
elif value.weekday() == 1:
return _('Next Tuesday')
elif value.weekday() == 2:
return _('Next Wednesday')
elif value.weekday() == 3:
return _('Next Thursday')
elif value.weekday() == 4:
return _('Next Friday')
elif value.weekday() == 5:
return _('Next Saturday')
else:
return _('Next Sunday')
elif delta.days == 2:
return _('After tomorrow')
elif delta.days == 1:
return _('Tomorrow')
elif delta.days == 0:
return _('Today')
elif delta.days == -1:
return _('Yesterday')
elif delta.days == -2:
return _('Before yesterday')
elif delta.days > -7:
if value.weekday() == 0:
return _('Last Monday')
elif value.weekday() == 1:
return _('Last Tuesday')
elif value.weekday() == 2:
return _('Last Wednesday')
elif value.weekday() == 3:
return _('Last Thursday')
elif value.weekday() == 4:
return _('Last Friday')
elif value.weekday() == 5:
return _('Last Saturday')
else:
return _('Last Sunday')
else:
return date_format(value, format) | fbc1fe32f5735f57c989488989aabd427a59c160 | 3,657,150 |
import numpy as np
import tensorflow as tf
from torch.utils.data import DataLoader
def test_adaptors(adaptor: str, shuffle_buffer_size: int):
"""
Test if framework-specific generator adpators yield batches.
"""
idx = np.arange(0, 10)
def map_fn(x_, obs_):
"""
Note: Need to convert to numpy in output because torch does not accept dask.
"""
return (np.asarray(x_[:, :2]),),
kwargs = {"idx": {"Mus musculus": idx}, "obs_keys": [], "randomized_batch_access": False, "retrieval_batch_size": 2,
"map_fn": map_fn}
cart = _get_cart(store_format="dao", feature_space="single", **kwargs)
if adaptor == "python":
kwargs = {}
elif adaptor == "tensorflow":
kwargs = {"output_signature": (
tf.TensorSpec(shape=(2,), dtype=tf.float32),
)}
elif adaptor in ["torch", "torch-loader", "torch-iter-loader", "torch-iter"]:
kwargs = {}
else:
assert False
it = cart.adaptor(generator_type=adaptor, shuffle_buffer=shuffle_buffer_size, **kwargs)
if adaptor == "tensorflow":
it = iter(it.range(2))
if adaptor in ["torch", "torch-iter"]:
it = list(DataLoader(it))
it = iter(it)
if adaptor in ["torch-loader", "torch-iter-loader"]:
it = iter(list(it))
_ = next(it) | 088bd70f50b63a07f7392f1712de0d6aab9515a2 | 3,657,151 |
def qg8_graph_write(filename: str, graph: qg8_graph):
"""
Wrapper function which prepares a collection of chunks (graph) and writes it to a file
"""
if not isinstance(graph, qg8_graph):
raise TypeError("Second argument is not a qg8_graph")
try:
qg8f = qg8_file_open(filename, QG8_MODE_WRITE)
except:
raise IOError("Could not open file in write mode")
success = 1
for chunk in graph.chunks:
success *= qg8_file_write_chunk(qg8f, chunk)
qg8_file_flush(qg8f)
qg8_file_close(qg8f)
return success | a26891c86df5541cb1ffa3d3eb463bea5472d3d7 | 3,657,152 |
def valid_post_author(user, post):
    """This function checks whether the post was created by the user"""
    if str(user.key().id()) == str(post.user.key().id()):
        return True
    return False | 94ca2f23aa66f79be997080c61fc2f265e868e5f | 3,657,153 |
import json
import time
import collections
from datetime import datetime
def listing(request, **kwargs):
"""view for processing and applying listings"""
context = {
'view': 'listing',
'all_channels': CHANNELS,
'all_towns': TOWNS,
'method': request.method,
'actions': ['listing_parse', 'listing_apply'],
}
if request.method == 'GET':
context['action'] = 'show_listingModal'
return render(request, 'dvbboxes.html', context)
elif request.method == 'POST':
if 'listing/apply' in request.path:
form = forms.ApplyListingForm(request.POST)
if form.is_valid():
context['action'] = 'listing_apply'
parsed_data = json.loads(form.cleaned_data['parsed_data'])
service_id = form.cleaned_data['service_id']
towns = form.cleaned_data['towns']
if not towns:
towns = TOWNS
towns.sort()
# apply listing to servers in towns
days = [data['day'] for data in parsed_data]
days = sorted(
days,
key=lambda x: time.mktime(time.strptime(x, '%d%m%Y'))
)
response = dvbboxes.Listing.apply(
parsed_data, service_id, towns
)
# reorganize response by days
result = collections.OrderedDict()
for day in days:
result[day] = collections.OrderedDict()
bar = True
for town, data in response.items():
for day, infos in data.items():
for server, statuses in infos.items():
foo = all(statuses.values())
result[day][server] = foo
bar = bar and foo
context['result'] = result
return render(request, 'dvbboxes.html', context)
else:
context['errors'] = form.errors
return render(request, 'dvbboxes.html', context)
else:
form = forms.UploadListingForm(request.POST, request.FILES)
if form.is_valid():
filepath = handle_uploaded_file(request.FILES['filename'])
listing = dvbboxes.Listing(filepath) # get listing object
days = sorted(
listing.days,
key=lambda x: datetime.strptime(x, '%d%m%Y')
) # sort days in the listing
if len(days) > 31:
context['errors'] = ("Cannot process "
"more than 31 days")
return render(request, 'dvbboxes.html', context)
context['action'] = 'listing_parse'
missing_files = [
i for i, j in listing.filenames.items() if not j
] # detect missing files in the listing
result = collections.OrderedDict() # prepare final result
for day in days:
result[day] = []
parsed_listing = listing.parse()
json_result = []
for data in parsed_listing:
infos = collections.OrderedDict()
data = json.loads(data)
json_result.append(data)
day = data['day']
starts = [i for i in data if i != 'day']
starts = sorted(
starts,
key=lambda x: float(x.split('_')[1]))
absent_files = 0
for start in starts:
t, i = start.split('_')
start_litteral = datetime.fromtimestamp(
float(t)).strftime('%H:%M:%S')
stop_litteral = datetime.fromtimestamp(
float(t)+data[start]['duration']).strftime(
'%d-%m-%Y %H:%M:%S')
absent = not data[start]['duration']
if absent:
absent_files += 1
filename = data[start]['filename']
infos[i] = [
start_litteral, filename, absent
]
# we now define if the parsing is fine
limit = datetime.strptime(day, '%d%m%Y') + timedelta(1)
length_ok = (
datetime.fromtimestamp(
float(t)+data[start]['duration']) >= limit
)
if not absent_files and length_ok:
success = 0 # green
elif absent_files and length_ok:
success = 1 # lightblue
elif not absent_files and not length_ok:
success = 2 # orange
else:
success = 3 # red
result[day] = [infos, success, stop_litteral]
context['days'] = days
context['missing_files'] = missing_files
context['result'] = result
context['json_result'] = json.dumps(json_result)
return render(request, 'dvbboxes.html', context)
else:
context['errors'] = form.errors
return render(request, 'dvbboxes.html', context) | c4938dc4db4526ca93558305ea702660956e77fa | 3,657,154 |
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import map_coordinates
def get_rise_or_fall(U, V, Im, demo=0):
"""
Get increase or decrease of intensity in flow direction: This finds us
the front and the wake regions of each wave.
"""
rr, cc = np.shape(Im)
ax_x, ax_y = np.linspace(1, cc, cc), np.linspace(1, rr, rr)
XX, YY = np.meshgrid(ax_x, ax_y)
Velo_mag = np.hypot(U, V)
nU = U / Velo_mag
nV = V / Velo_mag
lookahead = 3
# indices of nearby pixels, small span
XX_next = np.round(XX + lookahead * nU)
YY_next = np.round(YY + lookahead * nV)
# interpolate
Im_next = map_coordinates(
Im, [YY_next.ravel(), XX_next.ravel()], order=3, mode="constant"
).reshape(Im.shape)
    # wavesign = np.sign(Im_next - Im)
    wavesign = Im_next < Im
    # diagnostic plot (demo == 2 activates it; demo == 3 shelved):
if demo == 2:
plt.close("all")
plt.figure()
plt.imshow(wavesign)
plt.title("front and wakes areas")
plt.xlabel("x (pixels)")
plt.ylabel("y (pixels)")
plt.show()
breakpoint() # click-to-code help
return wavesign | a2d86bd986f576054ccd2686af7d9da4ffd3a1f0 | 3,657,155 |
import functools
def has_vanity_name(func):
"""Decorator checking whether a command has been provided a vanity_name value"""
@functools.wraps(func)
async def wrapper(*args, **kwargs):
vanity_name = args[1]
if vanity_name is None:
ctx = args[0]
await ctx.send("Please provide a Steam vanity URL or steamid")
return
return await func(*args, **kwargs)
return wrapper | 5da3cc410822f0e112a2be1b3cdfc66fb4d79b0c | 3,657,156 |
from typing import List
import logging
def get_data_providers(
data_providers_configs: List[dict], data_providers_input: List[str]
) -> List[data.DataProvider]:
"""
Determines which data provider and in which order should be used.
:param data_providers_configs: A list of data provider configurations
:param data_providers_input: A list of data provider names
:return: a list of data providers in order.
"""
logger = logging.getLogger(__name__)
data_providers = []
for data_provider_config in data_providers_configs:
data_provider_config["class"] = DATA_PROVIDER_MAP[data_provider_config["type"]](
**data_provider_config["parameters"]
)
data_providers.append(data_provider_config)
selected_data_providers = []
for data_provider_name in data_providers_input:
found = False
for data_provider_config in data_providers:
if data_provider_config["name"] == data_provider_name:
selected_data_providers.append(data_provider_config["class"])
found = True
break
if not found:
logger.warning(
"The following data provider could not be found: {}".format(
data_provider_name
)
)
if len(selected_data_providers) == 0:
raise ValueError(
"None of the selected data providers are available. The following data providers are valid "
"options: "
+ ", ".join(
data_provider["name"] for data_provider in data_providers_configs
)
)
return selected_data_providers | 076659d2bf619808f5cb0ac124839e569af0c74a | 3,657,157 |
def _PredatorForFracas(config=None):
"""A helper to pass in the standard pipeline class."""
return PredatorForFracas(MOCK_GET_REPOSITORY, config or {}) | c7e1e3c771a8b8afa921a291198adc084f75d186 | 3,657,158 |
import numpy as np
from numpy_groupies import aggregate  # assumed source of `aggregate`
def py_SurfStatSmooth(Y, surf, FWHM):
"""Smooths surface data by repeatedly averaging over edges.
Parameters
----------
Y : numpy array of shape (n,v) or (n,v,k)
surface data, v=#vertices, n=#observations, k=#variates.
surf : a dictionary with key 'tri' or 'lat', or a BSPolyData object.
surf['tri'] = numpy array of shape (t,3), triangle indices, or
surf['lat'] = numpy array of shape (nx,ny,nz), 1=in, 0=out,
(nx,ny,nz) = size(volume).
FWHM : approximate FWHM of Gaussian smoothing filter, in mesh units.
Returns
-------
Y : numpy array of shape (n,v) or (n,v,k),
smoothed data.
"""
niter = int(np.ceil(pow(FWHM,2) / (2*np.log(2))))
if isinstance(Y, np.ndarray):
Y = np.array(Y, dtype='float')
if np.ndim(Y) == 2:
n, v = np.shape(Y)
k = 1
isnum = True
elif np.ndim(Y) == 3:
n, v, k = np.shape(Y)
isnum = True
edg = py_SurfStatEdg(surf) + 1
agg_1 = aggregate(edg[:,0], 2, size=(v+1))
agg_2 = aggregate(edg[:,1], 2, size=(v+1))
Y1 = (agg_1 + agg_2)[1:]
if n>1:
print(' %i x %i surfaces to smooth, %% remaining: 100 '%(n, k))
n10 = np.floor(n/10)
for i in range(0, n):
if n10 != 0 and np.remainder(i+1, n10) == 0:
print('%s ' % str(int(100-(i+1)/n10*10)), end = '')
for j in range(0, k):
if isnum:
if np.ndim(Y) == 2:
Ys = Y[i,:]
elif np.ndim(Y) == 3:
Ys = Y[i,:,j]
for itera in range(1, niter+1):
                    Yedg = Ys[edg[:,0]-1] + Ys[edg[:,1]-1]
agg_tmp1 = aggregate(edg[:,0], Yedg, size=(v+1))[1:]
agg_tmp2 = aggregate(edg[:,1], Yedg, size=(v+1))[1:]
Ys = (agg_tmp1 + agg_tmp2) / Y1
if np.ndim(Y) == 2:
Y[i,:] = Ys
elif np.ndim(Y) == 3:
Y[i,:,j] = Ys
if n>1:
print('Done')
return Y | 6b537e33174459cee6364dbd145181c66156830d | 3,657,159 |
from typing import Tuple
def arm_name_to_sort_key(arm_name: str) -> Tuple[str, int, int]:
"""Parses arm name into tuple suitable for reverse sorting by key
Example:
arm_names = ["0_0", "1_10", "1_2", "10_0", "control"]
sorted(arm_names, key=arm_name_to_sort_key, reverse=True)
["control", "0_0", "1_2", "1_10", "10_0"]
"""
try:
trial_index, arm_index = arm_name.split("_")
return ("", -int(trial_index), -int(arm_index))
except (ValueError, IndexError):
return (arm_name, 0, 0) | c29958bb541a9754e7b4defc6ad953030a364d2f | 3,657,160 |
from typing import Optional
from typing import Mapping
from typing import Any
def run_query_row(cur: Cursor, sql: str, params: Optional[Mapping[str, Any]] = None, **kwargs: Any
) -> Optional[skytools.dbdict]:
""" Helper function if everything you need is just paramertisized execute to
fetch one row only. If not found none is returned
"""
params = params or kwargs
rows = run_query(cur, sql, params)
if len(rows) == 0:
return None
return rows[0] | 0ba46ba0666d0cbefeda5b3fe62ac5ed883a190f | 3,657,161 |
def vortex_indicator(high_arr, low_arr, close_arr, n):
"""Calculate the Vortex Indicator for given data.
Vortex Indicator described here:
http://www.vortexindicator.com/VFX_VORTEX.PDF
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps to do EWM average
:return: Vortex Indicator in cudf.Series
"""
TR = true_range(high_arr.data.to_gpu_array(), low_arr.data.to_gpu_array(),
close_arr.data.to_gpu_array())
VM = lowhigh_diff(high_arr.data.to_gpu_array(),
low_arr.data.to_gpu_array())
VI = division(Rolling(n, VM).sum(), Rolling(n, TR).sum())
return cudf.Series(VI) | 8b34ca26f7cc52361eb95ff1ad17c010fd270759 | 3,657,162 |
from typing import Dict
def getServiceById(serviceId: str, **kwargs) -> Dict:
"""Retrieve service by its identifier.
Args:
serviceId: Identifier of service to be retrieved.
Returns:
Service object.
"""
db_collection_service = (
current_app.config['FOCA'].db.dbs['serviceStore']
.collections['services'].client
)
obj = db_collection_service.find_one({"id": serviceId})
if not obj:
raise NotFound
del obj["_id"]
return obj | fc568b337495873263f9a7ea85d46ac4bcd55819 | 3,657,163 |
from typing import Dict
from typing import Any
def replace_module_prefix(
state_dict: Dict[str, Any], prefix: str, replace_with: str = "", ignore_prefix: str = ""
):
"""
Remove prefixes in a state_dict needed when loading models that are not VISSL
trained models.
Specify the prefix in the keys that should be removed.
Added by DLM contributors: ignore_prefix is used to ignore certain keys in the state dict
"""
state_dict = {
(key.replace(prefix, replace_with, 1) if key.startswith(prefix) else key): val
for (key, val) in state_dict.items() if ((not key.startswith(ignore_prefix)) or ignore_prefix == "")
}
return state_dict | b8499c818053e7798e9549fbe546bab7d5fbfa84 | 3,657,164 |
def crop(img, left, top, right, bottom):
"""
Crop rectangle from image.
Inputs:
img - The image to crop.
left - The leftmost index to crop the image.
top - The topmost index.
right - The rightmost index.
bottom - The bottommost index.
Outputs:
img - The cropped image.
"""
    # numpy images index rows (vertical) first, then columns (horizontal)
    return img[top:bottom, left:right] | 1507a55bba07dc656f51f873d2328b69f70682c9 | 3,657,166 |
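A usage example with the corrected row/column order (rows are the first numpy axis):

import numpy as np
img = np.arange(16).reshape(4, 4)
crop(img, left=1, top=0, right=3, bottom=2)
# -> array([[1, 2],
#           [5, 6]])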
import ipaddress
def get_hosts(network):
"""get_hosts() will return all the hosts within a provided network, range"""
network = ipaddress.IPv4Network(network, strict=False)
hosts_obj = network.hosts()
hosts = []
for i in hosts_obj:
hosts.append(str(i))
return hosts | 097fa3abbf1cda1c3c0ddc0c2fec4a06d1d44fa9 | 3,657,168 |
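Usage example: for a /30 network, only the two addresses between the network and broadcast addresses are hosts:

# >>> get_hosts("192.168.1.0/30")
# ['192.168.1.1', '192.168.1.2']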
def select_organization(cursor):
"""organization情報取得(全取得)
Args:
cursor (mysql.connector.cursor): カーソル
Returns:
dict: select結果
"""
# select実行
cursor.execute('SELECT * FROM organization ORDER BY organization_id')
rows = cursor.fetchall()
return rows | 6e5c1a2f90d41223ba09fe3278353370515c0430 | 3,657,169 |
def _GetInstDisk(index, cb):
"""Build function for calling another function with an instance Disk.
@type index: int
@param index: Disk index
@type cb: callable
@param cb: Callback
"""
def fn(ctx, inst):
"""Call helper function with instance Disk.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
        try:
            disk = inst.disks[index]
        except IndexError:
            return _FS_UNAVAIL
        return cb(ctx, index, disk)
return fn | 4dc83bb5c7ac3556750f9e3a70f77c9325893fb4 | 3,657,170 |
import numpy as np
from math import sin, cos
def Jphii_cal(L, W, q, xi_local):
    """Jacobian matrix of the task map."""
return np.array([[1, 0, -sin(q[2, 0]) * xi_local[0, 0] - cos(q[2, 0]) * xi_local[1, 0]],
[0, 1, cos(q[2, 0]) * xi_local[0, 0] - sin(q[2, 0]) * xi_local[1, 0]]], dtype = np.float32)
#return np.array([[1, 0, -xi_local[1, 0]],
# [0, 1, xi_local[0, 0]]], dtype = np.float32) | 300a3724829d8ce2df15801b6ae02e78e8e2e6b7 | 3,657,171 |
def model_evalution(test_data):
    """ function to test the loss and accuracy on validation data """
    for X_test, y_test in test_data:
y_pred = model(X_test, training=False)
val_acc_metrics.update_state(y_test, y_pred)
accuracy = val_acc_metrics.result()
return float(accuracy) | d581013f50560082f8f6854f201cfd791be6e876 | 3,657,172 |
import inspect
import numpy
def make_python_script_from_list(list_optical_elements1,script_file=""):
"""
program to build automatically a python script to run shadow3
the system is read from a list of instances of Shadow.Source and Shadow.OE
:argument list of optical_elements A python list with intances of Shadow.Source and Shadow.OE objects
:param script_file: a string with the name of the output file (default="", no output file)
:return: template with the script
"""
#make sure that the list does not contain lists
haslist = sum([isinstance(i,list) for i in list_optical_elements1])
list_optical_elements = list_optical_elements1
if haslist:
while(haslist > 0):
newlist = []
for i in list_optical_elements:
if isinstance(i,list):
newlist.extend(i)
else:
newlist.append(i)
list_optical_elements = newlist
haslist = sum([isinstance(i,list) for i in list_optical_elements])
#make sure that the list does not contain compoundOE (developed)
hascomp = sum([isinstance(i,(Shadow.CompoundOE,Shadow.ShadowLibExtensions.CompoundOE)) for i in list_optical_elements])
if hascomp:
newlist = []
for i in list_optical_elements:
if isinstance(i,(Shadow.CompoundOE,Shadow.ShadowLibExtensions.CompoundOE)):
newlist.extend(i.list)
else:
newlist.append(i)
list_optical_elements = newlist
template = """#
# Python script to run shadow3. Created automatically with ShadowTools.make_python_script_from_list().
#
import Shadow
import numpy
# write (1) or not (0) SHADOW files start.xx end.xx star.xx
iwrite = 0
#
# initialize shadow3 source (oe0) and beam
#
beam = Shadow.Beam()
"""
n_elements = len(list_optical_elements)
for i,element in enumerate(list_optical_elements):
if isinstance(element,Shadow.Source):
template += "oe0 = Shadow.Source()\n"
elif isinstance(element,Shadow.OE):
template += "oe%d = Shadow.OE()\n"%(i)
elif isinstance(element,Shadow.IdealLensOE):
template += "oe%d = Shadow.IdealLensOE()\n"%(i)
else:
raise Exception("Error: Element not known")
template += "\n#\n# Define variables. See meaning of variables in: \n" \
"# https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml \n" \
"# https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml\n#\n"
for ioe,oe1B in enumerate(list_optical_elements):
template += "\n"
        if isinstance(oe1B, Shadow.Source):
            oe1 = Shadow.Source()
        elif isinstance(oe1B, Shadow.OE):
            oe1 = Shadow.OE()
        elif isinstance(oe1B, Shadow.IdealLensOE):
            oe1 = Shadow.IdealLensOE()
        else:
            raise Exception("Error: Element not known")
if isinstance(oe1B,Shadow.IdealLensOE):
template += "oe"+str(ioe)+".T_SOURCE = "+str(oe1B.T_SOURCE).strip()+"\n"
template += "oe"+str(ioe)+".T_IMAGE = "+str(oe1B.T_IMAGE).strip()+"\n"
template += "oe"+str(ioe)+".focal_x = "+str(oe1B.focal_x).strip()+"\n"
template += "oe"+str(ioe)+".focal_z = "+str(oe1B.focal_z).strip()+"\n"
else:
memB = inspect.getmembers(oe1B)
mem = inspect.getmembers(oe1)
for i,var in enumerate(memB):
ivar = mem[i]
ivarB = memB[i]
if ivar[0].isupper():
if isinstance(ivar[1],numpy.ndarray):
# print(" are ALL different ? ", (ivar[1] != ivarB[1]).all())
# print(" are the same ? ", (ivar[1] == ivarB[1]).all())
# print(" there is at least ONE diff ? ", not((ivar[1] == ivarB[1]).all()))
if not( (ivar[1] == ivarB[1]).all()) :
line = "oe"+str(ioe)+"."+ivar[0]+" = numpy.array("+str(ivarB[1].tolist())+ ")\n"
template += line
# if (ivar[1] != ivarB[1]).all():
# line = "oe"+str(ioe)+"."+ivar[0]+" = "+str(ivarB[1])+"\n"
# if ("SPECIFIED" in line):
# pass
# else:
# template += line
else:
if ivar[1] != ivarB[1]:
if isinstance(ivar[1],(str,bytes)):
line = "oe"+str(ioe)+"."+ivar[0]+" = "+str(ivarB[1]).strip()+"\n"
#line = re.sub('\s{2,}', ' ',line)
if "SPECIFIED" in line:
pass
else:
template += line
else:
line = "oe"+str(ioe)+"."+ivar[0]+" = "+str(ivarB[1])+"\n"
template += line
template += """\n\n
#Run SHADOW to create the source
if iwrite:
oe0.write("start.00")
beam.genSource(oe0)
if iwrite:
oe0.write("end.00")
beam.write("begin.dat")
"""
template_oeA = """\n
#
#run optical element {0}
#
print(" Running optical element: %d"%({0}))
if iwrite:
oe{0}.write("start.{1}")
"""
template_oeB = """\n
if iwrite:
oe{0}.write("end.{1}")
beam.write("star.{1}")
"""
for i in range(1,n_elements):
template += template_oeA.format(i,"%02d"%(i))
if isinstance(list_optical_elements[i],Shadow.OE):
template += "\nbeam.traceOE(oe%d,%d)"%(i,i)
elif isinstance(list_optical_elements[i],Shadow.IdealLensOE):
template += "\nbeam.traceIdealLensOE(oe%d,%d)"%(i,i)
template += template_oeB.format(i,"%02d"%(i))
#
# display results (using ShadowTools, matplotlib needed)
#
template += """\n
Shadow.ShadowTools.plotxy(beam,1,3,nbins=101,nolost=1,title="Real space")
# Shadow.ShadowTools.plotxy(beam,1,4,nbins=101,nolost=1,title="Phase space X")
# Shadow.ShadowTools.plotxy(beam,3,6,nbins=101,nolost=1,title="Phase space Z")
"""
if script_file != "":
open(script_file, "wt").write(template)
print("File written to disk: %s"%(script_file))
return template | 85eb57955badaa4a2748be8ca6f2bf0f370b422d | 3,657,173 |
def flax_tag(arr):
"""Wraps a value in a flax module, to inspect intermediate values."""
return arr | be2fbef6117c859b7fc9dd7274815df4e70df17e | 3,657,174 |
import time as nativetime
def toEpoch(dateTimeObject=None):
    """
    Get seconds since epoch
    """
    if dateTimeObject is None:
        dateTimeObject = dateTime()
    return nativetime.mktime(dateTimeObject.timetuple()) | f679f75e9d416c471491b0b933505fc6bbb6eb7d | 3,657,175 |
import requests
import json
def sendNotification(token, title, message, extraData=None, channelID=None):
"""
send Notification to Devices
:param token:
:param title:
:param message:
:return:
"""
url = 'https://exp.host/--/api/v2/push/send'
headers = {
"Content-Type": "application/json"
}
data = {
"to": token,
"title": title,
"body": message
}
# Verify we have Additional data to append
if extraData is not None:
data["data"] = extraData
# Android Only! Verify if we have a channel ID and append it
if channelID is not None:
data["channelId"] = channelID
res = requests.post(url, data=json.dumps(data), headers=headers)
return res.status_code | 1038dfd3872221a0d447b7708d58d95e931c59e5 | 3,657,176 |
import numpy as np
def make_phsfct_kernel(size_px, dpx, g_fac):
    """
    Make a kernel for phase function convolution
    :param size_px: kernel size in pixels
    :param dpx: angular scale [deg/px]
    :param g_fac: phase function asymmetry parameter g
    :return: ph_ker, kernel sampled on an angular grid [deg]
    """
    ke = np.mgrid[:size_px, :size_px].astype(float)
    half = (size_px - 1) / 2
    ke[0] -= half
    ke[1] -= half
    dist = np.sqrt(ke[0] * ke[0] + ke[1] * ke[1])
dist_deg = dist * dpx
ph_ker = phasefunc(g_fac, dist_deg) # Fill radially with phase function
# ph_ker = ph_ker/np.sum(ph_ker)
ph_ker = ph_ker / (2. * np.pi)
return ph_ker | 0f214d19f7418385f3db9155e8cabb06779fdf83 | 3,657,177 |
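A hedged sketch of the `phasefunc` helper assumed above, written as a Henyey-Greenstein phase function taking the scattering angle in degrees (an assumption; the row only shows the call site):

import numpy as np

def phasefunc(g, theta_deg):
    """Henyey-Greenstein phase function, normalized over the sphere."""
    theta = np.radians(theta_deg)
    return (1.0 - g ** 2) / (4.0 * np.pi * (1.0 + g ** 2 - 2.0 * g * np.cos(theta)) ** 1.5)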
import numpy as np
def sample_pts_ellipsoid_surface(mu, Q, NB_pts, random=True):
"""
Uniformly samples points on the surface of an ellipsoid, specified as
(xi-mu)^T Q^{-1} (xi-mu) == 1
arguments: mu - mean [dim]
Q - Q [dim x dim]
NB_pts - nb of points
random - True: Uniform sampling.
False: Uniform deterministic grid
output: ell_pts - points on the boundary of the ellipse [xdim x NB_pts]
"""
dim = mu.shape[0]
if dim != Q.shape[0] or dim != Q.shape[1]:
raise ValueError("mu (%d) and Q (%d,%d) must be the same size" %(mu.shape[0], Q.shape[0], Q.shape[1]))
if (Q == np.zeros((dim,dim))).all():
return np.zeros((dim,NB_pts))
if random == False and dim > 2:
raise ValueError("sample_pts_ellipsoid_surface: non random sampling not implemented")
mut = np.array([mu])
pts = sample_pts_unit_sphere(dim, NB_pts, random=random).T
E = np.linalg.cholesky(Q)
ell_pts = (mut + pts @ E.T).T
return ell_pts | 89fa8383d32b74e8c92a52792fe2de4d35816acc | 3,657,178 |
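A hedged sketch of the `sample_pts_unit_sphere` helper used above (Gaussian-normalization method for the random case; the deterministic grid branch is omitted):

import numpy as np

def sample_pts_unit_sphere(dim, nb_pts, random=True):
    """Uniform samples on the unit sphere, shape (dim, nb_pts)."""
    if not random:
        raise NotImplementedError("deterministic grid not sketched here")
    pts = np.random.randn(dim, nb_pts)   # isotropic Gaussian directions
    pts /= np.linalg.norm(pts, axis=0)   # project each column onto the sphere
    return pts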
def load_mzml_path():
"""Return the path to the mzML toy file.
Parameters
----------
None
Returns
-------
path_data : str
The path to the mzML data.
Examples
--------
>>> from specio.datasets import load_mzml_path
>>> load_mzml_path() # doctest: +ELLIPSIS
'...spectra.mzml'
"""
module_path = dirname(__file__)
return join(module_path, 'data', 'spectra.mzml') | b0548589a209b14ef336a28eeca74782f3550186 | 3,657,179 |
import pandas as pd
def _czce_df_read(url, skip_rows, encoding='utf-8', header=0):
    """
    Fetch table data from the Zhengzhou Commodity Exchange (CZCE) website
    :param header: row number to use as column names
    :type header: int
    :param url: page URL, string
    :param skip_rows: number of leading rows to skip, int
    :param encoding: utf-8 or gbk or gb2312
    :return: pd.DataFrame
    """
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36",
"Host": "www.czce.com.cn",
"Cookie": "XquW6dFMPxV380S=CAaD3sMkdXv3fUoaJlICIEv0MVegGq5EoMyBcxkOjCgSjmpuovYFuTLtYFcxTZGw; XquW6dFMPxV380T=5QTTjUlA6f6WiDO7fMGmqNxHBWz.hKIc8lb_tc1o4nHrJM4nsXCAI9VHaKyV_jkHh4cIVvD25kGQAh.MvLL1SHRA20HCG9mVVHPhAzktNdPK3evjm0NYbTg2Gu_XGGtPhecxLvdFQ0.JlAxy_z0C15_KdO8kOI18i4K0rFERNPxjXq5qG1Gs.QiOm976wODY.pe8XCQtAsuLYJ.N4DpTgNfHJp04jhMl0SntHhr.jhh3dFjMXBx.JEHngXBzY6gQAhER7uSKAeSktruxFeuKlebse.vrPghHqWvJm4WPTEvDQ8q",
}
r = requests_link(url, encoding, headers=headers)
data = pd.read_html(r.text, match='.+', flavor=None, header=header, index_col=0, skiprows=skip_rows, attrs=None,
parse_dates=False, thousands=', ', encoding="gbk", decimal='.',
converters=None, na_values=None, keep_default_na=True)
return data | 1491e312f1548141294d20b6ebe2fb4517cd3e07 | 3,657,180 |
import random
def select(weights):
"""
select a node with probability proportional to its "weight"
"""
r = random.random() * sum(weights)
s = 0.0
for k,w in enumerate(weights):
s += w
if r <= s:
return k
raise RuntimeError("select WTF from %s" % weights) | fed92de65cfae6f3532754215f5b88a564365ac7 | 3,657,181 |
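Usage: `select` implements roulette-wheel (fitness-proportionate) selection, so index 1 below is drawn roughly 75% of the time; the counts shown are illustrative:

from collections import Counter
Counter(select([1.0, 3.0]) for _ in range(10000))
# e.g. Counter({1: 7498, 0: 2502})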
def kexo(spacecraft_id, sensor_id, band_id):
    """Sun exo-atmospheric irradiance [W/m2/sr]
    This is used for processing surface reflectance.
    Spacecraft_id: Landsat7
    Sensor_id: ETM+
    band_id: band1, band2, band3, band4, band5, band7, band8
    Spacecraft_id: Terra
    Sensor_id: Aster
    band_id: band1, band2, band3, band4, band5, band7, band8, band9
    kexo(spacecraft_id, sensor_id, band_id)
    """
    # Lookup tables replace the original if-chains, whose trailing `else`
    # clauses reset kexo to 0.0 for every band except the last one tested.
    irradiance = {
        ("Landsat7", "ETM+"): {
            "band1": 1969.0,
            "band2": 1840.0,
            "band3": 1551.0,
            "band4": 1044.0,
            "band5": 225.7,
            "band7": 82.07,
            "band8": 1385.64,  # Self calculated value...
        },
        ("Terra", "Aster"): {
            "band1": 1828.0,
            "band2": 1559.0,
            "band3": 1045.0,
            "band4": 226.73,
            "band5": 86.50,
            "band7": 74.72,
            "band8": 66.41,
            "band9": 59.83,
        },
    }
    return irradiance.get((spacecraft_id, sensor_id), {}).get(band_id, 0.0) | 0e11a1b0b6ea8a43bef954273ed3a32a1d39c842 | 3,657,182
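# Usage sketch: spot-check a couple of irradiance lookups, including the
# fall-through to 0.0 for a platform the table does not define.
print(kexo("Landsat7", "ETM+", "band4"))  # 1044.0
print(kexo("Terra", "Aster", "band9"))    # 59.83
print(kexo("Landsat8", "OLI", "band1"))   # 0.0 (not in the table)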
def gen_profile_id(profile_id):
"""
Generates the Elasticsearch document id for a profile
Args:
profile_id (str): The username of a Profile object
Returns:
str: The Elasticsearch document id for this object
"""
return "u_{}".format(profile_id) | 003586fe87d2936d9054aaa35963ae0241a5e594 | 3,657,183 |
async def get_self_info(credential: Credential):
"""
    Get information about the currently logged-in user.
Args:
credential (Credential): Credential
"""
api = API["info"]["my_info"]
credential.raise_for_no_sessdata()
return await request("GET", api["url"], credential=credential) | 74cc7f5e43c555de45c382db27cd314bb2b5794e | 3,657,185 |
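# Hypothetical usage sketch (asyncio); the Credential constructor argument is
# an assumption about the surrounding bilibili-api-style library, not taken
# from this file.
import asyncio

async def _demo():
    cred = Credential(sessdata="<your SESSDATA cookie>")  # placeholder token
    print(await get_self_info(cred))

asyncio.run(_demo())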
def mpl_event_handler(event_type: MplEvent):
"""Marks the decorated method as given matplotlib event handler
.. note::
This decorator should be used only for methods of classes that
inherited from :class:`MplEventDispatcher` class.
This decorator can be used for reassignment event handlers in a dispatcher class.
Examples
--------
.. code-block:: python
from mpl_events import MplEventDispatcher, mpl_event_handler, mpl
class MyEventDispatcher(MplEventDispatcher):
@mpl_event_handler(MplEvent.KEY_PRESS)
def on_my_key_press(self, event: mpl.KeyPress):
pass
"""
class HandlerDescriptor:
"""Adds handler method name to event handlers mapping
"""
def __init__(self, handler):
self.handler = handler
def __get__(self, obj, cls=None):
return self.handler.__get__(obj, cls)
def __set_name__(self, owner, name):
if 'mpl_event_handlers' not in owner.__dict__:
owner.mpl_event_handlers = getattr(owner, 'mpl_event_handlers', {}).copy()
owner.mpl_event_handlers[event_type] = name
return HandlerDescriptor | 7cec2aad7f50daf832657bc01ac710159d1161a0 | 3,657,187 |
def get_date_pairs(in_dates, step):
"""
    Build entry/exit date pairs.
    :param in_dates: all entry dates
    :param step: step size in days between entry and exit
    :return: list of DatePair namedtuples
"""
DatePair = namedtuple('DatePair', ['in_date', 'out_date'])
date_pairs = []
for in_date in in_dates:
out_date = date_utility.date_cal(in_date, step)
date_pairs.append(DatePair(in_date, out_date))
return date_pairs | a2da0f3a48296de6c9f70b0e7535c8a2dd8e3d0b | 3,657,188 |
import random
def new_jitters(jitter):
"""
update jitter vector every 100 frames by setting ~half of noise vector units to lower sensitivity
"""
jitters=np.zeros(128)
for j in range(128):
if random.uniform(0,1)<0.5:
jitters[j]=1
else:
jitters[j]=1-jitter
return jitters | cab660f8b8c6cfb21e745479cae95e964dc412b9 | 3,657,189 |
def add_manuscript_urls_to_ci_params(ci_params):
"""
Return and edit in-place the ci_params dictionary to include 'manuscript_url'.
This function assumes Travis CI is used to deploy to GitHub Pages, while
AppVeyor is used for storing manuscript artifacts for pull request builds.
"""
if not ci_params:
return ci_params
assert isinstance(ci_params, dict)
provider = ci_params.get('provider')
if provider == 'travis':
ci_params['manuscript_url'] = (
"https://{repo_owner}.github.io/{repo_name}/v/{commit}/"
.format(**ci_params)
)
if provider == 'appveyor':
ci_params['manuscript_url'] = f"{ci_params['build_url']}/artifacts"
return ci_params | 7d45c4fe8060d387d0238788e4b7566e09abc499 | 3,657,191 |
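# Sketch with made-up CI parameters showing the derived manuscript_url for the
# Travis branch; repo_owner/repo_name/commit values are illustrative only.
params = {
    "provider": "travis",
    "repo_owner": "example-org",
    "repo_name": "example-manuscript",
    "commit": "abc1234",
}
add_manuscript_urls_to_ci_params(params)
print(params["manuscript_url"])
# https://example-org.github.io/example-manuscript/v/abc1234/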
def count_sites(vcfpath):
"""Extract number of sites in VCF from its tabix index."""
cmd = ["bcftools","index","--nrecords", vcfpath]
so, se, code = slurp_command(cmd)
return int(so) | 4f340827bbfc279e3b2601bd84ef68669ce1d829 | 3,657,192 |
import torch
from typing import Callable
def model_contrast_score(overlays: torch.Tensor, masks: torch.Tensor, object_labels: torch.Tensor,
scene_labels: torch.Tensor, object_model: Callable, scene_model: Callable,
object_method: Callable, scene_method: Callable, device: str):
"""
Model contrast score:
Difference of importance of object pixels for model trained on object labels
(should be important) and model trained on scene labels (should not be important)
"""
overlays = overlays.to(device)
object_labels = object_labels.to(device)
scene_labels = scene_labels.to(device)
masks = masks.squeeze().to(device)
# We check if both the object model and the scene model make the correct classification
with torch.no_grad():
y_pred_obj = torch.argmax(object_model(overlays), dim=1)
y_pred_scene = torch.argmax(scene_model(overlays), dim=1)
correctly_classified = ((y_pred_obj == object_labels) & (y_pred_scene == scene_labels))
object_model_attrs = object_method(overlays, object_labels)
scene_model_attrs = scene_method(overlays, scene_labels)
mask_sizes = torch.sum(masks.flatten(1), dim=1)
diffs = (object_model_attrs - scene_model_attrs) / mask_sizes
return diffs.cpu(), correctly_classified.cpu() | b44b0a958a79a1ad7a84de15817cdbc32160c13b | 3,657,193 |
from typing import Optional
def get_network_insights_access_scope_analysis(network_insights_access_scope_analysis_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkInsightsAccessScopeAnalysisResult:
"""
Resource schema for AWS::EC2::NetworkInsightsAccessScopeAnalysis
"""
__args__ = dict()
__args__['networkInsightsAccessScopeAnalysisId'] = network_insights_access_scope_analysis_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:ec2:getNetworkInsightsAccessScopeAnalysis', __args__, opts=opts, typ=GetNetworkInsightsAccessScopeAnalysisResult).value
return AwaitableGetNetworkInsightsAccessScopeAnalysisResult(
analyzed_eni_count=__ret__.analyzed_eni_count,
end_date=__ret__.end_date,
findings_found=__ret__.findings_found,
network_insights_access_scope_analysis_arn=__ret__.network_insights_access_scope_analysis_arn,
network_insights_access_scope_analysis_id=__ret__.network_insights_access_scope_analysis_id,
start_date=__ret__.start_date,
status=__ret__.status,
status_message=__ret__.status_message,
tags=__ret__.tags) | cbd65230cf553b438f4a78ad34f6faa9eafb119f | 3,657,194 |
def wavenumber(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2):
"""Return the electromagnetic wavenumber-domain field.
Calculate the electromagnetic wavenumber-domain field due to infinitesimal
small electric or magnetic dipole source(s), measured by infinitesimal
small electric or magnetic dipole receiver(s); sources and receivers are
directed along the principal directions x, y, or z, and all sources are at
the same depth, as well as all receivers are at the same depth.
See Also
--------
dipole : Electromagnetic field due to an electromagnetic source (dipoles).
bipole : Electromagnetic field due to an electromagnetic source (bipoles).
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m): [x, y, z].
The x- and y-coordinates can be arrays, z is a single value.
The x- and y-coordinates must have the same dimension.
The x- and y-coordinates only matter for the angle-dependent factor.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
freq : array_like
Frequencies f (Hz), used to calculate etaH/V and zetaH/V.
wavenumber : array
Wavenumbers lambda (1/m)
ab : int, optional
Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+
| | electric source | magnetic source |
+===============+=======+======+======+======+======+======+======+
| | **x**| **y**| **z**| **x**| **y**| **z**|
+---------------+-------+------+------+------+------+------+------+
| | **x** | 11 | 12 | 13 | 14 | 15 | 16 |
+ **electric** +-------+------+------+------+------+------+------+
| | **y** | 21 | 22 | 23 | 24 | 25 | 26 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 31 | 32 | 33 | 34 | 35 | 36 |
+---------------+-------+------+------+------+------+------+------+
| | **x** | 41 | 42 | 43 | 44 | 45 | 46 |
+ **magnetic** +-------+------+------+------+------+------+------+
| | **y** | 51 | 52 | 53 | 54 | 55 | 56 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 61 | 62 | 63 | 64 | 65 | 66 |
+---------------+-------+------+------+------+------+------+------+
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
PJ0, PJ1 : array
Wavenumber-domain EM responses:
- PJ0: Wavenumber-domain solution for the kernel with a Bessel
function of the first kind of order zero.
- PJ1: Wavenumber-domain solution for the kernel with a Bessel
function of the first kind of order one.
Examples
--------
>>> import numpy as np
>>> from empymod.model import wavenumber
>>> src = [0, 0, 100]
>>> rec = [5000, 0, 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> freq = 1
>>> wavenrs = np.logspace(-3.7, -3.6, 10)
>>> PJ0, PJ1 = wavenumber(src, rec, depth, res, freq, wavenrs, verb=0)
>>> print(PJ0)
[ -1.02638329e-08 +4.91531529e-09j -1.05289724e-08 +5.04222413e-09j
-1.08009148e-08 +5.17238608e-09j -1.10798310e-08 +5.30588284e-09j
-1.13658957e-08 +5.44279805e-09j -1.16592877e-08 +5.58321732e-09j
-1.19601897e-08 +5.72722830e-09j -1.22687889e-08 +5.87492067e-09j
-1.25852765e-08 +6.02638626e-09j -1.29098481e-08 +6.18171904e-09j]
>>> print(PJ1)
[ 1.79483705e-10 -6.59235332e-10j 1.88672497e-10 -6.93749344e-10j
1.98325814e-10 -7.30068377e-10j 2.08466693e-10 -7.68286748e-10j
2.19119282e-10 -8.08503709e-10j 2.30308887e-10 -8.50823701e-10j
2.42062030e-10 -8.95356636e-10j 2.54406501e-10 -9.42218177e-10j
2.67371420e-10 -9.91530051e-10j 2.80987292e-10 -1.04342036e-09j]
"""
# === 1. LET'S START ============
t0 = printstartfinish(verb)
# === 2. CHECK INPUT ============
# Check layer parameters (isfullspace not required)
modl = check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV,
False, verb)
depth, res, aniso, epermH, epermV, mpermH, mpermV, _ = modl
# Check frequency => get etaH, etaV, zetaH, and zetaV
f = check_frequency(freq, res, aniso, epermH, epermV, mpermH, mpermV, verb)
freq, etaH, etaV, zetaH, zetaV = f
# Check src-rec configuration
# => Get flags if src or rec or both are magnetic (msrc, mrec)
ab_calc, msrc, mrec = check_ab(ab, verb)
# Check src and rec
src, nsrc = check_dipole(src, 'src', verb)
rec, nrec = check_dipole(rec, 'rec', verb)
# Get angle-dependent factor
off, angle = get_off_ang(src, rec, nsrc, nrec, verb)
factAng = kernel.angle_factor(angle, ab, msrc, mrec)
# Get layer number in which src and rec reside (lsrc/lrec)
lsrc, zsrc = get_layer_nr(src, depth)
lrec, zrec = get_layer_nr(rec, depth)
# === 3. EM-FIELD CALCULATION ============
# If <ab> = 36 (or 63), field is zero
# In `bipole` and in `dipole`, this is taken care of in `fem`. Here we
# have to take care of it separately
if ab_calc in [36, ]:
PJ0 = np.zeros((freq.size, off.size, wavenumber.size), dtype=complex)
PJ1 = PJ0.copy()
else: # Regular calculation
# Calculate wavenumber response
PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
etaV, zetaH, zetaV,
np.atleast_2d(wavenumber), ab_calc,
False, msrc, mrec, False)
# Collect output
PJ1 = factAng[:, np.newaxis]*PJ1
if ab in [11, 12, 21, 22, 14, 24, 15, 25]: # Because of J2
# J2(kr) = 2/(kr)*J1(kr) - J0(kr)
PJ1 /= off[:, None]
PJ0 = PJ0 + factAng[:, np.newaxis]*PJ0b
# === 4. FINISHED ============
printstartfinish(verb, t0, 1)
return np.squeeze(PJ0), np.squeeze(PJ1) | c108f3343936a62b0d49a3807d2d25b4f3fc1eda | 3,657,196 |
def gumbel_softmax(logits, temperature, dtype=tf.float32, seed=0):
"""Gumbel Softmax Layer."""
log_alpha = tf.nn.log_softmax(logits)
eps = 1e-7
gumbel = -tf.log(-tf.log(
tf.random_uniform(
tf.shape(logits), minval=0, maxval=1 - eps, dtype=dtype, seed=seed) +
eps))
prob = tf.nn.softmax((log_alpha + gumbel) / temperature)
return prob | 3889105f39e6f81c35e1a3ca94685b6e6d7e3f37 | 3,657,197 |
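# NumPy restatement of the same trick (a sketch, independent of the TF graph
# above): softmax((log_softmax(logits) + g) / T) with g ~ Gumbel(0, 1)
# approaches a one-hot sample from softmax(logits) as T -> 0.
import numpy as np

rng = np.random.default_rng(0)
logits = np.array([1.0, 2.0, 0.5])
log_alpha = logits - np.log(np.sum(np.exp(logits)))    # log-softmax
g = -np.log(-np.log(rng.uniform(size=logits.shape)))   # Gumbel(0, 1) noise
for T in (1.0, 0.1):
    y = np.exp((log_alpha + g) / T)
    print(T, y / y.sum())  # sharper (closer to one-hot) at lower T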
def divide(num1, num2=1):
"""
    Division
    :param num1: int
    :param num2: int
    :return: float
    """
    # Validate the divisor and raise the custom exception on zero
if num2 == 0:
raise InvalidOpreation()
val = num1 / num2
return val | 6bcc9631ebba74a15f16f8da0a9dc7f76e372725 | 3,657,198 |
def convert2int(image):
""" Transfrom from float tensor ([-1.,1.]) to int image ([-1024,6500])
"""
return tf.image.convert_image_dtype((image + 1) * 2036 - 1000, tf.float32) | 1697e6bb6911e936e9ff4bbb0ab37ddfc8115340 | 3,657,199 |
import time
def execution_duration(fun):
"""
Calculates the duration the function 'fun' takes to execute.
execution_duration returns a wrapper function to which you pass your arguments.
Example: execution_duration(my_function)(my_first_param, my_second_param)
The result of the wrapper function will be a tuple, where the fist value is the
return value of your function and the second is the execution time in seconds expressed
as a float.
"""
def wrapper(*args, **kwargs):
t1 = time.time()
result = fun(*args, **kwargs)
exec_dur = time.time() - t1
return result, exec_dur
return wrapper | b824ce8e1448a65bd932ec8344b1976d2a86dd09 | 3,657,201 |
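# Usage sketch for the timing wrapper defined above.
import time

def slow_add(a, b):
    time.sleep(0.1)
    return a + b

result, seconds = execution_duration(slow_add)(1, 2)
print(result, round(seconds, 1))  # 3 0.1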
def return_origin_and_destination():
"""Return origin and destination from session's waypoints key."""
waypoints = session['waypoints']
if len(waypoints) <= 1:
return 'Please enter at least 2 destinations for your trip.'
else:
origin = session['waypoints'][0]
destination = session['waypoints'][-1]
data = {
"origin": origin,
"destination": destination
}
return jsonify(data) | db8764fc32fe1367f303fa44b9c5c0c113a8c9ee | 3,657,202 |
def attempt_move(piece):
"""
Attempts to make a move if the target coordinate is a legal move.
Returns:
True if the move is made, False otherwise
"""
x, y = pygame.mouse.get_pos()
x = x // 100
y = y // 100
if (piece is not None) and (x, y) in piece.legal_moves:
piece.move(the_board, x, y)
initialize_moves()
update_moves()
return True
return False | 36c2b7764f6bb13765cf2eed7270f90f1cb338d1 | 3,657,203 |
def give(user_id, text, group):
"""construct a message to be sent that mentions a user,
which is surprisingly complicated with GroupMe"""
nickname = group.members().filter(user_id=user_id).first.nickname
mention = attachments.Mentions([user_id], [[0, len(nickname)+1]]).as_dict()
message = '@{} {}'.format(nickname, text)
return (message, mention) | f9d36042b3ab5a2681fe065ac935321d8d398085 | 3,657,204 |
def make_annotation_loader_factory():
"""Generate a factory function for constructing annotation loaders.
Invoke the returned factory function by passing the name of the annotation
loader class you want to construct, followed by the parameters for the
constructor as named arguments
(e.g., factory('FourCornersCSV', annotations_file=...))
"""
return AnnotationLoaderLoader().loader.make_object_factory() | 70e6d9834a903a614a41510b6d97b62c3d1d5b3f | 3,657,206 |
def test_arma():
"""arma, check that rho is correct (appendix 10.A )and reproduce figure 10.2"""
a,b, rho = arma_estimate(marple_data, 20, 20, 40)
psd = arma2psd(A=a,B=b, rho=rho, NFFT=None)
psd = arma2psd(A=a,B=b, rho=rho)
try:
psd = arma2psd(A=None, B=None, rho=rho)
assert False
except:
assert True
return psd | b1db09017fe060746ae1b503315bfaa6f3a44a58 | 3,657,207 |
from typing import Union
def chunks_lists_to_tuples(level: Union[list, int, float]) -> Union[tuple, int, float]:
"""Convert a recursive list of lists of ints into a tuple of tuples of ints. This is
a helper function needed because MongoDB automatically converts tuples to lists, but
the dask constructor wants the chunks defined strictly as tuples.
e.g.
- input: ``[[1, 2], [3, 4]]``
- output: ``((1, 2), (3, 4))``
.. note::
float data type is supported to allow for NaN-sized dask chunks
"""
if isinstance(level, list):
return tuple(chunks_lists_to_tuples(i) for i in level)
if isinstance(level, (int, float)):
return level
raise TypeError(level) | 49cc7923211d50fdf6a386016af12b80a2f821df | 3,657,208 |
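# Quick check of the recursive conversion, including a NaN-sized dask chunk.
print(chunks_lists_to_tuples([[1, 2], [3, float('nan')]]))  # ((1, 2), (3, nan))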
def oid_pattern_specificity(pattern):
# type: (str) -> Tuple[int, Tuple[int, ...]]
"""Return a measure of the specificity of an OID pattern.
Suitable for use as a key function when sorting OID patterns.
"""
    wildcard_key = -1  # Must be less than all digits, so that e.g. '1.*' is less specific than '1.n' for n = 0...9.
parts = tuple(wildcard_key if digit == '*' else int(digit) for digit in pattern.lstrip('.').split('.'))
return (
len(parts), # Shorter OIDs are less specific than longer OIDs, regardless of their contents.
parts, # For same-length OIDs, compare their contents (integer parts).
) | 7d1b4304791076fca42add7a8b9aeb31f85359f9 | 3,657,209 |
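# Using the key function to order OID patterns from least to most specific.
patterns = ['1.3.6.1.*', '1.3.6.1.2', '1.3.*', '1.3.6.1.2.1']
print(sorted(patterns, key=oid_pattern_specificity))
# ['1.3.*', '1.3.6.1.*', '1.3.6.1.2', '1.3.6.1.2.1']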
def extract_entities(text, json_={}):
"""
Extract entities from a given text using metamap and
generate a json, preserving infro regarding the sentence
of each entity that was found. For the time being, we preserve
both concepts and the entities related to them
Input:
- text: str,
a piece of text or sentence
- json_: dic,
sometimes the json to be returned is given to us to be enriched
Defaults to an empty json_
Output:
- json_: dic,
json with fields text, sents, concepts and entities
      containing the final results
"""
json_['text'] = text
# Tokenize the text
sents = sent_tokenize(text)
json_['sents'] = [{'sent_id': i, 'sent_text': sent} for i, sent in enumerate(sents)]
json_['concepts'], _ = mmap_extract(text)
json_['entities'] = {}
for i, sent in enumerate(json_['sents']):
ents = metamap_ents(sent)
json_['entities'][sent['sent_id']] = ents
return json_ | 15f8b88e430c451a517f11b661aa1c57a93288fe | 3,657,210 |
def gaul_as_df(gaul_path):
"""
Load the Gaussian list output by PyBDSF as a pd.DataFrame
Args:
gaul_path (`str`): Path to Gaussian list (.gaul file)
"""
gaul_df = pd.read_csv(
gaul_path, skiprows=6, names=GAUL_COLUMNS, delim_whitespace=True,
)
return gaul_df | 806f8c386344c5380109705b053b89a82db62e66 | 3,657,211 |
def normalize_matrix(mat, dim=3, p=2):
"""Normalize matrix.
Args:
mat: matrix
dim: dimension
p: p value for norm
Returns: normalized matrix
"""
mat_divided = F.normalize(mat, p=p, dim=dim)
return mat_divided | 35ac155a51818d2b93fc12a0c91ce35c0dfd9fe2 | 3,657,212 |
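# Minimal check (assumes the defining module imports torch.nn.functional as F):
# rows of a 2-D tensor get unit L2 norm when dim=1.
import torch
m = torch.tensor([[3.0, 4.0], [0.0, 5.0]])
print(normalize_matrix(m, dim=1))  # tensor([[0.6000, 0.8000], [0.0000, 1.0000]])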
from typing import List
import math
def species_to_parameters(species_ids: List[str],
sbml_model: 'libsbml.Model') -> List[str]:
"""
Turn a SBML species into parameters and replace species references
inside the model instance.
:param species_ids:
List of SBML species ID to convert to parameters with the same ID as
the replaced species.
:param sbml_model:
SBML model to modify
:return:
List of IDs of species which have been converted to parameters
"""
transformables = []
for species_id in species_ids:
species = sbml_model.getSpecies(species_id)
if species.getHasOnlySubstanceUnits():
logger.warning(
f"Ignoring {species.getId()} which has only substance units."
" Conversion not yet implemented.")
continue
if math.isnan(species.getInitialConcentration()):
logger.warning(
f"Ignoring {species.getId()} which has no initial "
"concentration. Amount conversion not yet implemented.")
continue
transformables.append(species_id)
# Must not remove species while iterating over getListOfSpecies()
for species_id in transformables:
species = sbml_model.removeSpecies(species_id)
par = sbml_model.createParameter()
par.setId(species.getId())
par.setName(species.getName())
par.setConstant(True)
par.setValue(species.getInitialConcentration())
par.setUnits(species.getUnits())
# Remove from reactants and products
for reaction in sbml_model.getListOfReactions():
for species_id in transformables:
# loop, since removeX only removes one instance
while reaction.removeReactant(species_id):
# remove from reactants
pass
while reaction.removeProduct(species_id):
# remove from products
pass
while reaction.removeModifier(species_id):
# remove from modifiers
pass
return transformables | a7cb9df992bad98584124320bc485aa978495050 | 3,657,213 |
import warnings
def gaussian_filter_cv(array: np.ndarray, sigma) -> np.ndarray:
"""
Apply a Gaussian filter to a raster that may contain NaNs, using OpenCV's implementation.
Arguments are for now hard-coded to be identical to scipy.
N.B: kernel_size is set automatically based on sigma
:param array: the input array to be filtered.
:param sigma: the sigma of the Gaussian kernel
:returns: the filtered array (same shape as input)
"""
# Check that array dimension is 2, or can be squeezed to 2D
orig_shape = array.shape
if len(orig_shape) == 2:
pass
elif len(orig_shape) == 3:
if orig_shape[0] == 1:
array = array.squeeze()
else:
raise NotImplementedError("Case of array of dimension 3 not implemented")
else:
raise ValueError(
f"Invalid array shape given: {orig_shape}. Expected 2D or 3D array"
)
# In case array does not contain NaNs, use OpenCV's gaussian filter directly
# With kernel size (0, 0), i.e. set to default, and borderType=BORDER_REFLECT, the output is equivalent to scipy
if np.count_nonzero(np.isnan(array)) == 0:
gauss = cv.GaussianBlur(array, (0, 0), sigmaX=sigma, borderType=cv.BORDER_REFLECT)
# If array contain NaNs, need a more sophisticated approach
# Inspired by https://stackoverflow.com/a/36307291
else:
# Run filter on a copy with NaNs set to 0
array_no_nan = array.copy()
array_no_nan[np.isnan(array)] = 0
gauss_no_nan = cv.GaussianBlur(array_no_nan, (0, 0), sigmaX=sigma, borderType=cv.BORDER_REFLECT)
del array_no_nan
# Mask of NaN values
nan_mask = 0 * array.copy() + 1
nan_mask[np.isnan(array)] = 0
gauss_mask = cv.GaussianBlur(nan_mask, (0, 0), sigmaX=sigma, borderType=cv.BORDER_REFLECT)
del nan_mask
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="invalid value encountered")
gauss = gauss_no_nan / gauss_mask
return gauss.reshape(orig_shape) | f39223111ff6624756491b37c32b7162ae8f3e5c | 3,657,214 |
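# Sketch: smoothing a float32 array with a NaN hole; assumes the defining
# module's imports (numpy as np, cv2 as cv, warnings) are available.
import numpy as np

arr = np.random.rand(32, 32).astype(np.float32)
arr[10:12, 10:12] = np.nan
smoothed = gaussian_filter_cv(arr, sigma=2)
print(smoothed.shape)  # (32, 32)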
import inspect
import functools
def refresh_cache(f):
"""Decorator to update the instance_info_cache
Requires context and instance as function args
"""
    argspec = inspect.getfullargspec(f)  # getargspec was removed in Python 3.11
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
res = f(self, context, *args, **kwargs)
try:
# get the instance from arguments (or raise ValueError)
instance = kwargs.get('instance')
if not instance:
instance = args[argspec.args.index('instance') - 2]
except ValueError:
msg = _('instance is a required argument to use @refresh_cache')
raise Exception(msg)
update_instance_cache_with_nw_info(self, context, instance,
nw_info=res)
# return the original function's return value
return res
return wrapper | 6ca9449f1ae222052f89da9a8baa611b42b47fe4 | 3,657,215 |