content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M)
---|---|---|
import chardet
import os
import os.path as osp
def get_folder_status(dirname, with_message=False):
"""获取目录状态
Args:
dirname(str): 目录路径
with_message(bool): 是否需要返回状态文件内的信息
"""
status = None
closest_time = 0
message = ''
for status_type in [
DatasetStatus, TaskStatus, PredictStatus, PruneStatus,
DownloadStatus, PretrainedModelStatus
]:
for s in status_type:
if osp.exists(osp.join(dirname, s.name)):
modify_time = os.stat(osp.join(dirname, s.name)).st_mtime
if modify_time > closest_time:
closest_time = modify_time
status = getattr(status_type, s.name)
if with_message:
encoding = 'utf-8'
try:
f = open(
osp.join(dirname, s.name),
'r',
encoding=encoding)
message = f.read()
f.close()
except Exception:
try:
f = open(osp.join(dirname, s.name), 'rb')
data = f.read()
f.close()
encoding = chardet.detect(data).get('encoding')
f = open(
osp.join(dirname, s.name),
'r',
encoding=encoding)
message = f.read()
f.close()
except Exception:
pass
if with_message:
return status, message
return status | f07ea80cc517c9653b675484c5ab645ea5f22198 | 3,657,100 |
def handle_session_event(event: EventData) -> core_pb2.SessionEvent:
"""
Handle a session event and convert it to a core_pb2.SessionEvent
:param event: event data
:return: session event
"""
event_time = event.time
if event_time is not None:
event_time = float(event_time)
return core_pb2.SessionEvent(
node_id=event.node,
event=event.event_type.value,
name=event.name,
data=event.data,
time=event_time,
) | ddaa78a889c23326f52595d4a7fb71c1813eb971 | 3,657,101 |
import sys
import os
import json
import re
from copy import deepcopy
def job_builder(meta, valid_meta, workflow, job_dir, out_dir, coprocess=None, other_args="", writeimg=False):
"""Build a list of image processing jobs.
Args:
meta: Dictionary of processed image metadata.
valid_meta: Dictionary of valid metadata keys.
workflow: PlantCV image processing workflow script file.
job_dir: Intermediate file output directory.
out_dir: Output images directory.
coprocess: Coprocess the specified imgtype with the imgtype specified in meta_filters.
other_args: String of additional arguments to be passed to the workflow script.
writeimg: Boolean that specifies whether output images should be created or not.
Returns:
jobs: List of image processing commands.
:param meta: dict
:param valid_meta: dict
:param workflow: str
:param job_dir: str
:param out_dir: str
:param coprocess: str
:param other_args: str
:param writeimg: bool
:return job_stack: list
"""
# Overall job stack. List of list of jobs
jobs = []
# Get the list of images
# images = list(meta.keys())
images = []
for img in list(meta.keys()):
# # If a date range was requested, check whether the image is within range
# if args.dates:
# # Convert image datetime to unix time
# timestamp = dt_parser(meta[img]['timestamp'])
# time_delta = timestamp - datetime.datetime(1970, 1, 1)
# unix_time = (time_delta.days * 24 * 3600) + time_delta.seconds
# if unix_time < args.start_date or unix_time > args.end_date:
# continue
if coprocess is not None:
if meta[img]['imgtype'] != coprocess:
images.append(img)
else:
images.append(img)
print("Job list will include " + str(len(images)) + " images" + '\n', file=sys.stderr)
# For each image
for img in images:
# Create JSON templates for each image
img_meta = {"metadata": deepcopy(valid_meta), "observations": {}}
coimg_meta = {"metadata": deepcopy(valid_meta), "observations": {}}
# If there is an image co-processed with the image
if (coprocess is not None) and ('coimg' in meta[img]):
# Create an output file to store the co-image processing results and populate with metadata
coimg = meta[meta[img]['coimg']]
coout = open(os.path.join(".", job_dir, meta[img]["coimg"] + ".txt"), 'w')
# Store metadata in JSON
coimg_meta["metadata"]["image"] = {
"label": "image file",
"datatype": "<class 'str'>",
"value": os.path.join(coimg['path'], meta[img]['coimg'])
}
# Valid metadata
for m in list(valid_meta.keys()):
coimg_meta["metadata"][m]["value"] = coimg[m]
json.dump(coimg_meta, coout)
coout.close()
# Create an output file to store the image processing results and populate with metadata
outfile = open(os.path.join(".", job_dir, img + ".txt"), 'w')
# Store metadata in JSON
img_meta["metadata"]["image"] = {
"label": "image file",
"datatype": "<class 'str'>",
"value": os.path.join(meta[img]['path'], img)
}
# Valid metadata
for m in list(valid_meta.keys()):
img_meta["metadata"][m]["value"] = meta[img][m]
json.dump(img_meta, outfile)
outfile.close()
# Build job
job_parts = ["python", workflow, "--image", os.path.join(meta[img]['path'], img),
"--outdir", out_dir, "--result", os.path.join(job_dir, img) + ".txt"]
# Add job to list
if coprocess is not None and ('coimg' in meta[img]):
job_parts = job_parts + ["--coresult", os.path.join(job_dir, meta[img]['coimg']) + ".txt"]
if writeimg:
job_parts.append("--writeimg")
if other_args:
other_args_copy = re.sub("'", "", other_args)
other_args_copy = other_args_copy.split(" ")
job_parts = job_parts + other_args_copy
jobs.append(job_parts)
return jobs | e64cb50f1ecd01217fc5308e5de0d99dd384e67e | 3,657,102 |
def bump_patch(version):
"""Raise the patch part of the version
:param: version string
:return: the raised version string
:rtype: str
"""
verinfo = parse(version)
return format_version(verinfo['major'], verinfo['minor'],
verinfo['patch'] + 1) | 350e53788b0851138eb0d0248250bebd7e357e10 | 3,657,103 |
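A quick illustration of the expected behaviour of bump_patch, assuming that parse and format_version come from the semver package (they are not imported in the snippet above):
# Hypothetical usage sketch; assumes `from semver import parse, format_version`
# backs the helpers used by bump_patch above.
assert bump_patch("1.2.3") == "1.2.4"
assert bump_patch("0.9.19") == "0.9.20"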
def _extract_bike_location(bike, lon_abbrev='lon'):
"""
Standardize the bike location data from GBFS. Some have extra fields,
and some are missing fields.
Arguments:
bike (dict[str, str]): A GBFS bike object as it appears in free_bike_status.json
lon_abbrev (str): The abbreviation used for `longitude`
Returns:
dict[str, str]: A normalized GBFS bike object
"""
output = {key: bike.get(key) for key in ['bike_id', 'lat', 'is_reserved', 'is_disabled']}
output['lon'] = bike.get(lon_abbrev)
return output | a20929a85c993a59b82b552fcfee81b1f818648d | 3,657,104 |
def clean_word(word):
"""Return word in lowercase stripped of whitespace"""
return word.strip().lower() | ce57fa95ec111ee18c8a00c2076c686bc0abfe5c | 3,657,105 |
def get_batch_size(tracks):
"""
If tracks is a track-major list of possibly None tracks, get the batch size
"""
return get_shape(tracks)[0] | 677f26a0f42d4e745d77ff6abc1867ce857ea208 | 3,657,106 |
def find_edges_from_wires(body: TopoDS_Shape) -> set[TopoDS_Edge]:
"""Return set of edges from Wires."""
edge_set = set()
for wire in TopologyExplorer(body, ignore_orientation=False).wires():
for edge in WireExplorer(wire).ordered_edges():
edge_set.add(edge)
return edge_set | 89d8d848d98c32e925f955da623a3e1018245f75 | 3,657,107 |
import geopandas as gpd
import contextily as ctx
import seaborn as sns
import cartopy.crs as ccrs
def plot_israel_map(gis_path=gis_path, rc=rc, ticklabelsize=12, ax=None):
"""general nice map for israel, need that to plot stations,
and temperature field on top of it"""
sns.set_style("ticks", rc=rc)
isr_with_yosh = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')
isr_with_yosh.crs = {'init': 'epsg:4326'}
# isr_with_yosh = isr_with_yosh.to_crs(epsg=3857)
crs_epsg = ccrs.epsg('3857')
# crs_epsg = ccrs.epsg('2039')
if ax is None:
# fig, ax = plt.subplots(subplot_kw={'projection': crs_epsg},
# figsize=(6, 15))
bounds = isr_with_yosh.geometry.total_bounds
extent = [bounds[0], bounds[2], bounds[1], bounds[3]]
# ax.set_extent([bounds[0], bounds[2], bounds[1], bounds[3]], crs=crs_epsg)
# ax.add_geometries(isr_with_yosh.geometry, crs=crs_epsg)
ax = isr_with_yosh.plot(alpha=0.0, figsize=(6, 15))
else:
isr_with_yosh.plot(alpha=0.0, ax=ax)
ctx.add_basemap(
ax,
source=ctx.providers.Stamen.TerrainBackground,
crs='epsg:4326')
ax.xaxis.set_major_locator(ticker.MaxNLocator(2))
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
ax.yaxis.set_major_formatter(lat_formatter)
ax.xaxis.set_major_formatter(lon_formatter)
ax.tick_params(top=True, bottom=True, left=True, right=True,
direction='out', labelsize=ticklabelsize)
# scale_bar(ax, ccrs.Mercator(), 50, bounds=bounds)
return ax | 09c801bf48756e9c118222747ce49dcd8187df90 | 3,657,108 |
def getSentB(text2: str, offsetB: int, nextPoint: int, sentLength: int):
"""
alignSentences auxiliar function to get the sentences of the original text.
"""
posB = text2[offsetB+sentLength:].find('.')
sentLength += posB+1
sentB = text2[offsetB:offsetB+sentLength]
nextPoint = offsetB + sentLength
return sentB, nextPoint, sentLength | 54914a3c1d85464c0e5a4267538a73693e3df238 | 3,657,109 |
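A minimal usage sketch showing how getSentB advances through a text one sentence at a time:
# First call: start at offset 0 with no accumulated length.
text = "First sentence. Second sentence."
sent, next_point, length = getSentB(text, 0, 0, 0)
assert sent == "First sentence."
assert next_point == 15 and length == 15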
def get_mapping_fcost_local(interface, bus_def):
"""
coarse cost function to cheaply estimate local (subset of ports)
interface match to bus_def
"""
cost = _get_mapping_fcost_base(interface, bus_def, penalize_umap=False)
name_cost = _get_name_fcost2(interface, bus_def)
cost.nc = name_cost
return cost | c945e89174fea0c131f35ad4688c5539a55c3eda | 3,657,110 |
import base64
from urllib.parse import quote
def base64_image(image: bytes, mime_type: str) -> str:
"""Encode the image for an URL using base64
Args:
image: the image
mime_type: the mime type
Returns:
A string starting with "data:{mime_type};base64,"
"""
base64_data = base64.b64encode(image)
image_data = quote(base64_data)
return f"data:{mime_type};base64,{image_data}" | 3079c73137959fea1d16ceb64251870500ae30a5 | 3,657,111 |
import math
import six
import numpy
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
Generate prior boxes for SSD(Single Shot MultiBox Detector)
algorithm. The details of this algorithm, please refer the
section 2.2 of SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs(list|tuple): The list of input Variables, the format
of all Variables is NCHW.
image(Variable): The input image data of PriorBoxOp,
the layout is NCHW.
base_size(int): the base_size is used to get min_size
and max_size according to min_ratio and max_ratio.
num_classes(int): The number of classes.
aspect_ratios(list|tuple): the aspect ratios of generated prior
boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <=2`,
min_sizes must be set up, and the length of min_sizes
should equal to the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <=2`,
max_sizes must be set up, and the length of min_sizes
should equal to the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): Name of the prior box layer. Default: None.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
the convolution layer that follows, and does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc: The predicted boxes' location of the inputs. The layout
is [N, H*W*Priors, 4]. where Priors is the number of predicted
boxes each position of each input.
mbox_conf: The predicted boxes' confidence of the inputs. The layout
is [N, H*W*Priors, C]. where Priors is the number of predicted boxes
each position of each input and C is the number of Classes.
boxes: the output prior boxes of PriorBox. The layout is [num_priors, 4].
num_priors is the total box count of each position of inputs.
variances: the expanded variances of PriorBox. The layout is
[num_priors, 4]. num_priors is the total box count of each position of inputs
Examples:
.. code-block:: python
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv5],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
compile_shape = [
mbox_loc.shape[0], cpt.floor_division(
mbox_loc.shape[1] * mbox_loc.shape[2] * mbox_loc.shape[3], 4), 4
]
run_shape = tensor.assign(numpy.array([0, -1, 4]).astype("int32"))
mbox_loc_flatten = nn.reshape(
mbox_loc, shape=compile_shape, actual_shape=run_shape)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
new_shape = [0, -1, num_classes]
compile_shape = [
conf_loc.shape[0],
cpt.floor_division(conf_loc.shape[1] * conf_loc.shape[2] *
conf_loc.shape[3], num_classes), num_classes
]
run_shape = tensor.assign(
numpy.array([0, -1, num_classes]).astype("int32"))
conf_loc_flatten = nn.reshape(
conf_loc, shape=compile_shape, actual_shape=run_shape)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var | e3fabec0dd64fec9caea929e0bf4c04848d22df6 | 3,657,112 |
from operator import invert
import numpy
def expandMask(img, shrink = False, step = 1):
"""Grow or shrink a mask by a pixel."""
if shrink:
img = invert(img)
img = jitterSum(img.data, step) > 0
img = Image(data = img.astype(numpy.uint8)*255)
if shrink:
img = invert(img)
return img | 4853a0c42856cc34a5b9b58533d335c0ac858345 | 3,657,113 |
def isHeader(line):
"""
tests to see if 'line' is in the event file
header
"""
if containsAny(line, 'EVF Filename:', 'Generation Time:', 'Start_time:',
'End_time:', 'events in list)', '#', 'Include:',
'Init_value:'):
return True
elif len(line) < 3:
return True
else:
return False | 548d0273b174c16e7ab874fe8a94d4ec7e87703b | 3,657,114 |
import requests
def redirect_page(source_url, destination_url):
"""returns False is current page is not 200"""
def _check_redirect(full_url):
print('Getting ' + full_url)
response = requests.get(full_url, allow_redirects=False)
if response.status_code == 200:
print("Was 200")
return True
elif response.status_code == 404:
print("Was 404")
return False
elif response.status_code == 301:
print("Was 301")
return False
else:
raise Exception("UNEXPECTED STATUS CODE {} FOR {}".format(
response.status_code, full_url))
full_source_url = 'https://www.gov.uk' + source_url
full_destination_url = 'https://www.gov.uk' + destination_url
return _check_redirect(full_source_url) and _check_redirect(
full_destination_url) | 8caa9db41948f44cc015ca51f179ff318eb22ada | 3,657,115 |
def semantic_analysis(program, print_results=True):
"""
TODO
:param program: TODO
:param print_results: TODO
:return: TODO
"""
semanter = make_semantic_analyser()
program_ir = semanter.transform(program)
if print_results:
print_readable_ast(program_ir)
return program_ir | 5921e93c8d60a367a13e8154cffaf9e630a7f4ba | 3,657,116 |
def WrapWithQuotes(text, quote='"'):
""" Wrap the supplied text with quotes
Args:
text: Input text to wrap
quote: Quote character to use for wrapping (default: '"')
Returns:
Supplied text wrapped in quote char
"""
if not text.startswith(quote):
text = quote + text
if not text.endswith(quote):
text = text + quote
return text | f4f7b83d60e3ea928e3502b9d19ca4c8d52914b9 | 3,657,117 |
import os
def get_fastsync_bin(venv_dir, tap_type, target_type):
"""
Get the absolute path of a fastsync executable
"""
source = tap_type.replace('tap-', '')
target = target_type.replace('target-', '')
fastsync_name = f'{source}-to-{target}'
return os.path.join(venv_dir, 'pipelinewise', 'bin', fastsync_name) | 113835e7620bd378d87cdd162b842e1f7c3a86dc | 3,657,118 |
def login_aws_via_idp(session, username, password, entity_id):
""" Get a SAML assertion and set of AWS roles which can be assumed with the SAML assertion. """
logger.info("Looking up your IdP")
idp_url, idp_form = get_idp_login_form(
session, username, password, entity_id)
logger.info("Logging in to %s", idp_url)
idp_response = session.post(idp_url, data=idp_form)
idp_response.raise_for_status()
logger.info("Parsing response and presenting assertion to CILogon")
cilogon_url, payload = parse_idp_login_response(idp_response.text)
scimma_saml_proxy_response = session.post(cilogon_url, data=payload)
scimma_saml_proxy_response.raise_for_status()
logger.info("Login complete, extracting credentials")
assertion = parse_scimma_sample_response(scimma_saml_proxy_response.text)
roles = parse_scimma_aws_assertion(assertion)
return assertion, roles | 586250b66771275b5282ae0e22d40298550164e2 | 3,657,119 |
def fit_linreg(x, y, intercept=True):
"""Simple linear regression: y = kx + b.
Arguments
---------
x: :class:`~numpy.ndarray`
A vector of independent variables.
y: :class:`~numpy.ndarray`
A vector of dependent variables.
intercept: bool
If using steady state assumption for fitting, then:
True -- the linear regression is performed with an unfixed intercept;
False -- the linear regression is performed with a fixed zero intercept.
Returns
-------
k: float
The estimated slope.
b: float
The estimated intercept.
"""
mask = np.logical_and(~np.isnan(x), ~np.isnan(y))
xx = x[mask]
yy = y[mask]
ym = np.mean(yy)
xm = np.mean(xx)
if intercept:
cov = np.mean(xx * yy) - xm * ym
var_x = np.mean(xx * xx) - xm * xm
k = cov / var_x
b = ym - k * xm
else:
k = np.mean(yy) / np.mean(xx)
b = 0
return k, b | 18248eb0ece96dfda5fbc2d94a591f98570feddd | 3,657,120 |
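A small self-contained check of the closed-form estimates above; NaN entries are masked out as in the function body:
import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0, np.nan])
y = 2.0 * x + 1.0          # known slope 2 and intercept 1
k, b = fit_linreg(x, y, intercept=True)
assert np.isclose(k, 2.0) and np.isclose(b, 1.0)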
import torch
import torch.nn.functional as F
def entropy(x, input_as_probabilities):
"""
Helper function to compute the entropy over the batch
input: batch w/ shape [b, num_classes]
output: entropy value [is ideally -log(num_classes)]
"""
if input_as_probabilities:
x_ = torch.clamp(x, min = 1e-8)
b = x_ * torch.log(x_)
else:
b = F.softmax(x, dim = 1) * F.log_softmax(x, dim = 1)
if len(b.size()) == 2: # Sample-wise entropy
return -b.sum(dim = 1).mean()
elif len(b.size()) == 1: # Distribution-wise entropy
return - b.sum()
else:
raise ValueError('Input tensor is %d-Dimensional' %(len(b.size()))) | 9cf9f5ecd59ffe068bbf8f25da62ac3c5c2eedb6 | 3,657,121 |
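A quick sanity check of the entropy helper, assuming the snippet's F is torch.nn.functional as imported above: a uniform distribution over C classes yields log(C).
import math
import torch

p = torch.full((4, 10), 0.1)   # uniform probabilities over 10 classes
h = entropy(p, input_as_probabilities=True)
assert math.isclose(h.item(), math.log(10), rel_tol=1e-4)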
from typing import Callable
def find_function_in_object(o: object, function_name: str) -> Callable:
"""Finds a callable object matching given function name in given object.
Args:
o: Any object.
function_name: Name of attribute within o.
Returns:
Callable object with name <function_name> in object <o>.
Raises:
LookupError: if <function_name> is not a callable object in <o>.
"""
try:
function_handle = getattr(o, function_name)
if not hasattr(function_handle, "__call__"):
raise LookupError(
f"Resolved object {function_name} in object {o} is not a function."
)
else:
return function_handle
except AttributeError:
raise LookupError(f"Cannot find function {function_name} in object {o}.") | c3b6ad12f42d005f643bc8a657f728613bd0e93c | 3,657,122 |
async def refresh(db: AsyncSession, schema: RefreshToken):
"""
Refresh token
:param db: DB
:type db: AsyncSession
:param schema: Refresh token
:type schema: RefreshToken
:return: Access token
:rtype: dict
:raise HTTPException 400: User not found
"""
username = verify_refresh_token(schema.refresh_token)
if not await user_crud.exists(db, username=username):
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='User not found')
user = await user_crud.get(db, username=username)
return create_token(user.id, username) | f20cde1c44ef515c18318c45af9df4bb360c85e6 | 3,657,123 |
def gumbel_softmax(logits, temperature, hard=False):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, temperature)
if hard:
y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
y = tf.stop_gradient(y_hard - y) + y
return y | 7612ef322acf77f8c2fdf1963b6d15934f84b416 | 3,657,124 |
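The snippet relies on a gumbel_softmax_sample helper that is not shown; the sketch below is an illustrative NumPy version of that sampling step (add Gumbel(0, 1) noise to the logits, then take a temperature-scaled softmax), not the original TensorFlow code.
import numpy as np

def gumbel_softmax_sample_np(logits, temperature, eps=1e-20):
    # Sample Gumbel(0, 1) noise via the inverse-CDF trick.
    u = np.random.uniform(size=logits.shape)
    g = -np.log(-np.log(u + eps) + eps)
    y = (logits + g) / temperature
    y = y - y.max(axis=-1, keepdims=True)   # numerical stability
    return np.exp(y) / np.exp(y).sum(axis=-1, keepdims=True)

sample = gumbel_softmax_sample_np(np.log([[0.7, 0.2, 0.1]]), temperature=0.5)
assert np.allclose(sample.sum(axis=-1), 1.0)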
import copy
def dqn_learn(t, agent, env, env_state, history, args):
"""Learning loop for DeepQAgent"""
step_type, reward, discount, state = env_state
state = copy.deepcopy(state)
# Act
action = agent.act_explore(state)
step_type, reward, discount, successor = env.step(action)
# Learn
if args.cheat:
# TODO: fix this, since _get_hidden_reward seems to be episodic
reward = env._get_hidden_reward()
loss = agent.learn(state, action, reward, successor)
history['writer'].add_scalar('Train/loss', loss, t)
# Modify exploration
eps = agent.update_epsilon()
history['writer'].add_scalar('Train/epsilon', eps, t)
# Sync target and policy networks
if t % args.sync_every == args.sync_every - 1:
agent.sync_target_Q()
return (step_type, reward, discount, successor), history | 1ff3333ef9f2492866a1b32540c5440d3683d600 | 3,657,125 |
def build_custom_Theta(
data,
data_description=[],
add_constant_term=True,
):
"""
builds a matrix Theta(U) from a predefined set of terms
This is used when we subsample and take all the derivatives point by point or if there is an
extra input to put in.
input:
data: column 0 is U
derivatives_description: description of candidate terms in Theta
P: max power of polynomial function of U to be included in Theta
returns:
Theta = Theta(U,Q)
descr = description of what all the columns in Theta are
"""
if len(data) > 0:
n, m = data.shape
# Initialize Theta as an empty (n, 0) matrix; candidate columns are stacked in below.
Theta = np.array([], dtype=np.complex64).reshape((n, 0))
descr = []
# Add "u"-part into Theta
if len(data_description) > 0:
Theta = np.hstack([Theta, data])
descr += data_description
return Theta, descr | 451c306124e94d5f04d436c98ede6af232a6458e | 3,657,126 |
import pathlib
from typing import Optional
import importlib
def load(plugin: pathlib.Path) -> Optional[ModuleType]:
"""Load a specific cemu plugin
Args:
plugin (pathlib.Path): the path of the plugin to load
Returns:
Optional[ModuleType]: the loaded plugin module on success, None if there's no plugin, or it is invalid
"""
try:
if plugin.is_file():
mod = importlib.import_module(f"cemu.plugins.{plugin.stem}")
elif plugin.is_dir():
mod = importlib.import_module(f"cemu.plugins.{plugin.name}")
else:
raise ImportError("invalid format")
except ImportError as ie:
error(f"Failed to import '{plugin}' - reason: {str(ie)}")
return None
if not hasattr(mod, "register"):
error(f"Plugin '{plugin.stem}' has no `register` method")
return None
return mod | eac265743ba9a58842cf7e97a1b961234ea3b17b | 3,657,127 |
from datetime import datetime
import logging
import os
def get_standard_logger(name, log_dir=None):
"""Function to return an instance of type logger."""
if log_dir is None:
log_dir = '/Users/teaton/dev/fantasyAM/logs'
time_stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
# Create a file handler
os.makedirs(log_dir, exist_ok=True)
handler = logging.FileHandler(os.path.join(log_dir, f'{name}_{time_stamp}.log'))
handler.setLevel(logging.INFO)
# Create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
return logger | 1e0c4d6ea3ee74d7f5c88f2861369fd3a37f7750 | 3,657,128 |
import traceback
import sqlalchemy
from sqlalchemy.orm import sessionmaker
def getDatabaseConnection(databaseString):
"""Attempt connection to the database"""
sqlsession = None
try:
sqlengine = sqlalchemy.create_engine(databaseString)
SQLSession = sessionmaker(bind=sqlengine)
sqlsession = SQLSession()
print("Connection to " + databaseString + " successfull")
except Exception as e:
print(traceback.format_exc())
print("Error in connection to the database")
return sqlsession | 8199838e24c6828977d5fe6a7f2af20f755f25f6 | 3,657,129 |
def prepare_multiple_configs(conf):
""" This function uses workload_1 as a base, and then duplicates its configuration for all
other workloads 2,3... while leaving properties already defined in subsequent workloads (2,3..)
unchanged.
"""
keys_starting_with_workload = []
for k, _ in conf.items():
if k.startswith("workload"):
keys_starting_with_workload.append(k)
for k in keys_starting_with_workload:
if k != "workload_1":
merge_dicts(dst_dic=conf[k], src_dic=conf["workload_1"], overwrite=False)
return conf, keys_starting_with_workload | 760adf50bbca9dd160375ed8d506a33618d39a94 | 3,657,130 |
def undo_coefficient_scaling(clf = None, coefficients = None, intercept = 0.0, scaler = None):
"""
given coefficients and data for scaled data, returns coefficients and intercept for unnormalized data
w = w_scaled / sigma
b = b_scaled - (w_scaled / sigma).dot(mu) = b_scaled - w.dot(mu)
:param clf: sklearn linear classifier
:param coefficients: vector of coefficients
:param intercept: scalar for the intercept function
:param scaler: sklearn StandardScaler used to normalize the training data, or None
:return: coefficients and intercept for unnormalized data
"""
if coefficients is None:
assert clf is not None
assert intercept == 0.0
assert hasattr(clf, 'coef_')
coefficients = clf.coef_
intercept = clf.intercept_ if hasattr(clf, 'intercept_') else 0.0
if scaler is None:
w = np.array(coefficients)
b = float(intercept)
else:
assert isinstance(scaler, StandardScaler)
x_shift = np.array(scaler.mean_)
x_scale = np.sqrt(scaler.var_)
w = coefficients / x_scale
b = intercept - np.dot(w, x_shift)
w = np.array(w).flatten()
b = float(b)
return w, b | cee60338386bdc87cb50e4b54af43517135fba46 | 3,657,131 |
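A small numerical check of the identities in the docstring (w = w_scaled / sigma, b = b_scaled - w·mu), assuming scikit-learn's StandardScaler as referenced in the function body:
import numpy as np
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.normal(loc=3.0, scale=2.0, size=(100, 4))
scaler = StandardScaler().fit(X)
w_scaled = np.array([0.5, -1.0, 2.0, 0.0])
b_scaled = 0.25
w, b = undo_coefficient_scaling(coefficients=w_scaled, intercept=b_scaled, scaler=scaler)
# Decision scores on scaled data must equal scores on raw data with (w, b).
assert np.allclose(scaler.transform(X) @ w_scaled + b_scaled, X @ w + b)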
import copy
def reduce(snail_nr):
"""Returns a fully reduced version of the given snail number."""
new_snail_nr = copy.deepcopy(snail_nr)
# print("Start:")
# print(snail_nr)
while True:
# print("\nNew reduction phase...")
if explode_in_place(new_snail_nr):
# print("Exploded:", new_snail_nr)
continue
# else:
# print("No explode.")
if split_in_place(new_snail_nr):
# print("Split:", new_snail_nr)
continue
# else:
# print("No split.")
break
# print(new_snail_nr)
return new_snail_nr | 1facd7a7bbc73794ff2519ef0894ec9536c18690 | 3,657,132 |
def load_image_embedding_model(input_repr, content_type, embedding_size):
"""
Returns a model with the given characteristics. Loads the model
if the model has not been loaded yet.
Parameters
----------
input_repr : "linear", "mel128", or "mel256"
Spectrogram representation used for audio model.
content_type : "music" or "env"
Type of content used to train embedding.
embedding_size : 8192 or 512
Embedding dimensionality.
Returns
-------
model : tf.keras.Model
Model object.
"""
model_path = get_image_embedding_model_path(input_repr, content_type)
return load_image_embedding_model_from_path(model_path, embedding_size) | f78d458e2cd000206d3fcc35c166ede43e84e8fd | 3,657,133 |
def prepare_alm(alm=None, ainfo=None, lmax=None, pre=(), dtype=np.float64):
"""Set up alm and ainfo based on which ones of them are available."""
if alm is None:
if ainfo is None:
if lmax is None:
raise ValueError("prepare_alm needs either alm, ainfo or lmax to be specified")
ainfo = sharp.alm_info(lmax)
alm = np.zeros(pre+(ainfo.nelem,), dtype=np.result_type(dtype,0j))
else:
ainfo = sharp.alm_info(nalm=alm.shape[-1])
return alm, ainfo | 21406a6b3df7e63eeb05998c8940e525021b62ce | 3,657,134 |
from typing import Any
def increment_occurance_dict(d: dict, k: Any) -> None:
"""
Increment occurance dict, updates in-place so nothing is returned.
"""
try:
d[k] += 1
except KeyError:
d[k] = 1
return None | 725b437494f4c647848c54a3d13b4e974fa7f0e8 | 3,657,135 |
import sys
import os
import pathlib
def package_versions(modules=None, builtins=False, standard_lib=None):
"""Retrieve package version information.
When builtins or standard_lib are None, they will be included only
if a version was found in the package.
@param modules: Modules to inspect
@type modules: list of strings
@param builtins: Include builtins
@type builtins: Boolean, or None for automatic selection
@param standard_lib: Include standard library packages
@type standard_lib: Boolean, or None for automatic selection
"""
if not modules:
modules = sys.modules.keys()
std_lib_dir = get_python_lib(standard_lib=True)
root_packages = {key.split('.')[0] for key in modules}
builtin_packages = {name.split('.')[0] for name in root_packages
if name in sys.builtin_module_names
or '_' + name in sys.builtin_module_names}
# Improve performance by removing builtins from the list if possible.
if builtins is False:
root_packages = list(root_packages - builtin_packages)
std_lib_packages = []
paths = {}
data = {}
for name in root_packages:
try:
package = import_module(name)
except ImportError as e:
data[name] = {'name': name, 'err': e}
continue
info = {'package': package, 'name': name}
if name in builtin_packages:
info['type'] = 'builtins'
if '__file__' in package.__dict__:
# Determine if this file part is of the standard library.
if os.path.normcase(package.__file__).startswith(
os.path.normcase(std_lib_dir)):
std_lib_packages.append(name)
if standard_lib is False:
continue
info['type'] = 'standard library'
# Strip '__init__.py' from the filename.
path = package.__file__
if '__init__.py' in path:
path = path[0:path.index('__init__.py')]
if PY2:
path = path.decode(sys.getfilesystemencoding())
info['path'] = path
assert path not in paths, 'Path of the package is in defined paths'
paths[path] = name
if '__version__' in package.__dict__:
info['ver'] = package.__version__
elif name.startswith('unicodedata'):
info['ver'] = package.unidata_version
# If builtins or standard_lib is None,
# only include package if a version was found.
if (builtins is None and name in builtin_packages) or \
(standard_lib is None and name in std_lib_packages):
if 'ver' in info:
data[name] = info
else:
# Remove the entry from paths, so it isn't processed below
del paths[info['path']]
else:
data[name] = info
# Remove any pywikibot sub-modules which were loaded as a package.
# e.g. 'wikipedia_family.py' is loaded as 'wikipedia'
_program_dir = _get_program_dir()
if isinstance(pathlib, Exception):
dir_parts = _program_dir.split(os.sep)
else:
dir_parts = pathlib.Path(_program_dir).parts
length = len(dir_parts)
for path, name in paths.items():
if isinstance(pathlib, Exception):
lib_parts = os.path.normpath(path).split(os.sep)
else:
lib_parts = pathlib.Path(path).parts
if dir_parts != lib_parts[:length]:
continue
if lib_parts[length] != '.tox':
del data[name]
return data | 3962c29740c8b7ee9214a3d343a5774973344563 | 3,657,136 |
import scipy
def closest_line(query_lines, metric='cosine'):
"""Compute the distance to, and parameters for, the closest line to each
line in query_lines.
Args:
- query_lines: Array of lines to compute closest matches for, shape
(n_lines, width, height, 1)
- metric: String to pass to scipy.spatial.distance.cdist to choose
which distance metric to use
Returns:
- min_dist, starts, ends: Arrays of shape (n_lines,) denoting the
distance to the nearest ``true'' line and the start and end points.
"""
h, w = query_lines.shape[1:-1]
# Construct 10000 lines with these dimensions
angles = np.linspace(0, 2*np.pi - 2*np.pi/10000, 10000)
all_lines = np.array(
[(data.draw_line(angle, h, w)) for angle in angles])
# Produce vectorized versions of both for use with scipy.spatial
flat_query = query_lines.reshape(query_lines.shape[0], -1)
flat_all = all_lines.reshape(all_lines.shape[0], -1)
# Compute pairwise distance matrix of query lines with all valid lines
distances = scipy.spatial.distance.cdist(flat_query, flat_all, metric)
min_dist_idx = np.argmin(distances, axis=-1)
min_dist = distances[np.arange(distances.shape[0]), min_dist_idx]
angles = np.array([angles[n] for n in min_dist_idx])
return min_dist, angles | 187cb6f8266ddf7bd0347fb233fb02a7ea4cbad3 | 3,657,137 |
def deref_vtk(obj):
"""Dereferences the VTK object from the object if possible."""
if isinstance(obj, TVTKBase):
return obj._vtk_obj
else:
return obj | 1ba46f83a389983df3c35f011c94836f12fdd905 | 3,657,138 |
def order_assignee_factory(team):
"""
Creates a :class:`datahub.omis.order.models.OrderAssignee` instance related to ``team``
"""
adviser = Advisor.objects.create(
first_name='John',
last_name='Doe',
email=f'{uuid4()}@example.com',
)
order_assignee = OrderAssignee.objects.create(
order=Order.objects.create(
company=Company.objects.create(),
contact=Contact.objects.create(primary=True),
primary_market=Country.objects.create(),
),
adviser=adviser,
created_by=adviser)
order_assignee.team = team
order_assignee.save()
return order_assignee | fe39d16a105ff01be63614e76dcf001b5ca4171f | 3,657,139 |
def is_bool(space, w_obj):
""" Finds out whether a variable is a boolean"""
return space.wrap(w_obj.tp == space.tp_bool) | 39b62ec08ebbdd4d7505e558ad4901ca67afc12d | 3,657,140 |
from typing import Tuple
from typing import Dict
from typing import List
from typing import Callable
from typing import Any
def _compile_for_uhfqa(
device: zhinst.Device,
cached_schedule: schedule_helpers.CachedSchedule,
settings_builder: zi_settings.ZISettingsBuilder,
) -> Tuple[zi_settings.ZISettingsBuilder, ZIAcquisitionConfig]:
"""
Initialize programming the UHFQA ZI Instrument.
Creates a sequence program and converts schedule
pulses to waveforms for the UHFQA.
Parameters
----------
device :
cached_schedule :
settings_builder :
Returns
-------
:
"""
instrument_info = zhinst.InstrumentInfo(
clock_rate=device.clock_rate,
resolution=8,
granularity=WAVEFORM_GRANULARITY[device.device_type],
)
channels = device.channels
channels = list(filter(lambda c: c.mode == enums.SignalModeType.REAL, channels))
awg_index = 0
channel = channels[awg_index]
logger.debug(f"[{device.name}-awg{awg_index}] {str(device)}")
mixer_corrections = (
channel.mixer_corrections
if channel.mixer_corrections is not None
else common.MixerCorrections()
)
settings_builder.with_defaults(
[
("awgs/0/single", 1),
("qas/0/rotations/*", (1 + 1j)),
("qas/0/integration/sources/*", 0),
]
).with_sigouts(0, (1, 1)).with_awg_time(
0, device.clock_select
).with_qas_integration_weights_real(
range(NUM_UHFQA_READOUT_CHANNELS), np.zeros(MAX_QAS_INTEGRATION_LENGTH)
).with_qas_integration_weights_imag(
range(NUM_UHFQA_READOUT_CHANNELS), np.zeros(MAX_QAS_INTEGRATION_LENGTH)
).with_sigout_offset(
0, mixer_corrections.dc_offset_I
).with_sigout_offset(
1, mixer_corrections.dc_offset_Q
)
logger.debug(f"[{device.name}-awg{awg_index}] channel={str(channel)}")
instructions = get_execution_table(
cached_schedule,
instrument_info,
channel,
)
# Generate a dictionary of uuid(s) and zhinst.Wave instructions
wave_instructions_dict: Dict[int, zhinst.Wave] = dict(
(i.uuid, i) for i in instructions if isinstance(i, zhinst.Wave)
)
# Create a list of all pulse_id(s).
pulse_ids: List[int] = wave_instructions_dict.keys()
# Generate map containing waveform the location of a pulse_id.
waveform_table: Dict[int, int] = zi_helpers.get_waveform_table(
pulse_ids, cached_schedule.pulseid_pulseinfo_dict
)
# Create a dictionary of uuid(s) and numerical waveforms.
waveforms_dict: Dict[int, np.ndarray] = dict(
(uuid, wf_instr.waveform) for uuid, wf_instr in wave_instructions_dict.items()
)
# Create a dictionary of uuid(s) and zhinst.Measure instructions
n_acquisitions = sum(isinstance(x, zhinst.Measure) for x in instructions)
measure_instructions_dict: Dict[int, zhinst.Measure] = dict(
(i.uuid, i) for i in instructions if isinstance(i, zhinst.Measure)
)
# Generate and apply sequencer program
seqc = _assemble_uhfqa_sequence(
cached_schedule=cached_schedule,
device=device,
instrument_info=instrument_info,
output=device.channel_0,
waveform_table=waveform_table,
instructions=instructions,
)
logger.debug(seqc)
settings_builder.with_compiler_sourcestring(awg_index, seqc)
# Apply waveforms to AWG
_add_wave_nodes(device, awg_index, waveforms_dict, waveform_table, settings_builder)
# Get a list of all acquisition protocol channels
acq_channel_resolvers_map: Dict[int, Callable[..., Any]] = dict()
# track unique acquisitions; identical ones only need to be configured once
unique_acquisition_hashes = []
for acq_uuid, acq_info in cached_schedule.acqid_acqinfo_dict.items():
# the acquisition index is not required for configuring the integration weights.
# we use a hash to identify which acquisitions are identical in this context.
acq_hash = make_hash(acq_info.copy().pop("acq_index"))
if acq_hash in unique_acquisition_hashes:
continue
unique_acquisition_hashes.append(acq_hash)
acq_protocol: str = acq_info["protocol"]
acq_duration: float = acq_info["duration"]
acq_channel: int = acq_info["acq_channel"]
integration_length = round(acq_duration * instrument_info.clock_rate)
logger.debug(
f"[{device.name}] acq_info={acq_info} "
+ f" acq_duration={acq_duration} integration_length={integration_length}"
)
settings_builder.with_qas_integration_mode(
zhinst.QasIntegrationMode.NORMAL
).with_qas_integration_length(integration_length).with_qas_result_enable(
False
).with_qas_monitor_enable(
False
).with_qas_delay(
0
)
if acq_protocol == "trace":
# Disable Weighted integration because we'd like to see
# the raw signal.
settings_builder.with_qas_monitor_enable(True).with_qas_monitor_averages(
cached_schedule.schedule.repetitions
).with_qas_monitor_length(
integration_length
).with_qas_integration_weights_real(
range(NUM_UHFQA_READOUT_CHANNELS), np.ones(MAX_QAS_INTEGRATION_LENGTH)
).with_qas_integration_weights_imag(
range(NUM_UHFQA_READOUT_CHANNELS), np.ones(MAX_QAS_INTEGRATION_LENGTH)
)
monitor_nodes = (
"qas/0/monitor/inputs/0/wave",
"qas/0/monitor/inputs/1/wave",
)
acq_channel_resolvers_map[acq_channel] = partial(
resolvers.monitor_acquisition_resolver, monitor_nodes=monitor_nodes
)
else:
measure_instruction: zhinst.Measure = measure_instructions_dict[acq_uuid]
# Combine a reset and setting acq weights
# by slicing the length of the waveform I and Q values.
# This overwrites 0..length with new values.
# The waveform is slightly larger then the integration_length
# because of the waveform granularity. This is irrelevant
# due to the waveform being appended with zeros. Therefore
# avoiding an extra slice of waveform[0:integration_length]
weights_i = np.zeros(MAX_QAS_INTEGRATION_LENGTH)
weights_q = np.zeros(MAX_QAS_INTEGRATION_LENGTH)
weights_i[
0 : len(measure_instruction.weights_i)
] = measure_instruction.weights_i
weights_q[
0 : len(measure_instruction.weights_q)
] = measure_instruction.weights_q
settings_builder.with_qas_result_mode(
zhinst.QasResultMode.CYCLIC
).with_qas_result_source(
zhinst.QasResultSource.INTEGRATION
).with_qas_result_length(
n_acquisitions
).with_qas_result_enable(
True
).with_qas_result_averages(
cached_schedule.schedule.repetitions
)
# set the integration weights, note that we need to set 4 weights in order
# to use a complex valued weight function in the right way.
# Z = (w0*sI + w1*sQ) + 1j ( w1*sI - w0 * sQ)
settings_builder.with_qas_integration_weights_real(
2 * acq_channel, list(weights_i)
).with_qas_integration_weights_imag(
2 * acq_channel, list(weights_q)
).with_qas_integration_weights_real(
2 * acq_channel + 1, list(weights_q)
).with_qas_integration_weights_imag(
2 * acq_channel + 1, list(-1 * weights_i)
)
# Create partial function for delayed execution
acq_channel_resolvers_map[acq_channel] = partial(
resolvers.result_acquisition_resolver,
result_nodes=[
f"qas/0/result/data/{2*acq_channel}/wave",
f"qas/0/result/data/{2*acq_channel+1}/wave",
],
)
settings_builder.with_qas_result_reset(0).with_qas_result_reset(1)
settings_builder.with_qas_monitor_reset(0).with_qas_monitor_reset(1)
return settings_builder, ZIAcquisitionConfig(
n_acquisitions, acq_channel_resolvers_map
) | ba01201bac5bb40101009145e7cf4a0fca5684aa | 3,657,141 |
import os
import yaml
import logging
def read_config():
"""Parses config and returns config values
:returns: config as dict
"""
dirname = os.path.dirname(__file__)
config_path = os.path.join(dirname, 'config.yaml')
try:
stream = open(config_path, "r")
except FileNotFoundError:
return None
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as exception:
logging.error("YAML error while parsing config.yaml:\n%s", exception)
exit()
# Remove / on the end of url
if "url" in config:
config["url"] = config["url"].rstrip("/")
return config | d8584727983880591675fcb99dbcc4b9a3a75626 | 3,657,142 |
def air_density(t_f, elevation):
"""Eq 20, page 25"""
return (1.293 - 1.525e-4 * elevation + 6.379e-9 * elevation ** 2) / (
1 + 0.00367 * t_f
) | d5677c755fc52e1ae8cc5293d4ed5c9a4debb71d | 3,657,143 |
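A quick sanity check of the formula, assuming t_f is the air (film) temperature in degrees Celsius and elevation is in metres: at sea level and 25 °C it reproduces the textbook density of dry air.
rho = air_density(t_f=25.0, elevation=0.0)   # 1.293 / (1 + 0.00367 * 25)
assert abs(rho - 1.184) < 0.01               # ~1.184 kg/m^3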
def _strip_after_new_lines(s):
"""Removes leading and trailing whitespaces in all but first line."""
lines = s.splitlines()
if len(lines) > 1:
lines = [lines[0]] + [l.lstrip() for l in lines[1:]]
return '\n'.join(lines) | 247cee0f34ab1e742069e05c8c00095cd24d80bc | 3,657,144 |
def make_connection(request):
"""
Create a StreamSplitRoutine from a MockConnection and a container, return topics 'A' and 'B' as well as the routine
"""
def generate(*, max_items_send: int):
return MockConnection(max_items_send=max_items_send)
yield generate | a0a4adbdf6fb7487d27f9e81c8f4bb5af49fae58 | 3,657,145 |
import copy
def my_browse(*args, **kwargs):
""" Creates and starts an ObjectBrowser with modified summary column.
"""
attribute_columns = copy.deepcopy(DEFAULT_ATTR_COLS)
summary_column = [col for col in attribute_columns if col.name == 'summary'][0]
summary_column.data_fn = my_summary
return browse(*args, attribute_columns = attribute_columns, **kwargs) | 3f5e681112bf5dd7a56a3259e188a1c5773f2cf5 | 3,657,146 |
import psutil
def cpu_min_frequency():
"""
Returns the processor's minimum frequency, in MHz (float)
"""
return psutil.cpu_freq().min | de4312ccd95e46d6d157bdb1a08d48fe5924942f | 3,657,147 |
def log_error(message: str) -> str:
"""error log"""
return message | dbd86c39bc504dbac8d308e124c73310df21f372 | 3,657,148 |
import datetime
from sqlalchemy import and_, or_
from sqlalchemy.sql.functions import coalesce
def exclude_preservation_pending(q):
"""
Transform query to exclude MuseumObject entries which are pending
preservation
"""
now = datetime.datetime.now(datetime.timezone.utc)
preservation_boundary = now - PRESERVATION_DELAY
update_boundary = now - UPDATE_DELAY
return (
q.outerjoin(
MuseumPackage,
MuseumPackage.id == MuseumObject.latest_package_id
)
.filter(
# If any of the four conditions is true, the object will not
# be preserved and are thus included in this query:
or_(
# 1. Is metadata information still incomplete?
MuseumObject.metadata_hash == None,
MuseumObject.attachment_metadata_hash == None,
# 2. Is the object frozen?
MuseumObject.frozen,
# 3. The object hasn't been preserved, but it has been less
# than a month passed since the creation of the object?
and_(
MuseumObject.latest_package_id == None,
coalesce(
MuseumObject.created_date, datetime.datetime.min
) > preservation_boundary
),
# 4. Has the object entered preservation before, but...
and_(
MuseumObject.latest_package_id != None,
# ...the package wasn't cancelled, and either...
MuseumPackage.cancelled == False,
or_(
# ...modification date hasn't changed?
coalesce(
MuseumPackage.object_modified_date,
datetime.datetime.min
) == coalesce(
MuseumObject.modified_date,
datetime.datetime.min
),
# ...modification date has changed, but it's been
# less than a month?
coalesce(
MuseumPackage.object_modified_date,
datetime.datetime.min
) > update_boundary,
# ...metadata hashes haven't changed, indicating no
# change has happened?
and_(
MuseumPackage.metadata_hash
== MuseumObject.metadata_hash,
MuseumPackage.attachment_metadata_hash
== MuseumObject.attachment_metadata_hash
)
)
)
)
)
) | a43eefeaaac16ac872ae02bd522873966e5f21e2 | 3,657,149 |
from datetime import date, datetime
def naturalday(value, format=None):
"""
For date values that are tomorrow, today or yesterday compared to
present day returns representing string. Otherwise, returns a string
formatted according to settings.DATE_FORMAT.
"""
value = localtime(value)
try:
tzinfo = getattr(value, 'tzinfo', None)
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't a date object.
return value
except ValueError:
# Date arguments out of range.
return value
today = datetime.now(tzinfo).date()
delta = value - today
if delta.days > 7:
return date_format(value, format)
elif delta.days > 2:
if value.weekday() == 0:
return _('Next Monday')
elif value.weekday() == 1:
return _('Next Tuesday')
elif value.weekday() == 2:
return _('Next Wednesday')
elif value.weekday() == 3:
return _('Next Thursday')
elif value.weekday() == 4:
return _('Next Friday')
elif value.weekday() == 5:
return _('Next Saturday')
else:
return _('Next Sunday')
elif delta.days == 2:
return _('After tomorrow')
elif delta.days == 1:
return _('Tomorrow')
elif delta.days == 0:
return _('Today')
elif delta.days == -1:
return _('Yesterday')
elif delta.days == -2:
return _('Before yesterday')
elif delta.days > -7:
if value.weekday() == 0:
return _('Last Monday')
elif value.weekday() == 1:
return _('Last Tuesday')
elif value.weekday() == 2:
return _('Last Wednesday')
elif value.weekday() == 3:
return _('Last Thursday')
elif value.weekday() == 4:
return _('Last Friday')
elif value.weekday() == 5:
return _('Last Saturday')
else:
return _('Last Sunday')
else:
return date_format(value, format) | fbc1fe32f5735f57c989488989aabd427a59c160 | 3,657,150 |
import tensorflow as tf
from torch.utils.data import DataLoader
def test_adaptors(adaptor: str, shuffle_buffer_size: int):
"""
Test if framework-specific generator adpators yield batches.
"""
idx = np.arange(0, 10)
def map_fn(x_, obs_):
"""
Note: Need to convert to numpy in output because torch does not accept dask.
"""
return (np.asarray(x_[:, :2]),),
kwargs = {"idx": {"Mus musculus": idx}, "obs_keys": [], "randomized_batch_access": False, "retrieval_batch_size": 2,
"map_fn": map_fn}
cart = _get_cart(store_format="dao", feature_space="single", **kwargs)
if adaptor == "python":
kwargs = {}
elif adaptor == "tensorflow":
kwargs = {"output_signature": (
tf.TensorSpec(shape=(2,), dtype=tf.float32),
)}
elif adaptor in ["torch", "torch-loader", "torch-iter-loader", "torch-iter"]:
kwargs = {}
else:
assert False
it = cart.adaptor(generator_type=adaptor, shuffle_buffer=shuffle_buffer_size, **kwargs)
if adaptor == "tensorflow":
it = iter(it.range(2))
if adaptor in ["torch", "torch-iter"]:
it = list(DataLoader(it))
it = iter(it)
if adaptor in ["torch-loader", "torch-iter-loader"]:
it = iter(list(it))
_ = next(it) | 088bd70f50b63a07f7392f1712de0d6aab9515a2 | 3,657,151 |
def qg8_graph_write(filename: str, graph: qg8_graph):
"""
Wrapper function which prepares a collection of chunks (graph) and writes it to a file
"""
if not isinstance(graph, qg8_graph):
raise TypeError("Second argument is not a qg8_graph")
try:
qg8f = qg8_file_open(filename, QG8_MODE_WRITE)
except:
raise IOError("Could not open file in write mode")
success = 1
for chunk in graph.chunks:
success *= qg8_file_write_chunk(qg8f, chunk)
qg8_file_flush(qg8f)
qg8_file_close(qg8f)
return success | a26891c86df5541cb1ffa3d3eb463bea5472d3d7 | 3,657,152 |
def valid_post_author(user, post):
"""This function checks whether the post was created by the user"""
if str(user.key().id()) == str(post.user.key().id()):
return True | 94ca2f23aa66f79be997080c61fc2f265e868e5f | 3,657,153 |
import json
import time
import collections
from datetime import datetime
def listing(request, **kwargs):
"""view for processing and applying listings"""
context = {
'view': 'listing',
'all_channels': CHANNELS,
'all_towns': TOWNS,
'method': request.method,
'actions': ['listing_parse', 'listing_apply'],
}
if request.method == 'GET':
context['action'] = 'show_listingModal'
return render(request, 'dvbboxes.html', context)
elif request.method == 'POST':
if 'listing/apply' in request.path:
form = forms.ApplyListingForm(request.POST)
if form.is_valid():
context['action'] = 'listing_apply'
parsed_data = json.loads(form.cleaned_data['parsed_data'])
service_id = form.cleaned_data['service_id']
towns = form.cleaned_data['towns']
if not towns:
towns = TOWNS
towns.sort()
# apply listing to servers in towns
days = [data['day'] for data in parsed_data]
days = sorted(
days,
key=lambda x: time.mktime(time.strptime(x, '%d%m%Y'))
)
response = dvbboxes.Listing.apply(
parsed_data, service_id, towns
)
# reorganize response by days
result = collections.OrderedDict()
for day in days:
result[day] = collections.OrderedDict()
bar = True
for town, data in response.items():
for day, infos in data.items():
for server, statuses in infos.items():
foo = all(statuses.values())
result[day][server] = foo
bar = bar and foo
context['result'] = result
return render(request, 'dvbboxes.html', context)
else:
context['errors'] = form.errors
return render(request, 'dvbboxes.html', context)
else:
form = forms.UploadListingForm(request.POST, request.FILES)
if form.is_valid():
filepath = handle_uploaded_file(request.FILES['filename'])
listing = dvbboxes.Listing(filepath) # get listing object
days = sorted(
listing.days,
key=lambda x: datetime.strptime(x, '%d%m%Y')
) # sort days in the listing
if len(days) > 31:
context['errors'] = ("Cannot process "
"more than 31 days")
return render(request, 'dvbboxes.html', context)
context['action'] = 'listing_parse'
missing_files = [
i for i, j in listing.filenames.items() if not j
] # detect missing files in the listing
result = collections.OrderedDict() # prepare final result
for day in days:
result[day] = []
parsed_listing = listing.parse()
json_result = []
for data in parsed_listing:
infos = collections.OrderedDict()
data = json.loads(data)
json_result.append(data)
day = data['day']
starts = [i for i in data if i != 'day']
starts = sorted(
starts,
key=lambda x: float(x.split('_')[1]))
absent_files = 0
for start in starts:
t, i = start.split('_')
start_litteral = datetime.fromtimestamp(
float(t)).strftime('%H:%M:%S')
stop_litteral = datetime.fromtimestamp(
float(t)+data[start]['duration']).strftime(
'%d-%m-%Y %H:%M:%S')
absent = not data[start]['duration']
if absent:
absent_files += 1
filename = data[start]['filename']
infos[i] = [
start_litteral, filename, absent
]
# we now define if the parsing is fine
limit = datetime.strptime(day, '%d%m%Y') + timedelta(1)
length_ok = (
datetime.fromtimestamp(
float(t)+data[start]['duration']) >= limit
)
if not absent_files and length_ok:
success = 0 # green
elif absent_files and length_ok:
success = 1 # lightblue
elif not absent_files and not length_ok:
success = 2 # orange
else:
success = 3 # red
result[day] = [infos, success, stop_litteral]
context['days'] = days
context['missing_files'] = missing_files
context['result'] = result
context['json_result'] = json.dumps(json_result)
return render(request, 'dvbboxes.html', context)
else:
context['errors'] = form.errors
return render(request, 'dvbboxes.html', context) | c4938dc4db4526ca93558305ea702660956e77fa | 3,657,154 |
def get_rise_or_fall(U, V, Im, demo=0):
"""
Get the increase or decrease of intensity in the flow direction; this
identifies the front and wake regions of each wave.
"""
rr, cc = np.shape(Im)
ax_x, ax_y = np.linspace(1, cc, cc), np.linspace(1, rr, rr)
XX, YY = np.meshgrid(ax_x, ax_y)
Velo_mag = np.hypot(U, V)
nU = U / Velo_mag
nV = V / Velo_mag
lookahead = 3
# indices of nearby pixels, small span
XX_next = np.round(XX + lookahead * nU)
YY_next = np.round(YY + lookahead * nV)
# interpolate
Im_next = map_coordinates(
Im, [YY_next.ravel(), XX_next.ravel()], order=3, mode="constant"
).reshape(Im.shape)
# wavesign = np.sign(Im_nxt-Im)
wavesign = Im_next < Im
# test interrupt -demo=3:shelved, 2:activate):
if demo == 2:
plt.close("all")
plt.figure()
plt.imshow(wavesign)
plt.title("front and wakes areas")
plt.xlabel("x (pixels)")
plt.ylabel("y (pixels)")
plt.show()
breakpoint() # click-to-code help
return wavesign | a2d86bd986f576054ccd2686af7d9da4ffd3a1f0 | 3,657,155 |
import functools
def has_vanity_name(func):
"""Decorator checking whether a command has been provided a vanity_name value"""
@functools.wraps(func)
async def wrapper(*args, **kwargs):
vanity_name = args[1]
if vanity_name is None:
ctx = args[0]
await ctx.send("Please provide a Steam vanity URL or steamid")
return
return await func(*args, **kwargs)
return wrapper | 5da3cc410822f0e112a2be1b3cdfc66fb4d79b0c | 3,657,156 |
from typing import List
import logging
def get_data_providers(
data_providers_configs: List[dict], data_providers_input: List[str]
) -> List[data.DataProvider]:
"""
Determines which data provider and in which order should be used.
:param data_providers_configs: A list of data provider configurations
:param data_providers_input: A list of data provider names
:return: a list of data providers in order.
"""
logger = logging.getLogger(__name__)
data_providers = []
for data_provider_config in data_providers_configs:
data_provider_config["class"] = DATA_PROVIDER_MAP[data_provider_config["type"]](
**data_provider_config["parameters"]
)
data_providers.append(data_provider_config)
selected_data_providers = []
for data_provider_name in data_providers_input:
found = False
for data_provider_config in data_providers:
if data_provider_config["name"] == data_provider_name:
selected_data_providers.append(data_provider_config["class"])
found = True
break
if not found:
logger.warning(
"The following data provider could not be found: {}".format(
data_provider_name
)
)
if len(selected_data_providers) == 0:
raise ValueError(
"None of the selected data providers are available. The following data providers are valid "
"options: "
+ ", ".join(
data_provider["name"] for data_provider in data_providers_configs
)
)
return selected_data_providers | 076659d2bf619808f5cb0ac124839e569af0c74a | 3,657,157 |
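# Illustrative configuration sketch (the names, types and parameters below are assumptions;
# the concrete entries in DATA_PROVIDER_MAP come from the surrounding project):
# configs = [{"name": "csv", "type": "csv", "parameters": {"path": "data.csv"}},
#            {"name": "api", "type": "rest", "parameters": {"url": "https://example.org"}}]
# providers = get_data_providers(configs, ["csv"])  # returns only the instantiated "csv" provider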
def _PredatorForFracas(config=None):
"""A helper to pass in the standard pipeline class."""
return PredatorForFracas(MOCK_GET_REPOSITORY, config or {}) | c7e1e3c771a8b8afa921a291198adc084f75d186 | 3,657,158 |
import numpy as np
# note: `aggregate` (e.g. from numpy_groupies) and `py_SurfStatEdg` are provided by the surrounding module
def py_SurfStatSmooth(Y, surf, FWHM):
"""Smooths surface data by repeatedly averaging over edges.
Parameters
----------
Y : numpy array of shape (n,v) or (n,v,k)
surface data, v=#vertices, n=#observations, k=#variates.
surf : a dictionary with key 'tri' or 'lat', or a BSPolyData object.
surf['tri'] = numpy array of shape (t,3), triangle indices, or
surf['lat'] = numpy array of shape (nx,ny,nz), 1=in, 0=out,
(nx,ny,nz) = size(volume).
FWHM : approximate FWHM of Gaussian smoothing filter, in mesh units.
Returns
-------
Y : numpy array of shape (n,v) or (n,v,k),
smoothed data.
"""
niter = int(np.ceil(pow(FWHM,2) / (2*np.log(2))))
if isinstance(Y, np.ndarray):
Y = np.array(Y, dtype='float')
if np.ndim(Y) == 2:
n, v = np.shape(Y)
k = 1
isnum = True
elif np.ndim(Y) == 3:
n, v, k = np.shape(Y)
isnum = True
edg = py_SurfStatEdg(surf) + 1
agg_1 = aggregate(edg[:,0], 2, size=(v+1))
agg_2 = aggregate(edg[:,1], 2, size=(v+1))
Y1 = (agg_1 + agg_2)[1:]
if n>1:
print(' %i x %i surfaces to smooth, %% remaining: 100 '%(n, k))
n10 = np.floor(n/10)
for i in range(0, n):
if n10 != 0 and np.remainder(i+1, n10) == 0:
print('%s ' % str(int(100-(i+1)/n10*10)), end = '')
for j in range(0, k):
if isnum:
if np.ndim(Y) == 2:
Ys = Y[i,:]
elif np.ndim(Y) == 3:
Ys = Y[i,:,j]
for itera in range(1, niter+1):
                Yedg = Ys[edg[:,0]-1] + Ys[edg[:,1]-1]
agg_tmp1 = aggregate(edg[:,0], Yedg, size=(v+1))[1:]
agg_tmp2 = aggregate(edg[:,1], Yedg, size=(v+1))[1:]
Ys = (agg_tmp1 + agg_tmp2) / Y1
if np.ndim(Y) == 2:
Y[i,:] = Ys
elif np.ndim(Y) == 3:
Y[i,:,j] = Ys
if n>1:
print('Done')
return Y | 6b537e33174459cee6364dbd145181c66156830d | 3,657,159 |
from typing import Tuple
def arm_name_to_sort_key(arm_name: str) -> Tuple[str, int, int]:
"""Parses arm name into tuple suitable for reverse sorting by key
Example:
arm_names = ["0_0", "1_10", "1_2", "10_0", "control"]
sorted(arm_names, key=arm_name_to_sort_key, reverse=True)
["control", "0_0", "1_2", "1_10", "10_0"]
"""
try:
trial_index, arm_index = arm_name.split("_")
return ("", -int(trial_index), -int(arm_index))
except (ValueError, IndexError):
return (arm_name, 0, 0) | c29958bb541a9754e7b4defc6ad953030a364d2f | 3,657,160 |
from typing import Optional
from typing import Mapping
from typing import Any
def run_query_row(cur: Cursor, sql: str, params: Optional[Mapping[str, Any]] = None, **kwargs: Any
) -> Optional[skytools.dbdict]:
""" Helper function if everything you need is just paramertisized execute to
fetch one row only. If not found none is returned
"""
params = params or kwargs
rows = run_query(cur, sql, params)
if len(rows) == 0:
return None
return rows[0] | 0ba46ba0666d0cbefeda5b3fe62ac5ed883a190f | 3,657,161 |
def vortex_indicator(high_arr, low_arr, close_arr, n):
"""Calculate the Vortex Indicator for given data.
Vortex Indicator described here:
http://www.vortexindicator.com/VFX_VORTEX.PDF
:param high_arr: high price of the bar, expect series from cudf
:param low_arr: low price of the bar, expect series from cudf
:param close_arr: close price of the bar, expect series from cudf
:param n: time steps to do EWM average
:return: Vortex Indicator in cudf.Series
"""
TR = true_range(high_arr.data.to_gpu_array(), low_arr.data.to_gpu_array(),
close_arr.data.to_gpu_array())
VM = lowhigh_diff(high_arr.data.to_gpu_array(),
low_arr.data.to_gpu_array())
VI = division(Rolling(n, VM).sum(), Rolling(n, TR).sum())
return cudf.Series(VI) | 8b34ca26f7cc52361eb95ff1ad17c010fd270759 | 3,657,162 |
from typing import Dict
def getServiceById(serviceId: str, **kwargs) -> Dict:
"""Retrieve service by its identifier.
Args:
serviceId: Identifier of service to be retrieved.
Returns:
Service object.
"""
db_collection_service = (
current_app.config['FOCA'].db.dbs['serviceStore']
.collections['services'].client
)
obj = db_collection_service.find_one({"id": serviceId})
if not obj:
raise NotFound
del obj["_id"]
return obj | fc568b337495873263f9a7ea85d46ac4bcd55819 | 3,657,163 |
from typing import Dict
from typing import Any
def replace_module_prefix(
state_dict: Dict[str, Any], prefix: str, replace_with: str = "", ignore_prefix: str = ""
):
"""
Remove prefixes in a state_dict needed when loading models that are not VISSL
trained models.
Specify the prefix in the keys that should be removed.
Added by DLM contributors: ignore_prefix is used to ignore certain keys in the state dict
"""
state_dict = {
(key.replace(prefix, replace_with, 1) if key.startswith(prefix) else key): val
for (key, val) in state_dict.items() if ((not key.startswith(ignore_prefix)) or ignore_prefix == "")
}
return state_dict | b8499c818053e7798e9549fbe546bab7d5fbfa84 | 3,657,164 |
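# Minimal illustrative example (made-up keys): strip a VISSL-style trunk prefix.
demo_state = {"_feature_blocks.conv1.weight": 0, "heads.0.fc.weight": 1}
print(replace_module_prefix(demo_state, "_feature_blocks."))
# {'conv1.weight': 0, 'heads.0.fc.weight': 1}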
import os
import glob
def create_csv(parent_dir, tsv_folder, export_csv = True):
"""
    Reads all .tsv files, combines them into a single .csv file, and exports the .csv file to the parent directory.
Args:
parent_dir (string) : The working directory you are working with
tsv_folder (string) : The name of the folder that you have stored .tsv files (Attention: It should be in your parent_dir)
export_csv (Boolean, Optional) : Set to False if you don't want to store the .csv file in your parent_dir. Default set to True
Returns:
The .csv file of all combined .tsv files
"""
if parent_dir[-1] != '/':
parent_dir += '/'
os.chdir(parent_dir + tsv_folder)
extension = 'tsv'
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
combined_csv = pd.concat([pd.read_csv(f, delimiter = '\t') for f in all_filenames ])
combined_csv.reset_index(inplace=True, drop= True)
print("The csv file has generated! With " + str(len(combined_csv)) + " number of entries.")
if export_csv:
os.chdir(parent_dir)
combined_csv.to_csv( "files/combined_csv.csv", index=False, encoding='utf-8-sig')
print("The csv file has been exported to " + parent_dir)
return combined_csv
else:
return combined_csv | a5efd653bbb23dc5b0135b03961521019e183c85 | 3,657,165 |
def crop(img, left, top, right, bottom):
"""
Crop rectangle from image.
Inputs:
img - The image to crop.
left - The leftmost index to crop the image.
top - The topmost index.
right - The rightmost index.
bottom - The bottommost index.
Outputs:
img - The cropped image.
"""
    # rows are selected with top:bottom and columns with left:right (NumPy images are [row, col])
    return img[top:bottom, left:right] | 1507a55bba07dc656f51f873d2328b69f70682c9 | 3,657,166 |
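# Illustrative example (not from the original source): crop a 2-row by 3-column window.
import numpy as np
demo_img = np.arange(25).reshape(5, 5)
print(crop(demo_img, left=1, top=0, right=4, bottom=2))   # rows 0-1, columns 1-3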
import os
def cl_file_with_height(tmp_path):
"""Create netcdf file for ``cl`` with hybrid height coordinate."""
nc_path = os.path.join(tmp_path, 'cl_hybrid_height.nc')
dataset = Dataset(nc_path, mode='w')
create_hybrid_height_file(dataset, 'cl')
dataset.close()
return nc_path | ff66c7adb505135a115d7b2ded2b80e2fea942ee | 3,657,167 |
import ipaddress
def get_hosts(network):
"""get_hosts() will return all the hosts within a provided network, range"""
network = ipaddress.IPv4Network(network, strict=False)
hosts_obj = network.hosts()
hosts = []
for i in hosts_obj:
hosts.append(str(i))
return hosts | 097fa3abbf1cda1c3c0ddc0c2fec4a06d1d44fa9 | 3,657,168 |
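# Illustrative example: a /30 network yields its two usable host addresses.
print(get_hosts("192.168.1.0/30"))   # ['192.168.1.1', '192.168.1.2']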
def select_organization(cursor):
"""organization情報取得(全取得)
Args:
cursor (mysql.connector.cursor): カーソル
Returns:
dict: select結果
"""
# select実行
cursor.execute('SELECT * FROM organization ORDER BY organization_id')
rows = cursor.fetchall()
return rows | 6e5c1a2f90d41223ba09fe3278353370515c0430 | 3,657,169 |
def _GetInstDisk(index, cb):
"""Build function for calling another function with an instance Disk.
@type index: int
@param index: Disk index
@type cb: callable
@param cb: Callback
"""
def fn(ctx, inst):
"""Call helper function with instance Disk.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
try:
      disk = inst.disks[index]
    except IndexError:
      return _FS_UNAVAIL
    return cb(ctx, index, disk)
return fn | 4dc83bb5c7ac3556750f9e3a70f77c9325893fb4 | 3,657,170 |
import numpy as np
from numpy import sin, cos
def Jphii_cal(L, W, q, xi_local):
    """Jacobian matrix of the task map."""
return np.array([[1, 0, -sin(q[2, 0]) * xi_local[0, 0] - cos(q[2, 0]) * xi_local[1, 0]],
[0, 1, cos(q[2, 0]) * xi_local[0, 0] - sin(q[2, 0]) * xi_local[1, 0]]], dtype = np.float32)
#return np.array([[1, 0, -xi_local[1, 0]],
# [0, 1, xi_local[0, 0]]], dtype = np.float32) | 300a3724829d8ce2df15801b6ae02e78e8e2e6b7 | 3,657,171 |
def model_evalution(test_data):
    """ Evaluate accuracy of the model on the provided validation data. """
    for X_test, y_test in test_data:
y_pred = model(X_test, training=False)
val_acc_metrics.update_state(y_test, y_pred)
accuracy = val_acc_metrics.result()
return float(accuracy) | d581013f50560082f8f6854f201cfd791be6e876 | 3,657,172 |
import inspect
import numpy
def make_python_script_from_list(list_optical_elements1,script_file=""):
"""
program to build automatically a python script to run shadow3
the system is read from a list of instances of Shadow.Source and Shadow.OE
    :argument list of optical_elements A python list with instances of Shadow.Source and Shadow.OE objects
:param script_file: a string with the name of the output file (default="", no output file)
:return: template with the script
"""
#make sure that the list does not contain lists
haslist = sum([isinstance(i,list) for i in list_optical_elements1])
list_optical_elements = list_optical_elements1
if haslist:
while(haslist > 0):
newlist = []
for i in list_optical_elements:
if isinstance(i,list):
newlist.extend(i)
else:
newlist.append(i)
list_optical_elements = newlist
haslist = sum([isinstance(i,list) for i in list_optical_elements])
#make sure that the list does not contain compoundOE (developed)
hascomp = sum([isinstance(i,(Shadow.CompoundOE,Shadow.ShadowLibExtensions.CompoundOE)) for i in list_optical_elements])
if hascomp:
newlist = []
for i in list_optical_elements:
if isinstance(i,(Shadow.CompoundOE,Shadow.ShadowLibExtensions.CompoundOE)):
newlist.extend(i.list)
else:
newlist.append(i)
list_optical_elements = newlist
template = """#
# Python script to run shadow3. Created automatically with ShadowTools.make_python_script_from_list().
#
import Shadow
import numpy
# write (1) or not (0) SHADOW files start.xx end.xx star.xx
iwrite = 0
#
# initialize shadow3 source (oe0) and beam
#
beam = Shadow.Beam()
"""
n_elements = len(list_optical_elements)
for i,element in enumerate(list_optical_elements):
if isinstance(element,Shadow.Source):
template += "oe0 = Shadow.Source()\n"
elif isinstance(element,Shadow.OE):
template += "oe%d = Shadow.OE()\n"%(i)
elif isinstance(element,Shadow.IdealLensOE):
template += "oe%d = Shadow.IdealLensOE()\n"%(i)
else:
raise Exception("Error: Element not known")
template += "\n#\n# Define variables. See meaning of variables in: \n" \
"# https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml \n" \
"# https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml\n#\n"
for ioe,oe1B in enumerate(list_optical_elements):
template += "\n"
if isinstance(oe1B,Shadow.Source):
oe1 = Shadow.Source()
        elif isinstance(oe1B,Shadow.OE):
            oe1 = Shadow.OE()
        elif isinstance(oe1B,Shadow.IdealLensOE):
            oe1 = Shadow.IdealLensOE()
else:
raise Exception("Error: Element not known")
if isinstance(oe1B,Shadow.IdealLensOE):
template += "oe"+str(ioe)+".T_SOURCE = "+str(oe1B.T_SOURCE).strip()+"\n"
template += "oe"+str(ioe)+".T_IMAGE = "+str(oe1B.T_IMAGE).strip()+"\n"
template += "oe"+str(ioe)+".focal_x = "+str(oe1B.focal_x).strip()+"\n"
template += "oe"+str(ioe)+".focal_z = "+str(oe1B.focal_z).strip()+"\n"
else:
memB = inspect.getmembers(oe1B)
mem = inspect.getmembers(oe1)
for i,var in enumerate(memB):
ivar = mem[i]
ivarB = memB[i]
if ivar[0].isupper():
if isinstance(ivar[1],numpy.ndarray):
# print(" are ALL different ? ", (ivar[1] != ivarB[1]).all())
# print(" are the same ? ", (ivar[1] == ivarB[1]).all())
# print(" there is at least ONE diff ? ", not((ivar[1] == ivarB[1]).all()))
if not( (ivar[1] == ivarB[1]).all()) :
line = "oe"+str(ioe)+"."+ivar[0]+" = numpy.array("+str(ivarB[1].tolist())+ ")\n"
template += line
# if (ivar[1] != ivarB[1]).all():
# line = "oe"+str(ioe)+"."+ivar[0]+" = "+str(ivarB[1])+"\n"
# if ("SPECIFIED" in line):
# pass
# else:
# template += line
else:
if ivar[1] != ivarB[1]:
if isinstance(ivar[1],(str,bytes)):
line = "oe"+str(ioe)+"."+ivar[0]+" = "+str(ivarB[1]).strip()+"\n"
#line = re.sub('\s{2,}', ' ',line)
if "SPECIFIED" in line:
pass
else:
template += line
else:
line = "oe"+str(ioe)+"."+ivar[0]+" = "+str(ivarB[1])+"\n"
template += line
template += """\n\n
#Run SHADOW to create the source
if iwrite:
oe0.write("start.00")
beam.genSource(oe0)
if iwrite:
oe0.write("end.00")
beam.write("begin.dat")
"""
template_oeA = """\n
#
#run optical element {0}
#
print(" Running optical element: %d"%({0}))
if iwrite:
oe{0}.write("start.{1}")
"""
template_oeB = """\n
if iwrite:
oe{0}.write("end.{1}")
beam.write("star.{1}")
"""
for i in range(1,n_elements):
template += template_oeA.format(i,"%02d"%(i))
if isinstance(list_optical_elements[i],Shadow.OE):
template += "\nbeam.traceOE(oe%d,%d)"%(i,i)
elif isinstance(list_optical_elements[i],Shadow.IdealLensOE):
template += "\nbeam.traceIdealLensOE(oe%d,%d)"%(i,i)
template += template_oeB.format(i,"%02d"%(i))
#
# display results (using ShadowTools, matplotlib needed)
#
template += """\n
Shadow.ShadowTools.plotxy(beam,1,3,nbins=101,nolost=1,title="Real space")
# Shadow.ShadowTools.plotxy(beam,1,4,nbins=101,nolost=1,title="Phase space X")
# Shadow.ShadowTools.plotxy(beam,3,6,nbins=101,nolost=1,title="Phase space Z")
"""
if script_file != "":
open(script_file, "wt").write(template)
print("File written to disk: %s"%(script_file))
return template | 85eb57955badaa4a2748be8ca6f2bf0f370b422d | 3,657,173 |
def flax_tag(arr):
"""Wraps a value in a flax module, to inspect intermediate values."""
return arr | be2fbef6117c859b7fc9dd7274815df4e70df17e | 3,657,174 |
def toEpoch( dateTimeObject = None ):
"""
Get seconds since epoch
"""
    if dateTimeObject is None:
dateTimeObject = dateTime()
return nativetime.mktime( dateTimeObject.timetuple() ) | f679f75e9d416c471491b0b933505fc6bbb6eb7d | 3,657,175 |
import requests
import json
def sendNotification(token, title, message, extraData=None, channelID=None):
"""
send Notification to Devices
:param token:
:param title:
:param message:
:return:
"""
url = 'https://exp.host/--/api/v2/push/send'
headers = {
"Content-Type": "application/json"
}
data = {
"to": token,
"title": title,
"body": message
}
# Verify we have Additional data to append
if extraData is not None:
data["data"] = extraData
# Android Only! Verify if we have a channel ID and append it
if channelID is not None:
data["channelId"] = channelID
res = requests.post(url, data=json.dumps(data), headers=headers)
return res.status_code | 1038dfd3872221a0d447b7708d58d95e931c59e5 | 3,657,176 |
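# Illustrative usage sketch (the Expo push token below is a placeholder, not a real token):
# status = sendNotification("ExponentPushToken[xxxxxxxx]", "Build finished",
#                           "Your nightly build completed", extraData={"screen": "builds"})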
import numpy as np
def make_phsfct_kernel(size_px, dpx, g_fac):
"""
Make a kernel for phase function convolution
:param size_px:
:param dpx: [deg/px]
:param g_fac:
:return: ph_ker [deg]
"""
    ke = np.mgrid[:size_px, :size_px].astype(float)  # float grid so the in-place centre shift below is valid
half = (size_px - 1) / 2
ke[0] -= half
ke[1] -= half
dist = np.sqrt(ke[0] * ke[0] + ke[1] * ke[1])
dist_deg = dist * dpx
ph_ker = phasefunc(g_fac, dist_deg) # Fill radially with phase function
# ph_ker = ph_ker/np.sum(ph_ker)
ph_ker = ph_ker / (2. * np.pi)
return ph_ker | 0f214d19f7418385f3db9155e8cabb06779fdf83 | 3,657,177 |
def sample_pts_ellipsoid_surface(mu, Q, NB_pts, random=True):
"""
Uniformly samples points on the surface of an ellipsoid, specified as
(xi-mu)^T Q^{-1} (xi-mu) == 1
arguments: mu - mean [dim]
Q - Q [dim x dim]
NB_pts - nb of points
random - True: Uniform sampling.
False: Uniform deterministic grid
output: ell_pts - points on the boundary of the ellipse [xdim x NB_pts]
"""
dim = mu.shape[0]
if dim != Q.shape[0] or dim != Q.shape[1]:
raise ValueError("mu (%d) and Q (%d,%d) must be the same size" %(mu.shape[0], Q.shape[0], Q.shape[1]))
if (Q == np.zeros((dim,dim))).all():
return np.zeros((dim,NB_pts))
if random == False and dim > 2:
raise ValueError("sample_pts_ellipsoid_surface: non random sampling not implemented")
mut = np.array([mu])
pts = sample_pts_unit_sphere(dim, NB_pts, random=random).T
E = np.linalg.cholesky(Q)
ell_pts = (mut + pts @ E.T).T
return ell_pts | 89fa8383d32b74e8c92a52792fe2de4d35816acc | 3,657,178 |
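# Illustrative usage sketch (assumes numpy as np and the module's sample_pts_unit_sphere helper):
# sample four points on the unit circle x^T I x == 1 centred at the origin.
# pts = sample_pts_ellipsoid_surface(np.zeros(2), np.eye(2), NB_pts=4)
# print(pts.shape)   # (2, 4)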
from os.path import dirname, join
def load_mzml_path():
"""Return the path to the mzML toy file.
Parameters
----------
None
Returns
-------
path_data : str
The path to the mzML data.
Examples
--------
>>> from specio.datasets import load_mzml_path
>>> load_mzml_path() # doctest: +ELLIPSIS
'...spectra.mzml'
"""
module_path = dirname(__file__)
return join(module_path, 'data', 'spectra.mzml') | b0548589a209b14ef336a28eeca74782f3550186 | 3,657,179 |
import pandas as pd
def _czce_df_read(url, skip_rows, encoding='utf-8', header=0):
    """
    Fetch table data from a Zhengzhou Commodity Exchange (CZCE) web page.
    :param header: row to use as the column names
    :type header: int
    :param url: page URL, string
    :param skip_rows: number of leading rows to skip, int
:param encoding: utf-8 or gbk or gb2312
:return: pd.DataFrame
"""
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36",
"Host": "www.czce.com.cn",
"Cookie": "XquW6dFMPxV380S=CAaD3sMkdXv3fUoaJlICIEv0MVegGq5EoMyBcxkOjCgSjmpuovYFuTLtYFcxTZGw; XquW6dFMPxV380T=5QTTjUlA6f6WiDO7fMGmqNxHBWz.hKIc8lb_tc1o4nHrJM4nsXCAI9VHaKyV_jkHh4cIVvD25kGQAh.MvLL1SHRA20HCG9mVVHPhAzktNdPK3evjm0NYbTg2Gu_XGGtPhecxLvdFQ0.JlAxy_z0C15_KdO8kOI18i4K0rFERNPxjXq5qG1Gs.QiOm976wODY.pe8XCQtAsuLYJ.N4DpTgNfHJp04jhMl0SntHhr.jhh3dFjMXBx.JEHngXBzY6gQAhER7uSKAeSktruxFeuKlebse.vrPghHqWvJm4WPTEvDQ8q",
}
r = requests_link(url, encoding, headers=headers)
data = pd.read_html(r.text, match='.+', flavor=None, header=header, index_col=0, skiprows=skip_rows, attrs=None,
parse_dates=False, thousands=', ', encoding="gbk", decimal='.',
converters=None, na_values=None, keep_default_na=True)
return data | 1491e312f1548141294d20b6ebe2fb4517cd3e07 | 3,657,180 |
import random
def select(weights):
"""
select a node with probability proportional to its "weight"
"""
r = random.random() * sum(weights)
s = 0.0
for k,w in enumerate(weights):
s += w
if r <= s:
return k
raise RuntimeError("select WTF from %s" % weights) | fed92de65cfae6f3532754215f5b88a564365ac7 | 3,657,181 |
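# Illustrative example: with these weights, index 2 is returned roughly 70% of the time.
print(select([0.1, 0.2, 0.7]))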
def kexo(spacecraft_id, sensor_id, band_id):
"""Sun exo-atmospheric irridiance [W/m2/sr]
This is used for processing surface reflectance.
Spacecraft_id: Landsat7
Sensor_id: ETM+
band_id: band1, band2, band3, band4, band5, band7, band8
Spacecraft_id: Terra
Sensor_id: Aster
band_id: band1, band2, band3, band4, band5, band7, band8, band9
kexo(spacecraft_id, sensor_id, band_id)
"""
if(spacecraft_id == "Landsat7"):
if (sensor_id == "ETM+"):
if(band_id == "band1"):
kexo = 1969.0
if(band_id == "band2"):
kexo = 1840.0
if(band_id == "band3"):
kexo = 1551.0
if(band_id == "band4"):
kexo = 1044.0
if(band_id == "band5"):
kexo = 225.7
if(band_id == "band7"):
kexo = 82.07
if(band_id == "band8"):
kexo = 1385.64 # Self calculated value...
else:
kexo = 0.0
if(spacecraft_id == "Terra"):
if (sensor_id == "Aster"):
if(band_id == "band1"):
kexo = 1828.0
if(band_id == "band2"):
kexo = 1559.0
if(band_id == "band3"):
kexo = 1045.0
if(band_id == "band4"):
kexo = 226.73
if(band_id == "band5"):
kexo = 86.50
if(band_id == "band7"):
kexo = 74.72
if(band_id == "band8"):
kexo = 66.41
if(band_id == "band9"):
kexo = 59.83
        else:
            kexo = 0.0
    else:
        kexo = 0.0
return kexo | 0e11a1b0b6ea8a43bef954273ed3a32a1d39c842 | 3,657,182 |
def gen_profile_id(profile_id):
"""
Generates the Elasticsearch document id for a profile
Args:
profile_id (str): The username of a Profile object
Returns:
str: The Elasticsearch document id for this object
"""
return "u_{}".format(profile_id) | 003586fe87d2936d9054aaa35963ae0241a5e594 | 3,657,183 |
import pathlib
import os
import sys
import re
def check_config():
"""
Check required fields are present in config.
"""
sections = [{'name': 'assembly',
'keys': ['accession', 'prefix', 'alias', 'span'],
'defaults': {'accession': 'draft', 'alias': '==prefix'}},
{'name': 'busco',
'keys': ['lineage_dir', 'lineages'],
'defaults': {'lineage_dir': 'busco_lineages', 'lineages': []}},
{'name': 'reads',
'keys': ['paired', 'single'],
'defaults': {'paired': [], 'single': []}},
{'name': 'settings',
'keys': ['blast_chunk', 'blast_max_chunks', 'blast_overlap',
'blobtools2_path', 'chunk', 'taxonomy', 'tmp'],
'defaults': {'blast_chunk': 100000, 'blast_max_chunks': 10,
'blast_overlap': 500, 'chunk': 1000000,
'tmp': '/tmp'}},
{'name': 'similarity',
'keys': ['databases', 'taxrule'],
'defaults': {'taxrule': 'eachdistorder'}},
{'name': 'taxon',
'keys': ['name', 'taxid'],
'defaults': {}}]
similarity_defaults = {'evalue': 1e-25,
'mask_ids': [],
'max_target_seqs': 10,
'root': 1}
container_defaults = {
'busco': {'lineage_dir': '/blobtoolkit/databases/busco'},
'settings': {'blobtools2_path': '/blobtoolkit/blobtools2',
'blobtools_viewer_path': '/blobtoolkit/viewer',
'taxonomy': '/blobtoolkit/databases/ncbi_taxdump'},
'similarity': {
'databases': [
{'local': '/blobtoolkit/databases/ncbi_db',
'name': 'nt'},
{'local': '/blobtoolkit/databases/uniprot_db',
'name': 'reference_proteomes'}
]
}
}
if pathlib.Path('/blobtoolkit/databases/ncbi_db').exists():
# set some container specific defaults
for section, defaults in container_defaults.items():
if section not in config:
config[section] = {}
for key, value in defaults.items():
if key not in config[section]:
config[section].update({key: value})
container_version = os.environ.get('CONTAINER_VERSION')
if container_version:
config.update({'container': {}})
config['container'].update({'version': container_version})
optional = ['busco', 'reads']
for section in sections:
if section['name'] not in config:
if section['name'] in optional:
print("INFO: optional section '%s' is not present in config file" % section['name'], file=sys.stderr)
config[section['name']] = {}
else:
raise ConfigurationError("ERROR: config file must contain a '%s' section with keys '%s'" % (section['name'],
', '.join(section['keys'])))
for key in section['keys']:
if key not in config[section['name']]:
if key in section['defaults']:
value = section['defaults'][key]
if isinstance(value, str) and value.startswith('=='):
value = config[section['name']][value.replace('==', '')]
print("INFO: using default value for '%s.%s'" % (section['name'], key), file=sys.stderr)
print(value, file=sys.stderr)
config[section['name']][key] = value
else:
raise ConfigurationError("ERROR: config file section '%s' must contain '%s'" % (section['name'], key))
# fill in additional database info
if 'defaults' not in config['similarity'] or not config['similarity']['defaults']:
config['similarity']['defaults'] = {}
for key, value in similarity_defaults.items():
if key not in config['similarity']['defaults']:
config['similarity']['defaults'].update({key: value})
for db in config['similarity']['databases']:
if 'name' not in db or 'local' not in db:
quit("ERROR: 'name' and 'local' must be specified for all databases")
if db['name'] == 'nt' or db['name'] == 'nt_v5':
db.update({'source': 'ncbi',
'tool': 'blast',
'type': 'nucl'})
elif db['name'] == 'reference_proteomes':
db.update({'source': 'uniprot',
'tool': 'diamond',
'type': 'prot'})
else:
print("INFO: only 'nt' and 'reference_proteomes' databases are supported, ignoring '%s'" % db['name'], file=sys.stderr)
if not re.match(r'^\w+$', config['assembly']['prefix']):
raise ConfigurationError("ERROR: assembly prefix '%s' contains non-word characters. Please use only letters, numbers and underscores." % config['assembly']['prefix'])
for readset in config['reads']['single'] + config['reads']['paired']:
if not re.match(r'^[a-zA-Z0-9]+$', readset[0]):
raise ConfigurationError("ERROR: read file basename '%s' contains non-word characters. Please use only letters and numbers." % readset[0])
if '--use-singularity' in sys.argv:
return True
return False | 2fa8d8768144a9c84fe20394ac572b63349a28cd | 3,657,184 |
async def get_self_info(credential: Credential):
"""
获取自己的信息
Args:
credential (Credential): Credential
"""
api = API["info"]["my_info"]
credential.raise_for_no_sessdata()
return await request("GET", api["url"], credential=credential) | 74cc7f5e43c555de45c382db27cd314bb2b5794e | 3,657,185 |
import os
def app(request):
"""Testable flask application"""
_app.config.from_mapping(
TESTING=True,
SECRET_KEY=os.environ.get('SECRET_KEY'),
SQLALCHEMY_DATABASE_URI=os.getenv('TEST_DATABASE_URL'),
SQLALCHEMY_TRACK_MODIFICATIONS=False,
WTF_CSRF_ENABLED=False
)
ctx = _app.app_context()
ctx.push()
def teardown():
"""Cleans up and closes out test session."""
ctx.pop()
request.addfinalizer(teardown)
return _app | 5c6837abda4cf3e58e1df63f67742e9b6bafcd48 | 3,657,186 |
def mpl_event_handler(event_type: MplEvent):
"""Marks the decorated method as given matplotlib event handler
.. note::
This decorator should be used only for methods of classes that
inherited from :class:`MplEventDispatcher` class.
This decorator can be used for reassignment event handlers in a dispatcher class.
Examples
--------
.. code-block:: python
from mpl_events import MplEventDispatcher, mpl_event_handler, mpl
class MyEventDispatcher(MplEventDispatcher):
@mpl_event_handler(MplEvent.KEY_PRESS)
def on_my_key_press(self, event: mpl.KeyPress):
pass
"""
class HandlerDescriptor:
"""Adds handler method name to event handlers mapping
"""
def __init__(self, handler):
self.handler = handler
def __get__(self, obj, cls=None):
return self.handler.__get__(obj, cls)
def __set_name__(self, owner, name):
if 'mpl_event_handlers' not in owner.__dict__:
owner.mpl_event_handlers = getattr(owner, 'mpl_event_handlers', {}).copy()
owner.mpl_event_handlers[event_type] = name
return HandlerDescriptor | 7cec2aad7f50daf832657bc01ac710159d1161a0 | 3,657,187 |
from collections import namedtuple
def get_date_pairs(in_dates, step):
    """
    Build entry/exit date pairs.
    :param in_dates: all entry dates
    :param step: step size (days between entry and exit)
    :return:
"""
DatePair = namedtuple('DatePair', ['in_date', 'out_date'])
date_pairs = []
for in_date in in_dates:
out_date = date_utility.date_cal(in_date, step)
date_pairs.append(DatePair(in_date, out_date))
return date_pairs | a2da0f3a48296de6c9f70b0e7535c8a2dd8e3d0b | 3,657,188 |
import random
import numpy as np
def new_jitters(jitter):
"""
update jitter vector every 100 frames by setting ~half of noise vector units to lower sensitivity
"""
jitters=np.zeros(128)
for j in range(128):
if random.uniform(0,1)<0.5:
jitters[j]=1
else:
jitters[j]=1-jitter
return jitters | cab660f8b8c6cfb21e745479cae95e964dc412b9 | 3,657,189 |
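# Illustrative example: roughly half of the 128 entries stay at 1.0, the rest are damped to 1 - jitter.
print(new_jitters(0.5)[:8])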
import logging
import os
import subprocess
import re
def reduce_jscs(file_line_mapping, **extra):
"""
Runs JSHCS on the project with the default configured rules. The output
is reduced to only contain entries from the Git change set.
:param file_line_mapping: Mapping of files with changed lines (obtained
`get_git_line_sets()`).
:param extra: Optional keyword arguments:
`norules`: If true, omit verbose output of violated rule identifier
(default: `False` to include rules).
:return: A tuple containing the formatted string suitable for output and
an integer containing the number of failed rules.
"""
norules = extra['norules'] if 'norules' in extra else False
# Get the JSCS output.
logging.info('Obtaining JSCS output ...')
os.chdir(PROJECT_PATH)
rules = config.JSHINT_RULES if not norules else ''
command = config.JSCS_COMMAND.format(binary=config.JSCS_BIN,
rules=rules)
output = None
try:
output = subprocess.check_output(command.split())
except subprocess.CalledProcessError as ex:
# JSCS found something, so it has returned an error code.
# But we still want the output in the same fashion.
output = ex.output
output = output.decode('utf8').split('\n\n')
# Go through output and collect only relevant lines to the result.
result = ['\nJSCS output:\n============']
lines_expression = re.compile(r'^ +(\d+) |.*(?:\n|\r\n?)-', re.MULTILINE)
file_expression = re.compile(r'^[^\b].* (?:\./)?(.+) :$', re.MULTILINE)
for item in output:
# Do the processing for every block here.
line_no_candidates = lines_expression.findall(item, re.MULTILINE)
# Check if we've got a relevant block.
if line_no_candidates and '' in line_no_candidates:
line_no = int(line_no_candidates[line_no_candidates.index('') - 1])
file_name = file_expression.findall(item)[0]
file_name = tuple(re.split(PATH_SPLITTER, file_name))
# Check if the line is part of our selection list.
if line_no in file_line_mapping[file_name]:
result.append(item)
# Add the number of errors and return in a nicely formatted way.
error_count = len(result) - 1
result.append('\n{} code style errors found.'.format(error_count))
return '\n\n'.join(result), error_count | 2b4014a44fbb1db1671f436a81f41a72b15188cf | 3,657,190 |
def add_manuscript_urls_to_ci_params(ci_params):
"""
Return and edit in-place the ci_params dictionary to include 'manuscript_url'.
This function assumes Travis CI is used to deploy to GitHub Pages, while
AppVeyor is used for storing manuscript artifacts for pull request builds.
"""
if not ci_params:
return ci_params
assert isinstance(ci_params, dict)
provider = ci_params.get('provider')
if provider == 'travis':
ci_params['manuscript_url'] = (
"https://{repo_owner}.github.io/{repo_name}/v/{commit}/"
.format(**ci_params)
)
if provider == 'appveyor':
ci_params['manuscript_url'] = f"{ci_params['build_url']}/artifacts"
return ci_params | 7d45c4fe8060d387d0238788e4b7566e09abc499 | 3,657,191 |
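# Illustrative example (made-up CI parameters):
demo_params = {"provider": "travis", "repo_owner": "someowner",
               "repo_name": "somerepo", "commit": "abc1234"}
add_manuscript_urls_to_ci_params(demo_params)
print(demo_params["manuscript_url"])   # https://someowner.github.io/somerepo/v/abc1234/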
def count_sites(vcfpath):
"""Extract number of sites in VCF from its tabix index."""
cmd = ["bcftools","index","--nrecords", vcfpath]
so, se, code = slurp_command(cmd)
return int(so) | 4f340827bbfc279e3b2601bd84ef68669ce1d829 | 3,657,192 |
import torch
from typing import Callable
def model_contrast_score(overlays: torch.Tensor, masks: torch.Tensor, object_labels: torch.Tensor,
scene_labels: torch.Tensor, object_model: Callable, scene_model: Callable,
object_method: Callable, scene_method: Callable, device: str):
"""
Model contrast score:
Difference of importance of object pixels for model trained on object labels
(should be important) and model trained on scene labels (should not be important)
"""
overlays = overlays.to(device)
object_labels = object_labels.to(device)
scene_labels = scene_labels.to(device)
masks = masks.squeeze().to(device)
# We check if both the object model and the scene model make the correct classification
with torch.no_grad():
y_pred_obj = torch.argmax(object_model(overlays), dim=1)
y_pred_scene = torch.argmax(scene_model(overlays), dim=1)
correctly_classified = ((y_pred_obj == object_labels) & (y_pred_scene == scene_labels))
object_model_attrs = object_method(overlays, object_labels)
scene_model_attrs = scene_method(overlays, scene_labels)
mask_sizes = torch.sum(masks.flatten(1), dim=1)
diffs = (object_model_attrs - scene_model_attrs) / mask_sizes
return diffs.cpu(), correctly_classified.cpu() | b44b0a958a79a1ad7a84de15817cdbc32160c13b | 3,657,193 |
from typing import Optional
def get_network_insights_access_scope_analysis(network_insights_access_scope_analysis_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkInsightsAccessScopeAnalysisResult:
"""
Resource schema for AWS::EC2::NetworkInsightsAccessScopeAnalysis
"""
__args__ = dict()
__args__['networkInsightsAccessScopeAnalysisId'] = network_insights_access_scope_analysis_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:ec2:getNetworkInsightsAccessScopeAnalysis', __args__, opts=opts, typ=GetNetworkInsightsAccessScopeAnalysisResult).value
return AwaitableGetNetworkInsightsAccessScopeAnalysisResult(
analyzed_eni_count=__ret__.analyzed_eni_count,
end_date=__ret__.end_date,
findings_found=__ret__.findings_found,
network_insights_access_scope_analysis_arn=__ret__.network_insights_access_scope_analysis_arn,
network_insights_access_scope_analysis_id=__ret__.network_insights_access_scope_analysis_id,
start_date=__ret__.start_date,
status=__ret__.status,
status_message=__ret__.status_message,
tags=__ret__.tags) | cbd65230cf553b438f4a78ad34f6faa9eafb119f | 3,657,194 |
import torch
import logging
import os
def learnable_eval(
cfg: OmegaConf, classifier, encoder: ContrastiveModel, training_data_loader: DataLoader,
val_data_loader: DataLoader, top_k: int,
) -> tuple:
"""
:param cfg: Hydra's config instance.
:param classifier: Instance of classifier with learnable parameters.
:param encoder: feature extractor trained on self-supervised method.
:param training_data_loader: Training data loader for a downstream task.
:param val_data_loader: Validation data loader for a downstream task.
:param top_k: The number of top-k for evaluation.
:return: tuple of train acc, train top-k acc, train loss, val acc, val top-k acc, and val loss.
"""
local_rank = cfg["distributed"]["local_rank"]
epochs = cfg["experiment"]["epochs"]
normalized = cfg["experiment"]["normalize"]
cross_entropy_loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(
params=classifier.parameters(),
lr=cfg["optimizer"]["lr"],
momentum=cfg["optimizer"]["momentum"],
nesterov=True,
weight_decay=cfg["optimizer"]["decay"]
)
cos_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs, eta_min=0.)
train_accuracies = []
train_top_k_accuracies = []
val_accuracies = []
val_top_k_accuracies = []
train_losses = []
val_losses = []
num_train = len(training_data_loader.dataset)
num_val = len(val_data_loader.dataset)
highest_val_acc = 0.
encoder.eval()
for epoch in range(1, epochs + 1):
classifier.train()
training_data_loader.sampler.set_epoch(epoch) # to shuffle dataset
for x, y in training_data_loader:
optimizer.zero_grad()
with torch.no_grad():
rep = encoder(x.to(local_rank))
if normalized:
rep = torch.nn.functional.normalize(rep, p=2, dim=1)
# t is not used
outputs = classifier(rep)
loss = cross_entropy_loss(outputs, y.to(local_rank))
loss.backward()
optimizer.step()
cos_lr_scheduler.step()
# train and val metrics
train_acc, train_top_k_acc, train_loss = calculate_accuracies_loss(
classifier, encoder, training_data_loader, local_rank, top_k=top_k, normalized=normalized
)
torch.distributed.barrier()
torch.distributed.reduce(train_acc, dst=0)
torch.distributed.reduce(train_top_k_acc, dst=0)
torch.distributed.reduce(train_loss, dst=0)
val_acc, val_top_k_acc, val_loss = calculate_accuracies_loss(
classifier, encoder, val_data_loader, local_rank, top_k=top_k, normalized=normalized
)
torch.distributed.barrier()
torch.distributed.reduce(val_acc, dst=0)
torch.distributed.reduce(val_top_k_acc, dst=0)
torch.distributed.reduce(val_loss, dst=0)
if local_rank == 0:
# NOTE: since drop=True, num_train is not approximate value
train_losses.append(train_loss.item() / num_train)
train_acc = train_acc.item() / num_train
train_accuracies.append(train_acc)
train_top_k_accuracies.append(train_top_k_acc.item() / num_train)
val_losses.append(val_loss.item() / num_val)
val_acc = val_acc.item() / num_val
val_accuracies.append(val_acc)
val_top_k_accuracies.append(val_top_k_acc.item() / num_val)
current_lr = optimizer.param_groups[0]["lr"]
current_progress = epoch / epochs
logging.info(f"Epoch:{epoch}/{epochs} progress:{current_progress:.2f}, train acc.:{train_acc * 100.:.1f} "
f"val acc.:{val_acc * 100.:.1f} lr:{current_lr:.4f}")
if highest_val_acc < val_acc and local_rank == 0:
# save best linear classifier on validation dataset
highest_val_acc = val_acc
# delete old checkpoint file
if "save_fname" in locals():
if os.path.exists(save_fname):
os.remove(save_fname)
save_fname = "epoch_{}-{}".format(epoch, cfg["experiment"]["output_model_name"])
torch.save(classifier.state_dict(), save_fname)
return train_accuracies, train_top_k_accuracies, train_losses, val_accuracies, val_top_k_accuracies, val_losses | 612639e490830d67cb333ba48954df8eddc83079 | 3,657,195 |
def wavenumber(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2):
"""Return the electromagnetic wavenumber-domain field.
Calculate the electromagnetic wavenumber-domain field due to infinitesimal
small electric or magnetic dipole source(s), measured by infinitesimal
small electric or magnetic dipole receiver(s); sources and receivers are
directed along the principal directions x, y, or z, and all sources are at
the same depth, as well as all receivers are at the same depth.
See Also
--------
dipole : Electromagnetic field due to an electromagnetic source (dipoles).
bipole : Electromagnetic field due to an electromagnetic source (bipoles).
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m): [x, y, z].
The x- and y-coordinates can be arrays, z is a single value.
The x- and y-coordinates must have the same dimension.
The x- and y-coordinates only matter for the angle-dependent factor.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
freq : array_like
Frequencies f (Hz), used to calculate etaH/V and zetaH/V.
wavenumber : array
Wavenumbers lambda (1/m)
ab : int, optional
Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+
| | electric source | magnetic source |
+===============+=======+======+======+======+======+======+======+
| | **x**| **y**| **z**| **x**| **y**| **z**|
+---------------+-------+------+------+------+------+------+------+
| | **x** | 11 | 12 | 13 | 14 | 15 | 16 |
+ **electric** +-------+------+------+------+------+------+------+
| | **y** | 21 | 22 | 23 | 24 | 25 | 26 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 31 | 32 | 33 | 34 | 35 | 36 |
+---------------+-------+------+------+------+------+------+------+
| | **x** | 41 | 42 | 43 | 44 | 45 | 46 |
+ **magnetic** +-------+------+------+------+------+------+------+
| | **y** | 51 | 52 | 53 | 54 | 55 | 56 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 61 | 62 | 63 | 64 | 65 | 66 |
+---------------+-------+------+------+------+------+------+------+
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
PJ0, PJ1 : array
Wavenumber-domain EM responses:
- PJ0: Wavenumber-domain solution for the kernel with a Bessel
function of the first kind of order zero.
- PJ1: Wavenumber-domain solution for the kernel with a Bessel
function of the first kind of order one.
Examples
--------
>>> import numpy as np
>>> from empymod.model import wavenumber
>>> src = [0, 0, 100]
>>> rec = [5000, 0, 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> freq = 1
>>> wavenrs = np.logspace(-3.7, -3.6, 10)
>>> PJ0, PJ1 = wavenumber(src, rec, depth, res, freq, wavenrs, verb=0)
>>> print(PJ0)
[ -1.02638329e-08 +4.91531529e-09j -1.05289724e-08 +5.04222413e-09j
-1.08009148e-08 +5.17238608e-09j -1.10798310e-08 +5.30588284e-09j
-1.13658957e-08 +5.44279805e-09j -1.16592877e-08 +5.58321732e-09j
-1.19601897e-08 +5.72722830e-09j -1.22687889e-08 +5.87492067e-09j
-1.25852765e-08 +6.02638626e-09j -1.29098481e-08 +6.18171904e-09j]
>>> print(PJ1)
[ 1.79483705e-10 -6.59235332e-10j 1.88672497e-10 -6.93749344e-10j
1.98325814e-10 -7.30068377e-10j 2.08466693e-10 -7.68286748e-10j
2.19119282e-10 -8.08503709e-10j 2.30308887e-10 -8.50823701e-10j
2.42062030e-10 -8.95356636e-10j 2.54406501e-10 -9.42218177e-10j
2.67371420e-10 -9.91530051e-10j 2.80987292e-10 -1.04342036e-09j]
"""
# === 1. LET'S START ============
t0 = printstartfinish(verb)
# === 2. CHECK INPUT ============
# Check layer parameters (isfullspace not required)
modl = check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV,
False, verb)
depth, res, aniso, epermH, epermV, mpermH, mpermV, _ = modl
# Check frequency => get etaH, etaV, zetaH, and zetaV
f = check_frequency(freq, res, aniso, epermH, epermV, mpermH, mpermV, verb)
freq, etaH, etaV, zetaH, zetaV = f
# Check src-rec configuration
# => Get flags if src or rec or both are magnetic (msrc, mrec)
ab_calc, msrc, mrec = check_ab(ab, verb)
# Check src and rec
src, nsrc = check_dipole(src, 'src', verb)
rec, nrec = check_dipole(rec, 'rec', verb)
# Get angle-dependent factor
off, angle = get_off_ang(src, rec, nsrc, nrec, verb)
factAng = kernel.angle_factor(angle, ab, msrc, mrec)
# Get layer number in which src and rec reside (lsrc/lrec)
lsrc, zsrc = get_layer_nr(src, depth)
lrec, zrec = get_layer_nr(rec, depth)
# === 3. EM-FIELD CALCULATION ============
# If <ab> = 36 (or 63), field is zero
# In `bipole` and in `dipole`, this is taken care of in `fem`. Here we
# have to take care of it separately
if ab_calc in [36, ]:
PJ0 = np.zeros((freq.size, off.size, wavenumber.size), dtype=complex)
PJ1 = PJ0.copy()
else: # Regular calculation
# Calculate wavenumber response
PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
etaV, zetaH, zetaV,
np.atleast_2d(wavenumber), ab_calc,
False, msrc, mrec, False)
# Collect output
PJ1 = factAng[:, np.newaxis]*PJ1
if ab in [11, 12, 21, 22, 14, 24, 15, 25]: # Because of J2
# J2(kr) = 2/(kr)*J1(kr) - J0(kr)
PJ1 /= off[:, None]
PJ0 = PJ0 + factAng[:, np.newaxis]*PJ0b
# === 4. FINISHED ============
printstartfinish(verb, t0, 1)
return np.squeeze(PJ0), np.squeeze(PJ1) | c108f3343936a62b0d49a3807d2d25b4f3fc1eda | 3,657,196 |
def gumbel_softmax(logits, temperature, dtype=tf.float32, seed=0):
"""Gumbel Softmax Layer."""
log_alpha = tf.nn.log_softmax(logits)
eps = 1e-7
gumbel = -tf.log(-tf.log(
tf.random_uniform(
tf.shape(logits), minval=0, maxval=1 - eps, dtype=dtype, seed=seed) +
eps))
prob = tf.nn.softmax((log_alpha + gumbel) / temperature)
return prob | 3889105f39e6f81c35e1a3ca94685b6e6d7e3f37 | 3,657,197 |
def divide(num1, num2=1):
"""
    Division.
:param num1: int
:param num2: int
:return: float
"""
    # guard against a zero divisor and raise the custom exception
if num2 == 0:
raise InvalidOpreation()
val = num1 / num2
return val | 6bcc9631ebba74a15f16f8da0a9dc7f76e372725 | 3,657,198 |
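# Illustrative example: a zero divisor raises the project's InvalidOpreation exception.
print(divide(10, 4))   # 2.5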
def convert2int(image):
""" Transfrom from float tensor ([-1.,1.]) to int image ([-1024,6500])
"""
return tf.image.convert_image_dtype((image + 1) * 2036 - 1000, tf.float32) | 1697e6bb6911e936e9ff4bbb0ab37ddfc8115340 | 3,657,199 |