content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def fuzzy_lookup_item(name_or_id, lst):
"""Lookup an item by either name or id.
    Looking up by id is an exact match. Looking up by name is by containment, and
if the term is entirely lowercase then it's also case-insensitive.
Multiple matches will throw an exception, unless one of them was an exact
match.
"""
try:
idd = int(name_or_id)
for val in lst:
if val.id == idd:
return val
raise RuntimeError('Id %d not found!' % idd)
except ValueError:
insensitive = name_or_id.islower()
matches = []
for val in lst:
name = val.name or ''
if name_or_id == name:
return val
if insensitive:
name = name.lower()
if name_or_id in name:
matches.append(val)
if len(matches) == 1:
return matches[0]
if not matches:
raise RuntimeError(f'No name containing {name_or_id!r} found!') from None
raise RuntimeError(
f'Multiple matches for {name_or_id!r}: {[x.name for x in matches]}') from None | 5,352,800 |
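# Hedged usage sketch (not from the original source): the lookup assumes the items in
# `lst` expose `.id` and `.name` attributes, e.g. simple namespace objects.
from types import SimpleNamespace

items = [SimpleNamespace(id=1, name='Alpha'), SimpleNamespace(id=2, name='alphabet')]
assert fuzzy_lookup_item('1', items).id == 1              # numeric string -> exact id match
assert fuzzy_lookup_item('Alpha', items).name == 'Alpha'  # exact name match wins over containment
assert fuzzy_lookup_item('alphab', items).id == 2         # lowercase term -> case-insensitive containment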
def _make_output_dirs(root_output_dir, experiment_name):
"""Get directories for outputs. Create if not exist."""
tf.io.gfile.makedirs(root_output_dir)
checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)
tf.io.gfile.makedirs(checkpoint_dir)
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
tf.io.gfile.makedirs(results_dir)
summary_dir = os.path.join(root_output_dir, 'logdir', experiment_name)
tf.io.gfile.makedirs(summary_dir)
return checkpoint_dir, results_dir, summary_dir | 5,352,801 |
def fantasy_pros_ecr_scrape(league_dict=config.sean):
"""Scrape Fantasy Pros ECR given a league scoring format
:param league_dict: league dict in config.py used to determine whether to pull PPR/standard/half-ppr
"""
scoring = league_dict.get('scoring')
if scoring == 'ppr':
url = 'https://www.fantasypros.com/nfl/rankings/ppr-cheatsheets.php'
elif scoring == 'half-ppr':
url = 'https://www.fantasypros.com/nfl/rankings/half-point-ppr-cheatsheets.php'
else:
url = 'https://www.fantasypros.com/nfl/rankings/consensus-cheatsheets.php'
html = scrape_dynamic_javascript(url)
parsed_dict = parse_ecr_html(html)
return pd.DataFrame(parsed_dict) | 5,352,802 |
def lda(X, y, nr_components=2):
"""
    Linear discriminant analysis
:param X: Input vectors
:param y: Input classes
:param nr_components: Dimension of output co-ordinates
:return: Output co-ordinates
"""
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
return discriminant_analysis.LinearDiscriminantAnalysis(n_components=nr_components).fit_transform(X2, y) | 5,352,803 |
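# Hedged usage sketch: `discriminant_analysis` above is assumed to come from
# scikit-learn; with toy data the call projects 5 features onto 2 LDA components.
import numpy as np
from sklearn import discriminant_analysis

X = np.random.rand(99, 5)              # 99 samples, 5 features
y = np.repeat([0, 1, 2], 33)           # 3 balanced classes
coords = lda(X, y, nr_components=2)    # -> array of shape (99, 2)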
def _flatten_value_to_list(batch_values):
"""Converts an N-D dense or sparse batch to a 1-D list."""
# Ravel for flattening and tolist so that we go to native Python types
# for more efficient followup processing.
#
batch_value, = batch_values
return batch_value.ravel().tolist() | 5,352,804 |
async def test_option_flow_input_floor(hass):
"""Test config flow options."""
entry = MockConfigEntry(domain=DOMAIN, data={}, options=None)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_SCAN_INTERVAL: 1}
)
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_SCAN_INTERVAL: MIN_SCAN_INTERVAL,
CONF_WAKE_ON_START: DEFAULT_WAKE_ON_START,
} | 5,352,805 |
def sun_position(time):
"""
Computes the sun's position in longitude and colatitude at a given time
(mjd2000).
It is accurate for years 1901 through 2099, to within 0.006 deg.
Input shape is preserved.
Parameters
----------
time : ndarray, shape (...)
Time given as modified Julian date, i.e. with respect to the date 0h00
January 1, 2000 (mjd2000).
Returns
-------
theta : ndarray, shape (...)
Geographic colatitude of sun's position in degrees
:math:`[0^\\circ, 180^\\circ]`.
phi : ndarray, shape (...)
Geographic east longitude of sun's position in degrees
:math:`(-180^\\circ, 180^\\circ]`.
References
----------
Taken from `here <http://jsoc.stanford.edu/doc/keywords/Chris_Russel/
Geophysical%20Coordinate%20Transformations.htm#appendix2>`_
"""
rad = pi / 180
year = 2000 # reference year for mjd2000
assert np.all((year + time // 365.25) < 2099) \
and np.all((year - time // 365.25) > 1901), \
("Time must be between 1901 and 2099.")
frac_day = np.remainder(time, 1) # decimal fraction of a day
julian_date = 365 * (year-1900) + (year-1901)//4 + time + 0.5
t = julian_date/36525
v = np.remainder(279.696678 + 0.9856473354*julian_date, 360.)
g = np.remainder(358.475845 + 0.985600267*julian_date, 360.)
slong = v + (1.91946 - 0.004789*t)*np.sin(g*rad) + 0.020094*np.sin(2*g*rad)
obliq = (23.45229 - 0.0130125*t)
slp = (slong - 0.005686)
sind = np.sin(obliq*rad)*np.sin(slp*rad)
cosd = np.sqrt(1.-sind**2)
# sun's declination in radians
declination = np.arctan(sind/cosd)
    # sun's right ascension in radians (0, 2*pi)
right_ascension = pi - np.arctan2(sind/(cosd * np.tan(obliq*rad)),
-np.cos(slp*rad)/cosd)
# Greenwich mean siderial time in radians (0, 2*pi)
gmst = np.remainder(279.690983 + 0.9856473354*julian_date
+ 360.*frac_day + 180., 360.) * rad
theta = degrees(pi/2 - declination) # convert to colatitude
phi = center_azimuth(degrees(right_ascension - gmst))
return theta, phi | 5,352,806 |
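# Hedged usage sketch: `pi`, `degrees` and `center_azimuth` above are assumed to be
# module-level helpers; the caller only supplies mjd2000 times (days since 2000-01-01).
import numpy as np

mjd2000 = np.array([0.0, 1234.5])
theta, phi = sun_position(mjd2000)   # colatitude and east longitude in degrees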
def verify_shape_batch_pair(orig_sample: Tuple[torch.Tensor, torch.Tensor],
new_sample: Tuple[torch.Tensor, torch.Tensor], p_row: float, p_col: float) -> None:
"""Verify the shape of a transformed batch of images."""
N, C, H_o, W_o = orig_sample[0].shape
H_t = int((1 - p_row) * H_o)
W_t = int((1 - p_col) * W_o)
assert new_sample[0].shape == (N, C, H_t, W_t), f'Input shape mismatch: {new_sample[0].shape} != {(N, C, H_t, W_t)}'
assert new_sample[1].shape == (N, C, H_t,
W_t), f'Target shape mismatch: {new_sample[1].shape} != {(N, C, H_t, W_t)}' | 5,352,807 |
def register_config(app):
"""配置文件"""
"""
暂时兼容旧注册配置文件,后续废除。
"""
# 旧注册配置文件
# app.config.from_object(config_obj[app_conf()]) # 环境配置
# config_obj[app_conf()].init_app(app)
# 新注册配置文件
app.config.from_object(config_obj['config']) # 环境配置
app.logger.info(str(config_obj['config']))
# config_obj['config'].init_app(app) | 5,352,808 |
def get_assay_description(assay_id, summary=True, attempts=10):
""" Get the description of an assay in JSON format.
Parameters
----------
assay_id : int
The id of the bioassay.
summary : bool, optional
If true returns a summary of the description of the assay (default=True).
attempts : int, optional
number of times to try to download the data in case of failure
(default=10).
Returns
--------
dict
A dictionary containing the assay description.
"""
assay_url = base_url + "/assay/aid/{}".format(assay_id)
if summary:
description_url = assay_url + "/summary/JSON"
else:
description_url = assay_url + "/description/JSON"
data = _get_data(description_url, attempts)
return json.loads(data) | 5,352,809 |
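# Hedged usage sketch: `base_url` and `_get_data` above belong to the surrounding
# module (a PubChem-style REST wrapper); the assay id below is only illustrative.
summary = get_assay_description(1000, summary=True)
print(summary.keys())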
def acos(expr):
"""
Arc cosine -- output in radians.
    It is the same as the :code:`arccos` Moodle math function.
"""
return Expression('acos({0})'.format(str(expr))) | 5,352,810 |
def validate_api_declaration(api_declaration):
"""Validate an API Declaration (§5.2).
    :param api_declaration: a dictionary representation of an API Declaration.
:returns: `None` in case of success, otherwise raises an exception.
:raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
:raises: :py:class:`jsonschema.exceptions.ValidationError`
"""
validate_json(api_declaration, 'schemas/v1.2/apiDeclaration.json')
model_ids = get_model_ids(api_declaration)
for api in api_declaration['apis']:
validate_api(api, model_ids)
for model_name, model in six.iteritems(api_declaration.get('models', {})):
validate_model(model, model_name, model_ids) | 5,352,811 |
def node_compat_sdk2203():
"""Replace old arm_logic_id system with tree variable system."""
for tree in bpy.data.node_groups:
if tree.bl_idname == 'ArmLogicTreeType':
# All tree variable nodes
tv_nodes: dict[str, list[arm.logicnode.arm_nodes.ArmLogicVariableNodeMixin]] = {}
# The type of the tree variable. If two types are found for
# a logic ID and one is dynamic, assume it's a getter node.
# Otherwise show a warning upon conflict, it was undefined
# behaviour before anyway.
tv_types: dict[str, str] = {}
# First pass: find all tree variable nodes and decide the
# variable type in case of conflicts
node: arm.logicnode.arm_nodes.ArmLogicTreeNode
for node in list(tree.nodes):
if node.arm_logic_id != '':
if not isinstance(node, arm.logicnode.arm_nodes.ArmLogicVariableNodeMixin):
arm.log.warn(
'While updating the file to the current SDK'
f' version, the node {node.name} in tree'
f' {tree.name} is no variable node but had'
' a logic ID. The logic ID was reset to'
' prevent undefined behaviour.'
)
node.arm_logic_id = ''
continue
if node.arm_logic_id in tv_nodes:
tv_nodes[node.arm_logic_id].append(node)
# Check for getter nodes and type conflicts
cur_type = tv_types[node.arm_logic_id]
if cur_type == 'LNDynamicNode':
tv_types[node.arm_logic_id] = node.bl_idname
elif cur_type != node.bl_idname and node.bl_idname != 'LNDynamicNode':
arm.log.warn(
'Found nodes of different types with the'
' same logic ID while updating the file'
' to the current SDK version (undefined'
' behaviour).\n'
f'\tConflicting types: {cur_type}, {node.bl_idname}\n'
f'\tLogic ID: {node.arm_logic_id}\n'
f'\tNew type for both nodes: {cur_type}'
)
else:
tv_nodes[node.arm_logic_id] = [node]
tv_types[node.arm_logic_id] = node.bl_idname
# Second pass: add the tree variable and convert all found
# tree var nodes to the correct type
for logic_id in tv_nodes.keys():
var_type = tv_types[logic_id]
var_item = ARM_PG_TreeVarListItem.create_new(tree, logic_id, var_type)
for node in tv_nodes[logic_id]:
if node.bl_idname != var_type:
newnode = tree.nodes.new(var_type)
arm.node_utils.copy_basic_node_props(from_node=node, to_node=newnode)
                        # Connect outputs as well as possible, matching old and new sockets by index
                        for i in range(min(len(node.outputs), len(newnode.outputs))):
                            for link in node.outputs[i].links:
                                tree.links.new(newnode.outputs[i], link.to_socket)
tree.nodes.remove(node)
node = newnode
# Hide sockets
node.on_logic_id_change()
node.use_custom_color = True
node.color = var_item.color
arm.logicnode.arm_nodes.ArmLogicVariableNodeMixin.choose_new_master_node(tree, logic_id) | 5,352,812 |
def visualize_ranked_results(distmat, dataset, save_dir='log/ranked_results', topk=20):
"""
Visualize ranked results
Support both imgreid and vidreid
Args:
- distmat: distance matrix of shape (num_query, num_gallery).
- dataset: a 2-tuple containing (query, gallery), each contains a list of (img_path, pid, camid);
for imgreid, img_path is a string, while for vidreid, img_path is a tuple containing
a sequence of strings.
- save_dir: directory to save output images.
- topk: int, denoting top-k images in the rank list to be visualized.
"""
num_q, num_g = distmat.shape
print("Visualizing top-{} ranks".format(topk))
print("# query: {}\n# gallery {}".format(num_q, num_g))
print("Saving images to '{}'".format(save_dir))
query, gallery = dataset
assert num_q == len(query)
assert num_g == len(gallery)
indices = np.argsort(distmat, axis=1)
mkdir_if_missing(save_dir)
def _cp_img_to(src, dst, rank, prefix, gidx=None):
"""
- src: image path or tuple (for vidreid)
- dst: target directory
- rank: int, denoting ranked position, starting from 1
- prefix: string
"""
if isinstance(src, tuple) or isinstance(src, list):
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
mkdir_if_missing(dst)
for img_path in src:
shutil.copy(img_path, dst)
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3) + '_pid_' + str(gidx) + '_name_' + osp.basename(src))
shutil.copy(src, dst)
counter = 0
for q_idx in range(num_q):
qimg_path, qpid, qcamid = query[q_idx]
qdir = osp.join(save_dir, 'query' + str(q_idx + 1).zfill(5)) #osp.basename(qimg_path))
mkdir_if_missing(qdir)
_cp_img_to(qimg_path, qdir, rank=0, prefix='query', gidx=qpid)
if counter > 50:
break
rank_idx = 1
for g_idx in indices[q_idx,:]:
gimg_path, gpid, gcamid = gallery[g_idx]
invalid = (qpid == gpid) & (qcamid == gcamid)
if not invalid:
_cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='_gallery', gidx=gpid)
rank_idx += 1
if rank_idx > topk:
break
counter += 1
print("Done")
draw_mosaic(save_dir) | 5,352,813 |
def test_scope_works(tmpdir):
"""
scripts which define variables in the global scope should
have access to them.
"""
SCRIPT = """
import os
from surgen import Procedure
class Scope(Procedure):
def operate(self):
print(os.path)
return __file__
""".strip()
cls = from_string(SCRIPT, "foo")
p = cls("foo", tmpdir.strpath)
assert p.operate() == "foo" | 5,352,814 |
def annotate_genes(gene_df, annotation_gtf, lookup_df=None):
"""
Add gene and variant annotations (e.g., gene_name, rs_id, etc.) to gene-level output
gene_df: output from map_cis()
annotation_gtf: gene annotation in GTF format
lookup_df: DataFrame with variant annotations, indexed by 'variant_id'
"""
gene_dict = {}
print('['+datetime.now().strftime("%b %d %H:%M:%S")+'] Adding gene and variant annotations', flush=True)
print(' * parsing GTF', flush=True)
with open(annotation_gtf) as gtf:
for row in gtf:
row = row.strip().split('\t')
if row[0][0]=='#' or row[2]!='gene': continue
# get gene_id and gene_name from attributes
attr = dict([i.split() for i in row[8].replace('"','').split(';') if i!=''])
# gene_name, gene_chr, gene_start, gene_end, strand
gene_dict[attr['gene_id']] = [attr['gene_name'], row[0], row[3], row[4], row[6]]
print(' * annotating genes', flush=True)
if 'group_id' in gene_df:
gene_info = pd.DataFrame(data=[gene_dict[i] for i in gene_df['group_id']],
columns=['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand'],
index=gene_df.index)
else:
gene_info = pd.DataFrame(data=[gene_dict[i] for i in gene_df.index],
columns=['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand'],
index=gene_df.index)
gene_df = pd.concat([gene_info, gene_df], axis=1)
assert np.all(gene_df.index==gene_info.index)
col_order = ['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand',
'num_var', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df', 'variant_id', 'tss_distance']
if lookup_df is not None:
print(' * adding variant annotations from lookup table', flush=True)
gene_df = gene_df.join(lookup_df, on='variant_id') # add variant information
col_order += list(lookup_df.columns)
col_order += ['ma_samples', 'ma_count', 'af', 'pval_nominal',
'slope', 'slope_se', 'pval_perm', 'pval_beta']
if 'group_id' in gene_df:
col_order += ['group_id', 'group_size']
col_order += ['qval', 'pval_nominal_threshold']
gene_df = gene_df[col_order]
print('done.', flush=True)
return gene_df | 5,352,815 |
def y_gate():
"""
Pauli y
"""
return torch.tensor([[0, -1j], [1j, 0]]) + 0j | 5,352,816 |
def extract_tarball(tarball, install_dir):
"""Extract tarball to a local path"""
if not tarball.path.is_file():
raise IOError(f"<info>{tarball.path}</info> is not a file!")
try:
with tarfile.open(tarball.path, "r:gz") as f_tarball:
extraction_dir = [
obj.name
for obj in f_tarball.getmembers()
if obj.isdir() and "/" not in obj.name
][0]
f_tarball.extractall(install_dir)
except tarfile.ReadError as exc:
raise IOError(f"<info>{tarball.path}</info> is not a valid tarball!") from exc
return install_dir / extraction_dir | 5,352,817 |
def compose_matrix(scale=None, shear=None, angles=None, translation=None, perspective=None):
"""Calculates a matrix from the components of scale, shear, euler_angles, translation and perspective.
Parameters
----------
scale : [float, float, float]
The 3 scale factors in x-, y-, and z-direction.
shear : [float, float, float]
The 3 shear factors for x-y, x-z, and y-z axes.
angles : [float, float, float]
The rotation specified through the 3 Euler angles about static x, y, z axes.
translation : [float, float, float]
The 3 values of translation.
perspective : [float, float, float, float]
The 4 perspective entries of the matrix.
Returns
-------
list[list[float]]
The 4x4 matrix that combines the provided transformation components.
Examples
--------
>>> trans1 = [1, 2, 3]
>>> angle1 = [-2.142, 1.141, -0.142]
>>> scale1 = [0.123, 2, 0.5]
>>> M = compose_matrix(scale1, None, angle1, trans1, None)
>>> scale2, shear2, angle2, trans2, persp2 = decompose_matrix(M)
>>> allclose(scale1, scale2)
True
>>> allclose(angle1, angle2)
True
>>> allclose(trans1, trans2)
True
"""
M = [[1. if i == j else 0. for i in range(4)] for j in range(4)]
if perspective is not None:
P = matrix_from_perspective_entries(perspective)
M = multiply_matrices(M, P)
if translation is not None:
T = matrix_from_translation(translation)
M = multiply_matrices(M, T)
if angles is not None:
R = matrix_from_euler_angles(angles, static=True, axes="xyz")
M = multiply_matrices(M, R)
if shear is not None:
H = matrix_from_shear_entries(shear)
M = multiply_matrices(M, H)
if scale is not None:
S = matrix_from_scale_factors(scale)
M = multiply_matrices(M, S)
for i in range(4):
for j in range(4):
M[i][j] /= M[3][3]
return M | 5,352,818 |
def start(event):
"""move to the start"""
global timestep
timestep = 0
plot_figure(varnames, var, timestep, False)
slider.set_val(var.time[timestep]) | 5,352,819 |
def validate_get_build_request(req):
"""Validates rpc_pb2.GetBuildRequest."""
if req.id:
if req.HasField('builder') or req.build_number:
_err('id is mutually exclusive with builder and build_number')
elif req.HasField('builder') and req.build_number:
validate_builder_id(req.builder)
else:
_err('id or (builder and build_number) are required') | 5,352,820 |
def dict_merge(set1, set2):
"""Joins two dictionaries."""
return dict(list(set1.items()) + list(set2.items())) | 5,352,821 |
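# Hedged usage note: keys present in both dicts take the value from set2, because its
# items come last in the concatenated list; `{**set1, **set2}` behaves the same way.
merged = dict_merge({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
assert merged == {'a': 1, 'b': 3, 'c': 4}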
def EucDistIntegral(a, b, x):
"""[summary]
Calculate Integrated Euclidean distance.
Args:
a (float): a value
b (float): b value
x (float): x value
Returns:
val: Integration result
"""
asq = a * a
bsq = b * b
xsq = x * x
dn = (6 * (1 + asq)**(3 / 2))
cx = (a * b + x + asq * x) / \
sqrt((bsq + 2 * a * b * x + (1 + asq) * xsq)) / sqrt((1 + asq))
if abs(abs(cx) - 1) <= 1E-9 or np.isnan(cx):
c1 = x * b**2
else:
c1 = b**3 * arctanh(np.float(cx))
c2 = sqrt(bsq + 2 * a * b * x + (1 + asq) * xsq) * \
(2 * b * x + 2 * asq * b * x + a**3 * xsq + a * (bsq + xsq))
if x == 0:
c4 = 0
else:
c3 = abs(x) / (b + a * x + sqrt(xsq + (b + a * x)**2))
if np.isnan(c3) or np.isinf(c3):
if b == 0:
c3 = 1 / (sign(x) * a + sqrt(asq + 1))
else:
c3 = -2 * b / abs(x)
c4 = (1 + asq) * x**3 * log(c3)
return (c1 + sqrt(1 + asq) * (c2 - c4)) / dn | 5,352,822 |
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10 | 5,352,823 |
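# Hedged sanity check (not from the original source): each varint byte carries 7
# payload bits, so for positive values the size is ceil(bit_length / 7), capped at 10.
assert _VarintSize(0x7f) == 1 and _VarintSize(0x80) == 2
assert _VarintSize(1 << 62) == 9 and _VarintSize(1 << 63) == 10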
def play_again():
"""Re-Start Game.
Question if want to play again or not.
"""
again = input("Would you like to play again? (y/n)").lower()
if again == "y":
typing_game("\n\n\nExcellent! Restarting the game ...\n\n\n")
play_the_game()
elif again == "n":
typing_game("\n\n\nThank you for playing this game!"
"\nI hope you enjoy!\n"
"See you next time.\n\n\n")
clear_screen()
else:
play_again() | 5,352,824 |
def convert_addmm(g, op, block):
"""Operator converter for addmm."""
input_x = g.get_node(op.input("Input")[0])
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Y")[0])
alpha = op.attr("Alpha")
beta = op.attr("Beta")
dtype = block.var(op.output("Out")[0]).dtype
dtype = str(dtype).strip().split(".")[1]
if not isinstance(alpha, _expr.Expr) and alpha != 1:
alpha = _expr.const(alpha, dtype)
x *= alpha
if not isinstance(beta, _expr.Expr) and beta != 1:
beta = _expr.const(beta, dtype)
input_x *= beta
transposed_y = _op.transpose(y, axes=[1, 0])
dense_out = _op.nn.dense(x, transposed_y)
out = dense_out + input_x
g.add_node(op.output("Out")[0], out) | 5,352,825 |
def store_trajectory_plot(graph, fname):
""" Store the resulting plot.
"""
create_controller_output_dir(CONTROLLER_OUTPUT_FOLDER)
file_name = os.path.join(CONTROLLER_OUTPUT_FOLDER, fname)
graph.savefig(file_name) | 5,352,826 |
def send_to_hipchat(
message,
token=settings.HIPCHAT_API_TOKEN,
room=settings.HIPCHAT_ROOM_ID,
sender="Trello",
color="yellow",
notify=False): # noqa
"""
Send a message to HipChat.
Returns the status code of the request. Should be 200.
"""
payload = {
'auth_token': token,
'notify': notify,
'color': color,
'from': sender,
'room_id': room,
'message': message
}
return requests.post(HIPCHAT_API_URL, data=payload).status_code | 5,352,827 |
def input(*requireds, **defaults):
"""
Returns a `storage` object with the GET and POST arguments.
See `storify` for how `requireds` and `defaults` work.
"""
from cStringIO import StringIO
def dictify(fs): return dict([(k, fs[k]) for k in fs.keys()])
_method = defaults.pop('_method', 'both')
e = ctx.env.copy()
out = {}
if _method.lower() in ['both', 'post']:
a = {}
if e['REQUEST_METHOD'] == 'POST':
a = cgi.FieldStorage(fp = StringIO(data()), environ=e,
keep_blank_values=1)
a = dictify(a)
out = dictadd(out, a)
if _method.lower() in ['both', 'get']:
e['REQUEST_METHOD'] = 'GET'
a = dictify(cgi.FieldStorage(environ=e, keep_blank_values=1))
out = dictadd(out, a)
try:
return storify(out, *requireds, **defaults)
except KeyError:
badrequest()
raise StopIteration | 5,352,828 |
def systemctl_master(command='restart'):
""" Used to start, stop or restart the master process
"""
run_command_on_master('sudo systemctl {} dcos-mesos-master'.format(command)) | 5,352,829 |
def nodes_and_edges_valid(dev, num_nodes, node_names, rep):
"""Asserts that nodes in a device ``dev`` are properly initialized, when there
are ``num_nodes`` nodes expected, with names ``node_names``, using representation ``rep``."""
if not set(dev._nodes.keys()) == {"state"}:
return False
if not len(dev._nodes["state"]) == num_nodes:
return False
for idx in range(num_nodes):
if not dev._nodes["state"][idx].name == node_names[idx]:
return False
return edges_valid(dev, num_nodes=num_nodes, rep=rep) | 5,352,830 |
def is_android(builder_cfg):
"""Determine whether the given builder is an Android builder."""
return ('Android' in builder_cfg.get('extra_config', '') or
builder_cfg.get('os') == 'Android') | 5,352,831 |
def twoSum(self, numbers, target):  # ! This approach works
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
numbers_dict = {}
for idn, v in enumerate(numbers):
if target - v in numbers_dict:
return [numbers_dict[target - v] + 1, idn + 1]
numbers_dict[v] = idn | 5,352,832 |
def test_get_bound_pressure_height(pressure, bound, hgts, interp, expected):
"""Test getting bounds in layers with various parameter combinations."""
bounds = _get_bound_pressure_height(pressure, bound, heights=hgts, interpolate=interp)
assert_array_almost_equal(bounds[0], expected[0], 5)
assert_array_almost_equal(bounds[1], expected[1], 5) | 5,352,833 |
def not_after(cert):
"""
    Gets the naive datetime of the certificate's 'not_after' field.
This field denotes the last date in time which the given certificate
is valid.
:return: Datetime
"""
return cert.not_valid_after | 5,352,834 |
def checkout():
"""fetch from home directory on server"""
local('unison {source} {dest} -force {source} {uflags}'.format(
source = REMOTE + REPO,
dest = ROOT_PATH,
uflags = UFLAGS),
capture = False) | 5,352,835 |
def parse_time_string(time_str: str) -> datetime.time:
"""Parses a string recognizable by TIME_REGEXP into a datetime.time object. If
the string has an invalid format, a ValueError is raised."""
match = TIME_REGEXP.match(time_str)
if match is None:
raise ValueError("time string {} has an invalid format".format(repr(time_str)))
groups = match.groupdict()
return datetime.time(int(groups["h"]), int(groups["m"]), int(groups["s"] or 0)) | 5,352,836 |
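# Hedged usage sketch: the module's TIME_REGEXP is not shown; the pattern below is a
# hypothetical stand-in exposing the named groups "h", "m" and optional "s" the parser expects.
import datetime
import re

TIME_REGEXP = re.compile(r"^(?P<h>\d{1,2}):(?P<m>\d{2})(?::(?P<s>\d{2}))?$")
assert parse_time_string("13:45") == datetime.time(13, 45)
assert parse_time_string("13:45:30") == datetime.time(13, 45, 30)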
def dig(start, outdir, depth=2, max_duration=360):
"""
Crawls YouTube for source material (as mp3s).
Args:
- start: the starting YouTube url
- outdir: directory to save download tracks to
- depth: how many levels of related vids to look through
- max_duration: only dl videos shorter than or equal to this in duration
"""
urls = [start]
candidates = [start]
# Dig
while depth:
candidates = sum((_get_related_video_urls(url) for url in candidates), [])
urls += candidates
depth -= 1
# Remove dupes
urls = set(urls)
print('Got {0} videos'.format(len(urls)))
# Kind of peculiar how this function has to work
def _filter(info):
if info['duration'] > max_duration:
return 'Too long'
return None
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'match_filter': _filter,
'outtmpl': os.path.join(outdir, '%(title)s-%(id)s.%(ext)s'),
}
with YoutubeDL(ydl_opts) as ydl:
ydl.download(urls) | 5,352,837 |
def get_roc_curve(y_true, y_score, title=None, with_plot=True):
"""
Plot the [Receiver Operating Characteristic][roc] curve of the given
true labels and confidence scores.
[roc]: http://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true, y_score)
auc = np.trapz(tpr, fpr)
fig = None
if with_plot:
fig = vislab.results_viz.plot_curve_with_area(
fpr, tpr, auc, 'False Positive Rate', 'True Positive Rate', 'AUC')
ax = fig.get_axes()[0]
ax.plot([0, 1], [0, 1], 'k--')
if title is not None:
ax.set_title(title)
return fig, fpr, tpr, auc | 5,352,838 |
def read_err_songs():
""" read song data from xml file to a list of dictionaries """
songfile = open('/home/gabe/python/selfishmusic/errors.xml')
soup = BS.BeautifulSoup(songfile.read())
songsxml = soup.findAll('song')
songs = []
for song in songsxml:
sd = {}
sd['songnum'] = int(get_text(song.songnum))
sd['title'] = get_text(song.title)
sd['artist'] = get_text(song.artist)
date = get_text(song.date)
date = [x.strip(' ,') for x in date.split(' ')]
sd['date'] = datetime.date(month=MONTHS.index(date[0]) + 1,
day=int(date[1]),
year=int(date[2]))
sd['lyrics'] = get_text(song.lyrics)
sd['found_title'] = get_text(song.found_title)
sd['found_artist'] = get_text(song.found_artist)
songs.append(sd)
songfile.close()
return songs | 5,352,839 |
def test_dummy_user_service_current_user():
"""
Tests that get_current_user() works on a dummy user service.
"""
user = XBlockUser(full_name="tester")
user_service = SingleUserService(user)
current_user = user_service.get_current_user()
assert current_user == user
assert current_user.full_name == "tester"
# assert that emails is an Iterable but not a string
assert isinstance(current_user.emails, collections.Iterable)
assert not isinstance(current_user.emails, (str, bytes))
# assert that opt_attrs is a Mapping
assert isinstance(current_user.opt_attrs, collections.Mapping) | 5,352,840 |
def p_useStmt(t):
"""useStmt : R_USE ID"""
t[0] = instruction.useDataBase(t[2], t.slice[1].lineno, t.slice[1].lexpos)
repGrammar.append(t.slice) | 5,352,841 |
def word_value(word: str) -> int:
"""Returns the sum of the alphabetical positions of each letter in word."""
return (0 if word == '' else
word_value(word[:-1]) + alpha.letter_index_upper(word[-1])) | 5,352,842 |
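# Hedged check: `alpha.letter_index_upper` is assumed to return the 1-based alphabet
# position of a letter, so "SKY" scores 19 + 11 + 25 = 55.
assert word_value('') == 0
assert word_value('SKY') == 55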
def show_ads(args, api=None):
"""
Print list of all ads
"""
if not api:
api = kijiji_api.KijijiApi()
api.login(args.ssid)
all_ads = sorted(api.get_all_ads(),
key=lambda k: k[args.sort_key], reverse=args.sort_reverse)
print(" id ", "page", "views", " title")
[print("{ad_id:10} {rank:4} {views:5} '{title}'".format(
ad_id=ad['id'],
rank=ad['rank'],
views=ad['views'],
title=ad['title']
)) for ad in all_ads] | 5,352,843 |
def test_ec_number_retrieval(df_series, null_logger):
"""Test 'get_ec_numbers'."""
get_uniprot_proteins.get_ec_numbers(df_series, null_logger) | 5,352,844 |
def status():
"""
Returns json response of api status
Returns:
JSON: json object
"""
status = {
"status": "OK"
}
return jsonify(status) | 5,352,845 |
def celcius_to_farenheit(x):
"""calculate celcius to farenheit"""
farenheit = (9*x/5) + 32
return farenheit | 5,352,846 |
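# Quick sanity check of the conversion formula F = 9C/5 + 32.
assert celcius_to_farenheit(0) == 32
assert celcius_to_farenheit(100) == 212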
def CheckFlags(node_name, report_per_node, warnings, errors,
flags, warning_helper, error_helper):
"""Check the status flags in each node and bookkeep the results.
Args:
node_name: Short name of the node.
report_per_node: Structure to record warning/error messages per node.
Its type should be collections.defaultdict(list).
warnings: Structure to record nodes that raise each warning type.
Its type should be collections.defaultdict(list).
errors: Structure to record nodes that raise each error type.
Its type should be collections.defaultdict(list).
flags: The status flags to check against.
warning_helper: The EnumHelper for warnings.
error_helper: The EnumHelper for errors.
Returns:
True if there are any warnings/errors.
"""
any_warning_or_error = False
if warning_helper:
for warning_value in warning_helper.Values():
warning_name = warning_helper.ShortName(warning_value)
if avionics_util.CheckWarning(flags, warning_value):
report_per_node[node_name].append(('WARNING', warning_name))
warnings[warning_name].append(node_name)
any_warning_or_error = True
if error_helper:
for error_value in error_helper.Values():
error_name = error_helper.ShortName(error_value)
if avionics_util.CheckError(flags, error_value):
report_per_node[node_name].append(('ERROR', error_name))
errors[error_name].append(node_name)
any_warning_or_error = True
return any_warning_or_error | 5,352,847 |
def EnrollmentTransaction():
"""
:return:
"""
return b'\x20' | 5,352,848 |
def nonCommonWords(words):
"""
    Calculate the seven most frequent words that aren't common.
    """
    print("\nSeven most frequent words that aren't common")
with open("common-words.txt") as dFile:
res = list()
lst = dFile.readlines()
# lst = [re.sub('[^a-zA-Z]+', '', x) for x in lst]
lst = [str.rstrip(x) for x in lst]
lst = [x.lower() for x in lst]
for word in words:
if word[1] not in lst:
res.append(word)
res.sort(reverse=True)
for key, val in res[:7]:
print(str(val) +" : " + str(key))
#lst = list()
#for key, value in res:
# lst.append((value, key))
correctSpelledWords(res) | 5,352,849 |
def map_inheritance_and_composition(list_of_include_groups, use_old_discovery_mode):
"""
This function maps the relationships between the files which are related and fills the global
"includes" and "inheritance" lists with tuples of the form: (includer, included).
This function also populates the "classes" list.
:param list_of_include_groups: A list of lists, each of the form [file_name_A, file_name_B, file_name_C, etc.] where
file_name_B and file_name_C, etc. are all included BY file A.
:param use_old_discovery_mode: Whether or not to use the old way of discovering subsystems (heuristics). The new
way uses the directory structure to determine subsystems.
"""
print("Mapping relationships and identifying subsystems...")
for include_group in list_of_include_groups:
# For each include_group, determine the type of relationship it is
if len(include_group) > 1:
parent_name = include_group[0]
# Add this group's head to the list of classes already found
if {"name": parent_name} not in classes:
classes.append({"name": parent_name})
rest_of_layer = include_group[1:]
print("Mapping relationships for " + parent_name)
for item in rest_of_layer:
# If the item is not in the list of classes, add it
if {"name": item} not in classes:
classes.append({"name": item})
# Determine the type of relationship between the parent and this item
relationship = (parent_name, item)
if is_inheritance(parent_name, item) and not relationship in inheritance:
print(parent_name + " INHERITS from " + item)
inheritance.append(relationship)
elif relationship not in includes and not relationship in inheritance: # Don't include if already in inheritance
print(parent_name + " DEPENDS on " + item)
includes.append(relationship)
# At this point, the "classes" list is filled with all the files that this script has examined, and the
# "inheritance" and "includes" lists are also filled with the correct relationships. Use them to determine
# subsystems and provide them with the correct colors accordingly.
map_subsystems(use_old_discovery_mode) | 5,352,850 |
def sentinel_id(vocabulary, return_value=None):
"""Token ID to use as a sentinel.
By default, we use the last token in the vocabulary.
Args:
vocabulary: a t5.data.vocabularies.Vocabulary
return_value: an optional integer
Returns:
an integer
"""
if return_value is not None:
return return_value
return vocabulary.vocab_size - 1 | 5,352,851 |
def goodFits(bestfitloc='posteriorpdf.fits', Nfits=12, Ngood=5000,
cleanup=True, interactive=True, showOptical=False, threshold=1.2):
"""
Read posterior PDF and draw Nfits realizations from the final Ngood models
at random. Plot the model from each realization and compare to the data.
Also plot the residuals obtained after subtracting the model from the data
and compare to the data. By default: Nfits = 12, Ngood=5000.
Parameters
----------
threshold: float
in mJy, cleaning threshold
"""
import modifypdf
import numpy
# read the posterior PDFs
print("Found posterior PDF file: {:s}".format(bestfitloc))
fitresults = fits.getdata(bestfitloc)
fitresults = fitresults[-Ngood:]
fitresults = modifypdf.prune(fitresults)
# get keys
from astropy.table import Table
fitKeys = Table.read(bestfitloc).keys()
# select the random realizations model
Nunprune = len(fitresults)
realids = numpy.floor(numpy.random.uniform(0, Nunprune, Nfits))
for ifit in range(Nfits):
realid = numpy.int(realids[ifit])
fitresult = fitresults[realid]
tag = 'goodfit' + str(realid).zfill(4)
printFitParam(fitresult, fitKeys)
visualutil.plotFit(config, fitresult, threshold, tag=tag, showOptical=showOptical,
cleanup=cleanup, interactive=interactive) | 5,352,852 |
def create_loompy_raw_counting(hybridizations_infos,converted_positions,
hybridization,flt_rawcnt_config,hyb_dir,
processing_hyb,counting_gene_dirs):
"""
Function used to write the counting results in a loom file
Parameters:
-----------
hybridizations_infos: dict
Parsed information on the experiment.
converted_positions: dict
coords of the images for all hybridization.
hybridization: str
hybridization processed (ex. Hybridization2)
flt_rawcnt_config: dict
        Parsed filtering and raw counting configuration file
(Filtering_raw_counting.config.yaml)
counting_gene_dirs: list
List of the directories containing the counts
"""
# Loompy matrix column attributes
pos_att = np.arange(0,len(converted_positions[hybridization].keys()))
# Create dictionaries to store the data that have one array for each image
# position
counting_dict = OrderedDict()
selected_peaks_dict = OrderedDict()
thr_array_dict = OrderedDict()
peaks_coords_dict = OrderedDict()
total_peaks_dict = OrderedDict()
selected_peaks_int_dict = OrderedDict()
gene_idx_pos = 0
gene_idx_list = []
gene_list = list(hybridizations_infos[hybridization].keys())
gene_list = [gene for gene in gene_list if gene not in flt_rawcnt_config['skip_genes_counting'] ]
gene_list = [gene for tag in flt_rawcnt_config['skip_tags_counting'] for gene in gene_list if tag not in gene]
# Create matrices for loompy layers
total_counts_mat = np.zeros([len(gene_list),len(converted_positions[hybridization].keys())])
selected_thr_mat = np.zeros([len(gene_list),len(converted_positions[hybridization].keys())])
calculated_thr_mat = np.zeros([len(gene_list),len(converted_positions[hybridization].keys())])
thr_idx_mat = np.zeros([len(gene_list),len(converted_positions[hybridization].keys())])
stringency_mat = np.zeros([len(gene_list),len(converted_positions[hybridization].keys())])
array_positions = np.sort(list(converted_positions[hybridization].keys()))
if gene_list:
for gene in gene_list:
gene_idx_list.append(gene+'_'+hybridization)
counting_gene_dir = [fdir for fdir in counting_gene_dirs if gene in fdir][0]
counting_files_list = glob.glob(counting_gene_dir+'*.pkl')
counting_dict[gene] =OrderedDict()
counting_dict[gene]['selected_peaks'] =OrderedDict()
counting_dict[gene]['thr_array'] =OrderedDict()
counting_dict[gene]['peaks_coords'] =OrderedDict()
counting_dict[gene]['total_peaks'] =OrderedDict()
counting_dict[gene]['selected_peaks_int'] =OrderedDict()
# Process the files according to sorted position
for pos in array_positions:
counting_file = [cf for cf in counting_files_list if 'pos_'+str(pos) in cf][0]
countings = pickle.load(open(counting_file,'rb'))
# pos = np.int(counting_file.split('/')[-1].split('_')[-1].split('.')[0])
# loompy layers construction
selected_thr_mat[gene_idx_pos,pos] = countings['selected_thr']
calculated_thr_mat[gene_idx_pos,pos] = countings['calculated_thr']
thr_idx_mat[gene_idx_pos,pos] = countings['thr_idx']
stringency_mat[gene_idx_pos,pos] = countings['stringency']
if isinstance(countings['selected_peaks'], list):
total_counts_mat[gene_idx_pos,pos] = len(countings['selected_peaks'])
else:
total_counts_mat[gene_idx_pos,pos] = 0
# Dictionaries construction
counting_dict[gene]['selected_peaks'][pos] = countings['selected_peaks']
counting_dict[gene]['thr_array'][pos] = countings['thr_array']
counting_dict[gene]['peaks_coords'][pos] = countings['peaks_coords']
counting_dict[gene]['total_peaks'][pos] = countings['total_peaks']
counting_dict[gene]['selected_peaks_int'][pos] = countings['selected_peaks_int']
gene_idx_pos +=1
# Create loompy file
loom_fname = hyb_dir+processing_hyb+'_raw_counting.loom'
loom_hdl = loompy.create(filename=loom_fname,matrix=total_counts_mat,row_attrs={'genes':np.array(gene_idx_list)},col_attrs={'image_pos':np.array(pos_att)})
loom_hdl.set_layer(name='selected_thr',matrix=selected_thr_mat)
loom_hdl.set_layer(name='calculated_thr',matrix=calculated_thr_mat)
loom_hdl.set_layer(name='thr_idx',matrix=thr_idx_mat)
loom_hdl.set_layer(name='stringency',matrix=stringency_mat)
loom_hdl.close()
# Add extra data to the loom file
with h5py.File(loom_fname,'r+') as loom_hdl:
counting_arrays_grp = loom_hdl.create_group('counting_arrays')
for gene in counting_dict.keys():
gene_grp = counting_arrays_grp.create_group(gene+'_'+hybridization)
selected_peaks_grp = gene_grp.create_group('selected_peaks')
thr_array_grp = gene_grp.create_group('thr_array')
peaks_coords_grp = gene_grp.create_group('peaks_coords')
total_peaks_grp = gene_grp.create_group('total_peaks')
selected_peaks_int_grp = gene_grp.create_group('selected_peaks_int')
for pos in array_positions:
selected_peaks_grp.create_dataset(str(pos), data=counting_dict[gene]['selected_peaks'][pos])
thr_array_grp.create_dataset(str(pos),data=counting_dict[gene]['thr_array'][pos])
total_peaks_grp.create_dataset(str(pos),data=counting_dict[gene]['total_peaks'][pos])
selected_peaks_int_grp.create_dataset(str(pos),data=counting_dict[gene]['selected_peaks_int'][pos])
pos_sbgrp = peaks_coords_grp.create_group(str(pos))
if isinstance(counting_dict[gene]['peaks_coords'][pos], list):
for idx,peaks in enumerate(counting_dict[gene]['peaks_coords'][pos]):
pos_sbgrp.create_dataset(str(idx),data=peaks)
else:
pos_sbgrp.create_dataset(str(idx),data=peaks) | 5,352,853 |
def create_signature(api_key, method, host, path, secret_key, get_params=None):
"""
    Create a signature.
    :param get_params: dict, extra parameters (url params) sent when using the GET method
:return:
"""
sorted_params = [
("AccessKeyId", api_key),
("SignatureMethod", "HmacSHA256"),
("SignatureVersion", "2"),
("Timestamp", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"))
]
if get_params:
sorted_params.extend(list(get_params.items()))
sorted_params = list(sorted(sorted_params))
encode_params = urllib.parse.urlencode(sorted_params)
payload = [method, host, path, encode_params]
payload = "\n".join(payload)
payload = payload.encode(encoding="UTF8")
secret_key = secret_key.encode(encoding="UTF8")
digest = hmac.new(secret_key, payload, digestmod=hashlib.sha256).digest()
signature = base64.b64encode(digest)
params = dict(sorted_params)
params["Signature"] = signature.decode("UTF8")
return params | 5,352,854 |
def test_pi():
"""Check that PI has expected value on systemcd """
import math
assert math.pi == 3.141592653589793 | 5,352,855 |
def measure_fwhm(image, plot=True, printout=True):
"""
Find the 2D FWHM of a background/continuum subtracted cutout image of a target.
The target should be centered and cropped in the cutout.
Use lcbg.utils.cutout for cropping targets.
FWHM is estimated using the sigmas from a 2D gaussian fit of the target's flux.
The FWHM is returned as a tuple of the FWHM in the x and y directions.
Parameters
----------
image : array like
Input background/continuum subtracted cutout image.
printout : bool
Print out info.
plot : bool
To plot fit or not.
Returns
-------
tuple : array of floats
FWHM in x and y directions.
"""
# Find FWHM
# ----------
fitted_line = fit_gaussian2d(image)
# Find fitted center
x_mean, y_mean = [i.value for i in [fitted_line.x_mean, fitted_line.y_mean]]
# Estimate FWHM using gaussian_sigma_to_fwhm
x_fwhm = fitted_line.x_stddev * gaussian_sigma_to_fwhm
y_fwhm = fitted_line.y_stddev * gaussian_sigma_to_fwhm
# Find half max
hm = fitted_line(x_mean, y_mean) / 2.
# Find the mean of the x and y direction
mean_fwhm = np.mean([x_fwhm, y_fwhm])
mean_fwhm = int(np.round(mean_fwhm))
# Print info about fit and FWHM
# ------------------------------
if printout:
print("Image Max: {}".format(image.max()))
print("Amplitude: {}".format(fitted_line.amplitude.value))
print("Center: ({}, {})".format(x_mean, y_mean))
print("Sigma = ({}, {})".format(fitted_line.x_stddev.value,
fitted_line.y_stddev.value, ))
print("Mean FWHM: {} Pix ".format(mean_fwhm))
print("FWHM: (x={}, y={}) Pix ".format(x_fwhm, y_fwhm))
if plot:
fig, [ax0, ax1, ax2, ax3] = plot_fit(image, fitted_line)
# Make x and y grid to plot to
y_arange, x_arange = np.mgrid[:image.shape[0], :image.shape[1]]
# Plot input image with FWHM and center
# -------------------------------------
ax0.imshow(image, cmap='gray_r')
ax0.axvline(x_mean - x_fwhm / 2, c='c', linestyle="--", label="X FWHM")
ax0.axvline(x_mean + x_fwhm / 2, c='c', linestyle="--")
ax0.axhline(y_mean - y_fwhm / 2, c='g', linestyle="--", label="Y FWHM")
ax0.axhline(y_mean + y_fwhm / 2, c='g', linestyle="--")
ax0.set_title("Center and FWHM Plot")
ax0.legend()
# Plot X fit
# ----------
ax2.axvline(x_mean, linestyle="-", label="Center")
ax2.axvline(x_mean - x_fwhm / 2, c='c', linestyle="--", label="X FWHM")
ax2.axvline(x_mean + x_fwhm / 2, c='c', linestyle="--")
ax2.axhline(hm, c="black", linestyle="--", label="Half Max")
ax2.legend()
# Plot Y fit
# ----------
ax3.axvline(y_mean, linestyle="-", label="Center")
ax3.axvline(y_mean - y_fwhm / 2, c='g', linestyle="--", label="Y FWHM")
ax3.axvline(y_mean + y_fwhm / 2, c='g', linestyle="--")
ax3.axhline(hm, c="black", linestyle="--", label="Half Max")
ax3.legend()
plt.show()
return np.array([x_fwhm, y_fwhm]) | 5,352,856 |
def classFactory(iface): # pylint: disable=invalid-name
"""Load MappiaPublisher class from file MappiaPublisher.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .mappia_publisher import MappiaPublisherPlugin
return MappiaPublisherPlugin() | 5,352,857 |
def mongo_insert_canary(mongo, db_name, coll_name, doc):
""" Inserts a canary document with 'j' True. Returns 0 if successful. """
LOGGER.info("Inserting canary document %s to DB %s Collection %s", doc, db_name, coll_name)
coll = mongo[db_name][coll_name].with_options(
write_concern=pymongo.write_concern.WriteConcern(j=True))
res = coll.insert_one(doc)
return 0 if res.inserted_id else 1 | 5,352,858 |
def is_dict_specifier(value):
# type: (object) -> bool
""" Check if value is a supported dictionary.
Check if a parameter of the task decorator is a dictionary that specifies
at least Type (and therefore can include things like Prefix, see binary
decorator test for some examples).
:param value: Decorator value to check.
:return: True if value is a dictionary that specifies at least the Type of
the key.
"""
return isinstance(value, dict) and Type in value | 5,352,859 |
def logging(nb_buckets, hidden_sizes, denoising_rate):
"""
Print on console some information
"""
print("_"*30)
print("")
print('{:^30}'.format(" KFOLD %s"%nb_buckets))
print("_"*30)
print("{:^30}".format("Hidden_sizes: %s"%hidden_sizes))
print("{:^30}".format("Denoising rate: %s"%denoising_rate))
print("_"*30)
print("") | 5,352,860 |
def rm_path():
"""
Remove input directory options.input_path
:return: void
"""
shutil.rmtree(options.input_path) | 5,352,861 |
def parse_star_count(stars_str):
"""Parse strings like 40.3k and get the no. of stars as a number"""
stars_str = stars_str.strip()
return int(float(stars_str[:-1]) * 1000) if stars_str[-1] == 'k' else int(stars_str) | 5,352,862 |
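# Hedged examples of the two accepted formats: a trailing 'k' multiplier
# (as in the docstring's "40.3k") or a plain integer string.
assert parse_star_count('40.5k') == 40500
assert parse_star_count(' 987 ') == 987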
def test_auto_loss_scaling_clip_final_loss_scale(w_dtype, loss_dtype_str,
loss_scaling):
"""Test whether the final loss scale is correctly clipped at 2^15, when the
weights are in fp16 or the loss (and final loss scale) are in fp16. Also
check whether the up/down scaling are matched.
To do this, a simple model is built as follows:
W = [1] -- MatMul ------ MSE --- Loss
X = [1] -' Y = [1] -'
Since the output of the model matches the labels, the loss, and consequently
the gradients are all 0, which will cause the final loss scale to be doubled
on every iteration. The test checks if the final loss scale is clipped if
the conditions above are met.
"""
# Test params
learning_rate = 0.1
    bps = 20  # Batches per step
should_clip = w_dtype == np.float16 or loss_dtype_str == 'FLOAT16'
loss_dtype = np.float16 if loss_dtype_str == 'FLOAT16' else np.float32
# Tensor names to anchor
fls = "finalLossScale"
ils = "finalInverseLossScale_0"
lsf = "lossScaleUpdateFactor"
builder = popart.Builder()
# Inputs, weights, labels.
x = builder.addInitializedInputTensor(np.array([1], dtype=w_dtype))
w = builder.addInitializedInputTensor(np.array([1], dtype=w_dtype))
y = builder.addInitializedInputTensor(np.array([1], dtype=w_dtype))
# Forward
y_pred = builder.aiOnnx.matmul([x, w])
# Loss
e = builder.aiOnnx.sub([y, y_pred]) # error
se = builder.aiOnnx.mul([e, e]) # squared error
se = builder.aiOnnx.cast([se], loss_dtype_str)
optimizer = popart.SGD({
"defaultLearningRate": (learning_rate, False),
"defaultMomentum": (0, True),
"lossScaling": (loss_scaling, False)
})
opts = popart.SessionOptions()
opts.automaticLossScalingSettings.enabled = True
opts.engineOptions = {"debug.nanOverflowException": "false"}
# Should run in Sim or Hw, because of fp16 not being accurate in IpuModel.
device = popart.DeviceManager().createSimDevice({
"numIPUs": 1,
"tilesPerIPU": 4
})
session = popart.TrainingSession(
fnModel=builder.getModelProto(),
deviceInfo=device,
dataFlow=popart.DataFlow(
bps, {
fls: popart.AnchorReturnType("All"),
ils: popart.AnchorReturnType("All"),
lsf: popart.AnchorReturnType("All"),
}),
loss=se,
optimizer=optimizer,
userOptions=opts,
)
session.prepareDevice()
session.weightsFromHost()
anchors = session.initAnchorArrays()
session.run(popart.PyStepIO({}, anchors))
expected_lsf = np.geomspace(2**0, 2**(bps - 1), num=bps, dtype=loss_dtype)
if should_clip:
clip_at = np.array(2**15 / loss_scaling, dtype=loss_dtype)
expected_lsf = np.clip(expected_lsf, 0, clip_at)
expected_fls = expected_lsf * loss_scaling
expected_ils = (learning_rate / loss_scaling) / expected_lsf.astype(
np.float32)
assert np.allclose(anchors[lsf], expected_lsf)
assert np.allclose(anchors[fls], expected_fls)
assert np.allclose(anchors[ils], expected_ils) | 5,352,863 |
def test_property_address_create_from_string():
    """
    Property address should be able to be created from a string
"""
address_full = Address.create_from_string(
'City, test street, Test region, Country')
address_short = Address.create_from_string('City, Country')
address_country = Address.create_from_string('Country')
address_empty = Address.create_from_string('')
assert address_full.country == 'Country'
assert address_full.region == 'Test region'
assert address_full.address == 'City, test street'
assert address_short.country == 'Country'
assert address_short.region is None
assert address_short.address == 'City'
assert address_country.country == 'Country'
assert address_country.region is None
assert address_country.address is None
assert address_empty is None | 5,352,864 |
def execute_reactivation_event(prng, cells):
"""
Randomly choose between reactivating a random quiescent cell
(i.e. make it a cycling cell) versus leaving it alone.
This function modifies the list of cells in place.
"""
Q_cells = [cell for cell in cells if cell.cell_cycle_state == 'quiescent']
random_Q_cell = prng.choice(Q_cells)
# mu = calculate_mu(random_Q_cell, C_cells, Q_cells)
mu = 0
if prng.uniform(0, 1) < float(mu):
random_Q_cell.cell_cycle_state = 'cycling' | 5,352,865 |
def is_libreoffice_sdk_available() -> bool:
""" do we have idlc somewhere (we suppose it is made available in current path var.) ? """
return shutil.which("idlc") is not None | 5,352,866 |
def set_metrics_file(filenames, metric_type):
"""Create metrics from data read from a file.
Args:
filenames (list of str):
Paths to files containing one json string per line (potentially base64
encoded)
metric_type (ts_mon.Metric): any class deriving from ts_mon.Metric.
For ex. ts_mon.GaugeMetric.
Returns:
metric (list of metric_type): the metric instances, filled.
"""
if not filenames:
return []
metrics = []
for filename in filenames:
with open(filename, 'r') as f:
lines = f.read()
# Skip blank lines because it helps humans.
lines = [line for line in lines.splitlines() if line.strip()]
metrics.extend(set_metrics(lines, metric_type))
return metrics | 5,352,867 |
def sortino_ratio_nb(returns, ann_factor, required_return_arr):
"""2-dim version of `sortino_ratio_1d_nb`.
`required_return_arr` should be an array of shape `returns.shape[1]`."""
result = np.empty(returns.shape[1], dtype=np.float_)
for col in range(returns.shape[1]):
result[col] = sortino_ratio_1d_nb(returns[:, col], ann_factor, required_return=required_return_arr[col])
return result | 5,352,868 |
def format_env_var(name: str, value: str) -> str:
"""
Formats environment variable value.
Formatter is chosen according to the kind of variable.
:param name: name of environment variable
:param value: value of environment variable
:return: string representation of value in appropriate format
"""
formatter = get_formatter(name)
new = str(value)
new = formatter(new)
new = escape(new)
new = re.sub("\n", "<br>", new)
return new | 5,352,869 |
def update_item(namespace, item_def):
"""
Update item to namespace
"""
db, namespace_name = namespace
log_operation.info(f"Update item: {item_def} to namespace {namespace_name}")
db.item_upsert(namespace_name, item_def) | 5,352,870 |
def evenly_divisible(n):
""" Idea:
- Find factors of numbers 1 to n. Use DP to cache results bottom up.
- Amongst all factors, we have to include max counts of prime factors.
    - For example, in 1 .. 10, 2 has to be included 3 times since 8 = 2 ^ 3
"""
max_counts = Counter()
for n in range(n, 1, -1):
factors = prime_factorize(n)
# Update max counts
for k, v in factors.iteritems():
max_counts[k] = max(max_counts[k], v)
res = 1
for k, v in max_counts.iteritems():
res *= k ** v
return res | 5,352,871 |
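# Hedged check: assuming the module's `prime_factorize` returns a mapping of
# prime -> exponent (Python 2 style code, given `iteritems`), the result is the
# LCM of 1..n, e.g. LCM(1..10) = 2^3 * 3^2 * 5 * 7 = 2520.
assert evenly_divisible(10) == 2520
assert evenly_divisible(20) == 232792560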
def test_adafactor_compile2():
""" test adafactor compile """
inputs = Tensor(np.ones([1, 64]).astype(np.float32))
label = Tensor(np.zeros([1, 10]).astype(np.float32))
net = Net()
net.set_train()
loss = nn.SoftmaxCrossEntropyWithLogits()
optimizer = AdaFactor(net.trainable_params(), learning_rate=None, weight_decay=0.9)
net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label) | 5,352,872 |
def gradients(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1,
epsscale=0.5):
"""
Calculate the partial derivatives of a function at a set of values. The
derivatives are calculated using the central difference, using an iterative
method to check that the values converge as step size decreases.
Parameters
----------
vals: array_like
A set of values, that are passed to a function, at which to calculate
the gradient of that function
func:
A function that takes in an array of values.
releps: float, array_like, 1e-3
The initial relative step size for calculating the derivative.
abseps: float, array_like, None
The initial absolute step size for calculating the derivative.
        This overrides `releps` if set; otherwise `releps` is used.
mineps: float, 1e-9
The minimum relative step size at which to stop iterations if no
convergence is achieved.
epsscale: float, 0.5
        The factor by which releps is scaled in each iteration.
Returns
-------
grads: array_like
An array of gradients for each non-fixed value.
"""
grads = np.zeros(len(vals))
# maximum number of times the gradient can change sign
flipflopmax = 10.
# set steps
if abseps is None:
if isinstance(releps, float):
eps = np.abs(vals) * releps
eps[eps == 0.] = releps # if any values are zero set eps to releps
teps = releps * np.ones(len(vals))
elif isinstance(releps, (list, np.ndarray)):
if len(releps) != len(vals):
raise ValueError("Problem with input relative step sizes")
eps = np.multiply(np.abs(vals), releps)
eps[eps == 0.] = np.array(releps)[eps == 0.]
teps = releps
else:
raise RuntimeError("Relative step sizes are not a recognised type!")
else:
if isinstance(abseps, float):
eps = abseps * np.ones(len(vals))
elif isinstance(abseps, (list, np.ndarray)):
if len(abseps) != len(vals):
raise ValueError("Problem with input absolute step sizes")
eps = np.array(abseps)
else:
raise RuntimeError("Absolute step sizes are not a recognised type!")
teps = eps
# for each value in vals calculate the gradient
count = 0
for i in range(len(vals)):
# initial parameter diffs
leps = eps[i]
cureps = teps[i]
flipflop = 0
# get central finite difference
fvals = np.copy(vals)
bvals = np.copy(vals)
# central difference
fvals[i] += 0.5 * leps # change forwards distance to half eps
bvals[i] -= 0.5 * leps # change backwards distance to half eps
cdiff = (func(fvals) - func(bvals)) / leps
while 1:
fvals[i] -= 0.5 * leps # remove old step
bvals[i] += 0.5 * leps
# change the difference by a factor of two
cureps *= epsscale
if cureps < mineps or flipflop > flipflopmax:
# if no convergence set flat derivative (TODO: check if there is a better thing to do instead)
logger.warn("Derivative calculation did not converge: setting flat derivative.")
grads[count] = 0.
break
leps *= epsscale
# central difference
fvals[i] += 0.5 * leps # change forwards distance to half eps
bvals[i] -= 0.5 * leps # change backwards distance to half eps
cdiffnew = (func(fvals) - func(bvals)) / leps
if cdiffnew == cdiff:
grads[count] = cdiff
break
# check whether previous diff and current diff are the same within reltol
rat = (cdiff / cdiffnew)
if np.isfinite(rat) and rat > 0.:
# gradient has not changed sign
if np.abs(1. - rat) < reltol:
grads[count] = cdiffnew
break
else:
cdiff = cdiffnew
continue
else:
cdiff = cdiffnew
flipflop += 1
continue
count += 1
return grads | 5,352,873 |
def do_open(user_input):
"""identical to io.open in PY3"""
try:
with open(user_input) as f:
return f.read()
except Exception:
return None | 5,352,874 |
def likely_solution(players):
""" Return tuples of cards with the
number of players who don't have them
"""
likely = likely_solution_nums(players)
return sorted([(ALLCARDS[n], ct) for n, ct in likely],
key=lambda tp: tp[1], reverse=True) | 5,352,875 |
def cns_extended_inp(mtf_infile, pdb_outfile):
"""
    Create CNS input script (.inp) to create an extended PDB file
from molecular topology file (.mtf)
Parameters
----------
mtf_infile : str
Path to .mtf topology file
pdb_outfile : str
Path where extended .pdb file will be stored
Returns
-------
str:
Input script
"""
return _cns_render_template(
"generate_extended",
{
"mtf_infile": mtf_infile,
"pdb_outfile": pdb_outfile,
}
) | 5,352,876 |
def index(request, response_format='html'):
"""Sales index page"""
query = Q(status__hidden=False)
if request.GET:
if 'status' in request.GET and request.GET['status']:
query = _get_filter_query(request.GET)
else:
query = query & _get_filter_query(request.GET)
orders = Object.filter_by_request(
request, SaleOrder.objects.filter(query), mode="r")
filters = OrderFilterForm(request.user.profile, '', request.GET)
statuses = Object.filter_by_request(request, SaleStatus.objects, mode="r")
massform = MassActionForm(request.user.profile)
return render_to_response('sales/index',
{'orders': orders,
'filters': filters,
'statuses': statuses,
'massform': massform
},
context_instance=RequestContext(request), response_format=response_format) | 5,352,877 |
def getDroppableFilename(mime_data):
"""
Returns the filename of a file dropped into the canvas (if it was
accepted via @see isDroppableMimeType).
"""
if mime_data.hasUrls():
# Return the first locally existing file
for url in mime_data.urls():
fpath = url.toLocalFile()
if os.path.exists(fpath):
return fpath.strip()
if mime_data.hasText():
txt = mime_data.text()
if txt.startswith('file://'):
return txt[7:].strip()
raise ValueError('Unsupported QMimeData for dropped file!') | 5,352,878 |
def team_points_leaders(num_results=None, round_name=None):
    """Returns the team points leaders across all groups, as a sequence of dicts
    with profile__team__name and points keys.
    """
size = team_normalize_size()
if size:
entries = score_mgr.team_points_leaders(round_name=round_name)
else:
entries = score_mgr.team_points_leaders(num_results=num_results, round_name=round_name)
if entries:
if size:
for entry in entries:
team = Team.objects.get(name=entry["profile__team__name"])
if team.size:
entry["points"] = int(entry["points"] * float(size / team.size))
# resort the entries after the normalization
entries = sorted(entries, key=lambda e: e["points"], reverse=True)
return entries[:num_results]
else:
return entries
else:
results = Team.objects.all().extra(
select={'profile__team__name': 'name', 'points': 0}).values(
'profile__team__name', 'points')
if num_results:
results = results[:num_results]
return results | 5,352,879 |
def with_color(text, color, bold=False):
"""
Return a ZSH color-formatted string.
Arguments
---------
text: str
text to be colored
color: str
ZSH color code
bold: bool
whether or not to make the text bold
Returns
-------
str
string with ZSH color-coded text
"""
color_fmt = '$fg_bold[{:s}]' if bold else '$fg[{:s}]'
return '%{{{:s}%}}{:s}%{{$reset_color%}}'.format(
color_fmt.format(color), text) | 5,352,880 |
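# Minimal usage sketch for with_color; the color name and prompt text are
# illustrative. The output follows ZSH prompt-escape syntax.
segment = with_color("repo:main", "green", bold=True)
print(segment)  # %{$fg_bold[green]%}repo:main%{$reset_color%}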
def sample_test():
"""Return sample test json."""
return get_sample_json("test.json") | 5,352,881 |
def dataframe_from_inp(inp_path, section, additional_cols=None, quote_replace=' ', **kwargs):
"""
    Create a dataframe from a section of an INP file.
    :param inp_path: path to the INP file
    :param section: section header to extract, e.g. "[JUNCTIONS]"
    :param additional_cols: extra column names appended to the standard columns for this section
    :param quote_replace: string used to replace empty double quotes ("") in the section text
    :param kwargs: passed through to extract_section_of_file
    :return: pandas DataFrame with the section contents (empty if the section is missing)
"""
# format the section header for look up in headers OrderedDict
sect = remove_braces(section).upper()
# get list of all section headers in inp to use as section ending flags
headers = get_inp_sections_details(inp_path, include_brackets=False)
if sect not in headers:
warnings.warn(f'{sect} section not found in {inp_path}')
return pd.DataFrame()
# extract the string and read into a dataframe
start_string = format_inp_section_header(section)
end_strings = [format_inp_section_header(h) for h in headers.keys()]
s = extract_section_of_file(inp_path, start_string, end_strings, **kwargs)
# replace occurrences of double quotes ""
s = s.replace('""', quote_replace)
# and get the list of columns to use for parsing this section
# add any additional columns needed for special cases (build instructions)
additional_cols = [] if additional_cols is None else additional_cols
cols = headers[sect]['columns'] + additional_cols
if headers[sect]['columns'][0] == 'blob':
# return the whole row, without specific col headers
return pd.read_csv(StringIO(s), delim_whitespace=False)
else:
try:
df = pd.read_csv(StringIO(s), header=None, delim_whitespace=True,
skiprows=[0], index_col=0, names=cols)
except IndexError:
print(f'failed to parse {section} with cols: {cols}. head:\n{s[:500]}')
raise
return df | 5,352,882 |
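# Minimal usage sketch for dataframe_from_inp, assuming "model.inp" is a
# SWMM-style INP file that contains a [JUNCTIONS] section; the column names
# come from the module's get_inp_sections_details lookup, not from this call.
junctions = dataframe_from_inp("model.inp", "[JUNCTIONS]")
print(junctions.head())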
def compositional_stratified_splitting(dataset, perc_train):
"""Given the dataset and the percentage of data you want to extract from it, method will
    apply stratified sampling where X is the dataset and Y are the category values for each datapoint.
    In the case each structure contains 2 types of atoms, the category will
    be constructed as such: number of atoms of type 1 + number of atoms of type 2 * 100.
    Parameters
    ----------
    dataset: [Data]
        A list of Data objects representing a structure that has atoms.
    perc_train: float
        Fraction of the dataset assigned to the training split.
    Returns
    ----------
    trainset, valset, testset: [Data]
        Stratified train/validation/test splits of the original dataset.
"""
dataset_categories = create_dataset_categories(dataset)
dataset, dataset_categories = duplicate_unique_data_samples(
dataset, dataset_categories
)
sss_train = sklearn.model_selection.StratifiedShuffleSplit(
n_splits=1, train_size=perc_train, random_state=0
)
trainset, val_test_set = generate_partition(sss_train, dataset, dataset_categories)
val_test_dataset_categories = create_dataset_categories(val_test_set)
val_test_set, val_test_dataset_categories = duplicate_unique_data_samples(
val_test_set, val_test_dataset_categories
)
sss_valtest = sklearn.model_selection.StratifiedShuffleSplit(
n_splits=1, train_size=0.5, random_state=0
)
valset, testset = generate_partition(
sss_valtest, val_test_set, val_test_dataset_categories
)
return trainset, valset, testset | 5,352,883 |
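# Minimal usage sketch, assuming `dataset` is already a list of Data objects as
# described in the docstring; 80% goes to training and the held-out 20% is split
# evenly between validation and test by the second stratified split.
trainset, valset, testset = compositional_stratified_splitting(dataset, perc_train=0.8)
print(len(trainset), len(valset), len(testset))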
def is_inside_line_segment(x, y, x0, y0, x1, y1):
"""Return True if the (x, y) lies inside the line segment defined by
(x0, y0) and (x1, y1)."""
# Create two vectors.
v0 = np.array([ x0-x, y0-y ]).reshape((2,1))
v1 = np.array([ x1-x, y1-y ]).reshape((2,1))
# Inner product.
prod = v0.transpose().dot(v1)
    # prod is a 1x1 array; convert the comparison result to a plain bool.
    return bool(prod <= 0) | 5,352,884 |
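# Minimal usage sketch: the dot product of the two point-to-endpoint vectors is
# non-positive exactly when the endpoints lie on opposite sides of (x, y).
print(is_inside_line_segment(1.0, 1.0, 0.0, 0.0, 2.0, 2.0))  # True  (between the endpoints)
print(is_inside_line_segment(3.0, 3.0, 0.0, 0.0, 2.0, 2.0))  # False (beyond the segment)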
def transition_soil_carbon(area_final, carbon_final, depth_final,
transition_rate, year, area_initial,
carbon_initial, depth_initial):
"""This is the formula for calculating the transition of soil carbon
.. math:: (af * cf * df) - \
\\frac{1}{(1 + tr)^y} * \
[(af * cf * df) - \
(ai * ci * di)]
where
* :math:`af` is area_final
* :math:`cf` is carbon_final
* :math:`df` is depth_final
* :math:`tr` is transition_rate
* :math:`y` is year
* :math:`ai` is area_initial
* :math:`ci` is carbon_initial
* :math:`di` is depth_initial
Args:
area_final (float): The final area of the carbon
carbon_final (float): The final amount of carbon per volume
depth_final (float): The final depth of carbon
transition_rate (float): The rate at which the transition occurs
        year (float): The amount of time in years over which the transition occurs
        area_initial (float): The initial area of the carbon
        carbon_initial (float): The initial amount of carbon per volume
depth_initial (float): The initial depth of carbon
Returns:
float: Transition amount of soil carbon
"""
return (area_final * carbon_final * depth_final) - \
(1/((1 + transition_rate) ** year)) * \
((area_final * carbon_final * depth_final) - \
(area_initial * carbon_initial * depth_initial)) | 5,352,885 |
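# Worked numerical sketch with made-up values: 1000 ha moving from a carbon
# density of 0.05 to 0.03 over 10 years at a 5% transition rate.
stock = transition_soil_carbon(
    area_final=1000.0, carbon_final=0.03, depth_final=1.0,
    transition_rate=0.05, year=10,
    area_initial=1000.0, carbon_initial=0.05, depth_initial=1.0)
# 30 - (1 / 1.05**10) * (30 - 50) = 30 + 0.6139 * 20 ≈ 42.28
print(round(stock, 2))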
def make_global_config():
"""load & augment experiment configuration, then add it to global variables"""
parser = ArgumentParser(description='Evaluate TRE model.', formatter_class=ArgumentDefaultsHelpFormatter)
# parser.add_argument('--config_path', type=str, default="1d_gauss/20200501-0739_0")
parser.add_argument('--config_path', type=str, default="gaussians/20200713-1029_4")
# parser.add_argument('--config_path', type=str, default="mnist/20200504-1031_0")
parser.add_argument('--ais_id', type=int, default=0)
parser.add_argument('--eval_epoch_idx', type=str, default="best")
parser.add_argument('--do_estimate_log_par', type=int, default=0) # -1 == False, else True
parser.add_argument('--do_sample', type=int, default=-1) # -1 == False, else True
parser.add_argument('--ais_nuts_max_tree_depth', type=int, default=5) # -1 == False, else True
parser.add_argument('--do_assess_subbridges', type=int, default=-1) # -1 == False, else True
parser.add_argument('--do_assess_parameters', type=int, default=0) # -1 == False, else True
parser.add_argument('--sample_method', type=str, default="nuts")
parser.add_argument('--act_threshold_quantile', type=float, default=0.99)
# if we are only sampling (i.e. not computing partition function with AIS), then this is the number of sampling
# steps we use when performing annealed sampling. If None, then use the default value stored in config file.
parser.add_argument('--only_sample_total_n_steps', type=int, default=1000)
parser.add_argument('--only_sample_n_chains', type=int, default=-1)
# initial step size for annealed sampling
parser.add_argument('--ais_step_size_init', type=float, default=0.02)
parser.add_argument('--init_post_annealed_step_size', type=float, default=0.02)
# when doing annealed sampling with uncalibrated_langevin, we use an exponentially decreasing step size schedule.
# The final step size in this schedule is 10^-step_size_reduction_magnitude smaller than the initial step size.
parser.add_argument('--step_size_reduction_magnitude', type=float, default=2)
# After annealed sampling, we continue sampling from the entire model
parser.add_argument('--do_post_annealed_sample', type=int, default=0) # -1 == False, else True
parser.add_argument('--post_ais_n_samples_keep', type=int, default=20)
parser.add_argument('--post_ais_thinning_factor', type=int, default=0)
parser.add_argument('--post_ais_nuts_max_tree_depth', type=int, default=10)
parser.add_argument('--parallel_iterations', type=int, default=10)
parser.add_argument('--swap_memory', type=int, default=-1) # attempt to save gpu memory by using cpu when possible
parser.add_argument('--n_noise_samples_for_variational_losses', type=int, default=1000)
parser.add_argument('--frac', type=float, default=1.0)
parser.add_argument('--debug', type=int, default=-1)
args = parser.parse_args()
with open(project_root + "saved_models/{}/config.json".format(args.config_path)) as f:
config = json.load(f)
rename_save_dir(config)
if args.only_sample_n_chains == -1:
del args.only_sample_n_chains
config.update(vars(args))
config["do_estimate_log_par"] = False if args.do_estimate_log_par == -1 else True
config["do_sample"] = False if args.do_sample == -1 else True
config["do_post_annealed_sample"] = False if args.do_post_annealed_sample == -1 else True
config["do_assess_subbridges"] = False if args.do_assess_subbridges == -1 else True
config["do_assess_parameters"] = False if args.do_assess_parameters == -1 else True
config["swap_memory"] = False if args.swap_memory == -1 else True
config["debug"] = False if args.debug == -1 else True
if config["eval_epoch_idx"] == "final": # work out the final epoch number
metrics_save_dir = os.path.join(config["save_dir"], "model/every_x_epochs/")
epoch_nums = [x.split(".")[0] for x in os.listdir(metrics_save_dir) if "checkpoint" not in x]
config["eval_epoch_idx"] = str(max([int(x) for x in epoch_nums]))
if "data_dist_name" not in config: config["data_dist_name"] = None
save_config(config)
if config["debug"]:
config["do_assess_subbridges"] = True
config["do_assess_parameters"] = True
config["do_sample"] = False
config["do_estimate_log_par"] = True
config["do_post_annealed_sample"] = False
config["frac"] = 0.2
config["ais_n_chains"] = 10
config["ais_total_n_steps"] = 10
config["only_sample_n_chains"] = 10
config["only_sample_total_n_steps"] = 10
config["post_ais_n_samples_keep"] = 10
config["post_ais_thinning_factor"] = 5
config["n_noise_samples_for_variational_losses"] = 1000
globals().update(config)
return AttrDict(config) | 5,352,886 |
def dijkstra(graph, source):
"""Find the shortest path from the source node to every other node in the given graph"""
# Declare and initialize result, unvisited, and path
result = {i: sys.maxsize if i != source else 0 for i in graph.nodes} # placeholder, by default set distance to maxsize
path = dict()
unvisited = set(graph.nodes)
while unvisited: # As long as unvisited is non-empty
min_node = None
# Find the unvisited node having smallest known distance from the source node.
for node in unvisited:
if min_node is None: # base case
min_node = node
elif result[node] < result[min_node]:
                min_node = node # switch the nodes, so start with source, then next lowest...
            # Alternative ("fancy") approach, left commented out:
            # d = {i[0][1]: i[1] for i in graph.distances.items() if i[0][0] == node}
            # min_node = min(d, key=d.get)
            # result[min_node] = d[min_node]
current_distance = result[min_node]
# For the current node, find all the unvisited neighbours.
# For this, you have calculate the distance of each unvisited neighbour.
# unvisited_neighbours = unvisited.intersection(graph.neighbours[min_node]) does not work, might not be a path between nodes
for neighbour in graph.neighbours[min_node]:
if neighbour in unvisited:
distance = current_distance + graph.distances[(min_node, neighbour)]
# If the calculated distance of the unvisited neighbour is less than the already known distance in result dictionary,
# update the shortest distance in the result dictionary.
if distance < result[neighbour]:
result[neighbour] = distance
path[neighbour] = min_node
# Remove the current node from the unvisited set.
unvisited.remove(min_node)
# should do an ASSERT to check no values in result dict equal sys.maxsize
return result | 5,352,887 |
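# Minimal usage sketch with a stand-in graph object; the real Graph class in
# this codebase may differ, but dijkstra only needs .nodes, .neighbours and
# .distances in the shapes shown here.
class _ToyGraph:
    def __init__(self):
        self.nodes = {1, 2, 3}
        self.neighbours = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
        self.distances = {(1, 2): 4, (2, 1): 4,
                          (1, 3): 1, (3, 1): 1,
                          (2, 3): 2, (3, 2): 2}

print(dijkstra(_ToyGraph(), source=1))  # {1: 0, 2: 3, 3: 1} -- node 2 is cheaper via node 3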
def _snr_approx(array, source_xy, fwhm, centery, centerx):
"""
array - frame convolved with top hat kernel
"""
sourcex, sourcey = source_xy
rad = dist(centery, centerx, sourcey, sourcex)
ind_aper = draw.circle(sourcey, sourcex, fwhm/2.)
# noise : STDDEV in convolved array of 1px wide annulus (while
# masking the flux aperture) * correction of # of resolution elements
ind_ann = draw.circle_perimeter(int(centery), int(centerx), int(rad))
array2 = array.copy()
array2[ind_aper] = array[ind_ann].mean() # quick-n-dirty mask
n2 = (2*np.pi*rad)/fwhm - 1
noise = array2[ind_ann].std()*np.sqrt(1+(1/n2))
# signal : central px minus the mean of the pxs (masked) in 1px annulus
signal = array[sourcey, sourcex] - array2[ind_ann].mean()
snr = signal / noise
return sourcey, sourcex, snr | 5,352,888 |
def create_pipeline(training_set, validation_set, test_set):
"""
Create a pipeline for the training, validation and testing set
Parameters: training_set: Training data set
validation_set: Validation data set
test_set: Test data set
Returns: batch_size: Batch size
image_size: Image dimensions (width, height)
training_batches: Batches of training data set
validation_batches: Batches of validation data set
testing_batches: Batches of test data set
"""
# Define batch size and image size
batch_size = 64
image_size = 224
# Define function to convert images to appropriate format, resize to fit the input layer and normalize it
def format_image(image, label):
image = tf.cast(image, tf.float32)
image = tf.image.resize(image, [image_size, image_size])
image /= 255
return image, label
# Define batches, while modifying images according to above function as well as batch and prefetch them
training_batches = training_set.map(format_image).batch(batch_size).prefetch(1)
validation_batches = validation_set.map(format_image).batch(batch_size).prefetch(1)
testing_batches = test_set.map(format_image).batch(batch_size).prefetch(1)
return batch_size, image_size, training_batches, validation_batches, testing_batches | 5,352,889 |
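# Minimal usage sketch, assuming tensorflow_datasets is available and the chosen
# dataset/splits exist; any three (image, label) tf.data.Dataset objects work.
import tensorflow_datasets as tfds

(train_set, val_set, test_set), info = tfds.load(
    "oxford_flowers102",
    split=["train", "validation", "test"],
    as_supervised=True,
    with_info=True)
batch_size, image_size, train_b, val_b, test_b = create_pipeline(train_set, val_set, test_set)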
def invalid_request() -> Tuple[Any, int]:
"""Invalid request API response."""
return jsonify({API.Response.KEY_INFO: API.Response.VAL_INVALID_REQUEST}), 400 | 5,352,890 |
def _add_hyperparameters(
ranges_path: Path, defaults_path: Path
) -> List[Dict[str, Any]]:
"""Returns a list of hyperparameters in a format that is compatible with the json
reader of the ConfigSpace API.
The list is created from two files: a hp_space file that defines the ranges of the
hyperparameters and an options file that defines the default values of the
hyperparameters. Both are in json format.
Parameters
----------
ranges_path: Path
Path to the hp_space file
defaults_path: Path
Path to the options file
Returns
-------
List
A list of hyperparameters
"""
# load the ranges of the hyperparameters as a dict
ranges_dict = load_data(ranges_path)
ranges_dict = flatten_dictionary(ranges_dict)
# load the default values of the hyperparameters as a dict
defaults_dict = load_data(defaults_path)
defaults_dict = flatten_dictionary(defaults_dict)
hyperparameter_list = _add_ranges(ranges_dict)
hyperparameter_list = _add_defaults(hyperparameter_list, defaults_dict)
return hyperparameter_list | 5,352,891 |
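# Minimal usage sketch, assuming the two json files follow the hp_space/options
# layout this module expects and that load_data/flatten_dictionary are importable;
# the paths are placeholders.
from pathlib import Path

hyperparameters = _add_hyperparameters(
    ranges_path=Path("configs/hp_space.json"),
    defaults_path=Path("configs/options.json"),
)
for hp in hyperparameters:
    print(hp)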
def softreset_magic(kernel, args):
"""Reset microcontroller. Similar to pressing the reset button.
Purges all variables and releases all devices (e.g. I2C, UART, ...).
Example:
a = 5
%softreset
print(a) # NameError: name 'a' isn't defined
"""
with kernel.device as repl:
if not args.quiet:
kernel.print("")
kernel.print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", 'red', 'on_cyan')
kernel.print("!!!!! softreset ... !!!!!", 'red', 'on_cyan')
kernel.print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", 'red', 'on_cyan', end="")
repl.softreset()
if not args.quiet:
kernel.print("\n") | 5,352,892 |
def setup(app):
"""Setup extension."""
app.add_domain(StuffDomain)
app.connect("builder-inited", generate_latex_preamble)
app.connect("config-inited", init_numfig_format)
app.add_css_file("stuff.css")
app.add_enumerable_node(
StuffNode,
"stuff",
html=(html_visit_stuff_node, html_depart_stuff_node),
singlehtml=(html_visit_stuff_node, html_depart_stuff_node),
latex=(latex_visit_stuff_node, latex_depart_stuff_node),
)
app.add_node(
nodes.caption,
override=True,
html=(html_visit_caption_node, html_depart_caption_node),
singlehtml=(html_visit_caption_node, html_depart_caption_node),
latex=(latex_visit_caption_node, latex_depart_caption_node),
)
app.add_node(
ContentNode,
html=(html_visit_content_node, html_depart_content_node),
singlehtml=(html_visit_content_node, html_depart_content_node),
latex=(latex_visit_content_node, latex_depart_content_node),
)
return {"version": __version__, "parallel_read_safe": True} | 5,352,893 |
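# Minimal conf.py sketch for enabling the extension; the module name "stuff"
# is an assumption based on the domain and CSS file names used above.
extensions = [
    "stuff",
]
numfig = True  # give the enumerable stuff nodes numbered captions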
def _save_finite_diffs(
file_name: str,
file_override: bool,
fin_diff_base_benchmarks: List[str],
fin_diff_shift_benchmarks: List[List[str]],
fin_diff_epsilon: float,
) -> None:
"""
Save finite differences between benchmarks into a HDF5 data file
Parameters
----------
file_name: str
HDF5 file name to save finite difference benchmarks into
file_override: bool
Whether to override HDF5 file contents or not
fin_diff_base_benchmarks: list
List of base benchmark names
fin_diff_shift_benchmarks: list
List of shift benchmark names, for each of the parameters
fin_diff_epsilon: float
Value representing the magnitude of the numerical uncertainty
Returns
-------
None
"""
fin_diff_base_benchmarks = _encode_strings(fin_diff_base_benchmarks)
fin_diff_shift_benchmarks = [_encode_strings(names) for names in fin_diff_shift_benchmarks]
# Append if file exists, otherwise create
with h5py.File(file_name, "a") as file:
if file_override:
with suppress(KeyError):
del file["finite_differences"]
file.create_dataset("finite_differences/base_benchmarks", data=fin_diff_base_benchmarks)
file.create_dataset("finite_differences/shifted_benchmarks", data=fin_diff_shift_benchmarks)
file.create_dataset("finite_differences/epsilon", data=fin_diff_epsilon) | 5,352,894 |
def call_extend_index_yaml() -> None:
"""Calls the extend_index_yaml.py script."""
print('\033[94m' + 'Extending index.yaml...' + '\033[0m')
extend_index_yaml.main() | 5,352,895 |
def has_checksum(path: Path, csum: str,
csum_fun: typing.Optional[Checksum] = None) -> bool:
"""
:return: True if the file at the path `path` has given checksum
"""
return get_checksum(path, csum_fun=csum_fun) == csum | 5,352,896 |
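# Minimal usage sketch, assuming get_checksum and Checksum come from the same
# module; the path and digest below are placeholders.
from pathlib import Path

ok = has_checksum(Path("downloads/archive.tar.gz"),
                  "9b74c9897bac770ffc029102a200c5de")
print("checksum matches" if ok else "checksum mismatch")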
def test_void_transaction_with_param(single_transaction_purchase_invoice, auth_client):
    """Test that a purchase invoice transaction can be voided by passing the document type as a parameter."""
include = {'documentType': 'PurchaseInvoice'}
result = auth_client.void_transaction('DEFAULT', single_transaction_purchase_invoice, {'code':'DocVoided'}, include).json()
assert result['status'] == "Cancelled" | 5,352,897 |
def export_housing(filename_or_response):
"""
:param filename_or_response: can be a real filename or a django response
:return:
"""
def export(writer):
row = ['unit_id', 'address', 'apartment', 'city', 'state', 'zip_code']
writer.writerow(row)
for unit in Unit.objects.all():
code = unit.zip_code
row = [value(unit.unit_id),
unit.address, value(unit.apartment), code.city.name, code.state, code.zip_code]
print(row)
writer.writerow(row)
    if isinstance(filename_or_response, str):
        # open in text mode so csv.writer works, and actually run the export
        with open(filename_or_response, 'w', newline='') as f:
            export(csv.writer(f))
else:
w = csv.writer(filename_or_response, dialect='excel')
export(w) | 5,352,898 |
def remove_outliers(column):
"""
:param column: list of numbers
    :return: copy of the list with values outside [Q1 - k*IQR, Q3 + k*IQR] removed
"""
if len(column) < 1:
return []
import numpy as np
clean_column = []
q1 = np.percentile(column, 25)
q3 = np.percentile(column, 75)
#k = 1.5
k = 2
# [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
lower_bound = q1 - k*(q3-q1)
upper_bound = q3 + k*(q3-q1)
for c in column:
if c >= lower_bound and c <= upper_bound:
clean_column.append(c)
return clean_column | 5,352,899 |
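# Minimal usage sketch: with k = 2 the fences are [Q1 - 2*IQR, Q3 + 2*IQR],
# so the single extreme value below is dropped and the rest are kept.
values = [10, 11, 12, 11, 13, 12, 11, 95]
print(remove_outliers(values))  # 95 falls outside the upper fence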