content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---
from typing import List
import subprocess
def run_bincapture(args: List[str]) -> bytes:
"""run is like "subprocess.run(args, capture_out=True, text=False)",
but with helpful settings and obeys "with capture_output(out)".
"""
if _capturing:
try:
return subprocess.run(args, check=True, capture_output=True).stdout
except subprocess.CalledProcessError as err:
raise Exception(f"{err.stderr.decode('UTF-8')}{err}") from err
else:
return subprocess.run(args, check=True, stdout=subprocess.PIPE).stdout | 61495c2be378f81fc9165b29a0c12c8d1b6c5d6c | 3,653,200 |
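A minimal usage sketch for `run_bincapture`; the module-level `_capturing` flag it reads is assumed to be defined (set explicitly here for the demo), and the command is an arbitrary POSIX example.

```python
# Hypothetical demo: _capturing normally comes from the surrounding module.
_capturing = False
out = run_bincapture(["echo", "hi"])
print(out)  # b'hi\n'
```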
def _EnumValFromText(fdesc, enum_text_val, log):
"""Convert text version of enum to integer value.
Args:
fdesc: field descriptor containing the text -> int mapping.
enum_text_val: text to convert.
log: logger obj
Returns:
integer value of enum text.
"""
log.debug("converting enum val:" + enum_text_val)
log.debug("possible enum vals:" + str(fdesc.enum_type.values_by_name.keys()))
enum_val = fdesc.enum_type.values_by_name[enum_text_val.upper()].number
log.debug("done enum vals")
return enum_val | af923a2cf65a81914ebeab85de779f1502a2d943 | 3,653,201 |
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
# print('correct shape:', correct.shape)
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
if len(res) == 1:
return res[0]
else:
return (res[0], res[1], correct[0], pred[0]) | a5b2c3d97c839e0ae9954ce48889d5b46966b3cb | 3,653,202 |
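A minimal usage sketch for `accuracy`, assuming PyTorch is available; the logits and labels below are made-up illustration data.

```python
import torch

logits = torch.tensor([[0.1, 2.0, 0.3],
                       [1.5, 0.2, 0.1]])   # (batch=2, classes=3)
labels = torch.tensor([1, 0])
top1 = accuracy(logits, labels, topk=(1,))
print(float(top1))  # 100.0 -- both predictions match their labels
```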
from math import modf
import datetime as dt
def yyyydoy2jd(year,doy,hh=0,mm=0,ss=0.0):
"""
yyyydoy2jd Take a year, day-of-year, etc and convert it into a julian day
Usage: jd = yyyydoy2jd(year,doy,hh,mm,ss)
Input: year - 4 digit integer
doy - 3 digit, or less integer, (1 <= doy <= 366)
hh - 2 digit, or less int, (0 <= hh < 24) (not required)
           mm - 2 digit, or less int, (0 <= mm < 60) (not required)
ss - float (not required)
Output: 'jd' (float)
"""
#
# need to split seconds into two components
# sec => 2 digit, or less int, (0 <= ss < 60)
# ms => int 0 <= ms < 1,000,000
#
ms,sec = modf(float(ss))
    ms = ms * 1e6  # fractional seconds -> microseconds
    dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), int(ms))
dto = dto + dt.timedelta(days=(int(doy) - 1))
mn = dto.month
dy = dto.day
jd = cal2jd(int(year),int(mn),int(dy))
jd = jd + float(hh)/24. + float(mm)/60./24. + float(sec)/3600./24.
return jd - 2400000.5 | 7e0579197146435d4c3e5031de962b758555846f | 3,653,203 |
import numpy as np
def lon2index(lon, coords, corr=True):
"""convert longitude to index for OpenDAP request"""
if corr:
if lon < 0:
lon += 360
lons = coords.lon.values
return np.argmin(np.abs(lons - lon)) | 3fd3571ab221533708c32c9e28293a90ee9f30cd | 3,653,204 |
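An illustrative call, assuming `coords` is an xarray object exposing a `lon` coordinate in the 0-360 convention (the function only touches `coords.lon.values`); the grid below is made up.

```python
import numpy as np
import xarray as xr

coords = xr.Dataset(coords={"lon": np.arange(0.0, 360.0, 2.5)})
idx = lon2index(-75.0, coords)                 # -75 deg is converted to 285 deg
print(idx, float(coords.lon.values[idx]))      # 114 285.0
```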
def get_dynamic_call_address(ea):
"""Find all dynamic calls e.g call eax"""
dism_addr_list = list(FuncItems(ea))
return [addr for addr in dism_addr_list if print_insn_mnem(addr) == 'call' and get_operand_type(addr, 0)==1] | 1f4d0eb3bcfdf0728d12efdfd151246f0497c8dd | 3,653,205 |
def iwbo_nats(model, x, k, kbs=None):
"""Compute the IWBO in nats."""
if kbs: return - iwbo_batched(model, x, k, kbs).mean()
else: return - iwbo(model, x, k).mean() | 5620e60710e6c25804d66f4c668f4670e033fdbe | 3,653,206 |
def ko_json(queryset, field_names=None, name=None, safe=False):
"""
Given a QuerySet, return just the serialized representation
based on the knockout_fields. Useful for middleware/APIs.
Convenience method around ko_data.
"""
return ko_data(queryset, field_names, name, safe, return_json=True) | 25d3b433ffec6eb4e6bb8c0d39a9080692dee4f2 | 3,653,207 |
def map(video_features_path, audio_hypothesis, file_uri, ier=False):
"""Maps outputs of pyannote.audio and pyannote.video models
Parameters:
-----------
video_features_path: str
Path to the video features (.npy) file as defined in pyannote.video
audio_hypothesis: Annotation
hypothesis made by the audio model
file_uri: str
uri of the file you're interested in (used to filter out audio_hypothesis)
ier: bool
If True, the mapping will be done using `optimal_mapping_ier`
which may map the same label to several clusters in order to minimize IER
If False (default), pyannote.metrics `optimal_mapping` will be used.
"""
clustering = FaceClustering()
#TODO : move the preprocess (i.e. npy to pyannote) to some other place ?
face_id, _ = clustering.model.preprocess(video_features_path,CLUSTERING_THRESHOLD)
if ier:
optimal_mapping=optimal_mapping_ier(face_id, audio_hypothesis)
else:
der=DiarizationErrorRate()
optimal_mapping=der.optimal_mapping(face_id, audio_hypothesis)
mapped_hypothesis=audio_hypothesis.rename_labels(mapping=optimal_mapping)
return mapped_hypothesis, face_id | 1dbccac4e27378d92f03503e6e672937bde958da | 3,653,208 |
def delete_demo(guid):
"""
Delete a demo object and all its children.
:param guid: The demo's guid
:return:
"""
web_utils.check_null_input((guid, 'demo to delete'))
demo_service.delete_demo_by_guid(guid)
return '', 204 | eb0a205e4279003a99159b2aeb4b8caefd47c2be | 3,653,209 |
def return_json():
"""
Sample function that has been given a different name
"""
print("Tooler should render out the JSON value returned")
return {"one": 1, "deep": {"structure": ["example"]}} | bf28fab61cabfc3a4f30736e58490d5df6702dc2 | 3,653,210 |
def get(url) -> str:
"""Send an http GET request.
:param str url:
The URL to perform the GET request for.
:rtype: str
:returns:
UTF-8 encoded string of response
"""
return _execute_request(url).read().decode("utf-8") | 2f0b6ed542f75f83478f672ef1f39f192dddbf66 | 3,653,211 |
import logging
import random
def brute_force(durs, labels, labelset, train_dur, val_dur, test_dur, max_iter=5000):
"""finds indices that split (labels, durations) tuples into training,
test, and validation sets of specified durations, with the set of unique labels
in each dataset equal to the specified labelset.
The durations of the datasets created using the returned indices will be
*greater than* or equal to the durations specified.
Must specify a positive value for one of {train_dur, test_dur}.
The other value can be specified as '-1' which is interpreted as
"use the remainder of the dataset for this split,
after finding indices for the set with a specified duration".
Parameters
----------
durs : list
of durations of vocalizations
labels : list
of labels from vocalizations
labelset : set
of labels
train_dur : int, float
Target duration for training set, in seconds.
val_dur : int, float
Target duration for validation set, in seconds.
test_dur : int, float
Target duration for test set, in seconds.
max_iter : int
maximum number of iterations to attempt to find indices. Default is 5000.
Returns
-------
train_inds, val_inds, test_inds : list
of int, the indices that will split datasets
Notes
-----
A 'brute force' algorithm that just randomly assigns indices to a set,
and iterates until it finds some partition where each set has instances of all classes of label.
Starts by ensuring that each label is represented in each set and then adds files to reach the required
durations.
"""
logger = logging.getLogger(__name__)
logger.setLevel("INFO")
sum_durs = sum(durs)
train_dur, val_dur, test_dur = validate_split_durations(
train_dur, val_dur, test_dur, sum_durs
)
target_split_durs = dict(
zip(("train", "val", "test"), (train_dur, val_dur, test_dur))
)
if not len(durs) == len(labels):
raise ValueError(
"length of list of durations did not equal length of list of labels; "
"should be same length since "
"each duration of a vocalization corresponds to the labels from its annotations.\n"
f"Length of durations: {len(durs)}. Length of labels: {len(labels)}"
)
iter = 1
all_labels_err = (
"Did not successfully divide data into training, "
"validation, and test sets of sufficient duration "
f"after {max_iter} iterations. "
"Try increasing the total size of the data set."
)
    # ---- outer loop that repeats until we successfully split or reach max number of iters ----------------------------
while 1:
# list of indices we use to index into both `durs` and `labels`
durs_labels_inds = list(
range(len(labels))
) # we checked len(labels) == len(durs) above
# when making `split_inds`, "initialize" the dict with all split names, by using target_split_durs
# so we don't get an error when indexing into dict in return statement below
split_inds = {split_name: [] for split_name in target_split_durs.keys()}
total_split_durs = {split_name: 0 for split_name in target_split_durs.keys()}
split_labelsets = {split_name: set() for split_name in target_split_durs.keys()}
# list of split 'choices' we use when randomly adding indices to splits
choice = []
for split_name in target_split_durs.keys():
if target_split_durs[split_name] > 0 or target_split_durs[split_name] == -1:
choice.append(split_name)
# ---- make sure each split has at least one instance of each label --------------------------------------------
for label_from_labelset in sorted(labelset):
label_inds = [
ind for ind in durs_labels_inds if label_from_labelset in labels[ind]
]
random.shuffle(label_inds)
for split_name in target_split_durs.keys():
if (
target_split_durs[split_name] > 0
or target_split_durs[split_name] == -1
) and label_from_labelset not in split_labelsets[split_name]:
try:
ind = label_inds.pop()
split_inds[split_name].append(ind)
total_split_durs[split_name] += durs[ind]
split_labelsets[split_name] = split_labelsets[split_name].union(
set(labels[ind])
)
durs_labels_inds.remove(ind)
except IndexError:
if len(label_inds) == 0:
logger.debug(
"Ran out of elements while dividing dataset into subsets of specified durations."
f"Iteration {iter}"
)
iter += 1
break # do next iteration
else:
# something else happened, re-raise error
raise
for split_name in target_split_durs.keys():
if (
target_split_durs[split_name] > 0
and total_split_durs[split_name] >= target_split_durs[split_name]
):
choice.remove(split_name)
if len(choice) == 0:
finished = True
else:
finished = False
# ---- inner loop that actually does split ---------------------------------------------------------------------
random.shuffle(durs_labels_inds)
while finished is False:
# pop durations off list and append to randomly-chosen
# list, either train, val, or test set.
# Do this until the total duration for each data set is equal
# to or greater than the target duration for each set.
try:
ind = durs_labels_inds.pop()
except IndexError:
if len(durs_labels_inds) == 0:
logger.debug(
"Ran out of elements while dividing dataset into subsets of specified durations."
f"Iteration {iter}"
)
iter += 1
break # do next iteration
else:
# something else happened, re-raise error
raise
which_set = random.randint(0, len(choice) - 1)
split_name = choice[which_set]
split_inds[split_name].append(ind)
total_split_durs[split_name] += durs[ind]
if (
target_split_durs[split_name] > 0
and total_split_durs[split_name] >= target_split_durs[split_name]
):
choice.remove(split_name)
elif target_split_durs[split_name] == -1:
# if this split is -1 and other split is already "finished"
if (split_name == "test" and "train" not in choice) or (
split_name == "train" and "test" not in choice
):
# just add all remaining inds to this split
split_inds[split_name].extend(durs_labels_inds)
choice.remove(split_name)
if len(choice) < 1: # list is empty, we popped off all the choices
for split_name in target_split_durs.keys():
if target_split_durs[split_name] > 0:
if total_split_durs[split_name] < target_split_durs[split_name]:
raise ValueError(
"Loop to find splits completed, "
f"but total duration of '{split_name}' split, "
f"{total_split_durs[split_name]} seconds, "
f"is less than target duration specified: {target_split_durs[split_name]} seconds."
)
else:
finished = True
break
if iter > max_iter:
raise ValueError(
"Could not find subsets of sufficient duration in "
f"less than {max_iter} iterations."
)
# make sure that each split contains all unique labels in labelset
if finished is True:
for split_name in target_split_durs.keys():
if (
target_split_durs[split_name] > 0
or target_split_durs[split_name] == -1
):
split_labels = [
label for ind in split_inds[split_name] for label in labels[ind]
]
split_labelset = set(split_labels)
if split_labelset != set(labelset):
iter += 1
if iter > max_iter:
raise ValueError(all_labels_err)
else:
logger.debug(
f"Set of unique labels in '{split_name}' split did not equal specified labelset. "
f"Getting new '{split_name}' split. Iteration: {iter}"
)
continue
# successfully split
break
elif finished is False:
continue
split_inds = {
split_name: (inds if inds else None) for split_name, inds in split_inds.items()
}
return split_inds["train"], split_inds["val"], split_inds["test"] | 553463746d6c330c833189f070bd66b0c748f75a | 3,653,212 |
from typing import Optional
from typing import Dict
import os
def VOLUME(env: Optional[Dict] = None) -> Dict:
"""Get specification for the volume that is associated with the worker that
is used to execute the main algorithm step.
Parameters
----------
env: dict, default=None
Optional environment variables that override the system-wide
settings, default=None
Returns
-------
dict
"""
return read_config_obj(var=METANOME_VOLUME, env=env if env is not None else os.environ) | 2ae5d5e6b0f9eb2800498ce9734f0f45dc368be3 | 3,653,213 |
import jax
def train_step(model_optimizer, game_board_log, predicted_action_log,
action_result_log):
"""Run one training step."""
def loss_fn(model_params):
logits = PolicyGradient().apply({'params': model_params}, game_board_log)
loss = compute_loss(logits, predicted_action_log, action_result_log)
return loss
grad_fn = jax.grad(loss_fn)
grads = grad_fn(model_optimizer.target)
model_optimizer = model_optimizer.apply_gradient(grads)
return model_optimizer | 628742cb6d2fe19d25b5e283c7bec6f5189fc7b5 | 3,653,214 |
def make_static_rnn_with_control_flow_v2_tests(options):
"""Make a set of tests to do basic Lstm cell."""
test_parameters = [
{
"dtype": [tf.float32],
"num_batches": [4],
"time_step_size": [4],
"input_vec_size": [3],
"num_cells": [4],
"use_sequence_length": [True, False],
},
]
def build_graph(parameters):
"""Build a simple graph with BasicLSTMCell."""
num_batches = parameters["num_batches"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
num_cells = parameters["num_cells"]
inputs_after_split = []
for i in range(time_step_size):
one_timestamp_input = tf.placeholder(
dtype=parameters["dtype"],
name="split_{}".format(i),
shape=[num_batches, input_vec_size])
inputs_after_split.append(one_timestamp_input)
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
num_cells, activation=tf.nn.relu, state_is_tuple=True)
sequence_length = None
if parameters["use_sequence_length"]:
      # Using different sequence length in each batch, like [1, 2, 3, 3...].
sequence_length = [
min(i + 1, time_step_size) for i in range(num_batches)
]
cell_outputs, _ = rnn.static_rnn(
lstm_cell,
inputs_after_split,
dtype=tf.float32,
sequence_length=sequence_length)
out = cell_outputs[-1]
return inputs_after_split, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Feed inputs, assign variables, and freeze graph."""
with tf.variable_scope("", reuse=True):
kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
bias = tf.get_variable("rnn/basic_lstm_cell/bias")
kernel_values = create_tensor_data(parameters["dtype"],
[kernel.shape[0], kernel.shape[1]], -1,
1)
bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0,
1)
sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))
num_batches = parameters["num_batches"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
input_values = []
for _ in range(time_step_size):
tensor_data = create_tensor_data(parameters["dtype"],
[num_batches, input_vec_size], 0, 1)
input_values.append(tensor_data)
out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
return input_values, out
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True) | aa29c5eddab46624c36be29ee9ce1e6a83efbd7a | 3,653,215 |
import numpy as np
from scipy.sparse import dok_matrix
def jaccard(structured_phrases, phrases_to_score, partial=False, status_callback=None, status_increment=None, pmd_class=PartialMatchDict):
""" calculate jaccard similarity between phrases_to_score, using
structured_phrases to determine cooccurrences. For phrases `a' and `b', let
A be the set of documents `a' appeared in, and B be the set of documents
    `b' appeared in. Then the Jaccard similarity of `a' and `b' is
    |A intersect B| / |A union B|.
Setting partial to true allows partial phrase matching: two phrases are the
same if they have any common subsequence of words. Very slow.
"""
    # indices will index into our union and intersection arrays
phrases = {}
if partial:
indices = pmd_class()
else:
indices = {}
for i, phrase in enumerate(phrases_to_score):
indices[phrase] = i
phrases[i] = phrase
N = len(phrases_to_score)
phrase_count = np.zeros(N)
if partial:
intersection = np.zeros((N, N), dtype=np.uint32)
else:
intersection = dok_matrix((N, N), dtype=np.uint32)
count = 0
if status_callback and not status_increment:
length = len(structured_phrases)
status_increment = length / 100
# take each document
for doc_phrases in structured_phrases:
if status_callback and status_increment > 0 and count % status_increment == 0:
try:
status_callback(status_format(float(count) / length))
except:
status_callback("%d processed" % count)
count += 1
# take all phrases within this document
for i in range(len(doc_phrases)):
np1 = tuple(doc_phrases[i])
if np1 in indices:
# this phrase is important enough to count
if partial:
matches1 = indices[np1]
else:
matches1 = set()
matches1.add(indices[np1])
for index1 in matches1:
phrase_count[index1] += 1
for k in range(i + 1, len(doc_phrases)):
np2 = tuple(doc_phrases[k])
if np2 in indices:
# this np is important enough to count
if partial:
matches2 = indices[np2]
else:
matches2 = set()
matches2.add(indices[np2])
for index1 in matches1:
for index2 in matches2:
if index2 != index1:
intersection[index1,index2] += 1
intersection[index2,index1] += 1
# use inclusion exclusion
if partial:
tiled_phrase_count = np.lib.stride_tricks.as_strided(phrase_count,
(N, phrase_count.size),
(0, phrase_count.itemsize))
union = tiled_phrase_count + tiled_phrase_count.T - intersection
jaccard = intersection / union
else:
jaccard = dok_matrix((N, N))
        for coords, intersection_count in intersection.items():
jaccard[coords] = intersection_count / (phrase_count[coords[0]] + phrase_count[coords[1]] - intersection_count)
jaccard = np.asarray(jaccard.todense())
return jaccard, phrases | c7af246028f59b2375974390f337063d740d2f53 | 3,653,216 |
import ast
import black
def print_python(node: ast.AST) -> str:
"""Takes an AST and produces a string containing a human-readable
Python expression that builds the AST node."""
return black.format_str(ast.dump(node), mode=black.FileMode()) | 06281c4622d2b13008c17763bb59f93dfc44527c | 3,653,217 |
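A short usage sketch, assuming the `black` formatter is installed as the function requires; the exact field layout of the dumped node varies slightly between Python versions.

```python
tree = ast.parse("1 + 2", mode="eval")
print(print_python(tree.body))
# e.g. BinOp(left=Constant(value=1), op=Add(), right=Constant(value=2))
```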
def reg2deg(reg):
"""
Converts phase register values into degrees.
:param cycles: Re-formatted number of degrees
:type cycles: int
:return: Number of degrees
:rtype: float
"""
return reg*360/2**32 | c7dbd6119ad3bce9261fb3d78a369251ade2d8af | 3,653,218 |
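A quick worked check of the conversion above: half of the 32-bit phase register range corresponds to 180 degrees.

```python
print(reg2deg(2**31))   # 180.0
print(reg2deg(2**30))   # 90.0
```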
from typing import Union
def flag_element(uid: int, reason: Union[key_duplicate, key_optimization, ReviewDeleteReasons], db_user: User,
is_argument: bool, ui_locales: str, extra_uid=None) -> dict:
"""
Flags an given argument based on the reason which was sent by the author. This argument will be enqueued
for a review process.
:param uid: Uid of the argument/statement, which should be flagged
:param reason: String which describes the reason
:param db_user: User
:param is_argument: Boolean
:param ui_locales: ui_locales
:param extra_uid: Uid of the argument/statement, which should be flagged
:return: success, info, error
"""
tn = Translator(ui_locales)
argument_uid = uid if is_argument else None
statement_uid = uid if not is_argument else None
# was this already flagged?
flag_status = QueueAdapter(db_user=db_user).element_in_queue(argument_uid=argument_uid,
statement_uid=statement_uid,
premisegroup_uid=None)
if flag_status:
LOG.debug("Already flagged by %s", flag_status)
if flag_status == FlaggedBy.user:
info = tn.get(_.alreadyFlaggedByYou)
else:
info = tn.get(_.alreadyFlaggedByOthers)
return {'success': '', 'info': info}
return __add_flag(reason, argument_uid, statement_uid, extra_uid, db_user, tn) | d36a36e4d4f106e6a072884da3588bad0642302b | 3,653,219 |
import os
from warnings import warn
def export_bioimageio_model(checkpoint, export_folder, input_data=None,
dependencies=None, name=None,
description=None, authors=None,
tags=None, license=None,
documentation=None, covers=None,
git_repo=None, cite=None,
input_optional_parameters=True,
model_postprocessing=None,
                            for_deepimagej=False, links=None,
                            maintainers=None, checkpoint_name="best",
                            config=None):
"""
"""
assert input_data is not None
# load trainer and model
trainer = get_trainer(checkpoint, name=checkpoint_name, device="cpu")
model, model_kwargs = _get_model(trainer, model_postprocessing)
# create the weights
os.makedirs(export_folder, exist_ok=True)
weight_path = _write_weights(model, export_folder)
# create the test input/output file and derive the tensor kwargs from the model and its kwargs
test_in_paths, test_out_paths = _write_data(input_data, model, trainer, export_folder)
tensor_kwargs = _get_tensor_kwargs(model, model_kwargs, test_in_paths, test_out_paths)
# create the model source file
source = _write_source(model, export_folder)
# create dependency file
_write_depedencies(export_folder, dependencies)
# get the additional kwargs
kwargs = _get_kwargs(trainer, name, description,
authors, tags,
license, documentation,
git_repo, cite,
maintainers,
export_folder, input_optional_parameters)
kwargs.update(tensor_kwargs)
preprocessing = _get_preprocessing(trainer)
# the apps to link with this model, by default ilastik
links.append("ilastik/ilastik")
kwargs.update({"links": links, "config": config})
zip_path = os.path.join(export_folder, f"{name}.zip")
# change the working directory to the export_folder to avoid issues with relative paths
cwd = os.getcwd()
os.chdir(export_folder)
try:
build_spec.build_model(
weight_uri=weight_path,
weight_type="pytorch_state_dict",
test_inputs=[f"./{os.path.split(test_in)[1]}" for test_in in test_in_paths],
test_outputs=[f"./{os.path.split(test_out)[1]}" for test_out in test_out_paths],
root=".",
output_path=f"{name}.zip",
dependencies="environment.yaml",
preprocessing=preprocessing,
architecture=source,
model_kwargs=model_kwargs,
add_deepimagej_config=for_deepimagej,
**kwargs
)
except Exception as e:
raise e
finally:
os.chdir(cwd)
# load and validate the model
rdf_path = os.path.join(export_folder, "rdf.yaml")
_extract_from_zip(zip_path, rdf_path, "rdf.yaml")
val_success = _validate_model(rdf_path)
if val_success:
print(f"The model was successfully exported to '{export_folder}'.")
else:
warn(f"Validation of the bioimageio model exported to '{export_folder}' has failed. " +
"You can use this model, but it will probably yield incorrect results.")
return val_success | 8dbbbe8fea06f98566c46e3eba810dbfcbcdec88 | 3,653,220 |
import pathlib
def load_config_at_path(path: Pathy) -> Dynaconf:
"""Load config at exact path
Args:
path: path to config file
Returns:
dict: config dict
"""
path = pathlib.Path(path)
if path.exists() and path.is_file():
options = DYNACONF_OPTIONS.copy()
options.update({
'root_path': str(path.parent),
'settings_file': str(path.name),
})
return Dynaconf(**options)
else:
raise ConfigurationError(
f'Couldn\'t find ballet.yml config file at {path!s}') | da5cc4b830ad3a50ec6713bb509d3db0862963bf | 3,653,221 |
def _build_target(action, original_target, plugin, context):
"""Augment dictionary of target attributes for policy engine.
This routine adds to the dictionary attributes belonging to the
"parent" resource of the targeted one.
"""
target = original_target.copy()
resource, _w = _get_resource_and_action(action)
hierarchy_info = attributes.RESOURCE_HIERARCHY_MAP.get(resource, None)
if hierarchy_info and plugin:
# use the 'singular' version of the resource name
parent_resource = hierarchy_info['parent'][:-1]
parent_id = hierarchy_info['identified_by']
f = getattr(plugin, 'get_%s' % parent_resource)
# f *must* exist, if not found it is better to let quantum explode
# Note: we do not use admin context
data = f(context, target[parent_id], fields=['tenant_id'])
target['%s_tenant_id' % parent_resource] = data['tenant_id']
return target | e3c62944d7083ee96ad510fff0807db50aed9602 | 3,653,222 |
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
"""Configure Gammu state machine."""
device = entry.data[CONF_DEVICE]
config = {"Device": device, "Connection": "at"}
gateway = await create_sms_gateway(config, opp)
if not gateway:
return False
opp.data[DOMAIN][SMS_GATEWAY] = gateway
opp.config_entries.async_setup_platforms(entry, PLATFORMS)
return True | c0a14f2a92d06e814728ff0ceed05bff17acb66a | 3,653,223 |
def grep_response_body(regex_name, regex, owtf_transaction):
"""Grep response body
:param regex_name: Regex name
:type regex_name: `str`
:param regex: Regex
:type regex:
:param owtf_transaction: OWTF transaction
:type owtf_transaction:
:return: Output
:rtype: `dict`
"""
return grep(regex_name, regex, owtf_transaction.get_raw_response_body) | b5e9899675a63fe9ede9a9cf612b2004d52bb364 | 3,653,224 |
def link(f, search_range, pos_columns=None, t_column='frame', verbose=True, **kwargs):
"""
link(f, search_range, pos_columns=None, t_column='frame', memory=0,
predictor=None, adaptive_stop=None, adaptive_step=0.95,
neighbor_strategy=None, link_strategy=None, dist_func=None,
to_eucl=None)
Link a DataFrame of coordinates into trajectories.
Parameters
----------
f : DataFrame
The DataFrame must include any number of column(s) for position and a
column of frame numbers. By default, 'x' and 'y' are expected for
position, and 'frame' is expected for frame number. See below for
options to use custom column names.
search_range : float or tuple
the maximum distance features can move between frames,
optionally per dimension
pos_columns : list of str, optional
Default is ['y', 'x'], or ['z', 'y', 'x'] when 'z' is present in f
t_column : str, optional
Default is 'frame'
memory : integer, optional
the maximum number of frames during which a feature can vanish,
then reappear nearby, and be considered the same particle. 0 by default.
predictor : function, optional
Improve performance by guessing where a particle will be in
the next frame.
For examples of how this works, see the "predict" module.
adaptive_stop : float, optional
If not None, when encountering an oversize subnet, retry by progressively
reducing search_range until the subnet is solvable. If search_range
becomes <= adaptive_stop, give up and raise a SubnetOversizeException.
adaptive_step : float, optional
Reduce search_range by multiplying it by this factor.
neighbor_strategy : {'KDTree', 'BTree'}
algorithm used to identify nearby features. Default 'KDTree'.
link_strategy : {'recursive', 'nonrecursive', 'numba', 'hybrid', 'drop', 'auto'}
algorithm used to resolve subnetworks of nearby particles
'auto' uses hybrid (numba+recursive) if available
'drop' causes particles in subnetworks to go unlinked
dist_func : function, optional
a custom distance function that takes two 1D arrays of coordinates and
returns a float. Must be used with the 'BTree' neighbor_strategy.
to_eucl : function, optional
function that transforms a N x ndim array of positions into coordinates
in Euclidean space. Useful for instance to link by Euclidean distance
starting from radial coordinates. If search_range is anisotropic, this
parameter cannot be used.
Returns
-------
DataFrame with added column 'particle' containing trajectory labels.
The t_column (by default: 'frame') will be coerced to integer.
See also
--------
link_iter
Notes
-----
This is an implementation of the Crocker-Grier linking algorithm.
[1]_
References
----------
.. [1] Crocker, J.C., Grier, D.G. http://dx.doi.org/10.1006/jcis.1996.0217
"""
if pos_columns is None:
pos_columns = guess_pos_columns(f)
# copy the dataframe
f = f.copy()
# coerce t_column to integer type
if not np.issubdtype(f[t_column].dtype, np.integer):
f[t_column] = f[t_column].astype(np.integer)
# sort on the t_column
pandas_sort(f, t_column, inplace=True)
coords_iter = coords_from_df(f, pos_columns, t_column)
ids = []
for i, _ids in link_iter(coords_iter, search_range, verbose=verbose, **kwargs):
ids.extend(_ids)
f['particle'] = ids
return f | 425f7ffe9bcda4700bc77e74c2e956f27f22d521 | 3,653,225 |
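An illustrative call of the `link` function above; since this is the trackpy linker, its module-level helpers (`link_iter`, `coords_from_df`, etc.) are assumed to be importable alongside it. The two-particle DataFrame is made up and the printed labels are indicative.

```python
import pandas as pd

f = pd.DataFrame({
    "x":     [0.0, 0.1, 10.0, 10.2],
    "y":     [0.0, 0.1,  5.0,  5.1],
    "frame": [0,   1,    0,    1],
})
linked = link(f, search_range=1.0)
print(linked["particle"].tolist())  # e.g. [0, 1, 0, 1] -- two trajectories
```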
def get_classifier(opt, input_dim):
"""
Return a tuple with the ML classifier to be used and its hyperparameter
options (in dict format)."""
if opt == 'RF':
ml_algo = RandomForestClassifier
hyperparams = {
'n_estimators': [100],
'max_depth': [None, 10, 30, 50, 100],
'min_samples_split': [2, 10, 50, 100],
'random_state': [42],
'n_jobs': [-1],
}
elif opt == 'GBDT':
ml_algo = LGBMClassifier
hyperparams = {
'boosting_type': ['gbdt'],
'n_estimators': [100],
'max_depth': [-1, 10, 30, 50, 100],
'num_leaves': [2, 3, 5, 10, 50],
'learning_rate': [0.001, 0.01, 0.1],
'class_weight': [None, 'balanced'],
'random_state': [42],
'n_jobs': [-1],
}
elif opt == 'LR':
ml_algo = LogisticRegression
hyperparams = {
'solver': ['newton-cg', 'lbfgs', 'saga'],
'C': [0.0001, 0.001, 0.01],
'class_weight': [None, 'balanced'],
'random_state': [42],
'n_jobs': [-1],
}
elif opt == 'GNB':
ml_algo = GaussianNB
hyperparams = {
'var_smoothing': [10**-i for i in range(2, 15)],
}
elif opt == 'SVM':
ml_algo = SVC
hyperparams = {
'probability': [True],
'C': [0.01, 0.1, 1, 10],
'gamma': [0.001, 0.01, 0.1, 1],
}
elif opt == 'NN':
ml_algo = KerasClassifier(get_nn_model(input_dim), epochs=30, verbose=0)
hyperparams = {}
else:
raise ValueError(f'{opt} is an invalid classifier name.')
return ml_algo, hyperparams | a522cab05958023dd4239e4ec2b136d2510aec1b | 3,653,226 |
def list_spiders_endpoint():
"""It returns a list of spiders available in the SPIDER_SETTINGS dict
.. version 0.4.0:
endpoint returns the spidername and endpoint to run the spider from
"""
spiders = {}
for item in app.config['SPIDER_SETTINGS']:
spiders[item['endpoint']] = 'URL: ' + request.url_root + 'run-spider/' + item['endpoint']
return jsonify(endpoints=spiders) | 71e7448a621565b540c8ade1dae04d8ef88d5fd2 | 3,653,227 |
def plot3dOnFigure(ax, pixels, colors_rgb,axis_labels=list("RGB"), axis_limits=((0, 255), (0, 255), (0, 255))):
"""Plot pixels in 3D."""
# Set axis limits
ax.set_xlim(*axis_limits[0])
ax.set_ylim(*axis_limits[1])
ax.set_zlim(*axis_limits[2])
# Set axis labels and sizes
ax.tick_params(axis='both', which='major', labelsize=14, pad=8)
ax.set_xlabel(axis_labels[0], fontsize=16, labelpad=16)
ax.set_ylabel(axis_labels[1], fontsize=16, labelpad=16)
ax.set_zlabel(axis_labels[2], fontsize=16, labelpad=16)
# Plot pixel values with colors given in colors_rgb
ax.scatter(
pixels[:, :, 0].ravel(),
pixels[:, :, 1].ravel(),
pixels[:, :, 2].ravel(),
c=colors_rgb.reshape((-1, 3)), edgecolors='none')
return ax | 067219abba7f77f7c4fbb4404ff16a3f5192f7cd | 3,653,228 |
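A usage sketch for the 3-D pixel scatter helper, assuming matplotlib and numpy are available (neither is imported in the record above); the 16x16 random image is purely illustrative.

```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3D projection)

pixels = np.random.randint(0, 255, (16, 16, 3)).astype(np.uint8)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
plot3dOnFigure(ax, pixels, pixels / 255.0)
plt.show()
```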
from typing import Optional
from typing import Dict
from typing import Any
def _get_slice_predictions(
model: ModelBridge,
param_name: str,
metric_name: str,
generator_runs_dict: TNullableGeneratorRunsDict = None,
relative: bool = False,
density: int = 50,
slice_values: Optional[Dict[str, Any]] = None,
fixed_features: Optional[ObservationFeatures] = None,
trial_index: Optional[int] = None,
) -> SlicePredictions:
"""Computes slice prediction configuration values for a single metric name.
Args:
model: ModelBridge that contains model for predictions
param_name: Name of parameter that will be sliced
metric_name: Name of metric to plot
generator_runs_dict: A dictionary {name: generator run} of generator runs
whose arms will be plotted, if they lie in the slice.
relative: Predictions relative to status quo
density: Number of points along slice to evaluate predictions.
slice_values: A dictionary {name: val} for the fixed values of the
other parameters. If not provided, then the status quo values will
be used if there is a status quo, otherwise the mean of numeric
parameters or the mode of choice parameters. Ignored if
fixed_features is specified.
fixed_features: An ObservationFeatures object containing the values of
features (including non-parameter features like context) to be set
in the slice.
    Returns: Configuration values for AxPlotConfig.
"""
if generator_runs_dict is None:
generator_runs_dict = {}
parameter = get_range_parameter(model, param_name)
grid = get_grid_for_parameter(parameter, density)
plot_data, raw_data, cond_name_to_parameters = get_plot_data(
model=model,
generator_runs_dict=generator_runs_dict,
metric_names={metric_name},
fixed_features=fixed_features,
)
if fixed_features is not None:
slice_values = fixed_features.parameters
else:
fixed_features = ObservationFeatures(parameters={})
fixed_values = get_fixed_values(model, slice_values, trial_index)
prediction_features = []
for x in grid:
predf = deepcopy(fixed_features)
predf.parameters = fixed_values.copy()
predf.parameters[param_name] = x
prediction_features.append(predf)
f, cov = model.predict(prediction_features)
f_plt = f[metric_name]
sd_plt = np.sqrt(cov[metric_name][metric_name])
# pyre-fixme[7]: Expected `Tuple[PlotData, List[Dict[str, Union[float, str]]],
# List[float], np.ndarray, np.ndarray, str, str, bool, Dict[str, Union[None, bool,
# float, int, str]], np.ndarray, bool]` but got `Tuple[PlotData, Dict[str,
# Dict[str, Union[None, bool, float, int, str]]], List[float], List[Dict[str,
# Union[float, str]]], np.ndarray, str, str, bool, Dict[str, Union[None, bool,
# float, int, str]], typing.Any, bool]`.
return (
plot_data,
cond_name_to_parameters,
f_plt,
raw_data,
grid,
metric_name,
param_name,
relative,
fixed_values,
sd_plt,
parameter.log_scale,
) | 2c985b824b298ddaa29b44574b8399ad07746997 | 3,653,229 |
import numpy
def ellipse(a, b, center=(0.0, 0.0), num=50):
"""Return the coordinates of an ellipse.
Parameters
----------
a : float
The semi-major axis of the ellipse.
b : float
The semi-minor axis of the ellipse.
center : 2-tuple of floats, optional
The position of the center of the ellipse;
default: (0.0, 0.0)
num : integer, optional
The number of points on the upper side of the ellipse.
The number includes the leading and trailing edges.
Thus, the total number of points will be 2 * (num - 1);
default: 50.
Returns
-------
x : numpy.ndarray
The x-coordinates of the ellipse as a 1D array of floats.
y: numpy.ndarray
The y-coordinates of the ellipse as a 1D array of floats.
"""
xc, yc = center
x_upper = numpy.linspace(xc + a, xc - a, num=num)
y_upper = b / a * numpy.sqrt(a**2 - x_upper**2)
x_lower = numpy.linspace(xc - a, xc + a, num=num)[1:-1]
y_lower = -b / a * numpy.sqrt(a**2 - x_lower**2)
x = numpy.concatenate((x_upper, x_lower))
y = numpy.concatenate((y_upper, y_lower))
return x, y | bd4d4663981a0431e40b20d38cc48a7f2476c13b | 3,653,230 |
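A small usage sketch: build a 2:1 ellipse at the origin and confirm the point count of 2 * (num - 1) described in the docstring.

```python
x, y = ellipse(2.0, 1.0, num=50)
print(len(x), len(y))   # 98 98
print(x[0], y[0])       # 2.0 0.0  (leading edge)
```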
from typing import List
import os
import shutil
def _copy_inputs(test_inputs: List[str], project_path: str) -> bool:
"""Copies all the test files into the test project directory."""
# The files are assumed to reside in the repo's 'data' directory.
print(f'# Copying inputs (from "${{PWD}}/{_DATA_DIRECTORY}")...')
expected_prefix: str = f"{_DATA_DIRECTORY}/"
for test_input in test_inputs:
print(f"# + {test_input}")
if not test_input.startswith(expected_prefix):
print("! FAILURE")
print(f'! Input file {test_input} must start with "{expected_prefix}"')
return False
if not os.path.isfile(test_input):
print("! FAILURE")
print(f"! Missing input file {test_input} ({test_input})")
return False
# Looks OK, copy it
shutil.copy(test_input, project_path)
print("# Copied")
return True | b801c4a7f42b16b8e6428aaf0889df906d3692a2 | 3,653,231 |
import os
def pinghost(host):
"""
Ping target with a 1-second timeout limit
:param str host: Destination to reach. IP address or domain name
:returns: True if reached, otherwise False
"""
host = str(host).split(':')[0] # leave off the port if exists
# print "Pinging"
if os.name == 'posix':
target = "ping -W1 -c 1 " + host + " > /dev/null 2>&1 "
else:
target = "ping " + host + " -w 1000 -n 1 > nul 2>&1"
response = os.system(target)
# Note:original response is 1 for fail; 0 for success; so we flip it
return not response | f0e6d84edf1093580159d08359bfd61adeb3b987 | 3,653,232 |
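An example call; the address below is just a public resolver used for illustration, and the port suffix is stripped by the function before pinging.

```python
if pinghost("8.8.8.8:53"):
    print("host is reachable")
else:
    print("no response within 1 second")
```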
from typing import List
def get_trade_factors(name: str,
mp: float,
allow_zero: bool,
long_open_values: List,
long_close_values: List,
short_open_values: List = None,
short_close_values: List = None) -> dict:
"""获取指定 name 下的交易因子
:param allow_zero: 是否使用基础型
:param name: 因子系统的名称
:param mp: 单个标的最大允许持仓,小于0表示仓位百分比,大于0表示手数
:param long_open_values: 开多因子值
:param long_close_values: 平多因子值
:param short_open_values: 开空因子值
:param short_close_values: 平空因子值
:return: 因子交易系统
example:
===================
>>> factors = get_trade_factors(name="日线笔结束", long_open_values=['BDE'], long_close_values=['BUE'])
"""
if not short_close_values:
short_close_values = []
if not short_open_values:
short_open_values = []
def __is_match(v, x):
if allow_zero:
if v in x.name:
return 1
else:
return 0
else:
if v in x.name and "0" not in x.name:
return 1
else:
return 0
long_open_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in long_open_values]) > 0]
long_close_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in long_close_values]) > 0]
short_open_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in short_open_values]) > 0]
short_close_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in short_close_values]) > 0]
factors_ = {
"name": name,
"version": factors_all[name].__name__,
"mp": mp,
"long_open_factors": long_open_factors,
"long_close_factors": long_close_factors,
"short_open_factors": short_open_factors,
"short_close_factors": short_close_factors,
}
return factors_ | 14a7a8c0968e85f996e9c1e8f473be142c66759b | 3,653,233 |
def mbstrlen(src):
"""Return the 'src' string (Multibytes ASCII string) length.
:param src: the source string
"""
try:
return len(src.decode("utf8", errors = "replace"))
    except Exception as err:
LOG.error("String convert issue %s", err)
return len(src) | 8b2f64b2791eebf898d3bf8104d93d86dcdd53a3 | 3,653,234 |
def adapted_border_postprocessing(border_prediction, cell_prediction):
"""
    :param border_prediction: 3-channel softmax prediction (background, cell/seed, border)
    :param cell_prediction: cell probability map
    :return: instance label image, its RGB overlay, and the binary border prediction
"""
prediction_border_bin = np.argmax(border_prediction, axis=-1)
cell_prediction = cell_prediction > 0.5
    seeds = border_prediction[:, :, 1] * (1 - border_prediction[:, :, 2]) > 0.5  # Subtract borders from cell seed
seeds = measure.label(seeds, background=0)
prediction_instance = watershed(image=cell_prediction,
markers=seeds,
mask=cell_prediction,
watershed_line=False,
)
prediction_instance = measure.label(prediction_instance, background=0)
colors = get_colors()
prediction_instance_rgb = label2rgb(prediction_instance, colors=colors, kind='overlay', bg_label=0)
prediction_instance = np.expand_dims(prediction_instance, axis=-1)
prediction_border_bin = np.expand_dims(prediction_border_bin, axis=-1)
return prediction_instance.astype(np.uint16), prediction_instance_rgb.astype(np.uint8), prediction_border_bin.astype(np.uint8) | 4e74c1a71fb5c5f90d54735fa3af241461b48ebb | 3,653,235 |
import numpy as np
def calc_bonding_volume(rc_klab, dij_bar, rd_klab=None, reduction_ratio=0.25):
"""
Calculate the association site bonding volume matrix
Dimensions of (ncomp, ncomp, nbeads, nbeads, nsite, nsite)
Parameters
----------
rc_klab : numpy.ndarray
This matrix of cutoff distances for association sites for each site type in each group type
dij_bar : numpy.ndarray
Component averaged hard sphere diameter
rd_klab : numpy.ndarray, Optional, default=None
Position of association site in each group (nbead, nbead, nsite, nsite)
reduction_ratio : float, Optional, default=0.25
Reduced distance of the sites from the center of the sphere of interaction. This value is used when site position, rd_klab is None
Returns
-------
Kijklab : numpy.ndarray
Matrix of binding volumes
"""
ncomp = len(dij_bar)
nbead, _, nsite, _ = np.shape(rc_klab)
Kijklab = np.zeros((ncomp, ncomp, nbead, nbead, nsite, nsite))
for i in range(ncomp):
for j in range(ncomp):
for k in range(nbead):
for l in range(nbead):
for a in range(nsite):
for b in range(nsite):
if rc_klab[k, l, a, b] != 0:
                                if rd_klab is None:
rd = reduction_ratio * dij_bar[i, j]
else:
rd = rd_klab[k, l, a, b]
tmp0 = np.pi * dij_bar[i, j] ** 2 / (18 * rd ** 2)
tmp11 = np.log(
(rc_klab[k, l, a, b] + 2 * rd) / dij_bar[i, j]
)
tmp12 = (
6 * rc_klab[k, l, a, b] ** 3
+ 18 * rc_klab[k, l, a, b] ** 2 * rd
- 24 * rd ** 3
)
tmp21 = rc_klab[k, l, a, b] + 2 * rd - dij_bar[i, j]
tmp22 = (
22 * rd ** 2
- 5 * rd * rc_klab[k, l, a, b]
- 7 * rd * dij_bar[i, j]
- 8 * rc_klab[k, l, a, b] ** 2
+ rc_klab[k, l, a, b] * dij_bar[i, j]
+ dij_bar[i, j] ** 2
)
Kijklab[i, j, k, l, a, b] = tmp0 * (
tmp11 * tmp12 + tmp21 * tmp22
)
return Kijklab | cf154af6287286c19d606a2324c548f70f90121b | 3,653,236 |
def scale_enum(anchor, scales):
"""Enumerate a set of anchors for each scale wrt an anchor.
"""
w_w, h_h, x_ctr, y_ctr = genwhctrs(anchor)
w_s = w_w * scales
h_s = h_h * scales
anchors = makeanchors(w_s, h_s, x_ctr, y_ctr)
return anchors | 8de95fc6966133a74f10318f23e97babcb36d5cd | 3,653,237 |
def L1():
"""
Graph for computing 'L1'.
"""
graph = beamline(scatter=True)
for node in ['scattered_beam', 'two_theta', 'L2', 'Ltotal']:
del graph[node]
return graph | 1bd17365107740a41d88ac3825ef2aca412bb616 | 3,653,238 |
import nltk
import numpy as np
def _highlight_scoring(
original_example, subset_adversarial_result, adversarial_span_dict
):
"""
Calculate the highlighting score using classification results of adversarial examples
:param original_example:
:param subset_adversarial_result:
:param adversarial_span_dict:
"""
original_utterance = " ".join(nltk.word_tokenize(original_example[1]))
original_idx = original_example[0]
original_intent = original_example[3]
original_confidence = original_example[4]
original_position = original_example[6]
tokens = original_utterance.split(" ")
highlight = np.zeros(len(tokens), dtype="float32")
for idx in range(len(subset_adversarial_result)):
adversarial_example = subset_adversarial_result.iloc[idx]
if not adversarial_example["top_predicts"]:
continue
predict_dict = dict()
predict_intent_list = list()
for prediction in adversarial_example["top_predicts"]:
predict_dict[prediction["intent"]] = prediction["confidence"]
predict_intent_list.append(prediction["intent"])
if original_intent in predict_dict:
adversarial_position = list(predict_dict.keys()).index(original_intent)
adversarial_confidence = predict_dict[original_intent]
else:
adversarial_position = len(list(predict_dict.keys()))
adversarial_confidence = 0
start, end = adversarial_span_dict[
adversarial_example["utterance"] + "_" + str(original_idx)
]
highlight = _scoring_function(
highlight,
original_position,
adversarial_position,
original_confidence,
adversarial_confidence,
start,
end,
)
return highlight | 788f903fe471ef539fe337c79858c04468ae3137 | 3,653,239 |
def server_hello(cmd, response):
"""Test command
"""
return response | 7e0cc03d1b64afb1a4fc44264096e6888ddb5df2 | 3,653,240 |
import os
def applyRigidAlignment(outDir, refFile, inDataListSeg, inDataListImg=[], icp_iterations=200):
"""
    This function takes in file lists (binary and raw) and makes the
    size and spacing the same as the reference.
"""
isoValue = 1e-20
antialias_iterations = 30
print("\n############# Rigidly Align #############")
# create output dirs
segoutDir = os.path.join(outDir, 'segmentations') if inDataListImg else outDir
if not os.path.exists(segoutDir):
os.makedirs(segoutDir)
if inDataListImg:
rawoutDir = os.path.join(outDir, 'images')
if not os.path.exists(rawoutDir):
os.makedirs(rawoutDir)
# apply rigid alignment
outSegDataList = []
outRawDataList = []
# get reference image
refImg = Image(refFile)
refImg.antialias(antialias_iterations)
for i in range(len(inDataListSeg)):
segoutname = rename(inDataListSeg[i], segoutDir, 'aligned')
outSegDataList.append(segoutname)
if inDataListImg:
rawoutname = rename(inDataListImg[i], rawoutDir, 'aligned')
outRawDataList.append(rawoutname)
# resize images to reference images
img = Image(inDataListSeg[i])
img.antialias(antialias_iterations)
rigidTransform = img.createTransform(refImg, TransformType.IterativeClosestPoint, isoValue, icp_iterations)
img.applyTransform(rigidTransform, refImg.origin(), refImg.dims(), refImg.spacing(), refImg.coordsys(), InterpolationType.Linear).binarize().write(segoutname)
if inDataListImg:
img = Image(inDataListImg[i])
img.applyTransform(rigidTransform, refImg.origin(), refImg.dims(), refImg.spacing(), refImg.coordsys(), InterpolationType.Linear).write(rawoutname)
return [outSegDataList, outRawDataList] if inDataListImg else outSegDataList | 9f76afc4acad994aaa3f2ee57304401abfe27eaf | 3,653,241 |
def test_vectorised_likelihood_not_vectorised_error(model, error):
"""
Assert the value is False if the likelihood is not vectorised and raises
an error.
"""
def dummy_likelihood(x):
if hasattr(x, '__len__'):
raise error
else:
return np.log(np.random.rand())
model._vectorised_likelihood = None
model.log_likelihood = MagicMock(side_effect=dummy_likelihood)
model.new_point = MagicMock(return_value=np.random.rand(10))
out = Model.vectorised_likelihood.__get__(model)
assert model._vectorised_likelihood is False
assert out is False | 1968be0c2ba147147e2ea5d4443c8bc87050d218 | 3,653,242 |
import numpy as np
def display_timestamps_pair(time_m_2):
"""Takes a list of the following form: [(a1, b1), (a2, b2), ...] and
returns a string (a_mean+/-a_error, b_mean+/-b_error).
"""
if len(time_m_2) == 0:
return '(empty)'
time_m_2 = np.array(time_m_2)
return '({}, {})'.format(
display_timestamps(time_m_2[:, 0]),
display_timestamps(time_m_2[:, 1]),
) | b8bb0fa727c087a6bc1761d55e55143a12693d1e | 3,653,243 |
import numpy as np
def get_legendre(degree, length):
"""
    Produces the Legendre polynomials of order `degree`.
Parameters
----------
degree : int
Highest order desired.
length : int
Number of samples of the polynomials.
Returns
-------
legendre : np.ndarray
A `degree`*`length` array with all the polynomials up to order `degree`
"""
def _bonnet(d, x):
if(d == 0):
return np.ones_like(x)
elif(d == 1):
return x
else:
return ((2*d-1)*x*_bonnet(d-1, x)-(d-1)*_bonnet(d-2, x))/d
x = np.linspace(-1, 1, length)
legendre = np.empty([length, degree+1])
for n in range(degree+1):
legendre[:, n] = _bonnet(n, x)
return legendre | 5f939c7d759678f6c686c84b074c4ac973df8255 | 3,653,244 |
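A usage sketch: sample the first four Legendre polynomials on 101 points and check the endpoint values P_n(1) = 1.

```python
L = get_legendre(3, 101)
print(L.shape)   # (101, 4)
print(L[-1])     # [1. 1. 1. 1.]
```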
import os
def _get_table_names():
"""Gets an alphabetically ordered list of table names from facet_fields.csv.
Table names are fully qualified: <project id>:<dataset id>:<table name>
"""
config_path = os.path.join(app.app.config['DATASET_CONFIG_DIR'],
'bigquery.json')
table_names = _parse_json_file(config_path)['table_names']
table_names.sort()
return table_names | 3684fa80f52aa3a2d61c6843992976870ae2bd59 | 3,653,245 |
def _get_controller_of(pod):
"""Get a pod's controller's reference.
This uses the pod's metadata, so there is no guarantee that
the controller object reference returned actually corresponds to a
controller object in the Kubernetes API.
Args:
- pod: kubernetes pod object
Returns: the reference to a controller object
"""
if pod["metadata"].get("ownerReferences"):
for owner_ref in pod["metadata"]["ownerReferences"]:
if owner_ref.get("controller"):
return owner_ref
return None | 9c9e58e2fc49729c618af2c5bb9b4d033d90a831 | 3,653,246 |
from typing import Any
from typing import Dict
from typing import List
def proxify_device_objects(
obj: Any,
proxied_id_to_proxy: Dict[int, ProxyObject],
found_proxies: List[ProxyObject],
):
""" Wrap device objects in ProxyObject
Search through `obj` and wraps all CUDA device objects in ProxyObject.
It uses `proxied_id_to_proxy` to make sure that identical CUDA device
objects found in `obj` are wrapped by the same ProxyObject.
Parameters
----------
obj: Any
Object to search through or wrap in a ProxyObject.
proxied_id_to_proxy: Dict[int, ProxyObject]
Dict mapping the id() of proxied objects (CUDA device objects) to
their proxy and is updated with all new proxied objects found in `obj`.
found_proxies: List[ProxyObject]
List of found proxies in `obj`. Notice, this includes all proxies found,
including those already in `proxied_id_to_proxy`.
Returns
-------
ret: Any
A copy of `obj` where all CUDA device objects are wrapped in ProxyObject
"""
return dispatch(obj, proxied_id_to_proxy, found_proxies) | 6d410245624d2992e37b5bce1832d7326caf4fe2 | 3,653,247 |
import numpy as np
def monotonicity(x, rounding_precision = 3):
    """Calculates a monotonicity metric in [0, 1] for a given array.\nFor an array of length n, monotonicity is calculated as follows:\nmonotonicity=abs[(num. positive gradients)/(n-1)-(num. negative gradients)/(n-1)]."""
n = x.shape[0]
    grad = np.diff(x)  # n-1 first differences, matching the documented (n-1) denominators
pos_grad = np.sum(grad>0)
neg_grad = np.sum(grad<0)
monotonicity = np.abs( pos_grad/(n-1) - neg_grad/(n-1) )
return np.round(monotonicity, rounding_precision) | 3ff9c37975502cb12b9e2839a5f5580412084f8c | 3,653,248 |
import subprocess
def get_cluster_cids():
"""return list of CIDs with pin types"""
output = subprocess.check_output([
'docker-compose', 'exec', '-T', 'cluster', 'ipfs-cluster-ctl', 'pin',
'ls'
])
return [
'-'.join([l.split()[0], l.split()[-1].lower()])
for l in output.decode('utf-8').splitlines()
] | c38e2742fa0476e2240d9759c6d8525a3add083b | 3,653,249 |
import os
import getpass
def download(homework, version="latest", redownload=False):
"""Download data files for the specified datasets. Defaults to downloading latest version on server.
Parameters:
homework (str): The name of the dataset to download data for, or "all" to download data for all datasets
version (str, optional): Which version of the data files to download. Defaults to latest on server.
redownload (bool, optional): Whether to redownload the data files, even if that version of the data is already downloaded. Default False.
Returns:
bool: Indicates whether download was successful.
"""
# Process the optional "all" parameter
if homework == "all":
homeworks = [
"bio462_hw1",
"bio462_hw2",
"bio462_hw3",
"bio462_hw4",
"bio462_hw5",
"bio462_hw6"
]
overall_result = True
for homework in homeworks:
if not download(homework, redownload=redownload):
overall_result = False
return overall_result
# Get our dataset path
homework = homework.lower()
dataset_path = get_dataset_path(homework)
# Update the index
update_index(homework)
# Load the index
index = get_index(homework)
# Validate the version number, including parsing if it's "latest"
version = validate_version(version, homework, use_context="download")
# Construct the path to the directory for this version
version_path = os.path.join(dataset_path, f"{homework}_v{version}")
# See if they've downloaded this version before. Get list of files to download.
version_index = index.get(version)
if os.path.isdir(version_path):
if redownload:
files_to_download = list(version_index.keys())
else:
files_to_download = []
for data_file in version_index.keys():
# Compare the server and local hashes, to make sure there was no data corruption
file_path = os.path.join(version_path, data_file)
if os.path.isfile(file_path):
file_index = version_index.get(data_file)
server_hash = file_index.get("hash")
local_hash = hash_file(file_path)
if local_hash == server_hash:
continue
files_to_download.append(data_file)
if len(files_to_download) == 0:
return True
else:
os.mkdir(version_path)
files_to_download = list(version_index.keys())
# Download the files
password_protected_datasets = [
"gbm",
"lscc"
]
password = None
total_files = len(files_to_download)
for data_file in files_to_download:
if (homework in password_protected_datasets) and (password is None):
password = getpass.getpass(prompt=f'Password for {homework} homework: ') # We manually specify the prompt parameter so it shows up in Jupyter Notebooks
print("\033[F", end='\r') # Use an ANSI escape sequence to move cursor back up to the beginning of the last line, so in the next line we can clear the password prompt
print("\033[K", end='\r') # Use an ANSI escape sequence to print a blank line, to clear the password prompt
file_index = version_index.get(data_file)
server_hash = file_index.get("hash")
file_url = file_index.get("url")
file_path = os.path.join(version_path, data_file)
file_number = files_to_download.index(data_file) + 1
downloaded_path = download_file(file_url, file_path, server_hash, password=password, file_message=f"{homework} v{version} data files", file_number=file_number, total_files=total_files)
while downloaded_path == "wrong_password":
password = getpass.getpass(prompt="Wrong password. Try again: ")
print("\033[F", end='\r') # Use an ANSI escape sequence to move cursor back up to the beginning of the last line, so in the next line we can clear the password prompt
print("\033[K", end='\r') # Use an ANSI escape sequence to print a blank line, to clear the password prompt
downloaded_path = download_file(file_url, file_path, server_hash, password=password, file_message=f"{homework} v{version} data files", file_number=file_number, total_files=total_files)
return True | a866e80848caa53c7abee7a3074b28d8d56e32bf | 3,653,250 |
import attrs
def parse_value_namedobject(tt):
"""
<!ELEMENT VALUE.NAMEDOBJECT (CLASS | (INSTANCENAME, INSTANCE))>
"""
check_node(tt, 'VALUE.NAMEDOBJECT')
k = kids(tt)
if len(k) == 1:
object = parse_class(k[0])
elif len(k) == 2:
path = parse_instancename(kids(tt)[0])
object = parse_instance(kids(tt)[1])
object.path = path
else:
raise ParseError('Expecting one or two elements, got %s' %
repr(kids(tt)))
return (name(tt), attrs(tt), object) | ecb507ac9b0c3fdfbec19f807fba06236e21d7c5 | 3,653,251 |
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload Dyson cloud."""
# Nothing needs clean up
return True | 38a274e90c5fadc277e640cd6cc5442d5070dfd6 | 3,653,252 |
import numpy as np
def quat_correct(quat):
""" Converts quaternion to minimize Euclidean distance from previous quaternion (wxyz order) """
for q in range(1, quat.shape[0]):
if np.linalg.norm(quat[q-1] - quat[q], axis=0) > np.linalg.norm(quat[q-1] + quat[q], axis=0):
quat[q] = -quat[q]
return quat | fc492998c5bdf2cf3b1aacbd42de72618bd74c01 | 3,653,253 |
import cv2
import numpy as np
def vis_bbox(im, dets):
"""Visual debugging of detections."""
for i in range(dets.shape[0]):
bbox = tuple(int(np.round(x)) for x in dets[i, :4])
# class_name = CLASS_NAME[int(dets[i, 4]) - 1]
class_name = ' '
score = 0.99
cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)
cv2.putText(im, '%s: %.3f' % (class_name, score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 255), thickness=1)
return im | c9bd2edb899d2a476eaff9bbbf91f1008e9d1c33 | 3,653,254 |
def parse_record1(raw_record):
"""Parse raw record and return it as a set of unique symbols without \n"""
return set(raw_record) - {"\n"} | 4ffd3ebd0aaa17ddd42baf3b9d44614784c8ff33 | 3,653,255 |
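A tiny worked example of the record parser above.

```python
print(sorted(parse_record1("abba\ncd\n")))  # ['a', 'b', 'c', 'd']
```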
def execute_contract_creation(
laser_evm, contract_initialization_code, contract_name=None
) -> Account:
""" Executes a contract creation transaction from all open states"""
# TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here
open_states = laser_evm.open_states[:]
del laser_evm.open_states[:]
new_account = laser_evm.world_state.create_account(
0, concrete_storage=True, dynamic_loader=None
)
if contract_name:
new_account.contract_name = contract_name
for open_world_state in open_states:
next_transaction_id = get_next_transaction_id()
transaction = ContractCreationTransaction(
open_world_state,
BitVec("creator{}".format(next_transaction_id), 256),
next_transaction_id,
new_account,
Disassembly(contract_initialization_code),
[],
BitVec("gas_price{}".format(next_transaction_id), 256),
BitVec("call_value{}".format(next_transaction_id), 256),
BitVec("origin{}".format(next_transaction_id), 256),
CalldataType.SYMBOLIC,
)
_setup_global_state_for_execution(laser_evm, transaction)
laser_evm.exec(True)
return new_account | ef1f0db204554c874c3aeea1ca6aa89ef7454d89 | 3,653,256 |
import re
def isValid(text):
"""
Returns True if the input is related to the meaning of life.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\byour awesome\b', text, re.IGNORECASE)) | c6e4275d53cd632b4f5e255aa62b69c80fd37794 | 3,653,257 |
def list_shared_with(uri, async_req=False):
"""Return array sharing policies"""
(namespace, array_name) = split_uri(uri)
api_instance = client.client.array_api
try:
return api_instance.get_array_sharing_policies(
namespace=namespace, array=array_name, async_req=async_req
)
except GenApiException as exc:
raise tiledb_cloud_error.check_exc(exc) from None | 2a9eb78c14e5bdc31a3ae5bfc077219bd06c768f | 3,653,258 |
import html
def format_html_data_table(dataframe, list_of_malformed, addLineBreak=False):
"""
Returns the predicted values as the data table
"""
if list_of_malformed:
list_of_malformed = str(list_of_malformed)
else:
list_of_malformed = "None"
# format numeric data into string format
for column_name in dataframe.select_dtypes(include=[np.float]).columns:
dataframe[column_name] = dataframe[column_name].apply(lambda x: "{0:.2f}%".format(x))
return html.Div([html.P("Total Number of Audio Clips : "+ str(dataframe.shape[0]),
style={"color":"white",
'text-decoration':'underline'}),
html.P("Error while prediction: " + list_of_malformed,
style={"color":"white"})] + \
([html.Br()] if addLineBreak else []) + \
[html.Hr(),
dash_table.DataTable(id='datatable-interactivity-predictions',
columns=[{"name": format_label_name(i),
"id": i,
"deletable": True} for i in dataframe.columns],
data=dataframe.to_dict("rows"),
style_header={'backgroundColor': 'rgb(30, 30, 30)',
"fontWeight": "bold",
'border': '1px solid white'},
style_cell={'backgroundColor': 'rgb(50, 50, 50)',
'color': 'white',
'whiteSpace':'normal',
'maxWidth': '240px'},
style_table={"maxHeight":"350px",
"overflowY":"scroll",
"overflowX":"auto"}),
html.Hr()] + \
([html.Br()] if addLineBreak else [])) | cc345d2cb87ddf7905d0d9a62cc6cd61b92ddc51 | 3,653,259 |
import math
def colorDistance(col1, col2):
"""Returns a number between 0 and root(3) stating how similar
two colours are - distance in r,g,b, space. Only used to find
names for things."""
return math.sqrt(
(col1.red - col2.red)**2 +
(col1.green - col2.green)**2 +
(col1.blue - col2.blue)**2
) | ef18dede8312f78b4ba4258e87d4630863f1243c | 3,653,260 |
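A quick sketch with a hypothetical stand-in for the expected colour objects (anything exposing red/green/blue components works):
from collections import namedtuple
Color = namedtuple("Color", "red green blue")
print(colorDistance(Color(1.0, 0.0, 0.0), Color(0.0, 1.0, 0.0)))   # sqrt(2) ~= 1.414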
from typing import Iterable
def combine_from_streaming(stream: Iterable[runtime_pb2.Tensor]) -> runtime_pb2.Tensor:
""" Restore a result of split_into_chunks into a single serialized tensor """
stream = iter(stream)
first_chunk = next(stream)
serialized_tensor = runtime_pb2.Tensor()
serialized_tensor.CopyFrom(first_chunk)
buffer_chunks = [first_chunk.buffer]
for tensor_part in stream:
buffer_chunks.append(tensor_part.buffer)
serialized_tensor.buffer = b''.join(buffer_chunks)
return serialized_tensor | af88c9eeec99c1d3d7ca9e5753b72cf09a0c6c85 | 3,653,261 |
def portageq_envvar(options, out, err):
"""
return configuration defined variables. Use envvar2 instead, this will be removed.
"""
return env_var.function(options, out, err) | 8406985ac5f5d5d4bc93ded8c1392b1fe49e9ff7 | 3,653,262 |
def create_hash_factory(hashfun, complex_types=False, universe_size=None):
"""Create a function to make hash functions
:param hashfun: hash function to use
:type hashfun: callable
:param complex_types: whether hash function supports hashing of complex types,
either through native support or through repr
:type complex_types: bool
:param universe_size: upper limit to hash value
:type universe_size: int, long
:returns: a hash factory
:rtype: callable
"""
def hash_factory(seed):
if complex_types:
if universe_size is None:
fun = lambda x: hashfun(x, seed)
else:
fun = lambda x: hashfun(x, seed) % universe_size
else:
if universe_size is None:
fun = lambda x: hashfun(hashable(x), seed)
else:
fun = lambda x: hashfun(hashable(x), seed) % universe_size
return fun
return hash_factory | 23dee13f06f754caa9f7de5a89b855adbe7313a4 | 3,653,263 |
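A usage sketch with complex_types=True, which sidesteps the external hashable() helper; the hash function passed in is just an illustrative stand-in:
make_hash = create_hash_factory(lambda x, seed: hash((x, seed)), complex_types=True, universe_size=1024)
h0, h1 = make_hash(0), make_hash(1)
print(h0("example"), h1("example"))   # two bucket indices in [0, 1024)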
def estimate_key(note_info, method="krumhansl", *args, **kwargs):
"""
Estimate key of a piece by comparing the pitch statistics of the
note array to key profiles [2]_, [3]_.
Parameters
----------
note_info : structured array, `Part` or `PerformedPart`
Note information as a `Part` or `PerformedPart` instances or
as a structured array. If it is a structured array, it has to
contain the fields generated by the `note_array` properties
of `Part` or `PerformedPart` objects. If the array contains
onset and duration information of both score and performance,
(e.g., containing both `onset_beat` and `onset_sec`), the score
information will be preferred.
method : {'krumhansl'}
Method for estimating the key. For now 'krumhansl' is the only
supported method.
args, kwargs
Positional and Keyword arguments for the key estimation method
Returns
-------
str
String representing the key name (i.e., Root(alteration)(m if minor)).
See `partitura.utils.key_name_to_fifths_mode` and
`partitura.utils.fifths_mode_to_key_name`.
References
----------
.. [2] Krumhansl, Carol L. (1990) "Cognitive foundations of musical pitch",
Oxford University Press, New York.
.. [3] Temperley, D. (1999) "What's key for key? The Krumhansl-Schmuckler
key-finding algorithm reconsidered". Music Perception. 17(1),
pp. 65--100.
"""
if method not in ("krumhansl",):
raise ValueError('For now the only valid method is "krumhansl"')
if method == "krumhansl":
kid = ks_kid
if "key_profiles" not in kwargs:
kwargs["key_profiles"] = "krumhansl_kessler"
else:
if kwargs["key_profiles"] not in VALID_KEY_PROFILES:
raise ValueError(
"Invalid key_profiles. " 'Valid options are "ks", "cmbs" or "kp"'
)
note_array = ensure_notearray(note_info)
return kid(note_array, *args, **kwargs) | af2383ab2a94cf49a93a1f00d5bf575f19e0daa0 | 3,653,264 |
def print_parsable_dstip(data, srcip, dstip):
"""Returns a parsable data line for the destination data.
:param data: the data source
:type data: dictionary
:param scrip: the source ip
:type srcip: string
:param dstip: the destination ip
:type dstip: string
:return: a line of urls and their hitcount
"""
line = []
for item in header_order:
if item in data[srcip]['targets'][dstip]:
value = data[srcip]['targets'][dstip][item]
elif item == "src":
value = srcip
elif item == "dst":
value = dstip.replace(":", "|")
else:
value = ""
if value != "":
line.append(str(value))
if 'url' in data[srcip]['targets'][dstip]:
line.append(print_parsable_urls(data[srcip]['targets'][dstip]['url']))
line = "|".join(line)
return line | 9e27733a9821e184e53f21ca38af9cdb61192743 | 3,653,265 |
def OrListSelector(*selectors) -> pyrosetta.rosetta.core.select.residue_selector.OrResidueSelector:
"""
OrResidueSelector but 2+
(not a class, but returns a Or
:param selectors:
:return:
"""
sele = pyrosetta.rosetta.core.select.residue_selector.FalseResidueSelector()
for subsele in selectors:
sele = pyrosetta.rosetta.core.select.residue_selector.OrResidueSelector(subsele, sele)
return sele | 8f4443a6ee1bbcd2e76133e6a08eea2737e01383 | 3,653,266 |
def plot_regress_exog(res, exog_idx, exog_name='', fig=None):
"""Plot regression results against one regressor.
This plots four graphs in a 2 by 2 figure: 'endog versus exog',
'residuals versus exog', 'fitted versus exog' and
'fitted plus residual versus exog'
Parameters
----------
res : result instance
result instance with resid, model.endog and model.exog as attributes
exog_idx : int
index of regressor in exog matrix
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : matplotlib figure instance
Notes
-----
This is currently very simple, no options or varnames yet.
"""
fig = utils.create_mpl_fig(fig)
if exog_name == '':
exog_name = 'variable %d' % exog_idx
#maybe add option for wendog, wexog
#y = res.endog
x1 = res.model.exog[:,exog_idx]
ax = fig.add_subplot(2,2,1)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.model.endog, 'o')
ax.set_title('endog versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,2)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.resid, 'o')
ax.axhline(y=0)
ax.set_title('residuals versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,3)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.fittedvalues, 'o')
ax.set_title('Fitted versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,4)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.fittedvalues + res.resid, 'o')
ax.set_title('Fitted plus residuals versus exog', fontsize='small')# + namestr)
fig.suptitle('Regression Plots for %s' % exog_name)
return fig | e4c7859c32892d2d8e94ff884652846f5f15f513 | 3,653,267 |
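A minimal sketch, assuming this function sits alongside statsmodels' graphics utils module (it relies on utils.create_mpl_fig):
import numpy as np
import statsmodels.api as sm
X = sm.add_constant(np.random.rand(100, 2))
y = X @ np.array([1.0, 2.0, 3.0]) + 0.1 * np.random.randn(100)
res = sm.OLS(y, X).fit()
fig = plot_regress_exog(res, exog_idx=1, exog_name="x1")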
from typing import List
def get_circles_with_admin_access(account_id: int) -> List[Circle]:
"""
SELECT
management_style,
c_name
FROM (
SELECT
'SELF_ADMIN' AS management_style,
c.management_style AS c_management_style,
c.admin_circle AS c_admin_circle,
c.created_by AS c_created_by,
c.updated_by AS c_updated_by,
c.id AS c_id,
c.created_at AS c_created_at,
c.updated_at AS c_updated_at,
c.name AS c_name,
c.description AS c_description
FROM circle AS c
JOIN circle_member ON c.id = circle_member.circle
WHERE C.management_style = 'SELF_ADMIN' AND circle_member.account = 5
UNION
SELECT
'ADMIN_CIRCLE' AS management_style,
c.management_style AS c_management_style,
c.admin_circle AS c_admin_circle,
c.created_by AS c_created_by,
c.updated_by AS c_updated_by,
c.id AS c_id,
c.created_at AS c_created_at,
c.updated_at AS c_updated_at,
c.name AS c_name,
c.description AS c_description
FROM circle AS ac
JOIN circle AS C ON c.admin_circle = ac.id
JOIN circle_member ON ac.id = circle_member.circle
WHERE c.management_style = 'ADMIN_CIRCLE' AND circle_member.account = 5
) AS anon_1
"""
ac = aliased(Circle, name="ac")
c = aliased(Circle, name="c")
self_admin = db.session.query(c). \
join(c.members). \
filter(CircleMember.account_id == account_id). \
filter(c._management_style == CircleManagementStyle.SELF_ADMIN.name)
admin_circle = db.session.query(c). \
join(ac, c.admin_circle_id == ac.id). \
join(ac.members). \
filter(c._management_style == CircleManagementStyle.ADMIN_CIRCLE.name). \
filter(CircleMember.account_id == account_id)
return self_admin.union(admin_circle).all() | ef0a24d299bdad549f9b0220e4a34499097eb19d | 3,653,268 |
def combine(arr):
""" makes overlapping sequences 1 sequence """
def first(item):
return item[0]
def second(item):
return item[1]
if len(arr) == 0 or len(arr) == 1:
return arr
sarr = []
for c, val in enumerate(arr):
sarr.append((val[0], val[1], c))
sarr = sorted(sarr, key = second)
sarr = sorted(sarr, key = first)
chains = [[sarr[0][0], sarr[0][1], [sarr[0][2]]]]
for s, e, c in sarr[1:]: #start, end, counter
if s <= chains[-1][1] +1:
chains[-1][1] = max(e, chains[-1][1])
chains[-1][2].append(c)
else:
chains.append([s, e, [c]])
return chains | b46bb7f73fa6857ed4c980bdbdff77acde64b18d | 3,653,269 |
from operator import sub
def sub_fft(f_fft, g_fft):
"""Substraction of two polynomials (FFT representation)."""
return sub(f_fft, g_fft) | a559429a4d10889be3ffa776153854248ac7a496 | 3,653,270 |
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy_demo.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names is not None:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output | 5508f1681eaa3f2c5ccb44b6329ad012f85c42e8 | 3,653,271 |
import sys
def load_parameters(model_type, parameter_file):
"""
Loads in all parameter values given in a parameter file.
Parameters:
model_type (str):
sets the type of model, which determines the exact parameter set that is needed. Possible values for
the parameter model_type are: 'full model', 'density model' and 'constant force model'.
parameter_file (str):
the path to the parameter file.
Returns:
list: the values of the parameters in the order as specified in the <model_type>_parameter_names lists.
"""
if model_type == 'full model':
parameters = [0.]*len(full_model_parameter_names)
parameters_found = [0]*len(full_model_parameter_names)
parameter_names = full_model_parameter_names
elif model_type == 'density model':
parameters = [0.]*len(density_model_parameter_names)
parameters_found = [0]*len(density_model_parameter_names)
parameter_names = density_model_parameter_names
else:
print("ERROR: the parameter <model_type> has to be given one of the three values: "
"'full model' or 'density model'.")
sys.exit(1)
f = open(parameter_file)
for line in f.readlines():
line_split = line.split()
try:
idx = parameter_names.index(line_split[0])
parameters_found[idx] = 1
if line_split[0] == 'N_h' or line_split[0] == 'N_max':
parameters[idx] = int(line_split[2])
else:
parameters[idx] = float(line_split[2])
except ValueError:
print("WARNING: Parameter {} cannot be interpreted for the model type "
"'{}'!".format(line_split[0], model_type))
f.close()
if 0 in parameters_found:
print("ERROR: Not all necessary parameters for the model type '{}' were defined in the given "
"file!".format(model_type))
sys.exit(1)
return parameters | 6fe50dcb668104e0dfd9e7b9c483564b9c9e36cf | 3,653,272 |
import numpy as np
import pandas as pd
def handle_dat_edge(data_all):
"""
把dat_edge个每一条记录的info拆开,然后输出,方便后续的计算
为了简化计算,忽略时间信息,把所有的月份的联系记录汇总起来
"""
def cal_multi_3(string):
s = string.split(',')
month_times = len(s)
df = list(map(lambda x: list(map(eval, x.split(':')[1].split('_'))), s))
times_sum, weight_sum = pd.DataFrame(df).sum().values
return month_times, times_sum, weight_sum
def cal_single_3(string):
times_sum, weight_sum = list(map(eval, string.split(':')[1].split('_')))
return 1, times_sum, weight_sum
length = list(map(len, map(lambda x: x.split(','), data_all['info'])))
dat_edge_single = data_all[np.array(length) == 1]
dat_edge_multi = data_all[np.array(length) > 1]
multi_pre_df = map(cal_multi_3, dat_edge_multi['info'])
multi_feature_3 = pd.DataFrame(list(multi_pre_df), columns=['month_times', 'times_sum', 'weight_sum'])
id_part = dat_edge_multi[['from_id', 'to_id']].reset_index(drop=True)
multi_result = pd.concat([id_part, multi_feature_3], axis=1)
single_pre_df = map(cal_single_3, dat_edge_single['info'])
single_feature_3 = pd.DataFrame(list(single_pre_df), columns=['month_times', 'times_sum', 'weight_sum'])
id_part = dat_edge_single[['from_id', 'to_id']].reset_index(drop=True)
single_result = pd.concat([id_part, single_feature_3], axis=1)
both_result = pd.concat([multi_result, single_result], ignore_index=True)
return both_result | 4ae92d337a70326bae87399809b920b1ad2cce1e | 3,653,273 |
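A small sketch of the implied input, where each info entry looks like 'month:times_weight' and entries are comma-separated (this format is inferred from the parsing code above):
demo = pd.DataFrame({"from_id": [1, 2], "to_id": [9, 9], "info": ["2017-11:3_12,2017-12:1_4", "2017-11:2_5"]})
print(handle_dat_edge(demo))   # first row aggregates to month_times=2, times_sum=4, weight_sum=16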
def quadraric_distortion_scale(distortion_coefficient, r_squared):
"""Calculates a quadratic distortion factor given squared radii.
The distortion factor is 1.0 + `distortion_coefficient` * `r_squared`. When
`distortion_coefficient` is negative (barrel distortion), the distorted radius
is only monotonically increasing only when
`r_squared` < r_squared_max = -1 / (3 * distortion_coefficient).
Args:
distortion_coefficient: A tf.Tensor of a floating point type. The rank can
be from zero (scalar) to r_squared's rank. The shape of
distortion_coefficient will be appended by ones until the rank equals that
of r_squared.
r_squared: A tf.Tensor of a floating point type, containing
(x/z)^2 + (y/z)^2. We use r_squared rather than r to avoid an unnecessary
sqrt, which may introduce gradient singularities. The non-negativity of
r_squared only enforced in debug mode.
Returns:
A tf.Tensor of r_squared's shape, the correction factor that should
multiply the projective coordinates (x/z) and (y/z) to apply the
distortion.
"""
return 1 + distortion_coefficient * r_squared | b8a910e4de3a6c0a3793131503ed6ede8836bc89 | 3,653,274 |
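For example, with a barrel-distortion coefficient of -0.1:
import tensorflow as tf
scale = quadraric_distortion_scale(-0.1, tf.constant([0.0, 0.25, 1.0]))   # [1.0, 0.975, 0.9]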
import subprocess
import os
def run(executable: str, *args: str):
"""
Run executable using core.process configuration, replacing bin with configured one, appending and prepending args.
"""
command_list = effective_command(executable, *args)
process = subprocess.run(command_list,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if os.name == "nt":
# On windows, there's ANSI code after output that has to be dropped...
try:
eof_index = process.stdout.index(b"\x1b[0m")
process.stdout = process.stdout[:eof_index]
except ValueError:
pass
return process.stdout | 4255a831d7d617a4f34e77c628b34b4851c8c58a | 3,653,275 |
def open_path(path, **kwargs):
"""
Parameters
----------
path: str
window: tuple
e.g. ('1990-01-01','2030-01-01')
kwargs: all other kwargs the particular file might take, see the module for details
Returns
-------
"""
info = _tools.path2info(path)
module = arm_products[info['product']]['module']
out = module.open_path(path, **kwargs)
return out | c4e87d0649dfde2139a4ecac797775309eb6a72e | 3,653,276 |
import uuid
def generate_code() -> str:
"""Generates password reset code
:return: Password reset code
:rtype: str
"""
return str(uuid.uuid4()) | bcd8377afd5598e71f8bb8eb217c3f3fd53fc5c7 | 3,653,277 |
import os
def process_file(name, files, url):
"""
Save file to shared folder on server, and return
the name of the file.
"""
def allowed_file(filename):
if "." not in filename:
return False
ext = filename.rsplit(".", 1)[1].lower()
return ext in config.ALLOWED_EXTENSIONS
if name not in files:
flash("No file part found")
return redirect(url)
file = files[name]
if not file.filename:
flash("No file selected")
return redirect(url)
if not allowed_file(file.filename):
flash("File is not allowed")
return redirect(url)
filename = secure_filename(file.filename)
filepath = os.path.join(config.INPUTS_FOLDER, filename)
file.save(filepath)
return filename | 0c054c53cb8b24b1e2ff5e684453000fbd11a5a2 | 3,653,278 |
def decode_auth_token(auth_token):
"""
Decodes the auth token
:param auth_token:
:return: integer|string
"""
try:
payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.' | 16fceb539f7aafb775b851e55f1b606a1c917cf9 | 3,653,279 |
import pickle
import os
def cached(path: str, validate: bool = False):
"""Similar to ``define``, but cache to a file.
:param path:
the path of the cache file to use
:param validate:
if `True`, always execute the function. The loaded result will be
passed to the function, when the cache exists. In that case the
function should return the value to use. If the returned value is
not identical to the loaded value, the cache is updated with the
new value.
Usage::
@cached('./cache/result')
def dataset():
...
return result
or::
@cached('./cache/result', validate=True)
def model(result=None):
if result is not None:
# running to validate ...
return result
"""
def update_cache(result):
print("save cache", path)
with open(path, "wb") as fobj:
pickle.dump(result, fobj)
def load_cache():
print("load cache", path)
with open(path, "rb") as fobj:
return pickle.load(fobj)
def decorator(func):
if os.path.exists(path):
result = load_cache()
if not validate:
return result
else:
print("validate")
new_result = func(result)
if new_result is not result:
update_cache(new_result)
return new_result
else:
print("compute")
result = func()
update_cache(result)
return result
return decorator | d4b5b861bf43294d3e5f84b57f648a2e32b6428b | 3,653,280 |
import ast
def json(*arguments):
"""
Transform *arguments parameters into JSON.
"""
return ast.Json(*arguments) | 3e3333617b63dc1b5e8e4b71ea5c2f0ea08bfff8 | 3,653,281 |
def get_default_volume_size():
"""
:returns int: the default volume size (in bytes) supported by the
backend the acceptance tests are using.
"""
default_volume_size = environ.get("FLOCKER_ACCEPTANCE_DEFAULT_VOLUME_SIZE")
if default_volume_size is None:
raise SkipTest(
"Set acceptance testing default volume size using the " +
"FLOCKER_ACCEPTANCE_DEFAULT_VOLUME_SIZE environment variable.")
return int(default_volume_size) | ec24bfb9c07add5d1a800a1aaf9db3efb8727b3d | 3,653,282 |
import functools
def set_global_user(**decorator_kwargs):
"""
Wrap a Flask blueprint view function to set the global user
``flask.g.user`` to an instance of ``CurrentUser``, according to the
information from the JWT in the request headers. The validation will also
set the current token.
This requires a flask application and request context.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
set_current_user(**decorator_kwargs)
return func(*args, **kwargs)
return wrapper
return decorator | f73a89d94d188b1c258cedca3439ab9c1c94180c | 3,653,283 |
def sample_wfreq(sample):
"""Return the Weekly Washing Frequency as a number."""
# `sample[3:]` strips the `BB_` prefix
results = session.query(Samples_Metadata.WFREQ).\
filter(Samples_Metadata.SAMPLEID == sample[3:]).all()
wfreq = np.ravel(results)
# Return only the first integer value for washing frequency
return jsonify(int(wfreq[0])) | 6cb2ee0866efc9e841143c32e10cbb8feea813bc | 3,653,284 |
import sys
import os
from typing import OrderedDict
import shutil
import json
import six
def OffsiteRestore(
source_dir,
encryption_password=None,
dir_substitution=None,
display_only=False,
ssd=False,
output_stream=sys.stdout,
preserve_ansi_escape_sequences=False,
):
"""\
Restores content created by previous Offsite backups.
"""
dir_substitutions = dir_substitution
del dir_substitution
with StreamDecorator.GenerateAnsiSequenceStream(
output_stream,
preserve_ansi_escape_sequences=preserve_ansi_escape_sequences,
) as output_stream:
with output_stream.DoneManager(
line_prefix="",
prefix="\nResults: ",
suffix="\n",
) as dm:
# Get the dirs
dirs = []
for item in os.listdir(source_dir):
fullpath = os.path.join(source_dir, item)
if not os.path.isdir(fullpath):
continue
dirs.append(fullpath)
if not dirs:
dm.stream.write(
"No subdirectories were found in '{}'.\n".format(source_dir),
)
dm.result = -1
return dm.result
dirs = sorted(dirs)
# Get the file data
file_data = OrderedDict()
hashed_filenames = {}
dm.stream.write(
"Reading file data from {}...".format(inflect.no("directory", len(dirs))),
)
with dm.stream.DoneManager(
suffix="\n",
) as dir_dm:
for index, dir in enumerate(dirs):
dir_dm.stream.write(
"'{}' ({} of {})...".format(dir, index + 1, len(dirs)),
)
with dir_dm.stream.DoneManager() as this_dir_dm:
data_filename = os.path.join(dir, DATA_FILENAME)
if not os.path.isfile(data_filename):
# See if there is compressed data to decompress
for zipped_ext in ["", ".001"]:
potential_filename = os.path.join(
dir,
"{}{}".format(OFFSITE_BACKUP_FILENAME, zipped_ext),
)
if not os.path.isfile(potential_filename):
continue
# Extract the data
temp_dir = dir + ".tmp"
FileSystem.RemoveTree(temp_dir)
FileSystem.MakeDirs(temp_dir)
this_dir_dm.stream.write("Decompressing data...")
with this_dir_dm.stream.DoneManager(
suffix="\n",
) as decompress_dm:
command_line = '7za e -y "-o{dir}"{password} "{input}"'.format(
dir=temp_dir,
input=potential_filename,
password=' "-p{}"'.format(
encryption_password,
) if encryption_password else "",
)
decompress_dm.result = Process.Execute(
command_line,
decompress_dm.stream,
)
if decompress_dm.result != 0:
return decompress_dm.result
this_dir_dm.stream.write("Removing original data...")
with this_dir_dm.stream.DoneManager():
FileSystem.RemoveTree(dir)
this_dir_dm.stream.write("Restoring compressed data...")
with this_dir_dm.stream.DoneManager():
shutil.move(temp_dir, dir)
break
if not os.path.isfile(data_filename):
this_dir_dm.stream.write(
"INFO: The file '{}' was not found in the directory '{}'.\n".format(
DATA_FILENAME,
dir,
),
)
this_dir_dm.result = 1
continue
try:
with open(data_filename) as f:
data = json.load(f)
except:
this_dir_dm.stream.write(
"ERROR: The data in '{}' is corrupt.\n".format(
data_filename,
),
)
this_dir_dm.result = -1
continue
for file_info_index, file_info in enumerate(data):
operation = file_info["operation"]
if operation not in ["add", "modify", "remove"]:
this_dir_dm.stream.write(
"ERROR: The file info operation '{}' is not valid (Index: {}).\n".format(
operation,
file_info_index,
),
)
this_dir_dm.result = -1
continue
filename = file_info["filename"]
# Check if the data is in the expected state
if operation == "add":
if filename in file_data:
this_dir_dm.stream.write(
"ERROR: Information for the file '{}' has already been added and cannot be added again (Index: {}).\n".format(
filename,
file_info_index,
),
)
this_dir_dm.result = -1
continue
elif operation in ["modify", "remove"]:
if filename not in file_data:
this_dir_dm.stream.write(
"ERROR: Information for the file '{}' was not previously provided (Index: {}).\n".format(
filename,
file_info_index,
),
)
this_dir_dm.result = -1
continue
else:
assert False, operation
# Add or remove the data
if operation in ["add", "modify"]:
hash = file_info["hash"]
if hash not in hashed_filenames:
hashed_filename = os.path.join(dir, hash)
if not os.path.isfile(hashed_filename):
this_dir_dm.stream.write(
"ERROR: Contents for the file '{}' were not found at '{}' (Index: {}).\n".format(
filename,
hashed_filename,
file_info_index,
),
)
this_dir_dm.result = -1
continue
hashed_filenames[hash] = hashed_filename
file_data[filename] = hashed_filenames[hash]
elif operation == "remove":
del file_data[filename]
else:
assert False, operation
keys = sorted(six.iterkeys(file_data))
# Perform destination substitutions (if necessary)
if dir_substitutions:
for key in keys:
for k, v in six.iteritems(dir_substitutions):
new_key = key.replace(k, v)
file_data[new_key] = file_data[key]
del file_data[key]
keys = sorted(six.iterkeys(file_data))
if display_only:
dm.stream.write(
"{} to restore...\n\n".format(inflect.no("file", len(keys))),
)
for key in keys:
dm.stream.write(" - {0:<100} <- {1}\n".format(key, file_data[key]))
return dm.result
with dm.stream.SingleLineDoneManager("Copying Files...") as copy_dm:
# ----------------------------------------------------------------------
def Execute(task_index, on_status_update):
dest = keys[task_index]
source = file_data[dest]
on_status_update(FileSystem.GetSizeDisplay(os.path.getsize(source)))
dest_dir = os.path.dirname(dest)
if not os.path.isdir(dest_dir):
try:
os.makedirs(dest_dir)
except:
# This can happen when attempting to create the dir from
# multiple threads simultaneously. If the error is something
# else, the copy statement below will raise an exception.
pass
shutil.copy2(source, dest)
# ----------------------------------------------------------------------
copy_dm.result = TaskPool.Execute(
[
TaskPool.Task("'{}' -> '{}'".format(file_data[key], key), Execute)
for key in keys
],
optional_output_stream=copy_dm.stream,
progress_bar=True,
num_concurrent_tasks=None if ssd else 1,
)
if copy_dm.result != 0:
return copy_dm.result
return dm.result | 6dbc3d047b83ba93763ee31e750e61b3400bc7d9 | 3,653,285 |
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
def mlp_hyperparameter_tuning(no_of_hidden_neurons, epoch, alpha, roh, n_iter_no_change, X_train, X_validation, y_train, y_validation):
"""
INPUT
no_of_hidden_neurons: 1D int arary contains different values of no of neurons
present in 1st hidden layer (hyperparameter)
epoch: 1D int arary contains different values of epochs (hyperparameter)
alpha: 1D float array contains different values of alphas or learning rates (hyperparameter)
roh: 1D float array contains different values of tolerence or roh (hyperparameter)
n_iter_no_change: 1D int array conatins different values of
Number of iterations with no improvement to wait before stopping fitting (hyperparameter).
X_train: 2D array of shape = (no of patterns, no of features)
X_validation: 2D array of shape = (no of patterns, no of features)
y_train: 1D array of shape = (no of patterns, )
y_validation: 1D array of shape = (no of patterns, )
OUTPUT
best_hyperparameter: a tuple (epoch, alpha, roh, n_iter_no_change) which has best accuracy on the validation set.
"""
val_acc = []
for i in range(0, epoch.shape[0]):
mlp_classifier = MLPClassifier(hidden_layer_sizes = (no_of_hidden_neurons[i],), activation = 'logistic', solver = 'sgd', learning_rate = 'constant',\
learning_rate_init = alpha[i], max_iter = epoch[i], shuffle = True, random_state = 100, tol = roh[i],\
verbose = False, early_stopping = True, n_iter_no_change = n_iter_no_change[i]).fit(X_train, y_train)
# we are taking logloss function for error calculation
predicted = mlp_classifier.predict(X_validation)
val_acc.append(accuracy_score(y_validation, predicted)*100)
# Get the maximum accuracy on validation
max_value = max(val_acc)
max_index = val_acc.index(max_value)
best_hyperparameter = (no_of_hidden_neurons[max_index], epoch[max_index], alpha[max_index], roh[max_index], n_iter_no_change[max_index])
print("Best Hyperparameter:")
print("No of neurons in the 1st hidden layer = ", no_of_hidden_neurons[max_index])
print("Epoch = ", epoch[max_index])
print("Alpha = ", alpha[max_index])
print("Roh = ", roh[max_index])
print("n_iter_no_change (Number of iterations with no improvement) = ", n_iter_no_change[max_index])
return best_hyperparameter | 3ec079bbeae32a5e7e6b80e833336ecb8662cbf1 | 3,653,286 |
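A runnable sketch on a toy dataset (the hyperparameter arrays must all have the same length; convergence warnings are expected with such small settings):
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
X, y = load_iris(return_X_y=True)
X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.3, random_state=0)
best = mlp_hyperparameter_tuning(np.array([8, 16]), np.array([200, 300]), np.array([0.01, 0.1]), np.array([1e-4, 1e-4]), np.array([10, 10]), X_tr, X_val, y_tr, y_val)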
def play(p1:list[int], p2:list[int]) -> list[int]:
"""Gets the final hand of the winning player"""
while p1 and p2:
a = p1.pop(0)
b = p2.pop(0)
if a > b:
p1 += [a, b]
else:
p2 += [b, a]
return p1 + p2 | 2a2b561474b3cd0841dcbe881e74b4767b4102b1 | 3,653,287 |
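Using the well-known example decks from Advent of Code 2020 day 22, the winner's final hand should score 306:
deck = play([9, 2, 6, 3, 1], [5, 8, 4, 7, 10])
score = sum(mult * card for mult, card in enumerate(reversed(deck), start=1))
print(score)   # 306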
import numpy as np
def oda_update_uhf(dFs, dDs, dE):
"""
ODA update:
lbd = 0.5 - dE / E_deriv
"""
if type(dFs) is not list:
raise Exception("arg1 and arg2 are list of alpha/beta matrices.")
E_deriv = np.sum(dFs[0] * dDs[0] + dFs[1] * dDs[1])
lbd = 0.5 * (1. - dE / E_deriv)
if lbd < 0 or lbd > 1:
lbd = 0.9999 if dE < 0 else 1.e-4
return lbd | fcead1536db11f80ae9eb2e912ac9857f93de669 | 3,653,288 |
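A small numeric sketch; here E_deriv = 0.08, so lbd = 0.5 * (1 + 0.05 / 0.08) = 0.8125:
dFs = [np.eye(2) * 0.2, np.eye(2) * 0.2]   # alpha/beta Fock-matrix differences
dDs = [np.eye(2) * 0.1, np.eye(2) * 0.1]   # alpha/beta density-matrix differences
print(oda_update_uhf(dFs, dDs, dE=-0.05))  # 0.8125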
def authorize(*roles):
"""Decorator that authorizes (or not) the current user
Raises an exception if the current user does not have at least
one of the listed roles.
"""
def wrapper(func):
"""wraps the protected function"""
def authorize_and_call(*args, **kwargs):
"""checks authorization and calls function if authorized"""
user = context.request.user
if user.is_active:
if user.is_administrator:
return func(*args, **kwargs)
for role in roles:
if role in user.groups:
return func(*args, **kwargs)
raise zoom.exceptions.UnauthorizedException('Unauthorized')
return authorize_and_call
return wrapper | f3fd8eb42924f8f956d0e3eae1499f64387fe96e | 3,653,289 |
import time
import math
def trunked_greedy_by_size_offset_calculation(usage_recorders,
show_detail=False):
"""
An offset calculation algorithm designed for variable-length inputs.
@ params:
usage_recorders : tensor usage recorders (name, start_op, end_op, size)
global trunk_size_list : a list of list (name, offset)
@returns:
assigned_offset : a dict indicates the offset for each tensor
assigned_trunk : a dict indicates the trunk for each tensor
"""
global gTrunkList
# descend
usage_recorders = sorted(usage_recorders,
key=lambda tup: tup[3],
reverse=False)
assigned_offset = {}
assigned_trunk = {}
new_allocate_size = 0
time_start = time.time()
for i in range(len(gTrunkList._trunks)):
gTrunkList._trunks[i]._tensor_list = []
for t in usage_recorders:
t_name = t[0]
t_size = t[3]
is_assigned = False
for trunk_id, trunk in enumerate(gTrunkList._trunks):
trunk, offset = try_fit_trunk(t, trunk)
if offset is not None:
assigned_trunk[t_name] = trunk_id
assigned_offset[t_name] = offset
# update gTrunkList
gTrunkList._trunks[trunk_id] = trunk
is_assigned = True
break
# init new trunk, trunk id should be assigned after delete useless trunk
if is_assigned is False:
trunk_size = max(DEFAULT_TRUNK_SIZE,
math.ceil((t_size * K_SCALE + 31) // 32 * 32))
new_allocate_size += trunk_size
trunk = Trunk(trunk_size)
trunk._tensor_list.append((*t, 0)) #offset @ 0
gTrunkList.appendTrunk(trunk)
# TODO
trunk_id = len(gTrunkList._trunks) - 1
assigned_trunk[t_name] = trunk_id
assigned_offset[t_name] = 0
time_end = time.time()
core_cost = time_end - time_start
used_consumption = 0
total_consumption = 0
delete_trunk_list = []
# find trunk not used -> delete_trunk_list
for trunk_id, trunk in enumerate(gTrunkList._trunks):
max_end_offset = 0
for elem in trunk._tensor_list:
max_end_offset = max(elem[4] + elem[3],
max_end_offset) # offset + size
# print("trunk id", trunk_id, " usage ",
# max_end_offset / gTrunkList._trunks[trunk_id]._size)
used_consumption += max_end_offset
if max_end_offset == 0:
delete_trunk_list.insert(0, trunk_id)
else:
total_consumption += gTrunkList._trunks[trunk_id]._size
# delete
for id in delete_trunk_list:
gTrunkList._trunks.pop(id)
# adjust trunk ids
for trunk_id, trunk in enumerate(gTrunkList._trunks):
for tensor in trunk._tensor_list:
tensor_name = tensor[0]
assigned_trunk[tensor_name] = trunk_id
if show_detail:
print("=====allocation plan====")
print("trunk_id \t size")
for i, t in enumerate(gTrunkList._trunks):
print(i, t._size)
print("tensor name \t offset")
for t in assigned_offset.items():
t_name = t[0]
print("{", t_name, assigned_trunk[t_name], assigned_offset[t_name],
"},")
# print("{\"" + t_name + "\",", assigned_offset[t_name], "},")
print("=====allocation plan====")
used_consumption = used_consumption / 1024 / 1024
total_consumption = total_consumption / 1024 / 1024
new_allocate_size = new_allocate_size / 1024 / 1024
if show_detail:
print(
f"> debug total_consumption {total_consumption} MB used_consumption {used_consumption} MB percent {used_consumption/total_consumption}"
)
return assigned_offset, assigned_trunk, gTrunkList.getInfo(), (
total_consumption, new_allocate_size) | 87c782301a4534aaffacc6aaf47a1f9b88b4ee39 | 3,653,290 |
import base64
import itertools
import six
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
def decrypt(secret, ciphertext):
"""Given the first 16 bytes of splunk.secret, decrypt a Splunk password"""
plaintext = None
if ciphertext.startswith("$1$"):
ciphertext = base64.b64decode(ciphertext[3:])
key = secret[:16]
algorithm = algorithms.ARC4(key)
cipher = Cipher(algorithm, mode=None, backend=default_backend())
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext)
chars = []
for char1, char2 in zip(plaintext[:-1], itertools.cycle("DEFAULTSA")):
chars.append(six.byte2int([char1]) ^ ord(char2))
plaintext = "".join([six.unichr(c) for c in chars])
elif ciphertext.startswith("$7$"):
ciphertext = base64.b64decode(ciphertext[3:])
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=b"disk-encryption",
iterations=1,
backend=default_backend()
)
key = kdf.derive(secret)
iv = ciphertext[:16] # pylint: disable=invalid-name
tag = ciphertext[-16:]
ciphertext = ciphertext[16:-16]
algorithm = algorithms.AES(key)
cipher = Cipher(algorithm, mode=modes.GCM(iv, tag), backend=default_backend())
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext).decode()
return plaintext | d4b0caca50d649633bb973d26bb174875d23b0e0 | 3,653,291 |
from unet_core.vessel_analysis import VesselTree
def load_skeleton(path):
"""
Load the skeleton from a pickle
"""
# Delayed import so script can be run with both Python 2 and 3
v = VesselTree()
v.load_skeleton(path)
return v.skeleton | d9632de40310dd738eb7d07966d6eb04360c3b81 | 3,653,292 |
def industries_hierarchy() -> pd.DataFrame:
"""Read the Dow Jones Industry hierarchy CSV file.
Reads the Dow Jones Industry hierarchy CSV file and returns
its content as a Pandas DataFrame. The root node has
the fcode `indroot` and an empty parent.
Returns
-------
DataFrame : A Pandas DataFrame with the columns:
* ind_fcode : string
Industry Factiva Code
* name : string
Name of the Industry
* parent : string
Factiva Code of the parent Industry
"""
ret_ind = pd.read_csv(ind_hrchy_path)
ret_ind = ret_ind.replace(np.nan, '', regex=True)
return ret_ind | 9d1de27e3e01572637e7afc729e0cd07d96b14e2 | 3,653,293 |
def ready():
""" A readiness endpoint, checks on DB health too.
:return: a 200 OK status if this server is up, and the backing DB is ready too; otherwise, a
503 "Temporarily unavailable."
"""
try:
# TODO: Move to a DAO class
client = get_mongo_client()
info = {}
for key in MONGO_HEALTH_KEYS:
info[key] = client.server_info().get(key, "null")
if info.get("ok") == 1:
info["status"] = "UP"
else:
info["status"] = "WARN"
except pymongo.errors.ServerSelectionTimeoutError as ex:
info = {"status": "DOWN", "error": str(ex)}
response = make_response(jsonify({'status': 'UP', "mongo": info}))
if info['status'] != "UP":
response.status_code = 503
return response | 526dda4fc4bffecde573b1b26cf5b33dcbd66d09 | 3,653,294 |
import typing
def setup_callback(callback: typing.Awaitable):
"""
This function is used to setup the callback.
"""
callback.is_guild = False
""" The guild of the callback. """
callback.has_permissions = []
""" The permissions of the callback. """
callback.has_roles = []
""" The roles of the callback. """
callback.checks = []
""" The checks of the callback. """
callback.check_any = False
""" The check_any of the callback. """
return callback | 4cb7849d9746166c95c96d18e27227b52160ff7a | 3,653,295 |
def prepare_for_training(ds, ds_name, conf, cache):
"""
Cache -> shuffle -> repeat -> augment -> batch -> prefetch
"""
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Resample dataset. NB: dataset is cached in resamler
if conf["resample"] and 'train' in ds_name:
ds = oversample(ds, ds_name, conf)
# Cache to SSD
elif cache:
cache_string = "{}/{}_{}_{}".format(
conf["cache_dir"], conf["img_shape"][0], conf["ds_info"], ds_name
)
ds = ds.cache(cache_string)
# Shuffle
if conf["shuffle_buffer_size"]>1:
ds = ds.shuffle(
buffer_size=conf["shuffle_buffer_size"],
seed=tf.constant(conf["seed"], tf.int64) if conf["seed"] else None
)
# Repeat forever
ds = ds.repeat()
#Augment
if conf["augment"] and "train" in ds_name:
ds = augment_ds(ds, conf, AUTOTUNE)
# Batch
ds = ds.batch(conf["batch_size"], drop_remainder=False)
# Prefetch - lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
return ds | b072a7ced028b1627288a3f2bbf0233c85afbab8 | 3,653,296 |
import tensorflow as tf
def residual3d(inp, is_training, relu_after=True, add_bn=True,
name=None, reuse=None):
""" 3d equivalent to 2d residual layer
Args:
inp (tensor[batch_size, d, h, w, channels]):
is_training (tensor[bool]):
relu_after (bool):
add_bn (bool): add bn before every relu
name (string):
reuse (bool):
"""
if name == None:
name = "residual3d"
out_dim = (int)(inp.shape[-1])
with tf.variable_scope(name, reuse=reuse):
out1 = tf.layers.conv3d(
inp, filters=out_dim, kernel_size=[3, 3, 3],
strides=[1, 1, 1], padding="same", activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.zeros_initializer(), name="layer1",
reuse=reuse)
if add_bn:
out1 = batch_norm(
inp=out1,
is_training=is_training,
name="norm1",
reuse=reuse)
out1 = tf.nn.relu(out1)
out2 = tf.layers.conv3d(
out1, filters=out_dim, kernel_size=[3, 3, 3],
strides=[1, 1, 1], padding="same", activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.zeros_initializer(), name="layer2",
reuse=reuse)
if relu_after and add_bn:
out2 = batch_norm(
inp=out2,
is_training=is_training,
name="norm2",
reuse=reuse)
if relu_after:
return tf.nn.relu(inp + out2)
else:
return inp + out2 | 28fb8651c9a9755e8d0636c702b2bd77e7186fd1 | 3,653,297 |
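A graph-construction sketch, assuming TensorFlow 1.x; passing add_bn=False avoids the external batch_norm helper the snippet otherwise relies on:
inp = tf.placeholder(tf.float32, [None, 8, 8, 8, 16])
is_training = tf.placeholder(tf.bool, [])
out = residual3d(inp, is_training, add_bn=False, name="res3d_demo")   # same shape as inp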
def marks(family, glyph):
"""
:param family:
:param glyph:
:return: True when glyph has at least one anchor
"""
has_mark_anchor = False
for anchor in glyph.anchors:
if anchor.name:
if anchor.name.startswith("_"):
has_mark_anchor = True
break
return has_mark_anchor | 101555dcadfd78b0550606f843e32dce99de62b8 | 3,653,298 |
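A quick check with a minimal stand-in for a glyph object (the family argument is unused by this helper):
from types import SimpleNamespace
glyph = SimpleNamespace(anchors=[SimpleNamespace(name="_top"), SimpleNamespace(name="bottom")])
print(marks("MyFamily", glyph))   # True, because "_top" starts with an underscore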
import re
def generate_bom(pcb_modules, config, extra_data):
# type: (list, Config, dict) -> dict
"""
Generate BOM from pcb layout.
:param pcb_modules: list of modules on the pcb
:param config: Config object
:param extra_data: Extra fields data
:return: dict of BOM tables (qty, value, footprint, refs) and dnp components
"""
def convert(text):
return int(text) if text.isdigit() else text.lower()
def alphanum_key(key):
return [convert(c)
for c in re.split('([0-9]+)', key)]
def natural_sort(l):
"""
Natural sort for strings containing numbers
"""
return sorted(l, key=lambda r: (alphanum_key(r[0]), r[1]))
# build grouped part list
warning_shown = False
skipped_components = []
part_groups = {}
for i, m in enumerate(pcb_modules):
if skip_component(m, config, extra_data):
skipped_components.append(i)
continue
# group part refs by value and footprint
norm_value = units.componentValue(m.val)
extras = []
if config.extra_fields:
if m.ref in extra_data:
extras = [extra_data[m.ref].get(f, '')
for f in config.extra_fields]
else:
# Some components are on pcb but not in schematic data.
# Show a warning about possibly outdated netlist/xml file.
log.warn(
'Component %s is missing from schematic data.' % m.ref)
warning_shown = True
extras = [''] * len(config.extra_fields)
group_key = (norm_value, tuple(extras), m.footprint, m.attr)
valrefs = part_groups.setdefault(group_key, [m.val, []])
valrefs[1].append((m.ref, i))
if warning_shown:
log.warn('Netlist/xml file is likely out of date.')
# build bom table, sort refs
bom_table = []
for (norm_value, extras, footprint, attr), valrefs in part_groups.items():
bom_row = (
len(valrefs[1]), valrefs[0], footprint,
natural_sort(valrefs[1]), extras)
bom_table.append(bom_row)
# sort table by reference prefix, footprint and quantity
def sort_func(row):
qty, _, fp, rf, e = row
prefix = re.findall('^[A-Z]*', rf[0][0])[0]
if prefix in config.component_sort_order:
ref_ord = config.component_sort_order.index(prefix)
else:
ref_ord = config.component_sort_order.index('~')
return ref_ord, e, fp, -qty, alphanum_key(rf[0][0])
if '~' not in config.component_sort_order:
config.component_sort_order.append('~')
bom_table = sorted(bom_table, key=sort_func)
result = {
'both': bom_table,
'skipped': skipped_components
}
for layer in ['F', 'B']:
filtered_table = []
for row in bom_table:
filtered_refs = [ref for ref in row[3]
if pcb_modules[ref[1]].layer == layer]
if filtered_refs:
filtered_table.append((len(filtered_refs), row[1],
row[2], filtered_refs, row[4]))
result[layer] = sorted(filtered_table, key=sort_func)
return result | 7645929bfcfd3d7447a32ae2ea3074d6da86c368 | 3,653,299 |