content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
---|---|---|
def recipe_clone_message(recipe):
"""
Renders the recipe clone message.
"""
return dict(recipe=recipe) | 09728b431966b12415861a212f2cb85af475dc37 | 3,653,500 |
import numpy as np
def read_expression_file(file):
"""Reads a file with the expression profiles."""
D = []
genes = []
with open(file) as fp:
firstline = fp.readline()
classes = [c.strip() for c in firstline.split("\t")[1:]]
for line in fp.readlines():
items = [w.strip() for w in line.split("\t")]
genes.append(items[0])
D.append([int(x) for x in items[1:]])
class_a = classes[0]
C = [int(c == class_a) for c in classes]
D = np.array(D)
return genes, D, C | aa3465855eb75a731801660e8f7b22091aae0a36 | 3,653,501 |
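A minimal usage sketch for the row above, assuming numpy is available; the file name, gene names and class labels are made up for illustration:

# Write a tiny expression file in the format read_expression_file expects:
# a header row of class labels, then one tab-separated row per gene.
with open("expr_demo.tsv", "w") as fh:
    fh.write("gene\tALL\tALL\tAML\n"
             "g1\t10\t12\t3\n"
             "g2\t7\t8\t15\n")
genes, D, C = read_expression_file("expr_demo.tsv")
print(genes)  # ['g1', 'g2']
print(D)      # 2x3 integer expression matrix
print(C)      # [1, 1, 0]: 1 where the sample class equals the first class ('ALL')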
def train(X, Y, n_h, num_iterations=10000, print_cost=False):
"""
定义神经网络模型,把之前的操作合并到一起
Args:
X: 输入值
Y: 真实值
n_h: 隐藏层大小/节点数
num_iterations: 训练次数
print_cost: 设置为True,则每1000次训练打印一次成本函数值
Return:
parameters: 模型训练所得参数,用于预测
"""
np.random.seed(3)
n_x = layer_sizes(X, Y)[0]
n_y = layer_sizes(X, Y)[2]
    # Initialize the parameters from n_x, n_h, n_y and extract W1, b1, W2, b2
parameters = initialize_parameters(n_x, n_h, n_y)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
for i in range(0, num_iterations):
        # Forward propagation. Input: "X, parameters". Output: "A2, cache".
        A2, cache = forward_propagation(X, parameters)
        # Cost computation. Input: "A2, Y, parameters". Output: "cost".
        cost = calculate_cost(A2, Y, parameters)
        # Backward propagation. Input: "parameters, cache, X, Y". Output: "grads".
        grads = backward_propagation(parameters, cache, X, Y)
        # Parameter update. Input: "parameters, grads". Output: "parameters".
        parameters = update_parameters(parameters, grads)
        # Print the cost every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
return parameters | 57efdadd744b9801227da87aed8ca458e2990c5c | 3,653,502 |
def get_drawdowns(cum_returns):
"""
Computes the drawdowns of the cumulative returns.
Parameters
----------
cum_returns : Series or DataFrame, required
a Series or DataFrame of cumulative returns
Returns
-------
Series or DataFrame
"""
cum_returns = cum_returns[cum_returns.notnull()]
highwater_marks = cum_returns.expanding().max()
drawdowns = cum_returns/highwater_marks - 1
return drawdowns | 1f4da9e405b8b4f8a691b09e42e479cd6fdec3ae | 3,653,503 |
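A quick illustrative check of the drawdown helper above, on a small pandas Series of cumulative returns (values chosen arbitrarily):

import pandas as pd
# Cumulative returns rise to 1.2, fall to 0.9, then recover to 1.1.
cum = pd.Series([1.0, 1.2, 0.9, 1.1])
print(get_drawdowns(cum))
# index 2 -> -0.25 (0.9/1.2 - 1), index 3 -> about -0.083 (1.1/1.2 - 1)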
def calc_recipe_quantity_ratio(
first_month: str,
first_recipe: str,
second_recipe: str,
file_name: str,
second_month: str = None) -> float:
"""
A function which calculates the ratio of quantity between two months.
:param first_month: str
:param first_recipe: str
:param second_recipe: str
:param file_name: str
:param second_month: str
:return: ratio: float
"""
if first_month not in VALID_MONTH:
raise ValueError("Date must be one of %s." % VALID_MONTH)
elif first_recipe not in VALID_RECIPE or second_recipe not in VALID_RECIPE:
raise ValueError("Recipe must be on of %s." % VALID_RECIPE)
else:
if second_month is None:
second_month: str = first_month
first_quantity: int = calc_month_quantity_by_recipe(first_month, first_recipe, file_name)
second_quantity: int = calc_month_quantity_by_recipe(second_month, second_recipe, file_name)
ratio = round(first_quantity / second_quantity, 2)
return ratio | 284fd4c010c933523967a11f774fc7e220198e7f | 3,653,504 |
def teacher_add_to_db():
"""Adds a teacher to database
Returns:
Redirect: Redirects to teachers list route
"""
if request.method == "POST":
fet_name = request.form["fet_name"]
fullname = request.form["fullname"]
teacher_email = request.form["t_email"]
try:
teacher_obj = Teacher(teacher_email=teacher_email,
fet_name=fet_name,
fullname=fullname)
db.session.add(teacher_obj)
db.session.commit()
flash(("Teacher {} added successfully.".format(fet_name)),
category="success")
return redirect(url_for('teacher_list')), 302
except Exception as e:
flash("Exception: {}".format(str(e)), category="danger")
return redirect(url_for("teacher_list")), 302 | 224f95f00e88dce00d21406883dd5655ed9e8fbd | 3,653,505 |
def authorize(app_id, channel_id, team_id):
"""Just double check if this app is invoked from the expected app/channel/team"""
if app_id != SLACK_APP_ID:
return f"app ID {app_id}"
if team_id not in SLACK_TEAM_IDS:
return f"team ID {team_id}"
if channel_id not in SLACK_CHANNEL_IDS:
return f"channel ID {channel_id}" | ff1f43a2073e7a0a54bda709b41fce02475c48ec | 3,653,506 |
import random
def deal_one_card():
""" returns a random card from the deck """
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
return random.choice(cards) | e8836c6569ed5c9e48043c9b750e730c42781a14 | 3,653,507 |
def grey_pal(start=0.2, end=0.8):
"""
Utility for creating continuous grey scale palette
Parameters
----------
start : float
grey value at low end of palette
end : float
grey value at high end of palette
Returns
-------
out : function
Continuous color palette that takes a single
:class:`int` parameter ``n`` and returns ``n``
equally spaced colors.
Examples
--------
>>> palette = grey_pal()
>>> palette(5)
['#333333', '#737373', '#989898', '#b5b5b5', '#cccccc']
"""
gamma = 2.2
ends = ((0.0, start, start), (1.0, end, end))
cdict = {'red': ends, 'green': ends, 'blue': ends}
grey_cmap = mcolors.LinearSegmentedColormap('grey', cdict)
def continuous_grey_palette(n):
# The grey scale points are linearly separated in
# gamma encoded space
x = np.linspace(start**gamma, end**gamma, n)
# Map points onto the [0, 1] palette domain
vals = (x ** (1./gamma) - start) / (end - start)
return ratios_to_colors(vals, grey_cmap)
return continuous_grey_palette | f38295a48120a3e17b000276797bfec78a644749 | 3,653,508 |
import re
import os
def prepare(path, data_id):
"""Process each dataset based on individual characteristics
Args:
path to pull data from
"""
# assert type(train) == bool, 'Wrong train/test selection input'
if train:
suffix = "_train"
else:
suffix = "_test"
if dataset == "synapse":
feature_id = re.findall(r"[\w']+", feature_dir)[-3][-4:]
label_id = re.findall(r"[\w']+", label_dir)[-3][-4:]
assert feature_id == label_id, "Feature and label mis-match: {0}".format(
feature_id
)
feature_array = load_nifty_data(feature_dir)
feature_array = rotate_image(feature_array, 1)
feature_final = np.moveaxis(feature_array, -1, 0)
label_array = load_nifty_data(label_dir)
label_array = rotate_image(label_array, 1)
label_final = np.moveaxis(label_array, -1, 0)
else:
feature_id = re.findall(r"[\w']+", feature_dir)[-1][-4:]
label_id = re.findall(r"[\w']+", label_dir)[-3][-4:]
assert feature_id == label_id, "Feature and label mis-match: {0}".format(
feature_id
)
feature_array = load_dicom_data(feature_dir)
feature_array = rotate_image(feature_array, 2, (1, 2))
feature_final = rotate_image(feature_array, 2, (0, 2))
# feature_final = np.flip(feature_array, axis=0)
# feature_final = feature_array
label_array = load_nifty_data(label_dir)
label_array = np.transpose(label_array, (2, 0, 1))
# label_array = rotate_image(label_array, 2)
# label_final = np.moveaxis(label_array, -1, 0)
label_tcia = rotate_image(label_array, 1, (1, 2))
label_final = np.flip(label_tcia, axis=0)
# label_final = rotate_image(label_array, 2, (0, 2))
# save_dir = os.path.join(parent_save_folder, dataset + suffix, feature_id)
print("parent save folder:", parent_save_folder)
save_dir = os.path.join(parent_save_folder, feature_id)
# print('saving dir: {0}'.format(save_dir))
return save_dir, feature_final, label_final | cf4464b87eb1770b899debb6f2bb2aa96b0f853c | 3,653,509 |
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer
def tags_to_matrix(events_df, tags_df, top_tags):
"""Converts tags to feature matrix
Args:
events_df: Events dataset
tags_df: Tags dataset
top_tags: Tags to include
Returns:
Feature matrix for tags
"""
# Combine tags into lists
tags = tags_df.groupby('id')['tag'].agg(lambda x: list(x)).reset_index()
# Handle events with no top tags
# TODO: Kludge, write nicer
missing_tags = pd.DataFrame({
'id': events_df[~events_df['id'].isin(tags['id'])]['id'].unique()
})
missing_tags['tag'] = [[] for _ in range(len(missing_tags))]
tags = pd.concat([tags, missing_tags])
# Align tags with events
aligned_tags = events_df.merge(tags, on='id')
# Convert aligned tags to matrix
mlb = MultiLabelBinarizer(classes=top_tags)
return mlb.fit_transform(aligned_tags['tag']) | ab9b6eec8aaa2bddeccdbbb923338dc6b32df649 | 3,653,510 |
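A small usage sketch for the row above with made-up event and tag frames; the column names ('id', 'tag') follow the function body:

import pandas as pd
events_df = pd.DataFrame({"id": [1, 2, 3]})
tags_df = pd.DataFrame({"id": [1, 1, 2], "tag": ["music", "outdoor", "music"]})
top_tags = ["music", "outdoor"]
# Event 3 has no tags, so it gets an empty tag list and an all-zero row.
print(tags_to_matrix(events_df, tags_df, top_tags))
# [[1 1]
#  [1 0]
#  [0 0]]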
from typing import Optional
from typing import Union
from typing import Any
from typing import Dict
def get_parameter_value_and_validate_return_type(
domain: Optional[Domain] = None,
parameter_reference: Optional[Union[Any, str]] = None,
expected_return_type: Optional[Union[type, tuple]] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> Optional[Any]:
"""
This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.)
or as a fully-qualified parameter name. In either case, it can optionally validate the type of the return value.
"""
if isinstance(parameter_reference, dict):
parameter_reference = safe_deep_copy(data=parameter_reference)
parameter_reference = get_parameter_value(
domain=domain,
parameter_reference=parameter_reference,
variables=variables,
parameters=parameters,
)
if expected_return_type is not None:
if not isinstance(parameter_reference, expected_return_type):
raise ge_exceptions.ProfilerExecutionError(
message=f"""Argument "{parameter_reference}" must be of type "{str(expected_return_type)}" \
(value of type "{str(type(parameter_reference))}" was encountered).
"""
)
return parameter_reference | be1ab4c90942d69083b765194baa494658265275 | 3,653,511 |
def add_leaf_to_edge(t):
"""
    Returns a `Shape` instance with a new root; a new leaf and the input `Shape` both hang from it.
:param t: `Shape` instance.
:return: `Shape` instance.
"""
return Shape([Shape.LEAF, t]) | 18d4a383dcc2873e506677f76501c76e36b99ac7 | 3,653,512 |
def create_simulation(parameter_values=None, experiment=None, make_inputs=False):
"""
Create a PyBaMM simulation set up for interation with liionpack
Parameters
----------
parameter_values : :class:`pybamm.ParameterValues`
The default is None.
experiment : :class:`pybamm.Experiment`
The default is None.
make_inputs : bool, optional
Changes "Current function [A]" and "Total heat transfer coefficient
[W.m-2.K-1]" to be inputs that are controlled by liionpack.
The default is False.
Returns
-------
sim : :class:`pybamm.Simulation`
A simulation that can be solved individually or passed into the
liionpack solve method
"""
# Create the pybamm model
model = pybamm.lithium_ion.SPMe(
options={
"thermal": "lumped",
}
)
# Set up parameter values
if parameter_values is None:
chemistry = pybamm.parameter_sets.Chen2020
parameter_values = pybamm.ParameterValues(chemistry=chemistry)
# Change the current function and heat transfer coefficient to be
# inputs controlled by the external circuit
if make_inputs:
parameter_values.update(
{
"Current function [A]": "[input]",
"Total heat transfer coefficient [W.m-2.K-1]": "[input]",
},
)
# Set up solver and simulation
solver = pybamm.CasadiSolver(mode="safe")
sim = pybamm.Simulation(
model=model,
experiment=experiment,
parameter_values=parameter_values,
solver=solver,
)
return sim | 0faf24958440e42b64ef8cc82b9ec478d4899dd2 | 3,653,513 |
def _logistic_loss_and_grad(w, X, y, alpha, mask, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
mask : array-like, shape (n_features), (n_classes, n_features) optional
Masking array for coef.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
if mask is not None:
w[:n_features] *= mask
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) / n_samples
out += .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = (safe_sparse_dot(X.T, z0) / n_samples) + alpha * w
if mask is not None:
grad[:n_features] *= mask
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum() / n_samples
return out, grad | 39a05f090807a6eb83000e6daf5f3719bbbe9aa1 | 3,653,514 |
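The row above depends on several scikit-learn internals (_intercept_dot, log_logistic, safe_sparse_dot, expit). The sketch below reimplements the same loss and gradient in plain numpy for unit sample weights and no intercept, and checks the gradient by central differences; all names and data here are illustrative:

import numpy as np

def logistic_loss_grad(w, X, y, alpha):
    # Mean negative log-likelihood for labels y in {-1, +1} plus an L2 penalty.
    n = X.shape[0]
    yz = y * (X @ w)
    loss = np.mean(np.logaddexp(0.0, -yz)) + 0.5 * alpha * (w @ w)  # -log(sigmoid(yz)), computed stably
    z = 1.0 / (1.0 + np.exp(-yz))                                   # sigmoid(yz)
    grad = X.T @ ((z - 1.0) * y) / n + alpha * w
    return loss, grad

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 3))
y = np.where(rng.normal(size=20) > 0, 1.0, -1.0)
w = rng.normal(size=3)
loss, grad = logistic_loss_grad(w, X, y, alpha=0.1)
# Central-difference check of the analytic gradient.
eps = 1e-6
numeric = np.array([(logistic_loss_grad(w + eps * e, X, y, 0.1)[0]
                     - logistic_loss_grad(w - eps * e, X, y, 0.1)[0]) / (2 * eps)
                    for e in np.eye(3)])
print(np.max(np.abs(numeric - grad)))  # should be tiny, around 1e-8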
import requests
def verify_status_code(request_response: requests.Response) -> tuple:
"""Verify the status code of the post request to the search url and raise exceptions if the code is unexpected
:type request_response: requests.Response
:return:
"""
if request_response.status_code == 200:
return STATUS_CODE_OK, ''
elif request_response.status_code == 429:
if 'user\'s rate limit' in request_response.text:
msg = "Search rate limit reached"
return STATUS_CODE_REPEAT, msg
if 'limit of 150 searches' in request_response.text:
raise DailyLimitReachedException('Daily search limit for unregistered users reached')
elif 'limit of 300 searches' in request_response.text:
raise DailyLimitReachedException('Daily search limit for basic users reached')
else:
raise DailyLimitReachedException('Daily search limit reached')
elif request_response.status_code == 403:
raise InvalidOrWrongApiKeyException("Invalid or wrong API key")
elif request_response.status_code == 413:
msg = "Payload too large, skipping file"
return STATUS_CODE_SKIP, msg
else:
msg = "Unknown status code: {0:d}".format(request_response.status_code)
return STATUS_CODE_REPEAT, msg | b5f686dfe11d7fd4bd9d5f32ccd51df9f2322a13 | 3,653,515 |
def instability_product_graphs(gra):
""" Determine if the species has look for functional group attachments that
could cause molecule instabilities
"""
# Build graphs for the detection scheme
rad_grp_dct = radical_group_dct(gra)
# Check for instability causing functional groups
prd_gras = ()
for atm, grps in rad_grp_dct.items():
if atm in instab_fgrps.DCT:
fgrps, prds = instab_fgrps.DCT[atm]
for grp in grps:
grp_ich = automol.graph.inchi(grp)
if grp_ich in fgrps:
# If instability found, determine prod of the instability
prd_ich = prds[fgrps.index(grp_ich)]
prd_geo = automol.inchi.geometry(prd_ich)
prd_gra = automol.geom.graph(prd_geo)
prd_gras = radical_dissociation_prods(
gra, prd_gra)
break
return prd_gras | 42aba79627bd6343bd2e99bca9a099d1047a9f5c | 3,653,516 |
def build_pdb_rmsd_matrix(pdb_paths, pdb_diff_path=None):
"""
Returns rmsd difference matrix for multiple pdb files.
Returns rmsd_list (3-item list), pdb_comp_amount (int).
Optional with pdb_diff_path return pdb_diff_comp(int).
"""
# make 3 column list or ndarray for x, y = (pdb1-n * pdb1-n) and z = rmsd diff
rmsd_list = [[], [], []]
# get rmsd difference between each pdb file in nested loop and append
for pdb0 in pdb_paths:
# compare 2 different sets of pdb files
        if pdb_diff_path is not None:
for pdb1 in pdb_diff_path:
# append to x (col 0) pdb in outer loop
rmsd_list[0].append(pdb_paths.index(pdb0) + 1)
# append to y (col 1) pdb in inner loop
rmsd_list[1].append(pdb_diff_path.index(pdb1) + 1)
# find and append to z (col 2) rmsd value between pdb0 and pdb1
rmsd = rmsd_diff_calc(pdb0, pdb1)
#print(f"\n For PDB-A = {pdb0} and PDB-B = {pdb1} : RMSD = {rmsd}")
rmsd_list[2].append(rmsd)
        elif pdb_diff_path is None:
for pdb1 in pdb_paths:
# append to x (col 0) pdb in outer loop
rmsd_list[0].append(pdb_paths.index(pdb0) + 1)
# append to y (col 1) pdb in inner loop
rmsd_list[1].append(pdb_paths.index(pdb1) + 1)
# find and append to z (col 2) rmsd value between pdb0 and pdb1
rmsd = rmsd_diff_calc(pdb0, pdb1)
rmsd_list[2].append(rmsd)
# amount of pdb files to compare to each other
pdb_comp_amount = len(pdb_paths)
    if pdb_diff_path is None:
return rmsd_list, pdb_comp_amount
    elif pdb_diff_path is not None:
pdb_diff_comp = len(pdb_diff_path)
return rmsd_list, pdb_comp_amount, pdb_diff_comp | fe00efa6a853d0e77ae52c21267376eca25a110c | 3,653,517 |
import logging
import sys
def handle_options():
"""
Define default options for a complete and automatic process
then check the command line arguments for parts of the process to skip
    Returns:
        auto: whether or not we accept user inputs on job and location
        scrap: whether or not we do the scraping
        working_data: whether or not we get working data from csv and mongoDB
        cleaner: whether or not we do the cleaning of data
        pre_process: whether or not we do the pre-processing of data
        model: whether or not we do the model part
        update: whether or not we update the DB with our findings
        report: whether or not we do the reporting to the CEO
auto = False
scrap = True
working_data = True
cleaner = True
pre_process = True
model = True
update = True
report = True
log = logging.getLogger('main')
log.debug(sys.argv)
if sys.argv:
# there are command line arguments, we must handle them
for arg in sys.argv:
if arg == '-auto':
auto = True
elif arg == '-noScrap':
scrap = False
elif arg == '-noReport':
report = False
return auto, scrap, working_data, cleaner, pre_process, model, update, report | c1e68d57dc7a664691dc121654cb8ad1b701e36d | 3,653,518 |
from typing import Optional
def expected_response(y: np.ndarray, w: np.ndarray, policy: np.ndarray,
mu: Optional[np.ndarray]=None, ps: Optional[np.ndarray]=None) -> float:
"""Estimate expected response.
Parameters
----------
y: array-like of shape = (n_samples)
Observed target values.
w: array-like of shape = shape = (n_samples)
Treatment assignment variables.
policy: array-like of shape = (n_samples)
Estimated treatment policy.
mu: array-like of shape = (n_samples, n_trts), optional
Estimated potential outcomes.
ps: array-like of shape = (n_samples, n_trts), optional
Estimated propensity scores.
Returns
-------
expected_response: float
Estimated expected_response.
"""
mu = np.zeros((w.shape[0], np.unique(w).shape[0])) if mu is None else mu
ps = pd.get_dummies(w).mean(axis=0).values if ps is None else ps
indicator = np.array(w == policy, dtype=int)
expected_response = np.mean(mu[np.arange(w.shape[0]), policy]
+ (y - mu[np.arange(w.shape[0]), policy]) * indicator / ps[w])
return expected_response | ffa6938914480f236d8b0aff7ebc993a2e714682 | 3,653,519 |
def get_type_for_field(field: Field) -> type:
"""
For optional fields, the field type_ is a :class:`typing.Union`, of
``NoneType`` and the actual type.
Here we extract the "actual" type from a Union with None
"""
if not field.sub_fields:
return field.type_
for f in field.sub_fields:
if f.type_ != type(None): # noqa
return f.type_
raise Exception(f"No type found for field: {field}") | 2d0163b6e92bcd67c2aaee60d7088cc70e9bd09b | 3,653,520 |
from typing import List
def read_program_data(program: List[str]) -> int:
"""Read program data from port computer system.
Args:
program (List[str]): the program code containing masks and memory
Returns:
int: sum of all values in memory
"""
memory = defaultdict(int)
for line in program:
if line.startswith('mask'):
_, mask = line.split(' = ')
ones = remove_leading_zeroes(
[1 if c == '1' else 0 for c in mask]
)
floating = remove_leading_zeroes(
[1 if c == 'X' else 0 for c in mask]
)
mask_len = 36 # This is hard-coded currently and may change
# if this problem is used in a new context.
else:
address, value = [int(n) for n in MEM_RE.match(line).groups()]
address = [int(a) for a in bin(address)[2:]]
if len(address) < mask_len:
address = add_leading_zeroes(address, mask_len - len(address))
try:
if 1 in ones:
address = mask_values(ones, address, 1)
except TypeError:
pass
if 1 in floating:
addresses = mask_floating(floating, address)
for address in addresses:
address = int(''.join([str(a) for a in address]), base=2)
memory[address] = value
return sum(memory.values()) | 7852d13f1de9cc04ee4170a10807b49ed2592905 | 3,653,521 |
def get_mgr_worker_msg(comm, status=None):
"""Get message to worker from manager.
"""
status = status or MPI.Status()
comm.probe(source=0, tag=MPI.ANY_TAG, status=status)
tag = status.Get_tag()
if tag in [STOP_TAG, PERSIS_STOP]:
return tag, None, None
Work = comm.recv(buf=None, source=0, tag=MPI.ANY_TAG, status=status)
calc_in = comm.recv(buf=None, source=0)
return tag, Work, calc_in | 76bf2be4707052ab5ea0f447822208eccb19f9ef | 3,653,522 |
import time
from functools import wraps
def retry(exceptions, tries=4, delay=3, backoff=2, logger=None):
"""
Retry calling the decorated function using an exponential backoff.
Args:
exceptions: The exception to check. may be a tuple of
exceptions to check.
tries: Number of times to try (not retry) before giving up.
delay: Initial delay between retries in seconds.
backoff: Backoff multiplier (e.g. value of 2 will double the delay
each retry).
logger: Logger to use. If None, print.
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except exceptions as e:
msg = '{}, Retrying in {} seconds...'.format(e, mdelay)
if logger:
logger.warning(msg)
else:
print(msg)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry | 8c0917ad45b2c000ced926f0457b9b9aebbc4543 | 3,653,523 |
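A usage sketch for the retry decorator above; the flaky function and its failure mode are made up for illustration:

import random

@retry(ValueError, tries=4, delay=0.1, backoff=2)
def flaky_fetch():
    # Fails roughly half the time; retry() re-invokes it with exponential backoff.
    if random.random() < 0.5:
        raise ValueError("transient failure")
    return "ok"

print(flaky_fetch())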
def load_dictionary(dicttimestamp, server='postgres-cns-myaura'):
""" Load dictionary from database
Args:
dicttimestamp (string): the version of dictionary (ex: 20210131)
server (string): the server name in db_config.ini
Returns:
tuple (termdictparser, pandas.DataFrame): A TermDictParser and a pandas dataframe containing the dictionary.
"""
print('--- Loading {server:s} dictionary ({dicttimestamp:s}) ---'.format(server=server, dicttimestamp=dicttimestamp))
#
if 'postgres' in server:
engine = db.connectToPostgreSQL(server=server)
tablename = 'dictionaries.dict_%s' % (dicttimestamp)
sql = """
SELECT
d.id,
COALESCE(d.id_parent,d.id) AS id_parent,
d.dictionary,
d.token,
COALESCE(p.token, d.token) as parent,
d.type,
d.source,
d.id_original,
COALESCE(p.id_original, d.id_original) as id_original_parent
FROM %s d
LEFT JOIN %s p ON d.id_parent = p.id
WHERE d.enabled > 0""" % (tablename, tablename)
elif 'mysql' in server:
engine = db.connectToMySQL(server=server)
tablename = 'dict_%s' % (dicttimestamp)
sql = """
SELECT
d.id,
IFNULL(d.id_parent,d.id) AS id_parent,
d.dictionary,
d.token,
IFNULL(p.token, d.token) as parent,
d.type,
d.source,
d.id_original,
IFNULL(p.id_original, d.id_original) as id_original_parent
FROM %s d
LEFT JOIN %s p ON d.id_parent = p.id
WHERE d.enabled = True""" % (tablename, tablename)
else:
raise TypeError("Invalid server name. The name of the server must contain either a 'mysql' or 'postgress' string.")
df = pd.read_sql(sql, engine, index_col='id')
return df | 63df416815386c1bf4d6a820c98490ae5a6e4d08 | 3,653,524 |
def part1(data):
"""
>>> part1(((20, 30), (-10, -5)))
45
>>> part1(INPUT)
13203
"""
target_x, target_y = data
best = None
for dx in range(1, max(target_x) + 1):
for dy in range(0, - min(target_y) + 1):
hit_target, height = trajectory(target_x, target_y, dx, dy)
if hit_target:
if best is None:
best = height
else:
best = max(best, height)
return best | 293dd006caa20471cc849c1366f0610594279b8b | 3,653,525 |
import numpy as np
def get_neighbors(p, exclude_p=True, shape=None, nNeighbors=1,
get_indices=False, direction=None, get_mask=False):
"""Determine pixel coordinates of neighboring pixels.
Includes also all pixels that neighbor diagonally.
Parameters
----------
p : tuple
Gives the coordinates (y, x) of the central pixel
exclude_p : boolean
Whether or not to exclude the pixel with position p from the resulting list.
shape : tuple
Describes the dimensions of the total array (NAXIS2, NAXIS1).
Returns
-------
neighbors: numpy.ndarray
Contains all pixel coordinates of the neighboring pixels
[[y1, x1], [y2, x2], ...]
Adapted from:
https://stackoverflow.com/questions/34905274/how-to-find-the-neighbors-of-a-cell-in-an-ndarray
"""
ndim = len(p)
n = nNeighbors*2 + 1
# generate an (m, ndims) array containing all combinations of 0, 1, 2
offset_idx = np.indices((n,) * ndim).reshape(ndim, -1).T
# use these to index into np.array([-1, 0, 1]) to get offsets
lst = list(range(-(nNeighbors), nNeighbors + 1))
offsets = np.r_[lst].take(offset_idx)
if direction == 'horizontal':
indices = np.where(offsets[:, 0] == 0)
elif direction == 'vertical':
indices = np.where(offsets[:, 1] == 0)
elif direction == 'diagonal_ul':
indices = np.where(offsets[:, 0] == offsets[:, 1])
elif direction == 'diagonal_ur':
indices = np.where(offsets[:, 0] == -offsets[:, 1])
if direction is not None:
offsets = offsets[indices]
# optional: exclude offsets of 0, 0, ..., 0 (i.e. p itself)
if exclude_p:
offsets = offsets[np.any(offsets, 1)]
neighbours = p + offsets # apply offsets to p
# optional: exclude out-of-bounds indices
if shape is not None:
valid = np.all((neighbours < np.array(shape)) & (neighbours >= 0), axis=1)
neighbours = neighbours[valid]
if get_mask:
return valid
if get_indices:
indices_neighbours = np.array([])
for neighbour in neighbours:
indices_neighbours = np.append(
indices_neighbours, np.ravel_multi_index(neighbour, shape)).astype('int')
return indices_neighbours
return neighbours | 4dc68ed4f44667253c4bdb114a0d3034a65ef725 | 3,653,526 |
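A small usage sketch of the neighbour helper above on a 4x5 grid (the coordinates are arbitrary):

# Neighbours of the corner pixel (0, 0), with out-of-bounds pixels dropped.
print(get_neighbors((0, 0), shape=(4, 5)))
# [[0 1]
#  [1 0]
#  [1 1]]
# Flat indices of the horizontal neighbours of (2, 2), including (2, 2) itself.
print(get_neighbors((2, 2), exclude_p=False, shape=(4, 5),
                    direction='horizontal', get_indices=True))
# [11 12 13]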
import torch
def make_coordinate_grid(spatial_size, type):
"""
Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
"""
h, w = spatial_size
x = torch.arange(w).type(type)
y = torch.arange(h).type(type)
x = (2 * (x / (w - 1)) - 1)
y = (2 * (y / (h - 1)) - 1)
yy = y.view(-1, 1).repeat(1, w)
xx = x.view(1, -1).repeat(h, 1)
meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)
return meshed | 0bbbd2f0e0d588b58feebce19b3f2fd9c84934d8 | 3,653,527 |
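A quick usage sketch of the grid helper above, reusing the torch import from the row (the dtype choice is arbitrary):

grid = make_coordinate_grid((3, 4), torch.FloatTensor)
print(grid.shape)    # torch.Size([3, 4, 2])
print(grid[0, 0])    # tensor([-1., -1.])  -> (x, y) at the top-left corner
print(grid[-1, -1])  # tensor([1., 1.])    -> (x, y) at the bottom-right corner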
def add_wrong_column(data_frame):
"""
Adds wrong column to dataframe
    :param data_frame: dataframe
    :returns: dataframe
"""
new_df = data_frame.copy()
new_df['Ducks'] = 0
return new_df | 0f3ae838c0975e8021cfeee258576afac75072c5 | 3,653,528 |
def p2l(X, Y, D, tol, inputTransform):
"""
Computes the Procrustean point-line registration between X and Y+nD with
anisotropic Scaling,
where X is a mxn matrix, m is typically 3
Y is a mxn matrix denoting line origin, same dimension as X
D is a mxn normalized matrix denoting line direction
R is a mxm rotation matrix,
A is a mxm diagonal scaling matrix, and
t is a mx1 translation vector
Q is a mxn fiducial on line that is closest to X after registration
fre is the fiducial localization error
based on the Majorization Principle
"""
[m,n] = X.shape
err = np.Infinity
E_old = 1000000 * np.ones((m,n))
e = np.ones((1,n))
    # initialization
Q = Y
# normalize the line orientation just in case
Dir = D/np.linalg.norm(D, ord=2,axis=0,keepdims=True)
while err > tol:
[R, t, A] = AOPA_Major(X, Q, tol)
E = Q-np.matmul(R,np.matmul(A,X))-np.matmul(t,e)
# project point to line
Q = Y+Dir*np.tile(np.einsum('ij,ij->j',np.matmul(R,np.matmul(A,X))+np.matmul(t,e)-Y,Dir),(m,1))
err = np.linalg.norm(E-E_old)
E_old = E
E = Q - np.matmul(R, np.matmul(A,X)) - np.matmul(t,e)
# calculate fiducial registration error
fre = np.sum(np.linalg.norm(E,ord=2,axis=0,keepdims=True))/X.shape[1]
lps2ras = np.diag([-1, -1, 1, 1])
data = np.eye(4)
data[0:3, 3] = t.T
data[:3, :3] = np.dot(R, A)
data = np.dot(data, lps2ras)
transform_matrix = vtk.vtkMatrix4x4()
dimensions = len(data) - 1
for row in range(dimensions):
for col in range(dimensions + 1):
transform_matrix.SetElement(row, col, data[(row, col)])
inputTransform.SetMatrixTransformToParent(transform_matrix)
return [R,t,A,Q,fre, inputTransform] | 5b7b1143fd7dbbeae6767f8ef5f71464eb6220a0 | 3,653,529 |
def config_database(db_name):
"""
Create a database in sqlite3
:param db_name: The name of the file for the database
    :return: A database object and its connection object
"""
db = Database()
connection = db.create_connection(db_name)
db.create_table(connection)
return db, connection | 42602f32a3cca0dfbbc791973acbf6279af7cde3 | 3,653,530 |
from pathlib import Path
def parse_main_argument(argument, export_folder):
"""Function parsing the main_argument argument.
    Returns a dataframe containing the search terms (or the urls if main_argument is a file of youtube urls)."""
# File or string
if Path(argument).is_file():
is_file = True
argument_file_content = open(argument).read()
# File of urls or search terms
is_spotify = (
"spotify" in argument_file_content
and argument_file_content.startswith("http")
)
is_deezer = (
"deezer" in argument_file_content
and argument_file_content.startswith("http")
)
is_youtube = (
"youtu" in argument_file_content
and argument_file_content.startswith("http")
)
else:
is_file = False
is_spotify = "spotify" in argument
is_deezer = "deezer" in argument
# would be equivalent to argument youtube_url, doesn't exist
is_youtube = False
if is_spotify:
if is_file:
terms = extract_terms_from_file(argument)
df = get_spotify_songs(terms)
logger.info("Reading file containing spotify urls at %s.", argument)
else:
terms = extract_terms_from_arg(argument)
df = get_spotify_songs(terms)
logger.info("Reading spotify urls %s.", argument)
elif is_deezer:
if is_file:
terms = extract_terms_from_file(argument)
df = get_deezer_songs(terms)
logger.info("Reading file containing deezer urls at %s.", argument)
else:
terms = extract_terms_from_arg(argument)
df = get_deezer_songs(terms)
logger.info("Reading deezer urls %s.", argument)
elif is_youtube:
if is_file:
df = pd.read_csv(argument, sep="\t", header=None, names=["url"])
logger.info("Reading file containing youtube urls at %s.", argument)
else:
if is_file:
df = pd.read_csv(argument, sep="\t", header=None, names=["title"])
logger.info("Reading file containing search terms at %s.", argument)
else:
df = pd.DataFrame(
[x.strip() for x in argument.split(",")], columns=["title"]
)
logger.info("Reading search terms %s.", argument)
return df | f3cbd81e3fe98333fa2a4ad04f746c291dc9138a | 3,653,531 |
from datetime import datetime
def validate_auth_header(headers):
"""Validate and decode auth token in request headers.
This helper function is used in each of the below wrappers, and is responsible to
validate the format of the `Authorization` header where the Lowball token is
supposed to reside.
Requirements for successful validation:
1. The current app must have a working auth database
2. The `Authorization` header __must__ be present in the headers
3. That header value __must__ be of the format `Bearer <token>`. The header value
is split on the space character, and if the header value is properly formatted,
this should result in a data structure that looks like ["Bearer", "<token>"]. If
after splitting the header value on the space, the length of the resulting
structure is not __exactly__ two, then the header is considered improperly formatted.
4. The token must be able to be decoded by the `Authentication.decode_token` method
5. The token cannot be expired.
6. The token must match a token that is in the application authentication database __exactly__
:param headers: Headers from request made to Lowball application
:type headers: werkzeug.Headers
:return: decoded token data
:rtype: Token
"""
if current_app.auth_db is None:
raise NoAuthenticationDatabaseException
if "Authorization" not in headers:
raise NoAuthHeaderException
auth_header = headers["Authorization"].split(" ")
    if len(auth_header) != 2 or auth_header[0] != "Bearer":
raise InvalidAuthHeaderException
token = auth_header[1]
decoded = current_app.authenticator.decode_token(token)
g.client_data = decoded
    if datetime.utcnow() > decoded.expiration:
raise ExpiredTokenException
database_token = current_app.auth_db.lookup_token(decoded.token_id)
if database_token != decoded:
raise InvalidTokenException
return decoded | be75c33767a43f1482417277d6a41f887b26f388 | 3,653,532 |
def shared_random_seed():
"""All workers must call this function, otherwise it will deadblock.
"""
seed = np.random.randint(2 ** 31)
all_seeds = all_gather(seed)
return all_seeds[0] | bdf636ddc24defd13339c20fed0bb5896c35400e | 3,653,533 |
def _version(base):
"""Get a chronological version from git or PKG-INFO
Args:
base (dict): state
Returns:
str: Chronological version "yyyymmdd.hhmmss"
str: git sha if available
"""
v1 = _version_from_pkg_info(base)
v2, sha = _version_from_git(base)
if v1:
if v2:
return (v1, None) if float(v1) > float(v2) else (v2, sha)
return v1, None
if v2:
return v2, sha
    raise ValueError('Must have a git repo or a source distribution')
def average_link_distance_segment(D,stop=-1,qmax=1,verbose=0):
"""
Average link clustering based on a pairwise distance matrix.
Parameters
----------
D: a (n,n) distance matrix between some items
stop=-1: stopping criterion, i.e. distance threshold at which
further merges are forbidden
By default, all merges are performed
qmax = 1; the number of desired clusters
(in the limit of stop)
verbose=0, verbosity level
Returns
-------
u: a labelling of the graph vertices according to the criterion
cost the cost of each merge step during the clustering procedure
Note
----
this method has not been optimized
"""
n = D.shape[0]
if D.shape[1]!=n:
raise ValueError, "non -square distance matrix"
if stop==-1: stop = np.infty
t = average_link_distance(D,verbose)
if verbose: t.plot()
u1 = np.zeros(n, np.int)
u2 = np.zeros(n, np.int)
if stop>=0:
u1 = t.partition(stop)
if qmax>0:
u2 = t.split(qmax)
if u1.max()<u2.max():
u = u2
else:
u = u1
cost = t.get_height()
cost = cost[t.isleaf()==False]
return u,cost | 1be3da149a5ceb99ab94980b94caec0c42edb096 | 3,653,535 |
def _process_get_set_Operand(column, reply):
"""Process reply for functions zGetOperand and zSetOperand"""
rs = reply.rstrip()
if column == 1:
# ensure that it is a string ... as it is supposed to return the operand
if isinstance(_regressLiteralType(rs), str):
return str(rs)
else:
return -1
    elif column in (2,3): # if there is a comment, it will be in column 2
#return int(float(rs))
return _regressLiteralType(rs)
else:
return float(rs) | b63de89b480ab263eb49c04dba47befb8bbe0997 | 3,653,536 |
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0, extra_arguments=(), extra_keywords=None):
"""Multi-dimensional Laplace filter using a provided second derivative
function.
Args:
input (cupy.ndarray): The input array.
derivative2 (callable): Function or other callable with the following
signature that is called once per axis::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
where ``input`` and ``output`` are ``cupy.ndarray``, ``axis`` is an
``int`` from ``0`` to the number of dimensions, and ``mode``,
``cval``, ``extra_arguments``, ``extra_keywords`` are the values
given to this function.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is the same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
extra_arguments (sequence, optional):
Sequence of extra positional arguments to pass to ``derivative2``.
extra_keywords (dict, optional):
dict of extra keyword arguments to pass ``derivative2``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.generic_laplace`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
if extra_keywords is None:
extra_keywords = {}
ndim = input.ndim
modes = _util._fix_sequence_arg(mode, ndim, 'mode',
_util._check_mode)
output = _util._get_output(output, input)
if ndim == 0:
output[...] = input
return output
derivative2(input, 0, output, modes[0], cval,
*extra_arguments, **extra_keywords)
if ndim > 1:
tmp = _util._get_output(output.dtype, input)
for i in range(1, ndim):
derivative2(input, i, tmp, modes[i], cval,
*extra_arguments, **extra_keywords)
output += tmp
return output | bc58a7ca79b551f4cba4c4c61855e027d666f2a0 | 3,653,537 |
import functools
def as_keras_metric(method):
""" from https://stackoverflow.com/questions/43076609/how-to-calculate-precision-and-recall-in-keras """
@functools.wraps(method)
def wrapper(self, args, **kwargs):
""" Wrapper for turning tensorflow metrics into keras metrics """
value, update_op = method(self, args, **kwargs)
tf.keras.backend.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([update_op]):
value = tf.identity(value)
return value
return wrapper | 17a6a6e39a25576215e0b426779df2ccac48e9b4 | 3,653,538 |
def do_get_video_capture_job(port_output_name: str = 'RAW') -> str:
"""
Function for configure the image retrieval job from video camera.
:param port_output_name: name you want to use for raw image in the application
:return: output image port name
"""
output_raw_port_name = transform_port_name_lvl(name=port_output_name, lvl=PYRAMID_LEVEL.LEVEL_0)
output_raw_port_size = transform_port_size_lvl(lvl=PYRAMID_LEVEL.LEVEL_0, rgb=True)
input_port_list = None
main_func_list = [output_raw_port_name]
output_port_list = [(output_raw_port_name, output_raw_port_size, 'B', True)]
job_name = job_name_create(action='Get image camera video frame')
d = create_dictionary_element(job_module='get_image',
job_name=job_name,
input_ports=input_port_list,
init_func_name='init_func', init_func_param=None,
main_func_name='main_func_video_camera',
main_func_param=main_func_list,
output_ports=output_port_list)
jobs_dict.append(d)
return port_output_name | b7965cde58e6b562d289cc58ac25edfd400c3b8a | 3,653,539 |
from typing import Optional
import os
import subprocess
def sync_one(src: str, dst: str, *, dry_run: bool) -> Optional[date]:
"""
From the snapshots that are present in src and missing in dst, pick the one
that is closest to an existing snapshot in dst, and sync it. Returns the
snapshot synced, or none if src and dst are already in sync.
"""
src_subvols = list_subvolumes(src)
dst_subvols = list_subvolumes(dst)
missing_subvols = src_subvols - dst_subvols
if len(missing_subvols) == 0:
return None
# We will sync the *latest* missing subvolume first. The rationale behind
# this is that data is mostly append-only, and that we prefer fragmenting
# early snapshots over later snapshots. There is no advantage in rebuilding
# a file that changed over time in the same order, it will only be
# fragmented in the later snapshots. Rather, we can sync the final (or at
# least latest) version, and rebuild the past versions backwards.
sync_date = max(missing_subvols)
num_days, base_date = hausdorff_distance(sync_date, dst_subvols)
base_dir = base_date.isoformat()
sync_dir = sync_date.isoformat()
print(f'Syncing {sync_dir}, using {base_dir} as base.')
# Create a writeable snapshot of the base subvolume.
cmd = [
'btrfs', 'subvolume', 'snapshot',
os.path.join(dst, base_dir),
os.path.join(dst, sync_dir),
]
run(cmd, dry_run=dry_run)
print('Waiting for sync of snapshot.')
# Previously I used "btrfs subvolume sync" instead of "filesystem sync",
# but that sync process reliably got stuck in an endless ioctl loop where
# it would call clock_nanosleep to sleep for a second and then a
# BTRFS_IOC_TREE_SEARCH ioctl, over and over again. A filesystem sync is
# less buggy.
cmd_sync = [
'btrfs', 'filesystem', 'sync',
os.path.join(dst, sync_dir),
]
run(cmd_sync, dry_run=dry_run)
cmd = [
'target/release/reflink-diff',
'dry-run' if dry_run else 'apply',
os.path.join(src, base_dir),
os.path.join(src, sync_dir),
os.path.join(dst, base_dir),
os.path.join(dst, sync_dir),
]
subprocess.run(cmd, check=True)
# Sync into it.
# Would be nice to use reflink support once that gets mainstream.
# https://bugzilla.samba.org/show_bug.cgi?id=10170
cmd = [
'rsync',
'-a',
'--delete-delay',
'--inplace',
'--preallocate',
'--no-whole-file',
'--fuzzy',
'--info=copy,del,name1,progress2,stats2',
os.path.join(src, sync_dir) + '/',
os.path.join(dst, sync_dir),
]
run(cmd, dry_run=dry_run)
# Once that is done, make the snapshot readonly.
cmd = [
'btrfs', 'property', 'set',
'-t', 'subvol',
os.path.join(dst, sync_dir),
'ro', 'true',
]
run(cmd, dry_run=dry_run)
run(cmd_sync, dry_run=dry_run)
return sync_date | 1b2b8b273bf65e7654c9623a51ec3dbeb62b8ee3 | 3,653,540 |
def vgg8_S(*args, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['YS']), final_filter=256, **kwargs)
return model | ec3824dbccbbca804ec5160c551bc7171a9b3866 | 3,653,541 |
import IO  # project-local helper module providing readList (typing.IO has no readList)
def createWords(word_str):
"""Cn Mandarin sentence to Cn Mandarin Words list"""
pre_func = IO.readList(r'docs/pre_punctuation.txt')[1:]
lat_func = IO.readList(r'docs/post_punctuation.txt')[1:]
en_letters = IO.readList(r'docs/special_English_letters.txt')[1:]
words = []
j = 0
tmp_word = ''
while j < len(word_str):
find_pre_func = 0
while j < len(word_str) and word_str[j] in pre_func:
tmp_word += word_str[j]
find_pre_func = 1
j += 1
if (u'\u9fa5' >= word_str[j] >= u'\u4e00') or word_str[j] in en_letters:
if find_pre_func:
tmp_word += word_str[j]
else:
tmp_word = word_str[j]
j = j + 1
while j < len(word_str) and word_str[j] in lat_func:
tmp_word += word_str[j]
j = j + 1
words.append(tmp_word)
tmp_word = ''
return words | 7d7aad411773550e6a884539a1160218499dea73 | 3,653,542 |
def get_calib_driver(calib_dir: str):
""" Create left/right charuco point detectors and load calibration images from directory. """
reference_image = cv2.imread("tests/data/2020_01_20_storz/pattern_4x4_19x26_5_4_with_inset_9x14.png")
minimum_points = 50
number_of_squares = [19, 26]
square_tag_sizes = [5, 4]
filter_markers = True
number_of_chessboard_squares = [9, 14]
chessboard_square_size = 3
chessboard_id_offset = 500
left_pd = \
charuco_pd.CharucoPlusChessboardPointDetector(
reference_image,
minimum_number_of_points=minimum_points,
number_of_charuco_squares=number_of_squares,
size_of_charuco_squares=square_tag_sizes,
charuco_filtering=filter_markers,
number_of_chessboard_squares=number_of_chessboard_squares,
chessboard_square_size=chessboard_square_size,
chessboard_id_offset=chessboard_id_offset
)
right_pd = \
charuco_pd.CharucoPlusChessboardPointDetector(
reference_image,
minimum_number_of_points=minimum_points,
number_of_charuco_squares=number_of_squares,
size_of_charuco_squares=square_tag_sizes,
charuco_filtering=filter_markers,
number_of_chessboard_squares=number_of_chessboard_squares,
chessboard_square_size=chessboard_square_size,
chessboard_id_offset=chessboard_id_offset
)
calibration_driver = sc.StereoVideoCalibrationDriver(left_pd,
right_pd,
minimum_points)
for i in range(3):
l_img, r_img, chessboard, scope = lcu.get_calib_data(calib_dir, i)
calibration_driver.grab_data(l_img, r_img, scope, chessboard)
return calibration_driver | 1c0b859327fefa983c68b2f70909f5d1ca8108cd | 3,653,543 |
def stop_loading() -> dict:
"""Force the page stop all navigations and pending resource fetches."""
return {"method": "Page.stopLoading", "params": {}} | fd46497cee6a87ca0b00cc6ceed487655361d896 | 3,653,544 |
def drop_duplicates(df):
"""Drop duplicate rows and reindex.
Args:
df (pd.DataFrame): Dataframe.
Returns:
pd.DataFrame: Dataframe with the replaced value.
Examples:
>>> df = pd.DataFrame({'letters':['b','b','c'], 'numbers':[2,2,3]})
>>> drop_duplicates(df)
letters numbers
0 b 2
1 c 3
"""
return df.drop_duplicates().reset_index(drop=True) | 517d9faf09267df72def3fa7b90b0f59d819d660 | 3,653,545 |
def estimate_progress(ihash, peers):
"""Estimate a percentage done based on client stats"""
progress = count = 0
log.debug("peers: %s" % peers)
size = float(get_size(ihash))
if not size:
return "Unknown"
stats = get_clientstats(ihash)
# log.debug("%s" % stats)
for peer in peers:
progress += float(stats["%s:peer:%s:left" % (ihash, peer)])
try:
percentage = 100 - (( progress / float(len(peers)) ) / size * 100)
except ZeroDivisionError:
if transfer_complete_for_peers(ihash, peers) and count == 0 and len(peers) > 0:
percentage = 100.00
else:
percentage = 0.00
log.debug("progress: %s, perc: %s, count: %s, peers: %s" % ( progress, percentage, count, peers))
return "%0.2f%%" % percentage | 5f85b3157f6db12a468a9e94f7535b78a08e7788 | 3,653,546 |
def make_otf(
psf,
outpath=None,
dzpsf=0.1,
dxpsf=0.1,
wavelength=520,
na=1.25,
nimm=1.3,
otf_bgrd=None,
krmax=0,
fixorigin=10,
cleanup_otf=False,
max_otf_size=60000,
**kwargs
):
""" Generate a radially averaged OTF file from a PSF file
Args:
psf (str): Filepath of 3D PSF TIF
outpath (str): Destination filepath for the output OTF
(default: appends "_otf.tif" to filename)
dzpsf (float): Z-step size in microns (default: {0.1})
dxpsf (float): XY-Pixel size in microns (default: {0.1})
wavelength (int): Emission wavelength in nm (default: {520})
na (float): Numerical Aperture (default: {1.25})
        nimm (float): Refractive index of the immersion medium (default: {1.3})
otf_bgrd (int, None): Background to subtract. "None" = autodetect. (default: {None})
krmax (int): pixels outside this limit will be zeroed (overwriting
estimated value from NA and NIMM) (default: {0})
fixorigin (int): for all kz, extrapolate using pixels kr=1 to this pixel
to get value for kr=0 (default: {10})
cleanup_otf (bool): clean-up outside OTF support (default: {False})
max_otf_size (int): make sure OTF is smaller than this many bytes. Deconvolution
may fail if the OTF is larger than 60KB (default: 60000)
Returns:
str: Path of output file
"""
if outpath is None:
outpath = psf.replace(".tif", "_otf.tif")
if otf_bgrd and isinstance(otf_bgrd, (int, float)):
bUserBackground = True
background = float(otf_bgrd)
else:
bUserBackground = False
background = 0.0
with CappedPSF(psf, max_otf_size) as _psf:
shared_makeotf(
str.encode(_psf.path),
str.encode(outpath),
wavelength,
dzpsf,
fixorigin,
bUserBackground,
background,
na,
nimm,
dxpsf,
krmax,
cleanup_otf,
)
return outpath | ad3fbdbea7562f766c53f26a82880f41002c893f | 3,653,547 |
def get_palette(dataset_name):
"""
Maps classes to colors in the style of PASCAL VOC.
Close values are mapped to far colors for segmentation visualization.
See http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
Takes:
num_classes: the number of classes
Gives:
palette: the colormap as a k x 3 array of RGB colors
"""
    # TODO: parse this from a csv file
palette = np.zeros((num_classes, 3), dtype=np.uint8)
for k in range(0, num_classes):
label = k
i = 0
while label:
palette[k, 0] |= (((label >> 0) & 1) << (7 - i))
palette[k, 1] |= (((label >> 1) & 1) << (7 - i))
palette[k, 2] |= (((label >> 2) & 1) << (7 - i))
label >>= 3
i += 1
return palette | 9253f633f7c3a6aa0c135af988125f3c144e3572 | 3,653,548 |
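The bit-twiddling in the row above is the usual PASCAL VOC colormap trick (each group of three label bits is spread across the R, G, B channels from the most significant bit down), but the row's num_classes is never derived from dataset_name. A self-contained sketch of the same trick:

import numpy as np

def voc_colormap(num_classes):
    # Bit 3*i + c of the label sets bit (7 - i) of colour channel c,
    # so consecutive class ids get visually distant colours.
    palette = np.zeros((num_classes, 3), dtype=np.uint8)
    for k in range(num_classes):
        label, i = k, 0
        while label:
            for c in range(3):
                palette[k, c] |= ((label >> c) & 1) << (7 - i)
            label >>= 3
            i += 1
    return palette

print(voc_colormap(4))
# [[  0   0   0]   class 0 -> black
#  [128   0   0]   class 1 -> dark red
#  [  0 128   0]   class 2 -> dark green
#  [128 128   0]]  class 3 -> dark yellow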
import unicodedata
def is_number(input_string):
"""
if input_string includes number only, return corresponding number,
otherwise return input_string
"""
try:
return float(input_string)
except ValueError:
pass
try:
return unicodedata.numeric(input_string)
except (TypeError, ValueError):
pass
return input_string.strip('"') | 2b435b1f23c8764e0ff6bf741678db91bb4a5b23 | 3,653,549 |
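Quick illustrative calls to the helper above:

print(is_number("3.5"))     # 3.5
print(is_number("½"))       # 0.5, via unicodedata.numeric
print(is_number('"text"'))  # 'text' (surrounding double quotes stripped)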
def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argument
"""
Initiate a reboot if the running kernel is not the latest one installed.
.. note::
This state does not install any patches. It only compares the running
kernel version number to other kernel versions also installed in the
system. If the running version is not the latest one installed, this
state will reboot the system.
See :py:func:`kernelpkg.upgrade <salt.modules.kernelpkg_linux_yum.upgrade>` and
:py:func:`~salt.states.kernelpkg.latest_installed`
for ways to install new kernel packages.
This module does not attempt to understand or manage boot loader configurations
it is possible to have a new kernel installed, but a boot loader configuration
that will never activate it. For this reason, it would not be advisable to
schedule this state to run automatically.
Because this state function may cause the system to reboot, it may be preferable
to move it to the very end of the state run.
See :py:func:`~salt.states.kernelpkg.latest_wait`
for a waitable state that can be called with the `listen` requesite.
name
Arbitrary name for the state. Does not affect behavior.
at_time
The wait time in minutes before the system will be rebooted.
"""
active = __salt__["kernelpkg.active"]()
latest = __salt__["kernelpkg.latest_installed"]()
ret = {"name": name}
if __salt__["kernelpkg.needs_reboot"]():
ret["comment"] = (
"The system will be booted to activate " "kernel: {0}"
).format(latest)
if __opts__["test"]:
ret["result"] = None
ret["changes"] = {"kernel": {"old": active, "new": latest}}
else:
__salt__["system.reboot"](at_time=at_time)
ret["result"] = True
ret["changes"] = {"kernel": {"old": active, "new": latest}}
else:
ret["result"] = True
ret["comment"] = (
"The latest installed kernel package " "is active: {0}"
).format(active)
ret["changes"] = {}
return ret | 449819b4abb43b062514f0c8356f3fcd992198f7 | 3,653,550 |
import logging
def upload_file(file_name, bucket, object_name):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
    :param object_name: S3 object name
:return: True if file was uploaded, else False
"""
s3_client = boto3.client('s3')
try:
s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.error(e)
return False
return True | 3db16ca5b2136995f4772dafa3778b54e5e41e5c | 3,653,551 |
def get_conf(bs_info, client_config, genesis_time, setup_oracle=None, setup_poet=None, args=None):
"""
get_conf gather specification information into one ContainerSpec object
:param bs_info: DeploymentInfo, bootstrap info
:param client_config: DeploymentInfo, client info
:param genesis_time: string, genesis time as set in suite specification file
:param setup_oracle: string, oracle ip
:param setup_poet: string, poet ip
:param args: list of strings, arguments for appendage in specification
:return: ContainerSpec
"""
genesis_time_delta = get_genesis_time_delta(genesis_time)
client_args = {} if 'args' not in client_config else client_config['args']
# append client arguments
if args is not None:
for arg in args:
client_args[arg] = args[arg]
# create a new container spec with client configuration
cspec = ContainerSpec(cname='client', specs=client_config)
# append oracle configuration
if setup_oracle:
client_args['oracle_server'] = 'http://{0}:{1}'.format(setup_oracle, conf.ORACLE_SERVER_PORT)
# append poet configuration
if setup_poet:
client_args['poet_server'] = '{0}:{1}'.format(setup_poet, conf.POET_SERVER_PORT)
bootnodes = node_string(bs_info['key'], bs_info['pod_ip'], conf.BOOTSTRAP_PORT, conf.BOOTSTRAP_PORT)
cspec.append_args(bootnodes=bootnodes, genesis_time=genesis_time_delta.isoformat('T', 'seconds'))
# append client config to ContainerSpec
if len(client_args) > 0:
cspec.append_args(**client_args)
return cspec | c2d27fa1922be786b0afa9212864465e14ee3de7 | 3,653,552 |
import numpy as np
from obspy import Trace
def zeropad(tr, starttime, endtime):
"""
Zeropads an obspy.Trace so as to cover the time window specified by
    `starttime` and `endtime`.
Parameters
----------
tr : obspy.Trace
starttime, endtime : obspy.UTCDateTime
Returns
-------
trace : obspy.Trace
Zeropadded copy of the input trace.
"""
trace = Trace()
for key, value in tr.stats.items():
if key not in ['endtime', 'npts']:
trace.stats[key] = value
fs = tr.stats.sampling_rate
samples_before = int((tr.stats.starttime - starttime) * fs)
samples_after = int((endtime - tr.stats.endtime) * fs)
data = tr.data
if samples_before > 0:
trace.stats.starttime = tr.stats.starttime - ((samples_before+1) / fs)
data = np.concatenate((np.zeros(samples_before+1), data))
if samples_after > 0:
data = np.concatenate((data, np.zeros(samples_after+1)))
trace.data = data
return trace | 417c17d8dea148f6534f204d064ba13665ede597 | 3,653,553 |
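A usage sketch for the row above, assuming ObsPy is installed; the trace contents and times are made up:

import numpy as np
from obspy import Trace, UTCDateTime

tr = Trace(data=np.ones(100),
           header={"sampling_rate": 10.0, "starttime": UTCDateTime(0)})
# The original trace covers 0 s .. 9.9 s; pad it to cover roughly -2 s .. 12 s.
padded = zeropad(tr, UTCDateTime(-2), UTCDateTime(12))
print(padded.stats.starttime, padded.stats.endtime)
print(padded.data.sum())  # still 100.0: the added samples are zeros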
import time
import random
def deepwalk(G, _filepath, o=1, num_walks_node=10, walk_length=80,
representation_size=128, window_size=5,):
"""not going to deal with memory exceeding case"""
output = _filepath + G.name
print("Walking...")
time_start = time.time()
walks = gu.build_deepwalk_corpus(G, num_paths=num_walks_node, path_length=walk_length,
alpha=0, rand=random.Random(0)) # alpha = 0: do not go back
time_end = time.time()
print('Walking time cost:', time_end - time_start)
print("Training...")
time_start = time.time()
# with negative sampling: 5(default)
model = Word2Vec(walks, size=representation_size, window=window_size, min_count=0, sg=1, workers=cpu_count())
time_end = time.time()
print('Training vectors time cost:', time_end - time_start)
if o == 1:
model.wv.save_word2vec_format(output + '.dw.emb')
else:
model.wv.save_word2vec_format(output + '0.dw.emb')
return time_end - time_start | a5b77e6485d29a6a08ca25aa9ea06507a7fae076 | 3,653,554 |
def logship_status(host):
"""Report log shipping retstore delta and latency"""
crit = warn = 0
msg = ''
sql = """SELECT secondary_server, secondary_database, primary_server, primary_database,
last_restored_date, DATEDIFF(mi, last_restored_date, GETDATE()) last_restored_delta,
last_restored_latency, restore_threshold
FROM msdb..log_shipping_monitor_secondary"""
rows = execute_sql(host, sql)
if type(rows) is dict: return rows
for row in rows:
if row.last_restored_delta >= row.restore_threshold:
warn += 1
msg += "Srv:%s DB:%s Restore delta %s exceeds threshold of %s\n" % (row.primary_server, row.primary_database, row.last_restored_delta, row.restore_threshold)
if row.last_restored_latency >= row.restore_threshold:
crit += 1
msg += "Srv:%s DB:%s Latency of %s exceeds threshold of %s\n" % (row.primary_server, row.primary_database, row.last_restored_latency, row.restore_threshold)
if row.last_restored_delta < row.restore_threshold and row.last_restored_latency < row.restore_threshold:
msg += "Srv:%s DB:%s Latency:%s Restore delta:%s\n" % (row.primary_server, row.primary_database, row.last_restored_latency, row.last_restored_delta)
if crit > 0:
code = 'CRITICAL'
msg = 'Log shipping CRITICAL\n' + msg
elif warn > 0:
code = 'WARNING'
msg = 'Log shipping warning\n' + msg
else:
code = 'OK'
msg = 'Log shipping OK\n' + msg
return {'code':code, 'msg': msg} | 7b0ea3282d66dd7d354d4b6d14d54bfd826d85d9 | 3,653,555 |
import numpy as np
def dice(y_true, y_pred):
"""
    Attention:
        y_true can be weighted to modify learning, therefore
        apply sign to get back to labels.
        y_pred has to be rounded to the nearest integer to obtain labels.
smooth = 1.
y_true_f = y_true.flatten()
y_pred_f = y_pred.flatten()
intersection = np.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth) | 205d7cf0f09f702d7905d42c0dfbbd16738ed1e8 | 3,653,556 |
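A quick numeric check of the Dice helper above on toy binary masks, using the numpy import added to the row:

y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[1, 0, 0, 0]])
# intersection = 1, sums are 2 and 1, so dice = (2*1 + 1) / (2 + 1 + 1) = 0.75
print(dice(y_true, y_pred))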
import os
def get_long_description(filename):
"""Return entire contents of *filename*."""
with open(os.path.join(WORKING_DIR, filename)) as fh:
return fh.read() | 6720da5b5e2a91a5ad62e4ac9bcc344e10a16656 | 3,653,557 |
def get_default_language():
"""
Returns the default language code based on the data from LANGUAGES.json.
"""
for language_code, language_data in MEDICINE_LANGUAGE_DATA.items():
if 'DEFAULT' in language_data:
if language_data['DEFAULT']:
return language_code
return 'en' | 5bbfbc1060e52a4db957afd0eb8b45cdb60036f4 | 3,653,558 |
def triple_triple(r, p=qt.QH([1, 0, 0, 0])):
"""Use three triple products for rotations and boosts."""
    # Note: 'qtype' provides a record of what algebraic operations were done to create a quaternion.
return triple_sandwich(r, p).add(triple_2_on_1(r, p), qtype="triple_triple") | 58b529faa97fae29fcc5b481263c0c84af2ddca2 | 3,653,559 |
def _pinv_trunc(x, miss):
"""Compute pseudoinverse, truncating at most "miss" fraction of varexp."""
u, s, v = linalg.svd(x, full_matrices=False)
# Eigenvalue truncation
varexp = np.cumsum(s)
varexp /= varexp[-1]
n = np.where(varexp >= (1.0 - miss))[0][0] + 1
logger.info(' Truncating at %d/%d components to omit less than %g '
'(%0.2g)' % (n, len(s), miss, 1. - varexp[n - 1]))
s = 1. / s[:n]
inv = ((u[:, :n] * s) @ v[:n]).T
return inv, n | fca4f8b7e118c88ed7be37553ede09275f8d06ec | 3,653,560 |
def mog_loglike(x, means, icovs, dets, pis):
""" compute the log likelihood according to a mixture of gaussians
with means = [mu0, mu1, ... muk]
icovs = [C0^-1, ..., CK^-1]
dets = [|C0|, ..., |CK|]
pis = [pi1, ..., piK] (sum to 1)
at locations given by x = [x1, ..., xN]
"""
xx = np.atleast_2d(x)
centered = xx[:,:,np.newaxis] - means.T[np.newaxis,:,:]
solved = np.einsum('ijk,lji->lki', icovs, centered)
logprobs = -0.5*np.sum(solved * centered, axis=1) - np.log(2*np.pi) - 0.5*np.log(dets) + np.log(pis)
logprob = scpm.logsumexp(logprobs, axis=1)
if len(x.shape) == 1:
return logprob[0]
else:
return logprob | e907c642e664188cb838e89b889257ded2a5aed9 | 3,653,561 |
from typing import Sequence
def align_chunks(array: da.core.Array, scale_factors: Sequence[int]) -> da.core.Array:
"""
Ensure that all chunks are divisible by scale_factors
"""
new_chunks = {}
for idx, factor in enumerate(scale_factors):
aligned = aligned_coarsen_chunks(array.chunks[idx], factor)
if aligned != array.chunks[idx]:
new_chunks[idx] = aligned
if new_chunks:
array = array.rechunk(new_chunks)
return array | df8b845f10bc4a8fa72e1da53d655d25f73d971d | 3,653,562 |
def getWordScore(word):
"""
    Computes the score of a word (a 50-point bonus is added when the word length equals HAND_SIZE).
word: The word to score (a string).
returns: score of the word.
"""
if len(word) == HAND_SIZE:
score = 50
else:
score = 0
for letter in word:
score = score + SCRABBLE_LETTER_VALUES[letter]
return score | 5d848b5ef5bb0e77d75a866300d9a87b557b5b1b | 3,653,563 |
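Usage sketch for getWordScore() above; SCRABBLE_LETTER_VALUES and HAND_SIZE are module-level constants not shown here, so the values below are assumptions for illustration:
SCRABBLE_LETTER_VALUES = {'c': 3, 'a': 1, 't': 1}
HAND_SIZE = 7
print(getWordScore('cat'))  # 3 + 1 + 1 = 5; no 50-point bonus because len('cat') != HAND_SIZE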
import requests
import json
def get_lang_list(source_text, key=None, print_meta_data=False):
"""
Inputs:
source_text - source text as a string
key - google api key, needed or function will raise and error
returns list of language identifiers
"""
#set up url request to google translate api
if not key:
raise Exception( "You dont have a key")
url_shell = 'https://www.googleapis.com/language/translate/v2/detect?key={0}&q={1}'
url = url_shell.format(key, source_text)
response = requests.get(url)
lang_json= json.loads(response.text)
source_lang = lang_json['data']['detections'][0][0]['language']
# if print_meta_data:
# print 'Is detection reliable: {0}'.format(data_dict['data']['detections']['isReliable'])
# print 'Confidence: {0}'.format(data_dict['data']['detections']['confidence'])
#
return source_lang | 720c3c9252535e82881411fa345734d984350537 | 3,653,564 |
def single_value_rnn_regressor(num_units,
sequence_feature_columns,
context_feature_columns=None,
cell_type='basic_rnn',
num_rnn_layers=1,
optimizer_type='SGD',
learning_rate=0.1,
momentum=None,
gradient_clipping_norm=5.0,
input_keep_probability=None,
output_keep_probability=None,
model_dir=None,
config=None,
feature_engineering_fn=None):
"""Create a RNN `Estimator` that predicts single values.
The input function passed to this `Estimator` optionally contains keys
`RNNKeys.SEQUENCE_LENGTH_KEY`. The value corresponding to
`RNNKeys.SEQUENCE_LENGTH_KEY` must be vector of size `batch_size` where entry
`n` corresponds to the length of the `n`th sequence in the batch. The sequence
length feature is required for batches of varying sizes. It will be used to
calculate loss and evaluation metrics. If `RNNKeys.SEQUENCE_LENGTH_KEY` is not
included, all sequences are assumed to have length equal to the size of
dimension 1 of the input to the RNN.
In order to specify an initial state, the input function must include keys
`STATE_PREFIX_i` for all `0 <= i < n` where `n` is the number of nested
elements in `cell.state_size`. The input function must contain values for all
state components or none of them. If none are included, then the default
(zero) state is used as an initial state. See the documentation for
`dict_to_state_tuple` and `state_tuple_to_dict` for further details.
The `predict()` method of the `Estimator` returns a dictionary with keys
`RNNKeys.PREDICTIONS_KEY` and `STATE_PREFIX_i` for `0 <= i < n` where `n` is
the number of nested elements in `cell.state_size`. The value keyed by
`RNNKeys.PREDICTIONS_KEY` has shape `[batch_size, padded_length]`. Here,
`padded_length` is the largest value in the `RNNKeys.SEQUENCE_LENGTH` `Tensor`
passed as input. Entry `[i, j]` is the prediction associated with sequence `i`
and time step `j`.
Args:
num_units: The size of the RNN cells. This argument has no effect
if `cell_type` is an instance of `RNNCell`.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
cell_type: A subclass of `RNNCell`, an instance of an `RNNCell` or one of
'basic_rnn,' 'lstm' or 'gru'.
num_rnn_layers: Number of RNN layers. Leave this at its default value 1
if passing a `cell_type` that is already a MultiRNNCell.
optimizer_type: The type of optimizer to use. Either a subclass of
`Optimizer`, an instance of an `Optimizer` or a string. Strings must be
one of 'Adagrad', 'Momentum' or 'SGD'.
learning_rate: Learning rate. This argument has no effect if `optimizer`
is an instance of an `Optimizer`.
momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
then no clipping is performed.
input_keep_probability: Probability to keep inputs to `cell`. If `None`,
no dropout is applied.
output_keep_probability: Probability to keep outputs of `cell`. If `None`,
no dropout is applied.
model_dir: The directory in which to save and restore the model graph,
parameters, etc.
config: A `RunConfig` instance.
feature_engineering_fn: Takes features and labels which are the output of
`input_fn` and returns features and labels which will be fed into
`model_fn`. Please check `model_fn` for a definition of features and
labels.
Returns:
An initialized `Estimator`.
"""
cell = _to_rnn_cell(cell_type, num_units, num_rnn_layers)
target_column = layers.regression_target()
if optimizer_type == 'Momentum':
optimizer_type = momentum_opt.MomentumOptimizer(learning_rate, momentum)
dynamic_rnn_model_fn = _get_dynamic_rnn_model_fn(
cell=cell,
target_column=target_column,
problem_type=ProblemType.REGRESSION,
prediction_type=PredictionType.SINGLE_VALUE,
optimizer=optimizer_type,
sequence_feature_columns=sequence_feature_columns,
context_feature_columns=context_feature_columns,
learning_rate=learning_rate,
gradient_clipping_norm=gradient_clipping_norm,
input_keep_probability=input_keep_probability,
output_keep_probability=output_keep_probability,
name='SingleValueRnnRegressor')
return estimator.Estimator(model_fn=dynamic_rnn_model_fn,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn) | 4290d8b4e5ea069f58b7fc5a5734c16133b1a614 | 3,653,565 |
import uuid
def token():
""" Return a unique 32-char write-token
"""
return str(uuid.uuid4().hex) | f7dc5725cc1d11ee0ab9471d141a89178fa3d07c | 3,653,566 |
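Usage sketch for token() above (illustrative only):
t = token()
print(t)       # e.g. '3d59f1c0a7e94c2b8f07c5d7a1b2c3d4'
print(len(t))  # 32 -- uuid4().hex is always 32 hex characters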
from inspect import currentframe, getmodule
def _get_caller_caller_module_name():
"""Return name of module which calls the function from which this function is invoked"""
frame = currentframe().f_back.f_back
return getmodule(frame).__name__ | 4199207922db40424e1a4fa56ee662209de06830 | 3,653,567 |
from typing import Tuple
import numpy as np
def percentile(x: np.ndarray, percentile: float = 99) -> Tuple[float, float]:
"""Get the (low, high) limit for the series by only including the data within the given percentile.
For example, if percentile is 99, (1st percentile, 99th percentile) will be returned.
Also, if percentile is 1, (1st percentile, 99th percentile) will be returned.
Args:
x: the series
percentile: the percentile, beyond which to exclude data.
Returns:
(low, high) percentiles of series
"""
percentile = max(percentile, 100 - percentile)
high = np.percentile(x, percentile)
low = np.percentile(x, 100 - percentile)
return (low, high) | 00a7c6561432da84f878985b1e3dba942d4ec478 | 3,653,568 |
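Usage sketch for percentile() above (illustrative only):
import numpy as np
x = np.arange(1000, dtype=float)
low, high = percentile(x, 99)
print(low, high)  # ~9.99 and ~989.01 -- the 1st and 99th percentiles of the series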
from pathlib import Path
def get_versions_data(
hidden=None,
is_unreleased=None,
find_latest_release=None,
sort_key=None,
labels=None,
suffix_latest_release=' (latest release)',
suffix_unreleased=' (dev)',
find_downloads=None,
):
"""Get the versions data, to be serialized to json."""
if hidden is None:
hidden = []
if is_unreleased is None:
is_unreleased = _is_unreleased
if find_latest_release is None:
find_latest_release = _find_latest_release
if find_downloads is None:
find_downloads = _find_downloads
if sort_key is None:
sort_key = parse_version
if labels is None:
labels = {}
folders = sorted(
[
str(f)
for f in Path().iterdir()
if (
f.is_dir()
and not str(f).startswith('.')
and not str(f).startswith('_')
)
],
key=sort_key,
)
labels = {folder: labels.get(folder, str(folder)) for folder in folders}
versions = []
unreleased = []
for folder in folders:
if folder not in hidden:
versions.append(folder)
if is_unreleased(folder):
unreleased.append(folder)
labels[folder] += suffix_unreleased
latest_release = find_latest_release(
[f for f in versions if f not in unreleased]
)
outdated = []
if latest_release is not None:
labels[latest_release] += suffix_latest_release
outdated = [
folder
for folder in versions
if (folder != latest_release and folder not in unreleased)
]
versions_data = {
# list of *all* folders
'folders': folders,
#
# folder => labels for every folder in "Versions"
'labels': labels,
#
# list folders that appear in "Versions"
'versions': versions,
#
# list of folders that do not appear in "Versions"
'hidden': hidden,
#
# list of folders that should warn & point to latest release
'outdated': outdated,
#
# list of dev-folders that should warn & point to latest release
'unreleased': unreleased,
#
# the latest stable release folder
'latest_release': latest_release,
#
# folder => list of (label, file)
'downloads': {folder: find_downloads(folder) for folder in folders},
}
return versions_data | 4232013fe403b3de54df571e13b881077145b61f | 3,653,569 |
import torch
import os
def cgan_training(Xtrain, Xdev, Ytrain, Ydev, use_gpu=False):
"""
Train using a conditional GAN
"""
if use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
dtype = torch.double
vh = VariableHandler(device=device, dtype=dtype)
# Make sure inputs are numpy arrays
Xtrain = np.asarray(Xtrain, dtype=np.float64)
Ytrain = np.asarray(Ytrain, dtype=np.float64)
Xdev = np.asarray(Xdev, dtype=np.float64)
Ydev = np.asarray(Ydev, dtype=np.float64)
# Sizes
batch_size = 64
input_height = 32
input_width = 64
nsample_lbls = 16
nsample_noise = 10
noise_size = 100
nlabels = Xtrain.shape[1]
torch.manual_seed(5_465_462)
# Construct the G and D models
D = Discriminator().to(device=device, dtype=dtype)
G = Generator(noise_size, vh).to(device=device, dtype=dtype)
# The number of times entire dataset is trained
nepochs = 500
# Learning rate
lr_D = 1e-3
lr_G = 1e-3
decay_rate = 0.98
# Loss and optimizers
criterion = nn.BCELoss().to(device=device)
D_optimizer = optim.SGD(D.parameters(), lr=lr_D, momentum=0.5, nesterov=True)
G_optimizer = optim.SGD(G.parameters(), lr=lr_G, momentum=0.5, nesterov=True)
D_scheduler = optim.lr_scheduler.StepLR(D_optimizer, step_size=1, gamma=decay_rate)
G_scheduler = optim.lr_scheduler.StepLR(G_optimizer, step_size=1, gamma=decay_rate)
# Tensorboard writer
writer = SummaryWriter()
logdir = writer.file_writer.get_logdir()
model_name = "CGAN"
# Validation images, labels and noise
xdev_sub = vh.tovar(Xdev[:nsample_lbls, :])
ydev_sub = vh.tovar(Ydev[:nsample_lbls, :])
valimgs = ydev_sub.view(nsample_lbls, -1, input_height, input_width)
vallbl = xdev_sub.expand(input_height, input_width, nsample_lbls, nlabels).permute(
2, 3, 0, 1
)
grid = vutils.make_grid(valimgs, nrow=nsample_lbls, normalize=True, scale_each=True)
writer.add_image("True PDF", grid, 0)
fixed_noise = vh.tovar(
torch.rand(nsample_noise, noise_size)
.to(device=device)
.repeat(1, nsample_lbls)
.reshape(-1, noise_size)
)
fixed_labels = xdev_sub.repeat(nsample_noise, 1)
# Graphs in Tensorboard
xdummy = vh.tovar(torch.rand(1, 1, input_height, input_width))
ldummy = vh.tovar(torch.rand(1, nlabels, input_height, input_width))
writer.add_graph(D, (xdummy, ldummy), verbose=False)
writer.add_graph(G, (fixed_noise, fixed_labels), verbose=False)
# Train the model
nbatches = Xtrain.shape[0] // batch_size
D.train()
for epoch in range(nepochs):
G.train()
permutation = torch.randperm(Xtrain.shape[0])
for batch, i in enumerate(range(0, Xtrain.shape[0], batch_size)):
# Global step
step = epoch * nbatches + batch
# Take a batch
indices = permutation[i : i + batch_size]
batch_x = vh.tovar(Xtrain[indices, :])
batch_y = vh.tovar(Ytrain[indices, :])
# Reshape these for the D network
actual_batch_size = batch_x.shape[0]
labels = batch_x.expand(
input_height, input_width, actual_batch_size, nlabels
).permute(2, 3, 0, 1)
imgs = batch_y.view(actual_batch_size, -1, input_height, input_width)
noise = vh.tovar(torch.rand((actual_batch_size, noise_size)))
# Real and fake labels
real_label = vh.tovar(torch.ones(actual_batch_size, 1))
fake_label = vh.tovar(torch.zeros(actual_batch_size, 1))
# update the D network
D_optimizer.zero_grad()
D_real = D(imgs, labels)
D_real_loss = criterion(D_real, real_label)
G_ = G(noise, batch_x)
D_fake = D(G_, labels)
D_fake_loss = criterion(D_fake, fake_label)
D_loss = D_real_loss + D_fake_loss
writer.add_scalar("D_real_loss", D_real_loss.item(), step)
writer.add_scalar("D_fake_loss", D_fake_loss.item(), step)
writer.add_scalar("D_loss", D_loss.item(), step)
D_loss.backward()
D_optimizer.step()
# update G network
G_optimizer.zero_grad()
G_ = G(noise, batch_x)
D_fake = D(G_, labels)
G_loss = criterion(D_fake, real_label)
writer.add_scalar("G_loss", G_loss.item(), step)
G_loss.backward()
G_optimizer.step()
if batch % 10 == 0:
print(
"Epoch [{0:d}/{1:d}], Batch [{2:d}/{3:d}], D_loss: {4:.4e}, G_loss: {5:.4e}".format(
epoch + 1,
nepochs,
batch + 1,
nbatches,
D_loss.item(),
G_loss.item(),
)
)
# Adaptive time step
G_scheduler.step()
D_scheduler.step()
for param_group in D_optimizer.param_groups:
print("Current learning rate for discriminator:", param_group["lr"])
for param_group in G_optimizer.param_groups:
print(" for generator:", param_group["lr"])
# Visualize results in Tensorboard
G.eval()
samples = G(fixed_noise, fixed_labels)
grid = vutils.make_grid(
samples, nrow=nsample_lbls, normalize=True, scale_each=True
)
writer.add_image("Generator", grid, step)
# Save the models
torch.save(G.state_dict(), os.path.join(logdir, model_name + "_G.pkl"))
torch.save(D.state_dict(), os.path.join(logdir, model_name + "_D.pkl"))
writer.close()
# Stuff we need to do to get plots...
G.eval()
mtrain = G.predict(Xtrain)
mdev = G.predict(Xdev)
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, G | f7ad50dcc1b5bb7a86c97c00693adf55b64364c5 | 3,653,570 |
from wheel_filename import InvalidFilenameError, parse_wheel_filename
import json
from datetime import datetime
def wheels(
package_name: str = Argument(..., help="The name of the package to show wheel info for"),
version: str = Argument(
None,
help="The version of the package to show info for, defaults to latest, can be omitted if using package_name==version",
),
supported_only: bool = Option(False, help="Only show wheels supported on the current platform"),
):
"""See detailed information about all the wheels of a release of a package"""
if not version and "==" in package_name:
package_name, _, version = package_name.partition("==")
url = f"{base_url}/pypi/{quote(package_name)}{f'/{quote(version)}' if version else ''}/json"
with console.status("Getting data from PyPI"):
response = session.get(url)
if response.status_code != 200:
if response.status_code == 404:
rich.print("[red]:no_entry_sign: Project or version not found[/]")
rich.print(f"[orange]:grey_exclamation: Some error occured. response code {response.status_code}[/]")
raise typer.Exit()
parsed_data = json.loads(response.text)
from packaging.version import parse as parse_version # pylint: disable=import-outside-toplevel
from rich.text import Text # pylint: disable=import-outside-toplevel
# def is_wheel_supported(wheel_name):
# try:
# tag = parse_tag("-".join(wheel_name.split("-")[2:]))
# except Exception as e:
# return "white"
# if not tag:
# return "white"
# else:
# if list(tag)[-1] in sys_tags():
# return "green"
# else:
# return "red"
data = parsed_data["urls"]
from itertools import cycle # pylint: disable=import-outside-toplevel
colors = cycle(["green", "blue", "magenta", "cyan", "yellow", "red"])
wheel_panels = []
if supported_only:
from packaging.tags import parse_tag, sys_tags # pylint: disable=import-outside-toplevel
def is_wheel_supported(wheel):
try:
parsed_wheel_file = parse_wheel_filename(wheel["filename"])
except InvalidFilenameError:
return True
for tag in parsed_wheel_file.tag_triples():
if any(tag in sys_tags() for tag in list(parse_tag(tag))):
return True
return False
data = filter(is_wheel_supported, data)
from datetime import timezone # pylint: disable=import-outside-toplevel
for wheel in data:
wheel_name = Text(wheel["filename"])
# Maybe use the regex in https://github.com/jwodder/wheel-filename/blob/master/src/wheel_filename/__init__.py#L45-L53
wheel_name.highlight_regex(
r"^(?P<distribution>\w+)-(?P<version>[A-Za-z0-9\.\-]+)(?P<build_tag>-\w{0,3})?-(?P<python_tag>[a-z]{2}[0-9]{0,3})-(?P<abi_tag>\w+)-(?P<platform_tag>.+)(?P<file_extension>\.whl)$",
style_prefix="wheel.",
)
wheel_panels.append(
Panel(
"\n".join(
filter(
None,
[
f"[blue]Comment:[/] {wheel['comment_text']}" if wheel["comment_text"] else None,
f"[magenta]Has Signature[/]: {wheel['has_sig']}",
f"[cyan]Package Type:[/] {wheel['packagetype']}",
f"[green]Requires Python:[/] {wheel['requires_python']}"
if not wheel["requires_python"] is None
else None,
f"[yellow]Size:[/] {humanize.naturalsize(wheel['size'], binary=True)}",
f"[bright_cyan]Yanked Reason[/]: {wheel['yanked_reason']}" if wheel["yanked"] else None,
f"[red]Upload Time[/]: {humanize.naturaltime(utc_to_local(datetime.strptime(wheel['upload_time_iso_8601'], '%Y-%m-%dT%H:%M:%S.%fZ'), timezone.utc))}",
],
)
),
title=f"[white]{wheel_name}[/]" if not wheel_name.plain.endswith(".whl") else wheel_name,
border_style=next(colors),
)
)
from rich.columns import Columns # pylint: disable=import-outside-toplevel
console.print(Columns(wheel_panels)) | f57b577426dbd24b86d9eaa91b13c0b048d22afe | 3,653,571 |
from typing import List
import os
def _list_of_files() -> List[str]:
"""
Return the list of waypoint story files
:return:
"""
file_list = (
f for f in os.listdir(waypoint_directory_path) if f.endswith("." + "yml")
)
waypoint_list_file = []
for file in file_list:
if not file.startswith("_"):
waypoint_list_file.append(file)
return waypoint_list_file | dfc7033c2610b13fb3e3cae8e2074915a594caf5 | 3,653,572 |
import h5py
import numpy as np
import pandas as pd
def extract_pvdata(h5file, timestamp, pvnames=None):
"""
Extract as a snapshot (PV values) nearest a timestamp from a BSA HDF5 file.
Parameters
----------
h5file: str
BSA HDF5 file with data that includes the timestamp
timestamp: datetime-like, str, int, float
This must be localized (not naive time).
Returns
-------
pvdata: dict
Dict of pvname:value
found_timestamp : pd.Timestamp
The exact time that the data was tagged at
See Also
--------
bsa_snapshot
"""
timestamp = pd.Timestamp(timestamp).tz_convert('UTC') # Convert to UTC
with h5py.File(h5file) as h5:
# Use pandas to get the nearest time
s = h5['secondsPastEpoch'][:, 0]
ns = h5['nanoseconds'][:, 0]
df = pd.DataFrame({'s':s, 'ns':ns})
df['time'] = pd.to_datetime(df['s'], unit='s', utc=True) + pd.to_timedelta(df['ns'], unit='nanoseconds')
# Assure that the time is in here
assert timestamp <= df.time.iloc[-1]
assert timestamp >= df.time.iloc[0]
# Search for the nearest time
ix = df.time.searchsorted(timestamp)
found_timestamp = df['time'].iloc[ix]
# form snapshot dict
pvdata = {}
# Return everything
if pvnames is None:
pvnames = list(h5)
for pvname in pvnames:
if pvname in h5:
pvdata[pvname] = np.squeeze(h5[pvname][ix])
else:
pvdata[pvname] = None
return pvdata, found_timestamp | cd62b347735c97e962c8eec01d4344b4fb4e63f9 | 3,653,573 |
import functools
def decorate_func_with_plugin_arg(f):
"""Decorate a function that takes a plugin as an argument.
A "plugin" is a pair of simulation and postprocess plugins.
The decorator expands this pair.
"""
@functools.wraps(f)
def wrapper(self, plugins_tuple):
return f(self, plugins_tuple[0], plugins_tuple[1])
return wrapper | e90c86bfd6c3cada33c867d26ed64da3cac6f9c4 | 3,653,574 |
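Usage sketch for decorate_func_with_plugin_arg() above; the Runner class is a hypothetical example:
class Runner:
    @decorate_func_with_plugin_arg
    def run(self, sim_plugin, post_plugin):
        return f"{sim_plugin} -> {post_plugin}"
print(Runner().run(("simulate", "postprocess")))  # 'simulate -> postprocess'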
from datetime import datetime
def datestr(date=None):
"""Convert timestamps to strings in a predefined format
"""
if date is None:
date = datetime.utcnow()
if isinstance(date, str):
date = parse_time(date)
return date.strftime("%y-%m-%d %H:%M:%S") | 36505b926eef6aaa5bcbae011ba90931c20a5067 | 3,653,575 |
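Usage sketch for datestr() above; only the datetime/None paths are exercised because parse_time (used for string inputs) is defined elsewhere:
from datetime import datetime
print(datestr(datetime(2024, 3, 1, 12, 30, 5)))  # '24-03-01 12:30:05'
print(datestr())  # current UTC time in the same format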
import re
import argparse
def validate_release_tag_param(arg_value):
"""
User defined helper function to validate that the release_tag parameter follows the correct naming convention
    :param arg_value: release tag parameter passed through the command line arguments
:return: arg_value
"""
release_tag_regex = re.compile(r'[0-9]{4}Q[0-9]R[0-9]')
if not re.match(release_tag_regex, arg_value):
msg = f"Parameter ERROR {arg_value} is in an incorrect format, accepted: YYYYQ#R#"
LOGGER.error(msg)
raise argparse.ArgumentTypeError(msg)
return arg_value | 80ba82ad5d13d28349272e2c4b9831864e32fbf2 | 3,653,576 |
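Usage sketch for validate_release_tag_param() above; only the valid-tag path is shown because the failure path needs the module-level LOGGER:
print(validate_release_tag_param('2022Q3R1'))  # '2022Q3R1' -- matches YYYYQ#R#
# An input like '2022-Q3' would log an error and raise argparse.ArgumentTypeError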
import hid
def init():
"""Connect to the keyboard, switch all lights off"""
global bufferC # Buffer with the full key/lights mapping
global device
device=hid.device()
# 0x17cc: Native Instruments. 0x1410: KK S88 MK1
device.open(0x17cc, pid)
device.write([0xa0])
bufferC = [0x00] * numkeys
notes_off()
return True | 0edc2085cbd6b48fef85d5492e4093551a15aac1 | 3,653,577 |
def render(path):
"""
Render the knowledge post with all the related formatting.
"""
mode = request.args.get('render', 'html')
username, user_id = current_user.identifier, current_user.id
tmpl = 'markdown-rendered.html'
if mode == 'raw':
tmpl = 'markdown-raw.html'
elif mode == 'presentation':
# TODO(dan?) fix presentation post
# presentation_post = {}
# presentation_post['authors_string'] = post.author_string
# presentation_post['tldr'] = post.tldr
# presentation_post['html'] = html
# html = create_presentation_text(presentation_post)
tmpl = "markdown-presentation.html"
post = (db_session.query(Post)
.filter(Post.path == path)
.first())
if not post:
knowledge_aliases = current_repo.config.aliases
if path in knowledge_aliases:
# TODO: reframe as redirect
post = (db_session.query(Post)
.filter(Post.path == knowledge_aliases[path])
.first())
# If post is None ...
if not post:
if not current_app.config.get('INDEXING_ENABLED', True): # ... and indexing is disabled...
return _render_preview(path=path, tmpl=tmpl) # try rendering in preview mode
else: # ...otherwise, raise exception that post wasn't found
raise Exception(u"unable to find post at {}".format(path))
if post.contains_excluded_tag:
# It's possible that someone gets a direct link to a post that has an excluded tag
return render_template("error.html")
if post.private and not (username in post.authors or username in current_repo.config.editors):
allowed_users = set(user.id for group in post.groups for user in group.users)
if user_id not in allowed_users:
return render_template("permission_ask.html", authors=post.authors_string)
rendered = render_post(post, with_toc=True)
raw_post = render_post_raw(post) if mode == 'raw' else None
comments = post.comments
for comment in comments:
author = db_session.query(User).filter(User.id == comment.user_id).first()
if author is not None:
comment.author = author.format_name
else:
comment.author = 'Anonymous'
if mode != 'raw':
comment.text = render_comment(comment)
user_obj = current_user
tags_list = [str(t.name) for t in post.tags]
user_subscriptions = [str(s) for s in user_obj.subscriptions]
is_author = user_id in [author.id for author in post.authors]
web_editor_prefixes = current_app.config['WEB_EDITOR_PREFIXES']
is_webpost = False
if web_editor_prefixes:
is_webpost = any(prefix for prefix in web_editor_prefixes if path.startswith(prefix))
rendered = render_template(tmpl,
html=rendered['html'],
toc=rendered['toc'],
post_id=post.id,
post_path=path,
raw_post=raw_post,
comments=comments,
username=username,
post_author=post.authors_string,
title=post.title,
page_views=post.view_count,
unique_views=post.view_user_count,
likes=post.vote_counted_for_user(user_id=user_id),
total_likes=post.vote_count,
tags_list=tags_list,
user_subscriptions=user_subscriptions,
show_webeditor_button=is_webpost and is_author,
webeditor_buttons=is_webpost,
web_uri=post.kp.web_uri,
table_id=None,
is_private=(post.private == 1),
is_author=is_author,
can_download=permissions.post_download.can(),
downloads=post.kp.src_paths)
return rendered | 67d9f48c94d79ab069403457b63f31b495123363 | 3,653,578 |
def compute_acc_bin(conf_thresh_lower, conf_thresh_upper, conf, pred, true):
"""
# Computes accuracy and average confidence for bin
Args:
conf_thresh_lower (float): Lower Threshold of confidence interval
conf_thresh_upper (float): Upper Threshold of confidence interval
conf (numpy.ndarray): list of confidences
pred (numpy.ndarray): list of predictions
true (numpy.ndarray): list of true labels
Returns:
(accuracy, avg_conf, len_bin): accuracy of bin, confidence of bin and number of elements in bin.
"""
filtered_tuples = [x for x in zip(pred, true, conf) if x[2] > conf_thresh_lower and x[2] <= conf_thresh_upper]
if len(filtered_tuples) < 1:
return 0, 0, 0
else:
correct = len([x for x in filtered_tuples if x[0] == x[1]]) # How many correct labels
len_bin = len(filtered_tuples) # How many elements falls into given bin
avg_conf = sum([x[2] for x in filtered_tuples]) / len_bin # Avg confidence of BIN
accuracy = float(correct) / len_bin # accuracy of BIN
return accuracy, avg_conf, len_bin | eb338800751de635e6b72213254287554cd34dc0 | 3,653,579 |
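Usage sketch for compute_acc_bin() above (illustrative only):
conf = [0.55, 0.65, 0.72, 0.91]
pred = [1, 0, 1, 1]
true = [1, 1, 1, 1]
# Bin (0.6, 0.8] keeps the 0.65 and 0.72 samples: accuracy 0.5, avg confidence ~0.685, 2 items
print(compute_acc_bin(0.6, 0.8, conf, pred, true))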
import importlib
def multi_backend_test(globals_dict,
relative_module_name,
backends=('jax', 'tensorflow'),
test_case=None):
"""Multi-backend test decorator.
The end goal of this decorator is that the decorated test case is removed, and
replaced with a set of new test cases that have been rewritten to use one or
more backends. E.g., a test case named `Test` will by default be rewritten to
`Test_jax` and 'Test_tensorflow' which use the JAX and TensorFlow,
respectively.
The decorator works by using the dynamic rewrite system to rewrite imports of
  the module the test is defined in, and inserting the appropriately renamed test
cases into the `globals()` dictionary of the original module. A side-effect of
this is that the global code inside the module is run `1 + len(backends)`
times, so avoid doing anything expensive there. This does mean that the
original module needs to be in a runnable state, i.e., when it uses symbols
from `backend`, those must be actually present in the literal `backend`
module.
A subtle point about what this decorator does in the rewritten modules: the
rewrite system changes the behavior of this decorator to act as a passthrough
to avoid infinite rewriting loops.
Args:
globals_dict: Python dictionary of strings to symbols. Set this to the value
of `globals()`.
relative_module_name: Python string. The module name of the module where the
decorated test resides relative to `fun_mc`. You must not use `__name__`
for this as that is set to a defective value of `__main__` which is
sufficiently abnormal that the rewrite system does not work on it.
backends: Python iterable of strings. Which backends to test with.
test_case: The actual test case to decorate.
Returns:
None, to delete the original test case.
"""
if test_case is None:
return lambda test_case: multi_backend_test( # pylint: disable=g-long-lambda
globals_dict=globals_dict,
relative_module_name=relative_module_name,
test_case=test_case)
if BACKEND is not None:
return test_case
if relative_module_name == '__main__':
raise ValueError(
'module_name should be written out manually, not by passing __name__.')
  # This assumes `test_util` is 1 level deep inside of `fun_mc`. If we
# move it, we'd change the `-1` to equal the (negative) nesting level.
root_name_comps = __name__.split('.')[:-1]
relative_module_name_comps = relative_module_name.split('.')
# Register the rewrite hooks.
importlib.import_module('.'.join(root_name_comps + ['backends', 'rewrite']))
new_test_case_names = []
for backend in backends:
new_module_name_comps = (
root_name_comps + ['dynamic', 'backend_{}'.format(backend)] +
relative_module_name_comps)
# Rewrite the module.
new_module = importlib.import_module('.'.join(new_module_name_comps))
# Subclass the test case so that we can rename it (absl uses the class name
# in its UI).
base_new_test = getattr(new_module, test_case.__name__)
new_test = type('{}_{}'.format(test_case.__name__, backend),
(base_new_test,), {})
new_test_case_names.append(new_test.__name__)
globals_dict[new_test.__name__] = new_test
# We deliberately return None to delete the original test case from the
# original module. | 1006e2bc983f7821138ab27b6d2465055a275c0d | 3,653,580 |
import random
def _get_typed_array():
"""Generates a TypedArray constructor.
There are nine types of TypedArrays and TypedArray has four constructors.
Types:
* Int8Array
* Int16Array
* Int32Array
* Uint8Array
* Uint16Array
* Uint32Array
* Uint8ClampedArray
* Float32Array
* Float64Array
Constructors:
* new TypedArray(length)
* new TypedArray(typedArray)
* new TypedArray(object)
* new TypedArray(buffer)
Returns:
A string made up of a randomly chosen type and argument type from the
lists above.
"""
array_type = random.choice([
'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array',
'Uint32Array', 'Uint8ClampedArray', 'Float32Array', 'Float64Array'
])
# Choose an argument type at random.
arguments = random.choice([
# length e.g. 293
      # We choose 2**10 as the upper boundary because the max length allowed
# by WebBluetooth is 2**10.
lambda: utils.UniformExpoInteger(0, 10),
# typedArray e.g. new Uint8Array([1,2,3])
_get_typed_array,
# object e.g. [1,2,3]
lambda: _get_array_of_random_ints(max_length=1000, max_value=2**64),
# buffer e.g. new Uint8Array(10).buffer
lambda: _get_typed_array() + '.buffer',
])
return 'new {array_type}({arguments})'.format(
array_type=array_type, arguments=arguments()) | 31eea5c66689584ff38eb8edad3a15231f7cd438 | 3,653,581 |
def _is_valid_requirement(requirement: str) -> bool:
"""Returns True is the `requirement.txt` line is valid."""
is_invalid = (
not requirement or # Empty line
requirement.startswith('#') or # Comment
requirement.startswith('-r ') # Filter the `-r requirement.txt`
)
return not is_invalid | 73b8ad139329698ad334b230cb04976db4ec05ba | 3,653,582 |
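Usage sketch for _is_valid_requirement() above (illustrative only):
for line in ['requests>=2.0', '# a comment', '', '-r base.txt']:
    print(repr(line), _is_valid_requirement(line))
# only 'requests>=2.0' is reported as valid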
from kivy.clock import mainthread
from kivy.app import App
import threading
def execute(cmd):
"""Execute a random string in the app context
"""
_result = [None]
_event = threading.Event()
@mainthread
def _real_execute():
app = App.get_running_app()
idmap = {"app": app}
try:
exec(cmd, idmap)
except Exception as e:
_result[:] = [u"{}".format(e)]
_event.set()
_real_execute()
_event.wait()
return _result[0] | 2ec1850487d854a074dd60642ff38a7ec9fc7e97 | 3,653,583 |
import six
def is_scalar(element):
"""An `is_atomic` criterion. Returns `True` for scalar elements.
Scalar elements are : strings and any object that is not one of:
collections.Sequence, collections.Mapping, set, or attrs object.
```
import nifty_nesting as nest
flat = nest.flatten([1, [2, 3]], is_atomic=is_scalar)
assert flat == [1, 2, 3]
```
Arguments:
element: The element to check.
Returns:
`True` if the element is a scalar, else `False`.
"""
if isinstance(element, six.string_types):
return True
if is_attrs_object(element):
return False
if is_sequence(element) or is_set(element):
return False
if is_mapping(element):
return False
return True | 07f280822a6167ab951942f6e2479476ceec2dc5 | 3,653,584 |
from typing import Union
from typing import Sequence
def wrap_singleton_string(item: Union[Sequence, str]):
""" Wrap a single string as a list. """
if isinstance(item, str):
# Can't check if iterable, because a string is an iterable of
# characters, which is not what we want.
return [item]
return item | 6e0946fee8fddd23631ff66d405dce2ae8a15fa6 | 3,653,585 |
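Usage sketch for wrap_singleton_string() above (illustrative only):
print(wrap_singleton_string("alpha"))         # ['alpha']
print(wrap_singleton_string(["alpha", "b"]))  # ['alpha', 'b'] -- already a sequence, returned unchanged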
def view_milestone_history(request, chosen_year=None):
"""
http://127.0.0.1:8000/milestones/by-columns/
:param request:
:return:
"""
(chosen_year, basic_query) = get_basic_milestone_history_query(chosen_year)
milestones = basic_query.order_by('due_on')
open_closed_cnts = get_issue_counts_query_base().values('open_issues', 'closed_issues')
num_open_issues = sum(x['open_issues'] for x in open_closed_cnts)
num_closed_issues = sum( x['closed_issues'] for x in open_closed_cnts)
mmo = MilestoneMonthOrganizer(milestones)
#mmo.show()
#return HttpResponse('ok')
sorted_repos = mmo.get_sorted_repos()
if sorted_repos and len(sorted_repos) > 0:
last_retrieval_time = sorted_repos[0].last_retrieval_time
else:
last_retrieval_time = None
d = {}
d['page_title'] = 'Previous Milestones for %s' % chosen_year
d['is_milestone_history_all'] = True
d['chosen_year'] = chosen_year
d['last_retrieval_time'] = last_retrieval_time
d['sorted_repos'] = sorted_repos
d['organized_months'] = mmo.get_organized_months(descending_order=True)
d['NO_DUE_DATE'] = RepoMilestoneMonthsOrganizer.NO_DUE_DATE
d['milestone_count'] = milestones.count()
d['num_open_issues'] = num_open_issues
d['num_closed_issues'] = num_closed_issues
d['hide_description'] = True
#print(d)
return render_to_response('milestones/view_history_multi_column.html'\
, d\
, context_instance=RequestContext(request)) | 1d40c08701e9088682e9e0663c1538956b22770c | 3,653,586 |
import numpy as np
def M_absolute_bol(lum):
    """Computes the absolute bolometric magnitude
----------
lum : `float/array`
luminosity in solar luminosities
Returns
-------
M_bol : `float/array`
absolute bolometric magnitude
"""
log_lum = np.log10(lum)
M_bol = 4.75 - 2.7 * log_lum
return M_bol | dd3209fd6c91a7b1b51f43a7a15f9c5eaccd740d | 3,653,587 |
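Usage sketch for M_absolute_bol() above, using the constants exactly as written in the function body:
import numpy as np
print(M_absolute_bol(1.0))                      # 4.75 for a 1 L_sun star
print(M_absolute_bol(np.array([10.0, 100.0])))  # approximately [ 2.05 -0.65]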
def codes_index_get_double(indexid, key):
# type: (cffi.FFI.CData, bytes) -> T.List[float]
"""
Get the list of double values associated to a key.
The index must be created with such a key (possibly together with other
keys).
:param bytes key: the keyword whose list of values has to be retrieved
:rtype: List(int)
"""
size = codes_index_get_size(indexid, key)
values = ffi.new('double[]', size)
size_p = ffi.new('size_t *', size)
check_return(lib.codes_index_get_double)(indexid, key, values, size_p)
return list(values) | 9a0c2c27f917ecfe63ad1f6a797aa152928d294c | 3,653,588 |
from typing import Optional
def lemmatize(
nlp: Optional[Language] = None, name="lemmatize"
) -> ops.base.SpacyBasedOperation:
"""Helper function to return SpacyBasedOperation for lemmatizing.
This operation returns a stream.DataStream where each item is a string after
being lemmatized.
Parameters
----------
nlp : Optional[spacy.language.Language]
spacy's language model or None. If None then by default
`en_core_web_sm` spacy model is loaded
name : Optional[str]
name of this operation
Returns
-------
out : SpacyBasedOperation
"""
return ops.base.SpacyBasedOperation(nlp=nlp, process_doc_fn=_lemmatize, name=name,) | 797efa35320cf4b4e5e1176d1fbbcee13bbaa884 | 3,653,589 |
import numpy as np
def mean_absolute_deviation(curve1: np.ndarray, curve2: np.ndarray, *args):
    """Calculate the mean absolute deviation between two curves."""
diff = np.abs(curve1 - curve2)
return np.mean(diff) | 687fda24399bc71e1d1f0ca8b880a0eafc9a1a7d | 3,653,590 |
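Usage sketch for mean_absolute_deviation() above (illustrative only):
import numpy as np
a = np.array([1.0, 2.0, 3.0])
b = np.array([1.5, 2.0, 2.0])
print(mean_absolute_deviation(a, b))  # mean of |[-0.5, 0.0, 1.0]| = 0.5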
def get_segtype_hops(seg_type, connector=None): # pragma: no cover
"""
Request list of segments by type used to construct paths.
:param seg_type: The type of PathSegmentType requested.
:returns: List of SCIONDSegTypeHopReplyEntry objects.
"""
global _connector
if not connector:
connector = _connector
if not connector:
raise SCIONDLibNotInitializedError
return connector.get_segtype_hops(seg_type) | 525e9feb6ea5b0692ef67ec20aa967a60f65519b | 3,653,591 |
def build_moderation_channel_embed(ctx, channel, action):
"""
Builds a moderation embed which display some information about the mod channel being created/removed
:param ctx: The discord context
:param channel: The channel to be created/removed
:param action: either "Added" or "Removed" to tell the user what happened to the mod channel
:return embed: The moderation embed to be sent to the user
"""
embed = create_default_embed(ctx)
embed.title = "Koala Moderation - Mod Channel " + action
embed.add_field(name="Channel Name", value=channel.mention)
embed.add_field(name="Channel ID", value=channel.id)
return embed | de0f32a29019a05125f2389286140bc6dcfff198 | 3,653,592 |
def print_settings(settings):
"""
    This function returns the equation of state (EoS) fitting settings.
Returns
-------
text: str
Pretty-printed settings for the current Quantas run.
"""
text = '\nCalculator: Equation of state (EoS) fitting\n'
text += '\nMeasurement units\n'
text += '-------------------------------------\n'
text += ' - {:12} {}\n'.format('pressure:', settings['pressure_unit'])
text += ' - {:12} {}\n'.format('lenght:', settings['lenght_unit'])
return text | 4e64353e0c519a26ac210de1df39ce09fbf54045 | 3,653,593 |
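Usage sketch for print_settings() above; the misspelled 'lenght_unit' key is kept deliberately because the function body reads exactly that key:
settings = {'pressure_unit': 'GPa', 'lenght_unit': 'angstrom'}
print(print_settings(settings))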
def run_analysis(output, stimtype="gabors", comp="surp", ctrl=False,
CI=0.95, alg="sklearn", parallel=False, all_scores_df=None):
"""
run_analysis(output)
Calculates statistics on scores from runs for each specific analysis
criteria and saves them in the summary scores dataframe.
Overwrites any existing dataframe of analysed data.
Required args:
- output (str): general directory in which summary dataframe is saved.
Optional args:
- stimtype (str) : stimulus type
default: "gabors"
- comp (str) : type of comparison
default: "surp"
- ctrl (bool) : if True, control comparisons are analysed
default: False
- CI (num) : CI for shuffled data
default: 0.95
- alg (str) : algorithm used to run logistic regression
("sklearn" or "pytorch")
default: "sklearn"
- parallel (bool) : if True, run information is collected in
parallel
default: False
- all_scores_df (pd df): already collated scores dataframe
default: None
Returns:
- scores_summ (pd DataFrame): dataframe with analysed scores
"""
if all_scores_df is None:
all_scores_df = run_collate(output, stimtype, comp, ctrl, alg, parallel)
stats = "mean" # across runs for shuffle CIs
if all_scores_df is None:
return
scores_summ = pd.DataFrame()
ext_test = sess_str_util.ext_test_str(
("q1v4" in output), ("rvs" in output), comp)
if ext_test == "":
ext_test = None
# common labels
comm_labs = gen_util.remove_if(info_dict(),
["uniqueid", "run_n", "epoch_n"])
# get all unique comb of labels
for acr_shuff in [False, True]:
if not acr_shuff:
df_unique = all_scores_df[comm_labs].drop_duplicates()
else:
df_unique = all_scores_df[gen_util.remove_if(comm_labs,
["mouse_n", "n_rois"])].drop_duplicates()
for _, df_row in df_unique.iterrows():
if acr_shuff and not df_row["shuffle"]:
# second pass, only shuffle
continue
vals = [df_row[x] for x in comm_labs]
curr_lines = gen_util.get_df_vals(all_scores_df, comm_labs, vals)
# assign values to current line in summary df
curr_idx = len(scores_summ)
gen_util.set_df_vals(scores_summ, curr_idx, comm_labs, vals)
# calculate stats
scores_summ = calc_stats(scores_summ, curr_lines, curr_idx, CI,
ext_test, stats=stats, shuffle=acr_shuff)
savename = get_df_name("analyse", stimtype, comp, ctrl, alg)
file_util.saveinfo(scores_summ, savename, output, overwrite=True)
return scores_summ | 5f6c44fcfc482c66ee318be3787419ee2e811962 | 3,653,594 |
from typing import List
from typing import Dict
def decrypt_ballots_with_all_guardians(
ballots: List[Dict], guardians: List[Dict], context: Dict
) -> Dict:
"""
Decrypt all ballots using the guardians.
Runs the decryption in batches, rather than all at once.
"""
ballots_per_batch = 2
decrypted_ballots: Dict = {}
for batch in batch_list(ballots, ballots_per_batch):
ballot_shares: Dict[str, List[Dict]] = {}
# Each guardian should decrypt their own shares independently...
for guardian in guardians:
response = guardian_api.decrypt_ballot_shares(batch, guardian, context)
shares: List[Dict] = response["shares"]
ballot_shares[guardian["id"]] = shares
# These shares are then gathered by the mediator and used to fully decrypt the ballots!
decrypted_batch = mediator_api.decrypt_ballots(batch, ballot_shares, context)
# The decrypted ballots are keyed by ballot ID. Merge them into the full dictionary.
decrypted_ballots = {**decrypted_ballots, **decrypted_batch}
return decrypted_ballots | 6aebf29e7d0b41fd3da23d6bbecf7e40c56e1c9f | 3,653,595 |
def getRealItemScenePos(item):
"""
Returns item's real position in scene space. Mostly for e.g. stranditems.
This will change as the root item is moved round the scene,
but should not change when zooming.
"""
view = pathview()
try:
vhitem = item.virtualHelixItem()
linepos = linecenter(item.line()) # StrandItem lines are drawn in the virtual-helix space.
except AttributeError:
# E.g. EndPointItems, caps, etc, has no VhItem, position should be in scene coordinates:
return item.scenePos()
# Should I map to scene space or maybe use pathrootitem, i.e. vhitem.mapToItem(pathroot(), *linepos) ?
# mapping to pathroot produces constant result independent of zoom and transform.
# mapping to scene produces variable results.
return vhitem.mapToScene(*linepos) | cef292e25cd99886841bb5aa5d521c8630284210 | 3,653,596 |
def get_default_upload_mode():
"""
Returns the string for the default upload mode
:return: Default upload mode string
"""
return api.MODE_DEFAULT | 003672d478dc5754ba8b62833d9c8706b482bd0f | 3,653,597 |
def remove_multi_whitespace(string_or_list):
""" Cleans redundant whitespace from extracted data """
if type(string_or_list) == str:
return ' '.join(string_or_list.split())
return [' '.join(string.split()) for string in string_or_list] | a284eb1ea685fb55afeefe78d863a716475a9182 | 3,653,598 |
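Usage sketch for remove_multi_whitespace() above (illustrative only):
print(remove_multi_whitespace('  too   many \t spaces '))  # 'too many spaces'
print(remove_multi_whitespace(['a  b', ' c   d ']))        # ['a b', 'c d']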
def validate_board(board: list) -> bool:
"""
Checks if board fits the rules. If fits returns True, else False.
>>> validate_board(["**** ****","***1 ****","** 3****","* 4 1****",\
" 9 5 "," 6 83 *","3 1 **"," 8 2***"," 2 ****"])
False
"""
if check_rows(board) and\
check_columns(board) and\
check_color(board):
return True
return False | 8f6b2cdf9e7cecd456378a11b580b6a69d52a308 | 3,653,599 |