content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
import numpy as np
import xgboost as xgb
def cross_validation(df, K, hyperparameters):
    """
    Perform K-fold cross validation on a dataset.
    :param df: pandas.DataFrame whose first column is the target and the remaining columns are features
    :param K: int, number of folds (values below 2 are bumped up to 2)
    :param hyperparameters: dict of xgboost training parameters
    :return: (list of per-fold validation RMSE values, the model trained on the last fold)
    """
    # Guard against a degenerate split before the folds are created
    if K < 2:
        K = 2
    train_indices = list(df.sample(frac=1).index)
    k_folds = np.array_split(train_indices, K)
    rmse_list = []
    for i in range(len(k_folds)):
        training_folds = [fold for j, fold in enumerate(k_folds) if j != i]
        training_indices = np.concatenate(training_folds)
        x_train, y_train = df.iloc[training_indices, 1:], df.iloc[training_indices, :1]
        x_validation, y_validation = df.iloc[k_folds[i], 1:], df.iloc[k_folds[i], :1]
        dtrain = xgb.DMatrix(data=x_train, label=y_train)
        dvalidation = xgb.DMatrix(data=x_validation, label=y_validation)
        model = xgb.train(
            params=hyperparameters,
            dtrain=dtrain,
            evals=[(dtrain, "train"), (dvalidation, "validation")],
        )
        # Booster.eval() reports its metric under the default "eval" name
        eval_results = model.eval(dvalidation)
        rmse_list.append(float(eval_results.split("eval-rmse:")[1]))
    return rmse_list, model | 72cdf91efa029eb8c029eb84d596057d13a7c515 | 3,657,218 |
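# A minimal usage sketch for cross_validation above (not part of the original
# snippet). It assumes numpy, pandas and xgboost are installed and that the
# target sits in the first DataFrame column, as df.iloc[:, :1] implies; the
# synthetic data and hyperparameters are illustrative only.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=100)
demo_df = pd.DataFrame(np.column_stack([y, X]), columns=["target", "f1", "f2", "f3"])
rmse_per_fold, last_model = cross_validation(
    demo_df, K=5, hyperparameters={"objective": "reg:squarederror", "max_depth": 3}
)
print(rmse_per_fold)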
from typing import List
from typing import Dict
def solve_cities(cities: List, gdps: List, sick: List, total_capacity: int,
value_r=0, weight_r=0, num_reads=1, verbose=False) -> Dict:
"""
    Solves the problem: "Which cities should I shut down in order to stay
    within healthcare resource constraints while maximizing overall GDP?"
    parameters:
        cities - list of city names
        gdps - corresponding list of GDP per city
        sick - corresponding number of sick people per city
        total_capacity - max capacity for sick people summed over all cities
        value_r - fraction of a closed city's GDP that is still counted as salvaged
        weight_r - weight parameter forwarded to knapsack_bqm
        num_reads - number of samples to take
        verbose - whether to print out the best result
    returns:
        (list) - list of dictionaries with individual results and selected attributes
sorted in order of least energy first
"""
if sum(sick) < total_capacity:
print("Warning in solve_cities: Total number of sick people is less "
+ "than total capacity. There's no knapsack problem to solve!")
bqm = knapsack_bqm(cities, gdps, sick, total_capacity,
value_r=value_r, weight_r=weight_r)
sampler = LeapHybridSampler()
samplesets = [sampler.sample(bqm) for _ in range(num_reads)]
df = pd.DataFrame({'city': cities, 'gdp': gdps, 'sick': sick})
df = df.set_index('city')
solution_set = []
for sampleset in samplesets:
open_cities = []
closed_cities = []
for k, v in sampleset.first.sample.items():
if k in cities:
if v == 1:
open_cities.append(k)
else:
closed_cities.append(k)
solution_set.append({
'open_cities': open_cities,
'closed_cities': closed_cities,
'energy': sampleset.first.energy,
'salvaged_gdp': sum(df.loc[open_cities]['gdp']) + sum(df.loc[closed_cities]['gdp']) * value_r,
'used_capacity': int(round(sum(df.loc[open_cities]['sick'])))
})
# do sorting from lowest to highest energy
if num_reads > 1:
energies = [solution['energy'] for solution in solution_set]
solution_set = [x for _, x in sorted(zip(energies, solution_set))]
if verbose:
print('BEST SOLUTION')
print('Open cities')
print(solution_set[0]['open_cities'])
print('\n')
print('Closed cities')
print(solution_set[0]['closed_cities'])
print('\n')
total_gdp = sum(df['gdp'])
salvaged_gdp = solution_set[0]['salvaged_gdp']
print(
f'Salvaged GDP: {salvaged_gdp} ({(100*salvaged_gdp/total_gdp):.1f}%)')
used_capacity = solution_set[0]['used_capacity']
print(
f'Used up hospital capacity: {used_capacity:d} of {total_capacity} ({(100*used_capacity/total_capacity):.1f}%)')
return solution_set | 52bef06069ee6975fbc5dea50cbb44349c96b9db | 3,657,219 |
def catalog():
"""Render the mapping catalog page."""
if request.args.get(EQUIVALENT_TO):
mappings = current_app.manager.get_mappings_by_type(EQUIVALENT_TO)
message = Markup("<h4>You are now visualizing the catalog of equivalent mappings</h4>")
flash(message)
elif request.args.get(IS_PART_OF):
mappings = current_app.manager.get_mappings_by_type(IS_PART_OF)
message = Markup("<h4>You are now visualizing the catalog of hierarchical mappings</h4>")
flash(message)
else:
mappings = current_app.manager.get_all_mappings()
return render_template(
'curation/catalog.html',
STYLED_NAMES=STYLED_NAMES,
mappings=mappings,
all='all'
) | b28904fff79b978225eda1bb3ed4f6e04c817737 | 3,657,220 |
def apply_inverse_rot_to_vec(rot, vec):
"""Multiply the inverse of a rotation matrix by a vector."""
# Inverse rotation is just transpose
return [rot[0][0] * vec[0] + rot[1][0] * vec[1] + rot[2][0] * vec[2],
rot[0][1] * vec[0] + rot[1][1] * vec[1] + rot[2][1] * vec[2],
rot[0][2] * vec[0] + rot[1][2] * vec[1] + rot[2][2] * vec[2]] | 1108ac6caa30b3562a2af1bcc83e1c1a1bfd8d4d | 3,657,222 |
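# Quick illustrative check of apply_inverse_rot_to_vec (not from the original
# source): for a 90-degree rotation about the z-axis, applying the inverse
# (i.e. the transpose) to the rotated vector recovers the original vector.
rot_z_90 = [[0.0, -1.0, 0.0],
            [1.0, 0.0, 0.0],
            [0.0, 0.0, 1.0]]
rotated = [0.0, 1.0, 0.0]  # rot_z_90 applied to [1, 0, 0]
print(apply_inverse_rot_to_vec(rot_z_90, rotated))  # -> [1.0, 0.0, 0.0]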
def gsl_blas_dsdot(*args, **kwargs):
"""gsl_blas_dsdot(gsl_vector_float const * X, gsl_vector_float const * Y) -> int"""
return _gslwrap.gsl_blas_dsdot(*args, **kwargs) | 6b8f45a773fca936913b2653df9ab8c96f1e974a | 3,657,223 |
def cost(weights):
"""Cost function which tends to zero when A |x> tends to |b>."""
p_global_ground = global_ground(weights)
p_ancilla_ground = ancilla_ground(weights)
p_cond = p_global_ground / p_ancilla_ground
return 1 - p_cond | 3b33292c63b42d110efe0d7cbe4dae85f095472f | 3,657,224 |
import time
def runOptimization(
cfg,
optimize_cfg,
n_iter=20,
split_runs=1,
model_runs=1,
filename="optimize_result",
):
"""Optimize the model parameter using hyperopt.
The model parameters are optimized using
the evaluations on validation dataset.
Args:
cfg(dict): configuration data
optimize_cfg(dict): configuration for optimization
n_iter(int): the number of iterations for sequential optimization
split_runs(int): the number of runs
for different dataset-split random seeds.
model_runs(int): the number of runs
for different model-initialization random seeds.
filename(string): a file-name for logging
"""
def objective(space):
print(space)
newcfg = {**cfg}
for k in space.keys():
if k in newcfg and type(newcfg[k]) == dict:
newcfg[k] = {**space[k]}
else:
newcfg[k] = space[k]
print(newcfg, cfg)
result = runEvaluation(
newcfg, split_runs=split_runs, model_runs=model_runs
)
opt_result = {
"loss": result["val_rmse"][0],
"loss_variance": result["val_rmse"][1] ** 2,
"true_loss": result["test_rmse"][0],
"true_loss_variance": result["test_rmse"][1] ** 2,
"status": STATUS_OK,
"eval_time": time.time(),
"data": result,
"space": space,
}
return opt_result
trials = Trials()
best = fmin(
objective,
optimize_cfg,
algo=tpe.suggest,
max_evals=n_iter,
trials=trials,
)
valid_trial = [t for t in trials if t["result"]["status"] == STATUS_OK]
losses_argmin = np.argmin(
[float(trial["result"]["loss"]) for trial in valid_trial]
)
print([float(trial["result"]["loss"]) for trial in valid_trial])
best_trial = valid_trial[losses_argmin]
best_result = best_trial["result"]["data"]
print(best, best_trial["result"]["space"], space_eval(optimize_cfg, best))
ret = {
"best": best,
"n_iter": n_iter,
"split_runs": split_runs,
"model_runs": model_runs,
"result": best_result,
"optimize_confg": optimize_cfg,
"config": cfg,
}
ret_str = ConfigEncoder.dumps(ret)
with open(f"{filename}.json", "w") as fp:
fp.write(ret_str)
print(ret)
return ret | ef2e2c85f5b0b8f6889da49ed1e964d432bb1886 | 3,657,225 |
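# A hedged sketch of how runOptimization might be driven (not from the original
# source). The cfg keys below ("learning_rate", "hidden_dim") are hypothetical,
# since cfg is simply forwarded to the project-specific runEvaluation; only the
# hyperopt search-space helpers (hp.loguniform, hp.choice) are real API.
from hyperopt import hp

base_cfg = {"learning_rate": 1e-3, "hidden_dim": 64}   # hypothetical config keys
search_space = {
    "learning_rate": hp.loguniform("learning_rate", -9, -3),
    "hidden_dim": hp.choice("hidden_dim", [32, 64, 128]),
}
# result = runOptimization(base_cfg, search_space, n_iter=10, filename="opt_demo")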
def _capabilities_for_entity(config, entity):
"""Return an _EntityCapabilities appropriate for given entity.
raises _UnknownEntityDomainError if the given domain is unsupported.
"""
if entity.domain not in _CAPABILITIES_FOR_DOMAIN:
raise _UnknownEntityDomainError()
return _CAPABILITIES_FOR_DOMAIN[entity.domain](config, entity) | 5fe541778ede415020377a0c989fa47ad2ae4d05 | 3,657,226 |
def apply_torsion(nodes, suffix=""):
""" Torsion energy in nodes. """
if (
"phases%s" % suffix in nodes.data
and "periodicity%s" % suffix in nodes.data
):
return {
"u%s"
% suffix: esp.mm.torsion.periodic_torsion(
x=nodes.data["x"],
k=nodes.data["k%s" % suffix],
phases=nodes.data["phases%s" % suffix],
periodicity=nodes.data["periodicity%s" % suffix],
)
}
else:
return {
"u%s"
% suffix: esp.mm.torsion.periodic_torsion(
x=nodes.data["x"], k=nodes.data["k%s" % suffix],
)
} | 37310291ddb769587d9d14a8dedcda3b528a78f3 | 3,657,228 |
import decimal
def parse_summary_table(doc):
"""
Parse the etree doc for summarytable, returns::
[{'channel': unicode,
'impressions': int,
'clicks': int,
'ctr': decimal.Decimal,
'ecpm': decimal.Decimal,
'earnings': decimal.Decimal}]
"""
for t in doc.findall('.//table'):
if t.attrib.get('id') == 'summarytable':
break
else:
raise ValueError("summary table not found")
res = []
FIELDS = ['channel', 'requests', 'responses', 'impressions',
'clicks', 'ctr', 'ecpm', 'earnings']
for row in t.findall('.//tr'):
celltext = []
for c in row.findall('td'):
tail = ''
# adsense inserts an empty span if a row has a period in it, so
# get the children and find the tail element to append to the text
            a_elem = c.find('a')
            if a_elem is not None and len(a_elem):
                tail = list(a_elem)[0].tail or ''
celltext.append('%s%s' % ((c.text or c.findtext('a') or '').strip(), tail.strip()))
if len(celltext) != 8:
continue
try:
            value_cols = [parse_decimal(cell) for cell in celltext[1:]]
except decimal.InvalidOperation:
continue
res.append(dict(zip(FIELDS, [celltext[0]] + value_cols)))
return res | 7d188478dc5539b4c8020af09cb052140def63c9 | 3,657,229 |
import math
import h5py
def tileset_info(hitile_path):
"""
Get the tileset info for a hitile file.
Parameters
----------
hitile_path: string
The path to the hitile file
Returns
-------
tileset_info: {'min_pos': [],
'max_pos': [],
'tile_size': 1024,
'max_zoom': 7
}
"""
hdf_file = h5py.File(hitile_path, "r")
d = hdf_file["meta"]
if "min-pos" in d.attrs:
min_pos = d.attrs["min-pos"]
else:
min_pos = 0
if "max-pos" in d.attrs:
max_pos = d.attrs["max-pos"]
else:
max_pos = d.attrs["max-length"]
return {
"max_pos": [int(max_pos)],
"min_pos": [int(min_pos)],
"max_width": 2 ** math.ceil(math.log(max_pos - min_pos) / math.log(2)),
"max_zoom": int(d.attrs["max-zoom"]),
"tile_size": int(d.attrs["tile-size"]),
} | 3ea467898e15ac6aca21c219398aa1249b795e55 | 3,657,230 |
def delete_user():
""" Deletes the current user's account. """
DB.session.delete(current_user)
DB.session.commit()
flash("Account deleted", 'success')
return redirect('/login') | bc22d6287738c676cec3c780a9fb01513ddd5530 | 3,657,231 |
import random
def setup_code_gen(no_of_accessories):
    """ Generate setup code
    """
    try:
        invalid_setup_codes = ['00000000', '11111111', '22222222', '33333333', '44444444', '55555555',
                               '66666666', '77777777', '88888888', '99999999', '12345678', '87654321']
        setup_code_created = []
        for _ in range(no_of_accessories):
            setup_code = ''
            # randomly generate an 8-digit setup_code
            for _ in range(8):
                random_num = str(random.randint(0, 9))
                setup_code += random_num
            # generate again till valid
            while setup_code in invalid_setup_codes:
                setup_code = ''
                for _ in range(8):
                    random_num = str(random.randint(0, 9))
                    setup_code += random_num
            # Check if the setup code has a valid format
            if (len(setup_code) != 8) or (not setup_code.isdigit()):
                print("\nSetup code generated should be 8 numbers without any '-' in between. Eg. 11122333\n")
                raise SystemExit(1)
            # Add the hyphen (-) in the PIN for salt-verifier generation. So, 11122333 will become 111-22-333
            setup_code = setup_code[:3] + '-' + setup_code[3:5] + '-' + setup_code[5:]
            setup_code_created.append(setup_code)
        return setup_code_created
    except Exception as std_err:
        print(std_err)
    except:
        raise | 253272cc27de1ead05d12f2e1798d91a3c4571dd | 3,657,232 |
def letter_difference(letter_1: str, letter_2: str) -> int:
"""
Return the difference in value between letter_1 and letter_2
"""
assert len(letter_1) == 1
assert len(letter_2) == 1
diff = letter_to_value[letter_2] - letter_to_value[letter_1]
if diff > 13:
diff -= 27
return diff | 66d88efff92acebef06275d244d560ca5071e974 | 3,657,233 |
import requests
from urllib.parse import urljoin
def refresh_access_token(request):
"""Updates `accessToken` in request cookies (not in browser cookies) using `refreshToken`. """
try:
refresh_token = request.COOKIES['refreshToken']
url = urljoin(settings.TIT_API_HOST, '/api/auth/token/refresh/')
response = requests.post(url, {'refresh': refresh_token})
result = response.json()
request.COOKIES['accessToken'] = result['access']
return True
    except (KeyError, requests.HTTPError):
        # Refresh token doesn't exist in cookies, or the response from the
        # TIT API returned an error status code.
return False | 378f4129fa9cc6af8cc961560e3e4063fcd0495b | 3,657,234 |
def ngrams(string, n=3, punctuation=PUNCTUATION, **kwargs):
""" Returns a list of n-grams (tuples of n successive words) from the given string.
Punctuation marks are stripped from words.
"""
s = string
s = s.replace(".", " .")
s = s.replace("?", " ?")
s = s.replace("!", " !")
s = [w.strip(punctuation) for w in s.split()]
s = [w.strip() for w in s if w.strip()]
return [tuple(s[i:i + n]) for i in range(len(s) - n + 1)] | 1e0e99f01c8aa46f4c44cca02d9bdb2b1c52d4c5 | 3,657,235 |
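# Example of the ngrams helper above (added for illustration). PUNCTUATION is a
# module-level constant in the original code, so string.punctuation is passed
# explicitly here.
import string

print(ngrams("The cat sat on the mat.", n=2, punctuation=string.punctuation))
# -> [('The', 'cat'), ('cat', 'sat'), ('sat', 'on'), ('on', 'the'), ('the', 'mat')]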
def binstringToBitList(binstring):
"""Converts a string of '0's and '1's to a list of 0's and 1's"""
bitList = []
for bit in binstring:
bitList.append(int(bit))
return bitList | d8ff10651d9fc2d02aba3b4a57a0a768032783b7 | 3,657,236 |
def file_revisions(request, repo_id):
"""List file revisions in file version history page.
"""
repo = get_repo(repo_id)
if not repo:
raise Http404
# perm check
if check_folder_permission(request, repo_id, '/') is None:
raise Http404
return render_file_revisions(request, repo_id) | c9ec0e1c159a4efdd8f4c3287cb9d8339ba9a9d2 | 3,657,237 |
def textctrl_info_t_get_tabsize(*args):
"""
textctrl_info_t_get_tabsize(self) -> unsigned int
"""
return _ida_kernwin.textctrl_info_t_get_tabsize(*args) | 7aff89906aebacb3664a73d26f52dd4317031790 | 3,657,238 |
def node_is_hidden(node_name):
"""
Returns whether or not given node is hidden
:param node_name: str
:return: bool
"""
if python.is_string(node_name):
return not maya.cmds.getAttr('{}.visibility'.format(node_name))
return not maya.cmds.getAttr('{}.visibility'.format(node.get_name(node_name))) | 0927d2424b64b9b81b52ced335823963d7ec9fe2 | 3,657,239 |
import torch
import torch.nn.functional as F
def generate_patch_grid_from_normalized_LAF(img: torch.Tensor, LAF: torch.Tensor, PS: int = 32) -> torch.Tensor:
"""Helper function for affine grid generation.
Args:
img: image tensor of shape :math:`(B, CH, H, W)`.
LAF: laf with shape :math:`(B, N, 2, 3)`.
PS: patch size to be extracted.
Returns:
grid
"""
raise_error_if_laf_is_not_valid(LAF)
B, N, _, _ = LAF.size()
num, ch, h, w = img.size()
# norm, then renorm is needed for allowing detection on one resolution
# and extraction at arbitrary other
LAF_renorm = denormalize_laf(LAF, img)
grid = F.affine_grid(LAF_renorm.view(B * N, 2, 3), [B * N, ch, PS, PS], align_corners=False) # type: ignore
grid[..., :, 0] = 2.0 * grid[..., :, 0].clone() / float(w) - 1.0
grid[..., :, 1] = 2.0 * grid[..., :, 1].clone() / float(h) - 1.0
return grid | 288572e4ff8577a8bd664732c79408b71ec58c0d | 3,657,240 |
from typing import Union
from typing import Tuple
def _resolve_condition_operands(
left_operand: Union[str, pipeline_channel.PipelineChannel],
right_operand: Union[str, pipeline_channel.PipelineChannel],
) -> Tuple[str, str]:
"""Resolves values and PipelineChannels for condition operands.
Args:
left_operand: The left operand of a condition expression.
right_operand: The right operand of a condition expression.
Returns:
A tuple of the resolved operands values:
(left_operand_value, right_operand_value).
"""
# Pre-scan the operand to get the type of constant value if there's any.
# The value_type can be used to backfill missing PipelineChannel.channel_type.
value_type = None
for value_or_reference in [left_operand, right_operand]:
if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
parameter_type = type_utils.get_parameter_type(
value_or_reference.channel_type)
if parameter_type in [
pipeline_spec_pb2.ParameterType.STRUCT,
pipeline_spec_pb2.ParameterType.LIST,
pipeline_spec_pb2.ParameterType
.PARAMETER_TYPE_ENUM_UNSPECIFIED,
]:
input_name = _additional_input_name_for_pipeline_channel(
value_or_reference)
raise ValueError('Conditional requires scalar parameter values'
' for comparison. Found input "{}" of type {}'
' in pipeline definition instead.'.format(
input_name,
value_or_reference.channel_type))
parameter_types = set()
for value_or_reference in [left_operand, right_operand]:
if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
parameter_type = type_utils.get_parameter_type(
value_or_reference.channel_type)
else:
parameter_type = type_utils.get_parameter_type(
type(value_or_reference).__name__)
parameter_types.add(parameter_type)
if len(parameter_types) == 2:
# Two different types being compared. The only possible types are
# String, Boolean, Double and Integer. We'll promote the other type
# using the following precedence:
# String > Boolean > Double > Integer
if pipeline_spec_pb2.ParameterType.STRING in parameter_types:
canonical_parameter_type = pipeline_spec_pb2.ParameterType.STRING
elif pipeline_spec_pb2.ParameterType.BOOLEAN in parameter_types:
canonical_parameter_type = pipeline_spec_pb2.ParameterType.BOOLEAN
else:
# Must be a double and int, promote to double.
assert pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE in parameter_types, \
'Types: {} [{} {}]'.format(
parameter_types, left_operand, right_operand)
assert pipeline_spec_pb2.ParameterType.NUMBER_INTEGER in parameter_types, \
'Types: {} [{} {}]'.format(
parameter_types, left_operand, right_operand)
canonical_parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
elif len(parameter_types) == 1: # Both operands are the same type.
canonical_parameter_type = parameter_types.pop()
else:
# Probably shouldn't happen.
raise ValueError('Unable to determine operand types for'
' "{}" and "{}"'.format(left_operand, right_operand))
operand_values = []
for value_or_reference in [left_operand, right_operand]:
if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
input_name = _additional_input_name_for_pipeline_channel(
value_or_reference)
operand_value = "inputs.parameter_values['{input_name}']".format(
input_name=input_name)
parameter_type = type_utils.get_parameter_type(
value_or_reference.channel_type)
if parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_INTEGER:
operand_value = 'int({})'.format(operand_value)
elif isinstance(value_or_reference, str):
operand_value = "'{}'".format(value_or_reference)
parameter_type = pipeline_spec_pb2.ParameterType.STRING
elif isinstance(value_or_reference, bool):
# Booleans need to be compared as 'true' or 'false' in CEL.
operand_value = str(value_or_reference).lower()
parameter_type = pipeline_spec_pb2.ParameterType.BOOLEAN
elif isinstance(value_or_reference, int):
operand_value = str(value_or_reference)
parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_INTEGER
else:
assert isinstance(value_or_reference, float), value_or_reference
operand_value = str(value_or_reference)
parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
if parameter_type != canonical_parameter_type:
# Type-cast to so CEL does not complain.
if canonical_parameter_type == pipeline_spec_pb2.ParameterType.STRING:
assert parameter_type in [
pipeline_spec_pb2.ParameterType.BOOLEAN,
pipeline_spec_pb2.ParameterType.NUMBER_INTEGER,
pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE,
]
operand_value = "'{}'".format(operand_value)
elif canonical_parameter_type == pipeline_spec_pb2.ParameterType.BOOLEAN:
assert parameter_type in [
pipeline_spec_pb2.ParameterType.NUMBER_INTEGER,
pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE,
]
                operand_value = 'true' if int(operand_value) != 0 else 'false'
else:
assert canonical_parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
assert parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_INTEGER
operand_value = 'double({})'.format(operand_value)
operand_values.append(operand_value)
return tuple(operand_values) | fde07e14af8f9ae610cfcd64e6a3f2219f0ee8e9 | 3,657,241 |
def int_to_bitstr(int_value: int) -> str:
"""
A function which returns its bit representation as a string.
Arguments:
int_value (int) - The int value we want to get the bit representation for.
Return:
str - The string representation of the bits required to form the int.
"""
return bin(int_value)[2:] | cafbf151ce0404081a0a8e1327d85e61ea7ddc52 | 3,657,243 |
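# Illustrative calls for int_to_bitstr (not from the original source). The
# bin() output is simply stripped of its '0b' prefix; no zero-padding is applied.
print(int_to_bitstr(10))   # -> '1010'
print(int_to_bitstr(255))  # -> '11111111'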
def target_reached(effect):
"""target amount has been reached (100% or more)"""
if not effect.instance.target:
return False
return effect.instance.amount_raised >= effect.instance.target | 0101cd9c3c51a1e03ba7cfd8844c3821a156e2fe | 3,657,244 |
def resid_mask(ints, wfs_map=read_map(wfs_file), act_map=read_map(act_file), num_aps=236):
"""
Returns the locations of the valid actuators in the actuator array
resids: Nx349 residual wavefront array (microns)
ints: Nx304 intensity array (any units)
N: Number of timestamps
"""
# Check inputs
N = ints.shape[0] # Num timestamps
# Aggregate intensities over all timestamps
med_ints = np.median(ints, axis=0)
# Fill WFS map with aggregated intensities
int_map = wfs_map.copy()
int_map[np.where(int_map==1)] = med_ints
# Find lenslets with greatest intensity
idxs = np.flip(np.argsort(int_map, axis=None))[:num_aps] # flat idxs of sort
idxs = np.unravel_index(idxs, wfs_map.shape) # 2D idxs of sort
# Mask for good sub-ap values
good_aps = np.zeros(wfs_map.shape, dtype=int)
good_aps[idxs] = 1
good_aps = good_aps * wfs_map # Just in case
# Mask for good actuator values
good_acts = np.pad(good_aps, ((1,1),(1,1)))
good_acts = (good_acts[1:,1:] | good_acts[1:,:-1]
| good_acts[:-1,:-1] | good_acts[:-1,1:]) * act_map
return good_acts | 98c818db8d2d5040c5a20857693f3f3116ab8e13 | 3,657,245 |
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def session():
"""Sets up a HTTP session with a retry policy."""
s = requests.Session()
retries = Retry(total=5, backoff_factor=0.5)
s.mount("http://", HTTPAdapter(max_retries=retries))
return s | d5cb89f04017718983834a0b4008972f393f56ae | 3,657,246 |
def handle_index():
"""
    Handles the index page: prepares the HTML and sends it back to the client.
:return:
"""
return render_template("index.html") | eaaa2c3028983c1e5ed29a45fe6ff3db0a8a7482 | 3,657,248 |
def get_polynomial_coefficients(degree=5):
"""
Return a list with coefficient names,
[1 x y x^2 xy y^2 x^3 ...]
"""
names = ["1"]
    for exp in range(1, degree + 1):  # 1, ..., degree
for x_exp in range(exp, -1, -1):
y_exp = exp - x_exp
if x_exp == 0:
x_str = ""
elif x_exp == 1:
x_str = r"$x$"
else:
x_str = rf"$x^{x_exp}$"
if y_exp == 0:
y_str = ""
elif y_exp == 1:
y_str = r"$y$"
else:
y_str = rf"$y^{y_exp}$"
names.append(x_str + y_str)
return names | 9369841215045e925a3453b83be9dc49c9be7b92 | 3,657,249 |
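# Example output of get_polynomial_coefficients for a quadratic (added for
# illustration): the labels follow the order 1, x, y, x^2, xy, y^2.
print(get_polynomial_coefficients(degree=2))
# -> ['1', '$x$', '$y$', '$x^2$', '$x$$y$', '$y^2$']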
from typing import Dict
import pickle
def _get_configured_credentials() -> Dict[str, bytes]:
"""
Get the encryupted credentials stored in disk
"""
path = get_credentials_path()
credentials: Dict[str, bytes]
with open(path, "rb") as file_handle:
credentials = pickle.load(file_handle)
if len(credentials) == 0:
raise ConfigurationError(
"You have not setup your credentials yet. "
"Please do so by using 'omigami credentials-helper' CLI functionality and try again."
)
if not all(key in ["k", "u", "p"] for key in credentials.keys()):
raise ConfigurationError(
"Something seems wrong with your credentials. "
"Please, run 'omigami credentials-helper --unset' to remove them and then set them again."
)
return credentials | 824aeb18f4e5bed609008c6594d86666502b2339 | 3,657,250 |
def kabsch_rotate(P, Q):
"""
Rotate matrix P unto matrix Q using Kabsch algorithm
"""
U = kabsch(P, Q)
# Rotate P
P = np.dot(P, U)
return P | 2be9c94901b27205ec4720b16ab6e81f34b1c6d6 | 3,657,252 |
def matrix(
odoo=ODOO_VERSIONS, pg=PG_VERSIONS, odoo_skip=frozenset(), pg_skip=frozenset()
):
"""All possible combinations.
We compute the variable matrix here instead of in ``.travis.yml`` because
this generates faster builds, given the scripts found in ``hooks``
directory are already multi-version-build aware.
"""
return map(
dict,
product(
product(("ODOO_MINOR",), ODOO_VERSIONS & odoo - odoo_skip),
product(("DB_VERSION",), PG_VERSIONS & pg - pg_skip),
),
) | 034e791d2e10e9f691df3e6c458722549c59a89a | 3,657,253 |
import copy
def iterate_pagerank(corpus, damping_factor):
"""
Return PageRank values for each page by iteratively updating
PageRank values until convergence.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
# Initialize a dict with {"page": 1/n} for all pages in corpus
new_dist = dict([(page, 1 / len(corpus)) for page in corpus])
finished = False
while not finished:
# Make copy before changing
prev_dist = copy.deepcopy(new_dist)
for page in corpus:
# Run the iterative algorithm on each page
new_dist[page] = iter_algorithm(damping_factor, len(corpus), page, corpus, new_dist)
# If any page has a difference over .001 from the previous run, the while loop will continue
for pg in new_dist:
finished = True
if abs(prev_dist[pg] - new_dist[pg]) > 0.001:
finished = False
break
return new_dist | bc51bd946d8fc617222303ffe30507695ee5ae33 | 3,657,254 |
def user_enabled(inst, opt):
"""
Check whether the option is enabled.
:param inst: instance from content object init
    :param opt: Option to be checked
:return: True if enabled, False if disabled or non present
"""
return opt in inst.settings and inst.settings[opt] | 3b2a5a1534ff779178eb4bd6b839b66c0b07864f | 3,657,255 |
async def get_buttons_data(client: Client, message: Message):
"""
Get callback_data and urls of all the inline buttons of the message you replied to.
"""
reply_message = message.reply_to_message
if reply_message and reply_message.reply_markup:
if reply_message.reply_markup.inline_keyboard:
row_lines = []
for i, row in enumerate(reply_message.reply_markup.inline_keyboard):
row_buttons = []
for button in row:
if button.callback_data:
data = button.callback_data
elif button.url:
data = button.url
else:
continue
row_buttons.append(f"<i>{quote_html(button.text)}:</i> <code>{quote_html(data)}</code>")
buttons = "\n".join(row_buttons)
row_lines.append(f"<b>Row {i + 1}:</b>\n{buttons}")
if row_lines:
clean_time = 20
await message.edit_text("\n\n".join(row_lines))
else:
clean_time = 4
await message.edit_text("There is no any callback_data or url button inside this keyboard.")
return await clean_up(client, message.chat.id, message.message_id, clear_after=clean_time)
await message.edit_text("Reply to a message containing an inline keyboard to extract callback_data and urls.")
await clean_up(client, message.chat.id, message.message_id, clear_after=4) | 6567455d95781515fc6236bf867a042ba550736f | 3,657,256 |
def update_document(
*, db_session: Session = Depends(get_db), document_id: PrimaryKey, document_in: DocumentUpdate
):
"""Update a document."""
document = get(db_session=db_session, document_id=document_id)
if not document:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=[{"msg": "The document with this id does not exist."}],
)
document = update(db_session=db_session, document=document, document_in=document_in)
return document | 15dc23aeb10950da19a6732c43d7675447f6a45c | 3,657,257 |
def from_jabsorb(request, seems_raw=False):
"""
Transforms a jabsorb request into a more Python data model (converts maps
and lists)
:param request: Data coming from Jabsorb
:param seems_raw: Set it to True if the given data seems to already have
been parsed (no Java class hint). If True, the lists will
be kept as lists instead of being converted to tuples.
:return: A Python representation of the given data
"""
if isinstance(request, (tuple, set, frozenset)):
# Special case : JSON arrays (Python lists)
return type(request)(from_jabsorb(element) for element in request)
elif isinstance(request, list):
# Check if we were a list or a tuple
if seems_raw:
return list(from_jabsorb(element) for element in request)
else:
return tuple(from_jabsorb(element) for element in request)
elif isinstance(request, dict):
# Dictionary
java_class = request.get(JAVA_CLASS)
json_class = request.get(JSON_CLASS)
seems_raw = not java_class and not json_class
if java_class:
# Java Map ?
if JAVA_MAPS_PATTERN.match(java_class) is not None:
return HashableDict((from_jabsorb(key), from_jabsorb(value))
for key, value in request["map"].items())
# Java List ?
elif JAVA_LISTS_PATTERN.match(java_class) is not None:
return HashableList(from_jabsorb(element)
for element in request["list"])
# Java Set ?
elif JAVA_SETS_PATTERN.match(java_class) is not None:
return HashableSet(from_jabsorb(element)
for element in request["set"])
# Any other case
result = AttributeMap((from_jabsorb(key),
from_jabsorb(value, seems_raw))
for key, value in request.items())
# Keep JSON class information as is
if json_class:
result[JSON_CLASS] = json_class
return result
elif not _is_builtin(request):
# Bean
for attr in dir(request):
# Only convert public fields
if not attr[0] == '_':
# Field conversion
setattr(request, attr, from_jabsorb(getattr(request, attr)))
return request
else:
# Any other case
return request | 78e8eefc0b234a5b6cd09cebff76e5cb716b54c2 | 3,657,258 |
def write_board_to_svg_file(board, file_name, hex_edge=50, hex_offset=0,
board_padding=None, pointy_top=True, trim_board=True, style=None):
"""
Writes given board to a svg file of given name.
:param board: 2 dimensional list of fields, each represented as a number
:param file_name name of the output file
:param hex_edge: length of hexagon's side (in pixels)
:param hex_offset: distance between side of one hexagon and its neighbour (in pixels)
:param board_padding padding of the board (in pixels)
:param pointy_top: specifies if hexagons should be pointy topped or flat topped
:param trim_board: if True, fields with a value 0 will be removed during transformation
:param style css style (as string)
"""
if board_padding is None:
board_padding = hex_edge
styles = ['.board { fill: white } .hex-field { fill: white; stroke: black } .hex-field-0 { fill: black }']
if style is not None:
styles.append(style)
hexagons = transform_board_into_hexagons(board, hex_edge, hex_offset, pointy_top, trim_board)
min_x, min_y, max_x, max_y = calculate_bounding_box(hexagons)
offset = (board_padding - min_x, board_padding - min_y)
hexagons = move_hexagons_by_offset(hexagons, offset)
board_size = (2 * board_padding + max_x - min_x, 2 * board_padding + max_y - min_y)
svg_image = create_svg_image(styles, board_size, hexagons)
svg_image.saveas(file_name)
return svg_image | 4cbf895a4a91e0434e31fdad459c3354927c5e2b | 3,657,259 |
def ensure_conf(app):
"""
    Ensure that for the given app the redbeat_conf
attribute is set to an instance of the RedBeatConfig
class.
"""
name = 'redbeat_conf'
app = app_or_default(app)
try:
config = getattr(app, name)
except AttributeError:
config = RedBeatConfig(app)
setattr(app, name, config)
return config | 673680aafbc4d76b1ae7f7740e53fd7f54740acf | 3,657,260 |
from subprocess import check_output
def check_if_process_present(string_to_find):
"""Checks if process runs on machine
Parameters:
string_to_find (string): process we want to find
Returns:
found (bool): True if found process running
"""
output = check_output(["ps", "-ax"], universal_newlines=True)
if string_to_find in output:
return True
else:
return False | 3a153e2160000ec1c9d4c0c28d1631179f9e88c3 | 3,657,261 |
def consulta_dicionario(nivel):
"""
    Input: the selected difficulty level ('facil', 'medio', 'dificil')
    Task: determine which dictionary should be consulted
    Output: the dictionary's fields (texto, lacunas, gabarito)
"""
nivel_dicionario = nivel
if nivel_dicionario == 'facil':
texto = dicionario_nivel_facil['texto']
lacunas = dicionario_nivel_facil['lacunas']
gabarito = dicionario_nivel_facil['gabarito']
elif nivel_dicionario == 'medio':
texto = dicionario_nivel_medio['texto']
lacunas = dicionario_nivel_medio['lacunas']
gabarito = dicionario_nivel_medio['gabarito']
elif nivel_dicionario == 'dificil':
texto = dicionario_nivel_dificil['texto']
lacunas = dicionario_nivel_dificil['lacunas']
gabarito = dicionario_nivel_dificil['gabarito']
    else:
        raise ValueError("nivel must be 'facil', 'medio' or 'dificil'")
    return texto, lacunas, gabarito | 1453298f791cca010f75abad9d0d37a28c1c8ae5 | 3,657,262 |
from typing import List
def stats_check(
main_table: Table,
compare_table: Table,
checks: List[OutlierCheck] = [],
max_rows_returned: int = 100,
):
"""
:param main_table: main table
:type main_table: table object
:param compare_table: table to be compared
:type compare_table: table object
:param checks: check class object, which represent boolean expression
:type checks: Check
:param max_rows_returned: number of row returned if the check fails.
:type max_rows_returned: int
"""
return AgnosticStatsCheck(
main_table=main_table,
compare_table=compare_table,
checks=checks,
max_rows_returned=max_rows_returned,
) | 39f0c0b2bad74a7878453a3fe11def36f1971a5f | 3,657,263 |
async def get_collectible_name(collectible_id: int, db: AsyncSession = Depends(get_db_session)):
"""Gets the collectible name"""
result = await destiny_items.get_collectible(db=db, collectible_id=collectible_id)
return NameModel(name=result.name) if result else NameModel(name=None) | 18fd3856d5145a004cf20343b5e8782a13a35845 | 3,657,264 |
def prime_factors(n):
    """
    Return the set of prime factors of n
    :param n: int
    :return: set
    """
    all_factors = set()
    t = n
    # factor out all powers of 2
    while t % 2 == 0:
        t //= 2
        all_factors.add(2)
    # check odd divisors up to sqrt(t)
    d = 3
    while d * d <= t:
        while t % d == 0:
            t //= d
            all_factors.add(d)
        d += 2
    # whatever remains (greater than 1) is itself prime
    if t > 1:
        all_factors.add(t)
    return all_factors | 09aad44a7b04492c225447eaa15590fa630a43cd | 3,657,265 |
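# Sanity checks for prime_factors (added for illustration): 360 = 2^3 * 3^2 * 5,
# and 194 = 2 * 97 exercises the "remaining factor is prime" branch.
print(prime_factors(360))  # -> {2, 3, 5}
print(prime_factors(194))  # -> {2, 97}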
def calculate_central_age(Ns, Ni, zeta, seZeta, rhod, Nd, sigma=0.15):
"""Function to calculate central age."""
Ns = np.array(Ns)
Ni = np.array(Ni)
# We just replace 0 counts with a low value, the age will be rounded to
# 2 decimals. That should take care of the zero count issue.
Ns = np.where(Ns == 0, 1e-10, Ns) # Do this to account for 0 track counts
Ni = np.where(Ni == 0, 1e-10, Ni) # Do this to account for 0 track counts
# Calculate mj
LAMBDA = 1.55125e-4
G = 0.5
m = Ns + Ni
p = Ns / m
theta = np.sum(Ns) / np.sum(m)
for i in range(0, 30):
w = m / (theta * (1 - theta) + (m - 1) * theta**2 * (1 - theta)**2 * sigma**2)
sigma = sigma * np.sqrt(np.sum(w**2 * (p - theta)**2) / np.sum(w))
theta = np.sum(w * p) / np.sum(w)
t = (1.0 / LAMBDA) * np.log( 1.0 + G * LAMBDA * zeta * rhod * (theta) / (1.0 - theta))
se = np.sqrt(1 / (theta**2 * (1.0 - theta)**2 * np.sum(w)) + 1.0 / Nd + (seZeta / zeta)**2) * t
return {"Central": np.round(t, 2), "se": np.round(se, 2), "sigma": np.round(sigma, 2)} | 29b360ce1df7cfa376a86b989fb7899137d110cc | 3,657,266 |
import requests
def _get_cross_reference_token(auth_cookie: str) -> str:
"""Gets a new cross reference token affiliated with the Roblox auth cookie.
:param auth_cookie: Your Roblox authentication cookie.
:return: A fresh cross reference token.
"""
session: requests.Session = _get_session(auth_cookie)
response: requests.Response = session.post("https://auth.roblox.com/v2/logout")
try:
token = response.headers["x-csrf-token"]
except KeyError:
raise Exception("Please specify a valid auth cookie")
return token | 63041ddeb31ccdc0a72a721d000968588580e816 | 3,657,267 |
def erase_not_displayed(client):
"""Erase all non-displayed models from memory.
Args:
client (obj): creopyson Client.
Returns:
None
"""
return client._creoson_post("file", "erase_not_displayed") | c3981fcce00b5d5440fcbdbe8781e9e6229a8fa7 | 3,657,268 |
def reset_position_for_friends_image_details_from_voter(voter, twitter_profile_image_url_https,
facebook_profile_image_url_https):
"""
Reset all position image urls in PositionForFriends from we vote image details
:param voter:
:param twitter_profile_image_url_https:
:param facebook_profile_image_url_https:
:return:
"""
position_list_manager = PositionListManager()
position_manager = PositionManager()
stance_we_are_looking_for = ANY_STANCE
friends_vs_public = FRIENDS_ONLY
speaker_image_url_https = None
reset_all_position_image_urls_results = []
if positive_value_exists(twitter_profile_image_url_https):
speaker_image_url_https = twitter_profile_image_url_https
elif positive_value_exists(facebook_profile_image_url_https):
speaker_image_url_https = facebook_profile_image_url_https
positions_for_voter_results = position_list_manager.retrieve_all_positions_for_voter(
voter.id, voter.we_vote_id, stance_we_are_looking_for, friends_vs_public)
if positions_for_voter_results['position_list_found']:
friends_position_list = positions_for_voter_results['position_list']
for position_object in friends_position_list:
reset_position_image_urls_results = position_manager.reset_position_image_details(
position_object, speaker_image_url_https=speaker_image_url_https)
reset_all_position_image_urls_results.append(reset_position_image_urls_results)
results = {
'success': True,
'reset_all_position_results': reset_all_position_image_urls_results
}
return results | e2483bf781029a7481dead0a168775b1a9223978 | 3,657,269 |
def get_analysis(panda_data):
"""
Get Analysis of CSV Data
    :param panda_data: pandas DataFrame
    :return: pandas DataFrame with sentiment, NER and adjective result columns added
    """
    # Create objects for analysis
sentiment_object = SentimentConfig.sentiment_object
ner_object = SentimentConfig.ner_object
# Get list of sentences
list = panda_data['text'].to_list()
sentiment_result = np.array([sentiment_object.get_sentiment(i) for i in list])
panda_data["Positive Score"] = sentiment_result[:, 2]
panda_data["Negative Score"] = sentiment_result[:, 0]
panda_data["Neutral Score"] = sentiment_result[:, 1]
panda_data["Sentiment Result"] = sentiment_result[:, 3]
# NER Data Analysis Added
ner_result = np.array([ner_object.get_ner(i) for i in list])
panda_data["Entity Result"] = ner_result
# Adjective Analysis Added
adjective_result = np.array([ner_object.get_adjectives(i) for i in list])
panda_data["Adjective Result"] = adjective_result
return panda_data | 014bc1543f67dfe561bce62d9b5e5f974b28db2a | 3,657,270 |
def create_coordinate_string_dict():
"""31パターンのヒモ。"""
w = 120
h = 120
return {
47: (0, 0),
57: (1*-w, 0),
58: (2*-w, 0),
16: (4*-w, 0),
35: (5*-w, 0),
36: (6*-w, 0),
38: (0, 1*-h),
13: (1*-w, 1*-h),
14: (2*-w, 1*-h),
15: (3*-w, 1*-h),
25: (4*-w, 1*-h),
17: (5*-w, 1*-h),
27: (6*-w, 1*-h),
37: (7*-w, 1*-h),
1357: (0, 2*-h),
1571: (1*-w, 2*-h),
7135: (2*-w, 2*-h),
3583: (4*-w, 2*-h),
274: (5*-w, 2*-h),
1361: (6*-w, 2*-h),
1371: (0, 3*-h),
15037: (1*-w, 3*-h),
3573: (2*-w, 3*-h),
416: (4*-w, 3*-h),
258: (6*-w, 3*-h),
1753: (0, 4*-h),
1351: (1*-w, 4*-h),
3175: (2*-w, 4*-h),
2572: (4*-w, 4*-h),
638: (5*-w, 4*-h),
1471: (6*-w, 4*-h),
} | 4abc2b246345569780db2dc9f6ef71c56ae86528 | 3,657,271 |
def all_but_ast(check):
"""Only passes AST to check."""
def _check_wrapper(contents, ast, **kwargs):
"""Wrap check and passes the AST to it."""
del contents
del kwargs
return check(ast)
return _check_wrapper | 71f3e3b8649a3a9885ded7eec248894cca8083c4 | 3,657,272 |
import readline
def get_history_items():
"""
Get all history item
"""
return [
readline.get_history_item(i)
        for i in range(1, readline.get_current_history_length() + 1)
] | b3600ca6581c11a46c2ea92d82b9d5aefcded49b | 3,657,273 |
def normalize(*args):
"""Scale a sequence of occurrences into probabilities that sum up to 1."""
total = sum(args)
return [arg / total for arg in args] | 49b0f998fe58b2c85da5a993e542d91bb5dd5382 | 3,657,275 |
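# Worked example for normalize (added for illustration): three occurrence
# counts are scaled into probabilities that sum to 1.
print(normalize(1, 2, 2))  # -> [0.2, 0.4, 0.4]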
import requests
def _make_request(
resource: str,
from_currency_code: str,
to_currency_code: str,
timestamp: int,
access_token: str,
exchange_code: str,
num_records: int,
api_version: str
) -> requests.Response:
"""
API documentation for cryptocompare can be found at https://min-api.cryptocompare.com/documentation
"""
base_url = f"https://min-api.cryptocompare.com/data/{api_version}/{resource}"
params = {
"fsym": from_currency_code,
"tsym": to_currency_code,
"e": exchange_code,
"limit": num_records,
"toTs": timestamp,
"api_key": access_token
}
return requests.get(base_url, params=params) | 4da7c3cab42b742b106fafb4c1585e6ecb250121 | 3,657,277 |
def projective_error_function(params, args):
"""
:param params:
:param args:
:return:
"""
# fx fy cx cy k0 k1
project_params = params[0:5]
f, cx, cy, k0, k1 = project_params
K = eye(3, 3)
K[0,0] = f
K[1,1] = f
K[0, 2] = k0
K[1, 2] = k1
model, image = args
tp = params[5:]
_, R, t = transform(tp, model)
Rt = np.c_[R, t.transpose()]
# Reconstruct camera matrix
P = K @ Rt
# Project
X = np.zeros((4, len(model[0])))
X[0:3] = model
X[3] = 1
PX = P @ X
image_star = PX[0:2] / PX[2]
dataShape = image.shape
nData = dataShape[0] * dataShape[1]
imagevec = image.reshape(1, nData)[0]
image_star_vec = image_star.reshape(1, nData)[0]
return imagevec - image_star_vec | 5b1fe8265478a379d91178474e392797798c9c0f | 3,657,279 |
import warnings
def _transform_masks(y, transform, data_format=None, **kwargs):
"""Based on the transform key, apply a transform function to the masks.
Refer to :mod:`deepcell.utils.transform_utils` for more information about
available transforms. Caution for unknown transform keys.
Args:
y (numpy.array): Labels of ``ndim`` 4 or 5
transform (str): Name of the transform, one of
``{"deepcell", "disc", "watershed", None}``.
data_format (str): A string, one of ``channels_last`` (default)
or ``channels_first``. The ordering of the dimensions in the
inputs. ``channels_last`` corresponds to inputs with shape
``(batch, height, width, channels)`` while ``channels_first``
corresponds to inputs with shape
``(batch, channels, height, width)``.
kwargs (dict): Optional transform keyword arguments.
Returns:
numpy.array: the output of the given transform function on ``y``.
Raises:
ValueError: Rank of ``y`` is not 4 or 5.
ValueError: Channel dimension of ``y`` is not 1.
ValueError: ``transform`` is invalid value.
"""
valid_transforms = {
'deepcell', # deprecated for "pixelwise"
'pixelwise',
'disc',
'watershed', # deprecated for "outer-distance"
'watershed-cont', # deprecated for "outer-distance"
'inner-distance',
'outer-distance',
'centroid', # deprecated for "inner-distance"
'fgbg'
}
if data_format is None:
data_format = K.image_data_format()
if y.ndim not in {4, 5}:
raise ValueError('`labels` data must be of ndim 4 or 5. Got', y.ndim)
channel_axis = 1 if data_format == 'channels_first' else -1
if y.shape[channel_axis] != 1:
raise ValueError('Expected channel axis to be 1 dimension. Got',
y.shape[1 if data_format == 'channels_first' else -1])
if isinstance(transform, str):
transform = transform.lower()
if transform not in valid_transforms and transform is not None:
raise ValueError('`{}` is not a valid transform'.format(transform))
if transform in {'pixelwise', 'deepcell'}:
if transform == 'deepcell':
warnings.warn('The `{}` transform is deprecated. Please use the '
'`pixelwise` transform instead.'.format(transform),
DeprecationWarning)
dilation_radius = kwargs.pop('dilation_radius', None)
separate_edge_classes = kwargs.pop('separate_edge_classes', False)
edge_class_shape = 4 if separate_edge_classes else 3
if data_format == 'channels_first':
shape = tuple([y.shape[0]] + [edge_class_shape] + list(y.shape[2:]))
else:
shape = tuple(list(y.shape[0:-1]) + [edge_class_shape])
# using uint8 since should only be 4 unique values.
y_transform = np.zeros(shape, dtype=np.uint8)
for batch in range(y_transform.shape[0]):
if data_format == 'channels_first':
mask = y[batch, 0, ...]
else:
mask = y[batch, ..., 0]
y_transform[batch] = transform_utils.pixelwise_transform(
mask, dilation_radius, data_format=data_format,
separate_edge_classes=separate_edge_classes)
elif transform in {'outer-distance', 'watershed', 'watershed-cont'}:
if transform in {'watershed', 'watershed-cont'}:
warnings.warn('The `{}` transform is deprecated. Please use the '
'`outer-distance` transform instead.'.format(transform),
DeprecationWarning)
by_frame = kwargs.pop('by_frame', True)
bins = kwargs.pop('distance_bins', None)
distance_kwargs = {
'bins': bins,
'erosion_width': kwargs.pop('erosion_width', 0),
}
# If using 3d transform, pass in scale arg
if y.ndim == 5 and not by_frame:
distance_kwargs['sampling'] = kwargs.pop('sampling', [0.5, 0.217, 0.217])
if data_format == 'channels_first':
shape = tuple([y.shape[0]] + list(y.shape[2:]))
else:
shape = y.shape[0:-1]
y_transform = np.zeros(shape, dtype=K.floatx())
if y.ndim == 5:
if by_frame:
_distance_transform = transform_utils.outer_distance_transform_movie
else:
_distance_transform = transform_utils.outer_distance_transform_3d
else:
_distance_transform = transform_utils.outer_distance_transform_2d
for batch in range(y_transform.shape[0]):
if data_format == 'channels_first':
mask = y[batch, 0, ...]
else:
mask = y[batch, ..., 0]
y_transform[batch] = _distance_transform(mask, **distance_kwargs)
y_transform = np.expand_dims(y_transform, axis=-1)
if bins is not None:
# convert to one hot notation
            # uint8's max value of 255 seems like a generous limit for binning.
y_transform = to_categorical(y_transform, num_classes=bins, dtype=np.uint8)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
elif transform in {'inner-distance', 'centroid'}:
if transform == 'centroid':
warnings.warn('The `{}` transform is deprecated. Please use the '
'`inner-distance` transform instead.'.format(transform),
DeprecationWarning)
by_frame = kwargs.pop('by_frame', True)
bins = kwargs.pop('distance_bins', None)
distance_kwargs = {
'bins': bins,
'erosion_width': kwargs.pop('erosion_width', 0),
'alpha': kwargs.pop('alpha', 0.1),
'beta': kwargs.pop('beta', 1)
}
# If using 3d transform, pass in scale arg
if y.ndim == 5 and not by_frame:
distance_kwargs['sampling'] = kwargs.pop('sampling', [0.5, 0.217, 0.217])
if data_format == 'channels_first':
shape = tuple([y.shape[0]] + list(y.shape[2:]))
else:
shape = y.shape[0:-1]
y_transform = np.zeros(shape, dtype=K.floatx())
if y.ndim == 5:
if by_frame:
_distance_transform = transform_utils.inner_distance_transform_movie
else:
_distance_transform = transform_utils.inner_distance_transform_3d
else:
_distance_transform = transform_utils.inner_distance_transform_2d
for batch in range(y_transform.shape[0]):
if data_format == 'channels_first':
mask = y[batch, 0, ...]
else:
mask = y[batch, ..., 0]
y_transform[batch] = _distance_transform(mask, **distance_kwargs)
y_transform = np.expand_dims(y_transform, axis=-1)
if distance_kwargs['bins'] is not None:
# convert to one hot notation
            # uint8's max value of 255 seems like a generous limit for binning.
y_transform = to_categorical(y_transform, num_classes=bins, dtype=np.uint8)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
elif transform == 'disc' or transform is None:
dtype = K.floatx() if transform == 'disc' else np.int32
y_transform = to_categorical(y.squeeze(channel_axis), dtype=dtype)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
elif transform == 'fgbg':
y_transform = np.where(y > 1, 1, y)
# convert to one hot notation
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, 1, y.ndim)
# using uint8 since should only be 2 unique values.
y_transform = to_categorical(y_transform, dtype=np.uint8)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
return y_transform | 278615037d78b35cacb641eca89918010cf9b2fd | 3,657,280 |
def get_cur_version():
"""
Get current apk version string
"""
pkg_name = cur_activity.getPackageName()
return str(
cur_activity.getPackageManager().getPackageInfo(
pkg_name, 0).versionName) | 015d3368238edc10344c633d9cc491c79569f5f6 | 3,657,283 |
import pickle as pkl
def checkpointload(checkpointfile):
"""Loads an hyperoptimizer checkpoint from file
Returns a list of tuples (params, loss) referring to previous hyperoptimization trials
"""
try:
with open(checkpointfile, "rb") as f:
return pkl.load(f)
except (FileNotFoundError, EOFError):
return [] | 906df00fcb209c979fd57c49a426f4c752b45753 | 3,657,284 |
def get_case_color_marker(case):
"""Get color and marker based on case."""
black_o = ("#000000", "o")
teal_D = ("#469990", "D")
orange_s = ("#de9f16", "s")
purple_v = ("#802f99", "v")
bs = case["batch_size"]
sub = case["subsampling"]
mc = case["mc_samples"]
if sub is None and mc == 0: # only bs
mapping = {2: purple_v, 8: orange_s, 32: teal_D, 128: black_o}
try:
return mapping[bs]
except KeyError:
warn(f"Could not map bs={bs} to color-marker-pair. Returning (black, o)")
return black_o
if sub is not None and mc == 0: # only bs & sub
return teal_D
if sub is None and mc != 0: # only bs & mc
return orange_s
if sub is not None and mc != 0: # bs, sub & mc
return purple_v | 4a42fc784b9034e3996753bc6da18fbfebc66b16 | 3,657,287 |
def clean_integer_score(x):
"""Converts x from potentially a float or string into a clean integer, and replace NA and NP values with one string character"""
try:
x = str(int(float(x)))
except Exception as exc:
        if isinstance(x, str):
pass
else:
raise
x = x.lower().strip()
return 'A' if x == 'na (not assesible)' else 'P' if x == 'np (not performed)' else x | 9ff2c911653421d51738bdb1bf8f381d3aa59820 | 3,657,288 |
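# Illustrative calls for clean_integer_score (not from the original source);
# the 'NA'/'NP' spellings mirror the sentinel strings used in the function.
print(clean_integer_score("7.0"))                 # -> '7'
print(clean_integer_score("NA (not assesible)"))  # -> 'A'
print(clean_integer_score("NP (not performed)"))  # -> 'P'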
def do_stuff2():
"""This is not right."""
(first, second) = 1, 2, 3
return first + second | dbb6f503e73cc0365dfa20fca54bebb174fcdadd | 3,657,289 |
def get_extra(item_container):
""" liefert die erste passende image_url """
if item_container.item.extra != '':
return get_extra_data(item_container)
item_container = item_container.get_parent()
while item_container.item.app.name == 'dmsEduFolder':
if item_container.item.extra != '':
return get_extra_data(item_container)
item_container = item_container.get_parent()
if item_container.item.app.name != 'dmsEduFolder':
return None | c9ac2ad65d05d13deaad8a6031f2f9ee8ab8aee4 | 3,657,291 |
import torch
def pt_accuracy(output, target, topk=(1,)):
"""Compute the accuracy over the k top predictions for the specified values of k."""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res | 5c0ddcd57163987b00e09a6677ddc41928810874 | 3,657,292 |
from collections import Counter, defaultdict
def compute_phasing_counts(variant_to_read_names_dict):
"""
Parameters
----------
variants_to_read_names : dict
Dictionary mapping varcode.Variant to set of read names
Returns
-------
Dictionary from variant to Counter(Variant)
"""
read_names_to_variants = defaultdict(set)
for variant, read_names in variant_to_read_names_dict.items():
for read_name in read_names:
read_names_to_variants[read_name].add(variant)
# now count up how many reads are shared between pairs of variants
phasing_counts = defaultdict(Counter)
for variant, read_names in variant_to_read_names_dict.items():
for read_name in read_names:
for other_variant in read_names_to_variants[read_name]:
if variant != other_variant:
phasing_counts[variant][other_variant] += 1
return phasing_counts | ba13a6d6c76e018cb1072e9fba635aad5593437b | 3,657,293 |
def _inputs_and_vae(hparams):
"""Constructs a VAE."""
obs_encoder = codec.MLPObsEncoder(hparams)
obs_decoder = codec.MLPObsDecoder(
hparams,
codec.BernoulliDecoder(squeeze_input=True),
param_size=1)
inputs = context_mod.EncodeObserved(obs_encoder)
vae = vae_mod.make(hparams, obs_encoder, obs_decoder)
return inputs, vae | 0e6af8a7d17312f99435426907a0dca062981225 | 3,657,294 |
import h5py
def read_grid_hdf5(filepath, name):
"""Read a grid from HDF5 file.
Parameters
----------
filepath : string or pathlib.Path object
Path of the HDF5 file.
name : string
Name of the grid.
Returns
-------
x : numpy.ndarray
The x-coordinates along a gridline in the x-direction.
y : numpy.ndarray
The y-coordinates along a gridline in the y-direction.
z : numpy.ndarray
The z-coordinates along a gridline in the z-direction.
"""
f = h5py.File(str(filepath), 'r')
dim = len(f[name])
x, y, z = f[name]['x'][:], f[name]['y'][:], None
if dim == 3:
z = f[name]['z'][:]
f.close()
if z is None or len(z) == 1:
return x, y
return x, y, z | f9ad79da36cfa24028562cdaeefba8ca9b48e572 | 3,657,295 |
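# Round-trip sketch for read_grid_hdf5 (added for illustration, assuming h5py
# is installed): write a small 2D grid with the expected "<name>/x" and
# "<name>/y" datasets, then read it back. The file name is arbitrary.
import numpy as np
import h5py

with h5py.File("grid_demo.h5", "w") as f:
    grp = f.create_group("grid")
    grp.create_dataset("x", data=np.linspace(0.0, 1.0, 5))
    grp.create_dataset("y", data=np.linspace(0.0, 2.0, 9))

x, y = read_grid_hdf5("grid_demo.h5", "grid")
print(x.shape, y.shape)  # -> (5,) (9,)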
import xml.dom.minidom
def cff2provn(filename):
"""Parse cml xml file and return a prov bundle object"""
#filename = "/Users/fariba/Desktop/UCI/freesurfer/scripts/meta-MC-SCA-023_tp1.cml"
tree = xml.dom.minidom.parse(filename)
collections = tree.documentElement
g = prov.ProvBundle()
g.add_namespace(xsd)
g.add_namespace(dcterms)
g.add_namespace(cml)
url_entity = g.entity(cml[get_id()])
url_entity.add_extra_attributes({prov.PROV['type']: nidm['nidm:ConnectomeFileFormat'],
prov.PROV['location']: prov.Literal(filename, prov.XSD['String'])})
cml_collection = g.collection(cml[get_id()])
cml_collection.add_extra_attributes(
{prov.PROV['type']: cml['connectome'],
prov.PROV['label']: filename})
g.wasDerivedFrom(cml_collection, url_entity)
# get species, subject_name, and subject_timepoint
species = tree.getElementsByTagName('cml:species')[0].toxml()
species = species.replace('<cml:species>', '').replace('</cml:species>', '')
tp = ''
sub = ''
tags = collections.getElementsByTagName("cml:tag")
for t in tags:
if t.attributes['key'].value == 'subject_name':
sub = t.toxml()
if t.attributes['key'].value == 'subject_timepoint':
tp = t.toxml()
sub = sub.replace('<cml:tag key="subject_name">', '').replace('</cml:tag>', '')
tp = tp.replace('<cml:tag key="subject_timepoint">', '').replace('</cml:tag>', '')
#print species + " " + sub + " " + tp
cml_meta = g.entity(cml[get_id()])
cml_meta.add_extra_attributes(
{prov.PROV['type']: cml['connectome-meta'], cml['species']: species, cml['timepoint']: tp,
cml['subject_name']: sub})
g.hadMember(cml_collection, cml_meta)
volumes = collections.getElementsByTagName("cml:connectome-volume")
c = 0
for v in volumes:
c = c + 1
#print v.getAttribute("src") + " " + v.getAttribute("dtype") + " " + v.getAttribute("name") + " " + v.getAttribute("fileformat")
#print v.attributes['fileformat'].value
dtype = v.getAttribute('dtype')
src = v.getAttribute('src')
name = v.getAttribute('name')
fileformat = v.getAttribute('fileformat')
cml_volume = g.entity(cml[get_id()])
cml_volume.add_extra_attributes(
{prov.PROV['type']: cml['connectome-volume'], cml['dtype']: dtype, cml['src']: src, cml['name']: name,
cml['fileformat']: fileformat})
g.hadMember(cml_collection, cml_volume)
tracks = collections.getElementsByTagName("cml:connectome-track")
c = 0
for t in tracks:
c = c + 1
#print t.getAttribute("src") + " " + t.getAttribute("dtype") + " " + t.getAttribute("name") + " " + t.getAttribute("fileformat")
dtype = t.getAttribute('dtype')
src = t.getAttribute('src')
name = t.getAttribute('name')
fileformat = t.getAttribute('fileformat')
cml_track = g.entity(cml[get_id()])
cml_track.add_extra_attributes(
{prov.PROV['type']: cml['connectome-track'], cml['dtype']: dtype, cml['src']: src, cml['name']: name,
cml['fileformat']: fileformat})
g.hadMember(cml_collection, cml_track)
networks = collections.getElementsByTagName("cml:connectome-network")
c = 0
for n in networks:
c = c + 1
#print n.getAttribute("src") + " " + n.getAttribute("dtype") + " " + n.getAttribute("name") + " " + n.getAttribute("fileformat")
dtype = n.getAttribute('dtype')
src = n.getAttribute('src')
name = n.getAttribute('name')
fileformat = n.getAttribute('fileformat')
cml_network = g.entity(cml[get_id()])
cml_network.add_extra_attributes(
{prov.PROV['type']: cml['connectome-network'], cml['dtype']: dtype, cml['src']: src, cml['name']: name,
cml['fileformat']: fileformat})
g.hadMember(cml_collection, cml_network)
surfaces = collections.getElementsByTagName("cml:connectome-surface")
c = 0
for s in surfaces:
c = c + 1
#print s.getAttribute("src") + " " + s.getAttribute("dtype") + " " + s.getAttribute("name") + " " + s.getAttribute("fileformat")
dtype = s.getAttribute('dtype')
src = s.getAttribute('src')
name = s.getAttribute('name')
fileformat = s.getAttribute('fileformat')
cml_surface = g.entity(cml[get_id()])
cml_surface.add_extra_attributes(
{prov.PROV['type']: cml['connectome-surface'], cml['dtype']: dtype, cml['src']: src, cml['name']: name,
cml['fileformat']: fileformat})
g.hadMember(cml_collection, cml_surface)
data = collections.getElementsByTagName("cml:connectome-data")
c = 0
for d in data:
c = c + 1
#print d.getAttribute("src") + " " + d.getAttribute("dtype") + " " + d.getAttribute("name") + " " + d.getAttribute("fileformat")
dtype = d.getAttribute('dtype')
src = d.getAttribute('src')
        name = d.getAttribute('name')
        fileformat = d.getAttribute('fileformat')
cml_data = g.entity(cml[get_id()])
cml_data.add_extra_attributes(
{prov.PROV['type']: cml['connectome-data'], cml['dtype']: dtype, cml['src']: src, cml['name']: name,
cml['fileformat']: fileformat})
g.hadMember(cml_collection, cml_data)
return g | c8e44bd627173a17d45eadcc5ab73062c4e27ff6 | 3,657,297 |
def optional(idx, *args):
"""A converter for functions having optional arguments.
The index to the last non-optional parameter is specified and a list of types for optional arguments follows.
"""
return lambda ctx, typespecs: _optional_imp(ctx, typespecs[idx], args) | e5491588090beaf4730f18c1b54193104a8f62be | 3,657,298 |
def calc_plot_ROC(y1, y2):
"""
Take two distributions and plot the ROC curve if you used the difference
in those distributions as a binary classifier.
:param y1:
:param y2:
:return:
"""
y_score = np.concatenate([y1, y2])
y_true = np.concatenate([np.zeros(len(y1)), np.ones(len(y2))])
return plot_ROC(y_true, y_score) | 428aa54ebe92ff1df6df4ebcae800a0b692f09d5 | 3,657,299 |
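# Usage sketch for calc_plot_ROC above. It assumes plot_ROC is defined elsewhere in the
# same module (it is referenced but not shown here); the sampled arrays are illustrative.
import numpy as np

rng = np.random.default_rng(0)
scores_class0 = rng.normal(loc=0.0, scale=1.0, size=500)
scores_class1 = rng.normal(loc=1.0, scale=1.0, size=500)
# The further apart the two score distributions are, the closer the resulting AUC is to 1.
calc_plot_ROC(scores_class0, scores_class1)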
import math
def degrees(x):
    """Converts angle x from radians to degrees.
    :type x: numbers.Real
    :rtype: float
    """
    return math.degrees(x) | 87fe22113f8286db6c516e711b9cf0d4efe7e11d | 3,657,301
def account_credit(account=None,
asset=None,
date=None,
tp=None,
order_by=['tp', 'account', 'asset'],
hide_empty=False):
"""
Get credit operations for the account
Args:
account: filter by account code
asset: filter by asset code
date: get balance for specified date/time
        tp: Filter by account type
        order_by: field or list of sorting fields
hide_empty: don't return zero balances
Returns:
generator object
"""
return _account_summary('credit',
account=account,
asset=asset,
date=date,
tp=tp,
order_by=order_by,
hide_empty=hide_empty) | a690d1352344c6f8e3d8172848255adc1fa9e331 | 3,657,302 |
import torch
def verify(model):
"""
测试数据模型检验
:param model: 网络模型以及其参数
:return res: 返回对应的列表
"""
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
if device == 'cuda':
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
res = []
for idx, data in enumerate(test_loader):
img, label = data
img, label = img.to(device), label.to(device)
        label2 = label.cpu().numpy()[0]
img = img.view(img.size(0), -1)
out = model(img)
all_output = []
        for i in out.data:
            all_output.append(i.cpu().numpy())
all_output = all_output[0]
if max(all_output) == all_output[label2]:
correct = True
else:
correct = False
all_output = sorted(all_output, reverse=True)
bvsb = all_output[0] - all_output[1]
obj = {
"label": int(label2),
"correct": correct,
"bvsb": float(bvsb)
}
res.append(obj)
if idx >= test_num - 1:
break
return res | 9ad2fd6280018aacbb2501f6b5eb862924b361a1 | 3,657,304 |
import csv
def parse_solution_file(solution_file):
"""Parse a solution file."""
ids = []
classes = []
with open(solution_file) as file_handle:
solution_reader = csv.reader(file_handle)
header = next(solution_reader, None)
if header != HEADER:
raise ValueError(
'Incorrect header found: {}, should be: {}'.format(
header, HEADER))
solution = sorted(list(solution_reader), key=lambda x: x[0])
for row in solution:
if len(row) < 2:
raise ValueError(
'Bad row length: {}, '
'should be at least {} for row {}'.format(
len(row), len(HEADER), row))
row_classes = row[1:]
if any(class_ not in POSSIBLE_CLASSES for class_ in row_classes):
raise ValueError(
'Unknown class found among: {}'.format(row_classes))
ids.append(row[0])
classes.append(row_classes)
return ids, classes | 19a553bd9979ca1d85d223b3109f3567a3a84100 | 3,657,305 |
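# Minimal, hedged usage sketch for parse_solution_file. HEADER and POSSIBLE_CLASSES are
# module-level constants in the original source; the concrete values below are assumptions
# chosen only to make the example self-contained.
import csv
import tempfile

HEADER = ["id", "class"]
POSSIBLE_CLASSES = ["cat", "dog"]

def _demo_parse_solution_file():
    with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False, newline="") as handle:
        writer = csv.writer(handle)
        writer.writerow(HEADER)
        writer.writerow(["img_002", "dog"])
        writer.writerow(["img_001", "cat"])
        path = handle.name
    ids, classes = parse_solution_file(path)
    assert ids == ["img_001", "img_002"]          # rows come back sorted by id
    assert classes == [["cat"], ["dog"]]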
def fibonacci_modulo(number, modulo):
"""
Calculating (n-th Fibonacci number) mod m
Args:
number: fibonacci number
modulo: modulo
Returns:
(n-th Fibonacci number) mod m
Examples:
>>> fibonacci_modulo(11527523930876953, 26673)
10552
"""
period = _pisano_period_len(modulo)
answer = _fib(number - number // period * period) % modulo
return answer | 5a7692597c17263ba86e81104762e4c7c8c95083 | 3,657,306 |
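# fibonacci_modulo relies on two helpers that are not shown above. This is a minimal
# sketch of how they could look; the implementations in the original module may differ.
def _pisano_period_len(modulo):
    """Length of the Pisano period of the Fibonacci sequence modulo `modulo`."""
    if modulo == 1:
        return 1
    previous, current = 0, 1
    for i in range(6 * modulo):              # the Pisano period never exceeds 6 * modulo
        previous, current = current, (previous + current) % modulo
        if previous == 0 and current == 1:   # the period always restarts at the pair (0, 1)
            return i + 1

def _fib(number):
    """Plain iterative Fibonacci; fine here because `number` is already reduced mod the period."""
    previous, current = 0, 1
    for _ in range(number):
        previous, current = current, previous + current
    return previous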
from typing import Union
def _str_unusual_grades(df: pd.DataFrame) -> Union[str, None]:
"""Print the number of unusual grades."""
grades = np.arange(0, 10.5, 0.5).astype(float)
catch_grades = []
for item in df["grade"]:
try:
if float(item) not in grades:
catch_grades.append(item)
except ValueError:
catch_grades.append(item)
if catch_grades == []:
return None
else:
return (
f"– Over all grades, {len(catch_grades)} of {len(df)} cards do not receive"
f" standard grades. These grades are in {set(catch_grades)}"
) | 0998b112438685523cadc60eb438bee94f3ad8fd | 3,657,307 |
from typing import Any
from typing import Dict
def _adjust_estimator_options(estimator: Any, est_options: Dict[str, Any], **kwargs) -> Dict[str, Any]:
"""
    Adds specific required classifier options to the `est_options` dictionary.
    Parameters
    ----------
    estimator : Any
        The estimator class for which the options have to be added
    est_options : Dict[str, Any]
        Dictionary, where the additional classifier options should be added to
kwargs :
Additional classifier options as keyword arguments
Returns
-------
Dict[str, Any]
        The input `est_options` dictionary containing the additional classifier options
"""
if estimator.__name__ == 'XGBClassifier':
est_options['num_class'] = kwargs['n_categories']
elif estimator.__name__ == 'DNNClassifier':
est_options['n_classes'] = kwargs['n_categories']
est_options['n_features'] = kwargs['n_features']
est_options['random_state'] = kwargs['random_seed']
return est_options | 4ff98d8a3b3e647e129fb0ffbc9bc549caa60440 | 3,657,308 |
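# Usage sketch: _adjust_estimator_options only inspects `estimator.__name__`, so a stand-in
# class is enough to illustrate the behaviour (real call sites pass xgboost / DNN classes).
class XGBClassifier:  # hypothetical stand-in for xgboost.XGBClassifier
    pass

options = _adjust_estimator_options(
    XGBClassifier,
    {"max_depth": 4},
    n_categories=3,
    n_features=20,
    random_seed=42,
)
# options is now {'max_depth': 4, 'num_class': 3}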
def _prettify(elem,indent_level=0):
"""Return a pretty-printed XML string for the Element.
"""
indent = " "
    res = indent_level*indent + '<'+elem.tag
    for k in elem.keys():
        res += " "+k+'="'+_escape_nl(elem.get(k))+'"'
    children = list(elem)
    if len(children)==0 and not elem.text:
        res += ' />'
        return res
    res += '>'
    if elem.text:
        res += _escape_nl(elem.text)
    for c in children:
        res += '\n'+_prettify(c,indent_level+1)
    if len(children)>0:
        res += '\n'+indent_level*indent
    res += '</'+elem.tag+'>'
return res | 8f46637cdbb8daf488fd668a197aee5495b8128a | 3,657,309 |
def predict(text):
"""
Predict the language of a text.
Parameters
----------
text : str
Returns
-------
language_code : str
"""
if language_models is None:
init_language_models(comp_metric, unicode_cutoff=10**6)
x_distribution = get_distribution(text, language_models_chars)
return predict_param(language_models,
comp_metric,
x_distribution,
best_only=True) | 713d8cd8df040703ee7f138314f5c14f5a89ef26 | 3,657,310 |
def distance_loop(x1, x2):
    """ Returns the Euclidean distance between the 1-d numpy arrays x1 and x2"""
    total = 0.0
    for a, b in zip(x1, x2):  # accumulate squared differences element by element
        total += (a - b) ** 2
    return total ** 0.5 | abd35a27cbeb5f5c9fe49a2a076d18f16e2849d9 | 3,657,312
def get_ps_calls_and_summary(filtered_guide_counts_matrix, f_map):
"""Calculates protospacer calls per cell and summarizes them
Args:
filtered_guide_counts_matrix: CountMatrix - obtained by selecting features by CRISPR library type on the feature counts matrix
f_map: dict - map of feature ID:feature sequence pairs
Returns:
First 3 outputs as specified in docstring for get_perturbation_calls
ps_calls_summary is a Pandas dataframe summarizing descriptive statistics for each perturbation_call (unique combination of protospacers) found in
    the dataset, along with some overall summary statistics about the multiplicity of infection
"""
if feature_utils.check_if_none_or_empty(filtered_guide_counts_matrix):
return (None, None, None, None, None)
(ps_calls_table, presence_calls, cells_with_ps, umi_thresholds) = get_perturbation_calls(filtered_guide_counts_matrix,
f_map,)
ps_calls_table.sort_values(by=['feature_call'], inplace=True, kind='mergesort')
ps_calls_summary = get_ps_calls_summary(ps_calls_table, filtered_guide_counts_matrix)
return (ps_calls_table, presence_calls, cells_with_ps, ps_calls_summary, umi_thresholds) | 18aecb335655fb62459350761aeffd4ddbe231ae | 3,657,313 |
import importlib
def symbol_by_name(name, aliases={}, imp=None, package=None,
                   sep='.', default=None, **kwargs):
"""Get symbol by qualified name.
The name should be the full dot-separated path to the class::
modulename.ClassName
Example::
celery.concurrency.processes.TaskPool
^- class name
or using ':' to separate module and symbol::
celery.concurrency.processes:TaskPool
If `aliases` is provided, a dict containing short name/long name
mappings, the name is looked up in the aliases first.
Examples:
>>> symbol_by_name("celery.concurrency.processes.TaskPool")
<class 'celery.concurrency.processes.TaskPool'>
>>> symbol_by_name("default", {
... "default": "celery.concurrency.processes.TaskPool"})
<class 'celery.concurrency.processes.TaskPool'>
# Does not try to look up non-string names.
>>> from celery.concurrency.processes import TaskPool
>>> symbol_by_name(TaskPool) is TaskPool
True
"""
if imp is None:
imp = importlib.import_module
    if not isinstance(name, str):
return name # already a class
name = aliases.get(name) or name
sep = ':' if ':' in name else sep
module_name, _, cls_name = name.rpartition(sep)
if not module_name:
cls_name, module_name = None, package if package else cls_name
try:
try:
module = imp(module_name, package=package, **kwargs)
        except ValueError as exc:
            raise ValueError(
                "Couldn't import %r: %s" % (name, exc)) from exc
return getattr(module, cls_name) if cls_name else module
except (ImportError, AttributeError):
if default is None:
raise
return default | 10921d715abc9c83891b26b884f3c88e86c4a900 | 3,657,314 |
from typing import Tuple
def coefficients_of_line_from_points(
point_a: Tuple[float, float], point_b: Tuple[float, float]
) -> Tuple[float, float]:
"""Computes the m and c coefficients of the equation (y=mx+c) for
a straight line from two points.
Args:
point_a: point 1 coordinates
point_b: point 2 coordinates
Returns:
m coefficient and c coefficient
"""
points = [point_a, point_b]
x_coords, y_coords = zip(*points)
coord_array = np.vstack([x_coords, np.ones(len(x_coords))]).T
m, c = np.linalg.lstsq(coord_array, y_coords, rcond=None)[0]
return m, c | b4d89f2bb3db48723f321e01658e795f431427e1 | 3,657,315 |
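# Worked example: the line through (0, 1) and (2, 5) is y = 2x + 1. Note that the function
# above assumes numpy is imported as np in its module.
import numpy as np

m, c = coefficients_of_line_from_points((0.0, 1.0), (2.0, 5.0))
assert np.isclose(m, 2.0) and np.isclose(c, 1.0)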
import tifffile
def read_tiff(fname, slc=None):
"""
Read data from tiff file.
Parameters
----------
fname : str
String defining the path of file or file name.
slc : sequence of tuples, optional
Range of values for slicing data in each axis.
((start_1, end_1, step_1), ... , (start_N, end_N, step_N))
defines slicing parameters for each axis of the data matrix.
Returns
-------
ndarray
Output 2D image.
"""
fname = _check_read(fname)
try:
arr = tifffile.imread(fname, memmap=True)
except IOError:
logger.error('No such file or directory: %s', fname)
return False
arr = _slice_array(arr, slc)
_log_imported_data(fname, arr)
return arr | 39b48229719dd8210059a3a9ed7972e8398728ab | 3,657,317 |
def sorted_non_max_suppression_padded(scores,
boxes,
max_output_size,
iou_threshold):
"""A wrapper that handles non-maximum suppression.
Assumption:
* The boxes are sorted by scores unless the box is a dot (all coordinates
are zero).
* Boxes with higher scores can be used to suppress boxes with lower scores.
  The overall design of the algorithm is to handle boxes tile-by-tile:
boxes = boxes.pad_to_multiply_of(tile_size)
num_tiles = len(boxes) // tile_size
output_boxes = []
for i in range(num_tiles):
box_tile = boxes[i*tile_size : (i+1)*tile_size]
for j in range(i - 1):
suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]
iou = bbox_overlap(box_tile, suppressing_tile)
# if the box is suppressed in iou, clear it to a dot
box_tile *= _update_boxes(iou)
    # Iteratively handle the diagonal tile.
iou = _box_overlap(box_tile, box_tile)
iou_changed = True
while iou_changed:
# boxes that are not suppressed by anything else
suppressing_boxes = _get_suppressing_boxes(iou)
# boxes that are suppressed by suppressing_boxes
suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)
# clear iou to 0 for boxes that are suppressed, as they cannot be used
# to suppress other boxes any more
new_iou = _clear_iou(iou, suppressed_boxes)
iou_changed = (new_iou != iou)
iou = new_iou
# remaining boxes that can still suppress others, are selected boxes.
output_boxes.append(_get_suppressing_boxes(iou))
if len(output_boxes) >= max_output_size:
break
Args:
scores: a tensor with a shape of [batch_size, anchors].
boxes: a tensor with a shape of [batch_size, anchors, 4].
max_output_size: a scalar integer `Tensor` representing the maximum number
of boxes to be selected by non max suppression.
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
Returns:
nms_scores: a tensor with a shape of [batch_size, anchors]. It has same
dtype as input scores.
nms_proposals: a tensor with a shape of [batch_size, anchors, 4]. It has
same dtype as input boxes.
"""
batch_size = tf.shape(boxes)[0]
num_boxes = tf.shape(boxes)[1]
pad = tf.cast(
tf.math.ceil(tf.cast(num_boxes, tf.float32) / NMS_TILE_SIZE),
tf.int32) * NMS_TILE_SIZE - num_boxes
boxes = tf.pad(tf.cast(boxes, tf.float32), [[0, 0], [0, pad], [0, 0]])
scores = tf.pad(
tf.cast(scores, tf.float32), [[0, 0], [0, pad]], constant_values=-1)
num_boxes += pad
def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
return tf.logical_and(
tf.reduce_min(output_size) < max_output_size,
idx < num_boxes // NMS_TILE_SIZE)
selected_boxes, _, output_size, _ = tf.while_loop(
_loop_cond, _suppression_loop_body, [
boxes, iou_threshold,
tf.zeros([batch_size], tf.int32),
tf.constant(0)
])
idx = num_boxes - tf.cast(
tf.nn.top_k(
tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) *
tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0],
tf.int32)
idx = tf.minimum(idx, num_boxes - 1)
idx = tf.reshape(
idx + tf.reshape(tf.range(batch_size) * num_boxes, [-1, 1]), [-1])
boxes = tf.reshape(
tf.gather(tf.reshape(boxes, [-1, 4]), idx),
[batch_size, max_output_size, 4])
boxes = boxes * tf.cast(
tf.reshape(tf.range(max_output_size), [1, -1, 1]) < tf.reshape(
output_size, [-1, 1, 1]), boxes.dtype)
scores = tf.reshape(
tf.gather(tf.reshape(scores, [-1, 1]), idx),
[batch_size, max_output_size])
scores = scores * tf.cast(
tf.reshape(tf.range(max_output_size), [1, -1]) < tf.reshape(
output_size, [-1, 1]), scores.dtype)
return scores, boxes | 5d882acb6b9559eb541d49f6784798e5d342c673 | 3,657,318 |
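# Usage sketch for sorted_non_max_suppression_padded. It assumes TensorFlow 2.x and the
# module-level NMS_TILE_SIZE constant and _suppression_loop_body helper referenced above;
# shapes and thresholds are illustrative only.
import tensorflow as tf

batch_size, num_anchors = 2, 8
# Boxes are expected to be sorted by score within each batch element.
scores = tf.sort(tf.random.uniform([batch_size, num_anchors]), direction="DESCENDING", axis=-1)
boxes = tf.random.uniform([batch_size, num_anchors, 4])
nms_scores, nms_boxes = sorted_non_max_suppression_padded(
    scores, boxes, max_output_size=4, iou_threshold=0.5)
# nms_scores has shape [2, 4] and nms_boxes has shape [2, 4, 4]; suppressed slots are zeroed.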
def create_session() -> Session:
"""
Creates a new session using the aforementioned engine
:return: session
"""
return Session(bind=engine) | 8b480ee216c30b2c6b8652a6b6239ab6b83df4d9 | 3,657,319 |
import torch
def fft_to_complex_matrix(x):
""" Create matrix with [a -b; b a] entries for complex numbers. """
x_stacked = torch.stack((x, torch.flip(x, (4,))), dim=5).permute(2, 3, 0, 4, 1, 5)
x_stacked[:, :, :, 0, :, 1] *= -1
return x_stacked.reshape(-1, 2 * x.shape[0], 2 * x.shape[1]) | 9fb38004041280da0d6d53830761501aebf7969a | 3,657,320 |
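# Shape sketch for fft_to_complex_matrix: the input is expected to have shape
# (rows, cols, H, W, 2), with the last dimension holding real/imaginary parts. Each complex
# entry a + ib becomes a 2x2 block [[a, -b], [b, a]], so the output has shape (H*W, 2*rows, 2*cols).
import torch

x = torch.randn(3, 3, 4, 4, 2)   # 3x3 complex matrices on a 4x4 frequency grid
blocks = fft_to_complex_matrix(x)
print(blocks.shape)              # torch.Size([16, 6, 6])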
import copy
def mcais(A, X, verbose=False):
"""
Returns the maximal constraint-admissible (positive) invariant set O_inf for the system x(t+1) = A x(t) subject to the constraint x in X.
O_inf is also known as maximum output admissible set.
It holds that x(0) in O_inf <=> x(t) in X for all t >= 0.
(Implementation of Algorithm 3.2 from: Gilbert, Tan - Linear Systems with State and Control Constraints, The Theory and Application of Maximal Output Admissible Sets.)
Sufficient conditions for this set to be finitely determined (i.e. defined by a finite number of facets) are: A stable, X bounded and containing the origin.
Math
----------
At each time step t, we want to verify if at the next time step t+1 the system will go outside X.
Let's consider X := {x | D_i x <= e_i, i = 1,...,n} and t = 0.
In order to ensure that x(1) = A x(0) is inside X, we need to consider one by one all the constraints and for each of them, the worst-case x(0).
We can do this solvin an LP
V(t=0, i) = max_{x in X} D_i A x - e_i for i = 1,...,n
if all these LPs has V < 0 there is no x(0) such that x(1) is outside X.
The previous implies that all the time-evolution x(t) will lie in X (see Gilbert and Tan).
In case one of the LPs gives a V > 0, we iterate and consider
V(t=1, i) = max_{x in X, x in A X} D_i A^2 x - e_i for i = 1,...,n
where A X := {x | D A x <= e}.
If now all V < 0, then O_inf = X U AX, otherwise we iterate until convergence
V(t, i) = max_{x in X, x in A X, ..., x in A^t X} D_i A^(t+1) x - e_i for i = 1,...,n
    Once at convergence, O_inf = X U A X U ... U A^t X.
Arguments
----------
A : numpy.ndarray
State transition matrix.
X : instance of Polyhedron
State-space domain of the dynamical system.
verbose : bool
If True prints at each iteration the convergence parameters.
Returns:
----------
    O_inf : instance of Polyhedron
        Maximal constraint-admissible (positive) invariant set.
"""
# ensure convergence of the algorithm
eig_max = np.max(np.absolute(np.linalg.eig(A)[0]))
if eig_max > 1.:
raise ValueError('unstable system, cannot derive maximal constraint-admissible set.')
[nc, nx] = X.A.shape
if not X.contains(np.zeros((nx, 1))):
raise ValueError('the origin is not contained in the constraint set, cannot derive maximal constraint-admissible set.')
if not X.bounded:
raise ValueError('unbounded constraint set, cannot derive maximal constraint-admissible set.')
# initialize mcais
O_inf = copy(X)
# loop over time
t = 1
convergence = False
while not convergence:
# solve one LP per facet
J = X.A.dot(np.linalg.matrix_power(A,t))
residuals = []
for i in range(X.A.shape[0]):
sol = linear_program(- J[i,:], O_inf.A, O_inf.b)
residuals.append(- sol['min'] - X.b[i,0])
# print status of the algorithm
if verbose:
print('Time horizon: ' + str(t) + '.'),
print('Convergence index: ' + str(max(residuals)) + '.'),
print('Number of facets: ' + str(O_inf.A.shape[0]) + '. \r'),
# convergence check
new_facets = [i for i, r in enumerate(residuals) if r > 0.]
if len(new_facets) == 0:
convergence = True
else:
# add (only non-redundant!) facets
O_inf.add_inequality(J[new_facets,:], X.b[new_facets,:])
t += 1
# remove redundant facets
if verbose:
print('\nMaximal constraint-admissible invariant set found.')
print('Removing redundant facets ...'),
O_inf.remove_redundant_inequalities()
if verbose:
print('minimal facets are ' + str(O_inf.A.shape[0]) + '.')
return O_inf | e162a1aed724166f373f8afbd6541622254e8b42 | 3,657,321 |
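# Usage sketch for mcais with a stable 2D system and box constraints. It assumes the
# accompanying Polyhedron class (and a from_bounds-style constructor) from the same package;
# the numbers are illustrative only.
import numpy as np

theta = 0.1
rotation = np.array([[np.cos(theta), -np.sin(theta)],
                     [np.sin(theta),  np.cos(theta)]])
A = 0.9 * rotation                                             # spectral radius 0.9 < 1, hence stable
X = Polyhedron.from_bounds(-np.ones((2, 1)), np.ones((2, 1)))  # box constraint |x_i| <= 1
O_inf = mcais(A, X, verbose=True)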
def evaluation_seasonal_srmse(model_name, variable_name='mean', background='all'):
"""
Evaluate the model in different seasons using the standardized RMSE.
:type model_name: str
:param model_name: The name of the model.
:type variable_name: str
    :param variable_name: The name of the variable which shall be evaluated\
against the ONI prediction.
:returns: The SRMSE for different seasons and the \
0, 3, 6, 9, 12 and 15-month lead times. The returned arrays have the shape \
(lead time, season). The season corresponding to the the array entry [:,0]\
is DJF and to [:,1] is JFM (and so on).
"""
reader = data_reader(startdate='1963-01', enddate='2017-12')
# seasonal scores
seas_srmse = np.zeros((n_lead, 12))
# ONI observation
oni = reader.read_csv('oni')
if background=="el-nino-like":
obs = oni[(oni.index.year>=1982)&(oni.index.year<=2001)]
elif background=="la-nina-like":
obs = oni[(oni.index.year<1982)|(oni.index.year>2001)]
elif background=="all":
obs = oni
obs_time = obs.index
for i in range(n_lead):
pred_all = reader.read_forecasts(model_name, lead_times[i]).loc[{'target_season':obs_time}]
pred = pred_all[variable_name]
seas_srmse[i, :] = seasonal_srmse(obs, pred, obs_time - pd.tseries.offsets.MonthBegin(1))
return seas_srmse | 39fb7ae64ab32fc5092e46c77c8593f1aeaf4c92 | 3,657,322 |
def _async_device_ha_info(
hass: HomeAssistant, lg_device_id: str
) -> dict | None:
"""Gather information how this ThinQ device is represented in Home Assistant."""
device_registry = dr.async_get(hass)
entity_registry = er.async_get(hass)
hass_device = device_registry.async_get_device(
identifiers={(DOMAIN, lg_device_id)}
)
if not hass_device:
return None
data = {
"name": hass_device.name,
"name_by_user": hass_device.name_by_user,
"model": hass_device.model,
"manufacturer": hass_device.manufacturer,
"sw_version": hass_device.sw_version,
"disabled": hass_device.disabled,
"disabled_by": hass_device.disabled_by,
"entities": {},
}
hass_entities = er.async_entries_for_device(
entity_registry,
device_id=hass_device.id,
include_disabled_entities=True,
)
for entity_entry in hass_entities:
if entity_entry.platform != DOMAIN:
continue
state = hass.states.get(entity_entry.entity_id)
state_dict = None
if state:
state_dict = dict(state.as_dict())
# The entity_id is already provided at root level.
state_dict.pop("entity_id", None)
# The context doesn't provide useful information in this case.
state_dict.pop("context", None)
data["entities"][entity_entry.entity_id] = {
"name": entity_entry.name,
"original_name": entity_entry.original_name,
"disabled": entity_entry.disabled,
"disabled_by": entity_entry.disabled_by,
"entity_category": entity_entry.entity_category,
"device_class": entity_entry.device_class,
"original_device_class": entity_entry.original_device_class,
"icon": entity_entry.icon,
"original_icon": entity_entry.original_icon,
"unit_of_measurement": entity_entry.unit_of_measurement,
"state": state_dict,
}
return data | 47af173daba91aa70ea167baf58c05e9f6f595f6 | 3,657,324 |
from typing import Optional
def get_travis_pr_num() -> Optional[int]:
"""Return the PR number if the job is a pull request, None otherwise
Returns:
int
See also:
- <https://docs.travis-ci.com/user/environment-variables/#default-environment-variables>
""" # noqa E501
try:
travis_pull_request = get_travis_env_or_fail('TRAVIS_PULL_REQUEST')
if falsy(travis_pull_request):
return None
else:
try:
return int(travis_pull_request)
except ValueError:
return None
except UnexpectedTravisEnvironmentError:
return None | 86ef6ce3f9bf3c3e056b11e575b1b13381e490fe | 3,657,325 |
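# get_travis_pr_num depends on helpers that are not shown above. This is a minimal sketch of
# how they could look; the real implementations in the source module may differ.
import os

class UnexpectedTravisEnvironmentError(Exception):
    """Raised when an expected TRAVIS_* environment variable is missing."""

def get_travis_env_or_fail(name: str) -> str:
    try:
        return os.environ[name]
    except KeyError as exc:
        raise UnexpectedTravisEnvironmentError(name) from exc

def falsy(value: str) -> bool:
    # Travis sets TRAVIS_PULL_REQUEST to the literal string "false" for non-PR builds.
    return value.lower() in ("false", "", "0")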
from typing import List
import json
def get_updated_records(table_name: str, existing_items: List) -> List:
"""
Determine the list of record updates, to be sent to a DDB stream after a PartiQL update operation.
Note: This is currently a fairly expensive operation, as we need to retrieve the list of all items
from the table, and compare the items to the previously available. This is a limitation as
we're currently using the DynamoDB Local backend as a blackbox. In future, we should consider hooking
into the PartiQL query execution inside DynamoDB Local and directly extract the list of updated items.
"""
result = []
stream_spec = dynamodb_get_table_stream_specification(table_name=table_name)
key_schema = SchemaExtractor.get_key_schema(table_name)
before = ItemSet(existing_items, key_schema=key_schema)
after = ItemSet(ItemFinder.get_all_table_items(table_name), key_schema=key_schema)
def _add_record(item, comparison_set: ItemSet):
matching_item = comparison_set.find_item(item)
if matching_item == item:
return
# determine event type
if comparison_set == after:
if matching_item:
return
event_name = "REMOVE"
else:
event_name = "INSERT" if not matching_item else "MODIFY"
old_image = item if event_name == "REMOVE" else matching_item
new_image = matching_item if event_name == "REMOVE" else item
# prepare record
keys = SchemaExtractor.extract_keys_for_schema(item=item, key_schema=key_schema)
record = {
"eventName": event_name,
"eventID": short_uid(),
"dynamodb": {
"Keys": keys,
"NewImage": new_image,
"SizeBytes": len(json.dumps(item)),
},
}
if stream_spec:
record["dynamodb"]["StreamViewType"] = stream_spec["StreamViewType"]
if old_image:
record["dynamodb"]["OldImage"] = old_image
result.append(record)
# loop over items in new item list (find INSERT/MODIFY events)
for item in after.items_list:
_add_record(item, before)
# loop over items in old item list (find REMOVE events)
for item in before.items_list:
_add_record(item, after)
return result | 631c21836614731e5b53ed752036f1216d555196 | 3,657,326 |
def normalize_record(input_object, parent_name="root_entity"):
"""
This function orchestrates the main normalization.
It will go through the json document and recursively work with the data to:
- unnest (flatten/normalize) keys in objects with the standard <parentkey>_<itemkey> convention
- identify arrays, which will be pulled out and normalized
- create an array of entities, ready for streaming or export
for each item in the object:
if the item is a non object or non list item:
append to this flattened_dict object
if the item is a dictionary:
trigger the flatten dict function
the flatten dict function will iterate through the items and append them to a dictionary. it will return a dictionary with {"dictionary": <dict_data>, "array": <arrays>}
join flattened_dict and the returned[dictionary] data
append returned[array] to arrays layer
    arrays will be dealt with a little differently. Because we're expecting multiple entries we'll be working with a loop which will always belong to an array
create new dict object dict_object = {"name": <dict name>, "data": [dict array entries data]}
        for each in the array loop - trigger normalize_record with parent name of array name
dict_object.append the `dicts_array`["data"] to the dict_object["data"] array
"""
arrays = []
dicts = []
output_dictionary = {}
parent_keys = extract_parent_keys(dictionary_name=parent_name, dictionary_object=input_object)
if isinstance(input_object, (dict)):
for key, value in input_object.items():
if not isinstance(value, (dict,list) ):
# if the item is a non object or non list item:
output_dictionary[key] = value
elif isinstance(value, dict):
# if the item is a dictionary:
# trigger the flatten dict function
dict_contents = flatten_object(key,value) # will return {"dictionary": <dict_data>, "array": <arrays>}
instance_dictionary = dict_contents["dictionary"]
instance_array = dict_contents["array"]
if len(instance_array) >0:
arrays.extend(instance_array)
output_dictionary = merge_two_dicts(output_dictionary,instance_dictionary) #join the dict
elif isinstance(value, list):
arrays.append({"name":key, "data":value, "parent_keys": parent_keys})
elif isinstance(input_object, (list)):
arrays.append({"name":parent_name,"data":input_object })
##############################
### Now process the arrays ###
##############################
for each_array in arrays:
for each_entry in each_array["data"]:
each_entry = each_entry
try:
if each_array["parent_keys"]:
each_entry = merge_two_dicts(each_entry, each_array["parent_keys"])
except:
pass
normalized_array = (normalize_record(input_object = each_entry, parent_name = each_array["name"]) )
#expect list here
#let the normalizer recursively work through and pull the data out. Once it's out, we can append the data to the dicts array :)
#may return 1 or more dictionaries
for each_normalized_array_entry in normalized_array:
# iterate through each output in the normalized array
#check if there is an instance of this data already
matches = False
for each_dictionary_entity in dicts:
if each_normalized_array_entry["name"] == each_dictionary_entity["name"]:
#check if there is data in place already for this. If so, we add an entry to it
each_dictionary_entity["data"].extend(each_normalized_array_entry["data"])
matches = True
if matches == False:
dicts.append({"name": each_normalized_array_entry["name"] , "data": each_normalized_array_entry["data"] })
dicts.append({"name":parent_name, "data": [output_dictionary]})
return(dicts) | 73647b04ba943e18a38ebf2f1d03cca46b533935 | 3,657,327 |
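# Usage sketch for normalize_record. It assumes the helper functions referenced above
# (extract_parent_keys, flatten_object, merge_two_dicts) from the same module; the expected
# output is described schematically rather than asserted.
record = {
    "id": 1,
    "customer": {"name": "Ada", "city": "London"},
    "orders": [{"sku": "A-1", "qty": 2}, {"sku": "B-7", "qty": 1}],
}
entities = normalize_record(record, parent_name="orders_export")
# entities is a list of {"name": ..., "data": [...]} dicts: one "orders_export" entity holding
# the flattened customer_* keys, plus one "orders" entity with a row per array element.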
def rmse(f, p, xdata, ydata):
"""Root-mean-square error."""
results = np.asarray([f(p, x) for x in xdata])
sqerr = (results - ydata)**2
return np.sqrt(sqerr.mean()) | 2b8afdb1742aad5e5c48fbe4407ab0989dbaf762 | 3,657,328 |
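# Worked example: RMSE of the slope-only model y = p * x against three data points.
# numpy is imported here as np, which the rmse function above also relies on.
import numpy as np

line = lambda p, x: p * x
print(rmse(line, 2.0, xdata=[1.0, 2.0, 3.0], ydata=np.array([2.0, 4.0, 7.0])))
# squared errors are (0, 0, 1), so the result is sqrt(1/3) ~= 0.577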
import logging
def get_logger(name=None, propagate=True):
"""Get logger object"""
logger = logging.getLogger(name)
logger.propagate = propagate
loggers.append(logger)
return logger | 3ad4dbc39f9bf934b02e2dc6e713a4793a28298b | 3,657,329 |
import requests
from typing import Match
def getMatches(tournamentName=None, matchDate=None, matchPatch=None, matchTeam=None):
"""
Params:
tournamentName: str/List[str]/Tuple(str) : filter by tournament names (e.g. LCK 2020 Spring)
matchDate: str/List[str]/Tuple(str) : date in the format of yyyy-mm-dd
matchPatch: str/List[str]/Tuple(str) : game patch the match is played on (e.g. 10.15)
matchTeam: str/List[str]/Tuple(str)
Returns:
List[Match]
"""
argsString = " AND ".join(filter(None, [
_formatArgs(tournamentName, "SG.Tournament"),
_formatDateTimeArgs(matchDate, "SG.DateTime_UTC"),
_formatArgs(matchPatch, "SG.Patch")
]))
url = MATCHES_URL.format(argsString)
matchesJson = requests.get(url).json()["cargoquery"]
matches = []
uniqueMatchMap = {}
for i in range(len(matchesJson)):
matchJson = matchesJson[i]["title"]
# apply team filter
if isinstance(matchTeam, str):
matchTeam = [matchTeam]
if isinstance(matchTeam, list):
if matchJson["Team1"] not in matchTeam and matchJson["Team2"] not in matchTeam:
continue
elif isinstance(matchTeam, tuple):
if not set(matchTeam).issubset(set([matchJson["Team1"], matchJson["Team2"]])):
continue
uniqueMatch = matchJson["UniqueGame"][:-2]
if uniqueMatch not in uniqueMatchMap:
match = Match(uniqueMatch)
match._uniqueGames.append(matchJson["UniqueGame"])
match.dateTime = matchJson["DateTime UTC"]
match.patch = matchJson["Patch"]
match.teams = (matchJson["Team1"], matchJson["Team2"])
match.scores = (int(matchJson["Team1Score"]), int(matchJson["Team2Score"]))
matches.append(match)
uniqueMatchMap[uniqueMatch] = match
else:
match = uniqueMatchMap[uniqueMatch]
match._uniqueGames.append(matchJson["UniqueGame"])
match.dateTime = matchJson["DateTime UTC"]
return matches | 89525caa9da0a3b546e0b8982e96469f32f8c5bc | 3,657,333 |
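# Usage sketch for getMatches. It hits the live cargo API behind the module-level MATCHES_URL,
# so it needs network access; the tournament, patch and team names below are illustrative.
head_to_head = getMatches(
    tournamentName="LCK 2020 Spring",
    matchPatch="10.4",
    matchTeam=("T1", "Gen.G"),   # a tuple keeps only games where both listed teams played
)
for match in head_to_head:
    print(match.dateTime, match.teams, match.scores)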
def get_fields(fields):
"""
From the last column of a GTF, return a dictionary mapping each value.
Parameters:
fields (str): The last column of a GTF
Returns:
attributes (dict): Dictionary created from fields.
"""
attributes = {}
description = fields.strip()
description = [x.strip() for x in description.split(";")]
for pair in description:
if pair == "": continue
pair = pair.replace('"', '')
key, val = pair.split()
attributes[key] = val
# put in placeholders for important attributes (such as gene_id) if they
# are absent
if 'gene_id' not in attributes:
attributes['gene_id'] = 'NULL'
return attributes | 30777838934b18a0046017f3da6b3a111a911a9c | 3,657,336 |
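# Worked example on a typical GTF attribute column.
fields = 'gene_id "ENSG00000223972"; gene_name "DDX11L1"; gene_biotype "transcribed_unprocessed_pseudogene";'
print(get_fields(fields))
# {'gene_id': 'ENSG00000223972', 'gene_name': 'DDX11L1',
#  'gene_biotype': 'transcribed_unprocessed_pseudogene'}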
def add_log_group_name_params(log_group_name, configs):
"""Add a "log_group_name": log_group_name to every config."""
for config in configs:
config.update({"log_group_name": log_group_name})
return configs | a5fce8143c3404257789c1720bbfefc49c8ea3f5 | 3,657,337 |
from typing import Union
def on_update_user_info(data: dict, activity: Activity) -> (int, Union[str, None]):
"""
broadcast a user info update to a room, or all rooms the user is in if no target.id specified
:param data: activity streams format, must include object.attachments (user info)
:param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
:return: {'status_code': ECodes.OK, 'data': '<same AS as client sent, plus timestamp>'}
"""
activity.actor.display_name = utils.b64e(environ.env.session.get(SessionKeys.user_name.value))
data['actor']['displayName'] = activity.actor.display_name
environ.env.observer.emit('on_update_user_info', (data, activity))
return ECodes.OK, data | 735486cad96545885a76a5a18418db549869304d | 3,657,338 |
def discover(isamAppliance, check_mode=False, force=False):
"""
Discover available updates
"""
return isamAppliance.invoke_get("Discover available updates",
"/updates/available/discover") | 04c68b0ce57d27bc4032cf9b1607f2f1f371e384 | 3,657,339 |
from re import A
def ltistep(U, A=A, B=B, C=C):
""" LTI( A B C ): U -> y linear
straight up
"""
U, A, B, C = map(np.asarray, (U, A, B, C))
xk = np.zeros(A.shape[1])
x = [xk]
for u in U[:-1]:
xk = A.dot(xk) + B.dot(u)
x.append(xk.copy())
return np.dot(x, C) | 5d7c7550a9a6407a8f1a68ee32e158f25a7d50bf | 3,657,340 |
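# Usage sketch for ltistep with an explicit (A, B, C); the module-level defaults referenced in
# the signature are assumed to exist in the original source, so they are passed explicitly here.
import numpy as np

A = np.array([[0.9, 0.1], [0.0, 0.8]])   # stable state-transition matrix
B = np.array([[0.0], [1.0]])             # single input channel
C = np.array([1.0, 0.0])                 # observe the first state only
U = np.ones((10, 1))                     # unit step input
y = ltistep(U, A=A, B=B, C=C)            # y.shape == (10,)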
def _registry():
"""Registry to download images from."""
return _registry_config()["host"] | ee7c724f3b9381c4106a4e19d0434b9b4f0125fc | 3,657,341 |