content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def search_by_pattern(pattern, limit=20):
"""Perform a search for pattern."""
pattern_ = normalize_pattern(pattern)
db = get_db()
results = db.execute(
"""
SELECT json FROM places
WHERE document MATCH ?
ORDER BY rank DESC
LIMIT ?;
""",
(fts_pattern(pattern_), limit)
).fetchall()
return "[{}]".format(','.join([doc['json'] for doc in results])) | 467b85c850bb27ac1ed9a6e7fff6bb969a5f84e0 | 3,650,091 |
def gcd(a, b):
"""Greatest common divisor"""
return _gcd_internal(abs(a), abs(b)) | 886d366893a0215ccf0208af56c9c45037ad9549 | 3,650,092 |
def exp_create_database(db_name, demo, lang, user_password='admin', login='admin', country_code=None, phone=None):
""" Similar to exp_create but blocking."""
_logger.info('Create database `%s`.', db_name)
_create_empty_database(db_name)
_initialize_db(id, db_name, demo, lang, user_password, login, country_code, phone)
return True | b1d956628d864e0aa3998c00fd6a0b7cfb3ba411 | 3,650,093 |
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
def fix_cr(data):
"""Cosmic ray fixing function.
Args:
data (:class:`numpy.ndarray`): Input image data.
Returns:
        :class:`numpy.ndarray`: Fixed image data.
"""
m = data.mean(dtype=np.float64)
s = data.std(dtype=np.float64)
_mask = data > m + 3.*s
if _mask.sum()>0:
x = np.arange(data.size)
f = InterpolatedUnivariateSpline(x[~_mask], data[~_mask], k=3)
return f(x)
else:
return data | 40702ccc9400f4ba5f10cad1f376b83eac487876 | 3,650,094 |
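A minimal usage sketch for fix_cr on synthetic 1-D data (the background level, spike value, and random seed are illustrative assumptions; numpy and scipy.interpolate are required by the function above):

import numpy as np

rng = np.random.default_rng(0)
frame = rng.normal(loc=100.0, scale=1.0, size=200)   # clean background around 100 counts
frame[50] = 500.0                                    # inject a single cosmic-ray spike
cleaned = fix_cr(frame)
print(frame[50], cleaned[50])   # the spike (500.0) is replaced by the spline estimate (~100)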
def iou(box1, box2, iouType='segm'):
"""Compute the Intersection-Over-Union of two given boxes.
or the Intersection-Over box2.
Args:
box1: array of 4 elements [cx, cy, width, height].
box2: same as above
iouType: The kind of intersection it will compute.
'keypoints' is for intersection over box2 area.
Returns:
iou: a float number in range [0, 1]. iou of the two boxes.
"""
lr = min(box1[0]+0.5*box1[2], box2[0]+0.5*box2[2]) - \
max(box1[0]-0.5*box1[2], box2[0]-0.5*box2[2])
if lr > 0:
tb = min(box1[1]+0.5*box1[3], box2[1]+0.5*box2[3]) - \
max(box1[1]-0.5*box1[3], box2[1]-0.5*box2[3])
if tb > 0:
intersection = tb*lr
else:
intersection = 0
if(iouType == 'keypoints'):
box2_area = box2[2] * box2[3]
return intersection/box2_area
else:
union = box1[2]*box1[3]+box2[2]*box2[3]-intersection
return intersection/union
return 0 | 42ef4689c977e4ccbdbb987ff3ae63b265d3c42d | 3,650,095 |
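A quick sanity check of iou on two unit boxes that overlap over half of their area, using the [cx, cy, width, height] convention from the docstring:

box_a = [0.0, 0.0, 1.0, 1.0]
box_b = [0.5, 0.0, 1.0, 1.0]
print(iou(box_a, box_b))                       # 0.333..., intersection 0.5 over union 1.5
print(iou(box_a, box_b, iouType='keypoints'))  # 0.5, intersection over box_b area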
def transform_color(color1, color2, skipR=1, skipG=1, skipB=1):
"""
transform_color(color1, color2, skipR=1, skipG=1, skipB=1)
This function takes 2 color1 and color2 RGB color arguments, and then returns a
list of colors in-between the color1 and color2
eg- tj.transform_color([0,0,0],[10,10,20]) returns a list:-
[[0, 0, 0], [1, 1, 1], [2, 2, 2] ... [9, 9, 9], [10, 10, 10], [10, 10, 11] ... [10, 10, 20]]
This function is very useful for creating color fade or color transition effects in pygame.
There are 3 optional arguments, which are skip arguments set to 1 by default.
"""
L = []
if (color1[0] < color2[0]):
i = list(range(color1[0],
color2[0] + 1,
skipR))
else:
i = list(range(color2[0], color1[0] + 1, skipR))[::-1]
if i == []:
i = [color1[0]]
if (color1[1] < color2[1]):
j = list(range(color1[1],
color2[1] + 1,
skipG))
else:
j = list(range(color2[1], color1[1] + 1, skipG))[::-1]
if j == []:
j = [color1[1]]
if (color1[2] < color2[2]):
k = list(range(color1[2],
color2[2] + 1,
skipB))
else:
k = list(range(color2[2], color1[2] + 1, skipB))[::-1]
if k == []:
k = [color1[2]]
x = max(len(i), len(j), len(k))
for m in range(len(i), x):
i += [i[-1]]
for m in range(len(j), x):
j += [j[-1]]
for m in range(len(k), x):
k += [k[-1]]
for m in range(x):
l = [i[m], j[m], k[m]]
L += [l]
return L | 5f04daa951c59b0445387b2dc988ab7efb98aff4 | 3,650,096 |
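Running the docstring's own example end to end; the fade has 21 steps because the blue channel spans the widest range:

fade = transform_color([0, 0, 0], [10, 10, 20])
print(len(fade))                     # 21
print(fade[0], fade[10], fade[-1])   # [0, 0, 0] [10, 10, 10] [10, 10, 20]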
from functools import wraps
def sandwich(func):
"""Write a decorator that prints UPPER_SLICE and
LOWE_SLICE before and after calling the function (func)
that is passed in (@wraps is to preserve the original
func's docstring)
"""
@wraps(func)
def wrapped(*args, **kwargs):
print(UPPER_SLICE)
func(*args, **kwargs)
print(LOWE_SLICE)
return wrapped | 167e1a753b7ba1f0d42732e12c5b37e0b0670f1b | 3,650,097 |
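A hedged usage sketch; UPPER_SLICE and LOWE_SLICE are referenced but not defined in the snippet, so placeholder values are assumed here:

# Placeholder constants for illustration only; the real exercise module defines its own.
UPPER_SLICE = '----UPPER SLICE----'
LOWE_SLICE = '----LOWER SLICE----'

@sandwich
def filling(kind):
    print(f'{kind} filling')

filling('cheese')
# ----UPPER SLICE----
# cheese filling
# ----LOWER SLICE----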
def as_dict(bdb_path, compact=True):
"""Get the state of a minter BerkeleyDB as a dict. Only the fields used by EZID are
included.
"""
with nog.bdb_wrapper.BdbWrapper(bdb_path, dry_run=False) as w:
return w.as_dict(compact) | dab02d671c099bd726839dc40167632cab812015 | 3,650,098 |
import json
def main() -> int:
"""
Builds/updates aircraft.json codes
"""
craft = {}
for line in AIRCRAFT_PATH.open().readlines():
code, _, name = line.strip().split("\t")
if code not in craft:
craft[code] = name
json.dump(craft, OUTPUT_PATH.open("w"))
return 0 | e35bc5b5f8be2452d60b73b891bc5756931b18aa | 3,650,100 |
import numpy as np
def bootstrap_mean(x, alpha=0.05, b=1000):
"""
Calculate bootstrap 1-alpha percentile CI of the mean from a sample x
Parameters
----------
x : 1d array
    alpha : float
        The confidence interval returned is the 1 - alpha percentile interval
b : int
The number of bootstrap samples
Returns
-------
lb, ub : the lower and upper bounds of the confidence interval
"""
means = np.empty(b)
    for ii in range(b):
idx = np.random.randint(0, len(x), len(x))
means[ii] = np.mean(x[idx])
sort_means = np.sort(means)
lb_idx = int(b * alpha/2)
ub_idx = int(b * (1-(alpha/2)))
return sort_means[lb_idx], sort_means[ub_idx] | 10b240f97196c7e2922574230a675d7e7c89038e | 3,650,101 |
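A usage sketch with synthetic data; the exact bounds depend on the random draws, so the printed values are only indicative:

import numpy as np

np.random.seed(0)
sample = np.random.normal(loc=5.0, scale=2.0, size=500)
lb, ub = bootstrap_mean(sample, alpha=0.05, b=2000)
print(lb, ub)   # roughly a 95% CI around the true mean of 5.0, e.g. ~(4.9, 5.2)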
import tensorflow as tf
def _penalize_token(log_probs, token_id, penalty=-1e7):
"""Penalize token probabilities."""
depth = log_probs.shape[-1]
penalty = tf.one_hot([token_id], depth, on_value=tf.cast(penalty, log_probs.dtype))
return log_probs + penalty | af8bf807438ff7ae96be0c5be0ec37fdbf81a5d1 | 3,650,102 |
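A small check, assuming TensorFlow 2.x: after penalizing token 1, its log-probability drops far below the others, so argmax no longer selects it:

import tensorflow as tf

log_probs = tf.math.log(tf.constant([[0.7, 0.2, 0.1]]))
penalized = _penalize_token(log_probs, token_id=1)
print(tf.argmax(penalized, axis=-1).numpy())   # [0]; token 1 is effectively masked out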
from typing import Iterable
def minimize(function,
vs,
explicit=True,
num_correction_pairs=10,
tolerance=1e-05,
x_tolerance=0,
f_relative_tolerance=1e7,
initial_inverse_hessian_estimate=None,
max_iterations=1000,
parallel_iterations=1,
optimizer="l-bfgs",
trace=False,
logger=print):
"""
Takes a function whose arguments are subclasses of boa.core.AbstractVariable,
and performs some form of BFGS on it.
:param function:
:param vs: Structure of NTFO variables
:param explicit: If True, `function` must have the signature `function(*vs)`.
If False, `function` must have the signature `function()`
:param num_correction_pairs:
:param tolerance:
:param x_tolerance:
:param f_relative_tolerance:
:param initial_inverse_hessian_estimate:
:param max_iterations:
:param parallel_iterations:
:param logger:
:param trace:
:param optimizer:
:return:
"""
if optimizer not in AVAILABLE_OPTIMIZERS:
raise OptimizationError(f"Specified optimizer ({optimizer}) must be one of {AVAILABLE_OPTIMIZERS}!")
if not isinstance(vs, Iterable):
raise OptimizationError(f"Variables passed must be in an iterable structure!")
optimizer = AVAILABLE_OPTIMIZERS[optimizer]
float64_machine_eps = finfo(float64).eps
# Check if the function and the passed arguments are compatible
num_function_params = len(signature(function).parameters)
num_args = len(vs)
if explicit and num_function_params != num_args:
raise OptimizationError(f"Optimization target takes {num_function_params} argument(s) " \
f"but {num_args} were given!")
# These are chosen to match the parameters of
# scipy.optimizer.fmin_l_bfgs_b
optimizer_args = {"num_correction_pairs": num_correction_pairs,
"tolerance": tolerance, # This is pgtol in scipy
"x_tolerance": x_tolerance,
# This is eps * factr in scipy
"f_relative_tolerance": float64_machine_eps * f_relative_tolerance,
"initial_inverse_hessian_estimate": initial_inverse_hessian_estimate,
"max_iterations": max_iterations,
"parallel_iterations": parallel_iterations}
    # Get the reparameterization of the variables
reparameterizations = get_reparametrizations(vs)
initial_position, bounds, shapes = recursive_flatten(reparameterizations)
def unflatten(xs):
return _recursive_unflatten(xs, bounds, shapes)
# Pull-back of the function to the unconstrained domain:
# Reparameterize the function such that instead of taking its original bounded
# arguments, it takes the unconstrained ones, and they get forward transformed.
def reparameterized_function(*args):
new_args = recursive_forward_transform(args, vs)
return function(*new_args)
def fn_with_grads(x, first_run=False):
if explicit:
# Get back the original arguments
args = unflatten(x)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(args)
value = reparameterized_function(*args)
gradients = tape.gradient(value, args)
else:
# If we're performing implicit optimization, we assign the parameter
# values to the watched variables at the start
values = unflatten(x)
recursive_assign(reparameterizations, values)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(reparameterizations)
value = function()
gradients = tape.gradient(value, reparameterizations)
if first_run:
for grad in gradients:
if grad is None:
raise OptimizationError("Given function does not depend on some of the given variables!")
# We must concatenate the gradients because lbfgs_minimize expects a single vector
gradients, _, _ = recursive_flatten(gradients)
return value, gradients
# Dry run to check if the optimization can be performed
fn_with_grads(initial_position, first_run=True)
optimizer_results = optimizer(fn_with_grads,
initial_position=initial_position,
**optimizer_args)
if trace:
logger(f"Optimizer evaluated the objective {optimizer_results.num_objective_evaluations.numpy()} times!")
logger(f"Optimizer terminated after "
f"{optimizer_results.num_iterations.numpy()}/{max_iterations} iterations!")
logger(f"Optimizer converged: {optimizer_results.converged.numpy()}")
logger(f"Optimizer diverged: {optimizer_results.failed.numpy()}")
optimum = unflatten(optimizer_results.position)
# Assign the results to the variables
recursive_assign(reparameterizations, optimum)
# Return the loss
return optimizer_results.objective_value, optimizer_results.converged, optimizer_results.failed | 22659e37afa29fde75ba344e0054207582e44fb3 | 3,650,104 |
import numpy as np
def variation_reliability(flow, gamma=1):
""" Calculates the flow variation reliability
Parameters
----------
flow: numpy array
flow values
gamma: float, optional
soft threshold
Returns
-------
variation reliability map (0 less reliable, 1 reliable)
"""
#compute central differences
gradx = np.gradient(flow[:, :, 0])
grady = np.gradient(flow[:, :, 1])
norm_grad = (gradx[0] ** 2 + gradx[1] ** 2 +
grady[0] ** 2 + grady[1] ** 2) / (0.01 * np.sum(flow ** 2, axis=2) + 0.002)
norm_grad[norm_grad > 1e2] = 0
return np.exp(-norm_grad / gamma) | cbdc4ab49402239e5e0fd484e5bf7bceaca383d4 | 3,650,105 |
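A smoke test on a trivial input, assuming numpy: a perfectly uniform flow field has zero spatial gradient, so every pixel is fully reliable:

import numpy as np

flow = np.ones((8, 8, 2), dtype=np.float64)   # constant flow of (1, 1) everywhere
rel = variation_reliability(flow)
print(rel.min(), rel.max())                   # 1.0 1.0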
import pandas as pd
def get_means(df: pd.DataFrame, *, matching_sides: bool,
matching_roots: bool) -> pd.Series:
"""
Calculates mean conditional probabilities from a given co-occurrence table
with filters restricting for matching sides and roots.
Args:
df: The co-occurrences.
matching_sides: Whether to consider pairs of sites with matching sides.
matching_roots: Whether to consider pairs of sites with matching roots.
Returns:
The mean conditional probabilities.
"""
return df.query('is_matching_sides == {} and '
'(reference_joint_type {} co_occurring_joint_type)'.format(
matching_sides, '=='
if matching_roots else '!=')).groupby([
'reference_joint_type', 'co_occurring_joint_type'
])['conditional_probability'].mean() | e6318fe4d284b134a8b655003b074ec554abca82 | 3,650,108 |
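An illustrative call on a tiny made-up table that uses the column names the query expects (is_matching_sides, reference_joint_type, co_occurring_joint_type, conditional_probability):

import pandas as pd

df = pd.DataFrame({
    'is_matching_sides': [True, True, True, False],
    'reference_joint_type': ['hip', 'hip', 'hip', 'hip'],
    'co_occurring_joint_type': ['hip', 'hip', 'knee', 'knee'],
    'conditional_probability': [0.9, 0.7, 0.5, 0.4],
})
# Matching sides and matching roots keeps the first two rows: mean = (0.9 + 0.7) / 2 = 0.8
print(get_means(df, matching_sides=True, matching_roots=True))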
from typing import Dict
import math
def main(
pathname: str,
sheetname: str='Sheet1',
min_row: int=None,
max_row: int=None,
min_col: int=None,
max_col: int=None,
openpyxl_kwargs: Dict=None
):
"""
    main is the main function. It accepts details about an Excel sheet and returns an HTML table matching it.
Arguments:
pathname: A path to the excel sheet
sheetname: The name of the sheet to convert
min_row: The minimum row to parse in the excel (1-based)
max_row: The maximum row to parse in the excel (1-based)
min_col: The minimum column to parse in the excel (1-based)
max_col: The maximum column to parse in the excel (1-based)
        openpyxl_kwargs: A dictionary of arguments to pass to openpyxl.load_workbook
"""
def out_of_range(bounds):
'''bounds are of the form (left_col, top_row, right_col, bottom_row)'''
return (bounds[0] < (min_col or 0)) or (bounds[1] < (min_row or 0))
openpyxl_kwargs = openpyxl_kwargs or {} # just in case people are mutating openpyxl_kwargs between calls.
wb = openpyxl.load_workbook(pathname, **openpyxl_kwargs)
ws = wb[sheetname]
ws_meta = {
'themes': color_utilities.get_theme_colors(wb),
'merged_cell_ranges': ws.merged_cells.ranges,
'column_widths': {(openpyxl.utils.cell.column_index_from_string(i) - 1): math.ceil(x.width * 7) for i, x in ws.column_dimensions.items()}, # converting excel units to pixels
'default_col_width': ws.sheet_format.defaultColWidth or 64,
'row_heights': {(i - 1): x.height * (4 / 3) for i, x in ws.row_dimensions.items()}, # converting excel units to pixels
'default_row_height': ws.sheet_format.defaultRowHeight or 20,
'min_row': min_row or 1,
'min_col': min_col or 1,
'max_row': min(max_row or ws.max_row, ws.max_row),
'max_col': min(max_col or ws.max_column, ws.max_column),
}
parsed_sheet = []
candidate_merge_ranges = [x for x in ws.merged_cells.ranges if out_of_range(x.bounds)]
for i, row in enumerate(ws.iter_rows(min_row=min_row, max_row=max_row, min_col=min_col, max_col=max_col)):
parsed_row = []
for j, cell in enumerate(row):
if isinstance(cell, openpyxl.cell.cell.Cell):
parsed_row.append(ParsedCell(cell, ws_meta, i, j))
else:
                for k, candidate_range in enumerate(candidate_merge_ranges):
                    if cell.coordinate in candidate_range:
                        parent_cell = ws.cell(row=candidate_range.bounds[1], column=candidate_range.bounds[0])
                        parsed_row.append(ParsedCell(parent_cell, ws_meta, i, j))
                        candidate_merge_ranges.pop(k)
                        break
parsed_sheet.append(parsed_row)
# it's important to first run background_color and then fix_borders, so that
# the border can be deleted on the cell in fix_background_color. Then you
# need to run fix_borders so their neighbors can also have their borders
# deleted. That's part of the reason we make default_border = False when
# running delete_side
fix_background_color(parsed_sheet)
fix_borders(parsed_sheet, ws_meta)
body = to_html(parsed_sheet)
return body | b44884bf84909b3bb76553aff247df6a961f3289 | 3,650,109 |
def get_go_module_path(package):
"""assumption: package name starts with <host>/org/repo"""
return "/".join(package.split("/")[3:]) | 1443d59391a36c7b9ba1d72ade9fd51f11cc1cc3 | 3,650,110 |
def get_hports(obj, *args, **kwargs):
"""
get_hports(obj, ...)
Get hierarchical references to ports *within* an object.
Parameters
----------
obj : object, Iterable - required
The object or objects associated with this query. Queries return a collection of objects associated with the
provided object or objects that match the query criteria. For example, `sdn.get_instances(netlist, ...)` would
return all of the instances *within* the provided definition that match the additional criteria.
patterns : str, Iterable - optional, positional or named, default: wildcard
The search patterns. Patterns can be a single string or an Iterable collection of strings. Patterns can be
absolute or they can contain wildcards or regular expressions. If `patterns` is not provided, then it defaults
to a wildcard.
recursive : bool - optional, default: False
Specify if search should be recursive or not meaning that sub hierarchical pins within an instance are
included or not.
is_case : bool - optional, named, default: True
Specify if patterns should be treated as case sensitive. Only applies to patterns. Does not alter fast lookup
behavior (if namespace policy uses case insensitive indexing, this parameter will not prevent a fast lookup
from returning a matching object even if the case is not an exact match).
is_re: bool - optional, named, default: False
Specify if patterns are regular expressions. If `False`, a pattern can still contain `*` and `?` wildcards. A
        `*` matches zero or more characters. A `?` matches up to a single character.
filter : function
        This is a single input function that can be used to filter out unwanted results. If not specified, all
        matching results are returned. Otherwise, results that cause the filter function to evaluate
        to true are the only items returned.
Returns
-------
href_ports : generator
The hierarchical references to ports associated with a particular object or collection of objects.
"""
# Check argument list
if len(args) == 1 and 'patterns' in kwargs:
raise TypeError("get_hports() got multiple values for argument 'patterns'")
if len(args) > 1 or any(x not in {'patterns', 'recursive', 'filter', 'is_case', 'is_re'} for x in
kwargs):
raise TypeError("Unknown usage. Please see help for more information.")
# Default values
filter_func = kwargs.get('filter', lambda x: True)
recursive = kwargs.get('recursive', False)
is_case = kwargs.get('is_case', True)
is_re = kwargs.get('is_re', False)
patterns = args[0] if len(args) == 1 else kwargs.get('patterns', ".*" if is_re else "*")
if isinstance(obj, (FirstClassElement, InnerPin, OuterPin, Wire)) is False:
try:
object_collection = list(iter(obj))
except TypeError:
object_collection = [obj]
else:
object_collection = [obj]
if all(isinstance(x, (HRef, FirstClassElement, InnerPin, OuterPin, Wire)) for x in object_collection) is False:
raise TypeError("get_hports() supports all netlist related objects and hierarchical references or a "
"collection of theses as the object searched, unsupported object provided")
if isinstance(patterns, str):
patterns = (patterns,)
assert isinstance(patterns, (FirstClassElement, InnerPin, OuterPin, Wire)) is False
return _get_hports(object_collection, patterns, recursive, is_case, is_re, filter_func) | f4a18a43018c10ef5860e974ed1d3eaf0ab73ac3 | 3,650,111 |
def binary_cross_entropy(preds, targets, name=None):
"""Computes binary cross entropy given `preds`.
    For brevity, let `x = preds`, `z = targets`. The logistic loss is
    loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
Args:
preds: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `preds`.
"""
eps = 1e-12
with ops.op_scope([preds, targets], name, "bce_loss") as name:
preds = ops.convert_to_tensor(preds, name="preds")
targets = ops.convert_to_tensor(targets, name="targets")
return tf.reduce_mean(-(targets * tf.log(preds + eps) +
(1. - targets) * tf.log(1. - preds + eps))) | b56e8bbaf16c688ebeb8be05b15a8c63745def3d | 3,650,113 |
from typing import Optional
def get_organization(name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOrganizationResult:
"""
Use this data source to retrieve basic information about a GitHub Organization.
## Example Usage
```python
import pulumi
import pulumi_github as github
test = github.get_organization(name="github")
```
:param str name: The name of the organization account
"""
__args__ = dict()
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('github:index/getOrganization:getOrganization', __args__, opts=opts, typ=GetOrganizationResult).value
return AwaitableGetOrganizationResult(
description=__ret__.description,
id=__ret__.id,
login=__ret__.login,
members=__ret__.members,
name=__ret__.name,
node_id=__ret__.node_id,
plan=__ret__.plan,
repositories=__ret__.repositories) | f534525be6bc7bc7e6e3c80be9b94543b327a8e7 | 3,650,114 |
import array
def numerator_LGG(mfld_dim: int,
ambient_dim: array,
vol: array,
epsilon: array,
prob: float) -> array: # our theory
"""
Theoretical M * epsilon^2 / K, our formula
Parameters
----------
mfld_dim
K, dimensionality of manifold
ambient_dim
N, dimensionality of ambient space
vol
V, volume of manifold
epsilon
allowed distortion
prob
allowed failure probability
"""
onev = np.ones_like(epsilon)
Me_K = (np.log(vol / prob) / mfld_dim + 0.5 * np.log(27. / mfld_dim)
+ np.log(ambient_dim / 4.) + 1.5 * onev)
return 16 * Me_K | b8f22f269cfb5a42ebb60118e6715c812d18ef73 | 3,650,115 |
from typing import OrderedDict
def group_nodes(node_list, tree_height):
"""
Groups a list of nodes.
"""
    groups = OrderedDict()
    for node in node_list:
        nodelist = _make_node_list(GroupNode(node), tree_height)
        if nodelist.get_name() not in groups:
            groups[nodelist.get_name()] = nodelist
        else:
            groups[nodelist.get_name()].merge(nodelist)
    return list(groups.values())
def power_over_line_rule_pos_rule(m, i, j, t):
"""
If decision variable m.var_x[i, j, t] is set to TRUE, the positive power over the line var_power_over_line is
limited by power_line_limit
:param m: complete pyomo model
:type m: pyomo model
:param i: startnode index of set_edge
:type i: int
:param j: endnode index of set_edge
:type j: int
:param t: type index of set_linetypes
:type t: int
:returns:
- **pyomo equality function**: pyomo rule
:rtype: function
"""
power_line_limit = m.var_x[i, j, t] \
* (m.dict_line_tech[t]['I_max_A'] * V_BASE * 10 ** 3) \
/ (S_BASE * 10 ** 6)
return m.var_power_over_line[i, j, t] <= power_line_limit | c4d5a70f4cc2a67a45f11b3072fcb98c9f54e4e2 | 3,650,117 |
def join_tiles(tiles):
"""Reconstructs the image from tiles."""
return np.concatenate(np.concatenate(tiles, 1), 1) | 559b88d8bdd42662961669050d29b6583dfbc706 | 3,650,118 |
def solve2(lines, max_total):
"""Solve the problem for Part 2."""
points = parse_points(lines)
xmin = min([p[0] for p in points])
xmax = max([p[0] for p in points])
ymin = min([p[1] for p in points])
ymax = max([p[1] for p in points])
size = 0
for x in range(xmin, xmax+1):
for y in range(ymin, ymax+1):
total = sum([dist((x, y), p) for p in points])
if total < max_total:
size += 1
return size | 1e743f736f92f60df960d4946f89ae222054d4aa | 3,650,120 |
import random
import json
def main():
"""
    Function that runs the other functions.
Return:
"""
def shuffle_dict(d):
"""
        Randomize the order of the keys of a dictionary.
        Args:
            d: The dictionary whose key order should be randomized.
        Return:
            d with its key order randomized.
"""
keys = list(d.keys())
random.shuffle(keys)
return dict([(key, d[key]) for key in keys])
"""
    input_node_dict: information about every node, collected in a dictionary. dict()
        key: the name of the node.
        value: a list
            1st element: the set of nodes that the key node points to. set()
            2nd element: the URL that the key node links to. str()
"""
input_node_dict = {"a": [set(), "example.html"],
"b": [{"a"}, "example.html"],
"c": [{"b", "e"}, "example.html"],
"d": [{"c", "a"}, "example.html"],
"e": [{"a"}, "example.html"],
"f": [{"e", "b", "a"}, "example.html"],
"g": [{"e"}, "example.html"],
"h": [{"g", "f"}, "example.html"],
"i": [{"a"}, "example.html"],
"j": [{"i"}, "example.html"],
"k": [{"j", "m"}, "example.html"],
"l": [{"i", "a"}, "example.html"],
"m": [{"i"}, "example.html"],
"n": [{"j", "m"}, "example.html"],
"o": [{"m", "l"}, "example.html"],
"p": [{"n", "k"}, "example.html"],
"q": [{"k", "o", "i"}, "example.html"],
}
node_list = create_node_list(shuffle_dict(input_node_dict))
remove_redundant_dependency(node_list)
assign_top_node(node_list)
assign_x_sequentially(node_list)
cut_edges_higher_than_1(node_list)
assign_x_sequentially(node_list)
sort_nodes_by_xcenter(node_list, downward=True)
sort_nodes_by_xcenter(node_list, downward=False)
node_attributes = node_list2node_dict(node_list)
    # Create the directed graph
graph = nx.DiGraph()
create_dependency_graph(node_list, graph)
    # Set each node's attribute values using node_attributes
nx.set_node_attributes(graph, node_attributes)
    # Draw the graph
nx.draw_networkx(graph)
    # Describe the graph in the cytoscape.js format (JSON)
graph_json = nx.cytoscape_data(graph, attrs=None)
with open('demo_sample.json', 'w') as f:
f.write(json.dumps(graph_json)) | eca402495d47c8d856568c2271f3de92b1ca2d4f | 3,650,121 |
def scheme_load(*args):
"""Load a Scheme source file. ARGS should be of the form (SYM, ENV) or (SYM,
QUIET, ENV). The file named SYM is loaded in environment ENV, with verbosity
determined by QUIET (default true)."""
if not (2 <= len(args) <= 3):
expressions = args[:-1]
raise SchemeError('"load" given incorrect number of arguments: '
'{0}'.format(len(expressions)))
sym = args[0]
quiet = args[1] if len(args) > 2 else True
env = args[-1]
if (scheme_stringp(sym)):
sym = eval(sym)
check_type(sym, scheme_symbolp, 0, "load")
with scheme_open(sym) as infile:
lines = infile.readlines()
args = (lines, None) if quiet else (lines,)
def next_line():
return buffer_lines(*args)
read_eval_print_loop(next_line, env, quiet=quiet)
return okay | d41bb38e60d5e82022c0857629937774c6b180d5 | 3,650,122 |
import torch
from typing import Optional
from typing import Tuple
from torch.nn.utils.rnn import pad_sequence
def mask2idx(
mask: torch.Tensor,
max_length: Optional[int] = None,
padding_value: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
E.g. input a tensor [[T T F F], [T T T F], [F F F T]] with padding value -1,
return [[0, 1, -1], [0, 1, 2], [3, -1, -1]]
:param mask: Mask tensor. Boolean. Not necessarily to be 2D.
:param max_length: If provided, will truncate.
:param padding_value: Padding value. Default to 0.
:return: Index tensor.
"""
shape_prefix, mask_length = mask.shape[:-1], mask.shape[-1]
flat_mask = mask.flatten(0, -2)
index_list = [torch.arange(mask_length, device=mask.device)[one_mask] for one_mask in flat_mask.unbind(0)]
index_tensor = pad_sequence(index_list, batch_first=True, padding_value=padding_value)
if max_length is not None:
index_tensor = index_tensor[:, :max_length]
index_tensor = index_tensor.reshape(*shape_prefix, -1)
return index_tensor, mask.sum(-1) | b0900aadd14c9eff8af4fcf8e9043127d2a3562c | 3,650,123 |
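The docstring example, run directly (pad_sequence comes from torch.nn.utils.rnn):

import torch

mask = torch.tensor([[True, True, False, False],
                     [True, True, True, False],
                     [False, False, False, True]])
idx, lengths = mask2idx(mask, padding_value=-1)
print(idx)       # tensor([[ 0,  1, -1], [ 0,  1,  2], [ 3, -1, -1]])
print(lengths)   # tensor([2, 3, 1])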
def generate_paths(data, path=''):
"""Iterate the json schema file and generate a list of all of the
XPath-like expression for each primitive value. An asterisk * represents
an array of items."""
paths = []
if isinstance(data, dict):
if len(data) == 0:
paths.append(f'{path}')
else:
for key, val in data.items():
if key == 'type':
if isinstance(val, list):
types = set(val)
else:
types = {val}
if types.isdisjoint({'object', 'array'}):
paths.append(f'{path}')
elif key == 'properties':
paths.extend(generate_paths(val, path))
else:
if key == 'items':
key = '*'
paths.extend(generate_paths(val, f'{path}/{key}'))
return paths | 367f244b44c254b077907ff8b219186bd820fccd | 3,650,124 |
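A worked example on a tiny JSON schema; arrays show up as '*' in the generated paths:

schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "tags": {"type": "array", "items": {"type": "string"}},
    },
}
print(generate_paths(schema))   # ['/name', '/tags/*']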
def enable_heater_shaker_python_api() -> bool:
"""Get whether to use the Heater-Shaker python API."""
return advs.get_setting_with_env_overload("enableHeaterShakerPAPI") | f849413c072034a15f0242e4b65dd36753a8d6f1 | 3,650,128 |
def solidityKeccak(abi_types, values):
"""
Executes keccak256 exactly as Solidity does.
Takes list of abi_types as inputs -- `[uint24, int8[], bool]`
and list of corresponding values -- `[20, [-1, 5, 0], True]`
"""
if len(abi_types) != len(values):
raise ValueError(
"Length mismatch between provided abi types and values. Got "
"{0} types and {1} values.".format(len(abi_types), len(values))
)
normalized_values = map_abi_data([abi_ens_resolver], abi_types, values)
hex_string = add_0x_prefix(''.join(
remove_0x_prefix(hex_encode_abi_type(abi_type, value))
for abi_type, value
in zip(abi_types, normalized_values)
))
return keccak(hexstr=hex_string) | 2e31d05404b204a233694f14e808c78e96e67aed | 3,650,129 |
def run_fib_recursive_mathy_cached(n):
"""Return Fibonacci sequence with length "n" using "fib_recursive_mathy_cached".
Args:
n: The length of the sequence to return.
Returns:
A list containing the Fibonacci sequence.
"""
return [fib_recursive_mathy_cached(i + 1) for i in range(n)] | 7cdd01ab747bccbd95dbb886e680c9214c2f883d | 3,650,131 |
def binning(LLRs_per_window,info,num_of_bins):
""" Genomic windows are distributed into bins. The LLRs in a genomic windows
are regarded as samples of a random variable. Within each bin, we calculate
the mean and population standard deviation of the mean of random variables.
The boundaries of the bins as well as the mean LLR and the standard-error
per bin are returned. """
#K,M,V = tuple(LLR_stat.keys()), *zip(*LLR_stat.values())
list_of_windows = [*LLRs_per_window.keys()]
bins = bin_genomic_windows(list_of_windows, info['chr_id'], num_of_bins)
X = [*bins]
LLR_matrix = [*LLRs_per_window.values()]
Y, E = [], []
for C in bins.values():
if C:
mean, std = mean_and_std_of_mean_of_rnd_var(LLR_matrix[C[0]:C[1]])
else:
mean, std = None, None
Y.append(mean)
E.append(std)
return X,Y,E | bc872aa9d32545a91d9854d83ba2efb238cbfc02 | 3,650,132 |
from typing import Optional
from typing import List
def conj(node: BaseNode,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None) -> BaseNode:
"""Conjugate a `node`.
Args:
node: A `BaseNode`.
name: Optional name to give the new node.
axis_names: Optional list of names for the axis.
Returns:
A new node. The complex conjugate of `node`.
Raises:
AttributeError: If `node` has no `backend` attribute.
"""
if not hasattr(node, 'backend'):
raise AttributeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
backend = node.backend
if not axis_names:
axis_names = node.axis_names
return Node(
backend.conj(node.tensor),
name=name,
axis_names=axis_names,
backend=backend) | 9baffa5ad00289b0f44ad4953dc12931eccb8ed9 | 3,650,133 |
from typing import Union
import torch
import numpy as np
from sklearn.metrics import f1_score
def f1(
predictions: Union[list, np.array, torch.Tensor],
labels: Union[list, np.array, torch.Tensor],
):
"""Calculate F1 score for binary classification."""
return f1_score(y_true=labels, y_pred=predictions) | e35fa02281351a2c2003982fc6450aa4d8e5561b | 3,650,134 |
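A quick check with hand-computable values: two true positives, one false positive and no false negatives gives precision 2/3, recall 1, and F1 = 0.8:

print(f1(predictions=[1, 0, 1, 1], labels=[1, 0, 0, 1]))   # 0.8 (up to float rounding)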
def length(v, squared=False, out=None, dtype=None):
"""Get the length of a vector.
Parameters
----------
v : array_like
Vector to normalize, can be Nx2, Nx3, or Nx4. If a 2D array is
specified, rows are treated as separate vectors.
squared : bool, optional
If ``True`` the squared length is returned. The default is ``False``.
out : ndarray, optional
Optional output array. Must be same `shape` and `dtype` as the expected
output if `out` was not specified.
dtype : dtype or str, optional
Data type for computations can either be 'float32' or 'float64'. If
`out` is specified, the data type of `out` is used and this argument is
ignored. If `out` is not provided, 'float64' is used by default.
Returns
-------
float or ndarray
Length of vector `v`.
"""
if out is None:
dtype = np.float64 if dtype is None else np.dtype(dtype).type
else:
dtype = np.dtype(out.dtype).type
v = np.asarray(v, dtype=dtype)
if v.ndim == 2:
assert v.shape[1] <= 4
toReturn = np.zeros((v.shape[0],), dtype=dtype) if out is None else out
v2d, vr = np.atleast_2d(v, toReturn) # 2d view of array
if squared:
vr[:, :] = np.sum(np.square(v2d), axis=1)
else:
vr[:, :] = np.sqrt(np.sum(np.square(v2d), axis=1))
elif v.ndim == 1:
assert v.shape[0] <= 4
if squared:
toReturn = np.sum(np.square(v))
else:
toReturn = np.sqrt(np.sum(np.square(v)))
else:
raise ValueError("Input arguments have invalid dimensions.")
return toReturn | 16caea4730b7dfd7cfcc71d253ee1fc7691fe05d | 3,650,135 |
def generateUniqueId():
"""
Generates a unique ID each time it is invoked.
Returns
-------
string
uniqueId
Examples
--------
>>> from arch.api import session
>>> session.generateUniqueId()
"""
return RuntimeInstance.SESSION.generateUniqueId() | f5dd066c1f9e6670b1e0949c890d5abc9931142a | 3,650,136 |
def order_rep(dumper, data):
""" YAML Dumper to represent OrderedDict """
return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.items(),
flow_style=False) | 6b455d49cd5702324f4b1e825dabb4af90734730 | 3,650,137 |
def match_format(
format_this: spec.BinaryInteger,
like_this: spec.BinaryInteger,
match_pad: bool = False,
) -> spec.BinaryInteger:
"""
will only match left pads, because pad size cannot be reliably determined
"""
output_format = get_binary_format(like_this)
output = convert(data=format_this, output_format=output_format)
if match_pad:
padded_size = get_binary_n_bytes(like_this)
output = add_binary_pad(output, padded_size=padded_size)
return output | 0a9638cbe5e522b1c7dc9ee316e559f9238b9abe | 3,650,138 |
def parse_nonterm_6_elems(expr_list, idx):
"""
Try to parse a non-terminal node from six elements of {expr_list}, starting
from {idx}.
Return the new expression list on success, None on error.
"""
(it_a, it_b, it_c, it_d, it_e, it_f) = expr_list[idx : idx + 6]
# Match against and_n.
if (
isinstance(it_a, Node)
and it_a.p.has_all("Bdu")
and it_b == OP_NOTIF
and isinstance(it_c, Node)
and it_c.t == NodeType.JUST_0
and it_d == OP_ELSE
and isinstance(it_e, Node)
and it_e.p.has_any("BKV")
and it_f == OP_ENDIF
):
node = Node().construct_and_n(it_a, it_e)
expr_list[idx : idx + 6] = [node]
return expr_list
# Match against andor.
if (
isinstance(it_a, Node)
and it_a.p.has_all("Bdu")
and it_b == OP_NOTIF
and isinstance(it_c, Node)
and it_c.p.has_any("BKV")
and it_d == OP_ELSE
and isinstance(it_e, Node)
and it_e.p.has_any("BKV")
and it_f == OP_ENDIF
):
node = Node().construct_andor(it_a, it_e, it_c)
expr_list[idx : idx + 6] = [node]
return expr_list | 1c82d6b0654cf15c5aafec2c08e92c86cb5a4543 | 3,650,139 |
def make1d(u, v, num_cols=224):
"""Make a 2D image index linear.
"""
return (u * num_cols + v).astype("int") | 1f37c7ae06071ce641561eadc1d0a42a0b74508d | 3,650,140 |
def xkcd(scale=1, length=100, randomness=2):
"""
Turn on `xkcd <https://xkcd.com/>`_ sketch-style drawing mode. This will
only have effect on things drawn after this function is called.
For best results, the "Humor Sans" font should be installed: it is
not included with Matplotlib.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source line.
length : float, optional
The length of the wiggle along the line.
randomness : float, optional
The scale factor by which the length is shrunken or expanded.
Notes
-----
This function works by a number of rcParams, so it will probably
override others you have set before.
If you want the effects of this function to be temporary, it can
be used as a context manager, for example::
with plt.xkcd():
# This figure will be in XKCD-style
fig1 = plt.figure()
# ...
# This figure will be in regular style
fig2 = plt.figure()
"""
return _xkcd(scale, length, randomness) | 1ce7aed60b2b67febb1658e98f14434005f3434a | 3,650,141 |
import pandas as pd
def team_size(data):
"""
Computes team size of each paper by taking the number of authors in 'authors'
Input:
    - data: dataframe (dataset) with an 'authors' column [pandas dataframe]
Output:
- team: vector of team_size for each paper of the given dataset [pandas series]
with team_size [int]
"""
team = pd.Series([len(i) for i in data['authors']]) # teamsize
return team | 76a5aafe90cf63fb0506525e566ca7759d0e27ce | 3,650,142 |
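A minimal example; only an 'authors' column is required:

import pandas as pd

papers = pd.DataFrame({'authors': [['alice', 'bob'], ['carol'], ['dave', 'erin', 'frank']]})
print(team_size(papers).tolist())   # [2, 1, 3]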
from datetime import datetime
def header_to_date(header):
""" return the initial date based on the header of an ascii file"""
try:
starttime = datetime.strptime(header[2], '%Y%m%d_%H%M')
except ValueError:
try:
starttime = datetime.strptime(
header[2] + '_' + header[3], '%Y%m%d_%H'
)
except ValueError:
print("Warning: could not retrieve starttime from header,\
setting to default value ")
starttime = datetime(1970, 1, 1)
return starttime | 3e2757ae39a2a9008a5f0fb8cd8fe031770c83ad | 3,650,143 |
def PhenylAlanineCenterNormal(residue):
""" Phenylalanine """
PHE_ATOMS = ["CG", "CD1", "CD2", "CE1", "CE2", "CZ"]
return RingCenterNormal(residue, PHE_ATOMS) | 4d96c7fe14bd411749136cf1569f32bd1c01679d | 3,650,144 |
def diamBiasK(diam, B, Kexcess, RshellRstar=2.5):
"""
diameter bias (>1) due to the presence of a shell
only works for scalar diam, B and Kexcess
validity: Kexcess>0 and Kexcess<0.1 and B*diam <~ 500
return 1 if Kexcess <= 0
"""
global __biasData, KLUDGE
d = np.abs(__biasData['Rshell/Rstar']-RshellRstar)
j0 = np.argsort(d)[0]
j1 = np.argsort(d)[1]
if Kexcess>0 and diam*B/2.2<__biasData['Bdw'][-1]:
tmp = [np.interp(diam*B/2.2, __biasData['Bdw'],
__biasData['bias'][j0,k,:])
for k in range(__biasData['bias'].shape[1]) ]
r0 = np.interp(np.log10(KLUDGE*Kexcess), np.log10(__biasData['fr'][j0,:]), tmp)
tmp = [np.interp(diam*B/2.2, __biasData['Bdw'],
__biasData['bias'][j1,k,:])
for k in range(__biasData['bias'].shape[1]) ]
        r1 = np.interp(np.log10(KLUDGE*Kexcess), np.log10(__biasData['fr'][j1,:]), tmp)
return r0 + (r1-r0)*(RshellRstar-__biasData['Rshell/Rstar'][j0])/\
(__biasData['Rshell/Rstar'][j1]-__biasData['Rshell/Rstar'][j0])
else:
return 1 | 6754ee70c64b2028cd5b72d3331e65bb64ea53b4 | 3,650,148 |
def interpolate_poses_from_samples(time_stamped_poses, samples):
"""
Interpolate time stamped poses at the time stamps provided in samples.
The poses are expected in the following format:
[timestamp [s], x [m], y [m], z [m], qx, qy, qz, qw]
We apply linear interpolation to the position and use SLERP for
the quaternion interpolation.
"""
# Extract the quaternions from the poses.
quaternions = []
for pose in time_stamped_poses[:, 1:]:
quaternions.append(Quaternion(q=pose[3:]))
# interpolate the quaternions.
quaternions_interp = resample_quaternions_from_samples(
time_stamped_poses[:, 0], quaternions, samples)
# Interpolate the position and assemble the full aligned pose vector.
num_poses = samples.shape[0]
aligned_poses = np.zeros((num_poses, time_stamped_poses.shape[1]))
aligned_poses[:, 0] = samples[:]
for i in [1, 2, 3]:
aligned_poses[:, i] = np.interp(
samples,
np.asarray(time_stamped_poses[:, 0]).ravel(),
np.asarray(time_stamped_poses[:, i]).ravel())
for i in range(0, num_poses):
aligned_poses[i, 4:8] = quaternions_interp[i].q
return aligned_poses.copy() | 3131fe895a53b7d6c262930c635ce8cfa1c277f2 | 3,650,149 |
def test_add_client(case, client_name, client=None, client_id=None, duplicate_client=None, check_errors=False,
log_checker=None):
"""
    UC MEMBER_47 main test method. Tries to add a new client to a security server and checks the
    audit log if log_checker is set.
:param case: MainController object
:param client_name: str - existing client name
:param client: dict|None - existing client and new subsystem data; this or client_id is required
:param client_id: str|None - existing client and new subsystem data as string; this or client is required
:param duplicate_client: dict|None - if set, existing client subsystem data (used for checking error messages)
:param check_errors: bool - True to check for error scenarios, False otherwise
    :param log_checker: object|None - if set, audit.log checker used to verify the expected log entries; if None, no log check
:return:
"""
self = case
def create_registration_request():
# UC MEMBER_47 1 - select to add a security server client
self.log('MEMBER_47 1 - select to add a security server client')
current_log_lines = None
self.logdata = []
if client is None:
client_data = xroad.split_xroad_subsystem(client_id)
else:
client_data = client
client_data['name'] = client_name
if log_checker:
current_log_lines = log_checker.get_line_count()
check_values = []
# Create a list of erroneous and/or testing values to be entered as client
check_value_errors = [
[['', client_data['class'], ''], 'Missing parameter: {0}', False],
[['', client_data['class'], client_data['subsystem']], 'Missing parameter: {0}', False],
# [[client_data['code'], client_data['class'], ''], 'Missing parameter: {2}', False],
[[256 * 'A', client_data['class'], client_data['subsystem']],
"Parameter '{0}' input exceeds 255 characters", False],
[[client_data['code'], client_data['class'], 256 * 'A'], "Parameter '{2}' input exceeds 255 characters",
False],
[[256 * 'A', client_data['class'], 256 * 'A'], "Parameter '{0}' input exceeds 255 characters", False],
[[' {0} '.format(client_data['code']), client_data['class'],
' {0} '.format(client_data['subsystem'])], CLIENT_ALREADY_EXISTS_ERROR, True]
]
# UC MEMBER_47 2, 3 - insert the X-Road identifier of the client and parse the user input
self.log('MEMBER_47 2, 3, 4 - insert the X-Road identifier of the client and parse the user input')
if check_errors:
# UC MEMBER_47 3a - check for erroneous inputs / parse user input
check_values += check_value_errors
self.log('MEMBER_47 3a - check for erroneous inputs')
if duplicate_client:
# UC MEMBER_47 4 - verify that a client does not already exist
self.log('MEMBER_47 4a - verify that the client does not already exist')
check_values += [[['{0}'.format(duplicate_client['code']), duplicate_client['class'],
'{0}'.format(duplicate_client['subsystem'])], 'Client already exists', False]]
# Try adding the client with different parameters (delete all added clients)
add_clients(self, check_values, instance=client_data['instance'], delete=False)
if current_log_lines:
# UC MEMBER_47 3a, 4a, 7 - Check logs for entries
self.log('MEMBER_47 3a, 4a, 7 - checking logs for: {0}'.format(self.logdata))
logs_found = log_checker.check_log(self.logdata, from_line=current_log_lines + 1)
self.is_true(logs_found,
msg='Some log entries were missing. Expected: "{0}", found: "{1}"'.format(self.logdata,
log_checker.log_output))
return create_registration_request | 0b15ee41c8a61c0602e89e59a6343e67c6dc712f | 3,650,150 |
from typing import List
def get_data_unit_labels(data_unit: DataUnit) -> List[Attributes]:
"""
Extract important information from data_unit. That is, get only bounding_boxes and
associated classifications.
Args:
data_unit: The data unit to extract information from.
Returns: list of pairs of objects and associated answers for the particular data
unit.
"""
res = []
for obj in data_unit.objects:
        # Classifications are both on the object_answer.classifications and on the object.
# Store all nested classification info.
obj_answer = obj.object_answer
classes = [
ClassificationInfo(
ontology_id=obj.ontology_object.id, value=obj.value, name=obj.name
)
]
queue = obj_answer.classifications
while len(queue) > 0:
c = queue.pop(0)
# Skip text for now.
if (
not hasattr(c.ontology_object, "type")
or c.ontology_object.type == "text"
):
continue
classes.append(
ClassificationInfo(
ontology_id=c.ontology_object.id, value=c.value, name=c.name
)
)
if (
c.answers is not None
and isinstance(c.answers, list)
and len(c.answers) > 0
):
queue.extend(c.answers)
elif c.answers is not None:
raise ValueError(
f"I didn't expect to see this. What to do in this situation?\n{c.answers}"
)
# Bounding box and polygon
bbox = obj.bounding_box if hasattr(obj, "bounding_box") else None
polygon = obj.polygon if hasattr(obj, "polygon") else None
res.append(Attributes(bbox=bbox, polygon=polygon, classes=classes, du=obj))
return res | 36577b13713258fe1542c9bfc377a469ed5d6fd6 | 3,650,151 |
def calc_4points_bezier_path(svec, syaw, spitch, evec, eyaw, epitch, offset, n_points=100):
"""
Compute control points and path given start and end position.
    :param svec: (numpy array) position of the starting point
    :param syaw: (float) yaw angle at the start
    :param spitch: (float) pitch angle at the start
    :param evec: (numpy array) position of the ending point
    :param eyaw: (float) yaw angle at the end
    :param epitch: (float) pitch angle at the end
    :param offset: (float) distance of the inner control points from the endpoints
    :param n_points: (int) number of points sampled along the curve
:return: (numpy array, numpy array)
"""
#dist = np.linalg.norm(svec - evec) / offset
dist = offset
control_points = np.array(
(svec,
svec + dist*np.array([np.cos(syaw)*np.cos(spitch), np.sin(syaw)*np.cos(spitch), np.sin(spitch)]),
evec - dist*np.array([np.cos(eyaw)*np.cos(epitch), np.sin(eyaw)*np.cos(epitch), np.sin(epitch)]),
evec))
    path = calc_bezier_path(control_points, n_points=n_points)
return path, control_points | 455225b10c034895c32329bc14ce6dc384f5e0b3 | 3,650,152 |
import copy
def permutationwithparity(n):
"""Returns a list of all permutation of n integers, with its first element being the parity"""
if (n == 1):
result = [[1,1]]
return result
else:
result = permutationwithparity(n-1)
newresult = []
for shorterpermutation in result:
for position in range(1,n+1):
parity = shorterpermutation[0]
for swaps in range(n-position):
parity = - parity
newpermutation = copy.deepcopy(shorterpermutation)
newpermutation.insert(position,n)
newpermutation[0] = parity
newresult.append(newpermutation)
return newresult | 218b728c2118a8cca98c019dff036e0ae2593974 | 3,650,153 |
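The n=2 case by hand: each inner list starts with the parity, followed by the permutation itself; the swap (2, 1) has parity -1 and the identity (1, 2) has parity +1:

print(permutationwithparity(2))   # [[-1, 2, 1], [1, 1, 2]]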
from typing import List
import torch
import logging
def _train_model(model: BertForValueExtraction, optimizer, scheduler, train_data_loader, val_data_loader) -> List[int]:
"""
Main method to train & evaluate model.
:param model: BertForValueExtraction object
:param optimizer: optimizer
:param scheduler: scheduler
:param train_data_loader: training DataLoader object
:param val_data_loader: validation DataLoader object
:return: List[int] - validation predictions
"""
val_predictions = []
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.info(f"Using device: {device}")
for epoch_number in trange(EPOCHS, desc="Epoch"):
# put the model into training mode.
model.train()
# reset the total loss for this epoch.
total_loss = 0
# training loop
train_true_labels, train_predictions = [], []
for step, batch in enumerate(train_data_loader):
# add batch to gpu if available
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_token_type_ids, b_labels = batch
# always clear any previously calculated gradients before performing a backward pass
model.zero_grad()
# forward pass
# This will return the loss (together with the model output) because we have provided the `labels`
outputs = model(b_input_ids, token_type_ids=b_token_type_ids, labels=b_labels)
# get the loss
loss = outputs[0]
# move logits and labels to CPU
logits = outputs[1].detach().cpu().numpy()
b_labels = b_labels.to("cpu").numpy()
train_predictions.extend(logits.argmax(1))
train_true_labels.extend(b_labels)
# perform a backward pass to calculate the gradients
loss.backward()
# track train loss
total_loss += loss.item()
# clip the norm of the gradient
# this is to help prevent the "exploding gradients" problem
torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=MAX_GRAD_NORM)
# update parameters
optimizer.step()
# Update the learning rate
scheduler.step()
# calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_data_loader)
logging.info(f"Average train loss on epoch {epoch_number}: {avg_train_loss}")
accuracy = accuracy_score(train_predictions, train_true_labels)[0]
logging.info(f"Train Accuracy on epoch {epoch_number + 1}: {accuracy}")
# Put the model into evaluation mode
model.eval()
# reset the validation loss for this epoch
eval_loss, eval_accuracy = 0, 0
val_predictions, true_labels = [], []
for batch in val_data_loader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_token_type_ids, b_labels = batch
# telling the model not to compute or store gradients, saving memory and speeding up validation
with torch.no_grad():
# forward pass, calculate logit predictions
# this will return the logits rather than the loss because we have not provided labels
outputs = model(b_input_ids, token_type_ids=b_token_type_ids, labels=b_labels)
# move logits and labels to CPU
logits = outputs[1].detach().cpu().numpy()
b_labels = b_labels.to("cpu").numpy()
# calculate the accuracy for this batch of test sentences
eval_loss += outputs[0].mean().item()
val_predictions.extend(logits.argmax(1))
true_labels.extend(b_labels)
eval_loss = eval_loss / len(val_data_loader)
logging.info(f"Validation loss on epoch {epoch_number + 1}: {eval_loss}")
accuracy = accuracy_score(val_predictions, true_labels)[0]
logging.info(f"Validation Accuracy on epoch {epoch_number + 1}: {accuracy}")
logging.info("\n")
return [val_prediction.tolist()[0] for val_prediction in val_predictions] | fbf5a588d9da24f72c0955c767f454297d91e74d | 3,650,154 |
def mass_at_two_by_counting_mod_power(self, k):
"""
Computes the local mass at `p=2` assuming that it's stable `(mod 2^k)`.
Note: This is **way** too slow to be useful, even when k=1!!!
TO DO: Remove this routine, or try to compile it!
INPUT:
k -- an integer >= 1
OUTPUT:
a rational number
EXAMPLE::
sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1])
sage: Q.mass_at_two_by_counting_mod_power(1)
4
"""
R = IntegerModRing(2**k)
Q1 = self.base_change_to(R)
n = self.dim()
MS = MatrixSpace(R, n)
ct = sum([1 for x in mrange([2**k] * (n**2)) if Q1(MS(x)) == Q1]) ## Count the solutions mod 2^k
two_mass = ZZ(1)/2 * (ZZ(ct) / ZZ(2)**(k*n*(n-1)/2))
return two_mass | 35f85dd32e3c81a4921c5d5a87ab4cdc13e8ae46 | 3,650,155 |
import logging
def get_paginiation_data(first_pagination_url, family, cazy_home, args, session):
"""Parse the first paginiation page and retrieve URLs to all pagination page for the Family.
:param first_pagination_url: str, URL to the fist page of the family
:param family: Family class instance, represents a unique CAZy family
:param cazy_home: str, URL to CAZy home page
:param args: cmd-line args parser
:param session: open SQL database session
Return dict of error messages if errors arise OR list of URLs to paginiation pages, and the
total number of proteins in the family.
"""
logger = logging.getLogger(__name__)
# retrieve a list of all page urls of protein tables for the CAZy family
first_pagination_page, error_message = get_page(
first_pagination_url, args, max_tries=args.retries,
)
if first_pagination_page is None:
logger.warning(
f"Could not connect to {first_pagination_url} after {args.retries} attempts\n"
f"The following error was raised:\n{error_message}\nTherefore, could not "
"retrieve all pagination pages URLs, therefore, cannot scrape proteins from "
f"{family.name}"
)
return(
{
"url": (
f"{first_pagination_url}\t{family.cazy_class}\t"
f"Failed to connect to first pagination page for {family.name}, therefore "
f"could not retrieve URLs to all paginiation pages\t{error_message}"
),
"format": None,
},
None,
)
# Get the URLS to all pages of proteins and the total number of proteins in the (sub)family
protein_page_urls, total_proteins = get_paginiation_page_urls(
first_pagination_url, first_pagination_page, cazy_home, family.name,
)
if total_proteins == 'Deleted family!':
# add family to the database
logger.warning(
f'{family.name} listed as "Deleted family" in CAZy.\nAdding family name to the database'
)
sql_interface.add_deleted_cazy_family(family.name, session)
return(
{
"url": None,
"format": (
f"{first_pagination_url}\t{family.cazy_class}\t{family.name} listed as "
"'Deleted family' in CAZy.\tAdded family name to the database"
),
},
None
)
elif total_proteins == 'Empty family':
# add family to the database
logger.warning(
f'{family.name} is an empty family, but not listed "Deleted family" in CAZy.\n'
'Adding family name to the database'
)
sql_interface.add_deleted_cazy_family(family.name, session)
return(
{
"url": None,
"format": (
f"{first_pagination_url}\t{family.cazy_class}\t{family.name} is empty in CAZy, "
"but not listed as Deleted fam.\tAdded family name to the database"
),
},
None
)
elif total_proteins == 'Failed Retrieval':
# add family to the database
logger.warning(
f"Could not retrieve total protiens count for {family.name}, appears to be empty\n"
"but not listed 'Deleted family' in CAZy.\nAdding family name to the database"
)
sql_interface.add_deleted_cazy_family(family.name, session)
return(
{
"url": None,
"format": (
f"{first_pagination_url}\t{family.cazy_class}\t{family.name} Could not "
"retrieve total protiens count\tAdded family name to the database"
),
},
None
)
elif len(protein_page_urls) == 0:
logger.warning(f"No protein page URLs found for {family.name}: {first_pagination_url}")
return(
{
"url": None,
"format": (
f"{first_pagination_url}\t{family.cazy_class}\tFailed to retrieve URLs to "
f"protein table pages for {family.name}\tNo specific error message availble."
),
},
None
)
else:
return protein_page_urls, total_proteins | 89cdd6e0d333cc69f0b92c202b6f0d4257849382 | 3,650,156 |
def gravitationalPotentialEnergy(mass, gravity, y):
"""1 J = 1 N*m = 1 Kg*m**2/s**2
Variables: m=mass g=gravity constant y=height
Usage: Energy stored by springs"""
U = mass*gravity*y
return U | f4fcfc9e7ddac8b246b2200e3886b79f6706936e | 3,650,157 |
def to_list(obj):
"""List Converter
Takes any object and converts it to a `list`.
If the object is already a `list` it is just returned,
If the object is None an empty `list` is returned,
Else a `list` is created with the object as it's first element.
Args:
obj (any object): the object to be converted
Returns:
A list containing the given object
"""
if isinstance(obj, list):
return obj
elif isinstance(obj, tuple):
return list(obj)
elif obj is None:
return []
else:
return [obj, ] | 3ca373867ea3c30edcf7267bba69ef2ee3c7722e | 3,650,159 |
from molsysmt.basic import select, extract
def remove(molecular_system, selection=None, frame_indices=None, to_form=None, syntaxis='MolSysMT'):
"""remove(item, selection=None, frame_indices=None, syntaxis='MolSysMT')
Remove atoms or frames from the molecular model.
Paragraph with detailed explanation.
Parameters
----------
item: molecular model
Molecular model in any of the supported forms by MolSysMT. (See: XXX)
selection: str, list, tuple or np.ndarray, default=None
Atoms selection over which this method applies. The selection can be given by a
list, tuple or numpy array of integers (0-based), or by means of a string following any of
the selection syntaxis parsable by MolSysMT (see: :func:`molsysmt.select`).
frame_indices: str, list, tuple or np.ndarray, default=None
XXX
syntaxis: str, default='MolSysMT'
Syntaxis used in the argument `selection` (in case it is a string). The
current options supported by MolSysMt can be found in section XXX (see: :func:`molsysmt.select`).
Returns
-------
item: molecular model
The result is a new molecular model with the same form as the input item.
Examples
--------
Remove chains 0 and 1 from the pdb: 1B3T.
>>> import molsysmt as m3t
>>> system = m3t.load('pdb:1B3T')
Check the number of chains
>>> m3t.get(system, n_chains=True)
8
Remove chains 0 and 1
>>> new_system = m3t.remove(system, 'chainid 0 1')
Check the number of chains of the new molecular model
>>> m3t.get(new_system, n_chains=True)
6
See Also
--------
:func:`molsysmt.select`
Notes
-----
There is a specific method to remove solvent atoms: molsysmt.remove_solvent and another one to
remove hydrogens: molsysmt.remove_hydrogens.
"""
molecular_system = digest_molecular_system(molecular_system)
frame_indices = digest_frame_indices(frame_indices)
atom_indices_to_be_kept = 'all'
frame_indices_to_be_kept = 'all'
if selection is not None:
atom_indices_to_be_removed = select(molecular_system, selection=selection, syntaxis=syntaxis)
atom_indices_to_be_kept = complementary_atom_indices(molecular_system, atom_indices_to_be_removed)
if frame_indices is not None:
frame_indices_to_be_kept = complementary_frame_indices(molecular_system, frame_indices)
tmp_item = extract(molecular_system, selection=atom_indices_to_be_kept, frame_indices=frame_indices_to_be_kept, to_form=to_form)
tmp_item = digest_output(tmp_item)
return tmp_item | a2232e6226f76df6760eef59aab0f31edf7a75ec | 3,650,160 |
def macd_cross_func_pd(data):
"""
    MACD, the god-like indicator
"""
if (ST.VERBOSE in data.columns):
print('Phase macd_cross_func', QA_util_timestamp_to_str())
MACD = QA.QA_indicator_MACD(data)
MACD_CROSS = pd.DataFrame(columns=[ST.MACD_CROSS,
FLD.MACD_CROSS_JX_BEFORE,
FLD.MACD_CROSS_SX_BEFORE,
FLD.NEGATIVE_LOWER_PRICE,
FLD.NEGATIVE_LOWER_PRICE_BEFORE,
FLD.LOWER_SETTLE_PRICE,
FLD.LOWER_SETTLE_PRICE_BEFORE,
FLD.HIGHER_SETTLE_PRICE,
FLD.HIGHER_SETTLE_PRICE_BEFORE],
index=data.index)
MACD_CROSS = MACD_CROSS.assign(DIF=MACD[FLD.DIF])
MACD_CROSS = MACD_CROSS.assign(DEA=MACD[FLD.DEA])
MACD_CROSS = MACD_CROSS.assign(MACD=MACD[FLD.MACD])
MACD_CROSS = MACD_CROSS.assign(ZERO=0)
    # The new version merges indicators, so DELTA is renamed to MACD_DELTA
MACD_CROSS = MACD_CROSS.assign(MACD_DELTA=MACD[FLD.MACD].diff())
MACD_CROSS[FLD.MACD_CROSS_JX_BEFORE] = CROSS(MACD_CROSS[FLD.DIF],
MACD_CROSS[FLD.DEA])
MACD_CROSS[FLD.MACD_CROSS_SX_BEFORE] = CROSS(MACD_CROSS[FLD.DEA],
MACD_CROSS[FLD.DIF])
MACD_CROSS[ST.MACD_CROSS] = np.where(MACD_CROSS[FLD.MACD_CROSS_JX_BEFORE] == 1, 1,
np.where(MACD_CROSS[FLD.MACD_CROSS_SX_BEFORE] == 1,
-1, 0))
MACD_CROSS[FLD.DEA_CROSS_JX_BEFORE] = Timeline_duration(CROSS(MACD_CROSS[FLD.DEA],
MACD_CROSS[FLD.ZERO]).values)
MACD_CROSS[FLD.DIF_CROSS_JX_BEFORE] = Timeline_duration(CROSS(MACD_CROSS[FLD.DIF],
MACD_CROSS[FLD.ZERO]).values)
MACD_CROSS[FLD.DEA_CROSS_SX_BEFORE] = Timeline_duration(CROSS(MACD_CROSS[FLD.ZERO],
MACD_CROSS[FLD.DEA]).values)
MACD_CROSS[FLD.MACD_CROSS_JX_BEFORE] = Timeline_duration(MACD_CROSS[FLD.MACD_CROSS_JX_BEFORE].values)
MACD_CROSS[FLD.MACD_CROSS_SX_BEFORE] = Timeline_duration(MACD_CROSS[FLD.MACD_CROSS_SX_BEFORE].values)
MACD_CROSS[FLD.DEA_SLOPE] = talib.LINEARREG_SLOPE(MACD[FLD.DEA], timeperiod=14)
MACD_CROSS['MACD_TIDE_MEDIAN'] = int(min(MACD_CROSS[FLD.MACD_CROSS_JX_BEFORE].median(),
MACD_CROSS[FLD.MACD_CROSS_SX_BEFORE].median()))
MACD_CROSS[FLD.DEA_SLOPE_UB] = MACD_CROSS[FLD.DEA_SLOPE].abs().rolling(MACD_CROSS['MACD_TIDE_MEDIAN'].max()).median()
negative_lower_price_state = (MACD_CROSS[FLD.MACD] < 0) & \
(MACD_CROSS[FLD.DEA] < 0) & \
(MACD_CROSS[FLD.MACD] < MACD_CROSS[FLD.DEA])
negative_lower_price_state = (negative_lower_price_state == True) | \
(MACD_CROSS[FLD.MACD] < 0) & \
(((MACD_CROSS[FLD.DEA] < 0) & \
((MACD_CROSS[FLD.DEA_CROSS_SX_BEFORE] > 6) | \
(MACD_CROSS[FLD.MACD_CROSS_SX_BEFORE] > 12))) | \
((MACD_CROSS[FLD.DIF] < 0) & \
(MACD_CROSS[FLD.MACD_CROSS_SX_BEFORE] > 12))) & \
(MACD_CROSS[FLD.MACD] < MACD_CROSS[FLD.DEA]) & \
(abs(MACD_CROSS[FLD.MACD]) > abs(MACD_CROSS[FLD.DEA]))
MACD_CROSS[FLD.NEGATIVE_LOWER_PRICE] = negative_lower_price_state.apply(int)
MACD_CROSS[FLD.NEGATIVE_LOWER_PRICE_BEFORE] = Timeline_duration(MACD_CROSS[FLD.NEGATIVE_LOWER_PRICE].values)
lower_settle_price_state = ~(negative_lower_price_state == True) & \
(MACD_CROSS[FLD.DEA] < 0) & \
(MACD_CROSS[FLD.MACD_DELTA] > 0)
MACD_CROSS[FLD.LOWER_SETTLE_PRICE] = lower_settle_price_state.apply(int)
MACD_CROSS[FLD.LOWER_SETTLE_PRICE_BEFORE] = Timeline_duration(MACD_CROSS[FLD.LOWER_SETTLE_PRICE].values)
higher_settle_price_state = (MACD_CROSS[FLD.DEA] > 0) & \
(MACD_CROSS[FLD.MACD] > MACD_CROSS[FLD.DEA])
MACD_CROSS[FLD.HIGHER_SETTLE_PRICE] = higher_settle_price_state.apply(int)
MACD_CROSS[FLD.HIGHER_SETTLE_PRICE_BEFORE] = Timeline_duration(MACD_CROSS[FLD.HIGHER_SETTLE_PRICE].values)
return MACD_CROSS | d7c970efc931a3f1f2c25e51cf8e55e630eb37ad | 3,650,161 |
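A hedged, comment-only usage sketch for the feature builder above; the QUANTAXIS fetch call and the module's FLD/ST constants are assumptions about the surrounding codebase.
# Hedged usage sketch -- assumes QUANTAXIS data access and this module's FLD/ST constants:
# import QUANTAXIS as QA
# data = QA.QA_fetch_stock_day_adv('000001', '2020-01-01', '2020-12-31').to_qfq().data
# features = macd_cross_func_pd(data)
# features[[ST.MACD_CROSS, FLD.MACD_CROSS_JX_BEFORE, FLD.MACD_CROSS_SX_BEFORE]].tail()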
import json

from django.http import HttpResponse
def jsonp_response(data, callback="f", status=200, serializer=None):
"""
Returns an HttpResponse object containing JSON serialized data,
wrapped in a JSONP callback.
The mime-type is set to application/x-javascript, and the charset to UTF-8.
"""
val = json.dumps(data, default=serializer)
ret = "{callback}('{val}');".format(callback=callback, val=val)
return HttpResponse(ret,
status=status,
content_type='application/x-javascript; charset=UTF-8') | 3ac71358043184b84b2f1f610a852b0b587d158d | 3,650,162 |
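A minimal usage sketch, assuming a Django project; the view name and callback are illustrative.
# Illustrative Django view using jsonp_response (names here are hypothetical).
def score_view(request):
    return jsonp_response({"score": 42}, callback="handleScore")

# The client receives: handleScore('{"score": 42}');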
import collections
def enhance_bonds(bond_dataframe, structure_dict):
"""Enhance the bonds dataframe by including derived information.
Args:
bond_dataframe: Pandas dataframe read from train.csv or test.csv.
structure_dict: Output of :func:`make_structure_dict`, after running :func:`enhance_structure_dict`.
Returns:
pandas.DataFrame: Same dataframe, modified in-place, with derived information added.
"""
bond_dataframe.sort_values(['molecule_name', 'atom_index_0', 'atom_index_1'], inplace=True)
assert int(bond_dataframe.groupby("molecule_name").count().max()[0]) <= MAX_BOND_COUNT
new_columns = collections.defaultdict(list)
for index, row in bond_dataframe.iterrows():
molecule_name, iatom0, iatom1 = row['molecule_name'], row['atom_index_0'], row['atom_index_1']
if 'predict' not in structure_dict[molecule_name]:
structure_dict[molecule_name]['predict'] = structure_dict[molecule_name]['bond_orders'] * 0
structure_dict[molecule_name]['predict'][iatom0, iatom1] = 1
structure_dict[molecule_name]['predict'][iatom1, iatom0] = 1
long_symbols = [structure_dict[molecule_name]['long_symbols'][x] for x in [iatom0, iatom1]]
# labeled_type
if all([x[0] == 'H' for x in long_symbols]):
lt = row['type']
elif not any([x[0] == 'H' for x in long_symbols]):
raise ValueError("No hydrogen found in {}".format(row))
else:
ls = [x for x in long_symbols if x[0] != 'H'][0]
lt = row["type"] + ls[1:].replace('.0', '')
if lt in classification_corrections:
lt = classification_corrections[lt]
if lt in small_longtypes:
lt = lt.split('_')[0]
new_columns["labeled_type"].append(lt)
# sublabeled type
new_columns["sublabel_type"].append(row['type'] + '-' + '-'.join(sorted(long_symbols)))
# bond order
new_columns["bond_order"].append(structure_dict[molecule_name]['bond_orders'][iatom0, iatom1])
new_columns["predict"].append(1)
for key in new_columns:
bond_dataframe[key] = new_columns[key]
return bond_dataframe | 46836576b6bec8e1ca9f5685185d0f379b9e63f6 | 3,650,163 |
import numbers

import numpy as np
def _tofloat(value):
"""Convert a parameter to float or float array"""
if isiterable(value):
try:
            value = np.asanyarray(value, dtype=float)
except (TypeError, ValueError):
# catch arrays with strings or user errors like different
# types of parameters in a parameter set
raise InputParameterError(
"Parameter of {0} could not be converted to "
"float".format(type(value)))
elif isinstance(value, Quantity):
# Quantities are fine as is
pass
elif isinstance(value, np.ndarray):
# A scalar/dimensionless array
value = float(value.item())
    # bool is a numbers.Number subclass, so exclude it before the numeric conversion
    elif isinstance(value, (numbers.Number, np.number)) and not isinstance(value, bool):
        value = float(value)
    elif isinstance(value, bool):
        raise InputParameterError(
            "Expected parameter to be of numerical type, not boolean")
else:
raise InputParameterError(
"Don't know how to convert parameter of {0} to "
"float".format(type(value)))
return value | 796b03699cb3b1e201436b6eb61df0636318de14 | 3,650,164 |
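A small usage sketch, assuming the astropy helpers referenced above (isiterable, Quantity, InputParameterError) are importable in this module.
import numpy as np

# Assumes the astropy helpers used by _tofloat are in scope.
arr = _tofloat([1, 2, 3])
assert arr.dtype == np.float64 and arr.tolist() == [1.0, 2.0, 3.0]
assert _tofloat(7) == 7.0   # plain ints are promoted to float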
import time
import functools
from multiprocessing import Pool

import numpy as np
def sp2mgc(sp, order=20, alpha=0.35, gamma=-0.41, miniter=2,
maxiter=30, criteria=0.001, otype=0, verbose=False):
"""
Accepts 1D or 2D one-sided spectrum (complex or real valued).
If 2D, assumes time is axis 0.
Returns mel generalized cepstral coefficients.
Based on r9y9 Julia code
https://github.com/r9y9/MelGeneralizedCepstrums.jl
"""
if len(sp.shape) == 1:
sp = np.concatenate((sp, sp[:, 1:][:, ::-1]), axis=0)
return _sp2mgc(sp, order=order, alpha=alpha, gamma=gamma,
miniter=miniter, maxiter=maxiter, criteria=criteria,
otype=otype, verbose=verbose)
else:
sp = np.concatenate((sp, sp[:, 1:][:, ::-1]), axis=1)
# Slooow, use multiprocessing to speed up a bit
# http://blog.shenwei.me/python-multiprocessing-pool-difference-between-map-apply-map_async-apply_async/
# http://stackoverflow.com/questions/5666576/show-the-progress-of-a-python-multiprocessing-pool-map-call
c = [(i + 1, sp.shape[0], sp[i]) for i in range(sp.shape[0])]
p = Pool()
start = time.time()
if verbose:
print("Starting conversion of %i frames" % sp.shape[0])
print("This may take some time...")
# takes ~360s for 630 frames, 1 process
itr = p.map_async(functools.partial(_sp_convert, order=order, alpha=alpha, gamma=gamma, miniter=miniter, maxiter=maxiter, criteria=criteria, otype=otype, verbose=False), c, callback=_sp_collect_result)
sz = len(c) // itr._chunksize
if (sz * itr._chunksize) != len(c):
sz += 1
last_remaining = None
while True:
remaining = itr._number_left
if verbose:
if remaining != last_remaining:
last_remaining = remaining
print("%i chunks of %i complete" % (sz - remaining, sz))
if itr.ready():
break
time.sleep(.5)
"""
# takes ~455s for 630 frames
itr = p.imap_unordered(functools.partial(_sp_convert, order=order, alpha=alpha, gamma=gamma, miniter=miniter, maxiter=maxiter, criteria=criteria, otype=otype, verbose=False), c)
res = []
# print ~every 5%
mod = int(len(c)) // 20
if mod < 1:
mod = 1
for i, res_i in enumerate(itr, 1):
res.append(res_i)
if i % mod == 0 or i == 1:
print("%i of %i complete" % (i, len(c)))
"""
p.close()
p.join()
stop = time.time()
if verbose:
print("Processed %i frames in %s seconds" % (sp.shape[0], stop - start))
# map_async result comes in chunks
flat = [a_i for a in _sp_convert_results for a_i in a]
final = [o[1] for o in sorted(flat, key=lambda x: x[0])]
for i in range(len(_sp_convert_results)):
_sp_convert_results.pop()
return np.array(final) | 01e380ddf10c56c3c4b9d09726ac98aef58715ca | 3,650,165 |
def has_prefix(sub_s):
"""
:param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
:return: (bool) If there is any words with prefix stored in sub_s
"""
    for key in word_dic:
        if key.startswith(sub_s):
            return True
    return False
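A quick sketch of the expected behavior; word_dic is the module-level dictionary the function relies on, and its contents here are made up.
word_dic = {"apple": 1, "apply": 2, "banana": 3}   # illustrative stand-in for the global dictionary

assert has_prefix("app") is True    # "apple"/"apply" start with "app"
assert has_prefix("zzz") is False   # no word starts with "zzz"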
def _package_data(vec_f_image, vec_f_imageBscan3d, downscale_size, downscale_size_bscan, crop_size, num_octa,
str_angiography, str_structure, str_bscan, vec_str_layer, vec_str_layer_bscan3d, str_bscan_layer,
dict_layer_order, dict_layer_order_bscan3d):
"""
Organizes the angiography, OCT and b-scan images into a list of cubes for a single subject and also returns which
eye it is. Difference from function below: contains logic that deal with cases where there are two eyes
:param vec_f_image: list of absolute paths to individual images from a single subject
:param vec_f_imageBscan3d: list of absolute paths to individual bscan images from a single subject
:param downscale_size: desired final size of the loaded images, e.g. (256, 256)
:param downscale_size_bscan: desired final size of the loaded bscan images, e.g. (256, 256)
:param crop_size: desired number of pixels to exclude from analysis for bscan images, e.g. [50, 60]
:param num_octa: number of OCTA images per patient, e.g. 5
:param str_angiography: identifier for angiography images in the filename
:param str_structure: identifier for structural OCT images in the filename
:param str_bscan: identifier for b-scan OCT images in the filename
:param vec_str_layer: list of strings that contain the relevant layers to be used for training
:param vec_str_layer_bscan3d: list of strings that contain the relevant bscan images to be used for training
:param str_bscan_layer: string that contains the type of b-scan images to be used in filename, e.g. Flow
:param dict_layer_order: dictionary that contains the order in which the different layers will be organized
:param dict_layer_order_bscan3d: dictionary that contains the order in which the bscans cubes will be organized
:return: return a list in the form [packed_images, str_eye]. If both eyes are available, then each variable would
be a list of cubes and strings; if only one eye is available, packed_images would be a cube and str_eye would be
OD/OS; if nothing is available then both are None
"""
# Test if the dataset has data from both eyes
if any("/OD/" in s for s in vec_f_image) & any("/OS/" in s for s in vec_f_image):
vec_f_image_OD = [s for s in vec_f_image if "/OD/" in s]
vec_f_image_OS = [s for s in vec_f_image if "/OS/" in s]
vec_f_imageBscan3d_OD = [s for s in vec_f_imageBscan3d if "/OD/" in s]
vec_f_imageBscan3d_OS = [s for s in vec_f_imageBscan3d if "/OS/" in s]
x_curr_OD = _form_cubes(vec_f_image_OD, vec_f_imageBscan3d_OD, num_octa, downscale_size, downscale_size_bscan, crop_size,
str_angiography, str_structure, str_bscan, vec_str_layer, vec_str_layer_bscan3d,
str_bscan_layer, dict_layer_order, dict_layer_order_bscan3d)
x_curr_OS = _form_cubes(vec_f_image_OS, vec_f_imageBscan3d_OS, num_octa, downscale_size, downscale_size_bscan, crop_size,
str_angiography, str_structure, str_bscan, vec_str_layer, vec_str_layer_bscan3d,
str_bscan_layer, dict_layer_order, dict_layer_order_bscan3d)
# Figure out if any of the single eye data is none
if x_curr_OD is not None and x_curr_OS is not None:
packed_x_curr = [x_curr_OD, x_curr_OS]
str_eye = ['OD', 'OS']
elif x_curr_OD is None and x_curr_OS is not None:
packed_x_curr = x_curr_OS
str_eye = 'OS'
elif x_curr_OD is not None and x_curr_OS is None:
packed_x_curr = x_curr_OD
str_eye = 'OD'
else:
packed_x_curr = None
str_eye = None
else:
x_curr = _form_cubes(vec_f_image, vec_f_imageBscan3d, num_octa, downscale_size, downscale_size_bscan, crop_size,
str_angiography, str_structure, str_bscan, vec_str_layer, vec_str_layer_bscan3d,
str_bscan_layer, dict_layer_order, dict_layer_order_bscan3d)
packed_x_curr = x_curr
if any("_OD_" in s for s in vec_f_image):
str_eye = 'OD'
elif any("_OS_" in s for s in vec_f_image):
str_eye = 'OS'
else:
str_eye = None
return packed_x_curr, str_eye | dfe9ed58bd25715948cfb09f069c76d2f844df3c | 3,650,167 |
def apply_padding_by_last(list_of_lists):
""" The same as applying pad_into_lists followed by carry_previous_over_none
but takes a list of lists instead of events
Args:
lists_of_lists: list of lists with possibly different lengths
Returns:
lists of lists padded to the same length by the last element in each list
"""
padded_lists = pad_into_lists(
[enumerate(vals) for vals in list_of_lists],
lambda x: x[0]
)
return [
# remove the index
[e[1] if e is not None else e for e in carry_previous_over_none(padded)]
for padded in padded_lists
] | 83316fedb46230665fc543d4a3961cb72692023b | 3,650,168 |
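A comment-only sketch of the expected behavior per the docstring; pad_into_lists and carry_previous_over_none are defined elsewhere in this module.
# Expected behavior, per the docstring (helpers are assumed to be defined in this module):
# apply_padding_by_last([[1, 2, 3], [10]])  ->  [[1, 2, 3], [10, 10, 10]]
# The shorter list is padded with its own last element until both lists have equal length.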
def combine_patterns(
*patterns: str, seperator: Expression = None, combine_all=False
) -> str:
"""
Intelligently combine following input patterns.
Parameters
----------
patterns :
The patterns to combine.
seperator :
The seperator to use. If None, the default seperator :data:`WORD_SEPARATOR`
is used.
combine_all :
If True, the start matches any of the input patterns. If False, the start
matches the first pattern only, followed by any combination of all other
patterns including the first pattern.
Returns
-------
str
The combined pattern.
"""
if seperator is None:
seperator = WORD_SEPARATOR
start_group = non_capturing_group(*[str(p) for p in patterns])
pattern = wrap_pattern(
(start_group if combine_all else patterns[0])
+ non_capturing_group(seperator + start_group)
+ "*"
)
return pattern | 1bcd703a183b72d88a8bfa7f1680754e0e3ee35e | 3,650,169 |
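A hedged, comment-only sketch of the intended output shape; wrap_pattern, non_capturing_group and WORD_SEPARATOR come from this package, so the exact string is not reproduced here.
# pattern = combine_patterns(r"\d+", r"[a-z]+")
# -> roughly: a wrapped pattern matching r"\d+" followed by zero or more
#    occurrences of WORD_SEPARATOR plus either sub-pattern.
# With combine_all=True the match may also start with either sub-pattern.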
import datetime
async def utc_timediff(t1, t2):
"""
Calculate the absolute difference between two UTC time strings
Parameters
----------
t1, t2 : str
"""
time1 = datetime.datetime.strptime(t1, timefmt)
time2 = datetime.datetime.strptime(t2, timefmt)
timedelt = time1 - time2
return abs(timedelt.total_seconds()) | ba0b406048467029f6d05d72898b534dd6309e45 | 3,650,170 |
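A small usage sketch; the timefmt shown here is an assumed stand-in for the module-level format string defined elsewhere.
import asyncio

timefmt = "%Y-%m-%dT%H:%M:%S"   # assumed stand-in for the module-level format string

secs = asyncio.run(utc_timediff("2021-01-01T00:01:30", "2021-01-01T00:00:00"))
assert secs == 90.0             # absolute difference, so argument order does not matter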
def function():
"""
>>> function()
'decorated function'
"""
return 'function' | 46b892fb70b5672909d87efcf76ffd3f96f9cf7f | 3,650,171 |
def load_stopwords(file_path):
"""
:param file_path: Stop word file path
:return: Stop word list
"""
stopwords = [line.strip() for line in open(file_path, 'r', encoding='utf-8').readlines()]
return stopwords | 9cb6578b5cbc608bc72da7c4f363b4f84d0adbb7 | 3,650,172 |
def hello():
"""Return a friendly HTTP greeting."""
return 'abc' | 9acda65833bec5976c3e2f0ffa77df8a2a7537bf | 3,650,173 |
def show_ip_ospf_route(
enode,
_shell='vtysh',
_shell_args={
'matches': None,
'newline': True,
'timeout': None,
'connection': None
}
):
"""
Show ospf detail.
This function runs the following vtysh command:
::
# show ip ospf route
:param dict kwargs: arguments to pass to the send_command of the
vtysh shell.
:param str _shell: shell to be selected
:param dict _shell_args: low-level shell API arguments
:return: A dictionary as returned by
:func:`topology_lib_vtysh.parser.parse_show_ip_ospf_route`
"""
cmd = [
'show ip ospf route'
]
shell = enode.get_shell(_shell)
shell.send_command(
(' '.join(cmd)).format(**locals()), **_shell_args
)
result = shell.get_response(
connection=_shell_args.get('connection', None)
)
return parse_show_ip_ospf_route(result) | 0f80ef7a46211002141ea489459df6be78aeeb28 | 3,650,174 |
from typing import get_origin
def attrs_classes(
verb,
typ,
ctx,
pre_hook="__json_pre_decode__",
post_hook="__json_post_encode__",
check="__json_check__",
):
"""
Handle an ``@attr.s`` or ``@dataclass`` decorated class.
This rule also implements several hooks to handle complex cases, especially to
manage backwards compatibility. Hooks should be resilient against invalid data,
and should not mutate their inputs.
`__json_pre_decode__` is used by decoders constructed by `RuleSet.json_to_python`.
It will be called before decoding with the JSON object and may adjust them to fit
the expected structure, which must be a `dict` with the necessary fields.
The checker generated by `inspect_json` will also call `__json_pre_decode__` before
inspecting the value generated.
`__json_post_encode__` is used by encoders constructed by `RuleSet.python_to_json`.
It will be called after encoding with the JSON object and may adjust it as
necessary.
`__json_check__` may be used to completely override the `inspect_json` check generated
for this class.
"""
if verb not in _SUPPORTED_VERBS:
return
if is_generic(typ):
typ_args = get_argument_map(typ)
typ = get_origin(typ)
else:
typ_args = None
inner_map = build_attribute_map(verb, typ, ctx, typ_args)
if inner_map is None:
return
if verb == INSP_PY:
return partial(check_isinst, typ=typ)
if verb == JSON2PY:
pre_hook_method = getattr(typ, pre_hook, identity)
return partial(
convert_dict_to_attrs,
pre_hook=pre_hook_method,
inner_map=inner_map,
con=typ,
)
elif verb == PY2JSON:
post_hook = post_hook if hasattr(typ, post_hook) else None
return partial(convert_attrs_to_dict, post_hook=post_hook, inner_map=inner_map)
elif verb == INSP_JSON:
check = getattr(typ, check, None)
if check:
return check
pre_hook_method = getattr(typ, pre_hook, identity)
return partial(check_dict, inner_map=inner_map, pre_hook=pre_hook_method)
elif verb == PATTERN:
return pat.Object.exact(
(pat.String.exact(attr.name), attr.inner or pat.Unkown)
for attr in inner_map
if attr.is_required
) | 9db1f0a9ddefe1fb32d52331e158e8e2565b2697 | 3,650,175 |
import torch
def delta(pricer, *, create_graph: bool = False, **kwargs) -> torch.Tensor:
"""Computes and returns the delta of a derivative.
Args:
pricer (callable): Pricing formula of a derivative.
create_graph (bool): If True, graph of the derivative will be
constructed, allowing to compute higher order derivative products.
Default: False.
**kwargs: Other parameters passed to `pricer`.
Returns:
torch.Tensor: The greek of a derivative.
Examples:
>>> pricer = lambda spot, expiry: spot * expiry
>>> spot = torch.tensor([1.0, 2.0, 3.0])
>>> expiry = torch.tensor([2.0, 3.0, 4.0])
>>> delta(pricer, spot=spot, expiry=expiry)
tensor([2., 3., 4.])
"""
if kwargs.get("strike") is None and kwargs.get("spot") is None:
# Since delta does not depend on strike,
# assign an arbitrary value (1.0) to strike if not given.
kwargs["strike"] = torch.tensor(1.0)
spot = _parse_spot(**kwargs).requires_grad_()
kwargs["spot"] = spot
if "moneyness" in kwargs:
# lest moneyness is used to compute price and grad wrt spot cannot be computed
kwargs["moneyness"] = None
if "log_moneyness" in kwargs:
# lest moneyness is used to compute price and grad wrt spot cannot be computed
kwargs["log_moneyness"] = None
price = pricer(**kwargs)
return torch.autograd.grad(
price,
inputs=spot,
grad_outputs=torch.ones_like(price),
create_graph=create_graph,
)[0] | 88216117ba58afd68c88515210ad927a581eaf54 | 3,650,176 |
def formula_search(min_texts, max_texts, min_entries, max_entries):
"""Filter search results"""
result = Cf.query.filter(
Cf.n_entries >= (min_entries or MIN),
Cf.n_entries <= (max_entries or MAX),
Cf.unique_text >= (min_texts or MIN),
Cf.unique_text <= (max_texts or MAX)
).group_by(
Cf.short_ngram_id
).order_by(Cf.verb_text).all()
return formula_search_to_dict(result) | 938cc7ea25c2fe1bcd240ad4b60e517295eebe7b | 3,650,177 |
from datetime import datetime
import uuid
def versioneer():
"""
Function used to generate a new version string when saving a new Service
bundle. User can also override this function to get a customized version format
"""
date_string = datetime.now().strftime("%Y%m%d")
random_hash = uuid.uuid4().hex[:6].upper()
# Example output: '20191009_D246ED'
return date_string + "_" + random_hash | 7c5123d28e3bee45f2c9f7d519e830cf80e9fea8 | 3,650,178 |
import logging

import urllib3

# Assumed module-level helpers used by request_url: a urllib3 pool manager and a logger.
http = urllib3.PoolManager()
logger = logging.getLogger(__name__)

def request_url(method, url):
"""Request the specific url and return data"""
try:
r = http.request(method, url)
if r.status == 200:
return r.data.decode('utf-8')
else:
raise Exception("Fail to {} data from {}".format(method, url))
except Exception as e:
logger.error(str(e), exc_info=True)
raise e | 9d25df49c9996364a9cb0195b90454378aefa5fd | 3,650,179 |
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""
Rather than attempting to merge files that were modified on both
branches, it marks them as unresolved. The resolve command must be
used to resolve these conflicts."""
# for change/delete conflicts write out the changed version, then fail
if fcd.isabsent():
_underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
return 1, False | 4d90ff7296fa9042392c7ffe28034fbbf804614f | 3,650,180 |
from typing import Callable

import numpy as np
def __noise_with_pdf(im_arr: np.ndarray, pdf: Callable, **kwargs) -> np.ndarray:
"""Apply noise to given image array using pdf function that generates random values."""
util.check_input(im_arr)
im_arr = im_arr/255.0
noise = pdf(**kwargs, size=im_arr.shape)
out_im = im_arr + noise
out_im = np.clip(out_im, 0.0, 1.0)
return (out_im*255.0).astype(np.uint8) | d1b9f612f4490ac526c2a952c54ee03cbedc2139 | 3,650,181 |
def train_test_split(df, frac):
"""
Create a Train/Test split function for a dataframe and return both
the Training and Testing sets.
Frac refers to the percent of data you would like to set aside
for training.
"""
frac = round(len(df)*frac)
train = df[:frac]
test = df[frac:]
return train, test | 8e233e017a261141f57f7b2bff9a527e275d2ed9 | 3,650,183 |
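A quick usage sketch with a toy dataframe.
import pandas as pd

df = pd.DataFrame({"x": range(10)})
train, test = train_test_split(df, frac=0.8)   # first 80% for training
assert len(train) == 8 and len(test) == 2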
def load_special_config(config_filename, special_type, image_type='extent'):
"""Loads the google earth ("google"), science on a sphere ("sos"), or any other
special type of image config.
"""
cfg = load_config(config_filename)
# Promote the image type's keys
cfg = _merge_keys(cfg, cfg[image_type])
cfg = _prune_keys(cfg, image_type)
# Promote the special key
cfg = _merge_keys(cfg, cfg[special_type])
cfg = _substitute_colortable(cfg)
return cfg | b3a38d4ea9e39e42685604b4f01c7dcfa8ee2cdd | 3,650,184 |
def softmax_strategy_cim(attrs, inputs, out_type, target):
"""softmax cim strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.cim.schedule_softmax),
name="softmax.cim",
)
return strategy | 11c5e581d9ad2814068558bc9faf24f57f5acba3 | 3,650,185 |
def get_statements_by_hash(hash_list, ev_limit=100, best_first=True, tries=2):
"""Get fully formed statements from a list of hashes.
Parameters
----------
hash_list : list[int or str]
A list of statement hashes.
ev_limit : int or None
Limit the amount of evidence returned per Statement. Default is 100.
best_first : bool
If True, the preassembled statements will be sorted by the amount of
evidence they have, and those with the most evidence will be
prioritized. When using `max_stmts`, this means you will get the "best"
statements. If False, statements will be queried in arbitrary order.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can
also help gracefully handle an unreliable connection, if you're
willing to wait. Default is 2.
"""
if not isinstance(hash_list, list):
raise ValueError("The `hash_list` input is a list, not %s."
% type(hash_list))
if not hash_list:
return []
if isinstance(hash_list[0], str):
hash_list = [int(h) for h in hash_list]
if not all([isinstance(h, int) for h in hash_list]):
raise ValueError("Hashes must be ints or strings that can be "
"converted into ints.")
resp = submit_statement_request('post', 'from_hashes', ev_limit=ev_limit,
data={'hashes': hash_list},
best_first=best_first, tries=tries)
return stmts_from_json(resp.json()['statements'].values()) | 26924f7d05b35b6655eb69989a32677edf1eedbf | 3,650,186 |
from functools import partial

def _is_in(val_set):
"""Check if a value is included in a set of values"""
def inner(val, val_set):
if val not in val_set:
if isinstance(val_set, xrange):
acceptable = "[%d-%d]" % (val_set[0], val_set[-1])
else:
acceptable = "{%s}" % ", ".join(val_set)
raise ValueError("Acceptable values are: %s" % acceptable)
return partial(inner, val_set=val_set) | 462afc33c73ae78bd62fa446f652a30492150643 | 3,650,187 |
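A comment-only usage sketch; the code above is Python 2 style (it relies on xrange), so this is illustrative rather than runnable as-is.
# check_color = _is_in({"red", "green", "blue"})
# check_color("red")     # passes silently
# check_color("yellow")  # raises ValueError listing the acceptable values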
def onebyone(transform, loglikelihood, parameter_names, prior, start = 0.5, ftol=0.1, disp=0, nsteps=40000,
parallel=False, find_uncertainties=False, **args):
"""
**Convex optimization based on Brent's method**
A strict assumption of one optimum between the parameter limits is used.
The bounds are narrowed until it is found, i.e. the likelihood function is flat
within the bounds.
* If optimum outside bracket, expands bracket until contained.
* Thus guaranteed to return local optimum.
* Supports parallelization (multiple parameters are treated independently)
* Supports finding ML uncertainties (Delta-Chi^2=1)
Very useful for 1-3d problems.
Otherwise useful, reproducible/deterministic algorithm for finding the minimum in
well-behaved likelihoods, where the parameters are weakly independent,
or to find a good starting point.
Optimizes each parameter in order, assuming they are largely independent.
For 1-dimensional algorithm used, see :func:`jbopt.opt_grid`
:param ftol: difference in values at which the function can be considered flat
:param compute_errors: compute standard deviation of gaussian around optimum
"""
def minfunc(cube):
cube = numpy.array(cube)
if (cube <= 1e-10).any() or (cube >= 1-1e-10).any():
return 1e100
params = transform(cube)
l = loglikelihood(params)
p = prior(params)
if numpy.isinf(p) and p < 0:
print ' prior rejection'
return -1e300
if numpy.isnan(l):
return -1e300
return -l - p
if parallel:
func = opt_grid_parallel
else:
func = opt_grid
n_params = len(parameter_names)
start = start + numpy.zeros(n_params)
ret = func(start, minfunc, [(1e-10, 1-1e-10)] * n_params, ftol=ftol, disp=disp, compute_errors=find_uncertainties)
if find_uncertainties:
c0 = ret[0]
p0 = transform(c0)
stdev = numpy.zeros(n_params)
lower = numpy.zeros(n_params)
upper = numpy.zeros(n_params)
for i, (lo, hi) in enumerate(ret[1]):
c1 = numpy.copy(c0)
c1[i] = lo
c2 = numpy.copy(c0)
c2[i] = hi
p1 = transform(c1)
p2 = transform(c2)
stdev[i] = numpy.abs(p2[i] - p1[i]) / 2
lower[i] = min(p2[i], p1[i])
upper[i] = max(p2[i], p1[i])
return dict(start=ret[0], maximum=p0,
stdev=stdev, upper=upper, lower=lower,
method='opt_grid')
else:
return dict(start=ret, maximum=transform(ret), method='opt_grid') | 86a12d7d7b738cc8ab7d8e65e3765e6b81f825b4 | 3,650,189 |
def test_data():
"""Test data object for the main PlantCV module."""
return TestData() | f8b2dc49d460ddadcd74c89da1159274660ecdfb | 3,650,191 |
def make_cmap(infile):
"""Call correct cmap function depending on file."""
cornames = ["coherence-cog.tif", "phsig.cor.geo.vrt", "topophase.cor.geo.vrt"]
phsnames = ["unwrapped-phase-cog.tif", "filt_topophase.unw.geo.vrt"]
if infile in cornames:
cpt = make_coherence_cmap()
elif infile in phsnames:
cpt = make_wrapped_phase_cmap()
else: # amplitude cmap
cpt = make_amplitude_cmap()
return cpt | cd6408e1f8718b7073d1c5baa4e5a20ab8553720 | 3,650,193 |
import numpy as np

def tail_correction(r, V, r_switch):
"""Apply a tail correction to a potential making it go to zero smoothly.
Parameters
----------
r : np.ndarray, shape=(n_points,), dtype=float
The radius values at which the potential is given.
V : np.ndarray, shape=r.shape, dtype=float
The potential values at each radius value.
r_switch : float, optional, default=pot_r[-1] - 5 * dr
The radius after which a tail correction is applied.
References
----------
.. [1] https://codeblue.umich.edu/hoomd-blue/doc/classhoomd__script_1_1pair_1_1pair.html
"""
r_cut = r[-1]
idx_r_switch, r_switch = find_nearest(r, r_switch)
S_r = np.ones_like(r)
r = r[idx_r_switch:]
S_r[idx_r_switch:] = (
(r_cut ** 2 - r ** 2) ** 2
* (r_cut ** 2 + 2 * r ** 2 - 3 * r_switch ** 2)
/ (r_cut ** 2 - r_switch ** 2) ** 3
)
return V * S_r | 50934031776cfd4d92ef7a05ca2e63c215518352 | 3,650,194 |
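A usage sketch; the find_nearest helper below is a hypothetical stand-in for the module's own helper, which returns the index and value closest to the target.
import numpy as np

def find_nearest(arr, target):
    # Hypothetical stand-in for the module's helper used by tail_correction.
    idx = int(np.abs(arr - target).argmin())
    return idx, arr[idx]

r = np.linspace(0.5, 2.5, 201)
V = 4.0 * ((1.0 / r) ** 12 - (1.0 / r) ** 6)     # a Lennard-Jones-style potential
V_smooth = tail_correction(r, V, r_switch=2.2)
assert abs(V_smooth[-1]) < 1e-12                 # potential is forced to zero at r_cut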
def process_input(input_string):
"""
>>> for i in range (0, 5):
... parent_node = Node(None)
... parent_node.random_tree(4)
... new_node = process_input(parent_node.get_test_string())
... parent_node.compute_meta_value() - new_node.compute_meta_value()
0
0
0
0
0
>>>
:param input_string: The string from the file that represents the license tree.
:return: The tree of Nodes
"""
node = Node.node_factory(input_string)
return node | 3cea94806034ebd3d95fdc7b5c8844d79698a684 | 3,650,195 |
def equiv_alpha(x,y):
"""check if two closed terms are equivalent module alpha
conversion. for now, we assume the terms are closed
"""
if x == y:
return True
if il.is_lambda(x) and il.is_lambda(y):
        return x.body == il.substitute(y.body, zip(x.variables, y.variables))
    return False
def delete(isamAppliance, file_id, check_mode=False, force=False):
"""
Clearing a common log file
"""
ret_obj = {'warnings': ''}
try:
ret_obj = get(isamAppliance, file_id, start=1, size=1)
delete_required = True # Exception thrown if the file is empty
# Check for Docker
warnings = ret_obj['warnings']
if warnings and 'Docker' in warnings[0]:
return isamAppliance.create_return_object(warnings=ret_obj['warnings'])
except:
delete_required = False
if force is True or delete_required is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=ret_obj['warnings'])
else:
return isamAppliance.invoke_delete(
"Clearing a common log file",
"{0}/{1}".format(uri, file_id), requires_model=requires_model)
return isamAppliance.create_return_object(warnings=ret_obj['warnings']) | 7f27555eb33e489a59e1d9d1a7127713afd25d2f | 3,650,198 |
def filter_imgs(df, properties = [], values = []):
"""Filters pandas dataframe according to properties and a range of values
Input:
df - Pandas dataframe
properties - Array of column names to be filtered
values - Array of tuples containing bounds for each filter
Output:
df - Filtered dataframe
"""
for i, val in enumerate(properties):
df = df.loc[(df[val] > values[i][0]) & (df[val] < values[i][1])]
return df | cdc5c8bfef10fae60f48cee743df049581a0df04 | 3,650,199 |
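A usage sketch with a toy dataframe; the column names are illustrative.
import pandas as pd

df = pd.DataFrame({"area": [10, 50, 200], "solidity": [0.2, 0.8, 0.9]})
# Keep rows with 20 < area < 300 and 0.5 < solidity < 1.0 (bounds are exclusive).
kept = filter_imgs(df, properties=["area", "solidity"], values=[(20, 300), (0.5, 1.0)])
assert len(kept) == 2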
def conjugate_term(term: tuple) -> tuple:
"""Returns the sorted hermitian conjugate of the term."""
conj_term = [conjugate_field(field) for field in term]
return tuple(sorted(conj_term)) | d21834ff5c2abe5ff6ec85304db962d809b07637 | 3,650,200 |
def create_notify_policy_if_not_exists(project, user, level=NotifyLevel.involved):
"""
Given a project and user, create notification policy for it.
"""
model_cls = apps.get_model("notifications", "NotifyPolicy")
try:
result = model_cls.objects.get_or_create(project=project,
user=user,
defaults={"notify_level": level})
return result[0]
except IntegrityError as e:
raise exc.IntegrityError(_("Notify exists for specified user and project")) from e | 3d2eec3e3a5f12a4cbba3c3c111608f38133cf94 | 3,650,201 |
def parse_table(soup, start_gen, end_gen):
"""
- Finds the PKMN names in the soup object and puts them into a list.
- Establishes a gen range.
- Gets rid of repeated entries (formes, e.g. Deoxys) using an OrderedSet.
- Joins the list with commas.
- Handles both Nidorans having 'unmappable' characters in their names (u2642 and u2640).
params: soup (BeautifulSoup object), start_gen (int), end_gen (int)
returns: pkmn_string (string)
"""
pokes = []
for cell in soup.find_all("td", attrs={'style': None}):
for name in cell.find_all("a"):
pokes.append(name.string)
start_index = pokes.index(GEN_STARTS_WITH[start_gen])
end_index = pokes.index(GEN_ENDS_WITH[end_gen]) + 1
# Doesn't have to be ordered, just personal preference.
unique_list = OrderedSet(pokes[start_index:end_index])
if start_gen != end_gen:
print(f"{len(unique_list)} Pokémon from gen {start_gen} to {end_gen} were fetched.")
else:
print(f"{len(unique_list)} Pokémon from gen {start_gen} were fetched.")
pkmn_string = ', '.join(unique_list)
for key, value in NIDORAN_CASE.items():
# Handling of Nidoran male/female symbols.
pkmn_string = pkmn_string.replace(key, value)
return pkmn_string | 1bb1ce6135f162e532b02e2d95eabd675540878d | 3,650,202 |
import torch
def get_expert_parallel_src_rank():
"""Calculate the global rank corresponding to a local rank zero
in the expert parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_expert_parallel_world_size()
return (global_rank // local_world_size) * local_world_size | 0022f953707f26f9a3b3b021422ebc16e1d14213 | 3,650,203 |
from typing import Dict
from typing import Any
from typing import Optional
def _add_extra_kwargs(
kwargs: Dict[str, Any], extra_kwargs: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""
Safely add additional keyword arguments to an existing dictionary
Parameters
----------
kwargs : dict
Keyword argument dictionary
extra_kwargs : dict, default None
Keyword argument dictionary to add
Returns
-------
dict
Keyword dictionary with added keyword arguments
Notes
-----
There is no checking for duplicate keys
"""
if extra_kwargs is None:
return kwargs
else:
kwargs_copy = kwargs.copy()
kwargs_copy.update(extra_kwargs)
return kwargs_copy | cfc4c17f608c0b7fe1ae3046dc220d385c890caa | 3,650,204 |
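A quick behavioral sketch.
base = {"alpha": 0.1}

merged = _add_extra_kwargs(base, {"beta": 2})
assert merged == {"alpha": 0.1, "beta": 2}
assert base == {"alpha": 0.1}            # the original dict is not mutated
assert _add_extra_kwargs(base) is base   # no extras: the same dict is returned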
import random
import math
def Hiker(n,xLst,yLst,dist):
"""
Hiker is a function to generate lists of x and y
coordinates of n steps for a random walk of n steps
along with distance between the first and last point
"""
x0=0
y0=0
x=x0
y=y0
    # store the starting point at index 0; the loop below fills indices 1..n-1
    xLst[0] = x0
    yLst[0] = y0
    for i in range(n - 1):
rnum = random.random()
if rnum <= 0.19:
y=y+1
x=x
elif rnum <= 0.43:
y=y+1
x=x+1
elif rnum <= 0.60:
y=y
x=x+1
elif rnum <= 0.70:
y = y-1
x= x+1
elif rnum <= 0.72:
y = y-1
x = x
elif rnum <= 0.75:
y = y-1
x = x-1
elif rnum <= 0.85:
y = y
x = x-1
elif rnum <= 1.00:
y = y+1
x = x-1
xLst[i+1] = x
yLst[i+1] = y
    dist = math.sqrt((x - x0)**2 + (y - y0)**2)
return (xLst,yLst,dist) | abe341c8ecdc579de2b72f5af1ace3f07dd40dc3 | 3,650,205 |
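A short usage sketch.
import random

random.seed(0)                        # make the walk reproducible
n = 100
xs, ys = [0] * n, [0] * n             # pre-allocated coordinate lists
xs, ys, dist = Hiker(n, xs, ys, 0.0)
print(dist)                           # straight-line distance from start to end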
import pandas as pd

def extractdata(csvName='US_SP_Restructured.csv'):
"""
Parameters
----------
:string csvName: Name of csv file. e.g. 'US_SP_Restructured.csv'
"""
df = pd.read_csv(csvName)
df['index'] = df.index
# extract alternative specific variables
cost = pd.melt(df, id_vars=['quest', 'index'],
value_vars=['Car_Cost', 'CarRental_Cost', 'Bus_Cost',
'Plane_Cost', 'Train_Cost', 'TrH_Cost'],
value_name='cost')
tt = pd.melt(df, id_vars=['quest', 'index'],
value_vars=['Car_TT', 'CarRental_TT', 'Bus_TT',
'Plane_TT', 'Train_TT', 'TrH_TT'],
value_name='tt')
relib = pd.melt(df, id_vars=['quest', 'index'],
value_vars=['CarRelib', 'CarRentalRelib', 'BusRelib',
'PlaneRelib', 'TrainRelib', 'TrHRelib'],
value_name='reliability')
# extract generic variables
data_RP = df[[
'quest', 'index',
'DrvLicens', 'PblcTrst', 'Ag1825', 'Ag2545', 'Ag4565', 'Ag65M',
'Male', 'Fulltime', 'PrtTime', 'Unemplyd',
'Edu_Highschl', 'Edu_BSc', 'Edu_MscPhD',
'HH_Veh0', 'HH_Veh1', 'HH_Veh2M',
'HH_Adult1', 'HH_Adult2', 'HH_Adult3M',
'HH_Chld0', 'HH_Chld1', 'HH_Chld2M',
'HH_Inc020K', 'HH_Inc2060K', 'HH_Inc60KM',
'HH_Sngl', 'HH_SnglParent', 'HH_AllAddults',
'HH_Nuclear', 'P_Chld',
'BusCrwd', 'CarMorning', 'CarAfternoon', 'CarEve',
'O_MTL_US_max', 'O_Odr_US_max', 'D_Bstn_max', 'D_NYC_max', 'D_Maine_max',
'Tp_Onewy_max', 'Tp_2way_max',
'Tp_h06_max', 'Tp_h69_max', 'Tp_h915_max', 'Tp_h1519_max',
'Tp_h1924_max', 'Tp_h1524_max',
'Tp_Y2016_max', 'Tp_Y2017_max',
'Tp_Wntr_max', 'Tp_Sprng_max', 'Tp_Sumr_max', 'Tp_Fall_max',
'Tp_CarDrv_max', 'Tp_CarPsngr_max', 'Tp_CarShrRnt_max',
'Tp_Train_max', 'Tp_Bus_max', 'Tp_Plane_max', 'Tp_ModOdr_max',
'Tp_WrkSkl_max', 'Tp_Leisr_max', 'Tp_Shpng_max', 'Tp_ActOdr_max',
'Tp_NHotel1_max', 'Tp_NHotel2_max', 'Tp_NHotel3M_max',
'Tp_FreqMonthlMulti_max', 'Tp_FreqYearMulti_max',
'Tp_FreqYear1_max',
'Envrn_Car', 'Envrn_Train', 'Envrn_Bus', 'Envrn_Plane',
'Safe_Car', 'Safe_Train', 'Safe_Bus', 'Safe_Plane',
'Comf_Car', 'Comf_Train', 'Comf_Bus', 'Comf_Plane',
'Import_Cost', 'Import_TT', 'Import_Relib', 'Import_StartTime',
'Import_Freq', 'Import_Onboard', 'Import_Crwding'
]]
# extract alternatives
data_choice = df[['quest', 'index', 'New_SP_Choice']]
# extract availability
data_avail = df[['quest', 'index',
'AV_Car', 'AV_CarRental', 'AV_Bus', 'AV_Plane',
'AV_Train', 'AV_TrH']]
# extract indicators
data_ind = df[['quest', 'index',
'Envrn_Car', 'Envrn_Train', 'Envrn_Bus', 'Envrn_Plane',
'Safe_Car', 'Safe_Train', 'Safe_Bus', 'Safe_Plane',
'Comf_Car', 'Comf_Train', 'Comf_Bus', 'Comf_Plane']]
data_choice = data_choice.sort_values(['quest', 'index'])
cost = cost.sort_values(['quest', 'index', 'variable'])
tt = tt.sort_values(['quest', 'index', 'variable'])
relib = relib.sort_values(['quest', 'index', 'variable'])
data_RP = data_RP.sort_values(['quest', 'index'])
data_avail = data_avail.sort_values(['quest', 'index'])
data_ind = data_ind.sort_values(['quest', 'index'])
# make a copy and merge alternative specific variables
data_SP = cost
data_SP['tt'] = tt['tt']
data_SP['relib'] = relib['reliability']
data_SP['choice'] = data_SP['variable'].str.split('_', expand=True)[0]
data_SP = data_SP.reset_index(drop=True)
# check if everything is in order
print(data_SP.head(6))
# extract data arrays
dataset_y = data_choice[['New_SP_Choice']]
dataset_x_ng = data_SP[['cost', 'tt', 'relib']]
dataset_x_g = data_RP[[
'DrvLicens', 'PblcTrst',
'Ag1825', 'Ag2545', 'Ag4565', 'Ag65M',
'Male', 'Fulltime', # 'PrtTime', 'Unemplyd',
'Edu_Highschl', 'Edu_BSc', 'Edu_MscPhD',
'HH_Veh0', 'HH_Veh1', 'HH_Veh2M',
# 'HH_Adult1', 'HH_Adult2', 'HH_Adult3M',
'HH_Chld0', 'HH_Chld1', 'HH_Chld2M',
'HH_Inc020K', 'HH_Inc2060K', 'HH_Inc60KM',
# 'HH_Sngl', 'HH_SnglParent', 'HH_AllAddults',
# 'HH_Nuclear', # 'P_Chld',
# 'O_MTL_US_max', 'O_Odr_US_max',
# 'D_Bstn_max', 'D_NYC_max', 'D_Maine_max',
# 'Tp_Onewy_max', 'Tp_2way_max',
# 'Tp_h06_max', 'Tp_h69_max', 'Tp_h915_max',
# 'Tp_h1519_max', 'Tp_h1924_max', 'Tp_h1524_max',
# 'Tp_Y2016_max', 'Tp_Y2017_max',
# 'Tp_Wntr_max', 'Tp_Sprng_max', 'Tp_Sumr_max', 'Tp_Fall_max',
# 'Tp_CarDrv_max', 'Tp_CarPsngr_max', 'Tp_CarShrRnt_max',
# 'Tp_Train_max', 'Tp_Bus_max', 'Tp_Plane_max', 'Tp_ModOdr_max',
# 'Tp_WrkSkl_max', 'Tp_Leisr_max', 'Tp_Shpng_max',
# 'Tp_ActOdr_max',
# 'Tp_NHotel1_max', 'Tp_NHotel2_max', 'Tp_NHotel3M_max',
# 'Tp_FreqMonthlMulti_max', 'Tp_FreqYearMulti_max',
# 'Tp_FreqYear1_max',
]]
dataset_avail = data_avail[['AV_Bus', 'AV_CarRental', 'AV_Car',
'AV_Plane', 'AV_TrH', 'AV_Train']]
dataset_ind = data_ind[['Envrn_Car', 'Envrn_Train', 'Envrn_Bus', 'Envrn_Plane',
'Safe_Car', 'Safe_Train', 'Safe_Bus', 'Safe_Plane',
'Comf_Car', 'Comf_Train', 'Comf_Bus', 'Comf_Plane']]
n = df.shape[0]
y = dataset_y.values.reshape(n,)
x_ng = dataset_x_ng.values.reshape(n, 6, -1)/100.
x_g = dataset_x_g.values
avail = dataset_avail.values
ind = dataset_ind.values
return x_ng, x_g, y, avail, ind | e0a3f405da0e31e252b6110f84f81363c692d66a | 3,650,206 |
def has_field(entry: EntryType, field: str) -> bool:
"""Check if a given entry has non empty field"""
return has_data(get_field(entry, field)) | e13d973fde62e36764871fd3b565552ff46b359b | 3,650,207 |
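A comment-only sketch of the intended behavior; get_field and has_data are this module's helpers, and the entry shown is hypothetical.
# entry = {"title": "A study of X", "abstract": ""}
# has_field(entry, "title")     -> True
# has_field(entry, "abstract")  -> False   (present but empty counts as missing)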
def open_file(app_id, file_name, mode):
# type: (int, str, int) -> str
""" Call to open_file.
:param app_id: Application identifier.
:param file_name: File name reference.
:param mode: Open mode.
:return: The real file name.
"""
return _COMPSs.open_file(app_id, file_name, mode) | 7c38d219d4a867e72d90b873412ec7d5e5aad78a | 3,650,209 |
def correct_repeat_line():
""" Matches repeat spec above """
return "2|1|2|3|4|5|6|7" | b9c1e48c5043a042b9f6a6253cba6ae8ce1ca32c | 3,650,211 |