repo | path | url | code | docstring | language | partition |
---|---|---|---|---|---|---|
coursera-dl/coursera-dl | coursera/cookies.py | https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/cookies.py#L258-L276 | def find_cookies_for_class(cookies_file, class_name):
"""
Return a RequestsCookieJar containing the cookies for
.coursera.org and class.coursera.org found in the given cookies_file.
"""
path = "/" + class_name
def cookies_filter(c):
return c.domain == ".coursera.org" \
or (c.domain == "class.coursera.org" and c.path == path)
cj = get_cookie_jar(cookies_file)
new_cj = requests.cookies.RequestsCookieJar()
for c in filter(cookies_filter, cj):
new_cj.set_cookie(c)
return new_cj | Return a RequestsCookieJar containing the cookies for
.coursera.org and class.coursera.org found in the given cookies_file. | python | train |
astropy/photutils | photutils/aperture/mask.py | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/mask.py#L49-L93 | def _overlap_slices(self, shape):
"""
Calculate the slices for the overlapping part of the bounding
box and an array of the given shape.
Parameters
----------
shape : tuple of int
The ``(ny, nx)`` shape of array where the slices are to be
applied.
Returns
-------
slices_large : tuple of slices
A tuple of slice objects for each axis of the large array,
such that ``large_array[slices_large]`` extracts the region
of the large array that overlaps with the small array.
slices_small : slice
A tuple of slice objects for each axis of the small array,
such that ``small_array[slices_small]`` extracts the region
of the small array that is inside the large array.
"""
if len(shape) != 2:
raise ValueError('input shape must have 2 elements.')
xmin = self.bbox.ixmin
xmax = self.bbox.ixmax
ymin = self.bbox.iymin
ymax = self.bbox.iymax
if xmin >= shape[1] or ymin >= shape[0] or xmax <= 0 or ymax <= 0:
# no overlap of the aperture with the data
return None, None
slices_large = (slice(max(ymin, 0), min(ymax, shape[0])),
slice(max(xmin, 0), min(xmax, shape[1])))
slices_small = (slice(max(-ymin, 0),
min(ymax - ymin, shape[0] - ymin)),
slice(max(-xmin, 0),
min(xmax - xmin, shape[1] - xmin)))
return slices_large, slices_small | Calculate the slices for the overlapping part of the bounding
box and an array of the given shape.
Parameters
----------
shape : tuple of int
The ``(ny, nx)`` shape of array where the slices are to be
applied.
Returns
-------
slices_large : tuple of slices
A tuple of slice objects for each axis of the large array,
such that ``large_array[slices_large]`` extracts the region
of the large array that overlaps with the small array.
slices_small : slice
A tuple of slice objects for each axis of the small array,
such that ``small_array[slices_small]`` extracts the region
of the small array that is inside the large array. | python | train |
manahl/arctic | arctic/chunkstore/chunkstore.py | https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/chunkstore/chunkstore.py#L119-L168 | def delete(self, symbol, chunk_range=None, audit=None):
"""
Delete all chunks for a symbol, or optionally, chunks within a range
Parameters
----------
symbol : str
symbol name for the item
chunk_range: range object
a date range to delete
audit: dict
dict to store in the audit log
"""
if chunk_range is not None:
sym = self._get_symbol_info(symbol)
# read out chunks that fall within the range and filter out
# data within the range
df = self.read(symbol, chunk_range=chunk_range, filter_data=False)
row_adjust = len(df)
if not df.empty:
df = CHUNKER_MAP[sym[CHUNKER]].exclude(df, chunk_range)
# remove chunks, and update any remaining data
query = {SYMBOL: symbol}
query.update(CHUNKER_MAP[sym[CHUNKER]].to_mongo(chunk_range))
self._collection.delete_many(query)
self._mdata.delete_many(query)
self.update(symbol, df)
# update symbol metadata (rows and chunk count)
sym = self._get_symbol_info(symbol)
sym[LEN] -= row_adjust
sym[CHUNK_COUNT] = mongo_count(self._collection, filter={SYMBOL: symbol})
self._symbols.replace_one({SYMBOL: symbol}, sym)
else:
query = {SYMBOL: symbol}
self._collection.delete_many(query)
self._symbols.delete_many(query)
self._mdata.delete_many(query)
if audit is not None:
audit['symbol'] = symbol
if chunk_range is not None:
audit['rows_deleted'] = row_adjust
audit['action'] = 'range delete'
else:
audit['action'] = 'symbol delete'
self._audit.insert_one(audit) | Delete all chunks for a symbol, or optionally, chunks within a range
Parameters
----------
symbol : str
symbol name for the item
chunk_range: range object
a date range to delete
audit: dict
dict to store in the audit log | python | train |
facebook/watchman | build/fbcode_builder/utils.py | https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/build/fbcode_builder/utils.py#L42-L52 | def _inner_read_config(path):
'''
Helper to read a named config file.
The grossness with the global is a workaround for this python bug:
https://bugs.python.org/issue21591
The bug prevents us from defining either a local function or a lambda
in the scope of read_fbcode_builder_config below.
'''
global _project_dir
full_path = os.path.join(_project_dir, path)
return read_fbcode_builder_config(full_path) | Helper to read a named config file.
The grossness with the global is a workaround for this python bug:
https://bugs.python.org/issue21591
The bug prevents us from defining either a local function or a lambda
in the scope of read_fbcode_builder_config below. | python | train |
streamlink/streamlink | src/streamlink/plugin/api/http_session.py | https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugin/api/http_session.py#L110-L116 | def parse_cookies(self, cookies, **kwargs):
"""Parses a semi-colon delimited list of cookies.
Example: foo=bar;baz=qux
"""
for name, value in _parse_keyvalue_list(cookies):
self.cookies.set(name, value, **kwargs) | Parses a semi-colon delimited list of cookies.
Example: foo=bar;baz=qux | python | test |
hydraplatform/hydra-base | hydra_base/util/permissions.py | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/util/permissions.py#L70-L87 | def required_role(req_role):
"""
Decorator applied to functions requiring caller to possess the specified role
"""
def dec_wrapper(wfunc):
@wraps(wfunc)
def wrapped(*args, **kwargs):
user_id = kwargs.get("user_id")
try:
res = db.DBSession.query(RoleUser).filter(RoleUser.user_id==user_id).join(Role, Role.code==req_role).one()
except NoResultFound:
raise PermissionError("Permission denied. User %s does not have role %s"%
(user_id, req_role))
return wfunc(*args, **kwargs)
return wrapped
return dec_wrapper | Decorator applied to functions requiring caller to possess the specified role | python | train |
tensorflow/cleverhans | cleverhans/attacks/momentum_iterative_method.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/momentum_iterative_method.py#L43-L123 | def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments. See `parse_params` for documentation.
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
asserts = []
# If a data range was specified, check that the input was in that range
if self.clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(x,
tf.cast(self.clip_min,
x.dtype)))
if self.clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x,
tf.cast(self.clip_max,
x.dtype)))
# Initialize loop variables
momentum = tf.zeros_like(x)
adv_x = x
# Fix labels to the first model predictions for loss computation
y, _nb_classes = self.get_or_guess_labels(x, kwargs)
y = y / reduce_sum(y, 1, keepdims=True)
targeted = (self.y_target is not None)
def cond(i, _, __):
"""Iterate until number of iterations completed"""
return tf.less(i, self.nb_iter)
def body(i, ax, m):
"""Do a momentum step"""
logits = self.model.get_logits(ax)
loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
if targeted:
loss = -loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, ax)
# Normalize current gradient and add it to the accumulated gradient
red_ind = list(range(1, len(grad.get_shape())))
avoid_zero_div = tf.cast(1e-12, grad.dtype)
grad = grad / tf.maximum(
avoid_zero_div,
reduce_mean(tf.abs(grad), red_ind, keepdims=True))
m = self.decay_factor * m + grad
optimal_perturbation = optimize_linear(m, self.eps_iter, self.ord)
if self.ord == 1:
raise NotImplementedError("This attack hasn't been tested for ord=1."
"It's not clear that FGM makes a good inner "
"loop step for iterative optimization since "
"it updates just one coordinate at a time.")
# Update and clip adversarial example in current iteration
ax = ax + optimal_perturbation
ax = x + utils_tf.clip_eta(ax - x, self.ord, self.eps)
if self.clip_min is not None and self.clip_max is not None:
ax = utils_tf.clip_by_value(ax, self.clip_min, self.clip_max)
ax = tf.stop_gradient(ax)
return i + 1, ax, m
_, adv_x, _ = tf.while_loop(
cond, body, (tf.zeros([]), adv_x, momentum), back_prop=True,
maximum_iterations=self.nb_iter)
if self.sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
return adv_x | Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments. See `parse_params` for documentation. | python | train |
renweizhukov/pytwis | pytwis/pytwis.py | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L152-L186 | def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error) | Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise. | python | train |
google/grr | grr/server/grr_response_server/check_lib/checks.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/check_lib/checks.py#L785-L800 | def LoadChecksFromFiles(file_paths, overwrite_if_exists=True):
"""Load the checks defined in the specified files."""
loaded = []
for file_path in file_paths:
configs = LoadConfigsFromFile(file_path)
for conf in itervalues(configs):
check = Check(**conf)
# Validate will raise if the check doesn't load.
check.Validate()
loaded.append(check)
CheckRegistry.RegisterCheck(
check,
source="file:%s" % file_path,
overwrite_if_exists=overwrite_if_exists)
logging.debug("Loaded check %s from %s", check.check_id, file_path)
return loaded | Load the checks defined in the specified files. | python | train |
nutechsoftware/alarmdecoder | alarmdecoder/decoder.py | https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/decoder.py#L356-L377 | def fault_zone(self, zone, simulate_wire_problem=False):
"""
Faults a zone if we are emulating a zone expander.
:param zone: zone to fault
:type zone: int
:param simulate_wire_problem: Whether or not to simulate a wire fault
:type simulate_wire_problem: bool
"""
# Allow ourselves to also be passed an address/channel combination
# for zone expanders.
#
# Format (expander index, channel)
if isinstance(zone, tuple):
expander_idx, channel = zone
zone = self._zonetracker.expander_to_zone(expander_idx, channel)
status = 2 if simulate_wire_problem else 1
self.send("L{0:02}{1}\r".format(zone, status)) | Faults a zone if we are emulating a zone expander.
:param zone: zone to fault
:type zone: int
:param simulate_wire_problem: Whether or not to simulate a wire fault
:type simulate_wire_problem: bool | python | train |
pymupdf/PyMuPDF | fitz/fitz.py | https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2605-L2616 | def addStampAnnot(self, rect, stamp=0):
"""Add a 'rubber stamp' in a rectangle."""
CheckParent(self)
val = _fitz.Page_addStampAnnot(self, rect, stamp)
if not val: return
val.thisown = True
val.parent = weakref.proxy(self)
self._annot_refs[id(val)] = val
return val | Add a 'rubber stamp' in a rectangle. | python | train |
sffjunkie/astral | src/astral.py | https://github.com/sffjunkie/astral/blob/b0aa63fce692357cd33c2bf36c69ed5b6582440c/src/astral.py#L950-L989 | def dusk(self, date=None, local=True, use_elevation=True):
"""Calculates the dusk time (the time in the evening when the sun is a
certain number of degrees below the horizon. By default this is 6
degrees but can be changed by setting the
:attr:`solar_depression` property.)
:param date: The date for which to calculate the dusk time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:param use_elevation: True = Return times that allow for the location's elevation;
False = Return times that don't use elevation.
If not specified then times will take elevation into account.
:type use_elevation: bool
:returns: The date and time at which dusk occurs.
:rtype: :class:`~datetime.datetime`
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if self.astral is None:
self.astral = Astral()
if date is None:
date = datetime.date.today()
elevation = self.elevation if use_elevation else 0
dusk = self.astral.dusk_utc(date, self.latitude, self.longitude, observer_elevation=elevation)
if local:
return dusk.astimezone(self.tz)
else:
return dusk | Calculates the dusk time (the time in the evening when the sun is a
certain number of degrees below the horizon. By default this is 6
degrees but can be changed by setting the
:attr:`solar_depression` property.)
:param date: The date for which to calculate the dusk time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:param use_elevation: True = Return times that allow for the location's elevation;
False = Return times that don't use elevation.
If not specified then times will take elevation into account.
:type use_elevation: bool
:returns: The date and time at which dusk occurs.
:rtype: :class:`~datetime.datetime` | python | train |
quantopian/trading_calendars | trading_calendars/calendar_utils.py | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/calendar_utils.py#L257-L285 | def resolve_alias(self, name):
"""
Resolve a calendar alias for retrieval.
Parameters
----------
name : str
The name of the requested calendar.
Returns
-------
canonical_name : str
The real name of the calendar to create/return.
"""
seen = []
while name in self._aliases:
seen.append(name)
name = self._aliases[name]
# This is O(N ** 2), but if there's an alias chain longer than 2,
# something strange has happened.
if name in seen:
seen.append(name)
raise CyclicCalendarAlias(
cycle=" -> ".join(repr(k) for k in seen)
)
return name | Resolve a calendar alias for retrieval.
Parameters
----------
name : str
The name of the requested calendar.
Returns
-------
canonical_name : str
The real name of the calendar to create/return. | python | train |
peeringdb/peeringdb-py | peeringdb/config.py | https://github.com/peeringdb/peeringdb-py/blob/cf2060a1d5ef879a01cf849e54b7756909ab2661/peeringdb/config.py#L59-L68 | def read_config(conf_dir=DEFAULT_CONFIG_DIR):
"Find and read config file for a directory, return None if not found."
conf_path = os.path.expanduser(conf_dir)
if not os.path.exists(conf_path):
# only throw if not default
if conf_dir != DEFAULT_CONFIG_DIR:
raise IOError("Config directory not found at %s" % (conf_path, ))
return munge.load_datafile('config', conf_path, default=None) | Find and read config file for a directory, return None if not found. | python | train |
google/apitools | apitools/base/py/credentials_lib.py | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L643-L668 | def LockedWrite(self, cache_data):
"""Acquire an interprocess lock and write a string.
This method safely acquires the locks then writes a string
to the cache file. If the string is written successfully
the function will return True, if the write fails for any
reason it will return False.
Args:
cache_data: string or bytes to write.
Returns:
bool: success
"""
if isinstance(cache_data, six.text_type):
cache_data = cache_data.encode(encoding=self._encoding)
with self._thread_lock:
if not self._EnsureFileExists():
return False
with self._process_lock_getter() as acquired_plock:
if not acquired_plock:
return False
with open(self._filename, 'wb') as f:
f.write(cache_data)
return True | Acquire an interprocess lock and write a string.
This method safely acquires the locks then writes a string
to the cache file. If the string is written successfully
the function will return True, if the write fails for any
reason it will return False.
Args:
cache_data: string or bytes to write.
Returns:
bool: success | python | train |
tensorflow/tensorboard | tensorboard/compat/tensorflow_stub/io/gfile.py | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/compat/tensorflow_stub/io/gfile.py#L184-L229 | def read(self, filename, binary_mode=False, size=None, offset=None):
"""Reads contents of a file to a string.
Args:
filename: string, a path
binary_mode: bool, read as binary if True, otherwise text
size: int, number of bytes or characters to read, otherwise
read all the contents of the file from the offset
offset: int, offset into file to read from, otherwise read
from the very beginning
Returns:
Subset of the contents of the file as a string or bytes.
"""
s3 = boto3.resource("s3")
bucket, path = self.bucket_and_path(filename)
args = {}
endpoint = 0
if size is not None or offset is not None:
if offset is None:
offset = 0
endpoint = '' if size is None else (offset + size)
args['Range'] = 'bytes={}-{}'.format(offset, endpoint)
try:
stream = s3.Object(bucket, path).get(**args)['Body'].read()
except botocore.exceptions.ClientError as exc:
if exc.response['Error']['Code'] == '416':
if size is not None:
# Asked for too much, so request just to the end. Do this
# in a second request so we don't check length in all cases.
client = boto3.client("s3")
obj = client.head_object(Bucket=bucket, Key=path)
len = obj['ContentLength']
endpoint = min(len, offset + size)
if offset == endpoint:
# Asked for no bytes, so just return empty
stream = b''
else:
args['Range'] = 'bytes={}-{}'.format(offset, endpoint)
stream = s3.Object(bucket, path).get(**args)['Body'].read()
else:
raise
if binary_mode:
return bytes(stream)
else:
return stream.decode('utf-8') | Reads contents of a file to a string.
Args:
filename: string, a path
binary_mode: bool, read as binary if True, otherwise text
size: int, number of bytes or characters to read, otherwise
read all the contents of the file from the offset
offset: int, offset into file to read from, otherwise read
from the very beginning
Returns:
Subset of the contents of the file as a string or bytes. | python | train |
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L507-L566 | def rs_find_error_locator(synd, nsym, erase_loc=None, erase_count=0):
'''Find error/errata locator and evaluator polynomials with Berlekamp-Massey algorithm'''
# The idea is that BM will iteratively estimate the error locator polynomial.
# To do this, it will compute a Discrepancy term called Delta, which will tell us if the error locator polynomial needs an update or not
# (hence why it's called discrepancy: it tells us when we are getting off board from the correct value).
# Init the polynomials
if erase_loc: # if the erasure locator polynomial is supplied, we init with its value, so that we include erasures in the final locator polynomial
err_loc = bytearray(erase_loc)
old_loc = bytearray(erase_loc)
else:
err_loc = bytearray([1]) # This is the main variable we want to fill, also called Sigma in other notations or more formally the errors/errata locator polynomial.
old_loc = bytearray([1]) # BM is an iterative algorithm, and we need the errata locator polynomial of the previous iteration in order to update other necessary variables.
#L = 0 # update flag variable, not needed here because we use an alternative equivalent way of checking if update is needed (but using the flag could potentially be faster depending on if using length(list) is taking linear time in your language, here in Python it's constant so it's as fast.
# Fix the syndrome shifting: when computing the syndrome, some implementations may prepend a 0 coefficient for the lowest degree term (the constant). This is a case of syndrome shifting, thus the syndrome will be bigger than the number of ecc symbols (I don't know what purpose serves this shifting). If that's the case, then we need to account for the syndrome shifting when we use the syndrome such as inside BM, by skipping those prepended coefficients.
# Another way to detect the shifting is to detect the 0 coefficients: by definition, a syndrome does not contain any 0 coefficient (except if there are no errors/erasures, in this case they are all 0). This however doesn't work with the modified Forney syndrome, which set to 0 the coefficients corresponding to erasures, leaving only the coefficients corresponding to errors.
synd_shift = 0
if len(synd) > nsym: synd_shift = len(synd) - nsym
for i in xrange(nsym-erase_count): # generally: nsym-erase_count == len(synd), except when you input a partial erase_loc and using the full syndrome instead of the Forney syndrome, in which case nsym-erase_count is more correct (len(synd) will fail badly with IndexError).
if erase_loc: # if an erasures locator polynomial was provided to init the errors locator polynomial, then we must skip the FIRST erase_count iterations (not the last iterations, this is very important!)
K = erase_count+i+synd_shift
else: # if erasures locator is not provided, then either there's no erasures to account or we use the Forney syndromes, so we don't need to use erase_count nor erase_loc (the erasures have been trimmed out of the Forney syndromes).
K = i+synd_shift
# Compute the discrepancy Delta
# Here is the close-to-the-books operation to compute the discrepancy Delta: it's a simple polynomial multiplication of error locator with the syndromes, and then we get the Kth element.
#delta = gf_poly_mul(err_loc[::-1], synd)[K] # theoretically it should be gf_poly_add(synd[::-1], [1])[::-1] instead of just synd, but it seems it's not absolutely necessary to correctly decode.
# But this can be optimized: since we only need the Kth element, we don't need to compute the polynomial multiplication for any other element but the Kth. Thus to optimize, we compute the polymul only at the item we need, skipping the rest (avoiding a nested loop, thus we are linear time instead of quadratic).
# This optimization is actually described in several figures of the book "Algebraic codes for data transmission", Blahut, Richard E., 2003, Cambridge university press.
delta = synd[K]
for j in xrange(1, len(err_loc)):
delta ^= gf_mul(err_loc[-(j+1)], synd[K - j]) # delta is also called discrepancy. Here we do a partial polynomial multiplication (ie, we compute the polynomial multiplication only for the term of degree K). Should be equivalent to brownanrs.polynomial.mul_at().
#print "delta", K, delta, list(gf_poly_mul(err_loc[::-1], synd)) # debugline
# Shift polynomials to compute the next degree
old_loc = old_loc + bytearray([0])
# Iteratively estimate the errata locator and evaluator polynomials
if delta != 0: # Update only if there's a discrepancy
if len(old_loc) > len(err_loc): # Rule B (rule A is implicitly defined because rule A just says that we skip any modification for this iteration)
#if 2*L <= K+erase_count: # equivalent to len(old_loc) > len(err_loc), as long as L is correctly computed
# Computing errata locator polynomial Sigma
new_loc = gf_poly_scale(old_loc, delta)
old_loc = gf_poly_scale(err_loc, gf_inverse(delta)) # effectively we are doing err_loc * 1/delta = err_loc // delta
err_loc = new_loc
# Update the update flag
#L = K - L # the update flag L is tricky: in Blahut's schema, it's mandatory to use `L = K - L - erase_count` (and indeed in a previous draft of this function, if you forgot to do `- erase_count` it would lead to correcting only 2*(errors+erasures) <= (n-k) instead of 2*errors+erasures <= (n-k)), but in this latest draft, this will lead to a wrong decoding in some cases where it should correctly decode! Thus you should try with and without `- erase_count` to update L on your own implementation and see which one works OK without producing wrong decoding failures.
# Update with the discrepancy
err_loc = gf_poly_add(err_loc, gf_poly_scale(old_loc, delta))
# Check if the result is correct, that there's not too many errors to correct
err_loc = list(itertools.dropwhile(lambda x: x == 0, err_loc)) # drop leading 0s, else errs will not be of the correct size
errs = len(err_loc) - 1
if (errs-erase_count) * 2 + erase_count > nsym:
raise ReedSolomonError("Too many errors to correct")
return err_loc | Find error/errata locator and evaluator polynomials with Berlekamp-Massey algorithm | python | train |
deepmind/sonnet | sonnet/python/modules/util.py | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/util.py#L509-L531 | def _format_device(var):
"""Returns the device with an annotation specifying `ResourceVariable`.
"legacy" means a normal tf.Variable while "resource" means a ResourceVariable.
For example:
`(legacy)`
`(resource)`
`/job:learner/task:0/device:CPU:* (legacy)`
`/job:learner/task:0/device:CPU:* (resource)`
Args:
var: The Tensorflow Variable to print.
"""
if var.dtype.name.endswith("_ref"):
resource_var_annotation = "(legacy)"
else:
resource_var_annotation = "(resource)"
if var.device:
return "{} {}".format(var.device, resource_var_annotation)
else:
return resource_var_annotation | Returns the device with an annotation specifying `ResourceVariable`.
"legacy" means a normal tf.Variable while "resource" means a ResourceVariable.
For example:
`(legacy)`
`(resource)`
`/job:learner/task:0/device:CPU:* (legacy)`
`/job:learner/task:0/device:CPU:* (resource)`
Args:
var: The Tensorflow Variable to print. | python | train |
adaptive-learning/proso-apps | proso_concepts/models.py | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_concepts/models.py#L250-L300 | def recalculate_concepts(self, concepts, lang=None):
"""
Recalculated given concepts for given users
Args:
concepts (dict): user id (int -> set of concepts to recalculate)
lang(Optional[str]): language used to get items in all concepts (cached).
Defaults to None, in that case are get items only in used concepts
"""
if len(concepts) == 0:
return
if lang is None:
items = Concept.objects.get_concept_item_mapping(concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values()))))
else:
items = Concept.objects.get_concept_item_mapping(lang=lang)
environment = get_environment()
mastery_threshold = get_mastery_trashold()
for user, concepts in concepts.items():
all_items = list(set(flatten([items[c] for c in concepts])))
answer_counts = environment.number_of_answers_more_items(all_items, user)
correct_answer_counts = environment.number_of_correct_answers_more_items(all_items, user)
predictions = dict(list(zip(all_items, get_predictive_model().
predict_more_items(environment, user, all_items, time=get_time_for_knowledge_overview()))))
new_user_stats = []
stats_to_delete_condition = Q()
for concept in concepts:
answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate(
time_spent=Sum("response_time"),
sessions=Count("session", True),
time_first=Min("time"),
time_last=Max("time"),
)
stats = {
"answer_count": sum(answer_counts[i] for i in items[concept]),
"correct_answer_count": sum(correct_answer_counts[i] for i in items[concept]),
"item_count": len(items[concept]),
"practiced_items_count": sum([answer_counts[i] > 0 for i in items[concept]]),
"mastered_items_count": sum([predictions[i] >= mastery_threshold for i in items[concept]]),
"prediction": sum([predictions[i] for i in items[concept]]) / len(items[concept]),
"time_spent": answer_aggregates["time_spent"] / 1000,
"session_count": answer_aggregates["sessions"],
"time_first": answer_aggregates["time_first"].timestamp(),
"time_last": answer_aggregates["time_last"].timestamp(),
}
stats_to_delete_condition |= Q(user=user, concept=concept)
for stat_name, value in stats.items():
new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value))
self.filter(stats_to_delete_condition).delete()
self.bulk_create(new_user_stats) |
")",
"self",
".",
"bulk_create",
"(",
"new_user_stats",
")"
] | Recalculates given concepts for given users
Args:
concepts (dict): mapping of user id (int) to the set of concepts to recalculate
lang(Optional[str]): language used to get items in all concepts (cached).
Defaults to None; in that case items are fetched only for the used concepts | [
"Recalculated",
"given",
"concepts",
"for",
"given",
"users"
] | python | train |
rlabbe/filterpy | filterpy/stats/stats.py | https://github.com/rlabbe/filterpy/blob/8123214de798ffb63db968bb0b9492ee74e77950/filterpy/stats/stats.py#L63-L108 | def mahalanobis(x, mean, cov):
"""
Computes the Mahalanobis distance between the state vector x from the
Gaussian `mean` with covariance `cov`. This can be thought of as the number
of standard deviations x is from the mean, i.e. a return value of 3 means
x is 3 std from mean.
Parameters
----------
x : (N,) array_like, or float
Input state vector
mean : (N,) array_like, or float
mean of multivariate Gaussian
cov : (N, N) array_like or float
covariance of the multivariate Gaussian
Returns
-------
mahalanobis : double
The Mahalanobis distance between vectors `x` and `mean`
Examples
--------
>>> mahalanobis(x=3., mean=3.5, cov=4.**2) # univariate case
0.125
>>> mahalanobis(x=3., mean=6, cov=1) # univariate, 3 std away
3.0
>>> mahalanobis([1., 2], [1.1, 3.5], [[1., .1],[.1, 13]])
0.42533327058913922
"""
x = _validate_vector(x)
mean = _validate_vector(mean)
if x.shape != mean.shape:
raise ValueError("length of input vectors must be the same")
y = x - mean
S = np.atleast_2d(cov)
dist = float(np.dot(np.dot(y.T, inv(S)), y))
return math.sqrt(dist) | [
"def",
"mahalanobis",
"(",
"x",
",",
"mean",
",",
"cov",
")",
":",
"x",
"=",
"_validate_vector",
"(",
"x",
")",
"mean",
"=",
"_validate_vector",
"(",
"mean",
")",
"if",
"x",
".",
"shape",
"!=",
"mean",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"length of input vectors must be the same\"",
")",
"y",
"=",
"x",
"-",
"mean",
"S",
"=",
"np",
".",
"atleast_2d",
"(",
"cov",
")",
"dist",
"=",
"float",
"(",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"y",
".",
"T",
",",
"inv",
"(",
"S",
")",
")",
",",
"y",
")",
")",
"return",
"math",
".",
"sqrt",
"(",
"dist",
")"
] | Computes the Mahalanobis distance between the state vector x from the
Gaussian `mean` with covariance `cov`. This can be thought of as the number
of standard deviations x is from the mean, i.e. a return value of 3 means
x is 3 std from mean.
Parameters
----------
x : (N,) array_like, or float
Input state vector
mean : (N,) array_like, or float
mean of multivariate Gaussian
cov : (N, N) array_like or float
covariance of the multivariate Gaussian
Returns
-------
mahalanobis : double
The Mahalanobis distance between vectors `x` and `mean`
Examples
--------
>>> mahalanobis(x=3., mean=3.5, cov=4.**2) # univariate case
0.125
>>> mahalanobis(x=3., mean=6, cov=1) # univariate, 3 std away
3.0
>>> mahalanobis([1., 2], [1.1, 3.5], [[1., .1],[.1, 13]])
0.42533327058913922 | [
"Computes",
"the",
"Mahalanobis",
"distance",
"between",
"the",
"state",
"vector",
"x",
"from",
"the",
"Gaussian",
"mean",
"with",
"covariance",
"cov",
".",
"This",
"can",
"be",
"thought",
"as",
"the",
"number",
"of",
"standard",
"deviations",
"x",
"is",
"from",
"the",
"mean",
"i",
".",
"e",
".",
"a",
"return",
"value",
"of",
"3",
"means",
"x",
"is",
"3",
"std",
"from",
"mean",
"."
] | python | train |
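The last doctest above can be cross-checked against SciPy, whose implementation expects the inverse covariance matrix rather than the covariance itself. A minimal sketch (assuming NumPy and SciPy are installed; the input values are taken directly from the docstring example):

import numpy as np
from scipy.spatial.distance import mahalanobis as scipy_mahalanobis

cov = np.array([[1.0, 0.1], [0.1, 13.0]])
VI = np.linalg.inv(cov)  # SciPy takes the inverse covariance matrix
d = scipy_mahalanobis([1.0, 2.0], [1.1, 3.5], VI)
print(d)  # ~0.4253, matching the docstring example above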
mitsei/dlkit | dlkit/json_/resource/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/sessions.py#L2992-L3009 | def has_child_bins(self, bin_id):
"""Tests if a bin has any children.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the ``bin_id`` has children,
``false`` otherwise
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.has_child_bins
if self._catalog_session is not None:
return self._catalog_session.has_child_catalogs(catalog_id=bin_id)
return self._hierarchy_session.has_children(id_=bin_id) | [
"def",
"has_child_bins",
"(",
"self",
",",
"bin_id",
")",
":",
"# Implemented from template for",
"# osid.resource.BinHierarchySession.has_child_bins",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"has_child_catalogs",
"(",
"catalog_id",
"=",
"bin_id",
")",
"return",
"self",
".",
"_hierarchy_session",
".",
"has_children",
"(",
"id_",
"=",
"bin_id",
")"
] | Tests if a bin has any children.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the ``bin_id`` has children,
``false`` otherwise
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | [
"Tests",
"if",
"a",
"bin",
"has",
"any",
"children",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/analysis/molecule_structure_comparator.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/molecule_structure_comparator.py#L103-L113 | def are_equal(self, mol1, mol2):
"""
Compare the bond table of the two molecules.
Args:
mol1: first molecule. pymatgen Molecule object.
mol2: second molecule. pymatgen Molecule object.
"""
b1 = set(self._get_bonds(mol1))
b2 = set(self._get_bonds(mol2))
return b1 == b2 | [
"def",
"are_equal",
"(",
"self",
",",
"mol1",
",",
"mol2",
")",
":",
"b1",
"=",
"set",
"(",
"self",
".",
"_get_bonds",
"(",
"mol1",
")",
")",
"b2",
"=",
"set",
"(",
"self",
".",
"_get_bonds",
"(",
"mol2",
")",
")",
"return",
"b1",
"==",
"b2"
] | Compare the bond table of the two molecules.
Args:
mol1: first molecule. pymatgen Molecule object.
mol2: second molecule. pymatgen Molecule object. | [
"Compare",
"the",
"bond",
"table",
"of",
"the",
"two",
"molecules",
"."
] | python | train |
mitsei/dlkit | dlkit/aws_adapter/repository/aws_utils.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/aws_adapter/repository/aws_utils.py#L12-L31 | def get_aws_s3_handle(config_map):
"""Convenience function for getting AWS S3 objects
Added by [email protected], Jan 9, 2015
Added to aws_adapter build by [email protected], Jan 25, 2015, and
added support for Configuration
May 25, 2017: Switch to boto3
"""
url = 'https://' + config_map['s3_bucket'] + '.s3.amazonaws.com'
if not AWS_CLIENT.is_aws_s3_client_set():
client = boto3.client(
's3',
aws_access_key_id=config_map['put_public_key'],
aws_secret_access_key=config_map['put_private_key']
)
AWS_CLIENT.set_aws_s3_client(client)
else:
client = AWS_CLIENT.s3
return client, url | [
"def",
"get_aws_s3_handle",
"(",
"config_map",
")",
":",
"url",
"=",
"'https://'",
"+",
"config_map",
"[",
"'s3_bucket'",
"]",
"+",
"'.s3.amazonaws.com'",
"if",
"not",
"AWS_CLIENT",
".",
"is_aws_s3_client_set",
"(",
")",
":",
"client",
"=",
"boto3",
".",
"client",
"(",
"'s3'",
",",
"aws_access_key_id",
"=",
"config_map",
"[",
"'put_public_key'",
"]",
",",
"aws_secret_access_key",
"=",
"config_map",
"[",
"'put_private_key'",
"]",
")",
"AWS_CLIENT",
".",
"set_aws_s3_client",
"(",
"client",
")",
"else",
":",
"client",
"=",
"AWS_CLIENT",
".",
"s3",
"return",
"client",
",",
"url"
] | Convenience function for getting AWS S3 objects
Added by [email protected], Jan 9, 2015
Added to aws_adapter build by [email protected], Jan 25, 2015, and
added support for Configuration
May 25, 2017: Switch to boto3 | [
"Convenience",
"function",
"for",
"getting",
"AWS",
"S3",
"objects"
] | python | train |
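A minimal sketch of how this helper might be called; only the dictionary keys are taken from the function above, while the bucket name and credentials are placeholder assumptions:

# Hypothetical configuration; keys mirror those read by get_aws_s3_handle().
config_map = {
    's3_bucket': 'example-bucket',
    'put_public_key': 'AKIA-EXAMPLE-KEY-ID',
    'put_private_key': 'example-secret-key',
}

client, url = get_aws_s3_handle(config_map)
# url == 'https://example-bucket.s3.amazonaws.com'
# client is a boto3 S3 client; later calls reuse the cached AWS_CLIENT instance.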
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4538-L4553 | def get_stp_mst_detail_output_msti_port_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
interface_type = ET.SubElement(port, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_stp_mst_detail_output_msti_port_interface_type",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_stp_mst_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_stp_mst_detail\"",
")",
"config",
"=",
"get_stp_mst_detail",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_stp_mst_detail",
",",
"\"output\"",
")",
"msti",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"msti\"",
")",
"instance_id_key",
"=",
"ET",
".",
"SubElement",
"(",
"msti",
",",
"\"instance-id\"",
")",
"instance_id_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'instance_id'",
")",
"port",
"=",
"ET",
".",
"SubElement",
"(",
"msti",
",",
"\"port\"",
")",
"interface_type",
"=",
"ET",
".",
"SubElement",
"(",
"port",
",",
"\"interface-type\"",
")",
"interface_type",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'interface_type'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
xtrementl/focus | focus/environment/cli.py | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/cli.py#L194-L209 | def _get_plugin_parser(self, plugin_obj):
""" Creates a plugin argument parser.
`plugin_obj`
``Plugin`` object.
Returns ``FocusArgParser`` object.
"""
prog_name = 'focus ' + plugin_obj.command
desc = (plugin_obj.__doc__ or '').strip()
parser = FocusArgParser(prog=prog_name, description=desc)
plugin_obj.setup_parser(parser)
return parser | [
"def",
"_get_plugin_parser",
"(",
"self",
",",
"plugin_obj",
")",
":",
"prog_name",
"=",
"'focus '",
"+",
"plugin_obj",
".",
"command",
"desc",
"=",
"(",
"plugin_obj",
".",
"__doc__",
"or",
"''",
")",
".",
"strip",
"(",
")",
"parser",
"=",
"FocusArgParser",
"(",
"prog",
"=",
"prog_name",
",",
"description",
"=",
"desc",
")",
"plugin_obj",
".",
"setup_parser",
"(",
"parser",
")",
"return",
"parser"
] | Creates a plugin argument parser.
`plugin_obj`
``Plugin`` object.
Returns ``FocusArgParser`` object. | [
"Creates",
"a",
"plugin",
"argument",
"parser",
"."
] | python | train |
pantsbuild/pants | src/python/pants/build_graph/build_graph.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/build_graph.py#L115-L122 | def apply_injectables(self, targets):
"""Given an iterable of `Target` instances, apply their transitive injectables."""
target_types = {type(t) for t in targets}
target_subsystem_deps = {s for s in itertools.chain(*(t.subsystems() for t in target_types))}
for subsystem in target_subsystem_deps:
# TODO: The is_initialized() check is primarily for tests and would be nice to do away with.
if issubclass(subsystem, InjectablesMixin) and subsystem.is_initialized():
subsystem.global_instance().injectables(self) | [
"def",
"apply_injectables",
"(",
"self",
",",
"targets",
")",
":",
"target_types",
"=",
"{",
"type",
"(",
"t",
")",
"for",
"t",
"in",
"targets",
"}",
"target_subsystem_deps",
"=",
"{",
"s",
"for",
"s",
"in",
"itertools",
".",
"chain",
"(",
"*",
"(",
"t",
".",
"subsystems",
"(",
")",
"for",
"t",
"in",
"target_types",
")",
")",
"}",
"for",
"subsystem",
"in",
"target_subsystem_deps",
":",
"# TODO: The is_initialized() check is primarily for tests and would be nice to do away with.",
"if",
"issubclass",
"(",
"subsystem",
",",
"InjectablesMixin",
")",
"and",
"subsystem",
".",
"is_initialized",
"(",
")",
":",
"subsystem",
".",
"global_instance",
"(",
")",
".",
"injectables",
"(",
"self",
")"
] | Given an iterable of `Target` instances, apply their transitive injectables. | [
"Given",
"an",
"iterable",
"of",
"Target",
"instances",
"apply",
"their",
"transitive",
"injectables",
"."
] | python | train |
saltstack/salt | salt/states/ipset.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ipset.py#L312-L351 | def flush(name, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.7.0
Flush current ipset set
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
set_check = __salt__['ipset.check_set'](name)
if set_check is False:
ret['result'] = False
ret['comment'] = ('ipset set {0} does not exist for {1}'
.format(name, family))
return ret
if __opts__['test']:
ret['comment'] = 'ipset entries in set {0} for {1} would be flushed'.format(
name,
family)
return ret
if __salt__['ipset.flush'](name, family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Flushed ipset entries from set {0} for {1}'.format(
name,
family
)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to flush ipset entries from set {0} for {1}' \
''.format(name, family)
return ret | [
"def",
"flush",
"(",
"name",
",",
"family",
"=",
"'ipv4'",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"''",
"}",
"set_check",
"=",
"__salt__",
"[",
"'ipset.check_set'",
"]",
"(",
"name",
")",
"if",
"set_check",
"is",
"False",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'ipset set {0} does not exist for {1}'",
".",
"format",
"(",
"name",
",",
"family",
")",
")",
"return",
"ret",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'ipset entries in set {0} for {1} would be flushed'",
".",
"format",
"(",
"name",
",",
"family",
")",
"return",
"ret",
"if",
"__salt__",
"[",
"'ipset.flush'",
"]",
"(",
"name",
",",
"family",
")",
":",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'locale'",
":",
"name",
"}",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Flushed ipset entries from set {0} for {1}'",
".",
"format",
"(",
"name",
",",
"family",
")",
"return",
"ret",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to flush ipset entries from set {0} for {1}'",
"''",
".",
"format",
"(",
"name",
",",
"family",
")",
"return",
"ret"
] | .. versionadded:: 2014.7.0
Flush current ipset set
family
Networking family, either ipv4 or ipv6 | [
"..",
"versionadded",
"::",
"2014",
".",
"7",
".",
"0"
] | python | train |
lcharleux/argiope | argiope/mesh.py | https://github.com/lcharleux/argiope/blob/8170e431362dc760589f7d141090fd133dece259/argiope/mesh.py#L1153-L1179 | def _make_conn(shape):
"""
Connectivity builder using Numba for speed boost.
"""
shape = np.array(shape)
Ne = shape.prod()
if len(shape) == 2:
nx, ny = np.array(shape) +1
conn = np.zeros((Ne, 4), dtype = np.int32)
counter = 0
pattern = np.array([0,1,1+nx,nx])
for j in range(shape[1]):
for i in range(shape[0]):
conn[counter] = pattern + 1 + i + j*nx
counter += 1
if len(shape) == 3:
nx, ny, nz = np.array(shape) +1
conn = np.zeros((Ne, 8), dtype = np.int32)
counter = 0
pattern = np.array([0,1,1+nx,nx,nx*ny,1+nx*ny,1+(nx+1)*ny,(nx+1)*ny])
for k in range(shape[2]):
for j in range(shape[1]):
for i in range(shape[0]):
conn[counter] = pattern + 1 + i + j*nx+ k*nx*ny
counter += 1
return conn | [
"def",
"_make_conn",
"(",
"shape",
")",
":",
"shape",
"=",
"np",
".",
"array",
"(",
"shape",
")",
"Ne",
"=",
"shape",
".",
"prod",
"(",
")",
"if",
"len",
"(",
"shape",
")",
"==",
"2",
":",
"nx",
",",
"ny",
"=",
"np",
".",
"array",
"(",
"shape",
")",
"+",
"1",
"conn",
"=",
"np",
".",
"zeros",
"(",
"(",
"Ne",
",",
"4",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"counter",
"=",
"0",
"pattern",
"=",
"np",
".",
"array",
"(",
"[",
"0",
",",
"1",
",",
"1",
"+",
"nx",
",",
"nx",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"shape",
"[",
"1",
"]",
")",
":",
"for",
"i",
"in",
"range",
"(",
"shape",
"[",
"0",
"]",
")",
":",
"conn",
"[",
"counter",
"]",
"=",
"pattern",
"+",
"1",
"+",
"i",
"+",
"j",
"*",
"nx",
"counter",
"+=",
"1",
"if",
"len",
"(",
"shape",
")",
"==",
"3",
":",
"nx",
",",
"ny",
",",
"nz",
"=",
"np",
".",
"array",
"(",
"shape",
")",
"+",
"1",
"conn",
"=",
"np",
".",
"zeros",
"(",
"(",
"Ne",
",",
"8",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"counter",
"=",
"0",
"pattern",
"=",
"np",
".",
"array",
"(",
"[",
"0",
",",
"1",
",",
"1",
"+",
"nx",
",",
"nx",
",",
"nx",
"*",
"ny",
",",
"1",
"+",
"nx",
"*",
"ny",
",",
"1",
"+",
"(",
"nx",
"+",
"1",
")",
"*",
"ny",
",",
"(",
"nx",
"+",
"1",
")",
"*",
"ny",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"shape",
"[",
"2",
"]",
")",
":",
"for",
"j",
"in",
"range",
"(",
"shape",
"[",
"1",
"]",
")",
":",
"for",
"i",
"in",
"range",
"(",
"shape",
"[",
"0",
"]",
")",
":",
"conn",
"[",
"counter",
"]",
"=",
"pattern",
"+",
"1",
"+",
"i",
"+",
"j",
"*",
"nx",
"+",
"k",
"*",
"nx",
"*",
"ny",
"counter",
"+=",
"1",
"return",
"conn"
] | Connectivity builder using Numba for speed boost. | [
"Connectivity",
"builder",
"using",
"Numba",
"for",
"speed",
"boost",
"."
] | python | test |
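To make the 2-D branch concrete, here is what it produces for a 2 x 2 element grid (an illustrative sketch; it assumes NumPy is available and that the private helper is imported from argiope.mesh):

from argiope.mesh import _make_conn  # private helper, shown above

conn = _make_conn((2, 2))  # 2 x 2 quads on a 3 x 3 node grid, 1-based node ids
# conn is:
# [[1, 2, 5, 4],
#  [2, 3, 6, 5],
#  [4, 5, 8, 7],
#  [5, 6, 9, 8]]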
MartinThoma/hwrt | bin/convert.py | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/bin/convert.py#L28-L33 | def _str2array(d):
""" Reconstructs a numpy array from a plain-text string """
if type(d) == list:
return np.asarray([_str2array(s) for s in d])
ins = StringIO(d)
return np.loadtxt(ins) | [
"def",
"_str2array",
"(",
"d",
")",
":",
"if",
"type",
"(",
"d",
")",
"==",
"list",
":",
"return",
"np",
".",
"asarray",
"(",
"[",
"_str2array",
"(",
"s",
")",
"for",
"s",
"in",
"d",
"]",
")",
"ins",
"=",
"StringIO",
"(",
"d",
")",
"return",
"np",
".",
"loadtxt",
"(",
"ins",
")"
] | Reconstructs a numpy array from a plain-text string | [
"Reconstructs",
"a",
"numpy",
"array",
"from",
"a",
"plain",
"-",
"text",
"string"
] | python | train |
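Its behaviour is easiest to see with two small calls (an illustrative sketch; numpy and StringIO are imported by the surrounding module):

_str2array("1 2\n3 4")
# -> array([[1., 2.], [3., 4.]])  (np.loadtxt parses the text block)

_str2array(["1 2", "3 4"])
# -> array([[1., 2.], [3., 4.]])  (list elements are converted recursively, then stacked)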
dereneaton/ipyrad | ipyrad/assemble/cluster_across.py | https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L1214-L1237 | def inserted_indels(indels, ocatg):
"""
inserts indels into the catg array
"""
## return copy with indels inserted
newcatg = np.zeros(ocatg.shape, dtype=np.uint32)
## iterate over loci and make extensions for indels
for iloc in xrange(ocatg.shape[0]):
## get indels indices
indidx = np.where(indels[iloc, :])[0]
if np.any(indidx):
## which new (empty) rows will be added
allrows = np.arange(ocatg.shape[1])
mask = np.ones(allrows.shape[0], dtype=np.bool_)
for idx in indidx:
mask[idx] = False
not_idx = allrows[mask == 1]
## fill in new data into all other spots
newcatg[iloc][not_idx] = ocatg[iloc, :not_idx.shape[0]]
else:
newcatg[iloc] = ocatg[iloc]
return newcatg | [
"def",
"inserted_indels",
"(",
"indels",
",",
"ocatg",
")",
":",
"## return copy with indels inserted",
"newcatg",
"=",
"np",
".",
"zeros",
"(",
"ocatg",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"## iterate over loci and make extensions for indels",
"for",
"iloc",
"in",
"xrange",
"(",
"ocatg",
".",
"shape",
"[",
"0",
"]",
")",
":",
"## get indels indices",
"indidx",
"=",
"np",
".",
"where",
"(",
"indels",
"[",
"iloc",
",",
":",
"]",
")",
"[",
"0",
"]",
"if",
"np",
".",
"any",
"(",
"indidx",
")",
":",
"## which new (empty) rows will be added",
"allrows",
"=",
"np",
".",
"arange",
"(",
"ocatg",
".",
"shape",
"[",
"1",
"]",
")",
"mask",
"=",
"np",
".",
"ones",
"(",
"allrows",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"for",
"idx",
"in",
"indidx",
":",
"mask",
"[",
"idx",
"]",
"=",
"False",
"not_idx",
"=",
"allrows",
"[",
"mask",
"==",
"1",
"]",
"## fill in new data into all other spots",
"newcatg",
"[",
"iloc",
"]",
"[",
"not_idx",
"]",
"=",
"ocatg",
"[",
"iloc",
",",
":",
"not_idx",
".",
"shape",
"[",
"0",
"]",
"]",
"else",
":",
"newcatg",
"[",
"iloc",
"]",
"=",
"ocatg",
"[",
"iloc",
"]",
"return",
"newcatg"
] | inserts indels into the catg array | [
"inserts",
"indels",
"into",
"the",
"catg",
"array"
] | python | valid |
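A small NumPy demonstration of what the insertion does (a sketch only; the function above relies on xrange, i.e. Python 2): one locus with four sites and an indel flagged at site 1 gets a zeroed column inserted there, with the remaining data shifted right.

import numpy as np

ocatg = np.array([[[10, 0, 0, 0],
                   [ 0, 20, 0, 0],
                   [ 0, 0, 30, 0],
                   [ 0, 0, 0, 40]]], dtype=np.uint32)  # (1 locus, 4 sites, 4 depths)
indels = np.zeros((1, 4), dtype=np.bool_)
indels[0, 1] = True  # indel at site 1

new = inserted_indels(indels, ocatg)
# new[0]:
# [[10,  0,  0,  0],   site 0 unchanged
#  [ 0,  0,  0,  0],   zeros inserted at the indel position
#  [ 0, 20,  0,  0],   original site 1 shifted right
#  [ 0,  0, 30,  0]]   original site 2 shifted right; the last original site is dropped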
pywbem/pywbem | try/run_central_instances.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/try/run_central_instances.py#L1216-L1236 | def get_profiles_in_svr(nickname, server, all_profiles_dict, org_vm,
add_error_list=False):
"""
Test all profiles in server.profiles to determine if profile is in
the all_profiles_dict.
Returns list of profiles in the profile_dict and in the defined server.
If add_error_list is True, it also adds profiles not found to
PROFILES_WITH_NO_DEFINITIONS.
"""
profiles_in_dict = []
for profile_inst in server.profiles:
pn = profile_name(org_vm, profile_inst, short=True)
if pn in all_profiles_dict:
profiles_in_dict.append(profile_inst)
else:
if add_error_list:
print('PROFILES_WITH_NO_DEFINITIONS svr=%s: %s' %
(nickname, pn))
PROFILES_WITH_NO_DEFINITIONS[nickname] = pn
return profiles_in_dict | [
"def",
"get_profiles_in_svr",
"(",
"nickname",
",",
"server",
",",
"all_profiles_dict",
",",
"org_vm",
",",
"add_error_list",
"=",
"False",
")",
":",
"profiles_in_dict",
"=",
"[",
"]",
"for",
"profile_inst",
"in",
"server",
".",
"profiles",
":",
"pn",
"=",
"profile_name",
"(",
"org_vm",
",",
"profile_inst",
",",
"short",
"=",
"True",
")",
"if",
"pn",
"in",
"all_profiles_dict",
":",
"profiles_in_dict",
".",
"append",
"(",
"profile_inst",
")",
"else",
":",
"if",
"add_error_list",
":",
"print",
"(",
"'PROFILES_WITH_NO_DEFINITIONS svr=%s: %s'",
"%",
"(",
"nickname",
",",
"pn",
")",
")",
"PROFILES_WITH_NO_DEFINITIONS",
"[",
"nickname",
"]",
"=",
"pn",
"return",
"profiles_in_dict"
] | Test all profiles in server.profiles to determine if profile is in
the all_profiles_dict.
Returns list of profiles in the profile_dict and in the defined server.
If add_error_list is True, it also adds profiles not found to
PROFILES_WITH_NO_DEFINITIONS. | [
"Test",
"all",
"profiles",
"in",
"server",
".",
"profiles",
"to",
"determine",
"if",
"profile",
"is",
"in",
"the",
"all_profiles_dict",
"."
] | python | train |
farzadghanei/statsd-metrics | statsdmetrics/client/__init__.py | https://github.com/farzadghanei/statsd-metrics/blob/153ff37b79777f208e49bb9d3fb737ba52b99f98/statsdmetrics/client/__init__.py#L186-L199 | def gauge(self, name, value, rate=1):
# type: (str, float, float) -> None
"""Send a Gauge metric with the specified value"""
if self._should_send_metric(name, rate):
if not is_numeric(value):
value = float(value)
self._request(
Gauge(
self._create_metric_name_for_request(name),
value,
rate
).to_request()
) | [
"def",
"gauge",
"(",
"self",
",",
"name",
",",
"value",
",",
"rate",
"=",
"1",
")",
":",
"# type: (str, float, float) -> None",
"if",
"self",
".",
"_should_send_metric",
"(",
"name",
",",
"rate",
")",
":",
"if",
"not",
"is_numeric",
"(",
"value",
")",
":",
"value",
"=",
"float",
"(",
"value",
")",
"self",
".",
"_request",
"(",
"Gauge",
"(",
"self",
".",
"_create_metric_name_for_request",
"(",
"name",
")",
",",
"value",
",",
"rate",
")",
".",
"to_request",
"(",
")",
")"
] | Send a Gauge metric with the specified value | [
"Send",
"a",
"Gauge",
"metric",
"with",
"the",
"specified",
"value"
] | python | test |
djgagne/hagelslag | hagelslag/evaluation/NeighborEvaluator.py | https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/NeighborEvaluator.py#L129-L170 | def evaluate_hourly_forecasts(self):
"""
Calculates ROC curves and Reliability scores for each forecast hour.
Returns:
A pandas DataFrame containing forecast metadata as well as DistributedROC and Reliability objects.
"""
score_columns = ["Run_Date", "Forecast_Hour", "Ensemble Name", "Model_Name", "Forecast_Variable",
"Neighbor_Radius", "Smoothing_Radius", "Size_Threshold", "ROC", "Reliability"]
all_scores = pd.DataFrame(columns=score_columns)
for h, hour in enumerate(range(self.start_hour, self.end_hour + 1)):
for neighbor_radius in self.neighbor_radii:
n_filter = disk(neighbor_radius)
for s, size_threshold in enumerate(self.size_thresholds):
print("Eval hourly forecast {0:02d} {1} {2} {3} {4:d} {5:d}".format(hour, self.model_name,
self.forecast_variable,
self.run_date, neighbor_radius,
size_threshold))
hour_obs = fftconvolve(self.raw_obs[self.mrms_variable][h] >= self.obs_thresholds[s],
n_filter, mode="same")
hour_obs[hour_obs > 1] = 1
hour_obs[hour_obs < 1] = 0
if self.obs_mask:
hour_obs = hour_obs[self.raw_obs[self.mask_variable][h] > 0]
for smoothing_radius in self.smoothing_radii:
hour_var = "neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}".format(neighbor_radius,
smoothing_radius,
self.forecast_variable,
size_threshold)
if self.obs_mask:
hour_forecast = self.hourly_forecasts[hour_var][h][self.raw_obs[self.mask_variable][h] > 0]
else:
hour_forecast = self.hourly_forecasts[hour_var][h]
roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5)
roc.update(hour_forecast, hour_obs)
rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5)
rel.update(hour_forecast, hour_obs)
row = [self.run_date, hour, self.ensemble_name, self.model_name, self.forecast_variable,
neighbor_radius,
smoothing_radius, size_threshold, roc, rel]
all_scores.loc[hour_var + "_{0:d}".format(hour)] = row
return all_scores | [
"def",
"evaluate_hourly_forecasts",
"(",
"self",
")",
":",
"score_columns",
"=",
"[",
"\"Run_Date\"",
",",
"\"Forecast_Hour\"",
",",
"\"Ensemble Name\"",
",",
"\"Model_Name\"",
",",
"\"Forecast_Variable\"",
",",
"\"Neighbor_Radius\"",
",",
"\"Smoothing_Radius\"",
",",
"\"Size_Threshold\"",
",",
"\"ROC\"",
",",
"\"Reliability\"",
"]",
"all_scores",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"score_columns",
")",
"for",
"h",
",",
"hour",
"in",
"enumerate",
"(",
"range",
"(",
"self",
".",
"start_hour",
",",
"self",
".",
"end_hour",
"+",
"1",
")",
")",
":",
"for",
"neighbor_radius",
"in",
"self",
".",
"neighbor_radii",
":",
"n_filter",
"=",
"disk",
"(",
"neighbor_radius",
")",
"for",
"s",
",",
"size_threshold",
"in",
"enumerate",
"(",
"self",
".",
"size_thresholds",
")",
":",
"print",
"(",
"\"Eval hourly forecast {0:02d} {1} {2} {3} {4:d} {5:d}\"",
".",
"format",
"(",
"hour",
",",
"self",
".",
"model_name",
",",
"self",
".",
"forecast_variable",
",",
"self",
".",
"run_date",
",",
"neighbor_radius",
",",
"size_threshold",
")",
")",
"hour_obs",
"=",
"fftconvolve",
"(",
"self",
".",
"raw_obs",
"[",
"self",
".",
"mrms_variable",
"]",
"[",
"h",
"]",
">=",
"self",
".",
"obs_thresholds",
"[",
"s",
"]",
",",
"n_filter",
",",
"mode",
"=",
"\"same\"",
")",
"hour_obs",
"[",
"hour_obs",
">",
"1",
"]",
"=",
"1",
"hour_obs",
"[",
"hour_obs",
"<",
"1",
"]",
"=",
"0",
"if",
"self",
".",
"obs_mask",
":",
"hour_obs",
"=",
"hour_obs",
"[",
"self",
".",
"raw_obs",
"[",
"self",
".",
"mask_variable",
"]",
"[",
"h",
"]",
">",
"0",
"]",
"for",
"smoothing_radius",
"in",
"self",
".",
"smoothing_radii",
":",
"hour_var",
"=",
"\"neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}\"",
".",
"format",
"(",
"neighbor_radius",
",",
"smoothing_radius",
",",
"self",
".",
"forecast_variable",
",",
"size_threshold",
")",
"if",
"self",
".",
"obs_mask",
":",
"hour_forecast",
"=",
"self",
".",
"hourly_forecasts",
"[",
"hour_var",
"]",
"[",
"h",
"]",
"[",
"self",
".",
"raw_obs",
"[",
"self",
".",
"mask_variable",
"]",
"[",
"h",
"]",
">",
"0",
"]",
"else",
":",
"hour_forecast",
"=",
"self",
".",
"hourly_forecasts",
"[",
"hour_var",
"]",
"[",
"h",
"]",
"roc",
"=",
"DistributedROC",
"(",
"thresholds",
"=",
"self",
".",
"probability_levels",
",",
"obs_threshold",
"=",
"0.5",
")",
"roc",
".",
"update",
"(",
"hour_forecast",
",",
"hour_obs",
")",
"rel",
"=",
"DistributedReliability",
"(",
"thresholds",
"=",
"self",
".",
"probability_levels",
",",
"obs_threshold",
"=",
"0.5",
")",
"rel",
".",
"update",
"(",
"hour_forecast",
",",
"hour_obs",
")",
"row",
"=",
"[",
"self",
".",
"run_date",
",",
"hour",
",",
"self",
".",
"ensemble_name",
",",
"self",
".",
"model_name",
",",
"self",
".",
"forecast_variable",
",",
"neighbor_radius",
",",
"smoothing_radius",
",",
"size_threshold",
",",
"roc",
",",
"rel",
"]",
"all_scores",
".",
"loc",
"[",
"hour_var",
"+",
"\"_{0:d}\"",
".",
"format",
"(",
"hour",
")",
"]",
"=",
"row",
"return",
"all_scores"
] | Calculates ROC curves and Reliability scores for each forecast hour.
Returns:
A pandas DataFrame containing forecast metadata as well as DistributedROC and Reliability objects. | [
"Calculates",
"ROC",
"curves",
"and",
"Reliability",
"scores",
"for",
"each",
"forecast",
"hour",
"."
] | python | train |
brocade/pynos | pynos/versions/base/yang/ietf_netconf.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/ietf_netconf.py#L522-L533 | def commit_input_persist_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
commit = ET.Element("commit")
config = commit
input = ET.SubElement(commit, "input")
persist_id = ET.SubElement(input, "persist-id")
persist_id.text = kwargs.pop('persist_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"commit_input_persist_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"commit",
"=",
"ET",
".",
"Element",
"(",
"\"commit\"",
")",
"config",
"=",
"commit",
"input",
"=",
"ET",
".",
"SubElement",
"(",
"commit",
",",
"\"input\"",
")",
"persist_id",
"=",
"ET",
".",
"SubElement",
"(",
"input",
",",
"\"persist-id\"",
")",
"persist_id",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'persist_id'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
F5Networks/f5-common-python | f5/bigip/tm/sys/application.py | https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/tm/sys/application.py#L182-L222 | def exists(self, **kwargs):
'''Check for the existence of the named object on the BIG-IP
Override of resource.Resource exists() to build proper URI unique to
service resources.
Sends an HTTP GET to the URI of the named object and if it fails with
a :exc:~requests.HTTPError` exception it checks the exception for
status code of 404 and returns :obj:`False` in that case.
If the GET is successful it returns :obj:`True`.
Any other errors are raised as-is.
:param kwargs: Keyword arguments required to get objects
NOTE: If kwargs has a 'requests_params' key the corresponding dict will
be passed to the underlying requests.session.get method where it will
be handled according to that API. THIS IS HOW TO PASS QUERY-ARGS!
:returns: bool -- The object exists on BIG-IP® or not.
:raises: :exc:`requests.HTTPError`, Any HTTP error that was not status
code 404.
'''
requests_params = self._handle_requests_params(kwargs)
self._check_load_parameters(**kwargs)
kwargs['uri_as_parts'] = False
session = self._meta_data['bigip']._meta_data['icr_session']
base_uri = self._meta_data['container']._meta_data['uri']
partition = kwargs.pop('partition')
name = kwargs.pop('name')
exists_uri = self._build_service_uri(base_uri, partition, name)
kwargs.update(requests_params)
try:
session.get(exists_uri, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
return False
else:
raise
return True | [
"def",
"exists",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"requests_params",
"=",
"self",
".",
"_handle_requests_params",
"(",
"kwargs",
")",
"self",
".",
"_check_load_parameters",
"(",
"*",
"*",
"kwargs",
")",
"kwargs",
"[",
"'uri_as_parts'",
"]",
"=",
"False",
"session",
"=",
"self",
".",
"_meta_data",
"[",
"'bigip'",
"]",
".",
"_meta_data",
"[",
"'icr_session'",
"]",
"base_uri",
"=",
"self",
".",
"_meta_data",
"[",
"'container'",
"]",
".",
"_meta_data",
"[",
"'uri'",
"]",
"partition",
"=",
"kwargs",
".",
"pop",
"(",
"'partition'",
")",
"name",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"exists_uri",
"=",
"self",
".",
"_build_service_uri",
"(",
"base_uri",
",",
"partition",
",",
"name",
")",
"kwargs",
".",
"update",
"(",
"requests_params",
")",
"try",
":",
"session",
".",
"get",
"(",
"exists_uri",
",",
"*",
"*",
"kwargs",
")",
"except",
"HTTPError",
"as",
"err",
":",
"if",
"err",
".",
"response",
".",
"status_code",
"==",
"404",
":",
"return",
"False",
"else",
":",
"raise",
"return",
"True"
] | Check for the existence of the named object on the BIG-IP
Override of resource.Resource exists() to build proper URI unique to
service resources.
Sends an HTTP GET to the URI of the named object and if it fails with
a :exc:~requests.HTTPError` exception it checks the exception for
status code of 404 and returns :obj:`False` in that case.
If the GET is successful it returns :obj:`True`.
Any other errors are raised as-is.
:param kwargs: Keyword arguments required to get objects
NOTE: If kwargs has a 'requests_params' key the corresponding dict will
be passed to the underlying requests.session.get method where it will
be handled according to that API. THIS IS HOW TO PASS QUERY-ARGS!
:returns: bool -- The object exists on BIG-IP® or not.
:raises: :exc:`requests.HTTPError`, Any HTTP error that was not status
code 404. | [
"Check",
"for",
"the",
"existence",
"of",
"the",
"named",
"object",
"on",
"the",
"BIG",
"-",
"IP"
] | python | train |
camsci/meteor-pi | src/pythonModules/meteorpi_db/meteorpi_db/__init__.py | https://github.com/camsci/meteor-pi/blob/7b01527650bd1b2b76d6f364e8122e25b8812c8d/src/pythonModules/meteorpi_db/meteorpi_db/__init__.py#L749-L759 | def has_obsgroup_id(self, group_id):
"""
Check for the presence of the given group_id
:param string group_id:
The group ID
:return:
True if we have a :class:`meteorpi_model.ObservationGroup` with this Id, False otherwise
"""
self.con.execute('SELECT 1 FROM archive_obs_groups WHERE publicId = %s', (group_id,))
return len(self.con.fetchall()) > 0 | [
"def",
"has_obsgroup_id",
"(",
"self",
",",
"group_id",
")",
":",
"self",
".",
"con",
".",
"execute",
"(",
"'SELECT 1 FROM archive_obs_groups WHERE publicId = %s'",
",",
"(",
"group_id",
",",
")",
")",
"return",
"len",
"(",
"self",
".",
"con",
".",
"fetchall",
"(",
")",
")",
">",
"0"
] | Check for the presence of the given group_id
:param string group_id:
The group ID
:return:
True if we have a :class:`meteorpi_model.ObservationGroup` with this Id, False otherwise | [
"Check",
"for",
"the",
"presence",
"of",
"the",
"given",
"group_id"
] | python | train |
OpenTreeOfLife/peyotl | peyotl/nexson_syntax/helper.py | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/helper.py#L236-L246 | def _python_instance_to_nexml_meta_datatype(v):
"""Returns 'xsd:string' or a more specific type for a <meta datatype="XYZ"...
syntax using introspection.
"""
if isinstance(v, bool):
return 'xsd:boolean'
if is_int_type(v):
return 'xsd:int'
if isinstance(v, float):
return 'xsd:float'
return 'xsd:string' | [
"def",
"_python_instance_to_nexml_meta_datatype",
"(",
"v",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"bool",
")",
":",
"return",
"'xsd:boolean'",
"if",
"is_int_type",
"(",
"v",
")",
":",
"return",
"'xsd:int'",
"if",
"isinstance",
"(",
"v",
",",
"float",
")",
":",
"return",
"'xsd:float'",
"return",
"'xsd:string'"
] | Returns 'xsd:string' or a more specific type for a <meta datatype="XYZ"...
syntax using introspection. | [
"Returns",
"xsd",
":",
"string",
"or",
"a",
"more",
"specific",
"type",
"for",
"a",
"<meta",
"datatype",
"=",
"XYZ",
"...",
"syntax",
"using",
"introspection",
"."
] | python | train |
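The mapping is easiest to see with a few direct calls (illustrative only); note that the bool check runs before the int check, which matters because bool is a subclass of int in Python:

_python_instance_to_nexml_meta_datatype(True)   # -> 'xsd:boolean'
_python_instance_to_nexml_meta_datatype(42)     # -> 'xsd:int'
_python_instance_to_nexml_meta_datatype(3.14)   # -> 'xsd:float'
_python_instance_to_nexml_meta_datatype('abc')  # -> 'xsd:string'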
ubyssey/dispatch | dispatch/admin/urls.py | https://github.com/ubyssey/dispatch/blob/8da6084fe61726f20e9cf675190480cfc45ee764/dispatch/admin/urls.py#L8-L16 | def admin(request):
"""Render HTML entry point for manager app."""
context = {
'api_url': settings.API_URL,
'app_js_bundle': 'manager-%s.js' % dispatch.__version__,
'app_css_bundle': 'manager-%s.css' % dispatch.__version__
}
return render_to_response('manager/index.html', context) | [
"def",
"admin",
"(",
"request",
")",
":",
"context",
"=",
"{",
"'api_url'",
":",
"settings",
".",
"API_URL",
",",
"'app_js_bundle'",
":",
"'manager-%s.js'",
"%",
"dispatch",
".",
"__version__",
",",
"'app_css_bundle'",
":",
"'manager-%s.css'",
"%",
"dispatch",
".",
"__version__",
"}",
"return",
"render_to_response",
"(",
"'manager/index.html'",
",",
"context",
")"
] | Render HTML entry point for manager app. | [
"Render",
"HTML",
"entry",
"point",
"for",
"manager",
"app",
"."
] | python | test |
HarveyHunt/i3situation | i3situation/core/plugin_manager.py | https://github.com/HarveyHunt/i3situation/blob/3160a21006fcc6961f240988874e228a5ec6f18e/i3situation/core/plugin_manager.py#L125-L142 | def _compile_files(self):
"""
Compiles python plugin files in order to be processed by the loader.
It compiles the plugins if they have been updated or haven't yet been
compiled.
"""
for f in glob.glob(os.path.join(self.dir_path, '*.py')):
# Check for compiled Python files that aren't in the __pycache__.
if not os.path.isfile(os.path.join(self.dir_path, f + 'c')):
compileall.compile_dir(self.dir_path, quiet=True)
logging.debug('Compiled plugins as a new plugin has been added.')
return
# Recompile if there are newer plugins.
elif os.path.getmtime(os.path.join(self.dir_path, f)) > os.path.getmtime(
os.path.join(self.dir_path, f + 'c')):
compileall.compile_dir(self.dir_path, quiet=True)
logging.debug('Compiled plugins as a plugin has been changed.')
return | [
"def",
"_compile_files",
"(",
"self",
")",
":",
"for",
"f",
"in",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dir_path",
",",
"'*.py'",
")",
")",
":",
"# Check for compiled Python files that aren't in the __pycache__.",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dir_path",
",",
"f",
"+",
"'c'",
")",
")",
":",
"compileall",
".",
"compile_dir",
"(",
"self",
".",
"dir_path",
",",
"quiet",
"=",
"True",
")",
"logging",
".",
"debug",
"(",
"'Compiled plugins as a new plugin has been added.'",
")",
"return",
"# Recompile if there are newer plugins.",
"elif",
"os",
".",
"path",
".",
"getmtime",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dir_path",
",",
"f",
")",
")",
">",
"os",
".",
"path",
".",
"getmtime",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dir_path",
",",
"f",
"+",
"'c'",
")",
")",
":",
"compileall",
".",
"compile_dir",
"(",
"self",
".",
"dir_path",
",",
"quiet",
"=",
"True",
")",
"logging",
".",
"debug",
"(",
"'Compiled plugins as a plugin has been changed.'",
")",
"return"
] | Compiles python plugin files in order to be processed by the loader.
It compiles the plugins if they have been updated or haven't yet been
compiled. | [
"Compiles",
"python",
"plugin",
"files",
"in",
"order",
"to",
"be",
"processed",
"by",
"the",
"loader",
".",
"It",
"compiles",
"the",
"plugins",
"if",
"they",
"have",
"been",
"updated",
"or",
"haven",
"t",
"yet",
"been",
"compiled",
"."
] | python | train |
obriencj/python-javatools | javatools/__init__.py | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L1497-L1517 | def get_identifier(self):
"""
For methods this is the return type, the name and the (non-pretty)
argument descriptor. For fields it is simply the name.
The return-type of methods is attached to the identifier when
it is a bridge method, which can technically allow two methods
with the same name and argument type list, but with different
return type.
"""
ident = self.get_name()
if self.is_method:
args = ",".join(self.get_arg_type_descriptors())
if self.is_bridge():
ident = "%s(%s):%s" % (ident, args, self.get_descriptor())
else:
ident = "%s(%s)" % (ident, args)
return ident | [
"def",
"get_identifier",
"(",
"self",
")",
":",
"ident",
"=",
"self",
".",
"get_name",
"(",
")",
"if",
"self",
".",
"is_method",
":",
"args",
"=",
"\",\"",
".",
"join",
"(",
"self",
".",
"get_arg_type_descriptors",
"(",
")",
")",
"if",
"self",
".",
"is_bridge",
"(",
")",
":",
"ident",
"=",
"\"%s(%s):%s\"",
"%",
"(",
"ident",
",",
"args",
",",
"self",
".",
"get_descriptor",
"(",
")",
")",
"else",
":",
"ident",
"=",
"\"%s(%s)\"",
"%",
"(",
"ident",
",",
"args",
")",
"return",
"ident"
] | For methods this is the return type, the name and the (non-pretty)
argument descriptor. For fields it is simply the name.
The return-type of methods is attached to the identifier when
it is a bridge method, which can technically allow two methods
with the same name and argument type list, but with different
return type. | [
"For",
"methods",
"this",
"is",
"the",
"return",
"type",
"the",
"name",
"and",
"the",
"(",
"non",
"-",
"pretty",
")",
"argument",
"descriptor",
".",
"For",
"fields",
"it",
"is",
"simply",
"the",
"name",
"."
] | python | train |
hannorein/rebound | rebound/simulation.py | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1190-L1204 | def particles_ascii(self, prec=8):
"""
Returns an ASCII string with all particles' masses, radii, positions and velocities.
Parameters
----------
prec : int, optional
Number of digits after decimal point. Default 8.
"""
s = ""
for p in self.particles:
s += (("%%.%de "%prec) * 8)%(p.m, p.r, p.x, p.y, p.z, p.vx, p.vy, p.vz) + "\n"
if len(s):
s = s[:-1]
return s | [
"def",
"particles_ascii",
"(",
"self",
",",
"prec",
"=",
"8",
")",
":",
"s",
"=",
"\"\"",
"for",
"p",
"in",
"self",
".",
"particles",
":",
"s",
"+=",
"(",
"(",
"\"%%.%de \"",
"%",
"prec",
")",
"*",
"8",
")",
"%",
"(",
"p",
".",
"m",
",",
"p",
".",
"r",
",",
"p",
".",
"x",
",",
"p",
".",
"y",
",",
"p",
".",
"z",
",",
"p",
".",
"vx",
",",
"p",
".",
"vy",
",",
"p",
".",
"vz",
")",
"+",
"\"\\n\"",
"if",
"len",
"(",
"s",
")",
":",
"s",
"=",
"s",
"[",
":",
"-",
"1",
"]",
"return",
"s"
] | Returns an ASCII string with all particles' masses, radii, positions and velocities.
Parameters
----------
prec : int, optional
Number of digits after decimal point. Default 8. | [
"Returns",
"an",
"ASCII",
"string",
"with",
"all",
"particles",
"masses",
"radii",
"positions",
"and",
"velocities",
"."
] | python | train |
CalebBell/fluids | fluids/fittings.py | https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/fittings.py#L1230-L1364 | def bend_rounded(Di, angle, fd=None, rc=None, bend_diameters=5.0,
Re=None, roughness=0.0, L_unimpeded=None, method='Rennels'):
r'''Returns loss coefficient for rounded bend in a pipe of diameter `Di`,
`angle`, with a specified either radius of curvature `rc` or curvature
defined by `bend_diameters`, Reynolds number `Re` and optionally pipe
roughness, unimpeded length downstream, and with the specified method.
This calculation has five methods available.
It is hard to describe one method as more conservative than another as
depending on the conditions, the relative results change significantly.
The 'Miller' method is the most complicated and slowest method; the 'Ito'
method is comprehensive as well and a source of original data, and the primary
basis for the 'Rennels' method. The 'Swamee' method is very simple and
generally does not match the other methods. The 'Crane' method may match
or not match other methods depending on the inputs.
The Rennels [1]_ formula is:
.. math::
K = f\alpha\frac{r}{d} + (0.10 + 2.4f)\sin(\alpha/2)
+ \frac{6.6f(\sqrt{\sin(\alpha/2)}+\sin(\alpha/2))}
{(r/d)^{\frac{4\alpha}{\pi}}}
The Swamee [5]_ formula is:
.. math::
K = \left[0.0733 + 0.923 \left(\frac{d}{rc}\right)^{3.5} \right]
\theta^{0.5}
.. figure:: fittings/bend_rounded.png
:scale: 30 %
:alt: rounded bend; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
angle : float
Angle of bend, [degrees]
fd : float, optional
Darcy friction factor; used only in Rennels method; calculated if not
provided from Reynolds number, diameter, and roughness [-]
rc : float, optional
Radius of curvature of the entrance, optional [m]
bend_diameters : float, optional (used if rc not provided)
Number of diameters of pipe making up the bend radius [-]
Re : float, optional
Reynolds number of the pipe (used in Miller, Ito methods primarily, and
Rennels method if no friction factor given), [m]
roughness : float, optional
Roughness of bend wall (used in Miller, Ito methods primarily, and
Rennels method if no friction factor given), [m]
L_unimpeded : float, optional
The length of unimpeded pipe without any fittings, instrumentation,
or flow disturbances downstream (assumed 20 diameters if not
specified); used only in Miller method, [m]
method : str, optional
One of 'Rennels', 'Miller', 'Crane', 'Ito', or 'Swamee', [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
When inputting bend diameters, note that manufacturers often specify
this as a multiplier of nominal diameter, which is different than actual
diameter. Those require that rc be specified.
In the 'Rennels' method, `rc` is limited to 0.5 or above; which represents
a sharp, square, inner edge - and an outer bend radius of 1.0. Losses are
at a minimum when this value is large. Its first term represents surface
friction loss; the second, secondary flows; and the third, flow separation.
It encompasses the entire range of elbow and pipe bend configurations.
It was developed for bend angles between 0 and 180 degrees; and r/D
ratios above 0.5. Only smooth pipe data was used in its development.
Note the loss coefficient includes the surface friction of the pipe as if
it was straight.
Examples
--------
>>> bend_rounded(Di=4.020, rc=4.0*5, angle=30, Re=1E5)
0.11519070808085191
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] Miller, Donald S. Internal Flow Systems: Design and Performance
Prediction. Gulf Publishing Company, 1990.
.. [3] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
.. [4] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply
Pipe Networks. John Wiley & Sons, 2008.
.. [5] Itō, H."Pressure Losses in Smooth Pipe Bends." Journal of Fluids
Engineering 82, no. 1 (March 1, 1960): 131-40. doi:10.1115/1.3662501
.. [6] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York, N.Y.:
Van Nostrand Reinhold Co., 1984.
'''
if method is None:
method = 'Rennels'
if rc is None:
rc = Di*bend_diameters
if method == 'Rennels':
angle = radians(angle)
if fd is None:
if Re is None:
raise ValueError("The `Rennels` method requires either a "
"specified friction factor or `Re`")
fd = Colebrook(Re=Re, eD=roughness/Di, tol=-1)
sin_term = sin(0.5*angle)
return (fd*angle*rc/Di + (0.10 + 2.4*fd)*sin_term
+ 6.6*fd*(sin_term**0.5 + sin_term)/(rc/Di)**(4.*angle/pi))
elif method == 'Miller':
if Re is None:
raise ValueError('Miller method requires Reynolds number')
return bend_rounded_Miller(Di=Di, angle=angle, Re=Re, rc=rc,
bend_diameters=bend_diameters,
roughness=roughness,
L_unimpeded=L_unimpeded)
elif method == 'Crane':
return bend_rounded_Crane(Di=Di, angle=angle, rc=rc,
bend_diameters=bend_diameters)
elif method == 'Ito':
if Re is None:
raise ValueError("The `Iso` method requires`Re`")
return bend_rounded_Ito(Di=Di, angle=angle, Re=Re, rc=rc, bend_diameters=bend_diameters,
roughness=roughness)
elif method == 'Swamee':
return (0.0733 + 0.923*(Di/rc)**3.5)*radians(angle)**0.5
else:
raise ValueError('Specified method not recognized; methods are %s'
%(bend_rounded_methods)) | [
"def",
"bend_rounded",
"(",
"Di",
",",
"angle",
",",
"fd",
"=",
"None",
",",
"rc",
"=",
"None",
",",
"bend_diameters",
"=",
"5.0",
",",
"Re",
"=",
"None",
",",
"roughness",
"=",
"0.0",
",",
"L_unimpeded",
"=",
"None",
",",
"method",
"=",
"'Rennels'",
")",
":",
"if",
"method",
"is",
"None",
":",
"method",
"=",
"'Rennels'",
"if",
"rc",
"is",
"None",
":",
"rc",
"=",
"Di",
"*",
"bend_diameters",
"if",
"method",
"==",
"'Rennels'",
":",
"angle",
"=",
"radians",
"(",
"angle",
")",
"if",
"fd",
"is",
"None",
":",
"if",
"Re",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"The `Rennels` method requires either a \"",
"\"specified friction factor or `Re`\"",
")",
"fd",
"=",
"Colebrook",
"(",
"Re",
"=",
"Re",
",",
"eD",
"=",
"roughness",
"/",
"Di",
",",
"tol",
"=",
"-",
"1",
")",
"sin_term",
"=",
"sin",
"(",
"0.5",
"*",
"angle",
")",
"return",
"(",
"fd",
"*",
"angle",
"*",
"rc",
"/",
"Di",
"+",
"(",
"0.10",
"+",
"2.4",
"*",
"fd",
")",
"*",
"sin_term",
"+",
"6.6",
"*",
"fd",
"*",
"(",
"sin_term",
"**",
"0.5",
"+",
"sin_term",
")",
"/",
"(",
"rc",
"/",
"Di",
")",
"**",
"(",
"4.",
"*",
"angle",
"/",
"pi",
")",
")",
"elif",
"method",
"==",
"'Miller'",
":",
"if",
"Re",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Miller method requires Reynolds number'",
")",
"return",
"bend_rounded_Miller",
"(",
"Di",
"=",
"Di",
",",
"angle",
"=",
"angle",
",",
"Re",
"=",
"Re",
",",
"rc",
"=",
"rc",
",",
"bend_diameters",
"=",
"bend_diameters",
",",
"roughness",
"=",
"roughness",
",",
"L_unimpeded",
"=",
"L_unimpeded",
")",
"elif",
"method",
"==",
"'Crane'",
":",
"return",
"bend_rounded_Crane",
"(",
"Di",
"=",
"Di",
",",
"angle",
"=",
"angle",
",",
"rc",
"=",
"rc",
",",
"bend_diameters",
"=",
"bend_diameters",
")",
"elif",
"method",
"==",
"'Ito'",
":",
"if",
"Re",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"The `Iso` method requires`Re`\"",
")",
"return",
"bend_rounded_Ito",
"(",
"Di",
"=",
"Di",
",",
"angle",
"=",
"angle",
",",
"Re",
"=",
"Re",
",",
"rc",
"=",
"rc",
",",
"bend_diameters",
"=",
"bend_diameters",
",",
"roughness",
"=",
"roughness",
")",
"elif",
"method",
"==",
"'Swamee'",
":",
"return",
"(",
"0.0733",
"+",
"0.923",
"*",
"(",
"Di",
"/",
"rc",
")",
"**",
"3.5",
")",
"*",
"radians",
"(",
"angle",
")",
"**",
"0.5",
"else",
":",
"raise",
"ValueError",
"(",
"'Specified method not recognized; methods are %s'",
"%",
"(",
"bend_rounded_methods",
")",
")"
] | r'''Returns loss coefficient for rounded bend in a pipe of diameter `Di`,
`angle`, with a specified either radius of curvature `rc` or curvature
defined by `bend_diameters`, Reynolds number `Re` and optionally pipe
roughness, unimpeded length downstream, and with the specified method.
This calculation has five methods available.
It is hard to describe one method as more conservative than another as
depending on the conditions, the relative results change significantly.
The 'Miller' method is the most complicated and slowest method; the 'Ito'
method is comprehensive as well and a source of original data, and the primary
basis for the 'Rennels' method. The 'Swamee' method is very simple and
generally does not match the other methods. The 'Crane' method may match
or not match other methods depending on the inputs.
The Rennels [1]_ formula is:
.. math::
K = f\alpha\frac{r}{d} + (0.10 + 2.4f)\sin(\alpha/2)
+ \frac{6.6f(\sqrt{\sin(\alpha/2)}+\sin(\alpha/2))}
{(r/d)^{\frac{4\alpha}{\pi}}}
The Swamee [5]_ formula is:
.. math::
K = \left[0.0733 + 0.923 \left(\frac{d}{rc}\right)^{3.5} \right]
\theta^{0.5}
.. figure:: fittings/bend_rounded.png
:scale: 30 %
:alt: rounded bend; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
angle : float
Angle of bend, [degrees]
fd : float, optional
Darcy friction factor; used only in Rennels method; calculated if not
provided from Reynolds number, diameter, and roughness [-]
rc : float, optional
Radius of curvature of the entrance, optional [m]
bend_diameters : float, optional (used if rc not provided)
Number of diameters of pipe making up the bend radius [-]
Re : float, optional
Reynolds number of the pipe (used in Miller, Ito methods primarily, and
Rennels method if no friction factor given), [m]
roughness : float, optional
Roughness of bend wall (used in Miller, Ito methods primarily, and
Rennels method if no friction factor given), [m]
L_unimpeded : float, optional
The length of unimpeded pipe without any fittings, instrumentation,
or flow disturbances downstream (assumed 20 diameters if not
specified); used only in Miller method, [m]
method : str, optional
One of 'Rennels', 'Miller', 'Crane', 'Ito', or 'Swamee', [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
When inputting bend diameters, note that manufacturers often specify
this as a multiplier of nominal diameter, which is different than actual
diameter. Those require that rc be specified.
In the 'Rennels' method, `rc` is limited to 0.5 or above; which represents
a sharp, square, inner edge - and an outer bend radius of 1.0. Losses are
at a minimum when this value is large. Its first term represents surface
friction loss; the second, secondary flows; and the third, flow separation.
It encompasses the entire range of elbow and pipe bend configurations.
It was developed for bend angles between 0 and 180 degrees; and r/D
ratios above 0.5. Only smooth pipe data was used in its development.
Note the loss coefficient includes the surface friction of the pipe as if
it was straight.
Examples
--------
>>> bend_rounded(Di=4.020, rc=4.0*5, angle=30, Re=1E5)
0.11519070808085191
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] Miller, Donald S. Internal Flow Systems: Design and Performance
Prediction. Gulf Publishing Company, 1990.
.. [3] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
.. [4] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply
Pipe Networks. John Wiley & Sons, 2008.
.. [5] Itō, H."Pressure Losses in Smooth Pipe Bends." Journal of Fluids
Engineering 82, no. 1 (March 1, 1960): 131-40. doi:10.1115/1.3662501
.. [6] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York, N.Y.:
Van Nostrand Reinhold Co., 1984. | [
"r",
"Returns",
"loss",
"coefficient",
"for",
"rounded",
"bend",
"in",
"a",
"pipe",
"of",
"diameter",
"Di",
"angle",
"with",
"a",
"specified",
"either",
"radius",
"of",
"curvature",
"rc",
"or",
"curvature",
"defined",
"by",
"bend_diameters",
"Reynolds",
"number",
"Re",
"and",
"optionally",
"pipe",
"roughness",
"unimpeded",
"length",
"downstrean",
"and",
"with",
"the",
"specified",
"method",
".",
"This",
"calculation",
"has",
"five",
"methods",
"available",
".",
"It",
"is",
"hard",
"to",
"describe",
"one",
"method",
"as",
"more",
"conservative",
"than",
"another",
"as",
"depending",
"on",
"the",
"conditions",
"the",
"relative",
"results",
"change",
"significantly",
".",
"The",
"Miller",
"method",
"is",
"the",
"most",
"complicated",
"and",
"slowest",
"method",
";",
"the",
"Ito",
"method",
"comprehensive",
"as",
"well",
"and",
"a",
"source",
"of",
"original",
"data",
"and",
"the",
"primary",
"basis",
"for",
"the",
"Rennels",
"method",
".",
"The",
"Swamee",
"method",
"is",
"very",
"simple",
"and",
"generally",
"does",
"not",
"match",
"the",
"other",
"methods",
".",
"The",
"Crane",
"method",
"may",
"match",
"or",
"not",
"match",
"other",
"methods",
"depending",
"on",
"the",
"inputs",
".",
"The",
"Rennels",
"[",
"1",
"]",
"_",
"formula",
"is",
":",
"..",
"math",
"::",
"K",
"=",
"f",
"\\",
"alpha",
"\\",
"frac",
"{",
"r",
"}",
"{",
"d",
"}",
"+",
"(",
"0",
".",
"10",
"+",
"2",
".",
"4f",
")",
"\\",
"sin",
"(",
"\\",
"alpha",
"/",
"2",
")",
"+",
"\\",
"frac",
"{",
"6",
".",
"6f",
"(",
"\\",
"sqrt",
"{",
"\\",
"sin",
"(",
"\\",
"alpha",
"/",
"2",
")",
"}",
"+",
"\\",
"sin",
"(",
"\\",
"alpha",
"/",
"2",
"))",
"}",
"{",
"(",
"r",
"/",
"d",
")",
"^",
"{",
"\\",
"frac",
"{",
"4",
"\\",
"alpha",
"}",
"{",
"\\",
"pi",
"}}}"
] | python | train |
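The Swamee branch quoted in this record is simple enough to evaluate on its own. A minimal sketch, independent of the fluids package; the geometry values are just the ones from the docstring example, and the printed K is for the Swamee correlation, not the Rennels default:

from math import radians

def bend_K_swamee(Di, rc, angle):
    # K = [0.0733 + 0.923*(Di/rc)**3.5] * theta**0.5, with theta in radians
    return (0.0733 + 0.923 * (Di / rc) ** 3.5) * radians(angle) ** 0.5

# Same geometry as the docstring example (Di=4.020, rc=20, 30 degree bend).
print(bend_K_swamee(Di=4.020, rc=4.0 * 5, angle=30))  # ~0.055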
log2timeline/dfvfs | dfvfs/vfs/vshadow_file_system.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/vshadow_file_system.py#L45-L74 | def _Open(self, path_spec, mode='rb'):
"""Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
vshadow_volume = pyvshadow.volume()
vshadow_volume.open_file_object(file_object)
except:
file_object.close()
raise
self._file_object = file_object
self._vshadow_volume = vshadow_volume | [
"def",
"_Open",
"(",
"self",
",",
"path_spec",
",",
"mode",
"=",
"'rb'",
")",
":",
"if",
"not",
"path_spec",
".",
"HasParent",
"(",
")",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Unsupported path specification without parent.'",
")",
"file_object",
"=",
"resolver",
".",
"Resolver",
".",
"OpenFileObject",
"(",
"path_spec",
".",
"parent",
",",
"resolver_context",
"=",
"self",
".",
"_resolver_context",
")",
"try",
":",
"vshadow_volume",
"=",
"pyvshadow",
".",
"volume",
"(",
")",
"vshadow_volume",
".",
"open_file_object",
"(",
"file_object",
")",
"except",
":",
"file_object",
".",
"close",
"(",
")",
"raise",
"self",
".",
"_file_object",
"=",
"file_object",
"self",
".",
"_vshadow_volume",
"=",
"vshadow_volume"
] | Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. | [
"Opens",
"the",
"file",
"system",
"object",
"defined",
"by",
"path",
"specification",
"."
] | python | train |
happyleavesaoc/python-limitlessled | limitlessled/group/commands/legacy.py | https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/commands/legacy.py#L29-L42 | def get_bytes(self, bridge):
"""
Gets the full command as bytes.
:param bridge: The bridge, to which the command should be sent.
"""
if self.cmd_2 is not None:
cmd = [self.cmd_1, self.cmd_2]
else:
cmd = [self.cmd_1, self.SUFFIX_BYTE]
if bridge.version < self.BRIDGE_SHORT_VERSION_MIN:
cmd.append(self.BRIDGE_LONG_BYTE)
return bytearray(cmd) | [
"def",
"get_bytes",
"(",
"self",
",",
"bridge",
")",
":",
"if",
"self",
".",
"cmd_2",
"is",
"not",
"None",
":",
"cmd",
"=",
"[",
"self",
".",
"cmd_1",
",",
"self",
".",
"cmd_2",
"]",
"else",
":",
"cmd",
"=",
"[",
"self",
".",
"cmd_1",
",",
"self",
".",
"SUFFIX_BYTE",
"]",
"if",
"bridge",
".",
"version",
"<",
"self",
".",
"BRIDGE_SHORT_VERSION_MIN",
":",
"cmd",
".",
"append",
"(",
"self",
".",
"BRIDGE_LONG_BYTE",
")",
"return",
"bytearray",
"(",
"cmd",
")"
] | Gets the full command as bytes.
:param bridge: The bridge, to which the command should be sent. | [
"Gets",
"the",
"full",
"command",
"as",
"bytes",
".",
":",
"param",
"bridge",
":",
"The",
"bridge",
"to",
"which",
"the",
"command",
"should",
"be",
"sent",
"."
] | python | train |
BlueBrain/nat | nat/zotero_wrap.py | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L233-L240 | def reference_journal(self, index):
"""Return the reference journal name."""
# TODO Change the column name 'Journal' to an other?
ref_type = self.reference_type(index)
if ref_type == "journalArticle":
return self.reference_data(index)["publicationTitle"]
else:
return "({})".format(ref_type) | [
"def",
"reference_journal",
"(",
"self",
",",
"index",
")",
":",
"# TODO Change the column name 'Journal' to an other?",
"ref_type",
"=",
"self",
".",
"reference_type",
"(",
"index",
")",
"if",
"ref_type",
"==",
"\"journalArticle\"",
":",
"return",
"self",
".",
"reference_data",
"(",
"index",
")",
"[",
"\"publicationTitle\"",
"]",
"else",
":",
"return",
"\"({})\"",
".",
"format",
"(",
"ref_type",
")"
] | Return the reference journal name. | [
"Return",
"the",
"reference",
"journal",
"name",
"."
] | python | train |
danilobellini/audiolazy | audiolazy/lazy_synth.py | https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_synth.py#L621-L654 | def karplus_strong(freq, tau=2e4, memory=white_noise):
"""
Karplus-Strong "digitar" synthesis algorithm.
Parameters
----------
freq :
Frequency, in rad/sample.
tau :
Time decay (up to ``1/e``, or -8.686 dB), in number of samples. Defaults
to 2e4. Be careful: using the default value will make duration different
on each sample rate value. Use ``sHz`` if you need that independent from
the sample rate and in seconds unit.
memory :
Memory data for the comb filter (delayed "output" data in memory).
Defaults to the ``white_noise`` function.
Returns
-------
Stream instance with the synthesized data.
Note
----
The fractional delays are solved by exponent linearization.
See Also
--------
sHz :
Second and hertz constants from samples/second rate.
white_noise :
White noise stream generator.
"""
return comb.tau(2 * pi / freq, tau).linearize()(zeros(), memory=memory) | [
"def",
"karplus_strong",
"(",
"freq",
",",
"tau",
"=",
"2e4",
",",
"memory",
"=",
"white_noise",
")",
":",
"return",
"comb",
".",
"tau",
"(",
"2",
"*",
"pi",
"/",
"freq",
",",
"tau",
")",
".",
"linearize",
"(",
")",
"(",
"zeros",
"(",
")",
",",
"memory",
"=",
"memory",
")"
] | Karplus-Strong "digitar" synthesis algorithm.
Parameters
----------
freq :
Frequency, in rad/sample.
tau :
Time decay (up to ``1/e``, or -8.686 dB), in number of samples. Defaults
to 2e4. Be careful: using the default value will make duration different
on each sample rate value. Use ``sHz`` if you need that independent from
the sample rate and in seconds unit.
memory :
Memory data for the comb filter (delayed "output" data in memory).
Defaults to the ``white_noise`` function.
Returns
-------
Stream instance with the synthesized data.
Note
----
The fractional delays are solved by exponent linearization.
See Also
--------
sHz :
Second and hertz constants from samples/second rate.
white_noise :
White noise stream generator. | [
"Karplus",
"-",
"Strong",
"digitar",
"synthesis",
"algorithm",
"."
] | python | train |
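The audiolazy record above builds Karplus-Strong from a linearized comb filter; for orientation, here is a rough standalone sketch of the classic averaging-delay-line variant (numpy only, frequency and duration are illustrative, and this is not the comb.tau formulation used above):

import numpy as np

def karplus_strong_classic(freq_hz, rate=44100, duration=1.0, seed=0):
    # Noise burst in a circular delay line, smoothed by a two-point average.
    rng = np.random.default_rng(seed)
    n_delay = int(round(rate / freq_hz))
    buf = rng.uniform(-1.0, 1.0, n_delay)
    out = np.empty(int(rate * duration))
    for i in range(out.size):
        j = i % n_delay
        out[i] = buf[j]
        buf[j] = 0.5 * (buf[j] + buf[(j + 1) % n_delay])
    return out

samples = karplus_strong_classic(440.0)  # roughly an A4 plucked-string tone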
openid/python-openid | openid/association.py | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/association.py#L261-L297 | def fromExpiresIn(cls, expires_in, handle, secret, assoc_type):
"""
This is an alternate constructor used by the OpenID consumer
library to create associations. C{L{OpenIDStore
<openid.store.interface.OpenIDStore>}} implementations
shouldn't use this constructor.
@param expires_in: This is the amount of time this association
is good for, measured in seconds since the association was
issued.
@type expires_in: C{int}
@param handle: This is the handle the server gave this
association.
@type handle: C{str}
@param secret: This is the shared secret the server generated
for this association.
@type secret: C{str}
@param assoc_type: This is the type of association this
instance represents. The only valid value of this field
at this time is C{'HMAC-SHA1'}, but new types may be
defined in the future.
@type assoc_type: C{str}
"""
issued = int(time.time())
lifetime = expires_in
return cls(handle, secret, issued, lifetime, assoc_type) | [
"def",
"fromExpiresIn",
"(",
"cls",
",",
"expires_in",
",",
"handle",
",",
"secret",
",",
"assoc_type",
")",
":",
"issued",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"lifetime",
"=",
"expires_in",
"return",
"cls",
"(",
"handle",
",",
"secret",
",",
"issued",
",",
"lifetime",
",",
"assoc_type",
")"
] | This is an alternate constructor used by the OpenID consumer
library to create associations. C{L{OpenIDStore
<openid.store.interface.OpenIDStore>}} implementations
shouldn't use this constructor.
@param expires_in: This is the amount of time this association
is good for, measured in seconds since the association was
issued.
@type expires_in: C{int}
@param handle: This is the handle the server gave this
association.
@type handle: C{str}
@param secret: This is the shared secret the server generated
for this association.
@type secret: C{str}
@param assoc_type: This is the type of association this
instance represents. The only valid value of this field
at this time is C{'HMAC-SHA1'}, but new types may be
defined in the future.
@type assoc_type: C{str} | [
"This",
"is",
"an",
"alternate",
"constructor",
"used",
"by",
"the",
"OpenID",
"consumer",
"library",
"to",
"create",
"associations",
".",
"C",
"{",
"L",
"{",
"OpenIDStore",
"<openid",
".",
"store",
".",
"interface",
".",
"OpenIDStore",
">",
"}}",
"implementations",
"shouldn",
"t",
"use",
"this",
"constructor",
"."
] | python | train |
liip/taxi | taxi/timesheet/parser.py | https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/timesheet/parser.py#L292-L305 | def extract_flags_from_text(self, text):
"""
Extract the flags from the given text and return a :class:`set` of flag values. See
:class:`~taxi.timesheet.lines.Entry` for a list of existing flags.
"""
flags = set()
reversed_flags_repr = {v: k for k, v in self.flags_repr.items()}
for flag_repr in text:
if flag_repr not in reversed_flags_repr:
raise KeyError("Flag '%s' is not recognized" % flag_repr)
else:
flags.add(reversed_flags_repr[flag_repr])
return flags | [
"def",
"extract_flags_from_text",
"(",
"self",
",",
"text",
")",
":",
"flags",
"=",
"set",
"(",
")",
"reversed_flags_repr",
"=",
"{",
"v",
":",
"k",
"for",
"k",
",",
"v",
"in",
"self",
".",
"flags_repr",
".",
"items",
"(",
")",
"}",
"for",
"flag_repr",
"in",
"text",
":",
"if",
"flag_repr",
"not",
"in",
"reversed_flags_repr",
":",
"raise",
"KeyError",
"(",
"\"Flag '%s' is not recognized\"",
"%",
"flag_repr",
")",
"else",
":",
"flags",
".",
"add",
"(",
"reversed_flags_repr",
"[",
"flag_repr",
"]",
")",
"return",
"flags"
] | Extract the flags from the given text and return a :class:`set` of flag values. See
:class:`~taxi.timesheet.lines.Entry` for a list of existing flags. | [
"Extract",
"the",
"flags",
"from",
"the",
"given",
"text",
"and",
"return",
"a",
":",
"class",
":",
"set",
"of",
"flag",
"values",
".",
"See",
":",
"class",
":",
"~taxi",
".",
"timesheet",
".",
"lines",
".",
"Entry",
"for",
"a",
"list",
"of",
"existing",
"flags",
"."
] | python | train |
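The reverse lookup in extract_flags_from_text is easy to mirror outside the taxi classes; a small self-contained sketch (the flag map below is invented for illustration, not taxi's real one):

FLAGS_REPR = {'ignored': '?', 'pushed': '='}  # hypothetical flag -> symbol map

def extract_flags(text, flags_repr=FLAGS_REPR):
    reversed_repr = {v: k for k, v in flags_repr.items()}
    flags = set()
    for char in text:
        if char not in reversed_repr:
            raise KeyError("Flag '%s' is not recognized" % char)
        flags.add(reversed_repr[char])
    return flags

print(extract_flags('?='))  # {'ignored', 'pushed'}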
NoviceLive/intellicoder | intellicoder/executables/elf.py | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/executables/elf.py#L44-L52 | def get_section_data(self, name):
"""Get the data of the section."""
logging.debug(_('Obtaining ELF section: %s'), name)
section = self.binary.get_section_by_name(name)
if section:
return section.data()
else:
logging.error(_('Section not found: %s'), name)
return b'' | [
"def",
"get_section_data",
"(",
"self",
",",
"name",
")",
":",
"logging",
".",
"debug",
"(",
"_",
"(",
"'Obtaining ELF section: %s'",
")",
",",
"name",
")",
"section",
"=",
"self",
".",
"binary",
".",
"get_section_by_name",
"(",
"name",
")",
"if",
"section",
":",
"return",
"section",
".",
"data",
"(",
")",
"else",
":",
"logging",
".",
"error",
"(",
"_",
"(",
"'Section no found: %s'",
")",
",",
"name",
")",
"return",
"b''"
] | Get the data of the section. | [
"Get",
"the",
"data",
"of",
"the",
"section",
"."
] | python | train |
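get_section_data above wraps an already-opened binary; with pyelftools used directly, the equivalent read looks roughly like this (file path and section name are placeholders):

from elftools.elf.elffile import ELFFile  # pyelftools

def read_section(path, name='.text'):
    with open(path, 'rb') as stream:
        elf = ELFFile(stream)
        section = elf.get_section_by_name(name)
        return section.data() if section is not None else b''

payload = read_section('/bin/true')  # placeholder ELF binary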
jlmadurga/permabots | permabots/views/api/hook.py | https://github.com/jlmadurga/permabots/blob/781a91702529a23fe7bc2aa84c5d88e961412466/permabots/views/api/hook.py#L106-L115 | def get(self, request, bot_id, id, format=None):
"""
Get list of telegram recipients of a hook
---
serializer: TelegramRecipientSerializer
responseMessages:
- code: 401
message: Not authenticated
"""
return super(TelegramRecipientList, self).get(request, bot_id, id, format) | [
"def",
"get",
"(",
"self",
",",
"request",
",",
"bot_id",
",",
"id",
",",
"format",
"=",
"None",
")",
":",
"return",
"super",
"(",
"TelegramRecipientList",
",",
"self",
")",
".",
"get",
"(",
"request",
",",
"bot_id",
",",
"id",
",",
"format",
")"
] | Get list of telegram recipients of a hook
---
serializer: TelegramRecipientSerializer
responseMessages:
- code: 401
message: Not authenticated | [
"Get",
"list",
"of",
"telegram",
"recipients",
"of",
"a",
"hook",
"---",
"serializer",
":",
"TelegramRecipientSerializer",
"responseMessages",
":",
"-",
"code",
":",
"401",
"message",
":",
"Not",
"authenticated"
] | python | train |
pantsbuild/pants | src/python/pants/java/nailgun_executor.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/java/nailgun_executor.py#L228-L242 | def ensure_connectable(self, nailgun):
"""Ensures that a nailgun client is connectable or raises NailgunError."""
attempt_count = 1
while 1:
try:
with closing(nailgun.try_connect()) as sock:
logger.debug('Verified new ng server is connectable at {}'.format(sock.getpeername()))
return
except nailgun.NailgunConnectionError:
if attempt_count >= self._connect_attempts:
logger.debug('Failed to connect to ng after {} attempts'.format(self._connect_attempts))
raise # Re-raise the NailgunConnectionError which provides more context to the user.
attempt_count += 1
time.sleep(self.WAIT_INTERVAL_SEC) | [
"def",
"ensure_connectable",
"(",
"self",
",",
"nailgun",
")",
":",
"attempt_count",
"=",
"1",
"while",
"1",
":",
"try",
":",
"with",
"closing",
"(",
"nailgun",
".",
"try_connect",
"(",
")",
")",
"as",
"sock",
":",
"logger",
".",
"debug",
"(",
"'Verified new ng server is connectable at {}'",
".",
"format",
"(",
"sock",
".",
"getpeername",
"(",
")",
")",
")",
"return",
"except",
"nailgun",
".",
"NailgunConnectionError",
":",
"if",
"attempt_count",
">=",
"self",
".",
"_connect_attempts",
":",
"logger",
".",
"debug",
"(",
"'Failed to connect to ng after {} attempts'",
".",
"format",
"(",
"self",
".",
"_connect_attempts",
")",
")",
"raise",
"# Re-raise the NailgunConnectionError which provides more context to the user.",
"attempt_count",
"+=",
"1",
"time",
".",
"sleep",
"(",
"self",
".",
"WAIT_INTERVAL_SEC",
")"
] | Ensures that a nailgun client is connectable or raises NailgunError. | [
"Ensures",
"that",
"a",
"nailgun",
"client",
"is",
"connectable",
"or",
"raises",
"NailgunError",
"."
] | python | train |
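ensure_connectable is a bounded retry loop around a connect attempt; the same pattern with a plain TCP socket, detached from nailgun (host, port and retry limits are illustrative only):

import socket
import time

def ensure_connectable(host, port, attempts=5, wait=0.5):
    for attempt in range(1, attempts + 1):
        try:
            with socket.create_connection((host, port), timeout=1.0) as sock:
                return sock.getpeername()  # connectable; report the peer
        except OSError:
            if attempt == attempts:
                raise  # out of attempts, re-raise the connection error
            time.sleep(wait)

# ensure_connectable('127.0.0.1', 2113)  # hypothetical server address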
Zaeb0s/loop-function | loopfunction/loopfunction.py | https://github.com/Zaeb0s/loop-function/blob/5999132aca5cf79b34e4ad5c05f9185712f1b583/loopfunction/loopfunction.py#L48-L61 | def _loop(self, *args, **kwargs):
"""Loops the target function
:param args: The args specified on initiation
:param kwargs: The kwargs specified on initiation
"""
self.on_start(*self.on_start_args, **self.on_start_kwargs)
try:
while not self._stop_signal:
self.target(*args, **kwargs)
finally:
self.on_stop(*self.on_stop_args, **self.on_stop_kwargs)
self._stop_signal = False
self._lock.set() | [
"def",
"_loop",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"on_start",
"(",
"*",
"self",
".",
"on_start_args",
",",
"*",
"*",
"self",
".",
"on_start_kwargs",
")",
"try",
":",
"while",
"not",
"self",
".",
"_stop_signal",
":",
"self",
".",
"target",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"self",
".",
"on_stop",
"(",
"*",
"self",
".",
"on_stop_args",
",",
"*",
"*",
"self",
".",
"on_stop_kwargs",
")",
"self",
".",
"_stop_signal",
"=",
"False",
"self",
".",
"_lock",
".",
"set",
"(",
")"
] | Loops the target function
:param args: The args specified on initiation
:param kwargs: The kwargs specified on initiation | [
"Loops",
"the",
"target",
"function"
] | python | train |
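_loop above is the usual run-until-stop-signal pattern; a self-contained threading.Event version for comparison (the target callable and timings are made up, and the on_start/on_stop hooks are omitted):

import threading
import time

class Loop:
    def __init__(self, target):
        self.target = target
        self._stop = threading.Event()
        self._thread = None

    def start(self):
        self._stop.clear()
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        while not self._stop.is_set():
            self.target()

    def stop(self):
        self._stop.set()
        self._thread.join()

loop = Loop(lambda: time.sleep(0.01))
loop.start(); time.sleep(0.1); loop.stop()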
siznax/wptools | wptools/wikidata.py | https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/wikidata.py#L87-L93 | def _pop_entities(self, limit=50):
"""
returns up to limit entities and pops them off the list
"""
pop = self.data['entities'][:limit]
del self.data['entities'][:limit]
return pop | [
"def",
"_pop_entities",
"(",
"self",
",",
"limit",
"=",
"50",
")",
":",
"pop",
"=",
"self",
".",
"data",
"[",
"'entities'",
"]",
"[",
":",
"limit",
"]",
"del",
"self",
".",
"data",
"[",
"'entities'",
"]",
"[",
":",
"limit",
"]",
"return",
"pop"
] | returns up to limit entities and pops them off the list | [
"returns",
"up",
"to",
"limit",
"entities",
"and",
"pops",
"them",
"off",
"the",
"list"
] | python | train |
bunq/sdk_python | bunq/sdk/security.py | https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/security.py#L166-L179 | def _add_header_client_encryption_key(api_context, key, custom_headers):
"""
:type api_context: bunq.sdk.context.ApiContext
:type key: bytes
:type custom_headers: dict[str, str]
:rtype: None
"""
public_key_server = api_context.installation_context.public_key_server
key_cipher = PKCS1_v1_5_Cipher.new(public_key_server)
key_encrypted = key_cipher.encrypt(key)
key_encrypted_base64 = base64.b64encode(key_encrypted).decode()
custom_headers[_HEADER_CLIENT_ENCRYPTION_KEY] = key_encrypted_base64 | [
"def",
"_add_header_client_encryption_key",
"(",
"api_context",
",",
"key",
",",
"custom_headers",
")",
":",
"public_key_server",
"=",
"api_context",
".",
"installation_context",
".",
"public_key_server",
"key_cipher",
"=",
"PKCS1_v1_5_Cipher",
".",
"new",
"(",
"public_key_server",
")",
"key_encrypted",
"=",
"key_cipher",
".",
"encrypt",
"(",
"key",
")",
"key_encrypted_base64",
"=",
"base64",
".",
"b64encode",
"(",
"key_encrypted",
")",
".",
"decode",
"(",
")",
"custom_headers",
"[",
"_HEADER_CLIENT_ENCRYPTION_KEY",
"]",
"=",
"key_encrypted_base64"
] | :type api_context: bunq.sdk.context.ApiContext
:type key: bytes
:type custom_headers: dict[str, str]
:rtype: None | [
":",
"type",
"api_context",
":",
"bunq",
".",
"sdk",
".",
"context",
".",
"ApiContext",
":",
"type",
"key",
":",
"bytes",
":",
"type",
"custom_headers",
":",
"dict",
"[",
"str",
"str",
"]"
] | python | train |
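The bunq helper RSA-encrypts a symmetric session key and base64-encodes it into a request header. With pycryptodome the wrapping step looks approximately like this; the key material is generated on the fly, and the header name is only a guess at the _HEADER_CLIENT_ENCRYPTION_KEY constant, whose value is not shown in the record:

import base64
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5

server_key = RSA.generate(2048).publickey()  # stand-in for the server's public key
session_key = b'0' * 32                      # stand-in symmetric key

cipher = PKCS1_v1_5.new(server_key)
wrapped = base64.b64encode(cipher.encrypt(session_key)).decode()
custom_headers = {'X-Bunq-Client-Encryption-Key': wrapped}  # header name assumed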
nivbend/mock-open | mock_open/mocks.py | https://github.com/nivbend/mock-open/blob/eb7c9484b8c0ed58ba81ad04f44974e3dfa1b023/mock_open/mocks.py#L77-L91 | def set_properties(self, path, mode):
"""Set file's properties (name and mode).
This function is also in charge of swapping between textual and
binary streams.
"""
self.name = path
self.mode = mode
if 'b' in self.mode:
if not isinstance(self.read_data, bytes):
self.read_data = bytes(self.read_data, encoding='utf8')
else:
if not isinstance(self.read_data, str):
self.read_data = str(self.read_data, encoding='utf8') | [
"def",
"set_properties",
"(",
"self",
",",
"path",
",",
"mode",
")",
":",
"self",
".",
"name",
"=",
"path",
"self",
".",
"mode",
"=",
"mode",
"if",
"'b'",
"in",
"self",
".",
"mode",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"read_data",
",",
"bytes",
")",
":",
"self",
".",
"read_data",
"=",
"bytes",
"(",
"self",
".",
"read_data",
",",
"encoding",
"=",
"'utf8'",
")",
"else",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"read_data",
",",
"str",
")",
":",
"self",
".",
"read_data",
"=",
"str",
"(",
"self",
".",
"read_data",
",",
"encoding",
"=",
"'utf8'",
")"
] | Set file's properties (name and mode).
This function is also in charge of swapping between textual and
binary streams. | [
"Set",
"file",
"s",
"properties",
"(",
"name",
"and",
"mode",
")",
"."
] | python | train |
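set_properties switches read_data between str and bytes based on the mode string; the coercion on its own is just this (minimal sketch):

def coerce_read_data(read_data, mode):
    # binary modes want bytes, text modes want str
    if 'b' in mode:
        return read_data if isinstance(read_data, bytes) else bytes(read_data, encoding='utf8')
    return read_data if isinstance(read_data, str) else str(read_data, encoding='utf8')

assert coerce_read_data('abc', 'rb') == b'abc'
assert coerce_read_data(b'abc', 'r') == 'abc'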
eng-tools/sfsimodels | sfsimodels/models/soils.py | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1197-L1214 | def shear_vel_at_depth(self, y_c):
"""
Get the shear wave velocity at a depth.
:param y_c: float, depth from surface
:return:
"""
sl = self.get_soil_at_depth(y_c)
if y_c <= self.gwl:
saturation = False
else:
saturation = True
if hasattr(sl, "get_shear_vel_at_v_eff_stress"):
v_eff = self.get_v_eff_stress_at_depth(y_c)
vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation)
else:
vs = sl.get_shear_vel(saturation)
return vs | [
"def",
"shear_vel_at_depth",
"(",
"self",
",",
"y_c",
")",
":",
"sl",
"=",
"self",
".",
"get_soil_at_depth",
"(",
"y_c",
")",
"if",
"y_c",
"<=",
"self",
".",
"gwl",
":",
"saturation",
"=",
"False",
"else",
":",
"saturation",
"=",
"True",
"if",
"hasattr",
"(",
"sl",
",",
"\"get_shear_vel_at_v_eff_stress\"",
")",
":",
"v_eff",
"=",
"self",
".",
"get_v_eff_stress_at_depth",
"(",
"y_c",
")",
"vs",
"=",
"sl",
".",
"get_shear_vel_at_v_eff_stress",
"(",
"v_eff",
",",
"saturation",
")",
"else",
":",
"vs",
"=",
"sl",
".",
"get_shear_vel",
"(",
"saturation",
")",
"return",
"vs"
] | Get the shear wave velocity at a depth.
:param y_c: float, depth from surface
:return: | [
"Get",
"the",
"shear",
"wave",
"velocity",
"at",
"a",
"depth",
"."
] | python | train |
mlaprise/genSpline | genSpline/genSpline.py | https://github.com/mlaprise/genSpline/blob/cedfb45bd6afde47042dd71292549493f27cd136/genSpline/genSpline.py#L52-L70 | def gaussianPulse(t, FWHM, t0, P0 = 1.0, m = 1, C = 0):
"""
Generate a gaussian/supergaussian envelope pulse
* field_amp: output gaussian pulse envelope (amplitude).
* t: vector of times at which to compute u
* t0: center of pulse (default = 0)
* FWHM: full-width at half-intensity of pulse (default = 1)
* P0: peak intensity of the pulse @ t=t0 (default = 1)
* m: Gaussian order (default = 1)
* C: chirp parameter (default = 0)
"""
t_zero = FWHM/sqrt(4.0*log(2.0))
amp = sqrt(P0)
real_exp_arg = -pow(((t-t0)/t_zero),2.0*m)/2.0
euler1 = cos(-C*real_exp_arg)
euler2 = sin(-C*real_exp_arg)
return amp*exp(real_exp_arg)*euler1 + amp*exp(real_exp_arg)*euler2*1.0j | [
"def",
"gaussianPulse",
"(",
"t",
",",
"FWHM",
",",
"t0",
",",
"P0",
"=",
"1.0",
",",
"m",
"=",
"1",
",",
"C",
"=",
"0",
")",
":",
"t_zero",
"=",
"FWHM",
"/",
"sqrt",
"(",
"4.0",
"*",
"log",
"(",
"2.0",
")",
")",
"amp",
"=",
"sqrt",
"(",
"P0",
")",
"real_exp_arg",
"=",
"-",
"pow",
"(",
"(",
"(",
"t",
"-",
"t0",
")",
"/",
"t_zero",
")",
",",
"2.0",
"*",
"m",
")",
"/",
"2.0",
"euler1",
"=",
"cos",
"(",
"-",
"C",
"*",
"real_exp_arg",
")",
"euler2",
"=",
"sin",
"(",
"-",
"C",
"*",
"real_exp_arg",
")",
"return",
"amp",
"*",
"exp",
"(",
"real_exp_arg",
")",
"*",
"euler1",
"+",
"amp",
"*",
"exp",
"(",
"real_exp_arg",
")",
"*",
"euler2",
"*",
"1.0j"
] | Generate a gaussian/supergaussian envelope pulse
* field_amp: output gaussian pulse envelope (amplitude).
* t: vector of times at which to compute u
* t0: center of pulse (default = 0)
* FWHM: full-width at half-intensity of pulse (default = 1)
* P0: peak intensity of the pulse @ t=t0 (default = 1)
* m: Gaussian order (default = 1)
* C: chirp parameter (default = 0) | [
"Geneate",
"a",
"gaussian",
"/",
"supergaussiance",
"envelope",
"pulse"
] | python | train |
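The chirped (super-)Gaussian envelope defined above vectorises directly with numpy; an illustrative standalone version (time grid and pulse parameters are arbitrary):

import numpy as np

def gaussian_pulse(t, fwhm, t0=0.0, p0=1.0, m=1, c=0.0):
    t_zero = fwhm / np.sqrt(4.0 * np.log(2.0))
    arg = -((t - t0) / t_zero) ** (2 * m) / 2.0
    # exp(arg) * [cos(-C*arg) + 1j*sin(-C*arg)], as in the record above
    return np.sqrt(p0) * np.exp(arg) * (np.cos(-c * arg) + 1j * np.sin(-c * arg))

t = np.linspace(-10.0, 10.0, 2001)
field = gaussian_pulse(t, fwhm=2.0, m=2, c=1.5)  # chirped 2nd-order super-Gaussian
intensity = np.abs(field) ** 2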
opengridcc/opengrid | opengrid/library/plotting.py | https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/plotting.py#L128-L177 | def boxplot(df, plot_mean=False, plot_ids=None, title=None, xlabel=None, ylabel=None):
"""
Plot boxplots
Plot the boxplots of a dataframe in time
Parameters
----------
df: Pandas Dataframe
Every column is a timeseries
plot_mean: bool
Whether or not to plot the means
plot_ids: [str]
List of id's to plot
Returns
-------
matplotlib figure
"""
df = df.applymap(float)
description = df.apply(pd.DataFrame.describe, axis=1)
# plot
plt = plot_style()
plt.boxplot(df)
#plt.setp(bp['boxes'], color='black')
#plt.setp(bp['whiskers'], color='black')
if plot_ids is not None:
for id in plot_ids:
if id in df.columns:
plt.scatter(x=range(1, len(df) + 1), y=df[id], label=str(id))
if plot_mean:
plt.scatter(x=range(1, len(df) + 1), y=description['mean'], label="Mean", color='k', s=30, marker='+')
ax = plt.gca()
ax.set_xticklabels(df.index)
#plt.xticks(rotation=45)
plt.legend()
if title is not None:
plt.title(title)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
return plt.gcf() | [
"def",
"boxplot",
"(",
"df",
",",
"plot_mean",
"=",
"False",
",",
"plot_ids",
"=",
"None",
",",
"title",
"=",
"None",
",",
"xlabel",
"=",
"None",
",",
"ylabel",
"=",
"None",
")",
":",
"df",
"=",
"df",
".",
"applymap",
"(",
"float",
")",
"description",
"=",
"df",
".",
"apply",
"(",
"pd",
".",
"DataFrame",
".",
"describe",
",",
"axis",
"=",
"1",
")",
"# plot",
"plt",
"=",
"plot_style",
"(",
")",
"plt",
".",
"boxplot",
"(",
"df",
")",
"#plt.setp(bp['boxes'], color='black')",
"#plt.setp(bp['whiskers'], color='black')",
"if",
"plot_ids",
"is",
"not",
"None",
":",
"for",
"id",
"in",
"plot_ids",
":",
"if",
"id",
"in",
"df",
".",
"columns",
":",
"plt",
".",
"scatter",
"(",
"x",
"=",
"range",
"(",
"1",
",",
"len",
"(",
"df",
")",
"+",
"1",
")",
",",
"y",
"=",
"df",
"[",
"id",
"]",
",",
"label",
"=",
"str",
"(",
"id",
")",
")",
"if",
"plot_mean",
":",
"plt",
".",
"scatter",
"(",
"x",
"=",
"range",
"(",
"1",
",",
"len",
"(",
"df",
")",
"+",
"1",
")",
",",
"y",
"=",
"description",
"[",
"'mean'",
"]",
",",
"label",
"=",
"\"Mean\"",
",",
"color",
"=",
"'k'",
",",
"s",
"=",
"30",
",",
"marker",
"=",
"'+'",
")",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"ax",
".",
"set_xticklabels",
"(",
"df",
".",
"index",
")",
"#plt.xticks(rotation=45)",
"plt",
".",
"legend",
"(",
")",
"if",
"title",
"is",
"not",
"None",
":",
"plt",
".",
"title",
"(",
"title",
")",
"if",
"xlabel",
"is",
"not",
"None",
":",
"plt",
".",
"xlabel",
"(",
"xlabel",
")",
"if",
"ylabel",
"is",
"not",
"None",
":",
"plt",
".",
"ylabel",
"(",
"ylabel",
")",
"return",
"plt",
".",
"gcf",
"(",
")"
] | Plot boxplots
Plot the boxplots of a dataframe in time
Parameters
----------
df: Pandas Dataframe
Every column is a timeseries
plot_mean: bool
Whether or not to plot the means
plot_ids: [str]
List of id's to plot
Returns
-------
matplotlib figure | [
"Plot",
"boxplots"
] | python | train |
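A stripped-down version of the boxplot helper above, using only pandas and matplotlib; the data is synthetic, the opengrid plot_style wrapper and the plot_ids option are skipped, and only the mean overlay is reproduced:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# one column per day, many readings per column (synthetic data)
df = pd.DataFrame(np.random.randn(50, 7),
                  columns=pd.date_range('2024-01-01', periods=7, freq='D'))

plt.boxplot([df[c] for c in df.columns])
plt.scatter(range(1, len(df.columns) + 1), df.mean(),
            color='k', marker='+', s=30, label='Mean')
plt.xticks(range(1, len(df.columns) + 1), df.columns.strftime('%m-%d'))
plt.legend()
plt.show()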
onnx/onnxmltools | onnxmltools/convert/coreml/shape_calculators/Classifier.py | https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/coreml/shape_calculators/Classifier.py#L13-L66 | def calculate_traditional_classifier_output_shapes(operator):
'''
For classifiers, allowed input/output patterns are
1. [N, C_1], ..., [N, C_n] ---> [N], Sequence of Map
2. [N, C_1], ..., [N, C_n] ---> [N]
For regressors, allowed input/output patterns are
1. [N, C_1], ..., [N, C_n] ---> [N, 1]
Core ML classifiers and regressors support multiple input feature tensors, so we need to concatenate them before
feeding them into their ONNX counterparts. Note that the N must be 1 as long as ZipMap only produces dictionary.
'''
check_input_and_output_numbers(operator, input_count_range=[1, None], output_count_range=[1, 2])
check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType, FloatType, Int64Type])
if any(len(variable.type.shape) != 2 for variable in operator.inputs):
raise RuntimeError('Input(s) must be [N, C]-tensor(s)')
model_type = operator.raw_operator.WhichOneof('Type')
if model_type == 'treeEnsembleClassifier':
class_label_type = operator.raw_operator.treeEnsembleClassifier.WhichOneof('ClassLabels')
elif model_type == 'glmClassifier':
class_label_type = operator.raw_operator.glmClassifier.WhichOneof('ClassLabels')
elif model_type == 'supportVectorClassifier':
class_label_type = operator.raw_operator.supportVectorClassifier.WhichOneof('ClassLabels')
else:
raise ValueError('%s has no class label' % model_type)
N = operator.inputs[0].type.shape[0]
if operator.target_opset < 7:
output_shape = [1, 1]
else:
output_shape = [N]
if class_label_type == 'stringClassLabels':
operator.outputs[0].type = StringTensorType(output_shape, doc_string=operator.outputs[0].type.doc_string)
if len(operator.outputs) == 2:
if operator.target_opset < 7:
operator.outputs[1].type = DictionaryType(StringTensorType([1]), FloatTensorType([1]),
doc_string=operator.outputs[1].type.doc_string)
else:
operator.outputs[1].type = SequenceType(DictionaryType(StringTensorType([]), FloatTensorType([])),
doc_string=operator.outputs[1].type.doc_string)
elif class_label_type == 'int64ClassLabels':
operator.outputs[0].type = Int64TensorType(output_shape, doc_string=operator.outputs[0].type.doc_string)
if len(operator.outputs) == 2:
if operator.target_opset < 7:
operator.outputs[1].type = DictionaryType(Int64TensorType([1]), FloatTensorType([1]),
doc_string=operator.outputs[1].type.doc_string)
else:
operator.outputs[1].type = SequenceType(DictionaryType(Int64TensorType([]), FloatTensorType([])),
doc_string=operator.outputs[1].type.doc_string)
else:
raise ValueError('Traditional classifier must include label information') | [
"def",
"calculate_traditional_classifier_output_shapes",
"(",
"operator",
")",
":",
"check_input_and_output_numbers",
"(",
"operator",
",",
"input_count_range",
"=",
"[",
"1",
",",
"None",
"]",
",",
"output_count_range",
"=",
"[",
"1",
",",
"2",
"]",
")",
"check_input_and_output_types",
"(",
"operator",
",",
"good_input_types",
"=",
"[",
"FloatTensorType",
",",
"Int64TensorType",
",",
"FloatType",
",",
"Int64Type",
"]",
")",
"if",
"any",
"(",
"len",
"(",
"variable",
".",
"type",
".",
"shape",
")",
"!=",
"2",
"for",
"variable",
"in",
"operator",
".",
"inputs",
")",
":",
"raise",
"RuntimeError",
"(",
"'Input(s) must be [N, C]-tensor(s)'",
")",
"model_type",
"=",
"operator",
".",
"raw_operator",
".",
"WhichOneof",
"(",
"'Type'",
")",
"if",
"model_type",
"==",
"'treeEnsembleClassifier'",
":",
"class_label_type",
"=",
"operator",
".",
"raw_operator",
".",
"treeEnsembleClassifier",
".",
"WhichOneof",
"(",
"'ClassLabels'",
")",
"elif",
"model_type",
"==",
"'glmClassifier'",
":",
"class_label_type",
"=",
"operator",
".",
"raw_operator",
".",
"glmClassifier",
".",
"WhichOneof",
"(",
"'ClassLabels'",
")",
"elif",
"model_type",
"==",
"'supportVectorClassifier'",
":",
"class_label_type",
"=",
"operator",
".",
"raw_operator",
".",
"supportVectorClassifier",
".",
"WhichOneof",
"(",
"'ClassLabels'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'%s has no class label'",
"%",
"model_type",
")",
"N",
"=",
"operator",
".",
"inputs",
"[",
"0",
"]",
".",
"type",
".",
"shape",
"[",
"0",
"]",
"if",
"operator",
".",
"target_opset",
"<",
"7",
":",
"output_shape",
"=",
"[",
"1",
",",
"1",
"]",
"else",
":",
"output_shape",
"=",
"[",
"N",
"]",
"if",
"class_label_type",
"==",
"'stringClassLabels'",
":",
"operator",
".",
"outputs",
"[",
"0",
"]",
".",
"type",
"=",
"StringTensorType",
"(",
"output_shape",
",",
"doc_string",
"=",
"operator",
".",
"outputs",
"[",
"0",
"]",
".",
"type",
".",
"doc_string",
")",
"if",
"len",
"(",
"operator",
".",
"outputs",
")",
"==",
"2",
":",
"if",
"operator",
".",
"target_opset",
"<",
"7",
":",
"operator",
".",
"outputs",
"[",
"1",
"]",
".",
"type",
"=",
"DictionaryType",
"(",
"StringTensorType",
"(",
"[",
"1",
"]",
")",
",",
"FloatTensorType",
"(",
"[",
"1",
"]",
")",
",",
"doc_string",
"=",
"operator",
".",
"outputs",
"[",
"1",
"]",
".",
"type",
".",
"doc_string",
")",
"else",
":",
"operator",
".",
"outputs",
"[",
"1",
"]",
".",
"type",
"=",
"SequenceType",
"(",
"DictionaryType",
"(",
"StringTensorType",
"(",
"[",
"]",
")",
",",
"FloatTensorType",
"(",
"[",
"]",
")",
")",
",",
"doc_string",
"=",
"operator",
".",
"outputs",
"[",
"1",
"]",
".",
"type",
".",
"doc_string",
")",
"elif",
"class_label_type",
"==",
"'int64ClassLabels'",
":",
"operator",
".",
"outputs",
"[",
"0",
"]",
".",
"type",
"=",
"Int64TensorType",
"(",
"output_shape",
",",
"doc_string",
"=",
"operator",
".",
"outputs",
"[",
"0",
"]",
".",
"type",
".",
"doc_string",
")",
"if",
"len",
"(",
"operator",
".",
"outputs",
")",
"==",
"2",
":",
"if",
"operator",
".",
"target_opset",
"<",
"7",
":",
"operator",
".",
"outputs",
"[",
"1",
"]",
".",
"type",
"=",
"DictionaryType",
"(",
"Int64TensorType",
"(",
"[",
"1",
"]",
")",
",",
"FloatTensorType",
"(",
"[",
"1",
"]",
")",
",",
"doc_string",
"=",
"operator",
".",
"outputs",
"[",
"1",
"]",
".",
"type",
".",
"doc_string",
")",
"else",
":",
"operator",
".",
"outputs",
"[",
"1",
"]",
".",
"type",
"=",
"SequenceType",
"(",
"DictionaryType",
"(",
"Int64TensorType",
"(",
"[",
"]",
")",
",",
"FloatTensorType",
"(",
"[",
"]",
")",
")",
",",
"doc_string",
"=",
"operator",
".",
"outputs",
"[",
"1",
"]",
".",
"type",
".",
"doc_string",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Traditional classifier must include label information'",
")"
] | For classifiers, allowed input/output patterns are
1. [N, C_1], ..., [N, C_n] ---> [N], Sequence of Map
2. [N, C_1], ..., [N, C_n] ---> [N]
For regressors, allowed input/output patterns are
1. [N, C_1], ..., [N, C_n] ---> [N, 1]
Core ML classifiers and regressors support multiple input feature tensors, so we need to concatenate them before
feeding them into their ONNX counterparts. Note that the N must be 1 as long as ZipMap only produces dictionary. | [
"For",
"classifiers",
"allowed",
"input",
"/",
"output",
"patterns",
"are",
"1",
".",
"[",
"N",
"C_1",
"]",
"...",
"[",
"N",
"C_n",
"]",
"---",
">",
"[",
"N",
"]",
"Sequence",
"of",
"Map",
"2",
".",
"[",
"N",
"C_1",
"]",
"...",
"[",
"N",
"C_n",
"]",
"---",
">",
"[",
"N",
"]"
] | python | train |
ska-sa/katcp-python | katcp/kattypes.py | https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/kattypes.py#L586-L605 | def unpack(self, value):
"""Unpack the parameter using its kattype.
Parameters
----------
packed_value : str
The unescaped KATCP string to unpack.
Returns
-------
value : object
The unpacked value.
"""
# Wrap errors in FailReplies with information identifying the parameter
try:
return self._kattype.unpack(value, self.major)
except ValueError, message:
raise FailReply("Error in parameter %s (%s): %s" %
(self.position, self.name, message)) | [
"def",
"unpack",
"(",
"self",
",",
"value",
")",
":",
"# Wrap errors in FailReplies with information identifying the parameter",
"try",
":",
"return",
"self",
".",
"_kattype",
".",
"unpack",
"(",
"value",
",",
"self",
".",
"major",
")",
"except",
"ValueError",
",",
"message",
":",
"raise",
"FailReply",
"(",
"\"Error in parameter %s (%s): %s\"",
"%",
"(",
"self",
".",
"position",
",",
"self",
".",
"name",
",",
"message",
")",
")"
] | Unpack the parameter using its kattype.
Parameters
----------
packed_value : str
The unescaped KATCP string to unpack.
Returns
-------
value : object
The unpacked value. | [
"Unpack",
"the",
"parameter",
"using",
"its",
"kattype",
"."
] | python | train |
juju/charm-helpers | charmhelpers/contrib/unison/__init__.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/unison/__init__.py#L303-L314 | def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None,
gid=None, fatal=False):
"""Sync all hosts to an specific path
The type of group is integer, it allows user has permissions to
operate a directory have a different group id with the user id.
Propagates exception if any operation fails and fatal=True.
"""
if paths:
for host in collect_authed_hosts(peer_interface):
sync_to_peer(host, user, paths, verbose, cmd, gid, fatal) | [
"def",
"sync_to_peers",
"(",
"peer_interface",
",",
"user",
",",
"paths",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"cmd",
"=",
"None",
",",
"gid",
"=",
"None",
",",
"fatal",
"=",
"False",
")",
":",
"if",
"paths",
":",
"for",
"host",
"in",
"collect_authed_hosts",
"(",
"peer_interface",
")",
":",
"sync_to_peer",
"(",
"host",
",",
"user",
",",
"paths",
",",
"verbose",
",",
"cmd",
",",
"gid",
",",
"fatal",
")"
] | Sync all hosts to a specific path
The group is given as an integer; it allows the user to have permission to
operate on a directory whose group id differs from the user's id.
Propagates an exception if any operation fails and fatal=True. | [
"Sync",
"all",
"hosts",
"to",
"an",
"specific",
"path"
] | python | train |
caseyjlaw/rtpipe | rtpipe/interactive.py | https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/interactive.py#L548-L572 | def calcinds(data, threshold, ignoret=None):
""" Find indexes for data above (or below) given threshold. """
inds = []
for i in range(len(data['time'])):
snr = data['snrs'][i]
time = data['time'][i]
if (threshold >= 0 and snr > threshold):
if ignoret:
incl = [t0 for (t0, t1) in ignoret if np.round(time).astype(int) in range(t0,t1)]
logger.debug('{} {} {} {}'.format(np.round(time).astype(int), t0, t1, incl))
if not incl:
inds.append(i)
else:
inds.append(i)
elif threshold < 0 and snr < threshold:
if ignoret:
incl = [t0 for (t0, t1) in ignoret if np.round(time).astype(int) in range(t0,t1)]
logger.debug('{} {} {} {}'.format(np.round(time).astype(int), t0, t1, incl))
if not incl:
inds.append(i)
else:
inds.append(i)
return inds | [
"def",
"calcinds",
"(",
"data",
",",
"threshold",
",",
"ignoret",
"=",
"None",
")",
":",
"inds",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"data",
"[",
"'time'",
"]",
")",
")",
":",
"snr",
"=",
"data",
"[",
"'snrs'",
"]",
"[",
"i",
"]",
"time",
"=",
"data",
"[",
"'time'",
"]",
"[",
"i",
"]",
"if",
"(",
"threshold",
">=",
"0",
"and",
"snr",
">",
"threshold",
")",
":",
"if",
"ignoret",
":",
"incl",
"=",
"[",
"t0",
"for",
"(",
"t0",
",",
"t1",
")",
"in",
"ignoret",
"if",
"np",
".",
"round",
"(",
"time",
")",
".",
"astype",
"(",
"int",
")",
"in",
"range",
"(",
"t0",
",",
"t1",
")",
"]",
"logger",
".",
"debug",
"(",
"'{} {} {} {}'",
".",
"format",
"(",
"np",
".",
"round",
"(",
"time",
")",
".",
"astype",
"(",
"int",
")",
",",
"t0",
",",
"t1",
",",
"incl",
")",
")",
"if",
"not",
"incl",
":",
"inds",
".",
"append",
"(",
"i",
")",
"else",
":",
"inds",
".",
"append",
"(",
"i",
")",
"elif",
"threshold",
"<",
"0",
"and",
"snr",
"<",
"threshold",
":",
"if",
"ignoret",
":",
"incl",
"=",
"[",
"t0",
"for",
"(",
"t0",
",",
"t1",
")",
"in",
"ignoret",
"if",
"np",
".",
"round",
"(",
"time",
")",
".",
"astype",
"(",
"int",
")",
"in",
"range",
"(",
"t0",
",",
"t1",
")",
"]",
"logger",
".",
"debug",
"(",
"'{} {} {} {}'",
".",
"format",
"(",
"np",
".",
"round",
"(",
"time",
")",
".",
"astype",
"(",
"int",
")",
",",
"t0",
",",
"t1",
",",
"incl",
")",
")",
"if",
"not",
"incl",
":",
"inds",
".",
"append",
"(",
"i",
")",
"else",
":",
"inds",
".",
"append",
"(",
"i",
")",
"return",
"inds"
] | Find indexes for data above (or below) given threshold. | [
"Find",
"indexes",
"for",
"data",
"above",
"(",
"or",
"below",
")",
"given",
"threshold",
"."
] | python | train |
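calcinds is essentially "indices whose SNR crosses the threshold, skipping integer times that fall inside an ignore window"; a compact standalone rendering with invented sample data:

def calc_inds(data, threshold, ignoret=None):
    ignoret = ignoret or []
    keep = []
    for i, (t, snr) in enumerate(zip(data['time'], data['snrs'])):
        if (threshold >= 0 and snr > threshold) or (threshold < 0 and snr < threshold):
            t_int = int(round(t))
            if not any(t0 <= t_int < t1 for (t0, t1) in ignoret):
                keep.append(i)
    return keep

data = {'time': [0.2, 1.7, 3.1, 4.9], 'snrs': [5.0, 9.0, 12.0, 8.5]}
print(calc_inds(data, threshold=8, ignoret=[(3, 4)]))  # [1, 3]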
bjmorgan/lattice_mc | lattice_mc/cluster.py | https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/cluster.py#L101-L113 | def remove_sites_from_neighbours( self, remove_labels ):
"""
Removes sites from the set of neighbouring sites if these have labels in remove_labels.
Args:
remove_labels (List) or (Str): List of Site labels to be removed from the cluster neighbour set.
Returns:
None
"""
if type( remove_labels ) is str:
remove_labels = [ remove_labels ]
self.neighbours = set( n for n in self.neighbours if n.label not in remove_labels ) | [
"def",
"remove_sites_from_neighbours",
"(",
"self",
",",
"remove_labels",
")",
":",
"if",
"type",
"(",
"remove_labels",
")",
"is",
"str",
":",
"remove_labels",
"=",
"[",
"remove_labels",
"]",
"self",
".",
"neighbours",
"=",
"set",
"(",
"n",
"for",
"n",
"in",
"self",
".",
"neighbours",
"if",
"n",
".",
"label",
"not",
"in",
"remove_labels",
")"
] | Removes sites from the set of neighbouring sites if these have labels in remove_labels.
Args:
Remove_labels (List) or (Str): List of Site labels to be removed from the cluster neighbour set.
Returns:
None | [
"Removes",
"sites",
"from",
"the",
"set",
"of",
"neighbouring",
"sites",
"if",
"these",
"have",
"labels",
"in",
"remove_labels",
"."
] | python | train |
gem/oq-engine | openquake/hmtk/seismicity/catalogue.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/catalogue.py#L395-L429 | def get_depth_distribution(self, depth_bins, normalisation=False,
bootstrap=None):
'''
Gets the depth distribution of the earthquake catalogue to return a
single histogram. Depths may be normalised. If uncertainties are found
in the catalogue the distribution may be bootstrap sampled
:param numpy.ndarray depth_bins:
Bin edges for the depths
:param bool normalisation:
Choose to normalise the results such that the total contributions
sum to 1.0 (True) or not (False)
:param int bootstrap:
Number of bootstrap samples
:returns:
Histogram of depth values
'''
if len(self.data['depth']) == 0:
# If depth information is missing
raise ValueError('Depths missing in catalogue')
if len(self.data['depthError']) == 0:
self.data['depthError'] = np.zeros(self.get_number_events(),
dtype=float)
return bootstrap_histogram_1D(self.data['depth'],
depth_bins,
self.data['depthError'],
normalisation=normalisation,
number_bootstraps=bootstrap,
boundaries=(0., None)) | [
"def",
"get_depth_distribution",
"(",
"self",
",",
"depth_bins",
",",
"normalisation",
"=",
"False",
",",
"bootstrap",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
".",
"data",
"[",
"'depth'",
"]",
")",
"==",
"0",
":",
"# If depth information is missing",
"raise",
"ValueError",
"(",
"'Depths missing in catalogue'",
")",
"if",
"len",
"(",
"self",
".",
"data",
"[",
"'depthError'",
"]",
")",
"==",
"0",
":",
"self",
".",
"data",
"[",
"'depthError'",
"]",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"get_number_events",
"(",
")",
",",
"dtype",
"=",
"float",
")",
"return",
"bootstrap_histogram_1D",
"(",
"self",
".",
"data",
"[",
"'depth'",
"]",
",",
"depth_bins",
",",
"self",
".",
"data",
"[",
"'depthError'",
"]",
",",
"normalisation",
"=",
"normalisation",
",",
"number_bootstraps",
"=",
"bootstrap",
",",
"boundaries",
"=",
"(",
"0.",
",",
"None",
")",
")"
] | Gets the depth distribution of the earthquake catalogue to return a
single histogram. Depths may be normalised. If uncertainties are found
in the catalogue the distribution may be bootstrap sampled
:param numpy.ndarray depth_bins:
Bin edges for the depths
:param bool normalisation:
Choose to normalise the results such that the total contributions
sum to 1.0 (True) or not (False)
:param int bootstrap:
Number of bootstrap samples
:returns:
Histogram of depth values | [
"Gets",
"the",
"depth",
"distribution",
"of",
"the",
"earthquake",
"catalogue",
"to",
"return",
"a",
"single",
"histogram",
".",
"Depths",
"may",
"be",
"normalised",
".",
"If",
"uncertainties",
"are",
"found",
"in",
"the",
"catalogue",
"the",
"distrbution",
"may",
"be",
"bootstrap",
"sampled"
] | python | train |
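Leaving the bootstrap machinery aside, the depth distribution reduces to an (optionally normalised) histogram; a rough numpy-only equivalent with invented depths:

import numpy as np

def depth_distribution(depths, depth_bins, normalisation=False):
    if len(depths) == 0:
        raise ValueError('Depths missing in catalogue')
    counts, _ = np.histogram(depths, bins=depth_bins)
    counts = counts.astype(float)
    return counts / counts.sum() if normalisation else counts

depths = np.array([2., 5., 7., 11., 14., 33.])
bins = np.array([0., 10., 20., 40.])
print(depth_distribution(depths, bins, normalisation=True))  # [0.5, 0.333..., 0.166...]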
spyder-ide/spyder | spyder/utils/qthelpers.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/qthelpers.py#L511-L548 | def calc_tools_spacing(tools_layout):
"""
Return a spacing (int) or None if we don't have the appropriate metrics
to calculate the spacing.
We're trying to adapt the spacing below the tools_layout spacing so that
the main_widget has the same vertical position as the editor widgets
(which have tabs above).
The required spacing is
spacing = tabbar_height - tools_height + offset
where the tabbar_heights were empirically determined for a combination of
operating systems and styles. Offsets were manually adjusted, so that the
heights of main_widgets and editor widgets match. This is probably
caused by a still not understood element of the layout and style metrics.
"""
metrics = { # (tabbar_height, offset)
'nt.fusion': (32, 0),
'nt.windowsvista': (21, 3),
'nt.windowsxp': (24, 0),
'nt.windows': (21, 3),
'posix.breeze': (28, -1),
'posix.oxygen': (38, -2),
'posix.qtcurve': (27, 0),
'posix.windows': (26, 0),
'posix.fusion': (32, 0),
}
style_name = qapplication().style().property('name')
key = '%s.%s' % (os.name, style_name)
if key in metrics:
tabbar_height, offset = metrics[key]
tools_height = tools_layout.sizeHint().height()
spacing = tabbar_height - tools_height + offset
return max(spacing, 0) | [
"def",
"calc_tools_spacing",
"(",
"tools_layout",
")",
":",
"metrics",
"=",
"{",
"# (tabbar_height, offset)\r",
"'nt.fusion'",
":",
"(",
"32",
",",
"0",
")",
",",
"'nt.windowsvista'",
":",
"(",
"21",
",",
"3",
")",
",",
"'nt.windowsxp'",
":",
"(",
"24",
",",
"0",
")",
",",
"'nt.windows'",
":",
"(",
"21",
",",
"3",
")",
",",
"'posix.breeze'",
":",
"(",
"28",
",",
"-",
"1",
")",
",",
"'posix.oxygen'",
":",
"(",
"38",
",",
"-",
"2",
")",
",",
"'posix.qtcurve'",
":",
"(",
"27",
",",
"0",
")",
",",
"'posix.windows'",
":",
"(",
"26",
",",
"0",
")",
",",
"'posix.fusion'",
":",
"(",
"32",
",",
"0",
")",
",",
"}",
"style_name",
"=",
"qapplication",
"(",
")",
".",
"style",
"(",
")",
".",
"property",
"(",
"'name'",
")",
"key",
"=",
"'%s.%s'",
"%",
"(",
"os",
".",
"name",
",",
"style_name",
")",
"if",
"key",
"in",
"metrics",
":",
"tabbar_height",
",",
"offset",
"=",
"metrics",
"[",
"key",
"]",
"tools_height",
"=",
"tools_layout",
".",
"sizeHint",
"(",
")",
".",
"height",
"(",
")",
"spacing",
"=",
"tabbar_height",
"-",
"tools_height",
"+",
"offset",
"return",
"max",
"(",
"spacing",
",",
"0",
")"
] | Return a spacing (int) or None if we don't have the appropriate metrics
to calculate the spacing.
We're trying to adapt the spacing below the tools_layout spacing so that
the main_widget has the same vertical position as the editor widgets
(which have tabs above).
The required spacing is
spacing = tabbar_height - tools_height + offset
where the tabbar_heights were empirically determined for a combination of
operating systems and styles. Offsets were manually adjusted, so that the
heights of main_widgets and editor widgets match. This is probably
caused by a still not understood element of the layout and style metrics. | [
"Return",
"a",
"spacing",
"(",
"int",
")",
"or",
"None",
"if",
"we",
"don",
"t",
"have",
"the",
"appropriate",
"metrics",
"to",
"calculate",
"the",
"spacing",
".",
"We",
"re",
"trying",
"to",
"adapt",
"the",
"spacing",
"below",
"the",
"tools_layout",
"spacing",
"so",
"that",
"the",
"main_widget",
"has",
"the",
"same",
"vertical",
"position",
"as",
"the",
"editor",
"widgets",
"(",
"which",
"have",
"tabs",
"above",
")",
".",
"The",
"required",
"spacing",
"is",
"spacing",
"=",
"tabbar_height",
"-",
"tools_height",
"+",
"offset",
"where",
"the",
"tabbar_heights",
"were",
"empirically",
"determined",
"for",
"a",
"combination",
"of",
"operating",
"systems",
"and",
"styles",
".",
"Offsets",
"were",
"manually",
"adjusted",
"so",
"that",
"the",
"heights",
"of",
"main_widgets",
"and",
"editor",
"widgets",
"match",
".",
"This",
"is",
"probably",
"caused",
"by",
"a",
"still",
"not",
"understood",
"element",
"of",
"the",
"layout",
"and",
"style",
"metrics",
"."
] | python | train |
DataKitchen/DKCloudCommand | DKCloudCommand/cli/__main__.py | https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/cli/__main__.py#L569-L587 | def file_update_all(backend, message, dryrun):
"""
Update all of the changed files for this Recipe
"""
kitchen = DKCloudCommandRunner.which_kitchen_name()
if kitchen is None:
raise click.ClickException('You must be in a Kitchen')
recipe_dir = DKRecipeDisk.find_recipe_root_dir()
if recipe_dir is None:
raise click.ClickException('You must be in a Recipe folder')
recipe = DKRecipeDisk.find_recipe_name()
if dryrun:
click.secho('%s - Display all changed files in Recipe (%s) in Kitchen(%s) with message (%s)' %
(get_datetime(), recipe, kitchen, message), fg='green')
else:
click.secho('%s - Updating all changed files in Recipe (%s) in Kitchen(%s) with message (%s)' %
(get_datetime(), recipe, kitchen, message), fg='green')
check_and_print(DKCloudCommandRunner.update_all_files(backend.dki, kitchen, recipe, recipe_dir, message, dryrun)) | [
"def",
"file_update_all",
"(",
"backend",
",",
"message",
",",
"dryrun",
")",
":",
"kitchen",
"=",
"DKCloudCommandRunner",
".",
"which_kitchen_name",
"(",
")",
"if",
"kitchen",
"is",
"None",
":",
"raise",
"click",
".",
"ClickException",
"(",
"'You must be in a Kitchen'",
")",
"recipe_dir",
"=",
"DKRecipeDisk",
".",
"find_recipe_root_dir",
"(",
")",
"if",
"recipe_dir",
"is",
"None",
":",
"raise",
"click",
".",
"ClickException",
"(",
"'You must be in a Recipe folder'",
")",
"recipe",
"=",
"DKRecipeDisk",
".",
"find_recipe_name",
"(",
")",
"if",
"dryrun",
":",
"click",
".",
"secho",
"(",
"'%s - Display all changed files in Recipe (%s) in Kitchen(%s) with message (%s)'",
"%",
"(",
"get_datetime",
"(",
")",
",",
"recipe",
",",
"kitchen",
",",
"message",
")",
",",
"fg",
"=",
"'green'",
")",
"else",
":",
"click",
".",
"secho",
"(",
"'%s - Updating all changed files in Recipe (%s) in Kitchen(%s) with message (%s)'",
"%",
"(",
"get_datetime",
"(",
")",
",",
"recipe",
",",
"kitchen",
",",
"message",
")",
",",
"fg",
"=",
"'green'",
")",
"check_and_print",
"(",
"DKCloudCommandRunner",
".",
"update_all_files",
"(",
"backend",
".",
"dki",
",",
"kitchen",
",",
"recipe",
",",
"recipe_dir",
",",
"message",
",",
"dryrun",
")",
")"
] | Update all of the changed files for this Recipe | [
"Update",
"all",
"of",
"the",
"changed",
"files",
"for",
"this",
"Recipe"
] | python | train |
mitsei/dlkit | dlkit/handcar/learning/managers.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/managers.py#L1011-L1033 | def get_objective_hierarchy_design_session(self):
"""Gets the session for designing objective hierarchies.
return: (osid.learning.ObjectiveHierarchyDesignSession) - an
ObjectiveHierarchyDesignSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_objective_hierarchy_design() is
false
compliance: optional - This method must be implemented if
supports_objective_hierarchy_design() is true.
"""
if not self.supports_objective_hierarchy_design():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
try:
session = sessions.ObjectiveHierarchyDesignSession(runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | [
"def",
"get_objective_hierarchy_design_session",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"supports_objective_hierarchy_design",
"(",
")",
":",
"raise",
"Unimplemented",
"(",
")",
"try",
":",
"from",
".",
"import",
"sessions",
"except",
"ImportError",
":",
"raise",
"OperationFailed",
"(",
")",
"try",
":",
"session",
"=",
"sessions",
".",
"ObjectiveHierarchyDesignSession",
"(",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"except",
"AttributeError",
":",
"raise",
"OperationFailed",
"(",
")",
"return",
"session"
] | Gets the session for designing objective hierarchies.
return: (osid.learning.ObjectiveHierarchyDesignSession) - an
ObjectiveHierarchyDesignSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_objective_hierarchy_design() is
false
compliance: optional - This method must be implemented if
supports_objective_hierarchy_design() is true. | [
"Gets",
"the",
"session",
"for",
"designing",
"objective",
"hierarchies",
"."
] | python | train |
GuiltyTargets/ppi-network-annotation | src/ppi_network_annotation/model/labeled_network.py | https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/labeled_network.py#L36-L46 | def get_index_labels(self, targets):
"""Get the labels(known target/not) mapped to indices.
:param targets: List of known targets
:return: Dictionary of index-label mappings
"""
target_ind = self.graph.vs.select(name_in=targets).indices
rest_ind = self.graph.vs.select(name_notin=targets).indices
label_mappings = {i: 1 for i in target_ind}
label_mappings.update({i: 0 for i in rest_ind})
return label_mappings | [
"def",
"get_index_labels",
"(",
"self",
",",
"targets",
")",
":",
"target_ind",
"=",
"self",
".",
"graph",
".",
"vs",
".",
"select",
"(",
"name_in",
"=",
"targets",
")",
".",
"indices",
"rest_ind",
"=",
"self",
".",
"graph",
".",
"vs",
".",
"select",
"(",
"name_notin",
"=",
"targets",
")",
".",
"indices",
"label_mappings",
"=",
"{",
"i",
":",
"1",
"for",
"i",
"in",
"target_ind",
"}",
"label_mappings",
".",
"update",
"(",
"{",
"i",
":",
"0",
"for",
"i",
"in",
"rest_ind",
"}",
")",
"return",
"label_mappings"
] | Get the labels(known target/not) mapped to indices.
:param targets: List of known targets
:return: Dictionary of index-label mappings | [
"Get",
"the",
"labels",
"(",
"known",
"target",
"/",
"not",
")",
"mapped",
"to",
"indices",
"."
] | python | train |
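Stripped of igraph, get_index_labels just marks which vertex indices correspond to known targets; an equivalent over a plain list of names (the gene names below are invented):

def index_labels(names, targets):
    targets = set(targets)
    return {i: int(name in targets) for i, name in enumerate(names)}

names = ['TP53', 'EGFR', 'BRCA1', 'MYC']
print(index_labels(names, targets={'EGFR', 'MYC'}))  # {0: 0, 1: 1, 2: 0, 3: 1}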
ciena/afkak | afkak/client.py | https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L509-L557 | def send_produce_request(self, payloads=None, acks=1,
timeout=DEFAULT_REPLICAS_ACK_MSECS,
fail_on_error=True, callback=None):
"""
Encode and send some ProduceRequests
ProduceRequests will be grouped by (topic, partition) and then
sent to a specific broker. Output is a list of responses in the
same order as the list of payloads specified
Parameters
----------
payloads:
list of ProduceRequest
acks:
How many Kafka broker replicas need to write before
the leader replies with a response
timeout:
How long the server has to receive the acks from the
replicas before returning an error.
fail_on_error:
boolean, should we raise an Exception if we encounter an API error?
callback:
function, instead of returning the ProduceResponse,
first pass it through this function
Return
------
a deferred which callbacks with a list of ProduceResponse
Raises
------
FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError
"""
encoder = partial(
KafkaCodec.encode_produce_request,
acks=acks,
timeout=timeout)
if acks == 0:
decoder = None
else:
decoder = KafkaCodec.decode_produce_response
resps = yield self._send_broker_aware_request(
payloads, encoder, decoder)
returnValue(self._handle_responses(resps, fail_on_error, callback)) | [
"def",
"send_produce_request",
"(",
"self",
",",
"payloads",
"=",
"None",
",",
"acks",
"=",
"1",
",",
"timeout",
"=",
"DEFAULT_REPLICAS_ACK_MSECS",
",",
"fail_on_error",
"=",
"True",
",",
"callback",
"=",
"None",
")",
":",
"encoder",
"=",
"partial",
"(",
"KafkaCodec",
".",
"encode_produce_request",
",",
"acks",
"=",
"acks",
",",
"timeout",
"=",
"timeout",
")",
"if",
"acks",
"==",
"0",
":",
"decoder",
"=",
"None",
"else",
":",
"decoder",
"=",
"KafkaCodec",
".",
"decode_produce_response",
"resps",
"=",
"yield",
"self",
".",
"_send_broker_aware_request",
"(",
"payloads",
",",
"encoder",
",",
"decoder",
")",
"returnValue",
"(",
"self",
".",
"_handle_responses",
"(",
"resps",
",",
"fail_on_error",
",",
"callback",
")",
")"
] | Encode and send some ProduceRequests
ProduceRequests will be grouped by (topic, partition) and then
sent to a specific broker. Output is a list of responses in the
same order as the list of payloads specified
Parameters
----------
payloads:
list of ProduceRequest
acks:
How many Kafka broker replicas need to write before
the leader replies with a response
timeout:
How long the server has to receive the acks from the
replicas before returning an error.
fail_on_error:
boolean, should we raise an Exception if we encounter an API error?
callback:
function, instead of returning the ProduceResponse,
first pass it through this function
Return
------
a deferred which callbacks with a list of ProduceResponse
Raises
------
FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError | [
"Encode",
"and",
"send",
"some",
"ProduceRequests"
] | python | train |
tensorpack/tensorpack | examples/FasterRCNN/dataset.py | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/dataset.py#L77-L102 | def load(self, add_gt=True, add_mask=False):
"""
Args:
add_gt: whether to add ground truth bounding box annotations to the dicts
add_mask: whether to also add ground truth mask
Returns:
a list of dict, each has keys including:
'image_id', 'file_name',
and (if add_gt is True) 'boxes', 'class', 'is_crowd', and optionally
'segmentation'.
"""
if add_mask:
assert add_gt
with timed_operation('Load Groundtruth Boxes for {}'.format(self.name)):
img_ids = self.coco.getImgIds()
img_ids.sort()
# list of dict, each has keys: height,width,id,file_name
imgs = self.coco.loadImgs(img_ids)
for img in tqdm.tqdm(imgs):
img['image_id'] = img.pop('id')
self._use_absolute_file_name(img)
if add_gt:
self._add_detection_gt(img, add_mask)
return imgs | [
"def",
"load",
"(",
"self",
",",
"add_gt",
"=",
"True",
",",
"add_mask",
"=",
"False",
")",
":",
"if",
"add_mask",
":",
"assert",
"add_gt",
"with",
"timed_operation",
"(",
"'Load Groundtruth Boxes for {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
":",
"img_ids",
"=",
"self",
".",
"coco",
".",
"getImgIds",
"(",
")",
"img_ids",
".",
"sort",
"(",
")",
"# list of dict, each has keys: height,width,id,file_name",
"imgs",
"=",
"self",
".",
"coco",
".",
"loadImgs",
"(",
"img_ids",
")",
"for",
"img",
"in",
"tqdm",
".",
"tqdm",
"(",
"imgs",
")",
":",
"img",
"[",
"'image_id'",
"]",
"=",
"img",
".",
"pop",
"(",
"'id'",
")",
"self",
".",
"_use_absolute_file_name",
"(",
"img",
")",
"if",
"add_gt",
":",
"self",
".",
"_add_detection_gt",
"(",
"img",
",",
"add_mask",
")",
"return",
"imgs"
] | Args:
add_gt: whether to add ground truth bounding box annotations to the dicts
add_mask: whether to also add ground truth mask
Returns:
a list of dict, each has keys including:
'image_id', 'file_name',
and (if add_gt is True) 'boxes', 'class', 'is_crowd', and optionally
'segmentation'. | [
"Args",
":",
"add_gt",
":",
"whether",
"to",
"add",
"ground",
"truth",
"bounding",
"box",
"annotations",
"to",
"the",
"dicts",
"add_mask",
":",
"whether",
"to",
"also",
"add",
"ground",
"truth",
"mask"
] | python | train |
PyCQA/pylint | pylint/checkers/exceptions.py | https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/exceptions.py#L342-L357 | def _check_bad_exception_context(self, node):
"""Verify that the exception context is properly set.
An exception context can be only `None` or an exception.
"""
cause = utils.safe_infer(node.cause)
if cause in (astroid.Uninferable, None):
return
if isinstance(cause, astroid.Const):
if cause.value is not None:
self.add_message("bad-exception-context", node=node)
elif not isinstance(cause, astroid.ClassDef) and not utils.inherit_from_std_ex(
cause
):
self.add_message("bad-exception-context", node=node) | [
"def",
"_check_bad_exception_context",
"(",
"self",
",",
"node",
")",
":",
"cause",
"=",
"utils",
".",
"safe_infer",
"(",
"node",
".",
"cause",
")",
"if",
"cause",
"in",
"(",
"astroid",
".",
"Uninferable",
",",
"None",
")",
":",
"return",
"if",
"isinstance",
"(",
"cause",
",",
"astroid",
".",
"Const",
")",
":",
"if",
"cause",
".",
"value",
"is",
"not",
"None",
":",
"self",
".",
"add_message",
"(",
"\"bad-exception-context\"",
",",
"node",
"=",
"node",
")",
"elif",
"not",
"isinstance",
"(",
"cause",
",",
"astroid",
".",
"ClassDef",
")",
"and",
"not",
"utils",
".",
"inherit_from_std_ex",
"(",
"cause",
")",
":",
"self",
".",
"add_message",
"(",
"\"bad-exception-context\"",
",",
"node",
"=",
"node",
")"
] | Verify that the exception context is properly set.
An exception context can be only `None` or an exception. | [
"Verify",
"that",
"the",
"exception",
"context",
"is",
"properly",
"set",
"."
] | python | test |
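Illustrative snippets only (hypothetical, not taken from pylint's test suite): the kind of "raise ... from ..." code this checker accepts or reports.

def ok_with_exception_cause():
    try:
        int("x")
    except ValueError as exc:
        # accepted: the cause after "from" is an exception instance
        raise RuntimeError("bad input") from exc

def ok_with_suppressed_cause():
    try:
        int("x")
    except ValueError:
        # accepted: None explicitly suppresses the implicit context
        raise RuntimeError("bad input") from None

def reported_cause():
    try:
        int("x")
    except ValueError:
        # pylint reports bad-exception-context here: 42 is neither None
        # nor something inheriting from a standard exception
        raise RuntimeError("bad input") from 42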
apache/airflow | airflow/hooks/S3_hook.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L204-L218 | def get_key(self, key, bucket_name=None):
"""
Returns a boto3.s3.Object
:param key: the path to the key
:type key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
obj = self.get_resource_type('s3').Object(bucket_name, key)
obj.load()
return obj | [
"def",
"get_key",
"(",
"self",
",",
"key",
",",
"bucket_name",
"=",
"None",
")",
":",
"if",
"not",
"bucket_name",
":",
"(",
"bucket_name",
",",
"key",
")",
"=",
"self",
".",
"parse_s3_url",
"(",
"key",
")",
"obj",
"=",
"self",
".",
"get_resource_type",
"(",
"'s3'",
")",
".",
"Object",
"(",
"bucket_name",
",",
"key",
")",
"obj",
".",
"load",
"(",
")",
"return",
"obj"
] | Returns a boto3.s3.Object
:param key: the path to the key
:type key: str
:param bucket_name: the name of the bucket
:type bucket_name: str | [
"Returns",
"a",
"boto3",
".",
"s3",
".",
"Object"
] | python | test |
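A hedged usage sketch, not part of the record: it assumes an Airflow installation of the same era with an AWS connection named "aws_default"; the bucket and key names are invented.

from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id="aws_default")
# Bucket passed explicitly ...
obj = hook.get_key("reports/2019-01.csv", bucket_name="my-bucket")
# ... or parsed out of a full S3 URL by the hook itself.
obj = hook.get_key("s3://my-bucket/reports/2019-01.csv")
print(obj.content_length)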
facetoe/zenpy | zenpy/lib/api.py | https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L1095-L1103 | def events(self, start_time, include=None):
"""
Retrieve TicketEvents
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param start_time: time to retrieve events from.
"""
return self._query_zendesk(self.endpoint.events, 'ticket_event', start_time=start_time, include=include) | [
"def",
"events",
"(",
"self",
",",
"start_time",
",",
"include",
"=",
"None",
")",
":",
"return",
"self",
".",
"_query_zendesk",
"(",
"self",
".",
"endpoint",
".",
"events",
",",
"'ticket_event'",
",",
"start_time",
"=",
"start_time",
",",
"include",
"=",
"include",
")"
] | Retrieve TicketEvents
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param start_time: time to retrieve events from. | [
"Retrieve",
"TicketEvents"
] | python | train |
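A hedged usage sketch, not part of the record: subdomain, credentials and the one-day window are placeholders, and it assumes the method is exposed through the client's tickets API as in Zenpy's incremental-export documentation.

from datetime import datetime, timedelta
from zenpy import Zenpy

client = Zenpy(subdomain="example", email="agent@example.com", token="...")
one_day_ago = datetime.utcnow() - timedelta(days=1)
for event in client.tickets.events(start_time=one_day_ago):
    print(event.id)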
denisenkom/pytds | src/pytds/tds.py | https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds.py#L77-L86 | def tds7_crypt_pass(password):
""" Mangle password according to tds rules
:param password: Password str
:returns: Byte-string with encoded password
"""
encoded = bytearray(ucs2_codec.encode(password)[0])
for i, ch in enumerate(encoded):
encoded[i] = ((ch << 4) & 0xff | (ch >> 4)) ^ 0xA5
return encoded | [
"def",
"tds7_crypt_pass",
"(",
"password",
")",
":",
"encoded",
"=",
"bytearray",
"(",
"ucs2_codec",
".",
"encode",
"(",
"password",
")",
"[",
"0",
"]",
")",
"for",
"i",
",",
"ch",
"in",
"enumerate",
"(",
"encoded",
")",
":",
"encoded",
"[",
"i",
"]",
"=",
"(",
"(",
"ch",
"<<",
"4",
")",
"&",
"0xff",
"|",
"(",
"ch",
">>",
"4",
")",
")",
"^",
"0xA5",
"return",
"encoded"
] | Mangle password according to tds rules
:param password: Password str
:returns: Byte-string with encoded password | [
"Mangle",
"password",
"according",
"to",
"tds",
"rules"
] | python | train |
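A self-contained sketch of the same mangling, not part of the record: it substitutes the standard utf-16-le codec for the module's ucs2_codec, an assumption that is equivalent for characters in the Basic Multilingual Plane.

import codecs

def crypt_pass_sketch(password):
    encoded = bytearray(codecs.encode(password, "utf-16-le"))
    for i, ch in enumerate(encoded):
        # swap the two nibbles of each byte, then XOR with 0xA5
        encoded[i] = ((ch << 4) & 0xff | (ch >> 4)) ^ 0xA5
    return encoded

print(crypt_pass_sketch("sa_password").hex())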
softlayer/softlayer-python | SoftLayer/managers/ordering.py | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/ordering.py#L212-L230 | def order_quote(self, quote_id, extra):
"""Places an order using a quote
::
extras = {
'hardware': {'hostname': 'test', 'domain': 'testing.com'},
'quantity': 2
}
manager = ordering.OrderingManager(env.client)
result = manager.order_quote(12345, extras)
:param int quote_id: ID for the target quote
:param dictionary extra: Overrides for the defaults of SoftLayer_Container_Product_Order
:param int quantity: Quantity to override default
"""
container = self.generate_order_template(quote_id, extra)
return self.client.call('SoftLayer_Billing_Order_Quote', 'placeOrder', container, id=quote_id) | [
"def",
"order_quote",
"(",
"self",
",",
"quote_id",
",",
"extra",
")",
":",
"container",
"=",
"self",
".",
"generate_order_template",
"(",
"quote_id",
",",
"extra",
")",
"return",
"self",
".",
"client",
".",
"call",
"(",
"'SoftLayer_Billing_Order_Quote'",
",",
"'placeOrder'",
",",
"container",
",",
"id",
"=",
"quote_id",
")"
] | Places an order using a quote
::
extras = {
'hardware': {'hostname': 'test', 'domain': 'testing.com'},
'quantity': 2
}
manager = ordering.OrderingManager(env.client)
result = manager.order_quote(12345, extras)
:param int quote_id: ID for the target quote
:param dictionary extra: Overrides for the defaults of SoftLayer_Container_Product_Order
:param int quantity: Quantity to override default | [
"Places",
"an",
"order",
"using",
"a",
"quote"
] | python | train |
dropbox/stone | stone/frontend/ir_generator.py | https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/ir_generator.py#L749-L803 | def _populate_union_type_attributes(self, env, data_type):
"""
Converts a forward reference of a union into a complete definition.
"""
parent_type = None
extends = data_type._ast_node.extends
if extends:
# A parent type must be fully defined and not just a forward
# reference.
parent_type = self._resolve_type(env, extends, True)
if isinstance(parent_type, Alias):
raise InvalidSpec(
'A union cannot extend an alias. '
'Use the canonical name instead.',
data_type._ast_node.lineno, data_type._ast_node.path)
if isinstance(parent_type, Nullable):
raise InvalidSpec(
'A union cannot extend a nullable type.',
data_type._ast_node.lineno, data_type._ast_node.path)
if not isinstance(parent_type, Union):
raise InvalidSpec(
'A union can only extend another union: '
'%s is not a union.' % quote(parent_type.name),
data_type._ast_node.lineno, data_type._ast_node.path)
api_type_fields = []
for stone_field in data_type._ast_node.fields:
if stone_field.name == 'other':
raise InvalidSpec(
"Union cannot define an 'other' field because it is "
"reserved as the catch-all field for open unions.",
stone_field.lineno, stone_field.path)
api_type_fields.append(self._create_union_field(env, stone_field))
catch_all_field = None
if data_type.closed:
if parent_type and not parent_type.closed:
# Due to the reversed super type / child type relationship for
# unions, a child type cannot be closed if its parent is open
# because the parent now has an extra field that is not
# recognized by the child if it were substituted in for it.
raise InvalidSpec(
"Union cannot be closed since parent type '%s' is open." % (
parent_type.name),
data_type._ast_node.lineno, data_type._ast_node.path)
else:
if not parent_type or parent_type.closed:
# Create a catch-all field
catch_all_field = UnionField(
name='other', data_type=Void(), doc=None,
ast_node=data_type._ast_node, catch_all=True)
api_type_fields.append(catch_all_field)
data_type.set_attributes(
data_type._ast_node.doc, api_type_fields, parent_type, catch_all_field) | [
"def",
"_populate_union_type_attributes",
"(",
"self",
",",
"env",
",",
"data_type",
")",
":",
"parent_type",
"=",
"None",
"extends",
"=",
"data_type",
".",
"_ast_node",
".",
"extends",
"if",
"extends",
":",
"# A parent type must be fully defined and not just a forward",
"# reference.",
"parent_type",
"=",
"self",
".",
"_resolve_type",
"(",
"env",
",",
"extends",
",",
"True",
")",
"if",
"isinstance",
"(",
"parent_type",
",",
"Alias",
")",
":",
"raise",
"InvalidSpec",
"(",
"'A union cannot extend an alias. '",
"'Use the canonical name instead.'",
",",
"data_type",
".",
"_ast_node",
".",
"lineno",
",",
"data_type",
".",
"_ast_node",
".",
"path",
")",
"if",
"isinstance",
"(",
"parent_type",
",",
"Nullable",
")",
":",
"raise",
"InvalidSpec",
"(",
"'A union cannot extend a nullable type.'",
",",
"data_type",
".",
"_ast_node",
".",
"lineno",
",",
"data_type",
".",
"_ast_node",
".",
"path",
")",
"if",
"not",
"isinstance",
"(",
"parent_type",
",",
"Union",
")",
":",
"raise",
"InvalidSpec",
"(",
"'A union can only extend another union: '",
"'%s is not a union.'",
"%",
"quote",
"(",
"parent_type",
".",
"name",
")",
",",
"data_type",
".",
"_ast_node",
".",
"lineno",
",",
"data_type",
".",
"_ast_node",
".",
"path",
")",
"api_type_fields",
"=",
"[",
"]",
"for",
"stone_field",
"in",
"data_type",
".",
"_ast_node",
".",
"fields",
":",
"if",
"stone_field",
".",
"name",
"==",
"'other'",
":",
"raise",
"InvalidSpec",
"(",
"\"Union cannot define an 'other' field because it is \"",
"\"reserved as the catch-all field for open unions.\"",
",",
"stone_field",
".",
"lineno",
",",
"stone_field",
".",
"path",
")",
"api_type_fields",
".",
"append",
"(",
"self",
".",
"_create_union_field",
"(",
"env",
",",
"stone_field",
")",
")",
"catch_all_field",
"=",
"None",
"if",
"data_type",
".",
"closed",
":",
"if",
"parent_type",
"and",
"not",
"parent_type",
".",
"closed",
":",
"# Due to the reversed super type / child type relationship for",
"# unions, a child type cannot be closed if its parent is open",
"# because the parent now has an extra field that is not",
"# recognized by the child if it were substituted in for it.",
"raise",
"InvalidSpec",
"(",
"\"Union cannot be closed since parent type '%s' is open.\"",
"%",
"(",
"parent_type",
".",
"name",
")",
",",
"data_type",
".",
"_ast_node",
".",
"lineno",
",",
"data_type",
".",
"_ast_node",
".",
"path",
")",
"else",
":",
"if",
"not",
"parent_type",
"or",
"parent_type",
".",
"closed",
":",
"# Create a catch-all field",
"catch_all_field",
"=",
"UnionField",
"(",
"name",
"=",
"'other'",
",",
"data_type",
"=",
"Void",
"(",
")",
",",
"doc",
"=",
"None",
",",
"ast_node",
"=",
"data_type",
".",
"_ast_node",
",",
"catch_all",
"=",
"True",
")",
"api_type_fields",
".",
"append",
"(",
"catch_all_field",
")",
"data_type",
".",
"set_attributes",
"(",
"data_type",
".",
"_ast_node",
".",
"doc",
",",
"api_type_fields",
",",
"parent_type",
",",
"catch_all_field",
")"
] | Converts a forward reference of a union into a complete definition. | [
"Converts",
"a",
"forward",
"reference",
"of",
"a",
"union",
"into",
"a",
"complete",
"definition",
"."
] | python | train |
Netflix-Skunkworks/swag-client | swag_client/backend.py | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backend.py#L99-L132 | def get_service_enabled(self, name, accounts_list=None, search_filter=None, region=None):
"""Get a list of accounts where a service has been enabled."""
if not accounts_list:
accounts = self.get_all(search_filter=search_filter)
else:
accounts = accounts_list
if self.version == 1:
accounts = accounts['accounts']
enabled = []
for account in accounts:
if self.version == 1:
account_filter = "accounts[?id=='{id}']".format(id=account['id'])
else:
account_filter = "[?id=='{id}']".format(id=account['id'])
service = self.get_service(name, search_filter=account_filter)
if self.version == 1:
if service:
service = service['enabled'] # no region information available in v1
else:
if not region:
service_filter = "status[?enabled]"
else:
service_filter = "status[?(region=='{region}' || region=='all') && enabled]".format(region=region)
service = jmespath.search(service_filter, service)
if service:
enabled.append(account)
return enabled | [
"def",
"get_service_enabled",
"(",
"self",
",",
"name",
",",
"accounts_list",
"=",
"None",
",",
"search_filter",
"=",
"None",
",",
"region",
"=",
"None",
")",
":",
"if",
"not",
"accounts_list",
":",
"accounts",
"=",
"self",
".",
"get_all",
"(",
"search_filter",
"=",
"search_filter",
")",
"else",
":",
"accounts",
"=",
"accounts_list",
"if",
"self",
".",
"version",
"==",
"1",
":",
"accounts",
"=",
"accounts",
"[",
"'accounts'",
"]",
"enabled",
"=",
"[",
"]",
"for",
"account",
"in",
"accounts",
":",
"if",
"self",
".",
"version",
"==",
"1",
":",
"account_filter",
"=",
"\"accounts[?id=='{id}']\"",
".",
"format",
"(",
"id",
"=",
"account",
"[",
"'id'",
"]",
")",
"else",
":",
"account_filter",
"=",
"\"[?id=='{id}']\"",
".",
"format",
"(",
"id",
"=",
"account",
"[",
"'id'",
"]",
")",
"service",
"=",
"self",
".",
"get_service",
"(",
"name",
",",
"search_filter",
"=",
"account_filter",
")",
"if",
"self",
".",
"version",
"==",
"1",
":",
"if",
"service",
":",
"service",
"=",
"service",
"[",
"'enabled'",
"]",
"# no region information available in v1",
"else",
":",
"if",
"not",
"region",
":",
"service_filter",
"=",
"\"status[?enabled]\"",
"else",
":",
"service_filter",
"=",
"\"status[?(region=='{region}' || region=='all') && enabled]\"",
".",
"format",
"(",
"region",
"=",
"region",
")",
"service",
"=",
"jmespath",
".",
"search",
"(",
"service_filter",
",",
"service",
")",
"if",
"service",
":",
"enabled",
".",
"append",
"(",
"account",
")",
"return",
"enabled"
] | Get a list of accounts where a service has been enabled. | [
"Get",
"a",
"list",
"of",
"accounts",
"where",
"a",
"service",
"has",
"been",
"enabled",
"."
] | python | train |
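An illustrative stand-alone jmespath query, not the swag-client API itself: the same region/enabled filter built above, applied to a hand-written service status block.

import jmespath

service = {
    "status": [
        {"region": "us-east-1", "enabled": True},
        {"region": "eu-west-1", "enabled": False},
    ]
}
expr = "status[?(region=='us-east-1' || region=='all') && enabled]"
print(jmespath.search(expr, service))
# [{'region': 'us-east-1', 'enabled': True}]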
MartinThoma/hwrt | hwrt/datasets/__init__.py | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/datasets/__init__.py#L20-L79 | def formula_to_dbid(formula_str, backslash_fix=False):
"""
Convert a LaTeX formula to the database index.
Parameters
----------
formula_str : string
The formula as LaTeX code.
backslash_fix : boolean
If this is set to true, then it will be checked if the same formula
        exists with a preceding backslash.
Returns
-------
int :
The database index.
"""
global __formula_to_dbid_cache
if __formula_to_dbid_cache is None:
mysql = utils.get_mysql_cfg()
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
# Get all formulas that should get examined
sql = ("SELECT `id`, `formula_in_latex` FROM `wm_formula` ")
cursor.execute(sql)
formulas = cursor.fetchall()
__formula_to_dbid_cache = {}
for fm in formulas:
__formula_to_dbid_cache[fm['formula_in_latex']] = fm['id']
if formula_str in __formula_to_dbid_cache:
return __formula_to_dbid_cache[formula_str]
elif backslash_fix and ('\\%s' % formula_str) in __formula_to_dbid_cache:
return __formula_to_dbid_cache['\\%s' % formula_str]
else:
logging.info("Symbol '%s' was not found. Add it to write-math.com.",
formula_str)
mysql = utils.get_mysql_cfg()
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
sql = ("INSERT INTO `wm_formula` (`user_id`, `formula_name`, "
"`formula_in_latex`, "
"`mode`, `package`) VALUES ("
"'10', %s, %s, 'bothmodes', NULL);")
if len(formula_str) < 20:
logging.info("Insert formula %s.", formula_str)
cursor.execute(sql, (formula_str, formula_str))
connection.commit()
__formula_to_dbid_cache[formula_str] = connection.insert_id()
return __formula_to_dbid_cache[formula_str] | [
"def",
"formula_to_dbid",
"(",
"formula_str",
",",
"backslash_fix",
"=",
"False",
")",
":",
"global",
"__formula_to_dbid_cache",
"if",
"__formula_to_dbid_cache",
"is",
"None",
":",
"mysql",
"=",
"utils",
".",
"get_mysql_cfg",
"(",
")",
"connection",
"=",
"pymysql",
".",
"connect",
"(",
"host",
"=",
"mysql",
"[",
"'host'",
"]",
",",
"user",
"=",
"mysql",
"[",
"'user'",
"]",
",",
"passwd",
"=",
"mysql",
"[",
"'passwd'",
"]",
",",
"db",
"=",
"mysql",
"[",
"'db'",
"]",
",",
"charset",
"=",
"'utf8mb4'",
",",
"cursorclass",
"=",
"pymysql",
".",
"cursors",
".",
"DictCursor",
")",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"# Get all formulas that should get examined",
"sql",
"=",
"(",
"\"SELECT `id`, `formula_in_latex` FROM `wm_formula` \"",
")",
"cursor",
".",
"execute",
"(",
"sql",
")",
"formulas",
"=",
"cursor",
".",
"fetchall",
"(",
")",
"__formula_to_dbid_cache",
"=",
"{",
"}",
"for",
"fm",
"in",
"formulas",
":",
"__formula_to_dbid_cache",
"[",
"fm",
"[",
"'formula_in_latex'",
"]",
"]",
"=",
"fm",
"[",
"'id'",
"]",
"if",
"formula_str",
"in",
"__formula_to_dbid_cache",
":",
"return",
"__formula_to_dbid_cache",
"[",
"formula_str",
"]",
"elif",
"backslash_fix",
"and",
"(",
"'\\\\%s'",
"%",
"formula_str",
")",
"in",
"__formula_to_dbid_cache",
":",
"return",
"__formula_to_dbid_cache",
"[",
"'\\\\%s'",
"%",
"formula_str",
"]",
"else",
":",
"logging",
".",
"info",
"(",
"\"Symbol '%s' was not found. Add it to write-math.com.\"",
",",
"formula_str",
")",
"mysql",
"=",
"utils",
".",
"get_mysql_cfg",
"(",
")",
"connection",
"=",
"pymysql",
".",
"connect",
"(",
"host",
"=",
"mysql",
"[",
"'host'",
"]",
",",
"user",
"=",
"mysql",
"[",
"'user'",
"]",
",",
"passwd",
"=",
"mysql",
"[",
"'passwd'",
"]",
",",
"db",
"=",
"mysql",
"[",
"'db'",
"]",
",",
"charset",
"=",
"'utf8mb4'",
",",
"cursorclass",
"=",
"pymysql",
".",
"cursors",
".",
"DictCursor",
")",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"sql",
"=",
"(",
"\"INSERT INTO `wm_formula` (`user_id`, `formula_name`, \"",
"\"`formula_in_latex`, \"",
"\"`mode`, `package`) VALUES (\"",
"\"'10', %s, %s, 'bothmodes', NULL);\"",
")",
"if",
"len",
"(",
"formula_str",
")",
"<",
"20",
":",
"logging",
".",
"info",
"(",
"\"Insert formula %s.\"",
",",
"formula_str",
")",
"cursor",
".",
"execute",
"(",
"sql",
",",
"(",
"formula_str",
",",
"formula_str",
")",
")",
"connection",
".",
"commit",
"(",
")",
"__formula_to_dbid_cache",
"[",
"formula_str",
"]",
"=",
"connection",
".",
"insert_id",
"(",
")",
"return",
"__formula_to_dbid_cache",
"[",
"formula_str",
"]"
] | Convert a LaTeX formula to the database index.
Parameters
----------
formula_str : string
The formula as LaTeX code.
backslash_fix : boolean
If this is set to true, then it will be checked if the same formula
exists with a preceeding backslash.
Returns
-------
int :
The database index. | [
"Convert",
"a",
"LaTeX",
"formula",
"to",
"the",
"database",
"index",
"."
] | python | train |
openid/python-openid | openid/association.py | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/association.py#L151-L169 | def addAllowedType(self, assoc_type, session_type=None):
"""Add an association type and session type to the allowed
        types list. The association/session pairs are tried in the
order that they are added."""
if self.allowed_types is None:
self.allowed_types = []
if session_type is None:
available = getSessionTypes(assoc_type)
if not available:
raise ValueError('No session available for association type %r'
% (assoc_type,))
for session_type in getSessionTypes(assoc_type):
self.addAllowedType(assoc_type, session_type)
else:
checkSessionType(assoc_type, session_type)
self.allowed_types.append((assoc_type, session_type)) | [
"def",
"addAllowedType",
"(",
"self",
",",
"assoc_type",
",",
"session_type",
"=",
"None",
")",
":",
"if",
"self",
".",
"allowed_types",
"is",
"None",
":",
"self",
".",
"allowed_types",
"=",
"[",
"]",
"if",
"session_type",
"is",
"None",
":",
"available",
"=",
"getSessionTypes",
"(",
"assoc_type",
")",
"if",
"not",
"available",
":",
"raise",
"ValueError",
"(",
"'No session available for association type %r'",
"%",
"(",
"assoc_type",
",",
")",
")",
"for",
"session_type",
"in",
"getSessionTypes",
"(",
"assoc_type",
")",
":",
"self",
".",
"addAllowedType",
"(",
"assoc_type",
",",
"session_type",
")",
"else",
":",
"checkSessionType",
"(",
"assoc_type",
",",
"session_type",
")",
"self",
".",
"allowed_types",
".",
"append",
"(",
"(",
"assoc_type",
",",
"session_type",
")",
")"
] | Add an association type and session type to the allowed
        types list. The association/session pairs are tried in the
order that they are added. | [
"Add",
"an",
"association",
"type",
"and",
"session",
"type",
"to",
"the",
"allowed",
"types",
"list",
".",
"The",
"assocation",
"/",
"session",
"pairs",
"are",
"tried",
"in",
"the",
"order",
"that",
"they",
"are",
"added",
"."
] | python | train |
luckydonald/pytgbot | pytgbot/api_types/sendable/inline.py | https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/pytgbot/api_types/sendable/inline.py#L2811-L2831 | def to_array(self):
"""
Serializes this InlineQueryResultCachedGif to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(InlineQueryResultCachedGif, self).to_array()
# 'type' and 'id' given by superclass
array['gif_file_id'] = u(self.gif_file_id) # py2: type unicode, py3: type str
if self.title is not None:
array['title'] = u(self.title) # py2: type unicode, py3: type str
if self.caption is not None:
array['caption'] = u(self.caption) # py2: type unicode, py3: type str
if self.parse_mode is not None:
array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
if self.input_message_content is not None:
array['input_message_content'] = self.input_message_content.to_array() # type InputMessageContent
return array | [
"def",
"to_array",
"(",
"self",
")",
":",
"array",
"=",
"super",
"(",
"InlineQueryResultCachedGif",
",",
"self",
")",
".",
"to_array",
"(",
")",
"# 'type' and 'id' given by superclass",
"array",
"[",
"'gif_file_id'",
"]",
"=",
"u",
"(",
"self",
".",
"gif_file_id",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"title",
"is",
"not",
"None",
":",
"array",
"[",
"'title'",
"]",
"=",
"u",
"(",
"self",
".",
"title",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"caption",
"is",
"not",
"None",
":",
"array",
"[",
"'caption'",
"]",
"=",
"u",
"(",
"self",
".",
"caption",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"parse_mode",
"is",
"not",
"None",
":",
"array",
"[",
"'parse_mode'",
"]",
"=",
"u",
"(",
"self",
".",
"parse_mode",
")",
"# py2: type unicode, py3: type str",
"if",
"self",
".",
"reply_markup",
"is",
"not",
"None",
":",
"array",
"[",
"'reply_markup'",
"]",
"=",
"self",
".",
"reply_markup",
".",
"to_array",
"(",
")",
"# type InlineKeyboardMarkup",
"if",
"self",
".",
"input_message_content",
"is",
"not",
"None",
":",
"array",
"[",
"'input_message_content'",
"]",
"=",
"self",
".",
"input_message_content",
".",
"to_array",
"(",
")",
"# type InputMessageContent",
"return",
"array"
] | Serializes this InlineQueryResultCachedGif to a dictionary.
:return: dictionary representation of this object.
:rtype: dict | [
"Serializes",
"this",
"InlineQueryResultCachedGif",
"to",
"a",
"dictionary",
"."
] | python | train |
PythonCharmers/python-future | src/future/backports/http/cookies.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookies.py#L235-L245 | def _quote(str, LegalChars=_LegalChars):
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if all(c in LegalChars for c in str):
return str
else:
return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"' | [
"def",
"_quote",
"(",
"str",
",",
"LegalChars",
"=",
"_LegalChars",
")",
":",
"if",
"all",
"(",
"c",
"in",
"LegalChars",
"for",
"c",
"in",
"str",
")",
":",
"return",
"str",
"else",
":",
"return",
"'\"'",
"+",
"_nulljoin",
"(",
"_Translator",
".",
"get",
"(",
"s",
",",
"s",
")",
"for",
"s",
"in",
"str",
")",
"+",
"'\"'"
] | r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters. | [
"r",
"Quote",
"a",
"string",
"for",
"use",
"in",
"a",
"cookie",
"header",
"."
] | python | train |
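The behaviour being backported can be observed with the standard library directly; a small, hedged illustration (cookie names and values are invented, and the exact escape sequences depend on the Python version).

from http.cookies import SimpleCookie

jar = SimpleCookie()
jar["plain"] = "abc123"                      # only legal characters: left as-is
jar["fancy"] = 'needs "quoting", really'     # emitted double-quoted, with \-escapes inside
print(jar["plain"].OutputString())
print(jar["fancy"].OutputString())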
Microsoft/nni | src/sdk/pynni/nni/hyperopt_tuner/hyperopt_tuner.py | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/hyperopt_tuner/hyperopt_tuner.py#L142-L169 | def _add_index(in_x, parameter):
"""
    change parameters in NNI format to parameters in hyperopt format (This function also supports nested dict.).
For example, receive parameters like:
{'dropout_rate': 0.8, 'conv_size': 3, 'hidden_size': 512}
Will change to format in hyperopt, like:
{'dropout_rate': 0.8, 'conv_size': {'_index': 1, '_value': 3}, 'hidden_size': {'_index': 1, '_value': 512}}
"""
if TYPE not in in_x: # if at the top level
out_y = dict()
for key, value in parameter.items():
out_y[key] = _add_index(in_x[key], value)
return out_y
elif isinstance(in_x, dict):
value_type = in_x[TYPE]
value_format = in_x[VALUE]
if value_type == "choice":
choice_name = parameter[0] if isinstance(parameter, list) else parameter
for pos, item in enumerate(value_format): # here value_format is a list
if isinstance(item, list): # this format is ["choice_key", format_dict]
choice_key = item[0]
choice_value_format = item[1]
if choice_key == choice_name:
return {INDEX: pos, VALUE: [choice_name, _add_index(choice_value_format, parameter[1])]}
elif choice_name == item:
return {INDEX: pos, VALUE: item}
else:
return parameter | [
"def",
"_add_index",
"(",
"in_x",
",",
"parameter",
")",
":",
"if",
"TYPE",
"not",
"in",
"in_x",
":",
"# if at the top level",
"out_y",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"parameter",
".",
"items",
"(",
")",
":",
"out_y",
"[",
"key",
"]",
"=",
"_add_index",
"(",
"in_x",
"[",
"key",
"]",
",",
"value",
")",
"return",
"out_y",
"elif",
"isinstance",
"(",
"in_x",
",",
"dict",
")",
":",
"value_type",
"=",
"in_x",
"[",
"TYPE",
"]",
"value_format",
"=",
"in_x",
"[",
"VALUE",
"]",
"if",
"value_type",
"==",
"\"choice\"",
":",
"choice_name",
"=",
"parameter",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"parameter",
",",
"list",
")",
"else",
"parameter",
"for",
"pos",
",",
"item",
"in",
"enumerate",
"(",
"value_format",
")",
":",
"# here value_format is a list",
"if",
"isinstance",
"(",
"item",
",",
"list",
")",
":",
"# this format is [\"choice_key\", format_dict]",
"choice_key",
"=",
"item",
"[",
"0",
"]",
"choice_value_format",
"=",
"item",
"[",
"1",
"]",
"if",
"choice_key",
"==",
"choice_name",
":",
"return",
"{",
"INDEX",
":",
"pos",
",",
"VALUE",
":",
"[",
"choice_name",
",",
"_add_index",
"(",
"choice_value_format",
",",
"parameter",
"[",
"1",
"]",
")",
"]",
"}",
"elif",
"choice_name",
"==",
"item",
":",
"return",
"{",
"INDEX",
":",
"pos",
",",
"VALUE",
":",
"item",
"}",
"else",
":",
"return",
"parameter"
] | change parameters in NNI format to parameters in hyperopt format (This function also supports nested dict.).
For example, receive parameters like:
{'dropout_rate': 0.8, 'conv_size': 3, 'hidden_size': 512}
Will change to format in hyperopt, like:
{'dropout_rate': 0.8, 'conv_size': {'_index': 1, '_value': 3}, 'hidden_size': {'_index': 1, '_value': 512}} | [
"change",
"parameters",
"in",
"NNI",
"format",
"to",
"parameters",
"in",
"hyperopt",
"format",
"(",
"This",
"function",
"also",
"support",
"nested",
"dict",
".",
")",
".",
"For",
"example",
"receive",
"parameters",
"like",
":",
"{",
"dropout_rate",
":",
"0",
".",
"8",
"conv_size",
":",
"3",
"hidden_size",
":",
"512",
"}",
"Will",
"change",
"to",
"format",
"in",
"hyperopt",
"like",
":",
"{",
"dropout_rate",
":",
"0",
".",
"8",
"conv_size",
":",
"{",
"_index",
":",
"1",
"_value",
":",
"3",
"}",
"hidden_size",
":",
"{",
"_index",
":",
"1",
"_value",
":",
"512",
"}}"
] | python | train |
vinci1it2000/schedula | schedula/utils/alg.py | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L22-L45 | def add_edge_fun(graph):
"""
Returns a function that adds an edge to the `graph` checking only the out
node.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that adds an edge to the `graph`.
:rtype: callable
"""
# Namespace shortcut for speed.
succ, pred, node = graph._succ, graph._pred, graph._node
def add_edge(u, v, **attr):
if v not in succ: # Add nodes.
succ[v], pred[v], node[v] = {}, {}, {}
succ[u][v] = pred[v][u] = attr # Add the edge.
return add_edge | [
"def",
"add_edge_fun",
"(",
"graph",
")",
":",
"# Namespace shortcut for speed.",
"succ",
",",
"pred",
",",
"node",
"=",
"graph",
".",
"_succ",
",",
"graph",
".",
"_pred",
",",
"graph",
".",
"_node",
"def",
"add_edge",
"(",
"u",
",",
"v",
",",
"*",
"*",
"attr",
")",
":",
"if",
"v",
"not",
"in",
"succ",
":",
"# Add nodes.",
"succ",
"[",
"v",
"]",
",",
"pred",
"[",
"v",
"]",
",",
"node",
"[",
"v",
"]",
"=",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
"succ",
"[",
"u",
"]",
"[",
"v",
"]",
"=",
"pred",
"[",
"v",
"]",
"[",
"u",
"]",
"=",
"attr",
"# Add the edge.",
"return",
"add_edge"
] | Returns a function that adds an edge to the `graph` checking only the out
node.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that adds an edge to the `graph`.
:rtype: callable | [
"Returns",
"a",
"function",
"that",
"adds",
"an",
"edge",
"to",
"the",
"graph",
"checking",
"only",
"the",
"out",
"node",
"."
] | python | train |
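A hedged usage sketch, not part of the record: it assumes schedula and networkx are importable, and the node names are arbitrary. The returned adder creates the missing successor node on the fly but expects the source node to exist already.

import networkx as nx
from schedula.utils.alg import add_edge_fun

graph = nx.DiGraph()
graph.add_node("a")
add_edge = add_edge_fun(graph)
add_edge("a", "b", weight=1)            # "b" did not exist and is created here
print(list(graph.edges(data=True)))     # [('a', 'b', {'weight': 1})]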
ronaldguillen/wave | wave/parsers.py | https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/parsers.py#L79-L87 | def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as a URL encoded form,
and returns the resulting QueryDict.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
data = QueryDict(stream.read(), encoding=encoding)
return data | [
"def",
"parse",
"(",
"self",
",",
"stream",
",",
"media_type",
"=",
"None",
",",
"parser_context",
"=",
"None",
")",
":",
"parser_context",
"=",
"parser_context",
"or",
"{",
"}",
"encoding",
"=",
"parser_context",
".",
"get",
"(",
"'encoding'",
",",
"settings",
".",
"DEFAULT_CHARSET",
")",
"data",
"=",
"QueryDict",
"(",
"stream",
".",
"read",
"(",
")",
",",
"encoding",
"=",
"encoding",
")",
"return",
"data"
] | Parses the incoming bytestream as a URL encoded form,
and returns the resulting QueryDict. | [
"Parses",
"the",
"incoming",
"bytestream",
"as",
"a",
"URL",
"encoded",
"form",
"and",
"returns",
"the",
"resulting",
"QueryDict",
"."
] | python | train |
dtmilano/AndroidViewClient | src/com/dtmilano/android/viewclient.py | https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/viewclient.py#L3415-L3472 | def list(self, sleep=1):
'''
List the windows.
Sleep is useful to wait some time before obtaining the new content when something in the
window has changed.
This also sets L{self.windows} as the list of windows.
@type sleep: int
@param sleep: sleep in seconds before proceeding to dump the content
@return: the list of windows
'''
if sleep > 0:
time.sleep(sleep)
if self.useUiAutomator:
raise Exception("Not implemented yet: listing windows with UiAutomator")
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((VIEW_SERVER_HOST, self.localPort))
except socket.error, ex:
raise RuntimeError("ERROR: Connecting to %s:%d: %s" % (VIEW_SERVER_HOST, self.localPort, ex))
s.send('list\r\n')
received = ""
doneRE = re.compile("DONE")
while True:
received += s.recv(1024)
if doneRE.search(received[-7:]):
break
s.close()
if DEBUG:
self.received = received
if DEBUG_RECEIVED:
print >>sys.stderr, "received %d chars" % len(received)
print >>sys.stderr
print >>sys.stderr, received
print >>sys.stderr
self.windows = {}
for line in received.split('\n'):
if not line:
break
if doneRE.search(line):
break
values = line.split()
if len(values) > 1:
package = values[1]
else:
package = "UNKNOWN"
if len(values) > 0:
wid = values[0]
else:
wid = '00000000'
self.windows[int('0x' + wid, 16)] = package
return self.windows | [
"def",
"list",
"(",
"self",
",",
"sleep",
"=",
"1",
")",
":",
"if",
"sleep",
">",
"0",
":",
"time",
".",
"sleep",
"(",
"sleep",
")",
"if",
"self",
".",
"useUiAutomator",
":",
"raise",
"Exception",
"(",
"\"Not implemented yet: listing windows with UiAutomator\"",
")",
"else",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"try",
":",
"s",
".",
"connect",
"(",
"(",
"VIEW_SERVER_HOST",
",",
"self",
".",
"localPort",
")",
")",
"except",
"socket",
".",
"error",
",",
"ex",
":",
"raise",
"RuntimeError",
"(",
"\"ERROR: Connecting to %s:%d: %s\"",
"%",
"(",
"VIEW_SERVER_HOST",
",",
"self",
".",
"localPort",
",",
"ex",
")",
")",
"s",
".",
"send",
"(",
"'list\\r\\n'",
")",
"received",
"=",
"\"\"",
"doneRE",
"=",
"re",
".",
"compile",
"(",
"\"DONE\"",
")",
"while",
"True",
":",
"received",
"+=",
"s",
".",
"recv",
"(",
"1024",
")",
"if",
"doneRE",
".",
"search",
"(",
"received",
"[",
"-",
"7",
":",
"]",
")",
":",
"break",
"s",
".",
"close",
"(",
")",
"if",
"DEBUG",
":",
"self",
".",
"received",
"=",
"received",
"if",
"DEBUG_RECEIVED",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"received %d chars\"",
"%",
"len",
"(",
"received",
")",
"print",
">>",
"sys",
".",
"stderr",
"print",
">>",
"sys",
".",
"stderr",
",",
"received",
"print",
">>",
"sys",
".",
"stderr",
"self",
".",
"windows",
"=",
"{",
"}",
"for",
"line",
"in",
"received",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"not",
"line",
":",
"break",
"if",
"doneRE",
".",
"search",
"(",
"line",
")",
":",
"break",
"values",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"values",
")",
">",
"1",
":",
"package",
"=",
"values",
"[",
"1",
"]",
"else",
":",
"package",
"=",
"\"UNKNOWN\"",
"if",
"len",
"(",
"values",
")",
">",
"0",
":",
"wid",
"=",
"values",
"[",
"0",
"]",
"else",
":",
"wid",
"=",
"'00000000'",
"self",
".",
"windows",
"[",
"int",
"(",
"'0x'",
"+",
"wid",
",",
"16",
")",
"]",
"=",
"package",
"return",
"self",
".",
"windows"
] | List the windows.
Sleep is useful to wait some time before obtaining the new content when something in the
window has changed.
This also sets L{self.windows} as the list of windows.
@type sleep: int
@param sleep: sleep in seconds before proceeding to dump the content
@return: the list of windows | [
"List",
"the",
"windows",
"."
] | python | train |
atztogo/phonopy | phonopy/structure/spglib.py | https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/structure/spglib.py#L123-L235 | def get_symmetry_dataset(cell,
symprec=1e-5,
angle_tolerance=-1.0,
hall_number=0):
"""Search symmetry dataset from an input cell.
Args:
cell, symprec, angle_tolerance:
See the docstring of get_symmetry.
hall_number: If a serial number of Hall symbol (>0) is given,
the database corresponding to the Hall symbol is made.
Return:
A dictionary is returned. Dictionary keys:
number (int): International space group number
international (str): International symbol
hall (str): Hall symbol
choice (str): Centring, origin, basis vector setting
transformation_matrix (3x3 float):
Transformation matrix from input lattice to standardized
lattice:
L^original = L^standardized * Tmat
origin shift (3 float):
Origin shift from standardized to input origin
rotations (3x3 int), translations (float vector):
Rotation matrices and translation vectors. Space group
operations are obtained by
[(r,t) for r, t in zip(rotations, translations)]
wyckoffs (n char): Wyckoff letters
equivalent_atoms (n int): Symmetrically equivalent atoms
mapping_to_primitive (n int):
                Original cell atom index mapping to primitive cell atom index
Idealized standardized unit cell:
std_lattice (3x3 float, row vectors),
std_positions (Nx3 float), std_types (N int)
std_rotation_matrix:
Rigid rotation matrix to rotate from standardized basis
vectors to idealized standardized basis vectors
L^idealized = R * L^standardized
std_mapping_to_primitive (m int):
                std_positions index mapping to those of primitive cell atoms
pointgroup (str): Pointgroup symbol
If it fails, None is returned.
"""
_set_no_error()
lattice, positions, numbers, _ = _expand_cell(cell)
if lattice is None:
return None
spg_ds = spg.dataset(lattice, positions, numbers, hall_number,
symprec, angle_tolerance)
if spg_ds is None:
_set_error_message()
return None
keys = ('number',
'hall_number',
'international',
'hall',
'choice',
'transformation_matrix',
'origin_shift',
'rotations',
'translations',
'wyckoffs',
'site_symmetry_symbols',
'equivalent_atoms',
'mapping_to_primitive',
'std_lattice',
'std_types',
'std_positions',
'std_rotation_matrix',
'std_mapping_to_primitive',
# 'pointgroup_number',
'pointgroup')
dataset = {}
for key, data in zip(keys, spg_ds):
dataset[key] = data
dataset['international'] = dataset['international'].strip()
dataset['hall'] = dataset['hall'].strip()
dataset['choice'] = dataset['choice'].strip()
dataset['transformation_matrix'] = np.array(
dataset['transformation_matrix'], dtype='double', order='C')
dataset['origin_shift'] = np.array(dataset['origin_shift'], dtype='double')
dataset['rotations'] = np.array(dataset['rotations'],
dtype='intc', order='C')
dataset['translations'] = np.array(dataset['translations'],
dtype='double', order='C')
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
dataset['wyckoffs'] = [letters[x] for x in dataset['wyckoffs']]
dataset['site_symmetry_symbols'] = [
s.strip() for s in dataset['site_symmetry_symbols']]
dataset['equivalent_atoms'] = np.array(dataset['equivalent_atoms'],
dtype='intc')
dataset['mapping_to_primitive'] = np.array(dataset['mapping_to_primitive'],
dtype='intc')
dataset['std_lattice'] = np.array(np.transpose(dataset['std_lattice']),
dtype='double', order='C')
dataset['std_types'] = np.array(dataset['std_types'], dtype='intc')
dataset['std_positions'] = np.array(dataset['std_positions'],
dtype='double', order='C')
dataset['std_rotation_matrix'] = np.array(dataset['std_rotation_matrix'],
dtype='double', order='C')
dataset['std_mapping_to_primitive'] = np.array(
dataset['std_mapping_to_primitive'], dtype='intc')
dataset['pointgroup'] = dataset['pointgroup'].strip()
_set_error_message()
return dataset | [
"def",
"get_symmetry_dataset",
"(",
"cell",
",",
"symprec",
"=",
"1e-5",
",",
"angle_tolerance",
"=",
"-",
"1.0",
",",
"hall_number",
"=",
"0",
")",
":",
"_set_no_error",
"(",
")",
"lattice",
",",
"positions",
",",
"numbers",
",",
"_",
"=",
"_expand_cell",
"(",
"cell",
")",
"if",
"lattice",
"is",
"None",
":",
"return",
"None",
"spg_ds",
"=",
"spg",
".",
"dataset",
"(",
"lattice",
",",
"positions",
",",
"numbers",
",",
"hall_number",
",",
"symprec",
",",
"angle_tolerance",
")",
"if",
"spg_ds",
"is",
"None",
":",
"_set_error_message",
"(",
")",
"return",
"None",
"keys",
"=",
"(",
"'number'",
",",
"'hall_number'",
",",
"'international'",
",",
"'hall'",
",",
"'choice'",
",",
"'transformation_matrix'",
",",
"'origin_shift'",
",",
"'rotations'",
",",
"'translations'",
",",
"'wyckoffs'",
",",
"'site_symmetry_symbols'",
",",
"'equivalent_atoms'",
",",
"'mapping_to_primitive'",
",",
"'std_lattice'",
",",
"'std_types'",
",",
"'std_positions'",
",",
"'std_rotation_matrix'",
",",
"'std_mapping_to_primitive'",
",",
"# 'pointgroup_number',",
"'pointgroup'",
")",
"dataset",
"=",
"{",
"}",
"for",
"key",
",",
"data",
"in",
"zip",
"(",
"keys",
",",
"spg_ds",
")",
":",
"dataset",
"[",
"key",
"]",
"=",
"data",
"dataset",
"[",
"'international'",
"]",
"=",
"dataset",
"[",
"'international'",
"]",
".",
"strip",
"(",
")",
"dataset",
"[",
"'hall'",
"]",
"=",
"dataset",
"[",
"'hall'",
"]",
".",
"strip",
"(",
")",
"dataset",
"[",
"'choice'",
"]",
"=",
"dataset",
"[",
"'choice'",
"]",
".",
"strip",
"(",
")",
"dataset",
"[",
"'transformation_matrix'",
"]",
"=",
"np",
".",
"array",
"(",
"dataset",
"[",
"'transformation_matrix'",
"]",
",",
"dtype",
"=",
"'double'",
",",
"order",
"=",
"'C'",
")",
"dataset",
"[",
"'origin_shift'",
"]",
"=",
"np",
".",
"array",
"(",
"dataset",
"[",
"'origin_shift'",
"]",
",",
"dtype",
"=",
"'double'",
")",
"dataset",
"[",
"'rotations'",
"]",
"=",
"np",
".",
"array",
"(",
"dataset",
"[",
"'rotations'",
"]",
",",
"dtype",
"=",
"'intc'",
",",
"order",
"=",
"'C'",
")",
"dataset",
"[",
"'translations'",
"]",
"=",
"np",
".",
"array",
"(",
"dataset",
"[",
"'translations'",
"]",
",",
"dtype",
"=",
"'double'",
",",
"order",
"=",
"'C'",
")",
"letters",
"=",
"\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"",
"dataset",
"[",
"'wyckoffs'",
"]",
"=",
"[",
"letters",
"[",
"x",
"]",
"for",
"x",
"in",
"dataset",
"[",
"'wyckoffs'",
"]",
"]",
"dataset",
"[",
"'site_symmetry_symbols'",
"]",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"dataset",
"[",
"'site_symmetry_symbols'",
"]",
"]",
"dataset",
"[",
"'equivalent_atoms'",
"]",
"=",
"np",
".",
"array",
"(",
"dataset",
"[",
"'equivalent_atoms'",
"]",
",",
"dtype",
"=",
"'intc'",
")",
"dataset",
"[",
"'mapping_to_primitive'",
"]",
"=",
"np",
".",
"array",
"(",
"dataset",
"[",
"'mapping_to_primitive'",
"]",
",",
"dtype",
"=",
"'intc'",
")",
"dataset",
"[",
"'std_lattice'",
"]",
"=",
"np",
".",
"array",
"(",
"np",
".",
"transpose",
"(",
"dataset",
"[",
"'std_lattice'",
"]",
")",
",",
"dtype",
"=",
"'double'",
",",
"order",
"=",
"'C'",
")",
"dataset",
"[",
"'std_types'",
"]",
"=",
"np",
".",
"array",
"(",
"dataset",
"[",
"'std_types'",
"]",
",",
"dtype",
"=",
"'intc'",
")",
"dataset",
"[",
"'std_positions'",
"]",
"=",
"np",
".",
"array",
"(",
"dataset",
"[",
"'std_positions'",
"]",
",",
"dtype",
"=",
"'double'",
",",
"order",
"=",
"'C'",
")",
"dataset",
"[",
"'std_rotation_matrix'",
"]",
"=",
"np",
".",
"array",
"(",
"dataset",
"[",
"'std_rotation_matrix'",
"]",
",",
"dtype",
"=",
"'double'",
",",
"order",
"=",
"'C'",
")",
"dataset",
"[",
"'std_mapping_to_primitive'",
"]",
"=",
"np",
".",
"array",
"(",
"dataset",
"[",
"'std_mapping_to_primitive'",
"]",
",",
"dtype",
"=",
"'intc'",
")",
"dataset",
"[",
"'pointgroup'",
"]",
"=",
"dataset",
"[",
"'pointgroup'",
"]",
".",
"strip",
"(",
")",
"_set_error_message",
"(",
")",
"return",
"dataset"
] | Search symmetry dataset from an input cell.
Args:
cell, symprec, angle_tolerance:
See the docstring of get_symmetry.
hall_number: If a serial number of Hall symbol (>0) is given,
the database corresponding to the Hall symbol is made.
Return:
A dictionary is returned. Dictionary keys:
number (int): International space group number
international (str): International symbol
hall (str): Hall symbol
choice (str): Centring, origin, basis vector setting
transformation_matrix (3x3 float):
Transformation matrix from input lattice to standardized
lattice:
L^original = L^standardized * Tmat
origin shift (3 float):
Origin shift from standardized to input origin
rotations (3x3 int), translations (float vector):
Rotation matrices and translation vectors. Space group
operations are obtained by
[(r,t) for r, t in zip(rotations, translations)]
wyckoffs (n char): Wyckoff letters
equivalent_atoms (n int): Symmetrically equivalent atoms
mapping_to_primitive (n int):
                Original cell atom index mapping to primitive cell atom index
Idealized standardized unit cell:
std_lattice (3x3 float, row vectors),
std_positions (Nx3 float), std_types (N int)
std_rotation_matrix:
Rigid rotation matrix to rotate from standardized basis
vectors to idealized standardized basis vectors
L^idealized = R * L^standardized
std_mapping_to_primitive (m int):
std_positions index mapping to those of primivie cell atoms
pointgroup (str): Pointgroup symbol
If it fails, None is returned. | [
"Search",
"symmetry",
"dataset",
"from",
"an",
"input",
"cell",
"."
] | python | train |
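A hedged usage sketch with the stand-alone spglib package (phonopy bundles an equivalent wrapper); the cell is a made-up one-atom simple cubic lattice, and recent spglib releases return a dataclass rather than a plain dict, so field access may differ.

import numpy as np
import spglib

lattice = np.eye(3) * 4.0              # 4 Angstrom simple cubic cell, row vectors
positions = [[0.0, 0.0, 0.0]]          # fractional coordinates
numbers = [1]                          # one atomic species
dataset = spglib.get_symmetry_dataset((lattice, positions, numbers), symprec=1e-5)
print(dataset["international"], dataset["number"])   # expected: Pm-3m 221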
mottosso/be | be/vendor/requests/adapters.py | https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/adapters.py#L301-L321 | def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:param kwargs: Optional additional keyword arguments.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers | [
"def",
"proxy_headers",
"(",
"self",
",",
"proxy",
")",
":",
"headers",
"=",
"{",
"}",
"username",
",",
"password",
"=",
"get_auth_from_url",
"(",
"proxy",
")",
"if",
"username",
"and",
"password",
":",
"headers",
"[",
"'Proxy-Authorization'",
"]",
"=",
"_basic_auth_str",
"(",
"username",
",",
"password",
")",
"return",
"headers"
] | Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:param kwargs: Optional additional keyword arguments. | [
"Returns",
"a",
"dictionary",
"of",
"the",
"headers",
"to",
"add",
"to",
"any",
"request",
"sent",
"through",
"a",
"proxy",
".",
"This",
"works",
"with",
"urllib3",
"magic",
"to",
"ensure",
"that",
"they",
"are",
"correctly",
"sent",
"to",
"the",
"proxy",
"rather",
"than",
"in",
"a",
"tunnelled",
"request",
"if",
"CONNECT",
"is",
"being",
"used",
"."
] | python | train |
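A quick check against the installed requests library, not part of the record: credentials embedded in a proxy URL become a Proxy-Authorization header, while a credential-free URL yields no extra headers. The proxy host and credentials are invented.

from requests.adapters import HTTPAdapter

adapter = HTTPAdapter()
print(adapter.proxy_headers("http://user:secret@proxy.example.com:3128"))
# {'Proxy-Authorization': 'Basic dXNlcjpzZWNyZXQ='}
print(adapter.proxy_headers("http://proxy.example.com:3128"))
# {}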
mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L592-L597 | def write_memory(self, addr, value, transfer_size=32):
"""
write a memory location.
By default the transfer size is a word
"""
self.ap.write_memory(addr, value, transfer_size) | [
"def",
"write_memory",
"(",
"self",
",",
"addr",
",",
"value",
",",
"transfer_size",
"=",
"32",
")",
":",
"self",
".",
"ap",
".",
"write_memory",
"(",
"addr",
",",
"value",
",",
"transfer_size",
")"
] | write a memory location.
By default the transfer size is a word | [
"write",
"a",
"memory",
"location",
".",
"By",
"default",
"the",
"transfer",
"size",
"is",
"a",
"word"
] | python | train |
globus/globus-cli | globus_cli/commands/endpoint/permission/delete.py | https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/commands/endpoint/permission/delete.py#L14-L21 | def delete_command(endpoint_id, rule_id):
"""
Executor for `globus endpoint permission delete`
"""
client = get_client()
res = client.delete_endpoint_acl_rule(endpoint_id, rule_id)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message") | [
"def",
"delete_command",
"(",
"endpoint_id",
",",
"rule_id",
")",
":",
"client",
"=",
"get_client",
"(",
")",
"res",
"=",
"client",
".",
"delete_endpoint_acl_rule",
"(",
"endpoint_id",
",",
"rule_id",
")",
"formatted_print",
"(",
"res",
",",
"text_format",
"=",
"FORMAT_TEXT_RAW",
",",
"response_key",
"=",
"\"message\"",
")"
] | Executor for `globus endpoint permission delete` | [
"Executor",
"for",
"globus",
"endpoint",
"permission",
"delete"
] | python | train |
biolink/ontobio | ontobio/assocmodel.py | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L295-L317 | def as_dataframe(self, fillna=True, subjects=None):
"""
Return association set as pandas DataFrame
Each row is a subject (e.g. gene)
Each column is the inferred class used to describe the subject
"""
entries = []
selected_subjects = self.subjects
if subjects is not None:
selected_subjects = subjects
for s in selected_subjects:
vmap = {}
for c in self.inferred_types(s):
vmap[c] = 1
entries.append(vmap)
logging.debug("Creating DataFrame")
df = pd.DataFrame(entries, index=selected_subjects)
if fillna:
logging.debug("Performing fillna...")
df = df.fillna(0)
return df | [
"def",
"as_dataframe",
"(",
"self",
",",
"fillna",
"=",
"True",
",",
"subjects",
"=",
"None",
")",
":",
"entries",
"=",
"[",
"]",
"selected_subjects",
"=",
"self",
".",
"subjects",
"if",
"subjects",
"is",
"not",
"None",
":",
"selected_subjects",
"=",
"subjects",
"for",
"s",
"in",
"selected_subjects",
":",
"vmap",
"=",
"{",
"}",
"for",
"c",
"in",
"self",
".",
"inferred_types",
"(",
"s",
")",
":",
"vmap",
"[",
"c",
"]",
"=",
"1",
"entries",
".",
"append",
"(",
"vmap",
")",
"logging",
".",
"debug",
"(",
"\"Creating DataFrame\"",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"entries",
",",
"index",
"=",
"selected_subjects",
")",
"if",
"fillna",
":",
"logging",
".",
"debug",
"(",
"\"Performing fillna...\"",
")",
"df",
"=",
"df",
".",
"fillna",
"(",
"0",
")",
"return",
"df"
] | Return association set as pandas DataFrame
Each row is a subject (e.g. gene)
Each column is the inferred class used to describe the subject | [
"Return",
"association",
"set",
"as",
"pandas",
"DataFrame"
] | python | train |
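The same construction in plain pandas, illustrative only (subject IDs and class IDs are invented): one row per subject, one column per inferred class, absent pairs filled with 0.

import pandas as pd

subjects = ["gene:1", "gene:2"]
entries = [
    {"GO:0005634": 1, "GO:0003677": 1},   # classes inferred for gene:1
    {"GO:0005634": 1},                    # classes inferred for gene:2
]
df = pd.DataFrame(entries, index=subjects).fillna(0)
print(df)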
Kozea/pygal | pygal/graph/stackedline.py | https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/stackedline.py#L40-L50 | def _value_format(self, value, serie, index):
"""
Display value and cumulation
"""
sum_ = serie.points[index][1]
if serie in self.series and (
self.stack_from_top
and self.series.index(serie) == self._order - 1
or not self.stack_from_top and self.series.index(serie) == 0):
return super(StackedLine, self)._value_format(value)
return '%s (+%s)' % (self._y_format(sum_), self._y_format(value)) | [
"def",
"_value_format",
"(",
"self",
",",
"value",
",",
"serie",
",",
"index",
")",
":",
"sum_",
"=",
"serie",
".",
"points",
"[",
"index",
"]",
"[",
"1",
"]",
"if",
"serie",
"in",
"self",
".",
"series",
"and",
"(",
"self",
".",
"stack_from_top",
"and",
"self",
".",
"series",
".",
"index",
"(",
"serie",
")",
"==",
"self",
".",
"_order",
"-",
"1",
"or",
"not",
"self",
".",
"stack_from_top",
"and",
"self",
".",
"series",
".",
"index",
"(",
"serie",
")",
"==",
"0",
")",
":",
"return",
"super",
"(",
"StackedLine",
",",
"self",
")",
".",
"_value_format",
"(",
"value",
")",
"return",
"'%s (+%s)'",
"%",
"(",
"self",
".",
"_y_format",
"(",
"sum_",
")",
",",
"self",
".",
"_y_format",
"(",
"value",
")",
")"
] | Display value and cumulation | [
"Display",
"value",
"and",
"cumulation"
] | python | train |
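A hedged sketch of the public API this helper supports (series names and numbers are invented); tooltips on the stacked line then show the cumulated value plus the per-series increment.

import pygal

chart = pygal.StackedLine(fill=True)
chart.x_labels = ["Q1", "Q2", "Q3"]
chart.add("Product A", [10, 12, 9])
chart.add("Product B", [4, 6, 8])
chart.render_to_file("stacked.svg")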
chimera0/accel-brain-code | Reinforcement-Learning/pyqlearning/deep_q_learning.py | https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/pyqlearning/deep_q_learning.py#L246-L253 | def set_alpha_value(self, value):
'''
setter
Learning rate.
'''
if isinstance(value, float) is False:
raise TypeError("The type of __alpha_value must be float.")
self.__alpha_value = value | [
"def",
"set_alpha_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"float",
")",
"is",
"False",
":",
"raise",
"TypeError",
"(",
"\"The type of __alpha_value must be float.\"",
")",
"self",
".",
"__alpha_value",
"=",
"value"
] | setter
Learning rate. | [
"setter",
"Learning",
"rate",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/io/abinit/tasks.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L3939-L3947 | def sigres_path(self):
"""Absolute path of the SIGRES file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._sigres_path
except AttributeError:
path = self.outdir.has_abiext("SIGRES")
if path: self._sigres_path = path
return path | [
"def",
"sigres_path",
"(",
"self",
")",
":",
"# Lazy property to avoid multiple calls to has_abiext.",
"try",
":",
"return",
"self",
".",
"_sigres_path",
"except",
"AttributeError",
":",
"path",
"=",
"self",
".",
"outdir",
".",
"has_abiext",
"(",
"\"SIGRES\"",
")",
"if",
"path",
":",
"self",
".",
"_sigres_path",
"=",
"path",
"return",
"path"
] | Absolute path of the SIGRES file. Empty string if file is not present. | [
"Absolute",
"path",
"of",
"the",
"SIGRES",
"file",
".",
"Empty",
"string",
"if",
"file",
"is",
"not",
"present",
"."
] | python | train |
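A generic, self-contained sketch of the same lazy-caching idiom (class and file names are invented and unrelated to pymatgen): the expensive lookup runs once, and the result is cached on the instance only when it is non-empty.

class Output:
    def _find_file(self):
        print("searching the output directory once...")
        return "/tmp/run/out_SIGRES.nc"

    @property
    def sigres_path(self):
        try:
            return self._sigres_path
        except AttributeError:
            path = self._find_file()
            if path:
                self._sigres_path = path
            return path

out = Output()
print(out.sigres_path)   # triggers the search
print(out.sigres_path)   # answered from the cached attribute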
openstack/horizon | openstack_auth/backend.py | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/backend.py#L102-L231 | def authenticate(self, auth_url=None, **kwargs):
"""Authenticates a user via the Keystone Identity API."""
LOG.debug('Beginning user authentication')
if not auth_url:
auth_url = settings.OPENSTACK_KEYSTONE_URL
auth_url, url_fixed = utils.fix_auth_url_version_prefix(auth_url)
if url_fixed:
LOG.warning("The OPENSTACK_KEYSTONE_URL setting points to a v2.0 "
"Keystone endpoint, but v3 is specified as the API "
"version to use by Horizon. Using v3 endpoint for "
"authentication.")
plugin, unscoped_auth = self._get_auth_backend(auth_url, **kwargs)
# the recent project id a user might have set in a cookie
recent_project = None
request = kwargs.get('request')
if request:
# Grab recent_project found in the cookie, try to scope
# to the last project used.
recent_project = request.COOKIES.get('recent_project')
unscoped_auth_ref = plugin.get_access_info(unscoped_auth)
# Check expiry for our unscoped auth ref.
self.check_auth_expiry(unscoped_auth_ref)
domain_name = kwargs.get('user_domain_name', None)
domain_auth, domain_auth_ref = plugin.get_domain_scoped_auth(
unscoped_auth, unscoped_auth_ref, domain_name)
scoped_auth, scoped_auth_ref = plugin.get_project_scoped_auth(
unscoped_auth, unscoped_auth_ref, recent_project=recent_project)
# Abort if there are no projects for this user and a valid domain
# token has not been obtained
#
# The valid use cases for a user login are:
# Keystone v2: user must have a role on a project and be able
# to obtain a project scoped token
# Keystone v3: 1) user can obtain a domain scoped token (user
# has a role on the domain they authenticated to),
# only, no roles on a project
# 2) user can obtain a domain scoped token and has
# a role on a project in the domain they
# authenticated to (and can obtain a project scoped
# token)
# 3) user cannot obtain a domain scoped token, but can
# obtain a project scoped token
if not scoped_auth_ref and domain_auth_ref:
# if the user can't obtain a project scoped token, set the scoped
# token to be the domain token, if valid
scoped_auth = domain_auth
scoped_auth_ref = domain_auth_ref
elif not scoped_auth_ref and not domain_auth_ref:
msg = _('You are not authorized for any projects.')
if utils.get_keystone_version() >= 3:
msg = _('You are not authorized for any projects or domains.')
raise exceptions.KeystoneAuthException(msg)
# Check expiry for our new scoped token.
self.check_auth_expiry(scoped_auth_ref)
# We want to try to use the same region we just logged into
# which may or may not be the default depending upon the order
# keystone uses
region_name = None
id_endpoints = scoped_auth_ref.service_catalog.\
get_endpoints(service_type='identity')
for id_endpoint in [cat for cat in id_endpoints['identity']]:
if auth_url in id_endpoint.values():
region_name = id_endpoint['region']
break
interface = getattr(settings, 'OPENSTACK_ENDPOINT_TYPE', 'public')
endpoint, url_fixed = utils.fix_auth_url_version_prefix(
scoped_auth_ref.service_catalog.url_for(
service_type='identity',
interface=interface,
region_name=region_name))
if url_fixed:
LOG.warning("The Keystone URL in service catalog points to a v2.0 "
"Keystone endpoint, but v3 is specified as the API "
"version to use by Horizon. Using v3 endpoint for "
"authentication.")
# If we made it here we succeeded. Create our User!
unscoped_token = unscoped_auth_ref.auth_token
user = auth_user.create_user_from_token(
request,
auth_user.Token(scoped_auth_ref, unscoped_token=unscoped_token),
endpoint,
services_region=region_name)
if request is not None:
# if no k2k providers exist then the function returns quickly
utils.store_initial_k2k_session(auth_url, request, scoped_auth_ref,
unscoped_auth_ref)
request.session['unscoped_token'] = unscoped_token
if domain_auth_ref:
# check django session engine, if using cookies, this will not
# work, as it will overflow the cookie so don't add domain
# scoped token to the session and put error in the log
if utils.using_cookie_backed_sessions():
LOG.error('Using signed cookies as SESSION_ENGINE with '
'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT is '
'enabled. This disables the ability to '
'perform identity operations due to cookie size '
'constraints.')
else:
request.session['domain_token'] = domain_auth_ref
request.user = user
timeout = getattr(settings, "SESSION_TIMEOUT", 3600)
token_life = user.token.expires - datetime.datetime.now(pytz.utc)
session_time = min(timeout, int(token_life.total_seconds()))
request.session.set_expiry(session_time)
keystone_client_class = utils.get_keystone_client().Client
session = utils.get_session()
scoped_client = keystone_client_class(session=session,
auth=scoped_auth)
# Support client caching to save on auth calls.
setattr(request, KEYSTONE_CLIENT_ATTR, scoped_client)
LOG.debug('Authentication completed.')
return user | [
"def",
"authenticate",
"(",
"self",
",",
"auth_url",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"LOG",
".",
"debug",
"(",
"'Beginning user authentication'",
")",
"if",
"not",
"auth_url",
":",
"auth_url",
"=",
"settings",
".",
"OPENSTACK_KEYSTONE_URL",
"auth_url",
",",
"url_fixed",
"=",
"utils",
".",
"fix_auth_url_version_prefix",
"(",
"auth_url",
")",
"if",
"url_fixed",
":",
"LOG",
".",
"warning",
"(",
"\"The OPENSTACK_KEYSTONE_URL setting points to a v2.0 \"",
"\"Keystone endpoint, but v3 is specified as the API \"",
"\"version to use by Horizon. Using v3 endpoint for \"",
"\"authentication.\"",
")",
"plugin",
",",
"unscoped_auth",
"=",
"self",
".",
"_get_auth_backend",
"(",
"auth_url",
",",
"*",
"*",
"kwargs",
")",
"# the recent project id a user might have set in a cookie",
"recent_project",
"=",
"None",
"request",
"=",
"kwargs",
".",
"get",
"(",
"'request'",
")",
"if",
"request",
":",
"# Grab recent_project found in the cookie, try to scope",
"# to the last project used.",
"recent_project",
"=",
"request",
".",
"COOKIES",
".",
"get",
"(",
"'recent_project'",
")",
"unscoped_auth_ref",
"=",
"plugin",
".",
"get_access_info",
"(",
"unscoped_auth",
")",
"# Check expiry for our unscoped auth ref.",
"self",
".",
"check_auth_expiry",
"(",
"unscoped_auth_ref",
")",
"domain_name",
"=",
"kwargs",
".",
"get",
"(",
"'user_domain_name'",
",",
"None",
")",
"domain_auth",
",",
"domain_auth_ref",
"=",
"plugin",
".",
"get_domain_scoped_auth",
"(",
"unscoped_auth",
",",
"unscoped_auth_ref",
",",
"domain_name",
")",
"scoped_auth",
",",
"scoped_auth_ref",
"=",
"plugin",
".",
"get_project_scoped_auth",
"(",
"unscoped_auth",
",",
"unscoped_auth_ref",
",",
"recent_project",
"=",
"recent_project",
")",
"# Abort if there are no projects for this user and a valid domain",
"# token has not been obtained",
"#",
"# The valid use cases for a user login are:",
"# Keystone v2: user must have a role on a project and be able",
"# to obtain a project scoped token",
"# Keystone v3: 1) user can obtain a domain scoped token (user",
"# has a role on the domain they authenticated to),",
"# only, no roles on a project",
"# 2) user can obtain a domain scoped token and has",
"# a role on a project in the domain they",
"# authenticated to (and can obtain a project scoped",
"# token)",
"# 3) user cannot obtain a domain scoped token, but can",
"# obtain a project scoped token",
"if",
"not",
"scoped_auth_ref",
"and",
"domain_auth_ref",
":",
"# if the user can't obtain a project scoped token, set the scoped",
"# token to be the domain token, if valid",
"scoped_auth",
"=",
"domain_auth",
"scoped_auth_ref",
"=",
"domain_auth_ref",
"elif",
"not",
"scoped_auth_ref",
"and",
"not",
"domain_auth_ref",
":",
"msg",
"=",
"_",
"(",
"'You are not authorized for any projects.'",
")",
"if",
"utils",
".",
"get_keystone_version",
"(",
")",
">=",
"3",
":",
"msg",
"=",
"_",
"(",
"'You are not authorized for any projects or domains.'",
")",
"raise",
"exceptions",
".",
"KeystoneAuthException",
"(",
"msg",
")",
"# Check expiry for our new scoped token.",
"self",
".",
"check_auth_expiry",
"(",
"scoped_auth_ref",
")",
"# We want to try to use the same region we just logged into",
"# which may or may not be the default depending upon the order",
"# keystone uses",
"region_name",
"=",
"None",
"id_endpoints",
"=",
"scoped_auth_ref",
".",
"service_catalog",
".",
"get_endpoints",
"(",
"service_type",
"=",
"'identity'",
")",
"for",
"id_endpoint",
"in",
"[",
"cat",
"for",
"cat",
"in",
"id_endpoints",
"[",
"'identity'",
"]",
"]",
":",
"if",
"auth_url",
"in",
"id_endpoint",
".",
"values",
"(",
")",
":",
"region_name",
"=",
"id_endpoint",
"[",
"'region'",
"]",
"break",
"interface",
"=",
"getattr",
"(",
"settings",
",",
"'OPENSTACK_ENDPOINT_TYPE'",
",",
"'public'",
")",
"endpoint",
",",
"url_fixed",
"=",
"utils",
".",
"fix_auth_url_version_prefix",
"(",
"scoped_auth_ref",
".",
"service_catalog",
".",
"url_for",
"(",
"service_type",
"=",
"'identity'",
",",
"interface",
"=",
"interface",
",",
"region_name",
"=",
"region_name",
")",
")",
"if",
"url_fixed",
":",
"LOG",
".",
"warning",
"(",
"\"The Keystone URL in service catalog points to a v2.0 \"",
"\"Keystone endpoint, but v3 is specified as the API \"",
"\"version to use by Horizon. Using v3 endpoint for \"",
"\"authentication.\"",
")",
"# If we made it here we succeeded. Create our User!",
"unscoped_token",
"=",
"unscoped_auth_ref",
".",
"auth_token",
"user",
"=",
"auth_user",
".",
"create_user_from_token",
"(",
"request",
",",
"auth_user",
".",
"Token",
"(",
"scoped_auth_ref",
",",
"unscoped_token",
"=",
"unscoped_token",
")",
",",
"endpoint",
",",
"services_region",
"=",
"region_name",
")",
"if",
"request",
"is",
"not",
"None",
":",
"# if no k2k providers exist then the function returns quickly",
"utils",
".",
"store_initial_k2k_session",
"(",
"auth_url",
",",
"request",
",",
"scoped_auth_ref",
",",
"unscoped_auth_ref",
")",
"request",
".",
"session",
"[",
"'unscoped_token'",
"]",
"=",
"unscoped_token",
"if",
"domain_auth_ref",
":",
"# check django session engine, if using cookies, this will not",
"# work, as it will overflow the cookie so don't add domain",
"# scoped token to the session and put error in the log",
"if",
"utils",
".",
"using_cookie_backed_sessions",
"(",
")",
":",
"LOG",
".",
"error",
"(",
"'Using signed cookies as SESSION_ENGINE with '",
"'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT is '",
"'enabled. This disables the ability to '",
"'perform identity operations due to cookie size '",
"'constraints.'",
")",
"else",
":",
"request",
".",
"session",
"[",
"'domain_token'",
"]",
"=",
"domain_auth_ref",
"request",
".",
"user",
"=",
"user",
"timeout",
"=",
"getattr",
"(",
"settings",
",",
"\"SESSION_TIMEOUT\"",
",",
"3600",
")",
"token_life",
"=",
"user",
".",
"token",
".",
"expires",
"-",
"datetime",
".",
"datetime",
".",
"now",
"(",
"pytz",
".",
"utc",
")",
"session_time",
"=",
"min",
"(",
"timeout",
",",
"int",
"(",
"token_life",
".",
"total_seconds",
"(",
")",
")",
")",
"request",
".",
"session",
".",
"set_expiry",
"(",
"session_time",
")",
"keystone_client_class",
"=",
"utils",
".",
"get_keystone_client",
"(",
")",
".",
"Client",
"session",
"=",
"utils",
".",
"get_session",
"(",
")",
"scoped_client",
"=",
"keystone_client_class",
"(",
"session",
"=",
"session",
",",
"auth",
"=",
"scoped_auth",
")",
"# Support client caching to save on auth calls.",
"setattr",
"(",
"request",
",",
"KEYSTONE_CLIENT_ATTR",
",",
"scoped_client",
")",
"LOG",
".",
"debug",
"(",
"'Authentication completed.'",
")",
"return",
"user"
] | Authenticates a user via the Keystone Identity API. | [
"Authenticates",
"a",
"user",
"via",
"the",
"Keystone",
"Identity",
"API",
"."
] | python | train |
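Editor's note: one step in the flow above that benefits from being read in isolation is the session expiry choice, the smaller of the configured SESSION_TIMEOUT and the token's remaining lifetime. A standalone sketch of that calculation (assumes pytz is available; the 30-minute token is a made-up example):

import datetime
import pytz

def session_expiry_seconds(token_expires, timeout=3600, now=None):
    """Illustrative only: never let the session outlive the Keystone token."""
    now = now or datetime.datetime.now(pytz.utc)
    remaining = token_expires - now
    return min(timeout, int(remaining.total_seconds()))

expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(minutes=30)
print(session_expiry_seconds(expires))  # roughly 1800, not the full 3600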
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/visuals/transforms/interactive.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/transforms/interactive.py#L47-L60 | def on_resize(self, event):
"""Resize handler
Parameters
----------
event : instance of Event
The event.
"""
if self._aspect is None:
return
w, h = self._canvas.size
aspect = self._aspect / (w / h)
self.scale = (self.scale[0], self.scale[0] / aspect)
self.shader_map() | [
"def",
"on_resize",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"_aspect",
"is",
"None",
":",
"return",
"w",
",",
"h",
"=",
"self",
".",
"_canvas",
".",
"size",
"aspect",
"=",
"self",
".",
"_aspect",
"/",
"(",
"w",
"/",
"h",
")",
"self",
".",
"scale",
"=",
"(",
"self",
".",
"scale",
"[",
"0",
"]",
",",
"self",
".",
"scale",
"[",
"0",
"]",
"/",
"aspect",
")",
"self",
".",
"shader_map",
"(",
")"
] | Resize handler
Parameters
----------
event : instance of Event
The event. | [
"Resize",
"handler"
] | python | train |
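Editor's note: the resize handler above compensates the y scale so a configured aspect ratio survives canvas resizes. The arithmetic on its own, outside vispy (the function name is invented):

def rescale_for_canvas(scale_x, target_aspect, width, height):
    """Illustrative only: return (scale_x, scale_y) preserving target_aspect."""
    aspect = target_aspect / (width / height)
    return (scale_x, scale_x / aspect)

print(rescale_for_canvas(1.0, 1.0, 800, 800))  # (1.0, 1.0): square canvas, no change
print(rescale_for_canvas(1.0, 1.0, 800, 400))  # (1.0, 2.0): wider canvas, y scaled up to compensate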
robhowley/nhlscrapi | nhlscrapi/games/toi.py | https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/toi.py#L100-L108 | def away_shift_summ(self):
"""
:returns: :py:class:`.ShiftSummary` by player for the away team
:rtype: dict ``{ player_num: shift_summary_obj }``
"""
if not self.__wrapped_away:
self.__wrapped_away = self.__wrap(self._away.by_player)
return self.__wrapped_away | [
"def",
"away_shift_summ",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"__wrapped_away",
":",
"self",
".",
"__wrapped_away",
"=",
"self",
".",
"__wrap",
"(",
"self",
".",
"_away",
".",
"by_player",
")",
"return",
"self",
".",
"__wrapped_away"
] | :returns: :py:class:`.ShiftSummary` by player for the away team
:rtype: dict ``{ player_num: shift_summary_obj }`` | [
":",
"returns",
":",
":",
"py",
":",
"class",
":",
".",
"ShiftSummary",
"by",
"player",
"for",
"the",
"away",
"team",
":",
"rtype",
":",
"dict",
"{",
"player_num",
":",
"shift_summary_obj",
"}"
] | python | train |
tilde-lab/tilde | utils/syshwinfo.py | https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/utils/syshwinfo.py#L127-L138 | def diskdata():
"""Get total disk size in GB."""
p = os.popen("/bin/df -l -P")
ddata = {}
tsize = 0
for line in p.readlines():
d = line.split()
if ("/dev/sd" in d[0] or "/dev/hd" in d[0] or "/dev/mapper" in d[0]):
tsize = tsize + int(d[1])
ddata["Disk_GB"] = int(tsize)/1000000
p.close()
return ddata | [
"def",
"diskdata",
"(",
")",
":",
"p",
"=",
"os",
".",
"popen",
"(",
"\"/bin/df -l -P\"",
")",
"ddata",
"=",
"{",
"}",
"tsize",
"=",
"0",
"for",
"line",
"in",
"p",
".",
"readlines",
"(",
")",
":",
"d",
"=",
"line",
".",
"split",
"(",
")",
"if",
"(",
"\"/dev/sd\"",
"in",
"d",
"[",
"0",
"]",
"or",
"\"/dev/hd\"",
"in",
"d",
"[",
"0",
"]",
"or",
"\"/dev/mapper\"",
"in",
"d",
"[",
"0",
"]",
")",
":",
"tsize",
"=",
"tsize",
"+",
"int",
"(",
"d",
"[",
"1",
"]",
")",
"ddata",
"[",
"\"Disk_GB\"",
"]",
"=",
"int",
"(",
"tsize",
")",
"/",
"1000000",
"p",
".",
"close",
"(",
")",
"return",
"ddata"
] | Get total disk size in GB. | [
"Get",
"total",
"disk",
"size",
"in",
"GB",
"."
] | python | train |
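Editor's note: `diskdata` sums the 1024-byte block counts reported by `df -l -P` for real block devices and divides by 1,000,000 for an approximate GB figure. A sketch of the same parsing against captured output instead of a live pipe (the sample text below is invented):

SAMPLE_DF = """Filesystem     1024-blocks      Used Available Capacity Mounted on
/dev/sda1        487652340 102400000 360252340      23% /
/dev/sdb1        975415324  10240000 915175324       2% /data
tmpfs              8145028         0   8145028       0% /dev/shm
"""

def total_disk_gb(df_output):
    """Illustrative only: count /dev/sd*, /dev/hd* and /dev/mapper rows."""
    total_kblocks = 0
    for line in df_output.splitlines():
        fields = line.split()
        if fields and ("/dev/sd" in fields[0] or "/dev/hd" in fields[0]
                       or "/dev/mapper" in fields[0]):
            total_kblocks += int(fields[1])
    return total_kblocks // 1000000

print(total_disk_gb(SAMPLE_DF))  # 1463 for the sample above (tmpfs is ignored)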
swharden/webinspect | webinspect/webinspect.py | https://github.com/swharden/webinspect/blob/432674b61666d66e5be330b61f9fad0b46dac84e/webinspect/webinspect.py#L24-L33 | def launch(thing,title=False):
"""analyze a thing, create a nice HTML document, and launch it."""
html=htmlFromThing(thing,title=title)
if not html:
print("no HTML was generated.")
return
fname="%s/%s.html"%(tempfile.gettempdir(),str(time.time()))
with open(fname,'w') as f:
f.write(html)
webbrowser.open(fname) | [
"def",
"launch",
"(",
"thing",
",",
"title",
"=",
"False",
")",
":",
"html",
"=",
"htmlFromThing",
"(",
"thing",
",",
"title",
"=",
"title",
")",
"if",
"not",
"html",
":",
"print",
"(",
"\"no HTML was generated.\"",
")",
"return",
"fname",
"=",
"\"%s/%s.html\"",
"%",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"str",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"html",
")",
"webbrowser",
".",
"open",
"(",
"fname",
")"
] | analyze a thing, create a nice HTML document, and launch it. | [
"analyze",
"a",
"thing",
"create",
"a",
"nice",
"HTML",
"document",
"and",
"launch",
"it",
"."
] | python | train |
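Editor's note: `launch` persists the generated report to a uniquely named file in the temp directory and asks the default browser to open it. The same write-then-open pattern as a self-contained sketch (the HTML string is a placeholder):

import os
import tempfile
import time
import webbrowser

def open_in_browser(html):
    """Illustrative only: write HTML to a unique temp file and open it."""
    fname = os.path.join(tempfile.gettempdir(), "%s.html" % time.time())
    with open(fname, "w") as f:
        f.write(html)
    webbrowser.open(fname)
    return fname

open_in_browser("<html><body><h1>inspection report</h1></body></html>")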
taizilongxu/douban.fm | doubanfm/colorset/colors.py | https://github.com/taizilongxu/douban.fm/blob/d65126d3bd3e12d8a7109137caff8da0efc22b2f/doubanfm/colorset/colors.py#L5-L18 | def basic_color(code):
"""
16 colors supported
"""
def inner(text, rl=False):
""" Every raw_input with color sequences should be called with
rl=True to avoid readline messed up the length calculation
"""
c = code
if rl:
return "\001\033[%sm\002%s\001\033[0m\002" % (c, text)
else:
return "\033[%sm%s\033[0m" % (c, text)
return inner | [
"def",
"basic_color",
"(",
"code",
")",
":",
"def",
"inner",
"(",
"text",
",",
"rl",
"=",
"False",
")",
":",
"\"\"\" Every raw_input with color sequences should be called with\n rl=True to avoid readline messed up the length calculation\n \"\"\"",
"c",
"=",
"code",
"if",
"rl",
":",
"return",
"\"\\001\\033[%sm\\002%s\\001\\033[0m\\002\"",
"%",
"(",
"c",
",",
"text",
")",
"else",
":",
"return",
"\"\\033[%sm%s\\033[0m\"",
"%",
"(",
"c",
",",
"text",
")",
"return",
"inner"
] | 16 colors supported | [
"16",
"colors",
"supported"
] | python | train |
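Editor's note: `basic_color` returns one closure per ANSI code, and the \001/\002 wrappers exist so readline treats the escape sequences as zero-width when measuring prompt length. A usage sketch built on the factory above (the color codes are standard ANSI; the prompt text is invented):

red = basic_color('31')           # 31 is ANSI red
bold_green = basic_color('32;1')  # codes combine with ';'

print(red('error: channel list is empty'))

# When coloring an interactive prompt, pass rl=True so readline skips
# the escape bytes and the cursor position stays correct:
# answer = raw_input(bold_green('channel> ', rl=True))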
dbcli/athenacli | athenacli/packages/filepaths.py | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/filepaths.py#L5-L14 | def list_path(root_dir):
"""List directory if exists.
:param dir: str
:return: list
"""
res = []
if os.path.isdir(root_dir):
for name in os.listdir(root_dir):
res.append(name)
return res | [
"def",
"list_path",
"(",
"root_dir",
")",
":",
"res",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"root_dir",
")",
":",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"root_dir",
")",
":",
"res",
".",
"append",
"(",
"name",
")",
"return",
"res"
] | List directory if exists.
:param dir: str
:return: list | [
"List",
"directory",
"if",
"exists",
".",
":",
"param",
"dir",
":",
"str",
":",
"return",
":",
"list"
] | python | train |