repo (stringlengths 7–54) | path (stringlengths 4–192) | url (stringlengths 87–284) | code (stringlengths 78–104k) | docstring (stringlengths 1–46.9k) | language (stringclasses 1 value) | partition (stringclasses 3 values)
---|---|---|---|---|---|---
gazpachoking/jsonref | jsonref.py | https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/jsonref.py#L413-L424 | def dumps(obj, **kwargs):
    """
    Serialize `obj`, which may contain :class:`JsonRef` objects, to a JSON
    formatted string. `JsonRef` objects will be dumped as the original
    reference object they were created from.

    :param obj: Object to serialize
    :param kwargs: Keyword arguments are the same as to :func:`json.dumps`
    """
    kwargs["cls"] = _ref_encoder_factory(kwargs.get("cls", json.JSONEncoder))
    return json.dumps(obj, **kwargs) |
Serialize `obj`, which may contain :class:`JsonRef` objects, to a JSON
formatted string. `JsonRef` objects will be dumped as the original
reference object they were created from.
:param obj: Object to serialize
:param kwargs: Keyword arguments are the same as to :func:`json.dumps` | python | train |
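A minimal usage sketch for the `dumps` row above, assuming it is called as `jsonref.dumps` alongside `jsonref.loads` from the same module:

```python
import jsonref

# loads() resolves {"$ref": ...} entries into JsonRef proxy objects.
doc = jsonref.loads('{"a": 1, "b": {"$ref": "#/a"}}')
print(doc["b"])            # 1 -- the proxy behaves like the referenced value
print(jsonref.dumps(doc))  # the proxy is written back out as {"$ref": "#/a"}
```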
SmokinCaterpillar/pypet | pypet/naturalnaming.py | https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/naturalnaming.py#L3172-L3186 | def f_get_children(self, copy=True):
    """Returns a children dictionary.

    :param copy:
        Whether the group's original dictionary or a shallow copy is returned.
        If you want the real dictionary please do not modify it at all!

    :returns: Dictionary of nodes
    """
    if copy:
        return self._children.copy()
    else:
        return self._children |
Returns a children dictionary.
:param copy:
Whether the group's original dictionary or a shallow copy is returned.
If you want the real dictionary please do not modify it at all!
:returns: Dictionary of nodes | python | test |
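A hypothetical call pattern for `f_get_children`, assuming `group` is a pypet group node (for example, part of a `Trajectory`):

```python
children = group.f_get_children()             # shallow copy: safe to iterate and mutate
live_dict = group.f_get_children(copy=False)  # the group's real dict: treat as read-only
```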
ryanpetrello/cleaver | cleaver/experiment.py | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/experiment.py#L167-L187 | def confidence_level(self):
    """
    Based on the variant's Z-Score, returns a human-readable string that
    describes the confidence with which we can say the results are
    statistically significant.
    """
    z = self.z_score
    if isinstance(z, string_types):
        return z

    z = abs(round(z, 3))
    if z == 0.0:
        return "No Change"
    elif z < 1.65:
        return "No Confidence"
    elif z < 2.33:
        return "95% Confidence"
    elif z < 3.08:
        return "99% Confidence"
    return "99.9% Confidence" |
Based on the variant's Z-Score, returns a human-readable string that
describes the confidence with which we can say the results are
statistically significant. | python | train |
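The cutoffs above are one-tailed z critical values (1.65 ≈ p < 0.05, 2.33 ≈ p < 0.01, 3.08 ≈ p < 0.001). A standalone sketch of the same mapping:

```python
def label(z):
    # Same thresholds as confidence_level above.
    z = abs(round(z, 3))
    if z == 0.0:
        return "No Change"
    if z < 1.65:
        return "No Confidence"
    if z < 2.33:
        return "95% Confidence"
    if z < 3.08:
        return "99% Confidence"
    return "99.9% Confidence"

print([label(z) for z in (0.0, 1.0, 2.0, 2.5, 3.2)])
# ['No Change', 'No Confidence', '95% Confidence', '99% Confidence', '99.9% Confidence']
```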
AtteqCom/zsl | src/zsl/utils/xml_helper.py | https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/utils/xml_helper.py#L111-L123 | def element_to_int(element, attribute=None):
    """Convert ``element`` object to int. If attribute is not given, convert ``element.text``.

    :param element: ElementTree element
    :param attribute: attribute name
    :type attribute: str
    :returns: integer
    :rtype: int
    """
    if attribute is not None:
        return int(element.get(attribute))
    else:
        return int(element.text) |
Convert ``element`` object to int. If attribute is not given, convert ``element.text``.
:param element: ElementTree element
:param attribute: attribute name
:type attribute: str
:returns: integer
:rtype: int | python | train |
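A quick sketch of both call forms, using the standard library's ElementTree:

```python
import xml.etree.ElementTree as ET

el = ET.fromstring('<count max="10">7</count>')
print(element_to_int(el))           # 7  -- parsed from element.text
print(element_to_int(el, "max"))    # 10 -- parsed from the attribute
```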
onnx/onnxmltools | onnxutils/onnxconverter_common/topology.py | https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxutils/onnxconverter_common/topology.py#L281-L306 | def _generate_unique_name(seed, existing_names):
    '''
    Produce a unique string based on the seed
    :param seed: a string
    :param existing_names: a set containing strings which cannot be produced
    :return: a string similar to the seed
    '''
    if seed == '':
        raise ValueError('Name seed must be a non-empty string')

    # Make the seed meet C-style naming convention
    seed = re.sub('[^0-9a-zA-Z]', '_', seed)  # Only alphabets and numbers are allowed
    if re.match('^[0-9]', seed):  # The first symbol cannot be a number
        seed = '_' + seed

    # If seed has never been seen, we return it as it is. Otherwise, we will append a number to make it unique.
    if seed not in existing_names:
        existing_names.add(seed)
        return seed
    else:
        i = 1
        while seed + str(i) in existing_names:
            i += 1
        new_name = seed + str(i)
        existing_names.add(new_name)
        return new_name |
Produce a unique string based on the seed
:param seed: a string
:param existing_names: a set containing strings which cannot be produced
:return: a string similar to the seed | python | train |
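A sketch of the sanitizing and de-duplication behavior (the seed strings are made up for illustration):

```python
import re

names = set()
print(_generate_unique_name("conv-1", names))  # 'conv_1'  -- '-' replaced by '_'
print(_generate_unique_name("conv-1", names))  # 'conv_11' -- numeric suffix appended
print(_generate_unique_name("3x3", names))     # '_3x3'    -- leading digit prefixed
```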
sensu-plugins/sensu-plugin-python | sensu_plugin/handler.py | https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/handler.py#L76-L87 | def read_event(self, check_result):
    '''
    Convert the piped check result (json) into a global 'event' dict
    '''
    try:
        event = json.loads(check_result)
        event['occurrences'] = event.get('occurrences', 1)
        event['check'] = event.get('check', {})
        event['client'] = event.get('client', {})
        return event
    except Exception:
        raise ValueError('error reading event: ' + check_result) |
Convert the piped check result (json) into a global 'event' dict | python | train |
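A usage sketch, assuming `handler` is an instance of the surrounding Sensu handler class:

```python
import json

raw = json.dumps({"client": {"name": "web-01"}, "check": {"name": "cpu"}})
event = handler.read_event(raw)
print(event["occurrences"])  # 1 -- defaulted because the field was absent
```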
koriakin/binflakes | binflakes/types/word.py | https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/word.py#L242-L249 | def sext(self, width):
    """Sign-extends a word to a larger width. It is an error to specify
    a smaller width (use ``extract`` instead to crop off the extra bits).
    """
    width = operator.index(width)
    if width < self._width:
        raise ValueError('sign extending to a smaller width')
    return BinWord(width, self.to_sint(), trunc=True) |
Sign-extends a word to a larger width. It is an error to specify
a smaller width (use ``extract`` instead to crop off the extra bits). | python | train |
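A sketch of sign extension, assuming `BinWord(width, value)` construction from the same module; 4-bit `0b1010` is -6 in two's complement:

```python
w = BinWord(4, 0b1010)   # -6 as a signed 4-bit word
print(w.sext(8))         # 8-bit 0b11111010 -- still -6; the sign bit is replicated
```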
TestInABox/stackInABox | stackinabox/util/requests_mock/core.py | https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/util/requests_mock/core.py#L78-L101 | def split_status(status):
    """Split a HTTP Status and Reason code string into a tuple.

    :param status: string containing the status and reason text or
        the integer of the status code

    :returns: tuple - (int, string) containing the integer status code
        and reason text string
    """
    # If the status is an integer, then lookup the reason text
    if isinstance(status, int):
        return (status, RequestMockCallable.get_reason_for_status(
            status))

    # otherwise, ensure it is a string and try to split it based on the
    # standard HTTP status and reason text format
    elif isinstance(status, str) or isinstance(status, bytes):
        code, reason = status.split(' ', 1)
        return (code, reason)

    # otherwise, return with a default reason code
    else:
        return (status, 'Unknown') |
Split a HTTP Status and Reason code string into a tuple.
:param status: string containing the status and reason text or
the integer of the status code
:returns: tuple - (int, string) containing the integer status code
and reason text string | python | train |
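A sketch of the three branches (for integer input, the reason text comes from the class's own lookup table):

```python
split_status(200)              # (200, 'OK') -- reason looked up from the code
split_status("404 Not Found")  # ('404', 'Not Found') -- split on the first space
split_status(None)             # (None, 'Unknown') -- fallback branch
```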
bastikr/boolean.py | boolean/boolean.py | https://github.com/bastikr/boolean.py/blob/e984df480afc60605e9501a0d3d54d667e8f7dbf/boolean/boolean.py#L505-L527 | def normalize(self, expr, operation):
    """
    Return a normalized expression transformed to its normal form in the
    given AND or OR operation.

    The new expression arguments will satisfy these conditions:
    - operation(*args) == expr (here mathematical equality is meant)
    - the operation does not occur in any of its args.
    - NOT is only appearing in literals (aka. Negation normal form).

    The operation must be an AND or OR operation or a subclass.
    """
    # ensure that the operation is not NOT
    assert operation in (self.AND, self.OR,)

    # Move NOT inwards.
    expr = expr.literalize()

    # Simplify first otherwise _rdistributive() may take forever.
    expr = expr.simplify()

    operation_example = operation(self.TRUE, self.FALSE)
    expr = self._rdistributive(expr, operation_example)

    # Canonicalize
    expr = expr.simplify()
    return expr |
Return a normalized expression transformed to its normal form in the
given AND or OR operation.
The new expression arguments will satisfy these conditions:
- operation(*args) == expr (here mathematical equality is meant)
- the operation does not occur in any of its args.
- NOT is only appearing in literals (aka. Negation normal form).
The operation must be an AND or OR operation or a subclass. | python | train |
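A usage sketch, assuming the standard boolean.py entry point (`BooleanAlgebra` exposes this method along with `AND`/`OR`):

```python
from boolean import BooleanAlgebra

algebra = BooleanAlgebra()
expr = algebra.parse("a & (b | (a & c))")
dnf = algebra.normalize(expr, algebra.OR)   # disjunctive normal form: (a&b) | (a&c)
cnf = algebra.normalize(expr, algebra.AND)  # conjunctive normal form: a & (b|c)
```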
inasafe/inasafe | safe/gui/tools/batch/batch_dialog.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/batch/batch_dialog.py#L849-L905 | def read_scenarios(filename):
    """Read keywords dictionary from file.

    :param filename: Name of file holding scenarios.

    :return: Dictionary with structure like this
        {{ 'foo' : { 'a': 'b', 'c': 'd'},
        { 'bar' : { 'd': 'e', 'f': 'g'}}

    A scenarios file may look like this:

        [jakarta_flood]
        hazard: /path/to/hazard.tif
        exposure: /path/to/exposure.tif
        function: function_id
        aggregation: /path/to/aggregation_layer.tif
        extent: minx, miny, maxx, maxy

    Notes:
        path for hazard, exposure, and aggregation are relative to scenario
        file path
    """
    # Input checks
    filename = os.path.abspath(filename)

    blocks = {}
    parser = ConfigParser()

    # Parse the file content.
    # if the content don't have section header
    # we use the filename.
    try:
        parser.read(filename)
    except MissingSectionHeaderError:
        base_name = os.path.basename(filename)
        name = os.path.splitext(base_name)[0]
        section = '[%s]\n' % name
        content = section + open(filename).read()
        parser.readfp(StringIO(content))

    # convert to dictionary
    for section in parser.sections():
        items = parser.items(section)
        # add section as scenario name
        items.append(('scenario_name', section))
        # add full path to the blocks
        items.append(('full_path', filename))
        blocks[section] = {}
        for key, value in items:
            blocks[section][key] = value

    # Ok we have generated a structure that looks like this:
    # blocks = {{ 'foo' : { 'a': 'b', 'c': 'd'},
    #            { 'bar' : { 'd': 'e', 'f': 'g'}}
    # where foo and bar are scenarios and their dicts are the options for
    # that scenario (e.g. hazard, exposure etc)
    return blocks |
Read keywords dictionary from file.
:param filename: Name of file holding scenarios.
:return: Dictionary with structure like this
{{ 'foo' : { 'a': 'b', 'c': 'd'},
{ 'bar' : { 'd': 'e', 'f': 'g'}}
A scenarios file may look like this:
[jakarta_flood]
hazard: /path/to/hazard.tif
exposure: /path/to/exposure.tif
function: function_id
aggregation: /path/to/aggregation_layer.tif
extent: minx, miny, maxx, maxy
Notes:
path for hazard, exposure, and aggregation are relative to scenario
file path | python | train |
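A sketch with a made-up scenario file matching the format shown in the docstring:

```python
# scenarios.txt:
# [jakarta_flood]
# hazard: /path/to/hazard.tif
# exposure: /path/to/exposure.tif

blocks = read_scenarios('scenarios.txt')
print(blocks['jakarta_flood']['hazard'])         # '/path/to/hazard.tif'
print(blocks['jakarta_flood']['scenario_name'])  # 'jakarta_flood' -- added by the reader
```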
synw/dataswim | dataswim/data/select.py | https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/select.py#L13-L21 | def first_(self):
    """
    Select the first row
    """
    try:
        val = self.df.iloc[0]
        return val
    except Exception as e:
        self.err(e, "Can not select first row") |
Select the first row | python | train |
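The underlying operation is plain pandas positional indexing; a minimal equivalent:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
first_row = df.iloc[0]   # what first_() returns: the first row as a Series
```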
brainiak/brainiak | brainiak/factoranalysis/tfa.py | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L908-L969 | def _fit_tfa_inner(
        self,
        data,
        R,
        template_centers,
        template_widths,
        template_centers_mean_cov,
        template_widths_mean_var_reci):
    """Fit TFA model, the inner loop part

    Parameters
    ----------
    data: 2D array, in shape [n_voxel, n_tr]
        The fMRI data of a subject

    R : 2D array, in shape [n_voxel, n_dim]
        The voxel coordinate matrix of fMRI data

    template_centers: 1D array
        The template prior on centers

    template_widths: 1D array
        The template prior on widths

    template_centers_mean_cov: 2D array, with shape [K, cov_size]
        The template prior on covariance of centers' mean

    template_widths_mean_var_reci: 1D array
        The reciprocal of template prior on variance of widths' mean

    Returns
    -------
    TFA
        Returns the instance itself.
    """
    nfeature = data.shape[0]
    nsample = data.shape[1]

    feature_indices = \
        np.random.choice(nfeature, self.max_num_voxel, replace=False)
    sample_features = np.zeros(nfeature).astype(bool)
    sample_features[feature_indices] = True

    samples_indices = \
        np.random.choice(nsample, self.max_num_tr, replace=False)

    curr_data = np.zeros((self.max_num_voxel, self.max_num_tr))\
        .astype(float)
    curr_data = data[feature_indices]
    curr_data = curr_data[:, samples_indices].copy()
    curr_R = R[feature_indices].copy()

    centers = self.get_centers(self.local_prior)
    widths = self.get_widths(self.local_prior)
    unique_R, inds = self.get_unique_R(curr_R)

    F = self.get_factors(unique_R, inds, centers, widths)
    W = self.get_weights(curr_data, F)

    self.local_posterior_, self.total_cost = self._estimate_centers_widths(
        unique_R, inds, curr_data, W, centers, widths,
        template_centers, template_centers_mean_cov,
        template_widths, template_widths_mean_var_reci)
    return self |
Fit TFA model, the inner loop part
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data of a subject
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
TFA
Returns the instance itself. | python | train |
ThreatConnect-Inc/tcex | tcex/tcex_bin_run.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L1468-L1514 | def stage_tc_associations(self, entity1, entity2):
    """Add an attribute to a resource.

    Args:
        entity1 (str): A Redis variable containing a TCEntity.
        entity2 (str): A Redis variable containing a TCEntity.
    """
    # resource 1
    entity1 = self.tcex.playbook.read(entity1)
    entity1_id = entity1.get('id')
    entity1_owner = entity1.get('ownerName')
    entity1_type = entity1.get('type')
    if entity1.get('type') in self.tcex.indicator_types:
        entity1_id = entity1.get('value')

    # resource 2
    entity2 = self.tcex.playbook.read(entity2)
    entity2_id = entity2.get('id')
    entity2_owner = entity1.get('ownerName')
    entity2_type = entity2.get('type')
    if entity2.get('type') in self.tcex.indicator_types:
        entity2_id = entity2.get('value')

    if entity1_owner != entity2_owner:
        self.log.error('[stage] Can not associate resource across owners.')
        return

    resource1 = self.tcex.resource(entity1_type)
    resource1.http_method = 'POST'
    resource1.owner = entity1_owner
    resource1.resource_id(entity1_id)

    resource2 = self.tcex.resource(entity2_type)
    resource2.resource_id(entity2_id)

    a_resource = resource1.associations(resource2)
    response = a_resource.request()
    if response.get('status') != 'Success':
        self.log.warning(
            '[stage] Failed associating "{}:{}" with "{}:{}" ({}).'.format(
                entity1_type,
                entity1_id,
                entity2_type,
                entity2_id,
                response.get('response').text,
            )
        ) |
Add an attribute to a resource.
Args:
entity1 (str): A Redis variable containing a TCEntity.
entity2 (str): A Redis variable containing a TCEntity. | python | train |
arne-cl/discoursegraphs | src/discoursegraphs/discoursegraph.py | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/discoursegraph.py#L1048-L1078 | def select_neighbors_by_layer(docgraph, node, layer, data=False):
    """
    Get all neighboring nodes belonging to (any of) the given layer(s).
    A neighboring node is a node that the given node connects to with an
    outgoing edge.

    Parameters
    ----------
    docgraph : DiscourseDocumentGraph
        document graph from which the nodes will be extracted
    layer : str or collection of str
        name(s) of the layer(s)
    data : bool
        If True, results will include node attributes.

    Yields
    ------
    nodes : generator of str or generator of (str, dict) tuple
        If data is False (default), a generator of neighbor node IDs
        that are present in the given layer. If data is True,
        a generator of (node ID, node attrib dict) tuples.
    """
    for node_id in docgraph.neighbors_iter(node):
        node_layers = docgraph.node[node_id]['layers']
        if isinstance(layer, (str, unicode)):
            condition = layer in node_layers
        else:  # ``layer`` is a list/set/dict of layers
            condition = any(l in node_layers for l in layer)

        if condition:
            yield (node_id, docgraph.node[node_id]) if data else (node_id) |
Get all neighboring nodes belonging to (any of) the given layer(s).
A neighboring node is a node that the given node connects to with an
outgoing edge.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
layer : str or collection of str
name(s) of the layer(s)
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of neighbor node IDs
that are present in the given layer. If data is True,
a generator of (node ID, node attrib dict) tuples. | python | train |
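A hypothetical call, assuming `docgraph` is a DiscourseDocumentGraph whose nodes carry a `layers` set; `'token_1'`, `'tiger'`, and `'rst'` are made-up names:

```python
# Single layer: yields bare node IDs.
for node_id in select_neighbors_by_layer(docgraph, 'token_1', 'tiger'):
    print(node_id)

# Several layers with attributes: yields (node ID, attribute dict) tuples.
for node_id, attrs in select_neighbors_by_layer(docgraph, 'token_1',
                                                {'tiger', 'rst'}, data=True):
    print(node_id, attrs['layers'])
```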
jbloomlab/phydms | phydmslib/weblogo.py | https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/weblogo.py#L561-L705 | def _my_eps_formatter(logodata, format, ordered_alphabets):
    """ Generate a logo in Encapsulated Postscript (EPS)

    Modified from weblogo version 3.4 source code.

    *ordered_alphabets* is a dictionary keyed by zero-indexed
    consecutive sites, with values giving order of characters
    from bottom to top.
    """
    substitutions = {}
    from_format = [
        "creation_date", "logo_width", "logo_height",
        "lines_per_logo", "line_width", "line_height",
        "line_margin_right", "line_margin_left", "line_margin_bottom",
        "line_margin_top", "title_height", "xaxis_label_height",
        "creator_text", "logo_title", "logo_margin",
        "stroke_width", "tic_length",
        "stacks_per_line", "stack_margin",
        "yaxis_label", "yaxis_tic_interval", "yaxis_minor_tic_interval",
        "xaxis_label", "xaxis_tic_interval", "number_interval",
        "fineprint", "shrink_fraction", "errorbar_fraction",
        "errorbar_width_fraction",
        "errorbar_gray", "small_fontsize", "fontsize",
        "title_fontsize", "number_fontsize", "text_font",
        "logo_font", "title_font",
        "logo_label", "yaxis_scale", "end_type",
        "debug", "show_title", "show_xaxis",
        "show_xaxis_label", "show_yaxis", "show_yaxis_label",
        "show_boxes", "show_errorbars", "show_fineprint",
        "rotate_numbers", "show_ends", "stack_height",
        "stack_width"
    ]

    for s in from_format:
        substitutions[s] = getattr(format, s)

    substitutions["shrink"] = str(format.show_boxes).lower()

    # --------- COLORS --------------
    def format_color(color):
        return " ".join(("[", str(color.red), str(color.green),
                         str(color.blue), "]"))

    substitutions["default_color"] = format_color(format.default_color)

    colors = []
    if hasattr(format.color_scheme, 'rules'):
        grouplist = format.color_scheme.rules
    else:
        # this line needed for weblogo 3.4
        grouplist = format.color_scheme.groups
    for group in grouplist:
        cf = format_color(group.color)
        for s in group.symbols:
            colors.append(" (" + s + ") " + cf)
    substitutions["color_dict"] = "\n".join(colors)

    data = []

    # Unit conversion. 'None' for probability units
    conv_factor = None  # JDB
    # JDB conv_factor = std_units[format.unit_name]

    data.append("StartLine")

    seq_from = format.logo_start - format.first_index
    seq_to = format.logo_end - format.first_index + 1

    # seq_index : zero based index into sequence data
    # logo_index : User visible coordinate, first_index based
    # stack_index : zero based index of visible stacks
    for seq_index in range(seq_from, seq_to):
        logo_index = seq_index + format.first_index
        stack_index = seq_index - seq_from

        if stack_index != 0 and (stack_index % format.stacks_per_line) == 0:
            data.append("")
            data.append("EndLine")
            data.append("StartLine")
            data.append("")

        data.append("(%s) StartStack" % format.annotate[seq_index])

        if conv_factor:
            stack_height = logodata.entropy[seq_index] * std_units[format.unit_name]
        else:
            stack_height = 1.0  # Probability

        # The following code modified by JDB to use ordered_alphabets
        # and also to replace the "blank" characters 'b' and 'B'
        # by spaces.
        s_d = dict(zip(logodata.alphabet, logodata.counts[seq_index]))
        s = []
        for aa in ordered_alphabets[seq_index]:
            if aa not in ['B', 'b']:
                s.append((s_d[aa], aa))
            else:
                s.append((s_d[aa], ' '))
        # s = [(s_d[aa], aa) for aa in ordered_alphabets[seq_index]]

        # Sort by frequency. If equal frequency then reverse alphabetic
        # (So sort reverse alphabetic first, then frequency)
        # TODO: doublecheck this actually works
        # s = list(zip(logodata.counts[seq_index], logodata.alphabet))
        # s.sort(key=lambda x: x[1])
        # s.reverse()
        # s.sort(key=lambda x: x[0])
        # if not format.reverse_stacks: s.reverse()

        C = float(sum(logodata.counts[seq_index]))
        if C > 0.0:
            fraction_width = 1.0
            if format.scale_width:
                fraction_width = logodata.weight[seq_index]
            # print(fraction_width, file=sys.stderr)
            for c in s:
                data.append(" %f %f (%s) ShowSymbol" %
                            (fraction_width, c[0] * stack_height / C, c[1]))

        # Draw error bar on top of logo. Replaced by DrawErrorbarFirst above.
        if logodata.entropy_interval is not None and conv_factor and C > 0.0:
            low, high = logodata.entropy_interval[seq_index]
            center = logodata.entropy[seq_index]
            low *= conv_factor
            high *= conv_factor
            center *= conv_factor

            if high > format.yaxis_scale:
                high = format.yaxis_scale

            down = (center - low)
            up = (high - center)
            data.append(" %f %f DrawErrorbar" % (down, up))

        data.append("EndStack")
        data.append("")

    data.append("EndLine")
    substitutions["logo_data"] = "\n".join(data)

    # Create and output logo
    template = corebio.utils.resource_string(
        __name__, '_weblogo_template.eps', __file__).decode()
    logo = string.Template(template).substitute(substitutions)
    return logo.encode() |
Generate a logo in Encapsulated Postscript (EPS)
Modified from weblogo version 3.4 source code.
*ordered_alphabets* is a dictionary keyed by zero-indexed
consecutive sites, with values giving order of characters
from bottom to top. | python | train |
unt-libraries/pyuntl | pyuntl/util.py | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/util.py#L19-L23 | def normalize_UNTL(subject):
    """Normalize a UNTL subject heading for consistency."""
    subject = subject.strip()
    subject = re.sub(r'[\s]+', ' ', subject)
    return subject |
Normalize a UNTL subject heading for consistency. | python | train |
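A one-line sketch of the whitespace normalization:

```python
print(normalize_UNTL('  Dogs --   Training '))  # 'Dogs -- Training'
```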
SHTOOLS/SHTOOLS | pyshtools/shclasses/shgravcoeffs.py | https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shgravcoeffs.py#L1243-L1336 | def spectrum(self, function='geoid', lmax=None, unit='per_l', base=10.):
    """
    Return the spectrum as a function of spherical harmonic degree.

    Usage
    -----
    spectrum, [error_spectrum] = x.spectrum([function, lmax, unit, base])

    Returns
    -------
    spectrum : ndarray, shape (lmax+1)
        1-D numpy ndarray of the spectrum, where lmax is the maximum
        spherical harmonic degree.
    error_spectrum : ndarray, shape (lmax+1)
        1-D numpy ndarray of the error_spectrum (if the attribute errors
        is not None).

    Parameters
    ----------
    function : str, optional, default = 'geoid'
        The type of power spectrum to return: 'potential' for the
        gravitational potential in m2/s2, 'geoid' for the geoid in m,
        'radial' for the radial gravity in m/s2, or 'total' for the total
        gravitational field in m/s2.
    lmax : int, optional, default = x.lmax
        Maximum spherical harmonic degree of the spectrum to return.
    unit : str, optional, default = 'per_l'
        If 'per_l', return the total contribution to the spectrum for each
        spherical harmonic degree l. If 'per_lm', return the average
        contribution to the spectrum for each coefficient at spherical
        harmonic degree l. If 'per_dlogl', return the spectrum per log
        interval dlog_a(l).
    base : float, optional, default = 10.
        The logarithm base when calculating the 'per_dlogl' spectrum.

    Description
    -----------
    This method returns the power spectrum of the class instance, where the
    type of function is defined by the function parameter: 'potential' for
    the gravitational potential, 'geoid' for the geoid, 'radial' for
    the radial gravity, or 'total' for the total gravitational field. In
    all cases, the total power of the function is defined as the integral
    of the function squared over all space, divided by the area the
    function spans. If the mean of the function is zero, this is equivalent
    to the variance of the function.

    The output spectrum can be expressed using one of three units. 'per_l'
    returns the contribution to the total spectrum from all angular orders
    at degree l. 'per_lm' returns the average contribution to the total
    spectrum from a single coefficient at degree l, which is equal to the
    'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
    contribution to the total spectrum from all angular orders over an
    infinitesimal logarithmic degree band. The contribution in the band
    dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
    and where spectrum(l, 'per_dlogl') is equal to
    spectrum(l, 'per_l')*l*log(a).
    """
    if function.lower() not in ('potential', 'geoid', 'radial', 'total'):
        raise ValueError(
            "function must be of type 'potential', 'geoid', 'radial', or "
            "'total'. Provided value was {:s}".format(repr(function))
        )

    s = _spectrum(self.coeffs, normalization=self.normalization,
                  convention='power', unit=unit, base=base, lmax=lmax)

    if self.errors is not None:
        es = _spectrum(self.errors, normalization=self.normalization,
                       convention='power', unit=unit, base=base, lmax=lmax)

    if function.lower() == 'potential':
        s *= (self.gm / self.r0)**2
        if self.errors is not None:
            es *= (self.gm / self.r0)**2
    elif function.lower() == 'geoid':
        s *= self.r0**2
        if self.errors is not None:
            es *= self.r0**2
    elif function.lower() == 'radial':
        degrees = _np.arange(len(s))
        s *= (self.gm * (degrees + 1) / self.r0**2)**2
        if self.errors is not None:
            es *= (self.gm * (degrees + 1) / self.r0**2)**2
    elif function.lower() == 'total':
        degrees = _np.arange(len(s))
        s *= (self.gm / self.r0**2)**2 * (degrees + 1) * (2 * degrees + 1)
        if self.errors is not None:
            es *= (self.gm / self.r0**2)**2 * (degrees + 1) * \
                  (2 * degrees + 1)

    if self.errors is not None:
        return s, es
    else:
        return s |
Return the spectrum as a function of spherical harmonic degree.
Usage
-----
spectrum, [error_spectrum] = x.spectrum([function, lmax, unit, base])
Returns
-------
spectrum : ndarray, shape (lmax+1)
1-D numpy ndarray of the spectrum, where lmax is the maximum
spherical harmonic degree.
error_spectrum : ndarray, shape (lmax+1)
1-D numpy ndarray of the error_spectrum (if the attribute errors
is not None).
Parameters
----------
function : str, optional, default = 'geoid'
The type of power spectrum to return: 'potential' for the
gravitational potential in m2/s2, 'geoid' for the geoid in m,
'radial' for the radial gravity in m/s2, or 'total' for the total
gravitational field in m/s2.
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree of the spectrum to return.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Description
-----------
This method returns the power spectrum of the class instance, where the
type of function is defined by the function parameter: 'potential' for
the gravitational potential, 'geoid' for the geoid, 'radial' for
the radial gravity, or 'total' for the total gravitational field. In
all cases, the total power of the function is defined as the integral
of the function squared over all space, divided by the area the
function spans. If the mean of the function is zero, this is equivalent
to the variance of the function.
The output spectrum can be expressed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, which is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
contribution to the total spectrum from all angular orders over an
infinitesimal logarithmic degree band. The contribution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl') is equal to
spectrum(l, 'per_l')*l*log(a). | python | train |
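A usage sketch, assuming `clm` is an existing SHGravCoeffs instance (e.g. read from a gravity model file):

```python
geoid_power = clm.spectrum(function='geoid')                    # m**2, per degree l
radial_per_lm = clm.spectrum(function='radial', unit='per_lm')  # average per coefficient
```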
log2timeline/dfvfs | dfvfs/vfs/vshadow_file_system.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/vshadow_file_system.py#L123-L131 | def GetRootFileEntry(self):
    """Retrieves the root file entry.

    Returns:
      VShadowFileEntry: file entry or None if not available.
    """
    path_spec = vshadow_path_spec.VShadowPathSpec(
        location=self.LOCATION_ROOT, parent=self._path_spec.parent)
    return self.GetFileEntryByPathSpec(path_spec) |
Retrieves the root file entry.
Returns:
VShadowFileEntry: file entry or None if not available. | python | train |
poppy-project/pypot | pypot/vrep/remoteApiBindings/vrep.py | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L169-L174 | def simxSetJointPosition(clientID, jointHandle, position, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    return c_SetJointPosition(clientID, jointHandle, position, operationMode) |
Please have a look at the function description/documentation in the V-REP user manual | python | train |
EelcoHoogendoorn/Numpy_arraysetops_EP | numpy_indexed/funcs.py | https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L199-L216 | def rank(keys, axis=semantics.axis_default):
    """where each item is in the pecking order.

    Parameters
    ----------
    keys : indexable object

    Returns
    -------
    ndarray, [keys.size], int
        unique integers, ranking the sorting order

    Notes
    -----
    we should have that index.sorted[index.rank] == keys
    """
    index = as_index(keys, axis)
    return index.rank |
where each item is in the pecking order.
Parameters
----------
keys : indexable object
Returns
-------
ndarray, [keys.size], int
unique integers, ranking the sorting order
Notes
-----
we should have that index.sorted[index.rank] == keys | python | train |
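A sketch, assuming `rank` is exposed at the package level as in this module:

```python
import numpy as np
import numpy_indexed as npi

keys = np.array([30, 10, 20])
print(npi.rank(keys))                 # [2 0 1] -- position of each item in sort order
print(np.sort(keys)[npi.rank(keys)])  # [30 10 20] -- recovers the input, as the Notes say
```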
delfick/harpoon | harpoon/ship/runner.py | https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/ship/runner.py#L578-L594 | def intervention(self, commit, conf):
    """Ask the user if they want to commit this container and run sh in it"""
    if not conf.harpoon.interactive or conf.harpoon.no_intervention:
        yield
        return

    hp.write_to(conf.harpoon.stdout, "!!!!\n")
    hp.write_to(conf.harpoon.stdout, "It would appear building the image failed\n")
    hp.write_to(conf.harpoon.stdout, "Do you want to run {0} where the build to help debug why it failed?\n".format(conf.resolved_shell))
    conf.harpoon.stdout.flush()
    answer = input("[y]: ")
    if answer and not answer.lower().startswith("y"):
        yield
        return

    with self.commit_and_run(commit, conf, command=conf.resolved_shell):
        yield |
Ask the user if they want to commit this container and run sh in it | python | train |
wonambi-python/wonambi | wonambi/widgets/notes.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1185-L1290 | def import_staging(self, source, staging_start=None, as_qual=False,
                   test_filename=None, test_rater=None):
    """Action: import an external sleep staging file.

    Parameters
    ----------
    source : str
        Name of program where staging was exported. One of 'alice',
        'compumedics', 'domino', 'remlogic', 'sandman'.
    staging_start : datetime, optional
        Absolute time when staging begins.
    as_qual : bool
        if True, scores will be imported as quality
    """
    if self.annot is None:  # remove if buttons are disabled
        msg = 'No score file loaded'
        self.parent.statusBar().showMessage(msg)
        lg.info(msg)
        return

    if self.parent.info.dataset is None:
        msg = 'No dataset loaded'
        self.parent.statusBar().showMessage(msg)
        lg.info(msg)
        return

    record_start = self.parent.info.dataset.header['start_time']

    if test_filename is None:
        filename, _ = QFileDialog.getOpenFileName(self,
                                                  'Load staging file',
                                                  None,
                                                  'Text File (*.txt)')
    else:
        filename = test_filename

    if filename == '':
        return

    if test_rater is None:
        rater, ok = QInputDialog.getText(self, 'Import staging',
                                         'Enter rater name')
        if not ok:
            return

        if rater in self.annot.raters and not as_qual:
            msgBox = QMessageBox(QMessageBox.Question, 'Overwrite staging',
                                 'Rater %s already exists. \n \n'
                                 'Overwrite %s\'s sleep staging '
                                 'with imported staging? Events '
                                 'and bookmarks will be preserved.'
                                 % (rater, rater))
            msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
            msgBox.setDefaultButton(QMessageBox.No)
            response = msgBox.exec_()
            if response == QMessageBox.No:
                return
    else:
        rater = test_rater

    if source in ['deltamed', 'compumedics']:
        time_str, ok = QInputDialog.getText(self, 'Staging start time',
                                            'Enter date and time when '
                                            'staging \nbegins, using '
                                            '24-hour clock. \n\nFormat: '
                                            'YYYY,MM,DD HH:mm:SS')
        if not ok:
            return

        try:
            staging_start = datetime.strptime(time_str,
                                              '%Y,%m,%d %H:%M:%S')
        except (ValueError, TypeError) as e:
            msg = 'Incorrect formatting for date and time.'
            self.parent.statusBar().showMessage(msg)
            lg.info(msg)
            return

    poor = ['Artefact', 'Artifact']
    if as_qual:
        query = 'Which epoch label should be read as Poor quality signal?'
        poor, ok = QInputDialog.getText(self, 'Import quality', query)
        poor = [poor]
        if not ok:
            return

    try:
        unaligned = self.annot.import_staging(filename, source, rater,
                                              record_start,
                                              staging_start=staging_start,
                                              poor=poor,
                                              as_qual=as_qual)
    except FileNotFoundError:
        msg = 'File not found'
        self.parent.statusBar().showMessage(msg)
        lg.info(msg)

    if unaligned:
        msg = 'Imported scores are not aligned with existing scores.'
        self.parent.statusBar().showMessage(msg)
        lg.info(msg)

    self.display_notes()
self.parent.create_menubar() | [
"def",
"import_staging",
"(",
"self",
",",
"source",
",",
"staging_start",
"=",
"None",
",",
"as_qual",
"=",
"False",
",",
"test_filename",
"=",
"None",
",",
"test_rater",
"=",
"None",
")",
":",
"if",
"self",
".",
"annot",
"is",
"None",
":",
"# remove if buttons are disabled",
"msg",
"=",
"'No score file loaded'",
"self",
".",
"parent",
".",
"statusBar",
"(",
")",
".",
"showMessage",
"(",
"msg",
")",
"lg",
".",
"info",
"(",
"msg",
")",
"return",
"if",
"self",
".",
"parent",
".",
"info",
".",
"dataset",
"is",
"None",
":",
"msg",
"=",
"'No dataset loaded'",
"self",
".",
"parent",
".",
"statusBar",
"(",
")",
".",
"showMessage",
"(",
"msg",
")",
"lg",
".",
"info",
"(",
"msg",
")",
"return",
"record_start",
"=",
"self",
".",
"parent",
".",
"info",
".",
"dataset",
".",
"header",
"[",
"'start_time'",
"]",
"if",
"test_filename",
"is",
"None",
":",
"filename",
",",
"_",
"=",
"QFileDialog",
".",
"getOpenFileName",
"(",
"self",
",",
"'Load staging file'",
",",
"None",
",",
"'Text File (*.txt)'",
")",
"else",
":",
"filename",
"=",
"test_filename",
"if",
"filename",
"==",
"''",
":",
"return",
"if",
"test_rater",
"is",
"None",
":",
"rater",
",",
"ok",
"=",
"QInputDialog",
".",
"getText",
"(",
"self",
",",
"'Import staging'",
",",
"'Enter rater name'",
")",
"if",
"not",
"ok",
":",
"return",
"if",
"rater",
"in",
"self",
".",
"annot",
".",
"raters",
"and",
"not",
"as_qual",
":",
"msgBox",
"=",
"QMessageBox",
"(",
"QMessageBox",
".",
"Question",
",",
"'Overwrite staging'",
",",
"'Rater %s already exists. \\n \\n'",
"'Overwrite %s\\'s sleep staging '",
"'with imported staging? Events '",
"'and bookmarks will be preserved.'",
"%",
"(",
"rater",
",",
"rater",
")",
")",
"msgBox",
".",
"setStandardButtons",
"(",
"QMessageBox",
".",
"Yes",
"|",
"QMessageBox",
".",
"No",
")",
"msgBox",
".",
"setDefaultButton",
"(",
"QMessageBox",
".",
"No",
")",
"response",
"=",
"msgBox",
".",
"exec_",
"(",
")",
"if",
"response",
"==",
"QMessageBox",
".",
"No",
":",
"return",
"else",
":",
"rater",
"=",
"test_rater",
"if",
"source",
"in",
"[",
"'deltamed'",
",",
"'compumedics'",
"]",
":",
"time_str",
",",
"ok",
"=",
"QInputDialog",
".",
"getText",
"(",
"self",
",",
"'Staging start time'",
",",
"'Enter date and time when '",
"'staging \\nbegins, using '",
"'24-hour clock. \\n\\nFormat: '",
"'YYYY,MM,DD HH:mm:SS'",
")",
"if",
"not",
"ok",
":",
"return",
"try",
":",
"staging_start",
"=",
"datetime",
".",
"strptime",
"(",
"time_str",
",",
"'%Y,%m,%d %H:%M:%S'",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
"as",
"e",
":",
"msg",
"=",
"'Incorrect formatting for date and time.'",
"self",
".",
"parent",
".",
"statusBar",
"(",
")",
".",
"showMessage",
"(",
"msg",
")",
"lg",
".",
"info",
"(",
"msg",
")",
"return",
"poor",
"=",
"[",
"'Artefact'",
",",
"'Artifact'",
"]",
"if",
"as_qual",
":",
"query",
"=",
"'Which epoch label should be read as Poor quality signal?'",
"poor",
",",
"ok",
"=",
"QInputDialog",
".",
"getText",
"(",
"self",
",",
"'Import quality'",
",",
"query",
")",
"poor",
"=",
"[",
"poor",
"]",
"if",
"not",
"ok",
":",
"return",
"try",
":",
"unaligned",
"=",
"self",
".",
"annot",
".",
"import_staging",
"(",
"filename",
",",
"source",
",",
"rater",
",",
"record_start",
",",
"staging_start",
"=",
"staging_start",
",",
"poor",
"=",
"poor",
",",
"as_qual",
"=",
"as_qual",
")",
"except",
"FileNotFoundError",
":",
"msg",
"=",
"'File not found'",
"self",
".",
"parent",
".",
"statusBar",
"(",
")",
".",
"showMessage",
"(",
"msg",
")",
"lg",
".",
"info",
"(",
"msg",
")",
"if",
"unaligned",
":",
"msg",
"=",
"'Imported scores are not aligned with existing scores.'",
"self",
".",
"parent",
".",
"statusBar",
"(",
")",
".",
"showMessage",
"(",
"msg",
")",
"lg",
".",
"info",
"(",
"msg",
")",
"self",
".",
"display_notes",
"(",
")",
"self",
".",
"parent",
".",
"create_menubar",
"(",
")"
] | Action: import an external sleep staging file.
Parameters
----------
source : str
Name of program where staging was exported. One of 'alice',
'compumedics', 'domino', 'remlogic', 'sandman'.
staging_start : datetime, optional
Absolute time when staging begins.
as_qual : bool
if True, scores will be imported as quality | [
"Action",
":",
"import",
"an",
"external",
"sleep",
"staging",
"file",
"."
] | python | train |
data-8/datascience | datascience/maps.py | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L335-L352 | def _read_geojson_features(data, features=None, prefix=""):
"""Return a dict of features keyed by ID."""
if features is None:
features = collections.OrderedDict()
for i, feature in enumerate(data['features']):
key = feature.get('id', prefix + str(i))
feature_type = feature['geometry']['type']
if feature_type == 'FeatureCollection':
_read_geojson_features(feature, features, prefix + '.' + key)
continue  # skip the shared assignment below; 'value' would be stale or unbound here
elif feature_type == 'Point':
value = Circle._convert_point(feature)
elif feature_type in ['Polygon', 'MultiPolygon']:
value = Region(feature)
else:
# TODO Support all http://geojson.org/geojson-spec.html#geometry-objects
value = None
features[key] = value
return features | [
"def",
"_read_geojson_features",
"(",
"data",
",",
"features",
"=",
"None",
",",
"prefix",
"=",
"\"\"",
")",
":",
"if",
"features",
"is",
"None",
":",
"features",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"i",
",",
"feature",
"in",
"enumerate",
"(",
"data",
"[",
"'features'",
"]",
")",
":",
"key",
"=",
"feature",
".",
"get",
"(",
"'id'",
",",
"prefix",
"+",
"str",
"(",
"i",
")",
")",
"feature_type",
"=",
"feature",
"[",
"'geometry'",
"]",
"[",
"'type'",
"]",
"if",
"feature_type",
"==",
"'FeatureCollection'",
":",
"_read_geojson_features",
"(",
"feature",
",",
"features",
",",
"prefix",
"+",
"'.'",
"+",
"key",
")",
"elif",
"feature_type",
"==",
"'Point'",
":",
"value",
"=",
"Circle",
".",
"_convert_point",
"(",
"feature",
")",
"elif",
"feature_type",
"in",
"[",
"'Polygon'",
",",
"'MultiPolygon'",
"]",
":",
"value",
"=",
"Region",
"(",
"feature",
")",
"else",
":",
"# TODO Support all http://geojson.org/geojson-spec.html#geometry-objects",
"value",
"=",
"None",
"features",
"[",
"key",
"]",
"=",
"value",
"return",
"features"
] | Return a dict of features keyed by ID. | [
"Return",
"a",
"dict",
"of",
"features",
"keyed",
"by",
"ID",
"."
] | python | train |
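The record above keys each GeoJSON feature by its `id`, falling back to a prefix plus its list index. A simplified, self-contained sketch of just that keying scheme (the real helper also converts geometries through `Circle` and `Region`, which are internal to `datascience.maps`):

```python
import collections

def keyed_features(data, prefix=""):
    # Simplified stand-in: key features by 'id', else by prefix + index.
    out = collections.OrderedDict()
    for i, feature in enumerate(data["features"]):
        out[feature.get("id", prefix + str(i))] = feature["geometry"]["type"]
    return out

geojson = {"features": [
    {"id": "berkeley", "geometry": {"type": "Point"}},
    {"geometry": {"type": "Polygon"}},  # no id, so it is keyed "1"
]}
print(keyed_features(geojson))
# OrderedDict([('berkeley', 'Point'), ('1', 'Polygon')])
```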
tnkteja/myhelp | virtualEnvironment/lib/python2.7/site-packages/coverage/files.py | https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/files.py#L163-L173 | def match(self, fpath):
"""Does `fpath` indicate a file in one of our trees?"""
for d in self.dirs:
if fpath.startswith(d):
if fpath == d:
# This is the same file!
return True
if fpath[len(d)] == os.sep:
# This is a file in the directory
return True
return False | [
"def",
"match",
"(",
"self",
",",
"fpath",
")",
":",
"for",
"d",
"in",
"self",
".",
"dirs",
":",
"if",
"fpath",
".",
"startswith",
"(",
"d",
")",
":",
"if",
"fpath",
"==",
"d",
":",
"# This is the same file!",
"return",
"True",
"if",
"fpath",
"[",
"len",
"(",
"d",
")",
"]",
"==",
"os",
".",
"sep",
":",
"# This is a file in the directory",
"return",
"True",
"return",
"False"
] | Does `fpath` indicate a file in one of our trees? | [
"Does",
"fpath",
"indicate",
"a",
"file",
"in",
"one",
"of",
"our",
"trees?"
] | python | test |
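The `fpath[len(d)] == os.sep` check in the record above is what prevents a tree like `a/b` from matching the sibling file `a/bc.py`. A self-contained sketch of that boundary test:

```python
import os

def in_tree(fpath, d):
    # Same boundary logic as the match() above: after the common prefix,
    # the very next character must be a path separator.
    if fpath == d:
        return True
    return fpath.startswith(d) and fpath[len(d)] == os.sep

tree = os.path.join("a", "b")
print(in_tree(os.path.join("a", "b", "c.py"), tree))  # True
print(in_tree(os.path.join("a", "bc.py"), tree))      # False
```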
gruns/icecream | icecream/icecream.py | https://github.com/gruns/icecream/blob/cb4f3d50ec747637721fe58b80f2cc2a2baedabf/icecream/icecream.py#L369-L384 | def splitExpressionsOntoSeparateLines(source):
"""
Split every expression onto its own line so any preceding and/or trailing
expressions, like 'foo(1); ' and '; foo(2)' of
foo(1); ic(1); foo(2)
are properly separated from ic(1) for dis.findlinestarts(). Otherwise, any
preceding and/or trailing expressions break ic(1)'s bytecode offset
calculation with dis.findlinestarts().
"""
indices = [expr.col_offset for expr in ast.parse(source).body]
lines = [s.strip() for s in splitStringAtIndices(source, indices)]
oneExpressionPerLine = joinContinuedLines(lines)
return oneExpressionPerLine | [
"def",
"splitExpressionsOntoSeparateLines",
"(",
"source",
")",
":",
"indices",
"=",
"[",
"expr",
".",
"col_offset",
"for",
"expr",
"in",
"ast",
".",
"parse",
"(",
"source",
")",
".",
"body",
"]",
"lines",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"splitStringAtIndices",
"(",
"source",
",",
"indices",
")",
"]",
"oneExpressionPerLine",
"=",
"joinContinuedLines",
"(",
"lines",
")",
"return",
"oneExpressionPerLine"
] | Split every expression onto its own line so any preceding and/or trailing
expressions, like 'foo(1); ' and '; foo(2)' of
foo(1); ic(1); foo(2)
are properly separated from ic(1) for dis.findlinestarts(). Otherwise, any
preceding and/or trailing expressions break ic(1)'s bytecode offset
calculation with dis.findlinestarts(). | [
"Split",
"every",
"expression",
"onto",
"its",
"own",
"line",
"so",
"any",
"preceding",
"and",
"/",
"or",
"trailing",
"expressions",
"like",
"foo",
"(",
"1",
")",
";",
"and",
";",
"foo",
"(",
"2",
")",
"of"
] | python | train |
HttpRunner/HttpRunner | httprunner/parser.py | https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/parser.py#L556-L610 | def prepare_lazy_data(content, functions_mapping=None, check_variables_set=None, cached=False):
""" make string in content as lazy object with functions_mapping
Raises:
exceptions.VariableNotFound: if any variable undefined in check_variables_set
"""
# TODO: refactor type check
if content is None or isinstance(content, (numeric_types, bool, type)):
return content
elif isinstance(content, (list, set, tuple)):
return [
prepare_lazy_data(
item,
functions_mapping,
check_variables_set,
cached
)
for item in content
]
elif isinstance(content, dict):
parsed_content = {}
for key, value in content.items():
parsed_key = prepare_lazy_data(
key,
functions_mapping,
check_variables_set,
cached
)
parsed_value = prepare_lazy_data(
value,
functions_mapping,
check_variables_set,
cached
)
parsed_content[parsed_key] = parsed_value
return parsed_content
elif isinstance(content, basestring):
# content is in string format here
if not is_var_or_func_exist(content):
# content is neither variable nor function
# replace $$ notation with $ and consider it as normal char.
# e.g. abc => abc, abc$$def => abc$def, abc$$$$def$$h => abc$$def$h
return content.replace("$$", "$")
functions_mapping = functions_mapping or {}
check_variables_set = check_variables_set or set()
content = content.strip()
content = LazyString(content, functions_mapping, check_variables_set, cached)
return content | [
"def",
"prepare_lazy_data",
"(",
"content",
",",
"functions_mapping",
"=",
"None",
",",
"check_variables_set",
"=",
"None",
",",
"cached",
"=",
"False",
")",
":",
"# TODO: refactor type check",
"if",
"content",
"is",
"None",
"or",
"isinstance",
"(",
"content",
",",
"(",
"numeric_types",
",",
"bool",
",",
"type",
")",
")",
":",
"return",
"content",
"elif",
"isinstance",
"(",
"content",
",",
"(",
"list",
",",
"set",
",",
"tuple",
")",
")",
":",
"return",
"[",
"prepare_lazy_data",
"(",
"item",
",",
"functions_mapping",
",",
"check_variables_set",
",",
"cached",
")",
"for",
"item",
"in",
"content",
"]",
"elif",
"isinstance",
"(",
"content",
",",
"dict",
")",
":",
"parsed_content",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"content",
".",
"items",
"(",
")",
":",
"parsed_key",
"=",
"prepare_lazy_data",
"(",
"key",
",",
"functions_mapping",
",",
"check_variables_set",
",",
"cached",
")",
"parsed_value",
"=",
"prepare_lazy_data",
"(",
"value",
",",
"functions_mapping",
",",
"check_variables_set",
",",
"cached",
")",
"parsed_content",
"[",
"parsed_key",
"]",
"=",
"parsed_value",
"return",
"parsed_content",
"elif",
"isinstance",
"(",
"content",
",",
"basestring",
")",
":",
"# content is in string format here",
"if",
"not",
"is_var_or_func_exist",
"(",
"content",
")",
":",
"# content is neither variable nor function",
"# replace $$ notation with $ and consider it as normal char.",
"# e.g. abc => abc, abc$$def => abc$def, abc$$$$def$$h => abc$$def$h",
"return",
"content",
".",
"replace",
"(",
"\"$$\"",
",",
"\"$\"",
")",
"functions_mapping",
"=",
"functions_mapping",
"or",
"{",
"}",
"check_variables_set",
"=",
"check_variables_set",
"or",
"set",
"(",
")",
"content",
"=",
"content",
".",
"strip",
"(",
")",
"content",
"=",
"LazyString",
"(",
"content",
",",
"functions_mapping",
",",
"check_variables_set",
",",
"cached",
")",
"return",
"content"
] | make string in content as lazy object with functions_mapping
Raises:
exceptions.VariableNotFound: if any variable undefined in check_variables_set | [
"make",
"string",
"in",
"content",
"as",
"lazy",
"object",
"with",
"functions_mapping"
] | python | train |
aitjcize/cppman | cppman/main.py | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L160-L193 | def parse_title(self, title):
"""
split off the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names | [
"def",
"parse_title",
"(",
"self",
",",
"title",
")",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r'^\\s*((?:\\(size_type\\)|(?:.|\\(\\))*?)*)((?:\\([^)]+\\))?)\\s*$'",
",",
"title",
")",
"postfix",
"=",
"m",
".",
"group",
"(",
"2",
")",
"t_names",
"=",
"m",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"','",
")",
"t_names",
"=",
"[",
"n",
".",
"strip",
"(",
")",
"for",
"n",
"in",
"t_names",
"]",
"prefix",
"=",
"self",
".",
"parse_expression",
"(",
"t_names",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"names",
"=",
"[",
"]",
"for",
"n",
"in",
"t_names",
":",
"r",
"=",
"self",
".",
"parse_expression",
"(",
"n",
")",
"if",
"prefix",
"==",
"r",
"[",
"0",
"]",
":",
"names",
".",
"append",
"(",
"n",
"+",
"postfix",
")",
"else",
":",
"names",
".",
"append",
"(",
"prefix",
"+",
"r",
"[",
"1",
"]",
"+",
"postfix",
")",
"return",
"names"
] | split off the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
``` | [
"split",
"of",
"the",
"last",
"parenthesis",
"operator",
"==",
"!",
"=",
"<",
"<",
"=",
"(",
"std",
"::",
"vector",
")",
"tested",
"with",
"operator",
"==",
"!",
"=",
"<",
"<",
"=",
">",
">",
"=",
"(",
"std",
"::",
"vector",
")",
"operator",
"==",
"!",
"=",
"<",
"<",
"=",
">",
">",
"=",
"(",
"std",
"::",
"vector",
")",
"operator",
"==",
"!",
"=",
"<",
"<",
"=",
">",
">",
"=",
"operator",
"==",
"!",
"=",
"<",
"<",
"=",
">",
">",
"=",
"std",
"::",
"rel_ops",
"::",
"operator!",
"=",
">",
"<",
"=",
">",
"=",
"std",
"::",
"atomic",
"::",
"operator",
"=",
"std",
"::",
"array",
"::",
"operator",
"[]",
"std",
"::",
"function",
"::",
"operator",
"()",
"std",
"::",
"vector",
"::",
"at",
"std",
"::",
"relational",
"operators",
"(",
"vector",
")",
"std",
"::",
"vector",
"::",
"begin",
"std",
"::",
"vector",
"::",
"cbegin",
"std",
"::",
"abs",
"(",
"float",
")",
"std",
"::",
"fabs",
"std",
"::",
"unordered_set",
"::",
"begin",
"(",
"size_type",
")",
"std",
"::",
"unordered_set",
"::",
"cbegin",
"(",
"size_type",
")"
] | python | train |
todddeluca/dones | dones.py | https://github.com/todddeluca/dones/blob/6ef56565556987e701fed797a405f0825fe2e15a/dones.py#L324-L339 | def doTransaction(conn, start=True, startSQL='START TRANSACTION'):
'''
Wrap a connection in a transaction. Starts a transaction, yields the conn, and then if an exception occurs, calls rollback(). Otherwise calls commit().
start: if True, executes 'START TRANSACTION' sql before yielding conn. Useful for connections that are autocommit by default.
startSQL: override if 'START TRANSACTION' does not work for your db server.
'''
try:
if start:
executeSQL(conn, startSQL)
yield conn
except:
if conn is not None:
conn.rollback()
raise
else:
conn.commit() | [
"def",
"doTransaction",
"(",
"conn",
",",
"start",
"=",
"True",
",",
"startSQL",
"=",
"'START TRANSACTION'",
")",
":",
"try",
":",
"if",
"start",
":",
"executeSQL",
"(",
"conn",
",",
"startSQL",
")",
"yield",
"conn",
"except",
":",
"if",
"conn",
"is",
"not",
"None",
":",
"conn",
".",
"rollback",
"(",
")",
"raise",
"else",
":",
"conn",
".",
"commit",
"(",
")"
] | Wrap a connection in a transaction. Starts a transaction, yields the conn, and then if an exception occurs, calls rollback(). Otherwise calls commit().
start: if True, executes 'START TRANSACTION' sql before yielding conn. Useful for connections that are autocommit by default.
startSQL: override if 'START TRANSACTION' does not work for your db server. | [
"wrap",
"a",
"connection",
"in",
"a",
"transaction",
".",
"starts",
"a",
"transaction",
"yields",
"the",
"conn",
"and",
"then",
"if",
"an",
"exception",
"occurs",
"calls",
"rollback",
"()",
".",
"otherwise",
"calls",
"commit",
"()",
".",
"start",
":",
"if",
"True",
"executes",
"START",
"TRANSACTION",
"sql",
"before",
"yielding",
"conn",
".",
"Useful",
"for",
"connections",
"that",
"are",
"autocommit",
"by",
"default",
".",
"startSQL",
":",
"override",
"if",
"START",
"TRANSACTION",
"does",
"not",
"work",
"for",
"your",
"db",
"server",
"."
] | python | train |
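The generator above is evidently meant to be used through `contextlib.contextmanager` (the decorator sits outside the extracted snippet, as does the module's `executeSQL` helper). A hedged usage sketch with sqlite3, passing `start=False` so the MySQL-flavored `START TRANSACTION` statement is skipped:

```python
import contextlib
import sqlite3

# Assumption: upstream, doTransaction is wrapped by @contextmanager.
transaction = contextlib.contextmanager(doTransaction)

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (x INTEGER)")
with transaction(conn, start=False):  # sqlite3 opens transactions implicitly
    conn.execute("INSERT INTO t VALUES (1)")
# Committed on clean exit; an exception inside the block triggers rollback().
print(conn.execute("SELECT x FROM t").fetchall())  # [(1,)]
```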
bitesofcode/projexui | projexui/xsettings.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xsettings.py#L557-L563 | def load(self):
"""
Loads the settings from disk for this XSettings object, if it is a custom format.
"""
# load the custom format
if self._customFormat and os.path.exists(self.fileName()):
self._customFormat.load(self.fileName()) | [
"def",
"load",
"(",
"self",
")",
":",
"# load the custom format\r",
"if",
"self",
".",
"_customFormat",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"fileName",
"(",
")",
")",
":",
"self",
".",
"_customFormat",
".",
"load",
"(",
"self",
".",
"fileName",
"(",
")",
")"
] | Loads the settings from disk for this XSettings object, if it is a custom format. | [
"Loads",
"the",
"settings",
"from",
"disk",
"for",
"this",
"XSettings",
"object",
"if",
"it",
"is",
"a",
"custom",
"format",
"."
] | python | train |
Alignak-monitoring/alignak | alignak/external_command.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L3395-L3410 | def schedule_and_propagate_host_downtime(self, host, start_time, end_time,
fixed, trigger_id, duration, author, comment):
"""DOES NOTHING (Should create host downtime and start it?)
Format of the line that triggers function call::
SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
:return: None
"""
logger.warning("The external command 'SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME' "
"is not currently implemented in Alignak. If you really need it, "
"request for its implementation in the project repository: "
"https://github.com/Alignak-monitoring/alignak")
self.send_an_element(make_monitoring_log(
'warning', 'SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME: this command is not implemented!')) | [
"def",
"schedule_and_propagate_host_downtime",
"(",
"self",
",",
"host",
",",
"start_time",
",",
"end_time",
",",
"fixed",
",",
"trigger_id",
",",
"duration",
",",
"author",
",",
"comment",
")",
":",
"logger",
".",
"warning",
"(",
"\"The external command 'SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME' \"",
"\"is not currently implemented in Alignak. If you really need it, \"",
"\"request for its implementation in the project repository: \"",
"\"https://github.com/Alignak-monitoring/alignak\"",
")",
"self",
".",
"send_an_element",
"(",
"make_monitoring_log",
"(",
"'warning'",
",",
"'SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME: this command is not implemented!'",
")",
")"
] | DOES NOTHING (Should create host downtime and start it?)
Format of the line that triggers function call::
SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
:return: None | [
"DOES",
"NOTHING",
"(",
"Should",
"create",
"host",
"downtime",
"and",
"start",
"it?",
")",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
] | python | train |
python-openxml/python-docx | docx/oxml/xmlchemy.py | https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/xmlchemy.py#L469-L477 | def _prop_name(self):
"""
Calculate property name from tag name, e.g. a:schemeClr -> schemeClr.
"""
if ':' in self._nsptagname:
start = self._nsptagname.index(':') + 1
else:
start = 0
return self._nsptagname[start:] | [
"def",
"_prop_name",
"(",
"self",
")",
":",
"if",
"':'",
"in",
"self",
".",
"_nsptagname",
":",
"start",
"=",
"self",
".",
"_nsptagname",
".",
"index",
"(",
"':'",
")",
"+",
"1",
"else",
":",
"start",
"=",
"0",
"return",
"self",
".",
"_nsptagname",
"[",
"start",
":",
"]"
] | Calculate property name from tag name, e.g. a:schemeClr -> schemeClr. | [
"Calculate",
"property",
"name",
"from",
"tag",
"name",
"e",
".",
"g",
".",
"a",
":",
"schemeClr",
"-",
">",
"schemeClr",
"."
] | python | train |
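The record above strips an optional `ns:` prefix by index arithmetic. For a single-prefix tag the same result falls out of `str.rpartition`, shown here as a standalone sketch:

```python
def prop_name(nsptagname):
    # Equivalent for tags with at most one ':' (the nsptag case);
    # rpartition returns the whole string when no colon is present.
    return nsptagname.rpartition(":")[2]

print(prop_name("a:schemeClr"))  # schemeClr
print(prop_name("schemeClr"))    # schemeClr
```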
limpyd/redis-limpyd | limpyd/fields.py | https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/fields.py#L735-L744 | def _pop(self, command, *args, **kwargs):
"""
Shortcut for commands that pop a value from the field, returning it while
removing it.
The returned value will be deindexed
"""
result = self._traverse_command(command, *args, **kwargs)
if self.indexable:
self.deindex([result])
return result | [
"def",
"_pop",
"(",
"self",
",",
"command",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"self",
".",
"_traverse_command",
"(",
"command",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"indexable",
":",
"self",
".",
"deindex",
"(",
"[",
"result",
"]",
")",
"return",
"result"
] | Shortcut for commands that pop a value from the field, returning it while
removing it.
The returned value will be deindexed | [
"Shortcut",
"for",
"commands",
"that",
"pop",
"a",
"value",
"from",
"the",
"field",
"returning",
"it",
"while",
"removing",
"it",
".",
"The",
"returned",
"value",
"will",
"be",
"deindexed"
] | python | train |
keybase/python-triplesec | triplesec/__init__.py | https://github.com/keybase/python-triplesec/blob/0a73e18cfe542d0cd5ee57bd823a67412b4b717e/triplesec/__init__.py#L91-L110 | def encrypt_ascii(self, data, key=None, v=None, extra_bytes=0,
digest="hex"):
"""
Encrypt data and return as ascii string. Hexadecimal digest as default.
Available digests:
hex: Hexadecimal
base64: Base 64
hqx: hexbin4
"""
digests = {"hex": binascii.b2a_hex,
"base64": binascii.b2a_base64,
"hqx": binascii.b2a_hqx}
digestor = digests.get(digest)
if not digestor:
TripleSecError(u"Digestor not supported.")
binary_result = self.encrypt(data, key, v, extra_bytes)
result = digestor(binary_result)
return result | [
"def",
"encrypt_ascii",
"(",
"self",
",",
"data",
",",
"key",
"=",
"None",
",",
"v",
"=",
"None",
",",
"extra_bytes",
"=",
"0",
",",
"digest",
"=",
"\"hex\"",
")",
":",
"digests",
"=",
"{",
"\"hex\"",
":",
"binascii",
".",
"b2a_hex",
",",
"\"base64\"",
":",
"binascii",
".",
"b2a_base64",
",",
"\"hqx\"",
":",
"binascii",
".",
"b2a_hqx",
"}",
"digestor",
"=",
"digests",
".",
"get",
"(",
"digest",
")",
"if",
"not",
"digestor",
":",
"TripleSecError",
"(",
"u\"Digestor not supported.\"",
")",
"binary_result",
"=",
"self",
".",
"encrypt",
"(",
"data",
",",
"key",
",",
"v",
",",
"extra_bytes",
")",
"result",
"=",
"digestor",
"(",
"binary_result",
")",
"return",
"result"
] | Encrypt data and return as ascii string. Hexadecimal digest as default.
Available digests:
hex: Hexadecimal
base64: Base 64
hqx: hexbin4 | [
"Encrypt",
"data",
"and",
"return",
"as",
"ascii",
"string",
".",
"Hexadecimal",
"digest",
"as",
"default",
"."
] | python | train |
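A hedged round-trip sketch for the record above, assuming the upstream `TripleSec` class also exposes the matching `decrypt_ascii` (it lives in the same module but is not part of this record):

```python
from triplesec import TripleSec  # assumed public import path

ts = TripleSec(key=b"* password *")
token = ts.encrypt_ascii(b"attack at dawn", digest="base64")
print(token)  # printable base64 ciphertext, safe to log or store as text
plain = ts.decrypt_ascii(token, digest="base64")  # assumed counterpart method
print(plain)  # b'attack at dawn'
```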
instaloader/instaloader | instaloader/structures.py | https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/structures.py#L228-L233 | def caption(self) -> Optional[str]:
"""Caption."""
if "edge_media_to_caption" in self._node and self._node["edge_media_to_caption"]["edges"]:
return self._node["edge_media_to_caption"]["edges"][0]["node"]["text"]
elif "caption" in self._node:
return self._node["caption"] | [
"def",
"caption",
"(",
"self",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"if",
"\"edge_media_to_caption\"",
"in",
"self",
".",
"_node",
"and",
"self",
".",
"_node",
"[",
"\"edge_media_to_caption\"",
"]",
"[",
"\"edges\"",
"]",
":",
"return",
"self",
".",
"_node",
"[",
"\"edge_media_to_caption\"",
"]",
"[",
"\"edges\"",
"]",
"[",
"0",
"]",
"[",
"\"node\"",
"]",
"[",
"\"text\"",
"]",
"elif",
"\"caption\"",
"in",
"self",
".",
"_node",
":",
"return",
"self",
".",
"_node",
"[",
"\"caption\"",
"]"
] | Caption. | [
"Caption",
"."
] | python | train |
tobgu/pyrsistent | pyrsistent/_plist.py | https://github.com/tobgu/pyrsistent/blob/c84dab0daaa44973cbe83830d14888827b307632/pyrsistent/_plist.py#L199-L219 | def remove(self, elem):
"""
Return new list with first element equal to elem removed. O(k) where k is the position
of the element that is removed.
Raises ValueError if no matching element is found.
>>> plist([1, 2, 1]).remove(1)
plist([2, 1])
"""
builder = _PListBuilder()
head = self
while head:
if head.first == elem:
return builder.append_plist(head.rest)
builder.append_elem(head.first)
head = head.rest
raise ValueError('{0} not found in PList'.format(elem)) | [
"def",
"remove",
"(",
"self",
",",
"elem",
")",
":",
"builder",
"=",
"_PListBuilder",
"(",
")",
"head",
"=",
"self",
"while",
"head",
":",
"if",
"head",
".",
"first",
"==",
"elem",
":",
"return",
"builder",
".",
"append_plist",
"(",
"head",
".",
"rest",
")",
"builder",
".",
"append_elem",
"(",
"head",
".",
"first",
")",
"head",
"=",
"head",
".",
"rest",
"raise",
"ValueError",
"(",
"'{0} not found in PList'",
".",
"format",
"(",
"elem",
")",
")"
] | Return new list with first element equal to elem removed. O(k) where k is the position
of the element that is removed.
Raises ValueError if no matching element is found.
>>> plist([1, 2, 1]).remove(1)
plist([2, 1]) | [
"Return",
"new",
"list",
"with",
"first",
"element",
"equal",
"to",
"elem",
"removed",
".",
"O",
"(",
"k",
")",
"where",
"k",
"is",
"the",
"position",
"of",
"the",
"element",
"that",
"is",
"removed",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/zmq/zmqshell.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/zmqshell.py#L511-L522 | def auto_rewrite_input(self, cmd):
"""Called to show the auto-rewritten input for autocall and friends.
FIXME: this payload is currently not correctly processed by the
frontend.
"""
new = self.prompt_manager.render('rewrite') + cmd
payload = dict(
source='IPython.zmq.zmqshell.ZMQInteractiveShell.auto_rewrite_input',
transformed_input=new,
)
self.payload_manager.write_payload(payload) | [
"def",
"auto_rewrite_input",
"(",
"self",
",",
"cmd",
")",
":",
"new",
"=",
"self",
".",
"prompt_manager",
".",
"render",
"(",
"'rewrite'",
")",
"+",
"cmd",
"payload",
"=",
"dict",
"(",
"source",
"=",
"'IPython.zmq.zmqshell.ZMQInteractiveShell.auto_rewrite_input'",
",",
"transformed_input",
"=",
"new",
",",
")",
"self",
".",
"payload_manager",
".",
"write_payload",
"(",
"payload",
")"
] | Called to show the auto-rewritten input for autocall and friends.
FIXME: this payload is currently not correctly processed by the
frontend. | [
"Called",
"to",
"show",
"the",
"auto",
"-",
"rewritten",
"input",
"for",
"autocall",
"and",
"friends",
"."
] | python | test |
nerdvegas/rez | src/rez/resolved_context.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/resolved_context.py#L545-L555 | def get_current(cls):
"""Get the context for the current env, if there is one.
Returns:
`ResolvedContext`: Current context, or None if not in a resolved env.
"""
filepath = os.getenv("REZ_RXT_FILE")
if not filepath or not os.path.exists(filepath):
return None
return cls.load(filepath) | [
"def",
"get_current",
"(",
"cls",
")",
":",
"filepath",
"=",
"os",
".",
"getenv",
"(",
"\"REZ_RXT_FILE\"",
")",
"if",
"not",
"filepath",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filepath",
")",
":",
"return",
"None",
"return",
"cls",
".",
"load",
"(",
"filepath",
")"
] | Get the context for the current env, if there is one.
Returns:
`ResolvedContext`: Current context, or None if not in a resolved env. | [
"Get",
"the",
"context",
"for",
"the",
"current",
"env",
"if",
"there",
"is",
"one",
"."
] | python | train |
brandon-rhodes/logging_tree | logging_tree/format.py | https://github.com/brandon-rhodes/logging_tree/blob/8513cf85b3bf8ff1b58e54c73718a41ef6524a4c/logging_tree/format.py#L144-L170 | def describe_handler(h):
"""Yield one or more lines describing the logging handler `h`."""
t = h.__class__ # using type() breaks in Python <= 2.6
format = handler_formats.get(t)
if format is not None:
yield format % h.__dict__
else:
yield repr(h)
level = getattr(h, 'level', logging.NOTSET)
if level != logging.NOTSET:
yield ' Level ' + logging.getLevelName(level)
for f in getattr(h, 'filters', ()):
yield ' Filter %s' % describe_filter(f)
formatter = getattr(h, 'formatter', None)
if formatter is not None:
if type(formatter) is logging.Formatter:
yield ' Formatter fmt=%r datefmt=%r' % (
getattr(formatter, '_fmt', None),
getattr(formatter, 'datefmt', None))
else:
yield ' Formatter %r' % (formatter,)
if t is logging.handlers.MemoryHandler and h.target is not None:
yield ' Flushes output to:'
g = describe_handler(h.target)
yield ' Handler ' + next(g)
for line in g:
yield ' ' + line | [
"def",
"describe_handler",
"(",
"h",
")",
":",
"t",
"=",
"h",
".",
"__class__",
"# using type() breaks in Python <= 2.6",
"format",
"=",
"handler_formats",
".",
"get",
"(",
"t",
")",
"if",
"format",
"is",
"not",
"None",
":",
"yield",
"format",
"%",
"h",
".",
"__dict__",
"else",
":",
"yield",
"repr",
"(",
"h",
")",
"level",
"=",
"getattr",
"(",
"h",
",",
"'level'",
",",
"logging",
".",
"NOTSET",
")",
"if",
"level",
"!=",
"logging",
".",
"NOTSET",
":",
"yield",
"' Level '",
"+",
"logging",
".",
"getLevelName",
"(",
"level",
")",
"for",
"f",
"in",
"getattr",
"(",
"h",
",",
"'filters'",
",",
"(",
")",
")",
":",
"yield",
"' Filter %s'",
"%",
"describe_filter",
"(",
"f",
")",
"formatter",
"=",
"getattr",
"(",
"h",
",",
"'formatter'",
",",
"None",
")",
"if",
"formatter",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"formatter",
")",
"is",
"logging",
".",
"Formatter",
":",
"yield",
"' Formatter fmt=%r datefmt=%r'",
"%",
"(",
"getattr",
"(",
"formatter",
",",
"'_fmt'",
",",
"None",
")",
",",
"getattr",
"(",
"formatter",
",",
"'datefmt'",
",",
"None",
")",
")",
"else",
":",
"yield",
"' Formatter %r'",
"%",
"(",
"formatter",
",",
")",
"if",
"t",
"is",
"logging",
".",
"handlers",
".",
"MemoryHandler",
"and",
"h",
".",
"target",
"is",
"not",
"None",
":",
"yield",
"' Flushes output to:'",
"g",
"=",
"describe_handler",
"(",
"h",
".",
"target",
")",
"yield",
"' Handler '",
"+",
"next",
"(",
"g",
")",
"for",
"line",
"in",
"g",
":",
"yield",
"' '",
"+",
"line"
] | Yield one or more lines describing the logging handler `h`. | [
"Yield",
"one",
"or",
"more",
"lines",
"describing",
"the",
"logging",
"handler",
"h",
"."
] | python | train |
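`describe_handler` is an internal helper; the package's advertised entry point is `logging_tree.printout()`, which renders the whole logger tree through helpers like this one. A small sketch:

```python
import logging
import logging_tree  # printout() is the documented public API

h = logging.StreamHandler()
h.setLevel(logging.WARNING)
h.setFormatter(logging.Formatter(fmt="%(levelname)s %(message)s"))
logging.getLogger("demo").addHandler(h)
logging_tree.printout()  # the handler appears with its Level and Formatter lines
```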
blha303/DO-runin | runin/runin.py | https://github.com/blha303/DO-runin/blob/4e725165e79f8bc0a2e1cb07a83f414686570e90/runin/runin.py#L13-L29 | def match_keys(inp, p=False):
"""Takes a comma-separated string of key ids or fingerprints and returns a list of key ids"""
_keys = []
ssh_keys = DO.get_ssh_keys()
for k in inp.split(","):
done = False
if k.isdigit():
for _ in [s for s in ssh_keys if s["id"] == int(k)]:
done = True
_keys.append(_["fingerprint"])
else:
for _ in [s for s in ssh_keys if s["fingerprint"] == k]:
done = True
_keys.append(_["fingerprint"])
if p and not done:
print("Could not find a match for '{}', skipping".format(k), file=sys.stderr)
return _keys | [
"def",
"match_keys",
"(",
"inp",
",",
"p",
"=",
"False",
")",
":",
"_keys",
"=",
"[",
"]",
"ssh_keys",
"=",
"DO",
".",
"get_ssh_keys",
"(",
")",
"for",
"k",
"in",
"inp",
".",
"split",
"(",
"\",\"",
")",
":",
"done",
"=",
"False",
"if",
"k",
".",
"isdigit",
"(",
")",
":",
"for",
"_",
"in",
"[",
"s",
"for",
"s",
"in",
"ssh_keys",
"if",
"s",
"[",
"\"id\"",
"]",
"==",
"int",
"(",
"k",
")",
"]",
":",
"done",
"=",
"True",
"_keys",
".",
"append",
"(",
"_",
"[",
"\"fingerprint\"",
"]",
")",
"else",
":",
"for",
"_",
"in",
"[",
"s",
"for",
"s",
"in",
"ssh_keys",
"if",
"s",
"[",
"\"fingerprint\"",
"]",
"==",
"k",
"]",
":",
"done",
"=",
"True",
"_keys",
".",
"append",
"(",
"_",
"[",
"\"fingerprint\"",
"]",
")",
"if",
"p",
"and",
"not",
"done",
":",
"print",
"(",
"\"Could not find a match for '{}', skipping\"",
".",
"format",
"(",
"k",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"_keys"
] | Takes a comma-separated string of key ids or fingerprints and returns a list of key fingerprints | [
"Takes",
"a",
"comma",
"-",
"separated",
"string",
"of",
"key",
"ids",
"or",
"fingerprints",
"and",
"returns",
"a",
"list",
"of",
"key",
"ids"
] | python | train |
Datary/scrapbag | scrapbag/csvs.py | https://github.com/Datary/scrapbag/blob/3a4f9824ab6fe21121214ba9963690618da2c9de/scrapbag/csvs.py#L264-L278 | def row_iter_limiter(rows, begin_row, way, c_value):
"""
Algorithm to detect row limits when rows have more than one column.
Depending on the init params, searches from the beginning or the end.
NOT SURE THAT IT WORKS WELL..
"""
limit = None
for index in range(begin_row, len(rows)):
if not len(exclude_empty_values(rows[way * index])) == 1:
limit = way * index + c_value if way * index + \
c_value not in [way * len(rows), 0] else None
break
return limit | [
"def",
"row_iter_limiter",
"(",
"rows",
",",
"begin_row",
",",
"way",
",",
"c_value",
")",
":",
"limit",
"=",
"None",
"for",
"index",
"in",
"range",
"(",
"begin_row",
",",
"len",
"(",
"rows",
")",
")",
":",
"if",
"not",
"len",
"(",
"exclude_empty_values",
"(",
"rows",
"[",
"way",
"*",
"index",
"]",
")",
")",
"==",
"1",
":",
"limit",
"=",
"way",
"*",
"index",
"+",
"c_value",
"if",
"way",
"*",
"index",
"+",
"c_value",
"not",
"in",
"[",
"way",
"*",
"len",
"(",
"rows",
")",
",",
"0",
"]",
"else",
"None",
"break",
"return",
"limit"
] | Algorithm to detect row limits when rows have more than one column.
Depending on the init params, searches from the beginning or the end.
NOT SURE THAT IT WORKS WELL.. | [
"Alghoritm",
"to",
"detect",
"row",
"limits",
"when",
"row",
"have",
"more",
"that",
"one",
"column",
".",
"Depending",
"the",
"init",
"params",
"find",
"from",
"the",
"begin",
"or",
"behind",
".",
"NOT",
"SURE",
"THAT",
"IT",
"WORKS",
"WELL",
".."
] | python | train |
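A demo of the record above needs `exclude_empty_values`, which lives elsewhere in scrapbag; a plausible stand-in is assumed here (hypothetical, not the package's actual definition):

```python
def exclude_empty_values(row):
    # Hypothetical stand-in for scrapbag's helper of the same name.
    return [v for v in row if v not in ("", None)]

rows = [["Report", "", ""],         # heading rows with one non-empty cell
        ["2017", "", ""],
        ["name", "qty", "price"],   # first genuinely multi-column row
        ["foo", "1", "2.5"]]
# Scan forward (way=1) from row 0 with offset 0: the first multi-column row.
print(row_iter_limiter(rows, 0, 1, 0))  # 2
```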
saltstack/salt | salt/pillar/__init__.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/__init__.py#L333-L374 | def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs
'''
Compile pillar and set it to the cache, if not found.
:param args:
:param kwargs:
:return:
'''
log.debug('Scanning pillar cache for information about minion %s and pillarenv %s', self.minion_id, self.pillarenv)
log.debug('Scanning cache for minion %s: %s', self.minion_id, self.cache[self.minion_id] or '*empty*')
# Check the cache!
if self.minion_id in self.cache: # Keyed by minion_id
# TODO Compare grains, etc?
if self.pillarenv in self.cache[self.minion_id]:
# We have a cache hit! Send it back.
log.debug('Pillar cache hit for minion %s and pillarenv %s', self.minion_id, self.pillarenv)
pillar_data = self.cache[self.minion_id][self.pillarenv]
else:
# We found the minion but not the env. Store it.
pillar_data = self.fetch_pillar()
self.cache[self.minion_id][self.pillarenv] = pillar_data
self.cache.store()
log.debug('Pillar cache miss for pillarenv %s for minion %s', self.pillarenv, self.minion_id)
else:
# We haven't seen this minion yet in the cache. Store it.
pillar_data = self.fetch_pillar()
self.cache[self.minion_id] = {self.pillarenv: pillar_data}
log.debug('Pillar cache has been added for minion %s', self.minion_id)
log.debug('Current pillar cache: %s', self.cache[self.minion_id])
# we don't want the pillar_override baked into the cached fetch_pillar from above
if self.pillar_override:
pillar_data = merge(
pillar_data,
self.pillar_override,
self.opts.get('pillar_source_merging_strategy', 'smart'),
self.opts.get('renderer', 'yaml'),
self.opts.get('pillar_merge_lists', False))
pillar_data.update(self.pillar_override)
return pillar_data | [
"def",
"compile_pillar",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Will likely just be pillar_dirs",
"log",
".",
"debug",
"(",
"'Scanning pillar cache for information about minion %s and pillarenv %s'",
",",
"self",
".",
"minion_id",
",",
"self",
".",
"pillarenv",
")",
"log",
".",
"debug",
"(",
"'Scanning cache for minion %s: %s'",
",",
"self",
".",
"minion_id",
",",
"self",
".",
"cache",
"[",
"self",
".",
"minion_id",
"]",
"or",
"'*empty*'",
")",
"# Check the cache!",
"if",
"self",
".",
"minion_id",
"in",
"self",
".",
"cache",
":",
"# Keyed by minion_id",
"# TODO Compare grains, etc?",
"if",
"self",
".",
"pillarenv",
"in",
"self",
".",
"cache",
"[",
"self",
".",
"minion_id",
"]",
":",
"# We have a cache hit! Send it back.",
"log",
".",
"debug",
"(",
"'Pillar cache hit for minion %s and pillarenv %s'",
",",
"self",
".",
"minion_id",
",",
"self",
".",
"pillarenv",
")",
"pillar_data",
"=",
"self",
".",
"cache",
"[",
"self",
".",
"minion_id",
"]",
"[",
"self",
".",
"pillarenv",
"]",
"else",
":",
"# We found the minion but not the env. Store it.",
"pillar_data",
"=",
"self",
".",
"fetch_pillar",
"(",
")",
"self",
".",
"cache",
"[",
"self",
".",
"minion_id",
"]",
"[",
"self",
".",
"pillarenv",
"]",
"=",
"pillar_data",
"self",
".",
"cache",
".",
"store",
"(",
")",
"log",
".",
"debug",
"(",
"'Pillar cache miss for pillarenv %s for minion %s'",
",",
"self",
".",
"pillarenv",
",",
"self",
".",
"minion_id",
")",
"else",
":",
"# We haven't seen this minion yet in the cache. Store it.",
"pillar_data",
"=",
"self",
".",
"fetch_pillar",
"(",
")",
"self",
".",
"cache",
"[",
"self",
".",
"minion_id",
"]",
"=",
"{",
"self",
".",
"pillarenv",
":",
"pillar_data",
"}",
"log",
".",
"debug",
"(",
"'Pillar cache has been added for minion %s'",
",",
"self",
".",
"minion_id",
")",
"log",
".",
"debug",
"(",
"'Current pillar cache: %s'",
",",
"self",
".",
"cache",
"[",
"self",
".",
"minion_id",
"]",
")",
"# we dont want the pillar_override baked into the cached fetch_pillar from above",
"if",
"self",
".",
"pillar_override",
":",
"pillar_data",
"=",
"merge",
"(",
"pillar_data",
",",
"self",
".",
"pillar_override",
",",
"self",
".",
"opts",
".",
"get",
"(",
"'pillar_source_merging_strategy'",
",",
"'smart'",
")",
",",
"self",
".",
"opts",
".",
"get",
"(",
"'renderer'",
",",
"'yaml'",
")",
",",
"self",
".",
"opts",
".",
"get",
"(",
"'pillar_merge_lists'",
",",
"False",
")",
")",
"pillar_data",
".",
"update",
"(",
"self",
".",
"pillar_override",
")",
"return",
"pillar_data"
] | Compile pillar and set it to the cache, if not found.
:param args:
:param kwargs:
:return: | [
"Compile",
"pillar",
"and",
"set",
"it",
"to",
"the",
"cache",
"if",
"not",
"found",
"."
] | python | train |
LeastAuthority/txkube | src/txkube/_exception.py | https://github.com/LeastAuthority/txkube/blob/a7e555d00535ff787d4b1204c264780da40cf736/src/txkube/_exception.py#L11-L21 | def _full_kind(details):
"""
Determine the full kind (including a group if applicable) for some failure
details.
:see: ``v1.Status.details``
"""
kind = details[u"kind"]
if details.get(u"group") is not None:
kind += u"." + details[u"group"]
return kind | [
"def",
"_full_kind",
"(",
"details",
")",
":",
"kind",
"=",
"details",
"[",
"u\"kind\"",
"]",
"if",
"details",
".",
"get",
"(",
"u\"group\"",
")",
"is",
"not",
"None",
":",
"kind",
"+=",
"u\".\"",
"+",
"details",
"[",
"u\"group\"",
"]",
"return",
"kind"
] | Determine the full kind (including a group if applicable) for some failure
details.
:see: ``v1.Status.details`` | [
"Determine",
"the",
"full",
"kind",
"(",
"including",
"a",
"group",
"if",
"applicable",
")",
"for",
"some",
"failure",
"details",
"."
] | python | train |
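A self-contained demo of the record above with Status-details-shaped dicts (the values are illustrative, not taken from a real cluster):

```python
print(_full_kind({u"kind": u"services", u"group": None}))        # services
print(_full_kind({u"kind": u"deployments", u"group": u"apps"}))  # deployments.apps
```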
Grunny/zap-cli | zapcli/commands/policies.py | https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/commands/policies.py#L96-L99 | def _get_all_policy_ids(zap_helper):
"""Get all policy IDs."""
policies = zap_helper.zap.ascan.policies()
return [p['id'] for p in policies] | [
"def",
"_get_all_policy_ids",
"(",
"zap_helper",
")",
":",
"policies",
"=",
"zap_helper",
".",
"zap",
".",
"ascan",
".",
"policies",
"(",
")",
"return",
"[",
"p",
"[",
"'id'",
"]",
"for",
"p",
"in",
"policies",
"]"
] | Get all policy IDs. | [
"Get",
"all",
"policy",
"IDs",
"."
] | python | train |
poppy-project/pypot | pypot/vrep/remoteApiBindings/vrep.py | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L395-L402 | def simxLoadScene(clientID, scenePathAndName, options, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
if (sys.version_info[0] == 3) and (type(scenePathAndName) is str):
scenePathAndName=scenePathAndName.encode('utf-8')
return c_LoadScene(clientID, scenePathAndName, options, operationMode) | [
"def",
"simxLoadScene",
"(",
"clientID",
",",
"scenePathAndName",
",",
"options",
",",
"operationMode",
")",
":",
"if",
"(",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"3",
")",
"and",
"(",
"type",
"(",
"scenePathAndName",
")",
"is",
"str",
")",
":",
"scenePathAndName",
"=",
"scenePathAndName",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"c_LoadScene",
"(",
"clientID",
",",
"scenePathAndName",
",",
"options",
",",
"operationMode",
")"
] | Please have a look at the function description/documentation in the V-REP user manual | [
"Please",
"have",
"a",
"look",
"at",
"the",
"function",
"description",
"/",
"documentation",
"in",
"the",
"V",
"-",
"REP",
"user",
"manual"
] | python | train |
chrisrink10/basilisp | src/basilisp/lang/compiler/generator.py | https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/generator.py#L748-L808 | def _deftype_to_py_ast( # pylint: disable=too-many-branches
ctx: GeneratorContext, node: DefType
) -> GeneratedPyAST:
"""Return a Python AST Node for a `deftype*` expression."""
assert node.op == NodeOp.DEFTYPE
type_name = munge(node.name)
ctx.symbol_table.new_symbol(sym.symbol(node.name), type_name, LocalType.DEFTYPE)
bases = []
for base in node.interfaces:
base_node = gen_py_ast(ctx, base)
assert (
count(base_node.dependencies) == 0
), "Class and host form nodes do not have dependencies"
bases.append(base_node.node)
decorator = ast.Call(
func=_ATTR_CLASS_DECORATOR_NAME,
args=[],
keywords=[
ast.keyword(arg="cmp", value=ast.NameConstant(False)),
ast.keyword(arg="frozen", value=ast.NameConstant(node.is_frozen)),
ast.keyword(arg="slots", value=ast.NameConstant(True)),
],
)
with ctx.new_symbol_table(node.name):
type_nodes = []
for field in node.fields:
safe_field = munge(field.name)
type_nodes.append(
ast.Assign(
targets=[ast.Name(id=safe_field, ctx=ast.Store())],
value=ast.Call(func=_ATTRIB_FIELD_FN_NAME, args=[], keywords=[]),
)
)
ctx.symbol_table.new_symbol(sym.symbol(field.name), safe_field, field.local)
type_deps: List[ast.AST] = []
for method in node.methods:
type_ast = __deftype_method_to_py_ast(ctx, method)
type_nodes.append(type_ast.node)
type_deps.extend(type_ast.dependencies)
return GeneratedPyAST(
node=ast.Name(id=type_name, ctx=ast.Load()),
dependencies=list(
chain(
type_deps,
[
ast.ClassDef(
name=type_name,
bases=bases,
keywords=[],
body=type_nodes,
decorator_list=[decorator],
)
],
)
),
) | [
"def",
"_deftype_to_py_ast",
"(",
"# pylint: disable=too-many-branches",
"ctx",
":",
"GeneratorContext",
",",
"node",
":",
"DefType",
")",
"->",
"GeneratedPyAST",
":",
"assert",
"node",
".",
"op",
"==",
"NodeOp",
".",
"DEFTYPE",
"type_name",
"=",
"munge",
"(",
"node",
".",
"name",
")",
"ctx",
".",
"symbol_table",
".",
"new_symbol",
"(",
"sym",
".",
"symbol",
"(",
"node",
".",
"name",
")",
",",
"type_name",
",",
"LocalType",
".",
"DEFTYPE",
")",
"bases",
"=",
"[",
"]",
"for",
"base",
"in",
"node",
".",
"interfaces",
":",
"base_node",
"=",
"gen_py_ast",
"(",
"ctx",
",",
"base",
")",
"assert",
"(",
"count",
"(",
"base_node",
".",
"dependencies",
")",
"==",
"0",
")",
",",
"\"Class and host form nodes do not have dependencies\"",
"bases",
".",
"append",
"(",
"base_node",
".",
"node",
")",
"decorator",
"=",
"ast",
".",
"Call",
"(",
"func",
"=",
"_ATTR_CLASS_DECORATOR_NAME",
",",
"args",
"=",
"[",
"]",
",",
"keywords",
"=",
"[",
"ast",
".",
"keyword",
"(",
"arg",
"=",
"\"cmp\"",
",",
"value",
"=",
"ast",
".",
"NameConstant",
"(",
"False",
")",
")",
",",
"ast",
".",
"keyword",
"(",
"arg",
"=",
"\"frozen\"",
",",
"value",
"=",
"ast",
".",
"NameConstant",
"(",
"node",
".",
"is_frozen",
")",
")",
",",
"ast",
".",
"keyword",
"(",
"arg",
"=",
"\"slots\"",
",",
"value",
"=",
"ast",
".",
"NameConstant",
"(",
"True",
")",
")",
",",
"]",
",",
")",
"with",
"ctx",
".",
"new_symbol_table",
"(",
"node",
".",
"name",
")",
":",
"type_nodes",
"=",
"[",
"]",
"for",
"field",
"in",
"node",
".",
"fields",
":",
"safe_field",
"=",
"munge",
"(",
"field",
".",
"name",
")",
"type_nodes",
".",
"append",
"(",
"ast",
".",
"Assign",
"(",
"targets",
"=",
"[",
"ast",
".",
"Name",
"(",
"id",
"=",
"safe_field",
",",
"ctx",
"=",
"ast",
".",
"Store",
"(",
")",
")",
"]",
",",
"value",
"=",
"ast",
".",
"Call",
"(",
"func",
"=",
"_ATTRIB_FIELD_FN_NAME",
",",
"args",
"=",
"[",
"]",
",",
"keywords",
"=",
"[",
"]",
")",
",",
")",
")",
"ctx",
".",
"symbol_table",
".",
"new_symbol",
"(",
"sym",
".",
"symbol",
"(",
"field",
".",
"name",
")",
",",
"safe_field",
",",
"field",
".",
"local",
")",
"type_deps",
":",
"List",
"[",
"ast",
".",
"AST",
"]",
"=",
"[",
"]",
"for",
"method",
"in",
"node",
".",
"methods",
":",
"type_ast",
"=",
"__deftype_method_to_py_ast",
"(",
"ctx",
",",
"method",
")",
"type_nodes",
".",
"append",
"(",
"type_ast",
".",
"node",
")",
"type_deps",
".",
"extend",
"(",
"type_ast",
".",
"dependencies",
")",
"return",
"GeneratedPyAST",
"(",
"node",
"=",
"ast",
".",
"Name",
"(",
"id",
"=",
"type_name",
",",
"ctx",
"=",
"ast",
".",
"Load",
"(",
")",
")",
",",
"dependencies",
"=",
"list",
"(",
"chain",
"(",
"type_deps",
",",
"[",
"ast",
".",
"ClassDef",
"(",
"name",
"=",
"type_name",
",",
"bases",
"=",
"bases",
",",
"keywords",
"=",
"[",
"]",
",",
"body",
"=",
"type_nodes",
",",
"decorator_list",
"=",
"[",
"decorator",
"]",
",",
")",
"]",
",",
")",
")",
",",
")"
] | Return a Python AST Node for a `deftype*` expression. | [
"Return",
"a",
"Python",
"AST",
"Node",
"for",
"a",
"deftype",
"*",
"expression",
"."
] | python | test |
rochacbruno/flasgger | examples/restful.py | https://github.com/rochacbruno/flasgger/blob/fef154f61d7afca548067be0c758c3dd71cc4c97/examples/restful.py#L84-L109 | def put(self, todo_id):
"""
This is an example
---
tags:
- restful
parameters:
- in: body
name: body
schema:
$ref: '#/definitions/Task'
- in: path
name: todo_id
required: true
description: The ID of the task, try 42!
type: string
responses:
201:
description: The task has been updated
schema:
$ref: '#/definitions/Task'
"""
args = parser.parse_args()
task = {'task': args['task']}
TODOS[todo_id] = task
return task, 201 | [
"def",
"put",
"(",
"self",
",",
"todo_id",
")",
":",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"task",
"=",
"{",
"'task'",
":",
"args",
"[",
"'task'",
"]",
"}",
"TODOS",
"[",
"todo_id",
"]",
"=",
"task",
"return",
"task",
",",
"201"
] | This is an example
---
tags:
- restful
parameters:
- in: body
name: body
schema:
$ref: '#/definitions/Task'
- in: path
name: todo_id
required: true
description: The ID of the task, try 42!
type: string
responses:
201:
description: The task has been updated
schema:
$ref: '#/definitions/Task' | [
"This",
"is",
"an",
"example",
"---",
"tags",
":",
"-",
"restful",
"parameters",
":",
"-",
"in",
":",
"body",
"name",
":",
"body",
"schema",
":",
"$ref",
":",
"#",
"/",
"definitions",
"/",
"Task",
"-",
"in",
":",
"path",
"name",
":",
"todo_id",
"required",
":",
"true",
"description",
":",
"The",
"ID",
"of",
"the",
"task",
"try",
"42!",
"type",
":",
"string",
"responses",
":",
"201",
":",
"description",
":",
"The",
"task",
"has",
"been",
"updated",
"schema",
":",
"$ref",
":",
"#",
"/",
"definitions",
"/",
"Task"
] | python | train |
BerkeleyAutomation/perception | perception/ensenso_sensor.py | https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/ensenso_sensor.py#L124-L135 | def stop(self):
""" Stop the sensor """
# check that everything is running
if not self._running:
logging.warning('Ensenso not running. Aborting stop')
return False
# stop subs
self._pointcloud_sub.unregister()
self._camera_info_sub.unregister()
self._running = False
return True | [
"def",
"stop",
"(",
"self",
")",
":",
"# check that everything is running",
"if",
"not",
"self",
".",
"_running",
":",
"logging",
".",
"warning",
"(",
"'Ensenso not running. Aborting stop'",
")",
"return",
"False",
"# stop subs",
"self",
".",
"_pointcloud_sub",
".",
"unregister",
"(",
")",
"self",
".",
"_camera_info_sub",
".",
"unregister",
"(",
")",
"self",
".",
"_running",
"=",
"False",
"return",
"True"
] | Stop the sensor | [
"Stop",
"the",
"sensor"
] | python | train |
DistrictDataLabs/yellowbrick | yellowbrick/features/manifold.py | https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/features/manifold.py#L232-L260 | def manifold(self, transformer):
"""
Creates the manifold estimator if a string value is passed in,
validates other objects passed in.
"""
if not is_estimator(transformer):
if transformer not in self.ALGORITHMS:
raise YellowbrickValueError(
"could not create manifold for '%s'".format(str(transformer))
)
# Create a new transformer with the specified params
self._name = MANIFOLD_NAMES[transformer]
transformer = clone(self.ALGORITHMS[transformer])
params = {
"n_components": 2,
"n_neighbors": self.n_neighbors,
"random_state": self.random_state,
}
for param in list(params.keys()):
if param not in transformer.get_params():
del params[param]
transformer.set_params(**params)
self._manifold = transformer
if self._name is None:
self._name = self._manifold.__class__.__name__ | [
"def",
"manifold",
"(",
"self",
",",
"transformer",
")",
":",
"if",
"not",
"is_estimator",
"(",
"transformer",
")",
":",
"if",
"transformer",
"not",
"in",
"self",
".",
"ALGORITHMS",
":",
"raise",
"YellowbrickValueError",
"(",
"\"could not create manifold for '%s'\"",
".",
"format",
"(",
"str",
"(",
"transformer",
")",
")",
")",
"# Create a new transformer with the specified params",
"self",
".",
"_name",
"=",
"MANIFOLD_NAMES",
"[",
"transformer",
"]",
"transformer",
"=",
"clone",
"(",
"self",
".",
"ALGORITHMS",
"[",
"transformer",
"]",
")",
"params",
"=",
"{",
"\"n_components\"",
":",
"2",
",",
"\"n_neighbors\"",
":",
"self",
".",
"n_neighbors",
",",
"\"random_state\"",
":",
"self",
".",
"random_state",
",",
"}",
"for",
"param",
"in",
"list",
"(",
"params",
".",
"keys",
"(",
")",
")",
":",
"if",
"param",
"not",
"in",
"transformer",
".",
"get_params",
"(",
")",
":",
"del",
"params",
"[",
"param",
"]",
"transformer",
".",
"set_params",
"(",
"*",
"*",
"params",
")",
"self",
".",
"_manifold",
"=",
"transformer",
"if",
"self",
".",
"_name",
"is",
"None",
":",
"self",
".",
"_name",
"=",
"self",
".",
"_manifold",
".",
"__class__",
".",
"__name__"
] | Creates the manifold estimator if a string value is passed in,
validates other objects passed in. | [
"Creates",
"the",
"manifold",
"estimator",
"if",
"a",
"string",
"value",
"is",
"passed",
"in",
"validates",
"other",
"objects",
"passed",
"in",
"."
] | python | train |
dade-ai/snipy | snipy/img/imageutil.py | https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/img/imageutil.py#L151-L163 | def snoise2dvec(size, *params, **kwargs): #, vlacunarity):
"""
vector parameters
:param size:
:param vz:
:param vscale:
:param voctave:
:param vpersistence:
:param vlacunarity:
:return:
"""
data = (snoise2d(size, *p, **kwargs) for p in zip(*params)) # , vlacunarity))
return np.stack(data, 0) | [
"def",
"snoise2dvec",
"(",
"size",
",",
"*",
"params",
",",
"*",
"*",
"kwargs",
")",
":",
"#, vlacunarity):",
"data",
"=",
"(",
"snoise2d",
"(",
"size",
",",
"*",
"p",
",",
"*",
"*",
"kwargs",
")",
"for",
"p",
"in",
"zip",
"(",
"*",
"params",
")",
")",
"# , vlacunarity))",
"return",
"np",
".",
"stack",
"(",
"data",
",",
"0",
")"
] | vector parameters
:param size:
:param vz:
:param vscale:
:param voctave:
:param vpersistence:
:param vlacunarity:
:return: | [
"vector",
"parameters",
":",
"param",
"size",
":",
":",
"param",
"vz",
":",
":",
"param",
"vscale",
":",
":",
"param",
"voctave",
":",
":",
"param",
"vpersistence",
":",
":",
"param",
"vlacunarity",
":",
":",
"return",
":"
] | python | valid |
mdsol/rwslib | rwslib/builders/metadata.py | https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L1462-L1475 | def build(self, builder):
"""Build XML by appending to builder"""
params = dict(OID=self.oid, Name=self.name, DataType=self.datatype.value)
if self.sas_format_name is not None:
params["SASFormatName"] = self.sas_format_name
builder.start("CodeList", params)
for item in self.codelist_items:
item.build(builder)
for alias in self.aliases:
alias.build(builder)
builder.end("CodeList") | [
"def",
"build",
"(",
"self",
",",
"builder",
")",
":",
"params",
"=",
"dict",
"(",
"OID",
"=",
"self",
".",
"oid",
",",
"Name",
"=",
"self",
".",
"name",
",",
"DataType",
"=",
"self",
".",
"datatype",
".",
"value",
")",
"if",
"self",
".",
"sas_format_name",
"is",
"not",
"None",
":",
"params",
"[",
"\"SASFormatName\"",
"]",
"=",
"self",
".",
"sas_format_name",
"builder",
".",
"start",
"(",
"\"CodeList\"",
",",
"params",
")",
"for",
"item",
"in",
"self",
".",
"codelist_items",
":",
"item",
".",
"build",
"(",
"builder",
")",
"for",
"alias",
"in",
"self",
".",
"aliases",
":",
"alias",
".",
"build",
"(",
"builder",
")",
"builder",
".",
"end",
"(",
"\"CodeList\"",
")"
] | Build XML by appending to builder | [
"Build",
"XML",
"by",
"appending",
"to",
"builder"
] | python | train |
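The start(tag, attrs)/end(tag) calls above match the protocol of the stdlib xml.etree TreeBuilder, so the shape of the output can be sketched without rwslib; the OID and attribute values here are illustrative:

    import xml.etree.ElementTree as ET

    builder = ET.TreeBuilder()
    builder.start("CodeList", {"OID": "CL.1", "Name": "Sex", "DataType": "string"})
    builder.start("CodeListItem", {"CodedValue": "M"})
    builder.end("CodeListItem")
    builder.end("CodeList")
    root = builder.close()  # returns the assembled Element
    print(ET.tostring(root))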
saltstack/salt | salt/pillar/s3.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/s3.py#L352-L362 | def _read_buckets_cache_file(cache_file):
'''
Return the contents of the buckets cache file
'''
log.debug('Reading buckets cache file')
with salt.utils.files.fopen(cache_file, 'rb') as fp_:
data = pickle.load(fp_)
return data | [
"def",
"_read_buckets_cache_file",
"(",
"cache_file",
")",
":",
"log",
".",
"debug",
"(",
"'Reading buckets cache file'",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"cache_file",
",",
"'rb'",
")",
"as",
"fp_",
":",
"data",
"=",
"pickle",
".",
"load",
"(",
"fp_",
")",
"return",
"data"
] | Return the contents of the buckets cache file | [
"Return",
"the",
"contents",
"of",
"the",
"buckets",
"cache",
"file"
] | python | train |
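A standalone round-trip sketch of the pickle cache this helper reads, with an illustrative payload and a temporary path:

    import os, pickle, tempfile

    payload = {"bucket-a": ["pillar/top.sls"]}
    fd, cache_file = tempfile.mkstemp(suffix=".p")
    os.close(fd)
    with open(cache_file, "wb") as fp_:
        pickle.dump(payload, fp_)           # what the writer side would do
    with open(cache_file, "rb") as fp_:
        assert pickle.load(fp_) == payload  # what the reader above does
    os.remove(cache_file)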
django-danceschool/django-danceschool | danceschool/core/models.py | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/models.py#L2340-L2359 | def warningFlag(self):
'''
When viewing individual event registrations, there are a large number of potential
issues that can arise that may warrant scrutiny. This property just checks all of
these conditions and indicates if anything is amiss so that the template need not
check each of these conditions individually repeatedly.
'''
if not hasattr(self,'invoiceitem'):
return True
if apps.is_installed('danceschool.financial'):
'''
If the financial app is installed, then we can also check additional
properties set by that app to ensure that there are no inconsistencies
'''
if self.invoiceitem.revenueNotYetReceived != 0 or self.invoiceitem.revenueMismatch:
return True
return (
self.price != self.invoiceitem.grossTotal or
self.invoiceitem.invoice.unpaid or self.invoiceitem.invoice.outstandingBalance != 0
) | [
"def",
"warningFlag",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'invoiceitem'",
")",
":",
"return",
"True",
"if",
"apps",
".",
"is_installed",
"(",
"'danceschool.financial'",
")",
":",
"'''\n If the financial app is installed, then we can also check additional\n properties set by that app to ensure that there are no inconsistencies\n '''",
"if",
"self",
".",
"invoiceitem",
".",
"revenueNotYetReceived",
"!=",
"0",
"or",
"self",
".",
"invoiceitem",
".",
"revenueMismatch",
":",
"return",
"True",
"return",
"(",
"self",
".",
"price",
"!=",
"self",
".",
"invoiceitem",
".",
"grossTotal",
"or",
"self",
".",
"invoiceitem",
".",
"invoice",
".",
"unpaid",
"or",
"self",
".",
"invoiceitem",
".",
"invoice",
".",
"outstandingBalance",
"!=",
"0",
")"
] | When viewing individual event registrations, there are a large number of potential
issues that can arise that may warrant scrutiny. This property just checks all of
these conditions and indicates if anything is amiss so that the template need not
check each of these conditions individually repeatedly. | [
"When",
"viewing",
"individual",
"event",
"registrations",
"there",
"are",
"a",
"large",
"number",
"of",
"potential",
"issues",
"that",
"can",
"arise",
"that",
"may",
"warrant",
"scrutiny",
".",
"This",
"property",
"just",
"checks",
"all",
"of",
"these",
"conditions",
"and",
"indicates",
"if",
"anything",
"is",
"amiss",
"so",
"that",
"the",
"template",
"need",
"not",
"check",
"each",
"of",
"these",
"conditions",
"individually",
"repeatedly",
"."
] | python | train |
metakirby5/colorz | colorz.py | https://github.com/metakirby5/colorz/blob/11fd47a28d7a4af5b91d29978524335c8fef8cc9/colorz.py#L56-L61 | def get_colors(img):
"""
Returns a list of all the image's colors.
"""
w, h = img.size
return [color[:3] for count, color in img.convert('RGB').getcolors(w * h)] | [
"def",
"get_colors",
"(",
"img",
")",
":",
"w",
",",
"h",
"=",
"img",
".",
"size",
"return",
"[",
"color",
"[",
":",
"3",
"]",
"for",
"count",
",",
"color",
"in",
"img",
".",
"convert",
"(",
"'RGB'",
")",
".",
"getcolors",
"(",
"w",
"*",
"h",
")",
"]"
] | Returns a list of all the image's colors. | [
"Returns",
"a",
"list",
"of",
"all",
"the",
"image",
"s",
"colors",
"."
] | python | train |
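A runnable usage sketch, assuming Pillow is installed; getcolors(maxcolors=w*h) yields (count, color) pairs, which the comprehension reduces to RGB triples:

    from PIL import Image

    img = Image.new("RGB", (2, 2), (255, 0, 0))  # tiny solid-red test image
    w, h = img.size
    colors = [color[:3] for count, color in img.convert("RGB").getcolors(w * h)]
    assert colors == [(255, 0, 0)]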
karjaljo/hiisi | hiisi/hiisi.py | https://github.com/karjaljo/hiisi/blob/de6a64df5dcbcb37d5d3d5468663e65a7794f9a8/hiisi/hiisi.py#L135-L181 | def create_from_filedict(self, filedict):
"""
Creates h5 file from dictionary containing the file structure.
Filedict is a regular dictionary whose keys are hdf5 paths and whose
values are dictionaries containing the metadata and datasets. Metadata
is given as normal key-value -pairs and dataset arrays are given using
'DATASET' key. Datasets must be numpy arrays.
Method can also be used to append existing hdf5 file. If the file is
opened in read only mode, method does nothing.
Examples
--------
Create newfile.h5 and fill it with data and metadata
>>> h5f = HiisiHDF('newfile.h5', 'w')
>>> filedict = {'/':{'attr1':'A'},
'/dataset1/data1/data':{'DATASET':np.zeros(100), 'quantity':'emptyarray'}, 'B':'b'}
>>> h5f.create_from_filedict(filedict)
"""
if self.mode in ['r+','w', 'w-', 'x', 'a']:
for h5path, path_content in filedict.iteritems():
if path_content.has_key('DATASET'):
# If path exist, write only metadata
if h5path in self:
for key, value in path_content.iteritems():
if key != 'DATASET':
self[h5path].attrs[key] = value
else:
try:
group = self.create_group(os.path.dirname(h5path))
except ValueError:
group = self[os.path.dirname(h5path)]
pass # This pass has no effect?
new_dataset = group.create_dataset(os.path.basename(h5path), data=path_content['DATASET'])
for key, value in path_content.iteritems():
if key != 'DATASET':
new_dataset.attrs[key] = value
else:
try:
group = self.create_group(h5path)
except ValueError:
group = self[h5path]
for key, value in path_content.iteritems():
group.attrs[key] = value | [
"def",
"create_from_filedict",
"(",
"self",
",",
"filedict",
")",
":",
"if",
"self",
".",
"mode",
"in",
"[",
"'r+'",
",",
"'w'",
",",
"'w-'",
",",
"'x'",
",",
"'a'",
"]",
":",
"for",
"h5path",
",",
"path_content",
"in",
"filedict",
".",
"iteritems",
"(",
")",
":",
"if",
"path_content",
".",
"has_key",
"(",
"'DATASET'",
")",
":",
"# If path exist, write only metadata",
"if",
"h5path",
"in",
"self",
":",
"for",
"key",
",",
"value",
"in",
"path_content",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"!=",
"'DATASET'",
":",
"self",
"[",
"h5path",
"]",
".",
"attrs",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"try",
":",
"group",
"=",
"self",
".",
"create_group",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"h5path",
")",
")",
"except",
"ValueError",
":",
"group",
"=",
"self",
"[",
"os",
".",
"path",
".",
"dirname",
"(",
"h5path",
")",
"]",
"pass",
"# This pass has no effect?",
"new_dataset",
"=",
"group",
".",
"create_dataset",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"h5path",
")",
",",
"data",
"=",
"path_content",
"[",
"'DATASET'",
"]",
")",
"for",
"key",
",",
"value",
"in",
"path_content",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"!=",
"'DATASET'",
":",
"new_dataset",
".",
"attrs",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"try",
":",
"group",
"=",
"self",
".",
"create_group",
"(",
"h5path",
")",
"except",
"ValueError",
":",
"group",
"=",
"self",
"[",
"h5path",
"]",
"for",
"key",
",",
"value",
"in",
"path_content",
".",
"iteritems",
"(",
")",
":",
"group",
".",
"attrs",
"[",
"key",
"]",
"=",
"value"
] | Creates h5 file from dictionary containing the file structure.
Filedict is a regular dictionary whose keys are hdf5 paths and whose
values are dictionaries containing the metadata and datasets. Metadata
is given as normal key-value -pairs and dataset arrays are given using
'DATASET' key. Datasets must be numpy arrays.
Method can also be used to append existing hdf5 file. If the file is
opened in read only mode, method does nothing.
Examples
--------
Create newfile.h5 and fill it with data and metadata
>>> h5f = HiisiHDF('newfile.h5', 'w')
>>> filedict = {'/':{'attr1':'A'},
'/dataset1/data1/data':{'DATASET':np.zeros(100), 'quantity':'emptyarray'}, 'B':'b'}
>>> h5f.create_from_filedict(filedict) | [
"Creates",
"h5",
"file",
"from",
"dictionary",
"containing",
"the",
"file",
"structure",
".",
"Filedict",
"is",
"a",
"regular",
"dictinary",
"whose",
"keys",
"are",
"hdf5",
"paths",
"and",
"whose",
"values",
"are",
"dictinaries",
"containing",
"the",
"metadata",
"and",
"datasets",
".",
"Metadata",
"is",
"given",
"as",
"normal",
"key",
"-",
"value",
"-",
"pairs",
"and",
"dataset",
"arrays",
"are",
"given",
"using",
"DATASET",
"key",
".",
"Datasets",
"must",
"be",
"numpy",
"arrays",
".",
"Method",
"can",
"also",
"be",
"used",
"to",
"append",
"existing",
"hdf5",
"file",
".",
"If",
"the",
"file",
"is",
"opened",
"in",
"read",
"only",
"mode",
"method",
"does",
"nothing",
"."
] | python | train |
timothydmorton/isochrones | isochrones/yapsi/grid.py | https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/yapsi/grid.py#L45-L54 | def get_feh(cls, filename):
"""
example filename: yapsi_w_X0p602357_Z0p027643.dat
"""
X,Y,Z = cls._get_XYZ(filename)
Xsun = 0.703812
Zsun = 0.016188
return np.log10((Z/X) / (Zsun/Xsun)) | [
"def",
"get_feh",
"(",
"cls",
",",
"filename",
")",
":",
"X",
",",
"Y",
",",
"Z",
"=",
"cls",
".",
"_get_XYZ",
"(",
"filename",
")",
"Xsun",
"=",
"0.703812",
"Zsun",
"=",
"0.016188",
"return",
"np",
".",
"log10",
"(",
"(",
"Z",
"/",
"X",
")",
"/",
"(",
"Zsun",
"/",
"Xsun",
")",
")"
] | example filename: yapsi_w_X0p602357_Z0p027643.dat | [
"example",
"filename",
":",
"yapsi_w_X0p602357_Z0p027643",
".",
"dat"
] | python | train |
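A worked example of the [Fe/H] formula using the X and Z values encoded in the docstring's sample filename (yapsi_w_X0p602357_Z0p027643.dat):

    import numpy as np

    X, Z = 0.602357, 0.027643
    Xsun, Zsun = 0.703812, 0.016188
    feh = np.log10((Z / X) / (Zsun / Xsun))
    print(round(feh, 2))  # ~0.30 dex, i.e. about twice the solar Z/X ratio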
pyupio/pyup | pyup/providers/gitlab.py | https://github.com/pyupio/pyup/blob/b20fa88e03cfdf5dc409a9f00d27629188171c31/pyup/providers/gitlab.py#L131-L140 | def delete_branch(self, repo, branch, prefix):
"""
Deletes a branch.
:param repo: github.Repository
:param branch: string name of the branch to delete
"""
# make sure that the name of the branch begins with pyup.
assert branch.startswith(prefix)
obj = repo.branches.get(branch)
obj.delete() | [
"def",
"delete_branch",
"(",
"self",
",",
"repo",
",",
"branch",
",",
"prefix",
")",
":",
"# make sure that the name of the branch begins with pyup.",
"assert",
"branch",
".",
"startswith",
"(",
"prefix",
")",
"obj",
"=",
"repo",
".",
"branches",
".",
"get",
"(",
"branch",
")",
"obj",
".",
"delete",
"(",
")"
] | Deletes a branch.
:param repo: github.Repository
:param branch: string name of the branch to delete | [
"Deletes",
"a",
"branch",
".",
":",
"param",
"repo",
":",
"github",
".",
"Repository",
":",
"param",
"branch",
":",
"string",
"name",
"of",
"the",
"branch",
"to",
"delete"
] | python | train |
datastax/python-driver | cassandra/cqlengine/connection.py | https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/connection.py#L266-L296 | def set_session(s):
"""
Configures the default connection with a preexisting :class:`cassandra.cluster.Session`
Note: the mapper presently requires a Session :attr:`~.row_factory` set to ``dict_factory``.
This may be relaxed in the future
"""
try:
conn = get_connection()
except CQLEngineException:
# no default connection set; initialize one
register_connection('default', session=s, default=True)
conn = get_connection()
if conn.session:
log.warning("configuring new default connection for cqlengine when one was already set")
if s.row_factory is not dict_factory:
raise CQLEngineException("Failed to initialize: 'Session.row_factory' must be 'dict_factory'.")
conn.session = s
conn.cluster = s.cluster
# Set default keyspace from given session's keyspace
if conn.session.keyspace:
from cassandra.cqlengine import models
models.DEFAULT_KEYSPACE = conn.session.keyspace
conn.setup_session()
log.debug("cqlengine default connection initialized with %s", s) | [
"def",
"set_session",
"(",
"s",
")",
":",
"try",
":",
"conn",
"=",
"get_connection",
"(",
")",
"except",
"CQLEngineException",
":",
"# no default connection set; initalize one",
"register_connection",
"(",
"'default'",
",",
"session",
"=",
"s",
",",
"default",
"=",
"True",
")",
"conn",
"=",
"get_connection",
"(",
")",
"if",
"conn",
".",
"session",
":",
"log",
".",
"warning",
"(",
"\"configuring new default connection for cqlengine when one was already set\"",
")",
"if",
"s",
".",
"row_factory",
"is",
"not",
"dict_factory",
":",
"raise",
"CQLEngineException",
"(",
"\"Failed to initialize: 'Session.row_factory' must be 'dict_factory'.\"",
")",
"conn",
".",
"session",
"=",
"s",
"conn",
".",
"cluster",
"=",
"s",
".",
"cluster",
"# Set default keyspace from given session's keyspace",
"if",
"conn",
".",
"session",
".",
"keyspace",
":",
"from",
"cassandra",
".",
"cqlengine",
"import",
"models",
"models",
".",
"DEFAULT_KEYSPACE",
"=",
"conn",
".",
"session",
".",
"keyspace",
"conn",
".",
"setup_session",
"(",
")",
"log",
".",
"debug",
"(",
"\"cqlengine default connection initialized with %s\"",
",",
"s",
")"
] | Configures the default connection with a preexisting :class:`cassandra.cluster.Session`
Note: the mapper presently requires a Session :attr:`~.row_factory` set to ``dict_factory``.
This may be relaxed in the future | [
"Configures",
"the",
"default",
"connection",
"with",
"a",
"preexisting",
":",
"class",
":",
"cassandra",
".",
"cluster",
".",
"Session"
] | python | train |
Holzhaus/python-cmuclmtk | cmuclmtk/__init__.py | https://github.com/Holzhaus/python-cmuclmtk/blob/67a5c6713c497ca644ea1c697a70e8d930c9d4b4/cmuclmtk/__init__.py#L477-L490 | def text2vocab(text, output_file, text2wfreq_kwargs={}, wfreq2vocab_kwargs={}):
"""
Convenience function that uses text2wfreq and wfreq2vocab to create a vocabulary file from text.
"""
with tempfile.NamedTemporaryFile(suffix='.wfreq', delete=False) as f:
wfreq_file = f.name
try:
text2wfreq(text, wfreq_file, **text2wfreq_kwargs)
wfreq2vocab(wfreq_file, output_file, **wfreq2vocab_kwargs)
except ConversionError:
raise
finally:
os.remove(wfreq_file) | [
"def",
"text2vocab",
"(",
"text",
",",
"output_file",
",",
"text2wfreq_kwargs",
"=",
"{",
"}",
",",
"wfreq2vocab_kwargs",
"=",
"{",
"}",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.wfreq'",
",",
"delete",
"=",
"False",
")",
"as",
"f",
":",
"wfreq_file",
"=",
"f",
".",
"name",
"try",
":",
"text2wfreq",
"(",
"text",
",",
"wfreq_file",
",",
"*",
"*",
"text2wfreq_kwargs",
")",
"wfreq2vocab",
"(",
"wfreq_file",
",",
"output_file",
",",
"*",
"*",
"wfreq2vocab_kwargs",
")",
"except",
"ConversionError",
":",
"raise",
"finally",
":",
"os",
".",
"remove",
"(",
"wfreq_file",
")"
] | Convenience function that uses text2wfreq and wfreq2vocab to create a vocabulary file from text. | [
"Convenience",
"function",
"that",
"uses",
"text2wfreq",
"and",
"wfreq2vocab",
"to",
"create",
"a",
"vocabulary",
"file",
"from",
"text",
"."
] | python | train |
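A sketch of the intermediate-file plumbing used above, with hypothetical stand-ins for the two conversion steps; the try/finally guarantees the temporary .wfreq file is removed even when a step fails:

    import os, tempfile

    def step_one(text, path):      # hypothetical stand-in for text2wfreq
        with open(path, "w") as f:
            f.write(text.lower())

    def step_two(src, dst):        # hypothetical stand-in for wfreq2vocab
        with open(src) as f, open(dst, "w") as g:
            g.write(f.read())

    with tempfile.NamedTemporaryFile(suffix=".wfreq", delete=False) as f:
        wfreq_file = f.name
    try:
        step_one("Some Corpus", wfreq_file)
        step_two(wfreq_file, wfreq_file + ".vocab")
    finally:
        os.remove(wfreq_file)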
wandb/client | wandb/history.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/history.py#L84-L94 | def stream(self, name):
"""Stream can be used to record different time series:
run.history.stream("batch").add({"gradients": 1})
"""
if self.stream_name != "default":
raise ValueError("Nested streams aren't supported")
if self._streams.get(name) == None:
self._streams[name] = History(self.fname, out_dir=self.out_dir,
add_callback=self._add_callback, stream_name=name)
return self._streams[name] | [
"def",
"stream",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"stream_name",
"!=",
"\"default\"",
":",
"raise",
"ValueError",
"(",
"\"Nested streams aren't supported\"",
")",
"if",
"self",
".",
"_streams",
".",
"get",
"(",
"name",
")",
"==",
"None",
":",
"self",
".",
"_streams",
"[",
"name",
"]",
"=",
"History",
"(",
"self",
".",
"fname",
",",
"out_dir",
"=",
"self",
".",
"out_dir",
",",
"add_callback",
"=",
"self",
".",
"_add_callback",
",",
"stream_name",
"=",
"name",
")",
"return",
"self",
".",
"_streams",
"[",
"name",
"]"
] | Stream can be used to record different time series:
run.history.stream("batch").add({"gradients": 1}) | [
"Stream",
"can",
"be",
"used",
"to",
"record",
"different",
"time",
"series",
":"
] | python | train |
uber/doubles | doubles/method_double.py | https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/method_double.py#L133-L144 | def _verify_method(self):
"""Verify that a method may be doubled.
Verifies that the target object has a method matching the name the user is attempting to
double.
:raise: ``VerifyingDoubleError`` if no matching method is found.
"""
class_level = self._target.is_class_or_module()
verify_method(self._target, self._method_name, class_level=class_level) | [
"def",
"_verify_method",
"(",
"self",
")",
":",
"class_level",
"=",
"self",
".",
"_target",
".",
"is_class_or_module",
"(",
")",
"verify_method",
"(",
"self",
".",
"_target",
",",
"self",
".",
"_method_name",
",",
"class_level",
"=",
"class_level",
")"
] | Verify that a method may be doubled.
Verifies that the target object has a method matching the name the user is attempting to
double.
:raise: ``VerifyingDoubleError`` if no matching method is found. | [
"Verify",
"that",
"a",
"method",
"may",
"be",
"doubled",
"."
] | python | train |
ryanjdillon/pyotelem | pyotelem/physio_seal.py | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/physio_seal.py#L203-L254 | def lip2dens(perc_lipid, dens_lipid=0.9007, dens_prot=1.34, dens_water=0.994,
dens_ash=2.3):
'''Derive tissue density from lipids
The equation calculating animal density is from Biuw et al. (2003), and
default values for component densities are from human studies collected in
the book by Moore et al. (1963).
Args
----
perc_lipid: float or ndarray
Percent lipid of body composition
dens_lipid: float
Density of lipid in animal (Default 0.9007 g/cm^3)
dens_prot: float
Density of protein in animal (Default 1.34 g/cm^3)
dens_water: float
Density of water in animal (Default 0.994 g/cm^3)
dens_ash: float
Density of ash in animal (Default 2.3 g/cm^3)
Returns
-------
dens_gcm3: float or ndarray
Density of seal calculated from percent compositions and densities of
components from Moore et al. (1963)
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body
Cell Mass and Its Supporting Environment - The Composition in Health and
Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p.
ISBN:0-7216-6480-6
'''
import numpy
# Cast iterables to numpy array
if numpy.iterable(perc_lipid):
perc_lipid = numpy.asarray(perc_lipid)
perc_water, perc_protein, perc_ash = perc_bc_from_lipid(perc_lipid)
dens_gcm3 = (dens_lipid * (0.01 * perc_lipid)) + \
(dens_prot * (0.01 * perc_protein)) + \
(dens_water * (0.01 * perc_water)) + \
(dens_ash * (0.01 * perc_ash))
return dens_gcm3 | [
"def",
"lip2dens",
"(",
"perc_lipid",
",",
"dens_lipid",
"=",
"0.9007",
",",
"dens_prot",
"=",
"1.34",
",",
"dens_water",
"=",
"0.994",
",",
"dens_ash",
"=",
"2.3",
")",
":",
"import",
"numpy",
"# Cast iterables to numpy array",
"if",
"numpy",
".",
"iterable",
"(",
"perc_lipid",
")",
":",
"perc_lipid",
"=",
"numpy",
".",
"asarray",
"(",
"perc_lipid",
")",
"perc_water",
",",
"perc_protein",
",",
"perc_ash",
"=",
"perc_bc_from_lipid",
"(",
"perc_lipid",
")",
"dens_gcm3",
"=",
"(",
"dens_lipid",
"*",
"(",
"0.01",
"*",
"perc_lipid",
")",
")",
"+",
"(",
"dens_prot",
"*",
"(",
"0.01",
"*",
"perc_protein",
")",
")",
"+",
"(",
"dens_water",
"*",
"(",
"0.01",
"*",
"perc_water",
")",
")",
"+",
"(",
"dens_ash",
"*",
"(",
"0.01",
"*",
"perc_ash",
")",
")",
"return",
"dens_gcm3"
] | Derive tissue density from lipids
The equation calculating animal density is from Biuw et al. (2003), and
default values for component densities are from human studies collected in
the book by Moore et al. (1963).
Args
----
perc_lipid: float or ndarray
Percent lipid of body composition
dens_lipid: float
Density of lipid in animal (Default 0.9007 g/cm^3)
dens_prot: float
Density of protein in animal (Default 1.34 g/cm^3)
dens_water: float
Density of water in animal (Default 0.994 g/cm^3)
dens_ash: float
Density of ash in animal (Default 2.3 g/cm^3)
Returns
-------
dens_gcm3: float or ndarray
Density of seal calculated from percent compositions and densities of
components from Moore et al. (1963)
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body
Cell Mass and Its Supporting Environment - The Composition in Health and
Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p.
ISBN:0-7216-6480-6 | [
"Derive",
"tissue",
"density",
"from",
"lipids"
] | python | train |
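A worked example of the final weighted sum; the water/protein/ash split below is a hypothetical composition, since perc_bc_from_lipid() is internal to the package and not shown in this record:

    perc_lipid, perc_water, perc_protein, perc_ash = 30.0, 55.0, 12.0, 3.0
    dens_gcm3 = (0.9007 * 0.01 * perc_lipid     # lipid
                 + 1.34 * 0.01 * perc_protein   # protein
                 + 0.994 * 0.01 * perc_water    # water
                 + 2.3 * 0.01 * perc_ash)       # ash
    print(round(dens_gcm3, 4))  # 1.0467 g/cm^3 for this split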
taskcluster/taskcluster-client.py | taskcluster/aio/secrets.py | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/secrets.py#L56-L65 | async def remove(self, *args, **kwargs):
"""
Delete Secret
Delete the secret associated with some key.
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["remove"], *args, **kwargs) | [
"async",
"def",
"remove",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"await",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"remove\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Delete Secret
Delete the secret associated with some key.
This method is ``stable`` | [
"Delete",
"Secret"
] | python | train |
scanny/python-pptx | pptx/parts/presentation.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/parts/presentation.py#L62-L74 | def notes_master_part(self):
"""
Return the |NotesMasterPart| object for this presentation. If the
presentation does not have a notes master, one is created from
a default template. The same single instance is returned on each
call.
"""
try:
return self.part_related_by(RT.NOTES_MASTER)
except KeyError:
notes_master_part = NotesMasterPart.create_default(self.package)
self.relate_to(notes_master_part, RT.NOTES_MASTER)
return notes_master_part | [
"def",
"notes_master_part",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"part_related_by",
"(",
"RT",
".",
"NOTES_MASTER",
")",
"except",
"KeyError",
":",
"notes_master_part",
"=",
"NotesMasterPart",
".",
"create_default",
"(",
"self",
".",
"package",
")",
"self",
".",
"relate_to",
"(",
"notes_master_part",
",",
"RT",
".",
"NOTES_MASTER",
")",
"return",
"notes_master_part"
] | Return the |NotesMasterPart| object for this presentation. If the
presentation does not have a notes master, one is created from
a default template. The same single instance is returned on each
call. | [
"Return",
"the",
"|NotesMasterPart|",
"object",
"for",
"this",
"presentation",
".",
"If",
"the",
"presentation",
"does",
"not",
"have",
"a",
"notes",
"master",
"one",
"is",
"created",
"from",
"a",
"default",
"template",
".",
"The",
"same",
"single",
"instance",
"is",
"returned",
"on",
"each",
"call",
"."
] | python | train |
Becksteinlab/GromacsWrapper | gromacs/cbook.py | https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L1575-L1594 | def _translate_residue(self, selection, default_atomname='CA'):
"""Translate selection for a single res to make_ndx syntax."""
m = self.RESIDUE.match(selection)
if not m:
errmsg = "Selection {selection!r} is not valid.".format(**vars())
logger.error(errmsg)
raise ValueError(errmsg)
gmx_resid = self.gmx_resid(int(m.group('resid'))) # magic offset correction
residue = m.group('aa')
if len(residue) == 1:
gmx_resname = utilities.convert_aa_code(residue) # only works for AA
else:
gmx_resname = residue # use 3-letter for any resname
gmx_atomname = m.group('atom')
if gmx_atomname is None:
gmx_atomname = default_atomname
return {'resname':gmx_resname, 'resid':gmx_resid, 'atomname':gmx_atomname} | [
"def",
"_translate_residue",
"(",
"self",
",",
"selection",
",",
"default_atomname",
"=",
"'CA'",
")",
":",
"m",
"=",
"self",
".",
"RESIDUE",
".",
"match",
"(",
"selection",
")",
"if",
"not",
"m",
":",
"errmsg",
"=",
"\"Selection {selection!r} is not valid.\"",
".",
"format",
"(",
"*",
"*",
"vars",
"(",
")",
")",
"logger",
".",
"error",
"(",
"errmsg",
")",
"raise",
"ValueError",
"(",
"errmsg",
")",
"gmx_resid",
"=",
"self",
".",
"gmx_resid",
"(",
"int",
"(",
"m",
".",
"group",
"(",
"'resid'",
")",
")",
")",
"# magic offset correction",
"residue",
"=",
"m",
".",
"group",
"(",
"'aa'",
")",
"if",
"len",
"(",
"residue",
")",
"==",
"1",
":",
"gmx_resname",
"=",
"utilities",
".",
"convert_aa_code",
"(",
"residue",
")",
"# only works for AA",
"else",
":",
"gmx_resname",
"=",
"residue",
"# use 3-letter for any resname",
"gmx_atomname",
"=",
"m",
".",
"group",
"(",
"'atom'",
")",
"if",
"gmx_atomname",
"is",
"None",
":",
"gmx_atomname",
"=",
"default_atomname",
"return",
"{",
"'resname'",
":",
"gmx_resname",
",",
"'resid'",
":",
"gmx_resid",
",",
"'atomname'",
":",
"gmx_atomname",
"}"
] | Translate selection for a single res to make_ndx syntax. | [
"Translate",
"selection",
"for",
"a",
"single",
"res",
"to",
"make_ndx",
"syntax",
"."
] | python | valid |
llimllib/limbo | limbo/plugins/gif.py | https://github.com/llimllib/limbo/blob/f0980f20f733b670debcae454b167da32c57a044/limbo/plugins/gif.py#L19-L38 | def gif(search, unsafe=False):
"""given a search string, return a gif URL via google search"""
searchb = quote(search.encode("utf8"))
safe = "&safe=" if unsafe else "&safe=active"
searchurl = "https://www.google.com/search?tbs=itp:animated&tbm=isch&q={0}{1}" \
.format(searchb, safe)
# this is an old iphone user agent. Seems to make google return good results.
useragent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us)" \
" AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7"
result = requests.get(searchurl, headers={"User-agent": useragent}).text
gifs = list(map(unescape, re.findall(r"var u='(.*?)'", result)))
shuffle(gifs)
if gifs:
return gifs[0]
return "" | [
"def",
"gif",
"(",
"search",
",",
"unsafe",
"=",
"False",
")",
":",
"searchb",
"=",
"quote",
"(",
"search",
".",
"encode",
"(",
"\"utf8\"",
")",
")",
"safe",
"=",
"\"&safe=\"",
"if",
"unsafe",
"else",
"\"&safe=active\"",
"searchurl",
"=",
"\"https://www.google.com/search?tbs=itp:animated&tbm=isch&q={0}{1}\"",
".",
"format",
"(",
"searchb",
",",
"safe",
")",
"# this is an old iphone user agent. Seems to make google return good results.",
"useragent",
"=",
"\"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us)\"",
"\" AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7\"",
"result",
"=",
"requests",
".",
"get",
"(",
"searchurl",
",",
"headers",
"=",
"{",
"\"User-agent\"",
":",
"useragent",
"}",
")",
".",
"text",
"gifs",
"=",
"list",
"(",
"map",
"(",
"unescape",
",",
"re",
".",
"findall",
"(",
"r\"var u='(.*?)'\"",
",",
"result",
")",
")",
")",
"shuffle",
"(",
"gifs",
")",
"if",
"gifs",
":",
"return",
"gifs",
"[",
"0",
"]",
"return",
"\"\""
] | given a search string, return a gif URL via google search | [
"given",
"a",
"search",
"string",
"return",
"a",
"gif",
"URL",
"via",
"google",
"search"
] | python | train |
mdsol/rwslib | rwslib/builders/modm.py | https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/modm.py#L122-L131 | def mixin_params(self, params):
"""
Merge in the MdsolAttribute for the passed parameter
:param dict params: dictionary of object parameters
"""
if not isinstance(params, (dict,)):
raise AttributeError("Cannot mixin to object of type {}".format(type(params)))
for attribute in self.attributes:
params.update({attribute.tag: attribute.value}) | [
"def",
"mixin_params",
"(",
"self",
",",
"params",
")",
":",
"if",
"not",
"isinstance",
"(",
"params",
",",
"(",
"dict",
",",
")",
")",
":",
"raise",
"AttributeError",
"(",
"\"Cannot mixin to object of type {}\"",
".",
"format",
"(",
"type",
"(",
"params",
")",
")",
")",
"for",
"attribute",
"in",
"self",
".",
"attributes",
":",
"params",
".",
"update",
"(",
"{",
"attribute",
".",
"tag",
":",
"attribute",
".",
"value",
"}",
")"
] | Merge in the MdsolAttribute for the passed parameter
:param dict params: dictionary of object parameters | [
"Merge",
"in",
"the",
"MdsolAttribute",
"for",
"the",
"passed",
"parameter"
] | python | train |
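A minimal sketch of the tag/value merge, with a namedtuple standing in for MdsolAttribute; the tag names and values are illustrative:

    from collections import namedtuple

    Attr = namedtuple("Attr", ["tag", "value"])
    attributes = [Attr("mdsol:Override", "Yes"), Attr("mdsol:SourceID", "42")]
    params = {"OID": "IT.1"}
    for attribute in attributes:
        params.update({attribute.tag: attribute.value})
    # params now carries the two mdsol:* attributes alongside OID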
csparpa/pyowm | pyowm/weatherapi25/historian.py | https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/historian.py#L102-L123 | def max_temperature(self, unit='kelvin'):
"""Returns a tuple containing the max value in the temperature
series preceded by its timestamp
:param unit: the unit of measure for the temperature values. May be
among: '*kelvin*' (default), '*celsius*' or '*fahrenheit*'
:type unit: str
:returns: a tuple
:raises: ValueError when invalid values are provided for the unit of
measure or the measurement series is empty
"""
if unit not in ('kelvin', 'celsius', 'fahrenheit'):
raise ValueError("Invalid value for parameter 'unit'")
maximum = max(self._purge_none_samples(self.temperature_series()),
key=itemgetter(1))
if unit == 'kelvin':
result = maximum
if unit == 'celsius':
result = (maximum[0], temputils.kelvin_to_celsius(maximum[1]))
if unit == 'fahrenheit':
result = (maximum[0], temputils.kelvin_to_fahrenheit(maximum[1]))
return result | [
"def",
"max_temperature",
"(",
"self",
",",
"unit",
"=",
"'kelvin'",
")",
":",
"if",
"unit",
"not",
"in",
"(",
"'kelvin'",
",",
"'celsius'",
",",
"'fahrenheit'",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for parameter 'unit'\"",
")",
"maximum",
"=",
"max",
"(",
"self",
".",
"_purge_none_samples",
"(",
"self",
".",
"temperature_series",
"(",
")",
")",
",",
"key",
"=",
"itemgetter",
"(",
"1",
")",
")",
"if",
"unit",
"==",
"'kelvin'",
":",
"result",
"=",
"maximum",
"if",
"unit",
"==",
"'celsius'",
":",
"result",
"=",
"(",
"maximum",
"[",
"0",
"]",
",",
"temputils",
".",
"kelvin_to_celsius",
"(",
"maximum",
"[",
"1",
"]",
")",
")",
"if",
"unit",
"==",
"'fahrenheit'",
":",
"result",
"=",
"(",
"maximum",
"[",
"0",
"]",
",",
"temputils",
".",
"kelvin_to_fahrenheit",
"(",
"maximum",
"[",
"1",
"]",
")",
")",
"return",
"result"
] | Returns a tuple containing the max value in the temperature
series preceded by its timestamp
:param unit: the unit of measure for the temperature values. May be
among: '*kelvin*' (default), '*celsius*' or '*fahrenheit*'
:type unit: str
:returns: a tuple
:raises: ValueError when invalid values are provided for the unit of
measure or the measurement series is empty | [
"Returns",
"a",
"tuple",
"containing",
"the",
"max",
"value",
"in",
"the",
"temperature",
"series",
"preceeded",
"by",
"its",
"timestamp",
":",
"param",
"unit",
":",
"the",
"unit",
"of",
"measure",
"for",
"the",
"temperature",
"values",
".",
"May",
"be",
"among",
":",
"*",
"kelvin",
"*",
"(",
"default",
")",
"*",
"celsius",
"*",
"or",
"*",
"fahrenheit",
"*",
":",
"type",
"unit",
":",
"str",
":",
"returns",
":",
"a",
"tuple",
":",
"raises",
":",
"ValueError",
"when",
"invalid",
"values",
"are",
"provided",
"for",
"the",
"unit",
"of",
"measure",
"or",
"the",
"measurement",
"series",
"is",
"empty"
] | python | train |
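A standalone sketch of the core logic: pick the hottest (timestamp, kelvin) sample with itemgetter, then convert units; the series values are illustrative:

    from operator import itemgetter

    series = [(1545730000, 293.15), (1545733600, 296.65), (1545737200, 295.15)]
    ts, kelvin = max(series, key=itemgetter(1))
    celsius = kelvin - 273.15
    print(ts, round(celsius, 2))  # 1545733600 23.5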
F-Secure/see | see/environment.py | https://github.com/F-Secure/see/blob/3e053e52a45229f96a12db9e98caf7fb3880e811/see/environment.py#L102-L108 | def load_configuration(configuration):
"""Returns a dictionary, accepts a dictionary or a path to a JSON file."""
if isinstance(configuration, dict):
return configuration
else:
with open(configuration) as configfile:
return json.load(configfile) | [
"def",
"load_configuration",
"(",
"configuration",
")",
":",
"if",
"isinstance",
"(",
"configuration",
",",
"dict",
")",
":",
"return",
"configuration",
"else",
":",
"with",
"open",
"(",
"configuration",
")",
"as",
"configfile",
":",
"return",
"json",
".",
"load",
"(",
"configfile",
")"
] | Returns a dictionary, accepts a dictionary or a path to a JSON file. | [
"Returns",
"a",
"dictionary",
"accepts",
"a",
"dictionary",
"or",
"a",
"path",
"to",
"a",
"JSON",
"file",
"."
] | python | train |
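A usage sketch, assuming load_configuration is in scope: a dict passes through unchanged, while a path is read as JSON:

    import json, os, tempfile

    assert load_configuration({"disk": "raw"}) == {"disk": "raw"}

    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump({"disk": "raw"}, f)
    assert load_configuration(f.name) == {"disk": "raw"}
    os.remove(f.name)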
KelSolaar/Umbra | umbra/components/factory/script_editor/nodes.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/nodes.py#L107-L117 | def editor(self, value):
"""
Setter for **self.__editor** attribute.
:param value: Attribute value.
:type value: Editor
"""
if value is not None:
assert type(value) is Editor, "'{0}' attribute: '{1}' type is not 'Editor'!".format("editor", value)
self.__editor = value | [
"def",
"editor",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"is",
"Editor",
",",
"\"'{0}' attribute: '{1}' type is not 'Editor'!\"",
".",
"format",
"(",
"\"editor\"",
",",
"value",
")",
"self",
".",
"__editor",
"=",
"value"
] | Setter for **self.__editor** attribute.
:param value: Attribute value.
:type value: Editor | [
"Setter",
"for",
"**",
"self",
".",
"__editor",
"**",
"attribute",
"."
] | python | train |
jmgilman/Neolib | neolib/pyamf/codec.py | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/codec.py#L423-L478 | def getTypeFunc(self, data):
"""
Returns a callable that will encode C{data} to C{self.stream}. If
C{data} is unencodable, then C{None} is returned.
"""
if data is None:
return self.writeNull
t = type(data)
# try types that we know will work
if t is str or issubclass(t, str):
return self.writeBytes
if t is unicode or issubclass(t, unicode):
return self.writeString
elif t is bool:
return self.writeBoolean
elif t is float:
return self.writeNumber
elif t in python.int_types:
return self.writeNumber
elif t in (list, tuple):
return self.writeList
elif isinstance(data, (list, tuple)):
return self.writeSequence
elif t is types.GeneratorType:
return self.writeGenerator
elif t is pyamf.UndefinedType:
return self.writeUndefined
elif t in (datetime.date, datetime.datetime, datetime.time):
return self.writeDate
elif xml.is_xml(data):
return self.writeXML
# check for any overridden types
for type_, func in pyamf.TYPE_MAP.iteritems():
try:
if isinstance(data, type_):
return _CustomTypeFunc(self, func)
except TypeError:
if python.callable(type_) and type_(data):
return _CustomTypeFunc(self, func)
# now try some types that won't encode
if t in python.class_types:
# can't encode classes
return None
elif isinstance(data, python.func_types):
# can't encode code objects
return None
elif isinstance(t, types.ModuleType):
# cannot encode module objects
return None
# well, we tried ..
return self.writeObject | [
"def",
"getTypeFunc",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
"is",
"None",
":",
"return",
"self",
".",
"writeNull",
"t",
"=",
"type",
"(",
"data",
")",
"# try types that we know will work",
"if",
"t",
"is",
"str",
"or",
"issubclass",
"(",
"t",
",",
"str",
")",
":",
"return",
"self",
".",
"writeBytes",
"if",
"t",
"is",
"unicode",
"or",
"issubclass",
"(",
"t",
",",
"unicode",
")",
":",
"return",
"self",
".",
"writeString",
"elif",
"t",
"is",
"bool",
":",
"return",
"self",
".",
"writeBoolean",
"elif",
"t",
"is",
"float",
":",
"return",
"self",
".",
"writeNumber",
"elif",
"t",
"in",
"python",
".",
"int_types",
":",
"return",
"self",
".",
"writeNumber",
"elif",
"t",
"in",
"(",
"list",
",",
"tuple",
")",
":",
"return",
"self",
".",
"writeList",
"elif",
"isinstance",
"(",
"data",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"self",
".",
"writeSequence",
"elif",
"t",
"is",
"types",
".",
"GeneratorType",
":",
"return",
"self",
".",
"writeGenerator",
"elif",
"t",
"is",
"pyamf",
".",
"UndefinedType",
":",
"return",
"self",
".",
"writeUndefined",
"elif",
"t",
"in",
"(",
"datetime",
".",
"date",
",",
"datetime",
".",
"datetime",
",",
"datetime",
".",
"time",
")",
":",
"return",
"self",
".",
"writeDate",
"elif",
"xml",
".",
"is_xml",
"(",
"data",
")",
":",
"return",
"self",
".",
"writeXML",
"# check for any overridden types",
"for",
"type_",
",",
"func",
"in",
"pyamf",
".",
"TYPE_MAP",
".",
"iteritems",
"(",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"data",
",",
"type_",
")",
":",
"return",
"_CustomTypeFunc",
"(",
"self",
",",
"func",
")",
"except",
"TypeError",
":",
"if",
"python",
".",
"callable",
"(",
"type_",
")",
"and",
"type_",
"(",
"data",
")",
":",
"return",
"_CustomTypeFunc",
"(",
"self",
",",
"func",
")",
"# now try some types that won't encode",
"if",
"t",
"in",
"python",
".",
"class_types",
":",
"# can't encode classes",
"return",
"None",
"elif",
"isinstance",
"(",
"data",
",",
"python",
".",
"func_types",
")",
":",
"# can't encode code objects",
"return",
"None",
"elif",
"isinstance",
"(",
"t",
",",
"types",
".",
"ModuleType",
")",
":",
"# cannot encode module objects",
"return",
"None",
"# well, we tried ..",
"return",
"self",
".",
"writeObject"
] | Returns a callable that will encode C{data} to C{self.stream}. If
C{data} is unencodable, then C{None} is returned. | [
"Returns",
"a",
"callable",
"that",
"will",
"encode",
"C",
"{",
"data",
"}",
"to",
"C",
"{",
"self",
".",
"stream",
"}",
".",
"If",
"C",
"{",
"data",
"}",
"is",
"unencodable",
"then",
"C",
"{",
"None",
"}",
"is",
"returned",
"."
] | python | train |
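A generic sketch of the same dispatch idea, stripped of the AMF specifics: probe exact and known types first (bool before the numeric check, since bool subclasses int), then fall back to a catch-all; the handlers are illustrative:

    import datetime

    def get_type_func(data):
        if data is None:
            return lambda d: "null"
        t = type(data)
        if issubclass(t, str):
            return lambda d: "string"
        if t is bool:                  # must precede the numeric check
            return lambda d: "boolean"
        if t in (int, float):
            return lambda d: "number"
        if t in (datetime.date, datetime.datetime, datetime.time):
            return lambda d: "date"
        return lambda d: "object"      # catch-all, like writeObject above

    print(get_type_func(3.5)(3.5))  # number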
markchil/gptools | gptools/utils.py | https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L2032-L2439 | def plot_sampler(
sampler, suptitle=None, labels=None, bins=50,
plot_samples=False, plot_hist=True, plot_chains=True,
burn=0, chain_mask=None, temp_idx=0, weights=None, cutoff_weight=None,
cmap='gray_r', hist_color='k', chain_alpha=0.1,
points=None, covs=None, colors=None, ci=[0.95],
max_hist_ticks=None, max_chain_ticks=6,
label_chain_y=False, hide_chain_yticklabels=False, chain_ytick_pad=2.0,
label_fontsize=None, ticklabel_fontsize=None, chain_label_fontsize=None,
chain_ticklabel_fontsize=None, xticklabel_angle=90.0,
bottom_sep=0.075, suptitle_space=0.1, fixed_height=None,
fixed_width=None, l=0.1, r=0.9, t1=None, b1=None, t2=0.2, b2=0.1,
ax_space=0.1
):
"""Plot the results of MCMC sampler (posterior and chains).
Loosely based on triangle.py. Provides extensive options to format the plot.
Parameters
----------
sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
The sampler to plot the chains/marginals of. Can also be an array of
samples which matches the shape of the `chain` attribute that would be
present in a :py:class:`emcee.Sampler` instance.
suptitle : str, optional
The figure title to place at the top. Default is no title.
labels : list of str, optional
The labels to use for each of the free parameters. Default is to leave
the axes unlabeled.
bins : int, optional
Number of bins to use for the histograms. Default is 50.
plot_samples : bool, optional
If True, the samples are plotted as individual points. Default is False.
plot_hist : bool, optional
If True, histograms are plotted. Default is True.
plot_chains : bool, optional
If True, plot the sampler chains at the bottom. Default is True.
burn : int, optional
The number of samples to burn before making the marginal histograms.
Default is zero (use all samples).
chain_mask : (index) array, optional
Mask identifying the chains to keep before plotting, in case there are
bad chains. Default is to use all chains.
temp_idx : int, optional
Index of the temperature to plot when plotting a
:py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior).
weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
The weight for each sample. This is useful for post-processing the
output from MultiNest sampling, for instance. Default is to not weight
the samples.
cutoff_weight : float, optional
If `weights` and `cutoff_weight` are present, points with
`weights < cutoff_weight * weights.max()` will be excluded. Default is
to plot all points.
cmap : str, optional
The colormap to use for the histograms. Default is 'gray_r'.
hist_color : str, optional
The color to use for the univariate histograms. Default is 'k'.
chain_alpha : float, optional
The transparency to use for the plots of the individual chains. Setting
this to something low lets you better visualize what is going on.
Default is 0.1.
points : array, (`D`,) or (`N`, `D`), optional
Array of point(s) to plot onto each marginal and chain. Default is None.
covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional
Covariance matrix or array of covariance matrices to plot onto each
marginal. If you do not want to plot a covariance matrix for a specific
point, set its corresponding entry to `None`. Default is to not plot
confidence ellipses for any points.
colors : array of str, (`N`,), optional
The colors to use for the points in `points`. Default is to use the
standard matplotlib RGBCMYK cycle.
ci : array, (`num_ci`,), optional
List of confidence intervals to plot for each non-`None` entry in `covs`.
Default is 0.95 (just plot the 95 percent confidence interval).
max_hist_ticks : int, optional
The maximum number of ticks for the histogram plots. Default is None
(no limit).
max_chain_ticks : int, optional
The maximum number of y-axis ticks for the chain plots. Default is 6.
label_chain_y : bool, optional
If True, the chain plots will have y axis labels. Default is False.
hide_chain_yticklabels : bool, optional
If True, hide the y axis tick labels for the chain plots. Default is
False (show y tick labels).
chain_ytick_pad : float, optional
The padding (in points) between the y-axis tick labels and the axis for
the chain plots. Default is 2.0.
label_fontsize : float, optional
The font size (in points) to use for the axis labels. Default is
`axes.labelsize`.
ticklabel_fontsize : float, optional
The font size (in points) to use for the axis tick labels. Default is
`xtick.labelsize`.
chain_label_fontsize : float, optional
The font size (in points) to use for the labels of the chain axes.
Default is `axes.labelsize`.
chain_ticklabel_fontsize : float, optional
The font size (in points) to use for the chain axis tick labels. Default
is `xtick.labelsize`.
xticklabel_angle : float, optional
The angle to rotate the x tick labels, in degrees. Default is 90.
bottom_sep : float, optional
The separation (in relative figure units) between the chains and the
marginals. Default is 0.075.
suptitle_space : float, optional
The amount of space (in relative figure units) to leave for a figure
title. Default is 0.1.
fixed_height : float, optional
The desired figure height (in inches). Default is to automatically
adjust based on `fixed_width` to make the subplots square.
fixed_width : float, optional
The desired figure width (in inches). Default is `figure.figsize[0]`.
l : float, optional
The location (in relative figure units) of the left margin. Default is
0.1.
r : float, optional
The location (in relative figure units) of the right margin. Default is
0.9.
t1 : float, optional
The location (in relative figure units) of the top of the grid of
histograms. Overrides `suptitle_space` if present.
b1 : float, optional
The location (in relative figure units) of the bottom of the grid of
histograms. Overrides `bottom_sep` if present. Defaults to 0.1 if
`plot_chains` is False.
t2 : float, optional
The location (in relative figure units) of the top of the grid of chain
plots. Default is 0.2.
b2 : float, optional
The location (in relative figure units) of the bottom of the grid of
chain plots. Default is 0.1.
ax_space : float, optional
The `w_space` and `h_space` to use (in relative figure units). Default
is 0.1.
"""
masked_weights = None
if points is not None:
points = scipy.atleast_2d(points)
if covs is not None and len(covs) != len(points):
raise ValueError(
"If covariance matrices are provided, len(covs) must equal len(points)!"
)
elif covs is None:
covs = [None,] * len(points)
if colors is None:
c_cycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
colors = [c_cycle.next() for p in points]
# Create axes:
try:
k = sampler.flatchain.shape[-1]
except AttributeError:
# Assumes array input is only case where there is no "flatchain" attribute.
k = sampler.shape[-1]
if labels is None:
labels = [''] * k
# Set up geometry:
# plot_chains =
# True: False:
# +-----------+ +-----------+
# | +-------+ | | +-------+ |
# | | | | | | | |
# | | | | | | | |
# | | | | | | | |
# | +-------+ | | +-------+ |
# | +-------+ | +-----------+
# | | | |
# | +-------+ |
# +-----------+
# We retain support for the original suptitle_space keyword, but can
# override with t1 as needed:
if t1 is None:
t1 = 1 - suptitle_space
# We retain support for the original bottom_sep keyword, but can override
# with b1 as needed:
if b1 is None:
if plot_chains:
b1 = t2 + bottom_sep
else:
b1 = 0.1
if fixed_height is None and fixed_width is None:
# Default: use matplotlib's default width, handle remaining parameters
# with the fixed width case below:
fixed_width = matplotlib.rcParams['figure.figsize'][0]
if fixed_height is None and fixed_width is not None:
# Only width specified, compute height to yield square histograms:
fixed_height = fixed_width * (r - l) / (t1 - b1)
elif fixed_height is not None and fixed_width is None:
# Only height specified, compute width to yield square histograms
fixed_width = fixed_height * (t1 - b1) / (r - l)
# Otherwise width and height are fixed, and we may not have square
# histograms, at the user's discretion.
wspace = ax_space
hspace = ax_space
# gs1 is the histograms, gs2 is the chains:
f = plt.figure(figsize=(fixed_width, fixed_height))
gs1 = mplgs.GridSpec(k, k)
gs1.update(bottom=b1, top=t1, left=l, right=r, wspace=wspace, hspace=hspace)
if plot_chains:
gs2 = mplgs.GridSpec(1, k)
gs2.update(bottom=b2, top=t2, left=l, right=r, wspace=wspace, hspace=hspace)
axes = []
# j is the row, i is the column.
for j in xrange(0, k + int(plot_chains)):
row = []
for i in xrange(0, k):
if i > j:
row.append(None)
else:
sharey = row[-1] if i > 0 and i < j and j < k else None
sharex = axes[-1][i] if j > i and j < k else \
(row[-1] if i > 0 and j == k else None)
gs = gs1[j, i] if j < k else gs2[:, i]
row.append(f.add_subplot(gs, sharey=sharey, sharex=sharex))
if j < k and ticklabel_fontsize is not None:
row[-1].tick_params(labelsize=ticklabel_fontsize)
elif j >= k and chain_ticklabel_fontsize is not None:
row[-1].tick_params(labelsize=chain_ticklabel_fontsize)
axes.append(row)
axes = scipy.asarray(axes)
# Update axes with the data:
if isinstance(sampler, emcee.EnsembleSampler):
if chain_mask is None:
chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
flat_trace = sampler.chain[chain_mask, burn:, :]
flat_trace = flat_trace.reshape((-1, k))
elif isinstance(sampler, emcee.PTSampler):
if chain_mask is None:
chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
flat_trace = flat_trace.reshape((-1, k))
elif isinstance(sampler, scipy.ndarray):
if sampler.ndim == 4:
if chain_mask is None:
chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
flat_trace = sampler[temp_idx, chain_mask, burn:, :]
flat_trace = flat_trace.reshape((-1, k))
if weights is not None:
weights = weights[temp_idx, chain_mask, burn:]
weights = weights.ravel()
elif sampler.ndim == 3:
if chain_mask is None:
chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
flat_trace = sampler[chain_mask, burn:, :]
flat_trace = flat_trace.reshape((-1, k))
if weights is not None:
weights = weights[chain_mask, burn:]
weights = weights.ravel()
elif sampler.ndim == 2:
flat_trace = sampler[burn:, :]
flat_trace = flat_trace.reshape((-1, k))
if weights is not None:
weights = weights[burn:]
weights = weights.ravel()
if cutoff_weight is not None and weights is not None:
mask = weights >= cutoff_weight * weights.max()
flat_trace = flat_trace[mask, :]
masked_weights = weights[mask]
else:
masked_weights = weights
else:
raise ValueError("Unknown sampler class: %s" % (type(sampler),))
# j is the row, i is the column.
for i in xrange(0, k):
axes[i, i].clear()
if plot_hist:
axes[i, i].hist(flat_trace[:, i], bins=bins, color=hist_color, weights=masked_weights, normed=True, histtype='stepfilled')
if plot_samples:
axes[i, i].plot(flat_trace[:, i], scipy.zeros_like(flat_trace[:, i]), ',', alpha=0.1)
if points is not None:
# axvline can only take a scalar x, so we have to loop:
for p, c, cov in zip(points, colors, covs):
axes[i, i].axvline(x=p[i], linewidth=3, color=c)
if cov is not None:
xlim = axes[i, i].get_xlim()
i_grid = scipy.linspace(xlim[0], xlim[1], 100)
axes[i, i].plot(
i_grid,
scipy.stats.norm.pdf(
i_grid,
loc=p[i],
scale=scipy.sqrt(cov[i, i])
),
c,
linewidth=3.0
)
axes[i, i].set_xlim(xlim)
if i == k - 1:
axes[i, i].set_xlabel(labels[i], fontsize=label_fontsize)
plt.setp(axes[i, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
if i < k - 1:
plt.setp(axes[i, i].get_xticklabels(), visible=False)
plt.setp(axes[i, i].get_yticklabels(), visible=False)
for j in xrange(i + 1, k):
axes[j, i].clear()
if plot_hist:
ct, x, y, im = axes[j, i].hist2d(
flat_trace[:, i],
flat_trace[:, j],
bins=bins,
cmap=cmap,
weights=masked_weights
)
if plot_samples:
axes[j, i].plot(flat_trace[:, i], flat_trace[:, j], ',', alpha=0.1)
if points is not None:
for p, c, cov in zip(points, colors, covs):
axes[j, i].plot(p[i], p[j], 'o', color=c)
if cov is not None:
Sigma = scipy.asarray([[cov[i, i], cov[i, j]], [cov[j, i], cov[j, j]]], dtype=float)
lam, v = scipy.linalg.eigh(Sigma)
chi2 = [-scipy.log(1.0 - cival) * 2.0 for cival in ci]
a = [2.0 * scipy.sqrt(chi2val * lam[-1]) for chi2val in chi2]
b = [2.0 * scipy.sqrt(chi2val * lam[-2]) for chi2val in chi2]
ang = scipy.arctan2(v[1, -1], v[0, -1])
for aval, bval in zip(a, b):
ell = mplp.Ellipse(
[p[i], p[j]],
aval,
bval,
angle=scipy.degrees(ang),
facecolor='none',
edgecolor=c,
linewidth=3
)
axes[j, i].add_artist(ell)
# axes[j, i].plot(points[i], points[j], 'o')
# xmid = 0.5 * (x[1:] + x[:-1])
# ymid = 0.5 * (y[1:] + y[:-1])
# axes[j, i].contour(xmid, ymid, ct.T, colors='k')
if j < k - 1:
plt.setp(axes[j, i].get_xticklabels(), visible=False)
if i != 0:
plt.setp(axes[j, i].get_yticklabels(), visible=False)
if i == 0:
axes[j, i].set_ylabel(labels[j], fontsize=label_fontsize)
if j == k - 1:
axes[j, i].set_xlabel(labels[i], fontsize=label_fontsize)
plt.setp(axes[j, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
if plot_chains:
axes[-1, i].clear()
if isinstance(sampler, emcee.EnsembleSampler):
axes[-1, i].plot(sampler.chain[:, :, i].T, alpha=chain_alpha)
elif isinstance(sampler, emcee.PTSampler):
axes[-1, i].plot(sampler.chain[temp_idx, :, :, i].T, alpha=chain_alpha)
else:
if sampler.ndim == 4:
axes[-1, i].plot(sampler[temp_idx, :, :, i].T, alpha=chain_alpha)
elif sampler.ndim == 3:
axes[-1, i].plot(sampler[:, :, i].T, alpha=chain_alpha)
elif sampler.ndim == 2:
axes[-1, i].plot(sampler[:, i].T, alpha=chain_alpha)
# Plot the weights on top of the chains:
if weights is not None:
a_wt = axes[-1, i].twinx()
a_wt.plot(weights, alpha=chain_alpha, linestyle='--', color='r')
plt.setp(a_wt.yaxis.get_majorticklabels(), visible=False)
a_wt.yaxis.set_ticks_position('none')
# Plot the cutoff weight as a horizontal line and the first sample
# which is included as a vertical bar. Note that this won't be quite
# the right behavior if the weights are not roughly monotonic.
if cutoff_weight is not None:
a_wt.axhline(cutoff_weight * weights.max(), linestyle='-', color='r')
wi, = scipy.where(weights >= cutoff_weight * weights.max())
a_wt.axvline(wi[0], linestyle='-', color='r')
if burn > 0:
axes[-1, i].axvline(burn, color='r', linewidth=3)
if points is not None:
for p, c in zip(points, colors):
axes[-1, i].axhline(y=p[i], linewidth=3, color=c)
# Reset the xlim since it seems to get messed up:
axes[-1, i].set_xlim(left=0)
# try:
# [axes[-1, i].axhline(y=pt, linewidth=3) for pt in points[i]]
# except TypeError:
# axes[-1, i].axhline(y=points[i], linewidth=3)
if label_chain_y:
axes[-1, i].set_ylabel(labels[i], fontsize=chain_label_fontsize)
axes[-1, i].set_xlabel('step', fontsize=chain_label_fontsize)
plt.setp(axes[-1, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
for tick in axes[-1, i].get_yaxis().get_major_ticks():
tick.set_pad(chain_ytick_pad)
tick.label1 = tick._get_text1()
for i in xrange(0, k):
if max_hist_ticks is not None:
axes[k - 1, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1))
axes[i, 0].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1))
if plot_chains and max_chain_ticks is not None:
axes[k, i].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1))
axes[k, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1))
if plot_chains and hide_chain_yticklabels:
plt.setp(axes[k, i].get_yticklabels(), visible=False)
if suptitle is not None:
f.suptitle(suptitle)
f.canvas.draw()
return f | [
"def",
"plot_sampler",
"(",
"sampler",
",",
"suptitle",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"bins",
"=",
"50",
",",
"plot_samples",
"=",
"False",
",",
"plot_hist",
"=",
"True",
",",
"plot_chains",
"=",
"True",
",",
"burn",
"=",
"0",
",",
"chain_mask",
"=",
"None",
",",
"temp_idx",
"=",
"0",
",",
"weights",
"=",
"None",
",",
"cutoff_weight",
"=",
"None",
",",
"cmap",
"=",
"'gray_r'",
",",
"hist_color",
"=",
"'k'",
",",
"chain_alpha",
"=",
"0.1",
",",
"points",
"=",
"None",
",",
"covs",
"=",
"None",
",",
"colors",
"=",
"None",
",",
"ci",
"=",
"[",
"0.95",
"]",
",",
"max_hist_ticks",
"=",
"None",
",",
"max_chain_ticks",
"=",
"6",
",",
"label_chain_y",
"=",
"False",
",",
"hide_chain_yticklabels",
"=",
"False",
",",
"chain_ytick_pad",
"=",
"2.0",
",",
"label_fontsize",
"=",
"None",
",",
"ticklabel_fontsize",
"=",
"None",
",",
"chain_label_fontsize",
"=",
"None",
",",
"chain_ticklabel_fontsize",
"=",
"None",
",",
"xticklabel_angle",
"=",
"90.0",
",",
"bottom_sep",
"=",
"0.075",
",",
"suptitle_space",
"=",
"0.1",
",",
"fixed_height",
"=",
"None",
",",
"fixed_width",
"=",
"None",
",",
"l",
"=",
"0.1",
",",
"r",
"=",
"0.9",
",",
"t1",
"=",
"None",
",",
"b1",
"=",
"None",
",",
"t2",
"=",
"0.2",
",",
"b2",
"=",
"0.1",
",",
"ax_space",
"=",
"0.1",
")",
":",
"masked_weights",
"=",
"None",
"if",
"points",
"is",
"not",
"None",
":",
"points",
"=",
"scipy",
".",
"atleast_2d",
"(",
"points",
")",
"if",
"covs",
"is",
"not",
"None",
"and",
"len",
"(",
"covs",
")",
"!=",
"len",
"(",
"points",
")",
":",
"raise",
"ValueError",
"(",
"\"If covariance matrices are provided, len(covs) must equal len(points)!\"",
")",
"elif",
"covs",
"is",
"None",
":",
"covs",
"=",
"[",
"None",
",",
"]",
"*",
"len",
"(",
"points",
")",
"if",
"colors",
"is",
"None",
":",
"c_cycle",
"=",
"itertools",
".",
"cycle",
"(",
"[",
"'b'",
",",
"'g'",
",",
"'r'",
",",
"'c'",
",",
"'m'",
",",
"'y'",
",",
"'k'",
"]",
")",
"colors",
"=",
"[",
"c_cycle",
".",
"next",
"(",
")",
"for",
"p",
"in",
"points",
"]",
"# Create axes:",
"try",
":",
"k",
"=",
"sampler",
".",
"flatchain",
".",
"shape",
"[",
"-",
"1",
"]",
"except",
"AttributeError",
":",
"# Assumes array input is only case where there is no \"flatchain\" attribute.",
"k",
"=",
"sampler",
".",
"shape",
"[",
"-",
"1",
"]",
"if",
"labels",
"is",
"None",
":",
"labels",
"=",
"[",
"''",
"]",
"*",
"k",
"# Set up geometry:",
"# plot_chains =",
"# True: False:",
"# +-----------+ +-----------+",
"# | +-------+ | | +-------+ |",
"# | | | | | | | |",
"# | | | | | | | |",
"# | | | | | | | |",
"# | +-------+ | | +-------+ |",
"# | +-------+ | +-----------+",
"# | | | |",
"# | +-------+ |",
"# +-----------+",
"# We retain support for the original suptitle_space keyword, but can",
"# override with t1 as needed:",
"if",
"t1",
"is",
"None",
":",
"t1",
"=",
"1",
"-",
"suptitle_space",
"# We retain support for the original bottom_sep keyword, but can override",
"# with b1 as needed:",
"if",
"b1",
"is",
"None",
":",
"if",
"plot_chains",
":",
"b1",
"=",
"t2",
"+",
"bottom_sep",
"else",
":",
"b1",
"=",
"0.1",
"if",
"fixed_height",
"is",
"None",
"and",
"fixed_width",
"is",
"None",
":",
"# Default: use matplotlib's default width, handle remaining parameters",
"# with the fixed width case below:",
"fixed_width",
"=",
"matplotlib",
".",
"rcParams",
"[",
"'figure.figsize'",
"]",
"[",
"0",
"]",
"if",
"fixed_height",
"is",
"None",
"and",
"fixed_width",
"is",
"not",
"None",
":",
"# Only width specified, compute height to yield square histograms:",
"fixed_height",
"=",
"fixed_width",
"*",
"(",
"r",
"-",
"l",
")",
"/",
"(",
"t1",
"-",
"b1",
")",
"elif",
"fixed_height",
"is",
"not",
"None",
"and",
"fixed_width",
"is",
"None",
":",
"# Only height specified, compute width to yield square histograms",
"fixed_width",
"=",
"fixed_height",
"*",
"(",
"t1",
"-",
"b1",
")",
"/",
"(",
"r",
"-",
"l",
")",
"# Otherwise width and height are fixed, and we may not have square",
"# histograms, at the user's discretion.",
"wspace",
"=",
"ax_space",
"hspace",
"=",
"ax_space",
"# gs1 is the histograms, gs2 is the chains:",
"f",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"fixed_width",
",",
"fixed_height",
")",
")",
"gs1",
"=",
"mplgs",
".",
"GridSpec",
"(",
"k",
",",
"k",
")",
"gs1",
".",
"update",
"(",
"bottom",
"=",
"b1",
",",
"top",
"=",
"t1",
",",
"left",
"=",
"l",
",",
"right",
"=",
"r",
",",
"wspace",
"=",
"wspace",
",",
"hspace",
"=",
"hspace",
")",
"if",
"plot_chains",
":",
"gs2",
"=",
"mplgs",
".",
"GridSpec",
"(",
"1",
",",
"k",
")",
"gs2",
".",
"update",
"(",
"bottom",
"=",
"b2",
",",
"top",
"=",
"t2",
",",
"left",
"=",
"l",
",",
"right",
"=",
"r",
",",
"wspace",
"=",
"wspace",
",",
"hspace",
"=",
"hspace",
")",
"axes",
"=",
"[",
"]",
"# j is the row, i is the column.",
"for",
"j",
"in",
"xrange",
"(",
"0",
",",
"k",
"+",
"int",
"(",
"plot_chains",
")",
")",
":",
"row",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"k",
")",
":",
"if",
"i",
">",
"j",
":",
"row",
".",
"append",
"(",
"None",
")",
"else",
":",
"sharey",
"=",
"row",
"[",
"-",
"1",
"]",
"if",
"i",
">",
"0",
"and",
"i",
"<",
"j",
"and",
"j",
"<",
"k",
"else",
"None",
"sharex",
"=",
"axes",
"[",
"-",
"1",
"]",
"[",
"i",
"]",
"if",
"j",
">",
"i",
"and",
"j",
"<",
"k",
"else",
"(",
"row",
"[",
"-",
"1",
"]",
"if",
"i",
">",
"0",
"and",
"j",
"==",
"k",
"else",
"None",
")",
"gs",
"=",
"gs1",
"[",
"j",
",",
"i",
"]",
"if",
"j",
"<",
"k",
"else",
"gs2",
"[",
":",
",",
"i",
"]",
"row",
".",
"append",
"(",
"f",
".",
"add_subplot",
"(",
"gs",
",",
"sharey",
"=",
"sharey",
",",
"sharex",
"=",
"sharex",
")",
")",
"if",
"j",
"<",
"k",
"and",
"ticklabel_fontsize",
"is",
"not",
"None",
":",
"row",
"[",
"-",
"1",
"]",
".",
"tick_params",
"(",
"labelsize",
"=",
"ticklabel_fontsize",
")",
"elif",
"j",
">=",
"k",
"and",
"chain_ticklabel_fontsize",
"is",
"not",
"None",
":",
"row",
"[",
"-",
"1",
"]",
".",
"tick_params",
"(",
"labelsize",
"=",
"chain_ticklabel_fontsize",
")",
"axes",
".",
"append",
"(",
"row",
")",
"axes",
"=",
"scipy",
".",
"asarray",
"(",
"axes",
")",
"# Update axes with the data:",
"if",
"isinstance",
"(",
"sampler",
",",
"emcee",
".",
"EnsembleSampler",
")",
":",
"if",
"chain_mask",
"is",
"None",
":",
"chain_mask",
"=",
"scipy",
".",
"ones",
"(",
"sampler",
".",
"chain",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"bool",
")",
"flat_trace",
"=",
"sampler",
".",
"chain",
"[",
"chain_mask",
",",
"burn",
":",
",",
":",
"]",
"flat_trace",
"=",
"flat_trace",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"k",
")",
")",
"elif",
"isinstance",
"(",
"sampler",
",",
"emcee",
".",
"PTSampler",
")",
":",
"if",
"chain_mask",
"is",
"None",
":",
"chain_mask",
"=",
"scipy",
".",
"ones",
"(",
"sampler",
".",
"nwalkers",
",",
"dtype",
"=",
"bool",
")",
"flat_trace",
"=",
"sampler",
".",
"chain",
"[",
"temp_idx",
",",
"chain_mask",
",",
"burn",
":",
",",
":",
"]",
"flat_trace",
"=",
"flat_trace",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"k",
")",
")",
"elif",
"isinstance",
"(",
"sampler",
",",
"scipy",
".",
"ndarray",
")",
":",
"if",
"sampler",
".",
"ndim",
"==",
"4",
":",
"if",
"chain_mask",
"is",
"None",
":",
"chain_mask",
"=",
"scipy",
".",
"ones",
"(",
"sampler",
".",
"shape",
"[",
"1",
"]",
",",
"dtype",
"=",
"bool",
")",
"flat_trace",
"=",
"sampler",
"[",
"temp_idx",
",",
"chain_mask",
",",
"burn",
":",
",",
":",
"]",
"flat_trace",
"=",
"flat_trace",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"k",
")",
")",
"if",
"weights",
"is",
"not",
"None",
":",
"weights",
"=",
"weights",
"[",
"temp_idx",
",",
"chain_mask",
",",
"burn",
":",
"]",
"weights",
"=",
"weights",
".",
"ravel",
"(",
")",
"elif",
"sampler",
".",
"ndim",
"==",
"3",
":",
"if",
"chain_mask",
"is",
"None",
":",
"chain_mask",
"=",
"scipy",
".",
"ones",
"(",
"sampler",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"bool",
")",
"flat_trace",
"=",
"sampler",
"[",
"chain_mask",
",",
"burn",
":",
",",
":",
"]",
"flat_trace",
"=",
"flat_trace",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"k",
")",
")",
"if",
"weights",
"is",
"not",
"None",
":",
"weights",
"=",
"weights",
"[",
"chain_mask",
",",
"burn",
":",
"]",
"weights",
"=",
"weights",
".",
"ravel",
"(",
")",
"elif",
"sampler",
".",
"ndim",
"==",
"2",
":",
"flat_trace",
"=",
"sampler",
"[",
"burn",
":",
",",
":",
"]",
"flat_trace",
"=",
"flat_trace",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"k",
")",
")",
"if",
"weights",
"is",
"not",
"None",
":",
"weights",
"=",
"weights",
"[",
"burn",
":",
"]",
"weights",
"=",
"weights",
".",
"ravel",
"(",
")",
"if",
"cutoff_weight",
"is",
"not",
"None",
"and",
"weights",
"is",
"not",
"None",
":",
"mask",
"=",
"weights",
">=",
"cutoff_weight",
"*",
"weights",
".",
"max",
"(",
")",
"flat_trace",
"=",
"flat_trace",
"[",
"mask",
",",
":",
"]",
"masked_weights",
"=",
"weights",
"[",
"mask",
"]",
"else",
":",
"masked_weights",
"=",
"weights",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown sampler class: %s\"",
"%",
"(",
"type",
"(",
"sampler",
")",
",",
")",
")",
"# j is the row, i is the column.",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"k",
")",
":",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"clear",
"(",
")",
"if",
"plot_hist",
":",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"hist",
"(",
"flat_trace",
"[",
":",
",",
"i",
"]",
",",
"bins",
"=",
"bins",
",",
"color",
"=",
"hist_color",
",",
"weights",
"=",
"masked_weights",
",",
"normed",
"=",
"True",
",",
"histtype",
"=",
"'stepfilled'",
")",
"if",
"plot_samples",
":",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"plot",
"(",
"flat_trace",
"[",
":",
",",
"i",
"]",
",",
"scipy",
".",
"zeros_like",
"(",
"flat_trace",
"[",
":",
",",
"i",
"]",
")",
",",
"','",
",",
"alpha",
"=",
"0.1",
")",
"if",
"points",
"is",
"not",
"None",
":",
"# axvline can only take a scalar x, so we have to loop:",
"for",
"p",
",",
"c",
",",
"cov",
"in",
"zip",
"(",
"points",
",",
"colors",
",",
"covs",
")",
":",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"axvline",
"(",
"x",
"=",
"p",
"[",
"i",
"]",
",",
"linewidth",
"=",
"3",
",",
"color",
"=",
"c",
")",
"if",
"cov",
"is",
"not",
"None",
":",
"xlim",
"=",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"get_xlim",
"(",
")",
"i_grid",
"=",
"scipy",
".",
"linspace",
"(",
"xlim",
"[",
"0",
"]",
",",
"xlim",
"[",
"1",
"]",
",",
"100",
")",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"plot",
"(",
"i_grid",
",",
"scipy",
".",
"stats",
".",
"norm",
".",
"pdf",
"(",
"i_grid",
",",
"loc",
"=",
"p",
"[",
"i",
"]",
",",
"scale",
"=",
"scipy",
".",
"sqrt",
"(",
"cov",
"[",
"i",
",",
"i",
"]",
")",
")",
",",
"c",
",",
"linewidth",
"=",
"3.0",
")",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"set_xlim",
"(",
"xlim",
")",
"if",
"i",
"==",
"k",
"-",
"1",
":",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"set_xlabel",
"(",
"labels",
"[",
"i",
"]",
",",
"fontsize",
"=",
"label_fontsize",
")",
"plt",
".",
"setp",
"(",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"xaxis",
".",
"get_majorticklabels",
"(",
")",
",",
"rotation",
"=",
"xticklabel_angle",
")",
"if",
"i",
"<",
"k",
"-",
"1",
":",
"plt",
".",
"setp",
"(",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"get_xticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"plt",
".",
"setp",
"(",
"axes",
"[",
"i",
",",
"i",
"]",
".",
"get_yticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"for",
"j",
"in",
"xrange",
"(",
"i",
"+",
"1",
",",
"k",
")",
":",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"clear",
"(",
")",
"if",
"plot_hist",
":",
"ct",
",",
"x",
",",
"y",
",",
"im",
"=",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"hist2d",
"(",
"flat_trace",
"[",
":",
",",
"i",
"]",
",",
"flat_trace",
"[",
":",
",",
"j",
"]",
",",
"bins",
"=",
"bins",
",",
"cmap",
"=",
"cmap",
",",
"weights",
"=",
"masked_weights",
")",
"if",
"plot_samples",
":",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"plot",
"(",
"flat_trace",
"[",
":",
",",
"i",
"]",
",",
"flat_trace",
"[",
":",
",",
"j",
"]",
",",
"','",
",",
"alpha",
"=",
"0.1",
")",
"if",
"points",
"is",
"not",
"None",
":",
"for",
"p",
",",
"c",
",",
"cov",
"in",
"zip",
"(",
"points",
",",
"colors",
",",
"covs",
")",
":",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"plot",
"(",
"p",
"[",
"i",
"]",
",",
"p",
"[",
"j",
"]",
",",
"'o'",
",",
"color",
"=",
"c",
")",
"if",
"cov",
"is",
"not",
"None",
":",
"Sigma",
"=",
"scipy",
".",
"asarray",
"(",
"[",
"[",
"cov",
"[",
"i",
",",
"i",
"]",
",",
"cov",
"[",
"i",
",",
"j",
"]",
"]",
",",
"[",
"cov",
"[",
"j",
",",
"i",
"]",
",",
"cov",
"[",
"j",
",",
"j",
"]",
"]",
"]",
",",
"dtype",
"=",
"float",
")",
"lam",
",",
"v",
"=",
"scipy",
".",
"linalg",
".",
"eigh",
"(",
"Sigma",
")",
"chi2",
"=",
"[",
"-",
"scipy",
".",
"log",
"(",
"1.0",
"-",
"cival",
")",
"*",
"2.0",
"for",
"cival",
"in",
"ci",
"]",
"a",
"=",
"[",
"2.0",
"*",
"scipy",
".",
"sqrt",
"(",
"chi2val",
"*",
"lam",
"[",
"-",
"1",
"]",
")",
"for",
"chi2val",
"in",
"chi2",
"]",
"b",
"=",
"[",
"2.0",
"*",
"scipy",
".",
"sqrt",
"(",
"chi2val",
"*",
"lam",
"[",
"-",
"2",
"]",
")",
"for",
"chi2val",
"in",
"chi2",
"]",
"ang",
"=",
"scipy",
".",
"arctan2",
"(",
"v",
"[",
"1",
",",
"-",
"1",
"]",
",",
"v",
"[",
"0",
",",
"-",
"1",
"]",
")",
"for",
"aval",
",",
"bval",
"in",
"zip",
"(",
"a",
",",
"b",
")",
":",
"ell",
"=",
"mplp",
".",
"Ellipse",
"(",
"[",
"p",
"[",
"i",
"]",
",",
"p",
"[",
"j",
"]",
"]",
",",
"aval",
",",
"bval",
",",
"angle",
"=",
"scipy",
".",
"degrees",
"(",
"ang",
")",
",",
"facecolor",
"=",
"'none'",
",",
"edgecolor",
"=",
"c",
",",
"linewidth",
"=",
"3",
")",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"add_artist",
"(",
"ell",
")",
"# axes[j, i].plot(points[i], points[j], 'o')",
"# xmid = 0.5 * (x[1:] + x[:-1])",
"# ymid = 0.5 * (y[1:] + y[:-1])",
"# axes[j, i].contour(xmid, ymid, ct.T, colors='k')",
"if",
"j",
"<",
"k",
"-",
"1",
":",
"plt",
".",
"setp",
"(",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"get_xticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"if",
"i",
"!=",
"0",
":",
"plt",
".",
"setp",
"(",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"get_yticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"if",
"i",
"==",
"0",
":",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"set_ylabel",
"(",
"labels",
"[",
"j",
"]",
",",
"fontsize",
"=",
"label_fontsize",
")",
"if",
"j",
"==",
"k",
"-",
"1",
":",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"set_xlabel",
"(",
"labels",
"[",
"i",
"]",
",",
"fontsize",
"=",
"label_fontsize",
")",
"plt",
".",
"setp",
"(",
"axes",
"[",
"j",
",",
"i",
"]",
".",
"xaxis",
".",
"get_majorticklabels",
"(",
")",
",",
"rotation",
"=",
"xticklabel_angle",
")",
"if",
"plot_chains",
":",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"clear",
"(",
")",
"if",
"isinstance",
"(",
"sampler",
",",
"emcee",
".",
"EnsembleSampler",
")",
":",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"plot",
"(",
"sampler",
".",
"chain",
"[",
":",
",",
":",
",",
"i",
"]",
".",
"T",
",",
"alpha",
"=",
"chain_alpha",
")",
"elif",
"isinstance",
"(",
"sampler",
",",
"emcee",
".",
"PTSampler",
")",
":",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"plot",
"(",
"sampler",
".",
"chain",
"[",
"temp_idx",
",",
":",
",",
":",
",",
"i",
"]",
".",
"T",
",",
"alpha",
"=",
"chain_alpha",
")",
"else",
":",
"if",
"sampler",
".",
"ndim",
"==",
"4",
":",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"plot",
"(",
"sampler",
"[",
"temp_idx",
",",
":",
",",
":",
",",
"i",
"]",
".",
"T",
",",
"alpha",
"=",
"chain_alpha",
")",
"elif",
"sampler",
".",
"ndim",
"==",
"3",
":",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"plot",
"(",
"sampler",
"[",
":",
",",
":",
",",
"i",
"]",
".",
"T",
",",
"alpha",
"=",
"chain_alpha",
")",
"elif",
"sampler",
".",
"ndim",
"==",
"2",
":",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"plot",
"(",
"sampler",
"[",
":",
",",
"i",
"]",
".",
"T",
",",
"alpha",
"=",
"chain_alpha",
")",
"# Plot the weights on top of the chains:",
"if",
"weights",
"is",
"not",
"None",
":",
"a_wt",
"=",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"twinx",
"(",
")",
"a_wt",
".",
"plot",
"(",
"weights",
",",
"alpha",
"=",
"chain_alpha",
",",
"linestyle",
"=",
"'--'",
",",
"color",
"=",
"'r'",
")",
"plt",
".",
"setp",
"(",
"a_wt",
".",
"yaxis",
".",
"get_majorticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"a_wt",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"'none'",
")",
"# Plot the cutoff weight as a horizontal line and the first sample",
"# which is included as a vertical bar. Note that this won't be quite",
"# the right behavior if the weights are not roughly monotonic.",
"if",
"cutoff_weight",
"is",
"not",
"None",
":",
"a_wt",
".",
"axhline",
"(",
"cutoff_weight",
"*",
"weights",
".",
"max",
"(",
")",
",",
"linestyle",
"=",
"'-'",
",",
"color",
"=",
"'r'",
")",
"wi",
",",
"=",
"scipy",
".",
"where",
"(",
"weights",
">=",
"cutoff_weight",
"*",
"weights",
".",
"max",
"(",
")",
")",
"a_wt",
".",
"axvline",
"(",
"wi",
"[",
"0",
"]",
",",
"linestyle",
"=",
"'-'",
",",
"color",
"=",
"'r'",
")",
"if",
"burn",
">",
"0",
":",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"axvline",
"(",
"burn",
",",
"color",
"=",
"'r'",
",",
"linewidth",
"=",
"3",
")",
"if",
"points",
"is",
"not",
"None",
":",
"for",
"p",
",",
"c",
"in",
"zip",
"(",
"points",
",",
"colors",
")",
":",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"axhline",
"(",
"y",
"=",
"p",
"[",
"i",
"]",
",",
"linewidth",
"=",
"3",
",",
"color",
"=",
"c",
")",
"# Reset the xlim since it seems to get messed up:",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"set_xlim",
"(",
"left",
"=",
"0",
")",
"# try:",
"# [axes[-1, i].axhline(y=pt, linewidth=3) for pt in points[i]]",
"# except TypeError:",
"# axes[-1, i].axhline(y=points[i], linewidth=3)",
"if",
"label_chain_y",
":",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"set_ylabel",
"(",
"labels",
"[",
"i",
"]",
",",
"fontsize",
"=",
"chain_label_fontsize",
")",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"set_xlabel",
"(",
"'step'",
",",
"fontsize",
"=",
"chain_label_fontsize",
")",
"plt",
".",
"setp",
"(",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"xaxis",
".",
"get_majorticklabels",
"(",
")",
",",
"rotation",
"=",
"xticklabel_angle",
")",
"for",
"tick",
"in",
"axes",
"[",
"-",
"1",
",",
"i",
"]",
".",
"get_yaxis",
"(",
")",
".",
"get_major_ticks",
"(",
")",
":",
"tick",
".",
"set_pad",
"(",
"chain_ytick_pad",
")",
"tick",
".",
"label1",
"=",
"tick",
".",
"_get_text1",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"k",
")",
":",
"if",
"max_hist_ticks",
"is",
"not",
"None",
":",
"axes",
"[",
"k",
"-",
"1",
",",
"i",
"]",
".",
"xaxis",
".",
"set_major_locator",
"(",
"plt",
".",
"MaxNLocator",
"(",
"nbins",
"=",
"max_hist_ticks",
"-",
"1",
")",
")",
"axes",
"[",
"i",
",",
"0",
"]",
".",
"yaxis",
".",
"set_major_locator",
"(",
"plt",
".",
"MaxNLocator",
"(",
"nbins",
"=",
"max_hist_ticks",
"-",
"1",
")",
")",
"if",
"plot_chains",
"and",
"max_chain_ticks",
"is",
"not",
"None",
":",
"axes",
"[",
"k",
",",
"i",
"]",
".",
"yaxis",
".",
"set_major_locator",
"(",
"plt",
".",
"MaxNLocator",
"(",
"nbins",
"=",
"max_chain_ticks",
"-",
"1",
")",
")",
"axes",
"[",
"k",
",",
"i",
"]",
".",
"xaxis",
".",
"set_major_locator",
"(",
"plt",
".",
"MaxNLocator",
"(",
"nbins",
"=",
"max_chain_ticks",
"-",
"1",
")",
")",
"if",
"plot_chains",
"and",
"hide_chain_yticklabels",
":",
"plt",
".",
"setp",
"(",
"axes",
"[",
"k",
",",
"i",
"]",
".",
"get_yticklabels",
"(",
")",
",",
"visible",
"=",
"False",
")",
"if",
"suptitle",
"is",
"not",
"None",
":",
"f",
".",
"suptitle",
"(",
"suptitle",
")",
"f",
".",
"canvas",
".",
"draw",
"(",
")",
"return",
"f"
] | Plot the results of an MCMC sampler (posterior and chains).
Loosely based on triangle.py. Provides extensive options to format the plot.
Parameters
----------
sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
The sampler to plot the chains/marginals of. Can also be an array of
samples which matches the shape of the `chain` attribute that would be
present in a :py:class:`emcee.Sampler` instance.
suptitle : str, optional
The figure title to place at the top. Default is no title.
labels : list of str, optional
The labels to use for each of the free parameters. Default is to leave
the axes unlabeled.
bins : int, optional
Number of bins to use for the histograms. Default is 50.
plot_samples : bool, optional
If True, the samples are plotted as individual points. Default is False.
plot_hist : bool, optional
If True, histograms are plotted. Default is True.
plot_chains : bool, optional
If True, plot the sampler chains at the bottom. Default is True.
burn : int, optional
The number of samples to burn before making the marginal histograms.
Default is zero (use all samples).
chain_mask : (index) array, optional
Mask identifying the chains to keep before plotting, in case there are
bad chains. Default is to use all chains.
temp_idx : int, optional
Index of the temperature to plot when plotting a
:py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior).
weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
The weight for each sample. This is useful for post-processing the
output from MultiNest sampling, for instance. Default is to not weight
the samples.
cutoff_weight : float, optional
If `weights` and `cutoff_weight` are present, points with
`weights < cutoff_weight * weights.max()` will be excluded. Default is
to plot all points.
cmap : str, optional
The colormap to use for the histograms. Default is 'gray_r'.
hist_color : str, optional
The color to use for the univariate histograms. Default is 'k'.
chain_alpha : float, optional
The transparency to use for the plots of the individual chains. Setting
this to something low lets you better visualize what is going on.
Default is 0.1.
points : array, (`D`,) or (`N`, `D`), optional
Array of point(s) to plot onto each marginal and chain. Default is None.
covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional
Covariance matrix or array of covariance matrices to plot onto each
marginal. If you do not want to plot a covariance matrix for a specific
point, set its corresponding entry to `None`. Default is to not plot
confidence ellipses for any points.
colors : array of str, (`N`,), optional
The colors to use for the points in `points`. Default is to use the
standard matplotlib RGBCMYK cycle.
ci : array, (`num_ci`,), optional
List of confidence intervals to plot for each non-`None` entry in `covs`.
Default is 0.95 (just plot the 95 percent confidence interval).
max_hist_ticks : int, optional
The maximum number of ticks for the histogram plots. Default is None
(no limit).
max_chain_ticks : int, optional
The maximum number of y-axis ticks for the chain plots. Default is 6.
label_chain_y : bool, optional
If True, the chain plots will have y axis labels. Default is False.
hide_chain_yticklabels : bool, optional
If True, hide the y axis tick labels for the chain plots. Default is
False (show y tick labels).
chain_ytick_pad : float, optional
The padding (in points) between the y-axis tick labels and the axis for
the chain plots. Default is 2.0.
label_fontsize : float, optional
The font size (in points) to use for the axis labels. Default is
`axes.labelsize`.
ticklabel_fontsize : float, optional
The font size (in points) to use for the axis tick labels. Default is
`xtick.labelsize`.
chain_label_fontsize : float, optional
The font size (in points) to use for the labels of the chain axes.
Default is `axes.labelsize`.
chain_ticklabel_fontsize : float, optional
The font size (in points) to use for the chain axis tick labels. Default
is `xtick.labelsize`.
xticklabel_angle : float, optional
The angle to rotate the x tick labels, in degrees. Default is 90.
bottom_sep : float, optional
The separation (in relative figure units) between the chains and the
marginals. Default is 0.075.
suptitle_space : float, optional
The amount of space (in relative figure units) to leave for a figure
title. Default is 0.1.
fixed_height : float, optional
The desired figure height (in inches). Default is to automatically
adjust based on `fixed_width` to make the subplots square.
fixed_width : float, optional
The desired figure width (in inches). Default is `figure.figsize[0]`.
l : float, optional
The location (in relative figure units) of the left margin. Default is
0.1.
r : float, optional
The location (in relative figure units) of the right margin. Default is
0.9.
t1 : float, optional
The location (in relative figure units) of the top of the grid of
histograms. Overrides `suptitle_space` if present.
b1 : float, optional
The location (in relative figure units) of the bottom of the grid of
histograms. Overrides `bottom_sep` if present. Defaults to 0.1 if
`plot_chains` is False.
t2 : float, optional
The location (in relative figure units) of the top of the grid of chain
plots. Default is 0.2.
b2 : float, optional
The location (in relative figure units) of the bottom of the grid of
chain plots. Default is 0.1.
ax_space : float, optional
The `wspace` and `hspace` to use (in relative figure units). Default
is 0.1. | [
"Plot",
"the",
"results",
"of",
"MCMC",
"sampler",
"(",
"posterior",
"and",
"chains",
")",
".",
"Loosely",
"based",
"on",
"triangle",
".",
"py",
".",
"Provides",
"extensive",
"options",
"to",
"format",
"the",
"plot",
".",
"Parameters",
"----------",
"sampler",
":",
":",
"py",
":",
"class",
":",
"emcee",
".",
"Sampler",
"instance",
"or",
"array",
"(",
"n_temps",
"n_chains",
"n_samp",
"n_dim",
")",
"(",
"n_chains",
"n_samp",
"n_dim",
")",
"or",
"(",
"n_samp",
"n_dim",
")",
"The",
"sampler",
"to",
"plot",
"the",
"chains",
"/",
"marginals",
"of",
".",
"Can",
"also",
"be",
"an",
"array",
"of",
"samples",
"which",
"matches",
"the",
"shape",
"of",
"the",
"chain",
"attribute",
"that",
"would",
"be",
"present",
"in",
"a",
":",
"py",
":",
"class",
":",
"emcee",
".",
"Sampler",
"instance",
".",
"suptitle",
":",
"str",
"optional",
"The",
"figure",
"title",
"to",
"place",
"at",
"the",
"top",
".",
"Default",
"is",
"no",
"title",
".",
"labels",
":",
"list",
"of",
"str",
"optional",
"The",
"labels",
"to",
"use",
"for",
"each",
"of",
"the",
"free",
"parameters",
".",
"Default",
"is",
"to",
"leave",
"the",
"axes",
"unlabeled",
".",
"bins",
":",
"int",
"optional",
"Number",
"of",
"bins",
"to",
"use",
"for",
"the",
"histograms",
".",
"Default",
"is",
"50",
".",
"plot_samples",
":",
"bool",
"optional",
"If",
"True",
"the",
"samples",
"are",
"plotted",
"as",
"individual",
"points",
".",
"Default",
"is",
"False",
".",
"plot_hist",
":",
"bool",
"optional",
"If",
"True",
"histograms",
"are",
"plotted",
".",
"Default",
"is",
"True",
".",
"plot_chains",
":",
"bool",
"optional",
"If",
"True",
"plot",
"the",
"sampler",
"chains",
"at",
"the",
"bottom",
".",
"Default",
"is",
"True",
".",
"burn",
":",
"int",
"optional",
"The",
"number",
"of",
"samples",
"to",
"burn",
"before",
"making",
"the",
"marginal",
"histograms",
".",
"Default",
"is",
"zero",
"(",
"use",
"all",
"samples",
")",
".",
"chain_mask",
":",
"(",
"index",
")",
"array",
"optional",
"Mask",
"identifying",
"the",
"chains",
"to",
"keep",
"before",
"plotting",
"in",
"case",
"there",
"are",
"bad",
"chains",
".",
"Default",
"is",
"to",
"use",
"all",
"chains",
".",
"temp_idx",
":",
"int",
"optional",
"Index",
"of",
"the",
"temperature",
"to",
"plot",
"when",
"plotting",
"a",
":",
"py",
":",
"class",
":",
"emcee",
".",
"PTSampler",
".",
"Default",
"is",
"0",
"(",
"samples",
"from",
"the",
"posterior",
")",
".",
"weights",
":",
"array",
"(",
"n_temps",
"n_chains",
"n_samp",
")",
"(",
"n_chains",
"n_samp",
")",
"or",
"(",
"n_samp",
")",
"optional",
"The",
"weight",
"for",
"each",
"sample",
".",
"This",
"is",
"useful",
"for",
"post",
"-",
"processing",
"the",
"output",
"from",
"MultiNest",
"sampling",
"for",
"instance",
".",
"Default",
"is",
"to",
"not",
"weight",
"the",
"samples",
".",
"cutoff_weight",
":",
"float",
"optional",
"If",
"weights",
"and",
"cutoff_weight",
"are",
"present",
"points",
"with",
"weights",
"<",
"cutoff_weight",
"*",
"weights",
".",
"max",
"()",
"will",
"be",
"excluded",
".",
"Default",
"is",
"to",
"plot",
"all",
"points",
".",
"cmap",
":",
"str",
"optional",
"The",
"colormap",
"to",
"use",
"for",
"the",
"histograms",
".",
"Default",
"is",
"gray_r",
".",
"hist_color",
":",
"str",
"optional",
"The",
"color",
"to",
"use",
"for",
"the",
"univariate",
"histograms",
".",
"Default",
"is",
"k",
".",
"chain_alpha",
":",
"float",
"optional",
"The",
"transparency",
"to",
"use",
"for",
"the",
"plots",
"of",
"the",
"individual",
"chains",
".",
"Setting",
"this",
"to",
"something",
"low",
"lets",
"you",
"better",
"visualize",
"what",
"is",
"going",
"on",
".",
"Default",
"is",
"0",
".",
"1",
".",
"points",
":",
"array",
"(",
"D",
")",
"or",
"(",
"N",
"D",
")",
"optional",
"Array",
"of",
"point",
"(",
"s",
")",
"to",
"plot",
"onto",
"each",
"marginal",
"and",
"chain",
".",
"Default",
"is",
"None",
".",
"covs",
":",
"array",
"(",
"D",
"D",
")",
"or",
"(",
"N",
"D",
"D",
")",
"optional",
"Covariance",
"matrix",
"or",
"array",
"of",
"covariance",
"matrices",
"to",
"plot",
"onto",
"each",
"marginal",
".",
"If",
"you",
"do",
"not",
"want",
"to",
"plot",
"a",
"covariance",
"matrix",
"for",
"a",
"specific",
"point",
"set",
"its",
"corresponding",
"entry",
"to",
"None",
".",
"Default",
"is",
"to",
"not",
"plot",
"confidence",
"ellipses",
"for",
"any",
"points",
".",
"colors",
":",
"array",
"of",
"str",
"(",
"N",
")",
"optional",
"The",
"colors",
"to",
"use",
"for",
"the",
"points",
"in",
"points",
".",
"Default",
"is",
"to",
"use",
"the",
"standard",
"matplotlib",
"RGBCMYK",
"cycle",
".",
"ci",
":",
"array",
"(",
"num_ci",
")",
"optional",
"List",
"of",
"confidence",
"intervals",
"to",
"plot",
"for",
"each",
"non",
"-",
"None",
"entry",
"in",
"covs",
".",
"Default",
"is",
"0",
".",
"95",
"(",
"just",
"plot",
"the",
"95",
"percent",
"confidence",
"interval",
")",
".",
"max_hist_ticks",
":",
"int",
"optional",
"The",
"maximum",
"number",
"of",
"ticks",
"for",
"the",
"histogram",
"plots",
".",
"Default",
"is",
"None",
"(",
"no",
"limit",
")",
".",
"max_chain_ticks",
":",
"int",
"optional",
"The",
"maximum",
"number",
"of",
"y",
"-",
"axis",
"ticks",
"for",
"the",
"chain",
"plots",
".",
"Default",
"is",
"6",
".",
"label_chain_y",
":",
"bool",
"optional",
"If",
"True",
"the",
"chain",
"plots",
"will",
"have",
"y",
"axis",
"labels",
".",
"Default",
"is",
"False",
".",
"hide_chain_yticklabels",
":",
"bool",
"optional",
"If",
"True",
"hide",
"the",
"y",
"axis",
"tick",
"labels",
"for",
"the",
"chain",
"plots",
".",
"Default",
"is",
"False",
"(",
"show",
"y",
"tick",
"labels",
")",
".",
"chain_ytick_pad",
":",
"float",
"optional",
"The",
"padding",
"(",
"in",
"points",
")",
"between",
"the",
"y",
"-",
"axis",
"tick",
"labels",
"and",
"the",
"axis",
"for",
"the",
"chain",
"plots",
".",
"Default",
"is",
"2",
".",
"0",
".",
"label_fontsize",
":",
"float",
"optional",
"The",
"font",
"size",
"(",
"in",
"points",
")",
"to",
"use",
"for",
"the",
"axis",
"labels",
".",
"Default",
"is",
"axes",
".",
"labelsize",
".",
"ticklabel_fontsize",
":",
"float",
"optional",
"The",
"font",
"size",
"(",
"in",
"points",
")",
"to",
"use",
"for",
"the",
"axis",
"tick",
"labels",
".",
"Default",
"is",
"xtick",
".",
"labelsize",
".",
"chain_label_fontsize",
":",
"float",
"optional",
"The",
"font",
"size",
"(",
"in",
"points",
")",
"to",
"use",
"for",
"the",
"labels",
"of",
"the",
"chain",
"axes",
".",
"Default",
"is",
"axes",
".",
"labelsize",
".",
"chain_ticklabel_fontsize",
":",
"float",
"optional",
"The",
"font",
"size",
"(",
"in",
"points",
")",
"to",
"use",
"for",
"the",
"chain",
"axis",
"tick",
"labels",
".",
"Default",
"is",
"xtick",
".",
"labelsize",
".",
"xticklabel_angle",
":",
"float",
"optional",
"The",
"angle",
"to",
"rotate",
"the",
"x",
"tick",
"labels",
"in",
"degrees",
".",
"Default",
"is",
"90",
".",
"bottom_sep",
":",
"float",
"optional",
"The",
"separation",
"(",
"in",
"relative",
"figure",
"units",
")",
"between",
"the",
"chains",
"and",
"the",
"marginals",
".",
"Default",
"is",
"0",
".",
"075",
".",
"suptitle_space",
":",
"float",
"optional",
"The",
"amount",
"of",
"space",
"(",
"in",
"relative",
"figure",
"units",
")",
"to",
"leave",
"for",
"a",
"figure",
"title",
".",
"Default",
"is",
"0",
".",
"1",
".",
"fixed_height",
":",
"float",
"optional",
"The",
"desired",
"figure",
"height",
"(",
"in",
"inches",
")",
".",
"Default",
"is",
"to",
"automatically",
"adjust",
"based",
"on",
"fixed_width",
"to",
"make",
"the",
"subplots",
"square",
".",
"fixed_width",
":",
"float",
"optional",
"The",
"desired",
"figure",
"width",
"(",
"in",
"inches",
")",
".",
"Default",
"is",
"figure",
".",
"figsize",
"[",
"0",
"]",
".",
"l",
":",
"float",
"optional",
"The",
"location",
"(",
"in",
"relative",
"figure",
"units",
")",
"of",
"the",
"left",
"margin",
".",
"Default",
"is",
"0",
".",
"1",
".",
"r",
":",
"float",
"optional",
"The",
"location",
"(",
"in",
"relative",
"figure",
"units",
")",
"of",
"the",
"right",
"margin",
".",
"Default",
"is",
"0",
".",
"9",
".",
"t1",
":",
"float",
"optional",
"The",
"location",
"(",
"in",
"relative",
"figure",
"units",
")",
"of",
"the",
"top",
"of",
"the",
"grid",
"of",
"histograms",
".",
"Overrides",
"suptitle_space",
"if",
"present",
".",
"b1",
":",
"float",
"optional",
"The",
"location",
"(",
"in",
"relative",
"figure",
"units",
")",
"of",
"the",
"bottom",
"of",
"the",
"grid",
"of",
"histograms",
".",
"Overrides",
"bottom_sep",
"if",
"present",
".",
"Defaults",
"to",
"0",
".",
"1",
"if",
"plot_chains",
"is",
"False",
".",
"t2",
":",
"float",
"optional",
"The",
"location",
"(",
"in",
"relative",
"figure",
"units",
")",
"of",
"the",
"top",
"of",
"the",
"grid",
"of",
"chain",
"plots",
".",
"Default",
"is",
"0",
".",
"2",
".",
"b2",
":",
"float",
"optional",
"The",
"location",
"(",
"in",
"relative",
"figure",
"units",
")",
"of",
"the",
"bottom",
"of",
"the",
"grid",
"of",
"chain",
"plots",
".",
"Default",
"is",
"0",
".",
"1",
".",
"ax_space",
":",
"float",
"optional",
"The",
"w_space",
"and",
"h_space",
"to",
"use",
"(",
"in",
"relative",
"figure",
"units",
")",
".",
"Default",
"is",
"0",
".",
"1",
"."
] | python | train |
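A minimal usage sketch for the `plot_sampler` record above follows. It is a hedged illustration, not the library's documented usage: per the docstring, a plain (n_samp, n_dim) array may stand in for a real emcee sampler, and it assumes the function and its module-level imports are available in the legacy Python 2 / old-matplotlib environment the source targets. All parameter values are illustrative.

# A minimal sketch: a synthetic sample array stands in for an emcee
# sampler, as the docstring allows.
import numpy as np
samples = np.random.standard_normal((5000, 3))   # fake (n_samp, n_dim) draws
fig = plot_sampler(
    samples,
    labels=['$a$', '$b$', '$c$'],
    burn=500,              # drop the first 500 rows from the marginals
    plot_chains=False,     # draw only the triangle of marginals
    suptitle='Synthetic 3-parameter posterior',
)
fig.savefig('triangle.png')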
rigetti/grove | grove/alpha/arbitrary_state/arbitrary_state.py | https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/alpha/arbitrary_state/arbitrary_state.py#L65-L120 | def get_rotation_parameters(phases, magnitudes):
"""
Simulates one step of rotations.
Given lists of phases and magnitudes of the same length :math:`N`,
such that :math:`N=2^n` for some positive integer :math:`n`,
finds the rotation angles required for one step of phase and magnitude
unification.
:param list phases: real-valued phases from :math:`-\\pi` to :math:`\\pi`.
:param list magnitudes: positive, real-valued magnitudes such that
the sum of the square of each magnitude is
:math:`2^{-m}` for some nonnegative integer :math:`m`.
:return: A tuple t of four lists such that
- t[0] are the z-rotations needed to unify adjacent pairs of phases
- t[1] are the y-rotations needed to unify adjacent pairs of magnitudes
- t[2] are the updated phases after these rotations are applied
- t[3] are the updated magnitudes after these rotations are applied
:rtype: tuple
"""
# will hold the angles for controlled rotations
# in the phase unification and probability unification steps,
# respectively
z_thetas = []
y_thetas = []
# will hold updated phases and magnitudes after rotations
new_phases = []
new_magnitudes = []
for i in range(0, len(phases), 2):
# find z rotation angles
phi = phases[i]
psi = phases[i + 1]
z_thetas.append(phi - psi)
# update phases after applying such rotations
kappa = (phi + psi) / 2.
new_phases.append(kappa)
# find y rotation angles
a = magnitudes[i]
b = magnitudes[i + 1]
if a == 0 and b == 0:
y_thetas.append(0)
else:
y_thetas.append(
2 * np.arcsin((a - b) / (np.sqrt(2 * (a ** 2 + b ** 2)))))
# update magnitudes after applying such rotations
c = np.sqrt((a ** 2 + b ** 2) / 2.)
new_magnitudes.append(c)
return z_thetas, y_thetas, new_phases, new_magnitudes | [
"def",
"get_rotation_parameters",
"(",
"phases",
",",
"magnitudes",
")",
":",
"# will hold the angles for controlled rotations",
"# in the phase unification and probability unification steps,",
"# respectively",
"z_thetas",
"=",
"[",
"]",
"y_thetas",
"=",
"[",
"]",
"# will hold updated phases and magnitudes after rotations",
"new_phases",
"=",
"[",
"]",
"new_magnitudes",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"phases",
")",
",",
"2",
")",
":",
"# find z rotation angles",
"phi",
"=",
"phases",
"[",
"i",
"]",
"psi",
"=",
"phases",
"[",
"i",
"+",
"1",
"]",
"z_thetas",
".",
"append",
"(",
"phi",
"-",
"psi",
")",
"# update phases after applying such rotations",
"kappa",
"=",
"(",
"phi",
"+",
"psi",
")",
"/",
"2.",
"new_phases",
".",
"append",
"(",
"kappa",
")",
"# find y rotation angles",
"a",
"=",
"magnitudes",
"[",
"i",
"]",
"b",
"=",
"magnitudes",
"[",
"i",
"+",
"1",
"]",
"if",
"a",
"==",
"0",
"and",
"b",
"==",
"0",
":",
"y_thetas",
".",
"append",
"(",
"0",
")",
"else",
":",
"y_thetas",
".",
"append",
"(",
"2",
"*",
"np",
".",
"arcsin",
"(",
"(",
"a",
"-",
"b",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"2",
"*",
"(",
"a",
"**",
"2",
"+",
"b",
"**",
"2",
")",
")",
")",
")",
")",
"# update magnitudes after applying such rotations",
"c",
"=",
"np",
".",
"sqrt",
"(",
"(",
"a",
"**",
"2",
"+",
"b",
"**",
"2",
")",
"/",
"2.",
")",
"new_magnitudes",
".",
"append",
"(",
"c",
")",
"return",
"z_thetas",
",",
"y_thetas",
",",
"new_phases",
",",
"new_magnitudes"
] | Simulates one step of rotations.
Given lists of phases and magnitudes of the same length :math:`N`,
such that :math:`N=2^n` for some positive integer :math:`n`,
finds the rotation angles required for one step of phase and magnitude
unification.
:param list phases: real-valued phases from :math:`-\\pi` to :math:`\\pi`.
:param list magnitudes: positive, real-valued magnitudes such that
the sum of the square of each magnitude is
:math:`2^{-m}` for some nonnegative integer :math:`m`.
:return: A tuple t of four lists such that
- t[0] are the z-rotations needed to unify adjacent pairs of phases
- t[1] are the y-rotations needed to unify adjacent pairs of magnitudes
- t[2] are the updated phases after these rotations are applied
- t[3] are the updated magnitudes after these rotations are applied
:rtype: tuple | [
"Simulates",
"one",
"step",
"of",
"rotations",
"."
] | python | train |
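Because the unification step is pure arithmetic, a short worked example makes the contract concrete. The inputs below are chosen so the squared magnitudes sum to 2**0 = 1, as the docstring requires, and the expected outputs are computed by hand.

# Worked example for one unification step.
import numpy as np
phases = [np.pi / 2, -np.pi / 2, 0.0, 0.0]
magnitudes = [0.6, 0.8, 0.0, 0.0]
z, y, new_phases, new_mags = get_rotation_parameters(phases, magnitudes)
# First pair: a z-rotation of pi unifies +pi/2 and -pi/2, averaged phase 0;
# a y-rotation of 2*arcsin(-0.2/sqrt(2)) ~= -0.284 merges 0.6 and 0.8 into
# sqrt((0.36 + 0.64)/2) = sqrt(0.5). The all-zero second pair yields zeros.
assert np.isclose(z[0], np.pi) and np.isclose(new_mags[0], np.sqrt(0.5))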
DataDog/integrations-core | tokumx/datadog_checks/tokumx/vendor/pymongo/bulk.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/bulk.py#L145-L198 | def _merge_command(run, full_result, results):
"""Merge a group of results from write commands into the full result.
"""
for offset, result in results:
affected = result.get("n", 0)
if run.op_type == _INSERT:
full_result["nInserted"] += affected
elif run.op_type == _DELETE:
full_result["nRemoved"] += affected
elif run.op_type == _UPDATE:
upserted = result.get("upserted")
if upserted:
if isinstance(upserted, list):
n_upserted = len(upserted)
for doc in upserted:
doc["index"] = run.index(doc["index"] + offset)
full_result["upserted"].extend(upserted)
else:
n_upserted = 1
index = run.index(offset)
doc = {_UINDEX: index, _UID: upserted}
full_result["upserted"].append(doc)
full_result["nUpserted"] += n_upserted
full_result["nMatched"] += (affected - n_upserted)
else:
full_result["nMatched"] += affected
n_modified = result.get("nModified")
# SERVER-13001 - in a mixed sharded cluster a call to
# update could return nModified (>= 2.6) or not (<= 2.4).
# If any call does not return nModified we can't report
# a valid final count so omit the field completely.
if n_modified is not None and "nModified" in full_result:
full_result["nModified"] += n_modified
else:
full_result.pop("nModified", None)
write_errors = result.get("writeErrors")
if write_errors:
for doc in write_errors:
# Leave the server response intact for APM.
replacement = doc.copy()
idx = doc["index"] + offset
replacement["index"] = run.index(idx)
# Add the failed operation to the error document.
replacement[_UOP] = run.ops[idx]
full_result["writeErrors"].append(replacement)
wc_error = result.get("writeConcernError")
if wc_error:
full_result["writeConcernErrors"].append(wc_error) | [
"def",
"_merge_command",
"(",
"run",
",",
"full_result",
",",
"results",
")",
":",
"for",
"offset",
",",
"result",
"in",
"results",
":",
"affected",
"=",
"result",
".",
"get",
"(",
"\"n\"",
",",
"0",
")",
"if",
"run",
".",
"op_type",
"==",
"_INSERT",
":",
"full_result",
"[",
"\"nInserted\"",
"]",
"+=",
"affected",
"elif",
"run",
".",
"op_type",
"==",
"_DELETE",
":",
"full_result",
"[",
"\"nRemoved\"",
"]",
"+=",
"affected",
"elif",
"run",
".",
"op_type",
"==",
"_UPDATE",
":",
"upserted",
"=",
"result",
".",
"get",
"(",
"\"upserted\"",
")",
"if",
"upserted",
":",
"if",
"isinstance",
"(",
"upserted",
",",
"list",
")",
":",
"n_upserted",
"=",
"len",
"(",
"upserted",
")",
"for",
"doc",
"in",
"upserted",
":",
"doc",
"[",
"\"index\"",
"]",
"=",
"run",
".",
"index",
"(",
"doc",
"[",
"\"index\"",
"]",
"+",
"offset",
")",
"full_result",
"[",
"\"upserted\"",
"]",
".",
"extend",
"(",
"upserted",
")",
"else",
":",
"n_upserted",
"=",
"1",
"index",
"=",
"run",
".",
"index",
"(",
"offset",
")",
"doc",
"=",
"{",
"_UINDEX",
":",
"index",
",",
"_UID",
":",
"upserted",
"}",
"full_result",
"[",
"\"upserted\"",
"]",
".",
"append",
"(",
"doc",
")",
"full_result",
"[",
"\"nUpserted\"",
"]",
"+=",
"n_upserted",
"full_result",
"[",
"\"nMatched\"",
"]",
"+=",
"(",
"affected",
"-",
"n_upserted",
")",
"else",
":",
"full_result",
"[",
"\"nMatched\"",
"]",
"+=",
"affected",
"n_modified",
"=",
"result",
".",
"get",
"(",
"\"nModified\"",
")",
"# SERVER-13001 - in a mixed sharded cluster a call to",
"# update could return nModified (>= 2.6) or not (<= 2.4).",
"# If any call does not return nModified we can't report",
"# a valid final count so omit the field completely.",
"if",
"n_modified",
"is",
"not",
"None",
"and",
"\"nModified\"",
"in",
"full_result",
":",
"full_result",
"[",
"\"nModified\"",
"]",
"+=",
"n_modified",
"else",
":",
"full_result",
".",
"pop",
"(",
"\"nModified\"",
",",
"None",
")",
"write_errors",
"=",
"result",
".",
"get",
"(",
"\"writeErrors\"",
")",
"if",
"write_errors",
":",
"for",
"doc",
"in",
"write_errors",
":",
"# Leave the server response intact for APM.",
"replacement",
"=",
"doc",
".",
"copy",
"(",
")",
"idx",
"=",
"doc",
"[",
"\"index\"",
"]",
"+",
"offset",
"replacement",
"[",
"\"index\"",
"]",
"=",
"run",
".",
"index",
"(",
"idx",
")",
"# Add the failed operation to the error document.",
"replacement",
"[",
"_UOP",
"]",
"=",
"run",
".",
"ops",
"[",
"idx",
"]",
"full_result",
"[",
"\"writeErrors\"",
"]",
".",
"append",
"(",
"replacement",
")",
"wc_error",
"=",
"result",
".",
"get",
"(",
"\"writeConcernError\"",
")",
"if",
"wc_error",
":",
"full_result",
"[",
"\"writeConcernErrors\"",
"]",
".",
"append",
"(",
"wc_error",
")"
] | Merge a group of results from write commands into the full result. | [
"Merge",
"a",
"group",
"of",
"results",
"from",
"write",
"commands",
"into",
"the",
"full",
"result",
"."
] | python | train |
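Since `_merge_command` only touches `op_type`, `ops`, and `index()` on the run object, its accumulation behavior can be exercised with a stand-in. The sketch below is an assumption-laden illustration, not real pymongo usage: `_FakeRun` is hypothetical, and only `_INSERT` (the constant the function itself compares against) and `_merge_command` come from the module above.

# Hedged illustration with a minimal stand-in for the module's run type.
class _FakeRun(object):
    op_type = _INSERT                 # constant referenced by _merge_command
    ops = [{'x': 1}, {'x': 2}]
    def index(self, idx):
        return idx                    # real runs map back to caller op order

full_result = {
    'nInserted': 0, 'nRemoved': 0, 'nMatched': 0, 'nModified': 0,
    'nUpserted': 0, 'upserted': [], 'writeErrors': [],
    'writeConcernErrors': [],
}
# Two result batches at offsets 0 and 1, each reporting one inserted doc:
_merge_command(_FakeRun(), full_result, [(0, {'n': 1}), (1, {'n': 1})])
assert full_result['nInserted'] == 2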
zeroSteiner/AdvancedHTTPServer | advancedhttpserver.py | https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1020-L1030 | def respond_unauthorized(self, request_authentication=False):
"""
Respond to the client that the request is unauthorized.
:param bool request_authentication: Whether to request basic authentication information by sending a WWW-Authenticate header.
"""
headers = {}
if request_authentication:
headers['WWW-Authenticate'] = 'Basic realm="' + self.__config['server_version'] + '"'
self.send_response_full(b'Unauthorized', status=401, headers=headers)
return | [
"def",
"respond_unauthorized",
"(",
"self",
",",
"request_authentication",
"=",
"False",
")",
":",
"headers",
"=",
"{",
"}",
"if",
"request_authentication",
":",
"headers",
"[",
"'WWW-Authenticate'",
"]",
"=",
"'Basic realm=\"'",
"+",
"self",
".",
"__config",
"[",
"'server_version'",
"]",
"+",
"'\"'",
"self",
".",
"send_response_full",
"(",
"b'Unauthorized'",
",",
"status",
"=",
"401",
",",
"headers",
"=",
"headers",
")",
"return"
] | Respond to the client that the request is unauthorized.
:param bool request_authentication: Whether to request basic authentication information by sending a WWW-Authenticate header. | [
"Respond",
"to",
"the",
"client",
"that",
"the",
"request",
"is",
"unauthorized",
"."
] | python | train |
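A hedged usage sketch follows; only `respond_unauthorized` and `send_response_full` come from the class above, while the handler method body and the credential check are hypothetical.

# Hypothetical handler method on the same handler class;
# check_credentials() is an assumed helper, not AdvancedHTTPServer API.
def do_GET(self):
    if not self.check_credentials():
        # 401 plus a WWW-Authenticate challenge so clients prompt the
        # user for HTTP Basic credentials:
        self.respond_unauthorized(request_authentication=True)
        return
    self.send_response_full(b'OK')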
facetoe/zenpy | zenpy/lib/cache.py | https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/cache.py#L27-L36 | def set_cache_impl(self, cache_impl, maxsize, **kwargs):
"""
Change cache implementation. The contents of the old cache will
be transferred to the new one.
:param cache_impl: Name of cache implementation, must exist in AVAILABLE_CACHES
"""
new_cache = self._get_cache_impl(cache_impl, maxsize, **kwargs)
self._populate_new_cache(new_cache)
self.cache = new_cache | [
"def",
"set_cache_impl",
"(",
"self",
",",
"cache_impl",
",",
"maxsize",
",",
"*",
"*",
"kwargs",
")",
":",
"new_cache",
"=",
"self",
".",
"_get_cache_impl",
"(",
"cache_impl",
",",
"maxsize",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_populate_new_cache",
"(",
"new_cache",
")",
"self",
".",
"cache",
"=",
"new_cache"
] | Change cache implementation. The contents of the old cache will
be transferred to the new one.
:param cache_impl: Name of cache implementation, must exist in AVAILABLE_CACHES | [
"Change",
"cache",
"implementation",
".",
"The",
"contents",
"of",
"the",
"old",
"cache",
"will",
"be",
"transferred",
"to",
"the",
"new",
"one",
"."
] | python | train |
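A one-line usage sketch, assuming an already-constructed cache manager; the implementation name 'ttl' and its keyword arguments are illustrative and must correspond to an entry in AVAILABLE_CACHES.

# Hedged sketch: swap the backing cache without losing cached objects.
# cache_manager and the 'ttl' implementation name are assumptions.
cache_manager.set_cache_impl('ttl', maxsize=10000, ttl=60)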
edx/edx-django-utils | edx_django_utils/monitoring/middleware.py | https://github.com/edx/edx-django-utils/blob/16cb4ac617e53c572bf68ccd19d24afeff1ca769/edx_django_utils/monitoring/middleware.py#L177-L202 | def _log_diff_memory_data(self, prefix, new_memory_data, old_memory_data):
"""
Computes and logs the difference in memory utilization
between the given old and new memory data.
"""
def _vmem_used(memory_data):
return memory_data['machine_data'].used
def _process_mem_percent(memory_data):
return memory_data['process_data']['memory_percent']
def _process_rss(memory_data):
return memory_data['process_data']['memory_info'].rss
def _process_vms(memory_data):
return memory_data['process_data']['memory_info'].vms
if new_memory_data and old_memory_data:
log.info(
u"%s Diff Vmem used: %s, Diff percent memory: %s, Diff rss: %s, Diff vms: %s",
prefix,
_vmem_used(new_memory_data) - _vmem_used(old_memory_data),
_process_mem_percent(new_memory_data) - _process_mem_percent(old_memory_data),
_process_rss(new_memory_data) - _process_rss(old_memory_data),
_process_vms(new_memory_data) - _process_vms(old_memory_data),
) | [
"def",
"_log_diff_memory_data",
"(",
"self",
",",
"prefix",
",",
"new_memory_data",
",",
"old_memory_data",
")",
":",
"def",
"_vmem_used",
"(",
"memory_data",
")",
":",
"return",
"memory_data",
"[",
"'machine_data'",
"]",
".",
"used",
"def",
"_process_mem_percent",
"(",
"memory_data",
")",
":",
"return",
"memory_data",
"[",
"'process_data'",
"]",
"[",
"'memory_percent'",
"]",
"def",
"_process_rss",
"(",
"memory_data",
")",
":",
"return",
"memory_data",
"[",
"'process_data'",
"]",
"[",
"'memory_info'",
"]",
".",
"rss",
"def",
"_process_vms",
"(",
"memory_data",
")",
":",
"return",
"memory_data",
"[",
"'process_data'",
"]",
"[",
"'memory_info'",
"]",
".",
"vms",
"if",
"new_memory_data",
"and",
"old_memory_data",
":",
"log",
".",
"info",
"(",
"u\"%s Diff Vmem used: %s, Diff percent memory: %s, Diff rss: %s, Diff vms: %s\"",
",",
"prefix",
",",
"_vmem_used",
"(",
"new_memory_data",
")",
"-",
"_vmem_used",
"(",
"old_memory_data",
")",
",",
"_process_mem_percent",
"(",
"new_memory_data",
")",
"-",
"_process_mem_percent",
"(",
"old_memory_data",
")",
",",
"_process_rss",
"(",
"new_memory_data",
")",
"-",
"_process_rss",
"(",
"old_memory_data",
")",
",",
"_process_vms",
"(",
"new_memory_data",
")",
"-",
"_process_vms",
"(",
"old_memory_data",
")",
",",
")"
] | Computes and logs the difference in memory utilization
between the given old and new memory data. | [
"Computes",
"and",
"logs",
"the",
"difference",
"in",
"memory",
"utilization",
"between",
"the",
"given",
"old",
"and",
"new",
"memory",
"data",
"."
] | python | train |
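The helper assumes a particular `memory_data` shape; the sketch below shows one psutil-based way to build compatible snapshots. How the middleware actually captures them is an assumption, but the accessors match the code above.

# Hedged sketch of snapshots that satisfy the accessors used above.
import psutil

def _capture_memory_data():
    process = psutil.Process()
    return {
        'machine_data': psutil.virtual_memory(),      # exposes .used
        'process_data': {
            'memory_percent': process.memory_percent(),
            'memory_info': process.memory_info(),     # exposes .rss, .vms
        },
    }

before = _capture_memory_data()
# ... handle the request ...
after = _capture_memory_data()
# middleware: an assumed instance of the monitoring middleware class.
middleware._log_diff_memory_data('After request:', after, before)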
rochacbruno/flasgger | flasgger/utils.py | https://github.com/rochacbruno/flasgger/blob/fef154f61d7afca548067be0c758c3dd71cc4c97/flasgger/utils.py#L730-L739 | def get_vendor_extension_fields(mapping):
"""
Identify vendor extension fields and extract them into a new dictionary.
Examples:
>>> get_vendor_extension_fields({'test': 1})
{}
>>> get_vendor_extension_fields({'test': 1, 'x-test': 2})
{'x-test': 2}
"""
return {k: v for k, v in mapping.items() if k.startswith('x-')} | [
"def",
"get_vendor_extension_fields",
"(",
"mapping",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"mapping",
".",
"items",
"(",
")",
"if",
"k",
".",
"startswith",
"(",
"'x-'",
")",
"}"
] | Identify vendor extension fields and extract them into a new dictionary.
Examples:
>>> get_vendor_extension_fields({'test': 1})
{}
>>> get_vendor_extension_fields({'test': 1, 'x-test': 2})
{'x-test': 2} | [
"Identify",
"vendor",
"extension",
"fields",
"and",
"extract",
"them",
"into",
"a",
"new",
"dictionary",
".",
"Examples",
":",
">>>",
"get_vendor_extension_fields",
"(",
"{",
"test",
":",
"1",
"}",
")",
"{}",
">>>",
"get_vendor_extension_fields",
"(",
"{",
"test",
":",
"1",
"x",
"-",
"test",
":",
"2",
"}",
")",
"{",
"x",
"-",
"test",
":",
"2",
"}"
] | python | train |
codelv/enaml-native | src/enamlnative/core/hotswap/core.py | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/core/hotswap/core.py#L79-L83 | def update_class_by_type(old, new):
""" Update declarative classes or fallback on default """
autoreload.update_class(old, new)
if isinstance2(old, new, AtomMeta):
update_atom_members(old, new) | [
"def",
"update_class_by_type",
"(",
"old",
",",
"new",
")",
":",
"autoreload",
".",
"update_class",
"(",
"old",
",",
"new",
")",
"if",
"isinstance2",
"(",
"old",
",",
"new",
",",
"AtomMeta",
")",
":",
"update_atom_members",
"(",
"old",
",",
"new",
")"
] | Update declarative classes or fall back on the default | [
"Update",
"declarative",
"classes",
"or",
"fallback",
"on",
"default"
] | python | train |
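A sketch of the intent, assuming the vendored IPython-style `autoreload.update_class` patches methods on the old class object in place so live instances pick up new behavior; the classes below are illustrative.

# Hedged sketch of hot-swapping a plain (non-Atom) class.
class Greeter(object):
    def greet(self):
        return 'hi'

class GreeterV2(object):      # stands in for the reloaded definition
    def greet(self):
        return 'hello'

g = Greeter()
update_class_by_type(Greeter, GreeterV2)
print(g.greet())              # expected 'hello' under autoreload semantics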
mandeep/Travis-Encrypt | travis/orderer.py | https://github.com/mandeep/Travis-Encrypt/blob/0dd2da1c71feaadcb84bdeb26827e6dfe1bd3b41/travis/orderer.py#L27-L37 | def ordered_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
"""Dump a yaml configuration as an OrderedDict."""
class OrderedDumper(Dumper):
pass
def dict_representer(dumper, data):
return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())
OrderedDumper.add_representer(OrderedDict, dict_representer)
return yaml.dump(data, stream, OrderedDumper, **kwds) | [
"def",
"ordered_dump",
"(",
"data",
",",
"stream",
"=",
"None",
",",
"Dumper",
"=",
"yaml",
".",
"SafeDumper",
",",
"*",
"*",
"kwds",
")",
":",
"class",
"OrderedDumper",
"(",
"Dumper",
")",
":",
"pass",
"def",
"dict_representer",
"(",
"dumper",
",",
"data",
")",
":",
"return",
"dumper",
".",
"represent_mapping",
"(",
"yaml",
".",
"resolver",
".",
"BaseResolver",
".",
"DEFAULT_MAPPING_TAG",
",",
"data",
".",
"items",
"(",
")",
")",
"OrderedDumper",
".",
"add_representer",
"(",
"OrderedDict",
",",
"dict_representer",
")",
"return",
"yaml",
".",
"dump",
"(",
"data",
",",
"stream",
",",
"OrderedDumper",
",",
"*",
"*",
"kwds",
")"
] | Dump a yaml configuration as an OrderedDict. | [
"Dump",
"a",
"yaml",
"configuration",
"as",
"an",
"OrderedDict",
"."
] | python | train |
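A usage example follows; because the custom representer feeds `data.items()` to `represent_mapping`, keys serialize in insertion order rather than PyYAML's default alphabetical sort.

# Usage example: key order survives into the YAML output.
from collections import OrderedDict
config = OrderedDict([
    ('language', 'python'),
    ('install', ['pip install .']),
    ('script', ['pytest']),
])
print(ordered_dump(config, default_flow_style=False))
# language: python
# install:
# - pip install .
# script:
# - pytest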
UCSBarchlab/PyRTL | pyrtl/helperfuncs.py | https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/helperfuncs.py#L277-L323 | def val_to_formatted_str(val, format, enum_set=None):
""" Return a string representation of the value given format specified.
:param val: an unsigned integer to convert
:param format: a string holding a format which will be used to convert the value
:param enum_set: an iterable of enums which are used as part of the conversion process
Given an unsigned integer (not a wirevector!) convert it to a string ready for output
to a human to interpret. This helps deal with signed/unsigned numbers (simulation
operates on values that have been converted via two's complement), but it also generates
hex, binary, and enum types as outputs. It is easiest to see how it works with some
examples. ::
val_to_formatted_str(2, 's3') == '2'
val_to_formatted_str(7, 's3') == '-1'
val_to_formatted_str(5, 'b3') == '101'
val_to_formatted_str(5, 'u3') == '5'
val_to_formatted_str(5, 's3') == '-3'
val_to_formatted_str(10, 'x3') == 'a'
class Ctl(Enum):
ADD = 5
SUB = 12
val_to_formatted_str(5, 'e3/Ctl', [Ctl]) == 'ADD'
val_to_formatted_str(12, 'e3/Ctl', [Ctl]) == 'SUB'
"""
type = format[0]
bitwidth = int(format[1:].split('/')[0])
bitmask = (1 << bitwidth)-1
if type == 's':
rval = str(val_to_signed_integer(val, bitwidth))
elif type == 'x':
rval = hex(val)[2:] # cuts off '0x' at the start
elif type == 'b':
rval = bin(val)[2:] # cuts off '0b' at the start
elif type == 'u':
rval = str(int(val)) # nothing fancy
elif type == 'e':
enumname = format.split('/')[1]
enum_inst_list = [e for e in enum_set if e.__name__ == enumname]
if len(enum_inst_list) == 0:
raise PyrtlError('enum "{}" not found in passed enum_set "{}"'
.format(enumname, enum_set))
rval = enum_inst_list[0](val).name
else:
raise PyrtlError('unknown format type {}'.format(format))
return rval | [
"def",
"val_to_formatted_str",
"(",
"val",
",",
"format",
",",
"enum_set",
"=",
"None",
")",
":",
"type",
"=",
"format",
"[",
"0",
"]",
"bitwidth",
"=",
"int",
"(",
"format",
"[",
"1",
":",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
")",
"bitmask",
"=",
"(",
"1",
"<<",
"bitwidth",
")",
"-",
"1",
"if",
"type",
"==",
"'s'",
":",
"rval",
"=",
"str",
"(",
"val_to_signed_integer",
"(",
"val",
",",
"bitwidth",
")",
")",
"elif",
"type",
"==",
"'x'",
":",
"rval",
"=",
"hex",
"(",
"val",
")",
"[",
"2",
":",
"]",
"# cuts off '0x' at the start",
"elif",
"type",
"==",
"'b'",
":",
"rval",
"=",
"bin",
"(",
"val",
")",
"[",
"2",
":",
"]",
"# cuts off '0b' at the start",
"elif",
"type",
"==",
"'u'",
":",
"rval",
"=",
"str",
"(",
"int",
"(",
"val",
")",
")",
"# nothing fancy",
"elif",
"type",
"==",
"'e'",
":",
"enumname",
"=",
"format",
".",
"split",
"(",
"'/'",
")",
"[",
"1",
"]",
"enum_inst_list",
"=",
"[",
"e",
"for",
"e",
"in",
"enum_set",
"if",
"e",
".",
"__name__",
"==",
"enumname",
"]",
"if",
"len",
"(",
"enum_inst_list",
")",
"==",
"0",
":",
"raise",
"PyrtlError",
"(",
"'enum \"{}\" not found in passed enum_set \"{}\"'",
".",
"format",
"(",
"enumname",
",",
"enum_set",
")",
")",
"rval",
"=",
"enum_inst_list",
"[",
"0",
"]",
"(",
"val",
")",
".",
"name",
"else",
":",
"raise",
"PyrtlError",
"(",
"'unknown format type {}'",
".",
"format",
"(",
"format",
")",
")",
"return",
"rval"
] | Return a string representation of the value given format specified.
:param val: an unsigned integer to convert
:param format: a string holding a format which will be used to convert the value
:param enum_set: an iterable of enums which are used as part of the conversion process
Given an unsigned integer (not a wirevector!) convert it to a string ready for output
to a human to interpret. This helps deal with signed/unsigned numbers (simulation
operates on values that have been converted via two's complement), but it also generates
hex, binary, and enum types as outputs. It is easiest to see how it works with some
examples. ::
val_to_formatted_str(2, 's3') == '2'
val_to_formatted_str(7, 's3') == '-1'
val_to_formatted_str(5, 'b3') == '101'
val_to_formatted_str(5, 'u3') == '5'
val_to_formatted_str(5, 's3') == '-3'
val_to_formatted_str(10, 'x3') == 'a'
class Ctl(Enum):
ADD = 5
SUB = 12
val_to_formatted_str(5, 'e3/Ctl', [Ctl]) == 'ADD'
val_to_formatted_str(12, 'e3/Ctl', [Ctl]) == 'SUB' | [
"Return",
"a",
"string",
"representation",
"of",
"the",
"value",
"given",
"format",
"specified",
"."
] | python | train |
TDG-Platform/cloud-harness | gbdx_cloud_harness/services/task_service.py | https://github.com/TDG-Platform/cloud-harness/blob/1d8f972f861816b90785a484e9bec5bd4bc2f569/gbdx_cloud_harness/services/task_service.py#L35-L47 | def delete_task(self, task_name):
'''
Delete a task from the platform's registry
:param task_name: name of the task to delete
'''
response = self.session.delete('%s/%s' % (self.task_url, task_name))
if response.status_code == 200:
return response.status_code, 'Task %s deleted' % task_name
elif response.status_code == 400:
return response.status_code, None # Task isn't registered.
else:
return response.status_code, 'Task %s was not deleted: %s' % (task_name, response.text) | [
"def",
"delete_task",
"(",
"self",
",",
"task_name",
")",
":",
"response",
"=",
"self",
".",
"session",
".",
"delete",
"(",
"'%s/%s'",
"%",
"(",
"self",
".",
"task_url",
",",
"task_name",
")",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"response",
".",
"status_code",
",",
"'Task %s deleted'",
"%",
"task_name",
"elif",
"response",
".",
"status_code",
"==",
"400",
":",
"return",
"response",
".",
"status_code",
",",
"None",
"# Task isn't registered.",
"else",
":",
"return",
"response",
".",
"status_code",
",",
"'Task %s was not deleted: %s'",
"%",
"(",
"task_name",
",",
"response",
".",
"text",
")"
] | Delete a task from the platform's registry
:param task_name: name of the task to delete | [
"Delete",
"a",
"task",
"from",
"the",
"platforms",
"regoistry",
":",
"param",
"task_name",
":",
"name",
"of",
"the",
"task",
"to",
"delete"
] | python | test |
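A hedged usage sketch; `task_service` is an assumed, already-authenticated instance of this service class, and the status/message handling mirrors the return contract above (a None message means the task was not registered).

# Hedged usage sketch; the instance and task name are illustrative.
status, message = task_service.delete_task('my-cloud-harness-task')
if message is None:
    print('Task was not registered (HTTP %s)' % status)
else:
    print(message)   # confirmation or error text from the platform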
mickybart/python-atlasbroker | atlasbroker/storage.py | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/storage.py#L109-L145 | def store(self, obj):
""" Store
Store an object into the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Returns:
ObjectId: MongoDB _id
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageStore : Failed to store the binding or instance.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "database" : obj.get_dbname(), "cluster": obj.get_cluster(), "parameters" : obj.parameters }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "parameters" : obj.parameters, "instance_id": obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# insert
try:
result = self.broker.insert_one(query)
except:
raise ErrStorageMongoConnection("Store Instance or Binding")
if result is not None:
# Flag the object as provisioned
obj.provisioned = True
return result.inserted_id
raise ErrStorageStore() | [
"def",
"store",
"(",
"self",
",",
"obj",
")",
":",
"# query",
"if",
"type",
"(",
"obj",
")",
"is",
"AtlasServiceInstance",
".",
"Instance",
":",
"query",
"=",
"{",
"\"instance_id\"",
":",
"obj",
".",
"instance_id",
",",
"\"database\"",
":",
"obj",
".",
"get_dbname",
"(",
")",
",",
"\"cluster\"",
":",
"obj",
".",
"get_cluster",
"(",
")",
",",
"\"parameters\"",
":",
"obj",
".",
"parameters",
"}",
"elif",
"type",
"(",
"obj",
")",
"is",
"AtlasServiceBinding",
".",
"Binding",
":",
"query",
"=",
"{",
"\"binding_id\"",
":",
"obj",
".",
"binding_id",
",",
"\"parameters\"",
":",
"obj",
".",
"parameters",
",",
"\"instance_id\"",
":",
"obj",
".",
"instance",
".",
"instance_id",
"}",
"else",
":",
"raise",
"ErrStorageTypeUnsupported",
"(",
"type",
"(",
"obj",
")",
")",
"# insert",
"try",
":",
"result",
"=",
"self",
".",
"broker",
".",
"insert_one",
"(",
"query",
")",
"except",
":",
"raise",
"ErrStorageMongoConnection",
"(",
"\"Store Instance or Binding\"",
")",
"if",
"result",
"is",
"not",
"None",
":",
"# Flags the obj to provisioned",
"obj",
".",
"provisioned",
"=",
"True",
"return",
"result",
".",
"inserted_id",
"raise",
"ErrStorageStore",
"(",
")"
] | Store
Store an object into the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Returns:
ObjectId: MongoDB _id
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageStore : Failed to store the binding or instance. | [
"Store",
"Store",
"an",
"object",
"into",
"the",
"MongoDB",
"storage",
"for",
"caching",
"Args",
":",
"obj",
"(",
"AtlasServiceBinding",
".",
"Binding",
"or",
"AtlasServiceInstance",
".",
"Instance",
")",
":",
"instance",
"or",
"binding",
"Returns",
":",
"ObjectId",
":",
"MongoDB",
"_id",
"Raises",
":",
"ErrStorageMongoConnection",
":",
"Error",
"during",
"MongoDB",
"communication",
".",
"ErrStorageTypeUnsupported",
":",
"Type",
"unsupported",
".",
"ErrStorageStore",
":",
"Failed",
"to",
"store",
"the",
"binding",
"or",
"instance",
"."
] | python | train |
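For reference, a sketch of the same "build document by object type, then insert" idea using `pymongo` directly (the connection URI, database, and collection names are assumptions, not taken from the broker):

```python
# Hedged sketch: one insert_one() call returning the new ObjectId.
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")   # assumed local MongoDB
broker = client["broker"]["instances"]              # assumed names

doc = {"instance_id": "abc-123", "database": "db1",
       "cluster": "cluster0", "parameters": {}}
result = broker.insert_one(doc)
print(result.inserted_id)   # present on success, as in store()
```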
andreagrandi/toshl-python | toshl/client.py | https://github.com/andreagrandi/toshl-python/blob/16a2aef8a0d389db73db3253b0bea3fcc33cc2bf/toshl/client.py#L11-L47 | def _make_request(
self, api_resource, method='GET', params=None, **kwargs):
"""
Shortcut for a generic request to the Toshl API
:param api_resource: The URL resource part
:param method: REST method
:param params: Querystring parameters
:return: requests.Response
"""
if kwargs.get('json'):
headers = {
'Authorization': 'Bearer {}'.format(self._token),
'Content-Type': 'application/json'
}
else:
headers = {
'Authorization': 'Bearer {}'.format(self._token)
}
response = requests.request(
method=method,
url='{0}{1}'.format(self.BASE_API_URL, api_resource),
headers=headers,
params=params,
**kwargs
)
if response.status_code >= 400:
error_response = response.json()
raise(ToshlException(
status_code=response.status_code,
error_id=error_response['error_id'],
error_description=error_response['description'],
extra_info=error_response.get('fields')))
return response | [
"def",
"_make_request",
"(",
"self",
",",
"api_resource",
",",
"method",
"=",
"'GET'",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
".",
"get",
"(",
"'json'",
")",
":",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Bearer {}'",
".",
"format",
"(",
"self",
".",
"_token",
")",
",",
"'Content-Type'",
":",
"'application/json'",
"}",
"else",
":",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Bearer {}'",
".",
"format",
"(",
"self",
".",
"_token",
")",
"}",
"response",
"=",
"requests",
".",
"request",
"(",
"method",
"=",
"method",
",",
"url",
"=",
"'{0}{1}'",
".",
"format",
"(",
"self",
".",
"BASE_API_URL",
",",
"api_resource",
")",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
",",
"*",
"*",
"kwargs",
")",
"if",
"response",
".",
"status_code",
">=",
"400",
":",
"error_response",
"=",
"response",
".",
"json",
"(",
")",
"raise",
"(",
"ToshlException",
"(",
"status_code",
"=",
"response",
".",
"status_code",
",",
"error_id",
"=",
"error_response",
"[",
"'error_id'",
"]",
",",
"error_description",
"=",
"error_response",
"[",
"'description'",
"]",
",",
"extra_info",
"=",
"error_response",
".",
"get",
"(",
"'fields'",
")",
")",
")",
"return",
"response"
] | Shortcut for a generic request to the Toshl API
:param api_resource: The URL resource part
:param method: REST method
:param params: Querystring parameters
:return: requests.Response | [
"Shortcut",
"for",
"a",
"generic",
"request",
"to",
"the",
"Toshl",
"API",
":",
"param",
"url",
":",
"The",
"URL",
"resource",
"part",
":",
"param",
"method",
":",
"REST",
"method",
":",
"param",
"parameters",
":",
"Querystring",
"parameters",
":",
"return",
":",
"requests",
".",
"Response"
] | python | train |
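A self-contained sketch of the same bearer-token helper pattern (the base URL and token are placeholders, not real Toshl credentials):

```python
# Hedged sketch: same header logic as _make_request(), outside the class.
import requests

BASE_API_URL = "https://api.toshl.com"   # placeholder base URL
TOKEN = "YOUR-TOKEN"                     # placeholder token

def make_request(api_resource, method="GET", params=None, **kwargs):
    headers = {"Authorization": "Bearer {}".format(TOKEN)}
    if kwargs.get("json"):
        headers["Content-Type"] = "application/json"
    return requests.request(method=method,
                            url="{0}{1}".format(BASE_API_URL, api_resource),
                            headers=headers, params=params, **kwargs)
```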
payu-org/payu | payu/calendar.py | https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/calendar.py#L41-L55 | def date_plus_seconds(init_date, seconds, caltype):
"""
Get a new_date = date + seconds.
Ignores Feb 29 for no-leap days.
"""
end_date = init_date + datetime.timedelta(seconds=seconds)
if caltype == NOLEAP:
end_date += get_leapdays(init_date, end_date)
if end_date.month == 2 and end_date.day == 29:
end_date += datetime.timedelta(days=1)
return end_date | [
"def",
"date_plus_seconds",
"(",
"init_date",
",",
"seconds",
",",
"caltype",
")",
":",
"end_date",
"=",
"init_date",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"seconds",
")",
"if",
"caltype",
"==",
"NOLEAP",
":",
"end_date",
"+=",
"get_leapdays",
"(",
"init_date",
",",
"end_date",
")",
"if",
"end_date",
".",
"month",
"==",
"2",
"and",
"end_date",
".",
"day",
"==",
"29",
":",
"end_date",
"+=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"return",
"end_date"
] | Get a new_date = date + seconds.
Ignores Feb 29 for no-leap days. | [
"Get",
"a",
"new_date",
"=",
"date",
"+",
"seconds",
"."
] | python | train |
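A quick check of the plain Gregorian branch (the no-leap correction depends on `get_leapdays()` from the same module and is omitted here):

```python
# Hedged sketch: timedelta arithmetic lands on the leap day in 2012.
import datetime

init_date = datetime.datetime(2012, 2, 28)
print(init_date + datetime.timedelta(seconds=86400))   # 2012-02-29 00:00:00
```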
vingd/vingd-api-python | vingd/client.py | https://github.com/vingd/vingd-api-python/blob/7548a49973a472f7277c8ef847563faa7b6f3706/vingd/client.py#L627-L691 | def create_voucher(self, amount, expires=None, message='', gid=None):
"""
CREATES a new preallocated voucher with ``amount`` vingd cents reserved
until ``expires``.
:type amount: ``bigint``
:param amount:
Voucher amount in vingd cents.
:type expires: ``datetime``/``dict``
:param expires:
Voucher expiry timestamp, absolute (``datetime``) or relative
(``dict``). Valid keys for relative expiry timestamp dictionary are
same as keyword arguments for `datetime.timedelta` (``days``,
``seconds``, ``minutes``, ``hours``, ``weeks``). Default:
`Vingd.EXP_VOUCHER`.
:type message: ``string``
:param message:
Short message displayed to user when she redeems the voucher on
Vingd frontend.
:type gid: ``alphanum(32)``
:param gid:
Voucher group id. A user can redeem only one voucher per group.
:rtype: ``dict``
:returns:
Created voucher description::
voucher = {
'vid': <voucher_integer_id>,
'vid_encoded': <voucher_string_id>,
'amount_allocated': <int_cents | None if not allocated>,
'amount_vouched': <int_cents>,
'id_fort_transfer': <id_of_allocating_transfer |
None if not allocated>,
'fee': <int_cents>,
'uid_from': <source_account_uid>,
'uid_proxy': <broker_id>,
'uid_to': <destination_account_id | None if not given>,
'gid': <voucher_group_id | None if undefined>,
'ts_valid_until': <iso8601_timestamp_absolute>,
'description': <string | None>,
'message': <string | None>
}
combined with voucher redeem urls on Vingd frontend.
:raises GeneralException:
:resource: ``vouchers/``
:access: authorized users (ACL flag: ``voucher.add``)
"""
expires = absdatetime(expires, default=self.EXP_VOUCHER).isoformat()
voucher = self.request('post', 'vouchers/', json.dumps({
'amount': amount,
'until': expires,
'message': message,
'gid': gid
}))
return {
'raw': voucher,
'urls': {
'redirect': urljoin(self.usr_frontend, '/vouchers/%s' % voucher['vid_encoded']),
'popup': urljoin(self.usr_frontend, '/popup/vouchers/%s' % voucher['vid_encoded'])
}
} | [
"def",
"create_voucher",
"(",
"self",
",",
"amount",
",",
"expires",
"=",
"None",
",",
"message",
"=",
"''",
",",
"gid",
"=",
"None",
")",
":",
"expires",
"=",
"absdatetime",
"(",
"expires",
",",
"default",
"=",
"self",
".",
"EXP_VOUCHER",
")",
".",
"isoformat",
"(",
")",
"voucher",
"=",
"self",
".",
"request",
"(",
"'post'",
",",
"'vouchers/'",
",",
"json",
".",
"dumps",
"(",
"{",
"'amount'",
":",
"amount",
",",
"'until'",
":",
"expires",
",",
"'message'",
":",
"message",
",",
"'gid'",
":",
"gid",
"}",
")",
")",
"return",
"{",
"'raw'",
":",
"voucher",
",",
"'urls'",
":",
"{",
"'redirect'",
":",
"urljoin",
"(",
"self",
".",
"usr_frontend",
",",
"'/vouchers/%s'",
"%",
"voucher",
"[",
"'vid_encoded'",
"]",
")",
",",
"'popup'",
":",
"urljoin",
"(",
"self",
".",
"usr_frontend",
",",
"'/popup/vouchers/%s'",
"%",
"voucher",
"[",
"'vid_encoded'",
"]",
")",
"}",
"}"
] | CREATES a new preallocated voucher with ``amount`` vingd cents reserved
until ``expires``.
:type amount: ``bigint``
:param amount:
Voucher amount in vingd cents.
:type expires: ``datetime``/``dict``
:param expires:
Voucher expiry timestamp, absolute (``datetime``) or relative
(``dict``). Valid keys for relative expiry timestamp dictionary are
same as keyword arguments for `datetime.timedelta` (``days``,
``seconds``, ``minutes``, ``hours``, ``weeks``). Default:
`Vingd.EXP_VOUCHER`.
:type message: ``string``
:param message:
Short message displayed to user when she redeems the voucher on
Vingd frontend.
:type gid: ``alphanum(32)``
:param gid:
Voucher group id. A user can redeem only one voucher per group.
:rtype: ``dict``
:returns:
Created voucher description::
voucher = {
'vid': <voucher_integer_id>,
'vid_encoded': <voucher_string_id>,
'amount_allocated': <int_cents | None if not allocated>,
'amount_vouched': <int_cents>,
'id_fort_transfer': <id_of_allocating_transfer |
None if not allocated>,
'fee': <int_cents>,
'uid_from': <source_account_uid>,
'uid_proxy': <broker_id>,
'uid_to': <destination_account_id | None if not given>,
'gid': <voucher_group_id | None if undefined>,
'ts_valid_until': <iso8601_timestamp_absolute>,
'description': <string | None>,
'message': <string | None>
}
combined with voucher redeem urls on Vingd frontend.
:raises GeneralException:
:resource: ``vouchers/``
:access: authorized users (ACL flag: ``voucher.add``) | [
"CREATES",
"a",
"new",
"preallocated",
"voucher",
"with",
"amount",
"vingd",
"cents",
"reserved",
"until",
"expires",
".",
":",
"type",
"amount",
":",
"bigint",
":",
"param",
"amount",
":",
"Voucher",
"amount",
"in",
"vingd",
"cents",
".",
":",
"type",
"expires",
":",
"datetime",
"/",
"dict",
":",
"param",
"expires",
":",
"Voucher",
"expiry",
"timestamp",
"absolute",
"(",
"datetime",
")",
"or",
"relative",
"(",
"dict",
")",
".",
"Valid",
"keys",
"for",
"relative",
"expiry",
"timestamp",
"dictionary",
"are",
"same",
"as",
"keyword",
"arguments",
"for",
"datetime",
".",
"timedelta",
"(",
"days",
"seconds",
"minutes",
"hours",
"weeks",
")",
".",
"Default",
":",
"Vingd",
".",
"EXP_VOUCHER",
".",
":",
"type",
"message",
":",
"string",
":",
"param",
"message",
":",
"Short",
"message",
"displayed",
"to",
"user",
"when",
"she",
"redeems",
"the",
"voucher",
"on",
"Vingd",
"frontend",
".",
":",
"type",
"gid",
":",
"alphanum",
"(",
"32",
")",
":",
"param",
"gid",
":",
"Voucher",
"group",
"id",
".",
"An",
"user",
"can",
"redeem",
"only",
"one",
"voucher",
"per",
"group",
".",
":",
"rtype",
":",
"dict",
":",
"returns",
":",
"Created",
"voucher",
"description",
"::",
"voucher",
"=",
"{",
"vid",
":",
"<voucher_integer_id",
">",
"vid_encoded",
":",
"<voucher_string_id",
">",
"amount_allocated",
":",
"<int_cents",
"|",
"None",
"if",
"not",
"allocated",
">",
"amount_vouched",
":",
"<int_cents",
">",
"id_fort_transfer",
":",
"<id_of_allocating_transfer",
"|",
"None",
"if",
"not",
"allocated",
">",
"fee",
":",
"<int_cents",
">",
"uid_from",
":",
"<source_account_uid",
">",
"uid_proxy",
":",
"<broker_id",
">",
"uid_to",
":",
"<destination_account_id",
"|",
"None",
"if",
"not",
"given",
">",
"gid",
":",
"<voucher_group_id",
"|",
"None",
"if",
"undefined",
">",
"ts_valid_until",
":",
"<iso8601_timestamp_absolute",
">",
"description",
":",
"<string",
"|",
"None",
">",
"message",
":",
"<string",
"|",
"None",
">",
"}",
"combined",
"with",
"voucher",
"redeem",
"urls",
"on",
"Vingd",
"frontend",
".",
":",
"raises",
"GeneralException",
":",
":",
"resource",
":",
"vouchers",
"/",
":",
"access",
":",
"authorized",
"users",
"(",
"ACL",
"flag",
":",
"voucher",
".",
"add",
")"
] | python | train |
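A hypothetical call sketch for the record above; the client construction is omitted and all argument values are illustrative:

```python
# Hedged sketch: assumes `v` is an authenticated vingd client instance.
v = ...  # construction omitted; see the vingd client documentation

voucher = v.create_voucher(
    amount=200,                 # vingd cents
    expires={"days": 30},       # relative expiry dict, per the docstring
    message="Welcome voucher",
    gid="promo-group",
)
print(voucher["urls"]["redirect"])   # frontend redeem URL
```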
aboSamoor/polyglot | polyglot/mapping/embeddings.py | https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/mapping/embeddings.py#L80-L88 | def most_frequent(self, k, inplace=False):
"""Only most frequent k words to be included in the embeddings."""
vocabulary = self.vocabulary.most_frequent(k)
vectors = np.asarray([self[w] for w in vocabulary])
if inplace:
self.vocabulary = vocabulary
self.vectors = vectors
return self
return Embedding(vectors=vectors, vocabulary=vocabulary) | [
"def",
"most_frequent",
"(",
"self",
",",
"k",
",",
"inplace",
"=",
"False",
")",
":",
"vocabulary",
"=",
"self",
".",
"vocabulary",
".",
"most_frequent",
"(",
"k",
")",
"vectors",
"=",
"np",
".",
"asarray",
"(",
"[",
"self",
"[",
"w",
"]",
"for",
"w",
"in",
"vocabulary",
"]",
")",
"if",
"inplace",
":",
"self",
".",
"vocabulary",
"=",
"vocabulary",
"self",
".",
"vectors",
"=",
"vectors",
"return",
"self",
"return",
"Embedding",
"(",
"vectors",
"=",
"vectors",
",",
"vocabulary",
"=",
"vocabulary",
")"
] | Only the most frequent k words will be included in the embeddings. | [
"Only",
"most",
"frequent",
"k",
"words",
"to",
"be",
"included",
"in",
"the",
"embeddings",
"."
] | python | train |
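A short usage sketch (the embeddings path is a placeholder, and `Embedding.load` is assumed to be available alongside this class):

```python
# Hedged sketch: shrink a vocabulary to its 10,000 most frequent words.
from polyglot.mapping import Embedding

emb = Embedding.load("/path/to/embeddings.tar.bz2")   # placeholder path
small = emb.most_frequent(10000)            # returns a new Embedding
emb.most_frequent(10000, inplace=True)      # or shrink in place
```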
digidotcom/python-devicecloud | devicecloud/streams.py | https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/streams.py#L224-L283 | def bulk_write_datapoints(self, datapoints):
"""Perform a bulk write (or set of writes) of a collection of data points
This method takes a list (or other iterable) of datapoints and writes them
to Device Cloud in an efficient manner, minimizing the number of HTTP
requests that need to be made.
As this call is performed from outside the context of any particular stream,
each DataPoint object passed in must include information about the stream
into which the point should be written.
If all data points being written are for the same stream, you may want to
consider using :meth:`~DataStream.bulk_write_datapoints` instead.
Example::
datapoints = []
for i in range(300):
datapoints.append(DataPoint(
stream_id="my/stream%d" % (i % 3),
data_type=STREAM_TYPE_INTEGER,
units="meters",
data=i,
))
dc.streams.bulk_write_datapoints(datapoints)
Depending on the size of the list of datapoints provided, this method may
need to make multiple calls to Device Cloud (in chunks of 250).
:param list datapoints: a list of datapoints to be written to Device Cloud
:raises TypeError: if a list of datapoints is not provided
:raises ValueError: if any of the provided data points do not have all required
information (such as information about the stream)
:raises DeviceCloudHttpException: in the case of an unexpected error in communicating
with Device Cloud.
"""
datapoints = list(datapoints) # effectively performs validation that we have the right type
for dp in datapoints:
if not isinstance(dp, DataPoint):
raise TypeError("All items in the datapoints list must be DataPoints")
if dp.get_stream_id() is None:
raise ValueError("stream_id must be set on all datapoints")
remaining_datapoints = datapoints
while remaining_datapoints:
# take up to 250 points and post them until complete
this_chunk_of_datapoints = remaining_datapoints[:MAXIMUM_DATAPOINTS_PER_POST]
remaining_datapoints = remaining_datapoints[MAXIMUM_DATAPOINTS_PER_POST:]
# Build XML list containing data for all points
datapoints_out = StringIO()
datapoints_out.write("<list>")
for dp in this_chunk_of_datapoints:
datapoints_out.write(dp.to_xml())
datapoints_out.write("</list>")
# And send the HTTP Post
self._conn.post("/ws/DataPoint", datapoints_out.getvalue())
logger.info('DataPoint batch of %s datapoints written', len(this_chunk_of_datapoints)) | [
"def",
"bulk_write_datapoints",
"(",
"self",
",",
"datapoints",
")",
":",
"datapoints",
"=",
"list",
"(",
"datapoints",
")",
"# effectively performs validation that we have the right type",
"for",
"dp",
"in",
"datapoints",
":",
"if",
"not",
"isinstance",
"(",
"dp",
",",
"DataPoint",
")",
":",
"raise",
"TypeError",
"(",
"\"All items in the datapoints list must be DataPoints\"",
")",
"if",
"dp",
".",
"get_stream_id",
"(",
")",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"stream_id must be set on all datapoints\"",
")",
"remaining_datapoints",
"=",
"datapoints",
"while",
"remaining_datapoints",
":",
"# take up to 250 points and post them until complete",
"this_chunk_of_datapoints",
"=",
"remaining_datapoints",
"[",
":",
"MAXIMUM_DATAPOINTS_PER_POST",
"]",
"remaining_datapoints",
"=",
"remaining_datapoints",
"[",
"MAXIMUM_DATAPOINTS_PER_POST",
":",
"]",
"# Build XML list containing data for all points",
"datapoints_out",
"=",
"StringIO",
"(",
")",
"datapoints_out",
".",
"write",
"(",
"\"<list>\"",
")",
"for",
"dp",
"in",
"this_chunk_of_datapoints",
":",
"datapoints_out",
".",
"write",
"(",
"dp",
".",
"to_xml",
"(",
")",
")",
"datapoints_out",
".",
"write",
"(",
"\"</list>\"",
")",
"# And send the HTTP Post",
"self",
".",
"_conn",
".",
"post",
"(",
"\"/ws/DataPoint\"",
",",
"datapoints_out",
".",
"getvalue",
"(",
")",
")",
"logger",
".",
"info",
"(",
"'DataPoint batch of %s datapoints written'",
",",
"len",
"(",
"this_chunk_of_datapoints",
")",
")"
] | Perform a bulk write (or set of writes) of a collection of data points
This method takes a list (or other iterable) of datapoints and writes them
to Device Cloud in an efficient manner, minimizing the number of HTTP
requests that need to be made.
As this call is performed from outside the context of any particular stream,
each DataPoint object passed in must include information about the stream
into which the point should be written.
If all data points being written are for the same stream, you may want to
consider using :meth:`~DataStream.bulk_write_datapoints` instead.
Example::
datapoints = []
for i in range(300):
datapoints.append(DataPoint(
stream_id="my/stream%d" % (i % 3),
data_type=STREAM_TYPE_INTEGER,
units="meters",
data=i,
))
dc.streams.bulk_write_datapoints(datapoints)
Depending on the size of the list of datapoints provided, this method may
need to make multiple calls to Device Cloud (in chunks of 250).
:param list datapoints: a list of datapoints to be written to Device Cloud
:raises TypeError: if a list of datapoints is not provided
:raises ValueError: if any of the provided data points do not have all required
information (such as information about the stream)
:raises DeviceCloudHttpException: in the case of an unexpected error in communicating
with Device Cloud. | [
"Perform",
"a",
"bulk",
"write",
"(",
"or",
"set",
"of",
"writes",
")",
"of",
"a",
"collection",
"of",
"data",
"points"
] | python | train |
dustinmm80/healthy | package_utils.py | https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/package_utils.py#L60-L70 | def main():
"""
Main function for this module
"""
sandbox = create_sandbox()
directory = download_package_to_sandbox(
sandbox,
'https://pypi.python.org/packages/source/c/checkmyreqs/checkmyreqs-0.1.6.tar.gz'
)
print(directory)
destroy_sandbox(sandbox) | [
"def",
"main",
"(",
")",
":",
"sandbox",
"=",
"create_sandbox",
"(",
")",
"directory",
"=",
"download_package_to_sandbox",
"(",
"sandbox",
",",
"'https://pypi.python.org/packages/source/c/checkmyreqs/checkmyreqs-0.1.6.tar.gz'",
")",
"print",
"(",
"directory",
")",
"destroy_sandbox",
"(",
"sandbox",
")"
] | Main function for this module | [
"Main",
"function",
"for",
"this",
"module"
] | python | train |
LEMS/pylems | lems/parser/LEMS.py | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/parser/LEMS.py#L614-L632 | def parse_data_display(self, node):
"""
Parses <DataDisplay>
@param node: Node containing the <DataDisplay> element
@type node: xml.etree.Element
"""
if 'title' in node.lattrib:
title = node.lattrib['title']
else:
self.raise_error('<DataDisplay> must have a title.')
if 'dataregion' in node.lattrib:
data_region = node.lattrib['dataregion']
else:
data_region = None
self.current_simulation.add_data_display(DataDisplay(title, data_region)) | [
"def",
"parse_data_display",
"(",
"self",
",",
"node",
")",
":",
"if",
"'title'",
"in",
"node",
".",
"lattrib",
":",
"title",
"=",
"node",
".",
"lattrib",
"[",
"'title'",
"]",
"else",
":",
"self",
".",
"raise_error",
"(",
"'<DataDisplay> must have a title.'",
")",
"if",
"'dataregion'",
"in",
"node",
".",
"lattrib",
":",
"data_region",
"=",
"node",
".",
"lattrib",
"[",
"'dataregion'",
"]",
"else",
":",
"data_region",
"=",
"None",
"self",
".",
"current_simulation",
".",
"add_data_display",
"(",
"DataDisplay",
"(",
"title",
",",
"data_region",
")",
")"
] | Parses <DataDisplay>
@param node: Node containing the <DataDisplay> element
@type node: xml.etree.Element | [
"Parses",
"<DataDisplay",
">"
] | python | train |
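The attribute handling above follows a common "required vs optional" pattern; a generic ElementTree sketch of the same idea (LEMS's `node.lattrib` is assumed to be a lower-cased attribute dict):

```python
# Hedged sketch: required attribute raises, optional one defaults to None.
import xml.etree.ElementTree as ET

node = ET.fromstring('<DataDisplay title="Vm" dataregion="d0"/>')
lattrib = {k.lower(): v for k, v in node.attrib.items()}
title = lattrib["title"]                  # required -> KeyError if missing
data_region = lattrib.get("dataregion")   # optional -> None if missing
print(title, data_region)
```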
wright-group/WrightTools | WrightTools/artists/_helpers.py | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/artists/_helpers.py#L1010-L1045 | def stitch_to_animation(images, outpath=None, *, duration=0.5, palettesize=256, verbose=True):
"""Stitch a series of images into an animation.
Currently supports animated gifs, other formats coming as needed.
Parameters
----------
images : list of strings
Filepaths to the images to stitch together, in order of appearance.
outpath : string (optional)
Path of output, including extension. If None, bases output path on path
of first path in `images`. Default is None.
duration : number or list of numbers (optional)
Duration of (each) frame in seconds. Default is 0.5.
palettesize : int (optional)
The number of colors in the resulting animation. Input is rounded to
the nearest power of 2. Default is 256.
verbose : bool (optional)
Toggle talkback. Default is True.
"""
# parse filename
if outpath is None:
outpath = os.path.splitext(images[0])[0] + ".gif"
# write
t = wt_kit.Timer(verbose=False)
with t, imageio.get_writer(
outpath, mode="I", duration=duration, palettesize=palettesize
) as writer:
for p in images:
image = imageio.imread(p)
writer.append_data(image)
# finish
if verbose:
interval = np.round(t.interval, 2)
print("gif generated in {0} seconds - saved at {1}".format(interval, outpath))
return outpath | [
"def",
"stitch_to_animation",
"(",
"images",
",",
"outpath",
"=",
"None",
",",
"*",
",",
"duration",
"=",
"0.5",
",",
"palettesize",
"=",
"256",
",",
"verbose",
"=",
"True",
")",
":",
"# parse filename",
"if",
"outpath",
"is",
"None",
":",
"outpath",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"images",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"+",
"\".gif\"",
"# write",
"t",
"=",
"wt_kit",
".",
"Timer",
"(",
"verbose",
"=",
"False",
")",
"with",
"t",
",",
"imageio",
".",
"get_writer",
"(",
"outpath",
",",
"mode",
"=",
"\"I\"",
",",
"duration",
"=",
"duration",
",",
"palettesize",
"=",
"palettesize",
")",
"as",
"writer",
":",
"for",
"p",
"in",
"images",
":",
"image",
"=",
"imageio",
".",
"imread",
"(",
"p",
")",
"writer",
".",
"append_data",
"(",
"image",
")",
"# finish",
"if",
"verbose",
":",
"interval",
"=",
"np",
".",
"round",
"(",
"t",
".",
"interval",
",",
"2",
")",
"print",
"(",
"\"gif generated in {0} seconds - saved at {1}\"",
".",
"format",
"(",
"interval",
",",
"outpath",
")",
")",
"return",
"outpath"
] | Stitch a series of images into an animation.
Currently supports animated gifs, other formats coming as needed.
Parameters
----------
images : list of strings
Filepaths to the images to stitch together, in order of appearance.
outpath : string (optional)
Path of output, including extension. If None, bases output path on path
of first path in `images`. Default is None.
duration : number or list of numbers (optional)
Duration of (each) frame in seconds. Default is 0.5.
palettesize : int (optional)
The number of colors in the resulting animation. Input is rounded to
the nearest power of 2. Default is 256.
verbose : bool (optional)
Toggle talkback. Default is True. | [
"Stitch",
"a",
"series",
"of",
"images",
"into",
"an",
"animation",
"."
] | python | train |
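A hypothetical call (the import path and file names are assumptions; the frame images must already exist on disk):

```python
# Hedged sketch: stitch three PNG frames into a quarter-second-per-frame gif.
from WrightTools.artists import stitch_to_animation

frames = ["frame_000.png", "frame_001.png", "frame_002.png"]
out = stitch_to_animation(frames, "scan.gif", duration=0.25)
print(out)   # path of the written gif
```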
cogeotiff/rio-tiler | rio_tiler/cbers.py | https://github.com/cogeotiff/rio-tiler/blob/09bb0fc6cee556410477f016abbae172b12c46a6/rio_tiler/cbers.py#L148-L195 | def metadata(sceneid, pmin=2, pmax=98, **kwargs):
"""
Return band bounds and statistics.
Attributes
----------
sceneid : str
CBERS sceneid.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
kwargs : optional
These are passed to 'rio_tiler.utils.raster_get_stats'
e.g: histogram_bins=20, dst_crs='epsg:4326'
Returns
-------
out : dict
Dictionary with bounds and bands statistics.
"""
scene_params = _cbers_parse_scene_id(sceneid)
cbers_address = "{}/{}".format(CBERS_BUCKET, scene_params["key"])
bands = scene_params["bands"]
ref_band = scene_params["reference_band"]
info = {"sceneid": sceneid}
addresses = [
"{}/{}_BAND{}.tif".format(cbers_address, sceneid, band) for band in bands
]
_stats_worker = partial(
utils.raster_get_stats,
indexes=[1],
nodata=0,
overview_level=2,
percentiles=(pmin, pmax),
**kwargs
)
with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
responses = list(executor.map(_stats_worker, addresses))
info["bounds"] = [r["bounds"] for b, r in zip(bands, responses) if b == ref_band][0]
info["statistics"] = {
b: v for b, d in zip(bands, responses) for k, v in d["statistics"].items()
}
return info | [
"def",
"metadata",
"(",
"sceneid",
",",
"pmin",
"=",
"2",
",",
"pmax",
"=",
"98",
",",
"*",
"*",
"kwargs",
")",
":",
"scene_params",
"=",
"_cbers_parse_scene_id",
"(",
"sceneid",
")",
"cbers_address",
"=",
"\"{}/{}\"",
".",
"format",
"(",
"CBERS_BUCKET",
",",
"scene_params",
"[",
"\"key\"",
"]",
")",
"bands",
"=",
"scene_params",
"[",
"\"bands\"",
"]",
"ref_band",
"=",
"scene_params",
"[",
"\"reference_band\"",
"]",
"info",
"=",
"{",
"\"sceneid\"",
":",
"sceneid",
"}",
"addresses",
"=",
"[",
"\"{}/{}_BAND{}.tif\"",
".",
"format",
"(",
"cbers_address",
",",
"sceneid",
",",
"band",
")",
"for",
"band",
"in",
"bands",
"]",
"_stats_worker",
"=",
"partial",
"(",
"utils",
".",
"raster_get_stats",
",",
"indexes",
"=",
"[",
"1",
"]",
",",
"nodata",
"=",
"0",
",",
"overview_level",
"=",
"2",
",",
"percentiles",
"=",
"(",
"pmin",
",",
"pmax",
")",
",",
"*",
"*",
"kwargs",
")",
"with",
"futures",
".",
"ThreadPoolExecutor",
"(",
"max_workers",
"=",
"MAX_THREADS",
")",
"as",
"executor",
":",
"responses",
"=",
"list",
"(",
"executor",
".",
"map",
"(",
"_stats_worker",
",",
"addresses",
")",
")",
"info",
"[",
"\"bounds\"",
"]",
"=",
"[",
"r",
"[",
"\"bounds\"",
"]",
"for",
"b",
",",
"r",
"in",
"zip",
"(",
"bands",
",",
"responses",
")",
"if",
"b",
"==",
"ref_band",
"]",
"[",
"0",
"]",
"info",
"[",
"\"statistics\"",
"]",
"=",
"{",
"b",
":",
"v",
"for",
"b",
",",
"d",
"in",
"zip",
"(",
"bands",
",",
"responses",
")",
"for",
"k",
",",
"v",
"in",
"d",
"[",
"\"statistics\"",
"]",
".",
"items",
"(",
")",
"}",
"return",
"info"
] | Return band bounds and statistics.
Attributes
----------
sceneid : str
CBERS sceneid.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
kwargs : optional
These are passed to 'rio_tiler.utils.raster_get_stats'
e.g: histogram_bins=20, dst_crs='epsg:4326'
Returns
-------
out : dict
Dictionary with bounds and bands statistics. | [
"Return",
"band",
"bounds",
"and",
"statistics",
"."
] | python | train |
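A hypothetical call for the record above (the scene id is a placeholder in the CBERS-on-AWS naming style, and the call needs network access to the public bucket):

```python
# Hedged sketch: fetch bounds and per-band statistics for one scene.
from rio_tiler import cbers

meta = cbers.metadata("CBERS_4_MUX_20171121_057_094_L2", pmin=5, pmax=95)
print(meta["bounds"])
print(sorted(meta["statistics"]))   # one entry per band
```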
jilljenn/tryalgo | tryalgo/rabin_karp.py | https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/rabin_karp.py#L30-L57 | def rabin_karp_matching(s, t):
"""Find a substring by Rabin-Karp
:param s: the haystack string
:param t: the needle string
:returns: index i such that s[i: i + len(t)] == t, or -1
:complexity: O(len(s) + len(t)) in expected time,
and O(len(s) * len(t)) in worst case
"""
hash_s = 0
hash_t = 0
len_s = len(s)
len_t = len(t)
last_pos = pow(DOMAIN, len_t - 1) % PRIME
if len_s < len_t:
return -1
for i in range(len_t): # preprocessing
hash_s = (DOMAIN * hash_s + ord(s[i])) % PRIME
hash_t = (DOMAIN * hash_t + ord(t[i])) % PRIME
for i in range(len_s - len_t + 1):
if hash_s == hash_t: # check character by character
if matches(s, t, i, 0, len_t):
return i
if i < len_s - len_t:
hash_s = roll_hash(hash_s, ord(s[i]), ord(s[i + len_t]),
last_pos)
return -1 | [
"def",
"rabin_karp_matching",
"(",
"s",
",",
"t",
")",
":",
"hash_s",
"=",
"0",
"hash_t",
"=",
"0",
"len_s",
"=",
"len",
"(",
"s",
")",
"len_t",
"=",
"len",
"(",
"t",
")",
"last_pos",
"=",
"pow",
"(",
"DOMAIN",
",",
"len_t",
"-",
"1",
")",
"%",
"PRIME",
"if",
"len_s",
"<",
"len_t",
":",
"return",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"len_t",
")",
":",
"# preprocessing",
"hash_s",
"=",
"(",
"DOMAIN",
"*",
"hash_s",
"+",
"ord",
"(",
"s",
"[",
"i",
"]",
")",
")",
"%",
"PRIME",
"hash_t",
"=",
"(",
"DOMAIN",
"*",
"hash_t",
"+",
"ord",
"(",
"t",
"[",
"i",
"]",
")",
")",
"%",
"PRIME",
"for",
"i",
"in",
"range",
"(",
"len_s",
"-",
"len_t",
"+",
"1",
")",
":",
"if",
"hash_s",
"==",
"hash_t",
":",
"# check character by character",
"if",
"matches",
"(",
"s",
",",
"t",
",",
"i",
",",
"0",
",",
"len_t",
")",
":",
"return",
"i",
"if",
"i",
"<",
"len_s",
"-",
"len_t",
":",
"hash_s",
"=",
"roll_hash",
"(",
"hash_s",
",",
"ord",
"(",
"s",
"[",
"i",
"]",
")",
",",
"ord",
"(",
"s",
"[",
"i",
"+",
"len_t",
"]",
")",
",",
"last_pos",
")",
"return",
"-",
"1"
] | Find a substring by Rabin-Karp
:param s: the haystack string
:param t: the needle string
:returns: index i such that s[i: i + len(t)] == t, or -1
:complexity: O(len(s) + len(t)) in expected time,
and O(len(s) * len(t)) in worst case | [
"Find",
"a",
"substring",
"by",
"Rabin",
"-",
"Karp"
] | python | train |
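A small self-check of the matcher (indices verified by hand: `"ababbaba"[2:6] == "abba"`):

```python
# Hedged sketch: exercises both the hit and the miss paths.
from tryalgo.rabin_karp import rabin_karp_matching

assert rabin_karp_matching("ababbaba", "abba") == 2
assert rabin_karp_matching("ababbaba", "zz") == -1
print("ok")
```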
erkghlerngm44/malaffinity | malaffinity/__init__.py | https://github.com/erkghlerngm44/malaffinity/blob/d866b9198b668333f0b86567b2faebdb20587e30/malaffinity/__init__.py#L29-L44 | def calculate_affinity(user1, user2, round=False): # pragma: no cover
"""
Quick one-off affinity calculations.
Creates an instance of the ``MALAffinity`` class with ``user1``,
then calculates affinity with ``user2``.
:param str user1: First user
:param str user2: Second user
:param round: Decimal places to round affinity values to.
Specify ``False`` for no rounding
:type round: int or False
:return: (float affinity, int shared)
:rtype: tuple
"""
return MALAffinity(base_user=user1, round=round).calculate_affinity(user2) | [
"def",
"calculate_affinity",
"(",
"user1",
",",
"user2",
",",
"round",
"=",
"False",
")",
":",
"# pragma: no cover",
"return",
"MALAffinity",
"(",
"base_user",
"=",
"user1",
",",
"round",
"=",
"round",
")",
".",
"calculate_affinity",
"(",
"user2",
")"
] | Quick one-off affinity calculations.
Creates an instance of the ``MALAffinity`` class with ``user1``,
then calculates affinity with ``user2``.
:param str user1: First user
:param str user2: Second user
:param round: Decimal places to round affinity values to.
Specify ``False`` for no rounding
:type round: int or False
:return: (float affinity, int shared)
:rtype: tuple | [
"Quick",
"one",
"-",
"off",
"affinity",
"calculations",
"."
] | python | train |
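A hypothetical one-off comparison (usernames are placeholders, and the call needs network access to MyAnimeList):

```python
# Hedged sketch: round affinity values to two decimal places.
from malaffinity import calculate_affinity

affinity, shared = calculate_affinity("user_one", "user_two", round=2)
print("{}% affinity over {} shared rated anime".format(affinity, shared))
```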
bitesofcode/projexui | projexui/widgets/xchart/xchartscene.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchartscene.py#L60-L68 | def drawBackground(self, painter, rect):
"""
Draws the background for the chart scene.
:param painter | <QPainter>
rect | <QRect>
"""
chart = self.chart()
chart._drawBackground(self, painter, rect) | [
"def",
"drawBackground",
"(",
"self",
",",
"painter",
",",
"rect",
")",
":",
"chart",
"=",
"self",
".",
"chart",
"(",
")",
"chart",
".",
"_drawBackground",
"(",
"self",
",",
"painter",
",",
"rect",
")"
] | Draws the background for the chart scene.
:param painter | <QPainter>
rect | <QRect> | [
"Draws",
"the",
"background",
"for",
"the",
"chart",
"scene",
".",
":",
"param",
"painter",
"|",
"<QPainter",
">",
"rect",
"|",
"<QRect",
">"
] | python | train |
apache/airflow | airflow/contrib/operators/mssql_to_gcs.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mssql_to_gcs.py#L214-L222 | def convert_types(cls, value):
"""
Takes a value from MSSQL, and converts it to a value that's safe for
JSON/Google Cloud Storage/BigQuery.
"""
if isinstance(value, decimal.Decimal):
return float(value)
else:
return value | [
"def",
"convert_types",
"(",
"cls",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"decimal",
".",
"Decimal",
")",
":",
"return",
"float",
"(",
"value",
")",
"else",
":",
"return",
"value"
] | Takes a value from MSSQL, and converts it to a value that's safe for
JSON/Google Cloud Storage/BigQuery. | [
"Takes",
"a",
"value",
"from",
"MSSQL",
"and",
"converts",
"it",
"to",
"a",
"value",
"that",
"s",
"safe",
"for",
"JSON",
"/",
"Google",
"Cloud",
"Storage",
"/",
"BigQuery",
"."
] | python | test |
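The conversion in isolation, re-implemented standalone so it can be run directly:

```python
# Hedged sketch: Decimals become JSON-safe floats, everything else passes.
import decimal

def convert_types(value):
    return float(value) if isinstance(value, decimal.Decimal) else value

print(convert_types(decimal.Decimal("1.50")))   # 1.5
print(convert_types("unchanged"))               # unchanged
```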
ggravlingen/pytradfri | pytradfri/smart_task.py | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/smart_task.py#L255-L262 | def item_controller(self):
"""Method to control a task."""
return StartActionItemController(
self,
self.raw,
self.state,
self.path,
self.devices_dict) | [
"def",
"item_controller",
"(",
"self",
")",
":",
"return",
"StartActionItemController",
"(",
"self",
",",
"self",
".",
"raw",
",",
"self",
".",
"state",
",",
"self",
".",
"path",
",",
"self",
".",
"devices_dict",
")"
] | Method to control a task. | [
"Method",
"to",
"control",
"a",
"task",
"."
] | python | train |
rnwolf/jira-metrics-extract | jira_metrics_extract/cycletime.py | https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L658-L679 | def throughput_data(self, cycle_data, frequency='1D',pointscolumn= None):
"""Return a data frame with columns `completed_timestamp` of the
given frequency, either
`count`, where count is the number of items, or
`sum`, where sum is the sum of the value specified by pointscolumn (expected to be 'StoryPoints'),
completed at that timestamp (e.g. daily).
"""
if len(cycle_data)<1:
return None # No completed items yet, return None
if pointscolumn:
return cycle_data[['completed_timestamp', pointscolumn]] \
.rename(columns={pointscolumn: 'sum'}) \
.groupby('completed_timestamp').sum() \
.resample(frequency).sum() \
.fillna(0)
else:
return cycle_data[['completed_timestamp', 'key']] \
.rename(columns={'key': 'count'}) \
.groupby('completed_timestamp').count() \
.resample(frequency).sum() \
.fillna(0) | [
"def",
"throughput_data",
"(",
"self",
",",
"cycle_data",
",",
"frequency",
"=",
"'1D'",
",",
"pointscolumn",
"=",
"None",
")",
":",
"if",
"len",
"(",
"cycle_data",
")",
"<",
"1",
":",
"return",
"None",
"# Note completed items yet, return None",
"if",
"pointscolumn",
":",
"return",
"cycle_data",
"[",
"[",
"'completed_timestamp'",
",",
"pointscolumn",
"]",
"]",
".",
"rename",
"(",
"columns",
"=",
"{",
"pointscolumn",
":",
"'sum'",
"}",
")",
".",
"groupby",
"(",
"'completed_timestamp'",
")",
".",
"sum",
"(",
")",
".",
"resample",
"(",
"frequency",
")",
".",
"sum",
"(",
")",
".",
"fillna",
"(",
"0",
")",
"else",
":",
"return",
"cycle_data",
"[",
"[",
"'completed_timestamp'",
",",
"'key'",
"]",
"]",
".",
"rename",
"(",
"columns",
"=",
"{",
"'key'",
":",
"'count'",
"}",
")",
".",
"groupby",
"(",
"'completed_timestamp'",
")",
".",
"count",
"(",
")",
".",
"resample",
"(",
"frequency",
")",
".",
"sum",
"(",
")",
".",
"fillna",
"(",
"0",
")"
] | Return a data frame with columns `completed_timestamp` of the
given frequency, either
`count`, where count is the number of items, or
`sum`, where sum is the sum of the value specified by pointscolumn (expected to be 'StoryPoints'),
completed at that timestamp (e.g. daily). | [
"Return",
"a",
"data",
"frame",
"with",
"columns",
"completed_timestamp",
"of",
"the",
"given",
"frequency",
"either",
"count",
"where",
"count",
"is",
"the",
"number",
"of",
"items",
"sum",
"where",
"sum",
"is",
"the",
"sum",
"of",
"value",
"specified",
"by",
"pointscolumn",
".",
"Expected",
"to",
"be",
"StoryPoints",
"completed",
"at",
"that",
"timestamp",
"(",
"e",
".",
"g",
".",
"daily",
")",
"."
] | python | train |
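A standalone sketch of the resample-and-count branch with pandas (column names mirror the record; the data is made up):

```python
# Hedged sketch: daily throughput counts with gaps filled as zero.
import pandas as pd

cycle_data = pd.DataFrame({
    "completed_timestamp": pd.to_datetime(
        ["2018-01-01", "2018-01-01", "2018-01-03"]),
    "key": ["A-1", "A-2", "A-3"],
})
throughput = (cycle_data.rename(columns={"key": "count"})
              .groupby("completed_timestamp").count()
              .resample("1D").sum().fillna(0))
print(throughput)   # 2 on Jan 1, 0 on Jan 2, 1 on Jan 3
```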
aouyar/PyMunin | pymunin/__init__.py | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L448-L464 | def saveState(self, stateObj):
"""Utility methos to save plugin state stored in stateObj to persistent
storage to permit access to previous state in subsequent plugin runs.
Any object that can be pickled and unpickled can be used to store the
plugin state.
@param stateObj: Object that stores plugin state.
"""
try:
fp = open(self._stateFile, 'w')
pickle.dump(stateObj, fp)
except:
raise IOError("Failure in storing plugin state in file: %s"
% self._stateFile)
return True | [
"def",
"saveState",
"(",
"self",
",",
"stateObj",
")",
":",
"try",
":",
"fp",
"=",
"open",
"(",
"self",
".",
"_stateFile",
",",
"'w'",
")",
"pickle",
".",
"dump",
"(",
"stateObj",
",",
"fp",
")",
"except",
":",
"raise",
"IOError",
"(",
"\"Failure in storing plugin state in file: %s\"",
"%",
"self",
".",
"_stateFile",
")",
"return",
"True"
] | Utility method to save plugin state stored in stateObj to persistent
storage to permit access to previous state in subsequent plugin runs.
Any object that can be pickled and unpickled can be used to store the
plugin state.
@param stateObj: Object that stores plugin state. | [
"Utility",
"methos",
"to",
"save",
"plugin",
"state",
"stored",
"in",
"stateObj",
"to",
"persistent",
"storage",
"to",
"permit",
"access",
"to",
"previous",
"state",
"in",
"subsequent",
"plugin",
"runs",
".",
"Any",
"object",
"that",
"can",
"be",
"pickled",
"and",
"unpickled",
"can",
"be",
"used",
"to",
"store",
"the",
"plugin",
"state",
"."
] | python | train |
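The same pickle-to-disk pattern in isolation (the state file path is a placeholder; binary mode is used here because pickle requires it on Python 3, whereas the record opens the file in text mode):

```python
# Hedged sketch: persist a small state dict between plugin runs.
import pickle

state = {"last_run": 1234567890, "counters": {"hits": 42}}
with open("/tmp/plugin.state", "wb") as fp:   # placeholder path
    pickle.dump(state, fp)
```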
samghelms/mathviz | mathviz_hopper/src/bottle.py | https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/bottle.py#L954-L974 | def error(self, code=500, callback=None):
""" Register an output handler for a HTTP error code. Can
be used as a decorator or called directly ::
def error_handler_500(error):
return 'error_handler_500'
app.error(code=500, callback=error_handler_500)
@app.error(404)
def error_handler_404(error):
return 'error_handler_404'
"""
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
self.error_handler[int(code)] = callback
return callback
return decorator(callback) if callback else decorator | [
"def",
"error",
"(",
"self",
",",
"code",
"=",
"500",
",",
"callback",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"callback",
")",
":",
"if",
"isinstance",
"(",
"callback",
",",
"basestring",
")",
":",
"callback",
"=",
"load",
"(",
"callback",
")",
"self",
".",
"error_handler",
"[",
"int",
"(",
"code",
")",
"]",
"=",
"callback",
"return",
"callback",
"return",
"decorator",
"(",
"callback",
")",
"if",
"callback",
"else",
"decorator"
] | Register an output handler for a HTTP error code. Can
be used as a decorator or called directly ::
def error_handler_500(error):
return 'error_handler_500'
app.error(code=500, callback=error_handler_500)
@app.error(404)
def error_handler_404(error):
return 'error_handler_404' | [
"Register",
"an",
"output",
"handler",
"for",
"a",
"HTTP",
"error",
"code",
".",
"Can",
"be",
"used",
"as",
"a",
"decorator",
"or",
"called",
"directly",
"::"
] | python | train |
keon/algorithms | algorithms/calculator/math_parser.py | https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/calculator/math_parser.py#L68-L75 | def apply_operation(op_stack, out_stack):
"""
Apply operation to the first 2 items of the output queue
op_stack Deque (reference)
out_stack Deque (reference)
"""
out_stack.append(calc(out_stack.pop(), out_stack.pop(), op_stack.pop())) | [
"def",
"apply_operation",
"(",
"op_stack",
",",
"out_stack",
")",
":",
"out_stack",
".",
"append",
"(",
"calc",
"(",
"out_stack",
".",
"pop",
"(",
")",
",",
"out_stack",
".",
"pop",
"(",
")",
",",
"op_stack",
".",
"pop",
"(",
")",
")",
")"
] | Apply operation to the first 2 items of the output queue
op_stack Deque (reference)
out_stack Deque (reference) | [
"Apply",
"operation",
"to",
"the",
"first",
"2",
"items",
"of",
"the",
"output",
"queue",
"op_stack",
"Deque",
"(",
"reference",
")",
"out_stack",
"Deque",
"(",
"reference",
")"
] | python | train |
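A minimal sketch of this shunting-yard reduction step, assuming a `calc()` whose first argument is the right-hand operand (that matches the pop order in the record):

```python
# Hedged sketch: reduce the top two operands with the top operator.
from collections import deque

def calc(rhs, lhs, op):   # assumed signature: right operand first
    return {"+": lhs + rhs, "-": lhs - rhs,
            "*": lhs * rhs, "/": lhs / rhs}[op]

op_stack, out_stack = deque(["+"]), deque([2, 3])
out_stack.append(calc(out_stack.pop(), out_stack.pop(), op_stack.pop()))
print(out_stack)   # deque([5])
```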