repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | docstring (string, 1-46.9k chars) | language (python) | partition (train/valid/test)
---|---|---|---|---|---|---

wummel/linkchecker | linkcheck/configuration/confparse.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/configuration/confparse.py#L64-L70 |
def read_string_option (self, section, option, allowempty=False):
    """Read a string option."""
    if self.has_option(section, option):
        value = self.get(section, option)
        if not allowempty and not value:
            raise LinkCheckerError(_("invalid empty value for %s: %s\n") % (option, value))
        self.config[option] = value
| Read a string option. | python | train |

PmagPy/PmagPy | pmagpy/pmagplotlib.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmagplotlib.py#L198-L216 |
def k_s(X):
    """
    Kolmorgorov-Smirnov statistic. Finds the
    probability that the data are distributed
    as func - used method of Numerical Recipes (Press et al., 1986)
    """
    xbar, sigma = pmag.gausspars(X)
    d, f = 0, 0.
    for i in range(1, len(X) + 1):
        b = old_div(float(i), float(len(X)))
        a = gaussfunc(X[i - 1], xbar, sigma)
        if abs(f - a) > abs(b - a):
            delta = abs(f - a)
        else:
            delta = abs(b - a)
        if delta > d:
            d = delta
        f = b
    return d, xbar, sigma
| Kolmorgorov-Smirnov statistic. Finds the probability that the data are distributed as func - used method of Numerical Recipes (Press et al., 1986) | python | train |

acorg/dark-matter | dark/aa.py | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/aa.py#L982-L1022 |
def find(s):
    """
    Find an amino acid whose name or abbreviation is s.
    @param s: A C{str} amino acid specifier. This may be a full name,
        a 3-letter abbreviation or a 1-letter abbreviation. Case is ignored.
    return: An L{AminoAcid} instance or C{None} if no matching amino acid can
        be located.
    """
    abbrev1 = None
    origS = s
    if ' ' in s:
        # Convert first word to title case, others to lower.
        first, rest = s.split(' ', 1)
        s = first.title() + ' ' + rest.lower()
    else:
        s = s.title()
    if s in NAMES:
        abbrev1 = s
    elif s in ABBREV3_TO_ABBREV1:
        abbrev1 = ABBREV3_TO_ABBREV1[s]
    elif s in NAMES_TO_ABBREV1:
        abbrev1 = NAMES_TO_ABBREV1[s]
    else:
        # Look for a 3-letter codon.
        def findCodon(target):
            for abbrev1, codons in CODONS.items():
                for codon in codons:
                    if codon == target:
                        return abbrev1
        abbrev1 = findCodon(origS.upper())
    if abbrev1:
        return AminoAcid(
            NAMES[abbrev1], ABBREV3[abbrev1], abbrev1, CODONS[abbrev1],
            PROPERTIES[abbrev1], PROPERTY_DETAILS[abbrev1],
            PROPERTY_CLUSTERS[abbrev1])
| Find an amino acid whose name or abbreviation is s. @param s: A C{str} amino acid specifier. This may be a full name, a 3-letter abbreviation or a 1-letter abbreviation. Case is ignored. return: An L{AminoAcid} instance or C{None} if no matching amino acid can be located. | python | train |

OnroerendErfgoed/crabpy_pyramid | crabpy_pyramid/renderers/capakey.py | https://github.com/OnroerendErfgoed/crabpy_pyramid/blob/b727ea55838d71575db96e987b536a0bac9f6a7a/crabpy_pyramid/renderers/capakey.py#L49-L59 |
def list_perceel_adapter(obj, request):
    """
    Adapter for rendering a list of
    :class: `crabpy.gateway.capakey.Perceel` to json.
    """
    return {
        'id': obj.id,
        'sectie': obj.sectie,
        'capakey': obj.capakey,
        'percid': obj.percid
    }
| Adapter for rendering a list of :class: `crabpy.gateway.capakey.Perceel` to json. | python | train |

tensorflow/probability | tensorflow_probability/python/layers/dense_variational.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/dense_variational.py#L215-L254 |
def get_config(self):
    """Returns the config of the layer.
    A layer config is a Python dictionary (serializable) containing the
    configuration of a layer. The same layer can be reinstantiated later
    (without its trained weights) from this configuration.
    Returns:
      config: A Python dictionary of class keyword arguments and their
        serialized values.
    """
    config = {
        'units': self.units,
        'activation': (tf.keras.activations.serialize(self.activation)
                       if self.activation else None),
        'activity_regularizer':
            tf.keras.initializers.serialize(self.activity_regularizer),
    }
    function_keys = [
        'kernel_posterior_fn',
        'kernel_posterior_tensor_fn',
        'kernel_prior_fn',
        'kernel_divergence_fn',
        'bias_posterior_fn',
        'bias_posterior_tensor_fn',
        'bias_prior_fn',
        'bias_divergence_fn',
    ]
    for function_key in function_keys:
        function = getattr(self, function_key)
        if function is None:
            function_name = None
            function_type = None
        else:
            function_name, function_type = tfp_layers_util.serialize_function(
                function)
        config[function_key] = function_name
        config[function_key + '_type'] = function_type
    base_config = super(_DenseVariational, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| Returns the config of the layer. A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. Returns: config: A Python dictionary of class keyword arguments and their serialized values. | python | test |

pkkid/python-plexapi | plexapi/settings.py | https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/settings.py#L32-L40 |
def _loadData(self, data):
    """ Load attribute values from Plex XML response. """
    self._data = data
    for elem in data:
        id = utils.lowerFirst(elem.attrib['id'])
        if id in self._settings:
            self._settings[id]._loadData(elem)
            continue
        self._settings[id] = Setting(self._server, elem, self._initpath)
| Load attribute values from Plex XML response. | python | train |

fkmclane/python-ardrone | ardrone/drone.py | https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L87-L91 |
def reset(self):
    """Toggle the drone's emergency state."""
    self.at(ardrone.at.ref, False, True)
    time.sleep(0.1)
    self.at(ardrone.at.ref, False, False)
| Toggle the drone's emergency state. | python | train |

angr/angr | angr/analyses/forward_analysis.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/forward_analysis.py#L800-L812 |
def _peek_job(self, pos):
    """
    Return the job currently at position `pos`, but still keep it in the job queue. An IndexError will be raised
    if that position does not currently exist in the job list.
    :param int pos: Position of the job to get.
    :return: The job
    """
    if pos < len(self._job_info_queue):
        return self._job_info_queue[pos].job
    raise IndexError()
| Return the job currently at position `pos`, but still keep it in the job queue. An IndexError will be raised if that position does not currently exist in the job list. :param int pos: Position of the job to get. :return: The job | python | train |

AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/werkzeug/contrib/cache.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/werkzeug/contrib/cache.py#L206-L217 |
def inc(self, key, delta=1):
    """Increments the value of a key by `delta`. If the key does
    not yet exist it is initialized with `delta`.
    For supporting caches this is an atomic operation.
    :param key: the key to increment.
    :param delta: the delta to add.
    :returns: The new value or ``None`` for backend errors.
    """
    value = (self.get(key) or 0) + delta
    return value if self.set(key, value) else None
| Increments the value of a key by `delta`. If the key does not yet exist it is initialized with `delta`. For supporting caches this is an atomic operation. :param key: the key to increment. :param delta: the delta to add. :returns: The new value or ``None`` for backend errors. | python | test |

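A minimal usage sketch (not part of the dataset row): this method lives on werkzeug's cache base class, so a concrete backend such as SimpleCache inherits it; the calls below assume the old werkzeug.contrib.cache module that this vendored file comes from.

# Hedged sketch; assumes werkzeug's contrib cache backends are importable.
from werkzeug.contrib.cache import SimpleCache

cache = SimpleCache()
cache.set('hits', 41)
cache.inc('hits')        # returns 42: existing value 41 plus the default delta of 1
cache.inc('misses', 5)   # returns 5: a missing key is treated as 0 and initialized with delta
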
dcaune/perseus-lib-python-common | exifread/classes.py | https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/exifread/classes.py#L64-L84 |
def s2n(self, offset, length, signed=0):
    """
    Convert slice to integer, based on sign and endian flags.
    Usually this offset is assumed to be relative to the beginning of the
    start of the EXIF information.
    For some cameras that use relative tags, this offset may be relative
    to some other starting point.
    """
    self.file.seek(self.offset + offset)
    sliced = self.file.read(length)
    if self.endian == 'I':
        val = s2n_intel(sliced)
    else:
        val = s2n_motorola(sliced)
    # Sign extension?
    if signed:
        msb = 1 << (8 * length - 1)
        if val & msb:
            val -= (msb << 1)
    return val
| Convert slice to integer, based on sign and endian flags. Usually this offset is assumed to be relative to the beginning of the start of the EXIF information. For some cameras that use relative tags, this offset may be relative to some other starting point. | python | train |

NicolasLM/spinach | spinach/brokers/base.py | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/base.py#L102-L107 |
def next_future_job_delta(self) -> Optional[float]:
    """Give the amount of seconds before the next future job is due."""
    job = self._get_next_future_job()
    if not job:
        return None
    return (job.at - datetime.now(timezone.utc)).total_seconds()
| Give the amount of seconds before the next future job is due. | python | train |

apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor_pool.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor_pool.py#L963-L983 |
def _GetTypeFromScope(self, package, type_name, scope):
    """Finds a given type name in the current scope.
    Args:
      package: The package the proto should be located in.
      type_name: The name of the type to be found in the scope.
      scope: Dict mapping short and full symbols to message and enum types.
    Returns:
      The descriptor for the requested type.
    """
    if type_name not in scope:
        components = _PrefixWithDot(package).split('.')
        while components:
            possible_match = '.'.join(components + [type_name])
            if possible_match in scope:
                type_name = possible_match
                break
            else:
                components.pop(-1)
    return scope[type_name]
| Finds a given type name in the current scope. Args: package: The package the proto should be located in. type_name: The name of the type to be found in the scope. scope: Dict mapping short and full symbols to message and enum types. Returns: The descriptor for the requested type. | python | train |

Jarn/jarn.viewdoc | jarn/viewdoc/viewdoc.py | https://github.com/Jarn/jarn.viewdoc/blob/59ae82fd1658889c41096c1d8c08dcb1047dc349/jarn/viewdoc/viewdoc.py#L341-L348 |
def publish_string(self, rest, outfile, styles=''):
    """Render a reST string as HTML.
    """
    html = self.convert_string(rest)
    html = self.strip_xml_header(html)
    html = self.apply_styles(html, styles)
    self.write_file(html, outfile)
    return outfile
| Render a reST string as HTML. | python | train |

polyaxon/polyaxon-cli | polyaxon_cli/cli/check.py | https://github.com/polyaxon/polyaxon-cli/blob/a7f5eed74d4d909cad79059f3c21c58606881449/polyaxon_cli/cli/check.py#L67-L98 |
def check(file,  # pylint:disable=redefined-builtin
          version,
          definition):
    """Check a polyaxonfile."""
    file = file or 'polyaxonfile.yaml'
    specification = check_polyaxonfile(file).specification
    if version:
        Printer.decorate_format_value('The version is: {}',
                                      specification.version,
                                      'yellow')
    if definition:
        job_condition = (specification.is_job or
                         specification.is_build or
                         specification.is_notebook or
                         specification.is_tensorboard)
        if specification.is_experiment:
            Printer.decorate_format_value('This polyaxon specification has {}',
                                          'One experiment',
                                          'yellow')
        if job_condition:
            Printer.decorate_format_value('This {} polyaxon specification is valid',
                                          specification.kind,
                                          'yellow')
        if specification.is_group:
            experiments_def = specification.experiments_def
            click.echo(
                'This polyaxon specification has experiment group with the following definition:')
            get_group_experiments_info(**experiments_def)
    return specification
| Check a polyaxonfile. | python | valid |

HPCC-Cloud-Computing/CAL | calplus/v1/object_storage/client.py | https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/object_storage/client.py#L53-L60 |
def stat_container(self, container):
    """Stat container metadata
    :param container: container name (Container is equivalent to
                      Bucket term in Amazon).
    """
    LOG.debug('stat_container() with %s is success.', self.driver)
    return self.driver.stat_container(container)
| Stat container metadata :param container: container name (Container is equivalent to Bucket term in Amazon). | python | train |

hadrianl/huobi | huobitrade/service.py | https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/service.py#L76-L86 |
def get_last_depth(self, symbol, _type, _async=False):
    """
    获取marketdepth
    :param symbol
    :param type: 可选值:{ percent10, step0, step1, step2, step3, step4, step5 }
    :return:
    """
    params = {'symbol': symbol, 'type': _type}
    url = u.MARKET_URL + '/market/depth'
    return http_get_request(url, params, _async=_async)
| 获取marketdepth :param symbol :param type: 可选值:{ percent10, step0, step1, step2, step3, step4, step5 } :return: | python | train |

jrief/djangocms-cascade | cmsplugin_cascade/plugin_base.py | https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/plugin_base.py#L282-L295 |
def _get_parent_classes_transparent(cls, slot, page, instance=None):
    """
    Return all parent classes including those marked as "transparent".
    """
    parent_classes = super(CascadePluginBase, cls).get_parent_classes(slot, page, instance)
    if parent_classes is None:
        if cls.get_require_parent(slot, page) is False:
            return
        parent_classes = []
    # add all plugins marked as 'transparent', since they all are potential parents
    parent_classes = set(parent_classes)
    parent_classes.update(TransparentContainer.get_plugins())
    return list(parent_classes)
| Return all parent classes including those marked as "transparent". | python | train |

esterhui/pypu | pypu/service_facebook.py | https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L377-L466 |
def _upload_or_replace_fb(self,directory,fn,_album_id,\
        _megapixels=None,resize_request=None,movealbum_request=None,\
        changetitle_request=None,_title=None):
    """Does the actual upload to fb.
    if resize_request, will resize picture only if
    it already exists and the geometry on fb doesn't match
    what we want,
    returns (status)"""
    # We should check here if
    db=self._loadDB(directory)
    # If resize request, make tempfile and
    # resize.
    if _megapixels:
        fp = tempfile.NamedTemporaryFile()
        fullfile_resized=fp.name
        logger.debug("tempfile for resized is %s"%(fp.name))
    fullfile=os.path.join(directory,fn)
    # If JPEG, then resize
    ext=os.path.splitext(fullfile)[1].lower()
    if ext=='.jpg':
        isJPG=True
    else:
        isJPG=False
    # If already in DB, remove first, then overwrite
    if fn in db:
        pid=db[fn]['photoid']
        if resize_request and isJPG:
            logger.info("fb: Resize request for %s",fn)
            if self._already_resized_on_fb(fullfile,pid,_megapixels):
                logger.debug("%s - Already in DB and resized, skipping",fn)
                return True
        elif movealbum_request:
            logger.info("fb: Move album request for %s",fn)
            if self._already_in_album(fullfile,pid,_album_id):
                logger.debug("%s - Already in DB and in correct album, skipping",fn)
                return True
        elif changetitle_request:
            logger.info("fb: Change title request for %s",fn)
            if self._title_uptodate(fullfile,pid,_title):
                logger.debug("%s - Already in DB and title up to date, skipping",fn)
                return True
        # --- If we are here it means photo should be updated.
        # With FB graph API this means removing the photo
        # and uploading with new meta data.
        logger.debug("%s - Already in DB, removing first",fn)
        if not self._remove_media(directory,fn):
            logger.error("%s - fb: couldn't replace (remove) file\n",fn)
            return False
    # Do we have to resize?
    if _megapixels and isJPG:
        if pusher_utils.resize_image(fullfile,fullfile_resized,_megapixels):
            logger.debug("%s resized to %s successfully"\
                %(fullfile,fullfile_resized))
            fullfile=fullfile_resized
        else:
            logger.warning("%s couldn't resize, uploading original"\
                %(fullfile))
    logger.debug("Upload %s to fb, album=%s, title='%s'",\
        fn,_album_id,_title)
    # We can get a place id by doing a search
    # http://graph.facebook.com/search?type=city&center=37,-122&distance=1000
    # Do the actual upload
    resp=self.fb.put_photo(open(fullfile),\
        message=_title,album_id=_album_id,\
        )
        #place='106377336067638'\
    logger.debug("%s - Upload response is : %s"%(fn,resp))
    if not resp.has_key('id'):
        print("%s - fb: upload failed", fn)
        return False
    pid=resp['id']
    db[fn]={}
    db[fn]['photoid']=pid
    logger.debug("%s - fb: uploaded with photoid %s",fn,pid);
    self._saveDB(directory,db)
    return True
| Does the actual upload to fb. if resize_request, will resize picture only if it already exists and the geometry on fb doesn't match what we want, returns (status) | python | train |

python-xlib/python-xlib | examples/run_examples.py | https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/examples/run_examples.py#L34-L41 |
def run_example(path):
    """ Returns returncode of example """
    cmd = "{0} {1}".format(sys.executable, path)
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    res = proc.communicate()
    if proc.returncode:
        print(res[1].decode())
    return proc.returncode
| Returns returncode of example | python | train |

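A brief usage sketch (illustrative only; the example path below is hypothetical and not taken from the dataset): the helper runs a script with the current interpreter, prints its stderr on failure, and hands the return code back to the caller.

# Hypothetical call site for the function above.
import sys
rc = run_example("examples/some_example.py")
if rc:
    sys.exit(rc)
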
Clinical-Genomics/trailblazer | trailblazer/mip/config.py | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/config.py#L65-L79 |
def prepare_config(data: dict) -> dict:
    """Prepare the config data."""
    data_copy = deepcopy(data)
    # handle single sample cases with 'unknown' phenotype
    if len(data_copy['samples']) == 1:
        if data_copy['samples'][0]['phenotype'] == 'unknown':
            LOG.info("setting 'unknown' phenotype to 'unaffected'")
            data_copy['samples'][0]['phenotype'] = 'unaffected'
    # set the mother/father to '0' if they are not set for a sample
    for sample_data in data_copy['samples']:
        sample_data['mother'] = sample_data.get('mother') or '0'
        sample_data['father'] = sample_data.get('father') or '0'
        if sample_data['analysis_type'] == 'wgs' and sample_data.get('capture_kit') is None:
            sample_data['capture_kit'] = DEFAULT_CAPTURE_KIT
    return data_copy
| Prepare the config data. | python | train |

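A sketch of the single-sample behavior described above. The input dict is hypothetical and only carries the keys the function actually touches; LOG and DEFAULT_CAPTURE_KIT are module-level names in the source file.

# Hypothetical input; keys mirror those accessed in prepare_config above.
data = {'samples': [{'phenotype': 'unknown', 'analysis_type': 'wgs'}]}
result = prepare_config(data)
# result['samples'][0]['phenotype']   -> 'unaffected' (single 'unknown' sample is flipped)
# result['samples'][0]['mother']      -> '0' (missing parent defaults to '0'; same for 'father')
# result['samples'][0]['capture_kit'] -> DEFAULT_CAPTURE_KIT (wgs sample without a kit)
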
hydraplatform/hydra-base | hydra_base/lib/template.py | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/template.py#L807-L850 |
def set_network_template(template_id, network_id, **kwargs):
    """
    Apply an existing template to a network. Used when a template has changed, and additional attributes
    must be added to the network's elements.
    """
    resource_types = []
    #There should only ever be one matching type, but if there are more,
    #all we can do is pick the first one.
    try:
        network_type = db.DBSession.query(ResourceType).filter(ResourceType.ref_key=='NETWORK',
                                                                ResourceType.network_id==network_id,
                                                                ResourceType.type_id==TemplateType.type_id,
                                                                TemplateType.template_id==template_id).one()
        resource_types.append(network_type)
    except NoResultFound:
        log.debug("No network type to set.")
        pass
    node_types = db.DBSession.query(ResourceType).filter(ResourceType.ref_key=='NODE',
                                                         ResourceType.node_id==Node.node_id,
                                                         Node.network_id==network_id,
                                                         ResourceType.type_id==TemplateType.type_id,
                                                         TemplateType.template_id==template_id).all()
    link_types = db.DBSession.query(ResourceType).filter(ResourceType.ref_key=='LINK',
                                                         ResourceType.link_id==Link.link_id,
                                                         Link.network_id==network_id,
                                                         ResourceType.type_id==TemplateType.type_id,
                                                         TemplateType.template_id==template_id).all()
    group_types = db.DBSession.query(ResourceType).filter(ResourceType.ref_key=='GROUP',
                                                          ResourceType.group_id==ResourceGroup.group_id,
                                                          ResourceGroup.network_id==network_id,
                                                          ResourceType.type_id==TemplateType.type_id,
                                                          TemplateType.template_id==template_id).all()
    resource_types.extend(node_types)
    resource_types.extend(link_types)
    resource_types.extend(group_types)
    assign_types_to_resources(resource_types)
    log.debug("Finished setting network template")
| Apply an existing template to a network. Used when a template has changed, and additional attributes must be added to the network's elements. | python | train |

OpenGov/carpenter | carpenter/blocks/tableanalyzer.py | https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/tableanalyzer.py#L310-L377 |
def _find_block_start(self, table, used_cells, possible_block_start, start_pos, end_pos):
    '''
    Finds the start of a block from a suggested start location. This location can be at a lower
    column but not a lower row. The function traverses columns until it finds a stopping
    condition or a repeat condition that restarts on the next column.
    Note this also finds the lowest row of block_end.
    '''
    current_col = possible_block_start[1]
    block_start = list(possible_block_start)
    block_end = list(possible_block_start)
    repeat = True
    checked_all = False
    # Repeat until we've met satisfactory conditions for catching all edge cases or we've
    # checked all valid block locations
    while not checked_all and repeat:
        block_end[0] = max(block_end[0], possible_block_start[0])
        block_end[1] = max(block_end[1], current_col)
        single_titled_block = True
        table_column = TableTranspose(table)[current_col]
        used_column = TableTranspose(used_cells)[current_col]
        # We need to find a non empty cell before we can stop
        blank_start = is_empty_cell(table_column[possible_block_start[0]])
        blank_exited = not blank_start
        # Unless we have assume_complete_blocks set to True
        if blank_start and self.assume_complete_blocks:
            # Found a blank? We're done
            repeat = False
            break
        #TODO refactor code below into new function for easier reading
        # Analyze the beginning columns
        for row_index in xrange(possible_block_start[0], end_pos[0] + 1):
            # Ensure we catch the edge case of the data reaching the edge of the table --
            # block_end should then equal end_pos
            if blank_exited:
                block_end[0] = max(block_end[0], row_index)
            if row_index == end_pos[0] or used_column[row_index]:
                # We've gone through the whole range
                checked_all = True
                repeat = False
                break
            if not blank_exited:
                blank_exited = not is_empty_cell(table_column[row_index])
            if single_titled_block and not self._single_length_title(table, row_index, current_col):
                single_titled_block = False
                # If we saw single length titles for several more than threshold rows, then we
                # have a unique block before an actual content block
                if self._above_blank_repeat_threshold(possible_block_start[0], row_index):
                    repeat = False
                    break
            if is_empty_cell(table_column[row_index]) and len(table[row_index]) > current_col + 1:
                current_col += 1
                break
            # Go find the left most column that's still valid
            table_row = table[row_index]
            used_row = used_cells[row_index]
            for column_index in range(current_col, start_pos[1] - 1, -1):
                if is_empty_cell(table_row[column_index]) or used_row[column_index]:
                    break
                else:
                    block_start[1] = min(block_start[1], column_index)
        # Check if we've seen few enough cells to guess that we have a repeating title
        repeat = blank_start or self._below_blank_repeat_threshold(possible_block_start[0], row_index)
    return block_start, block_end
| Finds the start of a block from a suggested start location. This location can be at a lower column but not a lower row. The function traverses columns until it finds a stopping condition or a repeat condition that restarts on the next column. Note this also finds the lowest row of block_end. | python | train |

cloudendpoints/endpoints-python | endpoints/message_parser.py | https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/message_parser.py#L154-L227 |
def __message_to_schema(self, message_type):
    """Parse a single message into JSON Schema.
    Will recursively descend the message structure
    and also parse other messages references via MessageFields.
    Args:
      message_type: protorpc.messages.Message class to parse.
    Returns:
      An object representation of the schema.
    """
    name = self.__normalized_name(message_type)
    schema = {
        'id': name,
        'type': 'object',
    }
    if message_type.__doc__:
        schema['description'] = message_type.__doc__
    properties = {}
    for field in message_type.all_fields():
        descriptor = {}
        # Info about the type of this field. This is either merged with
        # the descriptor or it's placed within the descriptor's 'items'
        # property, depending on whether this is a repeated field or not.
        type_info = {}
        if type(field) == messages.MessageField:
            field_type = field.type().__class__
            type_info['$ref'] = self.add_message(field_type)
            if field_type.__doc__:
                descriptor['description'] = field_type.__doc__
        else:
            schema_type = self.__FIELD_TO_SCHEMA_TYPE_MAP.get(
                type(field), self.__DEFAULT_SCHEMA_TYPE)
            # If the map pointed to a dictionary, check if the field's variant
            # is in that dictionary and use the type specified there.
            if isinstance(schema_type, dict):
                variant_map = schema_type
                variant = getattr(field, 'variant', None)
                if variant in variant_map:
                    schema_type = variant_map[variant]
                else:
                    # The variant map needs to specify a default value, mapped by None.
                    schema_type = variant_map[None]
            type_info['type'] = schema_type[0]
            if schema_type[1]:
                type_info['format'] = schema_type[1]
            if type(field) == messages.EnumField:
                sorted_enums = sorted([enum_info for enum_info in field.type],
                                      key=lambda enum_info: enum_info.number)
                type_info['enum'] = [enum_info.name for enum_info in sorted_enums]
        if field.required:
            descriptor['required'] = True
        if field.default:
            if type(field) == messages.EnumField:
                descriptor['default'] = str(field.default)
            else:
                descriptor['default'] = field.default
        if field.repeated:
            descriptor['items'] = type_info
            descriptor['type'] = 'array'
        else:
            descriptor.update(type_info)
        properties[field.name] = descriptor
    schema['properties'] = properties
    return schema
| Parse a single message into JSON Schema. Will recursively descend the message structure and also parse other messages references via MessageFields. Args: message_type: protorpc.messages.Message class to parse. Returns: An object representation of the schema. | python | train |

quantumlib/Cirq | cirq/circuits/circuit.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/circuit.py#L1528-L1542 |
def to_qasm(self,
            header: Optional[str] = None,
            precision: int = 10,
            qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
            ) -> str:
    """Returns QASM equivalent to the circuit.
    Args:
        header: A multi-line string that is placed in a comment at the top
            of the QASM. Defaults to a cirq version specifier.
        precision: Number of digits to use when representing numbers.
        qubit_order: Determines how qubits are ordered in the QASM
            register.
    """
    return str(self._to_qasm_output(header, precision, qubit_order))
| Returns QASM equivalent to the circuit. Args: header: A multi-line string that is placed in a comment at the top of the QASM. Defaults to a cirq version specifier. precision: Number of digits to use when representing numbers. qubit_order: Determines how qubits are ordered in the QASM register. | python | train |

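A hedged usage sketch, not from the dataset: it assumes a cirq release contemporary with this snippet, where LineQubit and Circuit.from_ops are available; circuit construction APIs have changed across cirq versions.

# Sketch only; exact construction calls may differ in newer cirq releases.
import cirq
a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit.from_ops(cirq.H(a), cirq.CNOT(a, b), cirq.measure(a, b))
print(circuit.to_qasm())  # emits an OPENQASM 2.0 program describing the circuit
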
google/grr | grr/core/grr_response_core/lib/parsers/linux_file_parser.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/linux_file_parser.py#L772-L803 | def _ParseShVariables(self, lines):
"""Extract env_var and path values from sh derivative shells.
Iterates over each line, word by word searching for statements that set the
path. These are either variables, or conditions that would allow a variable
to be set later in the line (e.g. export).
Args:
lines: A list of lines, each of which is a list of space separated words.
Returns:
a dictionary of path names and values.
"""
paths = {}
for line in lines:
for entry in line:
if "=" in entry:
# Pad out the list so that it's always 2 elements, even if the split
# failed.
target, vals = (entry.split("=", 1) + [""])[:2]
if vals:
path_vals = vals.split(":")
else:
path_vals = []
self._ExpandPath(target, path_vals, paths)
elif entry not in self._SH_CONTINUATION:
# Stop processing the line unless the entry might allow paths to still
# be set, e.g.
# reserved words: "export"
# conditions: { PATH=VAL } && PATH=:$PATH || PATH=.
break
return paths | [
"def",
"_ParseShVariables",
"(",
"self",
",",
"lines",
")",
":",
"paths",
"=",
"{",
"}",
"for",
"line",
"in",
"lines",
":",
"for",
"entry",
"in",
"line",
":",
"if",
"\"=\"",
"in",
"entry",
":",
"# Pad out the list so that it's always 2 elements, even if the split",
"# failed.",
"target",
",",
"vals",
"=",
"(",
"entry",
".",
"split",
"(",
"\"=\"",
",",
"1",
")",
"+",
"[",
"\"\"",
"]",
")",
"[",
":",
"2",
"]",
"if",
"vals",
":",
"path_vals",
"=",
"vals",
".",
"split",
"(",
"\":\"",
")",
"else",
":",
"path_vals",
"=",
"[",
"]",
"self",
".",
"_ExpandPath",
"(",
"target",
",",
"path_vals",
",",
"paths",
")",
"elif",
"entry",
"not",
"in",
"self",
".",
"_SH_CONTINUATION",
":",
"# Stop processing the line unless the entry might allow paths to still",
"# be set, e.g.",
"# reserved words: \"export\"",
"# conditions: { PATH=VAL } && PATH=:$PATH || PATH=.",
"break",
"return",
"paths"
] | Extract env_var and path values from sh derivative shells.
Iterates over each line, word by word searching for statements that set the
path. These are either variables, or conditions that would allow a variable
to be set later in the line (e.g. export).
Args:
lines: A list of lines, each of which is a list of space separated words.
Returns:
a dictionary of path names and values. | [
"Extract",
"env_var",
"and",
"path",
"values",
"from",
"sh",
"derivative",
"shells",
"."
] | python | train |
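A standalone sketch of the PATH-parsing idea described in the _ParseShVariables record above; the continuation set and helper name below are illustrative stand-ins, not the GRR implementation.

SH_CONTINUATION = {"export", "set", "&&", "||"}   # assumed reserved words / conditions

def parse_sh_paths(lines):
    """lines: a list of lines, each already split into whitespace-separated words."""
    paths = {}
    for line in lines:
        for word in line:
            if "=" in word:
                target, _, vals = word.partition("=")
                paths.setdefault(target, []).extend(vals.split(":") if vals else [])
            elif word not in SH_CONTINUATION:
                break   # nothing later on this line can still set a path
    return paths

print(parse_sh_paths([["export", "PATH=/usr/bin:/bin"], ["echo", "PATH=/tmp"]]))
# {'PATH': ['/usr/bin', '/bin']}  -- the echo line stops at 'echo'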
ehansis/ozelot | examples/superheroes/superheroes/analysis.py | https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/analysis.py#L90-L201 | def plots_html_page():
"""Generate general statistics
Output is an html page, rendered to 'plots_html_page.html' in the output directory.
"""
# page template
template = jenv.get_template("plots_html_page.html")
# container for template context
context = dict()
# a database client/session to run queries in
cl = client.get_client()
session = cl.create_session()
# general styling
seaborn.set_style('whitegrid')
#
# plot: number of superheroes, by year of first appearance
#
# just query all character data, do analysis in pandas
query = session.query(models.Character)
character_data = cl.df_query(query)
# plot character appearances per year
fig = plt.figure(figsize=pixels_to_inches((400, 300)))
plt.plot(character_data.groupby('first_apperance_year')['id'].count(), '-o', c=seaborn.color_palette()[0])
# labels and title
plt.xlabel('Year')
plt.ylabel('Number of first appearances')
plt.title('Number of first character appearances per year')
# render to svg string, store in template context
context['first_appearances_per_year_svg'] = fig_to_svg(fig)
plt.close(fig)
#
# plot: number of movies, by year of publication
#
# just query all movie data, do analysis in pandas
query = session.query(models.Movie)
movie_data = cl.df_query(query)
# plot movie publications per year
fig = plt.figure(figsize=pixels_to_inches((400, 300)))
plt.plot(movie_data.groupby('year')['id'].count(), '-o', c=seaborn.color_palette()[1])
plt.xlabel('Year')
plt.ylabel('Number of movies')
plt.title('Number of movies per year')
context['movies_per_year_svg'] = fig_to_svg(fig)
plt.close(fig)
#
# plot: average character appearances per movie per year
#
# query number of character appearances for each movie, together with the movie year
query = session.query(sa.func.count(models.MovieAppearance.character_id).label('n_characters'),
models.Movie.id,
models.Movie.year) \
.join(models.Movie) \
.group_by(models.Movie.id, models.Movie.year)
appearance_counts = cl.df_query(query)
fig = plt.figure(figsize=pixels_to_inches((400, 300)))
plt.plot(appearance_counts.groupby('year')['n_characters'].mean(), '-o', c=seaborn.color_palette()[2])
plt.xlabel('Year')
plt.ylabel('Average number of characters')
plt.title('Average number of characters in a movie, per year')
context['average_appearances_per_movie_svg'] = fig_to_svg(fig)
#
# plots: average movie budget per year, with and without inflation adjustment
#
fig = plt.figure(figsize=pixels_to_inches((400, 300)))
plt.plot(movie_data.groupby('year')['budget'].mean() / 1e6, '-o', c=seaborn.color_palette()[3])
plt.xlabel('Year')
plt.ylabel('Average budget in Mio Euro')
plt.title('Average movie budget per year')
plt.xlim(1980, plt.xlim()[1])
context['budget_per_year_svg'] = fig_to_svg(fig)
plt.close(fig)
fig = plt.figure(figsize=pixels_to_inches((400, 300)))
plt.plot(movie_data.groupby('year')['budget_inflation_adjusted'].mean() / 1e6, '-o', c=seaborn.color_palette()[4])
plt.xlabel('Year')
plt.ylabel('Average budget in Mio Euro')
plt.title('Average movie budget per year, adjusted for inflation')
plt.xlim(1980, plt.xlim()[1])
context['budget_adjusted_per_year_svg'] = fig_to_svg(fig)
plt.close(fig)
#
# render template
#
# add additional context data:
# - html code for list of imported universes
# noinspection PyUnresolvedReferences
context['universes_list'] = ', '.join(config.UNIVERSES)
out_file = path.join(out_dir, "plots_html_page.html")
html_content = template.render(**context)
with open(out_file, 'w') as f:
f.write(html_content)
# done, clean up
plt.close('all')
session.close() | [
"def",
"plots_html_page",
"(",
")",
":",
"# page template",
"template",
"=",
"jenv",
".",
"get_template",
"(",
"\"plots_html_page.html\"",
")",
"# container for template context",
"context",
"=",
"dict",
"(",
")",
"# a database client/session to run queries in",
"cl",
"=",
"client",
".",
"get_client",
"(",
")",
"session",
"=",
"cl",
".",
"create_session",
"(",
")",
"# general styling",
"seaborn",
".",
"set_style",
"(",
"'whitegrid'",
")",
"#",
"# plot: number of superheroes, by year of first appearance",
"#",
"# just query all character data, do analysis in pandas",
"query",
"=",
"session",
".",
"query",
"(",
"models",
".",
"Character",
")",
"character_data",
"=",
"cl",
".",
"df_query",
"(",
"query",
")",
"# plot character appearances per year",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"pixels_to_inches",
"(",
"(",
"400",
",",
"300",
")",
")",
")",
"plt",
".",
"plot",
"(",
"character_data",
".",
"groupby",
"(",
"'first_apperance_year'",
")",
"[",
"'id'",
"]",
".",
"count",
"(",
")",
",",
"'-o'",
",",
"c",
"=",
"seaborn",
".",
"color_palette",
"(",
")",
"[",
"0",
"]",
")",
"# labels and title",
"plt",
".",
"xlabel",
"(",
"'Year'",
")",
"plt",
".",
"ylabel",
"(",
"'Number of first appearances'",
")",
"plt",
".",
"title",
"(",
"'Number of first character appearances per year'",
")",
"# render to svg string, store in template context",
"context",
"[",
"'first_appearances_per_year_svg'",
"]",
"=",
"fig_to_svg",
"(",
"fig",
")",
"plt",
".",
"close",
"(",
"fig",
")",
"#",
"# plot: number of movies, by year of publication",
"#",
"# just query all movie data, do analysis in pandas",
"query",
"=",
"session",
".",
"query",
"(",
"models",
".",
"Movie",
")",
"movie_data",
"=",
"cl",
".",
"df_query",
"(",
"query",
")",
"# plot movie publications per year",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"pixels_to_inches",
"(",
"(",
"400",
",",
"300",
")",
")",
")",
"plt",
".",
"plot",
"(",
"movie_data",
".",
"groupby",
"(",
"'year'",
")",
"[",
"'id'",
"]",
".",
"count",
"(",
")",
",",
"'-o'",
",",
"c",
"=",
"seaborn",
".",
"color_palette",
"(",
")",
"[",
"1",
"]",
")",
"plt",
".",
"xlabel",
"(",
"'Year'",
")",
"plt",
".",
"ylabel",
"(",
"'Number of movies'",
")",
"plt",
".",
"title",
"(",
"'Number of movies per year'",
")",
"context",
"[",
"'movies_per_year_svg'",
"]",
"=",
"fig_to_svg",
"(",
"fig",
")",
"plt",
".",
"close",
"(",
"fig",
")",
"#",
"# plot: average character appearances per movie per year",
"#",
"# query number of character appearances for each movie, together with the movie year",
"query",
"=",
"session",
".",
"query",
"(",
"sa",
".",
"func",
".",
"count",
"(",
"models",
".",
"MovieAppearance",
".",
"character_id",
")",
".",
"label",
"(",
"'n_characters'",
")",
",",
"models",
".",
"Movie",
".",
"id",
",",
"models",
".",
"Movie",
".",
"year",
")",
".",
"join",
"(",
"models",
".",
"Movie",
")",
".",
"group_by",
"(",
"models",
".",
"Movie",
".",
"id",
",",
"models",
".",
"Movie",
".",
"year",
")",
"appearance_counts",
"=",
"cl",
".",
"df_query",
"(",
"query",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"pixels_to_inches",
"(",
"(",
"400",
",",
"300",
")",
")",
")",
"plt",
".",
"plot",
"(",
"appearance_counts",
".",
"groupby",
"(",
"'year'",
")",
"[",
"'n_characters'",
"]",
".",
"mean",
"(",
")",
",",
"'-o'",
",",
"c",
"=",
"seaborn",
".",
"color_palette",
"(",
")",
"[",
"2",
"]",
")",
"plt",
".",
"xlabel",
"(",
"'Year'",
")",
"plt",
".",
"ylabel",
"(",
"'Average number of characters'",
")",
"plt",
".",
"title",
"(",
"'Average number of characters in a movie, per year'",
")",
"context",
"[",
"'average_appearances_per_movie_svg'",
"]",
"=",
"fig_to_svg",
"(",
"fig",
")",
"#",
"# plots: average movie budget per year, with and without inflation adjustment",
"#",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"pixels_to_inches",
"(",
"(",
"400",
",",
"300",
")",
")",
")",
"plt",
".",
"plot",
"(",
"movie_data",
".",
"groupby",
"(",
"'year'",
")",
"[",
"'budget'",
"]",
".",
"mean",
"(",
")",
"/",
"1e6",
",",
"'-o'",
",",
"c",
"=",
"seaborn",
".",
"color_palette",
"(",
")",
"[",
"3",
"]",
")",
"plt",
".",
"xlabel",
"(",
"'Year'",
")",
"plt",
".",
"ylabel",
"(",
"'Average budget in Mio Euro'",
")",
"plt",
".",
"title",
"(",
"'Average movie budget per year'",
")",
"plt",
".",
"xlim",
"(",
"1980",
",",
"plt",
".",
"xlim",
"(",
")",
"[",
"1",
"]",
")",
"context",
"[",
"'budget_per_year_svg'",
"]",
"=",
"fig_to_svg",
"(",
"fig",
")",
"plt",
".",
"close",
"(",
"fig",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"pixels_to_inches",
"(",
"(",
"400",
",",
"300",
")",
")",
")",
"plt",
".",
"plot",
"(",
"movie_data",
".",
"groupby",
"(",
"'year'",
")",
"[",
"'budget_inflation_adjusted'",
"]",
".",
"mean",
"(",
")",
"/",
"1e6",
",",
"'-o'",
",",
"c",
"=",
"seaborn",
".",
"color_palette",
"(",
")",
"[",
"4",
"]",
")",
"plt",
".",
"xlabel",
"(",
"'Year'",
")",
"plt",
".",
"ylabel",
"(",
"'Average budget in Mio Euro'",
")",
"plt",
".",
"title",
"(",
"'Average movie budget per year, adjusted for inflation'",
")",
"plt",
".",
"xlim",
"(",
"1980",
",",
"plt",
".",
"xlim",
"(",
")",
"[",
"1",
"]",
")",
"context",
"[",
"'budget_adjusted_per_year_svg'",
"]",
"=",
"fig_to_svg",
"(",
"fig",
")",
"plt",
".",
"close",
"(",
"fig",
")",
"#",
"# render template",
"#",
"# add additional context data:",
"# - html code for list of imported universes",
"# noinspection PyUnresolvedReferences",
"context",
"[",
"'universes_list'",
"]",
"=",
"', '",
".",
"join",
"(",
"config",
".",
"UNIVERSES",
")",
"out_file",
"=",
"path",
".",
"join",
"(",
"out_dir",
",",
"\"plots_html_page.html\"",
")",
"html_content",
"=",
"template",
".",
"render",
"(",
"*",
"*",
"context",
")",
"with",
"open",
"(",
"out_file",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"html_content",
")",
"# done, clean up",
"plt",
".",
"close",
"(",
"'all'",
")",
"session",
".",
"close",
"(",
")"
] | Generate general statistics
Output is an html page, rendered to 'plots_html_page.html' in the output directory. | [
"Generate",
"general",
"statistics"
] | python | train |
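The plots_html_page record above leans on a fig_to_svg() helper whose body is not shown; a minimal sketch of that idea, rendering a matplotlib figure to an SVG string for embedding in a template, might look like this (the helper name and behaviour are assumptions).

import io
import matplotlib
matplotlib.use("Agg")                  # headless backend for scripted rendering
import matplotlib.pyplot as plt

def fig_to_svg_sketch(fig):
    buf = io.BytesIO()
    fig.savefig(buf, format="svg")     # write SVG markup into the buffer
    return buf.getvalue().decode("utf-8")

fig = plt.figure(figsize=(4, 3))
plt.plot([1980, 1990, 2000], [1, 4, 9], "-o")
svg_text = fig_to_svg_sketch(fig)
plt.close(fig)
print(svg_text[:40])                   # XML prologue of the SVG document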
sunlightlabs/name-cleaver | name_cleaver/cleaver.py | https://github.com/sunlightlabs/name-cleaver/blob/48d3838fd9521235bd1586017fa4b31236ffc88e/name_cleaver/cleaver.py#L115-L133 | def reverse_last_first(self, name):
""" Takes a name that is in [last, first] format and returns it in a hopefully [first last] order.
Also extracts the suffix and puts it back on the end, in case it's embedded somewhere in the middle.
"""
# make sure we don't put a suffix in the middle, as in "Smith, Tom II"
name, suffix = self.extract_suffix(name)
split = re.split(', ?', name)
# make sure that the comma is not just preceding a suffix, such as "Jr",
# by checking that we have at least 2 name parts and the last doesn't match
# our suffix regex
if len(split) >= 2:
split.reverse()
if suffix:
split.append(suffix)
return ' '.join(split) | [
"def",
"reverse_last_first",
"(",
"self",
",",
"name",
")",
":",
"# make sure we don't put a suffix in the middle, as in \"Smith, Tom II\"",
"name",
",",
"suffix",
"=",
"self",
".",
"extract_suffix",
"(",
"name",
")",
"split",
"=",
"re",
".",
"split",
"(",
"', ?'",
",",
"name",
")",
"# make sure that the comma is not just preceding a suffix, such as \"Jr\",",
"# by checking that we have at least 2 name parts and the last doesn't match",
"# our suffix regex",
"if",
"len",
"(",
"split",
")",
">=",
"2",
":",
"split",
".",
"reverse",
"(",
")",
"if",
"suffix",
":",
"split",
".",
"append",
"(",
"suffix",
")",
"return",
"' '",
".",
"join",
"(",
"split",
")"
] | Takes a name that is in [last, first] format and returns it in a hopefully [first last] order.
Also extracts the suffix and puts it back on the end, in case it's embedded somewhere in the middle. | [
"Takes",
"a",
"name",
"that",
"is",
"in",
"[",
"last",
"first",
"]",
"format",
"and",
"returns",
"it",
"in",
"a",
"hopefully",
"[",
"first",
"last",
"]",
"order",
".",
"Also",
"extracts",
"the",
"suffix",
"and",
"puts",
"it",
"back",
"on",
"the",
"end",
"in",
"case",
"it",
"s",
"embedded",
"somewhere",
"in",
"the",
"middle",
"."
] | python | train |
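A simplified standalone sketch of the "Last, First [Suffix]" reversal described above; the suffix set is invented for illustration and the real name-cleaver logic relies on a regex-based extract_suffix().

import re

SUFFIXES = {"Jr", "Sr", "II", "III", "IV"}         # illustrative only

def reverse_last_first(name):
    parts = [p for p in re.split(r",\s*", name) if p]
    suffix = parts.pop() if parts and parts[-1] in SUFFIXES else ""
    parts.reverse()                                 # [last, first] -> [first, last]
    if suffix:
        parts.append(suffix)
    return " ".join(parts)

print(reverse_last_first("Smith, Tom, Jr"))         # Tom Smith Jr
print(reverse_last_first("Smith, Tom"))             # Tom Smith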
trevisanj/a99 | a99/gui/xmisc.py | https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/gui/xmisc.py#L424-L430 | def add_signal(self, signal):
"""Adds "input" signal to connected signals.
Internally connects the signal to a control slot."""
self.__signals.append(signal)
if self.__connected:
# Connects signal if the current state is "connected"
self.__connect_signal(signal) | [
"def",
"add_signal",
"(",
"self",
",",
"signal",
")",
":",
"self",
".",
"__signals",
".",
"append",
"(",
"signal",
")",
"if",
"self",
".",
"__connected",
":",
"# Connects signal if the current state is \"connected\"\r",
"self",
".",
"__connect_signal",
"(",
"signal",
")"
] | Adds "input" signal to connected signals.
Internally connects the signal to a control slot. | [
"Adds",
"input",
"signal",
"to",
"connected",
"signals",
".",
"Internally",
"connects",
"the",
"signal",
"to",
"a",
"control",
"slot",
"."
] | python | train |
scanny/python-pptx | pptx/chart/plot.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/plot.py#L164-L172 | def overlap(self, value):
"""
Set the value of the ``<c:overlap>`` child element to *int_value*,
or remove the overlap element if *int_value* is 0.
"""
if value == 0:
self._element._remove_overlap()
return
self._element.get_or_add_overlap().val = value | [
"def",
"overlap",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"==",
"0",
":",
"self",
".",
"_element",
".",
"_remove_overlap",
"(",
")",
"return",
"self",
".",
"_element",
".",
"get_or_add_overlap",
"(",
")",
".",
"val",
"=",
"value"
] | Set the value of the ``<c:overlap>`` child element to *int_value*,
or remove the overlap element if *int_value* is 0. | [
"Set",
"the",
"value",
"of",
"the",
"<c",
":",
"overlap",
">",
"child",
"element",
"to",
"*",
"int_value",
"*",
"or",
"remove",
"the",
"overlap",
"element",
"if",
"*",
"int_value",
"*",
"is",
"0",
"."
] | python | train |
obriencj/python-javatools | javatools/report.py | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/report.py#L123-L143 | def setup(self):
"""
instantiates all report formats that have been added to this
reporter, and calls their setup methods.
"""
if self._formats:
# setup has been run already.
return
basedir = self.basedir
options = self.options
crumbs = self.get_relative_breadcrumbs()
fmts = list()
for fmt_class in self.formats:
fmt = fmt_class(basedir, options, crumbs)
fmt.setup()
fmts.append(fmt)
self._formats = fmts | [
"def",
"setup",
"(",
"self",
")",
":",
"if",
"self",
".",
"_formats",
":",
"# setup has been run already.",
"return",
"basedir",
"=",
"self",
".",
"basedir",
"options",
"=",
"self",
".",
"options",
"crumbs",
"=",
"self",
".",
"get_relative_breadcrumbs",
"(",
")",
"fmts",
"=",
"list",
"(",
")",
"for",
"fmt_class",
"in",
"self",
".",
"formats",
":",
"fmt",
"=",
"fmt_class",
"(",
"basedir",
",",
"options",
",",
"crumbs",
")",
"fmt",
".",
"setup",
"(",
")",
"fmts",
".",
"append",
"(",
"fmt",
")",
"self",
".",
"_formats",
"=",
"fmts"
] | instantiates all report formats that have been added to this
reporter, and calls their setup methods. | [
"instantiates",
"all",
"report",
"formats",
"that",
"have",
"been",
"added",
"to",
"this",
"reporter",
"and",
"calls",
"their",
"setup",
"methods",
"."
] | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/io/datasets.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/io/datasets.py#L52-L61 | def pack_ieee(value):
"""Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
return np.fromstring(value.tostring(),
np.ubyte).reshape((value.shape + (4,))) | [
"def",
"pack_ieee",
"(",
"value",
")",
":",
"return",
"np",
".",
"fromstring",
"(",
"value",
".",
"tostring",
"(",
")",
",",
"np",
".",
"ubyte",
")",
".",
"reshape",
"(",
"(",
"value",
".",
"shape",
"+",
"(",
"4",
",",
")",
")",
")"
] | Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel | [
"Packs",
"float",
"ieee",
"binary",
"representation",
"into",
"4",
"unsigned",
"int8"
] | python | train |
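The same packing with current NumPy idioms, since tostring()/fromstring() in the record are the older spellings that NumPy has since deprecated; a hedged sketch, not the vispy code.

import numpy as np

values = np.array([1.0, -2.5], dtype=np.float32)
packed = values.view(np.uint8).reshape(values.shape + (4,))   # 4 raw IEEE-754 bytes per float32
print(packed.shape)   # (2, 4)
print(packed)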
davidwtbuxton/notrequests | notrequests.py | https://github.com/davidwtbuxton/notrequests/blob/e48ee6107a58c2f373c33f78e3302608edeba7f3/notrequests.py#L121-L126 | def json(self, **kwargs):
"""Decodes response as JSON."""
encoding = detect_encoding(self.content[:4])
value = self.content.decode(encoding)
return simplejson.loads(value, **kwargs) | [
"def",
"json",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"encoding",
"=",
"detect_encoding",
"(",
"self",
".",
"content",
"[",
":",
"4",
"]",
")",
"value",
"=",
"self",
".",
"content",
".",
"decode",
"(",
"encoding",
")",
"return",
"simplejson",
".",
"loads",
"(",
"value",
",",
"*",
"*",
"kwargs",
")"
] | Decodes response as JSON. | [
"Decodes",
"response",
"as",
"JSON",
"."
] | python | train |
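detect_encoding() is not shown in the record; the heuristic it is assumed to apply is the RFC 4627 one, which infers the Unicode flavour of a JSON payload from the NUL-byte pattern in its first four bytes. A minimal sketch:

def detect_json_encoding(first4):
    b = first4.ljust(4, b"\x01")       # pad very short payloads defensively
    if b[0] == 0 and b[1] == 0 and b[2] == 0:
        return "utf-32-be"
    if b[0] == 0 and b[2] == 0:
        return "utf-16-be"
    if b[1] == 0 and b[2] == 0 and b[3] == 0:
        return "utf-32-le"
    if b[1] == 0 and b[3] == 0:
        return "utf-16-le"
    return "utf-8"

print(detect_json_encoding(b'{"a"'))                          # utf-8
print(detect_json_encoding('{"a"'.encode("utf-16-le")[:4]))   # utf-16-le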
CalebBell/ht | ht/conv_two_phase.py | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_two_phase.py#L33-L94 | def Davis_David(m, x, D, rhol, rhog, Cpl, kl, mul):
r'''Calculates the two-phase non-boiling heat transfer coefficient of a
liquid and gas flowing inside a tube of any inclination, as in [1]_ and
reviewed in [2]_.
.. math::
\frac{h_{TP} D}{k_l} = 0.060\left(\frac{\rho_L}{\rho_G}\right)^{0.28}
\left(\frac{DG_{TP} x}{\mu_L}\right)^{0.87}
\left(\frac{C_{p,L} \mu_L}{k_L}\right)^{0.4}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval [-]
D : float
Diameter of the tube [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Developed for both vertical and horizontal flow, and flow patterns of
annular or mist annular flow. Steam-water and air-water were the only
considered fluid combinations. Quality ranged from 0.1 to 1 in their data.
[1]_ claimed an AAE of 17%.
Examples
--------
>>> Davis_David(m=1, x=.9, D=.3, rhol=1000, rhog=2.5, Cpl=2300, kl=.6,
... mul=1E-3)
1437.3282869955121
References
----------
.. [1] Davis, E. J., and M. M. David. "Two-Phase Gas-Liquid Convection Heat
Transfer. A Correlation." Industrial & Engineering Chemistry
Fundamentals 3, no. 2 (May 1, 1964): 111-18. doi:10.1021/i160010a005.
.. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with
Seven Sets of Experimental Data, Including Flow Pattern and Tube
Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1,
1999): 15-40. doi:10.1080/014576399271691.
'''
G = m/(pi/4*D**2)
Prl = Prandtl(Cp=Cpl, mu=mul, k=kl)
Nu_TP = 0.060*(rhol/rhog)**0.28*(D*G*x/mul)**0.87*Prl**0.4
return Nu_TP*kl/D | [
"def",
"Davis_David",
"(",
"m",
",",
"x",
",",
"D",
",",
"rhol",
",",
"rhog",
",",
"Cpl",
",",
"kl",
",",
"mul",
")",
":",
"G",
"=",
"m",
"/",
"(",
"pi",
"/",
"4",
"*",
"D",
"**",
"2",
")",
"Prl",
"=",
"Prandtl",
"(",
"Cp",
"=",
"Cpl",
",",
"mu",
"=",
"mul",
",",
"k",
"=",
"kl",
")",
"Nu_TP",
"=",
"0.060",
"*",
"(",
"rhol",
"/",
"rhog",
")",
"**",
"0.28",
"*",
"(",
"D",
"*",
"G",
"*",
"x",
"/",
"mul",
")",
"**",
"0.87",
"*",
"Prl",
"**",
"0.4",
"return",
"Nu_TP",
"*",
"kl",
"/",
"D"
] | r'''Calculates the two-phase non-boiling heat transfer coefficient of a
liquid and gas flowing inside a tube of any inclination, as in [1]_ and
reviewed in [2]_.
.. math::
\frac{h_{TP} D}{k_l} = 0.060\left(\frac{\rho_L}{\rho_G}\right)^{0.28}
\left(\frac{DG_{TP} x}{\mu_L}\right)^{0.87}
\left(\frac{C_{p,L} \mu_L}{k_L}\right)^{0.4}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval [-]
D : float
Diameter of the tube [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Developed for both vertical and horizontal flow, and flow patterns of
annular or mist annular flow. Steam-water and air-water were the only
considered fluid combinations. Quality ranged from 0.1 to 1 in their data.
[1]_ claimed an AAE of 17%.
Examples
--------
>>> Davis_David(m=1, x=.9, D=.3, rhol=1000, rhog=2.5, Cpl=2300, kl=.6,
... mul=1E-3)
1437.3282869955121
References
----------
.. [1] Davis, E. J., and M. M. David. "Two-Phase Gas-Liquid Convection Heat
Transfer. A Correlation." Industrial & Engineering Chemistry
Fundamentals 3, no. 2 (May 1, 1964): 111-18. doi:10.1021/i160010a005.
.. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with
Seven Sets of Experimental Data, Including Flow Pattern and Tube
Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1,
1999): 15-40. doi:10.1080/014576399271691. | [
"r",
"Calculates",
"the",
"two",
"-",
"phase",
"non",
"-",
"boiling",
"heat",
"transfer",
"coefficient",
"of",
"a",
"liquid",
"and",
"gas",
"flowing",
"inside",
"a",
"tube",
"of",
"any",
"inclination",
"as",
"in",
"[",
"1",
"]",
"_",
"and",
"reviewed",
"in",
"[",
"2",
"]",
"_",
"."
] | python | train |
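A self-contained restatement of the correlation in the Davis_David docstring above, handy for reproducing the quoted example without importing ht; the formula is copied from the record, only the helper name is new.

from math import pi

def davis_david_htc(m, x, D, rhol, rhog, Cpl, kl, mul):
    G = m / (pi / 4.0 * D ** 2)          # total mass flux [kg/m^2/s]
    Pr_l = Cpl * mul / kl                # liquid Prandtl number
    Nu_tp = 0.060 * (rhol / rhog) ** 0.28 * (D * G * x / mul) ** 0.87 * Pr_l ** 0.4
    return Nu_tp * kl / D                # heat transfer coefficient [W/m^2/K]

h = davis_david_htc(m=1, x=0.9, D=0.3, rhol=1000, rhog=2.5, Cpl=2300, kl=0.6, mul=1e-3)
print(round(h, 2))                       # ~1437.33, matching the docstring example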
openvax/varlens | varlens/reads_util.py | https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/reads_util.py#L104-L141 | def add_args(parser, positional=False):
"""
Extends a commandline argument parser with arguments for specifying
read sources.
"""
group = parser.add_argument_group("read loading")
group.add_argument("reads" if positional else "--reads",
nargs="+", default=[],
help="Paths to bam files. Any number of paths may be specified.")
group.add_argument(
"--read-source-name",
nargs="+",
help="Names for each read source. The number of names specified "
"must match the number of bam files. If not specified, filenames are "
"used for names.")
# Add filters
group = parser.add_argument_group(
"read filtering",
"A number of read filters are available. See the pysam "
"documentation (http://pysam.readthedocs.org/en/latest/api.html) "
"for details on what these fields mean. When multiple filter "
"options are specified, reads must match *all* filters.")
for (name, (kind, message, function)) in READ_FILTERS.items():
extra = {}
if kind is bool:
extra["action"] = "store_true"
extra["default"] = None
elif kind is int:
extra["type"] = int
extra["metavar"] = "N"
elif kind is str:
extra["metavar"] = "STRING"
group.add_argument("--" + name.replace("_", "-"),
help=message,
**extra) | [
"def",
"add_args",
"(",
"parser",
",",
"positional",
"=",
"False",
")",
":",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"\"read loading\"",
")",
"group",
".",
"add_argument",
"(",
"\"reads\"",
"if",
"positional",
"else",
"\"--reads\"",
",",
"nargs",
"=",
"\"+\"",
",",
"default",
"=",
"[",
"]",
",",
"help",
"=",
"\"Paths to bam files. Any number of paths may be specified.\"",
")",
"group",
".",
"add_argument",
"(",
"\"--read-source-name\"",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"Names for each read source. The number of names specified \"",
"\"must match the number of bam files. If not specified, filenames are \"",
"\"used for names.\"",
")",
"# Add filters",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"\"read filtering\"",
",",
"\"A number of read filters are available. See the pysam \"",
"\"documentation (http://pysam.readthedocs.org/en/latest/api.html) \"",
"\"for details on what these fields mean. When multiple filter \"",
"\"options are specified, reads must match *all* filters.\"",
")",
"for",
"(",
"name",
",",
"(",
"kind",
",",
"message",
",",
"function",
")",
")",
"in",
"READ_FILTERS",
".",
"items",
"(",
")",
":",
"extra",
"=",
"{",
"}",
"if",
"kind",
"is",
"bool",
":",
"extra",
"[",
"\"action\"",
"]",
"=",
"\"store_true\"",
"extra",
"[",
"\"default\"",
"]",
"=",
"None",
"elif",
"kind",
"is",
"int",
":",
"extra",
"[",
"\"type\"",
"]",
"=",
"int",
"extra",
"[",
"\"metavar\"",
"]",
"=",
"\"N\"",
"elif",
"kind",
"is",
"str",
":",
"extra",
"[",
"\"metavar\"",
"]",
"=",
"\"STRING\"",
"group",
".",
"add_argument",
"(",
"\"--\"",
"+",
"name",
".",
"replace",
"(",
"\"_\"",
",",
"\"-\"",
")",
",",
"help",
"=",
"message",
",",
"*",
"*",
"extra",
")"
] | Extends a commandline argument parser with arguments for specifying
read sources. | [
"Extends",
"a",
"commandline",
"argument",
"parser",
"with",
"arguments",
"for",
"specifying",
"read",
"sources",
"."
] | python | train |
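A compact argparse sketch of the pattern in the add_args record above: an argument group whose flags are generated from a filter table. The filter names here are invented and are not the varlens READ_FILTERS contents.

import argparse

FILTERS = {"min_mapping_quality": int, "is_duplicate": bool, "reference_name": str}

parser = argparse.ArgumentParser()
group = parser.add_argument_group("read filtering")
for name, kind in FILTERS.items():
    extra = {"action": "store_true"} if kind is bool else {"type": kind, "metavar": "VALUE"}
    group.add_argument("--" + name.replace("_", "-"), **extra)

args = parser.parse_args(["--min-mapping-quality", "30", "--is-duplicate"])
print(args.min_mapping_quality, args.is_duplicate)   # 30 True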
facetoe/zenpy | zenpy/lib/api.py | https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L1727-L1735 | def create(self, section, article):
"""
Create (POST) an Article - See: Zendesk API `Reference
<https://developer.zendesk.com/rest_api/docs/help_center/articles#create-article>`__.
:param section: Section ID or object
:param article: Article to create
"""
return CRUDRequest(self).post(article, create=True, id=section) | [
"def",
"create",
"(",
"self",
",",
"section",
",",
"article",
")",
":",
"return",
"CRUDRequest",
"(",
"self",
")",
".",
"post",
"(",
"article",
",",
"create",
"=",
"True",
",",
"id",
"=",
"section",
")"
] | Create (POST) an Article - See: Zendesk API `Reference
<https://developer.zendesk.com/rest_api/docs/help_center/articles#create-article>`__.
:param section: Section ID or object
:param article: Article to create | [
"Create",
"(",
"POST",
")",
"an",
"Article",
"-",
"See",
":",
"Zendesk",
"API",
"Reference",
"<https",
":",
"//",
"developer",
".",
"zendesk",
".",
"com",
"/",
"rest_api",
"/",
"docs",
"/",
"help_center",
"/",
"articles#create",
"-",
"article",
">",
"__",
"."
] | python | train |
yahoo/TensorFlowOnSpark | examples/imagenet/inception/image_processing.py | https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/image_processing.py#L107-L137 | def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):
"""Generate batches of distorted versions of ImageNet images.
Use this function as the inputs for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
of the image that do not effect the label.
Args:
dataset: instance of Dataset class specifying the dataset.
batch_size: integer, number of examples in batch
num_preprocess_threads: integer, total number of preprocessing threads but
None defaults to FLAGS.num_preprocess_threads.
Returns:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
FLAGS.image_size, 3].
labels: 1-D integer Tensor of [batch_size].
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Force all input processing onto CPU in order to reserve the GPU for
# the forward inference and back-propagation.
with tf.device('/cpu:0'):
images, labels = batch_inputs(
dataset, batch_size, train=True,
num_preprocess_threads=num_preprocess_threads,
num_readers=FLAGS.num_readers)
return images, labels | [
"def",
"distorted_inputs",
"(",
"dataset",
",",
"batch_size",
"=",
"None",
",",
"num_preprocess_threads",
"=",
"None",
")",
":",
"if",
"not",
"batch_size",
":",
"batch_size",
"=",
"FLAGS",
".",
"batch_size",
"# Force all input processing onto CPU in order to reserve the GPU for",
"# the forward inference and back-propagation.",
"with",
"tf",
".",
"device",
"(",
"'/cpu:0'",
")",
":",
"images",
",",
"labels",
"=",
"batch_inputs",
"(",
"dataset",
",",
"batch_size",
",",
"train",
"=",
"True",
",",
"num_preprocess_threads",
"=",
"num_preprocess_threads",
",",
"num_readers",
"=",
"FLAGS",
".",
"num_readers",
")",
"return",
"images",
",",
"labels"
] | Generate batches of distorted versions of ImageNet images.
Use this function as the inputs for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
of the image that do not affect the label.
Args:
dataset: instance of Dataset class specifying the dataset.
batch_size: integer, number of examples in batch
num_preprocess_threads: integer, total number of preprocessing threads but
None defaults to FLAGS.num_preprocess_threads.
Returns:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
FLAGS.image_size, 3].
labels: 1-D integer Tensor of [batch_size]. | [
"Generate",
"batches",
"of",
"distorted",
"versions",
"of",
"ImageNet",
"images",
"."
] | python | train |
aouyar/PyMunin | pysysinfo/system.py | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/system.py#L78-L96 | def getCPUuse(self):
"""Return cpu time utilization in seconds.
@return: Dictionary of stats.
"""
hz = os.sysconf('SC_CLK_TCK')
info_dict = {}
try:
fp = open(cpustatFile, 'r')
line = fp.readline()
fp.close()
except:
raise IOError('Failed reading stats from file: %s' % cpustatFile)
headers = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'steal', 'guest']
arr = line.split()
if len(arr) > 1 and arr[0] == 'cpu':
return dict(zip(headers[0:len(arr)], [(float(t) / hz) for t in arr[1:]]))
return info_dict | [
"def",
"getCPUuse",
"(",
"self",
")",
":",
"hz",
"=",
"os",
".",
"sysconf",
"(",
"'SC_CLK_TCK'",
")",
"info_dict",
"=",
"{",
"}",
"try",
":",
"fp",
"=",
"open",
"(",
"cpustatFile",
",",
"'r'",
")",
"line",
"=",
"fp",
".",
"readline",
"(",
")",
"fp",
".",
"close",
"(",
")",
"except",
":",
"raise",
"IOError",
"(",
"'Failed reading stats from file: %s'",
"%",
"cpustatFile",
")",
"headers",
"=",
"[",
"'user'",
",",
"'nice'",
",",
"'system'",
",",
"'idle'",
",",
"'iowait'",
",",
"'irq'",
",",
"'softirq'",
",",
"'steal'",
",",
"'guest'",
"]",
"arr",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"arr",
")",
">",
"1",
"and",
"arr",
"[",
"0",
"]",
"==",
"'cpu'",
":",
"return",
"dict",
"(",
"zip",
"(",
"headers",
"[",
"0",
":",
"len",
"(",
"arr",
")",
"]",
",",
"[",
"(",
"float",
"(",
"t",
")",
"/",
"hz",
")",
"for",
"t",
"in",
"arr",
"[",
"1",
":",
"]",
"]",
")",
")",
"return",
"info_dict"
] | Return cpu time utilization in seconds.
@return: Dictionary of stats. | [
"Return",
"cpu",
"time",
"utilization",
"in",
"seconds",
"."
] | python | train |
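A standalone sketch of the /proc/stat parsing performed by getCPUuse above (Linux only); the header list mirrors the record, the function name is new.

import os

def cpu_times_seconds(statfile="/proc/stat"):
    hz = os.sysconf("SC_CLK_TCK")              # kernel clock ticks per second
    headers = ["user", "nice", "system", "idle", "iowait",
               "irq", "softirq", "steal", "guest"]
    with open(statfile) as fp:
        fields = fp.readline().split()         # first line holds the aggregate "cpu" counters
    if len(fields) > 1 and fields[0] == "cpu":
        return dict(zip(headers, (float(t) / hz for t in fields[1:])))
    return {}

# print(cpu_times_seconds())   # e.g. {'user': 1234.5, 'nice': 3.2, 'system': 456.7, ...}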
poppy-project/pypot | pypot/vrep/io.py | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/io.py#L255-L261 | def change_object_name(self, old_name, new_name):
""" Change object name """
h = self._get_object_handle(old_name)
if old_name in self._object_handles:
self._object_handles.pop(old_name)
lua_code = "simSetObjectName({}, '{}')".format(h, new_name)
self._inject_lua_code(lua_code) | [
"def",
"change_object_name",
"(",
"self",
",",
"old_name",
",",
"new_name",
")",
":",
"h",
"=",
"self",
".",
"_get_object_handle",
"(",
"old_name",
")",
"if",
"old_name",
"in",
"self",
".",
"_object_handles",
":",
"self",
".",
"_object_handles",
".",
"pop",
"(",
"old_name",
")",
"lua_code",
"=",
"\"simSetObjectName({}, '{}')\"",
".",
"format",
"(",
"h",
",",
"new_name",
")",
"self",
".",
"_inject_lua_code",
"(",
"lua_code",
")"
] | Change object name | [
"Change",
"object",
"name"
] | python | train |
bwohlberg/sporco | sporco/dictlrn/cbpdndl.py | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/cbpdndl.py#L140-L154 | def ConvCnstrMODOptionsDefaults(method='fista'):
"""Get defaults dict for the ConvCnstrMOD class specified by the
``method`` parameter.
"""
dflt = copy.deepcopy(ccmod_class_label_lookup(method).Options.defaults)
if method == 'fista':
dflt.update({'MaxMainIter': 1, 'BackTrack':
{'gamma_u': 1.2, 'MaxIter': 50}})
else:
dflt.update({'MaxMainIter': 1, 'AutoRho':
{'Period': 10, 'AutoScaling': False,
'RsdlRatio': 10.0, 'Scaling': 2.0,
'RsdlTarget': 1.0}})
return dflt | [
"def",
"ConvCnstrMODOptionsDefaults",
"(",
"method",
"=",
"'fista'",
")",
":",
"dflt",
"=",
"copy",
".",
"deepcopy",
"(",
"ccmod_class_label_lookup",
"(",
"method",
")",
".",
"Options",
".",
"defaults",
")",
"if",
"method",
"==",
"'fista'",
":",
"dflt",
".",
"update",
"(",
"{",
"'MaxMainIter'",
":",
"1",
",",
"'BackTrack'",
":",
"{",
"'gamma_u'",
":",
"1.2",
",",
"'MaxIter'",
":",
"50",
"}",
"}",
")",
"else",
":",
"dflt",
".",
"update",
"(",
"{",
"'MaxMainIter'",
":",
"1",
",",
"'AutoRho'",
":",
"{",
"'Period'",
":",
"10",
",",
"'AutoScaling'",
":",
"False",
",",
"'RsdlRatio'",
":",
"10.0",
",",
"'Scaling'",
":",
"2.0",
",",
"'RsdlTarget'",
":",
"1.0",
"}",
"}",
")",
"return",
"dflt"
] | Get defaults dict for the ConvCnstrMOD class specified by the
``method`` parameter. | [
"Get",
"defaults",
"dict",
"for",
"the",
"ConvCnstrMOD",
"class",
"specified",
"by",
"the",
"method",
"parameter",
"."
] | python | train |
ABI-Software/MeshParser | src/meshparser/base/parser.py | https://github.com/ABI-Software/MeshParser/blob/08dc0ce7c44d0149b443261ff6d3708e28a928e7/src/meshparser/base/parser.py#L41-L69 | def getElements(self, zero_based=True, pared=False):
"""
Get the elements of the mesh as a list of point index list.
:param zero_based: use zero based index of points if true otherwise use 1-based index of points.
:param pared: use the pared down list of points
:return: A list of point index lists
"""
points = self._points[:]
elements = self._elements[:]
offset = 0
if not zero_based:
offset = 1
np = None
if pared:
np = NodePare()
np.addPoints(points)
np.parePoints()
if pared or not zero_based:
modified_elements = []
for element in elements:
modified_element = [index + offset if np is None else np.getParedIndex(index) + offset
for index in element]
modified_elements.append(modified_element)
elements = modified_elements
return elements | [
"def",
"getElements",
"(",
"self",
",",
"zero_based",
"=",
"True",
",",
"pared",
"=",
"False",
")",
":",
"points",
"=",
"self",
".",
"_points",
"[",
":",
"]",
"elements",
"=",
"self",
".",
"_elements",
"[",
":",
"]",
"offset",
"=",
"0",
"if",
"not",
"zero_based",
":",
"offset",
"=",
"1",
"np",
"=",
"None",
"if",
"pared",
":",
"np",
"=",
"NodePare",
"(",
")",
"np",
".",
"addPoints",
"(",
"points",
")",
"np",
".",
"parePoints",
"(",
")",
"if",
"pared",
"or",
"not",
"zero_based",
":",
"modified_elements",
"=",
"[",
"]",
"for",
"element",
"in",
"elements",
":",
"modified_element",
"=",
"[",
"index",
"+",
"offset",
"if",
"np",
"is",
"None",
"else",
"np",
".",
"getParedIndex",
"(",
"index",
")",
"+",
"offset",
"for",
"index",
"in",
"element",
"]",
"modified_elements",
".",
"append",
"(",
"modified_element",
")",
"elements",
"=",
"modified_elements",
"return",
"elements"
] | Get the elements of the mesh as a list of point index list.
:param zero_based: use zero based index of points if true otherwise use 1-based index of points.
:param pared: use the pared down list of points
:return: A list of point index lists | [
"Get",
"the",
"elements",
"of",
"the",
"mesh",
"as",
"a",
"list",
"of",
"point",
"index",
"list",
".",
":",
"param",
"zero_based",
":",
"use",
"zero",
"based",
"index",
"of",
"points",
"if",
"true",
"otherwise",
"use",
"1",
"-",
"based",
"index",
"of",
"points",
".",
":",
"param",
"pared",
":",
"use",
"the",
"pared",
"down",
"list",
"of",
"points",
":",
"return",
":",
"A",
"list",
"of",
"point",
"index",
"lists"
] | python | train |
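The index-offset step in getElements above reduces to a nested comprehension; a tiny illustration of converting zero-based connectivity to one-based numbering:

elements_zero_based = [[0, 1, 2], [2, 1, 3]]
elements_one_based = [[idx + 1 for idx in element] for element in elements_zero_based]
print(elements_one_based)   # [[1, 2, 3], [3, 2, 4]]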
vertexproject/synapse | synapse/lib/dyndeps.py | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/dyndeps.py#L40-L50 | def getDynMeth(name):
'''
Retrieve and return an unbound method by python path.
'''
cname, fname = name.rsplit('.', 1)
clas = getDynLocal(cname)
if clas is None:
return None
return getattr(clas, fname, None) | [
"def",
"getDynMeth",
"(",
"name",
")",
":",
"cname",
",",
"fname",
"=",
"name",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"clas",
"=",
"getDynLocal",
"(",
"cname",
")",
"if",
"clas",
"is",
"None",
":",
"return",
"None",
"return",
"getattr",
"(",
"clas",
",",
"fname",
",",
"None",
")"
] | Retrieve and return an unbound method by python path. | [
"Retrieve",
"and",
"return",
"an",
"unbound",
"method",
"by",
"python",
"path",
"."
] | python | train |
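The same lookup can be written against the standard library alone; the sketch below resolves a dotted "module.Class.method" path with importlib and is illustrative, not the synapse implementation (which delegates to getDynLocal).

import importlib

def get_dyn_meth(path):
    cname, _, fname = path.rpartition(".")
    module_name, _, class_name = cname.rpartition(".")
    try:
        clas = getattr(importlib.import_module(module_name), class_name)
    except (ImportError, AttributeError, ValueError):
        return None
    return getattr(clas, fname, None)

print(get_dyn_meth("collections.OrderedDict.popitem"))   # <function OrderedDict.popitem at 0x...>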
crytic/slither | slither/printers/summary/function.py | https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/printers/summary/function.py#L24-L64 | def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
for c in self.contracts:
(name, inheritance, var, func_summaries, modif_summaries) = c.get_summary()
txt = "\nContract %s"%name
txt += '\nContract vars: '+str(var)
txt += '\nInheritance:: '+str(inheritance)
table = PrettyTable(["Function",
"Visibility",
"Modifiers",
"Read",
"Write",
"Internal Calls",
"External Calls"])
for (_c_name, f_name, visi, modifiers, read, write, internal_calls, external_calls) in func_summaries:
read = self._convert(read)
write = self._convert(write)
internal_calls = self._convert(internal_calls)
external_calls = self._convert(external_calls)
table.add_row([f_name, visi, modifiers, read, write, internal_calls, external_calls])
txt += "\n \n"+str(table)
table = PrettyTable(["Modifiers",
"Visibility",
"Read",
"Write",
"Internal Calls",
"External Calls"])
for (_c_name, f_name, visi, _, read, write, internal_calls, external_calls) in modif_summaries:
read = self._convert(read)
write = self._convert(write)
internal_calls = self._convert(internal_calls)
external_calls = self._convert(external_calls)
table.add_row([f_name, visi, read, write, internal_calls, external_calls])
txt += "\n\n"+str(table)
txt += "\n"
self.info(txt) | [
"def",
"output",
"(",
"self",
",",
"_filename",
")",
":",
"for",
"c",
"in",
"self",
".",
"contracts",
":",
"(",
"name",
",",
"inheritance",
",",
"var",
",",
"func_summaries",
",",
"modif_summaries",
")",
"=",
"c",
".",
"get_summary",
"(",
")",
"txt",
"=",
"\"\\nContract %s\"",
"%",
"name",
"txt",
"+=",
"'\\nContract vars: '",
"+",
"str",
"(",
"var",
")",
"txt",
"+=",
"'\\nInheritance:: '",
"+",
"str",
"(",
"inheritance",
")",
"table",
"=",
"PrettyTable",
"(",
"[",
"\"Function\"",
",",
"\"Visibility\"",
",",
"\"Modifiers\"",
",",
"\"Read\"",
",",
"\"Write\"",
",",
"\"Internal Calls\"",
",",
"\"External Calls\"",
"]",
")",
"for",
"(",
"_c_name",
",",
"f_name",
",",
"visi",
",",
"modifiers",
",",
"read",
",",
"write",
",",
"internal_calls",
",",
"external_calls",
")",
"in",
"func_summaries",
":",
"read",
"=",
"self",
".",
"_convert",
"(",
"read",
")",
"write",
"=",
"self",
".",
"_convert",
"(",
"write",
")",
"internal_calls",
"=",
"self",
".",
"_convert",
"(",
"internal_calls",
")",
"external_calls",
"=",
"self",
".",
"_convert",
"(",
"external_calls",
")",
"table",
".",
"add_row",
"(",
"[",
"f_name",
",",
"visi",
",",
"modifiers",
",",
"read",
",",
"write",
",",
"internal_calls",
",",
"external_calls",
"]",
")",
"txt",
"+=",
"\"\\n \\n\"",
"+",
"str",
"(",
"table",
")",
"table",
"=",
"PrettyTable",
"(",
"[",
"\"Modifiers\"",
",",
"\"Visibility\"",
",",
"\"Read\"",
",",
"\"Write\"",
",",
"\"Internal Calls\"",
",",
"\"External Calls\"",
"]",
")",
"for",
"(",
"_c_name",
",",
"f_name",
",",
"visi",
",",
"_",
",",
"read",
",",
"write",
",",
"internal_calls",
",",
"external_calls",
")",
"in",
"modif_summaries",
":",
"read",
"=",
"self",
".",
"_convert",
"(",
"read",
")",
"write",
"=",
"self",
".",
"_convert",
"(",
"write",
")",
"internal_calls",
"=",
"self",
".",
"_convert",
"(",
"internal_calls",
")",
"external_calls",
"=",
"self",
".",
"_convert",
"(",
"external_calls",
")",
"table",
".",
"add_row",
"(",
"[",
"f_name",
",",
"visi",
",",
"read",
",",
"write",
",",
"internal_calls",
",",
"external_calls",
"]",
")",
"txt",
"+=",
"\"\\n\\n\"",
"+",
"str",
"(",
"table",
")",
"txt",
"+=",
"\"\\n\"",
"self",
".",
"info",
"(",
"txt",
")"
] | _filename is not used
Args:
_filename(string) | [
"_filename",
"is",
"not",
"used",
"Args",
":",
"_filename",
"(",
"string",
")"
] | python | train |
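A minimal PrettyTable sketch matching the usage in the output record above; the rows are invented for illustration.

from prettytable import PrettyTable

table = PrettyTable(["Function", "Visibility", "Modifiers"])
table.add_row(["transfer", "public", "onlyOwner"])
table.add_row(["_mint", "internal", ""])
print(table)   # ASCII table with one row per function summary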
svinota/mdns | mdns/zeroconf.py | https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L751-L758 | def read_string(self, len):
"""Reads a string of a given length from the packet"""
format = '!' + str(len) + 's'
length = struct.calcsize(format)
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
return info[0] | [
"def",
"read_string",
"(",
"self",
",",
"len",
")",
":",
"format",
"=",
"'!'",
"+",
"str",
"(",
"len",
")",
"+",
"'s'",
"length",
"=",
"struct",
".",
"calcsize",
"(",
"format",
")",
"info",
"=",
"struct",
".",
"unpack",
"(",
"format",
",",
"self",
".",
"data",
"[",
"self",
".",
"offset",
":",
"self",
".",
"offset",
"+",
"length",
"]",
")",
"self",
".",
"offset",
"+=",
"length",
"return",
"info",
"[",
"0",
"]"
] | Reads a string of a given length from the packet | [
"Reads",
"a",
"string",
"of",
"a",
"given",
"length",
"from",
"the",
"packet"
] | python | train |
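A standalone illustration of the fixed-length read in the record above: a '!Ns' struct format pulls N raw bytes from the packet and the offset advances by struct.calcsize of that format.

import struct

data = b"\x05_http\x04_tcp"                  # a DNS-style length-prefixed label
offset = 1                                   # skip the leading length byte
fmt = "!%ds" % data[0]                       # '!5s' for a 5-byte label
(label,) = struct.unpack(fmt, data[offset:offset + struct.calcsize(fmt)])
offset += struct.calcsize(fmt)
print(label, offset)                         # b'_http' 6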
chemlab/chemlab | chemlab/io/datafile.py | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/io/datafile.py#L102-L140 | def datafile(filename, mode="rb", format=None):
"""Initialize the appropriate
:py:class:`~chemlab.io.iohandler.IOHandler` for a given file
extension or file format.
The *datafile* function can be conveniently used to quickly read
or write data in a certain format::
>>> handler = datafile("molecule.pdb")
>>> mol = handler.read("molecule")
# You can also use this shortcut
>>> mol = datafile("molecule.pdb").read("molecule")
**Parameters**
filename: str
Path of the file to open.
format: str or None
When different from *None*, can be used to specify a
format identifier for that file. It should be used when
the extension is ambiguous or when there isn't a specified
filename. See below for a list of the formats supported by chemlab.
"""
filename = os.path.expanduser(filename)
base, ext = os.path.splitext(filename)
if format is None:
hc = get_handler_class(ext)
else:
hc = _handler_map.get(format)
if hc is None:
raise ValueError('Format {} not supported.'.format(format))
fd = open(filename, mode)
handler = hc(fd)
return handler | [
"def",
"datafile",
"(",
"filename",
",",
"mode",
"=",
"\"rb\"",
",",
"format",
"=",
"None",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"format",
"is",
"None",
":",
"hc",
"=",
"get_handler_class",
"(",
"ext",
")",
"else",
":",
"hc",
"=",
"_handler_map",
".",
"get",
"(",
"format",
")",
"if",
"hc",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Format {} not supported.'",
".",
"format",
"(",
"format",
")",
")",
"fd",
"=",
"open",
"(",
"filename",
",",
"mode",
")",
"handler",
"=",
"hc",
"(",
"fd",
")",
"return",
"handler"
] | Initialize the appropriate
:py:class:`~chemlab.io.iohandler.IOHandler` for a given file
extension or file format.
The *datafile* function can be conveniently used to quickly read
or write data in a certain format::
>>> handler = datafile("molecule.pdb")
>>> mol = handler.read("molecule")
# You can also use this shortcut
>>> mol = datafile("molecule.pdb").read("molecule")
**Parameters**
filename: str
Path of the file to open.
format: str or None
When different from *None*, can be used to specify a
format identifier for that file. It should be used when
the extension is ambiguous or when there isn't a specified
filename. See below for a list of the formats supported by chemlab. | [
"Initialize",
"the",
"appropriate",
":",
"py",
":",
"class",
":",
"~chemlab",
".",
"io",
".",
"iohandler",
".",
"IOHandler",
"for",
"a",
"given",
"file",
"extension",
"or",
"file",
"format",
"."
] | python | train |
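A minimal sketch of the extension-to-handler dispatch that datafile() performs above; the handler classes and map are invented stand-ins, not chemlab's registry.

import os

class XyzHandler:
    def __init__(self, fd):
        self.fd = fd

class PdbHandler:
    def __init__(self, fd):
        self.fd = fd

HANDLER_MAP = {".xyz": XyzHandler, ".pdb": PdbHandler}   # extension -> handler class

def open_datafile(filename, mode="rb", fmt=None):
    filename = os.path.expanduser(filename)
    ext = ("." + fmt) if fmt else os.path.splitext(filename)[1].lower()
    handler_class = HANDLER_MAP.get(ext)
    if handler_class is None:
        raise ValueError("Format not supported: %r" % ext)
    return handler_class(open(filename, mode))

# open_datafile("~/molecule.pdb")  # -> PdbHandler wrapping the opened file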
waqasbhatti/astrobase | astrobase/services/simbad.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/services/simbad.py#L105-L857 | def tap_query(querystr,
simbad_mirror='simbad',
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/simbad-cache',
verbose=True,
timeout=10.0,
refresh=2.0,
maxtimeout=90.0,
maxtries=3,
complete_query_later=False,
jitter=5.0):
'''This queries the SIMBAD TAP service using the ADQL query string provided.
Parameters
----------
querystr : str
This is the ADQL query string. See:
http://www.ivoa.net/documents/ADQL/2.0 for the specification.
simbad_mirror : str
This is the key used to select a SIMBAD mirror from the
`SIMBAD_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
returnformat : {'csv','votable','json'}
The returned file format to request from the SIMBAD TAP service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
jitter : float
This is used to control the scale of the random wait in seconds before
starting the query. Useful in parallelized situations.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# get the default params
inputparams = TAP_PARAMS.copy()
# update them with our input params
inputparams['QUERY'] = querystr[::]
if returnformat in RETURN_FORMATS:
inputparams['FORMAT'] = returnformat
else:
LOGWARNING('unknown result format: %s requested, using CSV' %
returnformat)
inputparams['FORMAT'] = 'csv'
# see if the cachedir exists
if '~' in cachedir:
cachedir = os.path.expanduser(cachedir)
if not os.path.exists(cachedir):
os.makedirs(cachedir)
# generate the cachefname and look for it
xcachekey = '-'.join([repr(inputparams[x])
for x in sorted(inputparams.keys())])
cachekey = hashlib.sha256(xcachekey.encode()).hexdigest()
cachefname = os.path.join(
cachedir,
'%s.%s' % (cachekey, RETURN_FORMATS[returnformat])
)
provenance = 'cache'
incomplete_qpklf = os.path.join(
cachedir,
'incomplete-query-%s' % cachekey
)
##########################################
## COMPLETE A QUERY THAT MAY BE RUNNING ##
##########################################
# first, check if this query can be resurrected
if (not forcefetch and
complete_query_later and
os.path.exists(incomplete_qpklf)):
with open(incomplete_qpklf, 'rb') as infd:
incomplete_qinfo = pickle.load(infd)
LOGWARNING('complete_query_later = True, and '
'this query was not completed on a '
'previous run, will check if it is done now...')
# get the status URL and go into a loop to see if the query completed
waitdone = False
timeelapsed = 0.0
simbad_mirror = incomplete_qinfo['simbad_mirror']
status_url = incomplete_qinfo['status_url']
phasekeyword = incomplete_qinfo['phase_keyword']
resultkeyword = incomplete_qinfo['result_keyword']
while not waitdone:
if timeelapsed > maxtimeout:
LOGERROR('SIMBAD TAP query still not done '
'after waiting %s seconds for results.\n'
'status URL is: %s' %
(maxtimeout,
repr(inputparams),
status_url))
return None
try:
resreq = requests.get(status_url,
timeout=timeout)
resreq.raise_for_status()
# parse the response XML and get the job status
resxml = parseString(resreq.text)
jobstatuselem = (
resxml.getElementsByTagName(phasekeyword)[0]
)
jobstatus = jobstatuselem.firstChild.toxml()
if jobstatus == 'COMPLETED':
if verbose:
LOGINFO('SIMBAD query completed, '
'retrieving results...')
waitdone = True
# if we're not done yet, then wait some more
elif jobstatus != 'ERROR':
if verbose:
LOGINFO('elapsed time: %.1f, '
'current status: %s, '
'status URL: %s, waiting...'
% (timeelapsed, jobstatus, status_url))
time.sleep(refresh)
timeelapsed = timeelapsed + refresh
# if the JOB failed, then bail out immediately
else:
LOGERROR('SIMBAD TAP query failed due to a server error.\n'
'status URL: %s\n'
'status contents: %s' %
(status_url,
resreq.text))
# since this job failed, remove the incomplete query pickle
# so we can try this from scratch
os.remove(incomplete_qpklf)
return None
except requests.exceptions.Timeout as e:
LOGEXCEPTION(
'SIMBAD query timed out while waiting for status '
'download results.\n'
'query: %s\n'
'status URL: %s' %
(repr(inputparams), status_url)
)
return None
except Exception as e:
LOGEXCEPTION(
'SIMBAD query failed while waiting for status\n'
'query: %s\n'
'status URL: %s\n'
'status contents: %s' %
(repr(inputparams),
status_url,
resreq.text)
)
# if the query fails completely, then either the status URL
# doesn't exist any more or something else went wrong. we'll
# remove the incomplete query pickle so we can try this from
# scratch
os.remove(incomplete_qpklf)
return None
#
# at this point, we should be ready to get the query results
#
LOGINFO('query completed, retrieving results...')
result_url_elem = resxml.getElementsByTagName(resultkeyword)[0]
result_url = result_url_elem.getAttribute('xlink:href')
result_nrows = result_url_elem.getAttribute('rows')
try:
resreq = requests.get(result_url, timeout=timeout)
resreq.raise_for_status()
if cachefname.endswith('.gz'):
with gzip.open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
else:
with open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
if verbose:
LOGINFO('done. rows in result: %s' % result_nrows)
tablefname = cachefname
provenance = 'cache'
# return a dict pointing to the result file
# we'll parse this later
resdict = {'params':inputparams,
'provenance':provenance,
'result':tablefname}
# all went well, so we'll remove the incomplete query pickle
os.remove(incomplete_qpklf)
return resdict
except requests.exceptions.Timeout as e:
LOGEXCEPTION(
'SIMBAD query timed out while trying to '
'download results.\n'
'query: %s\n'
'result URL: %s' %
(repr(inputparams), result_url)
)
return None
except Exception as e:
LOGEXCEPTION(
'SIMBAD query failed because of an error '
'while trying to download results.\n'
'query: %s\n'
'result URL: %s\n'
'response status code: %s' %
(repr(inputparams),
result_url,
resreq.status_code)
)
# if the result download fails, then either the result URL doesn't
# exist any more or something else went wrong. we'll remove the
# incomplete query pickle so we can try this from scratch
os.remove(incomplete_qpklf)
return None
#####################
## RUN A NEW QUERY ##
#####################
# otherwise, we check the cache if it's done already, or run it again if not
if forcefetch or (not os.path.exists(cachefname)):
provenance = 'new download'
time.sleep(random.randint(1,jitter))
# generate a jobid here and update the input params
jobid = 'ab-simbad-%i' % time.time()
inputparams['JOBNAME'] = jobid
inputparams['JOBDESCRIPTION'] = 'astrobase-simbad-tap-ADQL-query'
try:
waitdone = False
timeelapsed = 0.0
# set the simbad mirror to use
if simbad_mirror is not None and simbad_mirror in SIMBAD_URLS:
tapurl = SIMBAD_URLS[simbad_mirror]['url']
resultkeyword = SIMBAD_URLS[simbad_mirror]['resultkeyword']
phasekeyword = SIMBAD_URLS[simbad_mirror]['phasekeyword']
randkey = simbad_mirror
# sub in a table name if this is left unresolved in the input
# query
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=SIMBAD_URLS[simbad_mirror]['table']
)
)
else:
randkey = random.choice(list(SIMBAD_URLS.keys()))
tapurl = SIMBAD_URLS[randkey]['url']
resultkeyword = SIMBAD_URLS[randkey]['resultkeyword']
phasekeyword = SIMBAD_URLS[randkey]['phasekeyword']
# sub in a table name if this is left unresolved in the input
# query
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=SIMBAD_URLS[randkey]['table']
)
)
if verbose:
LOGINFO('using SIMBAD mirror TAP URL: %s' % tapurl)
# send the query and get status
if verbose:
LOGINFO(
'submitting SIMBAD TAP query request for input params: %s'
% repr(inputparams)
)
# here, we'll make sure the SIMBAD mirror works before doing
# anything else
mirrorok = False
ntries = 1
while (not mirrorok):
if ntries > maxtries:
LOGERROR('maximum number of allowed SIMBAD query '
'submission tries (%s) reached, bailing out...' %
maxtries)
return None
try:
req = requests.post(tapurl,
data=inputparams,
timeout=timeout)
resp_status = req.status_code
req.raise_for_status()
mirrorok = True
# this handles immediate 503s
except requests.exceptions.HTTPError as e:
LOGWARNING(
'SIMBAD TAP server: %s not responding, '
'trying another mirror...'
% tapurl
)
mirrorok = False
# for now, we have only one SIMBAD mirror to hit, so we'll
# wait a random time between 1 and 5 seconds to hit it again
remainingmirrors = list(SIMBAD_URLS.keys())
waittime = random.choice(range(1,6))
time.sleep(waittime)
randkey = remainingmirrors[0]
tapurl = SIMBAD_URLS[randkey]['url']
resultkeyword = SIMBAD_URLS[randkey]['resultkeyword']
phasekeyword = SIMBAD_URLS[randkey]['phasekeyword']
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=SIMBAD_URLS[randkey]['table']
)
)
# this handles initial query submission timeouts
except requests.exceptions.Timeout as e:
LOGWARNING(
'SIMBAD TAP query submission timed out, '
'mirror is probably down. Trying another mirror...'
)
mirrorok = False
# for now, we have only one SIMBAD mirror to hit, so we'll
# wait a random time between 1 and 5 seconds to hit it again
remainingmirrors = list(SIMBAD_URLS.keys())
waittime = random.choice(range(1,6))
time.sleep(waittime)
randkey = remainingmirrors[0]
tapurl = SIMBAD_URLS[randkey]['url']
resultkeyword = SIMBAD_URLS[randkey]['resultkeyword']
phasekeyword = SIMBAD_URLS[randkey]['phasekeyword']
if '{table}' in querystr:
inputparams['QUERY'] = (
querystr.format(
table=SIMBAD_URLS[randkey]['table']
)
)
# update the number of submission tries
ntries = ntries + 1
# NOTE: python-requests follows the "303 See Other" redirect
# automatically, so we get the XML status doc immediately. We don't
# need to look up the location of it in the initial response's
# header as in the SIMBAD example.
status_url = req.url
# parse the response XML and get the job status
resxml = parseString(req.text)
jobstatuselem = resxml.getElementsByTagName(phasekeyword)
if jobstatuselem:
jobstatuselem = jobstatuselem[0]
else:
LOGERROR('could not parse job phase using '
'keyword %s in result XML' % phasekeyword)
LOGERROR('%s' % req.txt)
req.close()
return None
jobstatus = jobstatuselem.firstChild.toxml()
# if the job completed already, jump down to retrieving results
if jobstatus == 'COMPLETED':
if verbose:
LOGINFO('SIMBAD query completed, '
'retrieving results...')
waitdone = True
elif jobstatus == 'ERROR':
if verbose:
LOGERROR(
'SIMBAD query failed immediately '
'(probably an ADQL error): %s, '
'status URL: %s, status contents: %s' %
(repr(inputparams),
status_url,
req.text)
)
return None
# we wait for the job to complete if it's not done already
else:
if verbose:
LOGINFO(
'request submitted successfully, '
'current status is: %s. '
'waiting for results...' % jobstatus
)
while not waitdone:
if timeelapsed > maxtimeout:
LOGERROR('SIMBAD TAP query timed out '
'after waiting %s seconds for results.\n'
'request was: %s\n'
'status URL is: %s\n'
'last status was: %s' %
(maxtimeout,
repr(inputparams),
status_url,
jobstatus))
# here, we'll check if we're allowed to sleep on a query
# for a bit and return to it later if the last status
# was QUEUED or EXECUTING
if complete_query_later and jobstatus in ('EXECUTING',
'QUEUED'):
# write a pickle with the query params that we can
# pick up later to finish this query
incomplete_qpklf = os.path.join(
cachedir,
'incomplete-query-%s' % cachekey
)
with open(incomplete_qpklf, 'wb') as outfd:
savedict = inputparams.copy()
savedict['status_url'] = status_url
savedict['last_status'] = jobstatus
savedict['simbad_mirror'] = simbad_mirror
savedict['phase_keyword'] = phasekeyword
savedict['result_keyword'] = resultkeyword
pickle.dump(savedict,
outfd,
pickle.HIGHEST_PROTOCOL)
LOGINFO('complete_query_later = True, '
'last state of query was: %s, '
'will resume later if this function '
'is called again with the same query' %
jobstatus)
return None
time.sleep(refresh)
timeelapsed = timeelapsed + refresh
try:
resreq = requests.get(status_url, timeout=timeout)
resreq.raise_for_status()
# parse the response XML and get the job status
resxml = parseString(resreq.text)
jobstatuselem = (
resxml.getElementsByTagName(phasekeyword)[0]
)
jobstatus = jobstatuselem.firstChild.toxml()
if jobstatus == 'COMPLETED':
if verbose:
LOGINFO('SIMBAD query completed, '
'retrieving results...')
waitdone = True
else:
if verbose:
LOGINFO('elapsed time: %.1f, '
'current status: %s, '
'status URL: %s, waiting...'
% (timeelapsed, jobstatus, status_url))
continue
except requests.exceptions.Timeout as e:
LOGEXCEPTION(
'SIMBAD query timed out while waiting for results '
'download results.\n'
'query: %s\n'
'status URL: %s' %
(repr(inputparams), status_url)
)
return None
except Exception as e:
LOGEXCEPTION(
'SIMBAD query failed while waiting for results\n'
'query: %s\n'
'status URL: %s\n'
'status contents: %s' %
(repr(inputparams),
status_url,
resreq.text)
)
return None
#
# at this point, we should be ready to get the query results
#
result_url_elem = resxml.getElementsByTagName(resultkeyword)[0]
result_url = result_url_elem.getAttribute('xlink:href')
result_nrows = result_url_elem.getAttribute('rows')
try:
resreq = requests.get(result_url, timeout=timeout)
resreq.raise_for_status()
if cachefname.endswith('.gz'):
with gzip.open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
else:
with open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
if verbose:
LOGINFO('done. rows in result: %s' % result_nrows)
tablefname = cachefname
except requests.exceptions.Timeout as e:
LOGEXCEPTION(
'SIMBAD query timed out while trying to '
'download results.\n'
'query: %s\n'
'result URL: %s' %
(repr(inputparams), result_url)
)
return None
except Exception as e:
LOGEXCEPTION(
'SIMBAD query failed because of an error '
'while trying to download results.\n'
'query: %s\n'
'result URL: %s\n'
'response status code: %s' %
(repr(inputparams),
result_url,
resreq.status_code)
)
return None
except requests.exceptions.HTTPError as e:
LOGEXCEPTION('SIMBAD TAP query failed.\nrequest status was: '
'%s.\nquery was: %s' % (resp_status,
repr(inputparams)))
return None
except requests.exceptions.Timeout as e:
LOGERROR('SIMBAD TAP query submission timed out, '
'site is probably down. Request was: '
'%s' % repr(inputparams))
return None
except Exception as e:
LOGEXCEPTION('SIMBAD TAP query request failed for '
'%s' % repr(inputparams))
if 'resxml' in locals():
LOGERROR('HTTP response from service:\n%s' % req.text)
return None
############################
## GET RESULTS FROM CACHE ##
############################
else:
if verbose:
LOGINFO('getting cached SIMBAD query result for '
'request: %s' %
(repr(inputparams)))
tablefname = cachefname
# try to open the cached file to make sure it's OK
try:
infd = gzip.open(cachefname,'rb')
simbad_objectnames = np.genfromtxt(
infd,
names=True,
delimiter=',',
dtype='U20,f8,f8,U20,U20,U20,i8,U600,f8',
usecols=(0,1,2,3,4,5,6,7,8),
comments='?', # object names can have '#' in them
)
infd.close()
except Exception as e:
LOGEXCEPTION('could not read cached SIMBAD result file: %s, '
'fetching from server again' % cachefname)
return tap_query(querystr,
simbad_mirror=simbad_mirror,
returnformat=returnformat,
forcefetch=True,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout)
#
# all done with retrieval, now return the result dict
#
# return a dict pointing to the result file
# we'll parse this later
resdict = {'params':inputparams,
'provenance':provenance,
'result':tablefname}
return resdict | [
"def",
"tap_query",
"(",
"querystr",
",",
"simbad_mirror",
"=",
"'simbad'",
",",
"returnformat",
"=",
"'csv'",
",",
"forcefetch",
"=",
"False",
",",
"cachedir",
"=",
"'~/.astrobase/simbad-cache'",
",",
"verbose",
"=",
"True",
",",
"timeout",
"=",
"10.0",
",",
"refresh",
"=",
"2.0",
",",
"maxtimeout",
"=",
"90.0",
",",
"maxtries",
"=",
"3",
",",
"complete_query_later",
"=",
"False",
",",
"jitter",
"=",
"5.0",
")",
":",
"# get the default params",
"inputparams",
"=",
"TAP_PARAMS",
".",
"copy",
"(",
")",
"# update them with our input params",
"inputparams",
"[",
"'QUERY'",
"]",
"=",
"querystr",
"[",
":",
":",
"]",
"if",
"returnformat",
"in",
"RETURN_FORMATS",
":",
"inputparams",
"[",
"'FORMAT'",
"]",
"=",
"returnformat",
"else",
":",
"LOGWARNING",
"(",
"'unknown result format: %s requested, using CSV'",
"%",
"returnformat",
")",
"inputparams",
"[",
"'FORMAT'",
"]",
"=",
"'csv'",
"# see if the cachedir exists",
"if",
"'~'",
"in",
"cachedir",
":",
"cachedir",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"cachedir",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cachedir",
")",
":",
"os",
".",
"makedirs",
"(",
"cachedir",
")",
"# generate the cachefname and look for it",
"xcachekey",
"=",
"'-'",
".",
"join",
"(",
"[",
"repr",
"(",
"inputparams",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"sorted",
"(",
"inputparams",
".",
"keys",
"(",
")",
")",
"]",
")",
"cachekey",
"=",
"hashlib",
".",
"sha256",
"(",
"xcachekey",
".",
"encode",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"cachefname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cachedir",
",",
"'%s.%s'",
"%",
"(",
"cachekey",
",",
"RETURN_FORMATS",
"[",
"returnformat",
"]",
")",
")",
"provenance",
"=",
"'cache'",
"incomplete_qpklf",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cachedir",
",",
"'incomplete-query-%s'",
"%",
"cachekey",
")",
"##########################################",
"## COMPLETE A QUERY THAT MAY BE RUNNING ##",
"##########################################",
"# first, check if this query can be resurrected",
"if",
"(",
"not",
"forcefetch",
"and",
"complete_query_later",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"incomplete_qpklf",
")",
")",
":",
"with",
"open",
"(",
"incomplete_qpklf",
",",
"'rb'",
")",
"as",
"infd",
":",
"incomplete_qinfo",
"=",
"pickle",
".",
"load",
"(",
"infd",
")",
"LOGWARNING",
"(",
"'complete_query_later = True, and '",
"'this query was not completed on a '",
"'previous run, will check if it is done now...'",
")",
"# get the status URL and go into a loop to see if the query completed",
"waitdone",
"=",
"False",
"timeelapsed",
"=",
"0.0",
"simbad_mirror",
"=",
"incomplete_qinfo",
"[",
"'simbad_mirror'",
"]",
"status_url",
"=",
"incomplete_qinfo",
"[",
"'status_url'",
"]",
"phasekeyword",
"=",
"incomplete_qinfo",
"[",
"'phase_keyword'",
"]",
"resultkeyword",
"=",
"incomplete_qinfo",
"[",
"'result_keyword'",
"]",
"while",
"not",
"waitdone",
":",
"if",
"timeelapsed",
">",
"maxtimeout",
":",
"LOGERROR",
"(",
"'SIMBAD TAP query still not done '",
"'after waiting %s seconds for results.\\n'",
"'status URL is: %s'",
"%",
"(",
"maxtimeout",
",",
"repr",
"(",
"inputparams",
")",
",",
"status_url",
")",
")",
"return",
"None",
"try",
":",
"resreq",
"=",
"requests",
".",
"get",
"(",
"status_url",
",",
"timeout",
"=",
"timeout",
")",
"resreq",
".",
"raise_for_status",
"(",
")",
"# parse the response XML and get the job status",
"resxml",
"=",
"parseString",
"(",
"resreq",
".",
"text",
")",
"jobstatuselem",
"=",
"(",
"resxml",
".",
"getElementsByTagName",
"(",
"phasekeyword",
")",
"[",
"0",
"]",
")",
"jobstatus",
"=",
"jobstatuselem",
".",
"firstChild",
".",
"toxml",
"(",
")",
"if",
"jobstatus",
"==",
"'COMPLETED'",
":",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'SIMBAD query completed, '",
"'retrieving results...'",
")",
"waitdone",
"=",
"True",
"# if we're not done yet, then wait some more",
"elif",
"jobstatus",
"!=",
"'ERROR'",
":",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'elapsed time: %.1f, '",
"'current status: %s, '",
"'status URL: %s, waiting...'",
"%",
"(",
"timeelapsed",
",",
"jobstatus",
",",
"status_url",
")",
")",
"time",
".",
"sleep",
"(",
"refresh",
")",
"timeelapsed",
"=",
"timeelapsed",
"+",
"refresh",
"# if the JOB failed, then bail out immediately",
"else",
":",
"LOGERROR",
"(",
"'SIMBAD TAP query failed due to a server error.\\n'",
"'status URL: %s\\n'",
"'status contents: %s'",
"%",
"(",
"status_url",
",",
"resreq",
".",
"text",
")",
")",
"# since this job failed, remove the incomplete query pickle",
"# so we can try this from scratch",
"os",
".",
"remove",
"(",
"incomplete_qpklf",
")",
"return",
"None",
"except",
"requests",
".",
"exceptions",
".",
"Timeout",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'SIMBAD query timed out while waiting for status '",
"'download results.\\n'",
"'query: %s\\n'",
"'status URL: %s'",
"%",
"(",
"repr",
"(",
"inputparams",
")",
",",
"status_url",
")",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'SIMBAD query failed while waiting for status\\n'",
"'query: %s\\n'",
"'status URL: %s\\n'",
"'status contents: %s'",
"%",
"(",
"repr",
"(",
"inputparams",
")",
",",
"status_url",
",",
"resreq",
".",
"text",
")",
")",
"# if the query fails completely, then either the status URL",
"# doesn't exist any more or something else went wrong. we'll",
"# remove the incomplete query pickle so we can try this from",
"# scratch",
"os",
".",
"remove",
"(",
"incomplete_qpklf",
")",
"return",
"None",
"#",
"# at this point, we should be ready to get the query results",
"#",
"LOGINFO",
"(",
"'query completed, retrieving results...'",
")",
"result_url_elem",
"=",
"resxml",
".",
"getElementsByTagName",
"(",
"resultkeyword",
")",
"[",
"0",
"]",
"result_url",
"=",
"result_url_elem",
".",
"getAttribute",
"(",
"'xlink:href'",
")",
"result_nrows",
"=",
"result_url_elem",
".",
"getAttribute",
"(",
"'rows'",
")",
"try",
":",
"resreq",
"=",
"requests",
".",
"get",
"(",
"result_url",
",",
"timeout",
"=",
"timeout",
")",
"resreq",
".",
"raise_for_status",
"(",
")",
"if",
"cachefname",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"with",
"gzip",
".",
"open",
"(",
"cachefname",
",",
"'wb'",
")",
"as",
"outfd",
":",
"for",
"chunk",
"in",
"resreq",
".",
"iter_content",
"(",
"chunk_size",
"=",
"65536",
")",
":",
"outfd",
".",
"write",
"(",
"chunk",
")",
"else",
":",
"with",
"open",
"(",
"cachefname",
",",
"'wb'",
")",
"as",
"outfd",
":",
"for",
"chunk",
"in",
"resreq",
".",
"iter_content",
"(",
"chunk_size",
"=",
"65536",
")",
":",
"outfd",
".",
"write",
"(",
"chunk",
")",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'done. rows in result: %s'",
"%",
"result_nrows",
")",
"tablefname",
"=",
"cachefname",
"provenance",
"=",
"'cache'",
"# return a dict pointing to the result file",
"# we'll parse this later",
"resdict",
"=",
"{",
"'params'",
":",
"inputparams",
",",
"'provenance'",
":",
"provenance",
",",
"'result'",
":",
"tablefname",
"}",
"# all went well, so we'll remove the incomplete query pickle",
"os",
".",
"remove",
"(",
"incomplete_qpklf",
")",
"return",
"resdict",
"except",
"requests",
".",
"exceptions",
".",
"Timeout",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'SIMBAD query timed out while trying to '",
"'download results.\\n'",
"'query: %s\\n'",
"'result URL: %s'",
"%",
"(",
"repr",
"(",
"inputparams",
")",
",",
"result_url",
")",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'SIMBAD query failed because of an error '",
"'while trying to download results.\\n'",
"'query: %s\\n'",
"'result URL: %s\\n'",
"'response status code: %s'",
"%",
"(",
"repr",
"(",
"inputparams",
")",
",",
"result_url",
",",
"resreq",
".",
"status_code",
")",
")",
"# if the result download fails, then either the result URL doesn't",
"# exist any more or something else went wrong. we'll remove the",
"# incomplete query pickle so we can try this from scratch",
"os",
".",
"remove",
"(",
"incomplete_qpklf",
")",
"return",
"None",
"#####################",
"## RUN A NEW QUERY ##",
"#####################",
"# otherwise, we check the cache if it's done already, or run it again if not",
"if",
"forcefetch",
"or",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cachefname",
")",
")",
":",
"provenance",
"=",
"'new download'",
"time",
".",
"sleep",
"(",
"random",
".",
"randint",
"(",
"1",
",",
"jitter",
")",
")",
"# generate a jobid here and update the input params",
"jobid",
"=",
"'ab-simbad-%i'",
"%",
"time",
".",
"time",
"(",
")",
"inputparams",
"[",
"'JOBNAME'",
"]",
"=",
"jobid",
"inputparams",
"[",
"'JOBDESCRIPTION'",
"]",
"=",
"'astrobase-simbad-tap-ADQL-query'",
"try",
":",
"waitdone",
"=",
"False",
"timeelapsed",
"=",
"0.0",
"# set the simbad mirror to use",
"if",
"simbad_mirror",
"is",
"not",
"None",
"and",
"simbad_mirror",
"in",
"SIMBAD_URLS",
":",
"tapurl",
"=",
"SIMBAD_URLS",
"[",
"simbad_mirror",
"]",
"[",
"'url'",
"]",
"resultkeyword",
"=",
"SIMBAD_URLS",
"[",
"simbad_mirror",
"]",
"[",
"'resultkeyword'",
"]",
"phasekeyword",
"=",
"SIMBAD_URLS",
"[",
"simbad_mirror",
"]",
"[",
"'phasekeyword'",
"]",
"randkey",
"=",
"simbad_mirror",
"# sub in a table name if this is left unresolved in the input",
"# query",
"if",
"'{table}'",
"in",
"querystr",
":",
"inputparams",
"[",
"'QUERY'",
"]",
"=",
"(",
"querystr",
".",
"format",
"(",
"table",
"=",
"SIMBAD_URLS",
"[",
"simbad_mirror",
"]",
"[",
"'table'",
"]",
")",
")",
"else",
":",
"randkey",
"=",
"random",
".",
"choice",
"(",
"list",
"(",
"SIMBAD_URLS",
".",
"keys",
"(",
")",
")",
")",
"tapurl",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'url'",
"]",
"resultkeyword",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'resultkeyword'",
"]",
"phasekeyword",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'phasekeyword'",
"]",
"# sub in a table name if this is left unresolved in the input",
"# query",
"if",
"'{table}'",
"in",
"querystr",
":",
"inputparams",
"[",
"'QUERY'",
"]",
"=",
"(",
"querystr",
".",
"format",
"(",
"table",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'table'",
"]",
")",
")",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'using SIMBAD mirror TAP URL: %s'",
"%",
"tapurl",
")",
"# send the query and get status",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'submitting SIMBAD TAP query request for input params: %s'",
"%",
"repr",
"(",
"inputparams",
")",
")",
"# here, we'll make sure the SIMBAD mirror works before doing",
"# anything else",
"mirrorok",
"=",
"False",
"ntries",
"=",
"1",
"while",
"(",
"not",
"mirrorok",
")",
":",
"if",
"ntries",
">",
"maxtries",
":",
"LOGERROR",
"(",
"'maximum number of allowed SIMBAD query '",
"'submission tries (%s) reached, bailing out...'",
"%",
"maxtries",
")",
"return",
"None",
"try",
":",
"req",
"=",
"requests",
".",
"post",
"(",
"tapurl",
",",
"data",
"=",
"inputparams",
",",
"timeout",
"=",
"timeout",
")",
"resp_status",
"=",
"req",
".",
"status_code",
"req",
".",
"raise_for_status",
"(",
")",
"mirrorok",
"=",
"True",
"# this handles immediate 503s",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"e",
":",
"LOGWARNING",
"(",
"'SIMBAD TAP server: %s not responding, '",
"'trying another mirror...'",
"%",
"tapurl",
")",
"mirrorok",
"=",
"False",
"# for now, we have only one SIMBAD mirror to hit, so we'll",
"# wait a random time between 1 and 5 seconds to hit it again",
"remainingmirrors",
"=",
"list",
"(",
"SIMBAD_URLS",
".",
"keys",
"(",
")",
")",
"waittime",
"=",
"random",
".",
"choice",
"(",
"range",
"(",
"1",
",",
"6",
")",
")",
"time",
".",
"sleep",
"(",
"waittime",
")",
"randkey",
"=",
"remainingmirrors",
"[",
"0",
"]",
"tapurl",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'url'",
"]",
"resultkeyword",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'resultkeyword'",
"]",
"phasekeyword",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'phasekeyword'",
"]",
"if",
"'{table}'",
"in",
"querystr",
":",
"inputparams",
"[",
"'QUERY'",
"]",
"=",
"(",
"querystr",
".",
"format",
"(",
"table",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'table'",
"]",
")",
")",
"# this handles initial query submission timeouts",
"except",
"requests",
".",
"exceptions",
".",
"Timeout",
"as",
"e",
":",
"LOGWARNING",
"(",
"'SIMBAD TAP query submission timed out, '",
"'mirror is probably down. Trying another mirror...'",
")",
"mirrorok",
"=",
"False",
"# for now, we have only one SIMBAD mirror to hit, so we'll",
"# wait a random time between 1 and 5 seconds to hit it again",
"remainingmirrors",
"=",
"list",
"(",
"SIMBAD_URLS",
".",
"keys",
"(",
")",
")",
"waittime",
"=",
"random",
".",
"choice",
"(",
"range",
"(",
"1",
",",
"6",
")",
")",
"time",
".",
"sleep",
"(",
"waittime",
")",
"randkey",
"=",
"remainingmirrors",
"[",
"0",
"]",
"tapurl",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'url'",
"]",
"resultkeyword",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'resultkeyword'",
"]",
"phasekeyword",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'phasekeyword'",
"]",
"if",
"'{table}'",
"in",
"querystr",
":",
"inputparams",
"[",
"'QUERY'",
"]",
"=",
"(",
"querystr",
".",
"format",
"(",
"table",
"=",
"SIMBAD_URLS",
"[",
"randkey",
"]",
"[",
"'table'",
"]",
")",
")",
"# update the number of submission tries",
"ntries",
"=",
"ntries",
"+",
"1",
"# NOTE: python-requests follows the \"303 See Other\" redirect",
"# automatically, so we get the XML status doc immediately. We don't",
"# need to look up the location of it in the initial response's",
"# header as in the SIMBAD example.",
"status_url",
"=",
"req",
".",
"url",
"# parse the response XML and get the job status",
"resxml",
"=",
"parseString",
"(",
"req",
".",
"text",
")",
"jobstatuselem",
"=",
"resxml",
".",
"getElementsByTagName",
"(",
"phasekeyword",
")",
"if",
"jobstatuselem",
":",
"jobstatuselem",
"=",
"jobstatuselem",
"[",
"0",
"]",
"else",
":",
"LOGERROR",
"(",
"'could not parse job phase using '",
"'keyword %s in result XML'",
"%",
"phasekeyword",
")",
"LOGERROR",
"(",
"'%s'",
"%",
"req",
".",
"txt",
")",
"req",
".",
"close",
"(",
")",
"return",
"None",
"jobstatus",
"=",
"jobstatuselem",
".",
"firstChild",
".",
"toxml",
"(",
")",
"# if the job completed already, jump down to retrieving results",
"if",
"jobstatus",
"==",
"'COMPLETED'",
":",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'SIMBAD query completed, '",
"'retrieving results...'",
")",
"waitdone",
"=",
"True",
"elif",
"jobstatus",
"==",
"'ERROR'",
":",
"if",
"verbose",
":",
"LOGERROR",
"(",
"'SIMBAD query failed immediately '",
"'(probably an ADQL error): %s, '",
"'status URL: %s, status contents: %s'",
"%",
"(",
"repr",
"(",
"inputparams",
")",
",",
"status_url",
",",
"req",
".",
"text",
")",
")",
"return",
"None",
"# we wait for the job to complete if it's not done already",
"else",
":",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'request submitted successfully, '",
"'current status is: %s. '",
"'waiting for results...'",
"%",
"jobstatus",
")",
"while",
"not",
"waitdone",
":",
"if",
"timeelapsed",
">",
"maxtimeout",
":",
"LOGERROR",
"(",
"'SIMBAD TAP query timed out '",
"'after waiting %s seconds for results.\\n'",
"'request was: %s\\n'",
"'status URL is: %s\\n'",
"'last status was: %s'",
"%",
"(",
"maxtimeout",
",",
"repr",
"(",
"inputparams",
")",
",",
"status_url",
",",
"jobstatus",
")",
")",
"# here, we'll check if we're allowed to sleep on a query",
"# for a bit and return to it later if the last status",
"# was QUEUED or EXECUTING",
"if",
"complete_query_later",
"and",
"jobstatus",
"in",
"(",
"'EXECUTING'",
",",
"'QUEUED'",
")",
":",
"# write a pickle with the query params that we can",
"# pick up later to finish this query",
"incomplete_qpklf",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cachedir",
",",
"'incomplete-query-%s'",
"%",
"cachekey",
")",
"with",
"open",
"(",
"incomplete_qpklf",
",",
"'wb'",
")",
"as",
"outfd",
":",
"savedict",
"=",
"inputparams",
".",
"copy",
"(",
")",
"savedict",
"[",
"'status_url'",
"]",
"=",
"status_url",
"savedict",
"[",
"'last_status'",
"]",
"=",
"jobstatus",
"savedict",
"[",
"'simbad_mirror'",
"]",
"=",
"simbad_mirror",
"savedict",
"[",
"'phase_keyword'",
"]",
"=",
"phasekeyword",
"savedict",
"[",
"'result_keyword'",
"]",
"=",
"resultkeyword",
"pickle",
".",
"dump",
"(",
"savedict",
",",
"outfd",
",",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"LOGINFO",
"(",
"'complete_query_later = True, '",
"'last state of query was: %s, '",
"'will resume later if this function '",
"'is called again with the same query'",
"%",
"jobstatus",
")",
"return",
"None",
"time",
".",
"sleep",
"(",
"refresh",
")",
"timeelapsed",
"=",
"timeelapsed",
"+",
"refresh",
"try",
":",
"resreq",
"=",
"requests",
".",
"get",
"(",
"status_url",
",",
"timeout",
"=",
"timeout",
")",
"resreq",
".",
"raise_for_status",
"(",
")",
"# parse the response XML and get the job status",
"resxml",
"=",
"parseString",
"(",
"resreq",
".",
"text",
")",
"jobstatuselem",
"=",
"(",
"resxml",
".",
"getElementsByTagName",
"(",
"phasekeyword",
")",
"[",
"0",
"]",
")",
"jobstatus",
"=",
"jobstatuselem",
".",
"firstChild",
".",
"toxml",
"(",
")",
"if",
"jobstatus",
"==",
"'COMPLETED'",
":",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'SIMBAD query completed, '",
"'retrieving results...'",
")",
"waitdone",
"=",
"True",
"else",
":",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'elapsed time: %.1f, '",
"'current status: %s, '",
"'status URL: %s, waiting...'",
"%",
"(",
"timeelapsed",
",",
"jobstatus",
",",
"status_url",
")",
")",
"continue",
"except",
"requests",
".",
"exceptions",
".",
"Timeout",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'SIMBAD query timed out while waiting for results '",
"'download results.\\n'",
"'query: %s\\n'",
"'status URL: %s'",
"%",
"(",
"repr",
"(",
"inputparams",
")",
",",
"status_url",
")",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'SIMBAD query failed while waiting for results\\n'",
"'query: %s\\n'",
"'status URL: %s\\n'",
"'status contents: %s'",
"%",
"(",
"repr",
"(",
"inputparams",
")",
",",
"status_url",
",",
"resreq",
".",
"text",
")",
")",
"return",
"None",
"#",
"# at this point, we should be ready to get the query results",
"#",
"result_url_elem",
"=",
"resxml",
".",
"getElementsByTagName",
"(",
"resultkeyword",
")",
"[",
"0",
"]",
"result_url",
"=",
"result_url_elem",
".",
"getAttribute",
"(",
"'xlink:href'",
")",
"result_nrows",
"=",
"result_url_elem",
".",
"getAttribute",
"(",
"'rows'",
")",
"try",
":",
"resreq",
"=",
"requests",
".",
"get",
"(",
"result_url",
",",
"timeout",
"=",
"timeout",
")",
"resreq",
".",
"raise_for_status",
"(",
")",
"if",
"cachefname",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"with",
"gzip",
".",
"open",
"(",
"cachefname",
",",
"'wb'",
")",
"as",
"outfd",
":",
"for",
"chunk",
"in",
"resreq",
".",
"iter_content",
"(",
"chunk_size",
"=",
"65536",
")",
":",
"outfd",
".",
"write",
"(",
"chunk",
")",
"else",
":",
"with",
"open",
"(",
"cachefname",
",",
"'wb'",
")",
"as",
"outfd",
":",
"for",
"chunk",
"in",
"resreq",
".",
"iter_content",
"(",
"chunk_size",
"=",
"65536",
")",
":",
"outfd",
".",
"write",
"(",
"chunk",
")",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'done. rows in result: %s'",
"%",
"result_nrows",
")",
"tablefname",
"=",
"cachefname",
"except",
"requests",
".",
"exceptions",
".",
"Timeout",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'SIMBAD query timed out while trying to '",
"'download results.\\n'",
"'query: %s\\n'",
"'result URL: %s'",
"%",
"(",
"repr",
"(",
"inputparams",
")",
",",
"result_url",
")",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'SIMBAD query failed because of an error '",
"'while trying to download results.\\n'",
"'query: %s\\n'",
"'result URL: %s\\n'",
"'response status code: %s'",
"%",
"(",
"repr",
"(",
"inputparams",
")",
",",
"result_url",
",",
"resreq",
".",
"status_code",
")",
")",
"return",
"None",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'SIMBAD TAP query failed.\\nrequest status was: '",
"'%s.\\nquery was: %s'",
"%",
"(",
"resp_status",
",",
"repr",
"(",
"inputparams",
")",
")",
")",
"return",
"None",
"except",
"requests",
".",
"exceptions",
".",
"Timeout",
"as",
"e",
":",
"LOGERROR",
"(",
"'SIMBAD TAP query submission timed out, '",
"'site is probably down. Request was: '",
"'%s'",
"%",
"repr",
"(",
"inputparams",
")",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'SIMBAD TAP query request failed for '",
"'%s'",
"%",
"repr",
"(",
"inputparams",
")",
")",
"if",
"'resxml'",
"in",
"locals",
"(",
")",
":",
"LOGERROR",
"(",
"'HTTP response from service:\\n%s'",
"%",
"req",
".",
"text",
")",
"return",
"None",
"############################",
"## GET RESULTS FROM CACHE ##",
"############################",
"else",
":",
"if",
"verbose",
":",
"LOGINFO",
"(",
"'getting cached SIMBAD query result for '",
"'request: %s'",
"%",
"(",
"repr",
"(",
"inputparams",
")",
")",
")",
"tablefname",
"=",
"cachefname",
"# try to open the cached file to make sure it's OK",
"try",
":",
"infd",
"=",
"gzip",
".",
"open",
"(",
"cachefname",
",",
"'rb'",
")",
"simbad_objectnames",
"=",
"np",
".",
"genfromtxt",
"(",
"infd",
",",
"names",
"=",
"True",
",",
"delimiter",
"=",
"','",
",",
"dtype",
"=",
"'U20,f8,f8,U20,U20,U20,i8,U600,f8'",
",",
"usecols",
"=",
"(",
"0",
",",
"1",
",",
"2",
",",
"3",
",",
"4",
",",
"5",
",",
"6",
",",
"7",
",",
"8",
")",
",",
"comments",
"=",
"'?'",
",",
"# object names can have '#' in them",
")",
"infd",
".",
"close",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"'could not read cached SIMBAD result file: %s, '",
"'fetching from server again'",
"%",
"cachefname",
")",
"return",
"tap_query",
"(",
"querystr",
",",
"simbad_mirror",
"=",
"simbad_mirror",
",",
"returnformat",
"=",
"returnformat",
",",
"forcefetch",
"=",
"True",
",",
"cachedir",
"=",
"cachedir",
",",
"verbose",
"=",
"verbose",
",",
"timeout",
"=",
"timeout",
",",
"refresh",
"=",
"refresh",
",",
"maxtimeout",
"=",
"maxtimeout",
")",
"#",
"# all done with retrieval, now return the result dict",
"#",
"# return a dict pointing to the result file",
"# we'll parse this later",
"resdict",
"=",
"{",
"'params'",
":",
"inputparams",
",",
"'provenance'",
":",
"provenance",
",",
"'result'",
":",
"tablefname",
"}",
"return",
"resdict"
] | This queries the SIMBAD TAP service using the ADQL query string provided.
Parameters
----------
querystr : str
This is the ADQL query string. See:
http://www.ivoa.net/documents/ADQL/2.0 for the specification.
simbad_mirror : str
This is the key used to select a SIMBAD mirror from the
`SIMBAD_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
returnformat : {'csv','votable','json'}
        The returned file format to request from the SIMBAD TAP service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
jitter : float
This is used to control the scale of the random wait in seconds before
starting the query. Useful in parallelized situations.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table} | [
"This",
"queries",
"the",
"SIMBAD",
"TAP",
"service",
"using",
"the",
"ADQL",
"query",
"string",
"provided",
"."
] | python | valid |
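As a usage illustration for the `tap_query` entry above, here is a minimal sketch of submitting an ADQL query and reading the returned dict. Only the call signature, the '{table}' substitution and the returned keys ('params', 'provenance', 'result') come from the entry; the import path and the example columns and cone are assumptions.

# Hedged sketch; the import path below is assumed, not stated in the entry.
from astrobase.services.simbad import tap_query

# '{table}' is filled in by tap_query with the chosen mirror's table name.
adql = ("select main_id, ra, dec from {table} "
        "where contains(point('ICRS', ra, dec), "
        "circle('ICRS', 279.23, 38.78, 0.1)) = 1")

res = tap_query(adql, returnformat='csv', verbose=True)
if res is not None:
    print(res['provenance'])   # 'cache' or 'new download'
    print(res['result'])       # path to the downloaded table on disk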
rstoneback/pysat | pysat/instruments/omni_hro.py | https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/omni_hro.py#L141-L186 | def time_shift_to_magnetic_poles(inst):
""" OMNI data is time-shifted to bow shock. Time shifted again
to intersections with magnetic pole.
Parameters
-----------
inst : Instrument class object
Instrument with OMNI HRO data
Notes
---------
Time shift calculated using distance to bow shock nose (BSN)
and velocity of solar wind along x-direction.
Warnings
--------
Use at own risk.
"""
# need to fill in Vx to get an estimate of what is going on
inst['Vx'] = inst['Vx'].interpolate('nearest')
inst['Vx'] = inst['Vx'].fillna(method='backfill')
inst['Vx'] = inst['Vx'].fillna(method='pad')
inst['BSN_x'] = inst['BSN_x'].interpolate('nearest')
inst['BSN_x'] = inst['BSN_x'].fillna(method='backfill')
inst['BSN_x'] = inst['BSN_x'].fillna(method='pad')
# make sure there are no gaps larger than a minute
inst.data = inst.data.resample('1T').interpolate('time')
time_x = inst['BSN_x']*6371.2/-inst['Vx']
idx, = np.where(np.isnan(time_x))
if len(idx) > 0:
print (time_x[idx])
print (time_x)
time_x_offset = [pds.DateOffset(seconds = time)
for time in time_x.astype(int)]
new_index=[]
for i, time in enumerate(time_x_offset):
new_index.append(inst.data.index[i] + time)
inst.data.index = new_index
inst.data = inst.data.sort_index()
return | [
"def",
"time_shift_to_magnetic_poles",
"(",
"inst",
")",
":",
"# need to fill in Vx to get an estimate of what is going on",
"inst",
"[",
"'Vx'",
"]",
"=",
"inst",
"[",
"'Vx'",
"]",
".",
"interpolate",
"(",
"'nearest'",
")",
"inst",
"[",
"'Vx'",
"]",
"=",
"inst",
"[",
"'Vx'",
"]",
".",
"fillna",
"(",
"method",
"=",
"'backfill'",
")",
"inst",
"[",
"'Vx'",
"]",
"=",
"inst",
"[",
"'Vx'",
"]",
".",
"fillna",
"(",
"method",
"=",
"'pad'",
")",
"inst",
"[",
"'BSN_x'",
"]",
"=",
"inst",
"[",
"'BSN_x'",
"]",
".",
"interpolate",
"(",
"'nearest'",
")",
"inst",
"[",
"'BSN_x'",
"]",
"=",
"inst",
"[",
"'BSN_x'",
"]",
".",
"fillna",
"(",
"method",
"=",
"'backfill'",
")",
"inst",
"[",
"'BSN_x'",
"]",
"=",
"inst",
"[",
"'BSN_x'",
"]",
".",
"fillna",
"(",
"method",
"=",
"'pad'",
")",
"# make sure there are no gaps larger than a minute",
"inst",
".",
"data",
"=",
"inst",
".",
"data",
".",
"resample",
"(",
"'1T'",
")",
".",
"interpolate",
"(",
"'time'",
")",
"time_x",
"=",
"inst",
"[",
"'BSN_x'",
"]",
"*",
"6371.2",
"/",
"-",
"inst",
"[",
"'Vx'",
"]",
"idx",
",",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isnan",
"(",
"time_x",
")",
")",
"if",
"len",
"(",
"idx",
")",
">",
"0",
":",
"print",
"(",
"time_x",
"[",
"idx",
"]",
")",
"print",
"(",
"time_x",
")",
"time_x_offset",
"=",
"[",
"pds",
".",
"DateOffset",
"(",
"seconds",
"=",
"time",
")",
"for",
"time",
"in",
"time_x",
".",
"astype",
"(",
"int",
")",
"]",
"new_index",
"=",
"[",
"]",
"for",
"i",
",",
"time",
"in",
"enumerate",
"(",
"time_x_offset",
")",
":",
"new_index",
".",
"append",
"(",
"inst",
".",
"data",
".",
"index",
"[",
"i",
"]",
"+",
"time",
")",
"inst",
".",
"data",
".",
"index",
"=",
"new_index",
"inst",
".",
"data",
"=",
"inst",
".",
"data",
".",
"sort_index",
"(",
")",
"return"
] | OMNI data is time-shifted to bow shock. Time shifted again
to intersections with magnetic pole.
Parameters
-----------
inst : Instrument class object
Instrument with OMNI HRO data
Notes
---------
Time shift calculated using distance to bow shock nose (BSN)
and velocity of solar wind along x-direction.
Warnings
--------
Use at own risk. | [
"OMNI",
"data",
"is",
"time",
"-",
"shifted",
"to",
"bow",
"shock",
".",
"Time",
"shifted",
"again",
"to",
"intersections",
"with",
"magnetic",
"pole",
"."
] | python | train |
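A short sketch of applying `time_shift_to_magnetic_poles` to a loaded OMNI HRO `pysat.Instrument`. The platform/name/tag strings and the load call are illustrative only; what the function itself requires is an instrument whose data carry `Vx` and `BSN_x` columns.

import pysat
from pysat.instruments import omni_hro   # module path as listed in the entry

# Illustrative instrument identifiers; adjust to the local pysat installation.
inst = pysat.Instrument(platform='omni', name='hro', tag='1min')
inst.load(2009, 1)   # year, day of year

# Shifts the time index from the bow shock nose toward the magnetic poles,
# using BSN_x (Re) and -Vx (km/s); inst.data is modified in place.
omni_hro.time_shift_to_magnetic_poles(inst)
print(inst.data.index[:5])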
bykof/billomapy | billomapy/billomapy.py | https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L2744-L2757 | def get_all_payments_of_credit_note(self, credit_note_id):
"""
Get all payments of credit note
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param credit_note_id: the credit note id
:return: list
"""
return self._iterate_through_pages(
get_function=self.get_payments_of_credit_note_per_page,
resource=CREDIT_NOTE_PAYMENTS,
**{'credit_note_id': credit_note_id}
) | [
"def",
"get_all_payments_of_credit_note",
"(",
"self",
",",
"credit_note_id",
")",
":",
"return",
"self",
".",
"_iterate_through_pages",
"(",
"get_function",
"=",
"self",
".",
"get_payments_of_credit_note_per_page",
",",
"resource",
"=",
"CREDIT_NOTE_PAYMENTS",
",",
"*",
"*",
"{",
"'credit_note_id'",
":",
"credit_note_id",
"}",
")"
] | Get all payments of credit note
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param credit_note_id: the credit note id
:return: list | [
"Get",
"all",
"payments",
"of",
"credit",
"note",
"This",
"will",
"iterate",
"over",
"all",
"pages",
"until",
"it",
"gets",
"all",
"elements",
".",
"So",
"if",
"the",
"rate",
"limit",
"exceeded",
"it",
"will",
"throw",
"an",
"Exception",
"and",
"you",
"will",
"get",
"nothing"
] | python | train |
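A usage sketch for the pagination helper above. The client construction is a placeholder (its arguments are not part of this entry); only the method name and the `credit_note_id` parameter are taken from the entry.

from billomapy import Billomapy   # import path assumed for illustration

# Placeholder credentials; substitute a real Billomat id and API key.
client = Billomapy('my-billomat-id', 'my-api-key', 'my-app-id', 'my-app-secret')

# Iterates over every result page internally; raises if the rate limit is hit.
payments = client.get_all_payments_of_credit_note(12345)
for payment in payments:
    print(payment)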
CalebBell/thermo | thermo/phase_change.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/phase_change.py#L500-L570 | def MK(T, Tc, omega):
r'''Calculates enthalpy of vaporization at arbitrary temperatures using a
the work of [1]_; requires a chemical's critical temperature and
acentric factor.
The enthalpy of vaporization is given by:
.. math::
\Delta H_{vap} = \Delta H_{vap}^{(0)} + \omega \Delta H_{vap}^{(1)} + \omega^2 \Delta H_{vap}^{(2)}
\frac{\Delta H_{vap}^{(i)}}{RT_c} = b^{(j)} \tau^{1/3} + b_2^{(j)} \tau^{5/6}
+ b_3^{(j)} \tau^{1.2083} + b_4^{(j)}\tau + b_5^{(j)} \tau^2 + b_6^{(j)} \tau^3
\tau = 1-T/T_c
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
omega : float
Acentric factor [-]
Returns
-------
Hvap : float
Enthalpy of vaporization, [J/mol]
Notes
-----
The original article has been reviewed. A total of 18 coefficients are used:
WARNING: The correlation has been implemented as described in the article,
but its results seem different and with some error.
Its results match with other functions however.
Has poor behavior for low-temperature use.
Examples
--------
Problem in article for SMK function.
>>> MK(553.15, 751.35, 0.302)
38727.993546377205
References
----------
.. [1] Morgan, David L., and Riki Kobayashi. "Extension of Pitzer CSP
Models for Vapor Pressures and Heats of Vaporization to Long-Chain
Hydrocarbons." Fluid Phase Equilibria 94 (March 15, 1994): 51-87.
doi:10.1016/0378-3812(94)87051-9.
'''
bs = [[5.2804, 0.080022, 7.2543],
[12.8650, 273.23, -346.45],
[1.1710, 465.08, -610.48],
[-13.1160, -638.51, 839.89],
[0.4858, -145.12, 160.05],
[-1.0880, 74.049, -50.711]]
tau = 1. - T/Tc
H0 = (bs[0][0]*tau**(0.3333) + bs[1][0]*tau**(0.8333) + bs[2][0]*tau**(1.2083) +
bs[3][0]*tau + bs[4][0]*tau**(2) + bs[5][0]*tau**(3))*R*Tc
H1 = (bs[0][1]*tau**(0.3333) + bs[1][1]*tau**(0.8333) + bs[2][1]*tau**(1.2083) +
bs[3][1]*tau + bs[4][1]*tau**(2) + bs[5][1]*tau**(3))*R*Tc
H2 = (bs[0][2]*tau**(0.3333) + bs[1][2]*tau**(0.8333) + bs[2][2]*tau**(1.2083) +
bs[3][2]*tau + bs[4][2]*tau**(2) + bs[5][2]*tau**(3))*R*Tc
return H0 + omega*H1 + omega**2*H2 | [
"def",
"MK",
"(",
"T",
",",
"Tc",
",",
"omega",
")",
":",
"bs",
"=",
"[",
"[",
"5.2804",
",",
"0.080022",
",",
"7.2543",
"]",
",",
"[",
"12.8650",
",",
"273.23",
",",
"-",
"346.45",
"]",
",",
"[",
"1.1710",
",",
"465.08",
",",
"-",
"610.48",
"]",
",",
"[",
"-",
"13.1160",
",",
"-",
"638.51",
",",
"839.89",
"]",
",",
"[",
"0.4858",
",",
"-",
"145.12",
",",
"160.05",
"]",
",",
"[",
"-",
"1.0880",
",",
"74.049",
",",
"-",
"50.711",
"]",
"]",
"tau",
"=",
"1.",
"-",
"T",
"/",
"Tc",
"H0",
"=",
"(",
"bs",
"[",
"0",
"]",
"[",
"0",
"]",
"*",
"tau",
"**",
"(",
"0.3333",
")",
"+",
"bs",
"[",
"1",
"]",
"[",
"0",
"]",
"*",
"tau",
"**",
"(",
"0.8333",
")",
"+",
"bs",
"[",
"2",
"]",
"[",
"0",
"]",
"*",
"tau",
"**",
"(",
"1.2083",
")",
"+",
"bs",
"[",
"3",
"]",
"[",
"0",
"]",
"*",
"tau",
"+",
"bs",
"[",
"4",
"]",
"[",
"0",
"]",
"*",
"tau",
"**",
"(",
"2",
")",
"+",
"bs",
"[",
"5",
"]",
"[",
"0",
"]",
"*",
"tau",
"**",
"(",
"3",
")",
")",
"*",
"R",
"*",
"Tc",
"H1",
"=",
"(",
"bs",
"[",
"0",
"]",
"[",
"1",
"]",
"*",
"tau",
"**",
"(",
"0.3333",
")",
"+",
"bs",
"[",
"1",
"]",
"[",
"1",
"]",
"*",
"tau",
"**",
"(",
"0.8333",
")",
"+",
"bs",
"[",
"2",
"]",
"[",
"1",
"]",
"*",
"tau",
"**",
"(",
"1.2083",
")",
"+",
"bs",
"[",
"3",
"]",
"[",
"1",
"]",
"*",
"tau",
"+",
"bs",
"[",
"4",
"]",
"[",
"1",
"]",
"*",
"tau",
"**",
"(",
"2",
")",
"+",
"bs",
"[",
"5",
"]",
"[",
"1",
"]",
"*",
"tau",
"**",
"(",
"3",
")",
")",
"*",
"R",
"*",
"Tc",
"H2",
"=",
"(",
"bs",
"[",
"0",
"]",
"[",
"2",
"]",
"*",
"tau",
"**",
"(",
"0.3333",
")",
"+",
"bs",
"[",
"1",
"]",
"[",
"2",
"]",
"*",
"tau",
"**",
"(",
"0.8333",
")",
"+",
"bs",
"[",
"2",
"]",
"[",
"2",
"]",
"*",
"tau",
"**",
"(",
"1.2083",
")",
"+",
"bs",
"[",
"3",
"]",
"[",
"2",
"]",
"*",
"tau",
"+",
"bs",
"[",
"4",
"]",
"[",
"2",
"]",
"*",
"tau",
"**",
"(",
"2",
")",
"+",
"bs",
"[",
"5",
"]",
"[",
"2",
"]",
"*",
"tau",
"**",
"(",
"3",
")",
")",
"*",
"R",
"*",
"Tc",
"return",
"H0",
"+",
"omega",
"*",
"H1",
"+",
"omega",
"**",
"2",
"*",
"H2"
] | r'''Calculates enthalpy of vaporization at arbitrary temperatures using a
the work of [1]_; requires a chemical's critical temperature and
acentric factor.
The enthalpy of vaporization is given by:
.. math::
\Delta H_{vap} = \Delta H_{vap}^{(0)} + \omega \Delta H_{vap}^{(1)} + \omega^2 \Delta H_{vap}^{(2)}
\frac{\Delta H_{vap}^{(i)}}{RT_c} = b^{(j)} \tau^{1/3} + b_2^{(j)} \tau^{5/6}
+ b_3^{(j)} \tau^{1.2083} + b_4^{(j)}\tau + b_5^{(j)} \tau^2 + b_6^{(j)} \tau^3
\tau = 1-T/T_c
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
omega : float
Acentric factor [-]
Returns
-------
Hvap : float
Enthalpy of vaporization, [J/mol]
Notes
-----
The original article has been reviewed. A total of 18 coefficients are used:
WARNING: The correlation has been implemented as described in the article,
but its results seem different and with some error.
Its results match with other functions however.
Has poor behavior for low-temperature use.
Examples
--------
Problem in article for SMK function.
>>> MK(553.15, 751.35, 0.302)
38727.993546377205
References
----------
.. [1] Morgan, David L., and Riki Kobayashi. "Extension of Pitzer CSP
Models for Vapor Pressures and Heats of Vaporization to Long-Chain
Hydrocarbons." Fluid Phase Equilibria 94 (March 15, 1994): 51-87.
doi:10.1016/0378-3812(94)87051-9. | [
"r",
"Calculates",
"enthalpy",
"of",
"vaporization",
"at",
"arbitrary",
"temperatures",
"using",
"a",
"the",
"work",
"of",
"[",
"1",
"]",
"_",
";",
"requires",
"a",
"chemical",
"s",
"critical",
"temperature",
"and",
"acentric",
"factor",
"."
] | python | valid |
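A quick numeric check of the `MK` correlation that reproduces the docstring example; the import path follows the file path given for this entry and the expected value is the one quoted in the docstring.

from thermo.phase_change import MK   # path per the entry

# Docstring example: T = 553.15 K, Tc = 751.35 K, omega = 0.302
Hvap = MK(553.15, 751.35, 0.302)
print('%.1f J/mol' % Hvap)               # roughly 38728 J/mol

# tau = 1 - T/Tc shrinks near Tc, so the predicted Hvap falls toward zero there
print(MK(750.0, 751.35, 0.302) < Hvap)   # True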
ManiacalLabs/BiblioPixel | bibliopixel/control/routing.py | https://github.com/ManiacalLabs/BiblioPixel/blob/fd97e6c651a4bbcade64733847f4eec8f7704b7c/bibliopixel/control/routing.py#L53-L70 | def receive(self, msg):
"""
Returns a (receiver, msg) pair, where receiver is `None` if no route for
the message was found, or otherwise an object with a `receive` method
that can accept that `msg`.
"""
x = self.routing
while not isinstance(x, ActionList):
if not x or not msg:
return None, msg
if not isinstance(x, dict):
raise ValueError('Unexpected type %s' % type(x))
_, value = msg.popitem(last=False)
x = x.get(str(value))
return x, msg | [
"def",
"receive",
"(",
"self",
",",
"msg",
")",
":",
"x",
"=",
"self",
".",
"routing",
"while",
"not",
"isinstance",
"(",
"x",
",",
"ActionList",
")",
":",
"if",
"not",
"x",
"or",
"not",
"msg",
":",
"return",
"None",
",",
"msg",
"if",
"not",
"isinstance",
"(",
"x",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"'Unexpected type %s'",
"%",
"type",
"(",
"x",
")",
")",
"_",
",",
"value",
"=",
"msg",
".",
"popitem",
"(",
"last",
"=",
"False",
")",
"x",
"=",
"x",
".",
"get",
"(",
"str",
"(",
"value",
")",
")",
"return",
"x",
",",
"msg"
] | Returns a (receiver, msg) pair, where receiver is `None` if no route for
the message was found, or otherwise an object with a `receive` method
that can accept that `msg`. | [
"Returns",
"a",
"(",
"receiver",
"msg",
")",
"pair",
"where",
"receiver",
"is",
"None",
"if",
"no",
"route",
"for",
"the",
"message",
"was",
"found",
"or",
"otherwise",
"an",
"object",
"with",
"a",
"receive",
"method",
"that",
"can",
"accept",
"that",
"msg",
"."
] | python | valid |
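A self-contained sketch of the traversal `receive` performs: fields popped from the front of an ordered message pick successive branches of a nested routing dict until a leaf is reached. The string leaf here stands in for the `ActionList` used by the real class; that substitution is the only liberty taken.

from collections import OrderedDict

# Nested routing tree; the string leaf stands in for an ActionList.
ROUTING = {'0': {'127': 'brightness-actions', '128': 'gamma-actions'}}

def route(routing, msg):
    x = routing
    while isinstance(x, dict):               # the real code stops at an ActionList
        if not x or not msg:
            return None, msg
        _, value = msg.popitem(last=False)   # consume the message's first field
        x = x.get(str(value))
    return x, msg

msg = OrderedDict([('channel', 0), ('control', 127), ('velocity', 64)])
print(route(ROUTING, msg))
# ('brightness-actions', OrderedDict([('velocity', 64)]))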
klorenz/python-argdeco | argdeco/config.py | https://github.com/klorenz/python-argdeco/blob/8d01acef8c19d6883873689d017b14857876412d/argdeco/config.py#L134-L163 | def update(self, E=None, **F):
'''flatten nested dictionaries to update pathwise
>>> Config({'foo': {'bar': 'glork'}}).update({'foo': {'blub': 'bla'}})
        {'foo': {'bar': 'glork', 'blub': 'bla'}}
In contrast to:
>>> {'foo': {'bar': 'glork'}}.update({'foo': {'blub': 'bla'}})
        {'foo': {'blub': 'bla'}}
'''
def _update(D):
for k,v in D.items():
if super(ConfigDict, self).__contains__(k):
if isinstance(self[k], ConfigDict):
self[k].update(v)
else:
self[k] = self.assimilate(v)
else:
self[k] = self.assimilate(v)
if E is not None:
if not hasattr(E, 'keys'):
E = self.assimilate(dict(E))
_update(E)
_update(F)
return self | [
"def",
"update",
"(",
"self",
",",
"E",
"=",
"None",
",",
"*",
"*",
"F",
")",
":",
"def",
"_update",
"(",
"D",
")",
":",
"for",
"k",
",",
"v",
"in",
"D",
".",
"items",
"(",
")",
":",
"if",
"super",
"(",
"ConfigDict",
",",
"self",
")",
".",
"__contains__",
"(",
"k",
")",
":",
"if",
"isinstance",
"(",
"self",
"[",
"k",
"]",
",",
"ConfigDict",
")",
":",
"self",
"[",
"k",
"]",
".",
"update",
"(",
"v",
")",
"else",
":",
"self",
"[",
"k",
"]",
"=",
"self",
".",
"assimilate",
"(",
"v",
")",
"else",
":",
"self",
"[",
"k",
"]",
"=",
"self",
".",
"assimilate",
"(",
"v",
")",
"if",
"E",
"is",
"not",
"None",
":",
"if",
"not",
"hasattr",
"(",
"E",
",",
"'keys'",
")",
":",
"E",
"=",
"self",
".",
"assimilate",
"(",
"dict",
"(",
"E",
")",
")",
"_update",
"(",
"E",
")",
"_update",
"(",
"F",
")",
"return",
"self"
] | flatten nested dictionaries to update pathwise
>>> Config({'foo': {'bar': 'glork'}}).update({'foo': {'blub': 'bla'}})
        {'foo': {'bar': 'glork', 'blub': 'bla'}}
In contrast to:
>>> {'foo': {'bar': 'glork'}}.update({'foo': {'blub': 'bla'}})
        {'foo': {'blub': 'bla'}}
"flatten",
"nested",
"dictionaries",
"to",
"update",
"pathwise"
] | python | train |
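A self-contained sketch of the path-wise merge the docstring describes, written with plain dicts; `deep_update` is a hypothetical helper name, not part of argdeco.

def deep_update(target, other):
    # Merge nested dicts key by key instead of replacing whole subtrees.
    for key, value in other.items():
        if isinstance(target.get(key), dict) and isinstance(value, dict):
            deep_update(target[key], value)
        else:
            target[key] = value
    return target

cfg = {'foo': {'bar': 'glork'}}
deep_update(cfg, {'foo': {'blub': 'bla'}})
print(cfg)     # {'foo': {'bar': 'glork', 'blub': 'bla'}}; 'bar' survives

plain = {'foo': {'bar': 'glork'}}
plain.update({'foo': {'blub': 'bla'}})
print(plain)   # {'foo': {'blub': 'bla'}}; dict.update replaces the whole subtree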
willkg/markus | markus/backends/logging.py | https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/logging.py#L219-L224 | def gauge(self, stat, value, tags=None):
"""Set a gauge."""
self.rollup()
# FIXME(willkg): what to do with tags?
self.gauge_stats.setdefault(stat, []).append(value) | [
"def",
"gauge",
"(",
"self",
",",
"stat",
",",
"value",
",",
"tags",
"=",
"None",
")",
":",
"self",
".",
"rollup",
"(",
")",
"# FIXME(willkg): what to do with tags?",
"self",
".",
"gauge_stats",
".",
"setdefault",
"(",
"stat",
",",
"[",
"]",
")",
".",
"append",
"(",
"value",
")"
] | Set a gauge. | [
"Set",
"a",
"gauge",
"."
] | python | test |
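A self-contained sketch of the accumulate-then-roll-up pattern behind `gauge`: observations are appended per stat name and summarised later. The `summarize` helper is a stand-in for the backend's `rollup` and is not markus API.

gauge_stats = {}

def record_gauge(stat, value):
    # Same shape as the backend: one growing list of observations per stat name.
    gauge_stats.setdefault(stat, []).append(value)

def summarize():
    # Stand-in for rollup(): report min/avg/max per gauge, then reset.
    for stat, values in gauge_stats.items():
        print(stat, min(values), sum(values) / len(values), max(values))
    gauge_stats.clear()

record_gauge('queue.depth', 3)
record_gauge('queue.depth', 7)
summarize()   # queue.depth 3 5.0 7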
azraq27/neural | neural/stats.py | https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/stats.py#L62-L66 | def mask_average(dset,mask):
'''Returns average of voxels in ``dset`` within non-zero voxels of ``mask``'''
o = nl.run(['3dmaskave','-q','-mask',mask,dset])
if o:
return float(o.output.split()[-1]) | [
"def",
"mask_average",
"(",
"dset",
",",
"mask",
")",
":",
"o",
"=",
"nl",
".",
"run",
"(",
"[",
"'3dmaskave'",
",",
"'-q'",
",",
"'-mask'",
",",
"mask",
",",
"dset",
"]",
")",
"if",
"o",
":",
"return",
"float",
"(",
"o",
".",
"output",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
")"
] | Returns average of voxels in ``dset`` within non-zero voxels of ``mask`` | [
"Returns",
"average",
"of",
"voxels",
"in",
"dset",
"within",
"non",
"-",
"zero",
"voxels",
"of",
"mask"
] | python | train |
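A usage sketch for `mask_average`; it assumes AFNI's `3dmaskave` is on the PATH and that the module imports as `neural.stats` (matching the entry's file path). The dataset and mask names are placeholders.

from neural import stats   # module path per the entry; requires AFNI installed

# Placeholder AFNI dataset names; any volume plus a mask on the same grid works.
avg = stats.mask_average('epi_run1+orig', 'ventricle_mask+orig')
if avg is not None:        # mask_average returns None if 3dmaskave fails
    print('mean signal inside mask: %.2f' % avg)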
maxpumperla/elephas | elephas/utils/sockets.py | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/sockets.py#L58-L71 | def send(socket, data, num_bytes=20):
"""Send data to specified socket.
:param socket: open socket instance
:param data: data to send
    :param num_bytes: width of the zero-padded length header in bytes
    :return: None
"""
pickled_data = pickle.dumps(data, -1)
length = str(len(pickled_data)).zfill(num_bytes)
socket.sendall(length.encode())
socket.sendall(pickled_data) | [
"def",
"send",
"(",
"socket",
",",
"data",
",",
"num_bytes",
"=",
"20",
")",
":",
"pickled_data",
"=",
"pickle",
".",
"dumps",
"(",
"data",
",",
"-",
"1",
")",
"length",
"=",
"str",
"(",
"len",
"(",
"pickled_data",
")",
")",
".",
"zfill",
"(",
"num_bytes",
")",
"socket",
".",
"sendall",
"(",
"length",
".",
"encode",
"(",
")",
")",
"socket",
".",
"sendall",
"(",
"pickled_data",
")"
] | Send data to specified socket.
:param socket: open socket instance
:param data: data to send
    :param num_bytes: width of the zero-padded length header in bytes
    :return: None | [
"Send",
"data",
"to",
"specified",
"socket",
"."
] | python | train |
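`send` writes a fixed-width, zero-padded length header followed by the pickled payload. The receive sketch below is written from that framing to make the protocol explicit; it uses only the standard library and is not copied from elephas's own receiving helper.

import pickle

def _recv_exact(sock, n):
    # socket.recv may return fewer bytes than requested, so loop until done.
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed mid-message')
        buf += chunk
    return buf

def receive(sock, num_bytes=20):
    # Read the zero-padded length header, then exactly that many payload bytes.
    length = int(_recv_exact(sock, num_bytes).decode())
    return pickle.loads(_recv_exact(sock, length))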
QualiSystems/vCenterShell | package/cloudshell/cp/vcenter/network/vnic/vnic_service.py | https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/network/vnic/vnic_service.py#L270-L278 | def map_vnics(vm):
"""
maps the vnic on the vm by name
:param vm: virtual machine
:return: dictionary: {'vnic_name': vnic}
"""
return {device.deviceInfo.label: device
for device in vm.config.hardware.device
if isinstance(device, vim.vm.device.VirtualEthernetCard)} | [
"def",
"map_vnics",
"(",
"vm",
")",
":",
"return",
"{",
"device",
".",
"deviceInfo",
".",
"label",
":",
"device",
"for",
"device",
"in",
"vm",
".",
"config",
".",
"hardware",
".",
"device",
"if",
"isinstance",
"(",
"device",
",",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualEthernetCard",
")",
"}"
] | maps the vnic on the vm by name
:param vm: virtual machine
:return: dictionary: {'vnic_name': vnic} | [
"maps",
"the",
"vnic",
"on",
"the",
"vm",
"by",
"name",
":",
"param",
"vm",
":",
"virtual",
"machine",
":",
"return",
":",
"dictionary",
":",
"{",
"vnic_name",
":",
"vnic",
"}"
] | python | train |
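A brief sketch of consuming the label-to-device mapping returned by `map_vnics`. The `vm` object is assumed to be a pyVmomi VirtualMachine obtained elsewhere, and `map_vnics` is assumed to be callable as shown in the entry.

def print_vnic_macs(vm):
    # vm: a pyVmomi VirtualMachine fetched elsewhere (connection code not shown).
    vnics = map_vnics(vm)   # {'Network adapter 1': VirtualEthernetCard, ...}
    for label, nic in sorted(vnics.items()):
        # every mapped value is a VirtualEthernetCard, so macAddress is defined
        print(label, nic.macAddress)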
Dallinger/Dallinger | dallinger/models.py | https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/models.py#L1022-L1038 | def transformations(self, type=None, failed=False):
"""
Get Transformations done by this Node.
type must be a type of Transformation (defaults to Transformation)
Failed can be True, False or "all"
"""
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid transmission failed".format(failed))
if type is None:
type = Transformation
if failed == "all":
return type.query.filter_by(node_id=self.id).all()
else:
return type.query.filter_by(node_id=self.id, failed=failed).all() | [
"def",
"transformations",
"(",
"self",
",",
"type",
"=",
"None",
",",
"failed",
"=",
"False",
")",
":",
"if",
"failed",
"not",
"in",
"[",
"\"all\"",
",",
"False",
",",
"True",
"]",
":",
"raise",
"ValueError",
"(",
"\"{} is not a valid transmission failed\"",
".",
"format",
"(",
"failed",
")",
")",
"if",
"type",
"is",
"None",
":",
"type",
"=",
"Transformation",
"if",
"failed",
"==",
"\"all\"",
":",
"return",
"type",
".",
"query",
".",
"filter_by",
"(",
"node_id",
"=",
"self",
".",
"id",
")",
".",
"all",
"(",
")",
"else",
":",
"return",
"type",
".",
"query",
".",
"filter_by",
"(",
"node_id",
"=",
"self",
".",
"id",
",",
"failed",
"=",
"failed",
")",
".",
"all",
"(",
")"
] | Get Transformations done by this Node.
type must be a type of Transformation (defaults to Transformation)
Failed can be True, False or "all" | [
"Get",
"Transformations",
"done",
"by",
"this",
"Node",
"."
] | python | train |
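A schematic use of the `transformations` filter on a Dallinger node; the node lookup is illustrative, and only the keyword arguments (`type`, `failed`) and the accepted `failed` values come from the entry.

from dallinger.models import Node, Transformation

def count_node_transformations(session, node_id):
    # Illustrative lookup; in an experiment the node usually comes from the network.
    node = session.query(Node).get(node_id)
    live = node.transformations(type=Transformation, failed=False)
    everything = node.transformations(failed="all")
    return len(live), len(everything)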
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L10541-L10560 | def camera_feedback_send(self, time_usec, target_system, cam_idx, img_idx, lat, lng, alt_msl, alt_rel, roll, pitch, yaw, foc_len, flags, force_mavlink1=False):
'''
Camera Capture Feedback
time_usec : Image timestamp (microseconds since UNIX epoch), as passed in by CAMERA_STATUS message (or autopilot if no CCB) (uint64_t)
target_system : System ID (uint8_t)
cam_idx : Camera ID (uint8_t)
img_idx : Image index (uint16_t)
lat : Latitude in (deg * 1E7) (int32_t)
lng : Longitude in (deg * 1E7) (int32_t)
alt_msl : Altitude Absolute (meters AMSL) (float)
alt_rel : Altitude Relative (meters above HOME location) (float)
roll : Camera Roll angle (earth frame, degrees, +-180) (float)
pitch : Camera Pitch angle (earth frame, degrees, +-180) (float)
yaw : Camera Yaw (earth frame, degrees, 0-360, true) (float)
foc_len : Focal Length (mm) (float)
flags : See CAMERA_FEEDBACK_FLAGS enum for definition of the bitmask (uint8_t)
'''
return self.send(self.camera_feedback_encode(time_usec, target_system, cam_idx, img_idx, lat, lng, alt_msl, alt_rel, roll, pitch, yaw, foc_len, flags), force_mavlink1=force_mavlink1) | [
"def",
"camera_feedback_send",
"(",
"self",
",",
"time_usec",
",",
"target_system",
",",
"cam_idx",
",",
"img_idx",
",",
"lat",
",",
"lng",
",",
"alt_msl",
",",
"alt_rel",
",",
"roll",
",",
"pitch",
",",
"yaw",
",",
"foc_len",
",",
"flags",
",",
"force_mavlink1",
"=",
"False",
")",
":",
"return",
"self",
".",
"send",
"(",
"self",
".",
"camera_feedback_encode",
"(",
"time_usec",
",",
"target_system",
",",
"cam_idx",
",",
"img_idx",
",",
"lat",
",",
"lng",
",",
"alt_msl",
",",
"alt_rel",
",",
"roll",
",",
"pitch",
",",
"yaw",
",",
"foc_len",
",",
"flags",
")",
",",
"force_mavlink1",
"=",
"force_mavlink1",
")"
] | Camera Capture Feedback
time_usec : Image timestamp (microseconds since UNIX epoch), as passed in by CAMERA_STATUS message (or autopilot if no CCB) (uint64_t)
target_system : System ID (uint8_t)
cam_idx : Camera ID (uint8_t)
img_idx : Image index (uint16_t)
lat : Latitude in (deg * 1E7) (int32_t)
lng : Longitude in (deg * 1E7) (int32_t)
alt_msl : Altitude Absolute (meters AMSL) (float)
alt_rel : Altitude Relative (meters above HOME location) (float)
roll : Camera Roll angle (earth frame, degrees, +-180) (float)
pitch : Camera Pitch angle (earth frame, degrees, +-180) (float)
yaw : Camera Yaw (earth frame, degrees, 0-360, true) (float)
foc_len : Focal Length (mm) (float)
flags : See CAMERA_FEEDBACK_FLAGS enum for definition of the bitmask (uint8_t) | [
"Camera",
"Capture",
"Feedback"
] | python | train |
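A sketch of emitting this message over a MAVLink link, assuming the ardupilotmega dialect is loaded; the connection string and all field values are placeholders, and the argument order follows the signature shown in the entry.

from pymavlink import mavutil

# Placeholder connection string; adjust for the actual autopilot link.
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()

master.mav.camera_feedback_send(
    int(1.7e15),       # time_usec: image timestamp, microseconds since epoch
    1,                 # target_system
    0,                 # cam_idx
    42,                # img_idx
    473977000,         # lat (deg * 1e7)
    85456000,          # lng (deg * 1e7)
    488.0,             # alt_msl (m AMSL)
    120.0,             # alt_rel (m above home)
    0.0, -90.0, 0.0,   # roll, pitch, yaw (deg)
    35.0,              # foc_len (mm)
    0)                 # flags (CAMERA_FEEDBACK_FLAGS bitmask)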
saltstack/salt | salt/fileserver/svnfs.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L591-L635 | def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Find the first file to match the path and ref. This operates similarly to
the roots file sever but with assumptions of the directory structure
based on svn standard practices.
'''
fnd = {'path': '',
'rel': ''}
if os.path.isabs(path) or tgt_env not in envs():
return fnd
for repo in init():
env_root = _env_root(repo, tgt_env)
if env_root is None:
# Environment not found, try the next repo
continue
if repo['mountpoint'] \
and not path.startswith(repo['mountpoint'] + os.path.sep):
continue
repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep)
if repo['root']:
repo_path = os.path.join(repo['root'], repo_path)
full = os.path.join(env_root, repo_path)
if os.path.isfile(full):
fnd['rel'] = path
fnd['path'] = full
try:
# Converting the stat result to a list, the elements of the
# list correspond to the following stat_result params:
# 0 => st_mode=33188
# 1 => st_ino=10227377
# 2 => st_dev=65026
# 3 => st_nlink=1
# 4 => st_uid=1000
# 5 => st_gid=1000
# 6 => st_size=1056233
# 7 => st_atime=1468284229
# 8 => st_mtime=1456338235
# 9 => st_ctime=1456338235
fnd['stat'] = list(os.stat(full))
except Exception:
pass
return fnd
return fnd | [
"def",
"find_file",
"(",
"path",
",",
"tgt_env",
"=",
"'base'",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=W0613",
"fnd",
"=",
"{",
"'path'",
":",
"''",
",",
"'rel'",
":",
"''",
"}",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
"or",
"tgt_env",
"not",
"in",
"envs",
"(",
")",
":",
"return",
"fnd",
"for",
"repo",
"in",
"init",
"(",
")",
":",
"env_root",
"=",
"_env_root",
"(",
"repo",
",",
"tgt_env",
")",
"if",
"env_root",
"is",
"None",
":",
"# Environment not found, try the next repo",
"continue",
"if",
"repo",
"[",
"'mountpoint'",
"]",
"and",
"not",
"path",
".",
"startswith",
"(",
"repo",
"[",
"'mountpoint'",
"]",
"+",
"os",
".",
"path",
".",
"sep",
")",
":",
"continue",
"repo_path",
"=",
"path",
"[",
"len",
"(",
"repo",
"[",
"'mountpoint'",
"]",
")",
":",
"]",
".",
"lstrip",
"(",
"os",
".",
"path",
".",
"sep",
")",
"if",
"repo",
"[",
"'root'",
"]",
":",
"repo_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"repo",
"[",
"'root'",
"]",
",",
"repo_path",
")",
"full",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env_root",
",",
"repo_path",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"full",
")",
":",
"fnd",
"[",
"'rel'",
"]",
"=",
"path",
"fnd",
"[",
"'path'",
"]",
"=",
"full",
"try",
":",
"# Converting the stat result to a list, the elements of the",
"# list correspond to the following stat_result params:",
"# 0 => st_mode=33188",
"# 1 => st_ino=10227377",
"# 2 => st_dev=65026",
"# 3 => st_nlink=1",
"# 4 => st_uid=1000",
"# 5 => st_gid=1000",
"# 6 => st_size=1056233",
"# 7 => st_atime=1468284229",
"# 8 => st_mtime=1456338235",
"# 9 => st_ctime=1456338235",
"fnd",
"[",
"'stat'",
"]",
"=",
"list",
"(",
"os",
".",
"stat",
"(",
"full",
")",
")",
"except",
"Exception",
":",
"pass",
"return",
"fnd",
"return",
"fnd"
] | Find the first file to match the path and ref. This operates similarly to
the roots file sever but with assumptions of the directory structure
based on svn standard practices. | [
"Find",
"the",
"first",
"file",
"to",
"match",
"the",
"path",
"and",
"ref",
".",
"This",
"operates",
"similarly",
"to",
"the",
"roots",
"file",
"sever",
"but",
"with",
"assumptions",
"of",
"the",
"directory",
"structure",
"based",
"on",
"svn",
"standard",
"practices",
"."
] | python | train |
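A small sketch of reading the `fnd` dict this backend returns, in particular the 10-element `stat` list whose positions mirror `os.stat_result` (mode, inode, device, nlink, uid, gid, size, atime, mtime, ctime). The example values are invented.

import datetime

# Shape of a successful lookup per the function above (values are made up).
fnd = {
    'rel': 'webserver/files/nginx.conf',
    'path': '/srv/svnfs/base/trunk/webserver/files/nginx.conf',
    'stat': [33188, 10227377, 65026, 1, 1000, 1000, 1056233,
             1468284229, 1456338235, 1456338235],
}

if fnd['path']:
    size = fnd['stat'][6]
    mtime = datetime.datetime.fromtimestamp(fnd['stat'][8])
    print('%s: %d bytes, modified %s' % (fnd['rel'], size, mtime))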
linnarsson-lab/loompy | loompy/loompy.py | https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L708-L762 | def batch_scan_layers(self, cells: np.ndarray = None, genes: np.ndarray = None, axis: int = 0, batch_size: int = 1000, layers: Iterable = None) -> Iterable[Tuple[int, np.ndarray, Dict]]:
"""
**DEPRECATED** - Use `scan` instead
"""
deprecated("'batch_scan_layers' is deprecated. Use 'scan' instead")
if cells is None:
cells = np.fromiter(range(self.shape[1]), dtype='int')
if genes is None:
genes = np.fromiter(range(self.shape[0]), dtype='int')
if layers is None:
layers = self.layers.keys()
if axis == 1:
cols_per_chunk = batch_size
ix = 0
while ix < self.shape[1]:
cols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)
selection = cells - ix
# Pick out the cells that are in this batch
selection = selection[np.where(np.logical_and(selection >= 0, selection < cols_per_chunk))[0]]
if selection.shape[0] == 0:
ix += cols_per_chunk
continue
# Load the whole chunk from the file, then extract genes and cells using fancy indexing
vals = dict()
for key in layers:
vals[key] = self.layers[key][:, ix:ix + cols_per_chunk]
vals[key] = vals[key][genes, :]
vals[key] = vals[key][:, selection]
yield (ix, ix + selection, vals)
ix += cols_per_chunk
if axis == 0:
rows_per_chunk = batch_size
ix = 0
while ix < self.shape[0]:
rows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)
selection = genes - ix
# Pick out the genes that are in this batch
selection = selection[np.where(np.logical_and(selection >= 0, selection < rows_per_chunk))[0]]
if selection.shape[0] == 0:
ix += rows_per_chunk
continue
# Load the whole chunk from the file, then extract genes and cells using fancy indexing
vals = dict()
for key in layers:
vals[key] = self.layers[key][ix:ix + rows_per_chunk, :]
vals[key] = vals[key][selection, :]
vals[key] = vals[key][:, cells]
yield (ix, ix + selection, vals)
ix += rows_per_chunk | [
"def",
"batch_scan_layers",
"(",
"self",
",",
"cells",
":",
"np",
".",
"ndarray",
"=",
"None",
",",
"genes",
":",
"np",
".",
"ndarray",
"=",
"None",
",",
"axis",
":",
"int",
"=",
"0",
",",
"batch_size",
":",
"int",
"=",
"1000",
",",
"layers",
":",
"Iterable",
"=",
"None",
")",
"->",
"Iterable",
"[",
"Tuple",
"[",
"int",
",",
"np",
".",
"ndarray",
",",
"Dict",
"]",
"]",
":",
"deprecated",
"(",
"\"'batch_scan_layers' is deprecated. Use 'scan' instead\"",
")",
"if",
"cells",
"is",
"None",
":",
"cells",
"=",
"np",
".",
"fromiter",
"(",
"range",
"(",
"self",
".",
"shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"'int'",
")",
"if",
"genes",
"is",
"None",
":",
"genes",
"=",
"np",
".",
"fromiter",
"(",
"range",
"(",
"self",
".",
"shape",
"[",
"0",
"]",
")",
",",
"dtype",
"=",
"'int'",
")",
"if",
"layers",
"is",
"None",
":",
"layers",
"=",
"self",
".",
"layers",
".",
"keys",
"(",
")",
"if",
"axis",
"==",
"1",
":",
"cols_per_chunk",
"=",
"batch_size",
"ix",
"=",
"0",
"while",
"ix",
"<",
"self",
".",
"shape",
"[",
"1",
"]",
":",
"cols_per_chunk",
"=",
"min",
"(",
"self",
".",
"shape",
"[",
"1",
"]",
"-",
"ix",
",",
"cols_per_chunk",
")",
"selection",
"=",
"cells",
"-",
"ix",
"# Pick out the cells that are in this batch",
"selection",
"=",
"selection",
"[",
"np",
".",
"where",
"(",
"np",
".",
"logical_and",
"(",
"selection",
">=",
"0",
",",
"selection",
"<",
"cols_per_chunk",
")",
")",
"[",
"0",
"]",
"]",
"if",
"selection",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"ix",
"+=",
"cols_per_chunk",
"continue",
"# Load the whole chunk from the file, then extract genes and cells using fancy indexing",
"vals",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"layers",
":",
"vals",
"[",
"key",
"]",
"=",
"self",
".",
"layers",
"[",
"key",
"]",
"[",
":",
",",
"ix",
":",
"ix",
"+",
"cols_per_chunk",
"]",
"vals",
"[",
"key",
"]",
"=",
"vals",
"[",
"key",
"]",
"[",
"genes",
",",
":",
"]",
"vals",
"[",
"key",
"]",
"=",
"vals",
"[",
"key",
"]",
"[",
":",
",",
"selection",
"]",
"yield",
"(",
"ix",
",",
"ix",
"+",
"selection",
",",
"vals",
")",
"ix",
"+=",
"cols_per_chunk",
"if",
"axis",
"==",
"0",
":",
"rows_per_chunk",
"=",
"batch_size",
"ix",
"=",
"0",
"while",
"ix",
"<",
"self",
".",
"shape",
"[",
"0",
"]",
":",
"rows_per_chunk",
"=",
"min",
"(",
"self",
".",
"shape",
"[",
"0",
"]",
"-",
"ix",
",",
"rows_per_chunk",
")",
"selection",
"=",
"genes",
"-",
"ix",
"# Pick out the genes that are in this batch",
"selection",
"=",
"selection",
"[",
"np",
".",
"where",
"(",
"np",
".",
"logical_and",
"(",
"selection",
">=",
"0",
",",
"selection",
"<",
"rows_per_chunk",
")",
")",
"[",
"0",
"]",
"]",
"if",
"selection",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"ix",
"+=",
"rows_per_chunk",
"continue",
"# Load the whole chunk from the file, then extract genes and cells using fancy indexing",
"vals",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"layers",
":",
"vals",
"[",
"key",
"]",
"=",
"self",
".",
"layers",
"[",
"key",
"]",
"[",
"ix",
":",
"ix",
"+",
"rows_per_chunk",
",",
":",
"]",
"vals",
"[",
"key",
"]",
"=",
"vals",
"[",
"key",
"]",
"[",
"selection",
",",
":",
"]",
"vals",
"[",
"key",
"]",
"=",
"vals",
"[",
"key",
"]",
"[",
":",
",",
"cells",
"]",
"yield",
"(",
"ix",
",",
"ix",
"+",
"selection",
",",
"vals",
")",
"ix",
"+=",
"rows_per_chunk"
] | **DEPRECATED** - Use `scan` instead | [
"**",
"DEPRECATED",
"**",
"-",
"Use",
"scan",
"instead"
] | python | train |
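A minimal consumption sketch for the deprecated batch_scan_layers generator above, assuming an existing .loom file at the hypothetical path example.loom; new code should prefer scan(), as the deprecation notice says. The empty-string key for the default layer is the usual loompy convention.

    import loompy

    # Hypothetical file path; assumes the file already exists.
    with loompy.connect("example.loom") as ds:
        # axis=1 walks the matrix in column (cell) batches of 512
        for ix, indexes, layer_chunks in ds.batch_scan_layers(axis=1, batch_size=512):
            main_chunk = layer_chunks[""]   # default layer for this batch
            print(ix, main_chunk.shape)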
peri-source/peri | peri/opt/optimize.py | https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L735-L777 | def _run1(self):
"""workhorse for do_run_1"""
if self.check_update_J():
self.update_J()
else:
if self.check_Broyden_J():
self.update_Broyden_J()
if self.check_update_eig_J():
self.update_eig_J()
#1. Assuming that J starts updated:
delta_vals = self.find_LM_updates(self.calc_grad())
#2. Increase damping until we get a good step:
er1 = self.update_function(self.param_vals + delta_vals)
good_step = (find_best_step([self.error, er1]) == 1)
if not good_step:
er0 = self.update_function(self.param_vals)
if np.abs(er0 -self.error)/er0 > 1e-7:
raise RuntimeError('Function updates are not exact.')
CLOG.debug('Bad step, increasing damping')
CLOG.debug('\t\t%f\t%f' % (self.error, er1))
grad = self.calc_grad()
for _try in range(self._max_inner_loop):
self.increase_damping()
delta_vals = self.find_LM_updates(grad)
er1 = self.update_function(self.param_vals + delta_vals)
good_step = (find_best_step([self.error, er1]) == 1)
if good_step:
break
else:
er0 = self.update_function(self.param_vals)
CLOG.warn('Stuck!')
if np.abs(er0 -self.error)/er0 > 1e-7:
raise RuntimeError('Function updates are not exact.')
#state is updated, now params:
if good_step:
self._last_error = self.error
self.error = er1
CLOG.debug('Good step\t%f\t%f' % (self._last_error, self.error))
self.update_param_vals(delta_vals, incremental=True)
self.decrease_damping() | [
"def",
"_run1",
"(",
"self",
")",
":",
"if",
"self",
".",
"check_update_J",
"(",
")",
":",
"self",
".",
"update_J",
"(",
")",
"else",
":",
"if",
"self",
".",
"check_Broyden_J",
"(",
")",
":",
"self",
".",
"update_Broyden_J",
"(",
")",
"if",
"self",
".",
"check_update_eig_J",
"(",
")",
":",
"self",
".",
"update_eig_J",
"(",
")",
"#1. Assuming that J starts updated:",
"delta_vals",
"=",
"self",
".",
"find_LM_updates",
"(",
"self",
".",
"calc_grad",
"(",
")",
")",
"#2. Increase damping until we get a good step:",
"er1",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"delta_vals",
")",
"good_step",
"=",
"(",
"find_best_step",
"(",
"[",
"self",
".",
"error",
",",
"er1",
"]",
")",
"==",
"1",
")",
"if",
"not",
"good_step",
":",
"er0",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
")",
"if",
"np",
".",
"abs",
"(",
"er0",
"-",
"self",
".",
"error",
")",
"/",
"er0",
">",
"1e-7",
":",
"raise",
"RuntimeError",
"(",
"'Function updates are not exact.'",
")",
"CLOG",
".",
"debug",
"(",
"'Bad step, increasing damping'",
")",
"CLOG",
".",
"debug",
"(",
"'\\t\\t%f\\t%f'",
"%",
"(",
"self",
".",
"error",
",",
"er1",
")",
")",
"grad",
"=",
"self",
".",
"calc_grad",
"(",
")",
"for",
"_try",
"in",
"range",
"(",
"self",
".",
"_max_inner_loop",
")",
":",
"self",
".",
"increase_damping",
"(",
")",
"delta_vals",
"=",
"self",
".",
"find_LM_updates",
"(",
"grad",
")",
"er1",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"delta_vals",
")",
"good_step",
"=",
"(",
"find_best_step",
"(",
"[",
"self",
".",
"error",
",",
"er1",
"]",
")",
"==",
"1",
")",
"if",
"good_step",
":",
"break",
"else",
":",
"er0",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
")",
"CLOG",
".",
"warn",
"(",
"'Stuck!'",
")",
"if",
"np",
".",
"abs",
"(",
"er0",
"-",
"self",
".",
"error",
")",
"/",
"er0",
">",
"1e-7",
":",
"raise",
"RuntimeError",
"(",
"'Function updates are not exact.'",
")",
"#state is updated, now params:",
"if",
"good_step",
":",
"self",
".",
"_last_error",
"=",
"self",
".",
"error",
"self",
".",
"error",
"=",
"er1",
"CLOG",
".",
"debug",
"(",
"'Good step\\t%f\\t%f'",
"%",
"(",
"self",
".",
"_last_error",
",",
"self",
".",
"error",
")",
")",
"self",
".",
"update_param_vals",
"(",
"delta_vals",
",",
"incremental",
"=",
"True",
")",
"self",
".",
"decrease_damping",
"(",
")"
] | workhorse for do_run_1 | [
"workhorse",
"for",
"do_run_1"
] | python | valid |
jobovy/galpy | galpy/potential/Potential.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/Potential.py#L2678-L2681 | def _rlfunc(rl,lz,pot):
"""Function that gives rvc-lz"""
thisvcirc= vcirc(pot,rl,use_physical=False)
return rl*thisvcirc-lz | [
"def",
"_rlfunc",
"(",
"rl",
",",
"lz",
",",
"pot",
")",
":",
"thisvcirc",
"=",
"vcirc",
"(",
"pot",
",",
"rl",
",",
"use_physical",
"=",
"False",
")",
"return",
"rl",
"*",
"thisvcirc",
"-",
"lz"
] | Function that gives rvc-lz | [
"Function",
"that",
"gives",
"rvc",
"-",
"lz"
] | python | train |
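_rlfunc above is the zero-finding target used to solve for the guiding-centre radius rl at which rl * vcirc(rl) equals a given angular momentum Lz. A sketch of that root solve, assuming galpy and scipy are installed; MWPotential2014 and the 0.1 to 10.0 bracket are illustrative choices in galpy's natural units.

    from scipy.optimize import brentq
    from galpy.potential import MWPotential2014, vcirc

    lz = 1.1  # target specific angular momentum, natural units
    rl = brentq(lambda r: r * vcirc(MWPotential2014, r, use_physical=False) - lz,
                0.1, 10.0)
    print(rl)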
SpriteLink/NIPAP | pynipap/pynipap.py | https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/pynipap/pynipap.py#L1145-L1256 | def save(self, args=None):
""" Save prefix to NIPAP.
If the object represents a new prefix unknown to NIPAP (attribute
`id` is `None`) this function maps to the function
:py:func:`nipap.backend.Nipap.add_prefix` in the backend, used to
create a new prefix. Otherwise it maps to the function
:py:func:`nipap.backend.Nipap.edit_prefix` in the backend, used to
modify the prefix. Please see the documentation for the backend
functions for information regarding input arguments and return
values.
"""
if args is None:
args = {}
xmlrpc = XMLRPCConnection()
data = {
'description': self.description,
'comment': self.comment,
'tags': [],
'node': self.node,
'type': self.type,
'country': self.country,
'order_id': self.order_id,
'customer_id': self.customer_id,
'external_key': self.external_key,
'alarm_priority': self.alarm_priority,
'monitor': self.monitor,
'vlan': self.vlan,
'avps': self.avps,
'expires': self.expires
}
if self.status is not None:
data['status'] = self.status
for tag_name in self.tags:
data['tags'].append(tag_name)
if self.vrf is not None:
if not isinstance(self.vrf, VRF):
raise NipapValueError("'vrf' attribute not instance of VRF class.")
data['vrf_id'] = self.vrf.id
# Prefix can be none if we are creating a new prefix
# from a pool or other prefix!
if self.prefix is not None:
data['prefix'] = self.prefix
if self.pool is None:
data['pool_id'] = None
else:
if not isinstance(self.pool, Pool):
raise NipapValueError("'pool' attribute not instance of Pool class.")
data['pool_id'] = self.pool.id
# New object, create from scratch
if self.id is None:
# format args
x_args = {}
if 'from-pool' in args:
x_args['from-pool'] = { 'id': args['from-pool'].id }
if 'family' in args:
x_args['family'] = args['family']
if 'from-prefix' in args:
x_args['from-prefix'] = args['from-prefix']
if 'prefix_length' in args:
x_args['prefix_length'] = args['prefix_length']
try:
prefix = xmlrpc.connection.add_prefix(
{
'attr': data,
'args': x_args,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
# Old object, edit
else:
# Add authoritative source to data
data['authoritative_source'] = self.authoritative_source
try:
# save
prefixes = xmlrpc.connection.edit_prefix(
{
'prefix': { 'id': self.id },
'attr': data,
'auth': self._auth_opts.options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
if len(prefixes) != 1:
raise NipapError('Prefix edit returned %d entries, should be 1.' % len(prefixes))
prefix = prefixes[0]
# Refresh object data with attributes from add/edit operation
Prefix.from_dict(prefix, self)
# update cache
_cache['Prefix'][self.id] = self
if self.pool is not None:
if self.pool.id in _cache['Pool']:
del _cache['Pool'][self.pool.id] | [
"def",
"save",
"(",
"self",
",",
"args",
"=",
"None",
")",
":",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"{",
"}",
"xmlrpc",
"=",
"XMLRPCConnection",
"(",
")",
"data",
"=",
"{",
"'description'",
":",
"self",
".",
"description",
",",
"'comment'",
":",
"self",
".",
"comment",
",",
"'tags'",
":",
"[",
"]",
",",
"'node'",
":",
"self",
".",
"node",
",",
"'type'",
":",
"self",
".",
"type",
",",
"'country'",
":",
"self",
".",
"country",
",",
"'order_id'",
":",
"self",
".",
"order_id",
",",
"'customer_id'",
":",
"self",
".",
"customer_id",
",",
"'external_key'",
":",
"self",
".",
"external_key",
",",
"'alarm_priority'",
":",
"self",
".",
"alarm_priority",
",",
"'monitor'",
":",
"self",
".",
"monitor",
",",
"'vlan'",
":",
"self",
".",
"vlan",
",",
"'avps'",
":",
"self",
".",
"avps",
",",
"'expires'",
":",
"self",
".",
"expires",
"}",
"if",
"self",
".",
"status",
"is",
"not",
"None",
":",
"data",
"[",
"'status'",
"]",
"=",
"self",
".",
"status",
"for",
"tag_name",
"in",
"self",
".",
"tags",
":",
"data",
"[",
"'tags'",
"]",
".",
"append",
"(",
"tag_name",
")",
"if",
"self",
".",
"vrf",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"vrf",
",",
"VRF",
")",
":",
"raise",
"NipapValueError",
"(",
"\"'vrf' attribute not instance of VRF class.\"",
")",
"data",
"[",
"'vrf_id'",
"]",
"=",
"self",
".",
"vrf",
".",
"id",
"# Prefix can be none if we are creating a new prefix",
"# from a pool or other prefix!",
"if",
"self",
".",
"prefix",
"is",
"not",
"None",
":",
"data",
"[",
"'prefix'",
"]",
"=",
"self",
".",
"prefix",
"if",
"self",
".",
"pool",
"is",
"None",
":",
"data",
"[",
"'pool_id'",
"]",
"=",
"None",
"else",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"pool",
",",
"Pool",
")",
":",
"raise",
"NipapValueError",
"(",
"\"'pool' attribute not instance of Pool class.\"",
")",
"data",
"[",
"'pool_id'",
"]",
"=",
"self",
".",
"pool",
".",
"id",
"# New object, create from scratch",
"if",
"self",
".",
"id",
"is",
"None",
":",
"# format args",
"x_args",
"=",
"{",
"}",
"if",
"'from-pool'",
"in",
"args",
":",
"x_args",
"[",
"'from-pool'",
"]",
"=",
"{",
"'id'",
":",
"args",
"[",
"'from-pool'",
"]",
".",
"id",
"}",
"if",
"'family'",
"in",
"args",
":",
"x_args",
"[",
"'family'",
"]",
"=",
"args",
"[",
"'family'",
"]",
"if",
"'from-prefix'",
"in",
"args",
":",
"x_args",
"[",
"'from-prefix'",
"]",
"=",
"args",
"[",
"'from-prefix'",
"]",
"if",
"'prefix_length'",
"in",
"args",
":",
"x_args",
"[",
"'prefix_length'",
"]",
"=",
"args",
"[",
"'prefix_length'",
"]",
"try",
":",
"prefix",
"=",
"xmlrpc",
".",
"connection",
".",
"add_prefix",
"(",
"{",
"'attr'",
":",
"data",
",",
"'args'",
":",
"x_args",
",",
"'auth'",
":",
"self",
".",
"_auth_opts",
".",
"options",
"}",
")",
"except",
"xmlrpclib",
".",
"Fault",
"as",
"xml_fault",
":",
"raise",
"_fault_to_exception",
"(",
"xml_fault",
")",
"# Old object, edit",
"else",
":",
"# Add authoritative source to data",
"data",
"[",
"'authoritative_source'",
"]",
"=",
"self",
".",
"authoritative_source",
"try",
":",
"# save",
"prefixes",
"=",
"xmlrpc",
".",
"connection",
".",
"edit_prefix",
"(",
"{",
"'prefix'",
":",
"{",
"'id'",
":",
"self",
".",
"id",
"}",
",",
"'attr'",
":",
"data",
",",
"'auth'",
":",
"self",
".",
"_auth_opts",
".",
"options",
"}",
")",
"except",
"xmlrpclib",
".",
"Fault",
"as",
"xml_fault",
":",
"raise",
"_fault_to_exception",
"(",
"xml_fault",
")",
"if",
"len",
"(",
"prefixes",
")",
"!=",
"1",
":",
"raise",
"NipapError",
"(",
"'Prefix edit returned %d entries, should be 1.'",
"%",
"len",
"(",
"prefixes",
")",
")",
"prefix",
"=",
"prefixes",
"[",
"0",
"]",
"# Refresh object data with attributes from add/edit operation",
"Prefix",
".",
"from_dict",
"(",
"prefix",
",",
"self",
")",
"# update cache",
"_cache",
"[",
"'Prefix'",
"]",
"[",
"self",
".",
"id",
"]",
"=",
"self",
"if",
"self",
".",
"pool",
"is",
"not",
"None",
":",
"if",
"self",
".",
"pool",
".",
"id",
"in",
"_cache",
"[",
"'Pool'",
"]",
":",
"del",
"_cache",
"[",
"'Pool'",
"]",
"[",
"self",
".",
"pool",
".",
"id",
"]"
] | Save prefix to NIPAP.
If the object represents a new prefix unknown to NIPAP (attribute
`id` is `None`) this function maps to the function
:py:func:`nipap.backend.Nipap.add_prefix` in the backend, used to
create a new prefix. Otherwise it maps to the function
:py:func:`nipap.backend.Nipap.edit_prefix` in the backend, used to
modify the VRF. Please see the documentation for the backend
functions for information regarding input arguments and return
values. | [
"Save",
"prefix",
"to",
"NIPAP",
"."
] | python | train |
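A hedged end-to-end sketch of Prefix.save() creating a new prefix; it assumes a reachable NIPAP backend, and the XML-RPC URI, credentials and prefix values are placeholders.

    import pynipap
    from pynipap import Prefix

    # Placeholder connection details; adjust to your NIPAP deployment.
    pynipap.xmlrpc_uri = "http://user:secret@nipap.example.com:1337/XMLRPC"
    auth = pynipap.AuthOptions({"authoritative_source": "example-script"})

    p = Prefix()
    p.prefix = "192.0.2.0/24"
    p.type = "assignment"
    p.description = "Documentation example"
    p.save()        # id is None, so this maps to add_prefix on the backend
    print(p.id)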
codelv/enaml-native | src/enamlnative/android/android_list_view.py | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_list_view.py#L201-L208 | def get_declared_items(self):
""" Override to do it manually
"""
for k, v in super(AndroidListView, self).get_declared_items():
if k == 'layout':
yield k, v
break | [
"def",
"get_declared_items",
"(",
"self",
")",
":",
"for",
"k",
",",
"v",
"in",
"super",
"(",
"AndroidListView",
",",
"self",
")",
".",
"get_declared_items",
"(",
")",
":",
"if",
"k",
"==",
"'layout'",
":",
"yield",
"k",
",",
"v",
"break"
] | Override to do it manually | [
"Override",
"to",
"do",
"it",
"manually"
] | python | train |
saltstack/salt | salt/fileclient.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L960-L989 | def hash_and_stat_file(self, path, saltenv='base'):
'''
Return the hash of a file, to get the hash of a file in the pillar_roots
prepend the path with salt://<file on server> otherwise, prepend the
file with / for a local file.
Additionally, return the stat result of the file, or None if no stat
results were found.
'''
ret = {}
fnd = self.__get_file_path(path, saltenv)
if fnd is None:
return ret, None
try:
# Remote file path (self._find_file() invoked)
fnd_path = fnd['path']
fnd_stat = fnd.get('stat')
except TypeError:
# Local file path
fnd_path = fnd
try:
fnd_stat = list(os.stat(fnd_path))
except Exception:
fnd_stat = None
hash_type = self.opts.get('hash_type', 'md5')
ret['hsum'] = salt.utils.hashutils.get_hash(fnd_path, form=hash_type)
ret['hash_type'] = hash_type
return ret, fnd_stat | [
"def",
"hash_and_stat_file",
"(",
"self",
",",
"path",
",",
"saltenv",
"=",
"'base'",
")",
":",
"ret",
"=",
"{",
"}",
"fnd",
"=",
"self",
".",
"__get_file_path",
"(",
"path",
",",
"saltenv",
")",
"if",
"fnd",
"is",
"None",
":",
"return",
"ret",
",",
"None",
"try",
":",
"# Remote file path (self._find_file() invoked)",
"fnd_path",
"=",
"fnd",
"[",
"'path'",
"]",
"fnd_stat",
"=",
"fnd",
".",
"get",
"(",
"'stat'",
")",
"except",
"TypeError",
":",
"# Local file path",
"fnd_path",
"=",
"fnd",
"try",
":",
"fnd_stat",
"=",
"list",
"(",
"os",
".",
"stat",
"(",
"fnd_path",
")",
")",
"except",
"Exception",
":",
"fnd_stat",
"=",
"None",
"hash_type",
"=",
"self",
".",
"opts",
".",
"get",
"(",
"'hash_type'",
",",
"'md5'",
")",
"ret",
"[",
"'hsum'",
"]",
"=",
"salt",
".",
"utils",
".",
"hashutils",
".",
"get_hash",
"(",
"fnd_path",
",",
"form",
"=",
"hash_type",
")",
"ret",
"[",
"'hash_type'",
"]",
"=",
"hash_type",
"return",
"ret",
",",
"fnd_stat"
] | Return the hash of a file, to get the hash of a file in the pillar_roots
prepend the path with salt://<file on server> otherwise, prepend the
file with / for a local file.
Additionally, return the stat result of the file, or None if no stat
results were found. | [
"Return",
"the",
"hash",
"of",
"a",
"file",
"to",
"get",
"the",
"hash",
"of",
"a",
"file",
"in",
"the",
"pillar_roots",
"prepend",
"the",
"path",
"with",
"salt",
":",
"//",
"<file",
"on",
"server",
">",
"otherwise",
"prepend",
"the",
"file",
"with",
"/",
"for",
"a",
"local",
"file",
"."
] | python | train |
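A sketch of calling hash_and_stat_file through a Salt file client; it assumes a minion configuration exists at the standard path, and salt://top.sls is only an example file that may or may not be present.

    import salt.config
    import salt.fileclient

    opts = salt.config.minion_config("/etc/salt/minion")
    client = salt.fileclient.get_file_client(opts)
    hash_info, stat_result = client.hash_and_stat_file("salt://top.sls", saltenv="base")
    print(hash_info.get("hsum"), stat_result)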
aws/sagemaker-python-sdk | src/sagemaker/local/utils.py | https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/local/utils.py#L22-L40 | def copy_directory_structure(destination_directory, relative_path):
"""Create all the intermediate directories required for relative_path to exist within destination_directory.
This assumes that relative_path is a directory located within root_dir.
Examples:
destination_directory: /tmp/destination
relative_path: test/unit/
will create: /tmp/destination/test/unit
Args:
destination_directory (str): root of the destination directory where the directory structure will be created.
relative_path (str): relative path that will be created within destination_directory
"""
full_path = os.path.join(destination_directory, relative_path)
if os.path.exists(full_path):
return
os.makedirs(full_path) | [
"def",
"copy_directory_structure",
"(",
"destination_directory",
",",
"relative_path",
")",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination_directory",
",",
"relative_path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"full_path",
")",
":",
"return",
"os",
".",
"makedirs",
"(",
"destination_directory",
",",
"relative_path",
")"
] | Create all the intermediate directories required for relative_path to exist within destination_directory.
This assumes that relative_path is a directory located within root_dir.
Examples:
destination_directory: /tmp/destination
relative_path: test/unit/
will create: /tmp/destination/test/unit
Args:
destination_directory (str): root of the destination directory where the directory structure will be created.
relative_path (str): relative path that will be created within destination_directory | [
"Create",
"all",
"the",
"intermediate",
"directories",
"required",
"for",
"relative_path",
"to",
"exist",
"within",
"destination_directory",
".",
"This",
"assumes",
"that",
"relative_path",
"is",
"a",
"directory",
"located",
"within",
"root_dir",
"."
] | python | train |
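A direct usage sketch mirroring the docstring's own example; a temporary directory stands in for the destination root.

    import tempfile
    from sagemaker.local.utils import copy_directory_structure

    dest = tempfile.mkdtemp()
    copy_directory_structure(dest, "test/unit")   # creates <dest>/test/unit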
biolink/ontobio | ontobio/config.py | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/config.py#L195-L212 | def get_config():
"""
Return configuration for current session.
When called for the first time, this will create a config object, using
whatever is the default load path to find the config yaml
"""
if session.config is None:
path = session.default_config_path
if os.path.isfile(path):
logging.info("LOADING FROM: {}".format(path))
session.config = load_config(path)
else:
session.config = Config()
logging.info("using default session: {}, path does not exist: {}".format(session, path))
else:
logging.info("Using pre-loaded object: {}".format(session.config))
return session.config | [
"def",
"get_config",
"(",
")",
":",
"if",
"session",
".",
"config",
"is",
"None",
":",
"path",
"=",
"session",
".",
"default_config_path",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"logging",
".",
"info",
"(",
"\"LOADING FROM: {}\"",
".",
"format",
"(",
"path",
")",
")",
"session",
".",
"config",
"=",
"load_config",
"(",
"path",
")",
"else",
":",
"session",
".",
"config",
"=",
"Config",
"(",
")",
"logging",
".",
"info",
"(",
"\"using default session: {}, path does not exist: {}\"",
".",
"format",
"(",
"session",
",",
"path",
")",
")",
"else",
":",
"logging",
".",
"info",
"(",
"\"Using pre-loaded object: {}\"",
".",
"format",
"(",
"session",
".",
"config",
")",
")",
"return",
"session",
".",
"config"
] | Return configuration for current session.
When called for the first time, this will create a config object, using
whatever is the default load path to find the config yaml | [
"Return",
"configuration",
"for",
"current",
"session",
"."
] | python | train |
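Typical use is simply to call the accessor: the first call loads the default YAML if one is present, and later calls return the cached session object. A minimal sketch:

    from ontobio.config import get_config

    cfg = get_config()
    print(type(cfg).__name__)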
TheRealLink/pylgtv | pylgtv/webos_client.py | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L283-L286 | def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices') | [
"def",
"get_inputs",
"(",
"self",
")",
":",
"self",
".",
"request",
"(",
"EP_GET_INPUTS",
")",
"return",
"{",
"}",
"if",
"self",
".",
"last_response",
"is",
"None",
"else",
"self",
".",
"last_response",
".",
"get",
"(",
"'payload'",
")",
".",
"get",
"(",
"'devices'",
")"
] | Get all inputs. | [
"Get",
"all",
"inputs",
"."
] | python | train |
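A sketch of listing inputs with pylgtv, assuming a webOS TV reachable at a placeholder address and already paired; the id/label fields are what webOS TVs typically report, so they are read defensively.

    from pylgtv import WebOsClient

    client = WebOsClient("192.168.1.50")   # placeholder TV address
    for source in client.get_inputs():
        print(source.get("id"), source.get("label"))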
swisscom/cleanerversion | versions/fields.py | https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/fields.py#L345-L368 | def _set_child_joined_alias_using_join_map(child, join_map, alias_map):
"""
Set the joined alias on the child, for Django <= 1.7.x.
:param child:
:param join_map:
:param alias_map:
"""
for lhs, table, join_cols in join_map:
if lhs is None:
continue
if lhs == child.alias:
relevant_alias = child.related_alias
elif lhs == child.related_alias:
relevant_alias = child.alias
else:
continue
join_info = alias_map[relevant_alias]
if join_info.join_type is None:
continue
if join_info.lhs_alias in [child.alias, child.related_alias]:
child.set_joined_alias(relevant_alias)
break | [
"def",
"_set_child_joined_alias_using_join_map",
"(",
"child",
",",
"join_map",
",",
"alias_map",
")",
":",
"for",
"lhs",
",",
"table",
",",
"join_cols",
"in",
"join_map",
":",
"if",
"lhs",
"is",
"None",
":",
"continue",
"if",
"lhs",
"==",
"child",
".",
"alias",
":",
"relevant_alias",
"=",
"child",
".",
"related_alias",
"elif",
"lhs",
"==",
"child",
".",
"related_alias",
":",
"relevant_alias",
"=",
"child",
".",
"alias",
"else",
":",
"continue",
"join_info",
"=",
"alias_map",
"[",
"relevant_alias",
"]",
"if",
"join_info",
".",
"join_type",
"is",
"None",
":",
"continue",
"if",
"join_info",
".",
"lhs_alias",
"in",
"[",
"child",
".",
"alias",
",",
"child",
".",
"related_alias",
"]",
":",
"child",
".",
"set_joined_alias",
"(",
"relevant_alias",
")",
"break"
] | Set the joined alias on the child, for Django <= 1.7.x.
:param child:
:param join_map:
:param alias_map: | [
"Set",
"the",
"joined",
"alias",
"on",
"the",
"child",
"for",
"Django",
"<",
"=",
"1",
".",
"7",
".",
"x",
".",
":",
"param",
"child",
":",
":",
"param",
"join_map",
":",
":",
"param",
"alias_map",
":"
] | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/visuals/graphs/layouts/circular.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/graphs/layouts/circular.py#L16-L49 | def circular(adjacency_mat, directed=False):
"""Places all nodes on a single circle.
Parameters
----------
adjacency_mat : matrix or sparse
The graph adjacency matrix
directed : bool
Whether the graph is directed. If this is True, is will also
generate the vertices for arrows, which can be passed to an
ArrowVisual.
Yields
------
(node_vertices, line_vertices, arrow_vertices) : tuple
Yields the node and line vertices in a tuple. This layout only yields a
single time, and has no builtin animation
"""
if issparse(adjacency_mat):
adjacency_mat = adjacency_mat.tocoo()
num_nodes = adjacency_mat.shape[0]
t = np.linspace(0, 2 * np.pi, num_nodes, endpoint=False, dtype=np.float32)
# Visual coordinate system is between 0 and 1, so generate a circle with
# radius 0.5 and center it at the point (0.5, 0.5).
node_coords = (0.5 * np.array([np.cos(t), np.sin(t)]) + 0.5).T
line_vertices, arrows = _straight_line_vertices(adjacency_mat,
node_coords, directed)
yield node_coords, line_vertices, arrows | [
"def",
"circular",
"(",
"adjacency_mat",
",",
"directed",
"=",
"False",
")",
":",
"if",
"issparse",
"(",
"adjacency_mat",
")",
":",
"adjacency_mat",
"=",
"adjacency_mat",
".",
"tocoo",
"(",
")",
"num_nodes",
"=",
"adjacency_mat",
".",
"shape",
"[",
"0",
"]",
"t",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"2",
"*",
"np",
".",
"pi",
",",
"num_nodes",
",",
"endpoint",
"=",
"False",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# Visual coordinate system is between 0 and 1, so generate a circle with",
"# radius 0.5 and center it at the point (0.5, 0.5).",
"node_coords",
"=",
"(",
"0.5",
"*",
"np",
".",
"array",
"(",
"[",
"np",
".",
"cos",
"(",
"t",
")",
",",
"np",
".",
"sin",
"(",
"t",
")",
"]",
")",
"+",
"0.5",
")",
".",
"T",
"line_vertices",
",",
"arrows",
"=",
"_straight_line_vertices",
"(",
"adjacency_mat",
",",
"node_coords",
",",
"directed",
")",
"yield",
"node_coords",
",",
"line_vertices",
",",
"arrows"
] | Places all nodes on a single circle.
Parameters
----------
adjacency_mat : matrix or sparse
The graph adjacency matrix
directed : bool
Whether the graph is directed. If this is True, is will also
generate the vertices for arrows, which can be passed to an
ArrowVisual.
Yields
------
(node_vertices, line_vertices, arrow_vertices) : tuple
Yields the node and line vertices in a tuple. This layout only yields a
single time, and has no builtin animation | [
"Places",
"all",
"nodes",
"on",
"a",
"single",
"circle",
"."
] | python | train |
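A small sketch of driving the circular layout generator with a dense adjacency matrix; the import path mirrors this module and may differ in other vispy builds, where get_layout('circular') is the usual entry point.

    import numpy as np
    from vispy.visuals.graphs.layouts.circular import circular

    adjacency = np.array([[0, 1, 1],
                          [1, 0, 0],
                          [1, 0, 0]], dtype=np.float32)

    node_coords, line_vertices, arrows = next(circular(adjacency))
    print(node_coords)   # three points on a circle centred at (0.5, 0.5)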
onnx/onnxmltools | onnxmltools/convert/libsvm/convert.py | https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/libsvm/convert.py#L17-L48 | def convert(model, name=None, initial_types=None, doc_string='', target_opset=None,
targeted_onnx=onnx.__version__, custom_conversion_functions=None, custom_shape_calculators=None):
"""
:param model: a libsvm model
:param initial_types: a python list. Each element is a tuple of a variable name and a type defined in data_types.py
:param name: The name of the graph (type: GraphProto) in the produced ONNX model (type: ModelProto)
:param doc_string: A string attached onto the produced ONNX model
:param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
:param targeted_onnx: A string (for example, '1.1.2' and '1.2') used to specify the targeted ONNX version of the
produced model. If ONNXMLTools cannot find a compatible ONNX python package, an error may be thrown.
:param custom_conversion_functions: a dictionary for specifying the user customized conversion function
:param custom_shape_calculators: a dictionary for specifying the user customized shape calculator
:return: An ONNX model (type: ModelProto) which is equivalent to the input scikit-learn model
"""
if initial_types is None:
raise ValueError('Initial types are required. See usage of convert(...) in \
onnxmltools.convert.libsvm.convert for details')
if name is None:
name = str(uuid4().hex)
# Parse scikit-learn model as our internal data structure (i.e., Topology)
topology = parse_libsvm(model, initial_types, custom_conversion_functions,
custom_shape_calculators)
# Infer variable shapes
topology.compile()
# Convert our Topology object into ONNX. The outcome is an ONNX model.
onnx_model = convert_topology(topology, name, doc_string, target_opset, targeted_onnx)
return onnx_model | [
"def",
"convert",
"(",
"model",
",",
"name",
"=",
"None",
",",
"initial_types",
"=",
"None",
",",
"doc_string",
"=",
"''",
",",
"target_opset",
"=",
"None",
",",
"targeted_onnx",
"=",
"onnx",
".",
"__version__",
",",
"custom_conversion_functions",
"=",
"None",
",",
"custom_shape_calculators",
"=",
"None",
")",
":",
"if",
"initial_types",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Initial types are required. See usage of convert(...) in \\\n onnxmltools.convert.libsvm.convert for details'",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"str",
"(",
"uuid4",
"(",
")",
".",
"hex",
")",
"# Parse scikit-learn model as our internal data structure (i.e., Topology)",
"topology",
"=",
"parse_libsvm",
"(",
"model",
",",
"initial_types",
",",
"custom_conversion_functions",
",",
"custom_shape_calculators",
")",
"# Infer variable shapes",
"topology",
".",
"compile",
"(",
")",
"# Convert our Topology object into ONNX. The outcome is an ONNX model.",
"onnx_model",
"=",
"convert_topology",
"(",
"topology",
",",
"name",
",",
"doc_string",
",",
"target_opset",
",",
"targeted_onnx",
")",
"return",
"onnx_model"
] | :param model: a libsvm model
:param initial_types: a python list. Each element is a tuple of a variable name and a type defined in data_types.py
:param name: The name of the graph (type: GraphProto) in the produced ONNX model (type: ModelProto)
:param doc_string: A string attached onto the produced ONNX model
:param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
:param targeted_onnx: A string (for example, '1.1.2' and '1.2') used to specify the targeted ONNX version of the
produced model. If ONNXMLTools cannot find a compatible ONNX python package, an error may be thrown.
:param custom_conversion_functions: a dictionary for specifying the user customized conversion function
:param custom_shape_calculators: a dictionary for specifying the user customized shape calculator
:return: An ONNX model (type: ModelProto) which is equivalent to the input scikit-learn model | [
":",
"param",
"model",
":",
"a",
"libsvm",
"model",
":",
"param",
"initial_types",
":",
"a",
"python",
"list",
".",
"Each",
"element",
"is",
"a",
"tuple",
"of",
"a",
"variable",
"name",
"and",
"a",
"type",
"defined",
"in",
"data_types",
".",
"py",
":",
"param",
"name",
":",
"The",
"name",
"of",
"the",
"graph",
"(",
"type",
":",
"GraphProto",
")",
"in",
"the",
"produced",
"ONNX",
"model",
"(",
"type",
":",
"ModelProto",
")",
":",
"param",
"doc_string",
":",
"A",
"string",
"attached",
"onto",
"the",
"produced",
"ONNX",
"model",
":",
"param",
"target_opset",
":",
"number",
"for",
"example",
"7",
"for",
"ONNX",
"1",
".",
"2",
"and",
"8",
"for",
"ONNX",
"1",
".",
"3",
".",
":",
"param",
"targeted_onnx",
":",
"A",
"string",
"(",
"for",
"example",
"1",
".",
"1",
".",
"2",
"and",
"1",
".",
"2",
")",
"used",
"to",
"specify",
"the",
"targeted",
"ONNX",
"version",
"of",
"the",
"produced",
"model",
".",
"If",
"ONNXMLTools",
"cannot",
"find",
"a",
"compatible",
"ONNX",
"python",
"package",
"an",
"error",
"may",
"be",
"thrown",
".",
":",
"param",
"custom_conversion_functions",
":",
"a",
"dictionary",
"for",
"specifying",
"the",
"user",
"customized",
"conversion",
"function",
":",
"param",
"custom_shape_calculators",
":",
"a",
"dictionary",
"for",
"specifying",
"the",
"user",
"customized",
"shape",
"calculator",
":",
"return",
":",
"An",
"ONNX",
"model",
"(",
"type",
":",
"ModelProto",
")",
"which",
"is",
"equivalent",
"to",
"the",
"input",
"scikit",
"-",
"learn",
"model"
] | python | train |
facetoe/zenpy | zenpy/lib/api.py | https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L1756-L1762 | def comments(self, article):
"""
Retrieve comments for an article
:param article: Article ID or object
"""
return self._query_zendesk(self.endpoint.comments, object_type='comment', id=article) | [
"def",
"comments",
"(",
"self",
",",
"article",
")",
":",
"return",
"self",
".",
"_query_zendesk",
"(",
"self",
".",
"endpoint",
".",
"comments",
",",
"object_type",
"=",
"'comment'",
",",
"id",
"=",
"article",
")"
] | Retrieve comments for an article
:param article: Article ID or object | [
"Retrieve",
"comments",
"for",
"an",
"article"
] | python | train |
balloob/pychromecast | pychromecast/socket_client.py | https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/socket_client.py#L95-L101 | def _message_to_string(message, data=None):
""" Gives a string representation of a PB2 message. """
if data is None:
data = _json_from_message(message)
return "Message {} from {} to {}: {}".format(
message.namespace, message.source_id, message.destination_id, data) | [
"def",
"_message_to_string",
"(",
"message",
",",
"data",
"=",
"None",
")",
":",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"_json_from_message",
"(",
"message",
")",
"return",
"\"Message {} from {} to {}: {}\"",
".",
"format",
"(",
"message",
".",
"namespace",
",",
"message",
".",
"source_id",
",",
"message",
".",
"destination_id",
",",
"data",
")"
] | Gives a string representation of a PB2 message. | [
"Gives",
"a",
"string",
"representation",
"of",
"a",
"PB2",
"message",
"."
] | python | train |
NoneGG/aredis | aredis/sentinel.py | https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/sentinel.py#L276-L299 | def slave_for(self, service_name, redis_class=StrictRedis,
connection_pool_class=SentinelConnectionPool, **kwargs):
"""
Returns redis client instance for the ``service_name`` slave(s).
A SentinelConnectionPool class is used to retrieve the slave's
address before establishing a new connection.
By default clients will be a redis.StrictRedis instance. Specify a
different class to the ``redis_class`` argument if you desire
something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs['is_master'] = False
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
return redis_class(connection_pool=connection_pool_class(
service_name, self, **connection_kwargs)) | [
"def",
"slave_for",
"(",
"self",
",",
"service_name",
",",
"redis_class",
"=",
"StrictRedis",
",",
"connection_pool_class",
"=",
"SentinelConnectionPool",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'is_master'",
"]",
"=",
"False",
"connection_kwargs",
"=",
"dict",
"(",
"self",
".",
"connection_kwargs",
")",
"connection_kwargs",
".",
"update",
"(",
"kwargs",
")",
"return",
"redis_class",
"(",
"connection_pool",
"=",
"connection_pool_class",
"(",
"service_name",
",",
"self",
",",
"*",
"*",
"connection_kwargs",
")",
")"
] | Returns redis client instance for the ``service_name`` slave(s).
A SentinelConnectionPool class is used to retrieve the slave's
address before establishing a new connection.
By default clients will be a redis.StrictRedis instance. Specify a
different class to the ``redis_class`` argument if you desire
something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections. | [
"Returns",
"redis",
"client",
"instance",
"for",
"the",
"service_name",
"slave",
"(",
"s",
")",
"."
] | python | train |
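A hedged sketch of obtaining a replica client through Sentinel with aredis; the sentinel address, 'mymaster' service name and key are placeholders, and the call is awaited because aredis clients are asyncio based.

    import asyncio
    from aredis.sentinel import Sentinel

    async def main():
        sentinel = Sentinel([("localhost", 26379)])   # placeholder sentinel address
        replica = sentinel.slave_for("mymaster")      # placeholder service name
        print(await replica.get("some-key"))

    asyncio.get_event_loop().run_until_complete(main())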
google/grr | grr/server/grr_response_server/databases/mysql_flows.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_flows.py#L49-L66 | def ReadMessageHandlerRequests(self, cursor=None):
"""Reads all message handler requests from the database."""
query = ("SELECT UNIX_TIMESTAMP(timestamp), request,"
" UNIX_TIMESTAMP(leased_until), leased_by "
"FROM message_handler_requests "
"ORDER BY timestamp DESC")
cursor.execute(query)
res = []
for timestamp, request, leased_until, leased_by in cursor.fetchall():
req = rdf_objects.MessageHandlerRequest.FromSerializedString(request)
req.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
req.leased_by = leased_by
req.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until)
res.append(req)
return res | [
"def",
"ReadMessageHandlerRequests",
"(",
"self",
",",
"cursor",
"=",
"None",
")",
":",
"query",
"=",
"(",
"\"SELECT UNIX_TIMESTAMP(timestamp), request,\"",
"\" UNIX_TIMESTAMP(leased_until), leased_by \"",
"\"FROM message_handler_requests \"",
"\"ORDER BY timestamp DESC\"",
")",
"cursor",
".",
"execute",
"(",
"query",
")",
"res",
"=",
"[",
"]",
"for",
"timestamp",
",",
"request",
",",
"leased_until",
",",
"leased_by",
"in",
"cursor",
".",
"fetchall",
"(",
")",
":",
"req",
"=",
"rdf_objects",
".",
"MessageHandlerRequest",
".",
"FromSerializedString",
"(",
"request",
")",
"req",
".",
"timestamp",
"=",
"mysql_utils",
".",
"TimestampToRDFDatetime",
"(",
"timestamp",
")",
"req",
".",
"leased_by",
"=",
"leased_by",
"req",
".",
"leased_until",
"=",
"mysql_utils",
".",
"TimestampToRDFDatetime",
"(",
"leased_until",
")",
"res",
".",
"append",
"(",
"req",
")",
"return",
"res"
] | Reads all message handler requests from the database. | [
"Reads",
"all",
"message",
"handler",
"requests",
"from",
"the",
"database",
"."
] | python | train |
peri-source/peri | scripts/tutorial.py | https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/scripts/tutorial.py#L82-L88 | def scramble_positions(p, delete_frac=0.1):
"""randomly deletes particles and adds 1-px noise for a realistic
initial featuring guess"""
probs = [1-delete_frac, delete_frac]
m = np.random.choice([True, False], p.shape[0], p=probs)
jumble = np.random.randn(m.sum(), 3)
return p[m] + jumble | [
"def",
"scramble_positions",
"(",
"p",
",",
"delete_frac",
"=",
"0.1",
")",
":",
"probs",
"=",
"[",
"1",
"-",
"delete_frac",
",",
"delete_frac",
"]",
"m",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"[",
"True",
",",
"False",
"]",
",",
"p",
".",
"shape",
"[",
"0",
"]",
",",
"p",
"=",
"probs",
")",
"jumble",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"m",
".",
"sum",
"(",
")",
",",
"3",
")",
"return",
"p",
"[",
"m",
"]",
"+",
"jumble"
] | randomly deletes particles and adds 1-px noise for a realistic
initial featuring guess | [
"randomly",
"deletes",
"particles",
"and",
"adds",
"1",
"-",
"px",
"noise",
"for",
"a",
"realistic",
"initial",
"featuring",
"guess"
] | python | valid |
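scramble_positions lives in a tutorial script rather than the installed package, so the sketch below assumes the function above is already defined in scope; it only illustrates the shape contract.

    import numpy as np

    true_positions = np.random.uniform(0, 64, size=(100, 3))
    guess = scramble_positions(true_positions, delete_frac=0.1)
    print(true_positions.shape, guess.shape)   # ~10% of rows dropped, 1-px jitter added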
ARMmbed/icetea | icetea_lib/CliResponse.py | https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/CliResponse.py#L68-L88 | def verify_message(self, expected_response, break_in_fail=True):
"""
Verifies that expected_response is found in self.lines.
:param expected_response: response or responses to look for. Must be list or str.
:param break_in_fail: If set to True,
re-raises exceptions caught or if message was not found
:return: True or False
:raises: LookupError if message was not found and break_in_fail was True. Other exceptions
might also be raised through searcher.verify_message.
"""
ok = True
try:
ok = verify_message(self.lines, expected_response)
except (TypeError, LookupError) as inst:
ok = False
if break_in_fail:
raise inst
if ok is False and break_in_fail:
raise LookupError("Unexpected message found")
return ok | [
"def",
"verify_message",
"(",
"self",
",",
"expected_response",
",",
"break_in_fail",
"=",
"True",
")",
":",
"ok",
"=",
"True",
"try",
":",
"ok",
"=",
"verify_message",
"(",
"self",
".",
"lines",
",",
"expected_response",
")",
"except",
"(",
"TypeError",
",",
"LookupError",
")",
"as",
"inst",
":",
"ok",
"=",
"False",
"if",
"break_in_fail",
":",
"raise",
"inst",
"if",
"ok",
"is",
"False",
"and",
"break_in_fail",
":",
"raise",
"LookupError",
"(",
"\"Unexpected message found\"",
")",
"return",
"ok"
] | Verifies that expected_response is found in self.lines.
:param expected_response: response or responses to look for. Must be list or str.
:param break_in_fail: If set to True,
re-raises exceptions caught or if message was not found
:return: True or False
:raises: LookupError if message was not found and break_in_fail was True. Other exceptions
might also be raised through searcher.verify_message. | [
"Verifies",
"that",
"expected_response",
"is",
"found",
"in",
"self",
".",
"lines",
"."
] | python | train |
OpenAgInitiative/openag_python | openag/cli/firmware/__init__.py | https://github.com/OpenAgInitiative/openag_python/blob/f6202340292bbf7185e1a7d4290188c0dacbb8d0/openag/cli/firmware/__init__.py#L343-L356 | def flash(
categories, param_file, project_dir, plugin, target,
status_update_interval, board
):
"""
Flashes firmware to device (init + run).
Initializes a pio project and runs the result, flashing it to the device.
"""
_init(board, project_dir)
_run(
categories, param_file, project_dir, plugin, target,
status_update_interval
)
print "Done" | [
"def",
"flash",
"(",
"categories",
",",
"param_file",
",",
"project_dir",
",",
"plugin",
",",
"target",
",",
"status_update_interval",
",",
"board",
")",
":",
"_init",
"(",
"board",
",",
"project_dir",
")",
"_run",
"(",
"categories",
",",
"param_file",
",",
"project_dir",
",",
"plugin",
",",
"target",
",",
"status_update_interval",
")",
"print",
"\"Done\""
] | Flashes firmware to device (init + run).
Initializes a pio project and runs the result, flashing it to the device. | [
"Flashes",
"firmware",
"to",
"device",
"(",
"init",
"+",
"run",
")",
".",
"Initializes",
"a",
"pio",
"project",
"and",
"runs",
"the",
"result",
"flashing",
"it",
"to",
"the",
"device",
"."
] | python | train |
mbj4668/pyang | pyang/translators/dsdl.py | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L557-L562 | def dc_element(self, parent, name, text):
"""Add DC element `name` containing `text` to `parent`."""
if self.dc_uri in self.namespaces:
dcel = SchemaNode(self.namespaces[self.dc_uri] + ":" + name,
text=text)
parent.children.insert(0,dcel) | [
"def",
"dc_element",
"(",
"self",
",",
"parent",
",",
"name",
",",
"text",
")",
":",
"if",
"self",
".",
"dc_uri",
"in",
"self",
".",
"namespaces",
":",
"dcel",
"=",
"SchemaNode",
"(",
"self",
".",
"namespaces",
"[",
"self",
".",
"dc_uri",
"]",
"+",
"\":\"",
"+",
"name",
",",
"text",
"=",
"text",
")",
"parent",
".",
"children",
".",
"insert",
"(",
"0",
",",
"dcel",
")"
] | Add DC element `name` containing `text` to `parent`. | [
"Add",
"DC",
"element",
"name",
"containing",
"text",
"to",
"parent",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/io/abinit/tasks.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L1670-L1679 | def wait(self):
"""Wait for child process to terminate. Set and return returncode attribute."""
self._returncode = self.process.wait()
try:
self.process.stderr.close()
except:
pass
self.set_status(self.S_DONE, "status set to Done")
return self._returncode | [
"def",
"wait",
"(",
"self",
")",
":",
"self",
".",
"_returncode",
"=",
"self",
".",
"process",
".",
"wait",
"(",
")",
"try",
":",
"self",
".",
"process",
".",
"stderr",
".",
"close",
"(",
")",
"except",
":",
"pass",
"self",
".",
"set_status",
"(",
"self",
".",
"S_DONE",
",",
"\"status set to Done\"",
")",
"return",
"self",
".",
"_returncode"
] | Wait for child process to terminate. Set and return returncode attribute. | [
"Wait",
"for",
"child",
"process",
"to",
"terminate",
".",
"Set",
"and",
"return",
"returncode",
"attribute",
"."
] | python | train |
programa-stic/barf-project | barf/core/smt/smttranslator.py | https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/smt/smttranslator.py#L162-L173 | def reset(self):
"""Reset internal state.
"""
self._solver.reset()
# Memory versioning.
self._mem_instance = 0
self._mem_init = smtsymbol.BitVecArray(self._address_size, 8, "MEM_{}".format(self._mem_instance))
self._mem_curr = self.make_array(self._address_size, "MEM_{}".format(self._mem_instance))
self._var_name_mappers = {} | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"_solver",
".",
"reset",
"(",
")",
"# Memory versioning.",
"self",
".",
"_mem_instance",
"=",
"0",
"self",
".",
"_mem_init",
"=",
"smtsymbol",
".",
"BitVecArray",
"(",
"self",
".",
"_address_size",
",",
"8",
",",
"\"MEM_{}\"",
".",
"format",
"(",
"self",
".",
"_mem_instance",
")",
")",
"self",
".",
"_mem_curr",
"=",
"self",
".",
"make_array",
"(",
"self",
".",
"_address_size",
",",
"\"MEM_{}\"",
".",
"format",
"(",
"self",
".",
"_mem_instance",
")",
")",
"self",
".",
"_var_name_mappers",
"=",
"{",
"}"
] | Reset internal state. | [
"Reset",
"internal",
"state",
"."
] | python | train |
anomaly/prestans | prestans/types/array.py | https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/types/array.py#L112-L121 | def is_scalar(self):
"""
:return:
:rtype: bool
"""
return \
isinstance(self._element_template, Boolean) or \
isinstance(self._element_template, Float) or \
isinstance(self._element_template, Integer) or \
isinstance(self._element_template, String) | [
"def",
"is_scalar",
"(",
"self",
")",
":",
"return",
"isinstance",
"(",
"self",
".",
"_element_template",
",",
"Boolean",
")",
"or",
"isinstance",
"(",
"self",
".",
"_element_template",
",",
"Float",
")",
"or",
"isinstance",
"(",
"self",
".",
"_element_template",
",",
"Integer",
")",
"or",
"isinstance",
"(",
"self",
".",
"_element_template",
",",
"String",
")"
] | :return:
:rtype: bool | [
":",
"return",
":",
":",
"rtype",
":",
"bool"
] | python | train |
lorien/grab | grab/deprecated.py | https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/deprecated.py#L230-L236 | def strip_tags(self, content, smart=False):
"""
Strip tags from the HTML content.
"""
from lxml.html import fromstring
return get_node_text(fromstring(content), smart=smart) | [
"def",
"strip_tags",
"(",
"self",
",",
"content",
",",
"smart",
"=",
"False",
")",
":",
"from",
"lxml",
".",
"html",
"import",
"fromstring",
"return",
"get_node_text",
"(",
"fromstring",
"(",
"content",
")",
",",
"smart",
"=",
"smart",
")"
] | Strip tags from the HTML content. | [
"Strip",
"tags",
"from",
"the",
"HTML",
"content",
"."
] | python | train |
raiden-network/raiden | raiden/tasks.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/tasks.py#L76-L98 | def check_gas_reserve(raiden):
""" Check periodically for gas reserve in the account """
while True:
has_enough_balance, estimated_required_balance = gas_reserve.has_enough_gas_reserve(
raiden,
channels_to_open=1,
)
estimated_required_balance_eth = Web3.fromWei(estimated_required_balance, 'ether')
if not has_enough_balance:
log.info('Missing gas reserve', required_wei=estimated_required_balance)
click.secho(
(
'WARNING\n'
"Your account's balance is below the estimated gas reserve of "
f'{estimated_required_balance_eth} eth. This may lead to a loss of '
'of funds because your account will be unable to perform on-chain '
'transactions. Please add funds to your account as soon as possible.'
),
fg='red',
)
gevent.sleep(CHECK_GAS_RESERVE_INTERVAL) | [
"def",
"check_gas_reserve",
"(",
"raiden",
")",
":",
"while",
"True",
":",
"has_enough_balance",
",",
"estimated_required_balance",
"=",
"gas_reserve",
".",
"has_enough_gas_reserve",
"(",
"raiden",
",",
"channels_to_open",
"=",
"1",
",",
")",
"estimated_required_balance_eth",
"=",
"Web3",
".",
"fromWei",
"(",
"estimated_required_balance",
",",
"'ether'",
")",
"if",
"not",
"has_enough_balance",
":",
"log",
".",
"info",
"(",
"'Missing gas reserve'",
",",
"required_wei",
"=",
"estimated_required_balance",
")",
"click",
".",
"secho",
"(",
"(",
"'WARNING\\n'",
"\"Your account's balance is below the estimated gas reserve of \"",
"f'{estimated_required_balance_eth} eth. This may lead to a loss of '",
"'of funds because your account will be unable to perform on-chain '",
"'transactions. Please add funds to your account as soon as possible.'",
")",
",",
"fg",
"=",
"'red'",
",",
")",
"gevent",
".",
"sleep",
"(",
"CHECK_GAS_RESERVE_INTERVAL",
")"
] | Check periodically for gas reserve in the account | [
"Check",
"periodically",
"for",
"gas",
"reserve",
"in",
"the",
"account"
] | python | train |
pyGrowler/Growler | growler/core/application.py | https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/application.py#L318-L329 | def router(self):
"""
Property returning the router at the top of the middleware
chain's stack (the last item in the list). If the list is empty
OR the item is not an instance of growler.Router, one is created
and added to the middleware chain, matching all requests.
"""
if not self.has_root_router:
self.middleware.add(HTTPMethod.ALL,
MiddlewareChain.ROOT_PATTERN,
Router())
return self.middleware.last().func | [
"def",
"router",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"has_root_router",
":",
"self",
".",
"middleware",
".",
"add",
"(",
"HTTPMethod",
".",
"ALL",
",",
"MiddlewareChain",
".",
"ROOT_PATTERN",
",",
"Router",
"(",
")",
")",
"return",
"self",
".",
"middleware",
".",
"last",
"(",
")",
".",
"func"
] | Property returning the router at the top of the middleware
chain's stack (the last item in the list). If the list is empty
OR the item is not an instance of growler.Router, one is created
and added to the middleware chain, matching all requests. | [
"Property",
"returning",
"the",
"router",
"at",
"the",
"top",
"of",
"the",
"middleware",
"chain",
"s",
"stack",
"(",
"the",
"last",
"item",
"in",
"the",
"list",
")",
".",
"If",
"the",
"list",
"is",
"empty",
"OR",
"the",
"item",
"is",
"not",
"an",
"instance",
"of",
"growler",
".",
"Router",
"one",
"is",
"created",
"and",
"added",
"to",
"the",
"middleware",
"chain",
"matching",
"all",
"requests",
"."
] | python | train |
pydata/xarray | xarray/plot/utils.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/plot/utils.py#L441-L455 | def _resolve_intervals_2dplot(val, func_name):
"""
Helper function to replace the values of a coordinate array containing
pd.Interval with their mid-points or - for pcolormesh - boundaries which
increases length by 1.
"""
label_extra = ''
if _valid_other_type(val, [pd.Interval]):
if func_name == 'pcolormesh':
val = _interval_to_bound_points(val)
else:
val = _interval_to_mid_points(val)
label_extra = '_center'
return val, label_extra | [
"def",
"_resolve_intervals_2dplot",
"(",
"val",
",",
"func_name",
")",
":",
"label_extra",
"=",
"''",
"if",
"_valid_other_type",
"(",
"val",
",",
"[",
"pd",
".",
"Interval",
"]",
")",
":",
"if",
"func_name",
"==",
"'pcolormesh'",
":",
"val",
"=",
"_interval_to_bound_points",
"(",
"val",
")",
"else",
":",
"val",
"=",
"_interval_to_mid_points",
"(",
"val",
")",
"label_extra",
"=",
"'_center'",
"return",
"val",
",",
"label_extra"
] | Helper function to replace the values of a coordinate array containing
pd.Interval with their mid-points or - for pcolormesh - boundaries which
increases length by 1. | [
"Helper",
"function",
"to",
"replace",
"the",
"values",
"of",
"a",
"coordinate",
"array",
"containing",
"pd",
".",
"Interval",
"with",
"their",
"mid",
"-",
"points",
"or",
"-",
"for",
"pcolormesh",
"-",
"boundaries",
"which",
"increases",
"length",
"by",
"1",
"."
] | python | train |
phoebe-project/phoebe2 | phoebe/algorithms/interp_nDgrid.py | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/algorithms/interp_nDgrid.py#L104-L121 | def cinterpolate(p, axis_values, pixelgrid):
"""
Interpolates in a grid prepared by create_pixeltypegrid().
Does a similar thing as :py:func:`interpolate`, but does everything in C.
p is an array of parameter arrays.
Careful, the shape of input :envvar:`p` and output is the transpose of
:py:func:`interpolate`.
@param p: Ninterpolate X Npar array
@type p: array
@return: Ninterpolate X Ndata array
@rtype: array
"""
res = libphoebe.interp(p, axis_values, pixelgrid)
return res | [
"def",
"cinterpolate",
"(",
"p",
",",
"axis_values",
",",
"pixelgrid",
")",
":",
"res",
"=",
"libphoebe",
".",
"interp",
"(",
"p",
",",
"axis_values",
",",
"pixelgrid",
")",
"return",
"res"
] | Interpolates in a grid prepared by create_pixeltypegrid().
Does a similar thing as :py:func:`interpolate`, but does everything in C.
p is an array of parameter arrays.
Careful, the shape of input :envvar:`p` and output is the transpose of
:py:func:`interpolate`.
@param p: Ninterpolate X Npar array
@type p: array
@return: Ninterpolate X Ndata array
@rtype: array | [
"Interpolates",
"in",
"a",
"grid",
"prepared",
"by",
"create_pixeltypegrid",
"()",
".",
"Does",
"a",
"similar",
"thing",
"as",
":",
"py",
":",
"func",
":",
"interpolate",
"but",
"does",
"everything",
"in",
"C",
".",
"p",
"is",
"an",
"array",
"of",
"parameter",
"arrays",
".",
"Careful",
"the",
"shape",
"of",
"input",
":",
"envvar",
":",
"p",
"and",
"output",
"is",
"the",
"transpose",
"of",
":",
"py",
":",
"func",
":",
"interpolate",
"."
] | python | train |
pypa/setuptools | setuptools/command/easy_install.py | https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/command/easy_install.py#L1458-L1496 | def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname, name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line) | [
"def",
"expand_paths",
"(",
"inputs",
")",
":",
"seen",
"=",
"{",
"}",
"for",
"dirname",
"in",
"inputs",
":",
"dirname",
"=",
"normalize_path",
"(",
"dirname",
")",
"if",
"dirname",
"in",
"seen",
":",
"continue",
"seen",
"[",
"dirname",
"]",
"=",
"1",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dirname",
")",
":",
"continue",
"files",
"=",
"os",
".",
"listdir",
"(",
"dirname",
")",
"yield",
"dirname",
",",
"files",
"for",
"name",
"in",
"files",
":",
"if",
"not",
"name",
".",
"endswith",
"(",
"'.pth'",
")",
":",
"# We only care about the .pth files",
"continue",
"if",
"name",
"in",
"(",
"'easy-install.pth'",
",",
"'setuptools.pth'",
")",
":",
"# Ignore .pth files that we control",
"continue",
"# Read the .pth file",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"name",
")",
")",
"lines",
"=",
"list",
"(",
"yield_lines",
"(",
"f",
")",
")",
"f",
".",
"close",
"(",
")",
"# Yield existing non-dupe, non-import directory lines from it",
"for",
"line",
"in",
"lines",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"\"import\"",
")",
":",
"line",
"=",
"normalize_path",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"if",
"line",
"not",
"in",
"seen",
":",
"seen",
"[",
"line",
"]",
"=",
"1",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"line",
")",
":",
"continue",
"yield",
"line",
",",
"os",
".",
"listdir",
"(",
"line",
")"
] | Yield sys.path directories that might contain "old-style" packages | [
"Yield",
"sys",
".",
"path",
"directories",
"that",
"might",
"contain",
"old",
"-",
"style",
"packages"
] | python | train |
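
A hedged usage sketch of walking sys.path with this helper; it assumes expand_paths can be imported from setuptools.command.easy_install in a setuptools release that still ships it.

import sys
from setuptools.command.easy_install import expand_paths

for dirname, files in expand_paths(sys.path):
    # each pair is a directory plus its listing, including directories pulled in via .pth files
    print(dirname, len(files), 'entries')
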
lobeck/flask-bower | flask_bower/__init__.py | https://github.com/lobeck/flask-bower/blob/3ebe08a0931d07e82cb57998db3390d2b5921444/flask_bower/__init__.py#L100-L157 | def build_url(component, filename, **values):
"""
search bower asset and build url
:param component: bower component (package)
:type component: str
:param filename: filename in bower component - can contain directories (like dist/jquery.js)
:type filename: str
:param values: additional url parameters
:type values: dict[str, str]
:return: url
:rtype: str | None
"""
root = current_app.config['BOWER_COMPONENTS_ROOT']
bower_data = None
package_data = None
# check if component exists in bower_components directory
if not os.path.isdir(os.path.join(current_app.root_path, root, component)):
# FallBack to default url_for flask
return None
# load bower.json of specified component
bower_file_path = os.path.join(current_app.root_path, root, component, 'bower.json')
if os.path.exists(bower_file_path):
with open(bower_file_path, 'r') as bower_file:
bower_data = json.load(bower_file)
# check if package.json exists and load package.json data
package_file_path = os.path.join(current_app.root_path, root, component, 'package.json')
if os.path.exists(package_file_path):
with open(package_file_path, 'r') as package_file:
package_data = json.load(package_file)
# check if specified file actually exists
if not os.path.exists(os.path.join(current_app.root_path, root, component, filename)):
return None
# check if minified file exists (by pattern <filename>.min.<ext>
# returns filename if successful
if current_app.config['BOWER_TRY_MINIFIED']:
if '.min.' not in filename:
minified_filename = '%s.min.%s' % tuple(filename.rsplit('.', 1))
minified_path = os.path.join(root, component, minified_filename)
if os.path.exists(os.path.join(current_app.root_path, minified_path)):
filename = minified_filename
# determine version of component and append as ?version= parameter to allow cache busting
if current_app.config['BOWER_QUERYSTRING_REVVING']:
if bower_data is not None and 'version' in bower_data:
values['version'] = bower_data['version']
elif package_data is not None and 'version' in package_data:
values['version'] = package_data['version']
else:
values['version'] = os.path.getmtime(os.path.join(current_app.root_path, root, component, filename))
return url_for('bower.serve', component=component, filename=filename, **values) | [
"def",
"build_url",
"(",
"component",
",",
"filename",
",",
"*",
"*",
"values",
")",
":",
"root",
"=",
"current_app",
".",
"config",
"[",
"'BOWER_COMPONENTS_ROOT'",
"]",
"bower_data",
"=",
"None",
"package_data",
"=",
"None",
"# check if component exists in bower_components directory",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"current_app",
".",
"root_path",
",",
"root",
",",
"component",
")",
")",
":",
"# FallBack to default url_for flask",
"return",
"None",
"# load bower.json of specified component",
"bower_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"current_app",
".",
"root_path",
",",
"root",
",",
"component",
",",
"'bower.json'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"bower_file_path",
")",
":",
"with",
"open",
"(",
"bower_file_path",
",",
"'r'",
")",
"as",
"bower_file",
":",
"bower_data",
"=",
"json",
".",
"load",
"(",
"bower_file",
")",
"# check if package.json exists and load package.json data",
"package_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"current_app",
".",
"root_path",
",",
"root",
",",
"component",
",",
"'package.json'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"package_file_path",
")",
":",
"with",
"open",
"(",
"package_file_path",
",",
"'r'",
")",
"as",
"package_file",
":",
"package_data",
"=",
"json",
".",
"load",
"(",
"package_file",
")",
"# check if specified file actually exists",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"current_app",
".",
"root_path",
",",
"root",
",",
"component",
",",
"filename",
")",
")",
":",
"return",
"None",
"# check if minified file exists (by pattern <filename>.min.<ext>",
"# returns filename if successful",
"if",
"current_app",
".",
"config",
"[",
"'BOWER_TRY_MINIFIED'",
"]",
":",
"if",
"'.min.'",
"not",
"in",
"filename",
":",
"minified_filename",
"=",
"'%s.min.%s'",
"%",
"tuple",
"(",
"filename",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
")",
"minified_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"component",
",",
"minified_filename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"current_app",
".",
"root_path",
",",
"minified_path",
")",
")",
":",
"filename",
"=",
"minified_filename",
"# determine version of component and append as ?version= parameter to allow cache busting",
"if",
"current_app",
".",
"config",
"[",
"'BOWER_QUERYSTRING_REVVING'",
"]",
":",
"if",
"bower_data",
"is",
"not",
"None",
"and",
"'version'",
"in",
"bower_data",
":",
"values",
"[",
"'version'",
"]",
"=",
"bower_data",
"[",
"'version'",
"]",
"elif",
"package_data",
"is",
"not",
"None",
"and",
"'version'",
"in",
"package_data",
":",
"values",
"[",
"'version'",
"]",
"=",
"package_data",
"[",
"'version'",
"]",
"else",
":",
"values",
"[",
"'version'",
"]",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"os",
".",
"path",
".",
"join",
"(",
"current_app",
".",
"root_path",
",",
"root",
",",
"component",
",",
"filename",
")",
")",
"return",
"url_for",
"(",
"'bower.serve'",
",",
"component",
"=",
"component",
",",
"filename",
"=",
"filename",
",",
"*",
"*",
"values",
")"
] | search bower asset and build url
:param component: bower component (package)
:type component: str
:param filename: filename in bower component - can contain directories (like dist/jquery.js)
:type filename: str
:param values: additional url parameters
:type values: dict[str, str]
:return: url
:rtype: str | None | [
"search",
"bower",
"asset",
"and",
"build",
"url"
] | python | train |
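
A hedged sketch of calling build_url inside an application context. It assumes the extension's Bower(app) setup fills in the BOWER_* defaults and registers the 'bower.serve' endpoint that build_url links to, and that a jquery component is actually installed under bower_components; otherwise the function returns None.

from flask import Flask
from flask_bower import Bower, build_url

app = Flask(__name__)
Bower(app)  # registers the blueprint that provides the 'bower.serve' endpoint

with app.test_request_context():
    url = build_url('jquery', 'dist/jquery.js')
    # e.g. '/bower/jquery/dist/jquery.min.js?version=...' or None if the asset is missing
    print(url)
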
jobovy/galpy | galpy/potential/SCFPotential.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/SCFPotential.py#L695-L774 | def scf_compute_coeffs(dens, N, L, a=1., radial_order=None, costheta_order=None, phi_order=None):
"""
NAME:
scf_compute_coeffs
PURPOSE:
Numerically compute the expansion coefficients for a given triaxial density
INPUT:
dens - A density function that takes a parameter R, z and phi
N - size of the Nth dimension of the expansion coefficients
L - size of the Lth and Mth dimension of the expansion coefficients
a - parameter used to shift the basis functions
radial_order - Number of sample points of the radial integral. If None, radial_order=max(20, N + 3/2L + 1)
costheta_order - Number of sample points of the costheta integral. If None, costheta_order=max(20, L + 1)
phi_order - Number of sample points of the phi integral. If None, phi_order=max(20, L + 1)
OUTPUT:
(Acos,Asin) - Expansion coefficients for density dens that can be given to SCFPotential.__init__
HISTORY:
2016-05-27 - Written - Aladdin
"""
def integrand(xi, costheta, phi):
l = nu.arange(0, L)[nu.newaxis, :, nu.newaxis]
m = nu.arange(0, L)[nu.newaxis,nu.newaxis,:]
r = _xiToR(xi, a)
R = r*nu.sqrt(1 - costheta**2.)
z = r*costheta
Legandre = lpmn(L - 1,L-1,costheta)[0].T[nu.newaxis,:,:]
dV = (1. + xi)**2. * nu.power(1. - xi, -4.)
phi_nl = - a**3*(1. + xi)**l * (1. - xi)**(l + 1.)*_C(xi, N, L)[:,:,nu.newaxis] * Legandre
return dens(R,z, phi) * phi_nl[nu.newaxis, :,:,:]*nu.array([nu.cos(m*phi), nu.sin(m*phi)])*dV
Acos = nu.zeros((N,L,L), float)
Asin = nu.zeros((N,L,L), float)
Ksample = [max(N + 3*L//2 + 1,20), max(L + 1,20 ), max(L + 1,20)]
if radial_order != None:
Ksample[0] = radial_order
if costheta_order != None:
Ksample[1] = costheta_order
if phi_order != None:
Ksample[2] = phi_order
integrated = _gaussianQuadrature(integrand, [[-1., 1.], [-1., 1.], [0, 2*nu.pi]], Ksample = Ksample)
n = nu.arange(0,N)[:,nu.newaxis, nu.newaxis]
l = nu.arange(0,L)[nu.newaxis,:, nu.newaxis]
m = nu.arange(0,L)[nu.newaxis,nu.newaxis,:]
K = .5*n*(n + 4*l + 3) + (l + 1)*(2*l + 1)
Nln = .5*gammaln(l - m + 1) - .5*gammaln(l + m + 1) - (2*l)*nu.log(2)
NN = nu.e**(Nln)
NN[nu.where(NN == nu.inf)] = 0 ## To account for the fact that m cant be bigger than l
constants = NN*(2*l + 1.)**.5
lnI = -(8*l + 6)*nu.log(2) + gammaln(n + 4*l + 3) - gammaln(n + 1) - nu.log(n + 2*l + 3./2) - 2*gammaln(2*l + 3./2)
I = -K*(4*nu.pi) * nu.e**(lnI)
Acos[:,:,:],Asin[:,:,:] = 2*(I**-1.)[nu.newaxis,:,:,:] * integrated * constants[nu.newaxis,:,:,:]
return Acos, Asin | [
"def",
"scf_compute_coeffs",
"(",
"dens",
",",
"N",
",",
"L",
",",
"a",
"=",
"1.",
",",
"radial_order",
"=",
"None",
",",
"costheta_order",
"=",
"None",
",",
"phi_order",
"=",
"None",
")",
":",
"def",
"integrand",
"(",
"xi",
",",
"costheta",
",",
"phi",
")",
":",
"l",
"=",
"nu",
".",
"arange",
"(",
"0",
",",
"L",
")",
"[",
"nu",
".",
"newaxis",
",",
":",
",",
"nu",
".",
"newaxis",
"]",
"m",
"=",
"nu",
".",
"arange",
"(",
"0",
",",
"L",
")",
"[",
"nu",
".",
"newaxis",
",",
"nu",
".",
"newaxis",
",",
":",
"]",
"r",
"=",
"_xiToR",
"(",
"xi",
",",
"a",
")",
"R",
"=",
"r",
"*",
"nu",
".",
"sqrt",
"(",
"1",
"-",
"costheta",
"**",
"2.",
")",
"z",
"=",
"r",
"*",
"costheta",
"Legandre",
"=",
"lpmn",
"(",
"L",
"-",
"1",
",",
"L",
"-",
"1",
",",
"costheta",
")",
"[",
"0",
"]",
".",
"T",
"[",
"nu",
".",
"newaxis",
",",
":",
",",
":",
"]",
"dV",
"=",
"(",
"1.",
"+",
"xi",
")",
"**",
"2.",
"*",
"nu",
".",
"power",
"(",
"1.",
"-",
"xi",
",",
"-",
"4.",
")",
"phi_nl",
"=",
"-",
"a",
"**",
"3",
"*",
"(",
"1.",
"+",
"xi",
")",
"**",
"l",
"*",
"(",
"1.",
"-",
"xi",
")",
"**",
"(",
"l",
"+",
"1.",
")",
"*",
"_C",
"(",
"xi",
",",
"N",
",",
"L",
")",
"[",
":",
",",
":",
",",
"nu",
".",
"newaxis",
"]",
"*",
"Legandre",
"return",
"dens",
"(",
"R",
",",
"z",
",",
"phi",
")",
"*",
"phi_nl",
"[",
"nu",
".",
"newaxis",
",",
":",
",",
":",
",",
":",
"]",
"*",
"nu",
".",
"array",
"(",
"[",
"nu",
".",
"cos",
"(",
"m",
"*",
"phi",
")",
",",
"nu",
".",
"sin",
"(",
"m",
"*",
"phi",
")",
"]",
")",
"*",
"dV",
"Acos",
"=",
"nu",
".",
"zeros",
"(",
"(",
"N",
",",
"L",
",",
"L",
")",
",",
"float",
")",
"Asin",
"=",
"nu",
".",
"zeros",
"(",
"(",
"N",
",",
"L",
",",
"L",
")",
",",
"float",
")",
"Ksample",
"=",
"[",
"max",
"(",
"N",
"+",
"3",
"*",
"L",
"//",
"2",
"+",
"1",
",",
"20",
")",
",",
"max",
"(",
"L",
"+",
"1",
",",
"20",
")",
",",
"max",
"(",
"L",
"+",
"1",
",",
"20",
")",
"]",
"if",
"radial_order",
"!=",
"None",
":",
"Ksample",
"[",
"0",
"]",
"=",
"radial_order",
"if",
"costheta_order",
"!=",
"None",
":",
"Ksample",
"[",
"1",
"]",
"=",
"costheta_order",
"if",
"phi_order",
"!=",
"None",
":",
"Ksample",
"[",
"2",
"]",
"=",
"phi_order",
"integrated",
"=",
"_gaussianQuadrature",
"(",
"integrand",
",",
"[",
"[",
"-",
"1.",
",",
"1.",
"]",
",",
"[",
"-",
"1.",
",",
"1.",
"]",
",",
"[",
"0",
",",
"2",
"*",
"nu",
".",
"pi",
"]",
"]",
",",
"Ksample",
"=",
"Ksample",
")",
"n",
"=",
"nu",
".",
"arange",
"(",
"0",
",",
"N",
")",
"[",
":",
",",
"nu",
".",
"newaxis",
",",
"nu",
".",
"newaxis",
"]",
"l",
"=",
"nu",
".",
"arange",
"(",
"0",
",",
"L",
")",
"[",
"nu",
".",
"newaxis",
",",
":",
",",
"nu",
".",
"newaxis",
"]",
"m",
"=",
"nu",
".",
"arange",
"(",
"0",
",",
"L",
")",
"[",
"nu",
".",
"newaxis",
",",
"nu",
".",
"newaxis",
",",
":",
"]",
"K",
"=",
".5",
"*",
"n",
"*",
"(",
"n",
"+",
"4",
"*",
"l",
"+",
"3",
")",
"+",
"(",
"l",
"+",
"1",
")",
"*",
"(",
"2",
"*",
"l",
"+",
"1",
")",
"Nln",
"=",
".5",
"*",
"gammaln",
"(",
"l",
"-",
"m",
"+",
"1",
")",
"-",
".5",
"*",
"gammaln",
"(",
"l",
"+",
"m",
"+",
"1",
")",
"-",
"(",
"2",
"*",
"l",
")",
"*",
"nu",
".",
"log",
"(",
"2",
")",
"NN",
"=",
"nu",
".",
"e",
"**",
"(",
"Nln",
")",
"NN",
"[",
"nu",
".",
"where",
"(",
"NN",
"==",
"nu",
".",
"inf",
")",
"]",
"=",
"0",
"## To account for the fact that m cant be bigger than l",
"constants",
"=",
"NN",
"*",
"(",
"2",
"*",
"l",
"+",
"1.",
")",
"**",
".5",
"lnI",
"=",
"-",
"(",
"8",
"*",
"l",
"+",
"6",
")",
"*",
"nu",
".",
"log",
"(",
"2",
")",
"+",
"gammaln",
"(",
"n",
"+",
"4",
"*",
"l",
"+",
"3",
")",
"-",
"gammaln",
"(",
"n",
"+",
"1",
")",
"-",
"nu",
".",
"log",
"(",
"n",
"+",
"2",
"*",
"l",
"+",
"3.",
"/",
"2",
")",
"-",
"2",
"*",
"gammaln",
"(",
"2",
"*",
"l",
"+",
"3.",
"/",
"2",
")",
"I",
"=",
"-",
"K",
"*",
"(",
"4",
"*",
"nu",
".",
"pi",
")",
"*",
"nu",
".",
"e",
"**",
"(",
"lnI",
")",
"Acos",
"[",
":",
",",
":",
",",
":",
"]",
",",
"Asin",
"[",
":",
",",
":",
",",
":",
"]",
"=",
"2",
"*",
"(",
"I",
"**",
"-",
"1.",
")",
"[",
"nu",
".",
"newaxis",
",",
":",
",",
":",
",",
":",
"]",
"*",
"integrated",
"*",
"constants",
"[",
"nu",
".",
"newaxis",
",",
":",
",",
":",
",",
":",
"]",
"return",
"Acos",
",",
"Asin"
] | NAME:
scf_compute_coeffs
PURPOSE:
Numerically compute the expansion coefficients for a given triaxial density
INPUT:
dens - A density function that takes a parameter R, z and phi
N - size of the Nth dimension of the expansion coefficients
L - size of the Lth and Mth dimension of the expansion coefficients
a - parameter used to shift the basis functions
radial_order - Number of sample points of the radial integral. If None, radial_order=max(20, N + 3/2L + 1)
costheta_order - Number of sample points of the costheta integral. If None, costheta_order=max(20, L + 1)
phi_order - Number of sample points of the phi integral. If None, phi_order=max(20, L + 1)
OUTPUT:
(Acos,Asin) - Expansion coefficients for density dens that can be given to SCFPotential.__init__
HISTORY:
2016-05-27 - Written - Aladdin | [
"NAME",
":"
] | python | train |
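
A hedged sketch: expanding a spherical Hernquist-like density (a=1, unit mass) in a small basis. The density function, expansion orders and the final potential construction are illustrative only.

import numpy as np
from galpy.potential import SCFPotential, scf_compute_coeffs

def hernquist_dens(R, z, phi):
    r = np.sqrt(R**2 + z**2)
    return 1. / (2. * np.pi * r * (1. + r)**3)   # Hernquist density with a=1, M=1

Acos, Asin = scf_compute_coeffs(hernquist_dens, N=5, L=3, a=1.)
# Acos.shape == Asin.shape == (5, 3, 3)
pot = SCFPotential(amp=1., Acos=Acos, Asin=Asin, a=1.)
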
sebdah/dynamic-dynamodb | dynamic_dynamodb/aws/cloudwatch.py | https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/aws/cloudwatch.py#L9-L36 | def __get_connection_cloudwatch():
""" Ensure connection to CloudWatch """
region = get_global_option('region')
try:
if (get_global_option('aws_access_key_id') and
get_global_option('aws_secret_access_key')):
logger.debug(
'Authenticating to CloudWatch using '
'credentials in configuration file')
connection = cloudwatch.connect_to_region(
region,
aws_access_key_id=get_global_option('aws_access_key_id'),
aws_secret_access_key=get_global_option(
'aws_secret_access_key'))
else:
logger.debug(
'Authenticating using boto\'s authentication handler')
connection = cloudwatch.connect_to_region(region)
except Exception as err:
logger.error('Failed connecting to CloudWatch: {0}'.format(err))
logger.error(
'Please report an issue at: '
'https://github.com/sebdah/dynamic-dynamodb/issues')
raise
logger.debug('Connected to CloudWatch in {0}'.format(region))
return connection | [
"def",
"__get_connection_cloudwatch",
"(",
")",
":",
"region",
"=",
"get_global_option",
"(",
"'region'",
")",
"try",
":",
"if",
"(",
"get_global_option",
"(",
"'aws_access_key_id'",
")",
"and",
"get_global_option",
"(",
"'aws_secret_access_key'",
")",
")",
":",
"logger",
".",
"debug",
"(",
"'Authenticating to CloudWatch using '",
"'credentials in configuration file'",
")",
"connection",
"=",
"cloudwatch",
".",
"connect_to_region",
"(",
"region",
",",
"aws_access_key_id",
"=",
"get_global_option",
"(",
"'aws_access_key_id'",
")",
",",
"aws_secret_access_key",
"=",
"get_global_option",
"(",
"'aws_secret_access_key'",
")",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'Authenticating using boto\\'s authentication handler'",
")",
"connection",
"=",
"cloudwatch",
".",
"connect_to_region",
"(",
"region",
")",
"except",
"Exception",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"'Failed connecting to CloudWatch: {0}'",
".",
"format",
"(",
"err",
")",
")",
"logger",
".",
"error",
"(",
"'Please report an issue at: '",
"'https://github.com/sebdah/dynamic-dynamodb/issues'",
")",
"raise",
"logger",
".",
"debug",
"(",
"'Connected to CloudWatch in {0}'",
".",
"format",
"(",
"region",
")",
")",
"return",
"connection"
] | Ensure connection to CloudWatch | [
"Ensure",
"connection",
"to",
"CloudWatch"
] | python | train |
JarryShaw/PyPCAPKit | src/protocols/transport/transport.py | https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/transport/transport.py#L65-L90 | def _import_next_layer(self, proto, length):
"""Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
"""
if self._exproto == 'null' and self._exlayer == 'None':
from pcapkit.protocols.raw import Raw as NextLayer
else:
from pcapkit.foundation.analysis import analyse as NextLayer
# from pcapkit.foundation.analysis import analyse as NextLayer
if length == 0:
next_ = NoPayload()
elif self._onerror:
next_ = beholder_ng(NextLayer)(self._file, length, _termination=self._sigterm)
else:
next_ = NextLayer(self._file, length, _termination=self._sigterm)
return next_ | [
"def",
"_import_next_layer",
"(",
"self",
",",
"proto",
",",
"length",
")",
":",
"if",
"self",
".",
"_exproto",
"==",
"'null'",
"and",
"self",
".",
"_exlayer",
"==",
"'None'",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"raw",
"import",
"Raw",
"as",
"NextLayer",
"else",
":",
"from",
"pcapkit",
".",
"foundation",
".",
"analysis",
"import",
"analyse",
"as",
"NextLayer",
"# from pcapkit.foundation.analysis import analyse as NextLayer",
"if",
"length",
"==",
"0",
":",
"next_",
"=",
"NoPayload",
"(",
")",
"elif",
"self",
".",
"_onerror",
":",
"next_",
"=",
"beholder_ng",
"(",
"NextLayer",
")",
"(",
"self",
".",
"_file",
",",
"length",
",",
"_termination",
"=",
"self",
".",
"_sigterm",
")",
"else",
":",
"next_",
"=",
"NextLayer",
"(",
"self",
".",
"_file",
",",
"length",
",",
"_termination",
"=",
"self",
".",
"_sigterm",
")",
"return",
"next_"
] | Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer | [
"Import",
"next",
"layer",
"extractor",
"."
] | python | train |
python-gitlab/python-gitlab | gitlab/v4/objects.py | https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L2012-L2044 | def set_release_description(self, description, **kwargs):
"""Set the release notes on the tag.
If the release doesn't exist yet, it will be created. If it already
exists, its description will be updated.
Args:
description (str): Description of the release.
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server fails to create the release
GitlabUpdateError: If the server fails to update the release
"""
id = self.get_id().replace('/', '%2F')
path = '%s/%s/release' % (self.manager.path, id)
data = {'description': description}
if self.release is None:
try:
server_data = self.manager.gitlab.http_post(path,
post_data=data,
**kwargs)
except exc.GitlabHttpError as e:
raise exc.GitlabCreateError(e.response_code, e.error_message)
else:
try:
server_data = self.manager.gitlab.http_put(path,
post_data=data,
**kwargs)
except exc.GitlabHttpError as e:
raise exc.GitlabUpdateError(e.response_code, e.error_message)
self.release = server_data | [
"def",
"set_release_description",
"(",
"self",
",",
"description",
",",
"*",
"*",
"kwargs",
")",
":",
"id",
"=",
"self",
".",
"get_id",
"(",
")",
".",
"replace",
"(",
"'/'",
",",
"'%2F'",
")",
"path",
"=",
"'%s/%s/release'",
"%",
"(",
"self",
".",
"manager",
".",
"path",
",",
"id",
")",
"data",
"=",
"{",
"'description'",
":",
"description",
"}",
"if",
"self",
".",
"release",
"is",
"None",
":",
"try",
":",
"server_data",
"=",
"self",
".",
"manager",
".",
"gitlab",
".",
"http_post",
"(",
"path",
",",
"post_data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")",
"except",
"exc",
".",
"GitlabHttpError",
"as",
"e",
":",
"raise",
"exc",
".",
"GitlabCreateError",
"(",
"e",
".",
"response_code",
",",
"e",
".",
"error_message",
")",
"else",
":",
"try",
":",
"server_data",
"=",
"self",
".",
"manager",
".",
"gitlab",
".",
"http_put",
"(",
"path",
",",
"post_data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")",
"except",
"exc",
".",
"GitlabHttpError",
"as",
"e",
":",
"raise",
"exc",
".",
"GitlabUpdateError",
"(",
"e",
".",
"response_code",
",",
"e",
".",
"error_message",
")",
"self",
".",
"release",
"=",
"server_data"
] | Set the release notes on the tag.
If the release doesn't exist yet, it will be created. If it already
exists, its description will be updated.
Args:
description (str): Description of the release.
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server fails to create the release
GitlabUpdateError: If the server fails to update the release | [
"Set",
"the",
"release",
"notes",
"on",
"the",
"tag",
"."
] | python | train |
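
A hedged usage sketch; the server URL, token, project path, tag name and release notes are placeholders.

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='YOUR_TOKEN')
project = gl.projects.get('mygroup/myproject')
tag = project.tags.get('v1.0.0')
# creates the release if it does not exist yet, otherwise updates its description
tag.set_release_description('Bug-fix release; see CHANGELOG for details.')
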
LudovicRousseau/pyscard | smartcard/pcsc/PCSCCardConnection.py | https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/pcsc/PCSCCardConnection.py#L211-L227 | def doControl(self, controlCode, bytes=[]):
"""Transmit a control command to the reader and return response.
controlCode: control command
bytes: command data to transmit (list of bytes)
return: the response bytes (if any)
"""
CardConnection.doControl(self, controlCode, bytes)
hresult, response = SCardControl(self.hcard, controlCode, bytes)
if hresult != 0:
raise SmartcardException(
'Failed to control ' + SCardGetErrorMessage(hresult))
data = [(x + 256) % 256 for x in response]
return list(data) | [
"def",
"doControl",
"(",
"self",
",",
"controlCode",
",",
"bytes",
"=",
"[",
"]",
")",
":",
"CardConnection",
".",
"doControl",
"(",
"self",
",",
"controlCode",
",",
"bytes",
")",
"hresult",
",",
"response",
"=",
"SCardControl",
"(",
"self",
".",
"hcard",
",",
"controlCode",
",",
"bytes",
")",
"if",
"hresult",
"!=",
"0",
":",
"raise",
"SmartcardException",
"(",
"'Failed to control '",
"+",
"SCardGetErrorMessage",
"(",
"hresult",
")",
")",
"data",
"=",
"[",
"(",
"x",
"+",
"256",
")",
"%",
"256",
"for",
"x",
"in",
"response",
"]",
"return",
"list",
"(",
"data",
")"
] | Transmit a control command to the reader and return response.
controlCode: control command
bytes: command data to transmit (list of bytes)
return: the response bytes (if any) | [
"Transmit",
"a",
"control",
"command",
"to",
"the",
"reader",
"and",
"return",
"response",
"."
] | python | train |
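
A hedged sketch of issuing a control command on a connected reader. Control codes and payloads are reader specific, so the CCID escape code built below is only an example and may be refused by many readers; it also assumes at least one reader is attached.

from smartcard.System import readers
from smartcard.scard import SCARD_CTL_CODE

connection = readers()[0].createConnection()
connection.connect()

ccid_escape = SCARD_CTL_CODE(3500)            # example control code, reader dependent
response = connection.doControl(ccid_escape, [0x00])
print(response)
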
neuropsychology/NeuroKit.py | examples/UnderDev/eeg/eeg_time_frequency.py | https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/examples/UnderDev/eeg/eeg_time_frequency.py#L99-L176 | def eeg_psd(raw, sensors_include="all", sensors_exclude=None, fmin=0.016, fmax=60, method="multitaper", proj=False):
"""
Compute Power-Spectral Density (PSD).
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
sensors_include : str
Sensor area to include. See :func:`neurokit.eeg_select_sensors()`.
sensors_exclude : str
Sensor area to exclude. See :func:`neurokit.eeg_select_sensors()`.
fmin : float
Min frequency of interest.
fmax: float
Max frequency of interest.
method : str
"multitaper" or "welch".
proj : bool
add projectors.
Returns
----------
mean_psd : pandas.DataFrame
Averaged PSDs.
Example
----------
>>> import neurokit as nk
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
References
------------
- None
"""
picks = mne.pick_types(raw.info, include=eeg_select_electrodes(include=sensors_include, exclude=sensors_exclude), exclude="bads")
if method == "multitaper":
psds, freqs = mne.time_frequency.psd_multitaper(raw,
fmin=fmin,
fmax=fmax,
low_bias=True,
proj=proj,
picks=picks)
else:
psds, freqs = mne.time_frequency.psd_welch(raw,
fmin=fmin,
fmax=fmax,
proj=proj,
picks=picks)
tf = pd.DataFrame(psds)
tf.columns = eeg_name_frequencies(freqs)
tf = tf.mean(axis=0)
mean_psd = {}
for freq in ["UltraLow", "Delta", "Theta", "Alpha", "Alpha1", "Alpha2", "Mu", "Beta", "Beta1", "Beta2", "Gamma", "Gamma1", "Gamma2", "UltraHigh"]:
mean_psd[freq] = tf[[freq in s for s in tf.index]].mean()
mean_psd = pd.DataFrame.from_dict(mean_psd, orient="index").T
return(mean_psd) | [
"def",
"eeg_psd",
"(",
"raw",
",",
"sensors_include",
"=",
"\"all\"",
",",
"sensors_exclude",
"=",
"None",
",",
"fmin",
"=",
"0.016",
",",
"fmax",
"=",
"60",
",",
"method",
"=",
"\"multitaper\"",
",",
"proj",
"=",
"False",
")",
":",
"picks",
"=",
"mne",
".",
"pick_types",
"(",
"raw",
".",
"info",
",",
"include",
"=",
"eeg_select_electrodes",
"(",
"include",
"=",
"sensors_include",
",",
"exclude",
"=",
"sensors_exclude",
")",
",",
"exclude",
"=",
"\"bads\"",
")",
"if",
"method",
"==",
"\"multitaper\"",
":",
"psds",
",",
"freqs",
"=",
"mne",
".",
"time_frequency",
".",
"psd_multitaper",
"(",
"raw",
",",
"fmin",
"=",
"fmin",
",",
"fmax",
"=",
"fmax",
",",
"low_bias",
"=",
"True",
",",
"proj",
"=",
"proj",
",",
"picks",
"=",
"picks",
")",
"else",
":",
"psds",
",",
"freqs",
"=",
"mne",
".",
"time_frequency",
".",
"psd_welch",
"(",
"raw",
",",
"fmin",
"=",
"fmin",
",",
"fmax",
"=",
"fmax",
",",
"proj",
"=",
"proj",
",",
"picks",
"=",
"picks",
")",
"tf",
"=",
"pd",
".",
"DataFrame",
"(",
"psds",
")",
"tf",
".",
"columns",
"=",
"eeg_name_frequencies",
"(",
"freqs",
")",
"tf",
"=",
"tf",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"mean_psd",
"=",
"{",
"}",
"for",
"freq",
"in",
"[",
"\"UltraLow\"",
",",
"\"Delta\"",
",",
"\"Theta\"",
",",
"\"Alpha\"",
",",
"\"Alpha1\"",
",",
"\"Alpha2\"",
",",
"\"Mu\"",
",",
"\"Beta\"",
",",
"\"Beta1\"",
",",
"\"Beta2\"",
",",
"\"Gamma\"",
",",
"\"Gamma1\"",
",",
"\"Gamma2\"",
",",
"\"UltraHigh\"",
"]",
":",
"mean_psd",
"[",
"freq",
"]",
"=",
"tf",
"[",
"[",
"freq",
"in",
"s",
"for",
"s",
"in",
"tf",
".",
"index",
"]",
"]",
".",
"mean",
"(",
")",
"mean_psd",
"=",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"mean_psd",
",",
"orient",
"=",
"\"index\"",
")",
".",
"T",
"return",
"(",
"mean_psd",
")"
] | Compute Power-Spectral Density (PSD).
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
sensors_include : str
Sensor area to include. See :func:`neurokit.eeg_select_sensors()`.
sensors_exclude : str
Sensor area to exclude. See :func:`neurokit.eeg_select_sensors()`.
fmin : float
Min frequency of interest.
fmax: float
Max frequency of interest.
method : str
"multitaper" or "welch".
proj : bool
add projectors.
Returns
----------
mean_psd : pandas.DataFrame
Averaged PSDs.
Example
----------
>>> import neurokit as nk
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
References
------------
- None | [
"Compute",
"Power",
"-",
"Spectral",
"Density",
"(",
"PSD",
")",
"."
] | python | train |
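
A hedged sketch of a caller; the FIF file name is a placeholder, and since this function lives under examples/UnderDev in the repository it is assumed to be available in the local namespace rather than imported from the released package.

import mne

raw = mne.io.read_raw_fif('subject01_raw.fif', preload=True)   # placeholder recording
band_power = eeg_psd(raw, sensors_include='all', fmin=1, fmax=45, method='welch')
print(band_power[['Delta', 'Theta', 'Alpha', 'Beta']])
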
gholt/swiftly | swiftly/cli/encrypt.py | https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/cli/encrypt.py#L31-L47 | def cli_encrypt(context, key):
"""
Encrypts context.io_manager's stdin and sends that to
context.io_manager's stdout.
This can be useful to encrypt to disk before attempting to
upload, allowing upload retries and segmented encrypted objects.
See :py:mod:`swiftly.cli.encrypt` for context usage information.
See :py:class:`CLIEncrypt` for more information.
"""
with context.io_manager.with_stdout() as stdout:
with context.io_manager.with_stdin() as stdin:
for chunk in aes_encrypt(key, stdin, preamble=AES256CBC):
stdout.write(chunk)
stdout.flush() | [
"def",
"cli_encrypt",
"(",
"context",
",",
"key",
")",
":",
"with",
"context",
".",
"io_manager",
".",
"with_stdout",
"(",
")",
"as",
"stdout",
":",
"with",
"context",
".",
"io_manager",
".",
"with_stdin",
"(",
")",
"as",
"stdin",
":",
"for",
"chunk",
"in",
"aes_encrypt",
"(",
"key",
",",
"stdin",
",",
"preamble",
"=",
"AES256CBC",
")",
":",
"stdout",
".",
"write",
"(",
"chunk",
")",
"stdout",
".",
"flush",
"(",
")"
] | Encrypts context.io_manager's stdin and sends that to
context.io_manager's stdout.
This can be useful to encrypt to disk before attempting to
upload, allowing upload retries and segmented encrypted objects.
See :py:mod:`swiftly.cli.encrypt` for context usage information.
See :py:class:`CLIEncrypt` for more information. | [
"Encrypts",
"context",
".",
"io_manager",
"s",
"stdin",
"and",
"sends",
"that",
"to",
"context",
".",
"io_manager",
"s",
"stdout",
"."
] | python | test |
xlzd/xtls | xtls/timeparser.py | https://github.com/xlzd/xtls/blob/b3cc0ab24197ecaa39adcad7cd828cada9c04a4e/xtls/timeparser.py#L48-L54 | def _build_str_from_chinese(chinese_items):
"""
Return the corresponding standard format time string based on the keywords parsed from a Chinese time string
"""
year, month, day = chinese_items
year = reduce(lambda a, b: a*10+b, map(CHINESE_NUMS.find, year))
return '%04d-%02d-%02d 00:00:00' % (year, _parse_chinese_field(month), _parse_chinese_field(day)) | [
"def",
"_build_str_from_chinese",
"(",
"chinese_items",
")",
":",
"year",
",",
"month",
",",
"day",
"=",
"chinese_items",
"year",
"=",
"reduce",
"(",
"lambda",
"a",
",",
"b",
":",
"a",
"*",
"10",
"+",
"b",
",",
"map",
"(",
"CHINESE_NUMS",
".",
"find",
",",
"year",
")",
")",
"return",
"'%04d-%02d-%02d 00:00:00'",
"%",
"(",
"year",
",",
"_parse_chinese_field",
"(",
"month",
")",
",",
"_parse_chinese_field",
"(",
"day",
")",
")"
] | Return the corresponding standard format time string based on the keywords parsed from a Chinese time string | [
"根据解析出的中文时间字符串的关键字返回对应的标准格式字符串"
] | python | train |
pandas-dev/pandas | pandas/core/panel.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L834-L866 | def xs(self, key, axis=1):
"""
Return slice of panel along selected axis.
Parameters
----------
key : object
Label
axis : {'items', 'major', 'minor'}, default 1/'major'
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result | [
"def",
"xs",
"(",
"self",
",",
"key",
",",
"axis",
"=",
"1",
")",
":",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"axis",
"==",
"0",
":",
"return",
"self",
"[",
"key",
"]",
"self",
".",
"_consolidate_inplace",
"(",
")",
"axis_number",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"new_data",
"=",
"self",
".",
"_data",
".",
"xs",
"(",
"key",
",",
"axis",
"=",
"axis_number",
",",
"copy",
"=",
"False",
")",
"result",
"=",
"self",
".",
"_construct_return_type",
"(",
"new_data",
")",
"copy",
"=",
"new_data",
".",
"is_mixed_type",
"result",
".",
"_set_is_copy",
"(",
"self",
",",
"copy",
"=",
"copy",
")",
"return",
"result"
] | Return slice of panel along selected axis.
Parameters
----------
key : object
Label
axis : {'items', 'major', 'minor'}, default 1/'major'
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>` | [
"Return",
"slice",
"of",
"panel",
"along",
"selected",
"axis",
"."
] | python | train |
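
A hedged sketch; pd.Panel was deprecated in pandas 0.20 and removed in 1.0, so this only runs on the older pandas versions this method comes from.

import numpy as np
import pandas as pd

panel = pd.Panel(np.random.randn(2, 3, 4),
                 items=['ItemA', 'ItemB'],
                 major_axis=pd.date_range('2000-01-01', periods=3),
                 minor_axis=['A', 'B', 'C', 'D'])

frame = panel.xs('ItemA', axis='items')              # DataFrame, one dimension fewer
row = panel.xs(panel.major_axis[0], axis='major')    # slice along the major axis
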
rcsb/mmtf-python | mmtf/converters/converters.py | https://github.com/rcsb/mmtf-python/blob/899bb877ca1b32a9396803d38c5bf38a2520754e/mmtf/converters/converters.py#L22-L32 | def convert_ints_to_bytes(in_ints, num):
"""Convert an integer array into a byte arrays. The number of bytes forming an integer
is defined by num
:param in_ints: the input integers
:param num: the number of bytes per int
:return: the packed byte array"""
out_bytes= b""
for val in in_ints:
out_bytes+=struct.pack(mmtf.utils.constants.NUM_DICT[num], val)
return out_bytes | [
"def",
"convert_ints_to_bytes",
"(",
"in_ints",
",",
"num",
")",
":",
"out_bytes",
"=",
"b\"\"",
"for",
"val",
"in",
"in_ints",
":",
"out_bytes",
"+=",
"struct",
".",
"pack",
"(",
"mmtf",
".",
"utils",
".",
"constants",
".",
"NUM_DICT",
"[",
"num",
"]",
",",
"val",
")",
"return",
"out_bytes"
] | Convert an integer array into a byte array. The number of bytes forming an integer
is defined by num
:param in_ints: the input integers
:param num: the number of bytes per int
:return: the packed byte array | [
"Convert",
"an",
"integer",
"array",
"into",
"a",
"byte",
"arrays",
".",
"The",
"number",
"of",
"bytes",
"forming",
"an",
"integer",
"is",
"defined",
"by",
"num"
] | python | train |
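
A hedged round-trip sketch; it assumes mmtf-python is installed so that NUM_DICT supplies the struct format for 4-byte integers, and that the companion convert_bytes_to_ints lives in the same module.

from mmtf.converters.converters import convert_bytes_to_ints, convert_ints_to_bytes

packed = convert_ints_to_bytes([1, 2, 300], 4)
print(len(packed))                        # 12 bytes: three 4-byte integers
print(convert_bytes_to_ints(packed, 4))   # back to the original integers
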
gem/oq-engine | openquake/risklib/asset.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/asset.py#L792-L836 | def read(fnames, calculation_mode='', region_constraint='',
ignore_missing_costs=(), asset_nodes=False, check_dupl=True,
tagcol=None, by_country=False):
"""
Call `Exposure.read(fname)` to get an :class:`Exposure` instance
keeping all the assets in memory or
`Exposure.read(fname, asset_nodes=True)` to get an iterator over
Node objects (one Node for each asset).
"""
if by_country: # E??_ -> countrycode
prefix2cc = countries.from_exposures(
os.path.basename(f) for f in fnames)
else:
prefix = ''
allargs = []
tagcol = _minimal_tagcol(fnames, by_country)
for i, fname in enumerate(fnames, 1):
if by_country and len(fnames) > 1:
prefix = prefix2cc['E%02d_' % i] + '_'
elif len(fnames) > 1:
prefix = 'E%02d_' % i
else:
prefix = ''
allargs.append((fname, calculation_mode, region_constraint,
ignore_missing_costs, asset_nodes, check_dupl,
prefix, tagcol))
exp = None
for exposure in parallel.Starmap(
Exposure.read_exp, allargs, distribute='no'):
if exp is None: # first time
exp = exposure
exp.description = 'Composite exposure[%d]' % len(fnames)
else:
assert exposure.cost_types == exp.cost_types
assert exposure.occupancy_periods == exp.occupancy_periods
assert (exposure.insurance_limit_is_absolute ==
exp.insurance_limit_is_absolute)
assert exposure.retrofitted == exp.retrofitted
assert exposure.area == exp.area
exp.assets.extend(exposure.assets)
exp.asset_refs.extend(exposure.asset_refs)
exp.tagcol.extend(exposure.tagcol)
exp.exposures = [os.path.splitext(os.path.basename(f))[0]
for f in fnames]
return exp | [
"def",
"read",
"(",
"fnames",
",",
"calculation_mode",
"=",
"''",
",",
"region_constraint",
"=",
"''",
",",
"ignore_missing_costs",
"=",
"(",
")",
",",
"asset_nodes",
"=",
"False",
",",
"check_dupl",
"=",
"True",
",",
"tagcol",
"=",
"None",
",",
"by_country",
"=",
"False",
")",
":",
"if",
"by_country",
":",
"# E??_ -> countrycode",
"prefix2cc",
"=",
"countries",
".",
"from_exposures",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
"for",
"f",
"in",
"fnames",
")",
"else",
":",
"prefix",
"=",
"''",
"allargs",
"=",
"[",
"]",
"tagcol",
"=",
"_minimal_tagcol",
"(",
"fnames",
",",
"by_country",
")",
"for",
"i",
",",
"fname",
"in",
"enumerate",
"(",
"fnames",
",",
"1",
")",
":",
"if",
"by_country",
"and",
"len",
"(",
"fnames",
")",
">",
"1",
":",
"prefix",
"=",
"prefix2cc",
"[",
"'E%02d_'",
"%",
"i",
"]",
"+",
"'_'",
"elif",
"len",
"(",
"fnames",
")",
">",
"1",
":",
"prefix",
"=",
"'E%02d_'",
"%",
"i",
"else",
":",
"prefix",
"=",
"''",
"allargs",
".",
"append",
"(",
"(",
"fname",
",",
"calculation_mode",
",",
"region_constraint",
",",
"ignore_missing_costs",
",",
"asset_nodes",
",",
"check_dupl",
",",
"prefix",
",",
"tagcol",
")",
")",
"exp",
"=",
"None",
"for",
"exposure",
"in",
"parallel",
".",
"Starmap",
"(",
"Exposure",
".",
"read_exp",
",",
"allargs",
",",
"distribute",
"=",
"'no'",
")",
":",
"if",
"exp",
"is",
"None",
":",
"# first time",
"exp",
"=",
"exposure",
"exp",
".",
"description",
"=",
"'Composite exposure[%d]'",
"%",
"len",
"(",
"fnames",
")",
"else",
":",
"assert",
"exposure",
".",
"cost_types",
"==",
"exp",
".",
"cost_types",
"assert",
"exposure",
".",
"occupancy_periods",
"==",
"exp",
".",
"occupancy_periods",
"assert",
"(",
"exposure",
".",
"insurance_limit_is_absolute",
"==",
"exp",
".",
"insurance_limit_is_absolute",
")",
"assert",
"exposure",
".",
"retrofitted",
"==",
"exp",
".",
"retrofitted",
"assert",
"exposure",
".",
"area",
"==",
"exp",
".",
"area",
"exp",
".",
"assets",
".",
"extend",
"(",
"exposure",
".",
"assets",
")",
"exp",
".",
"asset_refs",
".",
"extend",
"(",
"exposure",
".",
"asset_refs",
")",
"exp",
".",
"tagcol",
".",
"extend",
"(",
"exposure",
".",
"tagcol",
")",
"exp",
".",
"exposures",
"=",
"[",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
")",
"[",
"0",
"]",
"for",
"f",
"in",
"fnames",
"]",
"return",
"exp"
] | Call `Exposure.read(fname)` to get an :class:`Exposure` instance
keeping all the assets in memory or
`Exposure.read(fname, asset_nodes=True)` to get an iterator over
Node objects (one Node for each asset). | [
"Call",
"Exposure",
".",
"read",
"(",
"fname",
")",
"to",
"get",
"an",
":",
"class",
":",
"Exposure",
"instance",
"keeping",
"all",
"the",
"assets",
"in",
"memory",
"or",
"Exposure",
".",
"read",
"(",
"fname",
"asset_nodes",
"=",
"True",
")",
"to",
"get",
"an",
"iterator",
"over",
"Node",
"objects",
"(",
"one",
"Node",
"for",
"each",
"asset",
")",
"."
] | python | train |
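
A hedged sketch; the exposure file names are placeholders for NRML exposure models on disk.

from openquake.risklib.asset import Exposure

exp = Exposure.read(['exposure_model_1.xml', 'exposure_model_2.xml'],
                    calculation_mode='scenario_risk')
print(exp.description)                    # 'Composite exposure[2]' when two files are merged
print(len(exp.assets), 'assets from', exp.exposures)
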
SeleniumHQ/selenium | py/selenium/webdriver/remote/mobile.py | https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/remote/mobile.py#L52-L64 | def set_network_connection(self, network):
"""
Set the network connection for the remote device.
Example of setting airplane mode::
driver.mobile.set_network_connection(driver.mobile.AIRPLANE_MODE)
"""
mode = network.mask if isinstance(network, self.ConnectionType) else network
return self.ConnectionType(self._driver.execute(
Command.SET_NETWORK_CONNECTION, {
'name': 'network_connection',
'parameters': {'type': mode}})['value']) | [
"def",
"set_network_connection",
"(",
"self",
",",
"network",
")",
":",
"mode",
"=",
"network",
".",
"mask",
"if",
"isinstance",
"(",
"network",
",",
"self",
".",
"ConnectionType",
")",
"else",
"network",
"return",
"self",
".",
"ConnectionType",
"(",
"self",
".",
"_driver",
".",
"execute",
"(",
"Command",
".",
"SET_NETWORK_CONNECTION",
",",
"{",
"'name'",
":",
"'network_connection'",
",",
"'parameters'",
":",
"{",
"'type'",
":",
"mode",
"}",
"}",
")",
"[",
"'value'",
"]",
")"
] | Set the network connection for the remote device.
Example of setting airplane mode::
driver.mobile.set_network_connection(driver.mobile.AIRPLANE_MODE) | [
"Set",
"the",
"network",
"connection",
"for",
"the",
"remote",
"device",
"."
] | python | train |
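
A hedged sketch mirroring the docstring example; the hub URL and capabilities are placeholders for a remote session (for instance Appium) that actually supports the mobile network-connection commands.

from selenium import webdriver

driver = webdriver.Remote(
    command_executor='http://127.0.0.1:4723/wd/hub',
    desired_capabilities={'platformName': 'Android', 'deviceName': 'emulator-5554'})

driver.mobile.set_network_connection(driver.mobile.AIRPLANE_MODE)
print(driver.mobile.network_connection)   # should now report the airplane-mode ConnectionType
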
knagra/farnsworth | base/redirects.py | https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/base/redirects.py#L11-L21 | def red_ext(request, message=None):
'''
The external landing.
Also a convenience function for redirecting users who don't have site access to the external page.
Parameters:
request - the request in the calling function
message - a message from the caller function
'''
if message:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('external')) | [
"def",
"red_ext",
"(",
"request",
",",
"message",
"=",
"None",
")",
":",
"if",
"message",
":",
"messages",
".",
"add_message",
"(",
"request",
",",
"messages",
".",
"ERROR",
",",
"message",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'external'",
")",
")"
] | The external landing.
Also a convenience function for redirecting users who don't have site access to the external page.
Parameters:
request - the request in the calling function
message - a message from the caller function | [
"The",
"external",
"landing",
".",
"Also",
"a",
"convenience",
"function",
"for",
"redirecting",
"users",
"who",
"don",
"t",
"have",
"site",
"access",
"to",
"the",
"external",
"page",
".",
"Parameters",
":",
"request",
"-",
"the",
"request",
"in",
"the",
"calling",
"function",
"message",
"-",
"a",
"message",
"from",
"the",
"caller",
"function"
] | python | train |
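
A hedged sketch of a typical caller inside the same project; the view and template names are invented, and is_authenticated is called as a method because the Django versions this project targets still exposed it that way.

from django.shortcuts import render
from base.redirects import red_ext

def members_only_view(request):
    # bounce visitors without site access back to the external landing page
    if not request.user.is_authenticated():
        return red_ext(request, message="That page is only visible to site members.")
    return render(request, "members_area.html")
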