Dataset columns: repo | path | url | code | language | partition

aewallin/allantools | allantools/ci.py | python | train
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L249-L261

def b1_boundary(b_hi, N):
    """
    B1 ratio boundary for selecting between [b_hi-1, b_hi]
    alpha = b + 2
    """
    b_lo = b_hi - 1
    b1_lo = b1_theory(N, b_to_mu(b_lo))
    b1_hi = b1_theory(N, b_to_mu(b_hi))
    if b1_lo >= -4:
        return np.sqrt(b1_lo * b1_hi)  # geometric mean
    else:
        return 0.5 * (b1_lo + b1_hi)

"def",
"b1_boundary",
"(",
"b_hi",
",",
"N",
")",
":",
"b_lo",
"=",
"b_hi",
"-",
"1",
"b1_lo",
"=",
"b1_theory",
"(",
"N",
",",
"b_to_mu",
"(",
"b_lo",
")",
")",
"b1_hi",
"=",
"b1_theory",
"(",
"N",
",",
"b_to_mu",
"(",
"b_hi",
")",
")",
"if",
"b1_lo",
">=",
"-",
"4",
":",
"return",
"np",
".",
"sqrt",
"(",
"b1_lo",
"*",
"b1_hi",
")",
"# geometric mean",
"else",
":",
"return",
"0.5",
"*",
"(",
"b1_lo",
"+",
"b1_hi",
")"
] | B1 ratio boundary for selecting between [b_hi-1, b_hi]
alpha = b + 2 | [
"B1",
"ratio",
"boundary",
"for",
"selecting",
"between",
"[",
"b_hi",
"-",
"1",
"b_hi",
"]",
"alpha",
"=",
"b",
"+",
"2"
] | python | train |
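A minimal usage sketch for `b1_boundary` above, assuming the helpers it calls (`b1_theory`, `b_to_mu`) live alongside it in `allantools.ci`; the argument values are illustrative:

    import allantools.ci as ci

    # hypothetical values: N phase-data points, candidate exponent b_hi
    boundary = ci.b1_boundary(b_hi=0, N=128)
    # a measured B1 ratio is then compared against this threshold
    print(boundary)
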
tensorflow/cleverhans | cleverhans/attacks/max_confidence.py | python | train
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/max_confidence.py#L41-L53

def generate(self, x, **kwargs):
    """
    Generate symbolic graph for adversarial examples and return.
    :param x: The model's symbolic inputs.
    :param kwargs: Keyword arguments for the base attacker
    """
    assert self.parse_params(**kwargs)
    labels, _nb_classes = self.get_or_guess_labels(x, kwargs)
    adv_x = self.attack(x, labels)
    return adv_x

"def",
"generate",
"(",
"self",
",",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"self",
".",
"parse_params",
"(",
"*",
"*",
"kwargs",
")",
"labels",
",",
"_nb_classes",
"=",
"self",
".",
"get_or_guess_labels",
"(",
"x",
",",
"kwargs",
")",
"adv_x",
"=",
"self",
".",
"attack",
"(",
"x",
",",
"labels",
")",
"return",
"adv_x"
] | Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments for the base attacker | [
"Generate",
"symbolic",
"graph",
"for",
"adversarial",
"examples",
"and",
"return",
"."
] | python | train |
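A hedged sketch of driving this `generate` method; the `MaxConfidence` constructor arguments and the TF1 session setup follow typical cleverhans usage but are assumptions, not checked against this exact revision:

    import tensorflow as tf
    from cleverhans.attacks import MaxConfidence

    # model and sess are assumed to exist (a cleverhans Model and a tf.Session)
    attack = MaxConfidence(model, sess=sess)
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    adv_x = attack.generate(x)  # symbolic adversarial examples
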
napalm-automation/napalm-eos | napalm_eos/eos.py | python | train
https://github.com/napalm-automation/napalm-eos/blob/a3b37d6ee353e326ab9ea1a09ecc14045b12928b/napalm_eos/eos.py#L190-L199

def commit_config(self):
    """Implementation of NAPALM method commit_config."""
    commands = []
    commands.append('copy startup-config flash:rollback-0')
    commands.append('configure session {}'.format(self.config_session))
    commands.append('commit')
    commands.append('write memory')
    self.device.run_commands(commands)
    self.config_session = None

"def",
"commit_config",
"(",
"self",
")",
":",
"commands",
"=",
"[",
"]",
"commands",
".",
"append",
"(",
"'copy startup-config flash:rollback-0'",
")",
"commands",
".",
"append",
"(",
"'configure session {}'",
".",
"format",
"(",
"self",
".",
"config_session",
")",
")",
"commands",
".",
"append",
"(",
"'commit'",
")",
"commands",
".",
"append",
"(",
"'write memory'",
")",
"self",
".",
"device",
".",
"run_commands",
"(",
"commands",
")",
"self",
".",
"config_session",
"=",
"None"
] | Implementation of NAPALM method commit_config. | [
"Implementation",
"of",
"NAPALM",
"method",
"commit_config",
"."
] | python | train |
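A hedged sketch of the standard NAPALM workflow that ends in the `commit_config` above; host and credentials are placeholders:

    from napalm import get_network_driver

    driver = get_network_driver('eos')
    device = driver('192.0.2.1', 'admin', 'secret')
    device.open()
    device.load_merge_candidate(config='hostname lab-sw1')
    device.commit_config()  # runs the command sequence shown above
    device.close()
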
WZBSocialScienceCenter/tmtoolkit | tmtoolkit/topicmod/visualize.py | python | train
https://github.com/WZBSocialScienceCenter/tmtoolkit/blob/ca8b9d072e37ccc82b533f47d48bd9755722305b/tmtoolkit/topicmod/visualize.py#L182-L232

def plot_topic_word_heatmap(fig, ax, topic_word_distrib, vocab,
                            which_topics=None, which_topic_indices=None,
                            which_words=None, which_word_indices=None,
                            xaxislabel=None, yaxislabel=None,
                            **kwargs):
    """
    Plot a heatmap for a topic-word distribution `topic_word_distrib` to a matplotlib Figure `fig` and Axes `ax`
    using `vocab` as vocabulary on the x-axis and topics from 1 to `n_topics=doc_topic_distrib.shape[1]` on
    the y-axis.
    A subset of words from `vocab` can be specified either directly with a sequence `which_words` or
    `which_document_indices` containing a sequence of word indices in `vocab`.
    A subset of topics can be specified either with a sequence `which_topics` containing a sequence of numbers
    between [1, n_topics] or `which_topic_indices` containing numbers between [0, n_topics-1].
    Additional arguments can be passed via `kwargs` to `plot_heatmap`.
    Please note that it is almost always necessary to select a subset of your topic-word distribution with the
    `which_words` or `which_topics` parameters, as otherwise the amount of data to be plotted will be too high
    to give a reasonable picture.
    """
    if which_topics is not None and which_topic_indices is not None:
        raise ValueError('only `which_topics` or `which_topic_indices` can be set, not both')
    if which_words is not None and which_word_indices is not None:
        raise ValueError('only `which_words` or `which_word_indices` can be set, not both')
    if which_topics is not None:
        which_topic_indices = np.array(which_topics) - 1
    if which_words is not None:
        which_word_indices = np.where(np.isin(vocab, which_words))[0]

    select_distrib_subset = False
    topic_labels = np.array(range(1, topic_word_distrib.shape[0] + 1))

    if which_topic_indices is not None:
        select_distrib_subset = True
        topic_labels = topic_labels[which_topic_indices]
    if which_word_indices is not None:
        select_distrib_subset = True
        vocab = np.array(vocab)[which_word_indices]
    if select_distrib_subset:
        topic_word_distrib = mat2d_window_from_indices(topic_word_distrib, which_topic_indices, which_word_indices)

    return plot_heatmap(fig, ax, topic_word_distrib,
                        xaxislabel=xaxislabel or 'vocab',
                        yaxislabel=yaxislabel or 'topic',
                        xticklabels=vocab,
                        yticklabels=topic_labels,
                        **kwargs)

"def",
"plot_topic_word_heatmap",
"(",
"fig",
",",
"ax",
",",
"topic_word_distrib",
",",
"vocab",
",",
"which_topics",
"=",
"None",
",",
"which_topic_indices",
"=",
"None",
",",
"which_words",
"=",
"None",
",",
"which_word_indices",
"=",
"None",
",",
"xaxislabel",
"=",
"None",
",",
"yaxislabel",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"which_topics",
"is",
"not",
"None",
"and",
"which_topic_indices",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'only `which_topics` or `which_topic_indices` can be set, not both'",
")",
"if",
"which_words",
"is",
"not",
"None",
"and",
"which_word_indices",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'only `which_words` or `which_word_indices` can be set, not both'",
")",
"if",
"which_topics",
"is",
"not",
"None",
":",
"which_topic_indices",
"=",
"np",
".",
"array",
"(",
"which_topics",
")",
"-",
"1",
"if",
"which_words",
"is",
"not",
"None",
":",
"which_word_indices",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isin",
"(",
"vocab",
",",
"which_words",
")",
")",
"[",
"0",
"]",
"select_distrib_subset",
"=",
"False",
"topic_labels",
"=",
"np",
".",
"array",
"(",
"range",
"(",
"1",
",",
"topic_word_distrib",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
")",
")",
"if",
"which_topic_indices",
"is",
"not",
"None",
":",
"select_distrib_subset",
"=",
"True",
"topic_labels",
"=",
"topic_labels",
"[",
"which_topic_indices",
"]",
"if",
"which_word_indices",
"is",
"not",
"None",
":",
"select_distrib_subset",
"=",
"True",
"vocab",
"=",
"np",
".",
"array",
"(",
"vocab",
")",
"[",
"which_word_indices",
"]",
"if",
"select_distrib_subset",
":",
"topic_word_distrib",
"=",
"mat2d_window_from_indices",
"(",
"topic_word_distrib",
",",
"which_topic_indices",
",",
"which_word_indices",
")",
"return",
"plot_heatmap",
"(",
"fig",
",",
"ax",
",",
"topic_word_distrib",
",",
"xaxislabel",
"=",
"xaxislabel",
"or",
"'vocab'",
",",
"yaxislabel",
"=",
"yaxislabel",
"or",
"'topic'",
",",
"xticklabels",
"=",
"vocab",
",",
"yticklabels",
"=",
"topic_labels",
",",
"*",
"*",
"kwargs",
")"
] | Plot a heatmap for a topic-word distribution `topic_word_distrib` to a matplotlib Figure `fig` and Axes `ax`
using `vocab` as vocabulary on the x-axis and topics from 1 to `n_topics=doc_topic_distrib.shape[1]` on
the y-axis.
A subset of words from `vocab` can be specified either directly with a sequence `which_words` or
`which_document_indices` containing a sequence of word indices in `vocab`.
A subset of topics can be specified either with a sequence `which_topics` containing sequence of numbers between
[1, n_topics] or `which_topic_indices` which is a number between [0, n_topics-1]
Additional arguments can be passed via `kwargs` to `plot_heatmap`.
Please note that it is almost always necessary to select a subset of your topic-word distribution with the
`which_words` or `which_topics` parameters, as otherwise the amount of data to be plotted will be too high
to give a reasonable picture. | [
"Plot",
"a",
"heatmap",
"for",
"a",
"topic",
"-",
"word",
"distribution",
"topic_word_distrib",
"to",
"a",
"matplotlib",
"Figure",
"fig",
"and",
"Axes",
"ax",
"using",
"vocab",
"as",
"vocabulary",
"on",
"the",
"x",
"-",
"axis",
"and",
"topics",
"from",
"1",
"to",
"n_topics",
"=",
"doc_topic_distrib",
".",
"shape",
"[",
"1",
"]",
"on",
"the",
"y",
"-",
"axis",
".",
"A",
"subset",
"of",
"words",
"from",
"vocab",
"can",
"be",
"specified",
"either",
"directly",
"with",
"a",
"sequence",
"which_words",
"or",
"which_document_indices",
"containing",
"a",
"sequence",
"of",
"word",
"indices",
"in",
"vocab",
".",
"A",
"subset",
"of",
"topics",
"can",
"be",
"specified",
"either",
"with",
"a",
"sequence",
"which_topics",
"containing",
"sequence",
"of",
"numbers",
"between",
"[",
"1",
"n_topics",
"]",
"or",
"which_topic_indices",
"which",
"is",
"a",
"number",
"between",
"[",
"0",
"n_topics",
"-",
"1",
"]",
"Additional",
"arguments",
"can",
"be",
"passed",
"via",
"kwargs",
"to",
"plot_heatmap",
"."
] | python | train |
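Expanding the docstring's guidance into a runnable sketch; `phi` (the topic-word matrix) and `vocab` are assumed to come from an already-fitted topic model:

    import matplotlib.pyplot as plt
    from tmtoolkit.topicmod.visualize import plot_topic_word_heatmap

    fig, ax = plt.subplots(figsize=(12, 4))
    plot_topic_word_heatmap(fig, ax, phi, vocab,
                            which_topics=[1, 2, 3],
                            which_words=['model', 'data', 'topic'])
    plt.show()
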
pygobject/pgi | pgi/codegen/utils.py | python | train
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/codegen/utils.py#L118-L122

def write_lines(self, lines, level=0):
    """Append multiple new lines"""
    for line in lines:
        self.write_line(line, level)

"def",
"write_lines",
"(",
"self",
",",
"lines",
",",
"level",
"=",
"0",
")",
":",
"for",
"line",
"in",
"lines",
":",
"self",
".",
"write_line",
"(",
"line",
",",
"level",
")"
] | Append multiple new lines | [
"Append",
"multiple",
"new",
"lines"
] | python | train |
tcalmant/ipopo | pelix/ipopo/core.py | python | train
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/core.py#L250-L299

def __remove_handler_factory(self, svc_ref):
    # type: (ServiceReference) -> None
    """
    Removes a handler factory

    :param svc_ref: ServiceReference of the handler factory to remove
    """
    with self.__handlers_lock:
        # Get the handler ID
        handler_id = svc_ref.get_property(handlers_const.PROP_HANDLER_ID)

        # Check if this is the handler we use
        if svc_ref not in self._handlers_refs:
            return

        # Clean up
        self.__context.unget_service(svc_ref)
        self._handlers_refs.remove(svc_ref)
        del self._handlers[handler_id]

        # List the components using this handler
        to_stop = set()  # type: Set[StoredInstance]
        for factory_name in self.__factories:
            _, factory_context = self.__get_factory_with_context(
                factory_name
            )
            if handler_id in factory_context.get_handlers_ids():
                to_stop.update(self.__get_stored_instances(factory_name))

        with self.__instances_lock:
            for stored_instance in to_stop:
                # Extract information
                context = stored_instance.context
                name = context.name
                instance = stored_instance.instance

                # Clean up the stored instance (iPOPO side)
                del self.__instances[name]
                stored_instance.kill()

                # Add the component to the waiting queue
                self.__waiting_handlers[name] = (context, instance)

        # Try to find a new handler factory
        new_ref = self.__context.get_service_reference(
            handlers_const.SERVICE_IPOPO_HANDLER_FACTORY,
            "({0}={1})".format(handlers_const.PROP_HANDLER_ID, handler_id),
        )
        if new_ref is not None:
            self.__add_handler_factory(new_ref)

"def",
"__remove_handler_factory",
"(",
"self",
",",
"svc_ref",
")",
":",
"# type: (ServiceReference) -> None",
"with",
"self",
".",
"__handlers_lock",
":",
"# Get the handler ID",
"handler_id",
"=",
"svc_ref",
".",
"get_property",
"(",
"handlers_const",
".",
"PROP_HANDLER_ID",
")",
"# Check if this is the handler we use",
"if",
"svc_ref",
"not",
"in",
"self",
".",
"_handlers_refs",
":",
"return",
"# Clean up",
"self",
".",
"__context",
".",
"unget_service",
"(",
"svc_ref",
")",
"self",
".",
"_handlers_refs",
".",
"remove",
"(",
"svc_ref",
")",
"del",
"self",
".",
"_handlers",
"[",
"handler_id",
"]",
"# List the components using this handler",
"to_stop",
"=",
"set",
"(",
")",
"# type: Set[StoredInstance]",
"for",
"factory_name",
"in",
"self",
".",
"__factories",
":",
"_",
",",
"factory_context",
"=",
"self",
".",
"__get_factory_with_context",
"(",
"factory_name",
")",
"if",
"handler_id",
"in",
"factory_context",
".",
"get_handlers_ids",
"(",
")",
":",
"to_stop",
".",
"update",
"(",
"self",
".",
"__get_stored_instances",
"(",
"factory_name",
")",
")",
"with",
"self",
".",
"__instances_lock",
":",
"for",
"stored_instance",
"in",
"to_stop",
":",
"# Extract information",
"context",
"=",
"stored_instance",
".",
"context",
"name",
"=",
"context",
".",
"name",
"instance",
"=",
"stored_instance",
".",
"instance",
"# Clean up the stored instance (iPOPO side)",
"del",
"self",
".",
"__instances",
"[",
"name",
"]",
"stored_instance",
".",
"kill",
"(",
")",
"# Add the component to the waiting queue",
"self",
".",
"__waiting_handlers",
"[",
"name",
"]",
"=",
"(",
"context",
",",
"instance",
")",
"# Try to find a new handler factory",
"new_ref",
"=",
"self",
".",
"__context",
".",
"get_service_reference",
"(",
"handlers_const",
".",
"SERVICE_IPOPO_HANDLER_FACTORY",
",",
"\"({0}={1})\"",
".",
"format",
"(",
"handlers_const",
".",
"PROP_HANDLER_ID",
",",
"handler_id",
")",
",",
")",
"if",
"new_ref",
"is",
"not",
"None",
":",
"self",
".",
"__add_handler_factory",
"(",
"new_ref",
")"
] | Removes an handler factory
:param svc_ref: ServiceReference of the handler factory to remove | [
"Removes",
"an",
"handler",
"factory"
] | python | train |
geophysics-ubonn/reda | lib/reda/configs/configManager.py | python | train
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L459-L498

def gen_schlumberger(self, M, N, a=None):
    """generate one Schlumberger sounding configuration, that is, one set
    of configurations for one potential dipole MN.

    Parameters
    ----------
    M: int
        electrode number for the first potential electrode
    N: int
        electrode number for the second potential electrode
    a: int, optional
        stepping between subsequent voltage electrodes. If not set,
        determine it as a = abs(M - N)

    Returns
    -------
    configs: Kx4 numpy.ndarray
        array holding the configurations

    Examples
    --------
    import reda.configs.configManager as CRconfig
    config = CRconfig.ConfigManager(nr_of_electrodes=40)
    config.gen_schlumberger(M=20, N=21)
    """
    if a is None:
        a = np.abs(M - N)
    # note: as written this computes min(M, N) - (1 / a); the parenthesized
    # form (min(M, N) - 1) / a was probably intended -- kept as in the source
    nr_of_steps_left = int(min(M, N) - 1 / a)
    nr_of_steps_right = int((self.nr_electrodes - max(M, N)) / a)
    configs = []
    for i in range(0, min(nr_of_steps_left, nr_of_steps_right)):
        A = min(M, N) - (i + 1) * a
        B = max(M, N) + (i + 1) * a
        configs.append((A, B, M, N))
    configs = np.array(configs)
    self.add_to_configs(configs)
    return configs

"def",
"gen_schlumberger",
"(",
"self",
",",
"M",
",",
"N",
",",
"a",
"=",
"None",
")",
":",
"if",
"a",
"is",
"None",
":",
"a",
"=",
"np",
".",
"abs",
"(",
"M",
"-",
"N",
")",
"nr_of_steps_left",
"=",
"int",
"(",
"min",
"(",
"M",
",",
"N",
")",
"-",
"1",
"/",
"a",
")",
"nr_of_steps_right",
"=",
"int",
"(",
"(",
"self",
".",
"nr_electrodes",
"-",
"max",
"(",
"M",
",",
"N",
")",
")",
"/",
"a",
")",
"configs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"min",
"(",
"nr_of_steps_left",
",",
"nr_of_steps_right",
")",
")",
":",
"A",
"=",
"min",
"(",
"M",
",",
"N",
")",
"-",
"(",
"i",
"+",
"1",
")",
"*",
"a",
"B",
"=",
"max",
"(",
"M",
",",
"N",
")",
"+",
"(",
"i",
"+",
"1",
")",
"*",
"a",
"configs",
".",
"append",
"(",
"(",
"A",
",",
"B",
",",
"M",
",",
"N",
")",
")",
"configs",
"=",
"np",
".",
"array",
"(",
"configs",
")",
"self",
".",
"add_to_configs",
"(",
"configs",
")",
"return",
"configs"
] | generate one Schlumberger sounding configuration, that is, one set
of configurations for one potential dipole MN.
Parameters
----------
M: int
electrode number for the first potential electrode
N: int
electrode number for the second potential electrode
a: int, optional
stepping between subsequent voltage electrodes. If not set,
determine it as a = abs(M - N)
Returns
-------
configs: Kx4 numpy.ndarray
array holding the configurations
Examples
--------
import reda.configs.configManager as CRconfig
config = CRconfig.ConfigManager(nr_of_electrodes=40)
config.gen_schlumberger(M=20, N=21) | [
"generate",
"one",
"Schlumberger",
"sounding",
"configuration",
"that",
"is",
"one",
"set",
"of",
"configurations",
"for",
"one",
"potential",
"dipole",
"MN",
"."
] | python | train |
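The docstring's own example, expanded into a short runnable sketch:

    import reda.configs.configManager as CRconfig

    config = CRconfig.ConfigManager(nr_of_electrodes=40)
    abmn = config.gen_schlumberger(M=20, N=21)
    print(abmn.shape)  # Kx4 array of (A, B, M, N) electrode numbers
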
JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/menu.py | python | train
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/menu.py#L208-L219

def delete_menu(self, menu):
    """ Delete the specified menu

    :param menu:
    :type menu:
    :returns:
    :rtype:
    :raises:
    """
    if menu.parent is None:
        del self.menus[menu.name()]
    menu._delete()

"def",
"delete_menu",
"(",
"self",
",",
"menu",
")",
":",
"if",
"menu",
".",
"parent",
"is",
"None",
":",
"del",
"self",
".",
"menus",
"[",
"menu",
".",
"name",
"(",
")",
"]",
"menu",
".",
"_delete",
"(",
")"
] | Delete the specified menu
:param menu:
:type menu:
:returns:
:rtype:
:raises: | [
"Delete",
"the",
"specified",
"menu"
] | python | train |
rstoneback/pysat | pysat/utils.py | python | train
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/utils.py#L42-L76

def set_data_dir(path=None, store=None):
    """
    Set the top level directory pysat uses to look for data and reload.

    Parameters
    ----------
    path : string
        valid path to directory pysat uses to look for data
    store : bool
        if True, store data directory for future runs
    """
    import sys
    import os
    import pysat

    if sys.version_info[0] >= 3:
        if sys.version_info[1] < 4:
            import imp
            re_load = imp.reload
        else:
            import importlib
            re_load = importlib.reload
    else:
        re_load = reload

    if store is None:
        store = True
    if os.path.isdir(path):
        if store:
            with open(os.path.join(os.path.expanduser('~'), '.pysat',
                                   'data_path.txt'), 'w') as f:
                f.write(path)
        pysat.data_dir = path
        pysat._files = re_load(pysat._files)
        pysat._instrument = re_load(pysat._instrument)
    else:
        raise ValueError('Path %s does not lead to a valid directory.' % path)

"def",
"set_data_dir",
"(",
"path",
"=",
"None",
",",
"store",
"=",
"None",
")",
":",
"import",
"sys",
"import",
"os",
"import",
"pysat",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"if",
"sys",
".",
"version_info",
"[",
"1",
"]",
"<",
"4",
":",
"import",
"imp",
"re_load",
"=",
"imp",
".",
"reload",
"else",
":",
"import",
"importlib",
"re_load",
"=",
"importlib",
".",
"reload",
"else",
":",
"re_load",
"=",
"reload",
"if",
"store",
"is",
"None",
":",
"store",
"=",
"True",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"if",
"store",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"'.pysat'",
",",
"'data_path.txt'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"path",
")",
"pysat",
".",
"data_dir",
"=",
"path",
"pysat",
".",
"_files",
"=",
"re_load",
"(",
"pysat",
".",
"_files",
")",
"pysat",
".",
"_instrument",
"=",
"re_load",
"(",
"pysat",
".",
"_instrument",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Path %s does not lead to a valid directory.'",
"%",
"path",
")"
] | Set the top level directory pysat uses to look for data and reload.
Parameters
----------
path : string
valid path to directory pysat uses to look for data
store : bool
if True, store data directory for future runs | [
"Set",
"the",
"top",
"level",
"directory",
"pysat",
"uses",
"to",
"look",
"for",
"data",
"and",
"reload",
"."
] | python | train |
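A minimal usage sketch; the path is a placeholder:

    import pysat

    pysat.utils.set_data_dir('/home/user/pysat_data', store=True)
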
pycontribs/pyrax | pyrax/cloudloadbalancers.py | python | train
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudloadbalancers.py#L635-L643

def get_health_monitor(self, loadbalancer):
    """
    Returns a dict representing the health monitor for the load
    balancer. If no monitor has been configured, returns an
    empty dict.
    """
    uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer)
    resp, body = self.api.method_get(uri)
    return body.get("healthMonitor", {})

"def",
"get_health_monitor",
"(",
"self",
",",
"loadbalancer",
")",
":",
"uri",
"=",
"\"/loadbalancers/%s/healthmonitor\"",
"%",
"utils",
".",
"get_id",
"(",
"loadbalancer",
")",
"resp",
",",
"body",
"=",
"self",
".",
"api",
".",
"method_get",
"(",
"uri",
")",
"return",
"body",
".",
"get",
"(",
"\"healthMonitor\"",
",",
"{",
"}",
")"
] | Returns a dict representing the health monitor for the load
balancer. If no monitor has been configured, returns an
empty dict. | [
"Returns",
"a",
"dict",
"representing",
"the",
"health",
"monitor",
"for",
"the",
"load",
"balancer",
".",
"If",
"no",
"monitor",
"has",
"been",
"configured",
"returns",
"an",
"empty",
"dict",
"."
] | python | train |
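A hedged usage sketch; the credential file is a placeholder, and the resource-level `get_health_monitor()` call is assumed to proxy to the manager method above:

    import pyrax

    pyrax.set_credential_file('~/.pyrax_credentials')
    lb = pyrax.cloud_loadbalancers.list()[0]
    monitor = lb.get_health_monitor()  # {} if none is configured
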
brandon-rhodes/python-jplephem | jplephem/daf.py | python | test
https://github.com/brandon-rhodes/python-jplephem/blob/48c99ce40c627e24c95479d8845e312ea168f567/jplephem/daf.py#L92-L117

def map_words(self, start, end):
    """Return a memory-map of the elements `start` through `end`.

    The memory map will offer the 8-byte double-precision floats
    ("elements") in the file from index `start` through to the index
    `end`, inclusive, both counting the first float as element 1.
    Memory maps must begin on a page boundary, so `skip` returns the
    number of extra bytes at the beginning of the return value.
    """
    i, j = 8 * start - 8, 8 * end
    try:
        fileno = self.file.fileno()
    except (AttributeError, io.UnsupportedOperation):
        fileno = None
    if fileno is None:
        skip = 0
        self.file.seek(i)
        m = self.file.read(j - i)
    else:
        skip = i % mmap.ALLOCATIONGRANULARITY
        r = mmap.ACCESS_READ
        m = mmap.mmap(fileno, length=j-i+skip, access=r, offset=i-skip)
        if sys.version_info > (3,):
            m = memoryview(m)  # so further slicing can return views
    return m, skip

"def",
"map_words",
"(",
"self",
",",
"start",
",",
"end",
")",
":",
"i",
",",
"j",
"=",
"8",
"*",
"start",
"-",
"8",
",",
"8",
"*",
"end",
"try",
":",
"fileno",
"=",
"self",
".",
"file",
".",
"fileno",
"(",
")",
"except",
"(",
"AttributeError",
",",
"io",
".",
"UnsupportedOperation",
")",
":",
"fileno",
"=",
"None",
"if",
"fileno",
"is",
"None",
":",
"skip",
"=",
"0",
"self",
".",
"file",
".",
"seek",
"(",
"i",
")",
"m",
"=",
"self",
".",
"file",
".",
"read",
"(",
"j",
"-",
"i",
")",
"else",
":",
"skip",
"=",
"i",
"%",
"mmap",
".",
"ALLOCATIONGRANULARITY",
"r",
"=",
"mmap",
".",
"ACCESS_READ",
"m",
"=",
"mmap",
".",
"mmap",
"(",
"fileno",
",",
"length",
"=",
"j",
"-",
"i",
"+",
"skip",
",",
"access",
"=",
"r",
",",
"offset",
"=",
"i",
"-",
"skip",
")",
"if",
"sys",
".",
"version_info",
">",
"(",
"3",
",",
")",
":",
"m",
"=",
"memoryview",
"(",
"m",
")",
"# so further slicing can return views",
"return",
"m",
",",
"skip"
] | Return a memory-map of the elements `start` through `end`.
The memory map will offer the 8-byte double-precision floats
("elements") in the file from index `start` through to the index
`end`, inclusive, both counting the first float as element 1.
Memory maps must begin on a page boundary, so `skip` returns the
number of extra bytes at the beginning of the return value. | [
"Return",
"a",
"memory",
"-",
"map",
"of",
"the",
"elements",
"start",
"through",
"end",
"."
] | python | test |
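A hedged sketch; `de421.bsp` is a placeholder SPK file and the element indices are illustrative (elements are counted from 1):

    from jplephem.daf import DAF

    with open('de421.bsp', 'rb') as f:
        d = DAF(f)
        m, skip = d.map_words(1, 128)
        payload = m[skip:]  # raw 8-byte doubles for elements 1..128
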
fananimi/pyzk | zk/base.py | python | train
https://github.com/fananimi/pyzk/blob/1a765d616526efdcb4c9adfcc9b1d10f6ed8b938/zk/base.py#L1541-L1605

def get_attendance(self):
    """
    return attendance record

    :return: List of Attendance object
    """
    self.read_sizes()
    if self.records == 0:
        return []
    users = self.get_users()
    if self.verbose: print (users)
    attendances = []
    attendance_data, size = self.read_with_buffer(const.CMD_ATTLOG_RRQ)
    if size < 4:
        if self.verbose: print ("WRN: no attendance data")
        return []
    total_size = unpack("I", attendance_data[:4])[0]
    record_size = total_size / self.records
    if self.verbose: print ("record_size is ", record_size)
    attendance_data = attendance_data[4:]
    if record_size == 8:
        while len(attendance_data) >= 8:
            uid, status, timestamp, punch = unpack('HB4sB', attendance_data.ljust(8, b'\x00')[:8])
            if self.verbose: print (codecs.encode(attendance_data[:8], 'hex'))
            attendance_data = attendance_data[8:]
            tuser = list(filter(lambda x: x.uid == uid, users))
            if not tuser:
                user_id = str(uid)
            else:
                user_id = tuser[0].user_id
            timestamp = self.__decode_time(timestamp)
            attendance = Attendance(user_id, timestamp, status, punch, uid)
            attendances.append(attendance)
    elif record_size == 16:
        while len(attendance_data) >= 16:
            user_id, timestamp, status, punch, reserved, workcode = unpack('<I4sBB2sI', attendance_data.ljust(16, b'\x00')[:16])
            user_id = str(user_id)
            if self.verbose: print(codecs.encode(attendance_data[:16], 'hex'))
            attendance_data = attendance_data[16:]
            tuser = list(filter(lambda x: x.user_id == user_id, users))
            if not tuser:
                if self.verbose: print("no uid {}", user_id)
                uid = str(user_id)
                tuser = list(filter(lambda x: x.uid == user_id, users))
                if not tuser:
                    uid = str(user_id)
                else:
                    uid = tuser[0].uid
                    user_id = tuser[0].user_id
            else:
                uid = tuser[0].uid
            timestamp = self.__decode_time(timestamp)
            attendance = Attendance(user_id, timestamp, status, punch, uid)
            attendances.append(attendance)
    else:
        while len(attendance_data) >= 40:
            uid, user_id, status, timestamp, punch, space = unpack('<H24sB4sB8s', attendance_data.ljust(40, b'\x00')[:40])
            if self.verbose: print (codecs.encode(attendance_data[:40], 'hex'))
            user_id = (user_id.split(b'\x00')[0]).decode(errors='ignore')
            timestamp = self.__decode_time(timestamp)
            attendance = Attendance(user_id, timestamp, status, punch, uid)
            attendances.append(attendance)
            attendance_data = attendance_data[40:]
    return attendances

"def",
"get_attendance",
"(",
"self",
")",
":",
"self",
".",
"read_sizes",
"(",
")",
"if",
"self",
".",
"records",
"==",
"0",
":",
"return",
"[",
"]",
"users",
"=",
"self",
".",
"get_users",
"(",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"users",
")",
"attendances",
"=",
"[",
"]",
"attendance_data",
",",
"size",
"=",
"self",
".",
"read_with_buffer",
"(",
"const",
".",
"CMD_ATTLOG_RRQ",
")",
"if",
"size",
"<",
"4",
":",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"\"WRN: no attendance data\"",
")",
"return",
"[",
"]",
"total_size",
"=",
"unpack",
"(",
"\"I\"",
",",
"attendance_data",
"[",
":",
"4",
"]",
")",
"[",
"0",
"]",
"record_size",
"=",
"total_size",
"/",
"self",
".",
"records",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"\"record_size is \"",
",",
"record_size",
")",
"attendance_data",
"=",
"attendance_data",
"[",
"4",
":",
"]",
"if",
"record_size",
"==",
"8",
":",
"while",
"len",
"(",
"attendance_data",
")",
">=",
"8",
":",
"uid",
",",
"status",
",",
"timestamp",
",",
"punch",
"=",
"unpack",
"(",
"'HB4sB'",
",",
"attendance_data",
".",
"ljust",
"(",
"8",
",",
"b'\\x00'",
")",
"[",
":",
"8",
"]",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"codecs",
".",
"encode",
"(",
"attendance_data",
"[",
":",
"8",
"]",
",",
"'hex'",
")",
")",
"attendance_data",
"=",
"attendance_data",
"[",
"8",
":",
"]",
"tuser",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"uid",
"==",
"uid",
",",
"users",
")",
")",
"if",
"not",
"tuser",
":",
"user_id",
"=",
"str",
"(",
"uid",
")",
"else",
":",
"user_id",
"=",
"tuser",
"[",
"0",
"]",
".",
"user_id",
"timestamp",
"=",
"self",
".",
"__decode_time",
"(",
"timestamp",
")",
"attendance",
"=",
"Attendance",
"(",
"user_id",
",",
"timestamp",
",",
"status",
",",
"punch",
",",
"uid",
")",
"attendances",
".",
"append",
"(",
"attendance",
")",
"elif",
"record_size",
"==",
"16",
":",
"while",
"len",
"(",
"attendance_data",
")",
">=",
"16",
":",
"user_id",
",",
"timestamp",
",",
"status",
",",
"punch",
",",
"reserved",
",",
"workcode",
"=",
"unpack",
"(",
"'<I4sBB2sI'",
",",
"attendance_data",
".",
"ljust",
"(",
"16",
",",
"b'\\x00'",
")",
"[",
":",
"16",
"]",
")",
"user_id",
"=",
"str",
"(",
"user_id",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"codecs",
".",
"encode",
"(",
"attendance_data",
"[",
":",
"16",
"]",
",",
"'hex'",
")",
")",
"attendance_data",
"=",
"attendance_data",
"[",
"16",
":",
"]",
"tuser",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"user_id",
"==",
"user_id",
",",
"users",
")",
")",
"if",
"not",
"tuser",
":",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"\"no uid {}\"",
",",
"user_id",
")",
"uid",
"=",
"str",
"(",
"user_id",
")",
"tuser",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"uid",
"==",
"user_id",
",",
"users",
")",
")",
"if",
"not",
"tuser",
":",
"uid",
"=",
"str",
"(",
"user_id",
")",
"else",
":",
"uid",
"=",
"tuser",
"[",
"0",
"]",
".",
"uid",
"user_id",
"=",
"tuser",
"[",
"0",
"]",
".",
"user_id",
"else",
":",
"uid",
"=",
"tuser",
"[",
"0",
"]",
".",
"uid",
"timestamp",
"=",
"self",
".",
"__decode_time",
"(",
"timestamp",
")",
"attendance",
"=",
"Attendance",
"(",
"user_id",
",",
"timestamp",
",",
"status",
",",
"punch",
",",
"uid",
")",
"attendances",
".",
"append",
"(",
"attendance",
")",
"else",
":",
"while",
"len",
"(",
"attendance_data",
")",
">=",
"40",
":",
"uid",
",",
"user_id",
",",
"status",
",",
"timestamp",
",",
"punch",
",",
"space",
"=",
"unpack",
"(",
"'<H24sB4sB8s'",
",",
"attendance_data",
".",
"ljust",
"(",
"40",
",",
"b'\\x00'",
")",
"[",
":",
"40",
"]",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"codecs",
".",
"encode",
"(",
"attendance_data",
"[",
":",
"40",
"]",
",",
"'hex'",
")",
")",
"user_id",
"=",
"(",
"user_id",
".",
"split",
"(",
"b'\\x00'",
")",
"[",
"0",
"]",
")",
".",
"decode",
"(",
"errors",
"=",
"'ignore'",
")",
"timestamp",
"=",
"self",
".",
"__decode_time",
"(",
"timestamp",
")",
"attendance",
"=",
"Attendance",
"(",
"user_id",
",",
"timestamp",
",",
"status",
",",
"punch",
",",
"uid",
")",
"attendances",
".",
"append",
"(",
"attendance",
")",
"attendance_data",
"=",
"attendance_data",
"[",
"40",
":",
"]",
"return",
"attendances"
] | return attendance record
:return: List of Attendance object | [
"return",
"attendance",
"record"
] | python | train |
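A hedged sketch of polling a terminal with pyzk; the IP address is a placeholder:

    from zk import ZK

    conn = ZK('192.168.1.201', port=4370).connect()
    for att in conn.get_attendance():
        print(att.user_id, att.timestamp, att.punch)
    conn.disconnect()
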
Equitable/trump | trump/orm.py | python | train
https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L1918-L1945

def update_handle(self, chkpnt_settings):
    """
    Update a feed's handle checkpoint settings

    :param chkpnt_settings, dict:
        a dictionary where the keys are strings representing
        individual handle checkpoint names, for a Feed
        (eg. api_failure, feed_type, monounique...)
        See FeedHandle.__table__.columns for the
        current list.
        The values can be either integer or BitFlags.
    :return: None
    """
    # Note, for now, this function is nearly identical
    # to the Symbol version. Careful when augmenting,
    # to get the right one.
    objs = object_session(self)

    # override with anything passed in
    for checkpoint in chkpnt_settings:
        if checkpoint in FeedHandle.__table__.columns:
            settings = chkpnt_settings[checkpoint]
            setattr(self.handle, checkpoint, settings)
    objs.commit()

"def",
"update_handle",
"(",
"self",
",",
"chkpnt_settings",
")",
":",
"# Note, for now, this function is nearly identical\r",
"# to the Symbol version. Careful when augmenting,\r",
"# to get the right one.\r",
"objs",
"=",
"object_session",
"(",
"self",
")",
"# override with anything passed in\r",
"for",
"checkpoint",
"in",
"chkpnt_settings",
":",
"if",
"checkpoint",
"in",
"FeedHandle",
".",
"__table__",
".",
"columns",
":",
"settings",
"=",
"chkpnt_settings",
"[",
"checkpoint",
"]",
"setattr",
"(",
"self",
".",
"handle",
",",
"checkpoint",
",",
"settings",
")",
"objs",
".",
"commit",
"(",
")"
] | Update a feeds's handle checkpoint settings
:param chkpnt_settings, dict:
a dictionary where the keys are stings representing
individual handle checkpoint names, for a Feed
(eg. api_failure, feed_type, monounique...)
See FeedHandle.__table__.columns for the
current list.
The values can be either integer or BitFlags.
:return: None | [
"Update",
"a",
"feeds",
"s",
"handle",
"checkpoint",
"settings",
":",
"param",
"chkpnt_settings",
"dict",
":",
"a",
"dictionary",
"where",
"the",
"keys",
"are",
"stings",
"representing",
"individual",
"handle",
"checkpoint",
"names",
"for",
"a",
"Feed",
"(",
"eg",
".",
"api_failure",
"feed_type",
"monounique",
"...",
")",
"See",
"FeedHandle",
".",
"__table__",
".",
"columns",
"for",
"the",
"current",
"list",
".",
"The",
"values",
"can",
"be",
"either",
"integer",
"or",
"BitFlags",
".",
":",
"return",
":",
"None"
] | python | train |
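A minimal sketch; `feed` is assumed to be an existing trump `Feed` instance, and the checkpoint name and value are illustrative:

    # override a single handle checkpoint on an existing feed
    feed.update_handle({'api_failure': 1})
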
david-caro/python-autosemver | autosemver/packaging.py | python | train
https://github.com/david-caro/python-autosemver/blob/3bc0adb70c33e4bd3623ae4c1944d5ee37f4303d/autosemver/packaging.py#L180-L199

def create_authors(project_dir=os.curdir):
    """
    Creates the authors file, if not in a package.

    Returns:
        None

    Raises:
        RuntimeError: If the authors could not be retrieved
    """
    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
    authors_file = os.path.join(project_dir, 'AUTHORS')
    if os.path.exists(pkg_info_file):
        return

    authors = get_authors(project_dir=project_dir)
    with open(authors_file, 'wb') as authors_fd:
        authors_fd.write(
            b'\n'.join(a.encode('utf-8') for a in authors) + b'\n'
        )

"def",
"create_authors",
"(",
"project_dir",
"=",
"os",
".",
"curdir",
")",
":",
"pkg_info_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"'PKG-INFO'",
")",
"authors_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"'AUTHORS'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"pkg_info_file",
")",
":",
"return",
"authors",
"=",
"get_authors",
"(",
"project_dir",
"=",
"project_dir",
")",
"with",
"open",
"(",
"authors_file",
",",
"'wb'",
")",
"as",
"authors_fd",
":",
"authors_fd",
".",
"write",
"(",
"b'\\n'",
".",
"join",
"(",
"a",
".",
"encode",
"(",
"'utf-8'",
")",
"for",
"a",
"in",
"authors",
")",
"+",
"b'\\n'",
")"
] | Creates the authors file, if not in a package.
Returns:
None
Raises:
RuntimeError: If the authors could not be retrieved | [
"Creates",
"the",
"authors",
"file",
"if",
"not",
"in",
"a",
"package",
"."
] | python | train |
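A minimal sketch; the path is a placeholder git checkout:

    from autosemver.packaging import create_authors

    create_authors(project_dir='/path/to/checkout')  # writes an AUTHORS file
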
stevearc/dynamo3 | dynamo3/result.py | python | train
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/result.py#L331-L345

def build_kwargs(self):
    """ Construct the kwargs to pass to batch_get_item """
    keys, self.keys = self.keys[:MAX_GET_BATCH], self.keys[MAX_GET_BATCH:]
    query = {'ConsistentRead': self.consistent}
    if self.attributes is not None:
        query['ProjectionExpression'] = self.attributes
    if self.alias:
        query['ExpressionAttributeNames'] = self.alias
    query['Keys'] = keys
    return {
        'RequestItems': {
            self.tablename: query,
        },
        'ReturnConsumedCapacity': self.return_capacity,
    }

"def",
"build_kwargs",
"(",
"self",
")",
":",
"keys",
",",
"self",
".",
"keys",
"=",
"self",
".",
"keys",
"[",
":",
"MAX_GET_BATCH",
"]",
",",
"self",
".",
"keys",
"[",
"MAX_GET_BATCH",
":",
"]",
"query",
"=",
"{",
"'ConsistentRead'",
":",
"self",
".",
"consistent",
"}",
"if",
"self",
".",
"attributes",
"is",
"not",
"None",
":",
"query",
"[",
"'ProjectionExpression'",
"]",
"=",
"self",
".",
"attributes",
"if",
"self",
".",
"alias",
":",
"query",
"[",
"'ExpressionAttributeNames'",
"]",
"=",
"self",
".",
"alias",
"query",
"[",
"'Keys'",
"]",
"=",
"keys",
"return",
"{",
"'RequestItems'",
":",
"{",
"self",
".",
"tablename",
":",
"query",
",",
"}",
",",
"'ReturnConsumedCapacity'",
":",
"self",
".",
"return_capacity",
",",
"}"
] | Construct the kwargs to pass to batch_get_item | [
"Construct",
"the",
"kwargs",
"to",
"pass",
"to",
"batch_get_item"
] | python | train |
delph-in/pydelphin | delphin/interfaces/base.py | python | train
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/interfaces/base.py#L282-L305

def cleanup(self):
    """
    Return aggregated (table, rowdata) tuples and clear the state.
    """
    inserts = []

    last_run = self._runs[self._last_run_id]
    if 'end' not in last_run:
        last_run['end'] = datetime.now()

    for run_id in sorted(self._runs):
        run = self._runs[run_id]
        d = {'run-id': run.get('run-id', -1)}
        for key in self._run_keys:
            if key in run:
                d[key] = run[key]
        inserts.append(('run', d))

    # reset for next task
    self._parse_id = -1
    self._runs = {}
    self._last_run_id = -1

    return inserts

"def",
"cleanup",
"(",
"self",
")",
":",
"inserts",
"=",
"[",
"]",
"last_run",
"=",
"self",
".",
"_runs",
"[",
"self",
".",
"_last_run_id",
"]",
"if",
"'end'",
"not",
"in",
"last_run",
":",
"last_run",
"[",
"'end'",
"]",
"=",
"datetime",
".",
"now",
"(",
")",
"for",
"run_id",
"in",
"sorted",
"(",
"self",
".",
"_runs",
")",
":",
"run",
"=",
"self",
".",
"_runs",
"[",
"run_id",
"]",
"d",
"=",
"{",
"'run-id'",
":",
"run",
".",
"get",
"(",
"'run-id'",
",",
"-",
"1",
")",
"}",
"for",
"key",
"in",
"self",
".",
"_run_keys",
":",
"if",
"key",
"in",
"run",
":",
"d",
"[",
"key",
"]",
"=",
"run",
"[",
"key",
"]",
"inserts",
".",
"append",
"(",
"(",
"'run'",
",",
"d",
")",
")",
"# reset for next task",
"self",
".",
"_parse_id",
"=",
"-",
"1",
"self",
".",
"_runs",
"=",
"{",
"}",
"self",
".",
"_last_run_id",
"=",
"-",
"1",
"return",
"inserts"
] | Return aggregated (table, rowdata) tuples and clear the state. | [
"Return",
"aggregated",
"(",
"table",
"rowdata",
")",
"tuples",
"and",
"clear",
"the",
"state",
"."
] | python | train |
gplepage/lsqfit | src/lsqfit/__init__.py | python | train
https://github.com/gplepage/lsqfit/blob/6a57fd687632c175fccb47d8e8e943cda5e9ce9d/src/lsqfit/__init__.py#L903-L1225

def format(self, maxline=0, pstyle='v', nline=None, extend=True):
    """ Formats fit output details into a string for printing.

    The output tabulates the ``chi**2`` per degree of freedom of the fit
    (``chi2/dof``), the number of degrees of freedom, the ``Q`` value of
    the fit (ie, p-value), and the logarithm of the Gaussian Bayes Factor
    for the fit (``logGBF``). At the end it lists the SVD cut, the number
    of eigenmodes modified by the SVD cut, the tolerances used in the fit,
    and the time in seconds needed to do the fit. The tolerance used to
    terminate the fit is marked with an asterisk. It also lists
    information about the fitter used if it is other than the standard
    choice.

    Optionally, ``format`` will also list the best-fit values
    for the fit parameters together with the prior for each (in ``[]`` on
    each line). Lines for parameters that deviate from their prior by more
    than one (prior) standard deviation are marked with asterisks, with
    the number of asterisks equal to the number of standard deviations (up
    to five). Lines for parameters designated as linear (see ``linear``
    keyword) are marked with a minus sign after their prior.

    ``format`` can also list all of the data and the corresponding values
    from the fit, again with asterisks on lines where there is a
    significant discrepancy.

    Args:
        maxline (int or bool): Maximum number of data points for which
            fit results and input data are tabulated. ``maxline<0``
            implies that only ``chi2``, ``Q``, ``logGBF``, and ``itns``
            are tabulated; no parameter values are included. Setting
            ``maxline=True`` prints all data points; setting it
            equal ``None`` or ``False`` is the same as setting
            it equal to ``-1``. Default is ``maxline=0``.
        pstyle (str or None): Style used for parameter list. Supported
            values are 'vv' for very verbose, 'v' for verbose, and 'm' for
            minimal. When 'm' is set, only parameters whose values differ
            from their prior values are listed. Setting ``pstyle=None``
            implies no parameters are listed.
        extend (bool): If ``True``, extend the parameter list to
            include values derived from log-normal or other
            non-Gaussian parameters. So values for fit parameter
            ``p['log(a)']``, for example, are listed together with
            values ``p['a']`` for the exponential of the fit parameter.
            Setting ``extend=False`` means that only the value
            for ``p['log(a)']`` is listed. Default is ``True``.

    Returns:
        String containing detailed information about fit.
    """
    # unpack arguments
    if nline is not None and maxline == 0:
        maxline = nline  # for legacy code (old name)
    if maxline is True:
        # print all data
        maxline = sys.maxsize
    if maxline is False or maxline is None:
        maxline = -1
    if pstyle is not None:
        if pstyle[:2] == 'vv':
            pstyle = 'vv'
        elif pstyle[:1] == 'v':
            pstyle = 'v'
        elif pstyle[:1] == 'm':
            pstyle = 'm'
        else:
            raise ValueError("Invalid pstyle: " + str(pstyle))

    def collect(v1, v2, style='v', stride=1, extend=False):
        """ Collect data from v1 and v2 into table.

        Returns list of [label,v1fmt,v2fmt]s for each entry in v1 and
        v2. Here v1fmt and v2fmt are strings representing entries in v1
        and v2, while label is assembled from the key/index of the
        entry.
        """
        def nstar(v1, v2):
            sdev = max(v1.sdev, v2.sdev)
            nstar = int(abs(v1.mean - v2.mean) / sdev)
            if nstar > 5:
                nstar = 5
            elif nstar < 1:
                nstar = 0
            return ' ' + nstar * '*'
        ct = 0
        ans = []
        width = [0, 0, 0]
        stars = []
        if v1.shape is None:
            # BufferDict
            keys = list(v1.keys())
            if extend:
                v1 = _gvar.BufferDict(v1)
                v2 = _gvar.BufferDict(v2)
                ekeys = v1.extension_keys()
                if len(ekeys) > 0:
                    first_ekey = ekeys[0]
                    keys += ekeys
                else:
                    extend = False
            for k in keys:
                if extend and k == first_ekey:
                    # marker indicating beginning of extra keys
                    stars.append(None)
                    ans.append(None)
                ktag = str(k)
                if numpy.shape(v1[k]) == ():
                    if ct % stride != 0:
                        ct += 1
                        continue
                    if style in ['v', 'm']:
                        v1fmt = v1[k].fmt(sep=' ')
                        v2fmt = v2[k].fmt(sep=' ')
                    else:
                        v1fmt = v1[k].fmt(-1)
                        v2fmt = v2[k].fmt(-1)
                    if style == 'm' and v1fmt == v2fmt:
                        ct += 1
                        continue
                    stars.append(nstar(v1[k], v2[k]))
                    ans.append([ktag, v1fmt, v2fmt])
                    w = [len(ai) for ai in ans[-1]]
                    for i, (wo, wn) in enumerate(zip(width, w)):
                        if wn > wo:
                            width[i] = wn
                    ct += 1
                else:
                    ktag = ktag + " "
                    for i in numpy.ndindex(v1[k].shape):
                        if ct % stride != 0:
                            ct += 1
                            continue
                        ifmt = (len(i) * "%d,")[:-1] % i
                        if style in ['v', 'm']:
                            v1fmt = v1[k][i].fmt(sep=' ')
                            v2fmt = v2[k][i].fmt(sep=' ')
                        else:
                            v1fmt = v1[k][i].fmt(-1)
                            v2fmt = v2[k][i].fmt(-1)
                        if style == 'm' and v1fmt == v2fmt:
                            ct += 1
                            continue
                        stars.append(nstar(v1[k][i], v2[k][i]))
                        ans.append([ktag + ifmt, v1fmt, v2fmt])
                        w = [len(ai) for ai in ans[-1]]
                        for i, (wo, wn) in enumerate(zip(width, w)):
                            if wn > wo:
                                width[i] = wn
                        ct += 1
                        ktag = ""
        else:
            # numpy array
            v2 = numpy.asarray(v2)
            for k in numpy.ndindex(v1.shape):
                # convert array(GVar) to GVar
                v1k = v1[k] if hasattr(v1[k], 'fmt') else v1[k].flat[0]
                v2k = v2[k] if hasattr(v2[k], 'fmt') else v2[k].flat[0]
                if ct % stride != 0:
                    ct += 1
                    continue
                kfmt = (len(k) * "%d,")[:-1] % k
                if style in ['v', 'm']:
                    v1fmt = v1k.fmt(sep=' ')
                    v2fmt = v2k.fmt(sep=' ')
                else:
                    v1fmt = v1k.fmt(-1)
                    v2fmt = v2k.fmt(-1)
                if style == 'm' and v1fmt == v2fmt:
                    ct += 1
                    continue
                stars.append(nstar(v1k, v2k))
                ans.append([kfmt, v1fmt, v2fmt])
                w = [len(ai) for ai in ans[-1]]
                for i, (wo, wn) in enumerate(zip(width, w)):
                    if wn > wo:
                        width[i] = wn
                ct += 1
        collect.width = width
        collect.stars = stars
        return ans

    # build header
    dof = self.dof
    if dof > 0:
        chi2_dof = self.chi2 / self.dof
    else:
        chi2_dof = self.chi2
    try:
        Q = 'Q = %.2g' % self.Q
    except:
        Q = ''
    try:
        logGBF = 'logGBF = %.5g' % self.logGBF
    except:
        logGBF = ''
    if self.prior is None:
        descr = ' (no prior)'
    else:
        descr = ''
    table = ('Least Square Fit%s:\n chi2/dof [dof] = %.2g [%d] %s'
             ' %s\n' % (descr, chi2_dof, dof, Q, logGBF))
    if maxline < 0:
        return table

    # create parameter table
    if pstyle is not None:
        table = table + '\nParameters:\n'
        prior = self.prior
        if prior is None:
            if self.p0.shape is None:
                prior = _gvar.BufferDict(
                    self.p0, buf=self.p0.flatten() + _gvar.gvar(0, float('inf')))
            else:
                prior = self.p0 + _gvar.gvar(0, float('inf'))
        data = collect(self.palt, prior, style=pstyle, stride=1, extend=extend)
        w1, w2, w3 = collect.width
        fst = "%%%ds%s%%%ds%s[ %%%ds ]" % (
            max(w1, 15), 3 * ' ',
            max(w2, 10), int(max(w2, 10) / 2) * ' ', max(w3, 10)
        )
        if len(self.linear) > 0:
            spacer = [' ', '-']
        else:
            spacer = ['', '']
        for i, (di, stars) in enumerate(zip(data, collect.stars)):
            if di is None:
                # marker for boundary between true fit parameters and derived parameters
                ndashes = (
                    max(w1, 15) + 3 + max(w2, 10) + int(max(w2, 10) / 2)
                    + 4 + max(w3, 10)
                )
                table += ndashes * '-' + '\n'
                continue
            table += (
                (fst % tuple(di)) +
                spacer[i in self.linear] +
                stars + '\n'
            )

    # settings
    settings = "\nSettings:"
    if not self.add_svdnoise or self.svdcut is None or self.svdcut < 0:
        settings += "\n svdcut/n = {svdcut:.2g}/{svdn}".format(
            svdcut=self.svdcut if self.svdcut is not None else 0.0,
            svdn=self.svdn
        )
    else:
        settings += "\n svdcut/n = {svdcut:.2g}/{svdn}*".format(
            svdcut=self.svdcut, svdn=self.svdn
        )
    criterion = self.stopping_criterion
    try:
        fmtstr = [
            " tol = ({:.2g},{:.2g},{:.2g})",
            " tol = ({:.2g}*,{:.2g},{:.2g})",
            " tol = ({:.2g},{:.2g}*,{:.2g})",
            " tol = ({:.2g},{:.2g},{:.2g}*)",
        ][criterion if criterion is not None else 0]
        settings += fmtstr.format(*self.tol)
    except:
        pass
    if criterion is not None and criterion == 0:
        settings += " (itns/time = {itns}*/{time:.1f})".format(
            itns=self.nit, time=self.time
        )
    else:
        settings += " (itns/time = {itns}/{time:.1f})".format(
            itns=self.nit, time=self.time
        )
    default_line = '\n fitter = gsl_multifit methods = lm/more/qr\n'
    newline = "\n fitter = {} {}\n".format(
        self.fitter, self.description
    )
    if newline != default_line:
        settings += newline
    else:
        settings += '\n'

    if maxline <= 0 or self.data is None:
        return table + settings

    # create table comparing fit results to data
    ny = self.y.size
    stride = 1 if maxline >= ny else (int(ny / maxline) + 1)
    if hasattr(self, 'fcn_p'):
        f = self.fcn_p
    elif self.x is False:
        f = self.fcn(self.p)
    else:
        f = self.fcn(self.x, self.p)
    if hasattr(f, 'keys'):
        f = _gvar.BufferDict(f)
    else:
        f = numpy.array(f)
    data = collect(self.y, f, style='v', stride=stride, extend=False)
    w1, w2, w3 = collect.width
    clabels = ("key", "y[key]", "f(p)[key]")

    if self.y.shape is not None and self.x is not False and self.x is not None:
        # use x[k] to label lines in table?
        try:
            x = numpy.array(self.x)
            xlist = []
            ct = 0
            for k in numpy.ndindex(x.shape):
                if ct % stride != 0:
                    ct += 1
                    continue
                xlist.append("%g" % x[k])
            assert len(xlist) == len(data)
        except:
            xlist = None
        if xlist is not None:
            for i, (d1, d2, d3) in enumerate(data):
                data[i] = (xlist[i], d2, d3)
            clabels = ("x[k]", "y[k]", "f(x[k],p)")

    w1, w2, w3 = max(9, w1 + 4), max(9, w2 + 4), max(9, w3 + 4)
    table += "\nFit:\n"
    fst = "%%%ds%%%ds%%%ds\n" % (w1, w2, w3)
    table += fst % clabels
    table += (w1 + w2 + w3) * "-" + "\n"
    for di, stars in zip(data, collect.stars):
        table += fst[:-1] % tuple(di) + stars + '\n'

    return table + settings

"def",
"format",
"(",
"self",
",",
"maxline",
"=",
"0",
",",
"pstyle",
"=",
"'v'",
",",
"nline",
"=",
"None",
",",
"extend",
"=",
"True",
")",
":",
"# unpack arguments",
"if",
"nline",
"is",
"not",
"None",
"and",
"maxline",
"==",
"0",
":",
"maxline",
"=",
"nline",
"# for legacy code (old name)",
"if",
"maxline",
"is",
"True",
":",
"# print all data",
"maxline",
"=",
"sys",
".",
"maxsize",
"if",
"maxline",
"is",
"False",
"or",
"maxline",
"is",
"None",
":",
"maxline",
"=",
"-",
"1",
"if",
"pstyle",
"is",
"not",
"None",
":",
"if",
"pstyle",
"[",
":",
"2",
"]",
"==",
"'vv'",
":",
"pstyle",
"=",
"'vv'",
"elif",
"pstyle",
"[",
":",
"1",
"]",
"==",
"'v'",
":",
"pstyle",
"=",
"'v'",
"elif",
"pstyle",
"[",
":",
"1",
"]",
"==",
"'m'",
":",
"pstyle",
"=",
"'m'",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid pstyle: \"",
"+",
"str",
"(",
"pstyle",
")",
")",
"def",
"collect",
"(",
"v1",
",",
"v2",
",",
"style",
"=",
"'v'",
",",
"stride",
"=",
"1",
",",
"extend",
"=",
"False",
")",
":",
"\"\"\" Collect data from v1 and v2 into table.\n\n Returns list of [label,v1fmt,v2fmt]s for each entry in v1 and\n v2. Here v1fmt and v2fmt are strings representing entries in v1\n and v2, while label is assembled from the key/index of the\n entry.\n \"\"\"",
"def",
"nstar",
"(",
"v1",
",",
"v2",
")",
":",
"sdev",
"=",
"max",
"(",
"v1",
".",
"sdev",
",",
"v2",
".",
"sdev",
")",
"nstar",
"=",
"int",
"(",
"abs",
"(",
"v1",
".",
"mean",
"-",
"v2",
".",
"mean",
")",
"/",
"sdev",
")",
"if",
"nstar",
">",
"5",
":",
"nstar",
"=",
"5",
"elif",
"nstar",
"<",
"1",
":",
"nstar",
"=",
"0",
"return",
"' '",
"+",
"nstar",
"*",
"'*'",
"ct",
"=",
"0",
"ans",
"=",
"[",
"]",
"width",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"stars",
"=",
"[",
"]",
"if",
"v1",
".",
"shape",
"is",
"None",
":",
"# BufferDict",
"keys",
"=",
"list",
"(",
"v1",
".",
"keys",
"(",
")",
")",
"if",
"extend",
":",
"v1",
"=",
"_gvar",
".",
"BufferDict",
"(",
"v1",
")",
"v2",
"=",
"_gvar",
".",
"BufferDict",
"(",
"v2",
")",
"ekeys",
"=",
"v1",
".",
"extension_keys",
"(",
")",
"if",
"len",
"(",
"ekeys",
")",
">",
"0",
":",
"first_ekey",
"=",
"ekeys",
"[",
"0",
"]",
"keys",
"+=",
"ekeys",
"else",
":",
"extend",
"=",
"False",
"for",
"k",
"in",
"keys",
":",
"if",
"extend",
"and",
"k",
"==",
"first_ekey",
":",
"# marker indicating beginning of extra keys",
"stars",
".",
"append",
"(",
"None",
")",
"ans",
".",
"append",
"(",
"None",
")",
"ktag",
"=",
"str",
"(",
"k",
")",
"if",
"numpy",
".",
"shape",
"(",
"v1",
"[",
"k",
"]",
")",
"==",
"(",
")",
":",
"if",
"ct",
"%",
"stride",
"!=",
"0",
":",
"ct",
"+=",
"1",
"continue",
"if",
"style",
"in",
"[",
"'v'",
",",
"'m'",
"]",
":",
"v1fmt",
"=",
"v1",
"[",
"k",
"]",
".",
"fmt",
"(",
"sep",
"=",
"' '",
")",
"v2fmt",
"=",
"v2",
"[",
"k",
"]",
".",
"fmt",
"(",
"sep",
"=",
"' '",
")",
"else",
":",
"v1fmt",
"=",
"v1",
"[",
"k",
"]",
".",
"fmt",
"(",
"-",
"1",
")",
"v2fmt",
"=",
"v2",
"[",
"k",
"]",
".",
"fmt",
"(",
"-",
"1",
")",
"if",
"style",
"==",
"'m'",
"and",
"v1fmt",
"==",
"v2fmt",
":",
"ct",
"+=",
"1",
"continue",
"stars",
".",
"append",
"(",
"nstar",
"(",
"v1",
"[",
"k",
"]",
",",
"v2",
"[",
"k",
"]",
")",
")",
"ans",
".",
"append",
"(",
"[",
"ktag",
",",
"v1fmt",
",",
"v2fmt",
"]",
")",
"w",
"=",
"[",
"len",
"(",
"ai",
")",
"for",
"ai",
"in",
"ans",
"[",
"-",
"1",
"]",
"]",
"for",
"i",
",",
"(",
"wo",
",",
"wn",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"width",
",",
"w",
")",
")",
":",
"if",
"wn",
">",
"wo",
":",
"width",
"[",
"i",
"]",
"=",
"wn",
"ct",
"+=",
"1",
"else",
":",
"ktag",
"=",
"ktag",
"+",
"\" \"",
"for",
"i",
"in",
"numpy",
".",
"ndindex",
"(",
"v1",
"[",
"k",
"]",
".",
"shape",
")",
":",
"if",
"ct",
"%",
"stride",
"!=",
"0",
":",
"ct",
"+=",
"1",
"continue",
"ifmt",
"=",
"(",
"len",
"(",
"i",
")",
"*",
"\"%d,\"",
")",
"[",
":",
"-",
"1",
"]",
"%",
"i",
"if",
"style",
"in",
"[",
"'v'",
",",
"'m'",
"]",
":",
"v1fmt",
"=",
"v1",
"[",
"k",
"]",
"[",
"i",
"]",
".",
"fmt",
"(",
"sep",
"=",
"' '",
")",
"v2fmt",
"=",
"v2",
"[",
"k",
"]",
"[",
"i",
"]",
".",
"fmt",
"(",
"sep",
"=",
"' '",
")",
"else",
":",
"v1fmt",
"=",
"v1",
"[",
"k",
"]",
"[",
"i",
"]",
".",
"fmt",
"(",
"-",
"1",
")",
"v2fmt",
"=",
"v2",
"[",
"k",
"]",
"[",
"i",
"]",
".",
"fmt",
"(",
"-",
"1",
")",
"if",
"style",
"==",
"'m'",
"and",
"v1fmt",
"==",
"v2fmt",
":",
"ct",
"+=",
"1",
"continue",
"stars",
".",
"append",
"(",
"nstar",
"(",
"v1",
"[",
"k",
"]",
"[",
"i",
"]",
",",
"v2",
"[",
"k",
"]",
"[",
"i",
"]",
")",
")",
"ans",
".",
"append",
"(",
"[",
"ktag",
"+",
"ifmt",
",",
"v1fmt",
",",
"v2fmt",
"]",
")",
"w",
"=",
"[",
"len",
"(",
"ai",
")",
"for",
"ai",
"in",
"ans",
"[",
"-",
"1",
"]",
"]",
"for",
"i",
",",
"(",
"wo",
",",
"wn",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"width",
",",
"w",
")",
")",
":",
"if",
"wn",
">",
"wo",
":",
"width",
"[",
"i",
"]",
"=",
"wn",
"ct",
"+=",
"1",
"ktag",
"=",
"\"\"",
"else",
":",
"# numpy array",
"v2",
"=",
"numpy",
".",
"asarray",
"(",
"v2",
")",
"for",
"k",
"in",
"numpy",
".",
"ndindex",
"(",
"v1",
".",
"shape",
")",
":",
"# convert array(GVar) to GVar",
"v1k",
"=",
"v1",
"[",
"k",
"]",
"if",
"hasattr",
"(",
"v1",
"[",
"k",
"]",
",",
"'fmt'",
")",
"else",
"v1",
"[",
"k",
"]",
".",
"flat",
"[",
"0",
"]",
"v2k",
"=",
"v2",
"[",
"k",
"]",
"if",
"hasattr",
"(",
"v2",
"[",
"k",
"]",
",",
"'fmt'",
")",
"else",
"v2",
"[",
"k",
"]",
".",
"flat",
"[",
"0",
"]",
"if",
"ct",
"%",
"stride",
"!=",
"0",
":",
"ct",
"+=",
"1",
"continue",
"kfmt",
"=",
"(",
"len",
"(",
"k",
")",
"*",
"\"%d,\"",
")",
"[",
":",
"-",
"1",
"]",
"%",
"k",
"if",
"style",
"in",
"[",
"'v'",
",",
"'m'",
"]",
":",
"v1fmt",
"=",
"v1k",
".",
"fmt",
"(",
"sep",
"=",
"' '",
")",
"v2fmt",
"=",
"v2k",
".",
"fmt",
"(",
"sep",
"=",
"' '",
")",
"else",
":",
"v1fmt",
"=",
"v1k",
".",
"fmt",
"(",
"-",
"1",
")",
"v2fmt",
"=",
"v2k",
".",
"fmt",
"(",
"-",
"1",
")",
"if",
"style",
"==",
"'m'",
"and",
"v1fmt",
"==",
"v2fmt",
":",
"ct",
"+=",
"1",
"continue",
"stars",
".",
"append",
"(",
"nstar",
"(",
"v1k",
",",
"v2k",
")",
")",
"###",
"ans",
".",
"append",
"(",
"[",
"kfmt",
",",
"v1fmt",
",",
"v2fmt",
"]",
")",
"w",
"=",
"[",
"len",
"(",
"ai",
")",
"for",
"ai",
"in",
"ans",
"[",
"-",
"1",
"]",
"]",
"for",
"i",
",",
"(",
"wo",
",",
"wn",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"width",
",",
"w",
")",
")",
":",
"if",
"wn",
">",
"wo",
":",
"width",
"[",
"i",
"]",
"=",
"wn",
"ct",
"+=",
"1",
"collect",
".",
"width",
"=",
"width",
"collect",
".",
"stars",
"=",
"stars",
"return",
"ans",
"# build header",
"dof",
"=",
"self",
".",
"dof",
"if",
"dof",
">",
"0",
":",
"chi2_dof",
"=",
"self",
".",
"chi2",
"/",
"self",
".",
"dof",
"else",
":",
"chi2_dof",
"=",
"self",
".",
"chi2",
"try",
":",
"Q",
"=",
"'Q = %.2g'",
"%",
"self",
".",
"Q",
"except",
":",
"Q",
"=",
"''",
"try",
":",
"logGBF",
"=",
"'logGBF = %.5g'",
"%",
"self",
".",
"logGBF",
"except",
":",
"logGBF",
"=",
"''",
"if",
"self",
".",
"prior",
"is",
"None",
":",
"descr",
"=",
"' (no prior)'",
"else",
":",
"descr",
"=",
"''",
"table",
"=",
"(",
"'Least Square Fit%s:\\n chi2/dof [dof] = %.2g [%d] %s'",
"' %s\\n'",
"%",
"(",
"descr",
",",
"chi2_dof",
",",
"dof",
",",
"Q",
",",
"logGBF",
")",
")",
"if",
"maxline",
"<",
"0",
":",
"return",
"table",
"# create parameter table",
"if",
"pstyle",
"is",
"not",
"None",
":",
"table",
"=",
"table",
"+",
"'\\nParameters:\\n'",
"prior",
"=",
"self",
".",
"prior",
"if",
"prior",
"is",
"None",
":",
"if",
"self",
".",
"p0",
".",
"shape",
"is",
"None",
":",
"prior",
"=",
"_gvar",
".",
"BufferDict",
"(",
"self",
".",
"p0",
",",
"buf",
"=",
"self",
".",
"p0",
".",
"flatten",
"(",
")",
"+",
"_gvar",
".",
"gvar",
"(",
"0",
",",
"float",
"(",
"'inf'",
")",
")",
")",
"else",
":",
"prior",
"=",
"self",
".",
"p0",
"+",
"_gvar",
".",
"gvar",
"(",
"0",
",",
"float",
"(",
"'inf'",
")",
")",
"data",
"=",
"collect",
"(",
"self",
".",
"palt",
",",
"prior",
",",
"style",
"=",
"pstyle",
",",
"stride",
"=",
"1",
",",
"extend",
"=",
"extend",
")",
"w1",
",",
"w2",
",",
"w3",
"=",
"collect",
".",
"width",
"fst",
"=",
"\"%%%ds%s%%%ds%s[ %%%ds ]\"",
"%",
"(",
"max",
"(",
"w1",
",",
"15",
")",
",",
"3",
"*",
"' '",
",",
"max",
"(",
"w2",
",",
"10",
")",
",",
"int",
"(",
"max",
"(",
"w2",
",",
"10",
")",
"/",
"2",
")",
"*",
"' '",
",",
"max",
"(",
"w3",
",",
"10",
")",
")",
"if",
"len",
"(",
"self",
".",
"linear",
")",
">",
"0",
":",
"spacer",
"=",
"[",
"' '",
",",
"'-'",
"]",
"else",
":",
"spacer",
"=",
"[",
"''",
",",
"''",
"]",
"for",
"i",
",",
"(",
"di",
",",
"stars",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"data",
",",
"collect",
".",
"stars",
")",
")",
":",
"if",
"di",
"is",
"None",
":",
"# marker for boundary between true fit parameters and derived parameters",
"ndashes",
"=",
"(",
"max",
"(",
"w1",
",",
"15",
")",
"+",
"3",
"+",
"max",
"(",
"w2",
",",
"10",
")",
"+",
"int",
"(",
"max",
"(",
"w2",
",",
"10",
")",
"/",
"2",
")",
"+",
"4",
"+",
"max",
"(",
"w3",
",",
"10",
")",
")",
"table",
"+=",
"ndashes",
"*",
"'-'",
"+",
"'\\n'",
"continue",
"table",
"+=",
"(",
"(",
"fst",
"%",
"tuple",
"(",
"di",
")",
")",
"+",
"spacer",
"[",
"i",
"in",
"self",
".",
"linear",
"]",
"+",
"stars",
"+",
"'\\n'",
")",
"# settings",
"settings",
"=",
"\"\\nSettings:\"",
"if",
"not",
"self",
".",
"add_svdnoise",
"or",
"self",
".",
"svdcut",
"is",
"None",
"or",
"self",
".",
"svdcut",
"<",
"0",
":",
"settings",
"+=",
"\"\\n svdcut/n = {svdcut:.2g}/{svdn}\"",
".",
"format",
"(",
"svdcut",
"=",
"self",
".",
"svdcut",
"if",
"self",
".",
"svdcut",
"is",
"not",
"None",
"else",
"0.0",
",",
"svdn",
"=",
"self",
".",
"svdn",
")",
"else",
":",
"settings",
"+=",
"\"\\n svdcut/n = {svdcut:.2g}/{svdn}*\"",
".",
"format",
"(",
"svdcut",
"=",
"self",
".",
"svdcut",
",",
"svdn",
"=",
"self",
".",
"svdn",
")",
"criterion",
"=",
"self",
".",
"stopping_criterion",
"try",
":",
"fmtstr",
"=",
"[",
"\" tol = ({:.2g},{:.2g},{:.2g})\"",
",",
"\" tol = ({:.2g}*,{:.2g},{:.2g})\"",
",",
"\" tol = ({:.2g},{:.2g}*,{:.2g})\"",
",",
"\" tol = ({:.2g},{:.2g},{:.2g}*)\"",
",",
"]",
"[",
"criterion",
"if",
"criterion",
"is",
"not",
"None",
"else",
"0",
"]",
"settings",
"+=",
"fmtstr",
".",
"format",
"(",
"*",
"self",
".",
"tol",
")",
"except",
":",
"pass",
"if",
"criterion",
"is",
"not",
"None",
"and",
"criterion",
"==",
"0",
":",
"settings",
"+=",
"\" (itns/time = {itns}*/{time:.1f})\"",
".",
"format",
"(",
"itns",
"=",
"self",
".",
"nit",
",",
"time",
"=",
"self",
".",
"time",
")",
"else",
":",
"settings",
"+=",
"\" (itns/time = {itns}/{time:.1f})\"",
".",
"format",
"(",
"itns",
"=",
"self",
".",
"nit",
",",
"time",
"=",
"self",
".",
"time",
")",
"default_line",
"=",
"'\\n fitter = gsl_multifit methods = lm/more/qr\\n'",
"newline",
"=",
"\"\\n fitter = {} {}\\n\"",
".",
"format",
"(",
"self",
".",
"fitter",
",",
"self",
".",
"description",
")",
"if",
"newline",
"!=",
"default_line",
":",
"settings",
"+=",
"newline",
"else",
":",
"settings",
"+=",
"'\\n'",
"if",
"maxline",
"<=",
"0",
"or",
"self",
".",
"data",
"is",
"None",
":",
"return",
"table",
"+",
"settings",
"# create table comparing fit results to data",
"ny",
"=",
"self",
".",
"y",
".",
"size",
"stride",
"=",
"1",
"if",
"maxline",
">=",
"ny",
"else",
"(",
"int",
"(",
"ny",
"/",
"maxline",
")",
"+",
"1",
")",
"if",
"hasattr",
"(",
"self",
",",
"'fcn_p'",
")",
":",
"f",
"=",
"self",
".",
"fcn_p",
"elif",
"self",
".",
"x",
"is",
"False",
":",
"f",
"=",
"self",
".",
"fcn",
"(",
"self",
".",
"p",
")",
"else",
":",
"f",
"=",
"self",
".",
"fcn",
"(",
"self",
".",
"x",
",",
"self",
".",
"p",
")",
"if",
"hasattr",
"(",
"f",
",",
"'keys'",
")",
":",
"f",
"=",
"_gvar",
".",
"BufferDict",
"(",
"f",
")",
"else",
":",
"f",
"=",
"numpy",
".",
"array",
"(",
"f",
")",
"data",
"=",
"collect",
"(",
"self",
".",
"y",
",",
"f",
",",
"style",
"=",
"'v'",
",",
"stride",
"=",
"stride",
",",
"extend",
"=",
"False",
")",
"w1",
",",
"w2",
",",
"w3",
"=",
"collect",
".",
"width",
"clabels",
"=",
"(",
"\"key\"",
",",
"\"y[key]\"",
",",
"\"f(p)[key]\"",
")",
"if",
"self",
".",
"y",
".",
"shape",
"is",
"not",
"None",
"and",
"self",
".",
"x",
"is",
"not",
"False",
"and",
"self",
".",
"x",
"is",
"not",
"None",
":",
"# use x[k] to label lines in table?",
"try",
":",
"x",
"=",
"numpy",
".",
"array",
"(",
"self",
".",
"x",
")",
"xlist",
"=",
"[",
"]",
"ct",
"=",
"0",
"for",
"k",
"in",
"numpy",
".",
"ndindex",
"(",
"x",
".",
"shape",
")",
":",
"if",
"ct",
"%",
"stride",
"!=",
"0",
":",
"ct",
"+=",
"1",
"continue",
"xlist",
".",
"append",
"(",
"\"%g\"",
"%",
"x",
"[",
"k",
"]",
")",
"assert",
"len",
"(",
"xlist",
")",
"==",
"len",
"(",
"data",
")",
"except",
":",
"xlist",
"=",
"None",
"if",
"xlist",
"is",
"not",
"None",
":",
"for",
"i",
",",
"(",
"d1",
",",
"d2",
",",
"d3",
")",
"in",
"enumerate",
"(",
"data",
")",
":",
"data",
"[",
"i",
"]",
"=",
"(",
"xlist",
"[",
"i",
"]",
",",
"d2",
",",
"d3",
")",
"clabels",
"=",
"(",
"\"x[k]\"",
",",
"\"y[k]\"",
",",
"\"f(x[k],p)\"",
")",
"w1",
",",
"w2",
",",
"w3",
"=",
"max",
"(",
"9",
",",
"w1",
"+",
"4",
")",
",",
"max",
"(",
"9",
",",
"w2",
"+",
"4",
")",
",",
"max",
"(",
"9",
",",
"w3",
"+",
"4",
")",
"table",
"+=",
"\"\\nFit:\\n\"",
"fst",
"=",
"\"%%%ds%%%ds%%%ds\\n\"",
"%",
"(",
"w1",
",",
"w2",
",",
"w3",
")",
"table",
"+=",
"fst",
"%",
"clabels",
"table",
"+=",
"(",
"w1",
"+",
"w2",
"+",
"w3",
")",
"*",
"\"-\"",
"+",
"\"\\n\"",
"for",
"di",
",",
"stars",
"in",
"zip",
"(",
"data",
",",
"collect",
".",
"stars",
")",
":",
"table",
"+=",
"fst",
"[",
":",
"-",
"1",
"]",
"%",
"tuple",
"(",
"di",
")",
"+",
"stars",
"+",
"'\\n'",
"return",
"table",
"+",
"settings"
] | Formats fit output details into a string for printing.
The output tabulates the ``chi**2`` per degree of freedom of the fit
(``chi2/dof``), the number of degrees of freedom, the ``Q`` value of
the fit (ie, p-value), and the logarithm of the Gaussian Bayes Factor
for the fit (``logGBF``). At the end it lists the SVD cut, the number
of eigenmodes modified by the SVD cut, the tolerances used in the fit,
and the time in seconds needed to do the fit. The tolerance used to
terminate the fit is marked with an asterisk. It also lists
information about the fitter used if it is other than the standard
choice.
Optionally, ``format`` will also list the best-fit values
for the fit parameters together with the prior for each (in ``[]`` on
each line). Lines for parameters that deviate from their prior by more
than one (prior) standard deviation are marked with asterisks, with
the number of asterisks equal to the number of standard deviations (up
to five). Lines for parameters designated as linear (see ``linear``
keyword) are marked with a minus sign after their prior.
``format`` can also list all of the data and the corresponding values
from the fit, again with asterisks on lines where there is a
significant discrepancy.
Args:
maxline (int or bool): Maximum number of data points for which
fit results and input data are tabulated. ``maxline<0``
implies that only ``chi2``, ``Q``, ``logGBF``, and ``itns``
are tabulated; no parameter values are included. Setting
``maxline=True`` prints all data points; setting it
equal to ``None`` or ``False`` is the same as setting
it equal to ``-1``. Default is ``maxline=0``.
pstyle (str or None): Style used for parameter list. Supported
values are 'vv' for very verbose, 'v' for verbose, and 'm' for
minimal. When 'm' is set, only parameters whose values differ
from their prior values are listed. Setting ``pstyle=None``
implies no parameters are listed.
extend (bool): If ``True``, extend the parameter list to
include values derived from log-normal or other
non-Gaussian parameters. So values for fit parameter
``p['log(a)']``, for example, are listed together with
values ``p['a']`` for the exponential of the fit parameter.
Setting ``extend=False`` means that only the value
for ``p['log(a)']`` is listed. Default is ``True``.
Returns:
String containing detailed information about fit. | [
"Formats",
"fit",
"output",
"details",
"into",
"a",
"string",
"for",
"printing",
"."
] | python | train |
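
The record above documents lsqfit's ``format`` method. A minimal usage sketch
(assuming the lsqfit and gvar packages are installed; the data, prior, and fit
function here are illustrative only, not taken from the record):

import numpy as np
import gvar as gv
import lsqfit

x = np.array([1., 2., 3., 4.])
y = gv.gvar(['0.98(5)', '2.05(5)', '2.95(5)', '4.10(5)'])
prior = {'a': gv.gvar('0(5)'), 'b': gv.gvar('0(5)')}

def fcn(x, p):
    # straight-line model: y = a + b * x
    return p['a'] + p['b'] * x

fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior)
print(fit.format(maxline=True, pstyle='v'))  # header, parameters, and all data points
print(fit.format(maxline=-1))                # header only (chi2/dof, Q, logGBF)
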
pywbem/pywbem | pywbem/tupleparse.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/tupleparse.py#L1821-L1837 | def parse_methodresponse(self, tup_tree):
"""
Parse expected METHODRESPONSE ELEMENT. I.e.
::
<!ELEMENT METHODRESPONSE (ERROR | (RETURNVALUE?, PARAMVALUE*))>
<!ATTLIST METHODRESPONSE
%CIMName;>
"""
self.check_node(tup_tree, 'METHODRESPONSE', ('NAME',))
return (name(tup_tree),
attrs(tup_tree),
self.list_of_various(tup_tree,
('ERROR', 'RETURNVALUE', 'PARAMVALUE'))) | [
"def",
"parse_methodresponse",
"(",
"self",
",",
"tup_tree",
")",
":",
"self",
".",
"check_node",
"(",
"tup_tree",
",",
"'METHODRESPONSE'",
",",
"(",
"'NAME'",
",",
")",
")",
"return",
"(",
"name",
"(",
"tup_tree",
")",
",",
"attrs",
"(",
"tup_tree",
")",
",",
"self",
".",
"list_of_various",
"(",
"tup_tree",
",",
"(",
"'ERROR'",
",",
"'RETURNVALUE'",
",",
"'PARAMVALUE'",
")",
")",
")"
] | Parse expected METHODRESPONSE ELEMENT. I.e.
::
<!ELEMENT METHODRESPONSE (ERROR | (RETURNVALUE?, PARAMVALUE*))>
<!ATTLIST METHODRESPONSE
%CIMName;> | [
"Parse",
"expected",
"METHODRESPONSE",
"ELEMENT",
".",
"I",
".",
"e",
"."
] | python | train |
facelessuser/wcmatch | wcmatch/glob.py | https://github.com/facelessuser/wcmatch/blob/d153e7007cc73b994ae1ba553dc4584039f5c212/wcmatch/glob.py#L428-L439 | def globmatch(filename, patterns, *, flags=0):
"""
Check if filename matches pattern.
By default case sensitivity is determined by the file system,
but if `case_sensitive` is set, respect that instead.
"""
flags = _flag_transform(flags)
if not _wcparse.is_unix_style(flags):
filename = util.norm_slash(filename)
return _wcparse.compile(_wcparse.split(patterns, flags), flags).match(filename) | [
"def",
"globmatch",
"(",
"filename",
",",
"patterns",
",",
"*",
",",
"flags",
"=",
"0",
")",
":",
"flags",
"=",
"_flag_transform",
"(",
"flags",
")",
"if",
"not",
"_wcparse",
".",
"is_unix_style",
"(",
"flags",
")",
":",
"filename",
"=",
"util",
".",
"norm_slash",
"(",
"filename",
")",
"return",
"_wcparse",
".",
"compile",
"(",
"_wcparse",
".",
"split",
"(",
"patterns",
",",
"flags",
")",
",",
"flags",
")",
".",
"match",
"(",
"filename",
")"
] | Check if filename matches pattern.
By default case sensitivity is determined by the file system,
but if `case_sensitive` is set, respect that instead. | [
"Check",
"if",
"filename",
"matches",
"pattern",
"."
] | python | train |
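
The globmatch record above is a thin wrapper over wcmatch's pattern compiler.
A hedged usage sketch (assuming the wcmatch package is installed; the paths
are made up, and flag availability can vary between wcmatch versions):

from wcmatch import glob

# '**' only spans directories when the GLOBSTAR flag is passed.
print(glob.globmatch('src/pkg/module.py', '**/*.py', flags=glob.GLOBSTAR))  # True
print(glob.globmatch('notes.txt', '*.py'))                                  # False
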
chaoss/grimoirelab-sortinghat | sortinghat/parsing/stackalytics.py | https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/parsing/stackalytics.py#L130-L207 | def __parse_identities(self, json):
"""Parse identities using Stackalytics format.
The Stackalytics identities format is a JSON document under the
"users" key. The document should follow the next schema:
{
"users": [
{
"launchpad_id": "0-jsmith",
"gerrit_id": "jsmith",
"companies": [
{
"company_name": "Example",
"end_date": null
}
],
"user_name": "John Smith",
"emails": ["[email protected]", "[email protected]"]
},
{
"companies": [
{
"company_name": "Bitergia",
"end_date": null
},
{
"company_name": "Example",
"end_date": "2010-Jan-01"
}
],
"user_name": "John Doe",
"emails": ["[email protected]", "[email protected]"]
}
]
}
:param json: JSON object to parse
:raise InvalidFormatError: raised when the format of the JSON is
not valid.
"""
try:
for user in json['users']:
name = self.__encode(user['user_name'])
uuid = name
uid = UniqueIdentity(uuid=uuid)
identity = Identity(name=name, email=None, username=None,
source=self.source, uuid=uuid)
uid.identities.append(identity)
for email_addr in user['emails']:
email = self.__encode(email_addr)
identity = Identity(name=name, email=email, username=None,
source=self.source, uuid=uuid)
uid.identities.append(identity)
for site_id in ['gerrit_id', 'launchpad_id']:
username = user.get(site_id, None)
if not username:
continue
username = self.__encode(username)
source = self.source + ':' + site_id.replace('_id', '')
identity = Identity(name=name, email=None, username=username,
source=source, uuid=uuid)
uid.identities.append(identity)
for rol in self.__parse_enrollments(user):
uid.enrollments.append(rol)
self._identities[uuid] = uid
except KeyError as e:
msg = "invalid json format. Attribute %s not found" % e.args
raise InvalidFormatError(cause=msg) | [
"def",
"__parse_identities",
"(",
"self",
",",
"json",
")",
":",
"try",
":",
"for",
"user",
"in",
"json",
"[",
"'users'",
"]",
":",
"name",
"=",
"self",
".",
"__encode",
"(",
"user",
"[",
"'user_name'",
"]",
")",
"uuid",
"=",
"name",
"uid",
"=",
"UniqueIdentity",
"(",
"uuid",
"=",
"uuid",
")",
"identity",
"=",
"Identity",
"(",
"name",
"=",
"name",
",",
"email",
"=",
"None",
",",
"username",
"=",
"None",
",",
"source",
"=",
"self",
".",
"source",
",",
"uuid",
"=",
"uuid",
")",
"uid",
".",
"identities",
".",
"append",
"(",
"identity",
")",
"for",
"email_addr",
"in",
"user",
"[",
"'emails'",
"]",
":",
"email",
"=",
"self",
".",
"__encode",
"(",
"email_addr",
")",
"identity",
"=",
"Identity",
"(",
"name",
"=",
"name",
",",
"email",
"=",
"email",
",",
"username",
"=",
"None",
",",
"source",
"=",
"self",
".",
"source",
",",
"uuid",
"=",
"uuid",
")",
"uid",
".",
"identities",
".",
"append",
"(",
"identity",
")",
"for",
"site_id",
"in",
"[",
"'gerrit_id'",
",",
"'launchpad_id'",
"]",
":",
"username",
"=",
"user",
".",
"get",
"(",
"site_id",
",",
"None",
")",
"if",
"not",
"username",
":",
"continue",
"username",
"=",
"self",
".",
"__encode",
"(",
"username",
")",
"source",
"=",
"self",
".",
"source",
"+",
"':'",
"+",
"site_id",
".",
"replace",
"(",
"'_id'",
",",
"''",
")",
"identity",
"=",
"Identity",
"(",
"name",
"=",
"name",
",",
"email",
"=",
"None",
",",
"username",
"=",
"username",
",",
"source",
"=",
"source",
",",
"uuid",
"=",
"uuid",
")",
"uid",
".",
"identities",
".",
"append",
"(",
"identity",
")",
"for",
"rol",
"in",
"self",
".",
"__parse_enrollments",
"(",
"user",
")",
":",
"uid",
".",
"enrollments",
".",
"append",
"(",
"rol",
")",
"self",
".",
"_identities",
"[",
"uuid",
"]",
"=",
"uid",
"except",
"KeyError",
"as",
"e",
":",
"msg",
"=",
"\"invalid json format. Attribute %s not found\"",
"%",
"e",
".",
"args",
"raise",
"InvalidFormatError",
"(",
"cause",
"=",
"msg",
")"
] | Parse identities using Stackalytics format.
The Stackalytics identities format is a JSON document under the
"users" key. The document should follow the next schema:
{
"users": [
{
"launchpad_id": "0-jsmith",
"gerrit_id": "jsmith",
"companies": [
{
"company_name": "Example",
"end_date": null
}
],
"user_name": "John Smith",
"emails": ["[email protected]", "[email protected]"]
},
{
"companies": [
{
"company_name": "Bitergia",
"end_date": null
},
{
"company_name": "Example",
"end_date": "2010-Jan-01"
}
],
"user_name": "John Doe",
"emails": ["[email protected]", "[email protected]"]
}
]
}
:param json: JSON object to parse
:raise InvalidFormatError: raised when the format of the JSON is
not valid. | [
"Parse",
"identities",
"using",
"Stackalytics",
"format",
"."
] | python | train |
amicks/Speculator | speculator/features/SO.py | https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/features/SO.py#L33-L45 | def eval_from_json(json):
""" Evaluates SO from JSON (typically Poloniex API response)
Args:
json: List of dates where each entry is a dict of raw market data.
Returns:
Float SO between 0 and 100.
"""
close = json[-1]['close'] # Latest closing price
low = min(poloniex.get_attribute(json, 'low')) # Lowest low
high = max(poloniex.get_attribute(json, 'high')) # Highest high
return SO.eval_algorithm(close, low, high) | [
"def",
"eval_from_json",
"(",
"json",
")",
":",
"close",
"=",
"json",
"[",
"-",
"1",
"]",
"[",
"'close'",
"]",
"# Latest closing price",
"low",
"=",
"min",
"(",
"poloniex",
".",
"get_attribute",
"(",
"json",
",",
"'low'",
")",
")",
"# Lowest low",
"high",
"=",
"max",
"(",
"poloniex",
".",
"get_attribute",
"(",
"json",
",",
"'high'",
")",
")",
"# Highest high",
"return",
"SO",
".",
"eval_algorithm",
"(",
"close",
",",
"low",
",",
"high",
")"
] | Evaluates SO from JSON (typically Poloniex API response)
Args:
json: List of dates where each entry is a dict of raw market data.
Returns:
Float SO between 0 and 100. | [
"Evaluates",
"SO",
"from",
"JSON",
"(",
"typically",
"Poloniex",
"API",
"response",
")"
] | python | train |
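
The SO record above pulls the latest close, the window low, and the window
high from Poloniex-style candles. A standalone sketch of the same %K
computation (the candle values are invented, and the formula shown is the
standard stochastic-oscillator %K assumed to match SO.eval_algorithm):

candles = [
    {'high': 11.0, 'low': 9.5, 'close': 10.0},
    {'high': 12.0, 'low': 10.5, 'close': 11.5},
    {'high': 12.5, 'low': 11.0, 'close': 12.0},
]
close = candles[-1]['close']              # latest closing price
low = min(c['low'] for c in candles)      # lowest low in the window
high = max(c['high'] for c in candles)    # highest high in the window
so = 100 * (close - low) / (high - low)   # stochastic oscillator %K
print(round(so, 2))                       # 83.33
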
prompt-toolkit/pyvim | pyvim/editor.py | https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/editor.py#L247-L260 | def run(self):
"""
Run the event loop for the interface.
This starts the interaction.
"""
# Make sure everything is in sync, before starting.
self.sync_with_prompt_toolkit()
def pre_run():
# Start in navigation mode.
self.application.vi_state.input_mode = InputMode.NAVIGATION
# Run eventloop of prompt_toolkit.
self.application.run(pre_run=pre_run) | [
"def",
"run",
"(",
"self",
")",
":",
"# Make sure everything is in sync, before starting.",
"self",
".",
"sync_with_prompt_toolkit",
"(",
")",
"def",
"pre_run",
"(",
")",
":",
"# Start in navigation mode.",
"self",
".",
"application",
".",
"vi_state",
".",
"input_mode",
"=",
"InputMode",
".",
"NAVIGATION",
"# Run eventloop of prompt_toolkit.",
"self",
".",
"application",
".",
"run",
"(",
"pre_run",
"=",
"pre_run",
")"
] | Run the event loop for the interface.
This starts the interaction. | [
"Run",
"the",
"event",
"loop",
"for",
"the",
"interface",
".",
"This",
"starts",
"the",
"interaction",
"."
] | python | train |
uber/tchannel-python | tchannel/peer_heap.py | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/peer_heap.py#L63-L78 | def lt(self, i, j):
"""Compare the priority of two peers.
Primary comparator will be the rank of each peer. If the ``rank`` is
the same, then compare the ``order``. The ``order`` attribute of the peer
tracks the heap push order of the peer. This helps solve the imbalance
problem caused by randomization when dealing with same-rank situations.
:param i: ith peer
:param j: jth peer
:return: True or False
"""
if self.peers[i].rank == self.peers[j].rank:
return self.peers[i].order < self.peers[j].order
return self.peers[i].rank < self.peers[j].rank | [
"def",
"lt",
"(",
"self",
",",
"i",
",",
"j",
")",
":",
"if",
"self",
".",
"peers",
"[",
"i",
"]",
".",
"rank",
"==",
"self",
".",
"peers",
"[",
"j",
"]",
".",
"rank",
":",
"return",
"self",
".",
"peers",
"[",
"i",
"]",
".",
"order",
"<",
"self",
".",
"peers",
"[",
"j",
"]",
".",
"order",
"return",
"self",
".",
"peers",
"[",
"i",
"]",
".",
"rank",
"<",
"self",
".",
"peers",
"[",
"j",
"]",
".",
"rank"
] | Compare the priority of two peers.
Primary comparator will be the rank of each peer. If the ``rank`` is
the same, then compare the ``order``. The ``order`` attribute of the peer
tracks the heap push order of the peer. This helps solve the imbalance
problem caused by randomization when dealing with same-rank situations.
:param i: ith peer
:param j: jth peer
:return: True or False | [
"Compare",
"the",
"priority",
"of",
"two",
"peers",
"."
] | python | train |
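
The comparator above orders peers by rank and breaks ties by heap push order.
A standalone sketch of the same ordering on plain dicts (the peer data is
invented):

peers = [
    {'name': 'a', 'rank': 2, 'order': 7},
    {'name': 'b', 'rank': 1, 'order': 9},
    {'name': 'c', 'rank': 1, 'order': 3},
]
# rank first; equal ranks fall back to insertion (push) order
peers.sort(key=lambda p: (p['rank'], p['order']))
print([p['name'] for p in peers])  # ['c', 'b', 'a']
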
apache/incubator-heron | heron/tools/admin/src/python/standalone.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/admin/src/python/standalone.py#L278-L288 | def template_statemgr_yaml(cl_args, zookeepers):
'''
Template statemgr.yaml
'''
statemgr_config_file_template = "%s/standalone/templates/statemgr.template.yaml" \
% cl_args["config_path"]
statemgr_config_file_actual = "%s/standalone/statemgr.yaml" % cl_args["config_path"]
template_file(statemgr_config_file_template, statemgr_config_file_actual,
{"<zookeeper_host:zookeeper_port>": ",".join(
['"%s"' % zk if ":" in zk else '"%s:2181"' % zk for zk in zookeepers])}) | [
"def",
"template_statemgr_yaml",
"(",
"cl_args",
",",
"zookeepers",
")",
":",
"statemgr_config_file_template",
"=",
"\"%s/standalone/templates/statemgr.template.yaml\"",
"%",
"cl_args",
"[",
"\"config_path\"",
"]",
"statemgr_config_file_actual",
"=",
"\"%s/standalone/statemgr.yaml\"",
"%",
"cl_args",
"[",
"\"config_path\"",
"]",
"template_file",
"(",
"statemgr_config_file_template",
",",
"statemgr_config_file_actual",
",",
"{",
"\"<zookeeper_host:zookeeper_port>\"",
":",
"\",\"",
".",
"join",
"(",
"[",
"'\"%s\"'",
"%",
"zk",
"if",
"\":\"",
"in",
"zk",
"else",
"'\"%s:2181\"'",
"%",
"zk",
"for",
"zk",
"in",
"zookeepers",
"]",
")",
"}",
")"
] | Template statemgr.yaml | [
"Template",
"statemgr",
".",
"yaml"
] | python | valid |
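
The templating record above quotes each ZooKeeper endpoint and appends the
default client port when none is given. The list comprehension in isolation
(hostnames are made up):

zookeepers = ['zk1.example.com', 'zk2.example.com:2888']
rendered = ",".join(
    '"%s"' % zk if ":" in zk else '"%s:2181"' % zk for zk in zookeepers
)
print(rendered)  # "zk1.example.com:2181","zk2.example.com:2888"
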
softvar/simplegist | simplegist/mygist.py | https://github.com/softvar/simplegist/blob/8d53edd15d76c7b10fb963a659c1cf9f46f5345d/simplegist/mygist.py#L13-L34 | def listall(self):
'''
will display all the filenames.
Result can be stored in an array for easy fetching of gistNames
for future purposes.
eg. a = Gist().mygists().listall()
print a[0] #to fetch first gistName
'''
file_name = []
r = requests.get(
'%s/users/%s/gists' % (BASE_URL, self.user),
headers=self.gist.header
)
r_text = json.loads(r.text)
limit = len(r.json())
if (r.status_code == 200 ):
for g,no in zip(r_text, range(0,limit)):
for key,value in r.json()[no]['files'].iteritems():
file_name.append(value['filename'])
return file_name
raise Exception('Username not found') | [
"def",
"listall",
"(",
"self",
")",
":",
"file_name",
"=",
"[",
"]",
"r",
"=",
"requests",
".",
"get",
"(",
"'%s/users/%s/gists'",
"%",
"(",
"BASE_URL",
",",
"self",
".",
"user",
")",
",",
"headers",
"=",
"self",
".",
"gist",
".",
"header",
")",
"r_text",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"limit",
"=",
"len",
"(",
"r",
".",
"json",
"(",
")",
")",
"if",
"(",
"r",
".",
"status_code",
"==",
"200",
")",
":",
"for",
"g",
",",
"no",
"in",
"zip",
"(",
"r_text",
",",
"range",
"(",
"0",
",",
"limit",
")",
")",
":",
"for",
"key",
",",
"value",
"in",
"r",
".",
"json",
"(",
")",
"[",
"no",
"]",
"[",
"'files'",
"]",
".",
"iteritems",
"(",
")",
":",
"file_name",
".",
"append",
"(",
"value",
"[",
"'filename'",
"]",
")",
"return",
"file_name",
"raise",
"Exception",
"(",
"'Username not found'",
")"
] | will display all the filenames.
Result can be stored in an array for easy fetching of gistNames
for future purposes.
eg. a = Gist().mygists().listall()
print a[0] #to fetch first gistName | [
"will",
"display",
"all",
"the",
"filenames",
".",
"Result",
"can",
"be",
"stored",
"in",
"an",
"array",
"for",
"easy",
"fetching",
"of",
"gistNames",
"for",
"future",
"purposes",
".",
"eg",
".",
"a",
"=",
"Gist",
"()",
".",
"mygists",
"()",
".",
"listall",
"()",
"print",
"a",
"[",
"0",
"]",
"#to",
"fetch",
"first",
"gistName"
] | python | train |
lambdamusic/Ontospy | ontospy/core/ontospy.py | https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L230-L286 | def build_ontologies(self, exclude_BNodes=False, return_string=False):
"""
Extract ontology instances info from the graph, then create python objects for them.
Note: often ontology info is nested in structures like this:
[ a owl:Ontology ;
vann:preferredNamespacePrefix "bsym" ;
vann:preferredNamespaceUri "http://bsym.bloomberg.com/sym/" ]
Hence there is some logic to deal with these edge cases.
"""
out = []
qres = self.sparqlHelper.getOntology()
if qres:
# NOTE: SPARQL returns a list of rdflib.query.ResultRow (~ tuples..)
for candidate in qres:
if isBlankNode(candidate[0]):
if exclude_BNodes:
continue
else:
checkDC_ID = [x for x in self.rdflib_graph.objects(
candidate[0], rdflib.namespace.DC.identifier)]
if checkDC_ID:
out += [Ontology(checkDC_ID[0], namespaces=self.namespaces), ]
else:
vannprop = rdflib.URIRef(
"http://purl.org/vocab/vann/preferredNamespaceUri")
vannpref = rdflib.URIRef(
"http://purl.org/vocab/vann/preferredNamespacePrefix")
checkDC_ID = [x for x in self.rdflib_graph.objects(
candidate[0], vannprop)]
if checkDC_ID:
checkDC_prefix = [
x for x in self.rdflib_graph.objects(candidate[0], vannpref)]
if checkDC_prefix:
out += [Ontology(checkDC_ID[0],
namespaces=self.namespaces,
prefPrefix=checkDC_prefix[0])]
else:
out += [Ontology(checkDC_ID[0], namespaces=self.namespaces)]
else:
out += [Ontology(candidate[0], namespaces=self.namespaces)]
else:
pass
# printDebug("No owl:Ontologies found")
# finally... add all annotations/triples
self.all_ontologies = out
for onto in self.all_ontologies:
onto.triples = self.sparqlHelper.entityTriples(onto.uri)
onto._buildGraph() | [
"def",
"build_ontologies",
"(",
"self",
",",
"exclude_BNodes",
"=",
"False",
",",
"return_string",
"=",
"False",
")",
":",
"out",
"=",
"[",
"]",
"qres",
"=",
"self",
".",
"sparqlHelper",
".",
"getOntology",
"(",
")",
"if",
"qres",
":",
"# NOTE: SPARQL returns a list of rdflib.query.ResultRow (~ tuples..)",
"for",
"candidate",
"in",
"qres",
":",
"if",
"isBlankNode",
"(",
"candidate",
"[",
"0",
"]",
")",
":",
"if",
"exclude_BNodes",
":",
"continue",
"else",
":",
"checkDC_ID",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"rdflib_graph",
".",
"objects",
"(",
"candidate",
"[",
"0",
"]",
",",
"rdflib",
".",
"namespace",
".",
"DC",
".",
"identifier",
")",
"]",
"if",
"checkDC_ID",
":",
"out",
"+=",
"[",
"Ontology",
"(",
"checkDC_ID",
"[",
"0",
"]",
",",
"namespaces",
"=",
"self",
".",
"namespaces",
")",
",",
"]",
"else",
":",
"vannprop",
"=",
"rdflib",
".",
"URIRef",
"(",
"\"http://purl.org/vocab/vann/preferredNamespaceUri\"",
")",
"vannpref",
"=",
"rdflib",
".",
"URIRef",
"(",
"\"http://purl.org/vocab/vann/preferredNamespacePrefix\"",
")",
"checkDC_ID",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"rdflib_graph",
".",
"objects",
"(",
"candidate",
"[",
"0",
"]",
",",
"vannprop",
")",
"]",
"if",
"checkDC_ID",
":",
"checkDC_prefix",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"rdflib_graph",
".",
"objects",
"(",
"candidate",
"[",
"0",
"]",
",",
"vannpref",
")",
"]",
"if",
"checkDC_prefix",
":",
"out",
"+=",
"[",
"Ontology",
"(",
"checkDC_ID",
"[",
"0",
"]",
",",
"namespaces",
"=",
"self",
".",
"namespaces",
",",
"prefPrefix",
"=",
"checkDC_prefix",
"[",
"0",
"]",
")",
"]",
"else",
":",
"out",
"+=",
"[",
"Ontology",
"(",
"checkDC_ID",
"[",
"0",
"]",
",",
"namespaces",
"=",
"self",
".",
"namespaces",
")",
"]",
"else",
":",
"out",
"+=",
"[",
"Ontology",
"(",
"candidate",
"[",
"0",
"]",
",",
"namespaces",
"=",
"self",
".",
"namespaces",
")",
"]",
"else",
":",
"pass",
"# printDebug(\"No owl:Ontologies found\")",
"# finally... add all annotations/triples",
"self",
".",
"all_ontologies",
"=",
"out",
"for",
"onto",
"in",
"self",
".",
"all_ontologies",
":",
"onto",
".",
"triples",
"=",
"self",
".",
"sparqlHelper",
".",
"entityTriples",
"(",
"onto",
".",
"uri",
")",
"onto",
".",
"_buildGraph",
"(",
")"
] | Extract ontology instances info from the graph, then create python objects for them.
Note: often ontology info is nested in structures like this:
[ a owl:Ontology ;
vann:preferredNamespacePrefix "bsym" ;
vann:preferredNamespaceUri "http://bsym.bloomberg.com/sym/" ]
Hence there is some logic to deal with these edge cases. | [
"Extract",
"ontology",
"instances",
"info",
"from",
"the",
"graph",
"then",
"creates",
"python",
"objects",
"for",
"them",
"."
] | python | train |
kivy/python-for-android | pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/filters.py | https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/filters.py#L292-L307 | def do_filesizeformat(value, binary=False):
"""Format the value like a 'human-readable' file size (i.e. 13 KB,
4.1 MB, 102 bytes, etc). By default decimal prefixes are used (mega,
giga, etc.); if the second parameter is set to `True`, the binary
prefixes are used (mebi, gibi).
"""
bytes = float(value)
base = binary and 1024 or 1000
middle = binary and 'i' or ''
if bytes < base:
return "%d Byte%s" % (bytes, bytes != 1 and 's' or '')
elif bytes < base * base:
return "%.1f K%sB" % (bytes / base, middle)
elif bytes < base * base * base:
return "%.1f M%sB" % (bytes / (base * base), middle)
return "%.1f G%sB" % (bytes / (base * base * base), middle) | [
"def",
"do_filesizeformat",
"(",
"value",
",",
"binary",
"=",
"False",
")",
":",
"bytes",
"=",
"float",
"(",
"value",
")",
"base",
"=",
"binary",
"and",
"1024",
"or",
"1000",
"middle",
"=",
"binary",
"and",
"'i'",
"or",
"''",
"if",
"bytes",
"<",
"base",
":",
"return",
"\"%d Byte%s\"",
"%",
"(",
"bytes",
",",
"bytes",
"!=",
"1",
"and",
"'s'",
"or",
"''",
")",
"elif",
"bytes",
"<",
"base",
"*",
"base",
":",
"return",
"\"%.1f K%sB\"",
"%",
"(",
"bytes",
"/",
"base",
",",
"middle",
")",
"elif",
"bytes",
"<",
"base",
"*",
"base",
"*",
"base",
":",
"return",
"\"%.1f M%sB\"",
"%",
"(",
"bytes",
"/",
"(",
"base",
"*",
"base",
")",
",",
"middle",
")",
"return",
"\"%.1f G%sB\"",
"%",
"(",
"bytes",
"/",
"(",
"base",
"*",
"base",
"*",
"base",
")",
",",
"middle",
")"
] | Format the value like a 'human-readable' file size (i.e. 13 KB,
4.1 MB, 102 bytes, etc). By default decimal prefixes are used (mega,
giga, etc.); if the second parameter is set to `True`, the binary
prefixes are used (mebi, gibi). | [
"Format",
"the",
"value",
"like",
"a",
"human",
"-",
"readable",
"file",
"size",
"(",
"i",
".",
"e",
".",
"13",
"KB",
"4",
".",
"1",
"MB",
"102",
"bytes",
"etc",
")",
".",
"Per",
"default",
"decimal",
"prefixes",
"are",
"used",
"(",
"mega",
"giga",
"etc",
".",
")",
"if",
"the",
"second",
"parameter",
"is",
"set",
"to",
"True",
"the",
"binary",
"prefixes",
"are",
"used",
"(",
"mebi",
"gibi",
")",
"."
] | python | train |
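
Given the do_filesizeformat filter defined in the record above, a few
expected outputs (computed by hand from the branches shown; the input values
are illustrative):

print(do_filesizeformat(1))                  # 1 Byte
print(do_filesizeformat(500))                # 500 Bytes
print(do_filesizeformat(1000))               # 1.0 KB   (decimal prefixes)
print(do_filesizeformat(1024, binary=True))  # 1.0 KiB  (binary prefixes)
print(do_filesizeformat(3 * 1000 ** 2))      # 3.0 MB
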
Hackerfleet/hfos | hfos/debugger.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/debugger.py#L223-L264 | def debugrequest(self, event):
"""Handler for client-side debug requests"""
try:
self.log("Event: ", event.__dict__, lvl=critical)
if event.data == "storejson":
self.log("Storing received object to /tmp", lvl=critical)
fp = open('/tmp/hfosdebugger_' + str(
event.user.useruuid) + "_" + str(uuid4()), "w")
json.dump(event.data, fp, indent=True)
fp.close()
if event.data == "memdebug":
self.log("Memory hogs:", lvl=critical)
objgraph.show_most_common_types(limit=20)
if event.data == "growth":
self.log("Memory growth since last call:", lvl=critical)
objgraph.show_growth()
if event.data == "graph":
self._drawgraph()
if event.data == "exception":
class TestException(BaseException):
"""Generic exception to test exception monitoring"""
pass
raise TestException
if event.data == "heap":
self.log("Heap log:", self.heapy.heap(), lvl=critical)
if event.data == "buildfrontend":
self.log("Sending frontend build command")
self.fireEvent(frontendbuildrequest(force=True), "setup")
if event.data == "logtail":
self.fireEvent(logtailrequest(event.user, None, None,
event.client), "logger")
if event.data == "trigger_anchorwatch":
from hfos.anchor.anchorwatcher import cli_trigger_anchorwatch
self.fireEvent(cli_trigger_anchorwatch())
except Exception as e:
self.log("Exception during debug handling:", e, type(e),
lvl=critical) | [
"def",
"debugrequest",
"(",
"self",
",",
"event",
")",
":",
"try",
":",
"self",
".",
"log",
"(",
"\"Event: \"",
",",
"event",
".",
"__dict__",
",",
"lvl",
"=",
"critical",
")",
"if",
"event",
".",
"data",
"==",
"\"storejson\"",
":",
"self",
".",
"log",
"(",
"\"Storing received object to /tmp\"",
",",
"lvl",
"=",
"critical",
")",
"fp",
"=",
"open",
"(",
"'/tmp/hfosdebugger_'",
"+",
"str",
"(",
"event",
".",
"user",
".",
"useruuid",
")",
"+",
"\"_\"",
"+",
"str",
"(",
"uuid4",
"(",
")",
")",
",",
"\"w\"",
")",
"json",
".",
"dump",
"(",
"event",
".",
"data",
",",
"fp",
",",
"indent",
"=",
"True",
")",
"fp",
".",
"close",
"(",
")",
"if",
"event",
".",
"data",
"==",
"\"memdebug\"",
":",
"self",
".",
"log",
"(",
"\"Memory hogs:\"",
",",
"lvl",
"=",
"critical",
")",
"objgraph",
".",
"show_most_common_types",
"(",
"limit",
"=",
"20",
")",
"if",
"event",
".",
"data",
"==",
"\"growth\"",
":",
"self",
".",
"log",
"(",
"\"Memory growth since last call:\"",
",",
"lvl",
"=",
"critical",
")",
"objgraph",
".",
"show_growth",
"(",
")",
"if",
"event",
".",
"data",
"==",
"\"graph\"",
":",
"self",
".",
"_drawgraph",
"(",
")",
"if",
"event",
".",
"data",
"==",
"\"exception\"",
":",
"class",
"TestException",
"(",
"BaseException",
")",
":",
"\"\"\"Generic exception to test exception monitoring\"\"\"",
"pass",
"raise",
"TestException",
"if",
"event",
".",
"data",
"==",
"\"heap\"",
":",
"self",
".",
"log",
"(",
"\"Heap log:\"",
",",
"self",
".",
"heapy",
".",
"heap",
"(",
")",
",",
"lvl",
"=",
"critical",
")",
"if",
"event",
".",
"data",
"==",
"\"buildfrontend\"",
":",
"self",
".",
"log",
"(",
"\"Sending frontend build command\"",
")",
"self",
".",
"fireEvent",
"(",
"frontendbuildrequest",
"(",
"force",
"=",
"True",
")",
",",
"\"setup\"",
")",
"if",
"event",
".",
"data",
"==",
"\"logtail\"",
":",
"self",
".",
"fireEvent",
"(",
"logtailrequest",
"(",
"event",
".",
"user",
",",
"None",
",",
"None",
",",
"event",
".",
"client",
")",
",",
"\"logger\"",
")",
"if",
"event",
".",
"data",
"==",
"\"trigger_anchorwatch\"",
":",
"from",
"hfos",
".",
"anchor",
".",
"anchorwatcher",
"import",
"cli_trigger_anchorwatch",
"self",
".",
"fireEvent",
"(",
"cli_trigger_anchorwatch",
"(",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"\"Exception during debug handling:\"",
",",
"e",
",",
"type",
"(",
"e",
")",
",",
"lvl",
"=",
"critical",
")"
] | Handler for client-side debug requests | [
"Handler",
"for",
"client",
"-",
"side",
"debug",
"requests"
] | python | train |
geertj/gruvi | lib/gruvi/ssl.py | https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/ssl.py#L494-L500 | def close(self):
"""Cleanly shut down the SSL protocol and close the transport."""
if self._closing or self._handle.closed:
return
self._closing = True
self._write_backlog.append([b'', False])
self._process_write_backlog() | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_closing",
"or",
"self",
".",
"_handle",
".",
"closed",
":",
"return",
"self",
".",
"_closing",
"=",
"True",
"self",
".",
"_write_backlog",
".",
"append",
"(",
"[",
"b''",
",",
"False",
"]",
")",
"self",
".",
"_process_write_backlog",
"(",
")"
] | Cleanly shut down the SSL protocol and close the transport. | [
"Cleanly",
"shut",
"down",
"the",
"SSL",
"protocol",
"and",
"close",
"the",
"transport",
"."
] | python | train |
django-extensions/django-extensions | django_extensions/management/commands/export_emails.py | https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/management/commands/export_emails.py#L72-L78 | def address(self, qs):
"""
Single entry per line in the format of:
"full name" <[email protected]>;
"""
self.stdout.write("\n".join('"%s" <%s>;' % (full_name(**ent), ent['email']) for ent in qs))
self.stdout.write("\n") | [
"def",
"address",
"(",
"self",
",",
"qs",
")",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"'\"%s\" <%s>;'",
"%",
"(",
"full_name",
"(",
"*",
"*",
"ent",
")",
",",
"ent",
"[",
"'email'",
"]",
")",
"for",
"ent",
"in",
"qs",
")",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"\"\\n\"",
")"
] | Single entry per line in the format of:
"full name" <[email protected]>; | [
"Single",
"entry",
"per",
"line",
"in",
"the",
"format",
"of",
":",
"full",
"name",
"<my"
] | python | train |
svenevs/exhale | exhale/graph.py | https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L3747-L3830 | def toConsole(self):
'''
Convenience function for printing out the entire API being generated to the
console. Unused in the release, but is helpful for debugging ;)
'''
fmt_spec = {
"class": utils.AnsiColors.BOLD_MAGENTA,
"struct": utils.AnsiColors.BOLD_CYAN,
"define": utils.AnsiColors.BOLD_YELLOW,
"enum": utils.AnsiColors.BOLD_MAGENTA,
"enumvalue": utils.AnsiColors.BOLD_RED, # red means unused in framework
"function": utils.AnsiColors.BOLD_CYAN,
"file": utils.AnsiColors.BOLD_YELLOW,
"dir": utils.AnsiColors.BOLD_MAGENTA,
"group": utils.AnsiColors.BOLD_RED, # red means unused in framework
"namespace": utils.AnsiColors.BOLD_CYAN,
"typedef": utils.AnsiColors.BOLD_YELLOW,
"union": utils.AnsiColors.BOLD_MAGENTA,
"variable": utils.AnsiColors.BOLD_CYAN
}
self.consoleFormat(
"{0} and {1}".format(
utils._use_color("Classes", fmt_spec["class"], sys.stderr),
utils._use_color("Structs", fmt_spec["struct"], sys.stderr),
),
self.class_like,
fmt_spec
)
self.consoleFormat(
utils._use_color("Defines", fmt_spec["define"], sys.stderr),
self.defines,
fmt_spec
)
self.consoleFormat(
utils._use_color("Enums", fmt_spec["enum"], sys.stderr),
self.enums,
fmt_spec
)
self.consoleFormat(
utils._use_color("Enum Values (unused)", fmt_spec["enumvalue"], sys.stderr),
self.enum_values,
fmt_spec
)
self.consoleFormat(
utils._use_color("Functions", fmt_spec["function"], sys.stderr),
self.functions,
fmt_spec
)
self.consoleFormat(
utils._use_color("Files", fmt_spec["file"], sys.stderr),
self.files,
fmt_spec
)
self.consoleFormat(
utils._use_color("Directories", fmt_spec["dir"], sys.stderr),
self.dirs,
fmt_spec
)
self.consoleFormat(
utils._use_color("Groups (unused)", fmt_spec["group"], sys.stderr),
self.groups,
fmt_spec
)
self.consoleFormat(
utils._use_color("Namespaces", fmt_spec["namespace"], sys.stderr),
self.namespaces,
fmt_spec
)
self.consoleFormat(
utils._use_color("Typedefs", fmt_spec["typedef"], sys.stderr),
self.typedefs,
fmt_spec
)
self.consoleFormat(
utils._use_color("Unions", fmt_spec["union"], sys.stderr),
self.unions,
fmt_spec
)
self.consoleFormat(
utils._use_color("Variables", fmt_spec["variable"], sys.stderr),
self.variables,
fmt_spec
) | [
"def",
"toConsole",
"(",
"self",
")",
":",
"fmt_spec",
"=",
"{",
"\"class\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_MAGENTA",
",",
"\"struct\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_CYAN",
",",
"\"define\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_YELLOW",
",",
"\"enum\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_MAGENTA",
",",
"\"enumvalue\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_RED",
",",
"# red means unused in framework",
"\"function\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_CYAN",
",",
"\"file\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_YELLOW",
",",
"\"dir\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_MAGENTA",
",",
"\"group\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_RED",
",",
"# red means unused in framework",
"\"namespace\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_CYAN",
",",
"\"typedef\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_YELLOW",
",",
"\"union\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_MAGENTA",
",",
"\"variable\"",
":",
"utils",
".",
"AnsiColors",
".",
"BOLD_CYAN",
"}",
"self",
".",
"consoleFormat",
"(",
"\"{0} and {1}\"",
".",
"format",
"(",
"utils",
".",
"_use_color",
"(",
"\"Classes\"",
",",
"fmt_spec",
"[",
"\"class\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"utils",
".",
"_use_color",
"(",
"\"Structs\"",
",",
"fmt_spec",
"[",
"\"struct\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
")",
",",
"self",
".",
"class_like",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Defines\"",
",",
"fmt_spec",
"[",
"\"define\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"defines",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Enums\"",
",",
"fmt_spec",
"[",
"\"enum\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"enums",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Enum Values (unused)\"",
",",
"fmt_spec",
"[",
"\"enumvalue\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"enum_values",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Functions\"",
",",
"fmt_spec",
"[",
"\"function\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"functions",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Files\"",
",",
"fmt_spec",
"[",
"\"file\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"files",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Directories\"",
",",
"fmt_spec",
"[",
"\"dir\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"dirs",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Groups (unused)\"",
",",
"fmt_spec",
"[",
"\"group\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"groups",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Namespaces\"",
",",
"fmt_spec",
"[",
"\"namespace\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"namespaces",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Typedefs\"",
",",
"fmt_spec",
"[",
"\"typedef\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"typedefs",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Unions\"",
",",
"fmt_spec",
"[",
"\"union\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"unions",
",",
"fmt_spec",
")",
"self",
".",
"consoleFormat",
"(",
"utils",
".",
"_use_color",
"(",
"\"Variables\"",
",",
"fmt_spec",
"[",
"\"variable\"",
"]",
",",
"sys",
".",
"stderr",
")",
",",
"self",
".",
"variables",
",",
"fmt_spec",
")"
] | Convenience function for printing out the entire API being generated to the
console. Unused in the release, but is helpful for debugging ;) | [
"Convenience",
"function",
"for",
"printing",
"out",
"the",
"entire",
"API",
"being",
"generated",
"to",
"the",
"console",
".",
"Unused",
"in",
"the",
"release",
"but",
"is",
"helpful",
"for",
"debugging",
";",
")"
] | python | train |
googleapis/google-cloud-python | spanner/google/cloud/spanner_v1/streamed.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/streamed.py#L96-L109 | def _merge_values(self, values):
"""Merge values into rows.
:type values: list of :class:`~google.protobuf.struct_pb2.Value`
:param values: non-chunked values from partial result set.
"""
width = len(self.fields)
for value in values:
index = len(self._current_row)
field = self.fields[index]
self._current_row.append(_parse_value_pb(value, field.type))
if len(self._current_row) == width:
self._rows.append(self._current_row)
self._current_row = [] | [
"def",
"_merge_values",
"(",
"self",
",",
"values",
")",
":",
"width",
"=",
"len",
"(",
"self",
".",
"fields",
")",
"for",
"value",
"in",
"values",
":",
"index",
"=",
"len",
"(",
"self",
".",
"_current_row",
")",
"field",
"=",
"self",
".",
"fields",
"[",
"index",
"]",
"self",
".",
"_current_row",
".",
"append",
"(",
"_parse_value_pb",
"(",
"value",
",",
"field",
".",
"type",
")",
")",
"if",
"len",
"(",
"self",
".",
"_current_row",
")",
"==",
"width",
":",
"self",
".",
"_rows",
".",
"append",
"(",
"self",
".",
"_current_row",
")",
"self",
".",
"_current_row",
"=",
"[",
"]"
] | Merge values into rows.
:type values: list of :class:`~google.protobuf.struct_pb2.Value`
:param values: non-chunked values from partial result set. | [
"Merge",
"values",
"into",
"rows",
"."
] | python | train |
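
The _merge_values record above appends streamed values to a partial row and
emits the row once it holds one value per column. The same chunking logic on
plain Python lists (field count and values are invented):

width = 3                       # number of columns in the result set
rows, current_row = [], []
for value in ['a', 1, True, 'b', 2, False]:
    current_row.append(value)
    if len(current_row) == width:
        rows.append(current_row)
        current_row = []
print(rows)         # [['a', 1, True], ['b', 2, False]]
print(current_row)  # [] -- a partial row would wait for the next chunk
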
rwl/pylon | pylon/io/psat.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/psat.py#L369-L381 | def push_bus(self, tokens):
""" Adds a Bus object to the case.
"""
logger.debug("Pushing bus data: %s" % tokens)
bus = Bus()
bus.name = tokens["bus_no"]
bus.v_magnitude = tokens["v_magnitude"]
bus.v_angle = tokens["v_angle"]
self.case.buses.append(bus) | [
"def",
"push_bus",
"(",
"self",
",",
"tokens",
")",
":",
"logger",
".",
"debug",
"(",
"\"Pushing bus data: %s\"",
"%",
"tokens",
")",
"bus",
"=",
"Bus",
"(",
")",
"bus",
".",
"name",
"=",
"tokens",
"[",
"\"bus_no\"",
"]",
"bus",
".",
"v_magnitude",
"=",
"tokens",
"[",
"\"v_magnitude\"",
"]",
"bus",
".",
"v_angle",
"=",
"tokens",
"[",
"\"v_angle\"",
"]",
"bus",
".",
"v_magnitude",
"=",
"tokens",
"[",
"\"v_magnitude\"",
"]",
"bus",
".",
"v_angle",
"=",
"tokens",
"[",
"\"v_angle\"",
"]",
"self",
".",
"case",
".",
"buses",
".",
"append",
"(",
"bus",
")"
] | Adds a Bus object to the case. | [
"Adds",
"a",
"Bus",
"object",
"to",
"the",
"case",
"."
] | python | train |
data61/clkhash | clkhash/bloomfilter.py | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/bloomfilter.py#L258-L286 | def fold_xor(bloomfilter, # type: bitarray
folds # type: int
):
# type: (...) -> bitarray
""" Performs XOR folding on a Bloom filter.
If the length of the original Bloom filter is n and we perform
r folds, then the length of the resulting filter is n / 2 ** r.
:param bloomfilter: Bloom filter to fold
:param folds: number of folds
:return: folded bloom filter
"""
if len(bloomfilter) % 2 ** folds != 0:
msg = ('The length of the bloom filter is {length}. It is not '
'divisible by 2 ** {folds}, so it cannot be folded {folds} '
'times.'
.format(length=len(bloomfilter), folds=folds))
raise ValueError(msg)
for _ in range(folds):
bf1 = bloomfilter[:len(bloomfilter) // 2]
bf2 = bloomfilter[len(bloomfilter) // 2:]
bloomfilter = bf1 ^ bf2
return bloomfilter | [
"def",
"fold_xor",
"(",
"bloomfilter",
",",
"# type: bitarray",
"folds",
"# type: int",
")",
":",
"# type: (...) -> bitarray",
"if",
"len",
"(",
"bloomfilter",
")",
"%",
"2",
"**",
"folds",
"!=",
"0",
":",
"msg",
"=",
"(",
"'The length of the bloom filter is {length}. It is not '",
"'divisible by 2 ** {folds}, so it cannot be folded {folds} '",
"'times.'",
".",
"format",
"(",
"length",
"=",
"len",
"(",
"bloomfilter",
")",
",",
"folds",
"=",
"folds",
")",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"for",
"_",
"in",
"range",
"(",
"folds",
")",
":",
"bf1",
"=",
"bloomfilter",
"[",
":",
"len",
"(",
"bloomfilter",
")",
"//",
"2",
"]",
"bf2",
"=",
"bloomfilter",
"[",
"len",
"(",
"bloomfilter",
")",
"//",
"2",
":",
"]",
"bloomfilter",
"=",
"bf1",
"^",
"bf2",
"return",
"bloomfilter"
] | Performs XOR folding on a Bloom filter.
If the length of the original Bloom filter is n and we perform
r folds, then the length of the resulting filter is n / 2 ** r.
:param bloomfilter: Bloom filter to fold
:param folds: number of folds
:return: folded bloom filter | [
"Performs",
"XOR",
"folding",
"on",
"a",
"Bloom",
"filter",
"."
] | python | train |
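
One fold in the record above XORs the first half of the filter with the
second half, halving its length. A sketch of a single fold (assuming the
bitarray package is installed):

from bitarray import bitarray

bf = bitarray('10110010')   # length 8, so up to three folds are possible
bf1 = bf[:len(bf) // 2]     # 1011
bf2 = bf[len(bf) // 2:]     # 0010
print(bf1 ^ bf2)            # bitarray('1001')
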
Nixiware/viper | nx/viper/application.py | https://github.com/Nixiware/viper/blob/fbe6057facd8d46103e9955880dfd99e63b7acb3/nx/viper/application.py#L173-L186 | def getModel(self, modelIdentifier):
"""
Return the requested model.
:param modelIdentifier: <str> model identifier
:return: <object> model instance
"""
if modelIdentifier in self._models:
return self._models[modelIdentifier]
else:
message = "Application - getModel() - " \
"Model with identifier {} does not exist." \
.format(modelIdentifier)
raise Exception(message) | [
"def",
"getModel",
"(",
"self",
",",
"modelIdentifier",
")",
":",
"if",
"modelIdentifier",
"in",
"self",
".",
"_models",
":",
"return",
"self",
".",
"_models",
"[",
"modelIdentifier",
"]",
"else",
":",
"message",
"=",
"\"Application - getModel() - \"",
"\"Model with identifier {} does not exist.\"",
".",
"format",
"(",
"modelIdentifier",
")",
"raise",
"Exception",
"(",
"message",
")"
] | Return the requested model.
:param modelIdentifier: <str> model identifier
:return: <object> model instance | [
"Return",
"the",
"requested",
"model",
"."
] | python | train |
pip-services3-python/pip-services3-commons-python | pip_services3_commons/data/AnyValueMap.py | https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/AnyValueMap.py#L365-L377 | def get_as_type(self, key, value_type):
"""
Converts map element into a value defined by specified typecode.
If conversion is not possible it returns default value for the specified type.
:param key: an index of element to get.
:param value_type: the TypeCode that defined the type of the result
:return: element value defined by the typecode or default if conversion is not supported.
"""
value = self.get(key)
return TypeConverter.to_type(value_type, value) | [
"def",
"get_as_type",
"(",
"self",
",",
"key",
",",
"value_type",
")",
":",
"value",
"=",
"self",
".",
"get",
"(",
"key",
")",
"return",
"TypeConverter",
".",
"to_type",
"(",
"value_type",
",",
"value",
")"
] | Converts map element into a value defined by specified typecode.
If conversion is not possible it returns default value for the specified type.
:param key: an index of element to get.
:param value_type: the TypeCode that defined the type of the result
:return: element value defined by the typecode or default if conversion is not supported. | [
"Converts",
"map",
"element",
"into",
"a",
"value",
"defined",
"by",
"specied",
"typecode",
".",
"If",
"conversion",
"is",
"not",
"possible",
"it",
"returns",
"default",
"value",
"for",
"the",
"specified",
"type",
"."
] | python | train |
fossasia/knittingpattern | knittingpattern/convert/KnittingPatternToSVG.py | https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/convert/KnittingPatternToSVG.py#L118-L131 | def _compute_scale(self, instruction_id, svg_dict):
"""Compute the scale of an instruction svg.
Compute the scale using the bounding box stored in the
:paramref:`svg_dict`. The scale is saved in a dictionary using
:paramref:`instruction_id` as key.
:param str instruction_id: id identifying a symbol in the defs
:param dict svg_dict: dictionary containing the SVG for the
instruction currently processed
"""
bbox = list(map(float, svg_dict["svg"]["@viewBox"].split()))
scale = self._zoom / (bbox[3] - bbox[1])
self._symbol_id_to_scale[instruction_id] = scale | [
"def",
"_compute_scale",
"(",
"self",
",",
"instruction_id",
",",
"svg_dict",
")",
":",
"bbox",
"=",
"list",
"(",
"map",
"(",
"float",
",",
"svg_dict",
"[",
"\"svg\"",
"]",
"[",
"\"@viewBox\"",
"]",
".",
"split",
"(",
")",
")",
")",
"scale",
"=",
"self",
".",
"_zoom",
"/",
"(",
"bbox",
"[",
"3",
"]",
"-",
"bbox",
"[",
"1",
"]",
")",
"self",
".",
"_symbol_id_to_scale",
"[",
"instruction_id",
"]",
"=",
"scale"
] | Compute the scale of an instruction svg.
Compute the scale using the bounding box stored in the
:paramref:`svg_dict`. The scale is saved in a dictionary using
:paramref:`instruction_id` as key.
:param str instruction_id: id identifying a symbol in the defs
:param dict svg_dict: dictionary containing the SVG for the
instruction currently processed | [
"Compute",
"the",
"scale",
"of",
"an",
"instruction",
"svg",
"."
] | python | valid |
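
The scale in the record above comes from the SVG viewBox string: the symbol's
vertical extent (bbox[3] - bbox[1]) is mapped onto the zoom level. The same
computation in isolation (viewBox and zoom values are invented):

view_box = "0 0 10 25"                    # "min-x min-y width height"
bbox = list(map(float, view_box.split()))
zoom = 50.0
scale = zoom / (bbox[3] - bbox[1])
print(scale)                              # 2.0
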
tanghaibao/jcvi | jcvi/assembly/opticalmap.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/opticalmap.py#L247-L304 | def chimera(args):
"""
%prog chimera bedfile
Scan the bed file to break scaffolds that multi-map.
"""
p = OptionParser(chimera.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
bed = Bed(bedfile)
selected = select_bed(bed)
mapped = defaultdict(set) # scaffold => chr
chimerabed = "chimera.bed"
fw = open(chimerabed, "w")
for b in selected:
scf = range_parse(b.accn).seqid
chr = b.seqid
mapped[scf].add(chr)
nchimera = 0
for s, chrs in sorted(mapped.items()):
if len(chrs) == 1:
continue
print("=" * 80, file=sys.stderr)
print("{0} mapped to multiple locations: {1}".\
format(s, ",".join(sorted(chrs))), file=sys.stderr)
ranges = []
for b in selected:
rr = range_parse(b.accn)
scf = rr.seqid
if scf == s:
print(b, file=sys.stderr)
ranges.append(rr)
# Identify breakpoints
ranges.sort(key=lambda x: (x.seqid, x.start, x.end))
for a, b in pairwise(ranges):
seqid = a.seqid
if seqid != b.seqid:
continue
start, end = a.end, b.start
if start > end:
start, end = end, start
chimeraline = "\t".join(str(x) for x in (seqid, start, end))
print(chimeraline, file=fw)
print(chimeraline, file=sys.stderr)
nchimera += 1
fw.close()
logging.debug("A total of {0} junctions written to `{1}`.".\
format(nchimera, chimerabed)) | [
"def",
"chimera",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"chimera",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"bedfile",
",",
"=",
"args",
"bed",
"=",
"Bed",
"(",
"bedfile",
")",
"selected",
"=",
"select_bed",
"(",
"bed",
")",
"mapped",
"=",
"defaultdict",
"(",
"set",
")",
"# scaffold => chr",
"chimerabed",
"=",
"\"chimera.bed\"",
"fw",
"=",
"open",
"(",
"chimerabed",
",",
"\"w\"",
")",
"for",
"b",
"in",
"selected",
":",
"scf",
"=",
"range_parse",
"(",
"b",
".",
"accn",
")",
".",
"seqid",
"chr",
"=",
"b",
".",
"seqid",
"mapped",
"[",
"scf",
"]",
".",
"add",
"(",
"chr",
")",
"nchimera",
"=",
"0",
"for",
"s",
",",
"chrs",
"in",
"sorted",
"(",
"mapped",
".",
"items",
"(",
")",
")",
":",
"if",
"len",
"(",
"chrs",
")",
"==",
"1",
":",
"continue",
"print",
"(",
"\"=\"",
"*",
"80",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"\"{0} mapped to multiple locations: {1}\"",
".",
"format",
"(",
"s",
",",
"\",\"",
".",
"join",
"(",
"sorted",
"(",
"chrs",
")",
")",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"ranges",
"=",
"[",
"]",
"for",
"b",
"in",
"selected",
":",
"rr",
"=",
"range_parse",
"(",
"b",
".",
"accn",
")",
"scf",
"=",
"rr",
".",
"seqid",
"if",
"scf",
"==",
"s",
":",
"print",
"(",
"b",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"ranges",
".",
"append",
"(",
"rr",
")",
"# Identify breakpoints",
"ranges",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
".",
"seqid",
",",
"x",
".",
"start",
",",
"x",
".",
"end",
")",
")",
"for",
"a",
",",
"b",
"in",
"pairwise",
"(",
"ranges",
")",
":",
"seqid",
"=",
"a",
".",
"seqid",
"if",
"seqid",
"!=",
"b",
".",
"seqid",
":",
"continue",
"start",
",",
"end",
"=",
"a",
".",
"end",
",",
"b",
".",
"start",
"if",
"start",
">",
"end",
":",
"start",
",",
"end",
"=",
"end",
",",
"start",
"chimeraline",
"=",
"\"\\t\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"seqid",
",",
"start",
",",
"end",
")",
")",
"print",
"(",
"chimeraline",
",",
"file",
"=",
"fw",
")",
"print",
"(",
"chimeraline",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"nchimera",
"+=",
"1",
"fw",
".",
"close",
"(",
")",
"logging",
".",
"debug",
"(",
"\"A total of {0} junctions written to `{1}`.\"",
".",
"format",
"(",
"nchimera",
",",
"chimerabed",
")",
")"
] | %prog chimera bedfile
Scan the bed file to break scaffolds that multi-map. | [
"%prog",
"chimera",
"bedfile"
] | python | train |
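
The chimera record above sorts a scaffold's hits and reports the gap between
each same-sequence pair as a candidate breakpoint. A standalone sketch of
that pairwise scan (the pairwise helper is redefined here to stay
self-contained, and the coordinates are invented):

from itertools import tee

def pairwise(iterable):
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

ranges = sorted([('chr1', 100, 200), ('chr1', 500, 600), ('chr2', 50, 80)])
for a, b in pairwise(ranges):
    if a[0] != b[0]:
        continue                 # only flag gaps on the same sequence
    start, end = a[2], b[1]
    if start > end:
        start, end = end, start
    print('\t'.join(map(str, (a[0], start, end))))  # chr1  200  500
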
5monkeys/django-bananas | bananas/environment.py | https://github.com/5monkeys/django-bananas/blob/cfd318c737f6c4580036c13d2acf32bca96654bf/bananas/environment.py#L40-L54 | def parse_bool(value):
"""
Parse string to bool.
:param str value: String value to parse as bool
:return bool:
"""
boolean = parse_str(value).capitalize()
if boolean in ("True", "Yes", "On", "1"):
return True
elif boolean in ("False", "No", "Off", "0"):
return False
else:
raise ValueError('Unable to parse boolean value "{}"'.format(value)) | [
"def",
"parse_bool",
"(",
"value",
")",
":",
"boolean",
"=",
"parse_str",
"(",
"value",
")",
".",
"capitalize",
"(",
")",
"if",
"boolean",
"in",
"(",
"\"True\"",
",",
"\"Yes\"",
",",
"\"On\"",
",",
"\"1\"",
")",
":",
"return",
"True",
"elif",
"boolean",
"in",
"(",
"\"False\"",
",",
"\"No\"",
",",
"\"Off\"",
",",
"\"0\"",
")",
":",
"return",
"False",
"else",
":",
"raise",
"ValueError",
"(",
"'Unable to parse boolean value \"{}\"'",
".",
"format",
"(",
"value",
")",
")"
] | Parse string to bool.
:param str value: String value to parse as bool
:return bool: | [
"Parse",
"string",
"to",
"bool",
"."
] | python | test |
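
The parse_bool record above maps several spellings onto True/False. A
simplified standalone version (assuming parse_str does little more than
return the stripped string):

def parse_bool(value):
    boolean = value.strip().capitalize()
    if boolean in ("True", "Yes", "On", "1"):
        return True
    if boolean in ("False", "No", "Off", "0"):
        return False
    raise ValueError('Unable to parse boolean value "{}"'.format(value))

print(parse_bool("yes"), parse_bool(" ON "), parse_bool("0"))  # True True False
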
Gandi/gandi.cli | gandi/cli/modules/webacc.py | https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/webacc.py#L84-L101 | def update(cls, resource, new_name, algorithm, ssl_enable, ssl_disable):
""" Update a webaccelerator"""
params = {}
if new_name:
params['name'] = new_name
if algorithm:
params['lb'] = {'algorithm': algorithm}
if ssl_enable:
params['ssl_enable'] = ssl_enable
if ssl_disable:
params['ssl_enable'] = False
result = cls.call('hosting.rproxy.update', cls.usable_id(resource),
params)
cls.echo('Updating your webaccelerator')
cls.display_progress(result)
cls.echo('The webaccelerator has been updated')
return result | [
"def",
"update",
"(",
"cls",
",",
"resource",
",",
"new_name",
",",
"algorithm",
",",
"ssl_enable",
",",
"ssl_disable",
")",
":",
"params",
"=",
"{",
"}",
"if",
"new_name",
":",
"params",
"[",
"'name'",
"]",
"=",
"new_name",
"if",
"algorithm",
":",
"params",
"[",
"'lb'",
"]",
"=",
"{",
"'algorithm'",
":",
"algorithm",
"}",
"if",
"ssl_enable",
":",
"params",
"[",
"'ssl_enable'",
"]",
"=",
"ssl_enable",
"if",
"ssl_disable",
":",
"params",
"[",
"'ssl_enable'",
"]",
"=",
"False",
"result",
"=",
"cls",
".",
"call",
"(",
"'hosting.rproxy.update'",
",",
"cls",
".",
"usable_id",
"(",
"resource",
")",
",",
"params",
")",
"cls",
".",
"echo",
"(",
"'Updating your webaccelerator'",
")",
"cls",
".",
"display_progress",
"(",
"result",
")",
"cls",
".",
"echo",
"(",
"'The webaccelerator have been udated'",
")",
"return",
"result"
] | Update a webaccelerator | [
"Update",
"a",
"webaccelerator"
] | python | train |
sibirrer/lenstronomy | lenstronomy/LensModel/Profiles/nfw_ellipse.py | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/nfw_ellipse.py#L25-L42 | def function(self, x, y, Rs, theta_Rs, e1, e2, center_x=0, center_y=0):
"""
returns double integral of NFW profile
"""
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
x_shift = x - center_x
y_shift = y - center_y
cos_phi = np.cos(phi_G)
sin_phi = np.sin(phi_G)
e = min(abs(1. - q), 0.99)
xt1 = (cos_phi*x_shift+sin_phi*y_shift)*np.sqrt(1 - e)
xt2 = (-sin_phi*x_shift+cos_phi*y_shift)*np.sqrt(1 + e)
R_ = np.sqrt(xt1**2 + xt2**2)
rho0_input = self.nfw._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs)
if Rs < 0.0000001:
Rs = 0.0000001
f_ = self.nfw.nfwPot(R_, Rs, rho0_input)
return f_ | [
"def",
"function",
"(",
"self",
",",
"x",
",",
"y",
",",
"Rs",
",",
"theta_Rs",
",",
"e1",
",",
"e2",
",",
"center_x",
"=",
"0",
",",
"center_y",
"=",
"0",
")",
":",
"phi_G",
",",
"q",
"=",
"param_util",
".",
"ellipticity2phi_q",
"(",
"e1",
",",
"e2",
")",
"x_shift",
"=",
"x",
"-",
"center_x",
"y_shift",
"=",
"y",
"-",
"center_y",
"cos_phi",
"=",
"np",
".",
"cos",
"(",
"phi_G",
")",
"sin_phi",
"=",
"np",
".",
"sin",
"(",
"phi_G",
")",
"e",
"=",
"min",
"(",
"abs",
"(",
"1.",
"-",
"q",
")",
",",
"0.99",
")",
"xt1",
"=",
"(",
"cos_phi",
"*",
"x_shift",
"+",
"sin_phi",
"*",
"y_shift",
")",
"*",
"np",
".",
"sqrt",
"(",
"1",
"-",
"e",
")",
"xt2",
"=",
"(",
"-",
"sin_phi",
"*",
"x_shift",
"+",
"cos_phi",
"*",
"y_shift",
")",
"*",
"np",
".",
"sqrt",
"(",
"1",
"+",
"e",
")",
"R_",
"=",
"np",
".",
"sqrt",
"(",
"xt1",
"**",
"2",
"+",
"xt2",
"**",
"2",
")",
"rho0_input",
"=",
"self",
".",
"nfw",
".",
"_alpha2rho0",
"(",
"theta_Rs",
"=",
"theta_Rs",
",",
"Rs",
"=",
"Rs",
")",
"if",
"Rs",
"<",
"0.0000001",
":",
"Rs",
"=",
"0.0000001",
"f_",
"=",
"self",
".",
"nfw",
".",
"nfwPot",
"(",
"R_",
",",
"Rs",
",",
"rho0_input",
")",
"return",
"f_"
] | returns double integral of NFW profile | [
"returns",
"double",
"integral",
"of",
"NFW",
"profile"
] | python | train |
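The ellipticity in the profile above enters only through the rotated, rescaled coordinates xt1 and xt2. A standalone numpy check of that mapping, with illustrative values for the orientation angle and axis ratio, might look like this sketch:

    # Standalone check of the rotated/rescaled coordinates used in function();
    # phi_G and q values are illustrative only.
    import numpy as np

    phi_G, q = 0.3, 0.8
    e = min(abs(1. - q), 0.99)
    x_shift, y_shift = 1.0, 0.5
    xt1 = (np.cos(phi_G) * x_shift + np.sin(phi_G) * y_shift) * np.sqrt(1 - e)
    xt2 = (-np.sin(phi_G) * x_shift + np.cos(phi_G) * y_shift) * np.sqrt(1 + e)
    print(np.sqrt(xt1**2 + xt2**2))  # elliptical radius R_ fed into nfwPot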
insightindustry/validator-collection | validator_collection/checkers.py | https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/checkers.py#L89-L148 | def are_equivalent(*args, **kwargs):
"""Indicate if arguments passed to this function are equivalent.
.. hint::
This checker operates recursively on the members contained within iterables
and :class:`dict <python:dict>` objects.
.. caution::
If you only pass one argument to this checker - even if it is an iterable -
the checker will *always* return ``True``.
To evaluate members of an iterable for equivalence, you should instead
unpack the iterable into the function like so:
.. code-block:: python
obj = [1, 1, 1, 2]
result = are_equivalent(*obj)
# Will return ``False`` by unpacking and evaluating the iterable's members
result = are_equivalent(obj)
# Will always return True
:param args: One or more values, passed as positional arguments.
:returns: ``True`` if ``args`` are equivalent, and ``False`` if not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
if len(args) == 1:
return True
first_item = args[0]
for item in args[1:]:
if type(item) != type(first_item): # pylint: disable=C0123
return False
if isinstance(item, dict):
if not are_dicts_equivalent(item, first_item):
return False
elif hasattr(item, '__iter__') and not isinstance(item, (str, bytes, dict)):
if len(item) != len(first_item):
return False
for value in item:
if value not in first_item:
return False
for value in first_item:
if value not in item:
return False
else:
if item != first_item:
return False
return True | [
"def",
"are_equivalent",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"return",
"True",
"first_item",
"=",
"args",
"[",
"0",
"]",
"for",
"item",
"in",
"args",
"[",
"1",
":",
"]",
":",
"if",
"type",
"(",
"item",
")",
"!=",
"type",
"(",
"first_item",
")",
":",
"# pylint: disable=C0123",
"return",
"False",
"if",
"isinstance",
"(",
"item",
",",
"dict",
")",
":",
"if",
"not",
"are_dicts_equivalent",
"(",
"item",
",",
"first_item",
")",
":",
"return",
"False",
"elif",
"hasattr",
"(",
"item",
",",
"'__iter__'",
")",
"and",
"not",
"isinstance",
"(",
"item",
",",
"(",
"str",
",",
"bytes",
",",
"dict",
")",
")",
":",
"if",
"len",
"(",
"item",
")",
"!=",
"len",
"(",
"first_item",
")",
":",
"return",
"False",
"for",
"value",
"in",
"item",
":",
"if",
"value",
"not",
"in",
"first_item",
":",
"return",
"False",
"for",
"value",
"in",
"first_item",
":",
"if",
"value",
"not",
"in",
"item",
":",
"return",
"False",
"else",
":",
"if",
"item",
"!=",
"first_item",
":",
"return",
"False",
"return",
"True"
] | Indicate if arguments passed to this function are equivalent.
.. hint::
This checker operates recursively on the members contained within iterables
and :class:`dict <python:dict>` objects.
.. caution::
If you only pass one argument to this checker - even if it is an iterable -
the checker will *always* return ``True``.
To evaluate members of an iterable for equivalence, you should instead
unpack the iterable into the function like so:
.. code-block:: python
obj = [1, 1, 1, 2]
result = are_equivalent(*obj)
# Will return ``False`` by unpacking and evaluating the iterable's members
result = are_equivalent(obj)
# Will always return True
:param args: One or more values, passed as positional arguments.
:returns: ``True`` if ``args`` are equivalent, and ``False`` if not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator | [
"Indicate",
"if",
"arguments",
"passed",
"to",
"this",
"function",
"are",
"equivalent",
"."
] | python | train |
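The unpacking caveat in the docstring above is easy to trip over, so a small usage sketch may help; it assumes are_equivalent is importable from validator_collection.checkers, as the record's path suggests:

    # Usage sketch for are_equivalent (import path assumed from the record).
    from validator_collection.checkers import are_equivalent

    obj = [1, 1, 1, 2]
    print(are_equivalent(*obj))                # False: members compared after unpacking
    print(are_equivalent(obj))                 # True: one argument always passes
    print(are_equivalent({'a': 1}, {'a': 1}))  # True: dicts compared recursively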
sptonkin/fuzzyhashlib | fuzzyhashlib/__init__.py | https://github.com/sptonkin/fuzzyhashlib/blob/61999dcfb0893358a330f51d88e4fa494a91bce2/fuzzyhashlib/__init__.py#L291-L297 | def update(self, buf):
"""Update this hash object's state with the provided string."""
if self._final:
raise InvalidOperation("Cannot update finalised tlsh")
else:
self._buf_len += len(buf)
return self._tlsh.update(buf) | [
"def",
"update",
"(",
"self",
",",
"buf",
")",
":",
"if",
"self",
".",
"_final",
":",
"raise",
"InvalidOperation",
"(",
"\"Cannot update finalised tlsh\"",
")",
"else",
":",
"self",
".",
"_buf_len",
"+=",
"len",
"(",
"buf",
")",
"return",
"self",
".",
"_tlsh",
".",
"update",
"(",
"buf",
")"
] | Update this hash object's state with the provided string. | [
"Update",
"this",
"hash",
"object",
"s",
"state",
"with",
"the",
"provided",
"string",
"."
] | python | train |
sibson/vncdotool | vncdotool/rfb.py | https://github.com/sibson/vncdotool/blob/e133a8916efaa0f5ed421e0aa737196624635b0c/vncdotool/rfb.py#L667-L680 | def setKey(self, key):
"""RFB protocol for authentication requires client to encrypt
challenge sent by server with password using DES method. However,
bits in each byte of the password are put in reverse order before
using it as encryption key."""
newkey = []
for ki in range(len(key)):
bsrc = ord(key[ki])
btgt = 0
for i in range(8):
if bsrc & (1 << i):
btgt = btgt | (1 << 7-i)
newkey.append(chr(btgt))
super(RFBDes, self).setKey(newkey) | [
"def",
"setKey",
"(",
"self",
",",
"key",
")",
":",
"newkey",
"=",
"[",
"]",
"for",
"ki",
"in",
"range",
"(",
"len",
"(",
"key",
")",
")",
":",
"bsrc",
"=",
"ord",
"(",
"key",
"[",
"ki",
"]",
")",
"btgt",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"8",
")",
":",
"if",
"bsrc",
"&",
"(",
"1",
"<<",
"i",
")",
":",
"btgt",
"=",
"btgt",
"|",
"(",
"1",
"<<",
"7",
"-",
"i",
")",
"newkey",
".",
"append",
"(",
"chr",
"(",
"btgt",
")",
")",
"super",
"(",
"RFBDes",
",",
"self",
")",
".",
"setKey",
"(",
"newkey",
")"
] | RFB protocol for authentication requires client to encrypt
challenge sent by server with password using DES method. However,
bits in each byte of the password are put in reverse order before
using it as encryption key. | [
"RFB",
"protocol",
"for",
"authentication",
"requires",
"client",
"to",
"encrypt",
"challenge",
"sent",
"by",
"server",
"with",
"password",
"using",
"DES",
"method",
".",
"However",
"bits",
"in",
"each",
"byte",
"of",
"the",
"password",
"are",
"put",
"in",
"reverse",
"order",
"before",
"using",
"it",
"as",
"encryption",
"key",
"."
] | python | train |
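The per-byte bit reversal described in the docstring above can be verified in isolation, without any DES library; this is a minimal sketch of just that step:

    # Minimal sketch of the per-byte bit reversal applied to the VNC password.
    def reverse_bits(byte):
        out = 0
        for i in range(8):
            if byte & (1 << i):
                out |= 1 << (7 - i)
        return out

    assert reverse_bits(0b00000001) == 0b10000000
    assert reverse_bits(0b10110000) == 0b00001101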
westurner/pyrpo | pyrpo/pyrpo.py | https://github.com/westurner/pyrpo/blob/2a910af055dc405b761571a52ef87842397ddadf/pyrpo/pyrpo.py#L283-L292 | def relpath(self):
"""
Determine the relative path to this repository
Returns:
str: relative path to this repository
"""
here = os.path.abspath(os.path.curdir)
relpath = os.path.relpath(self.fpath, here)
return relpath | [
"def",
"relpath",
"(",
"self",
")",
":",
"here",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"curdir",
")",
"relpath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"self",
".",
"fpath",
",",
"here",
")",
"return",
"relpath"
] | Determine the relative path to this repository
Returns:
str: relative path to this repository | [
"Determine",
"the",
"relative",
"path",
"to",
"this",
"repository"
] | python | train |
chaoss/grimoirelab-sirmordred | sirmordred/task_enrich.py | https://github.com/chaoss/grimoirelab-sirmordred/blob/d6ac94d28d707fae23170064d078f1edf937d13e/sirmordred/task_enrich.py#L349-L368 | def retain_identities(self, retention_time):
"""Retain the identities in SortingHat based on the `retention_time`
value declared in the setup.cfg.
:param retention_time: maximum number of minutes wrt the current date to retain the SortingHat data
"""
enrich_es = self.conf['es_enrichment']['url']
sortinghat_db = self.db
current_data_source = self.get_backend(self.backend_section)
active_data_sources = self.config.get_active_data_sources()
if retention_time is None:
logger.debug("[identities retention] Retention policy disabled, no identities will be deleted.")
return
if retention_time <= 0:
logger.debug("[identities retention] Retention time must be greater than 0.")
return
retain_identities(retention_time, enrich_es, sortinghat_db, current_data_source, active_data_sources) | [
"def",
"retain_identities",
"(",
"self",
",",
"retention_time",
")",
":",
"enrich_es",
"=",
"self",
".",
"conf",
"[",
"'es_enrichment'",
"]",
"[",
"'url'",
"]",
"sortinghat_db",
"=",
"self",
".",
"db",
"current_data_source",
"=",
"self",
".",
"get_backend",
"(",
"self",
".",
"backend_section",
")",
"active_data_sources",
"=",
"self",
".",
"config",
".",
"get_active_data_sources",
"(",
")",
"if",
"retention_time",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"[identities retention] Retention policy disabled, no identities will be deleted.\"",
")",
"return",
"if",
"retention_time",
"<=",
"0",
":",
"logger",
".",
"debug",
"(",
"\"[identities retention] Retention time must be greater than 0.\"",
")",
"return",
"retain_identities",
"(",
"retention_time",
",",
"enrich_es",
",",
"sortinghat_db",
",",
"current_data_source",
",",
"active_data_sources",
")"
] | Retain the identities in SortingHat based on the `retention_time`
value declared in the setup.cfg.
:param retention_time: maximum number of minutes wrt the current date to retain the SortingHat data | [
"Retain",
"the",
"identities",
"in",
"SortingHat",
"based",
"on",
"the",
"retention_time",
"value",
"declared",
"in",
"the",
"setup",
".",
"cfg",
"."
] | python | valid |
SuperCowPowers/workbench | workbench/server/data_store.py | https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/server/data_store.py#L265-L278 | def has_sample(self, md5):
"""Checks if data store has this sample.
Args:
md5: The md5 digest of the required sample.
Returns:
True if sample with this md5 is present, else False.
"""
# The easiest thing is to simply get the sample and if that
# succeeds than return True, else return False
sample = self.get_sample(md5)
return True if sample else False | [
"def",
"has_sample",
"(",
"self",
",",
"md5",
")",
":",
"# The easiest thing is to simply get the sample and if that",
"# succeeds than return True, else return False",
"sample",
"=",
"self",
".",
"get_sample",
"(",
"md5",
")",
"return",
"True",
"if",
"sample",
"else",
"False"
] | Checks if data store has this sample.
Args:
md5: The md5 digest of the required sample.
Returns:
True if sample with this md5 is present, else False. | [
"Checks",
"if",
"data",
"store",
"has",
"this",
"sample",
"."
] | python | train |
uw-it-aca/uw-restclients-uwnetid | uw_uwnetid/subscription.py | https://github.com/uw-it-aca/uw-restclients-uwnetid/blob/58c78b564f9c920a8f8fd408eec959ddd5605b0b/uw_uwnetid/subscription.py#L46-L54 | def select_subscription(subs_code, subscriptions):
"""
Return the uwnetid.subscription object with the subs_code.
"""
if subs_code and subscriptions:
for subs in subscriptions:
if (subs.subscription_code == subs_code):
return subs
return None | [
"def",
"select_subscription",
"(",
"subs_code",
",",
"subscriptions",
")",
":",
"if",
"subs_code",
"and",
"subscriptions",
":",
"for",
"subs",
"in",
"subscriptions",
":",
"if",
"(",
"subs",
".",
"subscription_code",
"==",
"subs_code",
")",
":",
"return",
"subs",
"return",
"None"
] | Return the uwnetid.subscription object with the subs_code. | [
"Return",
"the",
"uwnetid",
".",
"subscription",
"object",
"with",
"the",
"subs_code",
"."
] | python | train |
night-crawler/django-docker-helpers | django_docker_helpers/cli/django/management/commands/run_configured_uwsgi.py | https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/cli/django/management/commands/run_configured_uwsgi.py#L9-L40 | def write_uwsgi_ini_cfg(fp: t.IO, cfg: dict):
"""
Writes into IO stream the uwsgi.ini file content (actually it does something strange, just look below).
uWSGI configs are likely to break INI (YAML, etc) specification (double key definition)
so it writes `cfg` object (dict) in "uWSGI Style".
>>> import sys
>>> cfg = {
... 'static-map': [
... '/static/=/application/static/',
... '/media/=/application/media/',
... '/usermedia/=/application/usermedia/']
... }
>>> write_uwsgi_ini_cfg(sys.stdout, cfg)
[uwsgi]
static-map = /static/=/application/static/
static-map = /media/=/application/media/
static-map = /usermedia/=/application/usermedia/
"""
fp.write(f'[uwsgi]\n')
for key, val in cfg.items():
if isinstance(val, bool):
val = str(val).lower()
if isinstance(val, list):
for v in val:
fp.write(f'{key} = {v}\n')
continue
fp.write(f'{key} = {val}\n') | [
"def",
"write_uwsgi_ini_cfg",
"(",
"fp",
":",
"t",
".",
"IO",
",",
"cfg",
":",
"dict",
")",
":",
"fp",
".",
"write",
"(",
"f'[uwsgi]\\n'",
")",
"for",
"key",
",",
"val",
"in",
"cfg",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"bool",
")",
":",
"val",
"=",
"str",
"(",
"val",
")",
".",
"lower",
"(",
")",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"for",
"v",
"in",
"val",
":",
"fp",
".",
"write",
"(",
"f'{key} = {v}\\n'",
")",
"continue",
"fp",
".",
"write",
"(",
"f'{key} = {val}\\n'",
")"
] | Writes into IO stream the uwsgi.ini file content (actually it does something strange, just look below).
uWSGI configs are likely to break INI (YAML, etc) specification (double key definition)
so it writes `cfg` object (dict) in "uWSGI Style".
>>> import sys
>>> cfg = {
... 'static-map': [
... '/static/=/application/static/',
... '/media/=/application/media/',
... '/usermedia/=/application/usermedia/']
... }
>>> write_uwsgi_ini_cfg(sys.stdout, cfg)
[uwsgi]
static-map = /static/=/application/static/
static-map = /media/=/application/media/
static-map = /usermedia/=/application/usermedia/ | [
"Writes",
"into",
"IO",
"stream",
"the",
"uwsgi",
".",
"ini",
"file",
"content",
"(",
"actually",
"it",
"does",
"smth",
"strange",
"just",
"look",
"below",
")",
"."
] | python | train |
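Because uWSGI accepts repeated keys, the writer above emits one line per list element and lowercases booleans. A minimal call, assuming the function above is in scope, with illustrative config values:

    # Illustrative call; config values are examples only (Python 3.6+ is
    # required for the f-strings used by write_uwsgi_ini_cfg).
    import io

    cfg = {
        'http': ':8000',
        'master': True,  # emitted as 'master = true'
        'static-map': ['/static/=/app/static/', '/media/=/app/media/'],
    }
    buf = io.StringIO()
    write_uwsgi_ini_cfg(buf, cfg)
    print(buf.getvalue())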
nugget/python-insteonplm | insteonplm/address.py | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/address.py#L124-L129 | def bytes(self):
"""Emit the address in bytes format."""
addrbyte = b'\x00\x00\x00'
if self.addr is not None:
addrbyte = self.addr
return addrbyte | [
"def",
"bytes",
"(",
"self",
")",
":",
"addrbyte",
"=",
"b'\\x00\\x00\\x00'",
"if",
"self",
".",
"addr",
"is",
"not",
"None",
":",
"addrbyte",
"=",
"self",
".",
"addr",
"return",
"addrbyte"
] | Emit the address in bytes format. | [
"Emit",
"the",
"address",
"in",
"bytes",
"format",
"."
] | python | train |
brean/python-pathfinding | pathfinding/finder/finder.py | https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/finder.py#L142-L166 | def find_path(self, start, end, grid):
"""
find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start.opened = True
open_list = [start]
while len(open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, open_list)
if path:
return path, self.runs
# failed to find path
return [], self.runs | [
"def",
"find_path",
"(",
"self",
",",
"start",
",",
"end",
",",
"grid",
")",
":",
"self",
".",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"# execution time limitation",
"self",
".",
"runs",
"=",
"0",
"# count number of iterations",
"start",
".",
"opened",
"=",
"True",
"open_list",
"=",
"[",
"start",
"]",
"while",
"len",
"(",
"open_list",
")",
">",
"0",
":",
"self",
".",
"runs",
"+=",
"1",
"self",
".",
"keep_running",
"(",
")",
"path",
"=",
"self",
".",
"check_neighbors",
"(",
"start",
",",
"end",
",",
"grid",
",",
"open_list",
")",
"if",
"path",
":",
"return",
"path",
",",
"self",
".",
"runs",
"# failed to find path",
"return",
"[",
"]",
",",
"self",
".",
"runs"
] | find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return: | [
"find",
"a",
"path",
"from",
"start",
"to",
"end",
"node",
"on",
"grid",
"by",
"iterating",
"over",
"all",
"neighbors",
"of",
"a",
"node",
"(",
"see",
"check_neighbors",
")",
":",
"param",
"start",
":",
"start",
"node",
":",
"param",
"end",
":",
"end",
"node",
":",
"param",
"grid",
":",
"grid",
"that",
"stores",
"all",
"possible",
"steps",
"/",
"tiles",
"as",
"2D",
"-",
"list",
":",
"return",
":"
] | python | train |
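The loop above delegates all expansion logic to check_neighbors, which each concrete finder supplies. A typical call, with class and module names taken from the python-pathfinding package's documented usage (verify against the installed version):

    # Hedged usage sketch; Grid/AStarFinder names follow the package docs.
    from pathfinding.core.grid import Grid
    from pathfinding.finder.a_star import AStarFinder

    matrix = [[1, 1, 1],
              [1, 0, 1],  # 0 marks an obstacle
              [1, 1, 1]]
    grid = Grid(matrix=matrix)
    start, end = grid.node(0, 0), grid.node(2, 2)
    path, runs = AStarFinder().find_path(start, end, grid)
    print(path, runs)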
ktbyers/netmiko | netmiko/base_connection.py | https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/base_connection.py#L1369-L1377 | def normalize_cmd(self, command):
"""Normalize CLI commands to have a single trailing newline.
:param command: Command that may require line feed to be normalized
:type command: str
"""
command = command.rstrip()
command += self.RETURN
return command | [
"def",
"normalize_cmd",
"(",
"self",
",",
"command",
")",
":",
"command",
"=",
"command",
".",
"rstrip",
"(",
")",
"command",
"+=",
"self",
".",
"RETURN",
"return",
"command"
] | Normalize CLI commands to have a single trailing newline.
:param command: Command that may require line feed to be normalized
:type command: str | [
"Normalize",
"CLI",
"commands",
"to",
"have",
"a",
"single",
"trailing",
"newline",
"."
] | python | train |
ga4gh/ga4gh-server | ga4gh/server/datarepo.py | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L191-L197 | def getReferenceSetByName(self, name):
"""
Returns the reference set with the specified name.
"""
if name not in self._referenceSetNameMap:
raise exceptions.ReferenceSetNameNotFoundException(name)
return self._referenceSetNameMap[name] | [
"def",
"getReferenceSetByName",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_referenceSetNameMap",
":",
"raise",
"exceptions",
".",
"ReferenceSetNameNotFoundException",
"(",
"name",
")",
"return",
"self",
".",
"_referenceSetNameMap",
"[",
"name",
"]"
] | Returns the reference set with the specified name. | [
"Returns",
"the",
"reference",
"set",
"with",
"the",
"specified",
"name",
"."
] | python | train |
jorbas/GADDAG | gaddag/gaddag.py | https://github.com/jorbas/GADDAG/blob/a0ede3def715c586e1f273d96e9fc0d537cd9561/gaddag/gaddag.py#L162-L184 | def ends_with(self, suffix):
"""
Find all words ending with a suffix.
Args:
suffix: A suffix to be searched for.
Returns:
A list of all words found.
"""
suffix = suffix.lower()
found_words = []
res = cgaddag.gdg_ends_with(self.gdg, suffix.encode(encoding="ascii"))
tmp = res
while tmp:
word = tmp.contents.str.decode("ascii")
found_words.append(word)
tmp = tmp.contents.next
cgaddag.gdg_destroy_result(res)
return found_words | [
"def",
"ends_with",
"(",
"self",
",",
"suffix",
")",
":",
"suffix",
"=",
"suffix",
".",
"lower",
"(",
")",
"found_words",
"=",
"[",
"]",
"res",
"=",
"cgaddag",
".",
"gdg_ends_with",
"(",
"self",
".",
"gdg",
",",
"suffix",
".",
"encode",
"(",
"encoding",
"=",
"\"ascii\"",
")",
")",
"tmp",
"=",
"res",
"while",
"tmp",
":",
"word",
"=",
"tmp",
".",
"contents",
".",
"str",
".",
"decode",
"(",
"\"ascii\"",
")",
"found_words",
".",
"append",
"(",
"word",
")",
"tmp",
"=",
"tmp",
".",
"contents",
".",
"next",
"cgaddag",
".",
"gdg_destroy_result",
"(",
"res",
")",
"return",
"found_words"
] | Find all words ending with a suffix.
Args:
suffix: A suffix to be searched for.
Returns:
A list of all words found. | [
"Find",
"all",
"words",
"ending",
"with",
"a",
"suffix",
"."
] | python | train |
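The C-backed lookup above walks keys stored in reverse, which turns a suffix query into a prefix query. The idea can be shown with a plain dict standing in for the trie:

    # Sketch of the reversed-key trick behind ends_with (plain dict in place
    # of the real trie).
    words = ['care', 'bare', 'barn', 'rare']
    reversed_index = {w[::-1]: w for w in words}

    suffix = 'are'
    matches = [w for rev, w in reversed_index.items()
               if rev.startswith(suffix[::-1])]
    print(sorted(matches))  # ['bare', 'care', 'rare']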
Capitains/flask-capitains-nemo | flask_nemo/plugins/default.py | https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/plugins/default.py#L17-L63 | def render(self, **kwargs):
""" Make breadcrumbs for a route
:param kwargs: dictionary of named arguments used to construct the view
:type kwargs: dict
:return: List of dict items the view can use to construct the link.
:rtype: {str: list({ "link": str, "title", str, "args", dict})}
"""
breadcrumbs = []
# this is the list of items we want to accumulate in the breadcrumb trail.
# item[0] is the key into the kwargs["url"] object and item[1] is the name of the route
# setting a route name to None means that it's needed to construct the route of the next item in the list
# but shouldn't be included in the list itself (this is currently the case for work --
# at some point we probably should include work in the navigation)
breadcrumbs = []
if "collections" in kwargs:
breadcrumbs = [{
"title": "Text Collections",
"link": ".r_collections",
"args": {}
}]
if "parents" in kwargs["collections"]:
breadcrumbs += [
{
"title": parent["label"],
"link": ".r_collection_semantic",
"args": {
"objectId": parent["id"],
"semantic": f_slugify(parent["label"]),
},
}
for parent in kwargs["collections"]["parents"]
][::-1]
if "current" in kwargs["collections"]:
breadcrumbs.append({
"title": kwargs["collections"]["current"]["label"],
"link": None,
"args": {}
})
# don't link the last item in the trail
if len(breadcrumbs) > 0:
breadcrumbs[-1]["link"] = None
return {"breadcrumbs": breadcrumbs} | [
"def",
"render",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"breadcrumbs",
"=",
"[",
"]",
"# this is the list of items we want to accumulate in the breadcrumb trail.",
"# item[0] is the key into the kwargs[\"url\"] object and item[1] is the name of the route",
"# setting a route name to None means that it's needed to construct the route of the next item in the list",
"# but shouldn't be included in the list itself (this is currently the case for work --",
"# at some point we probably should include work in the navigation)",
"breadcrumbs",
"=",
"[",
"]",
"if",
"\"collections\"",
"in",
"kwargs",
":",
"breadcrumbs",
"=",
"[",
"{",
"\"title\"",
":",
"\"Text Collections\"",
",",
"\"link\"",
":",
"\".r_collections\"",
",",
"\"args\"",
":",
"{",
"}",
"}",
"]",
"if",
"\"parents\"",
"in",
"kwargs",
"[",
"\"collections\"",
"]",
":",
"breadcrumbs",
"+=",
"[",
"{",
"\"title\"",
":",
"parent",
"[",
"\"label\"",
"]",
",",
"\"link\"",
":",
"\".r_collection_semantic\"",
",",
"\"args\"",
":",
"{",
"\"objectId\"",
":",
"parent",
"[",
"\"id\"",
"]",
",",
"\"semantic\"",
":",
"f_slugify",
"(",
"parent",
"[",
"\"label\"",
"]",
")",
",",
"}",
",",
"}",
"for",
"parent",
"in",
"kwargs",
"[",
"\"collections\"",
"]",
"[",
"\"parents\"",
"]",
"]",
"[",
":",
":",
"-",
"1",
"]",
"if",
"\"current\"",
"in",
"kwargs",
"[",
"\"collections\"",
"]",
":",
"breadcrumbs",
".",
"append",
"(",
"{",
"\"title\"",
":",
"kwargs",
"[",
"\"collections\"",
"]",
"[",
"\"current\"",
"]",
"[",
"\"label\"",
"]",
",",
"\"link\"",
":",
"None",
",",
"\"args\"",
":",
"{",
"}",
"}",
")",
"# don't link the last item in the trail",
"if",
"len",
"(",
"breadcrumbs",
")",
">",
"0",
":",
"breadcrumbs",
"[",
"-",
"1",
"]",
"[",
"\"link\"",
"]",
"=",
"None",
"return",
"{",
"\"breadcrumbs\"",
":",
"breadcrumbs",
"}"
] | Make breadcrumbs for a route
:param kwargs: dictionary of named arguments used to construct the view
:type kwargs: dict
:return: List of dict items the view can use to construct the link.
:rtype: {str: list({ "link": str, "title", str, "args", dict})} | [
"Make",
"breadcrumbs",
"for",
"a",
"route"
] | python | valid |
jason-weirather/py-seq-tools | seqtools/format/sam/bam/bamindex.py | https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/format/sam/bam/bamindex.py#L89-L95 | def get_coords_by_name(self,name):
"""
.. warning:: not implemented
"""
sys.stderr.write("error unimplemented get_coords_by_name\n")
sys.exit()
return [[self._lines[x]['filestart'],self._lines[x]['innerstart']] for x in self._queries[self._name_to_num[name]]] | [
"def",
"get_coords_by_name",
"(",
"self",
",",
"name",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"error unimplemented get_coords_by_name\\n\"",
")",
"sys",
".",
"exit",
"(",
")",
"return",
"[",
"[",
"self",
".",
"_lines",
"[",
"x",
"]",
"[",
"'filestart'",
"]",
",",
"self",
".",
"_lines",
"[",
"x",
"]",
"[",
"'innerstart'",
"]",
"]",
"for",
"x",
"in",
"self",
".",
"_queries",
"[",
"self",
".",
"_name_to_num",
"[",
"name",
"]",
"]",
"]"
] | .. warning:: not implemented | [
"..",
"warning",
"::",
"not",
"implemented"
] | python | train |
AguaClara/aguaclara | aguaclara/core/utility.py | https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/core/utility.py#L64-L132 | def list_handler(HandlerResult="nparray"):
"""Wraps a function to handle list inputs."""
def decorate(func):
def wrapper(*args, **kwargs):
"""Run through the wrapped function once for each array element.
:param HandlerResult: output type. Defaults to numpy arrays.
"""
sequences = []
enumsUnitCheck = enumerate(args)
argsList = list(args)
#This for loop identifies pint unit objects and strips them
#of their units.
for num, arg in enumsUnitCheck:
if type(arg) == type(1 * u.m):
argsList[num] = arg.to_base_units().magnitude
enumsUnitless = enumerate(argsList)
#This for loop identifies arguments that are sequences and
#adds their index location to the list 'sequences'.
for num, arg in enumsUnitless:
if isinstance(arg, (list, tuple, np.ndarray)):
sequences.append(num)
#If there are no sequences to iterate through, simply return
#the function.
if len(sequences) == 0:
result = func(*args, **kwargs)
else:
#iterant keeps track of how many times we've iterated and
#limiter stops the loop once we've iterated as many times
#as there are list elements. Without this check, a few
#erroneous runs will occur, appending the last couple values
#to the end of the list multiple times.
#
#We only care about the length of sequences[0] because this
#function is recursive, and sequences[0] is always the relevant
#sequences for any given run.
limiter = len(argsList[sequences[0]])
iterant = 0
result = []
for num in sequences:
for arg in argsList[num]:
if iterant >= limiter:
break
#We can safely replace the entire list argument
#with a single element from it because of the looping
#we're doing. We redefine the object, but that
#definition remains within this namespace and does
#not penetrate further up the function.
argsList[num] = arg
#Here we dive down the rabbit hole. This ends up
#creating a multi-dimensional array shaped by the
#sizes and shapes of the lists passed.
result.append(wrapper(*argsList,
HandlerResult=HandlerResult, **kwargs))
iterant += 1
#HandlerResult allows the user to specify what type to
#return the generated sequence as. It defaults to numpy
#arrays because functions tend to handle them better, but if
#the user does not wish to import numpy the base Python options
#are available to them.
if HandlerResult == "nparray":
result = np.array(result)
elif HandlerResult == "tuple":
result = tuple(result)
elif HandlerResult == "list":
result = list(result)
return result
return wrapper
return decorate | [
"def",
"list_handler",
"(",
"HandlerResult",
"=",
"\"nparray\"",
")",
":",
"def",
"decorate",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Run through the wrapped function once for each array element.\n\n :param HandlerResult: output type. Defaults to numpy arrays.\n \"\"\"",
"sequences",
"=",
"[",
"]",
"enumsUnitCheck",
"=",
"enumerate",
"(",
"args",
")",
"argsList",
"=",
"list",
"(",
"args",
")",
"#This for loop identifies pint unit objects and strips them",
"#of their units.",
"for",
"num",
",",
"arg",
"in",
"enumsUnitCheck",
":",
"if",
"type",
"(",
"arg",
")",
"==",
"type",
"(",
"1",
"*",
"u",
".",
"m",
")",
":",
"argsList",
"[",
"num",
"]",
"=",
"arg",
".",
"to_base_units",
"(",
")",
".",
"magnitude",
"enumsUnitless",
"=",
"enumerate",
"(",
"argsList",
")",
"#This for loop identifies arguments that are sequences and",
"#adds their index location to the list 'sequences'.",
"for",
"num",
",",
"arg",
"in",
"enumsUnitless",
":",
"if",
"isinstance",
"(",
"arg",
",",
"(",
"list",
",",
"tuple",
",",
"np",
".",
"ndarray",
")",
")",
":",
"sequences",
".",
"append",
"(",
"num",
")",
"#If there are no sequences to iterate through, simply return",
"#the function.",
"if",
"len",
"(",
"sequences",
")",
"==",
"0",
":",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"#iterant keeps track of how many times we've iterated and",
"#limiter stops the loop once we've iterated as many times",
"#as there are list elements. Without this check, a few",
"#erroneous runs will occur, appending the last couple values",
"#to the end of the list multiple times.",
"#",
"#We only care about the length of sequences[0] because this",
"#function is recursive, and sequences[0] is always the relevant",
"#sequences for any given run.",
"limiter",
"=",
"len",
"(",
"argsList",
"[",
"sequences",
"[",
"0",
"]",
"]",
")",
"iterant",
"=",
"0",
"result",
"=",
"[",
"]",
"for",
"num",
"in",
"sequences",
":",
"for",
"arg",
"in",
"argsList",
"[",
"num",
"]",
":",
"if",
"iterant",
">=",
"limiter",
":",
"break",
"#We can safely replace the entire list argument",
"#with a single element from it because of the looping",
"#we're doing. We redefine the object, but that",
"#definition remains within this namespace and does",
"#not penetrate further up the function.",
"argsList",
"[",
"num",
"]",
"=",
"arg",
"#Here we dive down the rabbit hole. This ends up",
"#creating a multi-dimensional array shaped by the",
"#sizes and shapes of the lists passed.",
"result",
".",
"append",
"(",
"wrapper",
"(",
"*",
"argsList",
",",
"HandlerResult",
"=",
"HandlerResult",
",",
"*",
"*",
"kwargs",
")",
")",
"iterant",
"+=",
"1",
"#HandlerResult allows the user to specify what type to",
"#return the generated sequence as. It defaults to numpy",
"#arrays because functions tend to handle them better, but if",
"#the user does not wish to import numpy the base Python options",
"#are available to them.",
"if",
"HandlerResult",
"==",
"\"nparray\"",
":",
"result",
"=",
"np",
".",
"array",
"(",
"result",
")",
"elif",
"HandlerResult",
"==",
"\"tuple\"",
":",
"result",
"=",
"tuple",
"(",
"result",
")",
"elif",
"HandlerResult",
"==",
"\"list\"",
":",
"result",
"==",
"list",
"(",
"result",
")",
"return",
"result",
"return",
"wrapper",
"return",
"decorate"
] | Wraps a function to handle list inputs. | [
"Wraps",
"a",
"function",
"to",
"handle",
"list",
"inputs",
"."
] | python | train |
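In effect the decorator broadcasts a scalar function over any sequence argument, stripping pint units first. A hypothetical decorated function is sketched below; note the **kwargs, which absorbs the HandlerResult keyword the wrapper forwards on recursive calls:

    # Hypothetical example; assumes list_handler is importable from
    # aguaclara.core.utility.
    from aguaclara.core.utility import list_handler

    @list_handler()
    def double(x, **kwargs):  # **kwargs absorbs the forwarded HandlerResult
        return 2 * x

    print(double(3))           # 6
    print(double([1, 2, 3]))   # array([2, 4, 6]): one call per element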
42cc/bets-api | bets/__init__.py | https://github.com/42cc/bets-api/blob/63a8227c7d8c65eef9974374607bc34effff5c7c/bets/__init__.py#L103-L139 | def get_bets(self, type=None, order_by=None, state=None, project_id=None,
page=None, page_size=None):
"""Return bets with given filters and ordering.
:param type: return bets only with this type.
Use None to include all (default).
:param order_by: '-last_stake' or 'last_stake' to sort by stake's
created date or None for default ordering.
:param state: one of 'active', 'closed', 'all' (default 'active').
:param project_id: return bets associated with given project id in kava
:param page: default 1.
:param page_size: page size (default 100).
"""
if page is None:
page = 1
if page_size is None:
page_size = 100
if state == 'all':
_states = [] # all states == no filter
elif state == 'closed':
_states = self.CLOSED_STATES
else:
_states = self.ACTIVE_STATES
url = urljoin(
self.settings['bets_url'],
'bets?page={}&page_size={}'.format(page, page_size))
url += '&state={}'.format(','.join(_states))
if type is not None:
url += '&type={}'.format(type)
if order_by in ['-last_stake', 'last_stake']:
url += '&order_by={}'.format(order_by)
if project_id is not None:
url += '&kava_project_id={}'.format(project_id)
res = self._req(url)
return res['bets']['results'] | [
"def",
"get_bets",
"(",
"self",
",",
"type",
"=",
"None",
",",
"order_by",
"=",
"None",
",",
"state",
"=",
"None",
",",
"project_id",
"=",
"None",
",",
"page",
"=",
"None",
",",
"page_size",
"=",
"None",
")",
":",
"if",
"page",
"is",
"None",
":",
"page",
"=",
"1",
"if",
"page_size",
"is",
"None",
":",
"page_size",
"=",
"100",
"if",
"state",
"==",
"'all'",
":",
"_states",
"=",
"[",
"]",
"# all states == no filter",
"elif",
"state",
"==",
"'closed'",
":",
"_states",
"=",
"self",
".",
"CLOSED_STATES",
"else",
":",
"_states",
"=",
"self",
".",
"ACTIVE_STATES",
"url",
"=",
"urljoin",
"(",
"self",
".",
"settings",
"[",
"'bets_url'",
"]",
",",
"'bets?page={}&page_size={}'",
".",
"format",
"(",
"page",
",",
"page_size",
")",
")",
"url",
"+=",
"'&state={}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"_states",
")",
")",
"if",
"type",
"is",
"not",
"None",
":",
"url",
"+=",
"'&type={}'",
".",
"format",
"(",
"type",
")",
"if",
"order_by",
"in",
"[",
"'-last_stake'",
",",
"'last_stake'",
"]",
":",
"url",
"+=",
"'&order_by={}'",
".",
"format",
"(",
"order_by",
")",
"if",
"project_id",
"is",
"not",
"None",
":",
"url",
"+=",
"'&kava_project_id={}'",
".",
"format",
"(",
"project_id",
")",
"res",
"=",
"self",
".",
"_req",
"(",
"url",
")",
"return",
"res",
"[",
"'bets'",
"]",
"[",
"'results'",
"]"
] | Return bets with given filters and ordering.
:param type: return bets only with this type.
Use None to include all (default).
:param order_by: '-last_stake' or 'last_stake' to sort by stake's
created date or None for default ordering.
:param state: one of 'active', 'closed', 'all' (default 'active').
:param project_id: return bets associated with given project id in kava
:param page: default 1.
:param page_site: page size (default 100). | [
"Return",
"bets",
"with",
"given",
"filters",
"and",
"ordering",
"."
] | python | valid |
requests/requests-ntlm | requests_ntlm/requests_ntlm.py | https://github.com/requests/requests-ntlm/blob/f71fee60aa64c17941114d4eae40aed670a77afd/requests_ntlm/requests_ntlm.py#L138-L168 | def response_hook(self, r, **kwargs):
"""The actual hook handler."""
if r.status_code == 401:
# Handle server auth.
www_authenticate = r.headers.get('www-authenticate', '').lower()
auth_type = _auth_type_from_header(www_authenticate)
if auth_type is not None:
return self.retry_using_http_NTLM_auth(
'www-authenticate',
'Authorization',
r,
auth_type,
kwargs
)
elif r.status_code == 407:
# If we didn't have server auth, do proxy auth.
proxy_authenticate = r.headers.get(
'proxy-authenticate', ''
).lower()
auth_type = _auth_type_from_header(proxy_authenticate)
if auth_type is not None:
return self.retry_using_http_NTLM_auth(
'proxy-authenticate',
'Proxy-authorization',
r,
auth_type,
kwargs
)
return r | [
"def",
"response_hook",
"(",
"self",
",",
"r",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"r",
".",
"status_code",
"==",
"401",
":",
"# Handle server auth.",
"www_authenticate",
"=",
"r",
".",
"headers",
".",
"get",
"(",
"'www-authenticate'",
",",
"''",
")",
".",
"lower",
"(",
")",
"auth_type",
"=",
"_auth_type_from_header",
"(",
"www_authenticate",
")",
"if",
"auth_type",
"is",
"not",
"None",
":",
"return",
"self",
".",
"retry_using_http_NTLM_auth",
"(",
"'www-authenticate'",
",",
"'Authorization'",
",",
"r",
",",
"auth_type",
",",
"kwargs",
")",
"elif",
"r",
".",
"status_code",
"==",
"407",
":",
"# If we didn't have server auth, do proxy auth.",
"proxy_authenticate",
"=",
"r",
".",
"headers",
".",
"get",
"(",
"'proxy-authenticate'",
",",
"''",
")",
".",
"lower",
"(",
")",
"auth_type",
"=",
"_auth_type_from_header",
"(",
"proxy_authenticate",
")",
"if",
"auth_type",
"is",
"not",
"None",
":",
"return",
"self",
".",
"retry_using_http_NTLM_auth",
"(",
"'proxy-authenticate'",
",",
"'Proxy-authorization'",
",",
"r",
",",
"auth_type",
",",
"kwargs",
")",
"return",
"r"
] | The actual hook handler. | [
"The",
"actual",
"hook",
"handler",
"."
] | python | train |
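The hook keys off the WWW-Authenticate and Proxy-Authenticate headers via _auth_type_from_header, which is defined elsewhere in the module; a simplified stand-in (its exact behavior is an assumption here) shows the dispatch:

    # Simplified stand-in for _auth_type_from_header; behavior is assumed.
    def auth_type_from_header(header):
        if 'ntlm' in header:
            return 'NTLM'
        if 'negotiate' in header:
            return 'Negotiate'
        return None

    print(auth_type_from_header('ntlm, basic realm="x"'))  # NTLM
    print(auth_type_from_header('basic realm="x"'))        # None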
rq/rq-scheduler | rq_scheduler/scheduler.py | https://github.com/rq/rq-scheduler/blob/ee60c19e42a46ba787f762733a0036aa0cf2f7b7/rq_scheduler/scheduler.py#L290-L325 | def get_jobs(self, until=None, with_times=False, offset=None, length=None):
"""
Returns an iterator of job instances that will be queued until the given
time. If no 'until' argument is given all jobs are returned.
If with_times is True, a list of tuples consisting of the job instance
and its scheduled execution time is returned.
If offset and length are specified, a slice of the list starting at the
specified zero-based offset of the specified length will be returned.
If either of offset or length is specified, then both must be, or
an exception will be raised.
"""
def epoch_to_datetime(epoch):
return from_unix(float(epoch))
until = rationalize_until(until)
job_ids = self.connection.zrangebyscore(self.scheduled_jobs_key, 0,
until, withscores=with_times,
score_cast_func=epoch_to_datetime,
start=offset, num=length)
if not with_times:
job_ids = zip(job_ids, repeat(None))
for job_id, sched_time in job_ids:
job_id = job_id.decode('utf-8')
try:
job = self.job_class.fetch(job_id, connection=self.connection)
except NoSuchJobError:
# Delete jobs that aren't there from scheduler
self.cancel(job_id)
continue
if with_times:
yield (job, sched_time)
else:
yield job | [
"def",
"get_jobs",
"(",
"self",
",",
"until",
"=",
"None",
",",
"with_times",
"=",
"False",
",",
"offset",
"=",
"None",
",",
"length",
"=",
"None",
")",
":",
"def",
"epoch_to_datetime",
"(",
"epoch",
")",
":",
"return",
"from_unix",
"(",
"float",
"(",
"epoch",
")",
")",
"until",
"=",
"rationalize_until",
"(",
"until",
")",
"job_ids",
"=",
"self",
".",
"connection",
".",
"zrangebyscore",
"(",
"self",
".",
"scheduled_jobs_key",
",",
"0",
",",
"until",
",",
"withscores",
"=",
"with_times",
",",
"score_cast_func",
"=",
"epoch_to_datetime",
",",
"start",
"=",
"offset",
",",
"num",
"=",
"length",
")",
"if",
"not",
"with_times",
":",
"job_ids",
"=",
"zip",
"(",
"job_ids",
",",
"repeat",
"(",
"None",
")",
")",
"for",
"job_id",
",",
"sched_time",
"in",
"job_ids",
":",
"job_id",
"=",
"job_id",
".",
"decode",
"(",
"'utf-8'",
")",
"try",
":",
"job",
"=",
"self",
".",
"job_class",
".",
"fetch",
"(",
"job_id",
",",
"connection",
"=",
"self",
".",
"connection",
")",
"except",
"NoSuchJobError",
":",
"# Delete jobs that aren't there from scheduler",
"self",
".",
"cancel",
"(",
"job_id",
")",
"continue",
"if",
"with_times",
":",
"yield",
"(",
"job",
",",
"sched_time",
")",
"else",
":",
"yield",
"job"
] | Returns an iterator of job instances that will be queued until the given
time. If no 'until' argument is given all jobs are returned.
If with_times is True, a list of tuples consisting of the job instance
and its scheduled execution time is returned.
If offset and length are specified, a slice of the list starting at the
specified zero-based offset of the specified length will be returned.
If either of offset or length is specified, then both must be, or
an exception will be raised. | [
"Returns",
"a",
"iterator",
"of",
"job",
"instances",
"that",
"will",
"be",
"queued",
"until",
"the",
"given",
"time",
".",
"If",
"no",
"until",
"argument",
"is",
"given",
"all",
"jobs",
"are",
"returned",
"."
] | python | train |
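Scheduled jobs live in a Redis sorted set scored by UNIX timestamp, which is what lets get_jobs slice by time with ZRANGEBYSCORE. A minimal illustration with redis-py 3.x (key name, job id, and a running local Redis are all assumptions):

    # Illustration of the sorted-set layout get_jobs reads; details assumed.
    import time
    from redis import Redis

    conn = Redis()
    key = 'rq:scheduler:scheduled_jobs'
    conn.zadd(key, {'job-123': time.time() + 60})         # due in one minute
    due = conn.zrangebyscore(key, 0, time.time() + 3600)  # due within an hour
    print(due)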
Erotemic/utool | utool/util_str.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L1582-L1694 | def list_str(list_, **listkw):
r"""
Makes a pretty list string
Args:
list_ (list): input list
**listkw: nl, newlines, packed, truncate, nobr, nobraces, itemsep,
trailing_sep, truncatekw, strvals, recursive,
indent_, precision, use_numpy, with_dtype, force_dtype,
stritems, strkeys, align, explicit, sorted_, key_order,
key_order_metric, maxlen
Returns:
str: retstr
CommandLine:
python -m utool.util_str --test-list_str
python -m utool.util_str --exec-list_str --truncate=True
python -m utool.util_str --exec-list_str --truncate=0
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> import utool as ut
>>> list_ = [[(('--verbose-qt', '--verbqt'), 1, False, ''),
>>> (('--verbose-qt', '--verbqt'), 1, False, ''),
>>> (('--verbose-qt', '--verbqt'), 1, False, ''),
>>> (('--verbose-qt', '--verbqt'), 1, False, '')],
>>> [(['--nodyn'], 1, False, ''), (['--nodyn'], 1, False, '')]]
>>> listkw = {'nl': 2}
>>> result = list_str(list_, **listkw)
>>> print(result)
[
[
(('--verbose-qt', '--verbqt'), 1, False, ''),
(('--verbose-qt', '--verbqt'), 1, False, ''),
(('--verbose-qt', '--verbqt'), 1, False, ''),
(('--verbose-qt', '--verbqt'), 1, False, ''),
],
[
(['--nodyn'], 1, False, ''),
(['--nodyn'], 1, False, ''),
],
]
"""
import utool as ut
newlines = listkw.pop('nl', listkw.pop('newlines', 1))
packed = listkw.pop('packed', False)
truncate = listkw.pop('truncate', False)
listkw['nl'] = _rectify_countdown_or_bool(newlines)
listkw['truncate'] = _rectify_countdown_or_bool(truncate)
listkw['packed'] = _rectify_countdown_or_bool(packed)
nobraces = listkw.pop('nobr', listkw.pop('nobraces', False))
itemsep = listkw.get('itemsep', ' ')
# Doesn't actually put in trailing comma if on same line
trailing_sep = listkw.get('trailing_sep', True)
with_comma = True
itemstr_list = get_itemstr_list(list_, **listkw)
is_tuple = isinstance(list_, tuple)
is_set = isinstance(list_, (set, frozenset, ut.oset))
is_onetup = isinstance(list_, (tuple)) and len(list_) <= 1
if nobraces:
lbr, rbr = '', ''
elif is_tuple:
lbr, rbr = '(', ')'
elif is_set:
lbr, rbr = '{', '}'
else:
lbr, rbr = '[', ']'
if len(itemstr_list) == 0:
newlines = False
if newlines is not False and (newlines is True or newlines > 0):
sep = ',\n' if with_comma else '\n'
if nobraces:
body_str = sep.join(itemstr_list)
if trailing_sep:
body_str += ','
retstr = body_str
else:
if packed:
# DEPRICATE?
joinstr = sep + itemsep * len(lbr)
body_str = joinstr.join([itemstr for itemstr in itemstr_list])
if trailing_sep:
body_str += ','
braced_body_str = (lbr + '' + body_str + '' + rbr)
else:
body_str = sep.join([
ut.indent(itemstr) for itemstr in itemstr_list])
if trailing_sep:
body_str += ','
braced_body_str = (lbr + '\n' + body_str + '\n' + rbr)
retstr = braced_body_str
else:
sep = ',' + itemsep if with_comma else itemsep
body_str = sep.join(itemstr_list)
if is_onetup:
body_str += ','
retstr = (lbr + body_str + rbr)
# TODO: rectify with dict_truncate
do_truncate = truncate is not False and (truncate is True or truncate == 0)
if do_truncate:
truncatekw = listkw.get('truncatekw', {})
retstr = truncate_str(retstr, **truncatekw)
return retstr | [
"def",
"list_str",
"(",
"list_",
",",
"*",
"*",
"listkw",
")",
":",
"import",
"utool",
"as",
"ut",
"newlines",
"=",
"listkw",
".",
"pop",
"(",
"'nl'",
",",
"listkw",
".",
"pop",
"(",
"'newlines'",
",",
"1",
")",
")",
"packed",
"=",
"listkw",
".",
"pop",
"(",
"'packed'",
",",
"False",
")",
"truncate",
"=",
"listkw",
".",
"pop",
"(",
"'truncate'",
",",
"False",
")",
"listkw",
"[",
"'nl'",
"]",
"=",
"_rectify_countdown_or_bool",
"(",
"newlines",
")",
"listkw",
"[",
"'truncate'",
"]",
"=",
"_rectify_countdown_or_bool",
"(",
"truncate",
")",
"listkw",
"[",
"'packed'",
"]",
"=",
"_rectify_countdown_or_bool",
"(",
"packed",
")",
"nobraces",
"=",
"listkw",
".",
"pop",
"(",
"'nobr'",
",",
"listkw",
".",
"pop",
"(",
"'nobraces'",
",",
"False",
")",
")",
"itemsep",
"=",
"listkw",
".",
"get",
"(",
"'itemsep'",
",",
"' '",
")",
"# Doesn't actually put in trailing comma if on same line",
"trailing_sep",
"=",
"listkw",
".",
"get",
"(",
"'trailing_sep'",
",",
"True",
")",
"with_comma",
"=",
"True",
"itemstr_list",
"=",
"get_itemstr_list",
"(",
"list_",
",",
"*",
"*",
"listkw",
")",
"is_tuple",
"=",
"isinstance",
"(",
"list_",
",",
"tuple",
")",
"is_set",
"=",
"isinstance",
"(",
"list_",
",",
"(",
"set",
",",
"frozenset",
",",
"ut",
".",
"oset",
")",
")",
"is_onetup",
"=",
"isinstance",
"(",
"list_",
",",
"(",
"tuple",
")",
")",
"and",
"len",
"(",
"list_",
")",
"<=",
"1",
"if",
"nobraces",
":",
"lbr",
",",
"rbr",
"=",
"''",
",",
"''",
"elif",
"is_tuple",
":",
"lbr",
",",
"rbr",
"=",
"'('",
",",
"')'",
"elif",
"is_set",
":",
"lbr",
",",
"rbr",
"=",
"'{'",
",",
"'}'",
"else",
":",
"lbr",
",",
"rbr",
"=",
"'['",
",",
"']'",
"if",
"len",
"(",
"itemstr_list",
")",
"==",
"0",
":",
"newlines",
"=",
"False",
"if",
"newlines",
"is",
"not",
"False",
"and",
"(",
"newlines",
"is",
"True",
"or",
"newlines",
">",
"0",
")",
":",
"sep",
"=",
"',\\n'",
"if",
"with_comma",
"else",
"'\\n'",
"if",
"nobraces",
":",
"body_str",
"=",
"sep",
".",
"join",
"(",
"itemstr_list",
")",
"if",
"trailing_sep",
":",
"body_str",
"+=",
"','",
"retstr",
"=",
"body_str",
"else",
":",
"if",
"packed",
":",
"# DEPRICATE?",
"joinstr",
"=",
"sep",
"+",
"itemsep",
"*",
"len",
"(",
"lbr",
")",
"body_str",
"=",
"joinstr",
".",
"join",
"(",
"[",
"itemstr",
"for",
"itemstr",
"in",
"itemstr_list",
"]",
")",
"if",
"trailing_sep",
":",
"body_str",
"+=",
"','",
"braced_body_str",
"=",
"(",
"lbr",
"+",
"''",
"+",
"body_str",
"+",
"''",
"+",
"rbr",
")",
"else",
":",
"body_str",
"=",
"sep",
".",
"join",
"(",
"[",
"ut",
".",
"indent",
"(",
"itemstr",
")",
"for",
"itemstr",
"in",
"itemstr_list",
"]",
")",
"if",
"trailing_sep",
":",
"body_str",
"+=",
"','",
"braced_body_str",
"=",
"(",
"lbr",
"+",
"'\\n'",
"+",
"body_str",
"+",
"'\\n'",
"+",
"rbr",
")",
"retstr",
"=",
"braced_body_str",
"else",
":",
"sep",
"=",
"','",
"+",
"itemsep",
"if",
"with_comma",
"else",
"itemsep",
"body_str",
"=",
"sep",
".",
"join",
"(",
"itemstr_list",
")",
"if",
"is_onetup",
":",
"body_str",
"+=",
"','",
"retstr",
"=",
"(",
"lbr",
"+",
"body_str",
"+",
"rbr",
")",
"# TODO: rectify with dict_truncate",
"do_truncate",
"=",
"truncate",
"is",
"not",
"False",
"and",
"(",
"truncate",
"is",
"True",
"or",
"truncate",
"==",
"0",
")",
"if",
"do_truncate",
":",
"truncatekw",
"=",
"listkw",
".",
"get",
"(",
"'truncatekw'",
",",
"{",
"}",
")",
"retstr",
"=",
"truncate_str",
"(",
"retstr",
",",
"*",
"*",
"truncatekw",
")",
"return",
"retstr"
] | r"""
Makes a pretty list string
Args:
list_ (list): input list
**listkw: nl, newlines, packed, truncate, nobr, nobraces, itemsep,
trailing_sep, truncatekw, strvals, recursive,
indent_, precision, use_numpy, with_dtype, force_dtype,
stritems, strkeys, align, explicit, sorted_, key_order,
key_order_metric, maxlen
Returns:
str: retstr
CommandLine:
python -m utool.util_str --test-list_str
python -m utool.util_str --exec-list_str --truncate=True
python -m utool.util_str --exec-list_str --truncate=0
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> import utool as ut
>>> list_ = [[(('--verbose-qt', '--verbqt'), 1, False, ''),
>>> (('--verbose-qt', '--verbqt'), 1, False, ''),
>>> (('--verbose-qt', '--verbqt'), 1, False, ''),
>>> (('--verbose-qt', '--verbqt'), 1, False, '')],
>>> [(['--nodyn'], 1, False, ''), (['--nodyn'], 1, False, '')]]
>>> listkw = {'nl': 2}
>>> result = list_str(list_, **listkw)
>>> print(result)
[
[
(('--verbose-qt', '--verbqt'), 1, False, ''),
(('--verbose-qt', '--verbqt'), 1, False, ''),
(('--verbose-qt', '--verbqt'), 1, False, ''),
(('--verbose-qt', '--verbqt'), 1, False, ''),
],
[
(['--nodyn'], 1, False, ''),
(['--nodyn'], 1, False, ''),
],
] | [
"r",
"Makes",
"a",
"pretty",
"list",
"string"
] | python | train |
rchatterjee/pwmodels | src/pwmodel/fast_fuzzysearch.py | https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/fast_fuzzysearch.py#L107-L123 | def query(self, w, ed=1): # Can only handle ed=1
"""
Finds the fuzzy matches (within edit distance 1) of w from words
"""
assert ed <= self._ed
if ed == 0:
return [w] if w in self._L else ['']
w = str(w)
n = len(w)
prefix, suffix = w[:n // 2], w[n // 2:][::-1]
options_w_prefix = self._L.keys(prefix)
options_w_suffix = [x[::-1] for x in self._R.iterkeys(suffix)]
return [
_w
for _w in set(itertools.chain(options_w_prefix, options_w_suffix))
if abs(len(_w) - len(w)) <= 1 and lvdistance(str(_w), str(w), 1) <= 1
] | [
"def",
"query",
"(",
"self",
",",
"w",
",",
"ed",
"=",
"1",
")",
":",
"# Can only handle ed=1",
"assert",
"ed",
"<=",
"self",
".",
"_ed",
"if",
"ed",
"==",
"0",
":",
"return",
"[",
"w",
"]",
"if",
"w",
"in",
"self",
".",
"_L",
"else",
"[",
"''",
"]",
"w",
"=",
"str",
"(",
"w",
")",
"n",
"=",
"len",
"(",
"w",
")",
"prefix",
",",
"suffix",
"=",
"w",
"[",
":",
"n",
"//",
"2",
"]",
",",
"w",
"[",
"n",
"//",
"2",
":",
"]",
"[",
":",
":",
"-",
"1",
"]",
"options_w_prefix",
"=",
"self",
".",
"_L",
".",
"keys",
"(",
"prefix",
")",
"options_w_suffix",
"=",
"[",
"x",
"[",
":",
":",
"-",
"1",
"]",
"for",
"x",
"in",
"self",
".",
"_R",
".",
"iterkeys",
"(",
"suffix",
")",
"]",
"return",
"[",
"_w",
"for",
"_w",
"in",
"set",
"(",
"itertools",
".",
"chain",
"(",
"options_w_prefix",
",",
"options_w_suffix",
")",
")",
"if",
"abs",
"(",
"len",
"(",
"_w",
")",
"-",
"len",
"(",
"w",
")",
")",
"<=",
"1",
"and",
"lvdistance",
"(",
"str",
"(",
"_w",
")",
",",
"str",
"(",
"w",
")",
",",
"1",
")",
"<=",
"1",
"]"
] | Finds the fuzzy matches (within edit distance 1) of w from words | [
"Finds",
"the",
"fuzzy",
"matches",
"(",
"within",
"edit",
"distance",
"1",
")",
"of",
"w",
"from",
"words"
] | python | train |
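The half-split above rests on a pigeonhole argument: a single edit cannot touch both the first half and the second half of the query, so at least one of the two trie lookups starts from an error-free fragment. A small check of that property:

    # Pigeonhole check behind the half-split (edit distance 1).
    w = 'password'
    typo = 'passw0rd'                  # one substitution, in the second half
    n = len(typo)
    prefix, suffix = typo[:n // 2], typo[n // 2:][::-1]
    print(w.startswith(prefix))        # True: prefix trie finds the word
    print(w[::-1].startswith(suffix))  # False: suffix side misses, as expected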
jic-dtool/dtoolcore | dtoolcore/utils.py | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/utils.py#L191-L204 | def mkdir_parents(path):
"""Create the given directory path.
This includes all necessary parent directories. Does not raise an error if
the directory already exists.
:param path: path to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise | [
"def",
"mkdir_parents",
"(",
"path",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"except",
"OSError",
"as",
"exc",
":",
"if",
"exc",
".",
"errno",
"==",
"errno",
".",
"EEXIST",
":",
"pass",
"else",
":",
"raise"
] | Create the given directory path.
This includes all necessary parent directories. Does not raise an error if
the directory already exists.
:param path: path to create | [
"Create",
"the",
"given",
"directory",
"path",
".",
"This",
"includes",
"all",
"necessary",
"parent",
"directories",
".",
"Does",
"not",
"raise",
"an",
"error",
"if",
"the",
"directory",
"already",
"exists",
".",
":",
"param",
"path",
":",
"path",
"to",
"create"
] | python | train |
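The try/except pattern above targets Python 2; on Python 3.2+ the standard library covers the same semantics in one call:

    # Python 3.2+ equivalent of mkdir_parents.
    import os
    os.makedirs('some/nested/path', exist_ok=True)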
pantsbuild/pex | pex/pex_builder.py | https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/pex_builder.py#L146-L155 | def add_source(self, filename, env_filename):
"""Add a source to the PEX environment.
:param filename: The source filename to add to the PEX; None to create an empty file at
`env_filename`.
:param env_filename: The destination filename in the PEX. This path
must be a relative path.
"""
self._ensure_unfrozen('Adding source')
self._copy_or_link(filename, env_filename, "source") | [
"def",
"add_source",
"(",
"self",
",",
"filename",
",",
"env_filename",
")",
":",
"self",
".",
"_ensure_unfrozen",
"(",
"'Adding source'",
")",
"self",
".",
"_copy_or_link",
"(",
"filename",
",",
"env_filename",
",",
"\"source\"",
")"
] | Add a source to the PEX environment.
:param filename: The source filename to add to the PEX; None to create an empty file at
`env_filename`.
:param env_filename: The destination filename in the PEX. This path
must be a relative path. | [
"Add",
"a",
"source",
"to",
"the",
"PEX",
"environment",
"."
] | python | train |
shi-cong/PYSTUDY | PYSTUDY/image/pillib.py | https://github.com/shi-cong/PYSTUDY/blob/c8da7128ea18ecaa5849f2066d321e70d6f97f70/PYSTUDY/image/pillib.py#L236-L253 | def get_exif_data(self, image):
"""Returns a dictionary from the exif data of an PIL Image item. Also converts the GPS Tags"""
exif_data = {}
info = image._getexif()
if info:
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
if decoded == "GPSInfo":
gps_data = {}
for t in value:
sub_decoded = GPSTAGS.get(t, t)
gps_data[sub_decoded] = value[t]
exif_data[decoded] = gps_data
else:
exif_data[decoded] = value
return exif_data | [
"def",
"get_exif_data",
"(",
"self",
",",
"image",
")",
":",
"exif_data",
"=",
"{",
"}",
"info",
"=",
"image",
".",
"_getexif",
"(",
")",
"if",
"info",
":",
"for",
"tag",
",",
"value",
"in",
"info",
".",
"items",
"(",
")",
":",
"decoded",
"=",
"TAGS",
".",
"get",
"(",
"tag",
",",
"tag",
")",
"if",
"decoded",
"==",
"\"GPSInfo\"",
":",
"gps_data",
"=",
"{",
"}",
"for",
"t",
"in",
"value",
":",
"sub_decoded",
"=",
"GPSTAGS",
".",
"get",
"(",
"t",
",",
"t",
")",
"gps_data",
"[",
"sub_decoded",
"]",
"=",
"value",
"[",
"t",
"]",
"exif_data",
"[",
"decoded",
"]",
"=",
"gps_data",
"else",
":",
"exif_data",
"[",
"decoded",
"]",
"=",
"value",
"return",
"exif_data"
] | Returns a dictionary from the exif data of a PIL Image item. Also converts the GPS Tags | [
"Returns",
"a",
"dictionary",
"from",
"the",
"exif",
"data",
"of",
"an",
"PIL",
"Image",
"item",
".",
"Also",
"converts",
"the",
"GPS",
"Tags"
] | python | train |
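The same TAGS/GPSTAGS decoding pattern can be exercised directly with Pillow; the file path below is illustrative, and _getexif is the same private Pillow API the method relies on:

    # Standalone sketch of the decoding pattern (file path illustrative).
    from PIL import Image
    from PIL.ExifTags import TAGS

    img = Image.open('photo.jpg')
    info = img._getexif() or {}
    exif = {TAGS.get(tag, tag): value for tag, value in info.items()}
    print(exif.get('GPSInfo'))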
mikekatz04/BOWIE | bowie/plotutils/plottypes.py | https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/plottypes.py#L71-L123 | def make_plot(self):
"""Creates the ratio plot.
"""
# sets colormap for ratio comparison plot
cmap = getattr(cm, self.colormap)
# set values of ratio comparison contour
normval = 2.0
num_contours = 40 # must be even
levels = np.linspace(-normval, normval, num_contours)
norm = colors.Normalize(-normval, normval)
# find Loss/Gain contour and Ratio contour
self.set_comparison()
diff_out, loss_gain_contour = self.find_difference_contour()
cmap.set_bad(color='white', alpha=0.001)
# plot ratio contours
sc = self.axis.contourf(self.xvals[0], self.yvals[0], diff_out,
levels=levels, norm=norm,
extend='both', cmap=cmap)
self.colorbar.setup_colorbars(sc)
# toggle line contours of orders of magnitude of ratio comparisons
if self.order_contour_lines:
self.axis.contour(self.xvals[0], self.yvals[0], diff_out, np.array(
[-2.0, -1.0, 1.0, 2.0]), colors='black', linewidths=1.0)
# plot loss gain contour
if self.loss_gain_status is True:
# if there is no loss/gain contours, this will produce an error,
# so we catch the exception.
try:
# make hatching
cs = self.axis.contourf(self.xvals[0], self.yvals[0],
loss_gain_contour, levels=[-2, -0.5, 0.5, 2], colors='none',
hatches=['x', None, '+'])
# make loss/gain contour outline
self.axis.contour(self.xvals[0], self.yvals[0],
loss_gain_contour, 3, colors='black', linewidths=2)
except ValueError:
pass
if self.add_legend:
loss_patch = Patch(fill=None, label='Loss', hatch='x', linestyle='--', linewidth=2)
gain_patch = Patch(fill=None, label='Gain', hatch='+', linestyle='-', linewidth=2)
legend = self.axis.legend(handles=[loss_patch, gain_patch], **self.legend_kwargs)
return | [
"def",
"make_plot",
"(",
"self",
")",
":",
"# sets colormap for ratio comparison plot",
"cmap",
"=",
"getattr",
"(",
"cm",
",",
"self",
".",
"colormap",
")",
"# set values of ratio comparison contour",
"normval",
"=",
"2.0",
"num_contours",
"=",
"40",
"# must be even",
"levels",
"=",
"np",
".",
"linspace",
"(",
"-",
"normval",
",",
"normval",
",",
"num_contours",
")",
"norm",
"=",
"colors",
".",
"Normalize",
"(",
"-",
"normval",
",",
"normval",
")",
"# find Loss/Gain contour and Ratio contour",
"self",
".",
"set_comparison",
"(",
")",
"diff_out",
",",
"loss_gain_contour",
"=",
"self",
".",
"find_difference_contour",
"(",
")",
"cmap",
".",
"set_bad",
"(",
"color",
"=",
"'white'",
",",
"alpha",
"=",
"0.001",
")",
"# plot ratio contours",
"sc",
"=",
"self",
".",
"axis",
".",
"contourf",
"(",
"self",
".",
"xvals",
"[",
"0",
"]",
",",
"self",
".",
"yvals",
"[",
"0",
"]",
",",
"diff_out",
",",
"levels",
"=",
"levels",
",",
"norm",
"=",
"norm",
",",
"extend",
"=",
"'both'",
",",
"cmap",
"=",
"cmap",
")",
"self",
".",
"colorbar",
".",
"setup_colorbars",
"(",
"sc",
")",
"# toggle line contours of orders of magnitude of ratio comparisons",
"if",
"self",
".",
"order_contour_lines",
":",
"self",
".",
"axis",
".",
"contour",
"(",
"self",
".",
"xvals",
"[",
"0",
"]",
",",
"self",
".",
"yvals",
"[",
"0",
"]",
",",
"diff_out",
",",
"np",
".",
"array",
"(",
"[",
"-",
"2.0",
",",
"-",
"1.0",
",",
"1.0",
",",
"2.0",
"]",
")",
",",
"colors",
"=",
"'black'",
",",
"linewidths",
"=",
"1.0",
")",
"# plot loss gain contour",
"if",
"self",
".",
"loss_gain_status",
"is",
"True",
":",
"# if there is no loss/gain contours, this will produce an error,",
"# so we catch the exception.",
"try",
":",
"# make hatching",
"cs",
"=",
"self",
".",
"axis",
".",
"contourf",
"(",
"self",
".",
"xvals",
"[",
"0",
"]",
",",
"self",
".",
"yvals",
"[",
"0",
"]",
",",
"loss_gain_contour",
",",
"levels",
"=",
"[",
"-",
"2",
",",
"-",
"0.5",
",",
"0.5",
",",
"2",
"]",
",",
"colors",
"=",
"'none'",
",",
"hatches",
"=",
"[",
"'x'",
",",
"None",
",",
"'+'",
"]",
")",
"# make loss/gain contour outline",
"self",
".",
"axis",
".",
"contour",
"(",
"self",
".",
"xvals",
"[",
"0",
"]",
",",
"self",
".",
"yvals",
"[",
"0",
"]",
",",
"loss_gain_contour",
",",
"3",
",",
"colors",
"=",
"'black'",
",",
"linewidths",
"=",
"2",
")",
"except",
"ValueError",
":",
"pass",
"if",
"self",
".",
"add_legend",
":",
"loss_patch",
"=",
"Patch",
"(",
"fill",
"=",
"None",
",",
"label",
"=",
"'Loss'",
",",
"hatch",
"=",
"'x'",
",",
"linestyle",
"=",
"'--'",
",",
"linewidth",
"=",
"2",
")",
"gain_patch",
"=",
"Patch",
"(",
"fill",
"=",
"None",
",",
"label",
"=",
"'Gain'",
",",
"hatch",
"=",
"'+'",
",",
"linestyle",
"=",
"'-'",
",",
"linewidth",
"=",
"2",
")",
"legend",
"=",
"self",
".",
"axis",
".",
"legend",
"(",
"handles",
"=",
"[",
"loss_patch",
",",
"gain_patch",
"]",
",",
"*",
"*",
"self",
".",
"legend_kwargs",
")",
"return"
] | Creates the ratio plot. | [
"Creates",
"the",
"ratio",
"plot",
"."
] | python | train |
fermiPy/fermipy | fermipy/ltcube.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/ltcube.py#L20-L108 | def fill_livetime_hist(skydir, tab_sc, tab_gti, zmax, costh_edges):
"""Generate a sequence of livetime distributions at the sky
positions given by ``skydir``. The outputs of the method are two
NxM arrays containing a sequence of histograms for N sky positions
and M incidence angle bins where the bin edges are defined by
``costh_edges``. This method uses the same algorithm as
`gtltcube` with the exception that SC time intervals are assumed
to be aligned with GTIs.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Vector of sky directions for which livetime histograms will be
accumulated.
tab_sc : `~astropy.table.Table`
Spacecraft table. Must contain the following columns: START,
STOP, LIVETIME, RA_SCZ, DEC_SCZ, RA_ZENITH, DEC_ZENITH.
tab_gti : `~astropy.table.Table`
Table of good time intervals (GTIs).
zmax : float
Zenith cut.
costh_edges : `~numpy.ndarray`
Incidence angle bin edges in cos(angle).
Returns
-------
lt : `~numpy.ndarray`
Array of livetime histograms.
lt_wt : `~numpy.ndarray`
Array of histograms of weighted livetime (livetime x livetime
fraction).
"""
if len(tab_gti) == 0:
shape = (len(costh_edges) - 1, len(skydir))
return (np.zeros(shape), np.zeros(shape))
m = (tab_sc['START'] < tab_gti['STOP'][-1])
m &= (tab_sc['STOP'] > tab_gti['START'][0])
tab_sc = tab_sc[m]
cos_zmax = np.cos(np.radians(zmax))
sc_t0 = np.array(tab_sc['START'].data)
sc_t1 = np.array(tab_sc['STOP'].data)
sc_live = np.array(tab_sc['LIVETIME'].data)
sc_lfrac = sc_live / (sc_t1 - sc_t0)
sc_xyz = angle_to_cartesian(np.radians(tab_sc['RA_SCZ'].data),
np.radians(tab_sc['DEC_SCZ'].data))
zn_xyz = angle_to_cartesian(np.radians(tab_sc['RA_ZENITH'].data),
np.radians(tab_sc['DEC_ZENITH'].data))
tab_gti_t0 = np.array(tab_gti['START'].data)
tab_gti_t1 = np.array(tab_gti['STOP'].data)
# Index of the closest GTI interval
idx = np.digitize(sc_t0, tab_gti_t0) - 1
# start/stop time of closest GTI interval
gti_t0 = np.zeros_like(sc_t0)
gti_t1 = np.zeros_like(sc_t1)
gti_t0[idx >= 0] = tab_gti_t0[idx[idx >= 0]]
gti_t1[idx >= 0] = tab_gti_t1[idx[idx >= 0]]
nbin = len(costh_edges) - 1
lt = np.zeros((nbin,) + skydir.shape)
lt_wt = np.zeros((nbin,) + skydir.shape)
m0 = (idx >= 0) & (sc_t0 >= gti_t0) & (sc_t1 <= gti_t1)
xyz = angle_to_cartesian(skydir.ra.rad, skydir.dec.rad)
for i, t in enumerate(xyz):
cos_sep = utils.dot_prod(t, sc_xyz)
cos_zn = utils.dot_prod(t, zn_xyz)
m = m0 & (cos_zn > cos_zmax) & (cos_sep > 0.0)
bins = np.digitize(cos_sep[m], bins=costh_edges) - 1
bins = np.clip(bins, 0, nbin - 1)
lt[:, i] = np.bincount(bins, weights=sc_live[m], minlength=nbin)
lt_wt[:, i] = np.bincount(bins, weights=sc_live[m] * sc_lfrac[m],
minlength=nbin)
return lt, lt_wt | [
"def",
"fill_livetime_hist",
"(",
"skydir",
",",
"tab_sc",
",",
"tab_gti",
",",
"zmax",
",",
"costh_edges",
")",
":",
"if",
"len",
"(",
"tab_gti",
")",
"==",
"0",
":",
"shape",
"=",
"(",
"len",
"(",
"costh_edges",
")",
"-",
"1",
",",
"len",
"(",
"skydir",
")",
")",
"return",
"(",
"np",
".",
"zeros",
"(",
"shape",
")",
",",
"np",
".",
"zeros",
"(",
"shape",
")",
")",
"m",
"=",
"(",
"tab_sc",
"[",
"'START'",
"]",
"<",
"tab_gti",
"[",
"'STOP'",
"]",
"[",
"-",
"1",
"]",
")",
"m",
"&=",
"(",
"tab_sc",
"[",
"'STOP'",
"]",
">",
"tab_gti",
"[",
"'START'",
"]",
"[",
"0",
"]",
")",
"tab_sc",
"=",
"tab_sc",
"[",
"m",
"]",
"cos_zmax",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"radians",
"(",
"zmax",
")",
")",
"sc_t0",
"=",
"np",
".",
"array",
"(",
"tab_sc",
"[",
"'START'",
"]",
".",
"data",
")",
"sc_t1",
"=",
"np",
".",
"array",
"(",
"tab_sc",
"[",
"'STOP'",
"]",
".",
"data",
")",
"sc_live",
"=",
"np",
".",
"array",
"(",
"tab_sc",
"[",
"'LIVETIME'",
"]",
".",
"data",
")",
"sc_lfrac",
"=",
"sc_live",
"/",
"(",
"sc_t1",
"-",
"sc_t0",
")",
"sc_xyz",
"=",
"angle_to_cartesian",
"(",
"np",
".",
"radians",
"(",
"tab_sc",
"[",
"'RA_SCZ'",
"]",
".",
"data",
")",
",",
"np",
".",
"radians",
"(",
"tab_sc",
"[",
"'DEC_SCZ'",
"]",
".",
"data",
")",
")",
"zn_xyz",
"=",
"angle_to_cartesian",
"(",
"np",
".",
"radians",
"(",
"tab_sc",
"[",
"'RA_ZENITH'",
"]",
".",
"data",
")",
",",
"np",
".",
"radians",
"(",
"tab_sc",
"[",
"'DEC_ZENITH'",
"]",
".",
"data",
")",
")",
"tab_gti_t0",
"=",
"np",
".",
"array",
"(",
"tab_gti",
"[",
"'START'",
"]",
".",
"data",
")",
"tab_gti_t1",
"=",
"np",
".",
"array",
"(",
"tab_gti",
"[",
"'STOP'",
"]",
".",
"data",
")",
"# Index of the closest GTI interval",
"idx",
"=",
"np",
".",
"digitize",
"(",
"sc_t0",
",",
"tab_gti_t0",
")",
"-",
"1",
"# start/stop time of closest GTI interval",
"gti_t0",
"=",
"np",
".",
"zeros_like",
"(",
"sc_t0",
")",
"gti_t1",
"=",
"np",
".",
"zeros_like",
"(",
"sc_t1",
")",
"gti_t0",
"[",
"idx",
">=",
"0",
"]",
"=",
"tab_gti_t0",
"[",
"idx",
"[",
"idx",
">=",
"0",
"]",
"]",
"gti_t1",
"[",
"idx",
">=",
"0",
"]",
"=",
"tab_gti_t1",
"[",
"idx",
"[",
"idx",
">=",
"0",
"]",
"]",
"nbin",
"=",
"len",
"(",
"costh_edges",
")",
"-",
"1",
"lt",
"=",
"np",
".",
"zeros",
"(",
"(",
"nbin",
",",
")",
"+",
"skydir",
".",
"shape",
")",
"lt_wt",
"=",
"np",
".",
"zeros",
"(",
"(",
"nbin",
",",
")",
"+",
"skydir",
".",
"shape",
")",
"m0",
"=",
"(",
"idx",
">=",
"0",
")",
"&",
"(",
"sc_t0",
">=",
"gti_t0",
")",
"&",
"(",
"sc_t1",
"<=",
"gti_t1",
")",
"xyz",
"=",
"angle_to_cartesian",
"(",
"skydir",
".",
"ra",
".",
"rad",
",",
"skydir",
".",
"dec",
".",
"rad",
")",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"xyz",
")",
":",
"cos_sep",
"=",
"utils",
".",
"dot_prod",
"(",
"t",
",",
"sc_xyz",
")",
"cos_zn",
"=",
"utils",
".",
"dot_prod",
"(",
"t",
",",
"zn_xyz",
")",
"m",
"=",
"m0",
"&",
"(",
"cos_zn",
">",
"cos_zmax",
")",
"&",
"(",
"cos_sep",
">",
"0.0",
")",
"bins",
"=",
"np",
".",
"digitize",
"(",
"cos_sep",
"[",
"m",
"]",
",",
"bins",
"=",
"costh_edges",
")",
"-",
"1",
"bins",
"=",
"np",
".",
"clip",
"(",
"bins",
",",
"0",
",",
"nbin",
"-",
"1",
")",
"lt",
"[",
":",
",",
"i",
"]",
"=",
"np",
".",
"bincount",
"(",
"bins",
",",
"weights",
"=",
"sc_live",
"[",
"m",
"]",
",",
"minlength",
"=",
"nbin",
")",
"lt_wt",
"[",
":",
",",
"i",
"]",
"=",
"np",
".",
"bincount",
"(",
"bins",
",",
"weights",
"=",
"sc_live",
"[",
"m",
"]",
"*",
"sc_lfrac",
"[",
"m",
"]",
",",
"minlength",
"=",
"nbin",
")",
"return",
"lt",
",",
"lt_wt"
] | Generate a sequence of livetime distributions at the sky
positions given by ``skydir``. The outputs of the method are two
NxM arrays containing a sequence of histograms for N sky positions
and M incidence angle bins where the bin edges are defined by
``costh_edges``. This method uses the same algorithm as
`gtltcube` with the exception that SC time intervals are assumed
to be aligned with GTIs.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Vector of sky directions for which livetime histograms will be
accumulated.
tab_sc : `~astropy.table.Table`
Spacecraft table. Must contain the following columns: START,
STOP, LIVETIME, RA_SCZ, DEC_SCZ, RA_ZENITH, DEC_ZENITH.
tab_gti : `~astropy.table.Table`
Table of good time intervals (GTIs).
zmax : float
Zenith cut.
costh_edges : `~numpy.ndarray`
Incidence angle bin edges in cos(angle).
Returns
-------
lt : `~numpy.ndarray`
Array of livetime histograms.
lt_wt : `~numpy.ndarray`
Array of histograms of weighted livetime (livetime x livetime
fraction). | [
"Generate",
"a",
"sequence",
"of",
"livetime",
"distributions",
"at",
"the",
"sky",
"positions",
"given",
"by",
"skydir",
".",
"The",
"output",
"of",
"the",
"method",
"are",
"two",
"NxM",
"arrays",
"containing",
"a",
"sequence",
"of",
"histograms",
"for",
"N",
"sky",
"positions",
"and",
"M",
"incidence",
"angle",
"bins",
"where",
"the",
"bin",
"edges",
"are",
"defined",
"by",
"costh_edges",
".",
"This",
"method",
"uses",
"the",
"same",
"algorithm",
"as",
"gtltcube",
"with",
"the",
"exception",
"that",
"SC",
"time",
"intervals",
"are",
"assumed",
"to",
"be",
"aligned",
"with",
"GTIs",
"."
] | python | train |
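A minimal end-to-end sketch of calling ``fill_livetime_hist`` follows, assuming fermipy and astropy are importable; the toy table values and the single sky position are illustrative only, while the column names come from the docstring above.
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.table import Table
from fermipy.ltcube import fill_livetime_hist
# One 30 s spacecraft interval; SC z-axis at the pole, zenith on the equator.
tab_sc = Table({'START': [0.0], 'STOP': [30.0], 'LIVETIME': [27.0],
                'RA_SCZ': [0.0], 'DEC_SCZ': [90.0],
                'RA_ZENITH': [0.0], 'DEC_ZENITH': [0.0]})
tab_gti = Table({'START': [0.0], 'STOP': [30.0]})  # one GTI covering it
skydir = SkyCoord([0.0], [90.0], unit='deg')       # one position at the pole
costh_edges = np.linspace(0.2, 1.0, 41)            # 40 cos(theta) bins
lt, lt_wt = fill_livetime_hist(skydir, tab_sc, tab_gti,
                               zmax=105.0, costh_edges=costh_edges)
print(lt.shape)   # (40, 1): one livetime histogram per sky position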
chemlab/chemlab | chemlab/core/random.py | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/core/random.py#L40-L104 | def random_lattice_box(mol_list, mol_number, size,
spacing=np.array([0.3, 0.3, 0.3])):
'''Make a box by placing the molecules specified in *mol_list* on
random points of an evenly spaced lattice.
Using a lattice automatically ensures that no two molecules are
overlapping.
**Parameters**
mol_list: list of Molecule instances
A list of each kind of molecules to add to the system.
mol_number: list of int
The number of molecules to place for each kind.
size: np.ndarray((3,), float)
The box size in nm
spacing: np.ndarray((3,), float), [0.3 0.3 0.3]
The lattice spacing in nm.
**Returns**
A System instance.
**Example**
Typical box with 1000 water molecules randomly placed in a box of size
``[2.0 2.0 2.0]``::
from chemlab.db import ChemlabDB
# Example water molecule
water = ChemlabDB().get('molecule', 'example.water')
s = random_lattice_box([water], [1000], [2.0, 2.0, 2.0])
'''
# Generate the coordinates
positions = spaced_lattice(size, spacing)
# Randomize them
np.random.shuffle(positions)
n_mol = sum(mol_number)
n_atoms = sum(nmol*mol.n_atoms for mol, nmol in zip(mol_list, mol_number))
# Assert that we have enough space
assert len(positions) >= n_mol, "Can't fit {} molecules in {} spaces".format(n_mol,
len(positions))
box_vectors = np.zeros((3, 3))
box_vectors[0,0] = size[0]
box_vectors[1,1] = size[1]
box_vectors[2,2] = size[2]
# Initialize a system
s = System.empty()
with s.batch() as b:
mol_list = [m.copy() for m in mol_list]
# Add the molecules
pi = 0
for i, mol in enumerate(mol_list):
for j in range(mol_number[i]):
mol.move_to(positions[pi])
b.append(mol.copy())
pi += 1
return s | [
"def",
"random_lattice_box",
"(",
"mol_list",
",",
"mol_number",
",",
"size",
",",
"spacing",
"=",
"np",
".",
"array",
"(",
"[",
"0.3",
",",
"0.3",
",",
"0.3",
"]",
")",
")",
":",
"# Generate the coordinates",
"positions",
"=",
"spaced_lattice",
"(",
"size",
",",
"spacing",
")",
"# Randomize them",
"np",
".",
"random",
".",
"shuffle",
"(",
"positions",
")",
"n_mol",
"=",
"sum",
"(",
"mol_number",
")",
"n_atoms",
"=",
"sum",
"(",
"nmol",
"*",
"mol",
".",
"n_atoms",
"for",
"mol",
",",
"nmol",
"in",
"zip",
"(",
"mol_list",
",",
"mol_number",
")",
")",
"# Assert that we have enough space",
"assert",
"len",
"(",
"positions",
")",
">=",
"n_mol",
",",
"\"Can't fit {} molecules in {} spaces\"",
".",
"format",
"(",
"n_mol",
",",
"len",
"(",
"positions",
")",
")",
"box_vectors",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
")",
")",
"box_vectors",
"[",
"0",
",",
"0",
"]",
"=",
"size",
"[",
"0",
"]",
"box_vectors",
"[",
"1",
",",
"1",
"]",
"=",
"size",
"[",
"1",
"]",
"box_vectors",
"[",
"2",
",",
"2",
"]",
"=",
"size",
"[",
"2",
"]",
"# Initialize a system",
"s",
"=",
"System",
".",
"empty",
"(",
")",
"with",
"s",
".",
"batch",
"(",
")",
"as",
"b",
":",
"mol_list",
"=",
"[",
"m",
".",
"copy",
"(",
")",
"for",
"m",
"in",
"mol_list",
"]",
"# Add the molecules",
"pi",
"=",
"0",
"for",
"i",
",",
"mol",
"in",
"enumerate",
"(",
"mol_list",
")",
":",
"for",
"j",
"in",
"range",
"(",
"mol_number",
"[",
"i",
"]",
")",
":",
"mol",
".",
"move_to",
"(",
"positions",
"[",
"pi",
"]",
")",
"b",
".",
"append",
"(",
"mol",
".",
"copy",
"(",
")",
")",
"pi",
"+=",
"1",
"return",
"s"
] | Make a box by placing the molecules specified in *mol_list* on
random points of an evenly spaced lattice.
Using a lattice automatically ensures that no two molecules are
overlapping.
**Parameters**
mol_list: list of Molecule instances
A list of each kind of molecules to add to the system.
mol_number: list of int
The number of molecules to place for each kind.
size: np.ndarray((3,), float)
The box size in nm
spacing: np.ndarray((3,), float), [0.3 0.3 0.3]
The lattice spacing in nm.
**Returns**
A System instance.
**Example**
Typical box with 1000 water molecules randomly placed in a box of size
``[2.0 2.0 2.0]``::
from chemlab.db import ChemlabDB
# Example water molecule
water = ChemlabDB().get('molecule', 'example.water')
s = random_lattice_box([water], [1000], [2.0, 2.0, 2.0]) | [
"Make",
"a",
"box",
"by",
"placing",
"the",
"molecules",
"specified",
"in",
"*",
"mol_list",
"*",
"on",
"random",
"points",
"of",
"an",
"evenly",
"spaced",
"lattice",
"."
] | python | train |
bachya/pyairvisual | pyairvisual/supported.py | https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/supported.py#L12-L19 | async def cities(self, country: str, state: str) -> list:
"""Return a list of supported cities in a country/state."""
data = await self._request(
'get', 'cities', params={
'state': state,
'country': country
})
return [d['city'] for d in data['data']] | [
"async",
"def",
"cities",
"(",
"self",
",",
"country",
":",
"str",
",",
"state",
":",
"str",
")",
"->",
"list",
":",
"data",
"=",
"await",
"self",
".",
"_request",
"(",
"'get'",
",",
"'cities'",
",",
"params",
"=",
"{",
"'state'",
":",
"state",
",",
"'country'",
":",
"country",
"}",
")",
"return",
"[",
"d",
"[",
"'city'",
"]",
"for",
"d",
"in",
"data",
"[",
"'data'",
"]",
"]"
] | Return a list of supported cities in a country/state. | [
"Return",
"a",
"list",
"of",
"supported",
"cities",
"in",
"a",
"country",
"/",
"state",
"."
] | python | train |
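A hypothetical driver for the async ``cities`` coroutine above; the concrete client class and how it is constructed and authenticated are assumptions, not taken from this record.
import asyncio
async def main(client):
    # `client` is any object exposing the supported-data method above.
    cities = await client.cities(country='USA', state='California')
    print(cities)   # e.g. ['Los Angeles', 'San Francisco', ...]
# asyncio.run(main(client))  # supply a concrete, authenticated client here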
jwkvam/plotlywrapper | plotlywrapper.py | https://github.com/jwkvam/plotlywrapper/blob/762b42912e824fecb1212c186900f2ebdd0ab12b/plotlywrapper.py#L891-L940 | def fill_between(
x=None,
ylow=None,
yhigh=None,
label=None,
color=None,
width=None,
dash=None,
opacity=None,
mode='lines+markers',
**kargs
):
"""Fill between `ylow` and `yhigh`.
Parameters
----------
x : array-like, optional
ylow : array-like, optional
yhigh : array-like, optional
Returns
-------
Chart
"""
plot = line(
x=x,
y=ylow,
label=label,
color=color,
width=width,
dash=dash,
opacity=opacity,
mode=mode,
fill=None,
**kargs
)
plot += line(
x=x,
y=yhigh,
label=label,
color=color,
width=width,
dash=dash,
opacity=opacity,
mode=mode,
fill='tonexty',
**kargs
)
return plot | [
"def",
"fill_between",
"(",
"x",
"=",
"None",
",",
"ylow",
"=",
"None",
",",
"yhigh",
"=",
"None",
",",
"label",
"=",
"None",
",",
"color",
"=",
"None",
",",
"width",
"=",
"None",
",",
"dash",
"=",
"None",
",",
"opacity",
"=",
"None",
",",
"mode",
"=",
"'lines+markers'",
",",
"*",
"*",
"kargs",
")",
":",
"plot",
"=",
"line",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"ylow",
",",
"label",
"=",
"label",
",",
"color",
"=",
"color",
",",
"width",
"=",
"width",
",",
"dash",
"=",
"dash",
",",
"opacity",
"=",
"opacity",
",",
"mode",
"=",
"mode",
",",
"fill",
"=",
"None",
",",
"*",
"*",
"kargs",
")",
"plot",
"+=",
"line",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"yhigh",
",",
"label",
"=",
"label",
",",
"color",
"=",
"color",
",",
"width",
"=",
"width",
",",
"dash",
"=",
"dash",
",",
"opacity",
"=",
"opacity",
",",
"mode",
"=",
"mode",
",",
"fill",
"=",
"'tonexty'",
",",
"*",
"*",
"kargs",
")",
"return",
"plot"
] | Fill between `ylow` and `yhigh`.
Parameters
----------
x : array-like, optional
ylow : array-like, optional
yhigh : array-like, optional
Returns
-------
Chart | [
"Fill",
"between",
"ylow",
"and",
"yhigh",
"."
] | python | train |
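A short sketch of ``fill_between`` combined with ``line`` (Charts compose with ``+=`` exactly as in the source above); calling ``chart.show()`` is assumed from the library's usual fluent style.
import numpy as np
import plotlywrapper as pw
x = np.arange(50)
y = np.sin(x / 5.0)
chart = pw.fill_between(x=x, ylow=y - 0.2, yhigh=y + 0.2, label='band')
chart += pw.line(x=x, y=y, label='signal')  # overlay the central series
# chart.show()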
allenai/allennlp | allennlp/models/model.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/model.py#L126-L172 | def forward_on_instances(self,
instances: List[Instance]) -> List[Dict[str, numpy.ndarray]]:
"""
Takes a list of :class:`~allennlp.data.instance.Instance`s, converts that text into
arrays using this model's :class:`Vocabulary`, passes those arrays through
:func:`self.forward()` and :func:`self.decode()` (which by default does nothing)
and returns the result. Before returning the result, we convert any
``torch.Tensors`` into numpy arrays and separate the
batched output into a list of individual dicts per instance. Note that typically
this will be faster on a GPU (and conditionally, on a CPU) than repeated calls to
:func:`forward_on_instance`.
Parameters
----------
instances : List[Instance], required
The instances to run the model on.
Returns
-------
A list of the models output for each instance.
"""
batch_size = len(instances)
with torch.no_grad():
cuda_device = self._get_prediction_device()
dataset = Batch(instances)
dataset.index_instances(self.vocab)
model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device)
outputs = self.decode(self(**model_input))
instance_separated_output: List[Dict[str, numpy.ndarray]] = [{} for _ in dataset.instances]
for name, output in list(outputs.items()):
if isinstance(output, torch.Tensor):
# NOTE(markn): This is a hack because 0-dim pytorch tensors are not iterable.
# This occurs with batch size 1, because we still want to include the loss in that case.
if output.dim() == 0:
output = output.unsqueeze(0)
if output.size(0) != batch_size:
self._maybe_warn_for_unseparable_batches(name)
continue
output = output.detach().cpu().numpy()
elif len(output) != batch_size:
self._maybe_warn_for_unseparable_batches(name)
continue
for instance_output, batch_element in zip(instance_separated_output, output):
instance_output[name] = batch_element
return instance_separated_output | [
"def",
"forward_on_instances",
"(",
"self",
",",
"instances",
":",
"List",
"[",
"Instance",
"]",
")",
"->",
"List",
"[",
"Dict",
"[",
"str",
",",
"numpy",
".",
"ndarray",
"]",
"]",
":",
"batch_size",
"=",
"len",
"(",
"instances",
")",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"cuda_device",
"=",
"self",
".",
"_get_prediction_device",
"(",
")",
"dataset",
"=",
"Batch",
"(",
"instances",
")",
"dataset",
".",
"index_instances",
"(",
"self",
".",
"vocab",
")",
"model_input",
"=",
"util",
".",
"move_to_device",
"(",
"dataset",
".",
"as_tensor_dict",
"(",
")",
",",
"cuda_device",
")",
"outputs",
"=",
"self",
".",
"decode",
"(",
"self",
"(",
"*",
"*",
"model_input",
")",
")",
"instance_separated_output",
":",
"List",
"[",
"Dict",
"[",
"str",
",",
"numpy",
".",
"ndarray",
"]",
"]",
"=",
"[",
"{",
"}",
"for",
"_",
"in",
"dataset",
".",
"instances",
"]",
"for",
"name",
",",
"output",
"in",
"list",
"(",
"outputs",
".",
"items",
"(",
")",
")",
":",
"if",
"isinstance",
"(",
"output",
",",
"torch",
".",
"Tensor",
")",
":",
"# NOTE(markn): This is a hack because 0-dim pytorch tensors are not iterable.",
"# This occurs with batch size 1, because we still want to include the loss in that case.",
"if",
"output",
".",
"dim",
"(",
")",
"==",
"0",
":",
"output",
"=",
"output",
".",
"unsqueeze",
"(",
"0",
")",
"if",
"output",
".",
"size",
"(",
"0",
")",
"!=",
"batch_size",
":",
"self",
".",
"_maybe_warn_for_unseparable_batches",
"(",
"name",
")",
"continue",
"output",
"=",
"output",
".",
"detach",
"(",
")",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"elif",
"len",
"(",
"output",
")",
"!=",
"batch_size",
":",
"self",
".",
"_maybe_warn_for_unseparable_batches",
"(",
"name",
")",
"continue",
"for",
"instance_output",
",",
"batch_element",
"in",
"zip",
"(",
"instance_separated_output",
",",
"output",
")",
":",
"instance_output",
"[",
"name",
"]",
"=",
"batch_element",
"return",
"instance_separated_output"
] | Takes a list of :class:`~allennlp.data.instance.Instance`s, converts that text into
arrays using this model's :class:`Vocabulary`, passes those arrays through
:func:`self.forward()` and :func:`self.decode()` (which by default does nothing)
and returns the result. Before returning the result, we convert any
``torch.Tensors`` into numpy arrays and separate the
batched output into a list of individual dicts per instance. Note that typically
this will be faster on a GPU (and conditionally, on a CPU) than repeated calls to
:func:`forward_on_instance`.
Parameters
----------
instances : List[Instance], required
The instances to run the model on.
Returns
-------
A list of the models output for each instance. | [
"Takes",
"a",
"list",
"of",
":",
"class",
":",
"~allennlp",
".",
"data",
".",
"instance",
".",
"Instance",
"s",
"converts",
"that",
"text",
"into",
"arrays",
"using",
"this",
"model",
"s",
":",
"class",
":",
"Vocabulary",
"passes",
"those",
"arrays",
"through",
":",
"func",
":",
"self",
".",
"forward",
"()",
"and",
":",
"func",
":",
"self",
".",
"decode",
"()",
"(",
"which",
"by",
"default",
"does",
"nothing",
")",
"and",
"returns",
"the",
"result",
".",
"Before",
"returning",
"the",
"result",
"we",
"convert",
"any",
"torch",
".",
"Tensors",
"into",
"numpy",
"arrays",
"and",
"separate",
"the",
"batched",
"output",
"into",
"a",
"list",
"of",
"individual",
"dicts",
"per",
"instance",
".",
"Note",
"that",
"typically",
"this",
"will",
"be",
"faster",
"on",
"a",
"GPU",
"(",
"and",
"conditionally",
"on",
"a",
"CPU",
")",
"than",
"repeated",
"calls",
"to",
":",
"func",
":",
"forward_on_instance",
"."
] | python | train |
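A hedged sketch of driving ``forward_on_instances`` with a trained model; the archive path is a placeholder, and reaching the dataset reader through the predictor's private ``_dataset_reader`` attribute is an assumption about this AllenNLP version.
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
archive = load_archive('model.tar.gz')                # hypothetical path
model = archive.model
reader = Predictor.from_archive(archive)._dataset_reader  # assumed attribute
instances = [reader.text_to_instance('a sample sentence')]  # reader-specific
outputs = model.forward_on_instances(instances)
print(outputs[0].keys())   # one dict of numpy arrays per instance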
deepmind/pysc2 | pysc2/bin/valid_actions.py | https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/bin/valid_actions.py#L34-L56 | def main(unused_argv):
"""Print the valid actions."""
feats = features.Features(
# Actually irrelevant whether it's feature or rgb size.
features.AgentInterfaceFormat(
feature_dimensions=features.Dimensions(
screen=FLAGS.screen_size,
minimap=FLAGS.minimap_size)))
action_spec = feats.action_spec()
flattened = 0
count = 0
for func in action_spec.functions:
if FLAGS.hide_specific and actions.FUNCTIONS[func.id].general_id != 0:
continue
count += 1
act_flat = 1
for arg in func.args:
for size in arg.sizes:
act_flat *= size
flattened += act_flat
print(func.str(True))
print("Total base actions:", count)
print("Total possible actions (flattened):", flattened) | [
"def",
"main",
"(",
"unused_argv",
")",
":",
"feats",
"=",
"features",
".",
"Features",
"(",
"# Actually irrelevant whether it's feature or rgb size.",
"features",
".",
"AgentInterfaceFormat",
"(",
"feature_dimensions",
"=",
"features",
".",
"Dimensions",
"(",
"screen",
"=",
"FLAGS",
".",
"screen_size",
",",
"minimap",
"=",
"FLAGS",
".",
"minimap_size",
")",
")",
")",
"action_spec",
"=",
"feats",
".",
"action_spec",
"(",
")",
"flattened",
"=",
"0",
"count",
"=",
"0",
"for",
"func",
"in",
"action_spec",
".",
"functions",
":",
"if",
"FLAGS",
".",
"hide_specific",
"and",
"actions",
".",
"FUNCTIONS",
"[",
"func",
".",
"id",
"]",
".",
"general_id",
"!=",
"0",
":",
"continue",
"count",
"+=",
"1",
"act_flat",
"=",
"1",
"for",
"arg",
"in",
"func",
".",
"args",
":",
"for",
"size",
"in",
"arg",
".",
"sizes",
":",
"act_flat",
"*=",
"size",
"flattened",
"+=",
"act_flat",
"print",
"(",
"func",
".",
"str",
"(",
"True",
")",
")",
"print",
"(",
"\"Total base actions:\"",
",",
"count",
")",
"print",
"(",
"\"Total possible actions (flattened):\"",
",",
"flattened",
")"
] | Print the valid actions. | [
"Print",
"the",
"valid",
"actions",
"."
] | python | train |
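The flattening arithmetic above, isolated as a worked example with made-up argument sizes: each function contributes the product of all its argument sizes.
arg_sizes = [[2], [84, 84]]   # e.g. a queued flag plus a screen coordinate
act_flat = 1
for sizes in arg_sizes:
    for size in sizes:
        act_flat *= size
print(act_flat)               # 2 * 84 * 84 = 14112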
saltstack/salt | salt/modules/git.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/git.py#L4192-L4289 | def reset(cwd,
opts='',
git_opts='',
user=None,
password=None,
identity=None,
ignore_retcode=False,
output_encoding=None):
'''
Interface to `git-reset(1)`_, returns the stdout from the git command
cwd
The path to the git checkout
opts
Any additional options to add to the command line, in a single string
.. note::
On the Salt CLI, if the opts are preceded with a dash, it is
necessary to precede them with ``opts=`` (as in the CLI examples
below) to avoid causing errors with Salt's own argument parsing.
git_opts
Any additional options to add to git command itself (not the ``reset``
subcommand), in a single string. This is useful for passing ``-c`` to
run git with temporary changes to the git configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
identity
Path to a private key to use for ssh URLs. Salt will not attempt to use
passphrase-protected keys unless invoked from the minion using
``salt-call``, to prevent blocking waiting for user input. Key can also
be specified as a SaltStack file server URL, e.g.
``salt://location/identity_file``.
.. note::
For greater security with passphraseless private keys, see the
`sshd(8)`_ manpage for information on securing the keypair from the
remote side in the ``authorized_keys`` file.
.. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE_FORMAT
.. versionadded:: 2018.3.5,2019.2.1,Neon
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-reset(1)`: http://git-scm.com/docs/git-reset
CLI Examples:
.. code-block:: bash
# Soft reset to a specific commit ID
salt myminion git.reset /path/to/repo ac3ee5c
# Hard reset
salt myminion git.reset /path/to/repo opts='--hard origin/master'
'''
cwd = _expand_path(cwd, user)
command = ['git'] + _format_git_opts(git_opts)
command.append('reset')
command.extend(_format_opts(opts))
return _git_run(command,
cwd=cwd,
user=user,
password=password,
identity=identity,
ignore_retcode=ignore_retcode,
output_encoding=output_encoding)['stdout'] | [
"def",
"reset",
"(",
"cwd",
",",
"opts",
"=",
"''",
",",
"git_opts",
"=",
"''",
",",
"user",
"=",
"None",
",",
"password",
"=",
"None",
",",
"identity",
"=",
"None",
",",
"ignore_retcode",
"=",
"False",
",",
"output_encoding",
"=",
"None",
")",
":",
"cwd",
"=",
"_expand_path",
"(",
"cwd",
",",
"user",
")",
"command",
"=",
"[",
"'git'",
"]",
"+",
"_format_git_opts",
"(",
"git_opts",
")",
"command",
".",
"append",
"(",
"'reset'",
")",
"command",
".",
"extend",
"(",
"_format_opts",
"(",
"opts",
")",
")",
"return",
"_git_run",
"(",
"command",
",",
"cwd",
"=",
"cwd",
",",
"user",
"=",
"user",
",",
"password",
"=",
"password",
",",
"identity",
"=",
"identity",
",",
"ignore_retcode",
"=",
"ignore_retcode",
",",
"output_encoding",
"=",
"output_encoding",
")",
"[",
"'stdout'",
"]"
] | Interface to `git-reset(1)`_, returns the stdout from the git command
cwd
The path to the git checkout
opts
Any additional options to add to the command line, in a single string
.. note::
On the Salt CLI, if the opts are preceded with a dash, it is
necessary to precede them with ``opts=`` (as in the CLI examples
below) to avoid causing errors with Salt's own argument parsing.
git_opts
Any additional options to add to git command itself (not the ``reset``
subcommand), in a single string. This is useful for passing ``-c`` to
run git with temporary changes to the git configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
identity
Path to a private key to use for ssh URLs. Salt will not attempt to use
passphrase-protected keys unless invoked from the minion using
``salt-call``, to prevent blocking waiting for user input. Key can also
be specified as a SaltStack file server URL, e.g.
``salt://location/identity_file``.
.. note::
For greater security with passphraseless private keys, see the
`sshd(8)`_ manpage for information on securing the keypair from the
remote side in the ``authorized_keys`` file.
.. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE_FORMAT
.. versionadded:: 2018.3.5,2019.2.1,Neon
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-reset(1)`: http://git-scm.com/docs/git-reset
CLI Examples:
.. code-block:: bash
# Soft reset to a specific commit ID
salt myminion git.reset /path/to/repo ac3ee5c
# Hard reset
salt myminion git.reset /path/to/repo opts='--hard origin/master' | [
"Interface",
"to",
"git",
"-",
"reset",
"(",
"1",
")",
"_",
"returns",
"the",
"stdout",
"from",
"the",
"git",
"command"
] | python | train |
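A hedged Python-API counterpart to the CLI examples above; ``salt.client.LocalClient`` usage follows Salt's documented pattern, and the minion id and repository path are placeholders.
import salt.client
local = salt.client.LocalClient()
ret = local.cmd('myminion', 'git.reset',
                arg=['/path/to/repo'],
                kwarg={'opts': '--hard origin/master'})
print(ret['myminion'])   # stdout of the underlying `git reset`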
brechtm/rinohtype | src/rinoh/layout.py | https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/layout.py#L196-L200 | def place(self):
"""Place this container's canvas onto the parent container's canvas."""
self.place_children()
self.canvas.append(self.parent.canvas,
float(self.left), float(self.top)) | [
"def",
"place",
"(",
"self",
")",
":",
"self",
".",
"place_children",
"(",
")",
"self",
".",
"canvas",
".",
"append",
"(",
"self",
".",
"parent",
".",
"canvas",
",",
"float",
"(",
"self",
".",
"left",
")",
",",
"float",
"(",
"self",
".",
"top",
")",
")"
] | Place this container's canvas onto the parent container's canvas. | [
"Place",
"this",
"container",
"s",
"canvas",
"onto",
"the",
"parent",
"container",
"s",
"canvas",
"."
] | python | train |
datacats/datacats | datacats/docker.py | https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/docker.py#L394-L398 | def pull_stream(image):
"""
Return generator of pull status objects
"""
return (json.loads(s) for s in _get_docker().pull(image, stream=True)) | [
"def",
"pull_stream",
"(",
"image",
")",
":",
"return",
"(",
"json",
".",
"loads",
"(",
"s",
")",
"for",
"s",
"in",
"_get_docker",
"(",
")",
".",
"pull",
"(",
"image",
",",
"stream",
"=",
"True",
")",
")"
] | Return generator of pull status objects | [
"Return",
"generator",
"of",
"pull",
"status",
"objects"
] | python | train |
draios/python-sdc-client | sdcclient/_common.py | https://github.com/draios/python-sdc-client/blob/47f83415842048778939b90944f64386a3bcb205/sdcclient/_common.py#L521-L563 | def create_sysdig_capture(self, hostname, capture_name, duration, capture_filter='', folder='/'):
'''**Description**
Create a new sysdig capture. The capture will be immediately started.
**Arguments**
- **hostname**: the hostname of the instrumented host where the capture will be taken.
- **capture_name**: the name of the capture.
- **duration**: the duration of the capture, in seconds.
- **capture_filter**: a sysdig filter expression.
- **folder**: directory in the S3 bucket where the capture will be saved.
**Success Return Value**
A dictionary showing the details of the new capture.
**Example**
`examples/create_sysdig_capture.py <https://github.com/draios/python-sdc-client/blob/master/examples/create_sysdig_capture.py>`_
'''
res = self.get_connected_agents()
if not res[0]:
return res
capture_agent = None
for agent in res[1]:
if hostname == agent['hostName']:
capture_agent = agent
break
if capture_agent is None:
return [False, hostname + ' not found']
data = {
'agent': capture_agent,
'name': capture_name,
'duration': duration,
'folder': folder,
'filters': capture_filter,
'bucketName': '',
'source': self.product
}
res = requests.post(self.url + '/api/sysdig', headers=self.hdrs, data=json.dumps(data), verify=self.ssl_verify)
return self._request_result(res) | [
"def",
"create_sysdig_capture",
"(",
"self",
",",
"hostname",
",",
"capture_name",
",",
"duration",
",",
"capture_filter",
"=",
"''",
",",
"folder",
"=",
"'/'",
")",
":",
"res",
"=",
"self",
".",
"get_connected_agents",
"(",
")",
"if",
"not",
"res",
"[",
"0",
"]",
":",
"return",
"res",
"capture_agent",
"=",
"None",
"for",
"agent",
"in",
"res",
"[",
"1",
"]",
":",
"if",
"hostname",
"==",
"agent",
"[",
"'hostName'",
"]",
":",
"capture_agent",
"=",
"agent",
"break",
"if",
"capture_agent",
"is",
"None",
":",
"return",
"[",
"False",
",",
"hostname",
"+",
"' not found'",
"]",
"data",
"=",
"{",
"'agent'",
":",
"capture_agent",
",",
"'name'",
":",
"capture_name",
",",
"'duration'",
":",
"duration",
",",
"'folder'",
":",
"folder",
",",
"'filters'",
":",
"capture_filter",
",",
"'bucketName'",
":",
"''",
",",
"'source'",
":",
"self",
".",
"product",
"}",
"res",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"url",
"+",
"'/api/sysdig'",
",",
"headers",
"=",
"self",
".",
"hdrs",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"verify",
"=",
"self",
".",
"ssl_verify",
")",
"return",
"self",
".",
"_request_result",
"(",
"res",
")"
] | **Description**
Create a new sysdig capture. The capture will be immediately started.
**Arguments**
- **hostname**: the hostname of the instrumented host where the capture will be taken.
- **capture_name**: the name of the capture.
- **duration**: the duration of the capture, in seconds.
- **capture_filter**: a sysdig filter expression.
- **folder**: directory in the S3 bucket where the capture will be saved.
**Success Return Value**
A dictionary showing the details of the new capture.
**Example**
`examples/create_sysdig_capture.py <https://github.com/draios/python-sdc-client/blob/master/examples/create_sysdig_capture.py>`_ | [
"**",
"Description",
"**",
"Create",
"a",
"new",
"sysdig",
"capture",
".",
"The",
"capture",
"will",
"be",
"immediately",
"started",
"."
] | python | test |
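A hedged usage sketch; the ``SdcClient`` class name and token-based construction follow the sdcclient package's usual pattern rather than this record.
from sdcclient import SdcClient
client = SdcClient('API_TOKEN')   # hypothetical token
ok, res = client.create_sysdig_capture('my-host', 'debug-capture',
                                       duration=30,
                                       capture_filter='proc.name=nginx')
print(res if ok else 'error: %s' % res)   # [ok, payload] style, as above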
zhanglab/psamm | psamm/massconsistency.py | https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/massconsistency.py#L74-L138 | def check_reaction_consistency(database, solver, exchange=set(),
checked=set(), zeromass=set(), weights={}):
"""Check inconsistent reactions by minimizing mass residuals
Return a reaction iterable, and compound iterable. The reaction iterable
yields reaction ids and mass residuals. The compound iterable yields
compound ids and mass assignments.
Each compound is assigned a mass of at least one, and the masses are
balanced using the stoichiometric matrix. In addition, each reaction has a
residual mass that is included in the mass balance equations. The L1-norm
of the residuals is minimized. Reactions in the checked set are assumed to
have been manually checked and therefore have the residual fixed at zero.
"""
# Create Flux balance problem
prob = solver.create_problem()
compound_set = _non_localized_compounds(database)
mass_compounds = compound_set.difference(zeromass)
# Define mass variables
m = prob.namespace(mass_compounds, lower=1)
# Define residual mass variables and objective constraints
z = prob.namespace(database.reactions, lower=0)
r = prob.namespace(database.reactions)
objective = z.expr((reaction_id, weights.get(reaction_id, 1))
for reaction_id in database.reactions)
prob.set_objective(objective)
rs = r.set(database.reactions)
zs = z.set(database.reactions)
prob.add_linear_constraints(zs >= rs, rs >= -zs)
massbalance_lhs = {reaction_id: 0 for reaction_id in database.reactions}
for (compound, reaction_id), value in iteritems(database.matrix):
if compound not in zeromass:
mass_var = m(compound.in_compartment(None))
massbalance_lhs[reaction_id] += mass_var * value
for reaction_id, lhs in iteritems(massbalance_lhs):
if reaction_id not in exchange:
if reaction_id not in checked:
prob.add_linear_constraints(lhs + r(reaction_id) == 0)
else:
prob.add_linear_constraints(lhs == 0)
# Solve
try:
prob.solve(lp.ObjectiveSense.Minimize)
except lp.SolverError as e:
raise_from(
MassConsistencyError('Failed to solve mass consistency: {}'.format(
e)), e)
def iterate_reactions():
for reaction_id in database.reactions:
residual = r.value(reaction_id)
yield reaction_id, residual
def iterate_compounds():
for compound in mass_compounds:
yield compound, m.value(compound)
return iterate_reactions(), iterate_compounds() | [
"def",
"check_reaction_consistency",
"(",
"database",
",",
"solver",
",",
"exchange",
"=",
"set",
"(",
")",
",",
"checked",
"=",
"set",
"(",
")",
",",
"zeromass",
"=",
"set",
"(",
")",
",",
"weights",
"=",
"{",
"}",
")",
":",
"# Create Flux balance problem",
"prob",
"=",
"solver",
".",
"create_problem",
"(",
")",
"compound_set",
"=",
"_non_localized_compounds",
"(",
"database",
")",
"mass_compounds",
"=",
"compound_set",
".",
"difference",
"(",
"zeromass",
")",
"# Define mass variables",
"m",
"=",
"prob",
".",
"namespace",
"(",
"mass_compounds",
",",
"lower",
"=",
"1",
")",
"# Define residual mass variables and objective constriants",
"z",
"=",
"prob",
".",
"namespace",
"(",
"database",
".",
"reactions",
",",
"lower",
"=",
"0",
")",
"r",
"=",
"prob",
".",
"namespace",
"(",
"database",
".",
"reactions",
")",
"objective",
"=",
"z",
".",
"expr",
"(",
"(",
"reaction_id",
",",
"weights",
".",
"get",
"(",
"reaction_id",
",",
"1",
")",
")",
"for",
"reaction_id",
"in",
"database",
".",
"reactions",
")",
"prob",
".",
"set_objective",
"(",
"objective",
")",
"rs",
"=",
"r",
".",
"set",
"(",
"database",
".",
"reactions",
")",
"zs",
"=",
"z",
".",
"set",
"(",
"database",
".",
"reactions",
")",
"prob",
".",
"add_linear_constraints",
"(",
"zs",
">=",
"rs",
",",
"rs",
">=",
"-",
"zs",
")",
"massbalance_lhs",
"=",
"{",
"reaction_id",
":",
"0",
"for",
"reaction_id",
"in",
"database",
".",
"reactions",
"}",
"for",
"(",
"compound",
",",
"reaction_id",
")",
",",
"value",
"in",
"iteritems",
"(",
"database",
".",
"matrix",
")",
":",
"if",
"compound",
"not",
"in",
"zeromass",
":",
"mass_var",
"=",
"m",
"(",
"compound",
".",
"in_compartment",
"(",
"None",
")",
")",
"massbalance_lhs",
"[",
"reaction_id",
"]",
"+=",
"mass_var",
"*",
"value",
"for",
"reaction_id",
",",
"lhs",
"in",
"iteritems",
"(",
"massbalance_lhs",
")",
":",
"if",
"reaction_id",
"not",
"in",
"exchange",
":",
"if",
"reaction_id",
"not",
"in",
"checked",
":",
"prob",
".",
"add_linear_constraints",
"(",
"lhs",
"+",
"r",
"(",
"reaction_id",
")",
"==",
"0",
")",
"else",
":",
"prob",
".",
"add_linear_constraints",
"(",
"lhs",
"==",
"0",
")",
"# Solve",
"try",
":",
"prob",
".",
"solve",
"(",
"lp",
".",
"ObjectiveSense",
".",
"Minimize",
")",
"except",
"lp",
".",
"SolverError",
"as",
"e",
":",
"raise_from",
"(",
"MassConsistencyError",
"(",
"'Failed to solve mass consistency: {}'",
".",
"format",
"(",
"e",
")",
")",
",",
"e",
")",
"def",
"iterate_reactions",
"(",
")",
":",
"for",
"reaction_id",
"in",
"database",
".",
"reactions",
":",
"residual",
"=",
"r",
".",
"value",
"(",
"reaction_id",
")",
"yield",
"reaction_id",
",",
"residual",
"def",
"iterate_compounds",
"(",
")",
":",
"for",
"compound",
"in",
"mass_compounds",
":",
"yield",
"compound",
",",
"m",
".",
"value",
"(",
"compound",
")",
"return",
"iterate_reactions",
"(",
")",
",",
"iterate_compounds",
"(",
")"
] | Check inconsistent reactions by minimizing mass residuals
Return a reaction iterable, and compound iterable. The reaction iterable
yields reaction ids and mass residuals. The compound iterable yields
compound ids and mass assignments.
Each compound is assigned a mass of at least one, and the masses are
balanced using the stoichiometric matrix. In addition, each reaction has a
residual mass that is included in the mass balance equations. The L1-norm
of the residuals is minimized. Reactions in the checked set are assumed to
have been manually checked and therefore have the residual fixed at zero. | [
"Check",
"inconsistent",
"reactions",
"by",
"minimizing",
"mass",
"residuals"
] | python | train |
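A sketch of consuming the two returned iterables; the ``database`` object and the generic solver construction are assumptions following psamm's usual API.
from psamm.lpsolver import generic
solver = generic.Solver()   # picks any available LP solver backend
# `database` is a prepared metabolic database object (assumed to exist here)
reactions, compounds = check_reaction_consistency(database, solver)
inconsistent = [rid for rid, residual in reactions if abs(residual) > 1e-6]
masses = dict(compounds)    # compound -> assigned mass (each >= 1)
print(inconsistent)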
mcieslik-mctp/papy | src/papy/core.py | https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/core.py#L444-L457 | def add_pipers(self, pipers, *args, **kwargs):
"""
Adds a sequence of ``Pipers`` instances to the ``Dagger`` in the
specified order. Takes optional arguments for ``Dagger.add_piper``.
Arguments:
- pipers(sequence of valid ``add_piper`` arguments) Sequence of
``Pipers`` or valid ``Dagger.add_piper`` arguments to be added to
the ``Dagger`` in the left to right order.
"""
for piper in pipers:
self.add_piper(piper, *args, **kwargs) | [
"def",
"add_pipers",
"(",
"self",
",",
"pipers",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"piper",
"in",
"pipers",
":",
"self",
".",
"add_piper",
"(",
"piper",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Adds a sequence of ``Pipers`` instances to the ``Dagger`` in the
specified order. Takes optional arguments for ``Dagger.add_piper``.
Arguments:
- pipers(sequence of valid ``add_piper`` arguments) Sequence of
``Pipers`` or valid ``Dagger.add_piper`` arguments to be added to
the ``Dagger`` in the left to right order. | [
"Adds",
"a",
"sequence",
"of",
"Pipers",
"instances",
"to",
"the",
"Dagger",
"in",
"the",
"specified",
"order",
".",
"Takes",
"optional",
"arguments",
"for",
"Dagger",
".",
"add_piper",
".",
"Arguments",
":",
"-",
"pipers",
"(",
"sequence",
"of",
"valid",
"add_piper",
"arguments",
")",
"Sequence",
"of",
"Pipers",
"or",
"valid",
"Dagger",
".",
"add_piper",
"arguments",
"to",
"be",
"added",
"to",
"the",
"Dagger",
"in",
"the",
"left",
"to",
"right",
"order",
"."
] | python | train |
ThreatConnect-Inc/tcex | tcex/tcex_bin_run.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L1264-L1284 | def staging_data(self):
"""Read data files and return all staging data for current profile."""
if self._staging_data is None:
staging_data = []
for staging_file in self.profile.get('data_files') or []:
if os.path.isfile(staging_file):
print(
'Staging Data: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, staging_file)
)
self.log.info('[stage] Staging data file: {}'.format(staging_file))
f = open(staging_file, 'r')
staging_data.extend(json.load(f))
f.close()
else:
print(
'{}{}Could not find file {}.'.format(
c.Style.BRIGHT, c.Fore.RED, staging_file
)
)
self._staging_data = staging_data
return self._staging_data | [
"def",
"staging_data",
"(",
"self",
")",
":",
"if",
"self",
".",
"_staging_data",
"is",
"None",
":",
"staging_data",
"=",
"[",
"]",
"for",
"staging_file",
"in",
"self",
".",
"profile",
".",
"get",
"(",
"'data_files'",
")",
"or",
"[",
"]",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"staging_file",
")",
":",
"print",
"(",
"'Staging Data: {}{}{}'",
".",
"format",
"(",
"c",
".",
"Style",
".",
"BRIGHT",
",",
"c",
".",
"Fore",
".",
"MAGENTA",
",",
"staging_file",
")",
")",
"self",
".",
"log",
".",
"info",
"(",
"'[stage] Staging data file: {}'",
".",
"format",
"(",
"staging_file",
")",
")",
"f",
"=",
"open",
"(",
"staging_file",
",",
"'r'",
")",
"staging_data",
".",
"extend",
"(",
"json",
".",
"load",
"(",
"f",
")",
")",
"f",
".",
"close",
"(",
")",
"else",
":",
"print",
"(",
"'{}{}Could not find file {}.'",
".",
"format",
"(",
"c",
".",
"Style",
".",
"BRIGHT",
",",
"c",
".",
"Fore",
".",
"RED",
",",
"staging_file",
")",
")",
"self",
".",
"_staging_data",
"=",
"staging_data",
"return",
"self",
".",
"_staging_data"
] | Read data files and return all staging data for current profile. | [
"Read",
"data",
"files",
"and",
"return",
"all",
"staging",
"data",
"for",
"current",
"profile",
"."
] | python | train |
Xion/taipan | taipan/functional/constructs.py | https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/functional/constructs.py#L37-L54 | def raise_(exception=ABSENT, *args, **kwargs):
"""Raise (or re-raises) an exception.
:param exception: Exception object to raise, or an exception class.
In the latter case, remaining arguments are passed
to the exception's constructor.
If omitted, the currently handled exception is re-raised.
"""
if exception is ABSENT:
raise
else:
if inspect.isclass(exception):
raise exception(*args, **kwargs)
else:
if args or kwargs:
raise TypeError("can't pass arguments along with "
"exception object to raise_()")
raise exception | [
"def",
"raise_",
"(",
"exception",
"=",
"ABSENT",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"exception",
"is",
"ABSENT",
":",
"raise",
"else",
":",
"if",
"inspect",
".",
"isclass",
"(",
"exception",
")",
":",
"raise",
"exception",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"if",
"args",
"or",
"kwargs",
":",
"raise",
"TypeError",
"(",
"\"can't pass arguments along with \"",
"\"exception object to raise_()\"",
")",
"raise",
"exception"
] | Raise (or re-raise) an exception.
:param exception: Exception object to raise, or an exception class.
In the latter case, remaining arguments are passed
to the exception's constructor.
If omitted, the currently handled exception is re-raised. | [
"Raise",
"(",
"or",
"re",
"-",
"raises",
")",
"an",
"exception",
"."
] | python | train |
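The call shapes described in the docstring, exercised directly; the import path follows this record's module path.
from taipan.functional.constructs import raise_
try:
    raise_(ValueError, 'bad value')   # exception class + constructor args
except ValueError as e:
    print(e)                          # -> bad value
try:
    try:
        1 / 0
    except ZeroDivisionError:
        raise_()                      # bare call re-raises the active one
except ZeroDivisionError:
    print('re-raised as expected')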
naphatkrit/easyci | easyci/vcs/git.py | https://github.com/naphatkrit/easyci/blob/7aee8d7694fe4e2da42ce35b0f700bc840c8b95f/easyci/vcs/git.py#L123-L143 | def get_ignored_files(self):
"""Returns the list of files being ignored in this repository.
Note that file names, not directories, are returned.
So, we will get the following:
a/b.txt
a/c.txt
instead of just:
a/
Returns:
List[str] - list of ignored files. The paths are absolute.
"""
return [os.path.join(self.path, p) for p in
self.run('ls-files', '--ignored', '--exclude-standard',
'--others').strip().split()
] | [
"def",
"get_ignored_files",
"(",
"self",
")",
":",
"return",
"[",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"p",
")",
"for",
"p",
"in",
"self",
".",
"run",
"(",
"'ls-files'",
",",
"'--ignored'",
",",
"'--exclude-standard'",
",",
"'--others'",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"]"
] | Returns the list of files being ignored in this repository.
Note that file names, not directories, are returned.
So, we will get the following:
a/b.txt
a/c.txt
instead of just:
a/
Returns:
List[str] - list of ignored files. The paths are absolute. | [
"Returns",
"the",
"list",
"of",
"files",
"being",
"ignored",
"in",
"this",
"repository",
"."
] | python | train |
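A hedged sketch; the ``GitVcs`` class name and its constructor argument are inferred from the module path, not stated in this record.
from easyci.vcs.git import GitVcs
repo = GitVcs('/path/to/checkout')     # hypothetical repository path
for path in repo.get_ignored_files():
    print(path)                        # absolute paths, per the docstring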
ianepperson/pyredminews | redmine/redmine.py | https://github.com/ianepperson/pyredminews/blob/b2b0581483632738a3acca3b4e093c181847b813/redmine/redmine.py#L282-L284 | def close(self, notes=None):
'''Save all changes and close this issue'''
self.set_status(self._redmine.ISSUE_STATUS_ID_CLOSED, notes=notes) | [
"def",
"close",
"(",
"self",
",",
"notes",
"=",
"None",
")",
":",
"self",
".",
"set_status",
"(",
"self",
".",
"_redmine",
".",
"ISSUE_STATUS_ID_CLOSED",
",",
"notes",
"=",
"notes",
")"
] | Save all changes and close this issue | [
"Save",
"all",
"changes",
"and",
"close",
"this",
"issue"
] | python | train |
kivy/python-for-android | pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/debug.py | https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/debug.py#L62-L69 | def chain_frames(self):
"""Chains the frames. Requires ctypes or the speedups extension."""
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.tb_next = tb
prev_tb = tb
prev_tb.tb_next = None | [
"def",
"chain_frames",
"(",
"self",
")",
":",
"prev_tb",
"=",
"None",
"for",
"tb",
"in",
"self",
".",
"frames",
":",
"if",
"prev_tb",
"is",
"not",
"None",
":",
"prev_tb",
".",
"tb_next",
"=",
"tb",
"prev_tb",
"=",
"tb",
"prev_tb",
".",
"tb_next",
"=",
"None"
] | Chains the frames. Requires ctypes or the speedups extension. | [
"Chains",
"the",
"frames",
".",
"Requires",
"ctypes",
"or",
"the",
"speedups",
"extension",
"."
] | python | train |
pycontribs/pyrax | pyrax/object_storage.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L1319-L1333 | def list_subdirs(self, container, marker=None, limit=None, prefix=None,
delimiter=None, full_listing=False):
"""
Returns a list of StorageObjects representing the pseudo-subdirectories
in the specified container. You can use the marker and limit params to
handle pagination, and the prefix param to filter the objects returned.
The 'delimiter' parameter is ignored, as the only meaningful value is
'/'.
"""
mthd = container.list_all if full_listing else container.list
objs = mthd(marker=marker, limit=limit, prefix=prefix, delimiter="/",
return_raw=True)
sdirs = [obj for obj in objs if "subdir" in obj]
mgr = container.object_manager
return [StorageObject(mgr, sdir) for sdir in sdirs] | [
"def",
"list_subdirs",
"(",
"self",
",",
"container",
",",
"marker",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"prefix",
"=",
"None",
",",
"delimiter",
"=",
"None",
",",
"full_listing",
"=",
"False",
")",
":",
"mthd",
"=",
"container",
".",
"list_all",
"if",
"full_listing",
"else",
"container",
".",
"list",
"objs",
"=",
"mthd",
"(",
"marker",
"=",
"marker",
",",
"limit",
"=",
"limit",
",",
"prefix",
"=",
"prefix",
",",
"delimiter",
"=",
"\"/\"",
",",
"return_raw",
"=",
"True",
")",
"sdirs",
"=",
"[",
"obj",
"for",
"obj",
"in",
"objs",
"if",
"\"subdir\"",
"in",
"obj",
"]",
"mgr",
"=",
"container",
".",
"object_manager",
"return",
"[",
"StorageObject",
"(",
"mgr",
",",
"sdir",
")",
"for",
"sdir",
"in",
"sdirs",
"]"
] | Returns a list of StorageObjects representing the pseudo-subdirectories
in the specified container. You can use the marker and limit params to
handle pagination, and the prefix param to filter the objects returned.
The 'delimiter' parameter is ignored, as the only meaningful value is
'/'. | [
"Returns",
"a",
"list",
"of",
"StorageObjects",
"representing",
"the",
"pseudo",
"-",
"subdirectories",
"in",
"the",
"specified",
"container",
".",
"You",
"can",
"use",
"the",
"marker",
"and",
"limit",
"params",
"to",
"handle",
"pagination",
"and",
"the",
"prefix",
"param",
"to",
"filter",
"the",
"objects",
"returned",
".",
"The",
"delimiter",
"parameter",
"is",
"ignored",
"as",
"the",
"only",
"meaningful",
"value",
"is",
"/",
"."
] | python | train |
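A hedged walk through ``list_subdirs`` with pyrax's usual authentication flow; the credentials file and container name are placeholders.
import pyrax
pyrax.set_credential_file('~/.pyrax.cfg')      # hypothetical credentials
cf = pyrax.cloudfiles
container = cf.get_container('my-container')
for sdir in cf.list_subdirs(container, prefix='photos/'):
    print(sdir.name)                           # pseudo-subdirectory names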
Alignak-monitoring/alignak | alignak/objects/config.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/config.py#L976-L998 | def clean_params(self, params):
"""Convert a list of parameters (key=value) into a dict
This function is used to transform Nagios (or ini) like formated parameters (key=value)
to a dictionary.
:param params: parameters list
:type params: list
:return: dict with key and value. Log error if malformed
:rtype: dict
"""
clean_p = {}
for elt in params:
elts = elt.split('=', 1)
if len(elts) == 1: # error, there is no = !
self.add_error("the parameter %s is malformed! (no = sign)" % elts[0])
else:
if elts[1] == '':
self.add_warning("the parameter %s is ambiguous! "
"No value after =, assuming an empty string" % elts[0])
clean_p[elts[0]] = elts[1]
return clean_p | [
"def",
"clean_params",
"(",
"self",
",",
"params",
")",
":",
"clean_p",
"=",
"{",
"}",
"for",
"elt",
"in",
"params",
":",
"elts",
"=",
"elt",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"len",
"(",
"elts",
")",
"==",
"1",
":",
"# error, there is no = !",
"self",
".",
"add_error",
"(",
"\"the parameter %s is malformed! (no = sign)\"",
"%",
"elts",
"[",
"0",
"]",
")",
"else",
":",
"if",
"elts",
"[",
"1",
"]",
"==",
"''",
":",
"self",
".",
"add_warning",
"(",
"\"the parameter %s is ambiguous! \"",
"\"No value after =, assuming an empty string\"",
"%",
"elts",
"[",
"0",
"]",
")",
"clean_p",
"[",
"elts",
"[",
"0",
"]",
"]",
"=",
"elts",
"[",
"1",
"]",
"return",
"clean_p"
] | Convert a list of parameters (key=value) into a dict
This function is used to transform Nagios (or ini) like formated parameters (key=value)
to a dictionary.
:param params: parameters list
:type params: list
:return: dict with key and value. Log error if malformed
:rtype: dict | [
"Convert",
"a",
"list",
"of",
"parameters",
"(",
"key",
"=",
"value",
")",
"into",
"a",
"dict"
] | python | train |
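What ``clean_params`` produces for a typical Nagios-style list, shown as a standalone worked example mirroring the loop above.
params = ['host_name=web01', 'retry_interval=5', 'broken', 'empty=']
clean = {}
for elt in params:
    elts = elt.split('=', 1)
    if len(elts) == 2:       # 'broken' has no '=' and is reported, not kept
        clean[elts[0]] = elts[1]
print(clean)   # {'host_name': 'web01', 'retry_interval': '5', 'empty': ''}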
twosigma/beakerx | beakerx/setupbase.py | https://github.com/twosigma/beakerx/blob/404de61ed627d9daaf6b77eb4859e7cb6f37413f/beakerx/setupbase.py#L155-L162 | def is_stale(target, source):
"""Test whether the target file/directory is stale based on the source
file/directory.
"""
if not os.path.exists(target):
return True
target_mtime = recursive_mtime(target) or 0
return compare_recursive_mtime(source, cutoff=target_mtime) | [
"def",
"is_stale",
"(",
"target",
",",
"source",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"target",
")",
":",
"return",
"True",
"target_mtime",
"=",
"recursive_mtime",
"(",
"target",
")",
"or",
"0",
"return",
"compare_recursive_mtime",
"(",
"source",
",",
"cutoff",
"=",
"target_mtime",
")"
] | Test whether the target file/directory is stale based on the source
file/directory. | [
"Test",
"whether",
"the",
"target",
"file",
"/",
"directory",
"is",
"stale",
"based",
"on",
"the",
"source",
"file",
"/",
"directory",
"."
] | python | train |
stephenmcd/gnotty | gnotty/bots/commands.py | https://github.com/stephenmcd/gnotty/blob/bea3762dc9cbc3cb21a5ae7224091cf027273c40/gnotty/bots/commands.py#L45-L67 | def timesince(self, when):
"""
Returns human friendly version of the timespan between now
and the given datetime.
"""
units = (
("year", 60 * 60 * 24 * 365),
("week", 60 * 60 * 24 * 7),
("day", 60 * 60 * 24),
("hour", 60 * 60),
("minute", 60),
("second", 1),
)
delta = datetime.now() - when
total_seconds = delta.days * 60 * 60 * 24 + delta.seconds
parts = []
for name, seconds in units:
value = total_seconds / seconds
if value > 0:
total_seconds %= seconds
s = "s" if value != 1 else ""
parts.append("%s %s%s" % (value, name, s))
return " and ".join(", ".join(parts).rsplit(", ", 1)) | [
"def",
"timesince",
"(",
"self",
",",
"when",
")",
":",
"units",
"=",
"(",
"(",
"\"year\"",
",",
"60",
"*",
"60",
"*",
"24",
"*",
"365",
")",
",",
"(",
"\"week\"",
",",
"60",
"*",
"60",
"*",
"24",
"*",
"7",
")",
",",
"(",
"\"day\"",
",",
"60",
"*",
"60",
"*",
"24",
")",
",",
"(",
"\"hour\"",
",",
"60",
"*",
"60",
")",
",",
"(",
"\"minute\"",
",",
"60",
")",
",",
"(",
"\"second\"",
",",
"1",
")",
",",
")",
"delta",
"=",
"datetime",
".",
"now",
"(",
")",
"-",
"when",
"total_seconds",
"=",
"delta",
".",
"days",
"*",
"60",
"*",
"60",
"*",
"24",
"+",
"delta",
".",
"seconds",
"parts",
"=",
"[",
"]",
"for",
"name",
",",
"seconds",
"in",
"units",
":",
"value",
"=",
"total_seconds",
"/",
"seconds",
"if",
"value",
">",
"0",
":",
"total_seconds",
"%=",
"seconds",
"s",
"=",
"\"s\"",
"if",
"value",
"!=",
"1",
"else",
"\"\"",
"parts",
".",
"append",
"(",
"\"%s %s%s\"",
"%",
"(",
"value",
",",
"name",
",",
"s",
")",
")",
"return",
"\" and \"",
".",
"join",
"(",
"\", \"",
".",
"join",
"(",
"parts",
")",
".",
"rsplit",
"(",
"\", \"",
",",
"1",
")",
")"
] | Returns human friendly version of the timespan between now
and the given datetime. | [
"Returns",
"human",
"friendly",
"version",
"of",
"the",
"timespan",
"between",
"now",
"and",
"the",
"given",
"datetime",
"."
] | python | train |
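The greedy unit decomposition behind ``timesince``, run on a fixed span; floor division is used here so the arithmetic also holds on Python 3 (the original's ``/`` floors only on Python 2).
units = (('year', 60 * 60 * 24 * 365), ('week', 60 * 60 * 24 * 7),
         ('day', 60 * 60 * 24), ('hour', 60 * 60), ('minute', 60),
         ('second', 1))
total_seconds = 93784          # 1 day, 2 hours, 3 minutes, 4 seconds
parts = []
for name, seconds in units:
    value = total_seconds // seconds
    if value > 0:
        total_seconds %= seconds
        parts.append('%s %s%s' % (value, name, 's' if value != 1 else ''))
print(' and '.join(', '.join(parts).rsplit(', ', 1)))
# -> 1 day, 2 hours, 3 minutes and 4 seconds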
cga-harvard/Hypermap-Registry | hypermap/search_api/utils.py | https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search_api/utils.py#L185-L195 | def gap_to_sorl(time_gap):
"""
P1D to +1DAY
:param time_gap:
:return: solr's format duration.
"""
quantity, unit = parse_ISO8601(time_gap)
if unit[0] == "WEEKS":
return "+{0}DAYS".format(quantity * 7)
else:
return "+{0}{1}".format(quantity, unit[0]) | [
"def",
"gap_to_sorl",
"(",
"time_gap",
")",
":",
"quantity",
",",
"unit",
"=",
"parse_ISO8601",
"(",
"time_gap",
")",
"if",
"unit",
"[",
"0",
"]",
"==",
"\"WEEKS\"",
":",
"return",
"\"+{0}DAYS\"",
".",
"format",
"(",
"quantity",
"*",
"7",
")",
"else",
":",
"return",
"\"+{0}{1}\"",
".",
"format",
"(",
"quantity",
",",
"unit",
"[",
"0",
"]",
")"
] | P1D to +1DAY
:param time_gap:
:return: solr's format duration. | [
"P1D",
"to",
"+",
"1DAY",
":",
"param",
"time_gap",
":",
":",
"return",
":",
"solr",
"s",
"format",
"duration",
"."
] | python | train |
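Hedged examples of the mapping; the exact unit spelling in the non-week branch depends on what the internal ``parse_ISO8601`` helper returns for ``unit[0]``.
from hypermap.search_api.utils import gap_to_sorl
print(gap_to_sorl('P1D'))   # '+1DAY' per the docstring above
print(gap_to_sorl('P2W'))   # '+14DAYS' -- weeks are expanded to days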
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/task/task_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/task/task_client.py#L353-L374 | def create_timeline(self, timeline, scope_identifier, hub_name, plan_id):
"""CreateTimeline.
:param :class:`<Timeline> <azure.devops.v5_0.task.models.Timeline>` timeline:
:param str scope_identifier: The project GUID to scope the request
:param str hub_name: The name of the server hub: "build" for the Build server or "rm" for the Release Management server
:param str plan_id:
:rtype: :class:`<Timeline> <azure.devops.v5_0.task.models.Timeline>`
"""
route_values = {}
if scope_identifier is not None:
route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str')
if hub_name is not None:
route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str')
if plan_id is not None:
route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str')
content = self._serialize.body(timeline, 'Timeline')
response = self._send(http_method='POST',
location_id='83597576-cc2c-453c-bea6-2882ae6a1653',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('Timeline', response) | [
"def",
"create_timeline",
"(",
"self",
",",
"timeline",
",",
"scope_identifier",
",",
"hub_name",
",",
"plan_id",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"scope_identifier",
"is",
"not",
"None",
":",
"route_values",
"[",
"'scopeIdentifier'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'scope_identifier'",
",",
"scope_identifier",
",",
"'str'",
")",
"if",
"hub_name",
"is",
"not",
"None",
":",
"route_values",
"[",
"'hubName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'hub_name'",
",",
"hub_name",
",",
"'str'",
")",
"if",
"plan_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'planId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'plan_id'",
",",
"plan_id",
",",
"'str'",
")",
"content",
"=",
"self",
".",
"_serialize",
".",
"body",
"(",
"timeline",
",",
"'Timeline'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'POST'",
",",
"location_id",
"=",
"'83597576-cc2c-453c-bea6-2882ae6a1653'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
",",
"content",
"=",
"content",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'Timeline'",
",",
"response",
")"
] | CreateTimeline.
:param :class:`<Timeline> <azure.devops.v5_0.task.models.Timeline>` timeline:
:param str scope_identifier: The project GUID to scope the request
:param str hub_name: The name of the server hub: "build" for the Build server or "rm" for the Release Management server
:param str plan_id:
:rtype: :class:`<Timeline> <azure.devops.v5_0.task.models.Timeline>` | [
"CreateTimeline",
".",
":",
"param",
":",
"class",
":",
"<Timeline",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"task",
".",
"models",
".",
"Timeline",
">",
"timeline",
":",
":",
"param",
"str",
"scope_identifier",
":",
"The",
"project",
"GUID",
"to",
"scope",
"the",
"request",
":",
"param",
"str",
"hub_name",
":",
"The",
"name",
"of",
"the",
"server",
"hub",
":",
"build",
"for",
"the",
"Build",
"server",
"or",
"rm",
"for",
"the",
"Release",
"Management",
"server",
":",
"param",
"str",
"plan_id",
":",
":",
"rtype",
":",
":",
"class",
":",
"<Timeline",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"task",
".",
"models",
".",
"Timeline",
">"
] | python | train |
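A hedged usage sketch for `create_timeline`. Connection setup follows the azure-devops package's documented pattern; the `get_task_client` accessor name and all ids below are assumptions or placeholders:

```python
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication
from azure.devops.v5_0.task.models import Timeline

credentials = BasicAuthentication('', 'personal-access-token')  # PAT placeholder
connection = Connection(base_url='https://dev.azure.com/myorg', creds=credentials)
task_client = connection.clients.get_task_client()  # accessor name assumed

created = task_client.create_timeline(
    timeline=Timeline(),                                      # empty payload
    scope_identifier='00000000-0000-0000-0000-000000000000',  # project GUID
    hub_name='build',
    plan_id='11111111-1111-1111-1111-111111111111',
)
print(created.id)
```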
mbedmicro/pyOCD | pyocd/utility/conversion.py | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/utility/conversion.py#L92-L103 | def u64_to_hex16le(val):
"""! @brief Create 16-digit hexadecimal string from 64-bit register value"""
return ''.join("%02x" % (x & 0xFF) for x in (
val,
val >> 8,
val >> 16,
val >> 24,
val >> 32,
val >> 40,
val >> 48,
val >> 56,
)) | [
"def",
"u64_to_hex16le",
"(",
"val",
")",
":",
"return",
"''",
".",
"join",
"(",
"\"%02x\"",
"%",
"(",
"x",
"&",
"0xFF",
")",
"for",
"x",
"in",
"(",
"val",
",",
"val",
">>",
"8",
",",
"val",
">>",
"16",
",",
"val",
">>",
"24",
",",
"val",
">>",
"32",
",",
"val",
">>",
"40",
",",
"val",
">>",
"48",
",",
"val",
">>",
"56",
",",
")",
")"
] | ! @brief Create 16-digit hexadecimal string from 64-bit register value | [
"!"
] | python | train |
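The function emits the register value byte by byte, least-significant first; a quick check with expected strings derived by hand from the shifts above (import path taken from the record):

```python
from pyocd.utility.conversion import u64_to_hex16le

# Little-endian: the low byte 0xEF comes out first.
assert u64_to_hex16le(0x0123456789ABCDEF) == 'efcdab8967452301'
assert u64_to_hex16le(0x1) == '0100000000000000'
```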
inveniosoftware/invenio-oauth2server | invenio_oauth2server/ext.py | https://github.com/inveniosoftware/invenio-oauth2server/blob/7033d3495c1a2b830e101e43918e92a37bbb49f2/invenio_oauth2server/ext.py#L123-L136 | def init_app(self, app, entry_point_group='invenio_oauth2server.scopes',
**kwargs):
"""Flask application initialization.
:param app: An instance of :class:`flask.Flask`.
:param entry_point_group: The entrypoint group name to load plugins.
(Default: ``'invenio_oauth2server.scopes'``)
"""
self.init_config(app)
state = _OAuth2ServerState(app, entry_point_group=entry_point_group)
app.extensions['invenio-oauth2server'] = state
return state | [
"def",
"init_app",
"(",
"self",
",",
"app",
",",
"entry_point_group",
"=",
"'invenio_oauth2server.scopes'",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"init_config",
"(",
"app",
")",
"state",
"=",
"_OAuth2ServerState",
"(",
"app",
",",
"entry_point_group",
"=",
"entry_point_group",
")",
"app",
".",
"extensions",
"[",
"'invenio-oauth2server'",
"]",
"=",
"state",
"return",
"state"
] | Flask application initialization.
:param app: An instance of :class:`flask.Flask`.
:param entry_point_group: The entrypoint group name to load plugins.
(Default: ``'invenio_oauth2server.scopes'``) | [
"Flask",
"application",
"initialization",
"."
] | python | train |
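A hedged initialization sketch; `InvenioOAuth2Server` is assumed to be the extension class this `init_app` belongs to, following the package's naming convention:

```python
from flask import Flask
from invenio_oauth2server import InvenioOAuth2Server  # class name assumed

app = Flask(__name__)
InvenioOAuth2Server().init_app(app)  # loads scopes from the default entry point group
assert 'invenio-oauth2server' in app.extensions
```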
acutesoftware/AIKIF | aikif/dataTools/cls_sql_code_generator.py | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_sql_code_generator.py#L244-L309 | def extract_dimension(self, dim_name, dim_cols, dim_key, dim_stag_table, src_table, src_cols, grain_cols, where_clause):
"""
selects the src_cols from src_table and groups by dim_grain
then inserts into newly created table dim_name the columns as 'dim_cols
"""
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += '-- CREATE Dimension - ' + dim_name + '\n'
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += 'DROP TABLE ' + dim_stag_table + ' CASCADE CONSTRAINTS;\n'
self.ddl_text += 'CREATE TABLE ' + dim_stag_table + ' (\n'
self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in dim_cols])
self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n'
self.ddl_text += ');\n'
self.ddl_text += 'DROP TABLE ' + dim_name + ' CASCADE CONSTRAINTS;\n'
self.ddl_text += 'CREATE TABLE ' + dim_name + ' (\n'
self.ddl_text += ' ' + dim_key + ' NUMBER, \n'
self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in dim_cols])
self.ddl_text += ' REC_SOURCE_SYSTEM VARCHAR2(100), \n' # + src_table + '; \n'
self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n'
self.ddl_text += ');\n'
self.ddl_text += 'CREATE OR REPLACE VIEW U' + dim_name[1:] + ' AS SELECT * FROM ' + dim_name + ';\n'
self.ddl_text += 'GRANT SELECT ON U' + dim_name[1:] + ' TO ALL_USERS;\n'
self.ddl_text += '\n'
self.ddl_text += 'DROP SEQUENCE SEQ_' + dim_name + ';\n'
self.ddl_text += 'CREATE SEQUENCE SEQ_' + dim_name + ';\n\n'
self.sql_text += '---------------------------------------------\n'
self.sql_text += '-- Populate Dimension - ' + dim_name + '\n'
self.sql_text += '---------------------------------------------\n'
self.sql_text += "DELETE FROM " + dim_stag_table + ";\n"
self.sql_text += "COMMIT;\n"
self.sql_text += "INSERT INTO " + dim_stag_table + " (\n"
self.sql_text += ", ".join([col for col in dim_cols])
self.sql_text += ")\n (SELECT \n"
self.sql_text += ", ".join([col for col in src_cols])
self.sql_text += "\nFROM " + src_table + "\n"
if where_clause != '':
self.sql_text += "WHERE " + where_clause + "\n"
if len(grain_cols) > 0:
self.sql_text += "GROUP BY " + ", ".join([col for col in grain_cols]) + "\n"
self.sql_text += "); \n"
self.sql_text += "COMMIT;\n"
self.sql_text += "DELETE FROM " + dim_name + ";\n"
self.sql_text += "COMMIT;\n"
self.sql_text += "INSERT INTO " + dim_name + " (\n"
self.sql_text += ", ".join([col for col in dim_cols])
self.sql_text += ", REC_SOURCE_SYSTEM, " + self.date_updated_col + " "
self.sql_text += ") \n(SELECT \n"
self.sql_text += ", ".join([col for col in src_cols])
self.sql_text += ", '" + src_table + "', sysdate "
self.sql_text += "\nFROM " + dim_stag_table + "\n"
self.sql_text += "); \n"
self.sql_text += "COMMIT;\n"
self.sql_text += "UPDATE " + dim_name + " SET " + dim_key + " = SEQ_" + dim_name + ".nextval;\n"
self.sql_text += "COMMIT;\n\n"
print(self.ddl_text)
print(self.sql_text) | [
"def",
"extract_dimension",
"(",
"self",
",",
"dim_name",
",",
"dim_cols",
",",
"dim_key",
",",
"dim_stag_table",
",",
"src_table",
",",
"src_cols",
",",
"grain_cols",
",",
"where_clause",
")",
":",
"self",
".",
"ddl_text",
"+=",
"'---------------------------------------------\\n'",
"self",
".",
"ddl_text",
"+=",
"'-- CREATE Dimension - '",
"+",
"dim_name",
"+",
"'\\n'",
"self",
".",
"ddl_text",
"+=",
"'---------------------------------------------\\n'",
"self",
".",
"ddl_text",
"+=",
"'DROP TABLE '",
"+",
"dim_stag_table",
"+",
"' CASCADE CONSTRAINTS;\\n'",
"self",
".",
"ddl_text",
"+=",
"'CREATE TABLE '",
"+",
"dim_stag_table",
"+",
"' (\\n'",
"self",
".",
"ddl_text",
"+=",
"' '",
".",
"join",
"(",
"[",
"col",
"+",
"' VARCHAR2(200), \\n'",
"for",
"col",
"in",
"dim_cols",
"]",
")",
"self",
".",
"ddl_text",
"+=",
"' '",
"+",
"self",
".",
"date_updated_col",
"+",
"' DATE \\n'",
"# + src_table + '; \\n'",
"self",
".",
"ddl_text",
"+=",
"');\\n'",
"self",
".",
"ddl_text",
"+=",
"'DROP TABLE '",
"+",
"dim_name",
"+",
"' CASCADE CONSTRAINTS;\\n'",
"self",
".",
"ddl_text",
"+=",
"'CREATE TABLE '",
"+",
"dim_name",
"+",
"' (\\n'",
"self",
".",
"ddl_text",
"+=",
"' '",
"+",
"dim_key",
"+",
"' NUMBER, \\n'",
"self",
".",
"ddl_text",
"+=",
"' '",
".",
"join",
"(",
"[",
"col",
"+",
"' VARCHAR2(200), \\n'",
"for",
"col",
"in",
"dim_cols",
"]",
")",
"self",
".",
"ddl_text",
"+=",
"' REC_SOURCE_SYSTEM VARCHAR2(100), \\n'",
"# + src_table + '; \\n'",
"self",
".",
"ddl_text",
"+=",
"' '",
"+",
"self",
".",
"date_updated_col",
"+",
"' DATE \\n'",
"# + src_table + '; \\n'",
"self",
".",
"ddl_text",
"+=",
"');\\n'",
"self",
".",
"ddl_text",
"+=",
"'CREATE OR REPLACE VIEW U'",
"+",
"dim_name",
"[",
"1",
":",
"]",
"+",
"' AS SELECT * FROM '",
"+",
"dim_name",
"+",
"';\\n'",
"self",
".",
"ddl_text",
"+=",
"'GRANT SELECT ON U'",
"+",
"dim_name",
"[",
"1",
":",
"]",
"+",
"' TO ALL_USERS;\\n'",
"self",
".",
"ddl_text",
"+=",
"'\\n'",
"self",
".",
"ddl_text",
"+=",
"'DROP SEQUENCE SEQ_'",
"+",
"dim_name",
"+",
"';\\n'",
"self",
".",
"ddl_text",
"+=",
"'CREATE SEQUENCE SEQ_'",
"+",
"dim_name",
"+",
"';\\n\\n'",
"self",
".",
"sql_text",
"+=",
"'---------------------------------------------\\n'",
"self",
".",
"sql_text",
"+=",
"'-- Populate Dimension - '",
"+",
"dim_name",
"+",
"'\\n'",
"self",
".",
"sql_text",
"+=",
"'---------------------------------------------\\n'",
"self",
".",
"sql_text",
"+=",
"\"DELETE FROM \"",
"+",
"dim_stag_table",
"+",
"\";\\n\"",
"self",
".",
"sql_text",
"+=",
"\"COMMIT;\\n\"",
"self",
".",
"sql_text",
"+=",
"\"INSERT INTO \"",
"+",
"dim_stag_table",
"+",
"\" (\\n\"",
"self",
".",
"sql_text",
"+=",
"\", \"",
".",
"join",
"(",
"[",
"col",
"for",
"col",
"in",
"dim_cols",
"]",
")",
"self",
".",
"sql_text",
"+=",
"\")\\n (SELECT \\n\"",
"self",
".",
"sql_text",
"+=",
"\", \"",
".",
"join",
"(",
"[",
"col",
"for",
"col",
"in",
"src_cols",
"]",
")",
"self",
".",
"sql_text",
"+=",
"\"\\nFROM \"",
"+",
"src_table",
"+",
"\"\\n\"",
"if",
"where_clause",
"!=",
"''",
":",
"self",
".",
"sql_text",
"+=",
"\"WHERE \"",
"+",
"where_clause",
"+",
"\"\\n\"",
"if",
"len",
"(",
"grain_cols",
")",
">",
"0",
":",
"self",
".",
"sql_text",
"+=",
"\"GROUP BY \"",
"+",
"\", \"",
".",
"join",
"(",
"[",
"col",
"for",
"col",
"in",
"grain_cols",
"]",
")",
"+",
"\"\\n\"",
"self",
".",
"sql_text",
"+=",
"\"); \\n\"",
"self",
".",
"sql_text",
"+=",
"\"COMMIT;\\n\"",
"self",
".",
"sql_text",
"+=",
"\"DELETE FROM \"",
"+",
"dim_name",
"+",
"\";\\n\"",
"self",
".",
"sql_text",
"+=",
"\"COMMIT;\\n\"",
"self",
".",
"sql_text",
"+=",
"\"INSERT INTO \"",
"+",
"dim_name",
"+",
"\" (\\n\"",
"self",
".",
"sql_text",
"+=",
"\", \"",
".",
"join",
"(",
"[",
"col",
"for",
"col",
"in",
"dim_cols",
"]",
")",
"self",
".",
"sql_text",
"+=",
"\", REC_SOURCE_SYSTEM, \"",
"+",
"self",
".",
"date_updated_col",
"+",
"\" \"",
"self",
".",
"sql_text",
"+=",
"\") \\n(SELECT \\n\"",
"self",
".",
"sql_text",
"+=",
"\", \"",
".",
"join",
"(",
"[",
"col",
"for",
"col",
"in",
"src_cols",
"]",
")",
"self",
".",
"sql_text",
"+=",
"\", '\"",
"+",
"src_table",
"+",
"\"', sysdate \"",
"self",
".",
"sql_text",
"+=",
"\"\\nFROM \"",
"+",
"dim_stag_table",
"+",
"\"\\n\"",
"self",
".",
"sql_text",
"+=",
"\"); \\n\"",
"self",
".",
"sql_text",
"+=",
"\"COMMIT;\\n\"",
"self",
".",
"sql_text",
"+=",
"\"UPDATE \"",
"+",
"dim_name",
"+",
"\" SET \"",
"+",
"dim_key",
"+",
"\" = SEQ_\"",
"+",
"dim_name",
"+",
"\".nextval;\\n\"",
"self",
".",
"sql_text",
"+=",
"\"COMMIT;\\n\\n\"",
"print",
"(",
"self",
".",
"ddl_text",
")",
"print",
"(",
"self",
".",
"sql_text",
")"
] | selects the src_cols from src_table and groups by dim_grain
then inserts into newly created table dim_name the columns as 'dim_cols' | [
"selects",
"the",
"src_cols",
"from",
"src_table",
"and",
"groups",
"by",
"dim_grain",
"then",
"inserts",
"into",
"newly",
"created",
"table",
"dim_name",
"the",
"columns",
"as",
"dim_cols"
] | python | train |
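A hedged call sketch showing how the arguments map onto the emitted Oracle DDL/DML; the class name, constructor signature, and all table and column names are assumptions for illustration:

```python
from aikif.dataTools.cls_sql_code_generator import SQLCodeGenerator  # name assumed

gen = SQLCodeGenerator('customer_dw')  # constructor signature assumed
gen.extract_dimension(
    dim_name='D_CUSTOMER',
    dim_cols=['CUST_NAME', 'CUST_REGION'],
    dim_key='CUSTOMER_KEY',
    dim_stag_table='S_CUSTOMER',
    src_table='RAW_CUSTOMERS',
    src_cols=['name', 'region'],
    grain_cols=['name', 'region'],
    where_clause="status = 'ACTIVE'",
)
# DDL accumulates in gen.ddl_text and DML in gen.sql_text; both are printed.
```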
bloomreach/s4cmd | s4cmd.py | https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L1386-L1442 | def download(self, source, target, mpi=None, pos=0, chunk=0, part=0):
'''Thread worker for download operation.'''
s3url = S3URL(source)
obj = self.lookup(s3url)
if obj is None:
raise Failure('The obj "%s" does not exist.' % (s3url.path,))
# Initialization: Set up multithreaded downloads.
if not mpi:
# optional checks
if self.opt.dry_run:
message('%s => %s', source, target)
return
elif self.opt.sync_check and self.sync_check(LocalMD5Cache(target), obj):
message('%s => %s (synced)', source, target)
return
elif not self.opt.force and os.path.exists(target):
raise Failure('File already exists: %s' % target)
fsize = int(obj['ContentLength'])
# Small file optimization.
if fsize < self.opt.max_singlepart_download_size:
# Create a single part to chain back main download operation.
mpi = ThreadUtil.MultipartItem(tempfile_get(target))
mpi.total = 1
pos = 0
chunk = fsize
# Continue as one part download.
else:
# Here we use temp filename as the id of mpi.
for args in self.get_file_splits(tempfile_get(target), source, target, fsize, self.opt.multipart_split_size):
self.pool.download(*args)
return
tempfile = mpi.id
if self.opt.recursive:
self.mkdirs(tempfile)
# Download part of the file, range is inclusive.
response = self.s3.get_object(Bucket=s3url.bucket, Key=s3url.path, Range='bytes=%d-%d' % (pos, pos + chunk - 1))
self.write_file_chunk(tempfile, pos, chunk, response['Body'])
# Finalize
if mpi.complete({'PartNumber': part}):
try:
self.update_privilege(obj, tempfile)
self._verify_file_size(obj, tempfile)
tempfile_set(tempfile, target)
message('%s => %s', source, target)
except Exception as e:
# Note that we don't retry in this case, because
# We are going to remove the temp file, and if we
# retry here with original parameters (wrapped in
# the task item), it would fail anyway
tempfile_set(tempfile, None)
raise Failure('Download Failure: %s, Source: %s.' % (str(e), source))
"def",
"download",
"(",
"self",
",",
"source",
",",
"target",
",",
"mpi",
"=",
"None",
",",
"pos",
"=",
"0",
",",
"chunk",
"=",
"0",
",",
"part",
"=",
"0",
")",
":",
"s3url",
"=",
"S3URL",
"(",
"source",
")",
"obj",
"=",
"self",
".",
"lookup",
"(",
"s3url",
")",
"if",
"obj",
"is",
"None",
":",
"raise",
"Failure",
"(",
"'The obj \"%s\" does not exists.'",
"%",
"(",
"s3url",
".",
"path",
",",
")",
")",
"# Initialization: Set up multithreaded downloads.",
"if",
"not",
"mpi",
":",
"# optional checks",
"if",
"self",
".",
"opt",
".",
"dry_run",
":",
"message",
"(",
"'%s => %s'",
",",
"source",
",",
"target",
")",
"return",
"elif",
"self",
".",
"opt",
".",
"sync_check",
"and",
"self",
".",
"sync_check",
"(",
"LocalMD5Cache",
"(",
"target",
")",
",",
"obj",
")",
":",
"message",
"(",
"'%s => %s (synced)'",
",",
"source",
",",
"target",
")",
"return",
"elif",
"not",
"self",
".",
"opt",
".",
"force",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"target",
")",
":",
"raise",
"Failure",
"(",
"'File already exists: %s'",
"%",
"target",
")",
"fsize",
"=",
"int",
"(",
"obj",
"[",
"'ContentLength'",
"]",
")",
"# Small file optimization.",
"if",
"fsize",
"<",
"self",
".",
"opt",
".",
"max_singlepart_download_size",
":",
"# Create a single part to chain back main download operation.",
"mpi",
"=",
"ThreadUtil",
".",
"MultipartItem",
"(",
"tempfile_get",
"(",
"target",
")",
")",
"mpi",
".",
"total",
"=",
"1",
"pos",
"=",
"0",
"chunk",
"=",
"fsize",
"# Continue as one part download.",
"else",
":",
"# Here we use temp filename as the id of mpi.",
"for",
"args",
"in",
"self",
".",
"get_file_splits",
"(",
"tempfile_get",
"(",
"target",
")",
",",
"source",
",",
"target",
",",
"fsize",
",",
"self",
".",
"opt",
".",
"multipart_split_size",
")",
":",
"self",
".",
"pool",
".",
"download",
"(",
"*",
"args",
")",
"return",
"tempfile",
"=",
"mpi",
".",
"id",
"if",
"self",
".",
"opt",
".",
"recursive",
":",
"self",
".",
"mkdirs",
"(",
"tempfile",
")",
"# Download part of the file, range is inclusive.",
"response",
"=",
"self",
".",
"s3",
".",
"get_object",
"(",
"Bucket",
"=",
"s3url",
".",
"bucket",
",",
"Key",
"=",
"s3url",
".",
"path",
",",
"Range",
"=",
"'bytes=%d-%d'",
"%",
"(",
"pos",
",",
"pos",
"+",
"chunk",
"-",
"1",
")",
")",
"self",
".",
"write_file_chunk",
"(",
"tempfile",
",",
"pos",
",",
"chunk",
",",
"response",
"[",
"'Body'",
"]",
")",
"# Finalize",
"if",
"mpi",
".",
"complete",
"(",
"{",
"'PartNumber'",
":",
"part",
"}",
")",
":",
"try",
":",
"self",
".",
"update_privilege",
"(",
"obj",
",",
"tempfile",
")",
"self",
".",
"_verify_file_size",
"(",
"obj",
",",
"tempfile",
")",
"tempfile_set",
"(",
"tempfile",
",",
"target",
")",
"message",
"(",
"'%s => %s'",
",",
"source",
",",
"target",
")",
"except",
"Exception",
"as",
"e",
":",
"# Note that we don't retry in this case, because",
"# We are going to remove the temp file, and if we",
"# retry here with original parameters (wrapped in",
"# the task item), it would fail anyway",
"tempfile_set",
"(",
"tempfile",
",",
"None",
")",
"raise",
"Failure",
"(",
"'Download Failure: %s, Source: %s.'",
"%",
"(",
"e",
".",
"message",
",",
"source",
")",
")"
] | Thread worker for download operation. | [
"Thread",
"worker",
"for",
"download",
"operation",
"."
] | python | test |
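The worker fetches one inclusive byte range per part via `get_object`; the same request can be reproduced directly with boto3 (bucket, key, and part size below are placeholders):

```python
import boto3

s3 = boto3.client('s3')
pos, chunk = 0, 1024 * 1024  # first 1 MiB part
resp = s3.get_object(
    Bucket='my-bucket',
    Key='path/to/object',
    Range='bytes=%d-%d' % (pos, pos + chunk - 1),  # inclusive, as in the worker
)
part_bytes = resp['Body'].read()
```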
quantopian/pgcontents | pgcontents/api_utils.py | https://github.com/quantopian/pgcontents/blob/ed36268b7917332d16868208e1e565742a8753e1/pgcontents/api_utils.py#L137-L148 | def _decode_unknown_from_base64(path, bcontent):
"""
Decode base64 data of unknown format.
Attempts to interpret data as utf-8, falling back to ascii on failure.
"""
content = b64decode(bcontent)
try:
return (content.decode('utf-8'), 'text')
except UnicodeError:
pass
return bcontent.decode('ascii'), 'base64' | [
"def",
"_decode_unknown_from_base64",
"(",
"path",
",",
"bcontent",
")",
":",
"content",
"=",
"b64decode",
"(",
"bcontent",
")",
"try",
":",
"return",
"(",
"content",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"'text'",
")",
"except",
"UnicodeError",
":",
"pass",
"return",
"bcontent",
".",
"decode",
"(",
"'ascii'",
")",
",",
"'base64'"
] | Decode base64 data of unknown format.
Attempts to interpret data as utf-8, falling back to ascii on failure. | [
"Decode",
"base64",
"data",
"of",
"unknown",
"format",
"."
] | python | test |
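A behaviour check for the helper, exercised through base64 round-trips; the file paths are placeholders (the `path` argument is unused by the function) and the import follows the record:

```python
from base64 import b64encode
from pgcontents.api_utils import _decode_unknown_from_base64

text_b64 = b64encode('héllo'.encode('utf-8'))
assert _decode_unknown_from_base64('a.txt', text_b64) == ('héllo', 'text')

binary_b64 = b64encode(b'\x00\xff\xfe')  # not valid UTF-8 once decoded
content, fmt = _decode_unknown_from_base64('a.bin', binary_b64)
assert fmt == 'base64'  # undecodable bytes fall back to the base64 form
```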
sporteasy/python-poeditor | poeditor/client.py | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L652-L679 | def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
) | [
"def",
"update_terms_translations",
"(",
"self",
",",
"project_id",
",",
"file_path",
"=",
"None",
",",
"language_code",
"=",
"None",
",",
"overwrite",
"=",
"False",
",",
"sync_terms",
"=",
"False",
",",
"tags",
"=",
"None",
",",
"fuzzy_trigger",
"=",
"None",
")",
":",
"return",
"self",
".",
"_upload",
"(",
"project_id",
"=",
"project_id",
",",
"updating",
"=",
"self",
".",
"UPDATING_TERMS_TRANSLATIONS",
",",
"file_path",
"=",
"file_path",
",",
"language_code",
"=",
"language_code",
",",
"overwrite",
"=",
"overwrite",
",",
"sync_terms",
"=",
"sync_terms",
",",
"tags",
"=",
"tags",
",",
"fuzzy_trigger",
"=",
"fuzzy_trigger",
")"
] | Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values | [
"Updates",
"terms",
"translations"
] | python | train |
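A hedged usage sketch; `POEditorAPI` is assumed to be the client class exposing this method, and the token, project id, and tag payload are placeholders:

```python
from poeditor import POEditorAPI  # client class name assumed

client = POEditorAPI(api_token='your-api-token')
client.update_terms_translations(
    project_id='12345',
    file_path='locale/fr.po',
    language_code='fr',
    overwrite=True,          # replace existing translations
    sync_terms=True,         # delete project terms missing from the file
    tags={'new': ['imported-2019']},  # tag payload shape assumed
    fuzzy_trigger=True,
)
```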
tomplus/kubernetes_asyncio | kubernetes_asyncio/client/api/custom_objects_api.py | https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/custom_objects_api.py#L1727-L1751 | def patch_cluster_custom_object_scale(self, group, version, plural, name, body, **kwargs): # noqa: E501
"""patch_cluster_custom_object_scale # noqa: E501
partially update scale of the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_custom_object_scale(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_cluster_custom_object_scale_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
else:
(data) = self.patch_cluster_custom_object_scale_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
return data | [
"def",
"patch_cluster_custom_object_scale",
"(",
"self",
",",
"group",
",",
"version",
",",
"plural",
",",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"patch_cluster_custom_object_scale_with_http_info",
"(",
"group",
",",
"version",
",",
"plural",
",",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"patch_cluster_custom_object_scale_with_http_info",
"(",
"group",
",",
"version",
",",
"plural",
",",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | patch_cluster_custom_object_scale # noqa: E501
partially update scale of the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_custom_object_scale(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread. | [
"patch_cluster_custom_object_scale",
"#",
"noqa",
":",
"E501"
] | python | train |
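A hedged async usage sketch; the CRD coordinates are invented and the scale-patch body shape (`spec.replicas`) is the conventional one, assumed here:

```python
import asyncio
from kubernetes_asyncio import client, config

async def rescale():
    await config.load_kube_config()
    async with client.ApiClient() as api_client:
        api = client.CustomObjectsApi(api_client)
        body = {'spec': {'replicas': 3}}  # merge-patch payload; shape assumed
        scale = await api.patch_cluster_custom_object_scale(
            group='example.com', version='v1', plural='widgets',
            name='my-widget', body=body,
        )
        print(scale)

asyncio.run(rescale())
```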
googleapis/google-cloud-python | firestore/google/cloud/firestore_v1beta1/document.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/document.py#L615-L677 | def get(self, field_path):
"""Get a value from the snapshot data.
If the data is nested, for example:
.. code-block:: python
>>> snapshot.to_dict()
{
'top1': {
'middle2': {
'bottom3': 20,
'bottom4': 22,
},
'middle5': True,
},
'top6': b'\x00\x01 foo',
}
a **field path** can be used to access the nested data. For
example:
.. code-block:: python
>>> snapshot.get('top1')
{
'middle2': {
'bottom3': 20,
'bottom4': 22,
},
'middle5': True,
}
>>> snapshot.get('top1.middle2')
{
'bottom3': 20,
'bottom4': 22,
}
>>> snapshot.get('top1.middle2.bottom3')
20
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
A copy is returned since the data may contain mutable values,
but the data stored in the snapshot must remain immutable.
Args:
field_path (str): A field path (``.``-delimited list of
field names).
Returns:
Any or None:
(A copy of) the value stored for the ``field_path`` or
None if snapshot document does not exist.
Raises:
KeyError: If the ``field_path`` does not match nested data
in the snapshot.
"""
if not self._exists:
return None
nested_data = field_path_module.get_nested_value(field_path, self._data)
return copy.deepcopy(nested_data) | [
"def",
"get",
"(",
"self",
",",
"field_path",
")",
":",
"if",
"not",
"self",
".",
"_exists",
":",
"return",
"None",
"nested_data",
"=",
"field_path_module",
".",
"get_nested_value",
"(",
"field_path",
",",
"self",
".",
"_data",
")",
"return",
"copy",
".",
"deepcopy",
"(",
"nested_data",
")"
] | Get a value from the snapshot data.
If the data is nested, for example:
.. code-block:: python
>>> snapshot.to_dict()
{
'top1': {
'middle2': {
'bottom3': 20,
'bottom4': 22,
},
'middle5': True,
},
'top6': b'\x00\x01 foo',
}
a **field path** can be used to access the nested data. For
example:
.. code-block:: python
>>> snapshot.get('top1')
{
'middle2': {
'bottom3': 20,
'bottom4': 22,
},
'middle5': True,
}
>>> snapshot.get('top1.middle2')
{
'bottom3': 20,
'bottom4': 22,
}
>>> snapshot.get('top1.middle2.bottom3')
20
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
A copy is returned since the data may contain mutable values,
but the data stored in the snapshot must remain immutable.
Args:
field_path (str): A field path (``.``-delimited list of
field names).
Returns:
Any or None:
(A copy of) the value stored for the ``field_path`` or
None if snapshot document does not exist.
Raises:
KeyError: If the ``field_path`` does not match nested data
in the snapshot. | [
"Get",
"a",
"value",
"from",
"the",
"snapshot",
"data",
"."
] | python | train |
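A usage sketch matching the docstring's nested-data example; the collection and document names are placeholders, and the stored data is assumed to look like the `to_dict()` output above:

```python
from google.cloud import firestore_v1beta1 as firestore  # package per the record

client = firestore.Client()
snapshot = client.collection('settings').document('site').get()

snapshot.get('top1.middle2.bottom3')  # -> 20, per the docstring example
snapshot.get('top1.middle5')          # -> True
# A field path absent from the data raises KeyError; a missing document
# makes get() return None instead.
```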
hobson/pug-ann | pug/ann/util.py | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L372-L374 | def build_trainer(nn, ds, verbosity=1):
"""Configure neural net trainer from a pybrain dataset"""
return pb.supervised.trainers.rprop.RPropMinusTrainer(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity)) | [
"def",
"build_trainer",
"(",
"nn",
",",
"ds",
",",
"verbosity",
"=",
"1",
")",
":",
"return",
"pb",
".",
"supervised",
".",
"trainers",
".",
"rprop",
".",
"RPropMinusTrainer",
"(",
"nn",
",",
"dataset",
"=",
"ds",
",",
"batchlearning",
"=",
"True",
",",
"verbose",
"=",
"bool",
"(",
"verbosity",
")",
")"
] | Configure neural net trainer from a pybrain dataset | [
"Configure",
"neural",
"net",
"trainer",
"from",
"a",
"pybrain",
"dataset"
] | python | train |
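A hedged end-to-end sketch on toy XOR data; pybrain's dataset and network shortcuts feed `build_trainer`:

```python
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pug.ann.util import build_trainer  # import path per the record

ds = SupervisedDataSet(2, 1)
for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ds.addSample((a, b), (a ^ b,))  # XOR toy targets

nn = buildNetwork(2, 3, 1)
trainer = build_trainer(nn, ds, verbosity=0)  # RProp- with batch learning
trainer.trainEpochs(10)
```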
SKA-ScienceDataProcessor/integration-prototype | sip/tango_control/flask_master/app/app.py | https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/flask_master/app/app.py#L167-L174 | def allowed_transitions():
"""Get target states allowed for the current state."""
try:
sdp_state = SDPState()
return sdp_state.allowed_target_states[sdp_state.current_state]
except KeyError:
LOG.error("Key Error")
return dict(state="KeyError", reason="KeyError") | [
"def",
"allowed_transitions",
"(",
")",
":",
"try",
":",
"sdp_state",
"=",
"SDPState",
"(",
")",
"return",
"sdp_state",
".",
"allowed_target_states",
"[",
"sdp_state",
".",
"current_state",
"]",
"except",
"KeyError",
":",
"LOG",
".",
"error",
"(",
"\"Key Error\"",
")",
"return",
"dict",
"(",
"state",
"=",
"\"KeyError\"",
",",
"reason",
"=",
"\"KeyError\"",
")"
] | Get target states allowed for the current state. | [
"Get",
"target",
"states",
"allowed",
"for",
"the",
"current",
"state",
"."
] | python | train |
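The endpoint is a plain lookup of the current state in a transition table; a minimal illustration of the pattern with an invented table:

```python
# Illustrative transition table; real states come from SDPState.
allowed_target_states = {
    'init': ['standby'],
    'standby': ['configure', 'off'],
}

current = 'standby'
print(allowed_target_states[current])        # -> ['configure', 'off']
print(allowed_target_states.get('unknown'))  # .get() would avoid the KeyError path
```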
ContinuumIO/menuinst | menuinst/freedesktop.py | https://github.com/ContinuumIO/menuinst/blob/dae53065e9e82a3352b817cca5895a9b271ddfdb/menuinst/freedesktop.py#L50-L77 | def make_directory_entry(d):
"""
Create a directory entry that conforms to the format of the Desktop Entry
Specification by freedesktop.org. See:
http://freedesktop.org/Standards/desktop-entry-spec
These should work for both KDE and Gnome2
An entry is a .directory file that includes the display name, icon, etc.
It will be placed in the location specified within the passed dict. The
filename can be explicitly specified, but if not provided, will default to
an escaped version of the name.
"""
assert d['path'].endswith('.directory')
# default values
d.setdefault('comment', '')
d.setdefault('icon', '')
fo = open(d['path'], "w")
fo.write("""\
[Desktop Entry]
Type=Directory
Encoding=UTF-8
Name=%(name)s
Comment=%(comment)s
Icon=%(icon)s
""" % d)
fo.close() | [
"def",
"make_directory_entry",
"(",
"d",
")",
":",
"assert",
"d",
"[",
"'path'",
"]",
".",
"endswith",
"(",
"'.directory'",
")",
"# default values",
"d",
".",
"setdefault",
"(",
"'comment'",
",",
"''",
")",
"d",
".",
"setdefault",
"(",
"'icon'",
",",
"''",
")",
"fo",
"=",
"open",
"(",
"d",
"[",
"'path'",
"]",
",",
"\"w\"",
")",
"fo",
".",
"write",
"(",
"\"\"\"\\\n[Desktop Entry]\nType=Directory\nEncoding=UTF-8\nName=%(name)s\nComment=%(comment)s\nIcon=%(icon)s\n\"\"\"",
"%",
"d",
")",
"fo",
".",
"close",
"(",
")"
] | Create a directory entry that conforms to the format of the Desktop Entry
Specification by freedesktop.org. See:
http://freedesktop.org/Standards/desktop-entry-spec
These should work for both KDE and Gnome2
An entry is a .directory file that includes the display name, icon, etc.
It will be placed in the location specified within the passed dict. The
filename can be explicitly specified, but if not provided, will default to
an escaped version of the name. | [
"Create",
"a",
"directory",
"entry",
"that",
"conforms",
"to",
"the",
"format",
"of",
"the",
"Desktop",
"Entry",
"Specification",
"by",
"freedesktop",
".",
"org",
".",
"See",
":",
"http",
":",
"//",
"freedesktop",
".",
"org",
"/",
"Standards",
"/",
"desktop",
"-",
"entry",
"-",
"spec",
"These",
"should",
"work",
"for",
"both",
"KDE",
"and",
"Gnome2"
] | python | train |
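A hedged usage sketch; only `name` and a `.directory` path are required, the other keys are defaulted by the function (import path per the record):

```python
import os
import tempfile
from menuinst.freedesktop import make_directory_entry

d = {
    'name': 'My Tools',
    'path': os.path.join(tempfile.mkdtemp(), 'my-tools.directory'),
}
make_directory_entry(d)  # 'comment' and 'icon' default to empty strings
print(open(d['path']).read())
```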