repo | path | url | code | language | partition
wummel/linkchecker | linkcheck/configuration/confparse.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/configuration/confparse.py#L230-L238 | python | train

```python
def read_plugin_config(self):
    """Read plugin-specific configuration values."""
    folders = self.config["pluginfolders"]
    modules = plugins.get_plugin_modules(folders)
    for pluginclass in plugins.get_plugin_classes(modules):
        section = pluginclass.__name__
        if self.has_section(section):
            self.config["enabledplugins"].append(section)
            self.config[section] = pluginclass.read_config(self)
```
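A minimal usage sketch; `ConfParser` is a stand-in name for the configparser subclass this method belongs to, and the rc-file path is hypothetical:

```python
# Sketch only: `ConfParser` stands in for the configparser subclass that
# defines read_plugin_config(); `config` is linkchecker's configuration dict.
parser = ConfParser(config)
parser.read(["~/.linkchecker/linkcheckerrc"])  # one INI section per plugin class
parser.read_plugin_config()  # plugins with a matching section get enabled
```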
"def",
"read_plugin_config",
"(",
"self",
")",
":",
"folders",
"=",
"self",
".",
"config",
"[",
"\"pluginfolders\"",
"]",
"modules",
"=",
"plugins",
".",
"get_plugin_modules",
"(",
"folders",
")",
"for",
"pluginclass",
"in",
"plugins",
".",
"get_plugin_classes",
"(",
"modules",
")",
":",
"section",
"=",
"pluginclass",
".",
"__name__",
"if",
"self",
".",
"has_section",
"(",
"section",
")",
":",
"self",
".",
"config",
"[",
"\"enabledplugins\"",
"]",
".",
"append",
"(",
"section",
")",
"self",
".",
"config",
"[",
"section",
"]",
"=",
"pluginclass",
".",
"read_config",
"(",
"self",
")"
] | Read plugin-specific configuration values. | [
"Read",
"plugin",
"-",
"specific",
"configuration",
"values",
"."
] | python | train |
gwastro/pycbc | pycbc/workflow/datafind.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/datafind.py#L1018-L1057 | python | train

```python
def datafind_keep_unique_backups(backup_outs, orig_outs):
    """This function takes a list of backup datafind files, presumably
    obtained by querying a remote datafind server, e.g. CIT, and compares
    these against a list of original datafind files, presumably obtained by
    querying the local datafind server. Only the datafind files in the backup
    list that do not appear in the original list are returned. This allows us
    to use only files that are missing from the local cluster.

    Parameters
    ----------
    backup_outs : FileList
        List of datafind files from the remote datafind server.
    orig_outs : FileList
        List of datafind files from the local datafind server.

    Returns
    -------
    FileList
        List of datafind files in backup_outs and not in orig_outs.
    """
    # NOTE: This function is not optimized and could be made considerably
    # quicker if speed becomes an issue. With 4s frame files this might
    # be slow, but for >1000s files I don't foresee any issue, so I keep
    # this simple.
    return_list = FileList([])
    # We compare the LFNs to determine uniqueness
    # Is there a way to associate two paths with one LFN??
    orig_names = [f.name for f in orig_outs]
    for file in backup_outs:
        if file.name not in orig_names:
            return_list.append(file)
        else:
            index_num = orig_names.index(file.name)
            orig_out = orig_outs[index_num]
            pfns = list(file.pfns)
            # This shouldn't happen, but catch if it does
            assert len(pfns) == 1
            orig_out.PFN(pfns[0].url, site='notlocal')
    return return_list
```
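The name-based deduplication at the heart of the function, illustrated with plain lists (the real entries are workflow `File` objects, not strings):

```python
backup = ["H-FRAME-100-4.gwf", "H-FRAME-104-4.gwf"]  # remote query results
local = ["H-FRAME-100-4.gwf"]                        # local query results
missing = [name for name in backup if name not in local]
assert missing == ["H-FRAME-104-4.gwf"]  # only the file absent locally is kept
```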
"def",
"datafind_keep_unique_backups",
"(",
"backup_outs",
",",
"orig_outs",
")",
":",
"# NOTE: This function is not optimized and could be made considerably",
"# quicker if speed becomes in issue. With 4s frame files this might",
"# be slow, but for >1000s files I don't foresee any issue, so I keep",
"# this simple.",
"return_list",
"=",
"FileList",
"(",
"[",
"]",
")",
"# We compare the LFNs to determine uniqueness",
"# Is there a way to associate two paths with one LFN??",
"orig_names",
"=",
"[",
"f",
".",
"name",
"for",
"f",
"in",
"orig_outs",
"]",
"for",
"file",
"in",
"backup_outs",
":",
"if",
"file",
".",
"name",
"not",
"in",
"orig_names",
":",
"return_list",
".",
"append",
"(",
"file",
")",
"else",
":",
"index_num",
"=",
"orig_names",
".",
"index",
"(",
"file",
".",
"name",
")",
"orig_out",
"=",
"orig_outs",
"[",
"index_num",
"]",
"pfns",
"=",
"list",
"(",
"file",
".",
"pfns",
")",
"# This shouldn't happen, but catch if it does",
"assert",
"(",
"len",
"(",
"pfns",
")",
"==",
"1",
")",
"orig_out",
".",
"PFN",
"(",
"pfns",
"[",
"0",
"]",
".",
"url",
",",
"site",
"=",
"'notlocal'",
")",
"return",
"return_list"
] | This function will take a list of backup datafind files, presumably
obtained by querying a remote datafind server, e.g. CIT, and compares
these against a list of original datafind files, presumably obtained by
querying the local datafind server. Only the datafind files in the backup
list that do not appear in the original list are returned. This allows us
to use only files that are missing from the local cluster.
Parameters
-----------
backup_outs : FileList
List of datafind files from the remote datafind server.
orig_outs : FileList
List of datafind files from the local datafind server.
Returns
--------
FileList
List of datafind files in backup_outs and not in orig_outs. | [
"This",
"function",
"will",
"take",
"a",
"list",
"of",
"backup",
"datafind",
"files",
"presumably",
"obtained",
"by",
"querying",
"a",
"remote",
"datafind",
"server",
"e",
".",
"g",
".",
"CIT",
"and",
"compares",
"these",
"against",
"a",
"list",
"of",
"original",
"datafind",
"files",
"presumably",
"obtained",
"by",
"querying",
"the",
"local",
"datafind",
"server",
".",
"Only",
"the",
"datafind",
"files",
"in",
"the",
"backup",
"list",
"that",
"do",
"not",
"appear",
"in",
"the",
"original",
"list",
"are",
"returned",
".",
"This",
"allows",
"us",
"to",
"use",
"only",
"files",
"that",
"are",
"missing",
"from",
"the",
"local",
"cluster",
"."
] | python | train |
h2oai/h2o-3 | h2o-py/h2o/model/model_base.py | https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/model_base.py#L153-L169 | python | test

```python
def staged_predict_proba(self, test_data):
    """
    Predict class probabilities at each stage of an H2O Model (only GBM models).

    The output structure is analogous to the output of function predict_leaf_node_assignment. For each tree t and
    class c there will be a column Tt.Cc (eg. T3.C1 for tree 3 and class 1). The value will be the corresponding
    predicted probability of this class by combining the raw contributions of trees T1.Cc,..,TtCc. Binomial models
    build the trees just for the first class and values in columns Tx.C1 thus correspond to the probability p0.

    :param H2OFrame test_data: Data on which to make predictions.

    :returns: A new H2OFrame of staged predictions.
    """
    if not isinstance(test_data, h2o.H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
    j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
                data={"predict_staged_proba": True})
    return h2o.get_frame(j["predictions_frame"]["name"])
```
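A hypothetical end-to-end call; the file path and column name are made up:

```python
import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator

h2o.init()
frame = h2o.import_file("train.csv")           # hypothetical dataset
model = H2OGradientBoostingEstimator(ntrees=3)
model.train(y="label", training_frame=frame)
staged = model.staged_predict_proba(frame)     # columns T1.C1, T2.C1, T3.C1, ...
```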
"def",
"staged_predict_proba",
"(",
"self",
",",
"test_data",
")",
":",
"if",
"not",
"isinstance",
"(",
"test_data",
",",
"h2o",
".",
"H2OFrame",
")",
":",
"raise",
"ValueError",
"(",
"\"test_data must be an instance of H2OFrame\"",
")",
"j",
"=",
"h2o",
".",
"api",
"(",
"\"POST /3/Predictions/models/%s/frames/%s\"",
"%",
"(",
"self",
".",
"model_id",
",",
"test_data",
".",
"frame_id",
")",
",",
"data",
"=",
"{",
"\"predict_staged_proba\"",
":",
"True",
"}",
")",
"return",
"h2o",
".",
"get_frame",
"(",
"j",
"[",
"\"predictions_frame\"",
"]",
"[",
"\"name\"",
"]",
")"
] | Predict class probabilities at each stage of an H2O Model (only GBM models).
The output structure is analogous to the output of function predict_leaf_node_assignment. For each tree t and
class c there will be a column Tt.Cc (eg. T3.C1 for tree 3 and class 1). The value will be the corresponding
predicted probability of this class by combining the raw contributions of trees T1.Cc,..,TtCc. Binomial models
build the trees just for the first class and values in columns Tx.C1 thus correspond to the the probability p0.
:param H2OFrame test_data: Data on which to make predictions.
:returns: A new H2OFrame of staged predictions. | [
"Predict",
"class",
"probabilities",
"at",
"each",
"stage",
"of",
"an",
"H2O",
"Model",
"(",
"only",
"GBM",
"models",
")",
"."
] | python | test |
mrjoes/sockjs-tornado | sockjs/tornado/session.py | https://github.com/mrjoes/sockjs-tornado/blob/bd3a99b407f1181f054b3b1730f438dde375ca1c/sockjs/tornado/session.py#L241-L252 | python | train

```python
def on_delete(self, forced):
    """Session expiration callback

    `forced`
        If session item explicitly deleted, forced will be set to True. If
        item expired, will be set to False.
    """
    # Do not remove connection if it was not forced and there's running connection
    if not forced and self.handler is not None and not self.is_closed:
        self.promote()
    else:
        self.close()
```
"def",
"on_delete",
"(",
"self",
",",
"forced",
")",
":",
"# Do not remove connection if it was not forced and there's running connection",
"if",
"not",
"forced",
"and",
"self",
".",
"handler",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"is_closed",
":",
"self",
".",
"promote",
"(",
")",
"else",
":",
"self",
".",
"close",
"(",
")"
] | Session expiration callback
`forced`
If session item explicitly deleted, forced will be set to True. If
item expired, will be set to False. | [
"Session",
"expiration",
"callback"
] | python | train |
msztolcman/versionner | versionner/config.py | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/config.py#L106-L118 | python | train

```python
def _parse_config_file(self, cfg_files):
    """Parse config file (ini) and set properties

    :return:
    """
    cfg_handler = configparser.ConfigParser(interpolation=None)
    if not cfg_handler.read(map(str, cfg_files)):
        return

    self._parse_global_section(cfg_handler)
    self._parse_vcs_section(cfg_handler)
    self._parse_file_section(cfg_handler)
```
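A sketch of the kind of INI input this expects; the section and option names below are assumptions for illustration, not versionner's documented schema:

```python
import configparser

cfg = configparser.ConfigParser(interpolation=None)
cfg.read_string("""
[versionner]
file = VERSION

[vcs]
engine = git
""")
print(cfg.sections())  # ['versionner', 'vcs']
```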
"def",
"_parse_config_file",
"(",
"self",
",",
"cfg_files",
")",
":",
"cfg_handler",
"=",
"configparser",
".",
"ConfigParser",
"(",
"interpolation",
"=",
"None",
")",
"if",
"not",
"cfg_handler",
".",
"read",
"(",
"map",
"(",
"str",
",",
"cfg_files",
")",
")",
":",
"return",
"self",
".",
"_parse_global_section",
"(",
"cfg_handler",
")",
"self",
".",
"_parse_vcs_section",
"(",
"cfg_handler",
")",
"self",
".",
"_parse_file_section",
"(",
"cfg_handler",
")"
] | Parse config file (ini) and set properties
:return: | [
"Parse",
"config",
"file",
"(",
"ini",
")",
"and",
"set",
"properties"
] | python | train |
google-research/batch-ppo | agents/algorithms/ppo/ppo.py | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L81-L98 | python | train

```python
def begin_episode(self, agent_indices):
  """Reset the recurrent states and stored episode.

  Args:
    agent_indices: Tensor containing current batch indices.

  Returns:
    Summary tensor.
  """
  with tf.name_scope('begin_episode/'):
    if self._last_state is None:
      reset_state = tf.no_op()
    else:
      reset_state = utility.reinit_nested_vars(
          self._last_state, agent_indices)
    reset_buffer = self._current_episodes.clear(agent_indices)
    with tf.control_dependencies([reset_state, reset_buffer]):
      return tf.constant('')
```
"def",
"begin_episode",
"(",
"self",
",",
"agent_indices",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'begin_episode/'",
")",
":",
"if",
"self",
".",
"_last_state",
"is",
"None",
":",
"reset_state",
"=",
"tf",
".",
"no_op",
"(",
")",
"else",
":",
"reset_state",
"=",
"utility",
".",
"reinit_nested_vars",
"(",
"self",
".",
"_last_state",
",",
"agent_indices",
")",
"reset_buffer",
"=",
"self",
".",
"_current_episodes",
".",
"clear",
"(",
"agent_indices",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"reset_state",
",",
"reset_buffer",
"]",
")",
":",
"return",
"tf",
".",
"constant",
"(",
"''",
")"
] | Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor. | [
"Reset",
"the",
"recurrent",
"states",
"and",
"stored",
"episode",
"."
] | python | train |
internetarchive/brozzler | brozzler/chrome.py | https://github.com/internetarchive/brozzler/blob/411b3f266a38b9bb942021c0121ebd8e5ca66447/brozzler/chrome.py#L34-L60 | python | train

```python
def check_version(chrome_exe):
    '''
    Raises SystemExit if `chrome_exe` is not a supported browser version.

    Must run in the main thread to have the desired effect.
    '''
    # mac$ /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --version
    # Google Chrome 64.0.3282.140
    # mac$ /Applications/Google\ Chrome\ Canary.app/Contents/MacOS/Google\ Chrome\ Canary --version
    # Google Chrome 66.0.3341.0 canary
    # linux$ chromium-browser --version
    # Using PPAPI flash.
    #  --ppapi-flash-path=/usr/lib/adobe-flashplugin/libpepflashplayer.so --ppapi-flash-version=
    # Chromium 61.0.3163.100 Built on Ubuntu , running on Ubuntu 16.04
    cmd = [chrome_exe, '--version']
    out = subprocess.check_output(cmd, timeout=60)
    m = re.search(br'(Chromium|Google Chrome) ([\d.]+)', out)
    if not m:
        sys.exit(
                'unable to parse browser version from output of '
                '%r: %r' % (subprocess.list2cmdline(cmd), out))
    version_str = m.group(2).decode()
    major_version = int(version_str.split('.')[0])
    if major_version < 64:
        sys.exit('brozzler requires chrome/chromium version 64 or '
                 'later but %s reports version %s' % (
                     chrome_exe, version_str))
```
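The version-parsing step in isolation, using one of the sample outputs quoted in the comments above:

```python
import re

out = b'Chromium 61.0.3163.100 Built on Ubuntu , running on Ubuntu 16.04\n'
m = re.search(br'(Chromium|Google Chrome) ([\d.]+)', out)
version_str = m.group(2).decode()
print(version_str)                          # '61.0.3163.100'
print(int(version_str.split('.')[0]) < 64)  # True, so this input would SystemExit
```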
"def",
"check_version",
"(",
"chrome_exe",
")",
":",
"# mac$ /Applications/Google\\ Chrome.app/Contents/MacOS/Google\\ Chrome --version",
"# Google Chrome 64.0.3282.140 ",
"# mac$ /Applications/Google\\ Chrome\\ Canary.app/Contents/MacOS/Google\\ Chrome\\ Canary --version",
"# Google Chrome 66.0.3341.0 canary",
"# linux$ chromium-browser --version",
"# Using PPAPI flash.",
"# --ppapi-flash-path=/usr/lib/adobe-flashplugin/libpepflashplayer.so --ppapi-flash-version=",
"# Chromium 61.0.3163.100 Built on Ubuntu , running on Ubuntu 16.04",
"cmd",
"=",
"[",
"chrome_exe",
",",
"'--version'",
"]",
"out",
"=",
"subprocess",
".",
"check_output",
"(",
"cmd",
",",
"timeout",
"=",
"60",
")",
"m",
"=",
"re",
".",
"search",
"(",
"br'(Chromium|Google Chrome) ([\\d.]+)'",
",",
"out",
")",
"if",
"not",
"m",
":",
"sys",
".",
"exit",
"(",
"'unable to parse browser version from output of '",
"'%r: %r'",
"%",
"(",
"subprocess",
".",
"list2cmdline",
"(",
"cmd",
")",
",",
"out",
")",
")",
"version_str",
"=",
"m",
".",
"group",
"(",
"2",
")",
".",
"decode",
"(",
")",
"major_version",
"=",
"int",
"(",
"version_str",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"if",
"major_version",
"<",
"64",
":",
"sys",
".",
"exit",
"(",
"'brozzler requires chrome/chromium version 64 or '",
"'later but %s reports version %s'",
"%",
"(",
"chrome_exe",
",",
"version_str",
")",
")"
] | Raises SystemExit if `chrome_exe` is not a supported browser version.
Must run in the main thread to have the desired effect. | [
"Raises",
"SystemExit",
"if",
"chrome_exe",
"is",
"not",
"a",
"supported",
"browser",
"version",
"."
] | python | train |
jmoiron/johnny-cache | johnny/cache.py | https://github.com/jmoiron/johnny-cache/blob/d96ea94c5dfcde517ff8f65d6ba4e435d8a0168c/johnny/cache.py#L232-L244 | python | train

```python
def get_multi_generation(self, tables, db='default'):
    """Takes a list of table names and returns an aggregate
    value for the generation"""
    generations = []
    for table in tables:
        generations.append(self.get_single_generation(table, db))
    key = self.keygen.gen_multi_key(generations, db)
    val = self.cache_backend.get(key, None, db)
    #if local.get('in_test', None): print force_bytes(val).ljust(32), key
    if val is None:
        val = self.keygen.random_generator()
        self.cache_backend.set(key, val, settings.MIDDLEWARE_SECONDS, db)
    return val
```
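A self-contained sketch of the generational-key idea (simplified names, not johnny-cache's actual API):

```python
cache = {}

def generation(table):
    # Each table gets a generation token; bumping it invalidates old keys.
    return cache.setdefault(("gen", table), "v0")

def multi_key(tables):
    return "multi:" + ":".join(generation(t) for t in sorted(tables))

key = multi_key(["auth_user", "blog_post"])
# Changing any one table's generation changes the aggregate key, which
# implicitly invalidates every query result cached under the old key.
```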
"def",
"get_multi_generation",
"(",
"self",
",",
"tables",
",",
"db",
"=",
"'default'",
")",
":",
"generations",
"=",
"[",
"]",
"for",
"table",
"in",
"tables",
":",
"generations",
".",
"append",
"(",
"self",
".",
"get_single_generation",
"(",
"table",
",",
"db",
")",
")",
"key",
"=",
"self",
".",
"keygen",
".",
"gen_multi_key",
"(",
"generations",
",",
"db",
")",
"val",
"=",
"self",
".",
"cache_backend",
".",
"get",
"(",
"key",
",",
"None",
",",
"db",
")",
"#if local.get('in_test', None): print force_bytes(val).ljust(32), key",
"if",
"val",
"is",
"None",
":",
"val",
"=",
"self",
".",
"keygen",
".",
"random_generator",
"(",
")",
"self",
".",
"cache_backend",
".",
"set",
"(",
"key",
",",
"val",
",",
"settings",
".",
"MIDDLEWARE_SECONDS",
",",
"db",
")",
"return",
"val"
] | Takes a list of table names and returns an aggregate
value for the generation | [
"Takes",
"a",
"list",
"of",
"table",
"names",
"and",
"returns",
"an",
"aggregate",
"value",
"for",
"the",
"generation"
] | python | train |
mdsol/rwslib | rwslib/builders/modm.py | https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/modm.py#L200-L209 | python | train

```python
def add_milestone(self,
                  milestone,
                  codelistoid="MILESTONES"):
    """
    Add a milestone

    :param codelistoid: specify the CodeListOID (defaults to MILESTONES)
    :param str milestone: Milestone to add
    """
    if milestone not in self.milestones.get(codelistoid, []):
        self._milestones.setdefault(codelistoid, []).append(milestone)
```
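The setdefault-with-dedup pattern in isolation (milestone names are illustrative):

```python
milestones = {}
for m in ["Informed Consent", "Screening", "Informed Consent"]:
    if m not in milestones.get("MILESTONES", []):
        milestones.setdefault("MILESTONES", []).append(m)
print(milestones)  # {'MILESTONES': ['Informed Consent', 'Screening']}
```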
"def",
"add_milestone",
"(",
"self",
",",
"milestone",
",",
"codelistoid",
"=",
"\"MILESTONES\"",
")",
":",
"if",
"milestone",
"not",
"in",
"self",
".",
"milestones",
".",
"get",
"(",
"codelistoid",
",",
"[",
"]",
")",
":",
"self",
".",
"_milestones",
".",
"setdefault",
"(",
"codelistoid",
",",
"[",
"]",
")",
".",
"append",
"(",
"milestone",
")"
] | Add a milestone
:param codelistoid: specify the CodeListOID (defaults to MILESTONES)
:param str milestone: Milestone to add | [
"Add",
"a",
"milestone",
":",
"param",
"codelistoid",
":",
"specify",
"the",
"CodeListOID",
"(",
"defaults",
"to",
"MILESTONES",
")",
":",
"param",
"str",
"milestone",
":",
"Milestone",
"to",
"add"
] | python | train |
knipknap/exscript | Exscript/stdlib/ipv4.py | https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/stdlib/ipv4.py#L127-L140 | python | train

```python
def pfxmask(scope, ips, pfxlen):
    """
    Applies the given prefix length to the given ips, resulting in a
    (list of) IP network addresses.

    :type  ips: string
    :param ips: An IP address, or a list of IP addresses.
    :type  pfxlen: int
    :param pfxlen: An IP prefix length.
    :rtype:  string
    :return: The mask(s) that result(s) from converting the prefix length.
    """
    mask = ipv4.pfxlen2mask_int(pfxlen[0])
    return [ipv4.int2ip(ipv4.ip2int(ip) & mask) for ip in ips]
```
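The same masking arithmetic expressed with the standard library, for comparison:

```python
import ipaddress

ip, pfxlen = "192.168.10.77", 24
net = ipaddress.ip_network("{}/{}".format(ip, pfxlen), strict=False)
print(net.network_address)  # 192.168.10.0
```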
"def",
"pfxmask",
"(",
"scope",
",",
"ips",
",",
"pfxlen",
")",
":",
"mask",
"=",
"ipv4",
".",
"pfxlen2mask_int",
"(",
"pfxlen",
"[",
"0",
"]",
")",
"return",
"[",
"ipv4",
".",
"int2ip",
"(",
"ipv4",
".",
"ip2int",
"(",
"ip",
")",
"&",
"mask",
")",
"for",
"ip",
"in",
"ips",
"]"
] | Applies the given prefix length to the given ips, resulting in a
(list of) IP network addresses.
:type ips: string
:param ips: An IP address, or a list of IP addresses.
:type pfxlen: int
:param pfxlen: An IP prefix length.
:rtype: string
:return: The mask(s) that result(s) from converting the prefix length. | [
"Applies",
"the",
"given",
"prefix",
"length",
"to",
"the",
"given",
"ips",
"resulting",
"in",
"a",
"(",
"list",
"of",
")",
"IP",
"network",
"addresses",
"."
] | python | train |
fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/win32/context_i386.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/win32/context_i386.py#L102-L112 | python | train

```python
def from_dict(cls, fsa):
    'Instantiate a new structure from a Python dictionary.'
    fsa = dict(fsa)
    s = cls()
    for key in cls._integer_members:
        setattr(s, key, fsa.get(key))
    ra = fsa.get('RegisterArea', None)
    if ra is not None:
        for index in compat.xrange(0, SIZE_OF_80387_REGISTERS):
            s.RegisterArea[index] = ra[index]
    return s
```
"def",
"from_dict",
"(",
"cls",
",",
"fsa",
")",
":",
"fsa",
"=",
"dict",
"(",
"fsa",
")",
"s",
"=",
"cls",
"(",
")",
"for",
"key",
"in",
"cls",
".",
"_integer_members",
":",
"setattr",
"(",
"s",
",",
"key",
",",
"fsa",
".",
"get",
"(",
"key",
")",
")",
"ra",
"=",
"fsa",
".",
"get",
"(",
"'RegisterArea'",
",",
"None",
")",
"if",
"ra",
"is",
"not",
"None",
":",
"for",
"index",
"in",
"compat",
".",
"xrange",
"(",
"0",
",",
"SIZE_OF_80387_REGISTERS",
")",
":",
"s",
".",
"RegisterArea",
"[",
"index",
"]",
"=",
"ra",
"[",
"index",
"]",
"return",
"s"
] | Instance a new structure from a Python dictionary. | [
"Instance",
"a",
"new",
"structure",
"from",
"a",
"Python",
"dictionary",
"."
] | python | train |
HDI-Project/BTB | btb/selection/recent.py | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/selection/recent.py#L56-L69 | python | train

```python
def compute_rewards(self, scores):
    """Compute the velocity of the k+1 most recent scores.

    The velocity is the average distance between scores. Return a list with those k velocities
    padded out with zeros so that the count remains the same.
    """
    # take the k + 1 most recent scores so we can get k velocities
    recent_scores = scores[:-self.k - 2:-1]
    velocities = [recent_scores[i] - recent_scores[i + 1] for i in
                  range(len(recent_scores) - 1)]
    # pad the list out with zeros, so the length of the list is
    # maintained
    zeros = (len(scores) - self.k) * [0]
    return velocities + zeros
```
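The slicing and velocity arithmetic worked through for k = 3:

```python
k = 3
scores = [0.50, 0.55, 0.62, 0.70, 0.71]
recent = scores[:-k - 2:-1]  # newest first: [0.71, 0.70, 0.62, 0.55]
velocities = [recent[i] - recent[i + 1] for i in range(len(recent) - 1)]
print(velocities)                            # approximately [0.01, 0.08, 0.07]
print(velocities + (len(scores) - k) * [0])  # padded back to five entries
```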
"def",
"compute_rewards",
"(",
"self",
",",
"scores",
")",
":",
"# take the k + 1 most recent scores so we can get k velocities",
"recent_scores",
"=",
"scores",
"[",
":",
"-",
"self",
".",
"k",
"-",
"2",
":",
"-",
"1",
"]",
"velocities",
"=",
"[",
"recent_scores",
"[",
"i",
"]",
"-",
"recent_scores",
"[",
"i",
"+",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"recent_scores",
")",
"-",
"1",
")",
"]",
"# pad the list out with zeros, so the length of the list is",
"# maintained",
"zeros",
"=",
"(",
"len",
"(",
"scores",
")",
"-",
"self",
".",
"k",
")",
"*",
"[",
"0",
"]",
"return",
"velocities",
"+",
"zeros"
] | Compute the velocity of thte k+1 most recent scores.
The velocity is the average distance between scores. Return a list with those k velocities
padded out with zeros so that the count remains the same. | [
"Compute",
"the",
"velocity",
"of",
"thte",
"k",
"+",
"1",
"most",
"recent",
"scores",
"."
] | python | train |
tensorflow/lucid | lucid/misc/gl/meshutil.py | https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/gl/meshutil.py#L78-L84 | python | train

```python
def _parse_vertex_tuple(s):
  """Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k' ...)."""
  vt = [0, 0, 0]
  for i, c in enumerate(s.split('/')):
    if c:
      vt[i] = int(c)
  return tuple(vt)
```
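Example inputs and outputs, matching the OBJ face-vertex syntax the docstring describes:

```python
print(_parse_vertex_tuple('4/2/7'))  # (4, 2, 7)
print(_parse_vertex_tuple('4//7'))   # (4, 0, 7), missing texcoord index stays 0
print(_parse_vertex_tuple('4'))      # (4, 0, 0)
```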
"def",
"_parse_vertex_tuple",
"(",
"s",
")",
":",
"vt",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"s",
".",
"split",
"(",
"'/'",
")",
")",
":",
"if",
"c",
":",
"vt",
"[",
"i",
"]",
"=",
"int",
"(",
"c",
")",
"return",
"tuple",
"(",
"vt",
")"
] | Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k' ...). | [
"Parse",
"vertex",
"indices",
"in",
"/",
"separated",
"form",
"(",
"like",
"i",
"/",
"j",
"/",
"k",
"i",
"//",
"k",
"...",
")",
"."
] | python | train |
mozilla-iot/webthing-python | webthing/utils.py | https://github.com/mozilla-iot/webthing-python/blob/65d467c89ed79d0bbc42b8b3c8f9e5a320edd237/webthing/utils.py#L17-L32 | python | test

```python
def get_ip():
    """
    Get the default local IP address.

    From: https://stackoverflow.com/a/28950776
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('10.255.255.255', 1))
        ip = s.getsockname()[0]
    except (socket.error, IndexError):
        ip = '127.0.0.1'
    finally:
        s.close()
    return ip
```
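Note that `connect()` on a UDP socket sends no packets; it only asks the kernel to pick a route, which is why this works without generating traffic. Usage is just:

```python
from webthing.utils import get_ip

print(get_ip())  # e.g. '192.168.1.23' on a typical LAN
```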
"def",
"get_ip",
"(",
")",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"try",
":",
"s",
".",
"connect",
"(",
"(",
"'10.255.255.255'",
",",
"1",
")",
")",
"ip",
"=",
"s",
".",
"getsockname",
"(",
")",
"[",
"0",
"]",
"except",
"(",
"socket",
".",
"error",
",",
"IndexError",
")",
":",
"ip",
"=",
"'127.0.0.1'",
"finally",
":",
"s",
".",
"close",
"(",
")",
"return",
"ip"
] | Get the default local IP address.
From: https://stackoverflow.com/a/28950776 | [
"Get",
"the",
"default",
"local",
"IP",
"address",
"."
] | python | test |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/namespace_range.py | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/namespace_range.py#L305-L322 | python | train

```python
def normalized_start(self):
  """Returns a NamespaceRange with leading non-existent namespaces removed.

  Returns:
    A copy of this NamespaceRange whose namespace_start is adjusted to exclude
    the portion of the range that contains no actual namespaces in the
    datastore. None is returned if the NamespaceRange contains no actual
    namespaces in the datastore.
  """
  namespaces_after_key = list(self.make_datastore_query().Run(limit=1))
  if not namespaces_after_key:
    return None
  namespace_after_key = namespaces_after_key[0].name() or ''
  return NamespaceRange(namespace_after_key,
                        self.namespace_end,
                        _app=self.app)
```
"def",
"normalized_start",
"(",
"self",
")",
":",
"namespaces_after_key",
"=",
"list",
"(",
"self",
".",
"make_datastore_query",
"(",
")",
".",
"Run",
"(",
"limit",
"=",
"1",
")",
")",
"if",
"not",
"namespaces_after_key",
":",
"return",
"None",
"namespace_after_key",
"=",
"namespaces_after_key",
"[",
"0",
"]",
".",
"name",
"(",
")",
"or",
"''",
"return",
"NamespaceRange",
"(",
"namespace_after_key",
",",
"self",
".",
"namespace_end",
",",
"_app",
"=",
"self",
".",
"app",
")"
] | Returns a NamespaceRange with leading non-existant namespaces removed.
Returns:
A copy of this NamespaceRange whose namespace_start is adjusted to exclude
the portion of the range that contains no actual namespaces in the
datastore. None is returned if the NamespaceRange contains no actual
namespaces in the datastore. | [
"Returns",
"a",
"NamespaceRange",
"with",
"leading",
"non",
"-",
"existant",
"namespaces",
"removed",
"."
] | python | train |
tanghaibao/jcvi | jcvi/assembly/geneticmap.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/geneticmap.py#L206-L292 | python | train

```python
def dotplot(args):
    """
    %prog dotplot map.csv ref.fasta

    Make dotplot between chromosomes and linkage maps.
    The input map is csv formatted, for example:

    ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition
    scaffold_2707,11508,1,0
    scaffold_2707,11525,1,1.2
    """
    from jcvi.assembly.allmaps import CSVMapLine
    from jcvi.formats.sizes import Sizes
    from jcvi.utils.natsort import natsorted
    from jcvi.graphics.base import shorten
    from jcvi.graphics.dotplot import plt, savefig, markup, normalize_axes, \
        downsample, plot_breaks_and_labels, thousands

    p = OptionParser(dotplot.__doc__)
    p.set_outfile(outfile=None)
    opts, args, iopts = p.set_image_options(args, figsize="8x8",
                                            style="dark", dpi=90, cmap="copper")

    if len(args) != 2:
        sys.exit(not p.print_help())

    csvfile, fastafile = args
    sizes = natsorted(Sizes(fastafile).mapping.items())
    seen = set()
    raw_data = []

    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])    # the whole canvas
    ax = fig.add_axes([.1, .1, .8, .8])  # the dot plot

    fp = must_open(csvfile)
    for row in fp:
        m = CSVMapLine(row)
        seen.add(m.seqid)
        raw_data.append(m)

    # X-axis is the genome assembly
    ctgs, ctg_sizes = zip(*sizes)
    xsize = sum(ctg_sizes)
    qb = list(np.cumsum(ctg_sizes))
    qbreaks = list(zip(ctgs, [0] + qb, qb))
    qstarts = dict(zip(ctgs, [0] + qb))

    # Y-axis is the map
    key = lambda x: x.lg
    raw_data.sort(key=key)
    ssizes = {}
    for lg, d in groupby(raw_data, key=key):
        ssizes[lg] = max([x.cm for x in d])
    ssizes = natsorted(ssizes.items())
    lgs, lg_sizes = zip(*ssizes)
    ysize = sum(lg_sizes)
    sb = list(np.cumsum(lg_sizes))
    sbreaks = list(zip([("LG" + x) for x in lgs], [0] + sb, sb))
    sstarts = dict(zip(lgs, [0] + sb))

    # Re-code all the scatter dots
    data = [(qstarts[x.seqid] + x.pos, sstarts[x.lg] + x.cm, 'g') \
            for x in raw_data if (x.seqid in qstarts)]
    npairs = downsample(data)
    x, y, c = zip(*data)
    ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0)

    # Flip X-Y label
    gy, gx = op.basename(csvfile).split(".")[:2]
    gx, gy = shorten(gx, maxchar=30), shorten(gy, maxchar=30)
    xlim, ylim = plot_breaks_and_labels(fig, root, ax, gx, gy,
                                        xsize, ysize, qbreaks, sbreaks)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    title = "Alignment: {} vs {}".format(gx, gy)
    title += " ({} markers)".format(thousands(npairs))
    root.set_title(markup(title), x=.5, y=.96, color="k")
    logging.debug(title)
    normalize_axes(root)

    image_name = opts.outfile or \
        (csvfile.rsplit(".", 1)[0] + "." + iopts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
    fig.clear()
```
"def",
"dotplot",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"assembly",
".",
"allmaps",
"import",
"CSVMapLine",
"from",
"jcvi",
".",
"formats",
".",
"sizes",
"import",
"Sizes",
"from",
"jcvi",
".",
"utils",
".",
"natsort",
"import",
"natsorted",
"from",
"jcvi",
".",
"graphics",
".",
"base",
"import",
"shorten",
"from",
"jcvi",
".",
"graphics",
".",
"dotplot",
"import",
"plt",
",",
"savefig",
",",
"markup",
",",
"normalize_axes",
",",
"downsample",
",",
"plot_breaks_and_labels",
",",
"thousands",
"p",
"=",
"OptionParser",
"(",
"dotplot",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
"outfile",
"=",
"None",
")",
"opts",
",",
"args",
",",
"iopts",
"=",
"p",
".",
"set_image_options",
"(",
"args",
",",
"figsize",
"=",
"\"8x8\"",
",",
"style",
"=",
"\"dark\"",
",",
"dpi",
"=",
"90",
",",
"cmap",
"=",
"\"copper\"",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"csvfile",
",",
"fastafile",
"=",
"args",
"sizes",
"=",
"natsorted",
"(",
"Sizes",
"(",
"fastafile",
")",
".",
"mapping",
".",
"items",
"(",
")",
")",
"seen",
"=",
"set",
"(",
")",
"raw_data",
"=",
"[",
"]",
"fig",
"=",
"plt",
".",
"figure",
"(",
"1",
",",
"(",
"iopts",
".",
"w",
",",
"iopts",
".",
"h",
")",
")",
"root",
"=",
"fig",
".",
"add_axes",
"(",
"[",
"0",
",",
"0",
",",
"1",
",",
"1",
"]",
")",
"# the whole canvas",
"ax",
"=",
"fig",
".",
"add_axes",
"(",
"[",
".1",
",",
".1",
",",
".8",
",",
".8",
"]",
")",
"# the dot plot",
"fp",
"=",
"must_open",
"(",
"csvfile",
")",
"for",
"row",
"in",
"fp",
":",
"m",
"=",
"CSVMapLine",
"(",
"row",
")",
"seen",
".",
"add",
"(",
"m",
".",
"seqid",
")",
"raw_data",
".",
"append",
"(",
"m",
")",
"# X-axis is the genome assembly",
"ctgs",
",",
"ctg_sizes",
"=",
"zip",
"(",
"*",
"sizes",
")",
"xsize",
"=",
"sum",
"(",
"ctg_sizes",
")",
"qb",
"=",
"list",
"(",
"np",
".",
"cumsum",
"(",
"ctg_sizes",
")",
")",
"qbreaks",
"=",
"list",
"(",
"zip",
"(",
"ctgs",
",",
"[",
"0",
"]",
"+",
"qb",
",",
"qb",
")",
")",
"qstarts",
"=",
"dict",
"(",
"zip",
"(",
"ctgs",
",",
"[",
"0",
"]",
"+",
"qb",
")",
")",
"# Y-axis is the map",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"lg",
"raw_data",
".",
"sort",
"(",
"key",
"=",
"key",
")",
"ssizes",
"=",
"{",
"}",
"for",
"lg",
",",
"d",
"in",
"groupby",
"(",
"raw_data",
",",
"key",
"=",
"key",
")",
":",
"ssizes",
"[",
"lg",
"]",
"=",
"max",
"(",
"[",
"x",
".",
"cm",
"for",
"x",
"in",
"d",
"]",
")",
"ssizes",
"=",
"natsorted",
"(",
"ssizes",
".",
"items",
"(",
")",
")",
"lgs",
",",
"lg_sizes",
"=",
"zip",
"(",
"*",
"ssizes",
")",
"ysize",
"=",
"sum",
"(",
"lg_sizes",
")",
"sb",
"=",
"list",
"(",
"np",
".",
"cumsum",
"(",
"lg_sizes",
")",
")",
"sbreaks",
"=",
"list",
"(",
"zip",
"(",
"[",
"(",
"\"LG\"",
"+",
"x",
")",
"for",
"x",
"in",
"lgs",
"]",
",",
"[",
"0",
"]",
"+",
"sb",
",",
"sb",
")",
")",
"sstarts",
"=",
"dict",
"(",
"zip",
"(",
"lgs",
",",
"[",
"0",
"]",
"+",
"sb",
")",
")",
"# Re-code all the scatter dots",
"data",
"=",
"[",
"(",
"qstarts",
"[",
"x",
".",
"seqid",
"]",
"+",
"x",
".",
"pos",
",",
"sstarts",
"[",
"x",
".",
"lg",
"]",
"+",
"x",
".",
"cm",
",",
"'g'",
")",
"for",
"x",
"in",
"raw_data",
"if",
"(",
"x",
".",
"seqid",
"in",
"qstarts",
")",
"]",
"npairs",
"=",
"downsample",
"(",
"data",
")",
"x",
",",
"y",
",",
"c",
"=",
"zip",
"(",
"*",
"data",
")",
"ax",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"c",
"=",
"c",
",",
"edgecolors",
"=",
"\"none\"",
",",
"s",
"=",
"2",
",",
"lw",
"=",
"0",
")",
"# Flip X-Y label",
"gy",
",",
"gx",
"=",
"op",
".",
"basename",
"(",
"csvfile",
")",
".",
"split",
"(",
"\".\"",
")",
"[",
":",
"2",
"]",
"gx",
",",
"gy",
"=",
"shorten",
"(",
"gx",
",",
"maxchar",
"=",
"30",
")",
",",
"shorten",
"(",
"gy",
",",
"maxchar",
"=",
"30",
")",
"xlim",
",",
"ylim",
"=",
"plot_breaks_and_labels",
"(",
"fig",
",",
"root",
",",
"ax",
",",
"gx",
",",
"gy",
",",
"xsize",
",",
"ysize",
",",
"qbreaks",
",",
"sbreaks",
")",
"ax",
".",
"set_xlim",
"(",
"xlim",
")",
"ax",
".",
"set_ylim",
"(",
"ylim",
")",
"title",
"=",
"\"Alignment: {} vs {}\"",
".",
"format",
"(",
"gx",
",",
"gy",
")",
"title",
"+=",
"\" ({} markers)\"",
".",
"format",
"(",
"thousands",
"(",
"npairs",
")",
")",
"root",
".",
"set_title",
"(",
"markup",
"(",
"title",
")",
",",
"x",
"=",
".5",
",",
"y",
"=",
".96",
",",
"color",
"=",
"\"k\"",
")",
"logging",
".",
"debug",
"(",
"title",
")",
"normalize_axes",
"(",
"root",
")",
"image_name",
"=",
"opts",
".",
"outfile",
"or",
"(",
"csvfile",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"+",
"\".\"",
"+",
"iopts",
".",
"format",
")",
"savefig",
"(",
"image_name",
",",
"dpi",
"=",
"iopts",
".",
"dpi",
",",
"iopts",
"=",
"iopts",
")",
"fig",
".",
"clear",
"(",
")"
] | %prog dotplot map.csv ref.fasta
Make dotplot between chromosomes and linkage maps.
The input map is csv formatted, for example:
ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition
scaffold_2707,11508,1,0
scaffold_2707,11525,1,1.2 | [
"%prog",
"dotplot",
"map",
".",
"csv",
"ref",
".",
"fasta"
] | python | train |
softlayer/softlayer-python | SoftLayer/managers/dedicated_host.py | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/dedicated_host.py#L313-L326 | python | train

```python
def verify_order(self, hostname, domain, location, hourly, flavor, router=None):
    """Verifies an order for a dedicated host.

    See :func:`place_order` for a list of available options.
    """
    create_options = self._generate_create_dict(hostname=hostname,
                                                router=router,
                                                domain=domain,
                                                flavor=flavor,
                                                datacenter=location,
                                                hourly=hourly)
    return self.client['Product_Order'].verifyOrder(create_options)
```
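A hypothetical invocation; the datacenter and flavor keyname below are placeholders, not guaranteed offerings:

```python
import SoftLayer
from SoftLayer.managers.dedicated_host import DedicatedHostManager

client = SoftLayer.create_client_from_env()
mgr = DedicatedHostManager(client)
result = mgr.verify_order(hostname='host01', domain='example.com',
                          location='dal05', hourly=True,
                          flavor='56_CORES_X_242_RAM_X_1_4_TB')
```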
"def",
"verify_order",
"(",
"self",
",",
"hostname",
",",
"domain",
",",
"location",
",",
"hourly",
",",
"flavor",
",",
"router",
"=",
"None",
")",
":",
"create_options",
"=",
"self",
".",
"_generate_create_dict",
"(",
"hostname",
"=",
"hostname",
",",
"router",
"=",
"router",
",",
"domain",
"=",
"domain",
",",
"flavor",
"=",
"flavor",
",",
"datacenter",
"=",
"location",
",",
"hourly",
"=",
"hourly",
")",
"return",
"self",
".",
"client",
"[",
"'Product_Order'",
"]",
".",
"verifyOrder",
"(",
"create_options",
")"
] | Verifies an order for a dedicated host.
See :func:`place_order` for a list of available options. | [
"Verifies",
"an",
"order",
"for",
"a",
"dedicated",
"host",
"."
] | python | train |
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/setuptools/command/build_py.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/command/build_py.py#L141-L171 | python | test

```python
def check_package(self, package, package_dir):
    """Check namespace packages' __init__ for declare_namespace"""
    try:
        return self.packages_checked[package]
    except KeyError:
        pass

    init_py = orig.build_py.check_package(self, package, package_dir)
    self.packages_checked[package] = init_py

    if not init_py or not self.distribution.namespace_packages:
        return init_py

    for pkg in self.distribution.namespace_packages:
        if pkg == package or pkg.startswith(package + '.'):
            break
    else:
        return init_py

    f = open(init_py, 'rbU')
    if 'declare_namespace'.encode() not in f.read():
        from distutils.errors import DistutilsError

        raise DistutilsError(
            "Namespace package problem: %s is a namespace package, but "
            "its\n__init__.py does not call declare_namespace()! Please "
            'fix it.\n(See the setuptools manual under '
            '"Namespace Packages" for details.)\n"' % (package,)
        )
    f.close()
    return init_py
```
"def",
"check_package",
"(",
"self",
",",
"package",
",",
"package_dir",
")",
":",
"try",
":",
"return",
"self",
".",
"packages_checked",
"[",
"package",
"]",
"except",
"KeyError",
":",
"pass",
"init_py",
"=",
"orig",
".",
"build_py",
".",
"check_package",
"(",
"self",
",",
"package",
",",
"package_dir",
")",
"self",
".",
"packages_checked",
"[",
"package",
"]",
"=",
"init_py",
"if",
"not",
"init_py",
"or",
"not",
"self",
".",
"distribution",
".",
"namespace_packages",
":",
"return",
"init_py",
"for",
"pkg",
"in",
"self",
".",
"distribution",
".",
"namespace_packages",
":",
"if",
"pkg",
"==",
"package",
"or",
"pkg",
".",
"startswith",
"(",
"package",
"+",
"'.'",
")",
":",
"break",
"else",
":",
"return",
"init_py",
"f",
"=",
"open",
"(",
"init_py",
",",
"'rbU'",
")",
"if",
"'declare_namespace'",
".",
"encode",
"(",
")",
"not",
"in",
"f",
".",
"read",
"(",
")",
":",
"from",
"distutils",
".",
"errors",
"import",
"DistutilsError",
"raise",
"DistutilsError",
"(",
"\"Namespace package problem: %s is a namespace package, but \"",
"\"its\\n__init__.py does not call declare_namespace()! Please \"",
"'fix it.\\n(See the setuptools manual under '",
"'\"Namespace Packages\" for details.)\\n\"'",
"%",
"(",
"package",
",",
")",
")",
"f",
".",
"close",
"(",
")",
"return",
"init_py"
] | Check namespace packages' __init__ for declare_namespace | [
"Check",
"namespace",
"packages",
"__init__",
"for",
"declare_namespace"
] | python | test |
secdev/scapy | scapy/layers/radius.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/radius.py#L566-L575 | python | train

```python
def compute_message_authenticator(radius_packet, packed_req_authenticator,
                                  shared_secret):
    """
    Computes the "Message-Authenticator" of a given RADIUS packet.
    """
    data = prepare_packed_data(radius_packet, packed_req_authenticator)
    radius_hmac = hmac.new(shared_secret, data, hashlib.md5)
    return radius_hmac.digest()
```
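The HMAC-MD5 step in isolation, with toy bytes rather than a real packet:

```python
import hashlib
import hmac

shared_secret = b'testing123'
data = b'\x01\x00\x00\x14' + b'\x00' * 16  # code/id/length plus a zeroed authenticator
print(hmac.new(shared_secret, data, hashlib.md5).hexdigest())
```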
"def",
"compute_message_authenticator",
"(",
"radius_packet",
",",
"packed_req_authenticator",
",",
"shared_secret",
")",
":",
"data",
"=",
"prepare_packed_data",
"(",
"radius_packet",
",",
"packed_req_authenticator",
")",
"radius_hmac",
"=",
"hmac",
".",
"new",
"(",
"shared_secret",
",",
"data",
",",
"hashlib",
".",
"md5",
")",
"return",
"radius_hmac",
".",
"digest",
"(",
")"
] | Computes the "Message-Authenticator" of a given RADIUS packet. | [
"Computes",
"the",
"Message",
"-",
"Authenticator",
"of",
"a",
"given",
"RADIUS",
"packet",
"."
] | python | train |
crunchyroll/ef-open | efopen/ef_cf_diff.py | https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_cf_diff.py#L328-L363 | python | train

```python
def get_dict_registry_services(registry, template_files, warn_missing_files=True):
    """
    Return a dict mapping service name to a dict containing the service's
    type ('fixtures', 'platform_services', 'application_services', 'internal_services'),
    the template file's absolute path, and a list of environments to which the
    service is intended to deploy.

    Service names that appear twice in the output list will emit a warning and
    ignore the latter records.

    Services which have no template file will not appear in the returned dict.
    If the `warn_missing_files` boolean is True these files will emit a warning.
    """
    with open(registry) as fr:
        parsed_registry = json.load(fr)

    services = {}
    for type, type_services in parsed_registry.iteritems():
        for name, service in type_services.iteritems():
            if name in services:
                logger.warning("Template name appears twice, ignoring later items: `%s`", name)
                continue

            template_file = get_matching_service_template_file(name, template_files)
            if not template_file:
                if warn_missing_files:
                    logger.warning("No template file for `%s` (%s) `%s`", type, service['type'], name)
                continue

            services[name] = {
                'type': type,
                'template_file': template_file,
                'environments': service['environments']
            }
    return services
```
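A sketch of the registry shape the function expects, inferred from the keys it reads (service names and types here are invented):

```python
registry = {
    "application_services": {
        "checkout": {"type": "http_service", "environments": ["staging", "prod"]}
    },
    "fixtures": {
        "vpc": {"type": "fixture", "environments": ["prod"]}
    }
}
# For each service with a matching template file, the result maps
# "checkout" -> {"type": ..., "template_file": ..., "environments": [...]}.
```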
"def",
"get_dict_registry_services",
"(",
"registry",
",",
"template_files",
",",
"warn_missing_files",
"=",
"True",
")",
":",
"with",
"open",
"(",
"registry",
")",
"as",
"fr",
":",
"parsed_registry",
"=",
"json",
".",
"load",
"(",
"fr",
")",
"services",
"=",
"{",
"}",
"for",
"type",
",",
"type_services",
"in",
"parsed_registry",
".",
"iteritems",
"(",
")",
":",
"for",
"name",
",",
"service",
"in",
"type_services",
".",
"iteritems",
"(",
")",
":",
"if",
"name",
"in",
"services",
":",
"logger",
".",
"warning",
"(",
"\"Template name appears twice, ignoring later items: `%s`\"",
",",
"name",
")",
"continue",
"template_file",
"=",
"get_matching_service_template_file",
"(",
"name",
",",
"template_files",
")",
"if",
"not",
"template_file",
":",
"if",
"warn_missing_files",
":",
"logger",
".",
"warning",
"(",
"\"No template file for `%s` (%s) `%s`\"",
",",
"type",
",",
"service",
"[",
"'type'",
"]",
",",
"name",
")",
"continue",
"services",
"[",
"name",
"]",
"=",
"{",
"'type'",
":",
"type",
",",
"'template_file'",
":",
"template_file",
",",
"'environments'",
":",
"service",
"[",
"'environments'",
"]",
"}",
"return",
"services"
] | Return a dict mapping service name to a dict containing the service's
type ('fixtures', 'platform_services', 'application_services', 'internal_services'),
the template file's absolute path, and a list of environments to which the
service is intended to deploy.
Service names that appear twice in the output list will emit a warning and
ignore the latter records.
Services which have no template file will not appear in the returned dict.
If the `warn_missing_files` boolean is True these files will emit a warning. | [
"Return",
"a",
"dict",
"mapping",
"service",
"name",
"to",
"a",
"dict",
"containing",
"the",
"service",
"s",
"type",
"(",
"fixtures",
"platform_services",
"application_services",
"internal_services",
")",
"the",
"template",
"file",
"s",
"absolute",
"path",
"and",
"a",
"list",
"of",
"environments",
"to",
"which",
"the",
"service",
"is",
"intended",
"to",
"deploy",
"."
] | python | train |
wheeler-microfluidics/dmf-control-board-firmware | dmf_control_board_firmware/__init__.py | https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/__init__.py#L2361-L2387 | python | train

```python
def write_all_series_channel_values(self, read_f, write_f, channel,
                                    values):
    '''
    Write all values for the specified channel of the type corresponding
    to the functions `read_f`/`write_f` (i.e., either series resistance or
    series capacitance), updating only the entries that differ from the
    values currently on the board.
    '''
    # Create a copy of the new values we intend to write. Otherwise, if
    # `values` is a reference to the calibration object owned by the
    # control board, it can be overwritten in the following step which will
    # prevent the update.
    #
    # See http://microfluidics.utoronto.ca/trac/dropbot/ticket/81
    values = copy.deepcopy(values)

    # Read the current values, and only update the values that are
    # different.
    original_values = self.read_all_series_channel_values(read_f, channel)

    # Make sure that the number of supplied values matches the number of
    # corresponding values read from the channel.
    assert len(values) == len(original_values)

    for i in range(len(original_values)):
        if values[i] != original_values[i]:
            write_f(channel, values[i], i)
```
"def",
"write_all_series_channel_values",
"(",
"self",
",",
"read_f",
",",
"write_f",
",",
"channel",
",",
"values",
")",
":",
"# Create a copy of the new values we intend to write. Otherwise, if",
"# `values` is a reference to the calibration object owned by the",
"# control board, it can be overwritten in the following step which will",
"# prevent the update.",
"#",
"# See http://microfluidics.utoronto.ca/trac/dropbot/ticket/81",
"values",
"=",
"copy",
".",
"deepcopy",
"(",
"values",
")",
"# Read the current values, and only update the values that are",
"# different.",
"original_values",
"=",
"self",
".",
"read_all_series_channel_values",
"(",
"read_f",
",",
"channel",
")",
"# Make sure that the number of supplied values matches the number of",
"# corresponding values read from the channel.",
"assert",
"(",
"len",
"(",
"values",
")",
"==",
"len",
"(",
"original_values",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"original_values",
")",
")",
":",
"if",
"values",
"[",
"i",
"]",
"!=",
"original_values",
"[",
"i",
"]",
":",
"write_f",
"(",
"channel",
",",
"values",
"[",
"i",
"]",
",",
"i",
")"
] | Return all values for the specified channel of the type corresponding
to the function `f`, where `f` is either `self.series_resistance` or
`self.series_capacitance`. | [
"Return",
"all",
"values",
"for",
"the",
"specified",
"channel",
"of",
"the",
"type",
"corresponding",
"to",
"the",
"function",
"f",
"where",
"f",
"is",
"either",
"self",
".",
"series_resistance",
"or",
"self",
".",
"series_capacitance",
"."
] | python | train |
log2timeline/dfdatetime | dfdatetime/interface.py | https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/interface.py#L630-L642 | python | train

```python
def _GetDateValuesWithEpoch(self, number_of_days, date_time_epoch):
  """Determines date values.

  Args:
    number_of_days (int): number of days since epoch.
    date_time_epoch (DateTimeEpoch): date and time of the epoch.

  Returns:
    tuple[int, int, int]: year, month, day of month.
  """
  return self._GetDateValues(
      number_of_days, date_time_epoch.year, date_time_epoch.month,
      date_time_epoch.day_of_month)
```
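Equivalent standard-library arithmetic for a POSIX-style epoch of 1970-01-01:

```python
import datetime

d = datetime.date(1970, 1, 1) + datetime.timedelta(days=18000)
print((d.year, d.month, d.day))  # (2019, 4, 14)
```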
"def",
"_GetDateValuesWithEpoch",
"(",
"self",
",",
"number_of_days",
",",
"date_time_epoch",
")",
":",
"return",
"self",
".",
"_GetDateValues",
"(",
"number_of_days",
",",
"date_time_epoch",
".",
"year",
",",
"date_time_epoch",
".",
"month",
",",
"date_time_epoch",
".",
"day_of_month",
")"
] | Determines date values.
Args:
number_of_days (int): number of days since epoch.
date_time_epoch (DateTimeEpoch): date and time of the epoch.
Returns:
tuple[int, int, int]: year, month, day of month. | [
"Determines",
"date",
"values",
"."
] | python | train |
tradenity/python-sdk | tradenity/resources/states_geo_zone.py | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/states_geo_zone.py#L426-L446 | python | train

```python
def delete_states_geo_zone_by_id(cls, states_geo_zone_id, **kwargs):
    """Delete StatesGeoZone

    Delete an instance of StatesGeoZone by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.delete_states_geo_zone_by_id(states_geo_zone_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str states_geo_zone_id: ID of statesGeoZone to delete. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._delete_states_geo_zone_by_id_with_http_info(states_geo_zone_id, **kwargs)
    else:
        (data) = cls._delete_states_geo_zone_by_id_with_http_info(states_geo_zone_id, **kwargs)
        return data
```
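Hypothetical calls (the zone ID is made up); note that on Python 3.7+ `async` is a reserved word, so the flag has to be passed through a dict rather than as a literal keyword:

```python
StatesGeoZone.delete_states_geo_zone_by_id("zone-id")  # synchronous
thread = StatesGeoZone.delete_states_geo_zone_by_id("zone-id", **{"async": True})
thread.get()  # block until the request thread finishes
```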
"def",
"delete_states_geo_zone_by_id",
"(",
"cls",
",",
"states_geo_zone_id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_delete_states_geo_zone_by_id_with_http_info",
"(",
"states_geo_zone_id",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_delete_states_geo_zone_by_id_with_http_info",
"(",
"states_geo_zone_id",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | Delete StatesGeoZone
Delete an instance of StatesGeoZone by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_states_geo_zone_by_id(states_geo_zone_id, async=True)
>>> result = thread.get()
:param async bool
:param str states_geo_zone_id: ID of statesGeoZone to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | [
"Delete",
"StatesGeoZone"
] | python | train |
fermiPy/fermipy | fermipy/wcs_utils.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/wcs_utils.py#L123-L182 | python | train

```python
def create_wcs(skydir, coordsys='CEL', projection='AIT',
               cdelt=1.0, crpix=1., naxis=2, energies=None):
    """Create a WCS object.

    Parameters
    ----------
    skydir : `~astropy.coordinates.SkyCoord`
        Sky coordinate of the WCS reference point.
    coordsys : str
    projection : str
    cdelt : float or (float,float)
        In the first case the same value is used for x and y axes
    crpix : float or (float,float)
        In the first case the same value is used for x and y axes
    naxis : {2, 3}
        Number of dimensions of the projection.
    energies : array-like
        Array of energies that defines the third dimension if naxis=3.
    """
    w = WCS(naxis=naxis)

    if coordsys == 'CEL':
        w.wcs.ctype[0] = 'RA---%s' % (projection)
        w.wcs.ctype[1] = 'DEC--%s' % (projection)
        w.wcs.crval[0] = skydir.icrs.ra.deg
        w.wcs.crval[1] = skydir.icrs.dec.deg
    elif coordsys == 'GAL':
        w.wcs.ctype[0] = 'GLON-%s' % (projection)
        w.wcs.ctype[1] = 'GLAT-%s' % (projection)
        w.wcs.crval[0] = skydir.galactic.l.deg
        w.wcs.crval[1] = skydir.galactic.b.deg
    else:
        raise Exception('Unrecognized coordinate system.')

    try:
        w.wcs.crpix[0] = crpix[0]
        w.wcs.crpix[1] = crpix[1]
    except:
        w.wcs.crpix[0] = crpix
        w.wcs.crpix[1] = crpix

    try:
        w.wcs.cdelt[0] = cdelt[0]
        w.wcs.cdelt[1] = cdelt[1]
    except:
        w.wcs.cdelt[0] = -cdelt
        w.wcs.cdelt[1] = cdelt

    w = WCS(w.to_header())

    if naxis == 3 and energies is not None:
        w.wcs.crpix[2] = 1
        w.wcs.crval[2] = energies[0]
        w.wcs.cdelt[2] = energies[1] - energies[0]
        w.wcs.ctype[2] = 'Energy'
        w.wcs.cunit[2] = 'MeV'

    return w
```
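A usage sketch (the sky position is illustrative; `WCS` here is astropy's, as imported by this module):

```python
from astropy.coordinates import SkyCoord

skydir = SkyCoord(ra=83.63, dec=22.01, unit='deg')
w = create_wcs(skydir, coordsys='GAL', projection='AIT', cdelt=0.1, crpix=50.5)
print(w.wcs.ctype)  # ['GLON-AIT', 'GLAT-AIT']
```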
"def",
"create_wcs",
"(",
"skydir",
",",
"coordsys",
"=",
"'CEL'",
",",
"projection",
"=",
"'AIT'",
",",
"cdelt",
"=",
"1.0",
",",
"crpix",
"=",
"1.",
",",
"naxis",
"=",
"2",
",",
"energies",
"=",
"None",
")",
":",
"w",
"=",
"WCS",
"(",
"naxis",
"=",
"naxis",
")",
"if",
"coordsys",
"==",
"'CEL'",
":",
"w",
".",
"wcs",
".",
"ctype",
"[",
"0",
"]",
"=",
"'RA---%s'",
"%",
"(",
"projection",
")",
"w",
".",
"wcs",
".",
"ctype",
"[",
"1",
"]",
"=",
"'DEC--%s'",
"%",
"(",
"projection",
")",
"w",
".",
"wcs",
".",
"crval",
"[",
"0",
"]",
"=",
"skydir",
".",
"icrs",
".",
"ra",
".",
"deg",
"w",
".",
"wcs",
".",
"crval",
"[",
"1",
"]",
"=",
"skydir",
".",
"icrs",
".",
"dec",
".",
"deg",
"elif",
"coordsys",
"==",
"'GAL'",
":",
"w",
".",
"wcs",
".",
"ctype",
"[",
"0",
"]",
"=",
"'GLON-%s'",
"%",
"(",
"projection",
")",
"w",
".",
"wcs",
".",
"ctype",
"[",
"1",
"]",
"=",
"'GLAT-%s'",
"%",
"(",
"projection",
")",
"w",
".",
"wcs",
".",
"crval",
"[",
"0",
"]",
"=",
"skydir",
".",
"galactic",
".",
"l",
".",
"deg",
"w",
".",
"wcs",
".",
"crval",
"[",
"1",
"]",
"=",
"skydir",
".",
"galactic",
".",
"b",
".",
"deg",
"else",
":",
"raise",
"Exception",
"(",
"'Unrecognized coordinate system.'",
")",
"try",
":",
"w",
".",
"wcs",
".",
"crpix",
"[",
"0",
"]",
"=",
"crpix",
"[",
"0",
"]",
"w",
".",
"wcs",
".",
"crpix",
"[",
"1",
"]",
"=",
"crpix",
"[",
"1",
"]",
"except",
":",
"w",
".",
"wcs",
".",
"crpix",
"[",
"0",
"]",
"=",
"crpix",
"w",
".",
"wcs",
".",
"crpix",
"[",
"1",
"]",
"=",
"crpix",
"try",
":",
"w",
".",
"wcs",
".",
"cdelt",
"[",
"0",
"]",
"=",
"cdelt",
"[",
"0",
"]",
"w",
".",
"wcs",
".",
"cdelt",
"[",
"1",
"]",
"=",
"cdelt",
"[",
"1",
"]",
"except",
":",
"w",
".",
"wcs",
".",
"cdelt",
"[",
"0",
"]",
"=",
"-",
"cdelt",
"w",
".",
"wcs",
".",
"cdelt",
"[",
"1",
"]",
"=",
"cdelt",
"w",
"=",
"WCS",
"(",
"w",
".",
"to_header",
"(",
")",
")",
"if",
"naxis",
"==",
"3",
"and",
"energies",
"is",
"not",
"None",
":",
"w",
".",
"wcs",
".",
"crpix",
"[",
"2",
"]",
"=",
"1",
"w",
".",
"wcs",
".",
"crval",
"[",
"2",
"]",
"=",
"energies",
"[",
"0",
"]",
"w",
".",
"wcs",
".",
"cdelt",
"[",
"2",
"]",
"=",
"energies",
"[",
"1",
"]",
"-",
"energies",
"[",
"0",
"]",
"w",
".",
"wcs",
".",
"ctype",
"[",
"2",
"]",
"=",
"'Energy'",
"w",
".",
"wcs",
".",
"cunit",
"[",
"2",
"]",
"=",
"'MeV'",
"return",
"w"
] | Create a WCS object.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Sky coordinate of the WCS reference point.
coordsys : str
projection : str
cdelt : float or (float,float)
In the first case the same value is used for x and y axes
crpix : float or (float,float)
In the first case the same value is used for x and y axes
naxis : {2, 3}
Number of dimensions of the projection.
energies : array-like
Array of energies that defines the third dimension if naxis=3. | [
"Create",
"a",
"WCS",
"object",
"."
] | python | train |
itamarst/eliot | eliot/_output.py | https://github.com/itamarst/eliot/blob/c03c96520c5492fadfc438b4b0f6336e2785ba2d/eliot/_output.py#L472-L480 | python | train

```python
def to_file(output_file, encoder=EliotJSONEncoder):
    """
    Add a destination that writes a JSON message per line to the given file.

    @param output_file: A file-like object.
    """
    Logger._destinations.add(
        FileDestination(file=output_file, encoder=encoder)
    )
```
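Typical usage through eliot's public API (`log_message` exists in recent eliot releases; older code used `Message.log` instead):

```python
import sys

from eliot import log_message, to_file

to_file(sys.stdout)
log_message(message_type="example", answer=42)  # emits one JSON line
```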
"def",
"to_file",
"(",
"output_file",
",",
"encoder",
"=",
"EliotJSONEncoder",
")",
":",
"Logger",
".",
"_destinations",
".",
"add",
"(",
"FileDestination",
"(",
"file",
"=",
"output_file",
",",
"encoder",
"=",
"encoder",
")",
")"
] | Add a destination that writes a JSON message per line to the given file.
@param output_file: A file-like object. | [
"Add",
"a",
"destination",
"that",
"writes",
"a",
"JSON",
"message",
"per",
"line",
"to",
"the",
"given",
"file",
"."
] | python | train |
dereneaton/ipyrad | ipyrad/assemble/rawedit.py | https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/rawedit.py#L648-L686 | python | valid

```python
def concat_multiple_inputs(data, sample):
    """
    If multiple fastq files were appended into the list of fastqs for samples
    then we merge them here before proceeding.
    """
    ## if more than one tuple in fastq list
    if len(sample.files.fastqs) > 1:
        ## create a cat command to append them all (doesn't matter if they
        ## are gzipped, cat still works). Grab index 0 of tuples for R1s.
        cmd1 = ["cat"] + [i[0] for i in sample.files.fastqs]
        isgzip = ".gz"
        if not sample.files.fastqs[0][0].endswith(".gz"):
            isgzip = ""

        ## write to new concat handle
        conc1 = os.path.join(data.dirs.edits, sample.name + "_R1_concat.fq{}".format(isgzip))
        with open(conc1, 'w') as cout1:
            proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=cout1, close_fds=True)
            res1 = proc1.communicate()[0]
        if proc1.returncode:
            raise IPyradWarningExit("error in: {}, {}".format(cmd1, res1))

        ## Only set conc2 if R2 actually exists
        conc2 = 0
        if "pair" in data.paramsdict["datatype"]:
            cmd2 = ["cat"] + [i[1] for i in sample.files.fastqs]
            conc2 = os.path.join(data.dirs.edits, sample.name + "_R2_concat.fq{}".format(isgzip))
            with open(conc2, 'w') as cout2:
                proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=cout2, close_fds=True)
                res2 = proc2.communicate()[0]
            if proc2.returncode:
                raise IPyradWarningExit("Error concatenating fastq files. Make sure all "
                    + "these files exist: {}\nError message: {}".format(cmd2, proc2.returncode))

        ## store new file handles
        sample.files.concat = [(conc1, conc2)]
    return sample.files.concat
```
"def",
"concat_multiple_inputs",
"(",
"data",
",",
"sample",
")",
":",
"## if more than one tuple in fastq list",
"if",
"len",
"(",
"sample",
".",
"files",
".",
"fastqs",
")",
">",
"1",
":",
"## create a cat command to append them all (doesn't matter if they ",
"## are gzipped, cat still works). Grab index 0 of tuples for R1s.",
"cmd1",
"=",
"[",
"\"cat\"",
"]",
"+",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"sample",
".",
"files",
".",
"fastqs",
"]",
"isgzip",
"=",
"\".gz\"",
"if",
"not",
"sample",
".",
"files",
".",
"fastqs",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"endswith",
"(",
"\".gz\"",
")",
":",
"isgzip",
"=",
"\"\"",
"## write to new concat handle",
"conc1",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"sample",
".",
"name",
"+",
"\"_R1_concat.fq{}\"",
".",
"format",
"(",
"isgzip",
")",
")",
"with",
"open",
"(",
"conc1",
",",
"'w'",
")",
"as",
"cout1",
":",
"proc1",
"=",
"sps",
".",
"Popen",
"(",
"cmd1",
",",
"stderr",
"=",
"sps",
".",
"STDOUT",
",",
"stdout",
"=",
"cout1",
",",
"close_fds",
"=",
"True",
")",
"res1",
"=",
"proc1",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"if",
"proc1",
".",
"returncode",
":",
"raise",
"IPyradWarningExit",
"(",
"\"error in: {}, {}\"",
".",
"format",
"(",
"cmd1",
",",
"res1",
")",
")",
"## Only set conc2 if R2 actually exists",
"conc2",
"=",
"0",
"if",
"\"pair\"",
"in",
"data",
".",
"paramsdict",
"[",
"\"datatype\"",
"]",
":",
"cmd2",
"=",
"[",
"\"cat\"",
"]",
"+",
"[",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"sample",
".",
"files",
".",
"fastqs",
"]",
"conc2",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data",
".",
"dirs",
".",
"edits",
",",
"sample",
".",
"name",
"+",
"\"_R2_concat.fq{}\"",
".",
"format",
"(",
"isgzip",
")",
")",
"with",
"open",
"(",
"conc2",
",",
"'w'",
")",
"as",
"cout2",
":",
"proc2",
"=",
"sps",
".",
"Popen",
"(",
"cmd2",
",",
"stderr",
"=",
"sps",
".",
"STDOUT",
",",
"stdout",
"=",
"cout2",
",",
"close_fds",
"=",
"True",
")",
"res2",
"=",
"proc2",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"if",
"proc2",
".",
"returncode",
":",
"raise",
"IPyradWarningExit",
"(",
"\"Error concatenating fastq files. Make sure all \"",
"+",
"\"these files exist: {}\\nError message: {}\"",
".",
"format",
"(",
"cmd2",
",",
"proc2",
".",
"returncode",
")",
")",
"## store new file handles",
"sample",
".",
"files",
".",
"concat",
"=",
"[",
"(",
"conc1",
",",
"conc2",
")",
"]",
"return",
"sample",
".",
"files",
".",
"concat"
] | If multiple fastq files were appended into the list of fastqs for samples,
then we merge them here before proceeding. | [
"If",
"multiple",
"fastq",
"files",
"were",
"appended",
"into",
"the",
"list",
"of",
"fastqs",
"for",
"samples",
"then",
"we",
"merge",
"them",
"here",
"before",
"proceeding",
"."
] | python | valid |
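A minimal usage sketch for the record above (illustrative: `data` and `sample` are assumed to be pre-built ipyrad Assembly and Sample objects, and the fastq paths are placeholders):

    sample.files.fastqs = [("lib1_R1.fq.gz", "lib1_R2.fq.gz"),
                           ("lib2_R1.fq.gz", "lib2_R2.fq.gz")]
    concat = concat_multiple_inputs(data, sample)  # returns [(conc1, conc2)]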
twilio/twilio-python | twilio/rest/preview/sync/service/document/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/sync/service/document/__init__.py#L296-L309 | def document_permissions(self):
"""
Access the document_permissions
:returns: twilio.rest.preview.sync.service.document.document_permission.DocumentPermissionList
:rtype: twilio.rest.preview.sync.service.document.document_permission.DocumentPermissionList
"""
if self._document_permissions is None:
self._document_permissions = DocumentPermissionList(
self._version,
service_sid=self._solution['service_sid'],
document_sid=self._solution['sid'],
)
return self._document_permissions | [
"def",
"document_permissions",
"(",
"self",
")",
":",
"if",
"self",
".",
"_document_permissions",
"is",
"None",
":",
"self",
".",
"_document_permissions",
"=",
"DocumentPermissionList",
"(",
"self",
".",
"_version",
",",
"service_sid",
"=",
"self",
".",
"_solution",
"[",
"'service_sid'",
"]",
",",
"document_sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"return",
"self",
".",
"_document_permissions"
] | Access the document_permissions
:returns: twilio.rest.preview.sync.service.document.document_permission.DocumentPermissionList
:rtype: twilio.rest.preview.sync.service.document.document_permission.DocumentPermissionList | [
"Access",
"the",
"document_permissions"
] | python | train |
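A minimal usage sketch, assuming valid Twilio credentials and placeholder service/document SIDs:

    from twilio.rest import Client
    client = Client(account_sid, auth_token)  # credentials assumed
    document = client.preview.sync.services('ISXXXX').documents('ETXXXX')
    for perm in document.document_permissions.list():  # list built lazily on first access
        print(perm.identity)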
delph-in/pydelphin | delphin/repp.py | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/repp.py#L282-L340 | def from_config(cls, path, directory=None):
"""
Instantiate a REPP from a PET-style `.set` configuration file.
The *path* parameter points to the configuration file.
Submodules are loaded from *directory*. If *directory* is not
given, it is the directory part of *path*.
Args:
path (str): the path to the REPP configuration file
directory (str, optional): the directory in which to search
for submodules
"""
if not exists(path):
raise REPPError('REPP config file not found: {}'.format(path))
confdir = dirname(path)
# TODO: can TDL parsing be repurposed for this variant?
conf = io.open(path, encoding='utf-8').read()
conf = re.sub(r';.*', '', conf).replace('\n',' ')
m = re.search(
r'repp-modules\s*:=\s*((?:[-\w]+\s+)*[-\w]+)\s*\.', conf)
t = re.search(
r'repp-tokenizer\s*:=\s*([-\w]+)\s*\.', conf)
a = re.search(
r'repp-calls\s*:=\s*((?:[-\w]+\s+)*[-\w]+)\s*\.', conf)
f = re.search(
r'format\s*:=\s*(\w+)\s*\.', conf)
d = re.search(
r'repp-directory\s*:=\s*(.*)\.\s*$', conf)
if m is None:
raise REPPError('repp-modules option must be set')
if t is None:
raise REPPError('repp-tokenizer option must be set')
mods = m.group(1).split()
tok = t.group(1).strip()
active = a.group(1).split() if a is not None else None
fmt = f.group(1).strip() if f is not None else None
if directory is None:
if d is not None:
directory = d.group(1).strip(' "')
elif exists(joinpath(confdir, tok + '.rpp')):
directory = confdir
elif exists(joinpath(confdir, 'rpp', tok + '.rpp')):
directory = joinpath(confdir, 'rpp')
elif exists(joinpath(confdir, '../rpp', tok + '.rpp')):
directory = joinpath(confdir, '../rpp')
else:
raise REPPError('Could not find a suitable REPP directory.')
# ignore repp-modules and format?
return REPP.from_file(
joinpath(directory, tok + '.rpp'),
directory=directory,
active=active
) | [
"def",
"from_config",
"(",
"cls",
",",
"path",
",",
"directory",
"=",
"None",
")",
":",
"if",
"not",
"exists",
"(",
"path",
")",
":",
"raise",
"REPPError",
"(",
"'REPP config file not found: {}'",
".",
"format",
"(",
"path",
")",
")",
"confdir",
"=",
"dirname",
"(",
"path",
")",
"# TODO: can TDL parsing be repurposed for this variant?",
"conf",
"=",
"io",
".",
"open",
"(",
"path",
",",
"encoding",
"=",
"'utf-8'",
")",
".",
"read",
"(",
")",
"conf",
"=",
"re",
".",
"sub",
"(",
"r';.*'",
",",
"''",
",",
"conf",
")",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"m",
"=",
"re",
".",
"search",
"(",
"r'repp-modules\\s*:=\\s*((?:[-\\w]+\\s+)*[-\\w]+)\\s*\\.'",
",",
"conf",
")",
"t",
"=",
"re",
".",
"search",
"(",
"r'repp-tokenizer\\s*:=\\s*([-\\w]+)\\s*\\.'",
",",
"conf",
")",
"a",
"=",
"re",
".",
"search",
"(",
"r'repp-calls\\s*:=\\s*((?:[-\\w]+\\s+)*[-\\w]+)\\s*\\.'",
",",
"conf",
")",
"f",
"=",
"re",
".",
"search",
"(",
"r'format\\s*:=\\s*(\\w+)\\s*\\.'",
",",
"conf",
")",
"d",
"=",
"re",
".",
"search",
"(",
"r'repp-directory\\s*:=\\s*(.*)\\.\\s*$'",
",",
"conf",
")",
"if",
"m",
"is",
"None",
":",
"raise",
"REPPError",
"(",
"'repp-modules option must be set'",
")",
"if",
"t",
"is",
"None",
":",
"raise",
"REPPError",
"(",
"'repp-tokenizer option must be set'",
")",
"mods",
"=",
"m",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
")",
"tok",
"=",
"t",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
")",
"active",
"=",
"a",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
")",
"if",
"a",
"is",
"not",
"None",
"else",
"None",
"fmt",
"=",
"f",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
")",
"if",
"f",
"is",
"not",
"None",
"else",
"None",
"if",
"directory",
"is",
"None",
":",
"if",
"d",
"is",
"not",
"None",
":",
"directory",
"=",
"d",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
"' \"'",
")",
"elif",
"exists",
"(",
"joinpath",
"(",
"confdir",
",",
"tok",
"+",
"'.rpp'",
")",
")",
":",
"directory",
"=",
"confdir",
"elif",
"exists",
"(",
"joinpath",
"(",
"confdir",
",",
"'rpp'",
",",
"tok",
"+",
"'.rpp'",
")",
")",
":",
"directory",
"=",
"joinpath",
"(",
"confdir",
",",
"'rpp'",
")",
"elif",
"exists",
"(",
"joinpath",
"(",
"confdir",
",",
"'../rpp'",
",",
"tok",
"+",
"'.rpp'",
")",
")",
":",
"directory",
"=",
"joinpath",
"(",
"confdir",
",",
"'../rpp'",
")",
"else",
":",
"raise",
"REPPError",
"(",
"'Could not find a suitable REPP directory.'",
")",
"# ignore repp-modules and format?",
"return",
"REPP",
".",
"from_file",
"(",
"joinpath",
"(",
"directory",
",",
"tok",
"+",
"'.rpp'",
")",
",",
"directory",
"=",
"directory",
",",
"active",
"=",
"active",
")"
] | Instantiate a REPP from a PET-style `.set` configuration file.
The *path* parameter points to the configuration file.
Submodules are loaded from *directory*. If *directory* is not
given, it is the directory part of *path*.
Args:
path (str): the path to the REPP configuration file
directory (str, optional): the directory in which to search
for submodules | [
"Instantiate",
"a",
"REPP",
"from",
"a",
"PET",
"-",
"style",
".",
"set",
"configuration",
"file",
"."
] | python | train |
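A minimal usage sketch, assuming a PET-style `repp.set` file on disk (path hypothetical):

    from delphin.repp import REPP
    r = REPP.from_config('grammar/pet/repp.set')  # submodules resolved next to the config
    tokens = r.tokenize('Dogs bark.')             # tokenize/apply per this module's API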
c0ntrol-x/p4rr0t007 | p4rr0t007/lib/core.py | https://github.com/c0ntrol-x/p4rr0t007/blob/6fe88ec1231a778b9f1d13bc61332581715d646e/p4rr0t007/lib/core.py#L27-L37 | def colorize_logger(logger, stream=None, level=logging.DEBUG):
"""resets the handlers and formatters in a given logger, and sets it up with Colorful() logs
"""
logger.handlers = []
logger.filters = []
stream = stream or sys.stderr
handler = logging.StreamHandler(stream=stream)
handler.setLevel(level)
handler.setFormatter(Colorful())
logger.addHandler(handler)
return logger | [
"def",
"colorize_logger",
"(",
"logger",
",",
"stream",
"=",
"None",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")",
":",
"logger",
".",
"handlers",
"=",
"[",
"]",
"logger",
".",
"filters",
"=",
"[",
"]",
"stream",
"=",
"stream",
"or",
"sys",
".",
"stderr",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
"stream",
"=",
"stream",
")",
"handler",
".",
"setLevel",
"(",
"level",
")",
"handler",
".",
"setFormatter",
"(",
"Colorful",
"(",
")",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"return",
"logger"
] | resets the handlers and formatters in a given logger, and sets it up with Colorful() logs | [
"resets",
"the",
"handlers",
"and",
"formatters",
"in",
"a",
"given",
"logger",
"and",
"sets",
"it",
"up",
"with",
"Colorful",
"()",
"logs"
] | python | train |
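A minimal usage sketch:

    import logging
    import sys
    log = logging.getLogger('p4rr0t007')
    colorize_logger(log, stream=sys.stderr, level=logging.INFO)
    log.info('rendered through the Colorful formatter')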
chaoss/grimoirelab-perceval | perceval/backends/core/redmine.py | https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/redmine.py#L411-L424 | def sanitize_for_archive(url, headers, payload):
"""Sanitize payload of a HTTP request by removing the token information
before storing/retrieving archived items
:param url: HTTP url request
:param headers: HTTP headers request
:param payload: HTTP payload request
:returns: url, headers and the sanitized payload
"""
if RedmineClient.PKEY in payload:
payload.pop(RedmineClient.PKEY)
return url, headers, payload | [
"def",
"sanitize_for_archive",
"(",
"url",
",",
"headers",
",",
"payload",
")",
":",
"if",
"RedmineClient",
".",
"PKEY",
"in",
"payload",
":",
"payload",
".",
"pop",
"(",
"RedmineClient",
".",
"PKEY",
")",
"return",
"url",
",",
"headers",
",",
"payload"
] | Sanitize payload of a HTTP request by removing the token information
before storing/retrieving archived items
:param url: HTTP url request
:param headers: HTTP headers request
:param payload: HTTP payload request
:returns: url, headers and the sanitized payload | [
"Sanitize",
"payload",
"of",
"a",
"HTTP",
"request",
"by",
"removing",
"the",
"token",
"information",
"before",
"storing",
"/",
"retrieving",
"archived",
"items"
] | python | test |
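A minimal usage sketch with a hypothetical URL and token; `RedmineClient.PKEY` names the API-key field that is stripped:

    url, headers, payload = RedmineClient.sanitize_for_archive(
        'https://redmine.example.com/issues.json', {},
        {RedmineClient.PKEY: 'secret-token', 'status_id': '*'})
    assert RedmineClient.PKEY not in payload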
coinbase/coinbase-python | coinbase/wallet/client.py | https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L273-L276 | def get_accounts(self, **params):
"""https://developers.coinbase.com/api/v2#list-accounts"""
response = self._get('v2', 'accounts', params=params)
return self._make_api_object(response, Account) | [
"def",
"get_accounts",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"response",
"=",
"self",
".",
"_get",
"(",
"'v2'",
",",
"'accounts'",
",",
"params",
"=",
"params",
")",
"return",
"self",
".",
"_make_api_object",
"(",
"response",
",",
"Account",
")"
] | https://developers.coinbase.com/api/v2#list-accounts | [
"https",
":",
"//",
"developers",
".",
"coinbase",
".",
"com",
"/",
"api",
"/",
"v2#list",
"-",
"accounts"
] | python | train |
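A minimal usage sketch, assuming valid API credentials:

    from coinbase.wallet.client import Client
    client = Client(api_key, api_secret)  # credentials assumed
    accounts = client.get_accounts()
    for account in accounts.data:
        print(account.name, account.balance)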
mobolic/facebook-sdk | facebook/__init__.py | https://github.com/mobolic/facebook-sdk/blob/65ff582e77f7ed68b6e9643a7490e5dee2a1031b/facebook/__init__.py#L126-L131 | def get_permissions(self, user_id):
"""Fetches the permissions object from the graph."""
response = self.request(
"{0}/{1}/permissions".format(self.version, user_id), {}
)["data"]
return {x["permission"] for x in response if x["status"] == "granted"} | [
"def",
"get_permissions",
"(",
"self",
",",
"user_id",
")",
":",
"response",
"=",
"self",
".",
"request",
"(",
"\"{0}/{1}/permissions\"",
".",
"format",
"(",
"self",
".",
"version",
",",
"user_id",
")",
",",
"{",
"}",
")",
"[",
"\"data\"",
"]",
"return",
"{",
"x",
"[",
"\"permission\"",
"]",
"for",
"x",
"in",
"response",
"if",
"x",
"[",
"\"status\"",
"]",
"==",
"\"granted\"",
"}"
] | Fetches the permissions object from the graph. | [
"Fetches",
"the",
"permissions",
"object",
"from",
"the",
"graph",
"."
] | python | train |
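A minimal usage sketch, assuming a valid user access token:

    import facebook
    graph = facebook.GraphAPI(access_token=token)  # token assumed
    granted = graph.get_permissions('me')          # e.g. {'public_profile', 'email'}
    can_email = 'email' in granted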
AtomHash/evernode | evernode/classes/form_data.py | https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/form_data.py#L90-L103 | def __get_file(self, file):
""" Get request file and do a security check """
file_object = None
if file['name'] in request.files:
file_object = request.files[file['name']]
clean_filename = secure_filename(file_object.filename)
if clean_filename == '':
return file_object
if file_object and self.__allowed_extension(
clean_filename, file['extensions']):
return file_object
elif file['name'] not in request.files and file['required']:
return file_object
return file_object | [
"def",
"__get_file",
"(",
"self",
",",
"file",
")",
":",
"file_object",
"=",
"None",
"if",
"file",
"[",
"'name'",
"]",
"in",
"request",
".",
"files",
":",
"file_object",
"=",
"request",
".",
"files",
"[",
"file",
"[",
"'name'",
"]",
"]",
"clean_filename",
"=",
"secure_filename",
"(",
"file_object",
".",
"filename",
")",
"if",
"clean_filename",
"==",
"''",
":",
"return",
"file_object",
"if",
"file_object",
"and",
"self",
".",
"__allowed_extension",
"(",
"clean_filename",
",",
"file",
"[",
"'extensions'",
"]",
")",
":",
"return",
"file_object",
"elif",
"file",
"[",
"'name'",
"]",
"not",
"in",
"request",
".",
"files",
"and",
"file",
"[",
"'required'",
"]",
":",
"return",
"file_object",
"return",
"file_object"
] | Get request file and do a security check | [
"Get",
"request",
"file",
"and",
"do",
"a",
"security",
"check"
] | python | train |
codeforamerica/epa_python | epa/pcs/pcs.py | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L46-L56 | def code_description(self, column=None, value=None, **kwargs):
"""
The Permit Compliance System (PCS) records milestones, events, and many
other parameters in code format. To provide text descriptions that
explain the code meanings, the PCS_CODE_DESC provides complete
information on all types of codes, and for each type, the text
description of each possible code value.
>>> PCS().code_description('code', 110)
"""
return self._resolve_call('PCS_CODE_DESC', column, value, **kwargs) | [
"def",
"code_description",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'PCS_CODE_DESC'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs",
")"
] | The Permit Compliance System (PCS) records milestones, events, and many
other parameters in code format. To provide text descriptions that
explain the code meanings, the PCS_CODE_DESC provides complete
information on all types of codes, and for each type, the text
description of each possible code value.
>>> PCS().code_description('code', 110) | [
"The",
"Permit",
"Compliance",
"System",
"(",
"PCS",
")",
"records",
"milestones",
"events",
"and",
"many",
"other",
"parameters",
"in",
"code",
"format",
".",
"To",
"provide",
"text",
"descriptions",
"that",
"explain",
"the",
"code",
"meanings",
"the",
"PCS_CODE_DESC",
"provide",
"s",
"complete",
"information",
"on",
"all",
"types",
"of",
"codes",
"and",
"for",
"each",
"type",
"the",
"text",
"description",
"of",
"each",
"possible",
"code",
"value",
"."
] | python | train |
softlayer/softlayer-python | SoftLayer/CLI/environment.py | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/environment.py#L41-L43 | def out(self, output, newline=True):
"""Outputs a string to the console (stdout)."""
click.echo(output, nl=newline) | [
"def",
"out",
"(",
"self",
",",
"output",
",",
"newline",
"=",
"True",
")",
":",
"click",
".",
"echo",
"(",
"output",
",",
"nl",
"=",
"newline",
")"
] | Outputs a string to the console (stdout). | [
"Outputs",
"a",
"string",
"to",
"the",
"console",
"(",
"stdout",
")",
"."
] | python | train |
tensorflow/tensor2tensor | tensor2tensor/data_generators/cnn_dailymail.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/cnn_dailymail.py#L137-L173 | def example_generator(all_files, urls_path, sum_token):
"""Generate examples."""
def fix_run_on_sents(line):
if u"@highlight" in line:
return line
if not line:
return line
if line[-1] in END_TOKENS:
return line
return line + u"."
filelist = example_splits(urls_path, all_files)
story_summary_split_token = u" <summary> " if sum_token else " "
for story_file in filelist:
story = []
summary = []
reading_highlights = False
for line in tf.gfile.Open(story_file, "rb"):
line = text_encoder.to_unicode_utf8(line.strip())
line = fix_run_on_sents(line)
if not line:
continue
elif line.startswith(u"@highlight"):
if not story:
break # No article text.
reading_highlights = True
elif reading_highlights:
summary.append(line)
else:
story.append(line)
if (not story) or not summary:
continue
yield " ".join(story) + story_summary_split_token + " ".join(summary) | [
"def",
"example_generator",
"(",
"all_files",
",",
"urls_path",
",",
"sum_token",
")",
":",
"def",
"fix_run_on_sents",
"(",
"line",
")",
":",
"if",
"u\"@highlight\"",
"in",
"line",
":",
"return",
"line",
"if",
"not",
"line",
":",
"return",
"line",
"if",
"line",
"[",
"-",
"1",
"]",
"in",
"END_TOKENS",
":",
"return",
"line",
"return",
"line",
"+",
"u\".\"",
"filelist",
"=",
"example_splits",
"(",
"urls_path",
",",
"all_files",
")",
"story_summary_split_token",
"=",
"u\" <summary> \"",
"if",
"sum_token",
"else",
"\" \"",
"for",
"story_file",
"in",
"filelist",
":",
"story",
"=",
"[",
"]",
"summary",
"=",
"[",
"]",
"reading_highlights",
"=",
"False",
"for",
"line",
"in",
"tf",
".",
"gfile",
".",
"Open",
"(",
"story_file",
",",
"\"rb\"",
")",
":",
"line",
"=",
"text_encoder",
".",
"to_unicode_utf8",
"(",
"line",
".",
"strip",
"(",
")",
")",
"line",
"=",
"fix_run_on_sents",
"(",
"line",
")",
"if",
"not",
"line",
":",
"continue",
"elif",
"line",
".",
"startswith",
"(",
"u\"@highlight\"",
")",
":",
"if",
"not",
"story",
":",
"break",
"# No article text.",
"reading_highlights",
"=",
"True",
"elif",
"reading_highlights",
":",
"summary",
".",
"append",
"(",
"line",
")",
"else",
":",
"story",
".",
"append",
"(",
"line",
")",
"if",
"(",
"not",
"story",
")",
"or",
"not",
"summary",
":",
"continue",
"yield",
"\" \"",
".",
"join",
"(",
"story",
")",
"+",
"story_summary_split_token",
"+",
"\" \"",
".",
"join",
"(",
"summary",
")"
] | Generate examples. | [
"Generate",
"examples",
"."
] | python | train |
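A minimal usage sketch, assuming `all_files` and `urls_path` come from the CNN/DailyMail download step earlier in this module:

    for example in example_generator(all_files, urls_path, sum_token=True):
        story, summary = example.split(' <summary> ', 1)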
richardkiss/pycoin | pycoin/services/blockexplorer.py | https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/services/blockexplorer.py#L15-L24 | def tx_for_tx_hash(self, tx_hash):
"""
Get a Tx by its hash.
"""
url = "%s/rawtx/%s" % (self.url, b2h_rev(tx_hash))
d = urlopen(url).read()
j = json.loads(d.decode("utf8"))
tx = Tx.from_hex(j.get("rawtx", ""))
if tx.hash() == tx_hash:
return tx | [
"def",
"tx_for_tx_hash",
"(",
"self",
",",
"tx_hash",
")",
":",
"url",
"=",
"\"%s/rawtx/%s\"",
"%",
"(",
"self",
".",
"url",
",",
"b2h_rev",
"(",
"tx_hash",
")",
")",
"d",
"=",
"urlopen",
"(",
"url",
")",
".",
"read",
"(",
")",
"j",
"=",
"json",
".",
"loads",
"(",
"d",
".",
"decode",
"(",
"\"utf8\"",
")",
")",
"tx",
"=",
"Tx",
".",
"from_hex",
"(",
"j",
".",
"get",
"(",
"\"rawtx\"",
",",
"\"\"",
")",
")",
"if",
"tx",
".",
"hash",
"(",
")",
"==",
"tx_hash",
":",
"return",
"tx"
] | Get a Tx by its hash. | [
"Get",
"a",
"Tx",
"by",
"its",
"hash",
"."
] | python | train |
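A minimal usage sketch; `service` is assumed to be an instance of this provider class and `txid_hex` a 64-character big-endian transaction id:

    tx_hash = bytes.fromhex(txid_hex)[::-1]  # hex id -> internal byte order
    tx = service.tx_for_tx_hash(tx_hash)     # implicitly returns None on a mismatch
    if tx is not None:
        print(tx.id())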
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_arp.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_arp.py#L12-L22 | def hide_arp_holder_system_max_arp(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
system_max = ET.SubElement(hide_arp_holder, "system-max")
arp = ET.SubElement(system_max, "arp")
arp.text = kwargs.pop('arp')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"hide_arp_holder_system_max_arp",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"hide_arp_holder",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"hide-arp-holder\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-arp\"",
")",
"system_max",
"=",
"ET",
".",
"SubElement",
"(",
"hide_arp_holder",
",",
"\"system-max\"",
")",
"arp",
"=",
"ET",
".",
"SubElement",
"(",
"system_max",
",",
"\"arp\"",
")",
"arp",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'arp'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
nimeshkverma/mongo_joins | mongojoin/processdata.py | https://github.com/nimeshkverma/mongo_joins/blob/64c416c3402d5906f707b73867fbc55e28d5ec37/mongojoin/processdata.py#L40-L71 | def build_pipeline(self, collection):
"""
Creates aggregation pipeline for aggregation
:param collection: Mongo collection for aggregation
:type collection: MongoCollection
:return pipeline: list of dicts
"""
pipeline = []
if isinstance(collection.where_dict, dict) and collection.where_dict:
match_dict = {
"$match": collection.where_dict
}
pipeline.append(match_dict)
group_keys_dict = self.build_mongo_doc(self.join_keys)
push_dict = self.build_mongo_doc(collection.select_keys)
group_by_dict = {
"$group":
{
"_id": group_keys_dict,
"docs": {
"$push": push_dict
}
}
}
pipeline.append(group_by_dict)
return pipeline | [
"def",
"build_pipeline",
"(",
"self",
",",
"collection",
")",
":",
"pipeline",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"collection",
".",
"where_dict",
",",
"dict",
")",
"and",
"collection",
".",
"where_dict",
":",
"match_dict",
"=",
"{",
"\"$match\"",
":",
"collection",
".",
"where_dict",
"}",
"pipeline",
".",
"append",
"(",
"match_dict",
")",
"group_keys_dict",
"=",
"self",
".",
"build_mongo_doc",
"(",
"self",
".",
"join_keys",
")",
"push_dict",
"=",
"self",
".",
"build_mongo_doc",
"(",
"collection",
".",
"select_keys",
")",
"group_by_dict",
"=",
"{",
"\"$group\"",
":",
"{",
"\"_id\"",
":",
"group_keys_dict",
",",
"\"docs\"",
":",
"{",
"\"$push\"",
":",
"push_dict",
"}",
"}",
"}",
"pipeline",
".",
"append",
"(",
"group_by_dict",
")",
"return",
"pipeline"
] | Creates aggregation pipeline for aggregation
:param collection: Mongo collection for aggregation
:type collection: MongoCollection
:return pipeline: list of dicts | [
"Creates",
"aggregation",
"pipeline",
"for",
"aggregation",
":",
"param",
"collection",
":",
"Mongo",
"collection",
"for",
"aggregation",
":",
"type",
"collection",
":",
"MongoCollection"
] | python | train |
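A minimal usage sketch; `joiner` (holding `join_keys`) and `collection` (with `select_keys` and `where_dict` set) are assumed to be pre-built mongojoin objects:

    pipeline = joiner.build_pipeline(collection)
    # e.g. [{'$match': {...}}, {'$group': {'_id': {...}, 'docs': {'$push': {...}}}}]
    grouped = mongo_collection.aggregate(pipeline)  # mongo_collection: a pymongo collection (assumed)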
common-workflow-language/schema_salad | schema_salad/schema.py | https://github.com/common-workflow-language/schema_salad/blob/608ba207b9058fe0a9c3db161058ab3782eef015/schema_salad/schema.py#L506-L591 | def extend_and_specialize(items, loader):
# type: (List[Dict[Text, Any]], Loader) -> List[Dict[Text, Any]]
"""
Apply 'extend' and 'specialize' to fully materialize derived record types.
"""
items = deepcopy_strip(items)
types = {i["name"]: i for i in items} # type: Dict[Text, Any]
results = []
for stype in items:
if "extends" in stype:
specs = {} # type: Dict[Text, Text]
if "specialize" in stype:
for spec in aslist(stype["specialize"]):
specs[spec["specializeFrom"]] = spec["specializeTo"]
exfields = [] # type: List[Text]
exsym = [] # type: List[Text]
for ex in aslist(stype["extends"]):
if ex not in types:
raise Exception(
"Extends {} in {} refers to invalid base type.".format(
stype["extends"], stype["name"]))
basetype = copy.copy(types[ex])
if stype["type"] == "record":
if specs:
basetype["fields"] = replace_type(
basetype.get("fields", []), specs, loader, set())
for field in basetype.get("fields", []):
if "inherited_from" not in field:
field["inherited_from"] = ex
exfields.extend(basetype.get("fields", []))
elif stype["type"] == "enum":
exsym.extend(basetype.get("symbols", []))
if stype["type"] == "record":
stype = copy.copy(stype)
exfields.extend(stype.get("fields", []))
stype["fields"] = exfields
fieldnames = set() # type: Set[Text]
for field in stype["fields"]:
if field["name"] in fieldnames:
raise validate.ValidationException(
"Field name {} appears twice in {}".format(
field["name"], stype["name"]))
else:
fieldnames.add(field["name"])
elif stype["type"] == "enum":
stype = copy.copy(stype)
exsym.extend(stype.get("symbols", []))
stype["symbol"] = exsym
types[stype["name"]] = stype
results.append(stype)
ex_types = {}
for result in results:
ex_types[result["name"]] = result
extended_by = {} # type: Dict[Text, Text]
for result in results:
if "extends" in result:
for ex in aslist(result["extends"]):
if ex_types[ex].get("abstract"):
add_dictlist(extended_by, ex, ex_types[result["name"]])
add_dictlist(extended_by, avro_name(ex), ex_types[ex])
for result in results:
if result.get("abstract") and result["name"] not in extended_by:
raise validate.ValidationException(
"{} is abstract but missing a concrete subtype".format(
result["name"]))
for result in results:
if "fields" in result:
result["fields"] = replace_type(
result["fields"], extended_by, loader, set())
return results | [
"def",
"extend_and_specialize",
"(",
"items",
",",
"loader",
")",
":",
"# type: (List[Dict[Text, Any]], Loader) -> List[Dict[Text, Any]]",
"items",
"=",
"deepcopy_strip",
"(",
"items",
")",
"types",
"=",
"{",
"i",
"[",
"\"name\"",
"]",
":",
"i",
"for",
"i",
"in",
"items",
"}",
"# type: Dict[Text, Any]",
"results",
"=",
"[",
"]",
"for",
"stype",
"in",
"items",
":",
"if",
"\"extends\"",
"in",
"stype",
":",
"specs",
"=",
"{",
"}",
"# type: Dict[Text, Text]",
"if",
"\"specialize\"",
"in",
"stype",
":",
"for",
"spec",
"in",
"aslist",
"(",
"stype",
"[",
"\"specialize\"",
"]",
")",
":",
"specs",
"[",
"spec",
"[",
"\"specializeFrom\"",
"]",
"]",
"=",
"spec",
"[",
"\"specializeTo\"",
"]",
"exfields",
"=",
"[",
"]",
"# type: List[Text]",
"exsym",
"=",
"[",
"]",
"# type: List[Text]",
"for",
"ex",
"in",
"aslist",
"(",
"stype",
"[",
"\"extends\"",
"]",
")",
":",
"if",
"ex",
"not",
"in",
"types",
":",
"raise",
"Exception",
"(",
"\"Extends {} in {} refers to invalid base type.\"",
".",
"format",
"(",
"stype",
"[",
"\"extends\"",
"]",
",",
"stype",
"[",
"\"name\"",
"]",
")",
")",
"basetype",
"=",
"copy",
".",
"copy",
"(",
"types",
"[",
"ex",
"]",
")",
"if",
"stype",
"[",
"\"type\"",
"]",
"==",
"\"record\"",
":",
"if",
"specs",
":",
"basetype",
"[",
"\"fields\"",
"]",
"=",
"replace_type",
"(",
"basetype",
".",
"get",
"(",
"\"fields\"",
",",
"[",
"]",
")",
",",
"specs",
",",
"loader",
",",
"set",
"(",
")",
")",
"for",
"field",
"in",
"basetype",
".",
"get",
"(",
"\"fields\"",
",",
"[",
"]",
")",
":",
"if",
"\"inherited_from\"",
"not",
"in",
"field",
":",
"field",
"[",
"\"inherited_from\"",
"]",
"=",
"ex",
"exfields",
".",
"extend",
"(",
"basetype",
".",
"get",
"(",
"\"fields\"",
",",
"[",
"]",
")",
")",
"elif",
"stype",
"[",
"\"type\"",
"]",
"==",
"\"enum\"",
":",
"exsym",
".",
"extend",
"(",
"basetype",
".",
"get",
"(",
"\"symbols\"",
",",
"[",
"]",
")",
")",
"if",
"stype",
"[",
"\"type\"",
"]",
"==",
"\"record\"",
":",
"stype",
"=",
"copy",
".",
"copy",
"(",
"stype",
")",
"exfields",
".",
"extend",
"(",
"stype",
".",
"get",
"(",
"\"fields\"",
",",
"[",
"]",
")",
")",
"stype",
"[",
"\"fields\"",
"]",
"=",
"exfields",
"fieldnames",
"=",
"set",
"(",
")",
"# type: Set[Text]",
"for",
"field",
"in",
"stype",
"[",
"\"fields\"",
"]",
":",
"if",
"field",
"[",
"\"name\"",
"]",
"in",
"fieldnames",
":",
"raise",
"validate",
".",
"ValidationException",
"(",
"\"Field name {} appears twice in {}\"",
".",
"format",
"(",
"field",
"[",
"\"name\"",
"]",
",",
"stype",
"[",
"\"name\"",
"]",
")",
")",
"else",
":",
"fieldnames",
".",
"add",
"(",
"field",
"[",
"\"name\"",
"]",
")",
"elif",
"stype",
"[",
"\"type\"",
"]",
"==",
"\"enum\"",
":",
"stype",
"=",
"copy",
".",
"copy",
"(",
"stype",
")",
"exsym",
".",
"extend",
"(",
"stype",
".",
"get",
"(",
"\"symbols\"",
",",
"[",
"]",
")",
")",
"stype",
"[",
"\"symbol\"",
"]",
"=",
"exsym",
"types",
"[",
"stype",
"[",
"\"name\"",
"]",
"]",
"=",
"stype",
"results",
".",
"append",
"(",
"stype",
")",
"ex_types",
"=",
"{",
"}",
"for",
"result",
"in",
"results",
":",
"ex_types",
"[",
"result",
"[",
"\"name\"",
"]",
"]",
"=",
"result",
"extended_by",
"=",
"{",
"}",
"# type: Dict[Text, Text]",
"for",
"result",
"in",
"results",
":",
"if",
"\"extends\"",
"in",
"result",
":",
"for",
"ex",
"in",
"aslist",
"(",
"result",
"[",
"\"extends\"",
"]",
")",
":",
"if",
"ex_types",
"[",
"ex",
"]",
".",
"get",
"(",
"\"abstract\"",
")",
":",
"add_dictlist",
"(",
"extended_by",
",",
"ex",
",",
"ex_types",
"[",
"result",
"[",
"\"name\"",
"]",
"]",
")",
"add_dictlist",
"(",
"extended_by",
",",
"avro_name",
"(",
"ex",
")",
",",
"ex_types",
"[",
"ex",
"]",
")",
"for",
"result",
"in",
"results",
":",
"if",
"result",
".",
"get",
"(",
"\"abstract\"",
")",
"and",
"result",
"[",
"\"name\"",
"]",
"not",
"in",
"extended_by",
":",
"raise",
"validate",
".",
"ValidationException",
"(",
"\"{} is abstract but missing a concrete subtype\"",
".",
"format",
"(",
"result",
"[",
"\"name\"",
"]",
")",
")",
"for",
"result",
"in",
"results",
":",
"if",
"\"fields\"",
"in",
"result",
":",
"result",
"[",
"\"fields\"",
"]",
"=",
"replace_type",
"(",
"result",
"[",
"\"fields\"",
"]",
",",
"extended_by",
",",
"loader",
",",
"set",
"(",
")",
")",
"return",
"results"
] | Apply 'extend' and 'specialize' to fully materialize derived record types. | [
"Apply",
"extend",
"and",
"specialize",
"to",
"fully",
"materialize",
"derived",
"record",
"types",
"."
] | python | train |
bukun/TorCMS | torcms/handlers/post_handler.py | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/post_handler.py#L711-L724 | def _change_kind(self, post_uid):
'''
To modify the category and kind of the post.
'''
post_data = self.get_post_data()
logger.info('admin post update: {0}'.format(post_data))
MPost.update_misc(post_uid, kind=post_data['kcat'])
# self.update_category(post_uid)
update_category(post_uid, post_data)
self.redirect('/{0}/{1}'.format(router_post[post_data['kcat']], post_uid)) | [
"def",
"_change_kind",
"(",
"self",
",",
"post_uid",
")",
":",
"post_data",
"=",
"self",
".",
"get_post_data",
"(",
")",
"logger",
".",
"info",
"(",
"'admin post update: {0}'",
".",
"format",
"(",
"post_data",
")",
")",
"MPost",
".",
"update_misc",
"(",
"post_uid",
",",
"kind",
"=",
"post_data",
"[",
"'kcat'",
"]",
")",
"# self.update_category(post_uid)",
"update_category",
"(",
"post_uid",
",",
"post_data",
")",
"self",
".",
"redirect",
"(",
"'/{0}/{1}'",
".",
"format",
"(",
"router_post",
"[",
"post_data",
"[",
"'kcat'",
"]",
"]",
",",
"post_uid",
")",
")"
] | To modify the category and kind of the post. | [
"To",
"modify",
"the",
"category",
"of",
"the",
"post",
"and",
"kind",
"."
] | python | train |
Azure/azure-cli-extensions | src/interactive/azext_interactive/azclishell/gather_commands.py | https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/interactive/azext_interactive/azclishell/gather_commands.py#L165-L173 | def get_all_subcommands(self):
""" returns all the subcommands """
subcommands = []
for command in self.descrip:
for word in command.split():
for kid in self.command_tree.children:
if word != kid and word not in subcommands:
subcommands.append(word)
return subcommands | [
"def",
"get_all_subcommands",
"(",
"self",
")",
":",
"subcommands",
"=",
"[",
"]",
"for",
"command",
"in",
"self",
".",
"descrip",
":",
"for",
"word",
"in",
"command",
".",
"split",
"(",
")",
":",
"for",
"kid",
"in",
"self",
".",
"command_tree",
".",
"children",
":",
"if",
"word",
"!=",
"kid",
"and",
"word",
"not",
"in",
"subcommands",
":",
"subcommands",
".",
"append",
"(",
"word",
")",
"return",
"subcommands"
] | returns all the subcommands | [
"returns",
"all",
"the",
"subcommands"
] | python | train |
secdev/scapy | scapy/utils.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/utils.py#L51-L69 | def get_temp_file(keep=False, autoext="", fd=False):
"""Creates a temporary file.
:param keep: If False, automatically delete the file when Scapy exits.
:param autoext: Suffix to add to the generated file name.
:param fd: If True, this returns a file-like object with the temporary
file opened. If False (default), this returns a file path.
"""
f = tempfile.NamedTemporaryFile(prefix="scapy", suffix=autoext,
delete=False)
if not keep:
conf.temp_files.append(f.name)
if fd:
return f
else:
# Close the file so something else can take it.
f.close()
return f.name | [
"def",
"get_temp_file",
"(",
"keep",
"=",
"False",
",",
"autoext",
"=",
"\"\"",
",",
"fd",
"=",
"False",
")",
":",
"f",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"prefix",
"=",
"\"scapy\"",
",",
"suffix",
"=",
"autoext",
",",
"delete",
"=",
"False",
")",
"if",
"not",
"keep",
":",
"conf",
".",
"temp_files",
".",
"append",
"(",
"f",
".",
"name",
")",
"if",
"fd",
":",
"return",
"f",
"else",
":",
"# Close the file so something else can take it.",
"f",
".",
"close",
"(",
")",
"return",
"f",
".",
"name"
] | Creates a temporary file.
:param keep: If False, automatically delete the file when Scapy exits.
:param autoext: Suffix to add to the generated file name.
:param fd: If True, this returns a file-like object with the temporary
file opened. If False (default), this returns a file path. | [
"Creates",
"a",
"temporary",
"file",
"."
] | python | train |
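A minimal usage sketch:

    from scapy.utils import get_temp_file
    path = get_temp_file(autoext='.pcap')  # removed automatically when Scapy exits
    f = get_temp_file(keep=True, fd=True)  # caller owns the open file object
    f.write(b'payload')
    f.close()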
apetrynet/pyfilemail | pyfilemail/transfer.py | https://github.com/apetrynet/pyfilemail/blob/eb81b0e69ff42f4335d5298833e4769b750bf397/pyfilemail/transfer.py#L581-L626 | def update(self,
message=None,
subject=None,
days=None,
downloads=None,
notify=None):
"""Update properties for a transfer.
:param message: updated message to recipient(s)
:param subject: updated subject for transfer
:param days: updated amount of days transfer is available
:param downloads: update amount of downloads allowed for transfer
:param notify: update whether to notifiy on downloads or not
:type message: ``str`` or ``unicode``
:type subject: ``str`` or ``unicode``
:type days: ``int``
:type downloads: ``int``
:type notify: ``bool``
:rtype: ``bool``
"""
method, url = get_URL('update')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'transferid': self.transfer_id,
}
data = {
'message': message or self.transfer_info.get('message'),
'subject': subject or self.transfer_info.get('subject'),
'days': days or self.transfer_info.get('days'),
'downloads': downloads or self.transfer_info.get('downloads'),
'notify': notify or self.transfer_info.get('notify')
}
payload.update(data)
res = getattr(self.session, method)(url, params=payload)
if res.status_code:
self.transfer_info.update(data)
return True
hellraiser(res) | [
"def",
"update",
"(",
"self",
",",
"message",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"days",
"=",
"None",
",",
"downloads",
"=",
"None",
",",
"notify",
"=",
"None",
")",
":",
"method",
",",
"url",
"=",
"get_URL",
"(",
"'update'",
")",
"payload",
"=",
"{",
"'apikey'",
":",
"self",
".",
"config",
".",
"get",
"(",
"'apikey'",
")",
",",
"'logintoken'",
":",
"self",
".",
"session",
".",
"cookies",
".",
"get",
"(",
"'logintoken'",
")",
",",
"'transferid'",
":",
"self",
".",
"transfer_id",
",",
"}",
"data",
"=",
"{",
"'message'",
":",
"message",
"or",
"self",
".",
"transfer_info",
".",
"get",
"(",
"'message'",
")",
",",
"'message'",
":",
"subject",
"or",
"self",
".",
"transfer_info",
".",
"get",
"(",
"'subject'",
")",
",",
"'days'",
":",
"days",
"or",
"self",
".",
"transfer_info",
".",
"get",
"(",
"'days'",
")",
",",
"'downloads'",
":",
"downloads",
"or",
"self",
".",
"transfer_info",
".",
"get",
"(",
"'downloads'",
")",
",",
"'notify'",
":",
"notify",
"or",
"self",
".",
"transfer_info",
".",
"get",
"(",
"'notify'",
")",
"}",
"payload",
".",
"update",
"(",
"data",
")",
"res",
"=",
"getattr",
"(",
"self",
".",
"session",
",",
"method",
")",
"(",
"url",
",",
"params",
"=",
"payload",
")",
"if",
"res",
".",
"status_code",
":",
"self",
".",
"transfer_info",
".",
"update",
"(",
"data",
")",
"return",
"True",
"hellraiser",
"(",
"res",
")"
] | Update properties for a transfer.
:param message: updated message to recipient(s)
:param subject: updated subject for transfer
:param days: updated amount of days transfer is available
:param downloads: update amount of downloads allowed for transfer
:param notify: update whether to notifiy on downloads or not
:type message: ``str`` or ``unicode``
:type subject: ``str`` or ``unicode``
:type days: ``int``
:type downloads: ``int``
:type notify: ``bool``
:rtype: ``bool`` | [
"Update",
"properties",
"for",
"a",
"transfer",
"."
] | python | train |
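A minimal usage sketch; `transfer` is assumed to be an already-created pyfilemail Transfer:

    if transfer.update(subject='Revised files', days=14, notify=True):
        print(transfer.transfer_info['days'])  # local cache is updated on success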
CyberReboot/vent | vent/api/tools.py | https://github.com/CyberReboot/vent/blob/9956a09146b11a89a0eabab3bc7ce8906d124885/vent/api/tools.py#L90-L145 | def inventory(self, choices=None):
""" Return a dictionary of the inventory items and status """
status = (True, None)
if not choices:
return (False, 'No choices made')
try:
# choices: repos, tools, images, built, running, enabled
items = {'repos': [], 'tools': {}, 'images': {},
'built': {}, 'running': {}, 'enabled': {}}
tools = Template(self.manifest).list_tools()
for choice in choices:
for tool in tools:
try:
if choice == 'repos':
if 'repo' in tool:
if (tool['repo'] and
tool['repo'] not in items[choice]):
items[choice].append(tool['repo'])
elif choice == 'tools':
items[choice][tool['section']] = tool['name']
elif choice == 'images':
# TODO also check against docker
items[choice][tool['section']] = tool['image_name']
elif choice == 'built':
items[choice][tool['section']] = tool['built']
elif choice == 'running':
containers = Containers()
status = 'not running'
for container in containers:
image_name = tool['image_name'] \
.rsplit(':' +
tool['version'], 1)[0]
image_name = image_name.replace(':', '-')
image_name = image_name.replace('/', '-')
self.logger.info('image_name: ' + image_name)
if container[0] == image_name:
status = container[1]
elif container[0] == image_name + \
'-' + tool['version']:
status = container[1]
items[choice][tool['section']] = status
elif choice == 'enabled':
items[choice][tool['section']] = tool['enabled']
else:
# unknown choice
pass
except Exception as e: # pragma: no cover
self.logger.error('Unable to grab info about tool: ' +
str(tool) + ' because: ' + str(e))
status = (True, items)
except Exception as e: # pragma: no cover
self.logger.error(
'Inventory failed with error: {0}'.format(str(e)))
status = (False, str(e))
return status | [
"def",
"inventory",
"(",
"self",
",",
"choices",
"=",
"None",
")",
":",
"status",
"=",
"(",
"True",
",",
"None",
")",
"if",
"not",
"choices",
":",
"return",
"(",
"False",
",",
"'No choices made'",
")",
"try",
":",
"# choices: repos, tools, images, built, running, enabled",
"items",
"=",
"{",
"'repos'",
":",
"[",
"]",
",",
"'tools'",
":",
"{",
"}",
",",
"'images'",
":",
"{",
"}",
",",
"'built'",
":",
"{",
"}",
",",
"'running'",
":",
"{",
"}",
",",
"'enabled'",
":",
"{",
"}",
"}",
"tools",
"=",
"Template",
"(",
"self",
".",
"manifest",
")",
".",
"list_tools",
"(",
")",
"for",
"choice",
"in",
"choices",
":",
"for",
"tool",
"in",
"tools",
":",
"try",
":",
"if",
"choice",
"==",
"'repos'",
":",
"if",
"'repo'",
"in",
"tool",
":",
"if",
"(",
"tool",
"[",
"'repo'",
"]",
"and",
"tool",
"[",
"'repo'",
"]",
"not",
"in",
"items",
"[",
"choice",
"]",
")",
":",
"items",
"[",
"choice",
"]",
".",
"append",
"(",
"tool",
"[",
"'repo'",
"]",
")",
"elif",
"choice",
"==",
"'tools'",
":",
"items",
"[",
"choice",
"]",
"[",
"tool",
"[",
"'section'",
"]",
"]",
"=",
"tool",
"[",
"'name'",
"]",
"elif",
"choice",
"==",
"'images'",
":",
"# TODO also check against docker",
"items",
"[",
"choice",
"]",
"[",
"tool",
"[",
"'section'",
"]",
"]",
"=",
"tool",
"[",
"'image_name'",
"]",
"elif",
"choice",
"==",
"'built'",
":",
"items",
"[",
"choice",
"]",
"[",
"tool",
"[",
"'section'",
"]",
"]",
"=",
"tool",
"[",
"'built'",
"]",
"elif",
"choice",
"==",
"'running'",
":",
"containers",
"=",
"Containers",
"(",
")",
"status",
"=",
"'not running'",
"for",
"container",
"in",
"containers",
":",
"image_name",
"=",
"tool",
"[",
"'image_name'",
"]",
".",
"rsplit",
"(",
"':'",
"+",
"tool",
"[",
"'version'",
"]",
",",
"1",
")",
"[",
"0",
"]",
"image_name",
"=",
"image_name",
".",
"replace",
"(",
"':'",
",",
"'-'",
")",
"image_name",
"=",
"image_name",
".",
"replace",
"(",
"'/'",
",",
"'-'",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'image_name: '",
"+",
"image_name",
")",
"if",
"container",
"[",
"0",
"]",
"==",
"image_name",
":",
"status",
"=",
"container",
"[",
"1",
"]",
"elif",
"container",
"[",
"0",
"]",
"==",
"image_name",
"+",
"'-'",
"+",
"tool",
"[",
"'version'",
"]",
":",
"status",
"=",
"container",
"[",
"1",
"]",
"items",
"[",
"choice",
"]",
"[",
"tool",
"[",
"'section'",
"]",
"]",
"=",
"status",
"elif",
"choice",
"==",
"'enabled'",
":",
"items",
"[",
"choice",
"]",
"[",
"tool",
"[",
"'section'",
"]",
"]",
"=",
"tool",
"[",
"'enabled'",
"]",
"else",
":",
"# unknown choice",
"pass",
"except",
"Exception",
"as",
"e",
":",
"# pragma: no cover",
"self",
".",
"logger",
".",
"error",
"(",
"'Unable to grab info about tool: '",
"+",
"str",
"(",
"tool",
")",
"+",
"' because: '",
"+",
"str",
"(",
"e",
")",
")",
"status",
"=",
"(",
"True",
",",
"items",
")",
"except",
"Exception",
"as",
"e",
":",
"# pragma: no cover",
"self",
".",
"logger",
".",
"error",
"(",
"'Inventory failed with error: {0}'",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
")",
"status",
"=",
"(",
"False",
",",
"str",
"(",
"e",
")",
")",
"return",
"status"
] | Return a dictionary of the inventory items and status | [
"Return",
"a",
"dictionary",
"of",
"the",
"inventory",
"items",
"and",
"status"
] | python | train |
inveniosoftware/invenio-base | invenio_base/app.py | https://github.com/inveniosoftware/invenio-base/blob/ed4b7a76516ab2675e19270844400f4e2308f52d/invenio_base/app.py#L282-L297 | def configure_warnings():
"""Configure warnings by routing warnings to the logging system.
It also unhides ``DeprecationWarning``.
.. versionadded:: 1.0.0
"""
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
# DeprecationWarning is by default hidden, hence we force the
# 'default' behavior on deprecation warnings which is not to hide
# errors.
warnings.simplefilter('default', DeprecationWarning)
warnings.simplefilter('ignore', PendingDeprecationWarning) | [
"def",
"configure_warnings",
"(",
")",
":",
"if",
"not",
"sys",
".",
"warnoptions",
":",
"# Route warnings through python logging",
"logging",
".",
"captureWarnings",
"(",
"True",
")",
"# DeprecationWarning is by default hidden, hence we force the",
"# 'default' behavior on deprecation warnings which is not to hide",
"# errors.",
"warnings",
".",
"simplefilter",
"(",
"'default'",
",",
"DeprecationWarning",
")",
"warnings",
".",
"simplefilter",
"(",
"'ignore'",
",",
"PendingDeprecationWarning",
")"
] | Configure warnings by routing warnings to the logging system.
It also unhides ``DeprecationWarning``.
.. versionadded:: 1.0.0 | [
"Configure",
"warnings",
"by",
"routing",
"warnings",
"to",
"the",
"logging",
"system",
"."
] | python | train |
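A minimal usage sketch:

    from invenio_base.app import configure_warnings
    configure_warnings()
    import warnings
    warnings.warn('old API', DeprecationWarning)  # routed to logging instead of being hidden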
buckmaxwell/neoapi | neoapi/serializable_structured_node.py | https://github.com/buckmaxwell/neoapi/blob/96c5d83c847d7a12d3d1f17931d85776f5280877/neoapi/serializable_structured_node.py#L891-L930 | def create_relationships(cls, id, related_collection_name, request_json):
r"""
Used to create relationship(s) between the id node and the nodes identified in the included resource \
identifier objects.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification
"""
try:
this_resource = cls.nodes.get(id=id, active=True)
related_collection = getattr(this_resource, related_collection_name)
if type(related_collection) in (One, ZeroOrOne): # Cardinality <= 1 so update_relationship should be used
r = application_codes.error_response([application_codes.FORBIDDEN_VIOLATION])
else:
data = request_json['data']
for rsrc_identifier in data:
the_new_node = cls.get_class_from_type(rsrc_identifier['type']).nodes.get(id=rsrc_identifier['id'])
rel_attrs = rsrc_identifier.get('meta')
if not rel_attrs or isinstance(rel_attrs, dict):
related_collection.connect(the_new_node, rel_attrs)
else:
raise WrongTypeError
#r = this_resource.relationship_collection_response(related_collection_name)
r = make_response('')
r.status_code = http_error_codes.NO_CONTENT
r.headers['Content-Type'] = CONTENT_TYPE
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
except (KeyError, TypeError, WrongTypeError):
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
except AttemptedCardinalityViolation:
r = application_codes.error_response([application_codes.ATTEMPTED_CARDINALITY_VIOLATION])
except MultipleNodesReturned:
r = application_codes.error_response([application_codes.MULTIPLE_NODES_WITH_ID_VIOLATION])
return r | [
"def",
"create_relationships",
"(",
"cls",
",",
"id",
",",
"related_collection_name",
",",
"request_json",
")",
":",
"try",
":",
"this_resource",
"=",
"cls",
".",
"nodes",
".",
"get",
"(",
"id",
"=",
"id",
",",
"active",
"=",
"True",
")",
"related_collection",
"=",
"getattr",
"(",
"this_resource",
",",
"related_collection_name",
")",
"if",
"type",
"(",
"related_collection",
")",
"in",
"(",
"One",
",",
"ZeroOrOne",
")",
":",
"# Cardinality <= 1 so update_relationship should be used",
"r",
"=",
"application_codes",
".",
"error_response",
"(",
"[",
"application_codes",
".",
"FORBIDDEN_VIOLATION",
"]",
")",
"else",
":",
"data",
"=",
"request_json",
"[",
"'data'",
"]",
"for",
"rsrc_identifier",
"in",
"data",
":",
"the_new_node",
"=",
"cls",
".",
"get_class_from_type",
"(",
"rsrc_identifier",
"[",
"'type'",
"]",
")",
".",
"nodes",
".",
"get",
"(",
"id",
"=",
"rsrc_identifier",
"[",
"'id'",
"]",
")",
"rel_attrs",
"=",
"rsrc_identifier",
".",
"get",
"(",
"'meta'",
")",
"if",
"not",
"rel_attrs",
"or",
"isinstance",
"(",
"rel_attrs",
",",
"dict",
")",
":",
"related_collection",
".",
"connect",
"(",
"the_new_node",
",",
"rel_attrs",
")",
"else",
":",
"raise",
"WrongTypeError",
"#r = this_resource.relationship_collection_response(related_collection_name)",
"r",
"=",
"make_response",
"(",
"''",
")",
"r",
".",
"status_code",
"=",
"http_error_codes",
".",
"NO_CONTENT",
"r",
".",
"headers",
"[",
"'Content-Type'",
"]",
"=",
"CONTENT_TYPE",
"except",
"DoesNotExist",
":",
"r",
"=",
"application_codes",
".",
"error_response",
"(",
"[",
"application_codes",
".",
"RESOURCE_NOT_FOUND",
"]",
")",
"except",
"(",
"KeyError",
",",
"TypeError",
",",
"WrongTypeError",
")",
":",
"r",
"=",
"application_codes",
".",
"error_response",
"(",
"[",
"application_codes",
".",
"BAD_FORMAT_VIOLATION",
"]",
")",
"except",
"AttemptedCardinalityViolation",
":",
"r",
"=",
"application_codes",
".",
"error_response",
"(",
"[",
"application_codes",
".",
"ATTEMPTED_CARDINALITY_VIOLATION",
"]",
")",
"except",
"MultipleNodesReturned",
":",
"r",
"=",
"application_codes",
".",
"error_response",
"(",
"[",
"application_codes",
".",
"MULTIPLE_NODES_WITH_ID_VIOLATION",
"]",
")",
"return",
"r"
] | r"""
Used to create relationship(s) between the id node and the nodes identified in the included resource \
identifier objects.
:param id: The 'id' field of the node on the left side of the relationship in the database. The id field must \
be set in the model -- it is not the same as the node id
:param related_collection_name: The name of the relationship
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-updating-relationships
:return: A response according to the same specification | [
"r",
"Used",
"to",
"create",
"relationship",
"(",
"s",
")",
"between",
"the",
"id",
"node",
"and",
"the",
"nodes",
"identified",
"in",
"the",
"included",
"resource",
"\\",
"identifier",
"objects",
"."
] | python | train |
Esri/ArcREST | src/arcrest/enrichment/_geoenrichment.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/enrichment/_geoenrichment.py#L129-L145 | def __geometryToDict(self, geom):
"""converts a geometry object to a dictionary"""
if isinstance(geom, dict):
return geom
elif isinstance(geom, Point):
pt = geom.asDictionary
return {"geometry": {"x" : pt['x'], "y" : pt['y']}}
elif isinstance(geom, Polygon):
poly = geom.asDictionary
return {
"geometry" : {
"rings" : poly['rings'],
'spatialReference' : poly['spatialReference']
}
}
elif isinstance(geom, list):
return [self.__geometryToDict(g) for g in geom] | [
"def",
"__geometryToDict",
"(",
"self",
",",
"geom",
")",
":",
"if",
"isinstance",
"(",
"geom",
",",
"dict",
")",
":",
"return",
"geom",
"elif",
"isinstance",
"(",
"geom",
",",
"Point",
")",
":",
"pt",
"=",
"geom",
".",
"asDictionary",
"return",
"{",
"\"geometry\"",
":",
"{",
"\"x\"",
":",
"pt",
"[",
"'x'",
"]",
",",
"\"y\"",
":",
"pt",
"[",
"'y'",
"]",
"}",
"}",
"elif",
"isinstance",
"(",
"geom",
",",
"Polygon",
")",
":",
"poly",
"=",
"geom",
".",
"asDictionary",
"return",
"{",
"\"geometry\"",
":",
"{",
"\"rings\"",
":",
"poly",
"[",
"'rings'",
"]",
",",
"'spatialReference'",
":",
"poly",
"[",
"'spatialReference'",
"]",
"}",
"}",
"elif",
"isinstance",
"(",
"geom",
",",
"list",
")",
":",
"return",
"[",
"self",
".",
"__geometryToDict",
"(",
"g",
")",
"for",
"g",
"in",
"geom",
"]"
] | converts a geometry object to a dictionary | [
"converts",
"a",
"geometry",
"object",
"to",
"a",
"dictionary"
] | python | train |
klmitch/requiem | requiem/processor.py | https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/processor.py#L57-L69 | def _safe_call(obj, methname, *args, **kwargs):
"""
Safely calls the method with the given methname on the given
object. Remaining positional and keyword arguments are passed to
the method. The return value is None, if the method is not
available, or the return value of the method.
"""
meth = getattr(obj, methname, None)
if meth is None or not callable(meth):
return
return meth(*args, **kwargs) | [
"def",
"_safe_call",
"(",
"obj",
",",
"methname",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"meth",
"=",
"getattr",
"(",
"obj",
",",
"methname",
",",
"None",
")",
"if",
"meth",
"is",
"None",
"or",
"not",
"callable",
"(",
"meth",
")",
":",
"return",
"return",
"meth",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Safely calls the method with the given methname on the given
object. Remaining positional and keyword arguments are passed to
the method. The return value is None, if the method is not
available, or the return value of the method. | [
"Safely",
"calls",
"the",
"method",
"with",
"the",
"given",
"methname",
"on",
"the",
"given",
"object",
".",
"Remaining",
"positional",
"and",
"keyword",
"arguments",
"are",
"passed",
"to",
"the",
"method",
".",
"The",
"return",
"value",
"is",
"None",
"if",
"the",
"method",
"is",
"not",
"available",
"or",
"the",
"return",
"value",
"of",
"the",
"method",
"."
] | python | train |
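A minimal usage sketch with a hypothetical hook object:

    class Hook(object):
        def proc_request(self, req):
            return 'processed %r' % (req,)
    hook = Hook()
    _safe_call(hook, 'proc_request', 'GET /')   # -> "processed 'GET /'"
    _safe_call(hook, 'proc_response', 'GET /')  # missing method -> None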
tBaxter/tango-shared-core | build/lib/tango_shared/templatetags/formatting.py | https://github.com/tBaxter/tango-shared-core/blob/35fc10aef1ceedcdb4d6d866d44a22efff718812/build/lib/tango_shared/templatetags/formatting.py#L36-L72 | def humanized_join(value, add_links=False):
"""
Given a list of strings, format them with commas and spaces,
but with 'and' at the end.
>>> humanized_join(['apples', 'oranges', 'pears'])
"apples, oranges, and pears"
In a template, if mylist = ['apples', 'oranges', 'pears']
then {{ mylist|humanized_join }}
will output "apples, oranges, and pears"
Passing the add_links option will wrap each item in a link.
Note this requires that anything being passed has get_absolute_url() defined.
then {{ mylist|humanized_join:'add_links' }}
will output "<a href="...">apples</a>, <a href="...">oranges</a>, and <a href="...">pears</a>"
"""
if add_links:
try:
value = ['<a href="%s">%s</a>' % (item.get_absolute_url(), item) for item in value]
except AttributeError:
print("You did not pass objects with get_absolute_url() method.")
return
else:
# make everything a string to avoid errors
value = [six.u(item) for item in value]
if len(value) == 1:
return mark_safe(value[0])
if len(value) == 2:
return mark_safe("%s and %s" % (value[0], value[1]))
# join all but the last element
all_but_last = ", ".join(value[:-1])
return mark_safe("%s, and %s" % (all_but_last, value[-1])) | [
"def",
"humanized_join",
"(",
"value",
",",
"add_links",
"=",
"False",
")",
":",
"if",
"add_links",
":",
"try",
":",
"value",
"=",
"[",
"'<a href=\"%s\">%s</a>'",
"%",
"(",
"item",
".",
"get_absolute_url",
"(",
")",
",",
"item",
")",
"for",
"item",
"in",
"value",
"]",
"except",
"AttributeError",
":",
"print",
"(",
"\"You did not pass objects with get_absolute_url() method.\"",
")",
"return",
"else",
":",
"# make everything a string to avoid errors",
"value",
"=",
"[",
"six",
".",
"u",
"(",
"item",
")",
"for",
"item",
"in",
"value",
"]",
"if",
"len",
"(",
"value",
")",
"==",
"1",
":",
"return",
"mark_safe",
"(",
"value",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"value",
")",
"==",
"2",
":",
"return",
"mark_safe",
"(",
"\"%s and %s\"",
"%",
"(",
"value",
"[",
"0",
"]",
",",
"value",
"[",
"1",
"]",
")",
")",
"# join all but the last element",
"all_but_last",
"=",
"\", \"",
".",
"join",
"(",
"value",
"[",
":",
"-",
"1",
"]",
")",
"return",
"mark_safe",
"(",
"\"%s, and %s\"",
"%",
"(",
"all_but_last",
",",
"value",
"[",
"-",
"1",
"]",
")",
")"
] | Given a list of strings, format them with commas and spaces,
but with 'and' at the end.
>>> humanized_join(['apples', 'oranges', 'pears'])
"apples, oranges, and pears"
In a template, if mylist = ['apples', 'oranges', 'pears']
then {{ mylist|humanized_join }}
will output "apples, oranges, and pears"
Passing the add_links option will wrap each item in a link.
Note this requires that anything being passed has get_absolute_url() defined.
then {{ mylist|humanized_join:'add_links' }}
will output "<a href="...">apples</a>, <a href="...">oranges</a>, and <a href="...">pears</a>" | [
"Given",
"a",
"list",
"of",
"strings",
"format",
"them",
"with",
"commas",
"and",
"spaces",
"but",
"with",
"and",
"at",
"the",
"end",
"."
] | python | train |
saltstack/salt | salt/modules/github.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/github.py#L259-L292 | def add_user(name, profile='github'):
'''
Add a GitHub user.
name
The user for which to obtain information.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.add_user github-handle
'''
client = _get_client(profile)
organization = client.get_organization(
_get_config_value(profile, 'org_name')
)
try:
github_named_user = client.get_user(name)
except UnknownObjectException:
log.exception("Resource not found")
return False
headers, data = organization._requester.requestJsonAndCheck(
"PUT",
organization.url + "/memberships/" + github_named_user._identity
)
return data.get('state') == 'pending' | [
"def",
"add_user",
"(",
"name",
",",
"profile",
"=",
"'github'",
")",
":",
"client",
"=",
"_get_client",
"(",
"profile",
")",
"organization",
"=",
"client",
".",
"get_organization",
"(",
"_get_config_value",
"(",
"profile",
",",
"'org_name'",
")",
")",
"try",
":",
"github_named_user",
"=",
"client",
".",
"get_user",
"(",
"name",
")",
"except",
"UnknownObjectException",
":",
"log",
".",
"exception",
"(",
"\"Resource not found\"",
")",
"return",
"False",
"headers",
",",
"data",
"=",
"organization",
".",
"_requester",
".",
"requestJsonAndCheck",
"(",
"\"PUT\"",
",",
"organization",
".",
"url",
"+",
"\"/memberships/\"",
"+",
"github_named_user",
".",
"_identity",
")",
"return",
"data",
".",
"get",
"(",
"'state'",
")",
"==",
"'pending'"
] | Add a GitHub user.
name
The user for which to obtain information.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.add_user github-handle | [
"Add",
"a",
"GitHub",
"user",
"."
] | python | train |
maxpumperla/elephas | examples/hyperparam_optimization.py | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/examples/hyperparam_optimization.py#L10-L29 | def data():
"""Data providing function:
Make sure to have every relevant import statement included here and return data as
used in model function below. This function is separated from model() so that hyperopt
won't reload data for each evaluation run.
"""
from keras.datasets import mnist
from keras.utils import np_utils
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
nb_classes = 10
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
return x_train, y_train, x_test, y_test | [
"def",
"data",
"(",
")",
":",
"from",
"keras",
".",
"datasets",
"import",
"mnist",
"from",
"keras",
".",
"utils",
"import",
"np_utils",
"(",
"x_train",
",",
"y_train",
")",
",",
"(",
"x_test",
",",
"y_test",
")",
"=",
"mnist",
".",
"load_data",
"(",
")",
"x_train",
"=",
"x_train",
".",
"reshape",
"(",
"60000",
",",
"784",
")",
"x_test",
"=",
"x_test",
".",
"reshape",
"(",
"10000",
",",
"784",
")",
"x_train",
"=",
"x_train",
".",
"astype",
"(",
"'float32'",
")",
"x_test",
"=",
"x_test",
".",
"astype",
"(",
"'float32'",
")",
"x_train",
"/=",
"255",
"x_test",
"/=",
"255",
"nb_classes",
"=",
"10",
"y_train",
"=",
"np_utils",
".",
"to_categorical",
"(",
"y_train",
",",
"nb_classes",
")",
"y_test",
"=",
"np_utils",
".",
"to_categorical",
"(",
"y_test",
",",
"nb_classes",
")",
"return",
"x_train",
",",
"y_train",
",",
"x_test",
",",
"y_test"
] | Data providing function:
Make sure to have every relevant import statement included here and return data as
used in model function below. This function is separated from model() so that hyperopt
won't reload data for each evaluation run. | [
"Data",
"providing",
"function",
":"
] | python | train |
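The entry above is mainly a normalization recipe: flatten 28×28 images to 784-vectors, scale to [0, 1], one-hot the labels. A minimal sketch of the same steps in plain NumPy, so it runs without Keras or the MNIST download — the random arrays standing in for `mnist.load_data()` are an assumption, not part of the original.

```python
import numpy as np

def to_categorical(y, nb_classes):
    # One-hot encode integer labels (mirrors np_utils.to_categorical).
    out = np.zeros((len(y), nb_classes), dtype="float32")
    out[np.arange(len(y)), y] = 1.0
    return out

# Stand-ins for mnist.load_data(): random uint8 images, integer labels.
x_train = np.random.randint(0, 256, size=(60, 28, 28), dtype=np.uint8)
y_train = np.random.randint(0, 10, size=60)

# Same reshape / scale / encode steps as data() above.
x_train = x_train.reshape(len(x_train), 784).astype("float32") / 255
y_train = to_categorical(y_train, 10)
print(x_train.shape, float(x_train.max()) <= 1.0, y_train.shape)  # (60, 784) True (60, 10)
```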
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py#L64-L75 | def system_monitor_power_threshold_marginal_threshold(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
power = ET.SubElement(system_monitor, "power")
threshold = ET.SubElement(power, "threshold")
marginal_threshold = ET.SubElement(threshold, "marginal-threshold")
marginal_threshold.text = kwargs.pop('marginal_threshold')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"system_monitor_power_threshold_marginal_threshold",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"system_monitor",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"system-monitor\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-system-monitor\"",
")",
"power",
"=",
"ET",
".",
"SubElement",
"(",
"system_monitor",
",",
"\"power\"",
")",
"threshold",
"=",
"ET",
".",
"SubElement",
"(",
"power",
",",
"\"threshold\"",
")",
"marginal_threshold",
"=",
"ET",
".",
"SubElement",
"(",
"threshold",
",",
"\"marginal-threshold\"",
")",
"marginal_threshold",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'marginal_threshold'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
Swind/pure-python-adb | adb/sync/__init__.py | https://github.com/Swind/pure-python-adb/blob/8e076bc2b25ad33b6c5eb14293329a017d83455f/adb/sync/__init__.py#L95-L108 | def _send_str(self, cmd, args):
"""
Format:
{Command}{args length(little endian)}{str}
Length:
{4}{4}{str length}
"""
logger.debug("{} {}".format(cmd, args))
args = args.encode('utf-8')
le_args_len = self._little_endian(len(args))
data = cmd.encode() + le_args_len + args
logger.debug("Send string: {}".format(data))
self.connection.write(data) | [
"def",
"_send_str",
"(",
"self",
",",
"cmd",
",",
"args",
")",
":",
"logger",
".",
"debug",
"(",
"\"{} {}\"",
".",
"format",
"(",
"cmd",
",",
"args",
")",
")",
"args",
"=",
"args",
".",
"encode",
"(",
"'utf-8'",
")",
"le_args_len",
"=",
"self",
".",
"_little_endian",
"(",
"len",
"(",
"args",
")",
")",
"data",
"=",
"cmd",
".",
"encode",
"(",
")",
"+",
"le_args_len",
"+",
"args",
"logger",
".",
"debug",
"(",
"\"Send string: {}\"",
".",
"format",
"(",
"data",
")",
")",
"self",
".",
"connection",
".",
"write",
"(",
"data",
")"
] | Format:
{Command}{args length(little endian)}{str}
Length:
{4}{4}{str length} | [
"Format",
":",
"{",
"Command",
"}",
"{",
"args",
"length",
"(",
"little",
"endian",
")",
"}",
"{",
"str",
"}",
"Length",
":",
"{",
"4",
"}",
"{",
"4",
"}",
"{",
"str",
"length",
"}"
] | python | train |
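The docstring pins down the sync wire format exactly: a 4-byte command, a 4-byte little-endian payload length, then the UTF-8 payload. A dependency-free sketch of that framing using `struct` instead of the class's own `_little_endian` helper; the `SEND` command string is only illustrative.

```python
import struct

def frame_sync_command(cmd, args):
    # {Command}{args length (little endian)}{str} -> 4 + 4 + len(args) bytes
    payload = args.encode("utf-8")
    return cmd.encode() + struct.pack("<I", len(payload)) + payload

data = frame_sync_command("SEND", "/sdcard/demo.txt,33206")
assert data[:4] == b"SEND"
assert struct.unpack("<I", data[4:8])[0] == len(data) - 8
print(data)
```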
chaimleib/intervaltree | intervaltree/intervaltree.py | https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/intervaltree.py#L380-L391 | def discard(self, interval):
"""
Removes an interval from the tree, if present. If not, does
nothing.
Completes in O(log n) time.
"""
if interval not in self:
return
self.all_intervals.discard(interval)
self.top_node = self.top_node.discard(interval)
self._remove_boundaries(interval) | [
"def",
"discard",
"(",
"self",
",",
"interval",
")",
":",
"if",
"interval",
"not",
"in",
"self",
":",
"return",
"self",
".",
"all_intervals",
".",
"discard",
"(",
"interval",
")",
"self",
".",
"top_node",
"=",
"self",
".",
"top_node",
".",
"discard",
"(",
"interval",
")",
"self",
".",
"_remove_boundaries",
"(",
"interval",
")"
] | Removes an interval from the tree, if present. If not, does
nothing.
Completes in O(log n) time. | [
"Removes",
"an",
"interval",
"from",
"the",
"tree",
"if",
"present",
".",
"If",
"not",
"does",
"nothing",
"."
] | python | train |
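A quick usage contrast: unlike a hard `remove`, `discard` is a no-op when the interval is absent. A short sketch using the package's public `IntervalTree`/`Interval` constructors:

```python
from intervaltree import Interval, IntervalTree

tree = IntervalTree([Interval(1, 5, "a"), Interval(4, 9, "b")])
tree.discard(Interval(100, 200))   # absent: silently ignored
tree.discard(Interval(1, 5, "a"))  # present: removed in O(log n)
print(sorted(tree))                # [Interval(4, 9, 'b')]
```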
ThreatConnect-Inc/tcex | tcex/tcex_ti_batch.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_batch.py#L579-L592 | def data_indicators(self, indicators, entity_count):
"""Process Indicator data."""
data = []
# process indicator objects
for xid, indicator_data in indicators.items():
entity_count += 1
if isinstance(indicator_data, dict):
data.append(indicator_data)
else:
data.append(indicator_data.data)
del indicators[xid]
if entity_count >= self._batch_max_chunk:
break
return data, entity_count | [
"def",
"data_indicators",
"(",
"self",
",",
"indicators",
",",
"entity_count",
")",
":",
"data",
"=",
"[",
"]",
"# process indicator objects",
"for",
"xid",
",",
"indicator_data",
"in",
"indicators",
".",
"items",
"(",
")",
":",
"entity_count",
"+=",
"1",
"if",
"isinstance",
"(",
"indicator_data",
",",
"dict",
")",
":",
"data",
".",
"append",
"(",
"indicator_data",
")",
"else",
":",
"data",
".",
"append",
"(",
"indicator_data",
".",
"data",
")",
"del",
"indicators",
"[",
"xid",
"]",
"if",
"entity_count",
">=",
"self",
".",
"_batch_max_chunk",
":",
"break",
"return",
"data",
",",
"entity_count"
] | Process Indicator data. | [
"Process",
"Indicator",
"data",
"."
] | python | train |
linkedin/naarad | src/naarad/utils.py | https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/src/naarad/utils.py#L137-L158 | def get_run_time_period(run_steps):
"""
This method finds the time range which covers all the Run_Steps
:param run_steps: list of Run_Step objects
:return: tuple of start and end timestamps
"""
init_ts_start = get_standardized_timestamp('now', None)
ts_start = init_ts_start
ts_end = '0'
for run_step in run_steps:
if run_step.ts_start and run_step.ts_end:
if run_step.ts_start < ts_start:
ts_start = run_step.ts_start
if run_step.ts_end > ts_end:
ts_end = run_step.ts_end
if ts_end == '0':
ts_end = None
if ts_start == init_ts_start:
ts_start = None
logger.info('get_run_time_period range returned ' + str(ts_start) + ' to ' + str(ts_end))
return ts_start, ts_end | [
"def",
"get_run_time_period",
"(",
"run_steps",
")",
":",
"init_ts_start",
"=",
"get_standardized_timestamp",
"(",
"'now'",
",",
"None",
")",
"ts_start",
"=",
"init_ts_start",
"ts_end",
"=",
"'0'",
"for",
"run_step",
"in",
"run_steps",
":",
"if",
"run_step",
".",
"ts_start",
"and",
"run_step",
".",
"ts_end",
":",
"if",
"run_step",
".",
"ts_start",
"<",
"ts_start",
":",
"ts_start",
"=",
"run_step",
".",
"ts_start",
"if",
"run_step",
".",
"ts_end",
">",
"ts_end",
":",
"ts_end",
"=",
"run_step",
".",
"ts_end",
"if",
"ts_end",
"==",
"'0'",
":",
"ts_end",
"=",
"None",
"if",
"ts_start",
"==",
"init_ts_start",
":",
"ts_start",
"=",
"None",
"logger",
".",
"info",
"(",
"'get_run_time_period range returned '",
"+",
"str",
"(",
"ts_start",
")",
"+",
"' to '",
"+",
"str",
"(",
"ts_end",
")",
")",
"return",
"ts_start",
",",
"ts_end"
] | This method finds the time range which covers all the Run_Steps
:param run_steps: list of Run_Step objects
:return: tuple of start and end timestamps | [
"This",
"method",
"finds",
"the",
"time",
"range",
"which",
"covers",
"all",
"the",
"Run_Steps"
] | python | valid |
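The function is a min/max fold over steps that carry both timestamps, with sentinels ('now' and '0') to detect the empty case. A dependency-free sketch of the same fold; the `RunStep` tuple is a hypothetical stand-in for naarad's Run_Step objects.

```python
from collections import namedtuple

RunStep = namedtuple("RunStep", ["ts_start", "ts_end"])  # hypothetical stand-in

def covering_window(run_steps):
    # Earliest start / latest end over steps that carry both timestamps.
    complete = [s for s in run_steps if s.ts_start and s.ts_end]
    if not complete:
        return None, None
    return min(s.ts_start for s in complete), max(s.ts_end for s in complete)

steps = [RunStep("2024-01-01 10:00", "2024-01-01 10:05"),
         RunStep("2024-01-01 09:58", "2024-01-01 10:02"),
         RunStep(None, None)]  # incomplete: skipped
print(covering_window(steps))  # ('2024-01-01 09:58', '2024-01-01 10:05')
```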
tcalmant/ipopo | pelix/ipopo/instance.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/instance.py#L415-L475 | def kill(self):
# type: () -> bool
"""
This instance is killed: invalidate it if needed, clean up all members
When this method is called, this StoredInstance object must have
been removed from the registry
:return: True if the component has been killed, False if it already was
"""
with self._lock:
# Already dead...
if self.state == StoredInstance.KILLED:
return False
try:
self.invalidate(True)
except:
self._logger.exception(
"%s: Error invalidating the instance", self.name
)
# Now that we are nearly clean, be sure we were in a good registry
# state
assert not self._ipopo_service.is_registered_instance(self.name)
# Stop all handlers (can tell to unset a binding)
for handler in self.get_handlers():
results = self.__safe_handler_callback(handler, "stop")
if results:
try:
for binding in results:
self.__unset_binding(
handler, binding[0], binding[1]
)
except Exception as ex:
self._logger.exception(
"Error stopping handler '%s': %s", handler, ex
)
# Call the handlers
self.__safe_handlers_callback("clear")
# Change the state
self.state = StoredInstance.KILLED
# Trigger the event
# pylint: disable=W0212
self._ipopo_service._fire_ipopo_event(
constants.IPopoEvent.KILLED, self.factory_name, self.name
)
# Clean up members
self._handlers.clear()
self.__all_handlers.clear()
self._handlers = None
self.__all_handlers = None
self.context = None
self.instance = None
self._ipopo_service = None
return True | [
"def",
"kill",
"(",
"self",
")",
":",
"# type: () -> bool",
"with",
"self",
".",
"_lock",
":",
"# Already dead...",
"if",
"self",
".",
"state",
"==",
"StoredInstance",
".",
"KILLED",
":",
"return",
"False",
"try",
":",
"self",
".",
"invalidate",
"(",
"True",
")",
"except",
":",
"self",
".",
"_logger",
".",
"exception",
"(",
"\"%s: Error invalidating the instance\"",
",",
"self",
".",
"name",
")",
"# Now that we are nearly clean, be sure we were in a good registry",
"# state",
"assert",
"not",
"self",
".",
"_ipopo_service",
".",
"is_registered_instance",
"(",
"self",
".",
"name",
")",
"# Stop all handlers (can tell to unset a binding)",
"for",
"handler",
"in",
"self",
".",
"get_handlers",
"(",
")",
":",
"results",
"=",
"self",
".",
"__safe_handler_callback",
"(",
"handler",
",",
"\"stop\"",
")",
"if",
"results",
":",
"try",
":",
"for",
"binding",
"in",
"results",
":",
"self",
".",
"__unset_binding",
"(",
"handler",
",",
"binding",
"[",
"0",
"]",
",",
"binding",
"[",
"1",
"]",
")",
"except",
"Exception",
"as",
"ex",
":",
"self",
".",
"_logger",
".",
"exception",
"(",
"\"Error stopping handler '%s': %s\"",
",",
"handler",
",",
"ex",
")",
"# Call the handlers",
"self",
".",
"__safe_handlers_callback",
"(",
"\"clear\"",
")",
"# Change the state",
"self",
".",
"state",
"=",
"StoredInstance",
".",
"KILLED",
"# Trigger the event",
"# pylint: disable=W0212",
"self",
".",
"_ipopo_service",
".",
"_fire_ipopo_event",
"(",
"constants",
".",
"IPopoEvent",
".",
"KILLED",
",",
"self",
".",
"factory_name",
",",
"self",
".",
"name",
")",
"# Clean up members",
"self",
".",
"_handlers",
".",
"clear",
"(",
")",
"self",
".",
"__all_handlers",
".",
"clear",
"(",
")",
"self",
".",
"_handlers",
"=",
"None",
"self",
".",
"__all_handlers",
"=",
"None",
"self",
".",
"context",
"=",
"None",
"self",
".",
"instance",
"=",
"None",
"self",
".",
"_ipopo_service",
"=",
"None",
"return",
"True"
] | This instance is killed: invalidate it if needed, clean up all members
When this method is called, this StoredInstance object must have
been removed from the registry
:return: True if the component has been killed, False if it already was | [
"This",
"instance",
"is",
"killed",
":",
"invalidate",
"it",
"if",
"needed",
"clean",
"up",
"all",
"members"
] | python | train |
petrjasek/eve-elastic | eve_elastic/elastic.py | https://github.com/petrjasek/eve-elastic/blob/f146f31b348d22ac5559cf78717b3bb02efcb2d7/eve_elastic/elastic.py#L410-L416 | def get_settings(self, index):
"""Get settings for index.
:param index: index name
"""
settings = self.es.indices.get_settings(index=index)
return next(iter(settings.values())) | [
"def",
"get_settings",
"(",
"self",
",",
"index",
")",
":",
"settings",
"=",
"self",
".",
"es",
".",
"indices",
".",
"get_settings",
"(",
"index",
"=",
"index",
")",
"return",
"next",
"(",
"iter",
"(",
"settings",
".",
"values",
"(",
")",
")",
")"
] | Get settings for index.
:param index: index name | [
"Get",
"settings",
"for",
"index",
"."
] | python | train |
mrcagney/gtfstk | gtfstk/cleaners.py | https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/cleaners.py#L72-L96 | def clean_ids(feed: "Feed") -> "Feed":
"""
In the given "Feed", strip whitespace from all string IDs and
then replace every remaining whitespace chunk with an underscore.
Return the resulting "Feed".
"""
# Alter feed inputs only, and build a new feed from them.
# The derived feed attributes, such as feed.trips_i,
# will be automatically handled when creating the new feed.
feed = feed.copy()
for table in cs.GTFS_REF["table"].unique():
f = getattr(feed, table)
if f is None:
continue
for column in cs.GTFS_REF.loc[cs.GTFS_REF["table"] == table, "column"]:
if column in f.columns and column.endswith("_id"):
try:
f[column] = f[column].str.strip().str.replace(r"\s+", "_")
setattr(feed, table, f)
except AttributeError:
# Column is not of string type
continue
return feed | [
"def",
"clean_ids",
"(",
"feed",
":",
"\"Feed\"",
")",
"->",
"\"Feed\"",
":",
"# Alter feed inputs only, and build a new feed from them.",
"# The derived feed attributes, such as feed.trips_i,",
"# will be automatically handled when creating the new feed.",
"feed",
"=",
"feed",
".",
"copy",
"(",
")",
"for",
"table",
"in",
"cs",
".",
"GTFS_REF",
"[",
"\"table\"",
"]",
".",
"unique",
"(",
")",
":",
"f",
"=",
"getattr",
"(",
"feed",
",",
"table",
")",
"if",
"f",
"is",
"None",
":",
"continue",
"for",
"column",
"in",
"cs",
".",
"GTFS_REF",
".",
"loc",
"[",
"cs",
".",
"GTFS_REF",
"[",
"\"table\"",
"]",
"==",
"table",
",",
"\"column\"",
"]",
":",
"if",
"column",
"in",
"f",
".",
"columns",
"and",
"column",
".",
"endswith",
"(",
"\"_id\"",
")",
":",
"try",
":",
"f",
"[",
"column",
"]",
"=",
"f",
"[",
"column",
"]",
".",
"str",
".",
"strip",
"(",
")",
".",
"str",
".",
"replace",
"(",
"r\"\\s+\"",
",",
"\"_\"",
")",
"setattr",
"(",
"feed",
",",
"table",
",",
"f",
")",
"except",
"AttributeError",
":",
"# Column is not of string type",
"continue",
"return",
"feed"
] | In the given "Feed", strip whitespace from all string IDs and
then replace every remaining whitespace chunk with an underscore.
Return the resulting "Feed". | [
"In",
"the",
"given",
"Feed",
"strip",
"whitespace",
"from",
"all",
"string",
"IDs",
"and",
"then",
"replace",
"every",
"remaining",
"whitespace",
"chunk",
"with",
"an",
"underscore",
".",
"Return",
"the",
"resulting",
"Feed",
"."
] | python | train |
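The whole cleanup hinges on one pandas idiom, `str.strip().str.replace(r"\s+", "_")`, applied to every `*_id` column. The same transformation in isolation (newer pandas wants `regex=True` spelled out):

```python
import pandas as pd

stops = pd.DataFrame({"stop_id": ["  stop 1 ", "stop\t2", "ok_3"]})
stops["stop_id"] = stops["stop_id"].str.strip().str.replace(r"\s+", "_", regex=True)
print(stops["stop_id"].tolist())  # ['stop_1', 'stop_2', 'ok_3']
```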
idank/bashlex | bashlex/parser.py | https://github.com/idank/bashlex/blob/800cb7e3c634eaa3c81f8a8648fd7fd4e27050ac/bashlex/parser.py#L397-L407 | def p_compound_list(p):
'''compound_list : list
| newline_list list1'''
if len(p) == 2:
p[0] = p[1]
else:
parts = p[2]
if len(parts) > 1:
p[0] = ast.node(kind='list', parts=parts, pos=_partsspan(parts))
else:
p[0] = parts[0] | [
"def",
"p_compound_list",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"2",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]",
"else",
":",
"parts",
"=",
"p",
"[",
"2",
"]",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"node",
"(",
"kind",
"=",
"'list'",
",",
"parts",
"=",
"parts",
",",
"pos",
"=",
"_partsspan",
"(",
"parts",
")",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"parts",
"[",
"0",
"]"
] | compound_list : list
| newline_list list1 | [
"compound_list",
":",
"list",
"|",
"newline_list",
"list1"
] | python | train |
openego/ding0 | ding0/grid/mv_grid/solvers/base.py | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/solvers/base.py#L107-L119 | def length(self):
"""Returns the solution length (or cost)
Returns
-------
float
Solution length (or cost).
"""
length = 0
for r in self._routes:
length = length + r.length()
return length | [
"def",
"length",
"(",
"self",
")",
":",
"length",
"=",
"0",
"for",
"r",
"in",
"self",
".",
"_routes",
":",
"length",
"=",
"length",
"+",
"r",
".",
"length",
"(",
")",
"return",
"length"
] | Returns the solution length (or cost)
Returns
-------
float
Solution length (or cost). | [
"Returns",
"the",
"solution",
"length",
"(",
"or",
"cost",
")",
"Returns",
"-------",
"float",
"Solution",
"length",
"(",
"or",
"cost",
")",
"."
] | python | train |
yunojuno/elasticsearch-django | elasticsearch_django/apps.py | https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/apps.py#L40-L48 | def _validate_mapping(index, strict=False):
"""Check that an index mapping JSON file exists."""
try:
settings.get_index_mapping(index)
except IOError:
if strict:
raise ImproperlyConfigured("Index '%s' has no mapping file." % index)
else:
logger.warning("Index '%s' has no mapping, relying on ES instead.", index) | [
"def",
"_validate_mapping",
"(",
"index",
",",
"strict",
"=",
"False",
")",
":",
"try",
":",
"settings",
".",
"get_index_mapping",
"(",
"index",
")",
"except",
"IOError",
":",
"if",
"strict",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"Index '%s' has no mapping file.\"",
"%",
"index",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Index '%s' has no mapping, relying on ES instead.\"",
",",
"index",
")"
] | Check that an index mapping JSON file exists. | [
"Check",
"that",
"an",
"index",
"mapping",
"JSON",
"file",
"exists",
"."
] | python | train |
dantezhu/melon | melon/melon.py | https://github.com/dantezhu/melon/blob/44d859fa85fbfb2d77479e01eade925a0d26e4f7/melon/melon.py#L161-L167 | def _spawn_fork_workers(self):
"""
Start multiple workers via threads
"""
thread = Thread(target=self._fork_workers, args=())
thread.daemon = True
thread.start() | [
"def",
"_spawn_fork_workers",
"(",
"self",
")",
":",
"thread",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"_fork_workers",
",",
"args",
"=",
"(",
")",
")",
"thread",
".",
"daemon",
"=",
"True",
"thread",
".",
"start",
"(",
")"
] | Start multiple workers via threads | [
"Start",
"multiple",
"workers",
"via",
"threads"
] | python | train |
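The method is the standard daemon-thread launch pattern: the worker loop runs in the background and dies with the main process. A self-contained sketch with a trivial function in place of `_fork_workers`:

```python
from threading import Thread
import time

def fork_workers():
    # Stand-in for melon's real worker-forking loop.
    for i in range(3):
        print("spawning worker", i)
        time.sleep(0.01)

thread = Thread(target=fork_workers, args=())
thread.daemon = True   # background thread; will not block interpreter exit
thread.start()
thread.join()          # joined only so this demo prints before exiting
```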
bwohlberg/sporco | sporco/fista/fista.py | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/fista/fista.py#L562-L571 | def compute_residuals(self):
"""Compute residuals and stopping thresholds."""
r = self.rsdl()
adapt_tol = self.opt['RelStopTol']
if self.opt['AutoStop', 'Enabled']:
adapt_tol = self.tau0 / (1. + self.k)
return r, adapt_tol | [
"def",
"compute_residuals",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"rsdl",
"(",
")",
"adapt_tol",
"=",
"self",
".",
"opt",
"[",
"'RelStopTol'",
"]",
"if",
"self",
".",
"opt",
"[",
"'AutoStop'",
",",
"'Enabled'",
"]",
":",
"adapt_tol",
"=",
"self",
".",
"tau0",
"/",
"(",
"1.",
"+",
"self",
".",
"k",
")",
"return",
"r",
",",
"adapt_tol"
] | Compute residuals and stopping thresholds. | [
"Compute",
"residuals",
"and",
"stopping",
"thresholds",
"."
] | python | train |
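With `AutoStop` enabled the stopping tolerance follows the schedule tau0 / (1 + k), tightening every iteration. A tiny numeric illustration (the tau0 value is made up):

```python
tau0 = 1e-2  # illustrative initial tolerance
for k in range(5):
    adapt_tol = tau0 / (1.0 + k)
    print(f"iteration {k}: adaptive tolerance = {adapt_tol:.4e}")
# 1.0000e-02, 5.0000e-03, 3.3333e-03, 2.5000e-03, 2.0000e-03
```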
log2timeline/plaso | plaso/formatters/manager.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/formatters/manager.py#L17-L34 | def DeregisterFormatter(cls, formatter_class):
"""Deregisters a formatter class.
The formatter classes are identified based on their lower case data type.
Args:
formatter_class (type): class of the formatter.
Raises:
KeyError: if formatter class is not set for the corresponding data type.
"""
formatter_data_type = formatter_class.DATA_TYPE.lower()
if formatter_data_type not in cls._formatter_classes:
raise KeyError(
'Formatter class not set for data type: {0:s}.'.format(
formatter_class.DATA_TYPE))
del cls._formatter_classes[formatter_data_type] | [
"def",
"DeregisterFormatter",
"(",
"cls",
",",
"formatter_class",
")",
":",
"formatter_data_type",
"=",
"formatter_class",
".",
"DATA_TYPE",
".",
"lower",
"(",
")",
"if",
"formatter_data_type",
"not",
"in",
"cls",
".",
"_formatter_classes",
":",
"raise",
"KeyError",
"(",
"'Formatter class not set for data type: {0:s}.'",
".",
"format",
"(",
"formatter_class",
".",
"DATA_TYPE",
")",
")",
"del",
"cls",
".",
"_formatter_classes",
"[",
"formatter_data_type",
"]"
] | Deregisters a formatter class.
The formatter classes are identified based on their lower case data type.
Args:
formatter_class (type): class of the formatter.
Raises:
KeyError: if formatter class is not set for the corresponding data type. | [
"Deregisters",
"a",
"formatter",
"class",
"."
] | python | train |
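The method is one half of a keyed class registry: registration stores a class under its lower-cased DATA_TYPE, deregistration removes it and raises KeyError on a miss. A minimal sketch of the whole pattern; the RegisterFormatter method and the SyslogFormatter class are assumptions for illustration, not plaso's exact code.

```python
class FormatterManager:
    _formatter_classes = {}

    @classmethod
    def RegisterFormatter(cls, formatter_class):
        key = formatter_class.DATA_TYPE.lower()
        if key in cls._formatter_classes:
            raise KeyError('Formatter class already set for data type: {0:s}.'.format(key))
        cls._formatter_classes[key] = formatter_class

    @classmethod
    def DeregisterFormatter(cls, formatter_class):
        key = formatter_class.DATA_TYPE.lower()
        if key not in cls._formatter_classes:
            raise KeyError('Formatter class not set for data type: {0:s}.'.format(key))
        del cls._formatter_classes[key]

class SyslogFormatter:      # hypothetical formatter
    DATA_TYPE = 'syslog:line'

FormatterManager.RegisterFormatter(SyslogFormatter)
FormatterManager.DeregisterFormatter(SyslogFormatter)
print(FormatterManager._formatter_classes)  # {}
```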
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/modules/lib/mp_util.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/lib/mp_util.py#L65-L78 | def gps_newpos(lat, lon, bearing, distance):
'''extrapolate latitude/longitude given a heading and distance
thanks to http://www.movable-type.co.uk/scripts/latlong.html
'''
lat1 = math.radians(lat)
lon1 = math.radians(lon)
brng = math.radians(bearing)
dr = distance/radius_of_earth
lat2 = math.asin(math.sin(lat1)*math.cos(dr) +
math.cos(lat1)*math.sin(dr)*math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng)*math.sin(dr)*math.cos(lat1),
math.cos(dr)-math.sin(lat1)*math.sin(lat2))
return (math.degrees(lat2), wrap_valid_longitude(math.degrees(lon2))) | [
"def",
"gps_newpos",
"(",
"lat",
",",
"lon",
",",
"bearing",
",",
"distance",
")",
":",
"lat1",
"=",
"math",
".",
"radians",
"(",
"lat",
")",
"lon1",
"=",
"math",
".",
"radians",
"(",
"lon",
")",
"brng",
"=",
"math",
".",
"radians",
"(",
"bearing",
")",
"dr",
"=",
"distance",
"/",
"radius_of_earth",
"lat2",
"=",
"math",
".",
"asin",
"(",
"math",
".",
"sin",
"(",
"lat1",
")",
"*",
"math",
".",
"cos",
"(",
"dr",
")",
"+",
"math",
".",
"cos",
"(",
"lat1",
")",
"*",
"math",
".",
"sin",
"(",
"dr",
")",
"*",
"math",
".",
"cos",
"(",
"brng",
")",
")",
"lon2",
"=",
"lon1",
"+",
"math",
".",
"atan2",
"(",
"math",
".",
"sin",
"(",
"brng",
")",
"*",
"math",
".",
"sin",
"(",
"dr",
")",
"*",
"math",
".",
"cos",
"(",
"lat1",
")",
",",
"math",
".",
"cos",
"(",
"dr",
")",
"-",
"math",
".",
"sin",
"(",
"lat1",
")",
"*",
"math",
".",
"sin",
"(",
"lat2",
")",
")",
"return",
"(",
"math",
".",
"degrees",
"(",
"lat2",
")",
",",
"wrap_valid_longitude",
"(",
"math",
".",
"degrees",
"(",
"lon2",
")",
")",
")"
] | extrapolate latitude/longitude given a heading and distance
thanks to http://www.movable-type.co.uk/scripts/latlong.html | [
"extrapolate",
"latitude",
"/",
"longitude",
"given",
"a",
"heading",
"and",
"distance",
"thanks",
"to",
"http",
":",
"//",
"www",
".",
"movable",
"-",
"type",
".",
"co",
".",
"uk",
"/",
"scripts",
"/",
"latlong",
".",
"html"
] | python | train |
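This is the standard spherical destination-point formula. A self-contained sanity check: heading due east from (0, 0), one degree of arc (~111.19 km on a 6371 km sphere) should land at longitude 1. `radius_of_earth` is module state in the original and the `wrap_valid_longitude` step is dropped here for brevity; both substitutions are assumptions.

```python
import math

radius_of_earth = 6371000.0  # metres; assumed value of the module constant

def gps_newpos(lat, lon, bearing, distance):
    lat1, lon1, brng = map(math.radians, (lat, lon, bearing))
    dr = distance / radius_of_earth
    lat2 = math.asin(math.sin(lat1) * math.cos(dr) +
                     math.cos(lat1) * math.sin(dr) * math.cos(brng))
    lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dr) * math.cos(lat1),
                             math.cos(dr) - math.sin(lat1) * math.sin(lat2))
    return math.degrees(lat2), math.degrees(lon2)

print(gps_newpos(0.0, 0.0, 90.0, 111194.9))  # ≈ (0.0, 1.0)
```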
apple/turicreate | src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L603-L690 | def evaluate(self, dataset, metric='auto', batch_size=None, verbose=True):
"""
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the feature and target columns used for model training.
Additional columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'confusion_matrix' : An SFrame with counts of possible
prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an
ROC curve
verbose : bool, optional
If True, prints prediction progress.
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict
Examples
----------
.. sourcecode:: python
>>> results = model.evaluate(data)
>>> print(results['accuracy'])
"""
if self.target not in dataset.column_names():
raise _ToolkitError("Must provide ground truth column, '"
+ self.target + "' in the evaluation dataset.")
predicted = self._predict_with_probabilities(dataset, batch_size, verbose)
avail_metrics = ['accuracy', 'auc', 'precision', 'recall',
'f1_score', 'confusion_matrix', 'roc_curve']
_tkutl._check_categorical_option_type(
'metric', metric, avail_metrics + ['auto'])
metrics = avail_metrics if metric == 'auto' else [metric]
ret = {}
if 'accuracy' in metrics:
ret['accuracy'] = _evaluation.accuracy(
dataset[self.target], predicted[self.target])
if 'auc' in metrics:
ret['auc'] = _evaluation.auc(
dataset[self.target], predicted['probability'],
index_map=self._class_to_index)
if 'precision' in metrics:
ret['precision'] = _evaluation.precision(
dataset[self.target], predicted[self.target])
if 'recall' in metrics:
ret['recall'] = _evaluation.recall(
dataset[self.target], predicted[self.target])
if 'f1_score' in metrics:
ret['f1_score'] = _evaluation.f1_score(
dataset[self.target], predicted[self.target])
if 'confusion_matrix' in metrics:
ret['confusion_matrix'] = _evaluation.confusion_matrix(
dataset[self.target], predicted[self.target])
if 'roc_curve' in metrics:
ret['roc_curve'] = _evaluation.roc_curve(
dataset[self.target], predicted['probability'],
index_map=self._class_to_index)
return ret | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"metric",
"=",
"'auto'",
",",
"batch_size",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"self",
".",
"target",
"not",
"in",
"dataset",
".",
"column_names",
"(",
")",
":",
"raise",
"_ToolkitError",
"(",
"\"Must provide ground truth column, '\"",
"+",
"self",
".",
"target",
"+",
"\"' in the evaluation dataset.\"",
")",
"predicted",
"=",
"self",
".",
"_predict_with_probabilities",
"(",
"dataset",
",",
"batch_size",
",",
"verbose",
")",
"avail_metrics",
"=",
"[",
"'accuracy'",
",",
"'auc'",
",",
"'precision'",
",",
"'recall'",
",",
"'f1_score'",
",",
"'confusion_matrix'",
",",
"'roc_curve'",
"]",
"_tkutl",
".",
"_check_categorical_option_type",
"(",
"'metric'",
",",
"metric",
",",
"avail_metrics",
"+",
"[",
"'auto'",
"]",
")",
"metrics",
"=",
"avail_metrics",
"if",
"metric",
"==",
"'auto'",
"else",
"[",
"metric",
"]",
"ret",
"=",
"{",
"}",
"if",
"'accuracy'",
"in",
"metrics",
":",
"ret",
"[",
"'accuracy'",
"]",
"=",
"_evaluation",
".",
"accuracy",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"predicted",
"[",
"self",
".",
"target",
"]",
")",
"if",
"'auc'",
"in",
"metrics",
":",
"ret",
"[",
"'auc'",
"]",
"=",
"_evaluation",
".",
"auc",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"predicted",
"[",
"'probability'",
"]",
",",
"index_map",
"=",
"self",
".",
"_class_to_index",
")",
"if",
"'precision'",
"in",
"metrics",
":",
"ret",
"[",
"'precision'",
"]",
"=",
"_evaluation",
".",
"precision",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"predicted",
"[",
"self",
".",
"target",
"]",
")",
"if",
"'recall'",
"in",
"metrics",
":",
"ret",
"[",
"'recall'",
"]",
"=",
"_evaluation",
".",
"recall",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"predicted",
"[",
"self",
".",
"target",
"]",
")",
"if",
"'f1_score'",
"in",
"metrics",
":",
"ret",
"[",
"'f1_score'",
"]",
"=",
"_evaluation",
".",
"f1_score",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"predicted",
"[",
"self",
".",
"target",
"]",
")",
"if",
"'confusion_matrix'",
"in",
"metrics",
":",
"ret",
"[",
"'confusion_matrix'",
"]",
"=",
"_evaluation",
".",
"confusion_matrix",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"predicted",
"[",
"self",
".",
"target",
"]",
")",
"if",
"'roc_curve'",
"in",
"metrics",
":",
"ret",
"[",
"'roc_curve'",
"]",
"=",
"_evaluation",
".",
"roc_curve",
"(",
"dataset",
"[",
"self",
".",
"target",
"]",
",",
"predicted",
"[",
"'probability'",
"]",
",",
"index_map",
"=",
"self",
".",
"_class_to_index",
")",
"return",
"ret"
] | Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the feature and target columns used for model training.
Additional columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'confusion_matrix' : An SFrame with counts of possible
prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an
ROC curve
verbose : bool, optional
If True, prints prediction progress.
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict
Examples
----------
.. sourcecode:: python
>>> results = model.evaluate(data)
>>> print(results['accuracy']) | [
"Evaluate",
"the",
"model",
"by",
"making",
"predictions",
"of",
"target",
"values",
"and",
"comparing",
"these",
"to",
"actual",
"values",
".",
"Parameters",
"----------",
"dataset",
":",
"SFrame",
"Dataset",
"of",
"new",
"observations",
".",
"Must",
"include",
"columns",
"with",
"the",
"same",
"names",
"as",
"the",
"feature",
"and",
"target",
"columns",
"used",
"for",
"model",
"training",
".",
"Additional",
"columns",
"are",
"ignored",
".",
"metric",
":",
"str",
"optional",
"Name",
"of",
"the",
"evaluation",
"metric",
".",
"Possible",
"values",
"are",
":",
"-",
"auto",
":",
"Returns",
"all",
"available",
"metrics",
".",
"-",
"accuracy",
":",
"Classification",
"accuracy",
"(",
"micro",
"average",
")",
".",
"-",
"auc",
":",
"Area",
"under",
"the",
"ROC",
"curve",
"(",
"macro",
"average",
")",
"-",
"precision",
":",
"Precision",
"score",
"(",
"macro",
"average",
")",
"-",
"recall",
":",
"Recall",
"score",
"(",
"macro",
"average",
")",
"-",
"f1_score",
":",
"F1",
"score",
"(",
"macro",
"average",
")",
"-",
"confusion_matrix",
":",
"An",
"SFrame",
"with",
"counts",
"of",
"possible",
"prediction",
"/",
"true",
"label",
"combinations",
".",
"-",
"roc_curve",
":",
"An",
"SFrame",
"containing",
"information",
"needed",
"for",
"an",
"ROC",
"curve",
"verbose",
":",
"bool",
"optional",
"If",
"True",
"prints",
"prediction",
"progress",
"."
] | python | train |
njsmith/colorspacious | colorspacious/basics.py | https://github.com/njsmith/colorspacious/blob/59e0226003fb1b894597c5081e8ca5a3aa4fcefd/colorspacious/basics.py#L44-L55 | def XYZ100_to_sRGB1_linear(XYZ100):
"""Convert XYZ to linear sRGB, where XYZ is normalized so that reference
white D65 is X=95.05, Y=100, Z=108.90 and sRGB is on the 0-1 scale. Linear
sRGB has a linear relationship to actual light, so it is an appropriate
space for simulating light (e.g. for alpha blending).
"""
XYZ100 = np.asarray(XYZ100, dtype=float)
# this is broadcasting matrix * array-of-vectors, where the vector is the
# last dim
RGB_linear = np.einsum("...ij,...j->...i", XYZ100_to_sRGB1_matrix, XYZ100 / 100)
return RGB_linear | [
"def",
"XYZ100_to_sRGB1_linear",
"(",
"XYZ100",
")",
":",
"XYZ100",
"=",
"np",
".",
"asarray",
"(",
"XYZ100",
",",
"dtype",
"=",
"float",
")",
"# this is broadcasting matrix * array-of-vectors, where the vector is the",
"# last dim",
"RGB_linear",
"=",
"np",
".",
"einsum",
"(",
"\"...ij,...j->...i\"",
",",
"XYZ100_to_sRGB1_matrix",
",",
"XYZ100",
"/",
"100",
")",
"return",
"RGB_linear"
] | Convert XYZ to linear sRGB, where XYZ is normalized so that reference
white D65 is X=95.05, Y=100, Z=108.90 and sRGB is on the 0-1 scale. Linear
sRGB has a linear relationship to actual light, so it is an appropriate
space for simulating light (e.g. for alpha blending). | [
"Convert",
"XYZ",
"to",
"linear",
"sRGB",
"where",
"XYZ",
"is",
"normalized",
"so",
"that",
"reference",
"white",
"D65",
"is",
"X",
"=",
"95",
".",
"05",
"Y",
"=",
"100",
"Z",
"=",
"108",
".",
"90",
"and",
"sRGB",
"is",
"on",
"the",
"0",
"-",
"1",
"scale",
".",
"Linear",
"sRGB",
"has",
"a",
"linear",
"relationship",
"to",
"actual",
"light",
"so",
"it",
"is",
"an",
"appropriate",
"space",
"for",
"simulating",
"light",
"(",
"e",
".",
"g",
".",
"for",
"alpha",
"blending",
")",
"."
] | python | train |
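The einsum signature "...ij,...j->...i" is a batched matrix–vector product over the last axis, broadcasting the 3×3 matrix across any leading image dimensions. A sketch verifying it against an explicit per-pixel product (the matrix here is random, not the real sRGB matrix):

```python
import numpy as np

M = np.random.rand(3, 3)             # stand-in for XYZ100_to_sRGB1_matrix
XYZ = np.random.rand(4, 5, 3) * 100  # a 4x5 "image" of XYZ vectors

out = np.einsum("...ij,...j->...i", M, XYZ / 100)

expected = M @ (XYZ[2, 3] / 100)     # same product for one pixel
assert np.allclose(out[2, 3], expected)
print(out.shape)  # (4, 5, 3)
```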
tehmaze/ipcalc | ipcalc.py | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L673-L677 | def check_collision(self, other):
"""Check another network against the given network."""
other = Network(other)
return self.network_long() <= other.network_long() <= self.broadcast_long() or \
other.network_long() <= self.network_long() <= other.broadcast_long() | [
"def",
"check_collision",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"Network",
"(",
"other",
")",
"return",
"self",
".",
"network_long",
"(",
")",
"<=",
"other",
".",
"network_long",
"(",
")",
"<=",
"self",
".",
"broadcast_long",
"(",
")",
"or",
"other",
".",
"network_long",
"(",
")",
"<=",
"self",
".",
"network_long",
"(",
")",
"<=",
"other",
".",
"broadcast_long",
"(",
")"
] | Check another network against the given network. | [
"Check",
"another",
"network",
"against",
"the",
"given",
"network",
"."
] | python | train |
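The collision test is plain interval arithmetic: two ranges overlap iff either one's start lies inside the other. The same predicate with integers standing in for the long network/broadcast addresses:

```python
def ranges_collide(a_start, a_end, b_start, b_end):
    # Mirrors check_collision: start-inside-the-other, tested both ways.
    return a_start <= b_start <= a_end or b_start <= a_start <= b_end

print(ranges_collide(10, 20, 15, 30))  # True  (partial overlap)
print(ranges_collide(10, 20, 21, 30))  # False (disjoint)
print(ranges_collide(10, 30, 15, 20))  # True  (containment)
```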
almarklein/pyelastix | pyelastix.py | https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L189-L222 | def get_tempdir():
""" Get the temporary directory where pyelastix stores its temporary
files. The directory is specific to the current process and the
calling thread. Generally, the user does not need this; directories
are automatically cleaned up. Though Elastix log files are also
written here.
"""
tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
# Make sure it exists
if not os.path.isdir(tempdir):
os.makedirs(tempdir)
# Clean up all directories for which the process no longer exists
for fname in os.listdir(tempdir):
dirName = os.path.join(tempdir, fname)
# Check if is right kind of dir
if not (os.path.isdir(dirName) and fname.startswith('id_')):
continue
# Get pid and check if its running
try:
pid = int(fname.split('_')[1])
except Exception:
continue
if not _is_pid_running(pid):
_clear_dir(dirName)
# Select dir that included process and thread id
tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
else threading.currentThread())
dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
if not os.path.isdir(dir):
os.mkdir(dir)
return dir | [
"def",
"get_tempdir",
"(",
")",
":",
"tempdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"'pyelastix'",
")",
"# Make sure it exists",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"tempdir",
")",
":",
"os",
".",
"makedirs",
"(",
"tempdir",
")",
"# Clean up all directories for which the process no longer exists",
"for",
"fname",
"in",
"os",
".",
"listdir",
"(",
"tempdir",
")",
":",
"dirName",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"fname",
")",
"# Check if is right kind of dir",
"if",
"not",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"dirName",
")",
"and",
"fname",
".",
"startswith",
"(",
"'id_'",
")",
")",
":",
"continue",
"# Get pid and check if its running",
"try",
":",
"pid",
"=",
"int",
"(",
"fname",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
"]",
")",
"except",
"Exception",
":",
"continue",
"if",
"not",
"_is_pid_running",
"(",
"pid",
")",
":",
"_clear_dir",
"(",
"dirName",
")",
"# Select dir that included process and thread id",
"tid",
"=",
"id",
"(",
"threading",
".",
"current_thread",
"(",
")",
"if",
"hasattr",
"(",
"threading",
",",
"'current_thread'",
")",
"else",
"threading",
".",
"currentThread",
"(",
")",
")",
"dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"'id_%i_%i'",
"%",
"(",
"os",
".",
"getpid",
"(",
")",
",",
"tid",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dir",
")",
":",
"os",
".",
"mkdir",
"(",
"dir",
")",
"return",
"dir"
] | Get the temporary directory where pyelastix stores its temporary
files. The directory is specific to the current process and the
calling thread. Generally, the user does not need this; directories
are automatically cleaned up. Though Elastix log files are also
written here. | [
"Get",
"the",
"temporary",
"directory",
"where",
"pyelastix",
"stores",
"its",
"temporary",
"files",
".",
"The",
"directory",
"is",
"specific",
"to",
"the",
"current",
"process",
"and",
"the",
"calling",
"thread",
".",
"Generally",
"the",
"user",
"does",
"not",
"need",
"this",
";",
"directories",
"are",
"automatically",
"cleaned",
"up",
".",
"Though",
"Elastix",
"log",
"files",
"are",
"also",
"written",
"here",
"."
] | python | train |
Robpol86/sphinxcontrib-imgur | sphinxcontrib/imgur/sphinx_api.py | https://github.com/Robpol86/sphinxcontrib-imgur/blob/5c178481d645147d10acb096793eda41c12c57af/sphinxcontrib/imgur/sphinx_api.py#L114-L140 | def event_doctree_resolved(app, doctree, _):
"""Called by Sphinx after phase 3 (resolving).
* Replace Imgur text nodes with data from the Sphinx cache.
* Call finalizer for ImgurImageNode nodes.
:param sphinx.application.Sphinx app: Sphinx application object.
:param docutils.nodes.document doctree: Tree of docutils nodes.
:param _: Not used.
"""
album_cache = app.builder.env.imgur_album_cache
image_cache = app.builder.env.imgur_image_cache
for node in doctree.traverse(ImgurTextNode):
cache = album_cache if node.album else image_cache
if node.name == 'imgur-description':
text = cache[node.imgur_id].description
else:
text = cache[node.imgur_id].title
node.replace_self([docutils.nodes.Text(text)])
for node in doctree.traverse(ImgurImageNode):
if node.album and not album_cache[node.imgur_id].cover_id:
app.warn('Album cover Imgur ID for {} not available in local cache.'.format(node.imgur_id))
node.replace_self([docutils.nodes.Text('')])
else:
node.finalize(album_cache, image_cache, lambda m: app.builder.env.warn_node(m, node)) | [
"def",
"event_doctree_resolved",
"(",
"app",
",",
"doctree",
",",
"_",
")",
":",
"album_cache",
"=",
"app",
".",
"builder",
".",
"env",
".",
"imgur_album_cache",
"image_cache",
"=",
"app",
".",
"builder",
".",
"env",
".",
"imgur_image_cache",
"for",
"node",
"in",
"doctree",
".",
"traverse",
"(",
"ImgurTextNode",
")",
":",
"cache",
"=",
"album_cache",
"if",
"node",
".",
"album",
"else",
"image_cache",
"if",
"node",
".",
"name",
"==",
"'imgur-description'",
":",
"text",
"=",
"cache",
"[",
"node",
".",
"imgur_id",
"]",
".",
"description",
"else",
":",
"text",
"=",
"cache",
"[",
"node",
".",
"imgur_id",
"]",
".",
"title",
"node",
".",
"replace_self",
"(",
"[",
"docutils",
".",
"nodes",
".",
"Text",
"(",
"text",
")",
"]",
")",
"for",
"node",
"in",
"doctree",
".",
"traverse",
"(",
"ImgurImageNode",
")",
":",
"if",
"node",
".",
"album",
"and",
"not",
"album_cache",
"[",
"node",
".",
"imgur_id",
"]",
".",
"cover_id",
":",
"app",
".",
"warn",
"(",
"'Album cover Imgur ID for {} not available in local cache.'",
".",
"format",
"(",
"node",
".",
"imgur_id",
")",
")",
"node",
".",
"replace_self",
"(",
"[",
"docutils",
".",
"nodes",
".",
"Text",
"(",
"''",
")",
"]",
")",
"else",
":",
"node",
".",
"finalize",
"(",
"album_cache",
",",
"image_cache",
",",
"lambda",
"m",
":",
"app",
".",
"builder",
".",
"env",
".",
"warn_node",
"(",
"m",
",",
"node",
")",
")"
] | Called by Sphinx after phase 3 (resolving).
* Replace Imgur text nodes with data from the Sphinx cache.
* Call finalizer for ImgurImageNode nodes.
:param sphinx.application.Sphinx app: Sphinx application object.
:param docutils.nodes.document doctree: Tree of docutils nodes.
:param _: Not used. | [
"Called",
"by",
"Sphinx",
"after",
"phase",
"3",
"(",
"resolving",
")",
"."
] | python | train |
jpscaletti/pyceo | pyceo/params.py | https://github.com/jpscaletti/pyceo/blob/7f37eaf8e557d25f8e54634176139e0aad84b8df/pyceo/params.py#L20-L30 | def param(name, help=""):
"""Decorator that add a parameter to the wrapped command or function."""
def decorator(func):
params = getattr(func, "params", [])
_param = Param(name, help)
# Insert at the beginning so the apparent order is preserved
params.insert(0, _param)
func.params = params
return func
return decorator | [
"def",
"param",
"(",
"name",
",",
"help",
"=",
"\"\"",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"params",
"=",
"getattr",
"(",
"func",
",",
"\"params\"",
",",
"[",
"]",
")",
"_param",
"=",
"Param",
"(",
"name",
",",
"help",
")",
"# Insert at the beginning so the apparent order is preserved",
"params",
".",
"insert",
"(",
"0",
",",
"_param",
")",
"func",
".",
"params",
"=",
"params",
"return",
"func",
"return",
"decorator"
] | Decorator that adds a parameter to the wrapped command or function. | [
"Decorator",
"that",
"add",
"a",
"parameter",
"to",
"the",
"wrapped",
"command",
"or",
"function",
"."
] | python | train |
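Because each application inserts at position 0 and decorators apply bottom-up, stacked `@param` lines come out in their declaration order. A self-contained sketch (the namedtuple is a stand-in for pyceo's own Param class):

```python
from collections import namedtuple

Param = namedtuple("Param", ["name", "help"])  # stand-in for pyceo's Param

def param(name, help=""):
    def decorator(func):
        params = getattr(func, "params", [])
        params.insert(0, Param(name, help))  # prepend to preserve apparent order
        func.params = params
        return func
    return decorator

@param("host", "server hostname")
@param("port", "server port")
def serve():
    pass

print([p.name for p in serve.params])  # ['host', 'port']
```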
theno/fabsetup | fabsetup/fabfile/setup/__init__.py | https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/fabfile/setup/__init__.py#L378-L423 | def powerline_shell():
'''Install and set up powerline-shell prompt.
More infos:
* https://github.com/banga/powerline-shell
* https://github.com/ohnonot/powerline-shell
* https://askubuntu.com/questions/283908/how-can-i-install-and-use-powerline-plugin
'''
assert env.host == 'localhost', 'This task cannot run on a remote host'
# set up fonts for powerline
checkup_git_repo_legacy('https://github.com/powerline/fonts.git',
name='powerline-fonts')
run('cd ~/repos/powerline-fonts && ./install.sh')
# run('fc-cache -vf ~/.local/share/fonts')
prefix = 'URxvt*font: '
from config import fontlist
line = prefix + fontlist
update_or_append_line(filename='~/.Xresources', prefix=prefix,
new_line=line)
if env.host_string == 'localhost':
run('xrdb ~/.Xresources')
# set up powerline-shell
checkup_git_repo_legacy('https://github.com/banga/powerline-shell.git')
# checkup_git_repo_legacy('https://github.com/ohnonot/powerline-shell.git')
install_file_legacy(path='~/repos/powerline-shell/config.py')
run('cd ~/repos/powerline-shell && ./install.py')
question = 'Use normal question mark (u003F) for untracked files instead '\
'of fancy "black question mark ornament" (u2753, which may not work)?'
if query_yes_no(question, default='yes'):
filename = '~/repos/powerline-shell/powerline-shell.py'
update_or_append_line(filename, keep_backup=False,
prefix=" 'untracked': u'\u2753',",
new_line=" 'untracked': u'\u003F',")
run(flo('chmod u+x {filename}'))
bash_snippet = '~/.bashrc_powerline_shell'
install_file_legacy(path=bash_snippet)
prefix = flo('if [ -f {bash_snippet} ]; ')
enabler = flo('if [ -f {bash_snippet} ]; then source {bash_snippet}; fi')
uncomment_or_update_or_append_line(filename='~/.bashrc', prefix=prefix,
new_line=enabler) | [
"def",
"powerline_shell",
"(",
")",
":",
"assert",
"env",
".",
"host",
"==",
"'localhost'",
",",
"'This task cannot run on a remote host'",
"# set up fonts for powerline",
"checkup_git_repo_legacy",
"(",
"'https://github.com/powerline/fonts.git'",
",",
"name",
"=",
"'powerline-fonts'",
")",
"run",
"(",
"'cd ~/repos/powerline-fonts && ./install.sh'",
")",
"# run('fc-cache -vf ~/.local/share/fonts')",
"prefix",
"=",
"'URxvt*font: '",
"from",
"config",
"import",
"fontlist",
"line",
"=",
"prefix",
"+",
"fontlist",
"update_or_append_line",
"(",
"filename",
"=",
"'~/.Xresources'",
",",
"prefix",
"=",
"prefix",
",",
"new_line",
"=",
"line",
")",
"if",
"env",
".",
"host_string",
"==",
"'localhost'",
":",
"run",
"(",
"'xrdb ~/.Xresources'",
")",
"# set up powerline-shell",
"checkup_git_repo_legacy",
"(",
"'https://github.com/banga/powerline-shell.git'",
")",
"# checkup_git_repo_legacy('https://github.com/ohnonot/powerline-shell.git')",
"install_file_legacy",
"(",
"path",
"=",
"'~/repos/powerline-shell/config.py'",
")",
"run",
"(",
"'cd ~/repos/powerline-shell && ./install.py'",
")",
"question",
"=",
"'Use normal question mark (u003F) for untracked files instead '",
"'of fancy \"black question mark ornament\" (u2753, which may not work)?'",
"if",
"query_yes_no",
"(",
"question",
",",
"default",
"=",
"'yes'",
")",
":",
"filename",
"=",
"'~/repos/powerline-shell/powerline-shell.py'",
"update_or_append_line",
"(",
"filename",
",",
"keep_backup",
"=",
"False",
",",
"prefix",
"=",
"\" 'untracked': u'\\u2753',\"",
",",
"new_line",
"=",
"\" 'untracked': u'\\u003F',\"",
")",
"run",
"(",
"flo",
"(",
"'chmod u+x {filename}'",
")",
")",
"bash_snippet",
"=",
"'~/.bashrc_powerline_shell'",
"install_file_legacy",
"(",
"path",
"=",
"bash_snippet",
")",
"prefix",
"=",
"flo",
"(",
"'if [ -f {bash_snippet} ]; '",
")",
"enabler",
"=",
"flo",
"(",
"'if [ -f {bash_snippet} ]; then source {bash_snippet}; fi'",
")",
"uncomment_or_update_or_append_line",
"(",
"filename",
"=",
"'~/.bashrc'",
",",
"prefix",
"=",
"prefix",
",",
"new_line",
"=",
"enabler",
")"
] | Install and set up powerline-shell prompt.
More infos:
* https://github.com/banga/powerline-shell
* https://github.com/ohnonot/powerline-shell
* https://askubuntu.com/questions/283908/how-can-i-install-and-use-powerline-plugin | [
"Install",
"and",
"set",
"up",
"powerline",
"-",
"shell",
"prompt",
"."
] | python | train |
twilio/twilio-python | twilio/rest/autopilot/v1/assistant/query.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/query.py#L70-L96 | def list(self, language=values.unset, model_build=values.unset,
status=values.unset, limit=None, page_size=None):
"""
Lists QueryInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode language: The ISO language-country string that specifies the language used by the Query resources to read
:param unicode model_build: The SID or unique name of the Model Build to be queried
:param unicode status: The status of the resources to read
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.autopilot.v1.assistant.query.QueryInstance]
"""
return list(self.stream(
language=language,
model_build=model_build,
status=status,
limit=limit,
page_size=page_size,
)) | [
"def",
"list",
"(",
"self",
",",
"language",
"=",
"values",
".",
"unset",
",",
"model_build",
"=",
"values",
".",
"unset",
",",
"status",
"=",
"values",
".",
"unset",
",",
"limit",
"=",
"None",
",",
"page_size",
"=",
"None",
")",
":",
"return",
"list",
"(",
"self",
".",
"stream",
"(",
"language",
"=",
"language",
",",
"model_build",
"=",
"model_build",
",",
"status",
"=",
"status",
",",
"limit",
"=",
"limit",
",",
"page_size",
"=",
"page_size",
",",
")",
")"
] | Lists QueryInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode language: The ISO language-country string that specifies the language used by the Query resources to read
:param unicode model_build: The SID or unique name of the Model Build to be queried
:param unicode status: The status of the resources to read
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.autopilot.v1.assistant.query.QueryInstance] | [
"Lists",
"QueryInstance",
"records",
"from",
"the",
"API",
"as",
"a",
"list",
".",
"Unlike",
"stream",
"()",
"this",
"operation",
"is",
"eager",
"and",
"will",
"load",
"limit",
"records",
"into",
"memory",
"before",
"returning",
"."
] | python | train |
DLR-RM/RAFCON | source/rafcon/gui/controllers/state_editor/scoped_variable_list.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_editor/scoped_variable_list.py#L150-L157 | def remove_core_element(self, model):
"""Remove respective core element of handed scoped variable model
:param ScopedVariableModel model: Scoped variable model which core element should be removed
:return:
"""
assert model.scoped_variable.parent is self.model.state
gui_helper_state_machine.delete_core_element_of_model(model) | [
"def",
"remove_core_element",
"(",
"self",
",",
"model",
")",
":",
"assert",
"model",
".",
"scoped_variable",
".",
"parent",
"is",
"self",
".",
"model",
".",
"state",
"gui_helper_state_machine",
".",
"delete_core_element_of_model",
"(",
"model",
")"
] | Remove respective core element of handed scoped variable model
:param ScopedVariableModel model: Scoped variable model which core element should be removed
:return: | [
"Remove",
"respective",
"core",
"element",
"of",
"handed",
"scoped",
"variable",
"model"
] | python | train |
chdzq/ARPAbetAndIPAConvertor | arpabetandipaconvertor/model/syllable.py | https://github.com/chdzq/ARPAbetAndIPAConvertor/blob/e8b2fdbb5b7134c4f779f4d6dcd5dc30979a0a26/arpabetandipaconvertor/model/syllable.py#L86-L98 | def translate_to_international_phonetic_alphabet(self, hide_stress_mark=False):
'''
Convert to the International Phonetic Alphabet. The stress mark needs to be hidden when there is only one vowel.
:param hide_stress_mark:
:return:
'''
translations = self.stress.mark_ipa() if (not hide_stress_mark) and self.have_vowel else ""
for phoneme in self._phoneme_list:
translations += phoneme.ipa
return translations | [
"def",
"translate_to_international_phonetic_alphabet",
"(",
"self",
",",
"hide_stress_mark",
"=",
"False",
")",
":",
"translations",
"=",
"self",
".",
"stress",
".",
"mark_ipa",
"(",
")",
"if",
"(",
"not",
"hide_stress_mark",
")",
"and",
"self",
".",
"have_vowel",
"else",
"\"\"",
"for",
"phoneme",
"in",
"self",
".",
"_phoneme_list",
":",
"translations",
"+=",
"phoneme",
".",
"ipa",
"return",
"translations"
] | Convert to the International Phonetic Alphabet. The stress mark needs to be hidden when there is only one vowel.
:param hide_stress_mark:
:return: | [
"转换成国际音标。只要一个元音的时候需要隐藏重音标识",
":",
"param",
"hide_stress_mark",
":",
":",
"return",
":"
] | python | train |
bear/parsedatetime | parsedatetime/__init__.py | https://github.com/bear/parsedatetime/blob/830775dc5e36395622b41f12317f5e10c303d3a2/parsedatetime/__init__.py#L685-L736 | def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
"""
Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value
set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value
set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week
"""
diffBase = wkdy - wd
origOffset = offset
if offset == 2:
# no modifier is present.
# i.e. string to be parsed is just DOW
if wkdy * style > wd * style or \
currentDayStyle and wkdy == wd:
# wkdy located in current week
offset = 0
elif style in (-1, 1):
# wkdy located in last (-1) or next (1) week
offset = style
else:
# invalid style, or should raise error?
offset = 0
# offset = -1 means last week
# offset = 0 means current week
# offset = 1 means next week
diff = diffBase + 7 * offset
if style == 1 and diff < -7:
diff += 7
elif style == -1 and diff > 7:
diff -= 7
debug and log.debug("wd %s, wkdy %s, offset %d, "
"style %d, currentDayStyle %d",
wd, wkdy, origOffset, style, currentDayStyle)
return diff | [
"def",
"_CalculateDOWDelta",
"(",
"self",
",",
"wd",
",",
"wkdy",
",",
"offset",
",",
"style",
",",
"currentDayStyle",
")",
":",
"diffBase",
"=",
"wkdy",
"-",
"wd",
"origOffset",
"=",
"offset",
"if",
"offset",
"==",
"2",
":",
"# no modifier is present.",
"# i.e. string to be parsed is just DOW",
"if",
"wkdy",
"*",
"style",
">",
"wd",
"*",
"style",
"or",
"currentDayStyle",
"and",
"wkdy",
"==",
"wd",
":",
"# wkdy located in current week",
"offset",
"=",
"0",
"elif",
"style",
"in",
"(",
"-",
"1",
",",
"1",
")",
":",
"# wkdy located in last (-1) or next (1) week",
"offset",
"=",
"style",
"else",
":",
"# invalid style, or should raise error?",
"offset",
"=",
"0",
"# offset = -1 means last week",
"# offset = 0 means current week",
"# offset = 1 means next week",
"diff",
"=",
"diffBase",
"+",
"7",
"*",
"offset",
"if",
"style",
"==",
"1",
"and",
"diff",
"<",
"-",
"7",
":",
"diff",
"+=",
"7",
"elif",
"style",
"==",
"-",
"1",
"and",
"diff",
">",
"7",
":",
"diff",
"-=",
"7",
"debug",
"and",
"log",
".",
"debug",
"(",
"\"wd %s, wkdy %s, offset %d, \"",
"\"style %d, currentDayStyle %d\"",
",",
"wd",
",",
"wkdy",
",",
"origOffset",
",",
"style",
",",
"currentDayStyle",
")",
"return",
"diff"
] | Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value
set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value
set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week | [
"Based",
"on",
"the",
"C",
"{",
"style",
"}",
"and",
"C",
"{",
"currentDayStyle",
"}",
"determine",
"what",
"day",
"-",
"of",
"-",
"week",
"value",
"is",
"to",
"be",
"returned",
"."
] | python | train |
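The arithmetic is easier to see with numbers: diff = (wkdy - wd) + 7 * offset, where a bare weekday (offset == 2) is first resolved to the current week or the style's week. A dependency-free replica of just that decision logic (weekday numbering 0=Monday assumed, as in datetime.weekday()):

```python
def dow_delta(wd, wkdy, offset, style, current_day_style):
    diff_base = wkdy - wd
    if offset == 2:  # bare day-of-week, no "last/next" modifier
        if wkdy * style > wd * style or (current_day_style and wkdy == wd):
            offset = 0       # target falls in the current week
        elif style in (-1, 1):
            offset = style   # push into last (-1) / next (+1) week
        else:
            offset = 0
    diff = diff_base + 7 * offset
    if style == 1 and diff < -7:
        diff += 7
    elif style == -1 and diff > 7:
        diff -= 7
    return diff

# Today is Wednesday (2); "Friday" (4) under next-week style is 2 days ahead.
print(dow_delta(2, 4, 2, 1, 1))  # 2
# "Monday" (0) already passed, so it is pushed 5 days into next week.
print(dow_delta(2, 0, 2, 1, 1))  # 5
```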
booktype/python-ooxml | ooxml/serialize.py | https://github.com/booktype/python-ooxml/blob/b56990a5bee2e1bc46839cec5161ff3726dc4d87/ooxml/serialize.py#L342-L355 | def has_style(node):
"""Tells us if node element has defined styling.
:Args:
- node (:class:`ooxml.doc.Element`): Element
:Returns:
True or False
"""
elements = ['b', 'i', 'u', 'strike', 'color', 'jc', 'sz', 'ind', 'superscript', 'subscript', 'small_caps']
return any([True for elem in elements if elem in node.rpr]) | [
"def",
"has_style",
"(",
"node",
")",
":",
"elements",
"=",
"[",
"'b'",
",",
"'i'",
",",
"'u'",
",",
"'strike'",
",",
"'color'",
",",
"'jc'",
",",
"'sz'",
",",
"'ind'",
",",
"'superscript'",
",",
"'subscript'",
",",
"'small_caps'",
"]",
"return",
"any",
"(",
"[",
"True",
"for",
"elem",
"in",
"elements",
"if",
"elem",
"in",
"node",
".",
"rpr",
"]",
")"
] | Tells us if node element has defined styling.
:Args:
- node (:class:`ooxml.doc.Element`): Element
:Returns:
True or False | [
"Tells",
"us",
"if",
"node",
"element",
"has",
"defined",
"styling",
"."
] | python | train |
swistakm/graceful | src/graceful/errors.py | https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/errors.py#L23-L43 | def _get_description(self):
"""Return human readable description error description.
This description should explain everything that went wrong during
deserialization.
"""
return ", ".join([
part for part in [
"missing: {}".format(self.missing) if self.missing else "",
(
"forbidden: {}".format(self.forbidden)
if self.forbidden else ""
),
"invalid: {}:".format(self.invalid) if self.invalid else "",
(
"failed to parse: {}".format(self.failed)
if self.failed else ""
)
] if part
]) | [
"def",
"_get_description",
"(",
"self",
")",
":",
"return",
"\", \"",
".",
"join",
"(",
"[",
"part",
"for",
"part",
"in",
"[",
"\"missing: {}\"",
".",
"format",
"(",
"self",
".",
"missing",
")",
"if",
"self",
".",
"missing",
"else",
"\"\"",
",",
"(",
"\"forbidden: {}\"",
".",
"format",
"(",
"self",
".",
"forbidden",
")",
"if",
"self",
".",
"forbidden",
"else",
"\"\"",
")",
",",
"\"invalid: {}:\"",
".",
"format",
"(",
"self",
".",
"invalid",
")",
"if",
"self",
".",
"invalid",
"else",
"\"\"",
",",
"(",
"\"failed to parse: {}\"",
".",
"format",
"(",
"self",
".",
"failed",
")",
"if",
"self",
".",
"failed",
"else",
"\"\"",
")",
"]",
"if",
"part",
"]",
")"
] | Return a human readable error description.
This description should explain everything that went wrong during
deserialization. | [
"Return",
"human",
"readable",
"description",
"error",
"description",
"."
] | python | train |
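The join-only-non-empty-parts idiom used by `_get_description`, shown standalone:

parts = ["missing: ['name']", "", "invalid: {'age'}", ""]
", ".join(part for part in parts if part)    # "missing: ['name'], invalid: {'age'}"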
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/memory_profiler.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L140-L156 | def _find_script(script_name):
""" Find the script.
If the input is not a file, then $PATH will be searched.
"""
if os.path.isfile(script_name):
return script_name
path = os.getenv('PATH', os.defpath).split(os.pathsep)
for dir in path:
if dir == '':
continue
fn = os.path.join(dir, script_name)
if os.path.isfile(fn):
return fn
print >> sys.stderr, 'Could not find script {0}'.format(script_name)
raise SystemExit(1) | [
"def",
"_find_script",
"(",
"script_name",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"script_name",
")",
":",
"return",
"script_name",
"path",
"=",
"os",
".",
"getenv",
"(",
"'PATH'",
",",
"os",
".",
"defpath",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"for",
"dir",
"in",
"path",
":",
"if",
"dir",
"==",
"''",
":",
"continue",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"script_name",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fn",
")",
":",
"return",
"fn",
"print",
">>",
"sys",
".",
"stderr",
",",
"'Could not find script {0}'",
".",
"format",
"(",
"script_name",
")",
"raise",
"SystemExit",
"(",
"1",
")"
] | Find the script.
If the input is not a file, then $PATH will be searched. | [
"Find",
"the",
"script",
"."
] | python | train |
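The record above is Python 2 code (`print >> sys.stderr`); on Python 3 the same $PATH lookup is available from the standard library:

import shutil

shutil.which('python3')    # absolute path of the executable, or None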
jborean93/smbprotocol | smbprotocol/connection.py | https://github.com/jborean93/smbprotocol/blob/d8eb00fbc824f97d0f4946e3f768c5e6c723499a/smbprotocol/connection.py#L909-L946 | def send(self, message, sid=None, tid=None, credit_request=None):
"""
Will send a message to the server that is passed in. The final
unencrypted header is returned to the function that called this.
:param message: An SMB message structure to send
:param sid: A session_id that the message is sent for
:param tid: A tree_id object that the message is sent for
:param credit_request: Specifies extra credits to be requested with the
SMB header
:return: Request of the message that was sent
"""
header = self._generate_packet_header(message, sid, tid,
credit_request)
# get the actual Session and TreeConnect object instead of the IDs
session = self.session_table.get(sid, None) if sid else None
tree = None
if tid and session:
if tid not in session.tree_connect_table.keys():
error_msg = "Cannot find Tree with the ID %d in the session " \
"tree table" % tid
raise smbprotocol.exceptions.SMBException(error_msg)
tree = session.tree_connect_table[tid]
if session and session.signing_required and session.signing_key:
self._sign(header, session)
request = Request(header)
self.outstanding_requests[header['message_id'].get_value()] = request
send_data = header.pack()
if (session and session.encrypt_data) or (tree and tree.encrypt_data):
send_data = self._encrypt(send_data, session)
self.transport.send(send_data)
return request | [
"def",
"send",
"(",
"self",
",",
"message",
",",
"sid",
"=",
"None",
",",
"tid",
"=",
"None",
",",
"credit_request",
"=",
"None",
")",
":",
"header",
"=",
"self",
".",
"_generate_packet_header",
"(",
"message",
",",
"sid",
",",
"tid",
",",
"credit_request",
")",
"# get the actual Session and TreeConnect object instead of the IDs",
"session",
"=",
"self",
".",
"session_table",
".",
"get",
"(",
"sid",
",",
"None",
")",
"if",
"sid",
"else",
"None",
"tree",
"=",
"None",
"if",
"tid",
"and",
"session",
":",
"if",
"tid",
"not",
"in",
"session",
".",
"tree_connect_table",
".",
"keys",
"(",
")",
":",
"error_msg",
"=",
"\"Cannot find Tree with the ID %d in the session \"",
"\"tree table\"",
"%",
"tid",
"raise",
"smbprotocol",
".",
"exceptions",
".",
"SMBException",
"(",
"error_msg",
")",
"tree",
"=",
"session",
".",
"tree_connect_table",
"[",
"tid",
"]",
"if",
"session",
"and",
"session",
".",
"signing_required",
"and",
"session",
".",
"signing_key",
":",
"self",
".",
"_sign",
"(",
"header",
",",
"session",
")",
"request",
"=",
"Request",
"(",
"header",
")",
"self",
".",
"outstanding_requests",
"[",
"header",
"[",
"'message_id'",
"]",
".",
"get_value",
"(",
")",
"]",
"=",
"request",
"send_data",
"=",
"header",
".",
"pack",
"(",
")",
"if",
"(",
"session",
"and",
"session",
".",
"encrypt_data",
")",
"or",
"(",
"tree",
"and",
"tree",
".",
"encrypt_data",
")",
":",
"send_data",
"=",
"self",
".",
"_encrypt",
"(",
"send_data",
",",
"session",
")",
"self",
".",
"transport",
".",
"send",
"(",
"send_data",
")",
"return",
"request"
] | Will send a message to the server that is passed in. The final
unencrypted header is returned to the function that called this.
:param message: An SMB message structure to send
:param sid: A session_id that the message is sent for
:param tid: A tree_id object that the message is sent for
:param credit_request: Specifies extra credits to be requested with the
SMB header
:return: Request of the message that was sent | [
"Will",
"send",
"a",
"message",
"to",
"the",
"server",
"that",
"is",
"passed",
"in",
".",
"The",
"final",
"unencrypted",
"header",
"is",
"returned",
"to",
"the",
"function",
"that",
"called",
"this",
"."
] | python | train |
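The gating at the end of `send` reduces to two predicates; a stand-in sketch with plain attributes rather than the real smbprotocol session/tree classes:

def needs_signing(session):
    return bool(session and session.signing_required and session.signing_key)

def needs_encryption(session, tree):
    return bool((session and session.encrypt_data) or
                (tree and tree.encrypt_data))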
wharris/dougrain | dougrain/builder.py | https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/builder.py#L74-L84 | def add_curie(self, name, href):
"""Adds a CURIE definition.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
This method returns self, allowing it to be chained with additional
method calls.
"""
self.draft.set_curie(self, name, href)
return self | [
"def",
"add_curie",
"(",
"self",
",",
"name",
",",
"href",
")",
":",
"self",
".",
"draft",
".",
"set_curie",
"(",
"self",
",",
"name",
",",
"href",
")",
"return",
"self"
] | Adds a CURIE definition.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
This method returns self, allowing it to be chained with additional
method calls. | [
"Adds",
"a",
"CURIE",
"definition",
"."
] | python | train |
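Because the method returns `self`, CURIE definitions chain; a sketch assuming dougrain's documented `Builder(href)` constructor:

from dougrain import Builder

builder = Builder('/orders/523')
builder.add_curie('acme', 'http://docs.acme.com/rels/{rel}') \
       .add_curie('ex', 'http://example.com/rels/{rel}')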
gaqzi/gocd-cli | gocd_cli/utils.py | https://github.com/gaqzi/gocd-cli/blob/ca8df8ec2274fdc69bce0619aa3794463c4f5a6f/gocd_cli/utils.py#L154-L172 | def get_go_server(settings=None):
"""Returns a `gocd.Server` configured by the `settings`
object.
Args:
settings: a `gocd_cli.settings.Settings` object.
Default: if falsey calls `get_settings`.
Returns:
gocd.Server: a configured gocd.Server instance
"""
if not settings:
settings = get_settings()
return gocd.Server(
settings.get('server'),
user=settings.get('user'),
password=settings.get('password'),
) | [
"def",
"get_go_server",
"(",
"settings",
"=",
"None",
")",
":",
"if",
"not",
"settings",
":",
"settings",
"=",
"get_settings",
"(",
")",
"return",
"gocd",
".",
"Server",
"(",
"settings",
".",
"get",
"(",
"'server'",
")",
",",
"user",
"=",
"settings",
".",
"get",
"(",
"'user'",
")",
",",
"password",
"=",
"settings",
".",
"get",
"(",
"'password'",
")",
",",
")"
] | Returns a `gocd.Server` configured by the `settings`
object.
Args:
settings: a `gocd_cli.settings.Settings` object.
Default: if falsey calls `get_settings`.
Returns:
gocd.Server: a configured gocd.Server instance | [
"Returns",
"a",
"gocd",
".",
"Server",
"configured",
"by",
"the",
"settings",
"object",
"."
] | python | train |
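Typical use with a stub standing in for gocd_cli's Settings object (only the `get` method is required by the function above):

class StubSettings:
    def get(self, key):
        return {'server': 'http://go:8153'}.get(key)    # user/password -> None

server = get_go_server(StubSettings())    # or get_go_server() to auto-load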
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L806-L826 | def MapEncoder(field_descriptor):
"""Encoder for extensions of MessageSet.
Maps always have a wire format like this:
message MapEntry {
key_type key = 1;
value_type value = 2;
}
repeated MapEntry map = N;
"""
# Can't look at field_descriptor.message_type._concrete_class because it may
# not have been initialized yet.
message_type = field_descriptor.message_type
encode_message = MessageEncoder(field_descriptor.number, False, False)
def EncodeField(write, value):
for key in value:
entry_msg = message_type._concrete_class(key=key, value=value[key])
encode_message(write, entry_msg)
return EncodeField | [
"def",
"MapEncoder",
"(",
"field_descriptor",
")",
":",
"# Can't look at field_descriptor.message_type._concrete_class because it may",
"# not have been initialized yet.",
"message_type",
"=",
"field_descriptor",
".",
"message_type",
"encode_message",
"=",
"MessageEncoder",
"(",
"field_descriptor",
".",
"number",
",",
"False",
",",
"False",
")",
"def",
"EncodeField",
"(",
"write",
",",
"value",
")",
":",
"for",
"key",
"in",
"value",
":",
"entry_msg",
"=",
"message_type",
".",
"_concrete_class",
"(",
"key",
"=",
"key",
",",
"value",
"=",
"value",
"[",
"key",
"]",
")",
"encode_message",
"(",
"write",
",",
"entry_msg",
")",
"return",
"EncodeField"
] | Encoder for map fields.
Maps always have a wire format like this:
message MapEntry {
key_type key = 1;
value_type value = 2;
}
repeated MapEntry map = N; | [
"Encoder",
"for",
"extensions",
"of",
"MessageSet",
"."
] | python | train |
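A stand-in for the generated MapEntry class, showing what `EncodeField` iterates over (illustration only, not protobuf-generated code):

class Entry:
    def __init__(self, key, value):
        self.key, self.value = key, value

entries = [Entry(k, v) for k, v in {'a': 1, 'b': 2}.items()]
[(e.key, e.value) for e in entries]    # [('a', 1), ('b', 2)]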
cloudsmith-io/cloudsmith-cli | cloudsmith_cli/cli/types.py | https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/cli/types.py#L13-L16 | def convert(self, value, *args, **kwargs): # pylint: disable=arguments-differ
"""Take a path with $HOME variables and resolve it to full path."""
value = os.path.expanduser(value)
return super(ExpandPath, self).convert(value, *args, **kwargs) | [
"def",
"convert",
"(",
"self",
",",
"value",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=arguments-differ",
"value",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"value",
")",
"return",
"super",
"(",
"ExpandPath",
",",
"self",
")",
".",
"convert",
"(",
"value",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Take a path with $HOME variables and resolve it to full path. | [
"Take",
"a",
"path",
"with",
"$HOME",
"variables",
"and",
"resolve",
"it",
"to",
"full",
"path",
"."
] | python | train |
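The expansion itself is just `os.path.expanduser`:

import os

os.path.expanduser('~/data')    # e.g. '/home/alice/data'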
log2timeline/plaso | plaso/storage/interface.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/interface.py#L1653-L1681 | def PrepareMergeTaskStorage(self, task):
"""Prepares a task storage for merging.
Moves the task storage file from the processed directory to the merge
directory.
Args:
task (Task): task.
Raises:
IOError: if the storage type is not supported or
if the storage file cannot be renamed.
OSError: if the storage type is not supported or
if the storage file cannot be renamed.
"""
if self._storage_type != definitions.STORAGE_TYPE_SESSION:
raise IOError('Unsupported storage type.')
merge_storage_file_path = self._GetMergeTaskStorageFilePath(task)
processed_storage_file_path = self._GetProcessedStorageFilePath(task)
task.storage_file_size = os.path.getsize(processed_storage_file_path)
try:
os.rename(processed_storage_file_path, merge_storage_file_path)
except OSError as exception:
raise IOError((
'Unable to rename task storage file: {0:s} with error: '
'{1!s}').format(processed_storage_file_path, exception)) | [
"def",
"PrepareMergeTaskStorage",
"(",
"self",
",",
"task",
")",
":",
"if",
"self",
".",
"_storage_type",
"!=",
"definitions",
".",
"STORAGE_TYPE_SESSION",
":",
"raise",
"IOError",
"(",
"'Unsupported storage type.'",
")",
"merge_storage_file_path",
"=",
"self",
".",
"_GetMergeTaskStorageFilePath",
"(",
"task",
")",
"processed_storage_file_path",
"=",
"self",
".",
"_GetProcessedStorageFilePath",
"(",
"task",
")",
"task",
".",
"storage_file_size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"processed_storage_file_path",
")",
"try",
":",
"os",
".",
"rename",
"(",
"processed_storage_file_path",
",",
"merge_storage_file_path",
")",
"except",
"OSError",
"as",
"exception",
":",
"raise",
"IOError",
"(",
"(",
"'Unable to rename task storage file: {0:s} with error: '",
"'{1!s}'",
")",
".",
"format",
"(",
"processed_storage_file_path",
",",
"exception",
")",
")"
] | Prepares a task storage for merging.
Moves the task storage file from the processed directory to the merge
directory.
Args:
task (Task): task.
Raises:
IOError: if the storage type is not supported or
if the storage file cannot be renamed.
OSError: if the storage type is not supported or
if the storage file cannot be renamed. | [
"Prepares",
"a",
"task",
"storage",
"for",
"merging",
"."
] | python | train |
swift-nav/libsbp | generator/sbpg/targets/protobuf.py | https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/protobuf.py#L39-L48 | def to_comment(value):
"""
Builds a comment.
"""
if value is None:
return
if len(value.split('\n')) == 1:
return "* " + value
else:
return '\n'.join([' * ' + l for l in value.split('\n')[:-1]]) | [
"def",
"to_comment",
"(",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"if",
"len",
"(",
"value",
".",
"split",
"(",
"'\\n'",
")",
")",
"==",
"1",
":",
"return",
"\"* \"",
"+",
"value",
"else",
":",
"return",
"'\\n'",
".",
"join",
"(",
"[",
"' * '",
"+",
"l",
"for",
"l",
"in",
"value",
".",
"split",
"(",
"'\\n'",
")",
"[",
":",
"-",
"1",
"]",
"]",
")"
] | Builds a comment. | [
"Builds",
"a",
"comment",
"."
] | python | train |
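Note the multi-line branch drops the last `split('\n')` element, so it assumes a trailing newline:

to_comment('single line')        # '* single line'
to_comment('first\nsecond\n')    # ' * first\n * second'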
rackerlabs/timid | timid/context.py | https://github.com/rackerlabs/timid/blob/b1c6aa159ab380a033740f4aa392cf0d125e0ac6/timid/context.py#L90-L109 | def template(self, string):
"""
Interpret a template string. This returns a callable taking one
argument--this context--and returning a string rendered from
the template.
:param string: The template string.
:returns: A callable of one argument that will return the
desired string.
"""
# Short-circuit if the template "string" isn't actually a
# string
if not isinstance(string, six.string_types):
return lambda ctxt: string
# Create the template and return the callable
tmpl = self._jinja.from_string(string)
return lambda ctxt: tmpl.render(ctxt.variables) | [
"def",
"template",
"(",
"self",
",",
"string",
")",
":",
"# Short-circuit if the template \"string\" isn't actually a",
"# string",
"if",
"not",
"isinstance",
"(",
"string",
",",
"six",
".",
"string_types",
")",
":",
"return",
"lambda",
"ctxt",
":",
"string",
"# Create the template and return the callable",
"tmpl",
"=",
"self",
".",
"_jinja",
".",
"from_string",
"(",
"string",
")",
"return",
"lambda",
"ctxt",
":",
"tmpl",
".",
"render",
"(",
"ctxt",
".",
"variables",
")"
] | Interpret a template string. This returns a callable taking one
argument--this context--and returning a string rendered from
the template.
:param string: The template string.
:returns: A callable of one argument that will return the
desired string. | [
"Interpret",
"a",
"template",
"string",
".",
"This",
"returns",
"a",
"callable",
"taking",
"one",
"argument",
"--",
"this",
"context",
"--",
"and",
"returning",
"a",
"string",
"rendered",
"from",
"the",
"template",
"."
] | python | test |
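A standalone version of the short-circuit-then-render pattern, with a plain Jinja2 environment in place of `self._jinja`:

import jinja2

def template(env, string):
    if not isinstance(string, str):
        return lambda variables: string    # non-strings pass through
    tmpl = env.from_string(string)
    return lambda variables: tmpl.render(variables)

template(jinja2.Environment(), 'hi {{ name }}')({'name': 'timid'})    # 'hi timid'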
fermiPy/fermipy | fermipy/jobs/file_archive.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/file_archive.py#L766-L775 | def write_table_file(self, table_file=None):
"""Write the table to self._table_file"""
if self._table is None:
raise RuntimeError("No table to write")
if table_file is not None:
self._table_file = table_file
if self._table_file is None:
raise RuntimeError("No output file specified for table")
write_tables_to_fits(self._table_file, [self._table], clobber=True,
namelist=['FILE_ARCHIVE']) | [
"def",
"write_table_file",
"(",
"self",
",",
"table_file",
"=",
"None",
")",
":",
"if",
"self",
".",
"_table",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"No table to write\"",
")",
"if",
"table_file",
"is",
"not",
"None",
":",
"self",
".",
"_table_file",
"=",
"table_file",
"if",
"self",
".",
"_table_file",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"No output file specified for table\"",
")",
"write_tables_to_fits",
"(",
"self",
".",
"_table_file",
",",
"[",
"self",
".",
"_table",
"]",
",",
"clobber",
"=",
"True",
",",
"namelist",
"=",
"[",
"'FILE_ARCHIVE'",
"]",
")"
] | Write the table to self._table_file | [
"Write",
"the",
"table",
"to",
"self",
".",
"_table_file"
] | python | train |
noxdafox/clipspy | clips/classes.py | https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L267-L269 | def name(self):
"""Class name."""
return ffi.string(lib.EnvGetDefclassName(self._env, self._cls)).decode() | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"ffi",
".",
"string",
"(",
"lib",
".",
"EnvGetDefclassName",
"(",
"self",
".",
"_env",
",",
"self",
".",
"_cls",
")",
")",
".",
"decode",
"(",
")"
] | Class name. | [
"Class",
"name",
"."
] | python | train |
todddeluca/python-vagrant | vagrant/__init__.py | https://github.com/todddeluca/python-vagrant/blob/83b26f9337b1f2cb6314210923bbd189e7c9199e/vagrant/__init__.py#L420-L494 | def status(self, vm_name=None):
'''
Return the results of a `vagrant status` call as a list of one or more
Status objects. A Status contains the following attributes:
- name: The VM name in a multi-vm environment. 'default' otherwise.
- state: The state of the underlying guest machine (i.e. VM).
- provider: the name of the VM provider, e.g. 'virtualbox'. None
if no provider is output by vagrant.
Example return values for a multi-VM environment:
[Status(name='web', state='not created', provider='virtualbox'),
Status(name='db', state='not created', provider='virtualbox')]
And for a single-VM environment:
[Status(name='default', state='not created', provider='virtualbox')]
Possible states include, but are not limited to (since new states are
being added as Vagrant evolves):
- 'not_created' if the vm is destroyed
- 'running' if the vm is up
- 'poweroff' if the vm is halted
- 'saved' if the vm is suspended
- 'aborted' if the vm is aborted
Implementation Details:
This command uses the `--machine-readable` flag added in
Vagrant 1.5, mapping the target name, state, and provider-name
to a Status object.
Example with no VM name and multi-vm Vagrantfile:
$ vagrant status --machine-readable
1424098924,web,provider-name,virtualbox
1424098924,web,state,running
1424098924,web,state-human-short,running
1424098924,web,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down forcefully%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply\nsuspend the virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA)\nsimply run `vagrant up`.
1424098924,db,provider-name,virtualbox
1424098924,db,state,not_created
1424098924,db,state-human-short,not created
1424098924,db,state-human-long,The environment has not yet been created. Run `vagrant up` to\ncreate the environment. If a machine is not created%!(VAGRANT_COMMA) only the\ndefault provider will be shown. So if a provider is not listed%!(VAGRANT_COMMA)\nthen the machine is not created for that environment.
Example with VM name:
$ vagrant status --machine-readable web
1424099027,web,provider-name,virtualbox
1424099027,web,state,running
1424099027,web,state-human-short,running
1424099027,web,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down forcefully%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply\nsuspend the virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA)\nsimply run `vagrant up`.
Example with no VM name and single-vm Vagrantfile:
$ vagrant status --machine-readable
1424100021,default,provider-name,virtualbox
1424100021,default,state,not_created
1424100021,default,state-human-short,not created
1424100021,default,state-human-long,The environment has not yet been created. Run `vagrant up` to\ncreate the environment. If a machine is not created%!(VAGRANT_COMMA) only the\ndefault provider will be shown. So if a provider is not listed%!(VAGRANT_COMMA)\nthen the machine is not created for that environment.
Error example with incorrect VM name:
$ vagrant status --machine-readable api
1424099042,,error-exit,Vagrant::Errors::MachineNotFound,The machine with the name 'api' was not found configured for\nthis Vagrant environment.
Error example with missing Vagrantfile:
$ vagrant status --machine-readable
1424099094,,error-exit,Vagrant::Errors::NoEnvironmentError,A Vagrant environment or target machine is required to run this\ncommand. Run `vagrant init` to create a new Vagrant environment. Or%!(VAGRANT_COMMA)\nget an ID of a target machine from `vagrant global-status` to run\nthis command on. A final option is to change to a directory with a\nVagrantfile and to try again.
'''
# machine-readable output are CSV lines
output = self._run_vagrant_command(['status', '--machine-readable', vm_name])
return self._parse_status(output) | [
"def",
"status",
"(",
"self",
",",
"vm_name",
"=",
"None",
")",
":",
"# machine-readable output are CSV lines",
"output",
"=",
"self",
".",
"_run_vagrant_command",
"(",
"[",
"'status'",
",",
"'--machine-readable'",
",",
"vm_name",
"]",
")",
"return",
"self",
".",
"_parse_status",
"(",
"output",
")"
] | Return the results of a `vagrant status` call as a list of one or more
Status objects. A Status contains the following attributes:
- name: The VM name in a multi-vm environment. 'default' otherwise.
- state: The state of the underlying guest machine (i.e. VM).
- provider: the name of the VM provider, e.g. 'virtualbox'. None
if no provider is output by vagrant.
Example return values for a multi-VM environment:
[Status(name='web', state='not created', provider='virtualbox'),
Status(name='db', state='not created', provider='virtualbox')]
And for a single-VM environment:
[Status(name='default', state='not created', provider='virtualbox')]
Possible states include, but are not limited to (since new states are
being added as Vagrant evolves):
- 'not_created' if the vm is destroyed
- 'running' if the vm is up
- 'poweroff' if the vm is halted
- 'saved' if the vm is suspended
- 'aborted' if the vm is aborted
Implementation Details:
This command uses the `--machine-readable` flag added in
Vagrant 1.5, mapping the target name, state, and provider-name
to a Status object.
Example with no VM name and multi-vm Vagrantfile:
$ vagrant status --machine-readable
1424098924,web,provider-name,virtualbox
1424098924,web,state,running
1424098924,web,state-human-short,running
1424098924,web,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down forcefully%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply\nsuspend the virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA)\nsimply run `vagrant up`.
1424098924,db,provider-name,virtualbox
1424098924,db,state,not_created
1424098924,db,state-human-short,not created
1424098924,db,state-human-long,The environment has not yet been created. Run `vagrant up` to\ncreate the environment. If a machine is not created%!(VAGRANT_COMMA) only the\ndefault provider will be shown. So if a provider is not listed%!(VAGRANT_COMMA)\nthen the machine is not created for that environment.
Example with VM name:
$ vagrant status --machine-readable web
1424099027,web,provider-name,virtualbox
1424099027,web,state,running
1424099027,web,state-human-short,running
1424099027,web,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down forcefully%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply\nsuspend the virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA)\nsimply run `vagrant up`.
Example with no VM name and single-vm Vagrantfile:
$ vagrant status --machine-readable
1424100021,default,provider-name,virtualbox
1424100021,default,state,not_created
1424100021,default,state-human-short,not created
1424100021,default,state-human-long,The environment has not yet been created. Run `vagrant up` to\ncreate the environment. If a machine is not created%!(VAGRANT_COMMA) only the\ndefault provider will be shown. So if a provider is not listed%!(VAGRANT_COMMA)\nthen the machine is not created for that environment.
Error example with incorrect VM name:
$ vagrant status --machine-readable api
1424099042,,error-exit,Vagrant::Errors::MachineNotFound,The machine with the name 'api' was not found configured for\nthis Vagrant environment.
Error example with missing Vagrantfile:
$ vagrant status --machine-readable
1424099094,,error-exit,Vagrant::Errors::NoEnvironmentError,A Vagrant environment or target machine is required to run this\ncommand. Run `vagrant init` to create a new Vagrant environment. Or%!(VAGRANT_COMMA)\nget an ID of a target machine from `vagrant global-status` to run\nthis command on. A final option is to change to a directory with a\nVagrantfile and to try again. | [
"Return",
"the",
"results",
"of",
"a",
"vagrant",
"status",
"call",
"as",
"a",
"list",
"of",
"one",
"or",
"more",
"Status",
"objects",
".",
"A",
"Status",
"contains",
"the",
"following",
"attributes",
":"
] | python | train |
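An approximate parse of the machine-readable lines quoted in the docstring (`_parse_status` itself is not shown; vagrant escapes embedded commas as %!(VAGRANT_COMMA), so a plain split is safe):

def parse_status(output):
    statuses = {}
    for line in output.splitlines():
        _, target, kind, data = line.split(',', 3)
        if target and kind in ('state', 'provider-name'):
            statuses.setdefault(target, {})[kind] = data
    return statuses

sample = ("1424098924,web,provider-name,virtualbox\n"
          "1424098924,web,state,running")
parse_status(sample)    # {'web': {'provider-name': 'virtualbox', 'state': 'running'}}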
fboender/ansible-cmdb | lib/mako/ast.py | https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/ast.py#L116-L167 | def get_argument_expressions(self, as_call=False):
"""Return the argument declarations of this FunctionDecl as a printable
list.
By default the return value is appropriate for writing in a ``def``;
set `as_call` to true to build arguments to be passed to the function
instead (assuming locals with the same names as the arguments exist).
"""
namedecls = []
# Build in reverse order, since defaults and slurpy args come last
argnames = self.argnames[::-1]
kwargnames = self.kwargnames[::-1]
defaults = self.defaults[::-1]
kwdefaults = self.kwdefaults[::-1]
# Named arguments
if self.kwargs:
namedecls.append("**" + kwargnames.pop(0))
for name in kwargnames:
# Keyword-only arguments must always be used by name, so even if
# this is a call, print out `foo=foo`
if as_call:
namedecls.append("%s=%s" % (name, name))
elif kwdefaults:
default = kwdefaults.pop(0)
if default is None:
# The AST always gives kwargs a default, since you can do
# `def foo(*, a=1, b, c=3)`
namedecls.append(name)
else:
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
else:
namedecls.append(name)
# Positional arguments
if self.varargs:
namedecls.append("*" + argnames.pop(0))
for name in argnames:
if as_call or not defaults:
namedecls.append(name)
else:
default = defaults.pop(0)
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
namedecls.reverse()
return namedecls | [
"def",
"get_argument_expressions",
"(",
"self",
",",
"as_call",
"=",
"False",
")",
":",
"namedecls",
"=",
"[",
"]",
"# Build in reverse order, since defaults and slurpy args come last",
"argnames",
"=",
"self",
".",
"argnames",
"[",
":",
":",
"-",
"1",
"]",
"kwargnames",
"=",
"self",
".",
"kwargnames",
"[",
":",
":",
"-",
"1",
"]",
"defaults",
"=",
"self",
".",
"defaults",
"[",
":",
":",
"-",
"1",
"]",
"kwdefaults",
"=",
"self",
".",
"kwdefaults",
"[",
":",
":",
"-",
"1",
"]",
"# Named arguments",
"if",
"self",
".",
"kwargs",
":",
"namedecls",
".",
"append",
"(",
"\"**\"",
"+",
"kwargnames",
".",
"pop",
"(",
"0",
")",
")",
"for",
"name",
"in",
"kwargnames",
":",
"# Keyword-only arguments must always be used by name, so even if",
"# this is a call, print out `foo=foo`",
"if",
"as_call",
":",
"namedecls",
".",
"append",
"(",
"\"%s=%s\"",
"%",
"(",
"name",
",",
"name",
")",
")",
"elif",
"kwdefaults",
":",
"default",
"=",
"kwdefaults",
".",
"pop",
"(",
"0",
")",
"if",
"default",
"is",
"None",
":",
"# The AST always gives kwargs a default, since you can do",
"# `def foo(*, a=1, b, c=3)`",
"namedecls",
".",
"append",
"(",
"name",
")",
"else",
":",
"namedecls",
".",
"append",
"(",
"\"%s=%s\"",
"%",
"(",
"name",
",",
"pyparser",
".",
"ExpressionGenerator",
"(",
"default",
")",
".",
"value",
"(",
")",
")",
")",
"else",
":",
"namedecls",
".",
"append",
"(",
"name",
")",
"# Positional arguments",
"if",
"self",
".",
"varargs",
":",
"namedecls",
".",
"append",
"(",
"\"*\"",
"+",
"argnames",
".",
"pop",
"(",
"0",
")",
")",
"for",
"name",
"in",
"argnames",
":",
"if",
"as_call",
"or",
"not",
"defaults",
":",
"namedecls",
".",
"append",
"(",
"name",
")",
"else",
":",
"default",
"=",
"defaults",
".",
"pop",
"(",
"0",
")",
"namedecls",
".",
"append",
"(",
"\"%s=%s\"",
"%",
"(",
"name",
",",
"pyparser",
".",
"ExpressionGenerator",
"(",
"default",
")",
".",
"value",
"(",
")",
")",
")",
"namedecls",
".",
"reverse",
"(",
")",
"return",
"namedecls"
] | Return the argument declarations of this FunctionDecl as a printable
list.
By default the return value is appropriate for writing in a ``def``;
set `as_call` to true to build arguments to be passed to the function
instead (assuming locals with the same names as the arguments exist). | [
"Return",
"the",
"argument",
"declarations",
"of",
"this",
"FunctionDecl",
"as",
"a",
"printable",
"list",
"."
] | python | train |
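For comparison, the standard library renders the same def-style signature directly:

import inspect

def foo(a, b=1, *args, c, d=3, **kw):
    pass

str(inspect.signature(foo))    # '(a, b=1, *args, c, d=3, **kw)'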
inasafe/inasafe | safe/utilities/qgis_utilities.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/qgis_utilities.py#L132-L174 | def display_warning_message_bar(
title=None,
message=None,
more_details=None,
button_text=tr('Show details ...'),
duration=8,
iface_object=iface):
"""
Display a warning message bar.
:param title: The title of the message bar.
:type title: basestring
:param message: The message inside the message bar.
:type message: basestring
:param more_details: The message inside the 'Show details' button.
:type more_details: basestring
:param button_text: The text of the button if 'more_details' is not empty.
:type button_text: basestring
:param duration: The duration for the display, default is 8 seconds.
:type duration: int
:param iface_object: The QGIS IFace instance. Note that we cannot
use qgis.utils.iface since it is not available in our
test environment.
:type iface_object: QgisInterface
"""
iface_object.messageBar().clearWidgets()
widget = iface_object.messageBar().createMessage(title, message)
if more_details:
button = QPushButton(widget)
button.setText(button_text)
button.pressed.connect(
lambda: display_warning_message_box(
title=title, message=more_details))
widget.layout().addWidget(button)
iface_object.messageBar().pushWidget(widget, Qgis.Warning, duration) | [
"def",
"display_warning_message_bar",
"(",
"title",
"=",
"None",
",",
"message",
"=",
"None",
",",
"more_details",
"=",
"None",
",",
"button_text",
"=",
"tr",
"(",
"'Show details ...'",
")",
",",
"duration",
"=",
"8",
",",
"iface_object",
"=",
"iface",
")",
":",
"iface_object",
".",
"messageBar",
"(",
")",
".",
"clearWidgets",
"(",
")",
"widget",
"=",
"iface_object",
".",
"messageBar",
"(",
")",
".",
"createMessage",
"(",
"title",
",",
"message",
")",
"if",
"more_details",
":",
"button",
"=",
"QPushButton",
"(",
"widget",
")",
"button",
".",
"setText",
"(",
"button_text",
")",
"button",
".",
"pressed",
".",
"connect",
"(",
"lambda",
":",
"display_warning_message_box",
"(",
"title",
"=",
"title",
",",
"message",
"=",
"more_details",
")",
")",
"widget",
".",
"layout",
"(",
")",
".",
"addWidget",
"(",
"button",
")",
"iface_object",
".",
"messageBar",
"(",
")",
".",
"pushWidget",
"(",
"widget",
",",
"Qgis",
".",
"Warning",
",",
"duration",
")"
] | Display a warning message bar.
:param title: The title of the message bar.
:type title: basestring
:param message: The message inside the message bar.
:type message: basestring
:param more_details: The message inside the 'Show details' button.
:type more_details: basestring
:param button_text: The text of the button if 'more_details' is not empty.
:type button_text: basestring
:param duration: The duration for the display, default is 8 seconds.
:type duration: int
:param iface_object: The QGIS IFace instance. Note that we cannot
use qgis.utils.iface since it is not available in our
test environment.
:type iface_object: QgisInterface | [
"Display",
"a",
"warning",
"message",
"bar",
"."
] | python | train |
BlueHack-Core/blueforge | blueforge/apis/s3.py | https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/apis/s3.py#L25-L29 | def download_file_from_bucket(self, bucket, file_path, key):
""" Download file from S3 Bucket """
with open(file_path, 'wb') as data:
self.__s3.download_fileobj(bucket, key, data)
return file_path | [
"def",
"download_file_from_bucket",
"(",
"self",
",",
"bucket",
",",
"file_path",
",",
"key",
")",
":",
"with",
"open",
"(",
"file_path",
",",
"'wb'",
")",
"as",
"data",
":",
"self",
".",
"__s3",
".",
"download_fileobj",
"(",
"bucket",
",",
"key",
",",
"data",
")",
"return",
"file_path"
] | Download file from S3 Bucket | [
"Download",
"file",
"from",
"S3",
"Bucket"
] | python | train |
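The wrapped call is boto3's `download_fileobj`; the direct equivalent, with placeholder bucket and key names:

import boto3

s3 = boto3.client('s3')
with open('/tmp/report.csv', 'wb') as data:
    s3.download_fileobj('my-bucket', 'reports/report.csv', data)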
sdispater/pendulum | pendulum/date.py | https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/date.py#L753-L764 | def _last_of_quarter(self, day_of_week=None):
"""
Modify to the last occurrence of a given day of the week
in the current quarter. If no day_of_week is provided,
modify to the last day of the quarter. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:type day_of_week: int or None
:rtype: Date
"""
return self.set(self.year, self.quarter * 3, 1).last_of("month", day_of_week) | [
"def",
"_last_of_quarter",
"(",
"self",
",",
"day_of_week",
"=",
"None",
")",
":",
"return",
"self",
".",
"set",
"(",
"self",
".",
"year",
",",
"self",
".",
"quarter",
"*",
"3",
",",
"1",
")",
".",
"last_of",
"(",
"\"month\"",
",",
"day_of_week",
")"
] | Modify to the last occurrence of a given day of the week
in the current quarter. If no day_of_week is provided,
modify to the last day of the quarter. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:type day_of_week: int or None
:rtype: Date | [
"Modify",
"to",
"the",
"last",
"occurrence",
"of",
"a",
"given",
"day",
"of",
"the",
"week",
"in",
"the",
"current",
"quarter",
".",
"If",
"no",
"day_of_week",
"is",
"provided",
"modify",
"to",
"the",
"last",
"day",
"of",
"the",
"quarter",
".",
"Use",
"the",
"supplied",
"consts",
"to",
"indicate",
"the",
"desired",
"day_of_week",
"ex",
".",
"pendulum",
".",
"MONDAY",
"."
] | python | train |
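Callers normally reach this through the public `last_of` API; assuming pendulum 2.x:

import pendulum

d = pendulum.date(2024, 2, 14)           # a Q1 date
d.last_of('quarter')                     # Date(2024, 3, 31)
d.last_of('quarter', pendulum.FRIDAY)    # Date(2024, 3, 29), last Friday of Q1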
django-danceschool/django-danceschool | danceschool/guestlist/models.py | https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/guestlist/models.py#L152-L191 | def getListForEvent(self, event=None):
''' Get the list of names associated with a particular event. '''
names = list(self.guestlistname_set.annotate(
guestType=Case(
When(notes__isnull=False, then=F('notes')),
default=Value(ugettext('Manually Added')),
output_field=models.CharField()
)
).values('firstName','lastName','guestType'))
# Component-by-component, OR append filters to an initial filter that always
# evaluates to False.
components = self.guestlistcomponent_set.all()
filters = Q(pk__isnull=True)
# Add prior staff based on the component rule.
for component in components:
if event and self.appliesToEvent(event):
filters = filters | self.getComponentFilters(component,event=event)
else:
filters = filters | self.getComponentFilters(component,dateTime=timezone.now())
# Add all event staff if that box is checked (no need for separate components)
if self.includeStaff and event and self.appliesToEvent(event):
filters = filters | Q(eventstaffmember__event=event)
# Execute the constructed query and add the names of staff
names += list(StaffMember.objects.filter(filters).annotate(
guestType=Case(
When(eventstaffmember__event=event, then=Concat(Value('Event Staff: '), 'eventstaffmember__category__name')),
default=Value(ugettext('Other Staff')),
output_field=models.CharField()
)
).distinct().values('firstName','lastName','guestType'))
if self.includeRegistrants and event and self.appliesToEvent(event):
names += list(Registration.objects.filter(eventregistration__event=event).annotate(
guestType=Value(_('Registered'),output_field=models.CharField())
).values('firstName','lastName','guestType'))
return names | [
"def",
"getListForEvent",
"(",
"self",
",",
"event",
"=",
"None",
")",
":",
"names",
"=",
"list",
"(",
"self",
".",
"guestlistname_set",
".",
"annotate",
"(",
"guestType",
"=",
"Case",
"(",
"When",
"(",
"notes__isnull",
"=",
"False",
",",
"then",
"=",
"F",
"(",
"'notes'",
")",
")",
",",
"default",
"=",
"Value",
"(",
"ugettext",
"(",
"'Manually Added'",
")",
")",
",",
"output_field",
"=",
"models",
".",
"CharField",
"(",
")",
")",
")",
".",
"values",
"(",
"'firstName'",
",",
"'lastName'",
",",
"'guestType'",
")",
")",
"# Component-by-component, OR append filters to an initial filter that always",
"# evaluates to False.",
"components",
"=",
"self",
".",
"guestlistcomponent_set",
".",
"all",
"(",
")",
"filters",
"=",
"Q",
"(",
"pk__isnull",
"=",
"True",
")",
"# Add prior staff based on the component rule.",
"for",
"component",
"in",
"components",
":",
"if",
"event",
"and",
"self",
".",
"appliesToEvent",
"(",
"event",
")",
":",
"filters",
"=",
"filters",
"|",
"self",
".",
"getComponentFilters",
"(",
"component",
",",
"event",
"=",
"event",
")",
"else",
":",
"filters",
"=",
"filters",
"|",
"self",
".",
"getComponentFilters",
"(",
"component",
",",
"dateTime",
"=",
"timezone",
".",
"now",
"(",
")",
")",
"# Add all event staff if that box is checked (no need for separate components)",
"if",
"self",
".",
"includeStaff",
"and",
"event",
"and",
"self",
".",
"appliesToEvent",
"(",
"event",
")",
":",
"filters",
"=",
"filters",
"|",
"Q",
"(",
"eventstaffmember__event",
"=",
"event",
")",
"# Execute the constructed query and add the names of staff",
"names",
"+=",
"list",
"(",
"StaffMember",
".",
"objects",
".",
"filter",
"(",
"filters",
")",
".",
"annotate",
"(",
"guestType",
"=",
"Case",
"(",
"When",
"(",
"eventstaffmember__event",
"=",
"event",
",",
"then",
"=",
"Concat",
"(",
"Value",
"(",
"'Event Staff: '",
")",
",",
"'eventstaffmember__category__name'",
")",
")",
",",
"default",
"=",
"Value",
"(",
"ugettext",
"(",
"'Other Staff'",
")",
")",
",",
"output_field",
"=",
"models",
".",
"CharField",
"(",
")",
")",
")",
".",
"distinct",
"(",
")",
".",
"values",
"(",
"'firstName'",
",",
"'lastName'",
",",
"'guestType'",
")",
")",
"if",
"self",
".",
"includeRegistrants",
"and",
"event",
"and",
"self",
".",
"appliesToEvent",
"(",
"event",
")",
":",
"names",
"+=",
"list",
"(",
"Registration",
".",
"objects",
".",
"filter",
"(",
"eventregistration__event",
"=",
"event",
")",
".",
"annotate",
"(",
"guestType",
"=",
"Value",
"(",
"_",
"(",
"'Registered'",
")",
",",
"output_field",
"=",
"models",
".",
"CharField",
"(",
")",
")",
")",
".",
"values",
"(",
"'firstName'",
",",
"'lastName'",
",",
"'guestType'",
")",
")",
"return",
"names"
] | Get the list of names associated with a particular event. | [
"Get",
"the",
"list",
"of",
"names",
"associated",
"with",
"a",
"particular",
"event",
"."
] | python | train |
rougeth/bottery | bottery/telegram/engine.py | https://github.com/rougeth/bottery/blob/1c724b867fa16708d59a3dbba5dd2c3de85147a9/bottery/telegram/engine.py#L147-L156 | def get_chat_id(self, message):
'''
Telegram chat type can be either "private", "group", "supergroup" or
"channel".
Return user ID if it is of type "private", chat ID otherwise.
'''
if message.chat.type == 'private':
return message.user.id
return message.chat.id | [
"def",
"get_chat_id",
"(",
"self",
",",
"message",
")",
":",
"if",
"message",
".",
"chat",
".",
"type",
"==",
"'private'",
":",
"return",
"message",
".",
"user",
".",
"id",
"return",
"message",
".",
"chat",
".",
"id"
] | Telegram chat type can be either "private", "group", "supergroup" or
"channel".
Return user ID if it is of type "private", chat ID otherwise. | [
"Telegram",
"chat",
"type",
"can",
"be",
"either",
"private",
"group",
"supergroup",
"or",
"channel",
".",
"Return",
"user",
"ID",
"if",
"it",
"is",
"of",
"type",
"private",
"chat",
"ID",
"otherwise",
"."
] | python | valid |
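Behavior sketch with stand-in message objects (`self` is unused, so the function can be exercised directly):

from types import SimpleNamespace

private = SimpleNamespace(chat=SimpleNamespace(type='private', id=-100),
                          user=SimpleNamespace(id=42))
group = SimpleNamespace(chat=SimpleNamespace(type='group', id=-100),
                        user=SimpleNamespace(id=42))

get_chat_id(None, private)    # 42   (user id for private chats)
get_chat_id(None, group)      # -100 (chat id otherwise)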
mlperf/training | object_detection/pytorch/tools/cityscapes/convert_cityscapes_to_coco.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/tools/cityscapes/convert_cityscapes_to_coco.py#L53-L90 | def convert_coco_stuff_mat(data_dir, out_dir):
"""Convert to png and save json with path. This currently only contains
the segmentation labels for objects+stuff in cocostuff - if we need to
combine with other labels from original COCO that will be a TODO."""
sets = ['train', 'val']
categories = []
json_name = 'coco_stuff_%s.json'
ann_dict = {}
for data_set in sets:
file_list = os.path.join(data_dir, '%s.txt')
images = []
with open(file_list % data_set) as f:
for img_id, img_name in enumerate(f):
img_name = img_name.replace('coco', 'COCO').strip('\n')
image = {}
mat_file = os.path.join(
data_dir, 'annotations/%s.mat' % img_name)
data = h5py.File(mat_file, 'r')
labelMap = data.get('S')
if len(categories) == 0:
labelNames = data.get('names')
for idx, n in enumerate(labelNames):
categories.append(
{"id": idx, "name": ''.join(chr(i) for i in data[
n[0]])})
ann_dict['categories'] = categories
scipy.misc.imsave(
os.path.join(data_dir, img_name + '.png'), labelMap)
image['width'] = labelMap.shape[0]
image['height'] = labelMap.shape[1]
image['file_name'] = img_name
image['seg_file_name'] = img_name
image['id'] = img_id
images.append(image)
ann_dict['images'] = images
print("Num images: %s" % len(images))
with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
outfile.write(json.dumps(ann_dict)) | [
"def",
"convert_coco_stuff_mat",
"(",
"data_dir",
",",
"out_dir",
")",
":",
"sets",
"=",
"[",
"'train'",
",",
"'val'",
"]",
"categories",
"=",
"[",
"]",
"json_name",
"=",
"'coco_stuff_%s.json'",
"ann_dict",
"=",
"{",
"}",
"for",
"data_set",
"in",
"sets",
":",
"file_list",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"'%s.txt'",
")",
"images",
"=",
"[",
"]",
"with",
"open",
"(",
"file_list",
"%",
"data_set",
")",
"as",
"f",
":",
"for",
"img_id",
",",
"img_name",
"in",
"enumerate",
"(",
"f",
")",
":",
"img_name",
"=",
"img_name",
".",
"replace",
"(",
"'coco'",
",",
"'COCO'",
")",
".",
"strip",
"(",
"'\\n'",
")",
"image",
"=",
"{",
"}",
"mat_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"'annotations/%s.mat'",
"%",
"img_name",
")",
"data",
"=",
"h5py",
".",
"File",
"(",
"mat_file",
",",
"'r'",
")",
"labelMap",
"=",
"data",
".",
"get",
"(",
"'S'",
")",
"if",
"len",
"(",
"categories",
")",
"==",
"0",
":",
"labelNames",
"=",
"data",
".",
"get",
"(",
"'names'",
")",
"for",
"idx",
",",
"n",
"in",
"enumerate",
"(",
"labelNames",
")",
":",
"categories",
".",
"append",
"(",
"{",
"\"id\"",
":",
"idx",
",",
"\"name\"",
":",
"''",
".",
"join",
"(",
"chr",
"(",
"i",
")",
"for",
"i",
"in",
"data",
"[",
"n",
"[",
"0",
"]",
"]",
")",
"}",
")",
"ann_dict",
"[",
"'categories'",
"]",
"=",
"categories",
"scipy",
".",
"misc",
".",
"imsave",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"img_name",
"+",
"'.png'",
")",
",",
"labelMap",
")",
"image",
"[",
"'width'",
"]",
"=",
"labelMap",
".",
"shape",
"[",
"0",
"]",
"image",
"[",
"'height'",
"]",
"=",
"labelMap",
".",
"shape",
"[",
"1",
"]",
"image",
"[",
"'file_name'",
"]",
"=",
"img_name",
"image",
"[",
"'seg_file_name'",
"]",
"=",
"img_name",
"image",
"[",
"'id'",
"]",
"=",
"img_id",
"images",
".",
"append",
"(",
"image",
")",
"ann_dict",
"[",
"'images'",
"]",
"=",
"images",
"print",
"(",
"\"Num images: %s\"",
"%",
"len",
"(",
"images",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"json_name",
"%",
"data_set",
")",
",",
"'wb'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"ann_dict",
")",
")"
] | Convert to png and save json with path. This currently only contains
the segmentation labels for objects+stuff in cocostuff - if we need to
combine with other labels from original COCO that will be a TODO. | [
"Convert",
"to",
"png",
"and",
"save",
"json",
"with",
"path",
".",
"This",
"currently",
"only",
"contains",
"the",
"segmentation",
"labels",
"for",
"objects",
"+",
"stuff",
"in",
"cocostuff",
"-",
"if",
"we",
"need",
"to",
"combine",
"with",
"other",
"labels",
"from",
"original",
"COCO",
"that",
"will",
"be",
"a",
"TODO",
"."
] | python | train |
BerkeleyAutomation/perception | perception/detector.py | https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/detector.py#L604-L612 | def detector(detector_type):
""" Returns a detector of the specified type. """
if detector_type == 'point_cloud_box':
return PointCloudBoxDetector()
elif detector_type == 'rgbd_foreground_mask_query':
return RgbdForegroundMaskQueryImageDetector()
elif detector_type == 'rgbd_foreground_mask':
return RgbdForegroundMaskDetector()
raise ValueError('Detector type %s not understood' %(detector_type)) | [
"def",
"detector",
"(",
"detector_type",
")",
":",
"if",
"detector_type",
"==",
"'point_cloud_box'",
":",
"return",
"PointCloudBoxDetector",
"(",
")",
"elif",
"detector_type",
"==",
"'rgbd_foreground_mask_query'",
":",
"return",
"RgbdForegroundMaskQueryImageDetector",
"(",
")",
"elif",
"detector_type",
"==",
"'rgbd_foreground_mask'",
":",
"return",
"RgbdForegroundMaskDetector",
"(",
")",
"raise",
"ValueError",
"(",
"'Detector type %s not understood'",
"%",
"(",
"detector_type",
")",
")"
] | Returns a detector of the specified type. | [
"Returns",
"a",
"detector",
"of",
"the",
"specified",
"type",
"."
] | python | train |
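The same dispatch, table-driven, with stub classes standing in for the real detector types:

class A: pass
class B: pass

REGISTRY = {'point_cloud_box': A, 'rgbd_foreground_mask': B}

def make_detector(detector_type):
    try:
        return REGISTRY[detector_type]()
    except KeyError:
        raise ValueError('Detector type %s not understood' % detector_type)

make_detector('point_cloud_box')    # an A instance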