repo (string, len 7-55) | path (string, len 4-223) | url (string, len 87-315) | code (string, len 75-104k) | code_tokens (list) | docstring (string, len 1-46.9k) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes) | avg_line_len (float64, 7.91-980)
---|---|---|---|---|---|---|---|---|---
smarie/python-parsyfiles | parsyfiles/parsing_registries.py | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L835-L862 | def create(att_name: str, parsed_att: S, attribute_type: Type[T], caught_exec: Dict[Converter[S, T], Exception]):
"""
        Helper method provided because we actually can't put that in the constructor; it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param att_name:
:param parsed_att:
:param attribute_type:
:param caught_exec:
:return:
"""
base_msg = "Error while trying to convert value for attribute '{a}' to type <{t}>:\n" \
" - parsed value is : '{v}' of type <{tv}>\n" \
"".format(a=str(att_name), t=get_pretty_type_str(attribute_type), v=parsed_att,
tv=get_pretty_type_str(type(parsed_att)))
msg = StringIO()
if len(list(caught_exec.keys())) > 0:
msg.writelines(' - converters tried are : \n * ')
msg.writelines('\n * '.join([str(converter) for converter in caught_exec.keys()]))
msg.writelines(' \n Caught the following exceptions: \n')
for converter, err in caught_exec.items():
msg.writelines('--------------- From ' + str(converter) + ' caught: \n')
print_error_to_io_stream(err, msg)
msg.write('\n')
return AttrConversionException(base_msg + msg.getvalue()) | [
"def",
"create",
"(",
"att_name",
":",
"str",
",",
"parsed_att",
":",
"S",
",",
"attribute_type",
":",
"Type",
"[",
"T",
"]",
",",
"caught_exec",
":",
"Dict",
"[",
"Converter",
"[",
"S",
",",
"T",
"]",
",",
"Exception",
"]",
")",
":",
"base_msg",
"=",
"\"Error while trying to convert value for attribute '{a}' to type <{t}>:\\n\"",
"\" - parsed value is : '{v}' of type <{tv}>\\n\"",
"\"\"",
".",
"format",
"(",
"a",
"=",
"str",
"(",
"att_name",
")",
",",
"t",
"=",
"get_pretty_type_str",
"(",
"attribute_type",
")",
",",
"v",
"=",
"parsed_att",
",",
"tv",
"=",
"get_pretty_type_str",
"(",
"type",
"(",
"parsed_att",
")",
")",
")",
"msg",
"=",
"StringIO",
"(",
")",
"if",
"len",
"(",
"list",
"(",
"caught_exec",
".",
"keys",
"(",
")",
")",
")",
">",
"0",
":",
"msg",
".",
"writelines",
"(",
"' - converters tried are : \\n * '",
")",
"msg",
".",
"writelines",
"(",
"'\\n * '",
".",
"join",
"(",
"[",
"str",
"(",
"converter",
")",
"for",
"converter",
"in",
"caught_exec",
".",
"keys",
"(",
")",
"]",
")",
")",
"msg",
".",
"writelines",
"(",
"' \\n Caught the following exceptions: \\n'",
")",
"for",
"converter",
",",
"err",
"in",
"caught_exec",
".",
"items",
"(",
")",
":",
"msg",
".",
"writelines",
"(",
"'--------------- From '",
"+",
"str",
"(",
"converter",
")",
"+",
"' caught: \\n'",
")",
"print_error_to_io_stream",
"(",
"err",
",",
"msg",
")",
"msg",
".",
"write",
"(",
"'\\n'",
")",
"return",
"AttrConversionException",
"(",
"base_msg",
"+",
"msg",
".",
"getvalue",
"(",
")",
")"
]
| Helper method provided because we actually can't put that in the constructor; it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param att_name:
:param parsed_att:
:param attribute_type:
:param caught_exec:
:return: | [
"Helper",
"method",
"provided",
"because",
"we",
"actually",
"can",
"t",
"put",
"that",
"in",
"the",
"constructor",
"it",
"creates",
"a",
"bug",
"in",
"Nose",
"tests",
"https",
":",
"//",
"github",
".",
"com",
"/",
"nose",
"-",
"devs",
"/",
"nose",
"/",
"issues",
"/",
"725"
]
| python | train | 48.214286 |
DataDog/integrations-core | openstack/datadog_checks/openstack/openstack.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/openstack/datadog_checks/openstack/openstack.py#L1374-L1383 | def get_my_hostname(self, split_hostname_on_first_period=False):
"""
Returns a best guess for the hostname registered with OpenStack for this host
"""
hostname = self.init_config.get("os_host") or self.hostname
if split_hostname_on_first_period:
hostname = hostname.split('.')[0]
return hostname | [
"def",
"get_my_hostname",
"(",
"self",
",",
"split_hostname_on_first_period",
"=",
"False",
")",
":",
"hostname",
"=",
"self",
".",
"init_config",
".",
"get",
"(",
"\"os_host\"",
")",
"or",
"self",
".",
"hostname",
"if",
"split_hostname_on_first_period",
":",
"hostname",
"=",
"hostname",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"return",
"hostname"
]
| Returns a best guess for the hostname registered with OpenStack for this host | [
"Returns",
"a",
"best",
"guess",
"for",
"the",
"hostname",
"registered",
"with",
"OpenStack",
"for",
"this",
"host"
]
| python | train | 34.8 |
Hackerfleet/hfos | hfos/ui/configurator.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/ui/configurator.py#L152-L176 | def get(self, event):
"""Get a stored configuration"""
try:
comp = event.data['uuid']
except KeyError:
comp = None
if not comp:
self.log('Invalid get request without schema or component',
lvl=error)
return
self.log("Config data get request for ", event.data, "from",
event.user)
component = model_factory(Schema).find_one({
'uuid': comp
})
response = {
'component': 'hfos.ui.configurator',
'action': 'get',
'data': component.serializablefields()
}
self.fireEvent(send(event.client.uuid, response)) | [
"def",
"get",
"(",
"self",
",",
"event",
")",
":",
"try",
":",
"comp",
"=",
"event",
".",
"data",
"[",
"'uuid'",
"]",
"except",
"KeyError",
":",
"comp",
"=",
"None",
"if",
"not",
"comp",
":",
"self",
".",
"log",
"(",
"'Invalid get request without schema or component'",
",",
"lvl",
"=",
"error",
")",
"return",
"self",
".",
"log",
"(",
"\"Config data get request for \"",
",",
"event",
".",
"data",
",",
"\"from\"",
",",
"event",
".",
"user",
")",
"component",
"=",
"model_factory",
"(",
"Schema",
")",
".",
"find_one",
"(",
"{",
"'uuid'",
":",
"comp",
"}",
")",
"response",
"=",
"{",
"'component'",
":",
"'hfos.ui.configurator'",
",",
"'action'",
":",
"'get'",
",",
"'data'",
":",
"component",
".",
"serializablefields",
"(",
")",
"}",
"self",
".",
"fireEvent",
"(",
"send",
"(",
"event",
".",
"client",
".",
"uuid",
",",
"response",
")",
")"
]
| Get a stored configuration | [
"Get",
"a",
"stored",
"configuration"
]
| python | train | 27.68 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/graph.py | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/graph.py#L23-L101 | def plotFCM(data, channel_names, kind='histogram', ax=None,
autolabel=True, xlabel_kwargs={}, ylabel_kwargs={},
colorbar=False, grid=False,
**kwargs):
"""
Plots the sample on the current axis.
    Follow with a call to matplotlib's show() in order to see the plot.
Parameters
----------
data : DataFrame
{graph_plotFCM_pars}
{common_plot_ax}
Returns
-------
The output of the plot command used
"""
if ax == None: ax = pl.gca()
xlabel_kwargs.setdefault('size', 16)
ylabel_kwargs.setdefault('size', 16)
channel_names = to_list(channel_names)
if len(channel_names) == 1:
# 1D so histogram plot
kwargs.setdefault('color', 'gray')
kwargs.setdefault('histtype', 'stepfilled')
kwargs.setdefault('bins', 200) # Do not move above
x = data[channel_names[0]].values
if len(x) >= 1:
if (len(x) == 1) and isinstance(kwargs['bins'], int):
# Only needed for hist (not hist2d) due to hist function doing
# excessive input checking
warnings.warn("One of the data sets only has a single event. "
"This event won't be plotted unless the bin locations"
" are explicitly provided to the plotting function. ")
return None
plot_output = ax.hist(x, **kwargs)
else:
return None
elif len(channel_names) == 2:
x = data[channel_names[0]].values # value of first channel
y = data[channel_names[1]].values # value of second channel
if len(x) == 0:
# Don't draw a plot if there's no data
return None
if kind == 'scatter':
kwargs.setdefault('edgecolor', 'none')
plot_output = ax.scatter(x, y, **kwargs)
elif kind == 'histogram':
kwargs.setdefault('bins', 200) # Do not move above
kwargs.setdefault('cmin', 1)
kwargs.setdefault('cmap', pl.cm.copper)
kwargs.setdefault('norm', matplotlib.colors.LogNorm())
plot_output = ax.hist2d(x, y, **kwargs)
mappable = plot_output[-1]
if colorbar:
pl.colorbar(mappable, ax=ax)
else:
raise ValueError("Not a valid plot type. Must be 'scatter', 'histogram'")
else:
raise ValueError('Received an unexpected number of channels: "{}"'.format(channel_names))
pl.grid(grid)
if autolabel:
y_label_text = 'Counts' if len(channel_names) == 1 else channel_names[1]
ax.set_xlabel(channel_names[0], **xlabel_kwargs)
ax.set_ylabel(y_label_text, **ylabel_kwargs)
return plot_output | [
"def",
"plotFCM",
"(",
"data",
",",
"channel_names",
",",
"kind",
"=",
"'histogram'",
",",
"ax",
"=",
"None",
",",
"autolabel",
"=",
"True",
",",
"xlabel_kwargs",
"=",
"{",
"}",
",",
"ylabel_kwargs",
"=",
"{",
"}",
",",
"colorbar",
"=",
"False",
",",
"grid",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"ax",
"==",
"None",
":",
"ax",
"=",
"pl",
".",
"gca",
"(",
")",
"xlabel_kwargs",
".",
"setdefault",
"(",
"'size'",
",",
"16",
")",
"ylabel_kwargs",
".",
"setdefault",
"(",
"'size'",
",",
"16",
")",
"channel_names",
"=",
"to_list",
"(",
"channel_names",
")",
"if",
"len",
"(",
"channel_names",
")",
"==",
"1",
":",
"# 1D so histogram plot",
"kwargs",
".",
"setdefault",
"(",
"'color'",
",",
"'gray'",
")",
"kwargs",
".",
"setdefault",
"(",
"'histtype'",
",",
"'stepfilled'",
")",
"kwargs",
".",
"setdefault",
"(",
"'bins'",
",",
"200",
")",
"# Do not move above",
"x",
"=",
"data",
"[",
"channel_names",
"[",
"0",
"]",
"]",
".",
"values",
"if",
"len",
"(",
"x",
")",
">=",
"1",
":",
"if",
"(",
"len",
"(",
"x",
")",
"==",
"1",
")",
"and",
"isinstance",
"(",
"kwargs",
"[",
"'bins'",
"]",
",",
"int",
")",
":",
"# Only needed for hist (not hist2d) due to hist function doing",
"# excessive input checking",
"warnings",
".",
"warn",
"(",
"\"One of the data sets only has a single event. \"",
"\"This event won't be plotted unless the bin locations\"",
"\" are explicitly provided to the plotting function. \"",
")",
"return",
"None",
"plot_output",
"=",
"ax",
".",
"hist",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"None",
"elif",
"len",
"(",
"channel_names",
")",
"==",
"2",
":",
"x",
"=",
"data",
"[",
"channel_names",
"[",
"0",
"]",
"]",
".",
"values",
"# value of first channel",
"y",
"=",
"data",
"[",
"channel_names",
"[",
"1",
"]",
"]",
".",
"values",
"# value of second channel",
"if",
"len",
"(",
"x",
")",
"==",
"0",
":",
"# Don't draw a plot if there's no data",
"return",
"None",
"if",
"kind",
"==",
"'scatter'",
":",
"kwargs",
".",
"setdefault",
"(",
"'edgecolor'",
",",
"'none'",
")",
"plot_output",
"=",
"ax",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"*",
"*",
"kwargs",
")",
"elif",
"kind",
"==",
"'histogram'",
":",
"kwargs",
".",
"setdefault",
"(",
"'bins'",
",",
"200",
")",
"# Do not move above",
"kwargs",
".",
"setdefault",
"(",
"'cmin'",
",",
"1",
")",
"kwargs",
".",
"setdefault",
"(",
"'cmap'",
",",
"pl",
".",
"cm",
".",
"copper",
")",
"kwargs",
".",
"setdefault",
"(",
"'norm'",
",",
"matplotlib",
".",
"colors",
".",
"LogNorm",
"(",
")",
")",
"plot_output",
"=",
"ax",
".",
"hist2d",
"(",
"x",
",",
"y",
",",
"*",
"*",
"kwargs",
")",
"mappable",
"=",
"plot_output",
"[",
"-",
"1",
"]",
"if",
"colorbar",
":",
"pl",
".",
"colorbar",
"(",
"mappable",
",",
"ax",
"=",
"ax",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Not a valid plot type. Must be 'scatter', 'histogram'\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Received an unexpected number of channels: \"{}\"'",
".",
"format",
"(",
"channel_names",
")",
")",
"pl",
".",
"grid",
"(",
"grid",
")",
"if",
"autolabel",
":",
"y_label_text",
"=",
"'Counts'",
"if",
"len",
"(",
"channel_names",
")",
"==",
"1",
"else",
"channel_names",
"[",
"1",
"]",
"ax",
".",
"set_xlabel",
"(",
"channel_names",
"[",
"0",
"]",
",",
"*",
"*",
"xlabel_kwargs",
")",
"ax",
".",
"set_ylabel",
"(",
"y_label_text",
",",
"*",
"*",
"ylabel_kwargs",
")",
"return",
"plot_output"
]
| Plots the sample on the current axis.
Follow with a call to matplotlib's show() in order to see the plot.
Parameters
----------
data : DataFrame
{graph_plotFCM_pars}
{common_plot_ax}
Returns
-------
The output of the plot command used | [
"Plots",
"the",
"sample",
"on",
"the",
"current",
"axis",
"."
]
| python | train | 33.987342 |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/extensions/sympyprinting.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/extensions/sympyprinting.py#L54-L64 | def print_png(o):
"""
A function to display sympy expression using inline style LaTeX in PNG.
"""
s = latex(o, mode='inline')
# mathtext does not understand certain latex flags, so we try to replace
# them with suitable subs.
s = s.replace('\\operatorname','')
s = s.replace('\\overline', '\\bar')
png = latex_to_png(s)
return png | [
"def",
"print_png",
"(",
"o",
")",
":",
"s",
"=",
"latex",
"(",
"o",
",",
"mode",
"=",
"'inline'",
")",
"# mathtext does not understand certain latex flags, so we try to replace",
"# them with suitable subs.",
"s",
"=",
"s",
".",
"replace",
"(",
"'\\\\operatorname'",
",",
"''",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"'\\\\overline'",
",",
"'\\\\bar'",
")",
"png",
"=",
"latex_to_png",
"(",
"s",
")",
"return",
"png"
]
| A function to display sympy expression using inline style LaTeX in PNG. | [
"A",
"function",
"to",
"display",
"sympy",
"expression",
"using",
"inline",
"style",
"LaTeX",
"in",
"PNG",
"."
]
| python | test | 32.727273 |
zkbt/the-friendly-stars | thefriendlystars/constellations/gaia.py | https://github.com/zkbt/the-friendly-stars/blob/50d3f979e79e63c66629065c75595696dc79802e/thefriendlystars/constellations/gaia.py#L85-L121 | def from_sky(cls, distancelimit=15, magnitudelimit=18):
'''
Create a Constellation from a criteria search of the whole sky.
Parameters
----------
distancelimit : float
Maximum distance (parsecs).
magnitudelimit : float
Maximum magnitude (for Gaia G).
'''
# define a query for cone search surrounding this center
criteria = []
if distancelimit is not None:
criteria.append('parallax >= {}'.format(1000.0/distancelimit))
if magnitudelimit is not None:
criteria.append('phot_g_mean_mag <= {}'.format(magnitudelimit))
allskyquery = """{} WHERE {}""".format(cls.basequery, ' and '.join(criteria))
print(allskyquery)
# run the query
print('querying Gaia DR2, for distance<{} and G<{}'.format(distancelimit, magnitudelimit))
table = query(allskyquery)
# store the search parameters in this object
c = cls(cls.standardize_table(table))
c.standardized.meta['query'] = allskyquery
c.standardized.meta['magnitudelimit'] = magnitudelimit
c.standardized.meta['distancelimit'] = distancelimit
#c.distancelimit = distancelimit
#c.magnitudelimit = magnitudelimit or c.magnitudelimit
return c | [
"def",
"from_sky",
"(",
"cls",
",",
"distancelimit",
"=",
"15",
",",
"magnitudelimit",
"=",
"18",
")",
":",
"# define a query for cone search surrounding this center",
"criteria",
"=",
"[",
"]",
"if",
"distancelimit",
"is",
"not",
"None",
":",
"criteria",
".",
"append",
"(",
"'parallax >= {}'",
".",
"format",
"(",
"1000.0",
"/",
"distancelimit",
")",
")",
"if",
"magnitudelimit",
"is",
"not",
"None",
":",
"criteria",
".",
"append",
"(",
"'phot_g_mean_mag <= {}'",
".",
"format",
"(",
"magnitudelimit",
")",
")",
"allskyquery",
"=",
"\"\"\"{} WHERE {}\"\"\"",
".",
"format",
"(",
"cls",
".",
"basequery",
",",
"' and '",
".",
"join",
"(",
"criteria",
")",
")",
"print",
"(",
"allskyquery",
")",
"# run the query",
"print",
"(",
"'querying Gaia DR2, for distance<{} and G<{}'",
".",
"format",
"(",
"distancelimit",
",",
"magnitudelimit",
")",
")",
"table",
"=",
"query",
"(",
"allskyquery",
")",
"# store the search parameters in this object",
"c",
"=",
"cls",
"(",
"cls",
".",
"standardize_table",
"(",
"table",
")",
")",
"c",
".",
"standardized",
".",
"meta",
"[",
"'query'",
"]",
"=",
"allskyquery",
"c",
".",
"standardized",
".",
"meta",
"[",
"'magnitudelimit'",
"]",
"=",
"magnitudelimit",
"c",
".",
"standardized",
".",
"meta",
"[",
"'distancelimit'",
"]",
"=",
"distancelimit",
"#c.distancelimit = distancelimit",
"#c.magnitudelimit = magnitudelimit or c.magnitudelimit",
"return",
"c"
]
| Create a Constellation from a criteria search of the whole sky.
Parameters
----------
distancelimit : float
Maximum distance (parsecs).
magnitudelimit : float
Maximum magnitude (for Gaia G). | [
"Create",
"a",
"Constellation",
"from",
"a",
"criteria",
"search",
"of",
"the",
"whole",
"sky",
"."
]
| python | train | 34.783784 |
numenta/htmresearch | projects/sdr_paper/pytorch_experiments/analyze_experiment.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/pytorch_experiments/analyze_experiment.py#L31-L62 | def analyzeParameters(expName, suite):
"""
Analyze the impact of each list parameter in this experiment
"""
print("\n================",expName,"=====================")
try:
expParams = suite.get_params(expName)
pprint.pprint(expParams)
for p in ["boost_strength", "k", "learning_rate", "weight_sparsity",
"k_inference_factor", "boost_strength_factor",
"c1_out_channels", "c1_k", "learning_rate_factor",
"batches_in_epoch",
]:
if p in expParams and type(expParams[p]) == list:
print("\n",p)
for v1 in expParams[p]:
# Retrieve the last totalCorrect from each experiment
# Print them sorted from best to worst
values, params = suite.get_values_fix_params(
expName, 0, "testerror", "last", **{p:v1})
v = np.array(values)
try:
print("Average/min/max for", p, v1, "=", v.mean(), v.min(), v.max())
# sortedIndices = v.argsort()
# for i in sortedIndices[::-1]:
# print(v[i],params[i]["name"])
except:
print("Can't compute stats for",p)
except:
print("Couldn't load experiment",expName) | [
"def",
"analyzeParameters",
"(",
"expName",
",",
"suite",
")",
":",
"print",
"(",
"\"\\n================\"",
",",
"expName",
",",
"\"=====================\"",
")",
"try",
":",
"expParams",
"=",
"suite",
".",
"get_params",
"(",
"expName",
")",
"pprint",
".",
"pprint",
"(",
"expParams",
")",
"for",
"p",
"in",
"[",
"\"boost_strength\"",
",",
"\"k\"",
",",
"\"learning_rate\"",
",",
"\"weight_sparsity\"",
",",
"\"k_inference_factor\"",
",",
"\"boost_strength_factor\"",
",",
"\"c1_out_channels\"",
",",
"\"c1_k\"",
",",
"\"learning_rate_factor\"",
",",
"\"batches_in_epoch\"",
",",
"]",
":",
"if",
"p",
"in",
"expParams",
"and",
"type",
"(",
"expParams",
"[",
"p",
"]",
")",
"==",
"list",
":",
"print",
"(",
"\"\\n\"",
",",
"p",
")",
"for",
"v1",
"in",
"expParams",
"[",
"p",
"]",
":",
"# Retrieve the last totalCorrect from each experiment",
"# Print them sorted from best to worst",
"values",
",",
"params",
"=",
"suite",
".",
"get_values_fix_params",
"(",
"expName",
",",
"0",
",",
"\"testerror\"",
",",
"\"last\"",
",",
"*",
"*",
"{",
"p",
":",
"v1",
"}",
")",
"v",
"=",
"np",
".",
"array",
"(",
"values",
")",
"try",
":",
"print",
"(",
"\"Average/min/max for\"",
",",
"p",
",",
"v1",
",",
"\"=\"",
",",
"v",
".",
"mean",
"(",
")",
",",
"v",
".",
"min",
"(",
")",
",",
"v",
".",
"max",
"(",
")",
")",
"# sortedIndices = v.argsort()",
"# for i in sortedIndices[::-1]:",
"# print(v[i],params[i][\"name\"])",
"except",
":",
"print",
"(",
"\"Can't compute stats for\"",
",",
"p",
")",
"except",
":",
"print",
"(",
"\"Couldn't load experiment\"",
",",
"expName",
")"
]
| Analyze the impact of each list parameter in this experiment | [
"Analyze",
"the",
"impact",
"of",
"each",
"list",
"parameter",
"in",
"this",
"experiment"
]
| python | train | 37.125 |
Locu/chronology | kronos/kronos/utils/uuid.py | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/utils/uuid.py#L14-L20 | def uuid_from_kronos_time(time, _type=UUIDType.RANDOM):
"""
Generate a UUID with the specified time.
If `lowest` is true, return the lexicographically first UUID for the specified
time.
"""
return timeuuid_from_time(int(time) + UUID_TIME_OFFSET, type=_type) | [
"def",
"uuid_from_kronos_time",
"(",
"time",
",",
"_type",
"=",
"UUIDType",
".",
"RANDOM",
")",
":",
"return",
"timeuuid_from_time",
"(",
"int",
"(",
"time",
")",
"+",
"UUID_TIME_OFFSET",
",",
"type",
"=",
"_type",
")"
]
| Generate a UUID with the specified time.
If `lowest` is true, return the lexicographically first UUID for the specified
time. | [
"Generate",
"a",
"UUID",
"with",
"the",
"specified",
"time",
".",
"If",
"lowest",
"is",
"true",
"return",
"the",
"lexicographically",
"first",
"UUID",
"for",
"the",
"specified",
"time",
"."
]
| python | train | 37.571429 |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_firmware_ext.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_firmware_ext.py#L211-L225 | def show_firmware_version_output_show_firmware_version_node_info_firmware_version_info_application_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_firmware_version = ET.Element("show_firmware_version")
config = show_firmware_version
output = ET.SubElement(show_firmware_version, "output")
show_firmware_version = ET.SubElement(output, "show-firmware-version")
node_info = ET.SubElement(show_firmware_version, "node-info")
firmware_version_info = ET.SubElement(node_info, "firmware-version-info")
application_name = ET.SubElement(firmware_version_info, "application-name")
application_name.text = kwargs.pop('application_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"show_firmware_version_output_show_firmware_version_node_info_firmware_version_info_application_name",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"show_firmware_version",
"=",
"ET",
".",
"Element",
"(",
"\"show_firmware_version\"",
")",
"config",
"=",
"show_firmware_version",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"show_firmware_version",
",",
"\"output\"",
")",
"show_firmware_version",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"show-firmware-version\"",
")",
"node_info",
"=",
"ET",
".",
"SubElement",
"(",
"show_firmware_version",
",",
"\"node-info\"",
")",
"firmware_version_info",
"=",
"ET",
".",
"SubElement",
"(",
"node_info",
",",
"\"firmware-version-info\"",
")",
"application_name",
"=",
"ET",
".",
"SubElement",
"(",
"firmware_version_info",
",",
"\"application-name\"",
")",
"application_name",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'application_name'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| Auto Generated Code | [
"Auto",
"Generated",
"Code"
]
| python | train | 55.133333 |
themartorana/python-postmark | postmark/core.py | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L472-L552 | def send(self, test=None):
'''
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
'''
self._check_values()
# Set up message dictionary
json_message = self.to_json_message()
# if (self.__html_body and not self.__text_body) and self.__multipart:
# # TODO: Set up regex to strip html
# pass
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
except ImportError:
pass
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
return
if self.__template_id:
endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/'
else:
endpoint_url = __POSTMARK_URL__ + 'email'
# Set up the url Request
req = Request(
endpoint_url,
json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'),
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark: %s' % json_message
result = urlopen(req)
jsontxt = result.read().decode('utf8')
result.close()
if result.code == 200:
self.message_id = json.loads(jsontxt).get('MessageID', None)
return True
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
elif err.code == 422:
try:
jsontxt = err.read().decode('utf8')
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode']
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
if error_code == 406:
raise PMMailInactiveRecipientException('You tried to send email to a recipient that has been marked as inactive.')
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
elif hasattr(err, 'code'):
raise PMMailURLException('URLError: %d: The server couldn\'t fufill the request. (See "inner_exception" for details)' % err.code, err)
else:
raise PMMailURLException('URLError: The server couldn\'t fufill the request. (See "inner_exception" for details)', err) | [
"def",
"send",
"(",
"self",
",",
"test",
"=",
"None",
")",
":",
"self",
".",
"_check_values",
"(",
")",
"# Set up message dictionary",
"json_message",
"=",
"self",
".",
"to_json_message",
"(",
")",
"# if (self.__html_body and not self.__text_body) and self.__multipart:",
"# # TODO: Set up regex to strip html",
"# pass",
"# If test is not specified, attempt to read the Django setting",
"if",
"test",
"is",
"None",
":",
"try",
":",
"from",
"django",
".",
"conf",
"import",
"settings",
"as",
"django_settings",
"test",
"=",
"getattr",
"(",
"django_settings",
",",
"\"POSTMARK_TEST_MODE\"",
",",
"None",
")",
"except",
"ImportError",
":",
"pass",
"# If this is a test, just print the message",
"if",
"test",
":",
"print",
"(",
"'JSON message is:\\n%s'",
"%",
"json",
".",
"dumps",
"(",
"json_message",
",",
"cls",
"=",
"PMJSONEncoder",
")",
")",
"return",
"if",
"self",
".",
"__template_id",
":",
"endpoint_url",
"=",
"__POSTMARK_URL__",
"+",
"'email/withTemplate/'",
"else",
":",
"endpoint_url",
"=",
"__POSTMARK_URL__",
"+",
"'email'",
"# Set up the url Request",
"req",
"=",
"Request",
"(",
"endpoint_url",
",",
"json",
".",
"dumps",
"(",
"json_message",
",",
"cls",
"=",
"PMJSONEncoder",
")",
".",
"encode",
"(",
"'utf8'",
")",
",",
"{",
"'Accept'",
":",
"'application/json'",
",",
"'Content-Type'",
":",
"'application/json'",
",",
"'X-Postmark-Server-Token'",
":",
"self",
".",
"__api_key",
",",
"'User-agent'",
":",
"self",
".",
"__user_agent",
"}",
")",
"# Attempt send",
"try",
":",
"# print 'sending request to postmark: %s' % json_message",
"result",
"=",
"urlopen",
"(",
"req",
")",
"jsontxt",
"=",
"result",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf8'",
")",
"result",
".",
"close",
"(",
")",
"if",
"result",
".",
"code",
"==",
"200",
":",
"self",
".",
"message_id",
"=",
"json",
".",
"loads",
"(",
"jsontxt",
")",
".",
"get",
"(",
"'MessageID'",
",",
"None",
")",
"return",
"True",
"else",
":",
"raise",
"PMMailSendException",
"(",
"'Return code %d: %s'",
"%",
"(",
"result",
".",
"code",
",",
"result",
".",
"msg",
")",
")",
"except",
"HTTPError",
"as",
"err",
":",
"if",
"err",
".",
"code",
"==",
"401",
":",
"raise",
"PMMailUnauthorizedException",
"(",
"'Sending Unauthorized - incorrect API key.'",
",",
"err",
")",
"elif",
"err",
".",
"code",
"==",
"422",
":",
"try",
":",
"jsontxt",
"=",
"err",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf8'",
")",
"jsonobj",
"=",
"json",
".",
"loads",
"(",
"jsontxt",
")",
"desc",
"=",
"jsonobj",
"[",
"'Message'",
"]",
"error_code",
"=",
"jsonobj",
"[",
"'ErrorCode'",
"]",
"except",
"KeyError",
":",
"raise",
"PMMailUnprocessableEntityException",
"(",
"'Unprocessable Entity: Description not given'",
")",
"if",
"error_code",
"==",
"406",
":",
"raise",
"PMMailInactiveRecipientException",
"(",
"'You tried to send email to a recipient that has been marked as inactive.'",
")",
"raise",
"PMMailUnprocessableEntityException",
"(",
"'Unprocessable Entity: %s'",
"%",
"desc",
")",
"elif",
"err",
".",
"code",
"==",
"500",
":",
"raise",
"PMMailServerErrorException",
"(",
"'Internal server error at Postmark. Admins have been alerted.'",
",",
"err",
")",
"except",
"URLError",
"as",
"err",
":",
"if",
"hasattr",
"(",
"err",
",",
"'reason'",
")",
":",
"raise",
"PMMailURLException",
"(",
"'URLError: Failed to reach the server: %s (See \"inner_exception\" for details)'",
"%",
"err",
".",
"reason",
",",
"err",
")",
"elif",
"hasattr",
"(",
"err",
",",
"'code'",
")",
":",
"raise",
"PMMailURLException",
"(",
"'URLError: %d: The server couldn\\'t fufill the request. (See \"inner_exception\" for details)'",
"%",
"err",
".",
"code",
",",
"err",
")",
"else",
":",
"raise",
"PMMailURLException",
"(",
"'URLError: The server couldn\\'t fufill the request. (See \"inner_exception\" for details)'",
",",
"err",
")"
]
| Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark | [
"Send",
"the",
"email",
"through",
"the",
"Postmark",
"system",
".",
"Pass",
"test",
"=",
"True",
"to",
"just",
"print",
"out",
"the",
"resulting",
"JSON",
"message",
"being",
"sent",
"to",
"Postmark"
]
| python | train | 42.506173 |
saltstack/salt | salt/spm/__init__.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L995-L1013 | def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg) | [
"def",
"_list_files",
"(",
"self",
",",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"<",
"2",
":",
"raise",
"SPMInvocationError",
"(",
"'A package name must be specified'",
")",
"package",
"=",
"args",
"[",
"-",
"1",
"]",
"files",
"=",
"self",
".",
"_pkgdb_fun",
"(",
"'list_files'",
",",
"package",
",",
"self",
".",
"db_conn",
")",
"if",
"files",
"is",
"None",
":",
"raise",
"SPMPackageError",
"(",
"'package {0} not installed'",
".",
"format",
"(",
"package",
")",
")",
"else",
":",
"for",
"file_",
"in",
"files",
":",
"if",
"self",
".",
"opts",
"[",
"'verbose'",
"]",
":",
"status_msg",
"=",
"','",
".",
"join",
"(",
"file_",
")",
"else",
":",
"status_msg",
"=",
"file_",
"[",
"0",
"]",
"self",
".",
"ui",
".",
"status",
"(",
"status_msg",
")"
]
| List files for an installed package | [
"List",
"files",
"for",
"an",
"installed",
"package"
]
| python | train | 32.789474 |
stitchfix/pyxley | pyxley/charts/mg/line_chart.py | https://github.com/stitchfix/pyxley/blob/2dab00022d977d986169cd8a629b3a2f91be893f/pyxley/charts/mg/line_chart.py#L58-L67 | def to_json(df, x, y, timeseries=False):
"""Format output for json response."""
values = {k: [] for k in y}
for i, row in df.iterrows():
for yy in y:
values[yy].append({
"x": row[x],
"y": row[yy]
})
return {"result": [values[k] for k in y], "date": timeseries} | [
"def",
"to_json",
"(",
"df",
",",
"x",
",",
"y",
",",
"timeseries",
"=",
"False",
")",
":",
"values",
"=",
"{",
"k",
":",
"[",
"]",
"for",
"k",
"in",
"y",
"}",
"for",
"i",
",",
"row",
"in",
"df",
".",
"iterrows",
"(",
")",
":",
"for",
"yy",
"in",
"y",
":",
"values",
"[",
"yy",
"]",
".",
"append",
"(",
"{",
"\"x\"",
":",
"row",
"[",
"x",
"]",
",",
"\"y\"",
":",
"row",
"[",
"yy",
"]",
"}",
")",
"return",
"{",
"\"result\"",
":",
"[",
"values",
"[",
"k",
"]",
"for",
"k",
"in",
"y",
"]",
",",
"\"date\"",
":",
"timeseries",
"}"
]
| Format output for json response. | [
"Format",
"output",
"for",
"json",
"response",
"."
]
| python | train | 37.2 |
libtcod/python-tcod | tcod/libtcodpy.py | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L2775-L2791 | def heightmap_get_normal(
hm: np.ndarray, x: float, y: float, waterLevel: float
) -> Tuple[float, float, float]:
"""Return the map normal at given coordinates.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
x (float): The x coordinate.
y (float): The y coordinate.
waterLevel (float): The heightmap is considered flat below this value.
Returns:
Tuple[float, float, float]: An (x, y, z) vector normal.
"""
cn = ffi.new("float[3]")
lib.TCOD_heightmap_get_normal(_heightmap_cdata(hm), x, y, cn, waterLevel)
return tuple(cn) | [
"def",
"heightmap_get_normal",
"(",
"hm",
":",
"np",
".",
"ndarray",
",",
"x",
":",
"float",
",",
"y",
":",
"float",
",",
"waterLevel",
":",
"float",
")",
"->",
"Tuple",
"[",
"float",
",",
"float",
",",
"float",
"]",
":",
"cn",
"=",
"ffi",
".",
"new",
"(",
"\"float[3]\"",
")",
"lib",
".",
"TCOD_heightmap_get_normal",
"(",
"_heightmap_cdata",
"(",
"hm",
")",
",",
"x",
",",
"y",
",",
"cn",
",",
"waterLevel",
")",
"return",
"tuple",
"(",
"cn",
")"
]
| Return the map normal at given coordinates.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
x (float): The x coordinate.
y (float): The y coordinate.
waterLevel (float): The heightmap is considered flat below this value.
Returns:
Tuple[float, float, float]: An (x, y, z) vector normal. | [
"Return",
"the",
"map",
"normal",
"at",
"given",
"coordinates",
"."
]
| python | train | 35.764706 |
a1ezzz/wasp-general | wasp_general/crypto/aes.py | https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/crypto/aes.py#L491-L498 | def cipher(self):
""" Generate AES-cipher
:return: Crypto.Cipher.AES.AESCipher
"""
#cipher = pyAES.new(*self.mode().aes_args(), **self.mode().aes_kwargs())
cipher = Cipher(*self.mode().aes_args(), **self.mode().aes_kwargs())
return WAES.WAESCipher(cipher) | [
"def",
"cipher",
"(",
"self",
")",
":",
"#cipher = pyAES.new(*self.mode().aes_args(), **self.mode().aes_kwargs())",
"cipher",
"=",
"Cipher",
"(",
"*",
"self",
".",
"mode",
"(",
")",
".",
"aes_args",
"(",
")",
",",
"*",
"*",
"self",
".",
"mode",
"(",
")",
".",
"aes_kwargs",
"(",
")",
")",
"return",
"WAES",
".",
"WAESCipher",
"(",
"cipher",
")"
]
| Generate AES-cipher
:return: Crypto.Cipher.AES.AESCipher | [
"Generate",
"AES",
"-",
"cipher"
]
| python | train | 32.625 |
hyperledger/indy-sdk | vcx/wrappers/python3/vcx/api/utils.py | https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/vcx/wrappers/python3/vcx/api/utils.py#L6-L37 | async def vcx_agent_provision(config: str) -> None:
"""
Provision an agent in the agency, populate configuration and wallet for this agent.
Example:
import json
enterprise_config = {
'agency_url': 'http://localhost:8080',
'agency_did': 'VsKV7grR1BUE29mG2Fm2kX',
'agency_verkey': "Hezce2UWMZ3wUhVkh2LfKSs8nDzWwzs2Win7EzNN3YaR",
'wallet_name': 'LIBVCX_SDK_WALLET',
'agent_seed': '00000000000000000000000001234561',
'enterprise_seed': '000000000000000000000000Trustee1',
'wallet_key': '1234'
}
vcx_config = await vcx_agent_provision(json.dumps(enterprise_config))
:param config: JSON configuration
:return: Configuration for vcx_init call.
"""
logger = logging.getLogger(__name__)
if not hasattr(vcx_agent_provision, "cb"):
logger.debug("vcx_agent_provision: Creating callback")
vcx_agent_provision.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_config = c_char_p(config.encode('utf-8'))
result = await do_call('vcx_agent_provision_async',
c_config,
vcx_agent_provision.cb)
logger.debug("vcx_agent_provision completed")
return result.decode() | [
"async",
"def",
"vcx_agent_provision",
"(",
"config",
":",
"str",
")",
"->",
"None",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"if",
"not",
"hasattr",
"(",
"vcx_agent_provision",
",",
"\"cb\"",
")",
":",
"logger",
".",
"debug",
"(",
"\"vcx_agent_provision: Creating callback\"",
")",
"vcx_agent_provision",
".",
"cb",
"=",
"create_cb",
"(",
"CFUNCTYPE",
"(",
"None",
",",
"c_uint32",
",",
"c_uint32",
",",
"c_char_p",
")",
")",
"c_config",
"=",
"c_char_p",
"(",
"config",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"result",
"=",
"await",
"do_call",
"(",
"'vcx_agent_provision_async'",
",",
"c_config",
",",
"vcx_agent_provision",
".",
"cb",
")",
"logger",
".",
"debug",
"(",
"\"vcx_agent_provision completed\"",
")",
"return",
"result",
".",
"decode",
"(",
")"
]
| Provision an agent in the agency, populate configuration and wallet for this agent.
Example:
import json
enterprise_config = {
'agency_url': 'http://localhost:8080',
'agency_did': 'VsKV7grR1BUE29mG2Fm2kX',
'agency_verkey': "Hezce2UWMZ3wUhVkh2LfKSs8nDzWwzs2Win7EzNN3YaR",
'wallet_name': 'LIBVCX_SDK_WALLET',
'agent_seed': '00000000000000000000000001234561',
'enterprise_seed': '000000000000000000000000Trustee1',
'wallet_key': '1234'
}
vcx_config = await vcx_agent_provision(json.dumps(enterprise_config))
:param config: JSON configuration
:return: Configuration for vcx_init call. | [
"Provision",
"an",
"agent",
"in",
"the",
"agency",
"populate",
"configuration",
"and",
"wallet",
"for",
"this",
"agent",
".",
"Example",
":",
"import",
"json",
"enterprise_config",
"=",
"{",
"agency_url",
":",
"http",
":",
"//",
"localhost",
":",
"8080",
"agency_did",
":",
"VsKV7grR1BUE29mG2Fm2kX",
"agency_verkey",
":",
"Hezce2UWMZ3wUhVkh2LfKSs8nDzWwzs2Win7EzNN3YaR",
"wallet_name",
":",
"LIBVCX_SDK_WALLET",
"agent_seed",
":",
"00000000000000000000000001234561",
"enterprise_seed",
":",
"000000000000000000000000Trustee1",
"wallet_key",
":",
"1234",
"}",
"vcx_config",
"=",
"await",
"vcx_agent_provision",
"(",
"json",
".",
"dumps",
"(",
"enterprise_config",
"))",
":",
"param",
"config",
":",
"JSON",
"configuration",
":",
"return",
":",
"Configuration",
"for",
"vcx_init",
"call",
"."
]
| python | train | 38.09375 |
crs4/pydoop | pydoop/hdfs/path.py | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L304-L312 | def exists(hdfs_path, user=None):
"""
Return :obj:`True` if ``hdfs_path`` exists in the default HDFS.
"""
hostname, port, path = split(hdfs_path, user=user)
fs = hdfs_fs.hdfs(hostname, port)
retval = fs.exists(path)
fs.close()
return retval | [
"def",
"exists",
"(",
"hdfs_path",
",",
"user",
"=",
"None",
")",
":",
"hostname",
",",
"port",
",",
"path",
"=",
"split",
"(",
"hdfs_path",
",",
"user",
"=",
"user",
")",
"fs",
"=",
"hdfs_fs",
".",
"hdfs",
"(",
"hostname",
",",
"port",
")",
"retval",
"=",
"fs",
".",
"exists",
"(",
"path",
")",
"fs",
".",
"close",
"(",
")",
"return",
"retval"
]
| Return :obj:`True` if ``hdfs_path`` exists in the default HDFS. | [
"Return",
":",
"obj",
":",
"True",
"if",
"hdfs_path",
"exists",
"in",
"the",
"default",
"HDFS",
"."
]
| python | train | 29.333333 |
5j9/wikitextparser | wikitextparser/_wikitext.py | https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L696-L702 | def parser_functions(self) -> List['ParserFunction']:
"""Return a list of parser function objects."""
_lststr = self._lststr
_type_to_spans = self._type_to_spans
return [
ParserFunction(_lststr, _type_to_spans, span, 'ParserFunction')
for span in self._subspans('ParserFunction')] | [
"def",
"parser_functions",
"(",
"self",
")",
"->",
"List",
"[",
"'ParserFunction'",
"]",
":",
"_lststr",
"=",
"self",
".",
"_lststr",
"_type_to_spans",
"=",
"self",
".",
"_type_to_spans",
"return",
"[",
"ParserFunction",
"(",
"_lststr",
",",
"_type_to_spans",
",",
"span",
",",
"'ParserFunction'",
")",
"for",
"span",
"in",
"self",
".",
"_subspans",
"(",
"'ParserFunction'",
")",
"]"
]
| Return a list of parser function objects. | [
"Return",
"a",
"list",
"of",
"parser",
"function",
"objects",
"."
]
| python | test | 47.142857 |
ucbvislab/radiotool | radiotool/composer/track.py | https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/track.py#L165-L182 | def loudest_time(self, start=0, duration=0):
"""Find the loudest time in the window given by start and duration
Returns frame number in context of entire track, not just the window.
:param integer start: Start frame
:param integer duration: Number of frames to consider from start
:returns: Frame number of loudest frame
:rtype: integer
"""
if duration == 0:
duration = self.sound.nframes
self.current_frame = start
arr = self.read_frames(duration)
# get the frame of the maximum amplitude
# different names for the same thing...
# max_amp_sample = a.argmax(axis=0)[a.max(axis=0).argmax()]
max_amp_sample = int(np.floor(arr.argmax()/2)) + start
return max_amp_sample | [
"def",
"loudest_time",
"(",
"self",
",",
"start",
"=",
"0",
",",
"duration",
"=",
"0",
")",
":",
"if",
"duration",
"==",
"0",
":",
"duration",
"=",
"self",
".",
"sound",
".",
"nframes",
"self",
".",
"current_frame",
"=",
"start",
"arr",
"=",
"self",
".",
"read_frames",
"(",
"duration",
")",
"# get the frame of the maximum amplitude",
"# different names for the same thing...",
"# max_amp_sample = a.argmax(axis=0)[a.max(axis=0).argmax()]",
"max_amp_sample",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"arr",
".",
"argmax",
"(",
")",
"/",
"2",
")",
")",
"+",
"start",
"return",
"max_amp_sample"
]
| Find the loudest time in the window given by start and duration
Returns frame number in context of entire track, not just the window.
:param integer start: Start frame
:param integer duration: Number of frames to consider from start
:returns: Frame number of loudest frame
:rtype: integer | [
"Find",
"the",
"loudest",
"time",
"in",
"the",
"window",
"given",
"by",
"start",
"and",
"duration",
"Returns",
"frame",
"number",
"in",
"context",
"of",
"entire",
"track",
"not",
"just",
"the",
"window",
"."
]
| python | train | 43.444444 |
clalancette/pycdlib | pycdlib/rockridge.py | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L421-L476 | def parse(self, rrstr):
# type: (bytes) -> int
'''
Parse a Rock Ridge POSIX File Attributes record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
         The length of this record in bytes: 36 for Rock Ridge 1.09/1.10, 44 for 1.12.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('PX record already initialized!')
(su_len, su_entry_version_unused, posix_file_mode_le, posix_file_mode_be,
posix_file_links_le, posix_file_links_be, posix_file_user_id_le,
posix_file_user_id_be, posix_file_group_id_le,
posix_file_group_id_be) = struct.unpack_from('=BBLLLLLLLL', rrstr[:38], 2)
# We assume that the caller has already checked the su_entry_version,
# so we don't bother.
if posix_file_mode_le != utils.swab_32bit(posix_file_mode_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file mode do not agree')
if posix_file_links_le != utils.swab_32bit(posix_file_links_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file links do not agree')
if posix_file_user_id_le != utils.swab_32bit(posix_file_user_id_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file user ID do not agree')
if posix_file_group_id_le != utils.swab_32bit(posix_file_group_id_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file group ID do not agree')
# In Rock Ridge 1.09 and 1.10, there is no serial number so the su_len
# is 36, while in Rock Ridge 1.12, there is an 8-byte serial number so
# su_len is 44.
if su_len == 36:
posix_file_serial_number_le = 0
elif su_len == 44:
(posix_file_serial_number_le,
posix_file_serial_number_be) = struct.unpack_from('=LL',
rrstr[:44], 36)
if posix_file_serial_number_le != utils.swab_32bit(posix_file_serial_number_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file serial number do not agree')
else:
raise pycdlibexception.PyCdlibInvalidISO('Invalid length on Rock Ridge PX record')
self.posix_file_mode = posix_file_mode_le
self.posix_file_links = posix_file_links_le
self.posix_user_id = posix_file_user_id_le
self.posix_group_id = posix_file_group_id_le
self.posix_serial_number = posix_file_serial_number_le
self._initialized = True
return su_len | [
"def",
"parse",
"(",
"self",
",",
"rrstr",
")",
":",
"# type: (bytes) -> int",
"if",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'PX record already initialized!'",
")",
"(",
"su_len",
",",
"su_entry_version_unused",
",",
"posix_file_mode_le",
",",
"posix_file_mode_be",
",",
"posix_file_links_le",
",",
"posix_file_links_be",
",",
"posix_file_user_id_le",
",",
"posix_file_user_id_be",
",",
"posix_file_group_id_le",
",",
"posix_file_group_id_be",
")",
"=",
"struct",
".",
"unpack_from",
"(",
"'=BBLLLLLLLL'",
",",
"rrstr",
"[",
":",
"38",
"]",
",",
"2",
")",
"# We assume that the caller has already checked the su_entry_version,",
"# so we don't bother.",
"if",
"posix_file_mode_le",
"!=",
"utils",
".",
"swab_32bit",
"(",
"posix_file_mode_be",
")",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'PX record big and little-endian file mode do not agree'",
")",
"if",
"posix_file_links_le",
"!=",
"utils",
".",
"swab_32bit",
"(",
"posix_file_links_be",
")",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'PX record big and little-endian file links do not agree'",
")",
"if",
"posix_file_user_id_le",
"!=",
"utils",
".",
"swab_32bit",
"(",
"posix_file_user_id_be",
")",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'PX record big and little-endian file user ID do not agree'",
")",
"if",
"posix_file_group_id_le",
"!=",
"utils",
".",
"swab_32bit",
"(",
"posix_file_group_id_be",
")",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'PX record big and little-endian file group ID do not agree'",
")",
"# In Rock Ridge 1.09 and 1.10, there is no serial number so the su_len",
"# is 36, while in Rock Ridge 1.12, there is an 8-byte serial number so",
"# su_len is 44.",
"if",
"su_len",
"==",
"36",
":",
"posix_file_serial_number_le",
"=",
"0",
"elif",
"su_len",
"==",
"44",
":",
"(",
"posix_file_serial_number_le",
",",
"posix_file_serial_number_be",
")",
"=",
"struct",
".",
"unpack_from",
"(",
"'=LL'",
",",
"rrstr",
"[",
":",
"44",
"]",
",",
"36",
")",
"if",
"posix_file_serial_number_le",
"!=",
"utils",
".",
"swab_32bit",
"(",
"posix_file_serial_number_be",
")",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'PX record big and little-endian file serial number do not agree'",
")",
"else",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'Invalid length on Rock Ridge PX record'",
")",
"self",
".",
"posix_file_mode",
"=",
"posix_file_mode_le",
"self",
".",
"posix_file_links",
"=",
"posix_file_links_le",
"self",
".",
"posix_user_id",
"=",
"posix_file_user_id_le",
"self",
".",
"posix_group_id",
"=",
"posix_file_group_id_le",
"self",
".",
"posix_serial_number",
"=",
"posix_file_serial_number_le",
"self",
".",
"_initialized",
"=",
"True",
"return",
"su_len"
]
| Parse a Rock Ridge POSIX File Attributes record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
        The length of this record in bytes: 36 for Rock Ridge 1.09/1.10, 44 for 1.12.
"Parse",
"a",
"Rock",
"Ridge",
"POSIX",
"File",
"Attributes",
"record",
"out",
"of",
"a",
"string",
"."
]
| python | train | 47.553571 |
jeffh/sniffer | sniffer/scanner/base.py | https://github.com/jeffh/sniffer/blob/8e4c3e77743aef08109ea0225b4a6536d4e60270/sniffer/scanner/base.py#L179-L192 | def observe(self, event_name, func):
"""
event_name := {'created', 'modified', 'deleted'}, list, tuple
Attaches a function to run to a particular event. The function must be
        unique to be removed cleanly. Alternatively, event_name can be a list/
        tuple of any of the string possibilities, to attach the function to multiple events
"""
if isinstance(event_name, list) or isinstance(event_name, tuple):
for name in event_name:
self.observe(name, func)
return
self.log(func.__name__, "attached to", event_name)
self._modify_event(event_name, 'append', func) | [
"def",
"observe",
"(",
"self",
",",
"event_name",
",",
"func",
")",
":",
"if",
"isinstance",
"(",
"event_name",
",",
"list",
")",
"or",
"isinstance",
"(",
"event_name",
",",
"tuple",
")",
":",
"for",
"name",
"in",
"event_name",
":",
"self",
".",
"observe",
"(",
"name",
",",
"func",
")",
"return",
"self",
".",
"log",
"(",
"func",
".",
"__name__",
",",
"\"attached to\"",
",",
"event_name",
")",
"self",
".",
"_modify_event",
"(",
"event_name",
",",
"'append'",
",",
"func",
")"
]
| event_name := {'created', 'modified', 'deleted'}, list, tuple
Attaches a function to run to a particular event. The function must be
unique to be removed cleanly. Alternatively, event_name can be a list/
tuple of any of the string possibilities, to attach the function to multiple events
"event_name",
":",
"=",
"{",
"created",
"modified",
"deleted",
"}",
"list",
"tuple"
]
| python | train | 45.785714 |
redcap-tools/PyCap | redcap/project.py | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L114-L123 | def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0 | [
"def",
"is_longitudinal",
"(",
"self",
")",
":",
"return",
"len",
"(",
"self",
".",
"events",
")",
">",
"0",
"and",
"len",
"(",
"self",
".",
"arm_nums",
")",
">",
"0",
"and",
"len",
"(",
"self",
".",
"arm_names",
")",
">",
"0"
]
| Returns
-------
boolean :
longitudinal status of this project | [
"Returns",
"-------",
"boolean",
":",
"longitudinal",
"status",
"of",
"this",
"project"
]
| python | train | 25.8 |
MartinThoma/hwrt | hwrt/features.py | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/features.py#L34-L61 | def get_features(model_description_features):
"""Get features from a list of dictionaries
Parameters
----------
model_description_features : list of dictionaries
Examples
--------
>>> l = [{'StrokeCount': None}, \
{'ConstantPointCoordinates': \
[{'strokes': 4}, \
{'points_per_stroke': 81}, \
{'fill_empty_with': 0}, \
{'pen_down': False}] \
} \
]
>>> get_features(l)
[StrokeCount, ConstantPointCoordinates
- strokes: 4
- points per stroke: 81
- fill empty with: 0
- pen down feature: False
]
"""
return utils.get_objectlist(model_description_features,
config_key='features',
module=sys.modules[__name__]) | [
"def",
"get_features",
"(",
"model_description_features",
")",
":",
"return",
"utils",
".",
"get_objectlist",
"(",
"model_description_features",
",",
"config_key",
"=",
"'features'",
",",
"module",
"=",
"sys",
".",
"modules",
"[",
"__name__",
"]",
")"
]
| Get features from a list of dictionaries
Parameters
----------
model_description_features : list of dictionaries
Examples
--------
>>> l = [{'StrokeCount': None}, \
{'ConstantPointCoordinates': \
[{'strokes': 4}, \
{'points_per_stroke': 81}, \
{'fill_empty_with': 0}, \
{'pen_down': False}] \
} \
]
>>> get_features(l)
[StrokeCount, ConstantPointCoordinates
- strokes: 4
- points per stroke: 81
- fill empty with: 0
- pen down feature: False
] | [
"Get",
"features",
"from",
"a",
"list",
"of",
"dictionaries"
]
| python | train | 28.892857 |
dropbox/pyannotate | pyannotate_runtime/collect_types.py | https://github.com/dropbox/pyannotate/blob/d128c76b8a86f208e5c78716f2a917003650cebc/pyannotate_runtime/collect_types.py#L711-L727 | def _make_sampling_sequence(n):
# type: (int) -> List[int]
"""
Return a list containing the proposed call event sampling sequence.
Return events are paired with call events and not counted separately.
This is 0, 1, 2, ..., 4 plus 50, 100, 150, 200, etc.
The total list size is n.
"""
seq = list(range(5))
i = 50
while len(seq) < n:
seq.append(i)
i += 50
return seq | [
"def",
"_make_sampling_sequence",
"(",
"n",
")",
":",
"# type: (int) -> List[int]",
"seq",
"=",
"list",
"(",
"range",
"(",
"5",
")",
")",
"i",
"=",
"50",
"while",
"len",
"(",
"seq",
")",
"<",
"n",
":",
"seq",
".",
"append",
"(",
"i",
")",
"i",
"+=",
"50",
"return",
"seq"
]
| Return a list containing the proposed call event sampling sequence.
Return events are paired with call events and not counted separately.
This is 0, 1, 2, ..., 4 plus 50, 100, 150, 200, etc.
The total list size is n. | [
"Return",
"a",
"list",
"containing",
"the",
"proposed",
"call",
"event",
"sampling",
"sequence",
"."
]
| python | train | 24.176471 |
casouri/launchdman | launchdman/__init__.py | https://github.com/casouri/launchdman/blob/c83840e640cb075fab2534049f1e25fac6933c64/launchdman/__init__.py#L894-L898 | def minute(self):
'''set unit to minute'''
self.magnification = 60
self._update(self.baseNumber, self.magnification)
return self | [
"def",
"minute",
"(",
"self",
")",
":",
"self",
".",
"magnification",
"=",
"60",
"self",
".",
"_update",
"(",
"self",
".",
"baseNumber",
",",
"self",
".",
"magnification",
")",
"return",
"self"
]
| set unit to minute | [
"set",
"unit",
"to",
"minute"
]
| python | train | 31.2 |
ultrabug/py3status | py3status/i3status.py | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/i3status.py#L131-L140 | def run(self):
"""
updates the modules output.
Currently only time and tztime need to do this
"""
if self.update_time_value():
self.i3status.py3_wrapper.notify_update(self.module_name)
due_time = self.py3.time_in(sync_to=self.time_delta)
self.i3status.py3_wrapper.timeout_queue_add(self, due_time) | [
"def",
"run",
"(",
"self",
")",
":",
"if",
"self",
".",
"update_time_value",
"(",
")",
":",
"self",
".",
"i3status",
".",
"py3_wrapper",
".",
"notify_update",
"(",
"self",
".",
"module_name",
")",
"due_time",
"=",
"self",
".",
"py3",
".",
"time_in",
"(",
"sync_to",
"=",
"self",
".",
"time_delta",
")",
"self",
".",
"i3status",
".",
"py3_wrapper",
".",
"timeout_queue_add",
"(",
"self",
",",
"due_time",
")"
]
| updates the modules output.
Currently only time and tztime need to do this | [
"updates",
"the",
"modules",
"output",
".",
"Currently",
"only",
"time",
"and",
"tztime",
"need",
"to",
"do",
"this"
]
| python | train | 35.7 |
flatangle/flatlib | flatlib/lists.py | https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/lists.py#L63-L66 | def getObjectsInHouse(self, house):
""" Returns a list with all objects in a house. """
res = [obj for obj in self if house.hasObject(obj)]
return ObjectList(res) | [
"def",
"getObjectsInHouse",
"(",
"self",
",",
"house",
")",
":",
"res",
"=",
"[",
"obj",
"for",
"obj",
"in",
"self",
"if",
"house",
".",
"hasObject",
"(",
"obj",
")",
"]",
"return",
"ObjectList",
"(",
"res",
")"
]
| Returns a list with all objects in a house. | [
"Returns",
"a",
"list",
"with",
"all",
"objects",
"in",
"a",
"house",
"."
]
| python | train | 45.75 |
simpleai-team/simpleai | samples/search/missioners.py | https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/samples/search/missioners.py#L36-L43 | def result(self, s, a):
'''Result of applying an action to a state.'''
# result: boat on opposite side, and numbers of missioners and
# cannibals updated according to the move
if s[2] == 0:
return (s[0] - a[1][0], s[1] - a[1][1], 1)
else:
return (s[0] + a[1][0], s[1] + a[1][1], 0) | [
"def",
"result",
"(",
"self",
",",
"s",
",",
"a",
")",
":",
"# result: boat on opposite side, and numbers of missioners and",
"# cannibals updated according to the move",
"if",
"s",
"[",
"2",
"]",
"==",
"0",
":",
"return",
"(",
"s",
"[",
"0",
"]",
"-",
"a",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"s",
"[",
"1",
"]",
"-",
"a",
"[",
"1",
"]",
"[",
"1",
"]",
",",
"1",
")",
"else",
":",
"return",
"(",
"s",
"[",
"0",
"]",
"+",
"a",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"s",
"[",
"1",
"]",
"+",
"a",
"[",
"1",
"]",
"[",
"1",
"]",
",",
"0",
")"
]
| Result of applying an action to a state. | [
"Result",
"of",
"applying",
"an",
"action",
"to",
"a",
"state",
"."
]
| python | train | 42.25 |
CEA-COSMIC/ModOpt | modopt/opt/linear.py | https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/opt/linear.py#L186-L233 | def _check_inputs(self, operators, weights):
""" Check Inputs
        This method checks that the input operators and weights are correctly
formatted
Parameters
----------
operators : list, tuple or np.ndarray
List of linear operator class instances
weights : list, tuple or np.ndarray
List of weights for combining the linear adjoint operator results
Returns
-------
tuple operators and weights
Raises
------
ValueError
If the number of weights does not match the number of operators
TypeError
If the individual weight values are not floats
"""
operators = self._check_type(operators)
for operator in operators:
if not hasattr(operator, 'op'):
raise ValueError('Operators must contain "op" method.')
if not hasattr(operator, 'adj_op'):
raise ValueError('Operators must contain "adj_op" method.')
operator.op = check_callable(operator.op)
operator.cost = check_callable(operator.adj_op)
if not isinstance(weights, type(None)):
weights = self._check_type(weights)
if weights.size != operators.size:
raise ValueError('The number of weights must match the '
'number of operators.')
if not np.issubdtype(weights.dtype, np.floating):
raise TypeError('The weights must be a list of float values.')
return operators, weights | [
"def",
"_check_inputs",
"(",
"self",
",",
"operators",
",",
"weights",
")",
":",
"operators",
"=",
"self",
".",
"_check_type",
"(",
"operators",
")",
"for",
"operator",
"in",
"operators",
":",
"if",
"not",
"hasattr",
"(",
"operator",
",",
"'op'",
")",
":",
"raise",
"ValueError",
"(",
"'Operators must contain \"op\" method.'",
")",
"if",
"not",
"hasattr",
"(",
"operator",
",",
"'adj_op'",
")",
":",
"raise",
"ValueError",
"(",
"'Operators must contain \"adj_op\" method.'",
")",
"operator",
".",
"op",
"=",
"check_callable",
"(",
"operator",
".",
"op",
")",
"operator",
".",
"cost",
"=",
"check_callable",
"(",
"operator",
".",
"adj_op",
")",
"if",
"not",
"isinstance",
"(",
"weights",
",",
"type",
"(",
"None",
")",
")",
":",
"weights",
"=",
"self",
".",
"_check_type",
"(",
"weights",
")",
"if",
"weights",
".",
"size",
"!=",
"operators",
".",
"size",
":",
"raise",
"ValueError",
"(",
"'The number of weights must match the '",
"'number of operators.'",
")",
"if",
"not",
"np",
".",
"issubdtype",
"(",
"weights",
".",
"dtype",
",",
"np",
".",
"floating",
")",
":",
"raise",
"TypeError",
"(",
"'The weights must be a list of float values.'",
")",
"return",
"operators",
",",
"weights"
]
| Check Inputs
This method cheks that the input operators and weights are correctly
formatted
Parameters
----------
operators : list, tuple or np.ndarray
List of linear operator class instances
weights : list, tuple or np.ndarray
List of weights for combining the linear adjoint operator results
Returns
-------
tuple operators and weights
Raises
------
ValueError
If the number of weights does not match the number of operators
TypeError
If the individual weight values are not floats | [
"Check",
"Inputs"
]
| python | train | 32.375 |
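
The duck-typing checks in `_check_inputs` can be exercised on their own. Below is a minimal sketch assuming operators are plain objects exposing `op`/`adj_op`; the function name and messages are illustrative, not part of ModOpt.

import numpy as np

def check_operators(operators, weights=None):
    operators = np.asarray(operators)
    for operator in operators:
        # duck typing: require the linear-operator interface instead of a base class
        if not hasattr(operator, 'op') or not hasattr(operator, 'adj_op'):
            raise ValueError('Operators must expose "op" and "adj_op" methods.')
    if weights is not None:
        weights = np.asarray(weights)
        if weights.size != operators.size:
            raise ValueError('The number of weights must match the number of operators.')
        if not np.issubdtype(weights.dtype, np.floating):
            raise TypeError('The weights must be float values.')
    return operators, weights
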
neurodata/ndio | ndio/remote/neurodata.py | https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/neurodata.py#L549-L600 | def create_dataset(self,
name,
x_img_size,
y_img_size,
z_img_size,
x_vox_res,
y_vox_res,
z_vox_res,
x_offset=0,
y_offset=0,
z_offset=0,
scaling_levels=0,
scaling_option=0,
dataset_description="",
is_public=0):
"""
Creates a dataset.
Arguments:
name (str): Name of dataset
x_img_size (int): max x coordinate of image size
y_img_size (int): max y coordinate of image size
z_img_size (int): max z coordinate of image size
x_vox_res (float): x voxel resolution
y_vox_res (float): y voxel resolution
z_vox_res (float): z voxel resolution
x_offset (int): x offset amount
y_offset (int): y offset amount
z_offset (int): z offset amount
scaling_levels (int): Level of resolution scaling
scaling_option (int): Z slices is 0 or Isotropic is 1
dataset_description (str): Your description of the dataset
is_public (int): 1 'true' or 0 'false' for public viewability of the data set
Returns:
bool: True if dataset created, False if not
"""
return self.resources.create_dataset(name,
x_img_size,
y_img_size,
z_img_size,
x_vox_res,
y_vox_res,
z_vox_res,
x_offset,
y_offset,
z_offset,
scaling_levels,
scaling_option,
dataset_description,
is_public) | [
"def",
"create_dataset",
"(",
"self",
",",
"name",
",",
"x_img_size",
",",
"y_img_size",
",",
"z_img_size",
",",
"x_vox_res",
",",
"y_vox_res",
",",
"z_vox_res",
",",
"x_offset",
"=",
"0",
",",
"y_offset",
"=",
"0",
",",
"z_offset",
"=",
"0",
",",
"scaling_levels",
"=",
"0",
",",
"scaling_option",
"=",
"0",
",",
"dataset_description",
"=",
"\"\"",
",",
"is_public",
"=",
"0",
")",
":",
"return",
"self",
".",
"resources",
".",
"create_dataset",
"(",
"name",
",",
"x_img_size",
",",
"y_img_size",
",",
"z_img_size",
",",
"x_vox_res",
",",
"y_vox_res",
",",
"z_vox_res",
",",
"x_offset",
",",
"y_offset",
",",
"z_offset",
",",
"scaling_levels",
",",
"scaling_option",
",",
"dataset_description",
",",
"is_public",
")"
]
| Creates a dataset.
Arguments:
name (str): Name of dataset
x_img_size (int): max x coordinate of image size
y_img_size (int): max y coordinate of image size
z_img_size (int): max z coordinate of image size
x_vox_res (float): x voxel resolution
y_vox_res (float): y voxel resolution
z_vox_res (float): z voxel resolution
x_offset (int): x offset amount
y_offset (int): y offset amount
z_offset (int): z offset amount
scaling_levels (int): Level of resolution scaling
scaling_option (int): Z slices is 0 or Isotropic is 1
dataset_description (str): Your description of the dataset
is_public (int): 1 'true' or 0 'false' for viewability of data set
in public
Returns:
bool: True if dataset created, False if not | [
"Creates",
"a",
"dataset",
"."
]
| python | test | 42.942308 |
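
A hypothetical call sketch for `create_dataset`; only the keyword names come from the signature above. The client instance, dataset name, and dimensions are placeholders that vary by deployment.

# assumes `nd` is an already-configured ndio neurodata client instance
created = nd.create_dataset(
    'demo_dataset',
    x_img_size=2048, y_img_size=2048, z_img_size=100,
    x_vox_res=1.0, y_vox_res=1.0, z_vox_res=5.0,
    scaling_levels=3,
    dataset_description='demo volume',
    is_public=0)
print('dataset created:', created)
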
estnltk/estnltk | estnltk/prettyprinter/marker.py | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/marker.py#L100-L132 | def create_tags_with_concatenated_css_classes(tags):
"""Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags.
"""
current_classes = set()
result = []
for pos, group in group_tags_at_same_position(tags):
opening, closing = get_opening_closing_tags(group)
# handle closing tags at current position
closing_added = False
if len(closing) > 0:
closing_tag = Tag(pos, False, '')
for tag in closing:
current_classes.remove(tag.css_class)
result.append(closing_tag)
closing_added = True
# handle opening tags at current position
opening_added = False
if len(opening) > 0:
# handle the beginning of an overlap
if not closing_added and len(current_classes) > 0:
result.append(Tag(pos, False, ''))
for tag in opening:
current_classes.add(tag.css_class)
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
opening_added = True
# handle the end of an overlap
if closing_added and not opening_added and len(current_classes) > 0:
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
return result | [
"def",
"create_tags_with_concatenated_css_classes",
"(",
"tags",
")",
":",
"current_classes",
"=",
"set",
"(",
")",
"result",
"=",
"[",
"]",
"for",
"pos",
",",
"group",
"in",
"group_tags_at_same_position",
"(",
"tags",
")",
":",
"opening",
",",
"closing",
"=",
"get_opening_closing_tags",
"(",
"group",
")",
"# handle closing tags at current position",
"closing_added",
"=",
"False",
"if",
"len",
"(",
"closing",
")",
">",
"0",
":",
"closing_tag",
"=",
"Tag",
"(",
"pos",
",",
"False",
",",
"''",
")",
"for",
"tag",
"in",
"closing",
":",
"current_classes",
".",
"remove",
"(",
"tag",
".",
"css_class",
")",
"result",
".",
"append",
"(",
"closing_tag",
")",
"closing_added",
"=",
"True",
"# handle opening tags at current position",
"opening_added",
"=",
"False",
"if",
"len",
"(",
"opening",
")",
">",
"0",
":",
"# handle the begin of an overlap",
"if",
"not",
"closing_added",
"and",
"len",
"(",
"current_classes",
")",
">",
"0",
":",
"result",
".",
"append",
"(",
"Tag",
"(",
"pos",
",",
"False",
",",
"''",
")",
")",
"for",
"tag",
"in",
"opening",
":",
"current_classes",
".",
"add",
"(",
"tag",
".",
"css_class",
")",
"opening_tag",
"=",
"Tag",
"(",
"pos",
",",
"True",
",",
"' '",
".",
"join",
"(",
"sorted",
"(",
"current_classes",
")",
")",
")",
"result",
".",
"append",
"(",
"opening_tag",
")",
"opening_added",
"=",
"True",
"# handle the end of an overlap",
"if",
"closing_added",
"and",
"not",
"opening_added",
"and",
"len",
"(",
"current_classes",
")",
">",
"0",
":",
"opening_tag",
"=",
"Tag",
"(",
"pos",
",",
"True",
",",
"' '",
".",
"join",
"(",
"sorted",
"(",
"current_classes",
")",
")",
")",
"result",
".",
"append",
"(",
"opening_tag",
")",
"return",
"result"
]
| Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags. | [
"Function",
"that",
"creates",
"<mark",
">",
"tags",
"such",
"that",
"they",
"are",
"not",
"overlapping",
".",
"In",
"order",
"to",
"do",
"this",
"it",
"concatenates",
"the",
"css",
"classes",
"and",
"stores",
"the",
"concatenated",
"result",
"in",
"new",
"tags",
"."
]
| python | train | 43.484848 |
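
The boundary sweep above is a standard trick for rendering overlapping spans as non-overlapping runs with merged CSS classes. A simplified, self-contained sketch of the same idea over (start, end, css_class) triples:

def merged_runs(spans):
    # cut the axis at every span boundary, then emit one run per interval
    # carrying the sorted classes active on that interval
    points = sorted({p for s, e, _ in spans for p in (s, e)})
    runs = []
    for left, right in zip(points, points[1:]):
        active = sorted(c for s, e, c in spans if s <= left < e)
        if active:
            runs.append((left, right, ' '.join(active)))
    return runs

print(merged_runs([(0, 5, 'a'), (3, 8, 'b')]))
# [(0, 3, 'a'), (3, 5, 'a b'), (5, 8, 'b')]
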
hyperledger/sawtooth-core | rest_api/sawtooth_rest_api/route_handlers.py | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/rest_api/sawtooth_rest_api/route_handlers.py#L351-L375 | async def fetch_block(self, request):
"""Fetches a specific block from the validator, specified by id.
Request:
path:
- block_id: The 128-character id of the block to be fetched
Response:
data: A JSON object with the data from the fully expanded Block
link: The link to this exact query
"""
error_traps = [error_handlers.BlockNotFoundTrap]
block_id = request.match_info.get('block_id', '')
self._validate_id(block_id)
response = await self._query_validator(
Message.CLIENT_BLOCK_GET_BY_ID_REQUEST,
client_block_pb2.ClientBlockGetResponse,
client_block_pb2.ClientBlockGetByIdRequest(block_id=block_id),
error_traps)
return self._wrap_response(
request,
data=self._expand_block(response['block']),
metadata=self._get_metadata(request, response)) | [
"async",
"def",
"fetch_block",
"(",
"self",
",",
"request",
")",
":",
"error_traps",
"=",
"[",
"error_handlers",
".",
"BlockNotFoundTrap",
"]",
"block_id",
"=",
"request",
".",
"match_info",
".",
"get",
"(",
"'block_id'",
",",
"''",
")",
"self",
".",
"_validate_id",
"(",
"block_id",
")",
"response",
"=",
"await",
"self",
".",
"_query_validator",
"(",
"Message",
".",
"CLIENT_BLOCK_GET_BY_ID_REQUEST",
",",
"client_block_pb2",
".",
"ClientBlockGetResponse",
",",
"client_block_pb2",
".",
"ClientBlockGetByIdRequest",
"(",
"block_id",
"=",
"block_id",
")",
",",
"error_traps",
")",
"return",
"self",
".",
"_wrap_response",
"(",
"request",
",",
"data",
"=",
"self",
".",
"_expand_block",
"(",
"response",
"[",
"'block'",
"]",
")",
",",
"metadata",
"=",
"self",
".",
"_get_metadata",
"(",
"request",
",",
"response",
")",
")"
]
| Fetches a specific block from the validator, specified by id.
Request:
path:
- block_id: The 128-character id of the block to be fetched
Response:
data: A JSON object with the data from the fully expanded Block
link: The link to this exact query | [
"Fetches",
"a",
"specific",
"block",
"from",
"the",
"validator",
"specified",
"by",
"id",
".",
"Request",
":",
"path",
":",
"-",
"block_id",
":",
"The",
"128",
"-",
"character",
"id",
"of",
"the",
"block",
"to",
"be",
"fetched"
]
| python | train | 37.24 |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_nsh.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_nsh.py#L50-L68 | def send(self, line):
'''send some bytes'''
line = line.strip()
if line == ".":
self.stop()
return
mav = self.master.mav
if line != '+++':
line += "\r\n"
buf = [ord(x) for x in line]
buf.extend([0]*(70-len(buf)))
flags = mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND
flags |= mavutil.mavlink.SERIAL_CONTROL_FLAG_MULTI
flags |= mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE
mav.serial_control_send(self.serial_settings.port,
flags,
0, self.serial_settings.baudrate,
len(line), buf) | [
"def",
"send",
"(",
"self",
",",
"line",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"==",
"\".\"",
":",
"self",
".",
"stop",
"(",
")",
"return",
"mav",
"=",
"self",
".",
"master",
".",
"mav",
"if",
"line",
"!=",
"'+++'",
":",
"line",
"+=",
"\"\\r\\n\"",
"buf",
"=",
"[",
"ord",
"(",
"x",
")",
"for",
"x",
"in",
"line",
"]",
"buf",
".",
"extend",
"(",
"[",
"0",
"]",
"*",
"(",
"70",
"-",
"len",
"(",
"buf",
")",
")",
")",
"flags",
"=",
"mavutil",
".",
"mavlink",
".",
"SERIAL_CONTROL_FLAG_RESPOND",
"flags",
"|=",
"mavutil",
".",
"mavlink",
".",
"SERIAL_CONTROL_FLAG_MULTI",
"flags",
"|=",
"mavutil",
".",
"mavlink",
".",
"SERIAL_CONTROL_FLAG_EXCLUSIVE",
"mav",
".",
"serial_control_send",
"(",
"self",
".",
"serial_settings",
".",
"port",
",",
"flags",
",",
"0",
",",
"self",
".",
"serial_settings",
".",
"baudrate",
",",
"len",
"(",
"line",
")",
",",
"buf",
")"
]
| send some bytes | [
"send",
"some",
"bytes"
]
| python | train | 35.842105 |
saltstack/salt | salt/utils/sanitizers.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/sanitizers.py#L44-L51 | def filename(value):
'''
Remove everything that would affect paths in the filename
:param value:
:return:
'''
return re.sub('[^a-zA-Z0-9.-_ ]', '', os.path.basename(InputSanitizer.trim(value))) | [
"def",
"filename",
"(",
"value",
")",
":",
"return",
"re",
".",
"sub",
"(",
"'[^a-zA-Z0-9.-_ ]'",
",",
"''",
",",
"os",
".",
"path",
".",
"basename",
"(",
"InputSanitizer",
".",
"trim",
"(",
"value",
")",
")",
")"
]
| Remove everything that would affect paths in the filename
:param value:
:return: | [
"Remove",
"everything",
"that",
"would",
"affect",
"paths",
"in",
"the",
"filename"
]
| python | train | 29.375 |
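
One behavioural detail of the expression above: inside the character class, the unescaped `.-_` forms a character range from `.` (0x2E) to `_` (0x5F), so punctuation such as `/`, `:`, `;` and `@` also survives the filter. A standalone sketch, with `InputSanitizer.trim` approximated by `str.strip`:

import os
import re

def sanitized(value):
    return re.sub('[^a-zA-Z0-9.-_ ]', '', os.path.basename(value.strip()))

print(sanitized('/tmp/../report:v1@final!.txt'))  # 'report:v1@final.txt' ('!' dropped, ':' and '@' kept)
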
tradenity/python-sdk | tradenity/resources/table_rate_shipping.py | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/table_rate_shipping.py#L561-L581 | def delete_table_rate_shipping_by_id(cls, table_rate_shipping_id, **kwargs):
"""Delete TableRateShipping
Delete an instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
else:
(data) = cls._delete_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
return data | [
"def",
"delete_table_rate_shipping_by_id",
"(",
"cls",
",",
"table_rate_shipping_id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_delete_table_rate_shipping_by_id_with_http_info",
"(",
"table_rate_shipping_id",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_delete_table_rate_shipping_by_id_with_http_info",
"(",
"table_rate_shipping_id",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
]
| Delete TableRateShipping
Delete an instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | [
"Delete",
"TableRateShipping"
]
| python | train | 47.52381 |
pallets/werkzeug | src/werkzeug/datastructures.py | https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/datastructures.py#L2333-L2341 | def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append("%s-" % begin if begin >= 0 else str(begin))
else:
ranges.append("%s-%s" % (begin, end - 1))
return "%s=%s" % (self.units, ",".join(ranges)) | [
"def",
"to_header",
"(",
"self",
")",
":",
"ranges",
"=",
"[",
"]",
"for",
"begin",
",",
"end",
"in",
"self",
".",
"ranges",
":",
"if",
"end",
"is",
"None",
":",
"ranges",
".",
"append",
"(",
"\"%s-\"",
"%",
"begin",
"if",
"begin",
">=",
"0",
"else",
"str",
"(",
"begin",
")",
")",
"else",
":",
"ranges",
".",
"append",
"(",
"\"%s-%s\"",
"%",
"(",
"begin",
",",
"end",
"-",
"1",
")",
")",
"return",
"\"%s=%s\"",
"%",
"(",
"self",
".",
"units",
",",
"\",\"",
".",
"join",
"(",
"ranges",
")",
")"
]
| Converts the object back into an HTTP header. | [
"Converts",
"the",
"object",
"back",
"into",
"an",
"HTTP",
"header",
"."
]
| python | train | 40.666667 |
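
The `- 1` in the closed branch reflects that HTTP byte ranges are inclusive while the internal (begin, end) pairs are half-open; a negative `begin` with `end is None` renders as a suffix range. A standalone restatement:

def ranges_to_header(units, ranges):
    parts = []
    for begin, end in ranges:
        if end is None:
            parts.append('%s-' % begin if begin >= 0 else str(begin))
        else:
            parts.append('%s-%s' % (begin, end - 1))  # half-open -> inclusive
    return '%s=%s' % (units, ','.join(parts))

print(ranges_to_header('bytes', [(0, 500), (-500, None)]))  # bytes=0-499,-500
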
pyBookshelf/bookshelf | bookshelf/api_v2/pkg.py | https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/pkg.py#L241-L259 | def yum_install_from_url(pkg_name, url):
""" installs a pkg from a url
:param pkg_name: the name of the package to install
:param url: the full URL for the rpm package
"""
if is_package_installed(distribution='el', pkg=pkg_name) is False:
log_green(
"installing %s from %s" % (pkg_name, url))
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
result = sudo("rpm -i %s" % url)
if result.return_code == 0:
return True
elif result.return_code == 1:
return False
else: # print error to user
print(result)
raise SystemExit() | [
"def",
"yum_install_from_url",
"(",
"pkg_name",
",",
"url",
")",
":",
"if",
"is_package_installed",
"(",
"distribution",
"=",
"'el'",
",",
"pkg",
"=",
"pkg_name",
")",
"is",
"False",
":",
"log_green",
"(",
"\"installing %s from %s\"",
"%",
"(",
"pkg_name",
",",
"url",
")",
")",
"with",
"settings",
"(",
"hide",
"(",
"'warnings'",
",",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
")",
",",
"warn_only",
"=",
"True",
",",
"capture",
"=",
"True",
")",
":",
"result",
"=",
"sudo",
"(",
"\"rpm -i %s\"",
"%",
"url",
")",
"if",
"result",
".",
"return_code",
"==",
"0",
":",
"return",
"True",
"elif",
"result",
".",
"return_code",
"==",
"1",
":",
"return",
"False",
"else",
":",
"# print error to user",
"print",
"(",
"result",
")",
"raise",
"SystemExit",
"(",
")"
]
| installs a pkg from a url
:param pkg_name: the name of the package to install
:param url: the full URL for the rpm package | [
"installs",
"a",
"pkg",
"from",
"a",
"url",
"p",
"pkg_name",
":",
"the",
"name",
"of",
"the",
"package",
"to",
"install",
"p",
"url",
":",
"the",
"full",
"URL",
"for",
"the",
"rpm",
"package"
]
| python | train | 38.263158 |
guaix-ucm/pyemir | emirdrp/processing/wavecal/islitlet_progress.py | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/islitlet_progress.py#L27-L45 | def islitlet_progress(islitlet, islitlet_max):
"""Auxiliary function to print out progress in loop of slitlets.
Parameters
----------
islitlet : int
Current slitlet number.
islitlet_max : int
Maximum slitlet number.
"""
if islitlet % 10 == 0:
cout = str(islitlet // 10)
else:
cout = '.'
sys.stdout.write(cout)
if islitlet == islitlet_max:
sys.stdout.write('\n')
sys.stdout.flush() | [
"def",
"islitlet_progress",
"(",
"islitlet",
",",
"islitlet_max",
")",
":",
"if",
"islitlet",
"%",
"10",
"==",
"0",
":",
"cout",
"=",
"str",
"(",
"islitlet",
"//",
"10",
")",
"else",
":",
"cout",
"=",
"'.'",
"sys",
".",
"stdout",
".",
"write",
"(",
"cout",
")",
"if",
"islitlet",
"==",
"islitlet_max",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n'",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
]
| Auxiliary function to print out progress in loop of slitlets.
Parameters
----------
islitlet : int
Current slitlet number.
islitlet_max : int
Maximum slitlet number. | [
"Auxiliary",
"function",
"to",
"print",
"out",
"progress",
"in",
"loop",
"of",
"slitlets",
"."
]
| python | train | 23.578947 |
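
The helper prints a compact progress strip: one dot per slitlet and the tens digit at every multiple of ten. Run standalone it behaves like this:

import sys

def islitlet_progress(islitlet, islitlet_max):  # same logic as above
    sys.stdout.write(str(islitlet // 10) if islitlet % 10 == 0 else '.')
    if islitlet == islitlet_max:
        sys.stdout.write('\n')
    sys.stdout.flush()

for i in range(1, 26):
    islitlet_progress(i, 25)
# prints: .........1.........2.....
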
shoebot/shoebot | lib/web/cache.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/cache.py#L78-L89 | def age(self, id):
""" Returns the age of the cache entry, in days.
"""
path = self.hash(id)
if os.path.exists(path):
modified = datetime.datetime.fromtimestamp(os.stat(path)[8])
age = datetime.datetime.today() - modified
return age.days
else:
return 0 | [
"def",
"age",
"(",
"self",
",",
"id",
")",
":",
"path",
"=",
"self",
".",
"hash",
"(",
"id",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"modified",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"os",
".",
"stat",
"(",
"path",
")",
"[",
"8",
"]",
")",
"age",
"=",
"datetime",
".",
"datetime",
".",
"today",
"(",
")",
"-",
"modified",
"return",
"age",
".",
"days",
"else",
":",
"return",
"0"
]
| Returns the age of the cache entry, in days. | [
"Returns",
"the",
"age",
"of",
"the",
"cache",
"entry",
"in",
"days",
"."
]
| python | valid | 27.583333 |
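
`os.stat(path)[8]` is `st_mtime`, so the age is the time elapsed since the last modification, truncated to whole days. A minimal standalone equivalent:

import datetime
import os

def file_age_days(path):
    if not os.path.exists(path):
        return 0
    modified = datetime.datetime.fromtimestamp(os.stat(path).st_mtime)
    return (datetime.datetime.today() - modified).days
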
Robpol86/terminaltables | terminaltables/github_table.py | https://github.com/Robpol86/terminaltables/blob/ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc/terminaltables/github_table.py#L56-L70 | def gen_table(self, inner_widths, inner_heights, outer_widths):
"""Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
:return:
"""
for i, row in enumerate(self.table_data):
# Yield the row line by line (e.g. multi-line rows).
for line in self.gen_row_lines(row, 'row', inner_widths, inner_heights[i]):
yield line
# Yield heading separator.
if i == 0:
yield self.horizontal_border(None, outer_widths) | [
"def",
"gen_table",
"(",
"self",
",",
"inner_widths",
",",
"inner_heights",
",",
"outer_widths",
")",
":",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"table_data",
")",
":",
"# Yield the row line by line (e.g. multi-line rows).",
"for",
"line",
"in",
"self",
".",
"gen_row_lines",
"(",
"row",
",",
"'row'",
",",
"inner_widths",
",",
"inner_heights",
"[",
"i",
"]",
")",
":",
"yield",
"line",
"# Yield heading separator.",
"if",
"i",
"==",
"0",
":",
"yield",
"self",
".",
"horizontal_border",
"(",
"None",
",",
"outer_widths",
")"
]
| Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
:return: | [
"Combine",
"everything",
"and",
"yield",
"every",
"line",
"of",
"the",
"entire",
"table",
"with",
"borders",
"."
]
| python | train | 50.6 |
b3j0f/utils | b3j0f/utils/ut.py | https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/ut.py#L40-L54 | def _subset(subset, superset):
"""True if subset is a subset of superset.
:param dict subset: subset to compare.
:param dict superset: superset to compare.
:return: True iff all pairs (key, value) of subset are in superset.
:rtype: bool
"""
result = True
for k in subset:
result = k in superset and subset[k] == superset[k]
if not result:
break
return result | [
"def",
"_subset",
"(",
"subset",
",",
"superset",
")",
":",
"result",
"=",
"True",
"for",
"k",
"in",
"subset",
":",
"result",
"=",
"k",
"in",
"superset",
"and",
"subset",
"[",
"k",
"]",
"==",
"superset",
"[",
"k",
"]",
"if",
"not",
"result",
":",
"break",
"return",
"result"
]
| True if subset is a subset of superset.
:param dict subset: subset to compare.
:param dict superset: superset to compare.
:return: True iff all pairs (key, value) of subset are in superset.
:rtype: bool | [
"True",
"if",
"subset",
"is",
"a",
"subset",
"of",
"superset",
"."
]
| python | train | 27.333333 |
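
The early-exit loop is equivalent to a single `all(...)` expression, and on Python 3 dict item views support subset comparison directly (for hashable values):

def is_subset(subset, superset):  # same semantics as _subset above
    return all(k in superset and subset[k] == superset[k] for k in subset)

print(is_subset({'a': 1}, {'a': 1, 'b': 2}))         # True
print(is_subset({'a': 2}, {'a': 1, 'b': 2}))         # False
print({'a': 1}.items() <= {'a': 1, 'b': 2}.items())  # True (item-view comparison)
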
shaunduncan/helga-facts | helga_facts.py | https://github.com/shaunduncan/helga-facts/blob/956b1d93abccdaaf318d7cac4451edc7e73bf5e9/helga_facts.py#L80-L86 | def forget_fact(term):
"""
Forgets a fact by removing it from the database
"""
logger.info('Removing fact %s', term)
db.facts.remove({'term': term_regex(term)})
return random.choice(ACKS) | [
"def",
"forget_fact",
"(",
"term",
")",
":",
"logger",
".",
"info",
"(",
"'Removing fact %s'",
",",
"term",
")",
"db",
".",
"facts",
".",
"remove",
"(",
"{",
"'term'",
":",
"term_regex",
"(",
"term",
")",
"}",
")",
"return",
"random",
".",
"choice",
"(",
"ACKS",
")"
]
| Forgets a fact by removing it from the database | [
"Forgets",
"a",
"fact",
"by",
"removing",
"it",
"from",
"the",
"database"
]
| python | train | 29.285714 |
honzamach/pydgets | pydgets/widgets.py | https://github.com/honzamach/pydgets/blob/5ca4ce19fc2d9b5f41441fb9163810f8ca502e79/pydgets/widgets.py#L1233-L1267 | def _render_content(self, content, **settings):
"""
Perform widget rendering, but do not print anything.
"""
result = []
columns = settings[self.SETTING_COLUMNS]
# Format each table cell into string.
(columns, content) = self.table_format(columns, content)
# Enumerate each table row.
if settings[self.SETTING_FLAG_ENUMERATE]:
(columns, content) = self.table_enumerate(columns, content)
# Calculate the dimensions of each table column.
dimensions = self.table_measure(columns, content)
# Display table header.
sb = {k: settings[k] for k in (self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
result.append(self.fmt_border(dimensions, 't', **sb))
if settings[self.SETTING_FLAG_HEADER]:
s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_HEADER_FORMATING]
result.append(self.fmt_row_header(columns, dimensions, **s))
result.append(self.fmt_border(dimensions, 'm', **sb))
# Display table body.
for row in content:
s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_TEXT_FORMATING]
result.append(self.fmt_row(columns, dimensions, row, **s))
# Display table footer
result.append(self.fmt_border(dimensions, 'b', **sb))
return result | [
"def",
"_render_content",
"(",
"self",
",",
"content",
",",
"*",
"*",
"settings",
")",
":",
"result",
"=",
"[",
"]",
"columns",
"=",
"settings",
"[",
"self",
".",
"SETTING_COLUMNS",
"]",
"# Format each table cell into string.",
"(",
"columns",
",",
"content",
")",
"=",
"self",
".",
"table_format",
"(",
"columns",
",",
"content",
")",
"# Enumerate each table row.",
"if",
"settings",
"[",
"self",
".",
"SETTING_FLAG_ENUMERATE",
"]",
":",
"(",
"columns",
",",
"content",
")",
"=",
"self",
".",
"table_enumerate",
"(",
"columns",
",",
"content",
")",
"# Calculate the dimensions of each table column.",
"dimensions",
"=",
"self",
".",
"table_measure",
"(",
"columns",
",",
"content",
")",
"# Display table header.",
"sb",
"=",
"{",
"k",
":",
"settings",
"[",
"k",
"]",
"for",
"k",
"in",
"(",
"self",
".",
"SETTING_BORDER_STYLE",
",",
"self",
".",
"SETTING_BORDER_FORMATING",
")",
"}",
"result",
".",
"append",
"(",
"self",
".",
"fmt_border",
"(",
"dimensions",
",",
"'t'",
",",
"*",
"*",
"sb",
")",
")",
"if",
"settings",
"[",
"self",
".",
"SETTING_FLAG_HEADER",
"]",
":",
"s",
"=",
"{",
"k",
":",
"settings",
"[",
"k",
"]",
"for",
"k",
"in",
"(",
"self",
".",
"SETTING_FLAG_PLAIN",
",",
"self",
".",
"SETTING_BORDER_STYLE",
",",
"self",
".",
"SETTING_BORDER_FORMATING",
")",
"}",
"s",
"[",
"self",
".",
"SETTING_TEXT_FORMATING",
"]",
"=",
"settings",
"[",
"self",
".",
"SETTING_HEADER_FORMATING",
"]",
"result",
".",
"append",
"(",
"self",
".",
"fmt_row_header",
"(",
"columns",
",",
"dimensions",
",",
"*",
"*",
"s",
")",
")",
"result",
".",
"append",
"(",
"self",
".",
"fmt_border",
"(",
"dimensions",
",",
"'m'",
",",
"*",
"*",
"sb",
")",
")",
"# Display table body.",
"for",
"row",
"in",
"content",
":",
"s",
"=",
"{",
"k",
":",
"settings",
"[",
"k",
"]",
"for",
"k",
"in",
"(",
"self",
".",
"SETTING_FLAG_PLAIN",
",",
"self",
".",
"SETTING_BORDER_STYLE",
",",
"self",
".",
"SETTING_BORDER_FORMATING",
")",
"}",
"s",
"[",
"self",
".",
"SETTING_TEXT_FORMATING",
"]",
"=",
"settings",
"[",
"self",
".",
"SETTING_TEXT_FORMATING",
"]",
"result",
".",
"append",
"(",
"self",
".",
"fmt_row",
"(",
"columns",
",",
"dimensions",
",",
"row",
",",
"*",
"*",
"s",
")",
")",
"# Display table footer",
"result",
".",
"append",
"(",
"self",
".",
"fmt_border",
"(",
"dimensions",
",",
"'b'",
",",
"*",
"*",
"sb",
")",
")",
"return",
"result"
]
| Perform widget rendering, but do not print anything. | [
"Perform",
"widget",
"rendering",
"but",
"do",
"not",
"print",
"anything",
"."
]
| python | train | 45.685714 |
obriencj/python-javatools | javatools/__init__.py | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L1376-L1389 | def get_arg_type_descriptors(self):
"""
The parameter type descriptor list for a method, or None for a
field. Type descriptors are shorthand identifiers for the
builtin java types.
"""
if not self.is_method:
return tuple()
tp = _typeseq(self.get_descriptor())
tp = _typeseq(tp[0][1:-1])
return tp | [
"def",
"get_arg_type_descriptors",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_method",
":",
"return",
"tuple",
"(",
")",
"tp",
"=",
"_typeseq",
"(",
"self",
".",
"get_descriptor",
"(",
")",
")",
"tp",
"=",
"_typeseq",
"(",
"tp",
"[",
"0",
"]",
"[",
"1",
":",
"-",
"1",
"]",
")",
"return",
"tp"
]
| The parameter type descriptor list for a method, or None for a
field. Type descriptors are shorthand identifiers for the
builtin java types. | [
"The",
"parameter",
"type",
"descriptor",
"list",
"for",
"a",
"method",
"or",
"None",
"for",
"a",
"field",
".",
"Type",
"descriptors",
"are",
"shorthand",
"identifiers",
"for",
"the",
"builtin",
"java",
"types",
"."
]
| python | train | 26.5 |
OCHA-DAP/hdx-python-api | src/hdx/data/dataset.py | https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/dataset.py#L186-L200 | def add_update_resources(self, resources, ignore_datasetid=False):
# type: (List[Union[hdx.data.resource.Resource,Dict,str]], bool) -> None
"""Add new or update existing resources with new metadata to the dataset
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.
Returns:
None
"""
if not isinstance(resources, list):
raise HDXError('Resources should be a list!')
for resource in resources:
self.add_update_resource(resource, ignore_datasetid) | [
"def",
"add_update_resources",
"(",
"self",
",",
"resources",
",",
"ignore_datasetid",
"=",
"False",
")",
":",
"# type: (List[Union[hdx.data.resource.Resource,Dict,str]], bool) -> None",
"if",
"not",
"isinstance",
"(",
"resources",
",",
"list",
")",
":",
"raise",
"HDXError",
"(",
"'Resources should be a list!'",
")",
"for",
"resource",
"in",
"resources",
":",
"self",
".",
"add_update_resource",
"(",
"resource",
",",
"ignore_datasetid",
")"
]
| Add new or update existing resources with new metadata to the dataset
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.
Returns:
None | [
"Add",
"new",
"or",
"update",
"existing",
"resources",
"with",
"new",
"metadata",
"to",
"the",
"dataset"
]
| python | train | 50.066667 |
matthewdeanmartin/jiggle_version | jiggle_version/commands.py | https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/commands.py#L26-L47 | def bump_version(project, source, force_init): # type: (str, str, bool, bool) ->int
"""
Entry point
:return:
"""
file_opener = FileOpener()
# logger.debug("Starting version jiggler...")
jiggler = JiggleVersion(project, source, file_opener, force_init)
logger.debug(
"Current, next : {0} -> {1} : {2}".format(
jiggler.current_version, jiggler.version, jiggler.schema
)
)
if not jiggler.version_finder.validate_current_versions():
logger.debug(unicode(jiggler.version_finder.all_current_versions()))
logger.error("Versions not in sync, won't continue")
die(-1, "Versions not in sync, won't continue")
changed = jiggler.jiggle_all()
logger.debug("Changed {0} files".format(changed))
return changed | [
"def",
"bump_version",
"(",
"project",
",",
"source",
",",
"force_init",
")",
":",
"# type: (str, str, bool, bool) ->int",
"file_opener",
"=",
"FileOpener",
"(",
")",
"# logger.debug(\"Starting version jiggler...\")",
"jiggler",
"=",
"JiggleVersion",
"(",
"project",
",",
"source",
",",
"file_opener",
",",
"force_init",
")",
"logger",
".",
"debug",
"(",
"\"Current, next : {0} -> {1} : {2}\"",
".",
"format",
"(",
"jiggler",
".",
"current_version",
",",
"jiggler",
".",
"version",
",",
"jiggler",
".",
"schema",
")",
")",
"if",
"not",
"jiggler",
".",
"version_finder",
".",
"validate_current_versions",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"unicode",
"(",
"jiggler",
".",
"version_finder",
".",
"all_current_versions",
"(",
")",
")",
")",
"logger",
".",
"error",
"(",
"\"Versions not in sync, won't continue\"",
")",
"die",
"(",
"-",
"1",
",",
"\"Versions not in sync, won't continue\"",
")",
"changed",
"=",
"jiggler",
".",
"jiggle_all",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Changed {0} files\"",
".",
"format",
"(",
"changed",
")",
")",
"return",
"changed"
]
| Entry point
:return: | [
"Entry",
"point",
":",
"return",
":"
]
| python | train | 35.454545 |
Calysto/calysto | calysto/ai/conx.py | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L3008-L3014 | def loadTargetsFromFile(self, filename, cols = None, everyNrows = 1,
delim = ' ', checkEven = 1):
"""
Loads targets from file.
"""
self.targets = self.loadVectors(filename, cols, everyNrows,
delim, checkEven) | [
"def",
"loadTargetsFromFile",
"(",
"self",
",",
"filename",
",",
"cols",
"=",
"None",
",",
"everyNrows",
"=",
"1",
",",
"delim",
"=",
"' '",
",",
"checkEven",
"=",
"1",
")",
":",
"self",
".",
"targets",
"=",
"self",
".",
"loadVectors",
"(",
"filename",
",",
"cols",
",",
"everyNrows",
",",
"delim",
",",
"checkEven",
")"
]
| Loads targets from file. | [
"Loads",
"targets",
"from",
"file",
"."
]
| python | train | 43.142857 |
ajbosco/dag-factory | dagfactory/utils.py | https://github.com/ajbosco/dag-factory/blob/cc7cfe74e62f82859fe38d527e95311a2805723b/dagfactory/utils.py#L39-L65 | def get_time_delta(time_string: str) -> timedelta:
"""
Takes a time string (1 hours, 10 days, etc.) and returns
a python timedelta object
:param time_string: the time value to convert to a timedelta
:type time_string: str
:returns: datetime.timedelta for relative time
:rtype: datetime.timedelta
"""
rel_time: Pattern = re.compile(
pattern=r"((?P<hours>\d+?)\s+hour)?((?P<minutes>\d+?)\s+minute)?((?P<seconds>\d+?)\s+second)?((?P<days>\d+?)\s+day)?",
# noqa
flags=re.IGNORECASE,
)
parts: Optional[Match[AnyStr]] = rel_time.match(string=time_string)
if not parts:
raise Exception(f"Invalid relative time: {time_string}")
# https://docs.python.org/3/library/re.html#re.Match.groupdict
parts: Dict[str, str] = parts.groupdict()
time_params = {}
if all(value == None for value in parts.values()):
raise Exception(f"Invalid relative time: {time_string}")
for time_unit, magnitude in parts.items():
if magnitude:
time_params[time_unit]: int = int(magnitude)
return timedelta(**time_params) | [
"def",
"get_time_delta",
"(",
"time_string",
":",
"str",
")",
"->",
"timedelta",
":",
"rel_time",
":",
"Pattern",
"=",
"re",
".",
"compile",
"(",
"pattern",
"=",
"r\"((?P<hours>\\d+?)\\s+hour)?((?P<minutes>\\d+?)\\s+minute)?((?P<seconds>\\d+?)\\s+second)?((?P<days>\\d+?)\\s+day)?\"",
",",
"# noqa",
"flags",
"=",
"re",
".",
"IGNORECASE",
",",
")",
"parts",
":",
"Optional",
"[",
"Match",
"[",
"AnyStr",
"]",
"]",
"=",
"rel_time",
".",
"match",
"(",
"string",
"=",
"time_string",
")",
"if",
"not",
"parts",
":",
"raise",
"Exception",
"(",
"f\"Invalid relative time: {time_string}\"",
")",
"# https://docs.python.org/3/library/re.html#re.Match.groupdict",
"parts",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"parts",
".",
"groupdict",
"(",
")",
"time_params",
"=",
"{",
"}",
"if",
"all",
"(",
"value",
"==",
"None",
"for",
"value",
"in",
"parts",
".",
"values",
"(",
")",
")",
":",
"raise",
"Exception",
"(",
"f\"Invalid relative time: {time_string}\"",
")",
"for",
"time_unit",
",",
"magnitude",
"in",
"parts",
".",
"items",
"(",
")",
":",
"if",
"magnitude",
":",
"time_params",
"[",
"time_unit",
"]",
":",
"int",
"=",
"int",
"(",
"magnitude",
")",
"return",
"timedelta",
"(",
"*",
"*",
"time_params",
")"
]
| Takes a time string (1 hours, 10 days, etc.) and returns
a python timedelta object
:param time_string: the time value to convert to a timedelta
:type time_string: str
:returns: datetime.timedelta for relative time
:rtype: datetime.timedelta | [
"Takes",
"a",
"time",
"string",
"(",
"1",
"hours",
"10",
"days",
"etc",
".",
")",
"and",
"returns",
"a",
"python",
"timedelta",
"object"
]
| python | train | 40.296296 |
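
The pattern can be exercised on its own. Note that `re.match` succeeds on any prefix, which is why the helper raises only when no unit matches at the very start; single-unit strings behave as expected:

import re
from datetime import timedelta

rel_time = re.compile(
    r"((?P<hours>\d+?)\s+hour)?((?P<minutes>\d+?)\s+minute)?"
    r"((?P<seconds>\d+?)\s+second)?((?P<days>\d+?)\s+day)?",
    re.IGNORECASE)

for text in ('2 hours', '10 days', '90 seconds'):
    parts = rel_time.match(text).groupdict()
    print(text, '->', timedelta(**{k: int(v) for k, v in parts.items() if v}))
# 2 hours -> 2:00:00
# 10 days -> 10 days, 0:00:00
# 90 seconds -> 0:01:30
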
hydpy-dev/hydpy | hydpy/core/sequencetools.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/sequencetools.py#L910-L914 | def seriesshape(self):
"""Shape of the whole time series (time being the first dimension)."""
seriesshape = [len(hydpy.pub.timegrids.init)]
seriesshape.extend(self.shape)
return tuple(seriesshape) | [
"def",
"seriesshape",
"(",
"self",
")",
":",
"seriesshape",
"=",
"[",
"len",
"(",
"hydpy",
".",
"pub",
".",
"timegrids",
".",
"init",
")",
"]",
"seriesshape",
".",
"extend",
"(",
"self",
".",
"shape",
")",
"return",
"tuple",
"(",
"seriesshape",
")"
]
| Shape of the whole time series (time being the first dimension). | [
"Shape",
"of",
"the",
"whole",
"time",
"series",
"(",
"time",
"being",
"the",
"first",
"dimension",
")",
"."
]
| python | train | 44.8 |
edeposit/edeposit.amqp.ftp | src/edeposit/amqp/ftp/decoders/validator.py | https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/decoders/validator.py#L243-L270 | def _convert_to_dict(data):
"""
Convert `data` to dictionary.
Tries to make sense of multidimensional arrays.
Args:
data: List/dict/tuple of variable dimension.
Returns:
dict: If the data can be converted to dictionary.
Raises:
MetaParsingException: When the data are unconvertible to dict.
"""
if isinstance(data, dict):
return data
if isinstance(data, list) or isinstance(data, tuple):
if _all_correct_list(data):
return dict(data)
else:
data = zip(data[::2], data[1::2])
return dict(data)
else:
raise MetaParsingException(
"Can't decode provided metadata - unknown structure."
) | [
"def",
"_convert_to_dict",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"return",
"data",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
"or",
"isinstance",
"(",
"data",
",",
"tuple",
")",
":",
"if",
"_all_correct_list",
"(",
"data",
")",
":",
"return",
"dict",
"(",
"data",
")",
"else",
":",
"data",
"=",
"zip",
"(",
"data",
"[",
":",
":",
"2",
"]",
",",
"data",
"[",
"1",
":",
":",
"2",
"]",
")",
"return",
"dict",
"(",
"data",
")",
"else",
":",
"raise",
"MetaParsingException",
"(",
"\"Can't decode provided metadata - unknown structure.\"",
")"
]
| Convert `data` to dictionary.
Tries to make sense of multidimensional arrays.
Args:
data: List/dict/tuple of variable dimension.
Returns:
dict: If the data can be converted to dictionary.
Raises:
MetaParsingException: When the data are unconvertible to dict. | [
"Convert",
"data",
"to",
"dictionary",
"."
]
| python | train | 25.357143 |
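
The two list branches reduce to two familiar idioms: `dict` over a list of pairs, and zipping even against odd elements when the list is flat.

pairs = [('isbn', '80-251-0225-4'), ('title', 'Example')]
flat = ['isbn', '80-251-0225-4', 'title', 'Example']

print(dict(pairs))                       # {'isbn': '80-251-0225-4', 'title': 'Example'}
print(dict(zip(flat[::2], flat[1::2])))  # same result from the flattened form
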
saltstack/salt | salt/modules/mount.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mount.py#L557-L568 | def match(self, fsys_view):
'''
Compare potentially partial criteria against built filesystems entry dictionary
'''
evalue_dict = fsys_view[1]
for key, value in six.viewitems(self.criteria):
if key in evalue_dict:
if evalue_dict[key] != value:
return False
else:
return False
return True | [
"def",
"match",
"(",
"self",
",",
"fsys_view",
")",
":",
"evalue_dict",
"=",
"fsys_view",
"[",
"1",
"]",
"for",
"key",
",",
"value",
"in",
"six",
".",
"viewitems",
"(",
"self",
".",
"criteria",
")",
":",
"if",
"key",
"in",
"evalue_dict",
":",
"if",
"evalue_dict",
"[",
"key",
"]",
"!=",
"value",
":",
"return",
"False",
"else",
":",
"return",
"False",
"return",
"True"
]
| Compare potentially partial criteria against built filesystems entry dictionary | [
"Compare",
"potentially",
"partial",
"criteria",
"against",
"built",
"filesystems",
"entry",
"dictionary"
]
| python | train | 33.333333 |
PmagPy/PmagPy | pmagpy/pmag.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L4916-L4971 | def doprinc(data):
"""
Gets principal components from data in form of a list of [dec,inc] data.
Parameters
----------
data : nested list of dec, inc directions
Returns
-------
ppars : dictionary with the principal components
dec : principal direction declination
inc : principal direction inclination
V2dec : intermediate eigenvector declination
V2inc : intermediate eigenvector inclination
V3dec : minor eigenvector declination
V3inc : minor eigenvector inclination
tau1 : major eigenvalue
tau2 : intermediate eigenvalue
tau3 : minor eigenvalue
N : number of points
Edir : elongation direction [dec, inc, length]
"""
ppars = {}
rad = old_div(np.pi, 180.)
X = dir2cart(data)
# for rec in data:
# dir=[]
# for c in rec: dir.append(c)
# cart= (dir2cart(dir))
# X.append(cart)
# put in T matrix
#
T = np.array(Tmatrix(X))
#
# get sorted evals/evects
#
t, V = tauV(T)
Pdir = cart2dir(V[0])
ppars['Edir'] = cart2dir(V[1]) # elongation direction
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['dec'] = dec
ppars['inc'] = inc
ppars['N'] = len(data)
ppars['tau1'] = t[0]
ppars['tau2'] = t[1]
ppars['tau3'] = t[2]
Pdir = cart2dir(V[1])
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['V2dec'] = dec
ppars['V2inc'] = inc
Pdir = cart2dir(V[2])
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['V3dec'] = dec
ppars['V3inc'] = inc
return ppars | [
"def",
"doprinc",
"(",
"data",
")",
":",
"ppars",
"=",
"{",
"}",
"rad",
"=",
"old_div",
"(",
"np",
".",
"pi",
",",
"180.",
")",
"X",
"=",
"dir2cart",
"(",
"data",
")",
"# for rec in data:",
"# dir=[]",
"# for c in rec: dir.append(c)",
"# cart= (dir2cart(dir))",
"# X.append(cart)",
"# put in T matrix",
"#",
"T",
"=",
"np",
".",
"array",
"(",
"Tmatrix",
"(",
"X",
")",
")",
"#",
"# get sorted evals/evects",
"#",
"t",
",",
"V",
"=",
"tauV",
"(",
"T",
")",
"Pdir",
"=",
"cart2dir",
"(",
"V",
"[",
"0",
"]",
")",
"ppars",
"[",
"'Edir'",
"]",
"=",
"cart2dir",
"(",
"V",
"[",
"1",
"]",
")",
"# elongation direction",
"dec",
",",
"inc",
"=",
"doflip",
"(",
"Pdir",
"[",
"0",
"]",
",",
"Pdir",
"[",
"1",
"]",
")",
"ppars",
"[",
"'dec'",
"]",
"=",
"dec",
"ppars",
"[",
"'inc'",
"]",
"=",
"inc",
"ppars",
"[",
"'N'",
"]",
"=",
"len",
"(",
"data",
")",
"ppars",
"[",
"'tau1'",
"]",
"=",
"t",
"[",
"0",
"]",
"ppars",
"[",
"'tau2'",
"]",
"=",
"t",
"[",
"1",
"]",
"ppars",
"[",
"'tau3'",
"]",
"=",
"t",
"[",
"2",
"]",
"Pdir",
"=",
"cart2dir",
"(",
"V",
"[",
"1",
"]",
")",
"dec",
",",
"inc",
"=",
"doflip",
"(",
"Pdir",
"[",
"0",
"]",
",",
"Pdir",
"[",
"1",
"]",
")",
"ppars",
"[",
"'V2dec'",
"]",
"=",
"dec",
"ppars",
"[",
"'V2inc'",
"]",
"=",
"inc",
"Pdir",
"=",
"cart2dir",
"(",
"V",
"[",
"2",
"]",
")",
"dec",
",",
"inc",
"=",
"doflip",
"(",
"Pdir",
"[",
"0",
"]",
",",
"Pdir",
"[",
"1",
"]",
")",
"ppars",
"[",
"'V3dec'",
"]",
"=",
"dec",
"ppars",
"[",
"'V3inc'",
"]",
"=",
"inc",
"return",
"ppars"
]
| Gets principal components from data in form of a list of [dec,inc] data.
Parameters
----------
data : nested list of dec, inc directions
Returns
-------
ppars : dictionary with the principal components
dec : principal direction declination
inc : principal direction inclination
V2dec : intermediate eigenvector declination
V2inc : intermediate eigenvector inclination
V3dec : minor eigenvector declination
V3inc : minor eigenvector inclination
tau1 : major eigenvalue
tau2 : intermediate eigenvalue
tau3 : minor eigenvalue
N : number of points
Edir : elongation direction [dec, inc, length] | [
"Gets",
"principal",
"components",
"from",
"data",
"in",
"form",
"of",
"a",
"list",
"of",
"[",
"dec",
"inc",
"]",
"data",
"."
]
| python | train | 27.035714 |
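
At its core this is an eigen-decomposition of the orientation matrix T = X^T X built from unit vectors. A compact numpy sketch of the same idea, with the `dir2cart`/`cart2dir` conversions inlined and `doflip`'s polarity convention omitted:

import numpy as np

def principal_direction(decs_incs):
    d = np.radians(np.asarray(decs_incs, dtype=float))
    # unit vectors: x = cos(inc)cos(dec), y = cos(inc)sin(dec), z = sin(inc)
    X = np.column_stack([np.cos(d[:, 1]) * np.cos(d[:, 0]),
                         np.cos(d[:, 1]) * np.sin(d[:, 0]),
                         np.sin(d[:, 1])])
    evals, evecs = np.linalg.eigh(X.T @ X)      # ascending eigenvalues
    v = evecs[:, -1]                            # principal eigenvector
    dec = np.degrees(np.arctan2(v[1], v[0])) % 360.0
    inc = np.degrees(np.arcsin(v[2]))
    return dec, inc, evals[::-1] / evals.sum()  # taus sum to 1 (trace = N)
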
benhoff/vexbot | vexbot/adapters/messaging.py | https://github.com/benhoff/vexbot/blob/9b844eb20e84eea92a0e7db7d86a90094956c38f/vexbot/adapters/messaging.py#L257-L270 | def start(self) -> None:
"""
Start the internal control loop. Potentially blocking, depending on
the value of `_run_control_loop` set by the initializer.
"""
self._setup()
if self._run_control_loop:
asyncio.set_event_loop(asyncio.new_event_loop())
self._heartbeat_reciever.start()
self._logger.info(' Start Loop')
return self.loop.start()
else:
self._logger.debug(' run_control_loop == False') | [
"def",
"start",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"_setup",
"(",
")",
"if",
"self",
".",
"_run_control_loop",
":",
"asyncio",
".",
"set_event_loop",
"(",
"asyncio",
".",
"new_event_loop",
"(",
")",
")",
"self",
".",
"_heartbeat_reciever",
".",
"start",
"(",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"' Start Loop'",
")",
"return",
"self",
".",
"loop",
".",
"start",
"(",
")",
"else",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"' run_control_loop == False'",
")"
]
| Start the internal control loop. Potentially blocking, depending on
the value of `_run_control_loop` set by the initializer. | [
"Start",
"the",
"internal",
"control",
"loop",
".",
"Potentially",
"blocking",
"depending",
"on",
"the",
"value",
"of",
"_run_control_loop",
"set",
"by",
"the",
"initializer",
"."
]
| python | train | 35.5 |
kodexlab/reliure | reliure/pipeline.py | https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/pipeline.py#L221-L231 | def change_option_default(self, opt_name, default_val):
""" Change the default value of an option
:param opt_name: option name
:type opt_name: str
:param value: new default option value
"""
if not self.has_option(opt_name):
raise ValueError("Unknow option name (%s)" % opt_name)
self._options[opt_name].default = default_val | [
"def",
"change_option_default",
"(",
"self",
",",
"opt_name",
",",
"default_val",
")",
":",
"if",
"not",
"self",
".",
"has_option",
"(",
"opt_name",
")",
":",
"raise",
"ValueError",
"(",
"\"Unknow option name (%s)\"",
"%",
"opt_name",
")",
"self",
".",
"_options",
"[",
"opt_name",
"]",
".",
"default",
"=",
"default_val"
]
| Change the default value of an option
:param opt_name: option name
:type opt_name: str
:param value: new default option value | [
"Change",
"the",
"default",
"value",
"of",
"an",
"option",
":",
"param",
"opt_name",
":",
"option",
"name",
":",
"type",
"opt_name",
":",
"str",
":",
"param",
"value",
":",
"new",
"default",
"option",
"value"
]
| python | train | 36.363636 |
Clinical-Genomics/scout | scout/parse/variant/frequency.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/variant/frequency.py#L113-L147 | def parse_sv_frequencies(variant):
"""Parsing of some custom sv frequencies
These are very specific at the moment; this will hopefully get better over time when the
field of structural variants is more developed.
Args:
variant(cyvcf2.Variant)
Returns:
sv_frequencies(dict)
"""
frequency_keys = [
'clingen_cgh_benignAF',
'clingen_cgh_benign',
'clingen_cgh_pathogenicAF',
'clingen_cgh_pathogenic',
'clingen_ngi',
'clingen_ngiAF',
'swegen',
'swegenAF',
'decipherAF',
'decipher'
]
sv_frequencies = {}
for key in frequency_keys:
value = variant.INFO.get(key, 0)
if 'AF' in key:
value = float(value)
else:
value = int(value)
if value > 0:
sv_frequencies[key] = value
return sv_frequencies | [
"def",
"parse_sv_frequencies",
"(",
"variant",
")",
":",
"frequency_keys",
"=",
"[",
"'clingen_cgh_benignAF'",
",",
"'clingen_cgh_benign'",
",",
"'clingen_cgh_pathogenicAF'",
",",
"'clingen_cgh_pathogenic'",
",",
"'clingen_ngi'",
",",
"'clingen_ngiAF'",
",",
"'swegen'",
",",
"'swegenAF'",
",",
"'decipherAF'",
",",
"'decipher'",
"]",
"sv_frequencies",
"=",
"{",
"}",
"for",
"key",
"in",
"frequency_keys",
":",
"value",
"=",
"variant",
".",
"INFO",
".",
"get",
"(",
"key",
",",
"0",
")",
"if",
"'AF'",
"in",
"key",
":",
"value",
"=",
"float",
"(",
"value",
")",
"else",
":",
"value",
"=",
"int",
"(",
"value",
")",
"if",
"value",
">",
"0",
":",
"sv_frequencies",
"[",
"key",
"]",
"=",
"value",
"return",
"sv_frequencies"
]
| Parsing of some custom sv frequencies
These are very specific at the moment; this will hopefully get better over time when the
field of structural variants is more developed.
Args:
variant(cyvcf2.Variant)
Returns:
sv_frequencies(dict) | [
"Parsing",
"of",
"some",
"custom",
"sv",
"frequencies",
"These",
"are",
"very",
"specific",
"at",
"the",
"moment",
"this",
"will",
"hopefully",
"get",
"better",
"over",
"time",
"when",
"the",
"field",
"of",
"structural",
"variants",
"is",
"more",
"developed",
"."
]
| python | test | 24.685714 |
dsoprea/PySecure | pysecure/adapters/sftpa.py | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L886-L912 | def read(self, size=None):
"""Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file.
"""
if size is not None:
return self.__sf.read(size)
block_size = self.__class__.__block_size
b = bytearray()
received_bytes = 0
while 1:
partial = self.__sf.read(block_size)
# self.__log.debug("Reading (%d) bytes. (%d) bytes returned." %
# (block_size, len(partial)))
b.extend(partial)
received_bytes += len(partial)
if len(partial) < block_size:
self.__log.debug("End of file.")
break
self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
return b | [
"def",
"read",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"size",
"is",
"not",
"None",
":",
"return",
"self",
".",
"__sf",
".",
"read",
"(",
"size",
")",
"block_size",
"=",
"self",
".",
"__class__",
".",
"__block_size",
"b",
"=",
"bytearray",
"(",
")",
"received_bytes",
"=",
"0",
"while",
"1",
":",
"partial",
"=",
"self",
".",
"__sf",
".",
"read",
"(",
"block_size",
")",
"# self.__log.debug(\"Reading (%d) bytes. (%d) bytes returned.\" % ",
"# (block_size, len(partial)))",
"b",
".",
"extend",
"(",
"partial",
")",
"received_bytes",
"+=",
"len",
"(",
"partial",
")",
"if",
"len",
"(",
"partial",
")",
"<",
"block_size",
":",
"self",
".",
"__log",
".",
"debug",
"(",
"\"End of file.\"",
")",
"break",
"self",
".",
"__log",
".",
"debug",
"(",
"\"Read (%d) bytes for total-file.\"",
"%",
"(",
"received_bytes",
")",
")",
"return",
"b"
]
| Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file. | [
"Read",
"a",
"length",
"of",
"bytes",
".",
"Return",
"empty",
"on",
"EOF",
".",
"If",
"size",
"is",
"omitted",
"return",
"whole",
"file",
"."
]
| python | train | 28.740741 |
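
The accumulation loop treats any short read as end-of-file; that holds for the SFTP layer here (full blocks until the final one) but is not a safe assumption for arbitrary sockets. A standalone version against any file-like object:

import io

def read_all(stream, block_size=8192):
    buf = bytearray()
    while True:
        chunk = stream.read(block_size)
        buf.extend(chunk)
        if len(chunk) < block_size:  # short read: end of data
            return bytes(buf)

print(len(read_all(io.BytesIO(b'x' * 20000))))  # 20000
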
all-umass/graphs | graphs/mixins/analysis.py | https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/analysis.py#L94-L97 | def profile(self):
"""Measure of bandedness, also known as 'envelope size'."""
leftmost_idx = np.argmax(self.matrix('dense').astype(bool), axis=0)
return (np.arange(self.num_vertices()) - leftmost_idx).sum() | [
"def",
"profile",
"(",
"self",
")",
":",
"leftmost_idx",
"=",
"np",
".",
"argmax",
"(",
"self",
".",
"matrix",
"(",
"'dense'",
")",
".",
"astype",
"(",
"bool",
")",
",",
"axis",
"=",
"0",
")",
"return",
"(",
"np",
".",
"arange",
"(",
"self",
".",
"num_vertices",
"(",
")",
")",
"-",
"leftmost_idx",
")",
".",
"sum",
"(",
")"
]
| Measure of bandedness, also known as 'envelope size'. | [
"Measure",
"of",
"bandedness",
"also",
"known",
"as",
"envelope",
"size",
"."
]
| python | train | 54 |
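
For a dense matrix, `np.argmax` over the boolean array returns the first nonzero row per column, so the profile sums each column's distance from the diagonal up to its topmost entry. A small worked example:

import numpy as np

A = np.array([[1, 1, 0],
              [1, 1, 1],
              [0, 1, 1]])
leftmost = np.argmax(A.astype(bool), axis=0)     # array([0, 0, 1])
print((np.arange(A.shape[1]) - leftmost).sum())  # profile = 2
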
LudovicRousseau/pyscard | smartcard/CardConnection.py | https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/CardConnection.py#L129-L154 | def transmit(self, bytes, protocol=None):
"""Transmit an apdu. Internally calls doTransmit() class method
and notifies observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
@param bytes: list of bytes to transmit
@param protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
CardConnection.RAW_protocol
"""
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'command',
[bytes, protocol]))
data, sw1, sw2 = self.doTransmit(bytes, protocol)
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'response',
[data, sw1, sw2]))
if self.errorcheckingchain is not None:
self.errorcheckingchain[0](data, sw1, sw2)
return data, sw1, sw2 | [
"def",
"transmit",
"(",
"self",
",",
"bytes",
",",
"protocol",
"=",
"None",
")",
":",
"Observable",
".",
"setChanged",
"(",
"self",
")",
"Observable",
".",
"notifyObservers",
"(",
"self",
",",
"CardConnectionEvent",
"(",
"'command'",
",",
"[",
"bytes",
",",
"protocol",
"]",
")",
")",
"data",
",",
"sw1",
",",
"sw2",
"=",
"self",
".",
"doTransmit",
"(",
"bytes",
",",
"protocol",
")",
"Observable",
".",
"setChanged",
"(",
"self",
")",
"Observable",
".",
"notifyObservers",
"(",
"self",
",",
"CardConnectionEvent",
"(",
"'response'",
",",
"[",
"data",
",",
"sw1",
",",
"sw2",
"]",
")",
")",
"if",
"self",
".",
"errorcheckingchain",
"is",
"not",
"None",
":",
"self",
".",
"errorcheckingchain",
"[",
"0",
"]",
"(",
"data",
",",
"sw1",
",",
"sw2",
")",
"return",
"data",
",",
"sw1",
",",
"sw2"
]
| Transmit an apdu. Internally calls doTransmit() class method
and notifies observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
@param bytes: list of bytes to transmit
@param protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
CardConnection.RAW_protocol | [
"Transmit",
"an",
"apdu",
".",
"Internally",
"calls",
"doTransmit",
"()",
"class",
"method",
"and",
"notify",
"observers",
"upon",
"command",
"/",
"response",
"APDU",
"events",
".",
"Subclasses",
"must",
"override",
"the",
"doTransmit",
"()",
"class",
"method",
"."
]
| python | train | 44.769231 |
rosenbrockc/fortpy | fortpy/parsers/module.py | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/parsers/module.py#L260-L265 | def _dict_increment(self, dictionary, key):
"""Increments the value of the dictionary at the specified key."""
if key in dictionary:
dictionary[key] += 1
else:
dictionary[key] = 1 | [
"def",
"_dict_increment",
"(",
"self",
",",
"dictionary",
",",
"key",
")",
":",
"if",
"key",
"in",
"dictionary",
":",
"dictionary",
"[",
"key",
"]",
"+=",
"1",
"else",
":",
"dictionary",
"[",
"key",
"]",
"=",
"1"
]
| Increments the value of the dictionary at the specified key. | [
"Increments",
"the",
"value",
"of",
"the",
"dictionary",
"at",
"the",
"specified",
"key",
"."
]
| python | train | 37 |
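
The helper is the classic counting idiom; `dict.get` with a default, or `collections.Counter`, expresses it without the branch:

from collections import Counter

counts = {}
counts['a'] = counts.get('a', 0) + 1  # same effect as _dict_increment(counts, 'a')

counter = Counter()
counter['a'] += 1                     # Counter treats missing keys as 0
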
Jammy2211/PyAutoLens | autolens/model/inversion/pixelizations.py | https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/inversion/pixelizations.py#L11-L39 | def setup_image_plane_pixelization_grid_from_galaxies_and_grid_stack(galaxies, grid_stack):
"""An image-plane pixelization is one where its pixel centres are computed by tracing a sparse grid of pixels from \
the image's regular grid to other planes (e.g. the source-plane).
Provided a galaxy has an image-plane pixelization, this function returns a new *GridStack* instance where the \
image-plane pixelization's sparse grid is added to it as an attribute.
Thus, when the *GridStack* is passed to the *ray_tracing* module, this sparse grid is also traced and the \
traced coordinates represent the centre of each pixelization pixel.
Parameters
-----------
galaxies : [model.galaxy.galaxy.Galaxy]
A list of galaxies, which may contain pixelizations and an *ImagePlanePixelization*.
grid_stacks : image.array.grid_stacks.GridStack
The collection of grid_stacks (regular, sub, etc.) which the image-plane pixelization grid (referred to as pix) \
may be added to.
"""
if not isinstance(grid_stack.regular, grids.PaddedRegularGrid):
for galaxy in galaxies:
if hasattr(galaxy, 'pixelization'):
if isinstance(galaxy.pixelization, ImagePlanePixelization):
image_plane_pix_grid = galaxy.pixelization.image_plane_pix_grid_from_regular_grid(
regular_grid=grid_stack.regular)
return grid_stack.new_grid_stack_with_pix_grid_added(pix_grid=image_plane_pix_grid.sparse_grid,
regular_to_nearest_pix=image_plane_pix_grid.regular_to_sparse)
return grid_stack | [
"def",
"setup_image_plane_pixelization_grid_from_galaxies_and_grid_stack",
"(",
"galaxies",
",",
"grid_stack",
")",
":",
"if",
"not",
"isinstance",
"(",
"grid_stack",
".",
"regular",
",",
"grids",
".",
"PaddedRegularGrid",
")",
":",
"for",
"galaxy",
"in",
"galaxies",
":",
"if",
"hasattr",
"(",
"galaxy",
",",
"'pixelization'",
")",
":",
"if",
"isinstance",
"(",
"galaxy",
".",
"pixelization",
",",
"ImagePlanePixelization",
")",
":",
"image_plane_pix_grid",
"=",
"galaxy",
".",
"pixelization",
".",
"image_plane_pix_grid_from_regular_grid",
"(",
"regular_grid",
"=",
"grid_stack",
".",
"regular",
")",
"return",
"grid_stack",
".",
"new_grid_stack_with_pix_grid_added",
"(",
"pix_grid",
"=",
"image_plane_pix_grid",
".",
"sparse_grid",
",",
"regular_to_nearest_pix",
"=",
"image_plane_pix_grid",
".",
"regular_to_sparse",
")",
"return",
"grid_stack"
]
| An image-plane pixelization is one where its pixel centres are computed by tracing a sparse grid of pixels from \
the image's regular grid to other planes (e.g. the source-plane).
Provided a galaxy has an image-plane pixelization, this function returns a new *GridStack* instance where the \
    image-plane pixelization's sparse grid is added to it as an attribute.
    Thus, when the *GridStack* is passed to the *ray_tracing* module this sparse grid is also traced and the \
traced coordinates represent the centre of each pixelization pixel.
Parameters
-----------
galaxies : [model.galaxy.galaxy.Galaxy]
A list of galaxies, which may contain pixelizations and an *ImagePlanePixelization*.
grid_stacks : image.array.grid_stacks.GridStack
The collection of grid_stacks (regular, sub, etc.) which the image-plane pixelization grid (referred to as pix) \
may be added to. | [
"An",
"image",
"-",
"plane",
"pixelization",
"is",
"one",
"where",
"its",
"pixel",
"centres",
"are",
"computed",
"by",
"tracing",
"a",
"sparse",
"grid",
"of",
"pixels",
"from",
"\\",
"the",
"image",
"s",
"regular",
"grid",
"to",
"other",
"planes",
"(",
"e",
".",
"g",
".",
"the",
"source",
"-",
"plane",
")",
"."
]
| python | valid | 57.586207 |
PmagPy/PmagPy | pmagpy/pmag.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L6538-L6570 | def sbar(Ss):
"""
calculate average s,sigma from list of "s"s.
"""
if type(Ss) == list:
Ss = np.array(Ss)
npts = Ss.shape[0]
Ss = Ss.transpose()
avd, avs = [], []
# D=np.array([Ss[0],Ss[1],Ss[2],Ss[3]+0.5*(Ss[0]+Ss[1]),Ss[4]+0.5*(Ss[1]+Ss[2]),Ss[5]+0.5*(Ss[0]+Ss[2])]).transpose()
D = np.array([Ss[0], Ss[1], Ss[2], Ss[3] + 0.5 * (Ss[0] + Ss[1]),
Ss[4] + 0.5 * (Ss[1] + Ss[2]), Ss[5] + 0.5 * (Ss[0] + Ss[2])])
for j in range(6):
avd.append(np.average(D[j]))
avs.append(np.average(Ss[j]))
D = D.transpose()
# for s in Ss:
# print 'from sbar: ',s
# D.append(s[:]) # append a copy of s
# D[-1][3]=D[-1][3]+0.5*(s[0]+s[1])
# D[-1][4]=D[-1][4]+0.5*(s[1]+s[2])
# D[-1][5]=D[-1][5]+0.5*(s[0]+s[2])
# for j in range(6):
# avd[j]+=(D[-1][j])/float(npts)
# avs[j]+=(s[j])/float(npts)
# calculate sigma
nf = (npts - 1) * 6 # number of degrees of freedom
s0 = 0
Dels = (D - avd)**2
s0 = np.sum(Dels)
sigma = np.sqrt(s0/float(nf))
return nf, sigma, avs | [
"def",
"sbar",
"(",
"Ss",
")",
":",
"if",
"type",
"(",
"Ss",
")",
"==",
"list",
":",
"Ss",
"=",
"np",
".",
"array",
"(",
"Ss",
")",
"npts",
"=",
"Ss",
".",
"shape",
"[",
"0",
"]",
"Ss",
"=",
"Ss",
".",
"transpose",
"(",
")",
"avd",
",",
"avs",
"=",
"[",
"]",
",",
"[",
"]",
"# D=np.array([Ss[0],Ss[1],Ss[2],Ss[3]+0.5*(Ss[0]+Ss[1]),Ss[4]+0.5*(Ss[1]+Ss[2]),Ss[5]+0.5*(Ss[0]+Ss[2])]).transpose()",
"D",
"=",
"np",
".",
"array",
"(",
"[",
"Ss",
"[",
"0",
"]",
",",
"Ss",
"[",
"1",
"]",
",",
"Ss",
"[",
"2",
"]",
",",
"Ss",
"[",
"3",
"]",
"+",
"0.5",
"*",
"(",
"Ss",
"[",
"0",
"]",
"+",
"Ss",
"[",
"1",
"]",
")",
",",
"Ss",
"[",
"4",
"]",
"+",
"0.5",
"*",
"(",
"Ss",
"[",
"1",
"]",
"+",
"Ss",
"[",
"2",
"]",
")",
",",
"Ss",
"[",
"5",
"]",
"+",
"0.5",
"*",
"(",
"Ss",
"[",
"0",
"]",
"+",
"Ss",
"[",
"2",
"]",
")",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"6",
")",
":",
"avd",
".",
"append",
"(",
"np",
".",
"average",
"(",
"D",
"[",
"j",
"]",
")",
")",
"avs",
".",
"append",
"(",
"np",
".",
"average",
"(",
"Ss",
"[",
"j",
"]",
")",
")",
"D",
"=",
"D",
".",
"transpose",
"(",
")",
"# for s in Ss:",
"# print 'from sbar: ',s",
"# D.append(s[:]) # append a copy of s",
"# D[-1][3]=D[-1][3]+0.5*(s[0]+s[1])",
"# D[-1][4]=D[-1][4]+0.5*(s[1]+s[2])",
"# D[-1][5]=D[-1][5]+0.5*(s[0]+s[2])",
"# for j in range(6):",
"# avd[j]+=(D[-1][j])/float(npts)",
"# avs[j]+=(s[j])/float(npts)",
"# calculate sigma",
"nf",
"=",
"(",
"npts",
"-",
"1",
")",
"*",
"6",
"# number of degrees of freedom",
"s0",
"=",
"0",
"Dels",
"=",
"(",
"D",
"-",
"avd",
")",
"**",
"2",
"s0",
"=",
"np",
".",
"sum",
"(",
"Dels",
")",
"sigma",
"=",
"np",
".",
"sqrt",
"(",
"s0",
"/",
"float",
"(",
"nf",
")",
")",
"return",
"nf",
",",
"sigma",
",",
"avs"
]
| calculate average s,sigma from list of "s"s. | [
"calculate",
"average",
"s",
"sigma",
"from",
"list",
"of",
"s",
"s",
"."
]
| python | train | 33 |
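A usage sketch for sbar(), assuming pmagpy is installed; the two six-element 's' vectors are fabricated for illustration.

import pmagpy.pmag as pmag

Ss = [[1.0, 1.1, 0.9, 0.05, 0.02, 0.01],
      [1.0, 1.0, 1.0, 0.00, 0.00, 0.00]]
nf, sigma, avs = pmag.sbar(Ss)
print(nf, sigma, avs)  # nf == (npts - 1) * 6 == 6 for two samples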
pip-services3-python/pip-services3-commons-python | pip_services3_commons/random/RandomString.py | https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/random/RandomString.py#L74-L92 | def next_string(min_size, max_size):
"""
Generates a random string, consisting of upper and lower case letters (of the English alphabet),
digits (0-9), and symbols ("_,.:-/.[].{},#-!,$=%.+^.&*-() ").
:param min_size: (optional) minimum string length.
:param max_size: maximum string length.
:return: a random string.
"""
result = ''
max_size = max_size if max_size != None else min_size
length = RandomInteger.next_integer(min_size, max_size)
for i in range(length):
result += random.choice(_chars)
return result | [
"def",
"next_string",
"(",
"min_size",
",",
"max_size",
")",
":",
"result",
"=",
"''",
"max_size",
"=",
"max_size",
"if",
"max_size",
"!=",
"None",
"else",
"min_size",
"length",
"=",
"RandomInteger",
".",
"next_integer",
"(",
"min_size",
",",
"max_size",
")",
"for",
"i",
"in",
"range",
"(",
"length",
")",
":",
"result",
"+=",
"random",
".",
"choice",
"(",
"_chars",
")",
"return",
"result"
]
| Generates a random string, consisting of upper and lower case letters (of the English alphabet),
digits (0-9), and symbols ("_,.:-/.[].{},#-!,$=%.+^.&*-() ").
:param min_size: (optional) minimum string length.
:param max_size: maximum string length.
:return: a random string. | [
"Generates",
"a",
"random",
"string",
"consisting",
"of",
"upper",
"and",
"lower",
"case",
"letters",
"(",
"of",
"the",
"English",
"alphabet",
")",
"digits",
"(",
"0",
"-",
"9",
")",
"and",
"symbols",
"(",
"_",
".",
":",
"-",
"/",
".",
"[]",
".",
"{}",
"#",
"-",
"!",
"$",
"=",
"%",
".",
"+",
"^",
".",
"&",
"*",
"-",
"()",
")",
"."
]
| python | train | 32.368421 |
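A standalone sketch of the same behaviour using only the standard library; the alphabet below approximates, but is not, the module's exact _chars.

import random
import string

def next_string_sketch(min_size, max_size=None):
    # Random string with length drawn uniformly from [min_size, max_size].
    chars = string.ascii_letters + string.digits + '_,.:-/[]{}#!$=%+^&*() '
    max_size = min_size if max_size is None else max_size
    length = random.randint(min_size, max_size)
    return ''.join(random.choice(chars) for _ in range(length))

print(next_string_sketch(5, 10))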
AguaClara/aguaclara | aguaclara/core/physchem.py | https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/core/physchem.py#L219-L230 | def headloss_fric(FlowRate, Diam, Length, Nu, PipeRough):
"""Return the major head loss (due to wall shear) in a pipe.
This equation applies to both laminar and turbulent flows.
"""
#Checking input validity - inputs not checked here are checked by
#functions this function calls.
ut.check_range([Length, ">0", "Length"])
return (fric(FlowRate, Diam, Nu, PipeRough)
* 8 / (gravity.magnitude * np.pi**2)
* (Length * FlowRate**2) / Diam**5
) | [
"def",
"headloss_fric",
"(",
"FlowRate",
",",
"Diam",
",",
"Length",
",",
"Nu",
",",
"PipeRough",
")",
":",
"#Checking input validity - inputs not checked here are checked by",
"#functions this function calls.",
"ut",
".",
"check_range",
"(",
"[",
"Length",
",",
"\">0\"",
",",
"\"Length\"",
"]",
")",
"return",
"(",
"fric",
"(",
"FlowRate",
",",
"Diam",
",",
"Nu",
",",
"PipeRough",
")",
"*",
"8",
"/",
"(",
"gravity",
".",
"magnitude",
"*",
"np",
".",
"pi",
"**",
"2",
")",
"*",
"(",
"Length",
"*",
"FlowRate",
"**",
"2",
")",
"/",
"Diam",
"**",
"5",
")"
]
| Return the major head loss (due to wall shear) in a pipe.
This equation applies to both laminar and turbulent flows. | [
"Return",
"the",
"major",
"head",
"loss",
"(",
"due",
"to",
"wall",
"shear",
")",
"in",
"a",
"pipe",
"."
]
| python | train | 41 |
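headloss_fric() is the Darcy-Weisbach major-loss formula hL = f * 8/(g*pi^2) * L*Q^2/D^5. A unit-free numeric sketch with the friction factor supplied directly (the value 0.02 stands in for fric(), and SI units are assumed):

import numpy as np

def headloss_fric_sketch(flow, diam, length, fric_factor=0.02, g=9.80665):
    # Darcy-Weisbach major head loss with an externally supplied friction factor.
    return fric_factor * 8 / (g * np.pi ** 2) * (length * flow ** 2) / diam ** 5

# 10 L/s through 40 m of 0.10 m pipe.
print(headloss_fric_sketch(flow=0.010, diam=0.10, length=40.0))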
fprimex/zdesk | zdesk/zdesk_api.py | https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L3568-L3572 | def ticket_satisfaction_rating_create(self, ticket_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#create-a-satisfaction-rating"
api_path = "/api/v2/tickets/{ticket_id}/satisfaction_rating.json"
api_path = api_path.format(ticket_id=ticket_id)
return self.call(api_path, method="POST", data=data, **kwargs) | [
"def",
"ticket_satisfaction_rating_create",
"(",
"self",
",",
"ticket_id",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/tickets/{ticket_id}/satisfaction_rating.json\"",
"api_path",
"=",
"api_path",
".",
"format",
"(",
"ticket_id",
"=",
"ticket_id",
")",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"method",
"=",
"\"POST\"",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
]
| https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#create-a-satisfaction-rating | [
"https",
":",
"//",
"developer",
".",
"zendesk",
".",
"com",
"/",
"rest_api",
"/",
"docs",
"/",
"core",
"/",
"satisfaction_ratings#create",
"-",
"a",
"-",
"satisfaction",
"-",
"rating"
]
| python | train | 75.4 |
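A call sketch, assuming a zdesk.Zendesk client with valid credentials; the subdomain, token and ticket id are placeholders.

from zdesk import Zendesk

zendesk = Zendesk('https://example.zendesk.com', zdesk_email='agent@example.com',
                  zdesk_password='api_token_here', zdesk_token=True)
payload = {'satisfaction_rating': {'score': 'good', 'comment': 'Fast and helpful.'}}
result = zendesk.ticket_satisfaction_rating_create(ticket_id=123, data=payload)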
projectatomic/atomic-reactor | atomic_reactor/util.py | https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/util.py#L947-L1001 | def get_manifest_digests(image, registry, insecure=False, dockercfg_path=None,
versions=('v1', 'v2', 'v2_list', 'oci', 'oci_index'), require_digest=True):
"""Return manifest digest for image.
:param image: ImageName, the remote image to inspect
:param registry: str, URI for registry, if URI schema is not provided,
https:// will be used
:param insecure: bool, when True registry's cert is not verified
:param dockercfg_path: str, dirname of .dockercfg location
:param versions: tuple, which manifest schema versions to fetch digest
:param require_digest: bool, when True exception is thrown if no digest is
set in the headers.
:return: dict, versions mapped to their digest
"""
registry_session = RegistrySession(registry, insecure=insecure, dockercfg_path=dockercfg_path)
digests = {}
# If all of the media types return a 404 NOT_FOUND status, then we rethrow
# an exception, if all of the media types fail for some other reason - like
# bad headers - then we return a ManifestDigest object with no digests.
# This is interesting for the Pulp "retry until the manifest shows up" case.
all_not_found = True
saved_not_found = None
for version in versions:
media_type = get_manifest_media_type(version)
response, saved_not_found = get_manifest(image, registry_session, version)
if saved_not_found is None:
all_not_found = False
if not response:
continue
# set it to truthy value so that koji_import would know pulp supports these digests
digests[version] = True
if not response.headers.get('Docker-Content-Digest'):
logger.warning('Unable to fetch digest for %s, no Docker-Content-Digest header',
media_type)
continue
digests[version] = response.headers['Docker-Content-Digest']
context = '/'.join([x for x in [image.namespace, image.repo] if x])
tag = image.tag
logger.debug('Image %s:%s has %s manifest digest: %s',
context, tag, version, digests[version])
if not digests:
if all_not_found and len(versions) > 0:
raise saved_not_found
if require_digest:
raise RuntimeError('No digests found for {}'.format(image))
return ManifestDigest(**digests) | [
"def",
"get_manifest_digests",
"(",
"image",
",",
"registry",
",",
"insecure",
"=",
"False",
",",
"dockercfg_path",
"=",
"None",
",",
"versions",
"=",
"(",
"'v1'",
",",
"'v2'",
",",
"'v2_list'",
",",
"'oci'",
",",
"'oci_index'",
")",
",",
"require_digest",
"=",
"True",
")",
":",
"registry_session",
"=",
"RegistrySession",
"(",
"registry",
",",
"insecure",
"=",
"insecure",
",",
"dockercfg_path",
"=",
"dockercfg_path",
")",
"digests",
"=",
"{",
"}",
"# If all of the media types return a 404 NOT_FOUND status, then we rethrow",
"# an exception, if all of the media types fail for some other reason - like",
"# bad headers - then we return a ManifestDigest object with no digests.",
"# This is interesting for the Pulp \"retry until the manifest shows up\" case.",
"all_not_found",
"=",
"True",
"saved_not_found",
"=",
"None",
"for",
"version",
"in",
"versions",
":",
"media_type",
"=",
"get_manifest_media_type",
"(",
"version",
")",
"response",
",",
"saved_not_found",
"=",
"get_manifest",
"(",
"image",
",",
"registry_session",
",",
"version",
")",
"if",
"saved_not_found",
"is",
"None",
":",
"all_not_found",
"=",
"False",
"if",
"not",
"response",
":",
"continue",
"# set it to truthy value so that koji_import would know pulp supports these digests",
"digests",
"[",
"version",
"]",
"=",
"True",
"if",
"not",
"response",
".",
"headers",
".",
"get",
"(",
"'Docker-Content-Digest'",
")",
":",
"logger",
".",
"warning",
"(",
"'Unable to fetch digest for %s, no Docker-Content-Digest header'",
",",
"media_type",
")",
"continue",
"digests",
"[",
"version",
"]",
"=",
"response",
".",
"headers",
"[",
"'Docker-Content-Digest'",
"]",
"context",
"=",
"'/'",
".",
"join",
"(",
"[",
"x",
"for",
"x",
"in",
"[",
"image",
".",
"namespace",
",",
"image",
".",
"repo",
"]",
"if",
"x",
"]",
")",
"tag",
"=",
"image",
".",
"tag",
"logger",
".",
"debug",
"(",
"'Image %s:%s has %s manifest digest: %s'",
",",
"context",
",",
"tag",
",",
"version",
",",
"digests",
"[",
"version",
"]",
")",
"if",
"not",
"digests",
":",
"if",
"all_not_found",
"and",
"len",
"(",
"versions",
")",
">",
"0",
":",
"raise",
"saved_not_found",
"if",
"require_digest",
":",
"raise",
"RuntimeError",
"(",
"'No digests found for {}'",
".",
"format",
"(",
"image",
")",
")",
"return",
"ManifestDigest",
"(",
"*",
"*",
"digests",
")"
]
| Return manifest digest for image.
:param image: ImageName, the remote image to inspect
:param registry: str, URI for registry, if URI schema is not provided,
https:// will be used
:param insecure: bool, when True registry's cert is not verified
:param dockercfg_path: str, dirname of .dockercfg location
:param versions: tuple, which manifest schema versions to fetch digest
:param require_digest: bool, when True exception is thrown if no digest is
set in the headers.
:return: dict, versions mapped to their digest | [
"Return",
"manifest",
"digest",
"for",
"image",
"."
]
| python | train | 43.472727 |
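A call sketch, assuming atomic_reactor is importable and the registry is reachable; the image reference and registry URL are placeholders.

from atomic_reactor.util import ImageName, get_manifest_digests

image = ImageName.parse('example.com/project/app:latest')
digests = get_manifest_digests(image, registry='https://example.com',
                               versions=('v2',))
print(digests)  # ManifestDigest with the v2 digest populated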
zhmcclient/python-zhmcclient | zhmcclient_mock/_urihandler.py | https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L368-L376 | def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Update <resource> Properties."""
assert wait_for_completion is True # async not supported yet
try:
resource = hmc.lookup_by_uri(uri)
except KeyError:
raise InvalidResourceError(method, uri)
resource.update(body) | [
"def",
"post",
"(",
"method",
",",
"hmc",
",",
"uri",
",",
"uri_parms",
",",
"body",
",",
"logon_required",
",",
"wait_for_completion",
")",
":",
"assert",
"wait_for_completion",
"is",
"True",
"# async not supported yet",
"try",
":",
"resource",
"=",
"hmc",
".",
"lookup_by_uri",
"(",
"uri",
")",
"except",
"KeyError",
":",
"raise",
"InvalidResourceError",
"(",
"method",
",",
"uri",
")",
"resource",
".",
"update",
"(",
"body",
")"
]
| Operation: Update <resource> Properties. | [
"Operation",
":",
"Update",
"<resource",
">",
"Properties",
"."
]
| python | train | 41.888889 |
tensorflow/tensor2tensor | tensor2tensor/models/resnet.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L112-L188 | def conv2d_fixed_padding(inputs,
filters,
kernel_size,
strides,
data_format="channels_first",
use_td=False,
targeting_rate=None,
keep_prob=None,
is_training=None):
"""Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
use_td: `str` one of "weight" or "unit". Set to False or "" to disable
targeted dropout.
targeting_rate: `float` proportion of weights to target with targeted
dropout.
keep_prob: `float` keep probability for targeted dropout.
is_training: `bool` for whether the model is in training.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
Raises:
Exception: if use_td is not valid.
"""
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format=data_format)
if use_td:
inputs_shape = common_layers.shape_list(inputs)
if use_td == "weight":
if data_format == "channels_last":
size = kernel_size * kernel_size * inputs_shape[-1]
else:
size = kernel_size * kernel_size * inputs_shape[1]
targeting_count = targeting_rate * tf.to_float(size)
targeting_fn = common_layers.weight_targeting
elif use_td == "unit":
targeting_count = targeting_rate * filters
targeting_fn = common_layers.unit_targeting
else:
raise Exception("Unrecognized targeted dropout type: %s" % use_td)
y = common_layers.td_conv(
inputs,
filters,
kernel_size,
targeting_count,
targeting_fn,
keep_prob,
is_training,
do_prune=True,
strides=strides,
padding=("SAME" if strides == 1 else "VALID"),
data_format=data_format,
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer())
else:
y = layers().Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=("SAME" if strides == 1 else "VALID"),
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=data_format)(inputs)
return y | [
"def",
"conv2d_fixed_padding",
"(",
"inputs",
",",
"filters",
",",
"kernel_size",
",",
"strides",
",",
"data_format",
"=",
"\"channels_first\"",
",",
"use_td",
"=",
"False",
",",
"targeting_rate",
"=",
"None",
",",
"keep_prob",
"=",
"None",
",",
"is_training",
"=",
"None",
")",
":",
"if",
"strides",
">",
"1",
":",
"inputs",
"=",
"fixed_padding",
"(",
"inputs",
",",
"kernel_size",
",",
"data_format",
"=",
"data_format",
")",
"if",
"use_td",
":",
"inputs_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"inputs",
")",
"if",
"use_td",
"==",
"\"weight\"",
":",
"if",
"data_format",
"==",
"\"channels_last\"",
":",
"size",
"=",
"kernel_size",
"*",
"kernel_size",
"*",
"inputs_shape",
"[",
"-",
"1",
"]",
"else",
":",
"size",
"=",
"kernel_size",
"*",
"kernel_size",
"*",
"inputs_shape",
"[",
"1",
"]",
"targeting_count",
"=",
"targeting_rate",
"*",
"tf",
".",
"to_float",
"(",
"size",
")",
"targeting_fn",
"=",
"common_layers",
".",
"weight_targeting",
"elif",
"use_td",
"==",
"\"unit\"",
":",
"targeting_count",
"=",
"targeting_rate",
"*",
"filters",
"targeting_fn",
"=",
"common_layers",
".",
"unit_targeting",
"else",
":",
"raise",
"Exception",
"(",
"\"Unrecognized targeted dropout type: %s\"",
"%",
"use_td",
")",
"y",
"=",
"common_layers",
".",
"td_conv",
"(",
"inputs",
",",
"filters",
",",
"kernel_size",
",",
"targeting_count",
",",
"targeting_fn",
",",
"keep_prob",
",",
"is_training",
",",
"do_prune",
"=",
"True",
",",
"strides",
"=",
"strides",
",",
"padding",
"=",
"(",
"\"SAME\"",
"if",
"strides",
"==",
"1",
"else",
"\"VALID\"",
")",
",",
"data_format",
"=",
"data_format",
",",
"use_bias",
"=",
"False",
",",
"kernel_initializer",
"=",
"tf",
".",
"variance_scaling_initializer",
"(",
")",
")",
"else",
":",
"y",
"=",
"layers",
"(",
")",
".",
"Conv2D",
"(",
"filters",
"=",
"filters",
",",
"kernel_size",
"=",
"kernel_size",
",",
"strides",
"=",
"strides",
",",
"padding",
"=",
"(",
"\"SAME\"",
"if",
"strides",
"==",
"1",
"else",
"\"VALID\"",
")",
",",
"use_bias",
"=",
"False",
",",
"kernel_initializer",
"=",
"tf",
".",
"variance_scaling_initializer",
"(",
")",
",",
"data_format",
"=",
"data_format",
")",
"(",
"inputs",
")",
"return",
"y"
]
| Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
use_td: `str` one of "weight" or "unit". Set to False or "" to disable
targeted dropout.
targeting_rate: `float` proportion of weights to target with targeted
dropout.
keep_prob: `float` keep probability for targeted dropout.
is_training: `bool` for whether the model is in training.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
Raises:
Exception: if use_td is not valid. | [
"Strided",
"2",
"-",
"D",
"convolution",
"with",
"explicit",
"padding",
"."
]
| python | train | 35.38961 |
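A shape-check sketch, assuming TF1-style graph construction as used by tensor2tensor; NHWC input with stride 2.

import tensorflow as tf
from tensor2tensor.models.resnet import conv2d_fixed_padding

inputs = tf.zeros([8, 32, 32, 3])  # NHWC
out = conv2d_fixed_padding(inputs, filters=64, kernel_size=3, strides=2,
                           data_format='channels_last')
print(out.shape)  # (8, 16, 16, 64): fixed padding keeps size_out = ceil(size_in / stride)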
ayust/kitnirc | kitnirc/client.py | https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/client.py#L689-L704 | def _parse_part(client, command, actor, args):
"""Parse a PART and update channel states, then dispatch events.
Note that two events are dispatched here:
- PART, because a user parted the channel
- MEMBERS, because the channel's members changed
"""
actor = User(actor)
channel, _, message = args.partition(' :')
channel = client.server.get_channel(channel)
channel.remove_user(actor)
if actor.nick == client.user.nick:
client.server.remove_channel(channel)
client.dispatch_event("PART", actor, channel, message)
if actor.nick != client.user.nick:
client.dispatch_event("MEMBERS", channel) | [
"def",
"_parse_part",
"(",
"client",
",",
"command",
",",
"actor",
",",
"args",
")",
":",
"actor",
"=",
"User",
"(",
"actor",
")",
"channel",
",",
"_",
",",
"message",
"=",
"args",
".",
"partition",
"(",
"' :'",
")",
"channel",
"=",
"client",
".",
"server",
".",
"get_channel",
"(",
"channel",
")",
"channel",
".",
"remove_user",
"(",
"actor",
")",
"if",
"actor",
".",
"nick",
"==",
"client",
".",
"user",
".",
"nick",
":",
"client",
".",
"server",
".",
"remove_channel",
"(",
"channel",
")",
"client",
".",
"dispatch_event",
"(",
"\"PART\"",
",",
"actor",
",",
"channel",
",",
"message",
")",
"if",
"actor",
".",
"nick",
"!=",
"client",
".",
"user",
".",
"nick",
":",
"client",
".",
"dispatch_event",
"(",
"\"MEMBERS\"",
",",
"channel",
")"
]
| Parse a PART and update channel states, then dispatch events.
Note that two events are dispatched here:
- PART, because a user parted the channel
- MEMBERS, because the channel's members changed | [
"Parse",
"a",
"PART",
"and",
"update",
"channel",
"states",
"then",
"dispatch",
"events",
"."
]
| python | train | 40.375 |
apache/incubator-mxnet | example/speech_recognition/stt_io_bucketingiter.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/speech_recognition/stt_io_bucketingiter.py#L132-L165 | def next(self):
"""Returns the next batch of data."""
if self.curr_idx == len(self.idx):
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
audio_paths = []
texts = []
for duration, audio_path, text in self.data[i][j:j+self.batch_size]:
audio_paths.append(audio_path)
texts.append(text)
if self.is_first_epoch:
data_set = self.datagen.prepare_minibatch(audio_paths, texts, overwrite=True,
is_bi_graphemes=self.is_bi_graphemes,
seq_length=self.buckets[i],
save_feature_as_csvfile=self.save_feature_as_csvfile)
else:
data_set = self.datagen.prepare_minibatch(audio_paths, texts, overwrite=False,
is_bi_graphemes=self.is_bi_graphemes,
seq_length=self.buckets[i],
save_feature_as_csvfile=self.save_feature_as_csvfile)
data_all = [mx.nd.array(data_set['x'])] + self.init_state_arrays
label_all = [mx.nd.array(data_set['y'])]
self.label = label_all
provide_data = [('data', (self.batch_size, self.buckets[i], self.width * self.height))] + self.init_states
return mx.io.DataBatch(data_all, label_all, pad=0,
bucket_key=self.buckets[i],
provide_data=provide_data,
provide_label=self.provide_label) | [
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"curr_idx",
"==",
"len",
"(",
"self",
".",
"idx",
")",
":",
"raise",
"StopIteration",
"i",
",",
"j",
"=",
"self",
".",
"idx",
"[",
"self",
".",
"curr_idx",
"]",
"self",
".",
"curr_idx",
"+=",
"1",
"audio_paths",
"=",
"[",
"]",
"texts",
"=",
"[",
"]",
"for",
"duration",
",",
"audio_path",
",",
"text",
"in",
"self",
".",
"data",
"[",
"i",
"]",
"[",
"j",
":",
"j",
"+",
"self",
".",
"batch_size",
"]",
":",
"audio_paths",
".",
"append",
"(",
"audio_path",
")",
"texts",
".",
"append",
"(",
"text",
")",
"if",
"self",
".",
"is_first_epoch",
":",
"data_set",
"=",
"self",
".",
"datagen",
".",
"prepare_minibatch",
"(",
"audio_paths",
",",
"texts",
",",
"overwrite",
"=",
"True",
",",
"is_bi_graphemes",
"=",
"self",
".",
"is_bi_graphemes",
",",
"seq_length",
"=",
"self",
".",
"buckets",
"[",
"i",
"]",
",",
"save_feature_as_csvfile",
"=",
"self",
".",
"save_feature_as_csvfile",
")",
"else",
":",
"data_set",
"=",
"self",
".",
"datagen",
".",
"prepare_minibatch",
"(",
"audio_paths",
",",
"texts",
",",
"overwrite",
"=",
"False",
",",
"is_bi_graphemes",
"=",
"self",
".",
"is_bi_graphemes",
",",
"seq_length",
"=",
"self",
".",
"buckets",
"[",
"i",
"]",
",",
"save_feature_as_csvfile",
"=",
"self",
".",
"save_feature_as_csvfile",
")",
"data_all",
"=",
"[",
"mx",
".",
"nd",
".",
"array",
"(",
"data_set",
"[",
"'x'",
"]",
")",
"]",
"+",
"self",
".",
"init_state_arrays",
"label_all",
"=",
"[",
"mx",
".",
"nd",
".",
"array",
"(",
"data_set",
"[",
"'y'",
"]",
")",
"]",
"self",
".",
"label",
"=",
"label_all",
"provide_data",
"=",
"[",
"(",
"'data'",
",",
"(",
"self",
".",
"batch_size",
",",
"self",
".",
"buckets",
"[",
"i",
"]",
",",
"self",
".",
"width",
"*",
"self",
".",
"height",
")",
")",
"]",
"+",
"self",
".",
"init_states",
"return",
"mx",
".",
"io",
".",
"DataBatch",
"(",
"data_all",
",",
"label_all",
",",
"pad",
"=",
"0",
",",
"bucket_key",
"=",
"self",
".",
"buckets",
"[",
"i",
"]",
",",
"provide_data",
"=",
"provide_data",
",",
"provide_label",
"=",
"self",
".",
"provide_label",
")"
]
| Returns the next batch of data. | [
"Returns",
"the",
"next",
"batch",
"of",
"data",
"."
]
| python | train | 49.088235 |
useblocks/sphinxcontrib-needs | sphinxcontrib/needs/directives/needlist.py | https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/needlist.py#L69-L135 | def process_needlist(app, doctree, fromdocname):
"""
Replace all needlist nodes with a list of the collected needs.
Augment each need with a backlink to the original location.
"""
env = app.builder.env
for node in doctree.traverse(Needlist):
if not app.config.needs_include_needs:
# Ok, this is really dirty.
# If we replace a node, docutils checks, if it will not lose any attributes.
# But this is here the case, because we are using the attribute "ids" of a node.
# However, I do not understand, why losing an attribute is such a big deal, so we delete everything
# before docutils claims about it.
for att in ('ids', 'names', 'classes', 'dupnames'):
node[att] = []
node.replace_self([])
continue
id = node.attributes["ids"][0]
current_needfilter = env.need_all_needlists[id]
all_needs = env.needs_all_needs
content = []
all_needs = list(all_needs.values())
if current_needfilter["sort_by"] is not None:
if current_needfilter["sort_by"] == "id":
all_needs = sorted(all_needs, key=lambda node: node["id"])
elif current_needfilter["sort_by"] == "status":
all_needs = sorted(all_needs, key=status_sorter)
found_needs = procces_filters(all_needs, current_needfilter)
line_block = nodes.line_block()
for need_info in found_needs:
para = nodes.line()
description = "%s: %s" % (need_info["id"], need_info["title"])
if current_needfilter["show_status"] and need_info["status"] is not None:
description += " (%s)" % need_info["status"]
if current_needfilter["show_tags"] and need_info["tags"] is not None:
description += " [%s]" % "; ".join(need_info["tags"])
title = nodes.Text(description, description)
# Create a reference
if not need_info["hide"]:
ref = nodes.reference('', '')
ref['refdocname'] = need_info['docname']
ref['refuri'] = app.builder.get_relative_uri(
fromdocname, need_info['docname'])
ref['refuri'] += '#' + need_info['target_node']['refid']
ref.append(title)
para += ref
else:
para += title
line_block.append(para)
content.append(line_block)
if len(content) == 0:
content.append(no_needs_found_paragraph())
if current_needfilter["show_filters"]:
content.append(used_filter_paragraph(current_needfilter))
node.replace_self(content) | [
"def",
"process_needlist",
"(",
"app",
",",
"doctree",
",",
"fromdocname",
")",
":",
"env",
"=",
"app",
".",
"builder",
".",
"env",
"for",
"node",
"in",
"doctree",
".",
"traverse",
"(",
"Needlist",
")",
":",
"if",
"not",
"app",
".",
"config",
".",
"needs_include_needs",
":",
"# Ok, this is really dirty.",
"# If we replace a node, docutils checks, if it will not lose any attributes.",
"# But this is here the case, because we are using the attribute \"ids\" of a node.",
"# However, I do not understand, why losing an attribute is such a big deal, so we delete everything",
"# before docutils claims about it.",
"for",
"att",
"in",
"(",
"'ids'",
",",
"'names'",
",",
"'classes'",
",",
"'dupnames'",
")",
":",
"node",
"[",
"att",
"]",
"=",
"[",
"]",
"node",
".",
"replace_self",
"(",
"[",
"]",
")",
"continue",
"id",
"=",
"node",
".",
"attributes",
"[",
"\"ids\"",
"]",
"[",
"0",
"]",
"current_needfilter",
"=",
"env",
".",
"need_all_needlists",
"[",
"id",
"]",
"all_needs",
"=",
"env",
".",
"needs_all_needs",
"content",
"=",
"[",
"]",
"all_needs",
"=",
"list",
"(",
"all_needs",
".",
"values",
"(",
")",
")",
"if",
"current_needfilter",
"[",
"\"sort_by\"",
"]",
"is",
"not",
"None",
":",
"if",
"current_needfilter",
"[",
"\"sort_by\"",
"]",
"==",
"\"id\"",
":",
"all_needs",
"=",
"sorted",
"(",
"all_needs",
",",
"key",
"=",
"lambda",
"node",
":",
"node",
"[",
"\"id\"",
"]",
")",
"elif",
"current_needfilter",
"[",
"\"sort_by\"",
"]",
"==",
"\"status\"",
":",
"all_needs",
"=",
"sorted",
"(",
"all_needs",
",",
"key",
"=",
"status_sorter",
")",
"found_needs",
"=",
"procces_filters",
"(",
"all_needs",
",",
"current_needfilter",
")",
"line_block",
"=",
"nodes",
".",
"line_block",
"(",
")",
"for",
"need_info",
"in",
"found_needs",
":",
"para",
"=",
"nodes",
".",
"line",
"(",
")",
"description",
"=",
"\"%s: %s\"",
"%",
"(",
"need_info",
"[",
"\"id\"",
"]",
",",
"need_info",
"[",
"\"title\"",
"]",
")",
"if",
"current_needfilter",
"[",
"\"show_status\"",
"]",
"and",
"need_info",
"[",
"\"status\"",
"]",
"is",
"not",
"None",
":",
"description",
"+=",
"\" (%s)\"",
"%",
"need_info",
"[",
"\"status\"",
"]",
"if",
"current_needfilter",
"[",
"\"show_tags\"",
"]",
"and",
"need_info",
"[",
"\"tags\"",
"]",
"is",
"not",
"None",
":",
"description",
"+=",
"\" [%s]\"",
"%",
"\"; \"",
".",
"join",
"(",
"need_info",
"[",
"\"tags\"",
"]",
")",
"title",
"=",
"nodes",
".",
"Text",
"(",
"description",
",",
"description",
")",
"# Create a reference",
"if",
"not",
"need_info",
"[",
"\"hide\"",
"]",
":",
"ref",
"=",
"nodes",
".",
"reference",
"(",
"''",
",",
"''",
")",
"ref",
"[",
"'refdocname'",
"]",
"=",
"need_info",
"[",
"'docname'",
"]",
"ref",
"[",
"'refuri'",
"]",
"=",
"app",
".",
"builder",
".",
"get_relative_uri",
"(",
"fromdocname",
",",
"need_info",
"[",
"'docname'",
"]",
")",
"ref",
"[",
"'refuri'",
"]",
"+=",
"'#'",
"+",
"need_info",
"[",
"'target_node'",
"]",
"[",
"'refid'",
"]",
"ref",
".",
"append",
"(",
"title",
")",
"para",
"+=",
"ref",
"else",
":",
"para",
"+=",
"title",
"line_block",
".",
"append",
"(",
"para",
")",
"content",
".",
"append",
"(",
"line_block",
")",
"if",
"len",
"(",
"content",
")",
"==",
"0",
":",
"content",
".",
"append",
"(",
"no_needs_found_paragraph",
"(",
")",
")",
"if",
"current_needfilter",
"[",
"\"show_filters\"",
"]",
":",
"content",
".",
"append",
"(",
"used_filter_paragraph",
"(",
"current_needfilter",
")",
")",
"node",
".",
"replace_self",
"(",
"content",
")"
]
| Replace all needlist nodes with a list of the collected needs.
Augment each need with a backlink to the original location. | [
"Replace",
"all",
"needlist",
"nodes",
"with",
"a",
"list",
"of",
"the",
"collected",
"needs",
".",
"Augment",
"each",
"need",
"with",
"a",
"backlink",
"to",
"the",
"original",
"location",
"."
]
| python | train | 40.149254 |
apache/incubator-mxnet | example/sparse/matrix_factorization/data.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/sparse/matrix_factorization/data.py#L29-L56 | def get_movielens_iter(filename, batch_size):
"""Not particularly fast code to parse the text file and load into NDArrays.
return two data iters, one for train, the other for validation.
"""
logging.info("Preparing data iterators for " + filename + " ... ")
user = []
item = []
score = []
with open(filename, 'r') as f:
num_samples = 0
for line in f:
tks = line.strip().split('::')
if len(tks) != 4:
continue
num_samples += 1
user.append((tks[0]))
item.append((tks[1]))
score.append((tks[2]))
# convert to ndarrays
user = mx.nd.array(user, dtype='int32')
item = mx.nd.array(item)
score = mx.nd.array(score)
# prepare data iters
data_train = {'user': user, 'item': item}
label_train = {'score': score}
iter_train = mx.io.NDArrayIter(data=data_train,label=label_train,
batch_size=batch_size, shuffle=True)
return mx.io.PrefetchingIter(iter_train) | [
"def",
"get_movielens_iter",
"(",
"filename",
",",
"batch_size",
")",
":",
"logging",
".",
"info",
"(",
"\"Preparing data iterators for \"",
"+",
"filename",
"+",
"\" ... \"",
")",
"user",
"=",
"[",
"]",
"item",
"=",
"[",
"]",
"score",
"=",
"[",
"]",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"f",
":",
"num_samples",
"=",
"0",
"for",
"line",
"in",
"f",
":",
"tks",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'::'",
")",
"if",
"len",
"(",
"tks",
")",
"!=",
"4",
":",
"continue",
"num_samples",
"+=",
"1",
"user",
".",
"append",
"(",
"(",
"tks",
"[",
"0",
"]",
")",
")",
"item",
".",
"append",
"(",
"(",
"tks",
"[",
"1",
"]",
")",
")",
"score",
".",
"append",
"(",
"(",
"tks",
"[",
"2",
"]",
")",
")",
"# convert to ndarrays",
"user",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"user",
",",
"dtype",
"=",
"'int32'",
")",
"item",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"item",
")",
"score",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"score",
")",
"# prepare data iters",
"data_train",
"=",
"{",
"'user'",
":",
"user",
",",
"'item'",
":",
"item",
"}",
"label_train",
"=",
"{",
"'score'",
":",
"score",
"}",
"iter_train",
"=",
"mx",
".",
"io",
".",
"NDArrayIter",
"(",
"data",
"=",
"data_train",
",",
"label",
"=",
"label_train",
",",
"batch_size",
"=",
"batch_size",
",",
"shuffle",
"=",
"True",
")",
"return",
"mx",
".",
"io",
".",
"PrefetchingIter",
"(",
"iter_train",
")"
]
| Not particularly fast code to parse the text file and load into NDArrays.
return two data iters, one for train, the other for validation. | [
"Not",
"particularly",
"fast",
"code",
"to",
"parse",
"the",
"text",
"file",
"and",
"load",
"into",
"NDArrays",
".",
"return",
"two",
"data",
"iters",
"one",
"for",
"train",
"the",
"other",
"for",
"validation",
"."
]
| python | train | 36.571429 |
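A usage sketch, assuming this example data.py module is importable and a MovieLens-style file with 'user::item::rating::timestamp' lines exists at the given path.

import mxnet as mx
from data import get_movielens_iter  # hypothetical import path for this example module

train_iter = get_movielens_iter('./ml-10m.train', batch_size=256)
for batch in train_iter:
    # batch.data carries the 'user' and 'item' arrays; batch.label carries 'score'.
    break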
hatemile/hatemile-for-python | hatemile/implementation/css.py | https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/css.py#L635-L645 | def _speak_normal_inherit(self, element):
"""
Speak the content of element and descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
self._visit(element, self._speak_normal)
element.normalize() | [
"def",
"_speak_normal_inherit",
"(",
"self",
",",
"element",
")",
":",
"self",
".",
"_visit",
"(",
"element",
",",
"self",
".",
"_speak_normal",
")",
"element",
".",
"normalize",
"(",
")"
]
| Speak the content of element and descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement | [
"Speak",
"the",
"content",
"of",
"element",
"and",
"descendants",
"."
]
| python | train | 27.090909 |
pysathq/pysat | examples/rc2.py | https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/rc2.py#L249-L304 | def init(self, formula, incr=False):
"""
Initialize the internal SAT oracle. The oracle is used
incrementally and so it is initialized only once when
constructing an object of class :class:`RC2`. Given an
input :class:`.WCNF` formula, the method bootstraps the
oracle with its hard clauses. It also augments the soft
clauses with "fresh" selectors and adds them to the oracle
afterwards.
Optional input parameter ``incr`` (``False`` by default)
regulates whether or not Glucose's incremental mode [6]_
is turned on.
:param formula: input formula
:param incr: apply incremental mode of Glucose
:type formula: :class:`.WCNF`
:type incr: bool
"""
# creating a solver object
self.oracle = Solver(name=self.solver, bootstrap_with=formula.hard,
incr=incr, use_timer=True)
# adding soft clauses to oracle
for i, cl in enumerate(formula.soft):
selv = cl[0] # if clause is unit, selector variable is its literal
if len(cl) > 1:
self.topv += 1
selv = self.topv
cl.append(-self.topv)
self.oracle.add_clause(cl)
if selv not in self.wght:
# record selector and its weight
self.sels.append(selv)
self.wght[selv] = formula.wght[i]
self.smap[selv] = i
else:
# selector is not new; increment its weight
self.wght[selv] += formula.wght[i]
# storing the set of selectors
self.sels_set = set(self.sels)
# at this point internal and external variables are the same
for v in range(1, formula.nv + 1):
self.vmap.e2i[v] = v
self.vmap.i2e[v] = v
if self.verbose > 1:
print('c formula: {0} vars, {1} hard, {2} soft'.format(formula.nv,
len(formula.hard), len(formula.soft))) | [
"def",
"init",
"(",
"self",
",",
"formula",
",",
"incr",
"=",
"False",
")",
":",
"# creating a solver object",
"self",
".",
"oracle",
"=",
"Solver",
"(",
"name",
"=",
"self",
".",
"solver",
",",
"bootstrap_with",
"=",
"formula",
".",
"hard",
",",
"incr",
"=",
"incr",
",",
"use_timer",
"=",
"True",
")",
"# adding soft clauses to oracle",
"for",
"i",
",",
"cl",
"in",
"enumerate",
"(",
"formula",
".",
"soft",
")",
":",
"selv",
"=",
"cl",
"[",
"0",
"]",
"# if clause is unit, selector variable is its literal",
"if",
"len",
"(",
"cl",
")",
">",
"1",
":",
"self",
".",
"topv",
"+=",
"1",
"selv",
"=",
"self",
".",
"topv",
"cl",
".",
"append",
"(",
"-",
"self",
".",
"topv",
")",
"self",
".",
"oracle",
".",
"add_clause",
"(",
"cl",
")",
"if",
"selv",
"not",
"in",
"self",
".",
"wght",
":",
"# record selector and its weight",
"self",
".",
"sels",
".",
"append",
"(",
"selv",
")",
"self",
".",
"wght",
"[",
"selv",
"]",
"=",
"formula",
".",
"wght",
"[",
"i",
"]",
"self",
".",
"smap",
"[",
"selv",
"]",
"=",
"i",
"else",
":",
"# selector is not new; increment its weight",
"self",
".",
"wght",
"[",
"selv",
"]",
"+=",
"formula",
".",
"wght",
"[",
"i",
"]",
"# storing the set of selectors",
"self",
".",
"sels_set",
"=",
"set",
"(",
"self",
".",
"sels",
")",
"# at this point internal and external variables are the same",
"for",
"v",
"in",
"range",
"(",
"1",
",",
"formula",
".",
"nv",
"+",
"1",
")",
":",
"self",
".",
"vmap",
".",
"e2i",
"[",
"v",
"]",
"=",
"v",
"self",
".",
"vmap",
".",
"i2e",
"[",
"v",
"]",
"=",
"v",
"if",
"self",
".",
"verbose",
">",
"1",
":",
"print",
"(",
"'c formula: {0} vars, {1} hard, {2} soft'",
".",
"format",
"(",
"formula",
".",
"nv",
",",
"len",
"(",
"formula",
".",
"hard",
")",
",",
"len",
"(",
"formula",
".",
"soft",
")",
")",
")"
]
| Initialize the internal SAT oracle. The oracle is used
incrementally and so it is initialized only once when
constructing an object of class :class:`RC2`. Given an
input :class:`.WCNF` formula, the method bootstraps the
oracle with its hard clauses. It also augments the soft
clauses with "fresh" selectors and adds them to the oracle
afterwards.
Optional input parameter ``incr`` (``False`` by default)
regulates whether or not Glucose's incremental mode [6]_
is turned on.
:param formula: input formula
:param incr: apply incremental mode of Glucose
:type formula: :class:`.WCNF`
:type incr: bool | [
"Initialize",
"the",
"internal",
"SAT",
"oracle",
".",
"The",
"oracle",
"is",
"used",
"incrementally",
"and",
"so",
"it",
"is",
"initialized",
"only",
"once",
"when",
"constructing",
"an",
"object",
"of",
"class",
":",
"class",
":",
"RC2",
".",
"Given",
"an",
"input",
":",
"class",
":",
".",
"WCNF",
"formula",
"the",
"method",
"bootstraps",
"the",
"oracle",
"with",
"its",
"hard",
"clauses",
".",
"It",
"also",
"augments",
"the",
"soft",
"clauses",
"with",
"fresh",
"selectors",
"and",
"adds",
"them",
"to",
"the",
"oracle",
"afterwards",
"."
]
| python | train | 36.357143 |
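init() runs inside the RC2 constructor; a minimal end-to-end sketch with python-sat, using a tiny fabricated WCNF.

from pysat.examples.rc2 import RC2
from pysat.formula import WCNF

wcnf = WCNF()
wcnf.append([-1, -2])        # hard clause: not both variables
wcnf.append([1], weight=1)   # soft clause
wcnf.append([2], weight=1)   # soft clause
with RC2(wcnf) as rc2:
    model = rc2.compute()
    print(model, rc2.cost)   # one soft clause must be falsified, so cost == 1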
noobermin/lspreader | lspreader/lspreader.py | https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/lspreader.py#L43-L59 | def get_list(file,fmt):
'''makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string'''
out=[]
for i in fmt:
if i == 'i':
out.append(get_int(file));
elif i == 'f' or i == 'd':
out.append(get_float(file));
elif i == 's':
out.append(get_str(file));
else:
raise ValueError("Unexpected flag '{}'".format(i));
return out; | [
"def",
"get_list",
"(",
"file",
",",
"fmt",
")",
":",
"out",
"=",
"[",
"]",
"for",
"i",
"in",
"fmt",
":",
"if",
"i",
"==",
"'i'",
":",
"out",
".",
"append",
"(",
"get_int",
"(",
"file",
")",
")",
"elif",
"i",
"==",
"'f'",
"or",
"i",
"==",
"'d'",
":",
"out",
".",
"append",
"(",
"get_float",
"(",
"file",
")",
")",
"elif",
"i",
"==",
"'s'",
":",
"out",
".",
"append",
"(",
"get_str",
"(",
"file",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected flag '{}'\"",
".",
"format",
"(",
"i",
")",
")",
"return",
"out"
]
| makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string | [
"makes",
"a",
"list",
"out",
"of",
"the",
"fmt",
"from",
"the",
"LspOutput",
"f",
"using",
"the",
"format",
"i",
"for",
"int",
"f",
"for",
"float",
"d",
"for",
"double",
"s",
"for",
"string"
]
| python | train | 28.235294 |
CybOXProject/mixbox | mixbox/datautils.py | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L26-L42 | def import_class(classpath):
"""Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
        AttributeError: If the class does not exist in the imported module.
"""
modname, classname = classpath.rsplit(".", 1)
module = importlib.import_module(modname)
klass = getattr(module, classname)
return klass | [
"def",
"import_class",
"(",
"classpath",
")",
":",
"modname",
",",
"classname",
"=",
"classpath",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"modname",
")",
"klass",
"=",
"getattr",
"(",
"module",
",",
"classname",
")",
"return",
"klass"
]
| Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
        AttributeError: If the class does not exist in the imported module. | [
"Import",
"the",
"class",
"referred",
"to",
"by",
"the",
"fully",
"qualified",
"class",
"path",
"."
]
| python | train | 32.117647 |
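A quick check of import_class() against a standard-library class:

from mixbox.datautils import import_class

klass = import_class('collections.OrderedDict')
assert klass.__name__ == 'OrderedDict'
instance = klass(a=1)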
ratt-ru/PyMORESANE | pymoresane/iuwt_toolbox.py | https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/iuwt_toolbox.py#L292-L306 | def snr_ratio(in1, in2):
"""
The following function simply calculates the signal to noise ratio between two signals.
INPUTS:
in1 (no default): Array containing values for signal 1.
in2 (no default): Array containing values for signal 2.
OUTPUTS:
out1 The ratio of the signal to noise ratios of two signals.
"""
out1 = 20*(np.log10(np.linalg.norm(in1)/np.linalg.norm(in1-in2)))
return out1 | [
"def",
"snr_ratio",
"(",
"in1",
",",
"in2",
")",
":",
"out1",
"=",
"20",
"*",
"(",
"np",
".",
"log10",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"in1",
")",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"in1",
"-",
"in2",
")",
")",
")",
"return",
"out1"
]
| The following function simply calculates the signal to noise ratio between two signals.
INPUTS:
in1 (no default): Array containing values for signal 1.
in2 (no default): Array containing values for signal 2.
OUTPUTS:
out1 The ratio of the signal to noise ratios of two signals. | [
"The",
"following",
"function",
"simply",
"calculates",
"the",
"signal",
"to",
"noise",
"ratio",
"between",
"two",
"signals",
"."
]
| python | train | 30.733333 |
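snr_ratio() computes 20 * log10(||in1|| / ||in1 - in2||); a standalone numeric sketch:

import numpy as np

def snr_ratio(in1, in2):
    return 20 * np.log10(np.linalg.norm(in1) / np.linalg.norm(in1 - in2))

signal = np.ones(100)
noisy = signal + np.random.normal(0, 0.01, 100)
print(snr_ratio(signal, noisy))  # roughly 40 dB for 1% additive noise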
saltstack/salt | salt/modules/memcached.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/memcached.py#L51-L58 | def _connect(host=DEFAULT_HOST, port=DEFAULT_PORT):
'''
Returns a tuple of (user, host, port) with config, pillar, or default
values assigned to missing values.
'''
if six.text_type(port).isdigit():
return memcache.Client(['{0}:{1}'.format(host, port)], debug=0)
raise SaltInvocationError('port must be an integer') | [
"def",
"_connect",
"(",
"host",
"=",
"DEFAULT_HOST",
",",
"port",
"=",
"DEFAULT_PORT",
")",
":",
"if",
"six",
".",
"text_type",
"(",
"port",
")",
".",
"isdigit",
"(",
")",
":",
"return",
"memcache",
".",
"Client",
"(",
"[",
"'{0}:{1}'",
".",
"format",
"(",
"host",
",",
"port",
")",
"]",
",",
"debug",
"=",
"0",
")",
"raise",
"SaltInvocationError",
"(",
"'port must be an integer'",
")"
]
| Returns a tuple of (user, host, port) with config, pillar, or default
values assigned to missing values. | [
"Returns",
"a",
"tuple",
"of",
"(",
"user",
"host",
"port",
")",
"with",
"config",
"pillar",
"or",
"default",
"values",
"assigned",
"to",
"missing",
"values",
"."
]
| python | train | 42.5 |
Metatab/metapack | metapack/terms.py | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L287-L306 | def schema_columns(self):
"""Return column informatino only from this schema"""
t = self.schema_term
columns = []
if t:
for i, c in enumerate(t.children):
if c.term_is("Table.Column"):
p = c.all_props
p['pos'] = i
p['name'] = c.value
p['header'] = self._name_for_col_term(c, i)
columns.append(p)
return columns | [
"def",
"schema_columns",
"(",
"self",
")",
":",
"t",
"=",
"self",
".",
"schema_term",
"columns",
"=",
"[",
"]",
"if",
"t",
":",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"t",
".",
"children",
")",
":",
"if",
"c",
".",
"term_is",
"(",
"\"Table.Column\"",
")",
":",
"p",
"=",
"c",
".",
"all_props",
"p",
"[",
"'pos'",
"]",
"=",
"i",
"p",
"[",
"'name'",
"]",
"=",
"c",
".",
"value",
"p",
"[",
"'header'",
"]",
"=",
"self",
".",
"_name_for_col_term",
"(",
"c",
",",
"i",
")",
"columns",
".",
"append",
"(",
"p",
")",
"return",
"columns"
]
| Return column information only from this schema | [
"Return",
"column",
"informatino",
"only",
"from",
"this",
"schema"
]
| python | train | 24.3 |
sam-cox/pytides | pytides/tide.py | https://github.com/sam-cox/pytides/blob/63a2507299002f1979ea55a17a82561158d685f7/pytides/tide.py#L62-L96 | def _prepare(constituents, t0, t = None, radians = True):
"""
Return constituent speed and equilibrium argument at a given time, and constituent node factors at given times.
Arguments:
constituents -- list of constituents to prepare
t0 -- time at which to evaluate speed and equilibrium argument for each constituent
t -- list of times at which to evaluate node factors for each constituent (default: t0)
radians -- whether to return the angular arguments in radians or degrees (default: True)
"""
#The equilibrium argument is constant and taken at the beginning of the
#time series (t0). The speed of the equilibrium argument changes very
#slowly, so again we take it to be constant over any length of data. The
#node factors change more rapidly.
if isinstance(t0, Iterable):
t0 = t0[0]
if t is None:
t = [t0]
if not isinstance(t, Iterable):
t = [t]
a0 = astro(t0)
a = [astro(t_i) for t_i in t]
#For convenience give u, V0 (but not speed!) in [0, 360)
V0 = np.array([c.V(a0) for c in constituents])[:, np.newaxis]
speed = np.array([c.speed(a0) for c in constituents])[:, np.newaxis]
u = [np.mod(np.array([c.u(a_i) for c in constituents])[:, np.newaxis], 360.0)
for a_i in a]
f = [np.mod(np.array([c.f(a_i) for c in constituents])[:, np.newaxis], 360.0)
for a_i in a]
if radians:
speed = d2r*speed
V0 = d2r*V0
u = [d2r*each for each in u]
return speed, u, f, V0 | [
"def",
"_prepare",
"(",
"constituents",
",",
"t0",
",",
"t",
"=",
"None",
",",
"radians",
"=",
"True",
")",
":",
"#The equilibrium argument is constant and taken at the beginning of the",
"#time series (t0). The speed of the equilibrium argument changes very",
"#slowly, so again we take it to be constant over any length of data. The",
"#node factors change more rapidly.",
"if",
"isinstance",
"(",
"t0",
",",
"Iterable",
")",
":",
"t0",
"=",
"t0",
"[",
"0",
"]",
"if",
"t",
"is",
"None",
":",
"t",
"=",
"[",
"t0",
"]",
"if",
"not",
"isinstance",
"(",
"t",
",",
"Iterable",
")",
":",
"t",
"=",
"[",
"t",
"]",
"a0",
"=",
"astro",
"(",
"t0",
")",
"a",
"=",
"[",
"astro",
"(",
"t_i",
")",
"for",
"t_i",
"in",
"t",
"]",
"#For convenience give u, V0 (but not speed!) in [0, 360)",
"V0",
"=",
"np",
".",
"array",
"(",
"[",
"c",
".",
"V",
"(",
"a0",
")",
"for",
"c",
"in",
"constituents",
"]",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"speed",
"=",
"np",
".",
"array",
"(",
"[",
"c",
".",
"speed",
"(",
"a0",
")",
"for",
"c",
"in",
"constituents",
"]",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"u",
"=",
"[",
"np",
".",
"mod",
"(",
"np",
".",
"array",
"(",
"[",
"c",
".",
"u",
"(",
"a_i",
")",
"for",
"c",
"in",
"constituents",
"]",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"360.0",
")",
"for",
"a_i",
"in",
"a",
"]",
"f",
"=",
"[",
"np",
".",
"mod",
"(",
"np",
".",
"array",
"(",
"[",
"c",
".",
"f",
"(",
"a_i",
")",
"for",
"c",
"in",
"constituents",
"]",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"360.0",
")",
"for",
"a_i",
"in",
"a",
"]",
"if",
"radians",
":",
"speed",
"=",
"d2r",
"*",
"speed",
"V0",
"=",
"d2r",
"*",
"V0",
"u",
"=",
"[",
"d2r",
"*",
"each",
"for",
"each",
"in",
"u",
"]",
"return",
"speed",
",",
"u",
",",
"f",
",",
"V0"
]
| Return constituent speed and equilibrium argument at a given time, and constituent node factors at given times.
Arguments:
constituents -- list of constituents to prepare
t0 -- time at which to evaluate speed and equilibrium argument for each constituent
t -- list of times at which to evaluate node factors for each constituent (default: t0)
radians -- whether to return the angular arguments in radians or degrees (default: True) | [
"Return",
"constituent",
"speed",
"and",
"equilibrium",
"argument",
"at",
"a",
"given",
"time",
"and",
"constituent",
"node",
"factors",
"at",
"given",
"times",
".",
"Arguments",
":",
"constituents",
"--",
"list",
"of",
"constituents",
"to",
"prepare",
"t0",
"--",
"time",
"at",
"which",
"to",
"evaluate",
"speed",
"and",
"equilibrium",
"argument",
"for",
"each",
"constituent",
"t",
"--",
"list",
"of",
"times",
"at",
"which",
"to",
"evaluate",
"node",
"factors",
"for",
"each",
"constituent",
"(",
"default",
":",
"t0",
")",
"radians",
"--",
"whether",
"to",
"return",
"the",
"angular",
"arguments",
"in",
"radians",
"or",
"degrees",
"(",
"default",
":",
"True",
")"
]
| python | train | 40.114286 |
datasift/datasift-python | datasift/historics.py | https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/historics.py#L10-L39 | def prepare(self, hash, start, end, name, sources, sample=None):
""" Prepare a historics query which can later be started.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsprepare
:param hash: The hash of a CSDL create the query for
:type hash: str
:param start: when to start querying data from - unix timestamp
:type start: int
:param end: when the query should end - unix timestamp
:type end: int
:param name: the name of the query
:type name: str
:param sources: list of sources e.g. ['facebook','bitly','tumblr']
:type sources: list
:param sample: percentage to sample, either 10 or 100
:type sample: int
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
if len(sources) == 0:
raise HistoricSourcesRequired()
if not isinstance(sources, list):
sources = [sources]
params = {'hash': hash, 'start': start, 'end': end, 'name': name, 'sources': ','.join(sources)}
if sample:
params['sample'] = sample
return self.request.post('prepare', params) | [
"def",
"prepare",
"(",
"self",
",",
"hash",
",",
"start",
",",
"end",
",",
"name",
",",
"sources",
",",
"sample",
"=",
"None",
")",
":",
"if",
"len",
"(",
"sources",
")",
"==",
"0",
":",
"raise",
"HistoricSourcesRequired",
"(",
")",
"if",
"not",
"isinstance",
"(",
"sources",
",",
"list",
")",
":",
"sources",
"=",
"[",
"sources",
"]",
"params",
"=",
"{",
"'hash'",
":",
"hash",
",",
"'start'",
":",
"start",
",",
"'end'",
":",
"end",
",",
"'name'",
":",
"name",
",",
"'sources'",
":",
"','",
".",
"join",
"(",
"sources",
")",
"}",
"if",
"sample",
":",
"params",
"[",
"'sample'",
"]",
"=",
"sample",
"return",
"self",
".",
"request",
".",
"post",
"(",
"'prepare'",
",",
"params",
")"
]
| Prepare a historics query which can later be started.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsprepare
:param hash: The hash of a CSDL create the query for
:type hash: str
:param start: when to start querying data from - unix timestamp
:type start: int
:param end: when the query should end - unix timestamp
:type end: int
:param name: the name of the query
:type name: str
:param sources: list of sources e.g. ['facebook','bitly','tumblr']
:type sources: list
:param sample: percentage to sample, either 10 or 100
:type sample: int
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` | [
"Prepare",
"a",
"historics",
"query",
"which",
"can",
"later",
"be",
"started",
"."
]
| python | train | 48.333333 |
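A call sketch, assuming a datasift.Client with valid credentials; the CSDL hash and timestamps are placeholders.

import time
from datasift import Client

client = Client('your_username', 'your_api_key')
end = int(time.time()) - 7200           # historics must end in the past
start = end - 3600
report = client.historics.prepare('csdl_hash_here', start, end,
                                  'my query', ['tumblr'], sample=10)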
reingart/gui2py | gui/controls/gridview.py | https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/controls/gridview.py#L448-L455 | def clear(self):
"Remove all rows and reset internal structures"
## list has no clear ... remove items in reverse order
for i in range(len(self)-1, -1, -1):
del self[i]
self._key = 0
if hasattr(self._grid_view, "wx_obj"):
self._grid_view.wx_obj.ClearGrid() | [
"def",
"clear",
"(",
"self",
")",
":",
"## list has no clear ... remove items in reverse order\r",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"del",
"self",
"[",
"i",
"]",
"self",
".",
"_key",
"=",
"0",
"if",
"hasattr",
"(",
"self",
".",
"_grid_view",
",",
"\"wx_obj\"",
")",
":",
"self",
".",
"_grid_view",
".",
"wx_obj",
".",
"ClearGrid",
"(",
")"
]
| Remove all rows and reset internal structures | [
"Remove",
"all",
"rows",
"and",
"reset",
"internal",
"structures"
]
| python | test | 40 |
fedora-infra/fedmsg-atomic-composer | fedmsg_atomic_composer/composer.py | https://github.com/fedora-infra/fedmsg-atomic-composer/blob/9be9fd4955af0568f8743d7a1a243cd8f70020c3/fedmsg_atomic_composer/composer.py#L119-L126 | def mock_cmd(self, release, *cmd, **kwargs):
"""Run a mock command in the chroot for a given release"""
fmt = '{mock_cmd}'
if kwargs.get('new_chroot') is True:
fmt +=' --new-chroot'
fmt += ' --configdir={mock_dir}'
return self.call(fmt.format(**release).split()
+ list(cmd)) | [
"def",
"mock_cmd",
"(",
"self",
",",
"release",
",",
"*",
"cmd",
",",
"*",
"*",
"kwargs",
")",
":",
"fmt",
"=",
"'{mock_cmd}'",
"if",
"kwargs",
".",
"get",
"(",
"'new_chroot'",
")",
"is",
"True",
":",
"fmt",
"+=",
"' --new-chroot'",
"fmt",
"+=",
"' --configdir={mock_dir}'",
"return",
"self",
".",
"call",
"(",
"fmt",
".",
"format",
"(",
"*",
"*",
"release",
")",
".",
"split",
"(",
")",
"+",
"list",
"(",
"cmd",
")",
")"
]
| Run a mock command in the chroot for a given release | [
"Run",
"a",
"mock",
"command",
"in",
"the",
"chroot",
"for",
"a",
"given",
"release"
]
| python | train | 43 |
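
A sketch of the release mapping mock_cmd() consumes. The two keys are inferred from the format strings above; the values and the composer instance are assumptions:

    release = {
        'mock_cmd': '/usr/bin/mock -r fedora-rawhide-x86_64',   # hypothetical
        'mock_dir': '/etc/mock',                                # hypothetical
    }
    # Builds and runs:
    #   /usr/bin/mock -r fedora-rawhide-x86_64 --new-chroot \
    #       --configdir=/etc/mock --init
    composer.mock_cmd(release, '--init', new_chroot=True)
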
MacHu-GWU/sqlalchemy_mate-project | sqlalchemy_mate/pt.py | https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/pt.py#L34-L49 | def from_stmt(stmt, engine, **kwargs):
"""
    Execute a query in form of text clause, return the result in form of
:class:`PrettyTable`.
:type stmt: TextClause
:param stmt:
:type engine: Engine
:param engine:
:rtype: PrettyTable
"""
result_proxy = engine.execute(stmt, **kwargs)
return from_db_cursor(result_proxy.cursor) | [
"def",
"from_stmt",
"(",
"stmt",
",",
"engine",
",",
"*",
"*",
"kwargs",
")",
":",
"result_proxy",
"=",
"engine",
".",
"execute",
"(",
"stmt",
",",
"*",
"*",
"kwargs",
")",
"return",
"from_db_cursor",
"(",
"result_proxy",
".",
"cursor",
")"
]
| Execute a query in form of text clause, return the result in form of
:class:`PrettyTable`.
:type stmt: TextClause
:param stmt:
:type engine: Engine
:param engine:
:rtype: PrettyTable | [
"Execute",
"a",
"query",
"in",
"form",
"of",
"texture",
"clause",
"return",
"the",
"result",
"in",
"form",
"of"
]
| python | train | 22.125 |
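
A runnable usage sketch, assuming sqlalchemy_mate is installed together with SQLAlchemy 1.x (the helper relies on engine.execute(), which SQLAlchemy 2.0 removed); the in-memory SQLite engine is just for illustration:

    from sqlalchemy import create_engine, text
    from sqlalchemy_mate.pt import from_stmt

    engine = create_engine('sqlite:///:memory:')
    table = from_stmt(text('SELECT 1 AS answer'), engine)
    print(table)   # a PrettyTable with a single 'answer' column
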
UCL-INGI/INGInious | inginious/frontend/pages/course_admin/aggregation_edit.py | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course_admin/aggregation_edit.py#L21-L63 | def get_user_lists(self, course, aggregationid=''):
""" Get the available student and tutor lists for aggregation edition"""
tutor_list = course.get_staff()
# Determine student list and if they are grouped
student_list = list(self.database.aggregations.aggregate([
{"$match": {"courseid": course.get_id()}},
{"$unwind": "$students"},
{"$project": {
"classroom": "$_id",
"students": 1,
"grouped": {
"$anyElementTrue": {
"$map": {
"input": "$groups.students",
"as": "group",
"in": {
"$anyElementTrue": {
"$map": {
"input": "$$group",
"as": "groupmember",
"in": {"$eq": ["$$groupmember", "$students"]}
}
}
}
}
}
}
}}
]))
student_list = dict([(student["students"], student) for student in student_list])
users_info = self.user_manager.get_users_info(list(student_list.keys()) + tutor_list)
if aggregationid:
# Order the non-registered students
other_students = [student_list[entry]['students'] for entry in student_list.keys() if
not student_list[entry]['classroom'] == ObjectId(aggregationid)]
other_students = sorted(other_students, key=lambda val: (("0"+users_info[val][0]) if users_info[val] else ("1"+val)))
return student_list, tutor_list, other_students, users_info
else:
return student_list, tutor_list, users_info | [
"def",
"get_user_lists",
"(",
"self",
",",
"course",
",",
"aggregationid",
"=",
"''",
")",
":",
"tutor_list",
"=",
"course",
".",
"get_staff",
"(",
")",
"# Determine student list and if they are grouped",
"student_list",
"=",
"list",
"(",
"self",
".",
"database",
".",
"aggregations",
".",
"aggregate",
"(",
"[",
"{",
"\"$match\"",
":",
"{",
"\"courseid\"",
":",
"course",
".",
"get_id",
"(",
")",
"}",
"}",
",",
"{",
"\"$unwind\"",
":",
"\"$students\"",
"}",
",",
"{",
"\"$project\"",
":",
"{",
"\"classroom\"",
":",
"\"$_id\"",
",",
"\"students\"",
":",
"1",
",",
"\"grouped\"",
":",
"{",
"\"$anyElementTrue\"",
":",
"{",
"\"$map\"",
":",
"{",
"\"input\"",
":",
"\"$groups.students\"",
",",
"\"as\"",
":",
"\"group\"",
",",
"\"in\"",
":",
"{",
"\"$anyElementTrue\"",
":",
"{",
"\"$map\"",
":",
"{",
"\"input\"",
":",
"\"$$group\"",
",",
"\"as\"",
":",
"\"groupmember\"",
",",
"\"in\"",
":",
"{",
"\"$eq\"",
":",
"[",
"\"$$groupmember\"",
",",
"\"$students\"",
"]",
"}",
"}",
"}",
"}",
"}",
"}",
"}",
"}",
"}",
"]",
")",
")",
"student_list",
"=",
"dict",
"(",
"[",
"(",
"student",
"[",
"\"students\"",
"]",
",",
"student",
")",
"for",
"student",
"in",
"student_list",
"]",
")",
"users_info",
"=",
"self",
".",
"user_manager",
".",
"get_users_info",
"(",
"list",
"(",
"student_list",
".",
"keys",
"(",
")",
")",
"+",
"tutor_list",
")",
"if",
"aggregationid",
":",
"# Order the non-registered students",
"other_students",
"=",
"[",
"student_list",
"[",
"entry",
"]",
"[",
"'students'",
"]",
"for",
"entry",
"in",
"student_list",
".",
"keys",
"(",
")",
"if",
"not",
"student_list",
"[",
"entry",
"]",
"[",
"'classroom'",
"]",
"==",
"ObjectId",
"(",
"aggregationid",
")",
"]",
"other_students",
"=",
"sorted",
"(",
"other_students",
",",
"key",
"=",
"lambda",
"val",
":",
"(",
"(",
"\"0\"",
"+",
"users_info",
"[",
"val",
"]",
"[",
"0",
"]",
")",
"if",
"users_info",
"[",
"val",
"]",
"else",
"(",
"\"1\"",
"+",
"val",
")",
")",
")",
"return",
"student_list",
",",
"tutor_list",
",",
"other_students",
",",
"users_info",
"else",
":",
"return",
"student_list",
",",
"tutor_list",
",",
"users_info"
]
| Get the available student and tutor lists for aggregation editing | [
"Get",
"the",
"available",
"student",
"and",
"tutor",
"lists",
"for",
"aggregation",
"edition"
]
| python | train | 44.418605 |
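
The "grouped" flag computed by the nested $map/$anyElementTrue stages above has a compact plain-Python reading, sketched here with made-up data (field names follow the pipeline):

    def is_grouped(student, groups):
        # True if any group in the classroom lists this student
        return any(student in group['students'] for group in groups)

    groups = [{'students': ['alice', 'bob']}, {'students': ['carol']}]
    print(is_grouped('bob', groups))    # True
    print(is_grouped('dave', groups))   # False
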
retext-project/retext | ReText/window.py | https://github.com/retext-project/retext/blob/ad70435341dd89c7a74742df9d1f9af70859a969/ReText/window.py#L995-L1006 | def getPageSizeByName(self, pageSizeName):
""" Returns a validated PageSize instance corresponding to the given
name. Returns None if the name is not a valid PageSize.
"""
pageSize = None
lowerCaseNames = {pageSize.lower(): pageSize for pageSize in
self.availablePageSizes()}
if pageSizeName.lower() in lowerCaseNames:
pageSize = getattr(QPagedPaintDevice, lowerCaseNames[pageSizeName.lower()])
return pageSize | [
"def",
"getPageSizeByName",
"(",
"self",
",",
"pageSizeName",
")",
":",
"pageSize",
"=",
"None",
"lowerCaseNames",
"=",
"{",
"pageSize",
".",
"lower",
"(",
")",
":",
"pageSize",
"for",
"pageSize",
"in",
"self",
".",
"availablePageSizes",
"(",
")",
"}",
"if",
"pageSizeName",
".",
"lower",
"(",
")",
"in",
"lowerCaseNames",
":",
"pageSize",
"=",
"getattr",
"(",
"QPagedPaintDevice",
",",
"lowerCaseNames",
"[",
"pageSizeName",
".",
"lower",
"(",
")",
"]",
")",
"return",
"pageSize"
]
| Returns a validated PageSize instance corresponding to the given
name. Returns None if the name is not a valid PageSize. | [
"Returns",
"a",
"validated",
"PageSize",
"instance",
"corresponding",
"to",
"the",
"given",
"name",
".",
"Returns",
"None",
"if",
"the",
"name",
"is",
"not",
"a",
"valid",
"PageSize",
"."
]
| python | train | 36.5 |
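
The lookup idiom above, isolated from Qt: map lower-cased names back to their canonical spelling, then resolve the canonical name. The page-size names below are stand-ins for whatever availablePageSizes() actually returns:

    canonical = ['A4', 'Letter', 'Legal']   # illustrative assumption
    lower_case_names = {name.lower(): name for name in canonical}
    requested = 'a4'
    if requested.lower() in lower_case_names:
        print(lower_case_names[requested.lower()])   # -> 'A4'
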
jsommers/switchyard | switchyard/lib/packet/util.py | https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/packet/util.py#L14-L29 | def create_ip_arp_request(srchw, srcip, targetip):
'''
Create and return a packet containing an Ethernet header
and ARP header.
'''
ether = Ethernet()
ether.src = srchw
ether.dst = SpecialEthAddr.ETHER_BROADCAST.value
ether.ethertype = EtherType.ARP
arp = Arp()
arp.operation = ArpOperation.Request
arp.senderhwaddr = srchw
arp.senderprotoaddr = srcip
arp.targethwaddr = SpecialEthAddr.ETHER_BROADCAST.value
arp.targetprotoaddr = targetip
return ether + arp | [
"def",
"create_ip_arp_request",
"(",
"srchw",
",",
"srcip",
",",
"targetip",
")",
":",
"ether",
"=",
"Ethernet",
"(",
")",
"ether",
".",
"src",
"=",
"srchw",
"ether",
".",
"dst",
"=",
"SpecialEthAddr",
".",
"ETHER_BROADCAST",
".",
"value",
"ether",
".",
"ethertype",
"=",
"EtherType",
".",
"ARP",
"arp",
"=",
"Arp",
"(",
")",
"arp",
".",
"operation",
"=",
"ArpOperation",
".",
"Request",
"arp",
".",
"senderhwaddr",
"=",
"srchw",
"arp",
".",
"senderprotoaddr",
"=",
"srcip",
"arp",
".",
"targethwaddr",
"=",
"SpecialEthAddr",
".",
"ETHER_BROADCAST",
".",
"value",
"arp",
".",
"targetprotoaddr",
"=",
"targetip",
"return",
"ether",
"+",
"arp"
]
| Create and return a packet containing an Ethernet header
and ARP header. | [
"Create",
"and",
"return",
"a",
"packet",
"containing",
"an",
"Ethernet",
"header",
"and",
"ARP",
"header",
"."
]
| python | train | 31.375 |
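
A usage sketch. Switchyard's Ethernet and Arp setters generally coerce address strings, so plain strings should work here, though the exact accepted types are version-dependent:

    request_pkt = create_ip_arp_request('11:22:33:44:55:66',   # source MAC
                                        '192.168.1.10',        # source IP
                                        '192.168.1.1')         # target IP
    print(request_pkt)   # Ethernet header followed by the ARP request
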
geophysics-ubonn/reda | lib/reda/utils/data.py | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/data.py#L21-L66 | def download_data(identifier, outdir):
"""Download data from a separate data repository for testing.
Parameters
----------
identifier: string
The identifier used to find the data set
outdir: string
unzip the data in this directory
"""
# determine target
if use_local_data_repository is not None:
url_base = 'file:' + request.pathname2url(
use_local_data_repository + os.sep)
else:
url_base = repository_url
print('url_base: {}'.format(url_base))
url = url_base + inventory_filename
# download inventory file
    filename, headers = request.urlretrieve(url)
df = pd.read_csv(
filename,
delim_whitespace=True,
comment='#',
header=None,
names=['identifier', 'rel_path'],
)
# find relative path to data file
rel_path_query = df.query('identifier == "{}"'.format(identifier))
if rel_path_query.shape[0] == 0:
raise Exception('identifier not found')
rel_path = rel_path_query['rel_path'].values[0]
# download the file
url = url_base + rel_path
print('data url: {}'.format(url))
    filename, headers = request.urlretrieve(url)
if not os.path.isdir(outdir):
os.makedirs(outdir)
zip_obj = zipfile.ZipFile(filename)
zip_obj.extractall(outdir) | [
"def",
"download_data",
"(",
"identifier",
",",
"outdir",
")",
":",
"# determine target",
"if",
"use_local_data_repository",
"is",
"not",
"None",
":",
"url_base",
"=",
"'file:'",
"+",
"request",
".",
"pathname2url",
"(",
"use_local_data_repository",
"+",
"os",
".",
"sep",
")",
"else",
":",
"url_base",
"=",
"repository_url",
"print",
"(",
"'url_base: {}'",
".",
"format",
"(",
"url_base",
")",
")",
"url",
"=",
"url_base",
"+",
"inventory_filename",
"# download inventory file",
"filename",
",",
"headers",
"=",
"request",
".",
"urlretrieve",
"(",
"url",
")",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"filename",
",",
"delim_whitespace",
"=",
"True",
",",
"comment",
"=",
"'#'",
",",
"header",
"=",
"None",
",",
"names",
"=",
"[",
"'identifier'",
",",
"'rel_path'",
"]",
",",
")",
"# find relative path to data file",
"rel_path_query",
"=",
"df",
".",
"query",
"(",
"'identifier == \"{}\"'",
".",
"format",
"(",
"identifier",
")",
")",
"if",
"rel_path_query",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"raise",
"Exception",
"(",
"'identifier not found'",
")",
"rel_path",
"=",
"rel_path_query",
"[",
"'rel_path'",
"]",
".",
"values",
"[",
"0",
"]",
"# download the file",
"url",
"=",
"url_base",
"+",
"rel_path",
"print",
"(",
"'data url: {}'",
".",
"format",
"(",
"url",
")",
")",
"filename",
",",
"headers",
"=",
"request",
".",
"urlretrieve",
"(",
"url",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"outdir",
")",
":",
"os",
".",
"makedirs",
"(",
"outdir",
")",
"zip_obj",
"=",
"zipfile",
".",
"ZipFile",
"(",
"filename",
")",
"zip_obj",
".",
"extractall",
"(",
"outdir",
")"
]
| Download data from a separate data repository for testing.
Parameters
----------
identifier: string
The identifier used to find the data set
outdir: string
unzip the data in this directory | [
"Download",
"data",
"from",
"a",
"separate",
"data",
"repository",
"for",
"testing",
"."
]
| python | train | 28.021739 |
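
A usage sketch; the identifier string is hypothetical and must exist in the inventory file of the configured data repository:

    download_data('wenner_example', 'data/wenner_example')
    # fetches the inventory, resolves the identifier to a relative path,
    # downloads the zip archive, and extracts it into the output directory
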
DigitalGlobe/gbdxtools | gbdxtools/vector_styles.py | https://github.com/DigitalGlobe/gbdxtools/blob/def62f8f2d77b168aa2bd115290aaa0f9a08a4bb/gbdxtools/vector_styles.py#L216-L232 | def paint(self):
"""
Renders a javascript snippet suitable for use as a mapbox-gl fill-extrusion paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet
"""
snippet = {
'fill-extrusion-opacity': VectorStyle.get_style_value(self.opacity),
'fill-extrusion-color': VectorStyle.get_style_value(self.color),
'fill-extrusion-base': VectorStyle.get_style_value(self.base),
'fill-extrusion-height': VectorStyle.get_style_value(self.height)
}
if self.translate:
snippet['fill-extrusion-translate'] = self.translate
return snippet | [
"def",
"paint",
"(",
"self",
")",
":",
"snippet",
"=",
"{",
"'fill-extrusion-opacity'",
":",
"VectorStyle",
".",
"get_style_value",
"(",
"self",
".",
"opacity",
")",
",",
"'fill-extrusion-color'",
":",
"VectorStyle",
".",
"get_style_value",
"(",
"self",
".",
"color",
")",
",",
"'fill-extrusion-base'",
":",
"VectorStyle",
".",
"get_style_value",
"(",
"self",
".",
"base",
")",
",",
"'fill-extrusion-height'",
":",
"VectorStyle",
".",
"get_style_value",
"(",
"self",
".",
"height",
")",
"}",
"if",
"self",
".",
"translate",
":",
"snippet",
"[",
"'fill-extrusion-translate'",
"]",
"=",
"self",
".",
"translate",
"return",
"snippet"
]
| Renders a javascript snippet suitable for use as a mapbox-gl fill-extrusion paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet | [
"Renders",
"a",
"javascript",
"snippet",
"suitable",
"for",
"use",
"as",
"a",
"mapbox",
"-",
"gl",
"fill",
"-",
"extrusion",
"paint",
"entry"
]
| python | valid | 39.764706 |
hammerlab/cohorts | cohorts/utils.py | https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L210-L219 | def set_attributes(obj, additional_data):
"""
Given an object and a dictionary, give the object new attributes from that dictionary.
    Uses _strip_column_name to get rid of whitespace/uppercase/special characters.
"""
for key, value in additional_data.items():
if hasattr(obj, key):
raise ValueError("Key %s in additional_data already exists in this object" % key)
setattr(obj, _strip_column_name(key), value) | [
"def",
"set_attributes",
"(",
"obj",
",",
"additional_data",
")",
":",
"for",
"key",
",",
"value",
"in",
"additional_data",
".",
"items",
"(",
")",
":",
"if",
"hasattr",
"(",
"obj",
",",
"key",
")",
":",
"raise",
"ValueError",
"(",
"\"Key %s in additional_data already exists in this object\"",
"%",
"key",
")",
"setattr",
"(",
"obj",
",",
"_strip_column_name",
"(",
"key",
")",
",",
"value",
")"
]
| Given an object and a dictionary, give the object new attributes from that dictionary.
Uses _strip_column_name to get rid of whitespace/uppercase/special characters. | [
"Given",
"an",
"object",
"and",
"a",
"dictionary",
"give",
"the",
"object",
"new",
"attributes",
"from",
"that",
"dictionary",
"."
]
| python | train | 44.7 |
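
A small sketch of the intended effect. The exact attribute name depends on _strip_column_name, assumed here to lower-case the key and replace whitespace with underscores:

    class Record:   # hypothetical plain container
        pass

    r = Record()
    set_attributes(r, {'Smoking Status': 'never'})
    print(r.smoking_status)   # 'never', given the stripping behaviour assumed above
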
MAVENSDC/PyTplot | pytplot/xlim.py | https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/xlim.py#L10-L44 | def xlim(min, max):
"""
This function will set the x axis range for all time series plots
Parameters:
min : flt
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
max : flt
The time to end all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.xlim(1500249600, 1500249600 + 86400)
>>> # The same as above, but using different inputs
>>> pytplot.xlim("2017-07-17 00:00:00", "2017-07-18 00:00:00")
"""
if not isinstance(min, (int, float, complex)):
min = tplot_utilities.str_to_int(min)
if not isinstance(max, (int, float, complex)):
max = tplot_utilities.str_to_int(max)
if 'x_range' in tplot_opt_glob:
lim_info['xlast'] = tplot_opt_glob['x_range']
else:
lim_info['xfull'] = Range1d(min, max)
lim_info['xlast'] = Range1d(min, max)
tplot_opt_glob['x_range'] = [min, max]
return | [
"def",
"xlim",
"(",
"min",
",",
"max",
")",
":",
"if",
"not",
"isinstance",
"(",
"min",
",",
"(",
"int",
",",
"float",
",",
"complex",
")",
")",
":",
"min",
"=",
"tplot_utilities",
".",
"str_to_int",
"(",
"min",
")",
"if",
"not",
"isinstance",
"(",
"max",
",",
"(",
"int",
",",
"float",
",",
"complex",
")",
")",
":",
"max",
"=",
"tplot_utilities",
".",
"str_to_int",
"(",
"max",
")",
"if",
"'x_range'",
"in",
"tplot_opt_glob",
":",
"lim_info",
"[",
"'xlast'",
"]",
"=",
"tplot_opt_glob",
"[",
"'x_range'",
"]",
"else",
":",
"lim_info",
"[",
"'xfull'",
"]",
"=",
"Range1d",
"(",
"min",
",",
"max",
")",
"lim_info",
"[",
"'xlast'",
"]",
"=",
"Range1d",
"(",
"min",
",",
"max",
")",
"tplot_opt_glob",
"[",
"'x_range'",
"]",
"=",
"[",
"min",
",",
"max",
"]",
"return"
]
| This function will set the x axis range for all time series plots
Parameters:
min : flt
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
max : flt
The time to end all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.xlim(1500249600, 1500249600 + 86400)
>>> # The same as above, but using different inputs
>>> pytplot.xlim("2017-07-17 00:00:00", "2017-07-18 00:00:00") | [
"This",
"function",
"will",
"set",
"the",
"x",
"axis",
"range",
"for",
"all",
"time",
"series",
"plots",
"Parameters",
":",
"min",
":",
"flt",
"The",
"time",
"to",
"start",
"all",
"time",
"series",
"plots",
".",
"Can",
"be",
"given",
"in",
"seconds",
"since",
"epoch",
"or",
"as",
"a",
"string",
"in",
"the",
"format",
"YYYY",
"-",
"MM",
"-",
"DD",
"HH",
":",
"MM",
":",
"SS",
"max",
":",
"flt",
"The",
"time",
"to",
"end",
"all",
"time",
"series",
"plots",
".",
"Can",
"be",
"given",
"in",
"seconds",
"since",
"epoch",
"or",
"as",
"a",
"string",
"in",
"the",
"format",
"YYYY",
"-",
"MM",
"-",
"DD",
"HH",
":",
"MM",
":",
"SS",
"Returns",
":",
"None",
"Examples",
":",
">>>",
"#",
"Set",
"the",
"timespan",
"to",
"be",
"2017",
"-",
"07",
"-",
"17",
"00",
":",
"00",
":",
"00",
"plus",
"1",
"day",
">>>",
"import",
"pytplot",
">>>",
"pytplot",
".",
"xlim",
"(",
"1500249600",
"1500249600",
"+",
"86400",
")",
">>>",
"#",
"The",
"same",
"as",
"above",
"but",
"using",
"different",
"inputs",
">>>",
"pytplot",
".",
"xlim",
"(",
"2017",
"-",
"07",
"-",
"17",
"00",
":",
"00",
":",
"00",
"2017",
"-",
"07",
"-",
"18",
"00",
":",
"00",
":",
"00",
")"
]
| python | train | 35.028571 |
assemblerflow/flowcraft | flowcraft/generator/inspect.py | https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1356-L1373 | def _get_log_lines(self, n=300):
"""Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log
"""
with open(self.log_file) as fh:
last_lines = fh.readlines()[-n:]
return last_lines | [
"def",
"_get_log_lines",
"(",
"self",
",",
"n",
"=",
"300",
")",
":",
"with",
"open",
"(",
"self",
".",
"log_file",
")",
"as",
"fh",
":",
"last_lines",
"=",
"fh",
".",
"readlines",
"(",
")",
"[",
"-",
"n",
":",
"]",
"return",
"last_lines"
]
| Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log | [
"Returns",
"a",
"list",
"with",
"the",
"last",
"n",
"lines",
"of",
"the",
"nextflow",
"log",
"file"
]
| python | test | 23.277778 |
michaeljohnbarr/django-timezone-utils | timezone_utils/fields.py | https://github.com/michaeljohnbarr/django-timezone-utils/blob/61c8b50c59049cb7eccd4e3892f332f88b890f00/timezone_utils/fields.py#L179-L233 | def _check_choices_attribute(self): # pragma: no cover
"""Checks to make sure that choices contains valid timezone choices."""
if self.choices:
warning_params = {
'msg': (
"'choices' contains an invalid time zone value '{value}' "
"which was not found as a supported time zone by pytz "
"{version}."
),
'hint': "Values must be found in pytz.all_timezones.",
'obj': self,
}
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key in map(lambda x: x[0], option_value):
if optgroup_key not in pytz.all_timezones:
# Make sure we don't raise this error on empty
# values
if optgroup_key not in self.empty_values:
# Update the error message by adding the value
warning_params.update({
'msg': warning_params['msg'].format(
value=optgroup_key,
version=pytz.VERSION
)
})
# Return the warning
return [
checks.Warning(**warning_params)
]
elif option_key not in pytz.all_timezones:
# Make sure we don't raise this error on empty
# values
if option_key not in self.empty_values:
# Update the error message by adding the value
warning_params.update({
'msg': warning_params['msg'].format(
value=option_key,
version=pytz.VERSION
)
})
# Return the warning
return [
checks.Warning(**warning_params)
]
# When no error, return an empty list
return [] | [
"def",
"_check_choices_attribute",
"(",
"self",
")",
":",
"# pragma: no cover",
"if",
"self",
".",
"choices",
":",
"warning_params",
"=",
"{",
"'msg'",
":",
"(",
"\"'choices' contains an invalid time zone value '{value}' \"",
"\"which was not found as a supported time zone by pytz \"",
"\"{version}.\"",
")",
",",
"'hint'",
":",
"\"Values must be found in pytz.all_timezones.\"",
",",
"'obj'",
":",
"self",
",",
"}",
"for",
"option_key",
",",
"option_value",
"in",
"self",
".",
"choices",
":",
"if",
"isinstance",
"(",
"option_value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# This is an optgroup, so look inside the group for",
"# options.",
"for",
"optgroup_key",
"in",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
",",
"option_value",
")",
":",
"if",
"optgroup_key",
"not",
"in",
"pytz",
".",
"all_timezones",
":",
"# Make sure we don't raise this error on empty",
"# values",
"if",
"optgroup_key",
"not",
"in",
"self",
".",
"empty_values",
":",
"# Update the error message by adding the value",
"warning_params",
".",
"update",
"(",
"{",
"'msg'",
":",
"warning_params",
"[",
"'msg'",
"]",
".",
"format",
"(",
"value",
"=",
"optgroup_key",
",",
"version",
"=",
"pytz",
".",
"VERSION",
")",
"}",
")",
"# Return the warning",
"return",
"[",
"checks",
".",
"Warning",
"(",
"*",
"*",
"warning_params",
")",
"]",
"elif",
"option_key",
"not",
"in",
"pytz",
".",
"all_timezones",
":",
"# Make sure we don't raise this error on empty",
"# values",
"if",
"option_key",
"not",
"in",
"self",
".",
"empty_values",
":",
"# Update the error message by adding the value",
"warning_params",
".",
"update",
"(",
"{",
"'msg'",
":",
"warning_params",
"[",
"'msg'",
"]",
".",
"format",
"(",
"value",
"=",
"option_key",
",",
"version",
"=",
"pytz",
".",
"VERSION",
")",
"}",
")",
"# Return the warning",
"return",
"[",
"checks",
".",
"Warning",
"(",
"*",
"*",
"warning_params",
")",
"]",
"# When no error, return an empty list",
"return",
"[",
"]"
]
| Checks to make sure that choices contains valid timezone choices. | [
"Checks",
"to",
"make",
"sure",
"that",
"choices",
"contains",
"valid",
"timezone",
"choices",
"."
]
| python | train | 44.109091 |
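
The core validity test the check performs for each choice value, isolated from the Django checks machinery:

    import pytz

    print('UTC' in pytz.all_timezones)           # True  -> no warning
    print('Mars/Olympus' in pytz.all_timezones)  # False -> a Warning is returned
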
gpoulter/python-ngram | ngram.py | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L267-L294 | def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared | [
"def",
"items_sharing_ngrams",
"(",
"self",
",",
"query",
")",
":",
"# From matched string to number of N-grams shared with query string",
"shared",
"=",
"{",
"}",
"# Dictionary mapping n-gram to string to number of occurrences of that",
"# ngram in the string that remain to be matched.",
"remaining",
"=",
"{",
"}",
"for",
"ngram",
"in",
"self",
".",
"split",
"(",
"query",
")",
":",
"try",
":",
"for",
"match",
",",
"count",
"in",
"self",
".",
"_grams",
"[",
"ngram",
"]",
".",
"items",
"(",
")",
":",
"remaining",
".",
"setdefault",
"(",
"ngram",
",",
"{",
"}",
")",
".",
"setdefault",
"(",
"match",
",",
"count",
")",
"# match as many occurrences as exist in matched string",
"if",
"remaining",
"[",
"ngram",
"]",
"[",
"match",
"]",
">",
"0",
":",
"remaining",
"[",
"ngram",
"]",
"[",
"match",
"]",
"-=",
"1",
"shared",
".",
"setdefault",
"(",
"match",
",",
"0",
")",
"shared",
"[",
"match",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"pass",
"return",
"shared"
]
| Retrieve the subset of items that share n-grams with the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)] | [
"Retrieve",
"the",
"subset",
"of",
"items",
"that",
"share",
"n",
"-",
"grams",
"the",
"query",
"string",
"."
]
| python | train | 43.785714 |
wonambi-python/wonambi | wonambi/widgets/notes.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1892-L1927 | def create_dialog(self):
"""Create the dialog."""
bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.idx_ok = bbox.button(QDialogButtonBox.Ok)
self.idx_cancel = bbox.button(QDialogButtonBox.Cancel)
filebutton = QPushButton()
filebutton.setText('Choose')
self.idx_filename = filebutton
self.xp_format = FormMenu(['CSV', 'Brain Vision'])
self.all_types = FormBool('All event types')
self.idx_evt_type = QListWidget()
self.idx_evt_type.setSelectionMode(QAbstractItemView.ExtendedSelection)
filebutton.clicked.connect(self.save_as)
self.all_types.connect(self.toggle_buttons)
bbox.clicked.connect(self.button_clicked)
form = QFormLayout()
form.addRow('Filename', self.idx_filename)
form.addRow('Format', self.xp_format)
form.addRow(self.all_types)
form.addRow('Event type(s)', self.idx_evt_type)
btnlayout = QHBoxLayout()
btnlayout.addStretch(1)
btnlayout.addWidget(bbox)
vlayout = QVBoxLayout()
vlayout.addLayout(form)
vlayout.addStretch(1)
vlayout.addLayout(btnlayout)
self.setLayout(vlayout) | [
"def",
"create_dialog",
"(",
"self",
")",
":",
"bbox",
"=",
"QDialogButtonBox",
"(",
"QDialogButtonBox",
".",
"Ok",
"|",
"QDialogButtonBox",
".",
"Cancel",
")",
"self",
".",
"idx_ok",
"=",
"bbox",
".",
"button",
"(",
"QDialogButtonBox",
".",
"Ok",
")",
"self",
".",
"idx_cancel",
"=",
"bbox",
".",
"button",
"(",
"QDialogButtonBox",
".",
"Cancel",
")",
"filebutton",
"=",
"QPushButton",
"(",
")",
"filebutton",
".",
"setText",
"(",
"'Choose'",
")",
"self",
".",
"idx_filename",
"=",
"filebutton",
"self",
".",
"xp_format",
"=",
"FormMenu",
"(",
"[",
"'CSV'",
",",
"'Brain Vision'",
"]",
")",
"self",
".",
"all_types",
"=",
"FormBool",
"(",
"'All event types'",
")",
"self",
".",
"idx_evt_type",
"=",
"QListWidget",
"(",
")",
"self",
".",
"idx_evt_type",
".",
"setSelectionMode",
"(",
"QAbstractItemView",
".",
"ExtendedSelection",
")",
"filebutton",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"save_as",
")",
"self",
".",
"all_types",
".",
"connect",
"(",
"self",
".",
"toggle_buttons",
")",
"bbox",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"button_clicked",
")",
"form",
"=",
"QFormLayout",
"(",
")",
"form",
".",
"addRow",
"(",
"'Filename'",
",",
"self",
".",
"idx_filename",
")",
"form",
".",
"addRow",
"(",
"'Format'",
",",
"self",
".",
"xp_format",
")",
"form",
".",
"addRow",
"(",
"self",
".",
"all_types",
")",
"form",
".",
"addRow",
"(",
"'Event type(s)'",
",",
"self",
".",
"idx_evt_type",
")",
"btnlayout",
"=",
"QHBoxLayout",
"(",
")",
"btnlayout",
".",
"addStretch",
"(",
"1",
")",
"btnlayout",
".",
"addWidget",
"(",
"bbox",
")",
"vlayout",
"=",
"QVBoxLayout",
"(",
")",
"vlayout",
".",
"addLayout",
"(",
"form",
")",
"vlayout",
".",
"addStretch",
"(",
"1",
")",
"vlayout",
".",
"addLayout",
"(",
"btnlayout",
")",
"self",
".",
"setLayout",
"(",
"vlayout",
")"
]
| Create the dialog. | [
"Create",
"the",
"dialog",
"."
]
| python | train | 34.555556 |
astropy/photutils | photutils/centroids/core.py | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/centroids/core.py#L325-L355 | def centroid_2dg(data, error=None, mask=None):
"""
Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
a constant) to the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
"""
gfit = fit_2dgaussian(data, error=error, mask=mask)
return np.array([gfit.x_mean.value, gfit.y_mean.value]) | [
"def",
"centroid_2dg",
"(",
"data",
",",
"error",
"=",
"None",
",",
"mask",
"=",
"None",
")",
":",
"gfit",
"=",
"fit_2dgaussian",
"(",
"data",
",",
"error",
"=",
"error",
",",
"mask",
"=",
"mask",
")",
"return",
"np",
".",
"array",
"(",
"[",
"gfit",
".",
"x_mean",
".",
"value",
",",
"gfit",
".",
"y_mean",
".",
"value",
"]",
")"
]
| Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
a constant) to the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid. | [
"Calculate",
"the",
"centroid",
"of",
"a",
"2D",
"array",
"by",
"fitting",
"a",
"2D",
"Gaussian",
"(",
"plus",
"a",
"constant",
")",
"to",
"the",
"array",
"."
]
| python | train | 30.967742 |
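
A runnable usage sketch with a synthetic Gaussian blob (photutils required; the blob position and width are arbitrary):

    import numpy as np
    from photutils.centroids import centroid_2dg

    y, x = np.mgrid[0:25, 0:25]
    data = np.exp(-((x - 12.3)**2 + (y - 9.7)**2) / (2 * 2.0**2))
    xc, yc = centroid_2dg(data)
    print(xc, yc)   # close to (12.3, 9.7)
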
GNS3/gns3-server | gns3server/compute/dynamips/__init__.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/__init__.py#L187-L207 | def project_closing(self, project):
"""
Called when a project is about to be closed.
:param project: Project instance
"""
yield from super().project_closing(project)
# delete the Dynamips devices corresponding to the project
tasks = []
for device in self._devices.values():
if device.project.id == project.id:
tasks.append(asyncio.async(device.delete()))
if tasks:
done, _ = yield from asyncio.wait(tasks)
for future in done:
try:
future.result()
except (Exception, GeneratorExit) as e:
log.error("Could not delete device {}".format(e), exc_info=1) | [
"def",
"project_closing",
"(",
"self",
",",
"project",
")",
":",
"yield",
"from",
"super",
"(",
")",
".",
"project_closing",
"(",
"project",
")",
"# delete the Dynamips devices corresponding to the project",
"tasks",
"=",
"[",
"]",
"for",
"device",
"in",
"self",
".",
"_devices",
".",
"values",
"(",
")",
":",
"if",
"device",
".",
"project",
".",
"id",
"==",
"project",
".",
"id",
":",
"tasks",
".",
"append",
"(",
"asyncio",
".",
"async",
"(",
"device",
".",
"delete",
"(",
")",
")",
")",
"if",
"tasks",
":",
"done",
",",
"_",
"=",
"yield",
"from",
"asyncio",
".",
"wait",
"(",
"tasks",
")",
"for",
"future",
"in",
"done",
":",
"try",
":",
"future",
".",
"result",
"(",
")",
"except",
"(",
"Exception",
",",
"GeneratorExit",
")",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Could not delete device {}\"",
".",
"format",
"(",
"e",
")",
",",
"exc_info",
"=",
"1",
")"
]
| Called when a project is about to be closed.
:param project: Project instance | [
"Called",
"when",
"a",
"project",
"is",
"about",
"to",
"be",
"closed",
"."
]
| python | train | 34.619048 |
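
A compatibility note on the task-scheduling call above: asyncio.async() was deprecated in Python 3.4.4 in favour of asyncio.ensure_future(), became a syntax error in 3.7 when async turned into a reserved keyword, and was removed in 3.10. On modern interpreters the equivalent line would be:

    tasks.append(asyncio.ensure_future(device.delete()))
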
pypa/pipenv | pipenv/vendor/requests/utils.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/utils.py#L344-L375 | def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result | [
"def",
"parse_dict_header",
"(",
"value",
")",
":",
"result",
"=",
"{",
"}",
"for",
"item",
"in",
"_parse_list_header",
"(",
"value",
")",
":",
"if",
"'='",
"not",
"in",
"item",
":",
"result",
"[",
"item",
"]",
"=",
"None",
"continue",
"name",
",",
"value",
"=",
"item",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"value",
"[",
":",
"1",
"]",
"==",
"value",
"[",
"-",
"1",
":",
"]",
"==",
"'\"'",
":",
"value",
"=",
"unquote_header_value",
"(",
"value",
"[",
"1",
":",
"-",
"1",
"]",
")",
"result",
"[",
"name",
"]",
"=",
"value",
"return",
"result"
]
| Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict | [
"Parse",
"lists",
"of",
"key",
"value",
"pairs",
"as",
"described",
"by",
"RFC",
"2068",
"Section",
"2",
"and",
"convert",
"them",
"into",
"a",
"python",
"dict",
":"
]
| python | train | 29.375 |
nvdv/vprof | setup.py | https://github.com/nvdv/vprof/blob/4c3ff78f8920ab10cb9c00b14143452aa09ff6bb/setup.py#L112-L119 | def get_vprof_version(filename):
"""Returns actual version specified in filename."""
with open(filename) as src_file:
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
src_file.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version info.') | [
"def",
"get_vprof_version",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"src_file",
":",
"version_match",
"=",
"re",
".",
"search",
"(",
"r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"",
",",
"src_file",
".",
"read",
"(",
")",
",",
"re",
".",
"M",
")",
"if",
"version_match",
":",
"return",
"version_match",
".",
"group",
"(",
"1",
")",
"raise",
"RuntimeError",
"(",
"'Unable to find version info.'",
")"
]
| Returns actual version specified in filename. | [
"Returns",
"actual",
"version",
"specified",
"in",
"filename",
"."
]
| python | test | 46.875 |
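
A self-contained usage sketch, assuming get_vprof_version() from the setup.py above is in scope:

    import os
    import tempfile

    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
        f.write("__version__ = '1.2.3'\n")
    print(get_vprof_version(f.name))   # -> 1.2.3
    os.unlink(f.name)
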
zalando/patroni | patroni/ha.py | https://github.com/zalando/patroni/blob/f6d29081c90af52064b981cdd877a07338d86038/patroni/ha.py#L1153-L1190 | def handle_starting_instance(self):
"""Starting up PostgreSQL may take a long time. In case we are the leader we may want to
fail over to."""
# Check if we are in startup, when paused defer to main loop for manual failovers.
if not self.state_handler.check_for_startup() or self.is_paused():
self.set_start_timeout(None)
if self.is_paused():
self.state_handler.set_state(self.state_handler.is_running() and 'running' or 'stopped')
return None
# state_handler.state == 'starting' here
if self.has_lock():
if not self.update_lock():
logger.info("Lost lock while starting up. Demoting self.")
self.demote('immediate-nolock')
return 'stopped PostgreSQL while starting up because leader key was lost'
timeout = self._start_timeout or self.patroni.config['master_start_timeout']
time_left = timeout - self.state_handler.time_in_state()
if time_left <= 0:
if self.is_failover_possible(self.cluster.members):
logger.info("Demoting self because master startup is taking too long")
self.demote('immediate')
return 'stopped PostgreSQL because of startup timeout'
else:
return 'master start has timed out, but continuing to wait because failover is not possible'
else:
msg = self.process_manual_failover_from_leader()
if msg is not None:
return msg
return 'PostgreSQL is still starting up, {0:.0f} seconds until timeout'.format(time_left)
else:
# Use normal processing for standbys
logger.info("Still starting up as a standby.")
return None | [
"def",
"handle_starting_instance",
"(",
"self",
")",
":",
"# Check if we are in startup, when paused defer to main loop for manual failovers.",
"if",
"not",
"self",
".",
"state_handler",
".",
"check_for_startup",
"(",
")",
"or",
"self",
".",
"is_paused",
"(",
")",
":",
"self",
".",
"set_start_timeout",
"(",
"None",
")",
"if",
"self",
".",
"is_paused",
"(",
")",
":",
"self",
".",
"state_handler",
".",
"set_state",
"(",
"self",
".",
"state_handler",
".",
"is_running",
"(",
")",
"and",
"'running'",
"or",
"'stopped'",
")",
"return",
"None",
"# state_handler.state == 'starting' here",
"if",
"self",
".",
"has_lock",
"(",
")",
":",
"if",
"not",
"self",
".",
"update_lock",
"(",
")",
":",
"logger",
".",
"info",
"(",
"\"Lost lock while starting up. Demoting self.\"",
")",
"self",
".",
"demote",
"(",
"'immediate-nolock'",
")",
"return",
"'stopped PostgreSQL while starting up because leader key was lost'",
"timeout",
"=",
"self",
".",
"_start_timeout",
"or",
"self",
".",
"patroni",
".",
"config",
"[",
"'master_start_timeout'",
"]",
"time_left",
"=",
"timeout",
"-",
"self",
".",
"state_handler",
".",
"time_in_state",
"(",
")",
"if",
"time_left",
"<=",
"0",
":",
"if",
"self",
".",
"is_failover_possible",
"(",
"self",
".",
"cluster",
".",
"members",
")",
":",
"logger",
".",
"info",
"(",
"\"Demoting self because master startup is taking too long\"",
")",
"self",
".",
"demote",
"(",
"'immediate'",
")",
"return",
"'stopped PostgreSQL because of startup timeout'",
"else",
":",
"return",
"'master start has timed out, but continuing to wait because failover is not possible'",
"else",
":",
"msg",
"=",
"self",
".",
"process_manual_failover_from_leader",
"(",
")",
"if",
"msg",
"is",
"not",
"None",
":",
"return",
"msg",
"return",
"'PostgreSQL is still starting up, {0:.0f} seconds until timeout'",
".",
"format",
"(",
"time_left",
")",
"else",
":",
"# Use normal processing for standbys",
"logger",
".",
"info",
"(",
"\"Still starting up as a standby.\"",
")",
"return",
"None"
]
| Starting up PostgreSQL may take a long time. In case we are the leader we may want to
fail over to a replica. | [
"Starting",
"up",
"PostgreSQL",
"may",
"take",
"a",
"long",
"time",
".",
"In",
"case",
"we",
"are",
"the",
"leader",
"we",
"may",
"want",
"to",
"fail",
"over",
"to",
"."
]
| python | train | 48.105263 |
automl/HpBandSter | hpbandster/optimizers/bohb.py | https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/optimizers/bohb.py#L119-L141 | def get_next_iteration(self, iteration, iteration_kwargs={}):
"""
BO-HB uses (just like Hyperband) SuccessiveHalving for each iteration.
See Li et al. (2016) for reference.
Parameters
----------
iteration: int
the index of the iteration to be instantiated
Returns
-------
SuccessiveHalving: the SuccessiveHalving iteration with the
corresponding number of configurations
"""
# number of 'SH rungs'
s = self.max_SH_iter - 1 - (iteration%self.max_SH_iter)
# number of configurations in that bracket
n0 = int(np.floor((self.max_SH_iter)/(s+1)) * self.eta**s)
ns = [max(int(n0*(self.eta**(-i))), 1) for i in range(s+1)]
return(SuccessiveHalving(HPB_iter=iteration, num_configs=ns, budgets=self.budgets[(-s-1):], config_sampler=self.config_generator.get_config, **iteration_kwargs)) | [
"def",
"get_next_iteration",
"(",
"self",
",",
"iteration",
",",
"iteration_kwargs",
"=",
"{",
"}",
")",
":",
"# number of 'SH rungs'",
"s",
"=",
"self",
".",
"max_SH_iter",
"-",
"1",
"-",
"(",
"iteration",
"%",
"self",
".",
"max_SH_iter",
")",
"# number of configurations in that bracket",
"n0",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"(",
"self",
".",
"max_SH_iter",
")",
"/",
"(",
"s",
"+",
"1",
")",
")",
"*",
"self",
".",
"eta",
"**",
"s",
")",
"ns",
"=",
"[",
"max",
"(",
"int",
"(",
"n0",
"*",
"(",
"self",
".",
"eta",
"**",
"(",
"-",
"i",
")",
")",
")",
",",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"s",
"+",
"1",
")",
"]",
"return",
"(",
"SuccessiveHalving",
"(",
"HPB_iter",
"=",
"iteration",
",",
"num_configs",
"=",
"ns",
",",
"budgets",
"=",
"self",
".",
"budgets",
"[",
"(",
"-",
"s",
"-",
"1",
")",
":",
"]",
",",
"config_sampler",
"=",
"self",
".",
"config_generator",
".",
"get_config",
",",
"*",
"*",
"iteration_kwargs",
")",
")"
]
| BO-HB uses (just like Hyperband) SuccessiveHalving for each iteration.
See Li et al. (2016) for reference.
Parameters
----------
iteration: int
the index of the iteration to be instantiated
Returns
-------
SuccessiveHalving: the SuccessiveHalving iteration with the
corresponding number of configurations | [
"BO",
"-",
"HB",
"uses",
"(",
"just",
"like",
"Hyperband",
")",
"SuccessiveHalving",
"for",
"each",
"iteration",
".",
"See",
"Li",
"et",
"al",
".",
"(",
"2016",
")",
"for",
"reference",
".",
"Parameters",
"----------",
"iteration",
":",
"int",
"the",
"index",
"of",
"the",
"iteration",
"to",
"be",
"instantiated"
]
| python | train | 35 |
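
A numeric illustration of the bracket sizes these formulas produce, assuming eta=3 and max_SH_iter=4 (typical Hyperband settings; in HpBandSter both are derived from the budget range):

    import numpy as np

    eta, max_SH_iter = 3, 4
    for iteration in range(max_SH_iter):
        s = max_SH_iter - 1 - (iteration % max_SH_iter)
        n0 = int(np.floor(max_SH_iter / (s + 1)) * eta**s)
        ns = [max(int(n0 * eta**(-i)), 1) for i in range(s + 1)]
        print(s, ns)
    # s=3 -> [27, 9, 3, 1]; s=2 -> [9, 3, 1]; s=1 -> [6, 2]; s=0 -> [4]
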
chaoss/grimoirelab-elk | grimoire_elk/enriched/discourse.py | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/discourse.py#L148-L154 | def __related_categories(self, category_id):
""" Get all related categories to a given one """
related = []
for cat in self.categories_tree:
if category_id in self.categories_tree[cat]:
related.append(self.categories[cat])
return related | [
"def",
"__related_categories",
"(",
"self",
",",
"category_id",
")",
":",
"related",
"=",
"[",
"]",
"for",
"cat",
"in",
"self",
".",
"categories_tree",
":",
"if",
"category_id",
"in",
"self",
".",
"categories_tree",
"[",
"cat",
"]",
":",
"related",
".",
"append",
"(",
"self",
".",
"categories",
"[",
"cat",
"]",
")",
"return",
"related"
]
| Get all related categories to a given one | [
"Get",
"all",
"related",
"categories",
"to",
"a",
"given",
"one"
]
| python | train | 41.571429 |