repo (string, lengths 7–54) | path (string, lengths 4–192) | url (string, lengths 87–284) | code (string, lengths 78–104k) | code_tokens (list) | docstring (string, lengths 1–46.9k) | docstring_tokens (list) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---|---|---
rm-hull/luma.emulator
|
luma/emulator/render.py
|
https://github.com/rm-hull/luma.emulator/blob/ca3db028b33d17cda9247ea5189873ff0408d013/luma/emulator/render.py#L51-L65
|
def led_matrix(self, surface):
"""
Transforms the input surface into an LED matrix (1 pixel = 1 LED)
"""
scale = self._led_on.get_width()
w, h = self._input_size
pix = self._pygame.PixelArray(surface)
img = self._pygame.Surface((w * scale, h * scale))
for y in range(h):
for x in range(w):
led = self._led_on if pix[x, y] & 0xFFFFFF > 0 else self._led_off
img.blit(led, (x * scale, y * scale))
return img
|
[
"def",
"led_matrix",
"(",
"self",
",",
"surface",
")",
":",
"scale",
"=",
"self",
".",
"_led_on",
".",
"get_width",
"(",
")",
"w",
",",
"h",
"=",
"self",
".",
"_input_size",
"pix",
"=",
"self",
".",
"_pygame",
".",
"PixelArray",
"(",
"surface",
")",
"img",
"=",
"self",
".",
"_pygame",
".",
"Surface",
"(",
"(",
"w",
"*",
"scale",
",",
"h",
"*",
"scale",
")",
")",
"for",
"y",
"in",
"range",
"(",
"h",
")",
":",
"for",
"x",
"in",
"range",
"(",
"w",
")",
":",
"led",
"=",
"self",
".",
"_led_on",
"if",
"pix",
"[",
"x",
",",
"y",
"]",
"&",
"0xFFFFFF",
">",
"0",
"else",
"self",
".",
"_led_off",
"img",
".",
"blit",
"(",
"led",
",",
"(",
"x",
"*",
"scale",
",",
"y",
"*",
"scale",
")",
")",
"return",
"img"
] |
Transforms the input surface into an LED matrix (1 pixel = 1 LED)
|
[
"Transforms",
"the",
"input",
"surface",
"into",
"an",
"LED",
"matrix",
"(",
"1",
"pixel",
"=",
"1",
"LED",
")"
] |
python
|
train
|
inasafe/inasafe
|
safe/gui/tools/batch/batch_dialog.py
|
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/batch/batch_dialog.py#L798-L801
|
def on_output_directory_chooser_clicked(self):
"""Auto connect slot activated when tbOutputDiris clicked."""
title = self.tr('Set the output directory for pdf report files')
self.choose_directory(self.output_directory, title)
|
[
"def",
"on_output_directory_chooser_clicked",
"(",
"self",
")",
":",
"title",
"=",
"self",
".",
"tr",
"(",
"'Set the output directory for pdf report files'",
")",
"self",
".",
"choose_directory",
"(",
"self",
".",
"output_directory",
",",
"title",
")"
] |
Auto connect slot activated when tbOutputDiris clicked.
|
[
"Auto",
"connect",
"slot",
"activated",
"when",
"tbOutputDiris",
"clicked",
"."
] |
python
|
train
|
robertpeteuil/multi-cloud-control
|
mcc/uimode.py
|
https://github.com/robertpeteuil/multi-cloud-control/blob/f1565af1c0b6ed465ff312d3ccc592ba0609f4a2/mcc/uimode.py#L40-L62
|
def ui_main(fmt_table, node_dict):
"""Create the base UI in command mode."""
cmd_funct = {"quit": False,
"run": node_cmd,
"stop": node_cmd,
"connect": node_cmd,
"details": node_cmd,
"update": True}
ui_print("\033[?25l") # cursor off
print("{}\n".format(fmt_table))
sys.stdout.flush()
# refresh_main values:
# None = loop main-cmd, True = refresh-list, False = exit-program
refresh_main = None
while refresh_main is None:
cmd_name = get_user_cmd(node_dict)
if callable(cmd_funct[cmd_name]):
refresh_main = cmd_funct[cmd_name](cmd_name, node_dict)
else:
refresh_main = cmd_funct[cmd_name]
if cmd_name != "connect" and refresh_main:
ui_clear(len(node_dict) + 2)
return refresh_main
|
[
"def",
"ui_main",
"(",
"fmt_table",
",",
"node_dict",
")",
":",
"cmd_funct",
"=",
"{",
"\"quit\"",
":",
"False",
",",
"\"run\"",
":",
"node_cmd",
",",
"\"stop\"",
":",
"node_cmd",
",",
"\"connect\"",
":",
"node_cmd",
",",
"\"details\"",
":",
"node_cmd",
",",
"\"update\"",
":",
"True",
"}",
"ui_print",
"(",
"\"\\033[?25l\"",
")",
"# cursor off",
"print",
"(",
"\"{}\\n\"",
".",
"format",
"(",
"fmt_table",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"# refresh_main values:",
"# None = loop main-cmd, True = refresh-list, False = exit-program",
"refresh_main",
"=",
"None",
"while",
"refresh_main",
"is",
"None",
":",
"cmd_name",
"=",
"get_user_cmd",
"(",
"node_dict",
")",
"if",
"callable",
"(",
"cmd_funct",
"[",
"cmd_name",
"]",
")",
":",
"refresh_main",
"=",
"cmd_funct",
"[",
"cmd_name",
"]",
"(",
"cmd_name",
",",
"node_dict",
")",
"else",
":",
"refresh_main",
"=",
"cmd_funct",
"[",
"cmd_name",
"]",
"if",
"cmd_name",
"!=",
"\"connect\"",
"and",
"refresh_main",
":",
"ui_clear",
"(",
"len",
"(",
"node_dict",
")",
"+",
"2",
")",
"return",
"refresh_main"
] |
Create the base UI in command mode.
|
[
"Create",
"the",
"base",
"UI",
"in",
"command",
"mode",
"."
] |
python
|
train
|
proycon/pynlpl
|
pynlpl/formats/folia.py
|
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L5783-L5798
|
def correctwords(self, originalwords, newwords, **kwargs):
"""Generic correction method for words. You most likely want to use the helper functions
:meth:`Sentence.splitword` , :meth:`Sentence.mergewords`, :meth:`deleteword`, :meth:`insertword` instead"""
for w in originalwords:
if not isinstance(w, Word):
raise Exception("Original word is not a Word instance: " + str(type(w)))
elif w.sentence() != self:
raise Exception("Original not found as member of sentence!")
for w in newwords:
if not isinstance(w, Word):
raise Exception("New word is not a Word instance: " + str(type(w)))
if 'suggest' in kwargs and kwargs['suggest']:
del kwargs['suggest']
return self.correct(suggestion=newwords,current=originalwords, **kwargs)
else:
return self.correct(original=originalwords, new=newwords, **kwargs)
|
[
"def",
"correctwords",
"(",
"self",
",",
"originalwords",
",",
"newwords",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"w",
"in",
"originalwords",
":",
"if",
"not",
"isinstance",
"(",
"w",
",",
"Word",
")",
":",
"raise",
"Exception",
"(",
"\"Original word is not a Word instance: \"",
"+",
"str",
"(",
"type",
"(",
"w",
")",
")",
")",
"elif",
"w",
".",
"sentence",
"(",
")",
"!=",
"self",
":",
"raise",
"Exception",
"(",
"\"Original not found as member of sentence!\"",
")",
"for",
"w",
"in",
"newwords",
":",
"if",
"not",
"isinstance",
"(",
"w",
",",
"Word",
")",
":",
"raise",
"Exception",
"(",
"\"New word is not a Word instance: \"",
"+",
"str",
"(",
"type",
"(",
"w",
")",
")",
")",
"if",
"'suggest'",
"in",
"kwargs",
"and",
"kwargs",
"[",
"'suggest'",
"]",
":",
"del",
"kwargs",
"[",
"'suggest'",
"]",
"return",
"self",
".",
"correct",
"(",
"suggestion",
"=",
"newwords",
",",
"current",
"=",
"originalwords",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"self",
".",
"correct",
"(",
"original",
"=",
"originalwords",
",",
"new",
"=",
"newwords",
",",
"*",
"*",
"kwargs",
")"
] |
Generic correction method for words. You most likely want to use the helper functions
:meth:`Sentence.splitword` , :meth:`Sentence.mergewords`, :meth:`deleteword`, :meth:`insertword` instead
|
[
"Generic",
"correction",
"method",
"for",
"words",
".",
"You",
"most",
"likely",
"want",
"to",
"use",
"the",
"helper",
"functions",
":",
"meth",
":",
"Sentence",
".",
"splitword",
":",
"meth",
":",
"Sentence",
".",
"mergewords",
":",
"meth",
":",
"deleteword",
":",
"meth",
":",
"insertword",
"instead"
] |
python
|
train
|
pypyr/pypyr-cli
|
pypyr/steps/env.py
|
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/env.py#L46-L85
|
def env_get(context):
"""Get $ENVs into the pypyr context.
Context is a dictionary or dictionary-like. context is mandatory.
context['env']['get'] must exist. It's a dictionary.
Values are the names of the $ENVs to write to the pypyr context.
Keys are the pypyr context item to which to write the $ENV values.
For example, say input context is:
key1: value1
key2: value2
pypyrCurrentDir: value3
env:
get:
pypyrUser: USER
pypyrCurrentDir: PWD
This will result in context:
key1: value1
key2: value2
key3: value3
pypyrUser: <<value of $USER here>>
pypyrCurrentDir: <<value of $PWD here, not value3>>
"""
get = context['env'].get('get', None)
exists = False
if get:
logger.debug("start")
for k, v in get.items():
logger.debug(f"setting context {k} to $ENV {v}")
context[k] = os.environ[v]
logger.info(f"saved {len(get)} $ENVs to context.")
exists = True
logger.debug("done")
return exists
|
[
"def",
"env_get",
"(",
"context",
")",
":",
"get",
"=",
"context",
"[",
"'env'",
"]",
".",
"get",
"(",
"'get'",
",",
"None",
")",
"exists",
"=",
"False",
"if",
"get",
":",
"logger",
".",
"debug",
"(",
"\"start\"",
")",
"for",
"k",
",",
"v",
"in",
"get",
".",
"items",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"f\"setting context {k} to $ENV {v}\"",
")",
"context",
"[",
"k",
"]",
"=",
"os",
".",
"environ",
"[",
"v",
"]",
"logger",
".",
"info",
"(",
"f\"saved {len(get)} $ENVs to context.\"",
")",
"exists",
"=",
"True",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"exists"
] |
Get $ENVs into the pypyr context.
Context is a dictionary or dictionary-like. context is mandatory.
context['env']['get'] must exist. It's a dictionary.
Values are the names of the $ENVs to write to the pypyr context.
Keys are the pypyr context item to which to write the $ENV values.
For example, say input context is:
key1: value1
key2: value2
pypyrCurrentDir: value3
env:
get:
pypyrUser: USER
pypyrCurrentDir: PWD
This will result in context:
key1: value1
key2: value2
key3: value3
pypyrUser: <<value of $USER here>>
pypyrCurrentDir: <<value of $PWD here, not value3>>
|
[
"Get",
"$ENVs",
"into",
"the",
"pypyr",
"context",
"."
] |
python
|
train
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/pip-1.2.1-py2.7.egg/pip/req.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/pip-1.2.1-py2.7.egg/pip/req.py#L1147-L1178
|
def install(self, install_options, global_options=()):
"""Install everything in this set (after having downloaded and unpacked the packages)"""
to_install = [r for r in self.requirements.values()
if not r.satisfied_by]
if to_install:
logger.notify('Installing collected packages: %s' % ', '.join([req.name for req in to_install]))
logger.indent += 2
try:
for requirement in to_install:
if requirement.conflicts_with:
logger.notify('Found existing installation: %s'
% requirement.conflicts_with)
logger.indent += 2
try:
requirement.uninstall(auto_confirm=True)
finally:
logger.indent -= 2
try:
requirement.install(install_options, global_options)
except:
# if install did not succeed, rollback previous uninstall
if requirement.conflicts_with and not requirement.install_succeeded:
requirement.rollback_uninstall()
raise
else:
if requirement.conflicts_with and requirement.install_succeeded:
requirement.commit_uninstall()
requirement.remove_temporary_source()
finally:
logger.indent -= 2
self.successfully_installed = to_install
|
[
"def",
"install",
"(",
"self",
",",
"install_options",
",",
"global_options",
"=",
"(",
")",
")",
":",
"to_install",
"=",
"[",
"r",
"for",
"r",
"in",
"self",
".",
"requirements",
".",
"values",
"(",
")",
"if",
"not",
"r",
".",
"satisfied_by",
"]",
"if",
"to_install",
":",
"logger",
".",
"notify",
"(",
"'Installing collected packages: %s'",
"%",
"', '",
".",
"join",
"(",
"[",
"req",
".",
"name",
"for",
"req",
"in",
"to_install",
"]",
")",
")",
"logger",
".",
"indent",
"+=",
"2",
"try",
":",
"for",
"requirement",
"in",
"to_install",
":",
"if",
"requirement",
".",
"conflicts_with",
":",
"logger",
".",
"notify",
"(",
"'Found existing installation: %s'",
"%",
"requirement",
".",
"conflicts_with",
")",
"logger",
".",
"indent",
"+=",
"2",
"try",
":",
"requirement",
".",
"uninstall",
"(",
"auto_confirm",
"=",
"True",
")",
"finally",
":",
"logger",
".",
"indent",
"-=",
"2",
"try",
":",
"requirement",
".",
"install",
"(",
"install_options",
",",
"global_options",
")",
"except",
":",
"# if install did not succeed, rollback previous uninstall",
"if",
"requirement",
".",
"conflicts_with",
"and",
"not",
"requirement",
".",
"install_succeeded",
":",
"requirement",
".",
"rollback_uninstall",
"(",
")",
"raise",
"else",
":",
"if",
"requirement",
".",
"conflicts_with",
"and",
"requirement",
".",
"install_succeeded",
":",
"requirement",
".",
"commit_uninstall",
"(",
")",
"requirement",
".",
"remove_temporary_source",
"(",
")",
"finally",
":",
"logger",
".",
"indent",
"-=",
"2",
"self",
".",
"successfully_installed",
"=",
"to_install"
] |
Install everything in this set (after having downloaded and unpacked the packages)
|
[
"Install",
"everything",
"in",
"this",
"set",
"(",
"after",
"having",
"downloaded",
"and",
"unpacked",
"the",
"packages",
")"
] |
python
|
test
|
ligyxy/DictMySQL
|
dictmysql.py
|
https://github.com/ligyxy/DictMySQL/blob/f40d649193ccf58d1c7933189be1042b37afbe31/dictmysql.py#L49-L68
|
def _backtick_columns(cols):
"""
Quote the column names
"""
def bt(s):
b = '' if s == '*' or not s else '`'
return [_ for _ in [b + (s or '') + b] if _]
formatted = []
for c in cols:
if c[0] == '#':
formatted.append(c[1:])
elif c.startswith('(') and c.endswith(')'):
# WHERE (column_a, column_b) IN ((1,10), (1,20))
formatted.append(c)
else:
# backtick the former part when it meets the first dot, and then all the rest
formatted.append('.'.join(bt(c.split('.')[0]) + bt('.'.join(c.split('.')[1:]))))
return ', '.join(formatted)
|
[
"def",
"_backtick_columns",
"(",
"cols",
")",
":",
"def",
"bt",
"(",
"s",
")",
":",
"b",
"=",
"''",
"if",
"s",
"==",
"'*'",
"or",
"not",
"s",
"else",
"'`'",
"return",
"[",
"_",
"for",
"_",
"in",
"[",
"b",
"+",
"(",
"s",
"or",
"''",
")",
"+",
"b",
"]",
"if",
"_",
"]",
"formatted",
"=",
"[",
"]",
"for",
"c",
"in",
"cols",
":",
"if",
"c",
"[",
"0",
"]",
"==",
"'#'",
":",
"formatted",
".",
"append",
"(",
"c",
"[",
"1",
":",
"]",
")",
"elif",
"c",
".",
"startswith",
"(",
"'('",
")",
"and",
"c",
".",
"endswith",
"(",
"')'",
")",
":",
"# WHERE (column_a, column_b) IN ((1,10), (1,20))",
"formatted",
".",
"append",
"(",
"c",
")",
"else",
":",
"# backtick the former part when it meets the first dot, and then all the rest",
"formatted",
".",
"append",
"(",
"'.'",
".",
"join",
"(",
"bt",
"(",
"c",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"+",
"bt",
"(",
"'.'",
".",
"join",
"(",
"c",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
":",
"]",
")",
")",
")",
")",
"return",
"', '",
".",
"join",
"(",
"formatted",
")"
] |
Quote the column names
|
[
"Quote",
"the",
"column",
"names"
] |
python
|
train
|
knipknap/SpiffWorkflow
|
SpiffWorkflow/util/event.py
|
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/util/event.py#L161-L201
|
def emit(self, *args, **kwargs):
"""
Emits the signal, passing the given arguments to the callbacks.
If one of the callbacks returns a value other than None, no further
callbacks are invoked and the return value of the callback is
returned to the caller of emit().
:type args: tuple
:param args: Optional arguments passed to the callbacks.
:type kwargs: dict
:param kwargs: Optional keyword arguments passed to the callbacks.
:rtype: object
:returns: Returns None if all callbacks returned None. Returns
the return value of the last invoked callback otherwise.
"""
if self.hard_subscribers is not None:
for callback, user_args, user_kwargs in self.hard_subscribers:
kwargs.update(user_kwargs)
result = callback(*args + user_args, **kwargs)
if result is not None:
return result
if self.weak_subscribers is not None:
for callback, user_args, user_kwargs in self.weak_subscribers:
kwargs.update(user_kwargs)
# Even though WeakMethod notifies us when the underlying
# function is destroyed, and we remove the item from the
# the list of subscribers, there is no guarantee that
# this notification has already happened because the garbage
# collector may run while this loop is executed.
# Disabling the garbage collector temporarily also does
# not work, because other threads may be trying to do
# the same, causing yet another race condition.
# So the only solution is to skip such functions.
function = callback.get_function()
if function is None:
continue
result = function(*args + user_args, **kwargs)
if result is not None:
return result
|
[
"def",
"emit",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"hard_subscribers",
"is",
"not",
"None",
":",
"for",
"callback",
",",
"user_args",
",",
"user_kwargs",
"in",
"self",
".",
"hard_subscribers",
":",
"kwargs",
".",
"update",
"(",
"user_kwargs",
")",
"result",
"=",
"callback",
"(",
"*",
"args",
"+",
"user_args",
",",
"*",
"*",
"kwargs",
")",
"if",
"result",
"is",
"not",
"None",
":",
"return",
"result",
"if",
"self",
".",
"weak_subscribers",
"is",
"not",
"None",
":",
"for",
"callback",
",",
"user_args",
",",
"user_kwargs",
"in",
"self",
".",
"weak_subscribers",
":",
"kwargs",
".",
"update",
"(",
"user_kwargs",
")",
"# Even though WeakMethod notifies us when the underlying",
"# function is destroyed, and we remove the item from the",
"# the list of subscribers, there is no guarantee that",
"# this notification has already happened because the garbage",
"# collector may run while this loop is executed.",
"# Disabling the garbage collector temporarily also does",
"# not work, because other threads may be trying to do",
"# the same, causing yet another race condition.",
"# So the only solution is to skip such functions.",
"function",
"=",
"callback",
".",
"get_function",
"(",
")",
"if",
"function",
"is",
"None",
":",
"continue",
"result",
"=",
"function",
"(",
"*",
"args",
"+",
"user_args",
",",
"*",
"*",
"kwargs",
")",
"if",
"result",
"is",
"not",
"None",
":",
"return",
"result"
] |
Emits the signal, passing the given arguments to the callbacks.
If one of the callbacks returns a value other than None, no further
callbacks are invoked and the return value of the callback is
returned to the caller of emit().
:type args: tuple
:param args: Optional arguments passed to the callbacks.
:type kwargs: dict
:param kwargs: Optional keyword arguments passed to the callbacks.
:rtype: object
:returns: Returns None if all callbacks returned None. Returns
the return value of the last invoked callback otherwise.
|
[
"Emits",
"the",
"signal",
"passing",
"the",
"given",
"arguments",
"to",
"the",
"callbacks",
".",
"If",
"one",
"of",
"the",
"callbacks",
"returns",
"a",
"value",
"other",
"than",
"None",
"no",
"further",
"callbacks",
"are",
"invoked",
"and",
"the",
"return",
"value",
"of",
"the",
"callback",
"is",
"returned",
"to",
"the",
"caller",
"of",
"emit",
"()",
"."
] |
python
|
valid
|
ungarj/mapchete
|
mapchete/formats/base.py
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/base.py#L318-L336
|
def output_is_valid(self, process_data):
"""
Check whether process output is allowed with output driver.
Parameters
----------
process_data : raw process output
Returns
-------
True or False
"""
if self.METADATA["data_type"] == "raster":
return (
is_numpy_or_masked_array(process_data) or
is_numpy_or_masked_array_with_tags(process_data)
)
elif self.METADATA["data_type"] == "vector":
return is_feature_list(process_data)
|
[
"def",
"output_is_valid",
"(",
"self",
",",
"process_data",
")",
":",
"if",
"self",
".",
"METADATA",
"[",
"\"data_type\"",
"]",
"==",
"\"raster\"",
":",
"return",
"(",
"is_numpy_or_masked_array",
"(",
"process_data",
")",
"or",
"is_numpy_or_masked_array_with_tags",
"(",
"process_data",
")",
")",
"elif",
"self",
".",
"METADATA",
"[",
"\"data_type\"",
"]",
"==",
"\"vector\"",
":",
"return",
"is_feature_list",
"(",
"process_data",
")"
] |
Check whether process output is allowed with output driver.
Parameters
----------
process_data : raw process output
Returns
-------
True or False
|
[
"Check",
"whether",
"process",
"output",
"is",
"allowed",
"with",
"output",
"driver",
"."
] |
python
|
valid
|
kajala/django-jutil
|
jutil/request.py
|
https://github.com/kajala/django-jutil/blob/2abd93ebad51042744eaeb1ee1074ed0eb55ad0c/jutil/request.py#L8-L36
|
def get_geo_ip(ip: str, exceptions: bool=False, timeout: int=10) -> dict:
"""
Returns geo IP info or empty dict if geoip query fails at http://ipstack.com.
requires settings.IPSTACK_TOKEN set as valid access token to the API.
Example replies:
{'country_name': 'United States', 'country_code': 'US', 'region_code': 'TX', 'region_name': 'Texas', 'ip': '76.184.236.184', 'latitude': 33.1507, 'time_zone': 'America/Chicago', 'metro_code': 623, 'city': 'Frisco', 'longitude': -96.8236, 'zip_code': '75033'}
{'latitude': 60.1641, 'country_name': 'Finland', 'zip_code': '02920', 'region_name': 'Uusimaa', 'city': 'Espoo', 'metro_code': 0, 'ip': '194.100.27.41', 'time_zone': 'Europe/Helsinki', 'country_code': 'FI', 'longitude': 24.7136, 'region_code': '18'}
:param ip: str
:param exceptions: if True raises Exception on failure
:param timeout: timeout in seconds
:return: dict
"""
import requests
import traceback
try:
res = requests.get('http://api.ipstack.com/{}?access_key={}&format=1'.format(ip, settings.IPSTACK_TOKEN), timeout=timeout)
if res.status_code != 200:
if exceptions:
raise Exception('api.ipstack.com HTTP {}'.format(res.status_code))
return {}
return res.json()
except Exception as e:
msg = 'geoip({}) failed: {}'.format(ip, traceback.format_exc())
logger.error(msg)
if exceptions:
raise
return {}
|
[
"def",
"get_geo_ip",
"(",
"ip",
":",
"str",
",",
"exceptions",
":",
"bool",
"=",
"False",
",",
"timeout",
":",
"int",
"=",
"10",
")",
"->",
"dict",
":",
"import",
"requests",
"import",
"traceback",
"try",
":",
"res",
"=",
"requests",
".",
"get",
"(",
"'http://api.ipstack.com/{}?access_key={}&format=1'",
".",
"format",
"(",
"ip",
",",
"settings",
".",
"IPSTACK_TOKEN",
")",
",",
"timeout",
"=",
"timeout",
")",
"if",
"res",
".",
"status_code",
"!=",
"200",
":",
"if",
"exceptions",
":",
"raise",
"Exception",
"(",
"'api.ipstack.com HTTP {}'",
".",
"format",
"(",
"res",
".",
"status_code",
")",
")",
"return",
"{",
"}",
"return",
"res",
".",
"json",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"'geoip({}) failed: {}'",
".",
"format",
"(",
"ip",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"if",
"exceptions",
":",
"raise",
"return",
"{",
"}"
] |
Returns geo IP info or empty dict if geoip query fails at http://ipstack.com.
requires settings.IPSTACK_TOKEN set as valid access token to the API.
Example replies:
{'country_name': 'United States', 'country_code': 'US', 'region_code': 'TX', 'region_name': 'Texas', 'ip': '76.184.236.184', 'latitude': 33.1507, 'time_zone': 'America/Chicago', 'metro_code': 623, 'city': 'Frisco', 'longitude': -96.8236, 'zip_code': '75033'}
{'latitude': 60.1641, 'country_name': 'Finland', 'zip_code': '02920', 'region_name': 'Uusimaa', 'city': 'Espoo', 'metro_code': 0, 'ip': '194.100.27.41', 'time_zone': 'Europe/Helsinki', 'country_code': 'FI', 'longitude': 24.7136, 'region_code': '18'}
:param ip: str
:param exceptions: if True raises Exception on failure
:param timeout: timeout in seconds
:return: dict
|
[
"Returns",
"geo",
"IP",
"info",
"or",
"empty",
"dict",
"if",
"geoip",
"query",
"fails",
"at",
"http",
":",
"//",
"ipstack",
".",
"com",
".",
"requires",
"settings",
".",
"IPSTACK_TOKEN",
"set",
"as",
"valid",
"access",
"token",
"to",
"the",
"API",
"."
] |
python
|
train
|
numenta/nupic
|
src/nupic/data/generators/data_generator.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L59-L67
|
def setSeed(self, seed):
"""Set the random seed and the numpy seed
Parameters:
--------------------------------------------------------------------
seed: random seed
"""
rand.seed(seed)
np.random.seed(seed)
|
[
"def",
"setSeed",
"(",
"self",
",",
"seed",
")",
":",
"rand",
".",
"seed",
"(",
"seed",
")",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")"
] |
Set the random seed and the numpy seed
Parameters:
--------------------------------------------------------------------
seed: random seed
|
[
"Set",
"the",
"random",
"seed",
"and",
"the",
"numpy",
"seed",
"Parameters",
":",
"--------------------------------------------------------------------",
"seed",
":",
"random",
"seed"
] |
python
|
valid
|
ensime/ensime-vim
|
ensime_shared/client.py
|
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/client.py#L433-L436
|
def doc_uri(self, args, range=None):
"""Request doc of whatever at cursor."""
self.log.debug('doc_uri: in')
self.send_at_position("DocUri", False, "point")
|
[
"def",
"doc_uri",
"(",
"self",
",",
"args",
",",
"range",
"=",
"None",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'doc_uri: in'",
")",
"self",
".",
"send_at_position",
"(",
"\"DocUri\"",
",",
"False",
",",
"\"point\"",
")"
] |
Request doc of whatever at cursor.
|
[
"Request",
"doc",
"of",
"whatever",
"at",
"cursor",
"."
] |
python
|
train
|
jtwhite79/pyemu
|
pyemu/utils/helpers.py
|
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/helpers.py#L1832-L1842
|
def setup_hfb_pars(self):
"""setup non-mult parameters for hfb (yuck!)
"""
if self.m.hfb6 is None:
self.logger.lraise("couldn't find hfb pak")
tpl_file,df = pyemu.gw_utils.write_hfb_template(self.m)
self.in_files.append(os.path.split(tpl_file.replace(".tpl",""))[-1])
self.tpl_files.append(os.path.split(tpl_file)[-1])
self.par_dfs["hfb"] = df
|
[
"def",
"setup_hfb_pars",
"(",
"self",
")",
":",
"if",
"self",
".",
"m",
".",
"hfb6",
"is",
"None",
":",
"self",
".",
"logger",
".",
"lraise",
"(",
"\"couldn't find hfb pak\"",
")",
"tpl_file",
",",
"df",
"=",
"pyemu",
".",
"gw_utils",
".",
"write_hfb_template",
"(",
"self",
".",
"m",
")",
"self",
".",
"in_files",
".",
"append",
"(",
"os",
".",
"path",
".",
"split",
"(",
"tpl_file",
".",
"replace",
"(",
"\".tpl\"",
",",
"\"\"",
")",
")",
"[",
"-",
"1",
"]",
")",
"self",
".",
"tpl_files",
".",
"append",
"(",
"os",
".",
"path",
".",
"split",
"(",
"tpl_file",
")",
"[",
"-",
"1",
"]",
")",
"self",
".",
"par_dfs",
"[",
"\"hfb\"",
"]",
"=",
"df"
] |
setup non-mult parameters for hfb (yuck!)
|
[
"setup",
"non",
"-",
"mult",
"parameters",
"for",
"hfb",
"(",
"yuck!",
")"
] |
python
|
train
|
scivision/lowtran
|
lowtran/scenarios.py
|
https://github.com/scivision/lowtran/blob/9954d859e53437436103f9ab54a7e2602ecaa1b7/lowtran/scenarios.py#L50-L85
|
def horizrad(infn: Path, outfn: Path, c1: dict) -> xarray.Dataset:
"""
read CSV, simulate, write, plot
"""
if infn is not None:
infn = Path(infn).expanduser()
if infn.suffix == '.h5':
TR = xarray.open_dataset(infn)
return TR
c1.update({'model': 0, # 0: user meterological data
'itype': 1, # 1: horizontal path
'iemsct': 1, # 1: radiance model
'im': 1, # 1: for horizontal path (see Lowtran manual p.42)
'ird1': 1, # 1: use card 2C2)
})
# %% read csv file
if not infn: # demo mode
c1['p'] = [949., 959.]
c1['t'] = [283.8, 285.]
c1['wmol'] = [[93.96, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[93.96, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]
c1['time'] = [parse('2017-04-05T12'),
parse('2017-04-05T18')]
else: # read csv, normal case
PTdata = read_csv(infn)
c1['p'] = PTdata['p']
c1['t'] = PTdata['Ta']
c1['wmol'] = np.zeros((PTdata.shape[0], 12))
c1['wmol'][:, 0] = PTdata['RH']
c1['time'] = [parse(t) for t in PTdata['time']]
# %% TR is 3-D array with axes: time, wavelength, and [transmission,radiance]
TR = loopuserdef(c1)
return TR
|
[
"def",
"horizrad",
"(",
"infn",
":",
"Path",
",",
"outfn",
":",
"Path",
",",
"c1",
":",
"dict",
")",
"->",
"xarray",
".",
"Dataset",
":",
"if",
"infn",
"is",
"not",
"None",
":",
"infn",
"=",
"Path",
"(",
"infn",
")",
".",
"expanduser",
"(",
")",
"if",
"infn",
".",
"suffix",
"==",
"'.h5'",
":",
"TR",
"=",
"xarray",
".",
"open_dataset",
"(",
"infn",
")",
"return",
"TR",
"c1",
".",
"update",
"(",
"{",
"'model'",
":",
"0",
",",
"# 0: user meterological data",
"'itype'",
":",
"1",
",",
"# 1: horizontal path",
"'iemsct'",
":",
"1",
",",
"# 1: radiance model",
"'im'",
":",
"1",
",",
"# 1: for horizontal path (see Lowtran manual p.42)",
"'ird1'",
":",
"1",
",",
"# 1: use card 2C2)",
"}",
")",
"# %% read csv file",
"if",
"not",
"infn",
":",
"# demo mode",
"c1",
"[",
"'p'",
"]",
"=",
"[",
"949.",
",",
"959.",
"]",
"c1",
"[",
"'t'",
"]",
"=",
"[",
"283.8",
",",
"285.",
"]",
"c1",
"[",
"'wmol'",
"]",
"=",
"[",
"[",
"93.96",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
"]",
",",
"[",
"93.96",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
",",
"0.",
"]",
"]",
"c1",
"[",
"'time'",
"]",
"=",
"[",
"parse",
"(",
"'2017-04-05T12'",
")",
",",
"parse",
"(",
"'2017-04-05T18'",
")",
"]",
"else",
":",
"# read csv, normal case",
"PTdata",
"=",
"read_csv",
"(",
"infn",
")",
"c1",
"[",
"'p'",
"]",
"=",
"PTdata",
"[",
"'p'",
"]",
"c1",
"[",
"'t'",
"]",
"=",
"PTdata",
"[",
"'Ta'",
"]",
"c1",
"[",
"'wmol'",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"PTdata",
".",
"shape",
"[",
"0",
"]",
",",
"12",
")",
")",
"c1",
"[",
"'wmol'",
"]",
"[",
":",
",",
"0",
"]",
"=",
"PTdata",
"[",
"'RH'",
"]",
"c1",
"[",
"'time'",
"]",
"=",
"[",
"parse",
"(",
"t",
")",
"for",
"t",
"in",
"PTdata",
"[",
"'time'",
"]",
"]",
"# %% TR is 3-D array with axes: time, wavelength, and [transmission,radiance]",
"TR",
"=",
"loopuserdef",
"(",
"c1",
")",
"return",
"TR"
] |
read CSV, simulate, write, plot
|
[
"read",
"CSV",
"simulate",
"write",
"plot"
] |
python
|
train
|
wavefrontHQ/python-client
|
wavefront_api_client/api/dashboard_api.py
|
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/dashboard_api.py#L1003-L1024
|
def get_dashboard_version(self, id, version, **kwargs): # noqa: E501
"""Get a specific version of a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_version(id, version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param int version: (required)
:return: ResponseContainerDashboard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_dashboard_version_with_http_info(id, version, **kwargs) # noqa: E501
else:
(data) = self.get_dashboard_version_with_http_info(id, version, **kwargs) # noqa: E501
return data
|
[
"def",
"get_dashboard_version",
"(",
"self",
",",
"id",
",",
"version",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"get_dashboard_version_with_http_info",
"(",
"id",
",",
"version",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_dashboard_version_with_http_info",
"(",
"id",
",",
"version",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] |
Get a specific version of a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_version(id, version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param int version: (required)
:return: ResponseContainerDashboard
If the method is called asynchronously,
returns the request thread.
|
[
"Get",
"a",
"specific",
"version",
"of",
"a",
"specific",
"dashboard",
"#",
"noqa",
":",
"E501"
] |
python
|
train
|
luckydonald/pytgbot
|
pytgbot/api_types/receivable/updates.py
|
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/pytgbot/api_types/receivable/updates.py#L149-L176
|
def to_array(self):
"""
Serializes this Update to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Update, self).to_array()
array['update_id'] = int(self.update_id) # type int
if self.message is not None:
array['message'] = self.message.to_array() # type Message
if self.edited_message is not None:
array['edited_message'] = self.edited_message.to_array() # type Message
if self.channel_post is not None:
array['channel_post'] = self.channel_post.to_array() # type Message
if self.edited_channel_post is not None:
array['edited_channel_post'] = self.edited_channel_post.to_array() # type Message
if self.inline_query is not None:
array['inline_query'] = self.inline_query.to_array() # type InlineQuery
if self.chosen_inline_result is not None:
array['chosen_inline_result'] = self.chosen_inline_result.to_array() # type ChosenInlineResult
if self.callback_query is not None:
array['callback_query'] = self.callback_query.to_array() # type CallbackQuery
if self.shipping_query is not None:
array['shipping_query'] = self.shipping_query.to_array() # type ShippingQuery
if self.pre_checkout_query is not None:
array['pre_checkout_query'] = self.pre_checkout_query.to_array() # type PreCheckoutQuery
return array
|
[
"def",
"to_array",
"(",
"self",
")",
":",
"array",
"=",
"super",
"(",
"Update",
",",
"self",
")",
".",
"to_array",
"(",
")",
"array",
"[",
"'update_id'",
"]",
"=",
"int",
"(",
"self",
".",
"update_id",
")",
"# type int",
"if",
"self",
".",
"message",
"is",
"not",
"None",
":",
"array",
"[",
"'message'",
"]",
"=",
"self",
".",
"message",
".",
"to_array",
"(",
")",
"# type Message",
"if",
"self",
".",
"edited_message",
"is",
"not",
"None",
":",
"array",
"[",
"'edited_message'",
"]",
"=",
"self",
".",
"edited_message",
".",
"to_array",
"(",
")",
"# type Message",
"if",
"self",
".",
"channel_post",
"is",
"not",
"None",
":",
"array",
"[",
"'channel_post'",
"]",
"=",
"self",
".",
"channel_post",
".",
"to_array",
"(",
")",
"# type Message",
"if",
"self",
".",
"edited_channel_post",
"is",
"not",
"None",
":",
"array",
"[",
"'edited_channel_post'",
"]",
"=",
"self",
".",
"edited_channel_post",
".",
"to_array",
"(",
")",
"# type Message",
"if",
"self",
".",
"inline_query",
"is",
"not",
"None",
":",
"array",
"[",
"'inline_query'",
"]",
"=",
"self",
".",
"inline_query",
".",
"to_array",
"(",
")",
"# type InlineQuery",
"if",
"self",
".",
"chosen_inline_result",
"is",
"not",
"None",
":",
"array",
"[",
"'chosen_inline_result'",
"]",
"=",
"self",
".",
"chosen_inline_result",
".",
"to_array",
"(",
")",
"# type ChosenInlineResult",
"if",
"self",
".",
"callback_query",
"is",
"not",
"None",
":",
"array",
"[",
"'callback_query'",
"]",
"=",
"self",
".",
"callback_query",
".",
"to_array",
"(",
")",
"# type CallbackQuery",
"if",
"self",
".",
"shipping_query",
"is",
"not",
"None",
":",
"array",
"[",
"'shipping_query'",
"]",
"=",
"self",
".",
"shipping_query",
".",
"to_array",
"(",
")",
"# type ShippingQuery",
"if",
"self",
".",
"pre_checkout_query",
"is",
"not",
"None",
":",
"array",
"[",
"'pre_checkout_query'",
"]",
"=",
"self",
".",
"pre_checkout_query",
".",
"to_array",
"(",
")",
"# type PreCheckoutQuery",
"return",
"array"
] |
Serializes this Update to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
|
[
"Serializes",
"this",
"Update",
"to",
"a",
"dictionary",
"."
] |
python
|
train
|
arne-cl/discoursegraphs
|
src/discoursegraphs/readwrite/mmax2.py
|
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/mmax2.py#L270-L338
|
def add_annotation_layer(self, annotation_file, layer_name):
"""
adds all markables from the given annotation layer to the discourse
graph.
"""
assert os.path.isfile(annotation_file), \
"Annotation file doesn't exist: {}".format(annotation_file)
tree = etree.parse(annotation_file)
root = tree.getroot()
default_layers = {self.ns, self.ns+':markable', self.ns+':'+layer_name}
# avoids eml.org namespace handling
for markable in root.iterchildren():
markable_node_id = markable.attrib['id']
markable_attribs = add_prefix(markable.attrib, self.ns+':')
self.add_node(markable_node_id,
layers=default_layers,
attr_dict=markable_attribs,
label=markable_node_id+':'+layer_name)
for target_node_id in spanstring2tokens(self, markable.attrib['span']):
# manually add to_node if it's not in the graph, yet
# cf. issue #39
if target_node_id not in self:
self.add_node(target_node_id,
# adding 'mmax:layer_name' here could be
# misleading (e.g. each token would be part
# of the 'mmax:sentence' layer
layers={self.ns, self.ns+':markable'},
label=target_node_id)
self.add_edge(markable_node_id, target_node_id,
layers=default_layers,
edge_type=EdgeTypes.spanning_relation,
label=self.ns+':'+layer_name)
# this is a workaround for Chiarcos-style MMAX files
if has_antecedent(markable):
antecedent_pointer = markable.attrib['anaphor_antecedent']
# mmax2 supports weird double antecedents,
# e.g. "markable_1000131;markable_1000132", cf. Issue #40
#
# handling these double antecendents increases the number of
# chains, cf. commit edc28abdc4fd36065e8bbf5900eeb4d1326db153
for antecedent in antecedent_pointer.split(';'):
ante_split = antecedent.split(":")
if len(ante_split) == 2:
# mark group:markable_n or secmark:markable_n as such
edge_label = '{}:antecedent'.format(ante_split[0])
else:
edge_label = ':antecedent'
# handles both 'markable_n' and 'layer:markable_n'
antecedent_node_id = ante_split[-1]
if len(ante_split) == 2:
antecedent_layer = ante_split[0]
default_layers.add('{0}:{1}'.format(self.ns, antecedent_layer))
# manually add antecedent node if it's not yet in the graph
# cf. issue #39
if antecedent_node_id not in self:
self.add_node(antecedent_node_id,
layers=default_layers)
self.add_edge(markable_node_id, antecedent_node_id,
layers=default_layers,
edge_type=EdgeTypes.pointing_relation,
label=self.ns+edge_label)
|
[
"def",
"add_annotation_layer",
"(",
"self",
",",
"annotation_file",
",",
"layer_name",
")",
":",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"annotation_file",
")",
",",
"\"Annotation file doesn't exist: {}\"",
".",
"format",
"(",
"annotation_file",
")",
"tree",
"=",
"etree",
".",
"parse",
"(",
"annotation_file",
")",
"root",
"=",
"tree",
".",
"getroot",
"(",
")",
"default_layers",
"=",
"{",
"self",
".",
"ns",
",",
"self",
".",
"ns",
"+",
"':markable'",
",",
"self",
".",
"ns",
"+",
"':'",
"+",
"layer_name",
"}",
"# avoids eml.org namespace handling",
"for",
"markable",
"in",
"root",
".",
"iterchildren",
"(",
")",
":",
"markable_node_id",
"=",
"markable",
".",
"attrib",
"[",
"'id'",
"]",
"markable_attribs",
"=",
"add_prefix",
"(",
"markable",
".",
"attrib",
",",
"self",
".",
"ns",
"+",
"':'",
")",
"self",
".",
"add_node",
"(",
"markable_node_id",
",",
"layers",
"=",
"default_layers",
",",
"attr_dict",
"=",
"markable_attribs",
",",
"label",
"=",
"markable_node_id",
"+",
"':'",
"+",
"layer_name",
")",
"for",
"target_node_id",
"in",
"spanstring2tokens",
"(",
"self",
",",
"markable",
".",
"attrib",
"[",
"'span'",
"]",
")",
":",
"# manually add to_node if it's not in the graph, yet",
"# cf. issue #39",
"if",
"target_node_id",
"not",
"in",
"self",
":",
"self",
".",
"add_node",
"(",
"target_node_id",
",",
"# adding 'mmax:layer_name' here could be",
"# misleading (e.g. each token would be part",
"# of the 'mmax:sentence' layer",
"layers",
"=",
"{",
"self",
".",
"ns",
",",
"self",
".",
"ns",
"+",
"':markable'",
"}",
",",
"label",
"=",
"target_node_id",
")",
"self",
".",
"add_edge",
"(",
"markable_node_id",
",",
"target_node_id",
",",
"layers",
"=",
"default_layers",
",",
"edge_type",
"=",
"EdgeTypes",
".",
"spanning_relation",
",",
"label",
"=",
"self",
".",
"ns",
"+",
"':'",
"+",
"layer_name",
")",
"# this is a workaround for Chiarcos-style MMAX files",
"if",
"has_antecedent",
"(",
"markable",
")",
":",
"antecedent_pointer",
"=",
"markable",
".",
"attrib",
"[",
"'anaphor_antecedent'",
"]",
"# mmax2 supports weird double antecedents,",
"# e.g. \"markable_1000131;markable_1000132\", cf. Issue #40",
"#",
"# handling these double antecendents increases the number of",
"# chains, cf. commit edc28abdc4fd36065e8bbf5900eeb4d1326db153",
"for",
"antecedent",
"in",
"antecedent_pointer",
".",
"split",
"(",
"';'",
")",
":",
"ante_split",
"=",
"antecedent",
".",
"split",
"(",
"\":\"",
")",
"if",
"len",
"(",
"ante_split",
")",
"==",
"2",
":",
"# mark group:markable_n or secmark:markable_n as such",
"edge_label",
"=",
"'{}:antecedent'",
".",
"format",
"(",
"ante_split",
"[",
"0",
"]",
")",
"else",
":",
"edge_label",
"=",
"':antecedent'",
"# handles both 'markable_n' and 'layer:markable_n'",
"antecedent_node_id",
"=",
"ante_split",
"[",
"-",
"1",
"]",
"if",
"len",
"(",
"ante_split",
")",
"==",
"2",
":",
"antecedent_layer",
"=",
"ante_split",
"[",
"0",
"]",
"default_layers",
".",
"add",
"(",
"'{0}:{1}'",
".",
"format",
"(",
"self",
".",
"ns",
",",
"antecedent_layer",
")",
")",
"# manually add antecedent node if it's not yet in the graph",
"# cf. issue #39",
"if",
"antecedent_node_id",
"not",
"in",
"self",
":",
"self",
".",
"add_node",
"(",
"antecedent_node_id",
",",
"layers",
"=",
"default_layers",
")",
"self",
".",
"add_edge",
"(",
"markable_node_id",
",",
"antecedent_node_id",
",",
"layers",
"=",
"default_layers",
",",
"edge_type",
"=",
"EdgeTypes",
".",
"pointing_relation",
",",
"label",
"=",
"self",
".",
"ns",
"+",
"edge_label",
")"
] |
adds all markables from the given annotation layer to the discourse
graph.
|
[
"adds",
"all",
"markables",
"from",
"the",
"given",
"annotation",
"layer",
"to",
"the",
"discourse",
"graph",
"."
] |
python
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/rlax/ppo.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/rlax/ppo.py#L140-L152
|
def log_params(params, name="params"):
"""Dumps the params with `logging.error`."""
for i, param in enumerate(params):
if not param:
# Empty tuple.
continue
if not isinstance(param, (list, tuple)):
logging.error(
"%s[%d] : (%s) = [%s]", name, i, param.shape, onp.array(param))
else:
for j, p in enumerate(param):
logging.error(
"\t%s[%d, %d] = [%s]", name, i, j, onp.array(p))
|
[
"def",
"log_params",
"(",
"params",
",",
"name",
"=",
"\"params\"",
")",
":",
"for",
"i",
",",
"param",
"in",
"enumerate",
"(",
"params",
")",
":",
"if",
"not",
"param",
":",
"# Empty tuple.",
"continue",
"if",
"not",
"isinstance",
"(",
"param",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"logging",
".",
"error",
"(",
"\"%s[%d] : (%s) = [%s]\"",
",",
"name",
",",
"i",
",",
"param",
".",
"shape",
",",
"onp",
".",
"array",
"(",
"param",
")",
")",
"else",
":",
"for",
"j",
",",
"p",
"in",
"enumerate",
"(",
"param",
")",
":",
"logging",
".",
"error",
"(",
"\"\\t%s[%d, %d] = [%s]\"",
",",
"name",
",",
"i",
",",
"j",
",",
"onp",
".",
"array",
"(",
"p",
")",
")"
] |
Dumps the params with `logging.error`.
|
[
"Dumps",
"the",
"params",
"with",
"logging",
".",
"error",
"."
] |
python
|
train
|
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py#L513-L527
|
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_time_since_last_change(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, "output")
fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
fcoe_intf_time_since_last_change = ET.SubElement(fcoe_intf_list, "fcoe-intf-time-since-last-change")
fcoe_intf_time_since_last_change.text = kwargs.pop('fcoe_intf_time_since_last_change')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_time_since_last_change",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"fcoe_get_interface",
"=",
"ET",
".",
"Element",
"(",
"\"fcoe_get_interface\"",
")",
"config",
"=",
"fcoe_get_interface",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"fcoe_get_interface",
",",
"\"output\"",
")",
"fcoe_intf_list",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"fcoe-intf-list\"",
")",
"fcoe_intf_fcoe_port_id_key",
"=",
"ET",
".",
"SubElement",
"(",
"fcoe_intf_list",
",",
"\"fcoe-intf-fcoe-port-id\"",
")",
"fcoe_intf_fcoe_port_id_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'fcoe_intf_fcoe_port_id'",
")",
"fcoe_intf_time_since_last_change",
"=",
"ET",
".",
"SubElement",
"(",
"fcoe_intf_list",
",",
"\"fcoe-intf-time-since-last-change\"",
")",
"fcoe_intf_time_since_last_change",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'fcoe_intf_time_since_last_change'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
|
base4sistemas/satcomum
|
satcomum/br.py
|
https://github.com/base4sistemas/satcomum/blob/b42bec06cb0fb0ad2f6b1a2644a1e8fc8403f2c3/satcomum/br.py#L252-L271
|
def is_cnpjcpf(numero, estrito=False):
"""Uma versão conveniente para usar em testes condicionais. Apenas retorna
verdadeiro ou falso, conforme o argumento é validado.
:param bool estrito: Padrão ``False``, indica se apenas os dígitos do
número deverão ser considerados. Se verdadeiro, potenciais caracteres
que formam a máscara serão removidos antes da validação ser realizada.
"""
_numero = digitos(numero) if not estrito else numero
try:
cnpj(_numero)
return True
except NumeroCNPJError:
try:
cpf(_numero)
return True
except NumeroCPFError:
pass
return False
|
[
"def",
"is_cnpjcpf",
"(",
"numero",
",",
"estrito",
"=",
"False",
")",
":",
"_numero",
"=",
"digitos",
"(",
"numero",
")",
"if",
"not",
"estrito",
"else",
"numero",
"try",
":",
"cnpj",
"(",
"_numero",
")",
"return",
"True",
"except",
"NumeroCNPJError",
":",
"try",
":",
"cpf",
"(",
"_numero",
")",
"return",
"True",
"except",
"NumeroCPFError",
":",
"pass",
"return",
"False"
] |
Uma versão conveniente para usar em testes condicionais. Apenas retorna
verdadeiro ou falso, conforme o argumento é validado.
:param bool estrito: Padrão ``False``, indica se apenas os dígitos do
número deverão ser considerados. Se verdadeiro, potenciais caracteres
que formam a máscara serão removidos antes da validação ser realizada.
|
[
"Uma",
"versão",
"conveniente",
"para",
"usar",
"em",
"testes",
"condicionais",
".",
"Apenas",
"retorna",
"verdadeiro",
"ou",
"falso",
"conforme",
"o",
"argumento",
"é",
"validado",
"."
] |
python
|
train
|
hubo1016/vlcp
|
vlcp/event/future.py
|
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/future.py#L89-L98
|
def set_result(self, result):
'''
Set the result to Future object, wake up all the waiters
:param result: result to set
'''
if hasattr(self, '_result'):
raise ValueError('Cannot set the result twice')
self._result = result
self._scheduler.emergesend(FutureEvent(self, result = result))
|
[
"def",
"set_result",
"(",
"self",
",",
"result",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_result'",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot set the result twice'",
")",
"self",
".",
"_result",
"=",
"result",
"self",
".",
"_scheduler",
".",
"emergesend",
"(",
"FutureEvent",
"(",
"self",
",",
"result",
"=",
"result",
")",
")"
] |
Set the result to Future object, wake up all the waiters
:param result: result to set
|
[
"Set",
"the",
"result",
"to",
"Future",
"object",
"wake",
"up",
"all",
"the",
"waiters",
":",
"param",
"result",
":",
"result",
"to",
"set"
] |
python
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3432-L3445
|
def should_generate_summaries():
"""Is this an appropriate context to generate summaries.
Returns:
a boolean
"""
name_scope = tf.contrib.framework.get_name_scope()
if name_scope and "while/" in name_scope:
# Summaries don't work well within tf.while_loop()
return False
if tf.get_variable_scope().reuse:
# Avoid generating separate summaries for different data shards
return False
return True
|
[
"def",
"should_generate_summaries",
"(",
")",
":",
"name_scope",
"=",
"tf",
".",
"contrib",
".",
"framework",
".",
"get_name_scope",
"(",
")",
"if",
"name_scope",
"and",
"\"while/\"",
"in",
"name_scope",
":",
"# Summaries don't work well within tf.while_loop()",
"return",
"False",
"if",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"reuse",
":",
"# Avoid generating separate summaries for different data shards",
"return",
"False",
"return",
"True"
] |
Is this an appropriate context to generate summaries.
Returns:
a boolean
|
[
"Is",
"this",
"an",
"appropriate",
"context",
"to",
"generate",
"summaries",
"."
] |
python
|
train
|
scheibler/khard
|
khard/actions.py
|
https://github.com/scheibler/khard/blob/0f69430c2680f1ff5f073a977a3c5b753b96cc17/khard/actions.py#L30-L43
|
def get_action(cls, alias):
"""Find the name of the action for the supplied alias. If no action is
asociated with the given alias, None is returned.
:param alias: the alias to look up
:type alias: str
:rturns: the name of the corresponding action or None
:rtype: str or NoneType
"""
for action, alias_list in cls.action_map.items():
if alias in alias_list:
return action
return None
|
[
"def",
"get_action",
"(",
"cls",
",",
"alias",
")",
":",
"for",
"action",
",",
"alias_list",
"in",
"cls",
".",
"action_map",
".",
"items",
"(",
")",
":",
"if",
"alias",
"in",
"alias_list",
":",
"return",
"action",
"return",
"None"
] |
Find the name of the action for the supplied alias. If no action is
asociated with the given alias, None is returned.
:param alias: the alias to look up
:type alias: str
:rturns: the name of the corresponding action or None
:rtype: str or NoneType
|
[
"Find",
"the",
"name",
"of",
"the",
"action",
"for",
"the",
"supplied",
"alias",
".",
"If",
"no",
"action",
"is",
"asociated",
"with",
"the",
"given",
"alias",
"None",
"is",
"returned",
"."
] |
python
|
test
|
robinandeer/puzzle
|
puzzle/plugins/gemini/mixins/case.py
|
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/case.py#L48-L68
|
def case(self, case_id=None):
"""Return a Case object
If no case_id is given return one case
Args:
case_id (str): A case id
Returns:
case(Case): A Case object
"""
cases = self.cases()
if case_id:
for case in cases:
if case.case_id == case_id:
return case
else:
if cases:
return cases[0]
return None
|
[
"def",
"case",
"(",
"self",
",",
"case_id",
"=",
"None",
")",
":",
"cases",
"=",
"self",
".",
"cases",
"(",
")",
"if",
"case_id",
":",
"for",
"case",
"in",
"cases",
":",
"if",
"case",
".",
"case_id",
"==",
"case_id",
":",
"return",
"case",
"else",
":",
"if",
"cases",
":",
"return",
"cases",
"[",
"0",
"]",
"return",
"None"
] |
Return a Case object
If no case_id is given return one case
Args:
case_id (str): A case id
Returns:
case(Case): A Case object
|
[
"Return",
"a",
"Case",
"object"
] |
python
|
train
|
TUNE-Archive/freight_forwarder
|
freight_forwarder/container/config.py
|
https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container/config.py#L329-L340
|
def hostname(self, hostname):
""" hostname setter
"""
if not isinstance(hostname, six.string_types):
raise TypeError("hostname must be a string. {0} was passed.".format(type(hostname)))
# if a host name is passed and its not valid raise else set hostname empty strings are the docker default.
if hostname and not is_valid_hostname(hostname):
raise ValueError("{0} isn't a valid hostname").format(hostname)
else:
self._hostname = hostname
|
[
"def",
"hostname",
"(",
"self",
",",
"hostname",
")",
":",
"if",
"not",
"isinstance",
"(",
"hostname",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"hostname must be a string. {0} was passed.\"",
".",
"format",
"(",
"type",
"(",
"hostname",
")",
")",
")",
"# if a host name is passed and its not valid raise else set hostname empty strings are the docker default.",
"if",
"hostname",
"and",
"not",
"is_valid_hostname",
"(",
"hostname",
")",
":",
"raise",
"ValueError",
"(",
"\"{0} isn't a valid hostname\"",
")",
".",
"format",
"(",
"hostname",
")",
"else",
":",
"self",
".",
"_hostname",
"=",
"hostname"
] |
hostname setter
|
[
"hostname",
"setter"
] |
python
|
train
|
cloud-custodian/cloud-custodian
|
tools/c7n_traildb/c7n_traildb/trailes.py
|
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_traildb/c7n_traildb/trailes.py#L105-L114
|
def fetch_events(cursor, config, account_name):
"""Generator that returns the events"""
query = config['indexer'].get('query',
'select * from events where user_agent glob \'*CloudCustodian*\'')
for event in cursor.execute(query):
event['account'] = account_name
event['_index'] = config['indexer']['idx_name']
event['_type'] = config['indexer'].get('idx_type', 'traildb')
yield event
|
[
"def",
"fetch_events",
"(",
"cursor",
",",
"config",
",",
"account_name",
")",
":",
"query",
"=",
"config",
"[",
"'indexer'",
"]",
".",
"get",
"(",
"'query'",
",",
"'select * from events where user_agent glob \\'*CloudCustodian*\\''",
")",
"for",
"event",
"in",
"cursor",
".",
"execute",
"(",
"query",
")",
":",
"event",
"[",
"'account'",
"]",
"=",
"account_name",
"event",
"[",
"'_index'",
"]",
"=",
"config",
"[",
"'indexer'",
"]",
"[",
"'idx_name'",
"]",
"event",
"[",
"'_type'",
"]",
"=",
"config",
"[",
"'indexer'",
"]",
".",
"get",
"(",
"'idx_type'",
",",
"'traildb'",
")",
"yield",
"event"
] |
Generator that returns the events
|
[
"Generator",
"that",
"returns",
"the",
"events"
] |
python
|
train
|
watson-developer-cloud/python-sdk
|
ibm_watson/compare_comply_v1.py
|
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L1225-L1276
|
def _from_dict(cls, _dict):
"""Initialize a BodyCells object from a json dictionary."""
args = {}
if 'cell_id' in _dict:
args['cell_id'] = _dict.get('cell_id')
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'row_index_begin' in _dict:
args['row_index_begin'] = _dict.get('row_index_begin')
if 'row_index_end' in _dict:
args['row_index_end'] = _dict.get('row_index_end')
if 'column_index_begin' in _dict:
args['column_index_begin'] = _dict.get('column_index_begin')
if 'column_index_end' in _dict:
args['column_index_end'] = _dict.get('column_index_end')
if 'row_header_ids' in _dict:
args['row_header_ids'] = [
RowHeaderIds._from_dict(x)
for x in (_dict.get('row_header_ids'))
]
if 'row_header_texts' in _dict:
args['row_header_texts'] = [
RowHeaderTexts._from_dict(x)
for x in (_dict.get('row_header_texts'))
]
if 'row_header_texts_normalized' in _dict:
args['row_header_texts_normalized'] = [
RowHeaderTextsNormalized._from_dict(x)
for x in (_dict.get('row_header_texts_normalized'))
]
if 'column_header_ids' in _dict:
args['column_header_ids'] = [
ColumnHeaderIds._from_dict(x)
for x in (_dict.get('column_header_ids'))
]
if 'column_header_texts' in _dict:
args['column_header_texts'] = [
ColumnHeaderTexts._from_dict(x)
for x in (_dict.get('column_header_texts'))
]
if 'column_header_texts_normalized' in _dict:
args['column_header_texts_normalized'] = [
ColumnHeaderTextsNormalized._from_dict(x)
for x in (_dict.get('column_header_texts_normalized'))
]
if 'attributes' in _dict:
args['attributes'] = [
Attribute._from_dict(x) for x in (_dict.get('attributes'))
]
return cls(**args)
|
[
"def",
"_from_dict",
"(",
"cls",
",",
"_dict",
")",
":",
"args",
"=",
"{",
"}",
"if",
"'cell_id'",
"in",
"_dict",
":",
"args",
"[",
"'cell_id'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'cell_id'",
")",
"if",
"'location'",
"in",
"_dict",
":",
"args",
"[",
"'location'",
"]",
"=",
"Location",
".",
"_from_dict",
"(",
"_dict",
".",
"get",
"(",
"'location'",
")",
")",
"if",
"'text'",
"in",
"_dict",
":",
"args",
"[",
"'text'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'text'",
")",
"if",
"'row_index_begin'",
"in",
"_dict",
":",
"args",
"[",
"'row_index_begin'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'row_index_begin'",
")",
"if",
"'row_index_end'",
"in",
"_dict",
":",
"args",
"[",
"'row_index_end'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'row_index_end'",
")",
"if",
"'column_index_begin'",
"in",
"_dict",
":",
"args",
"[",
"'column_index_begin'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'column_index_begin'",
")",
"if",
"'column_index_end'",
"in",
"_dict",
":",
"args",
"[",
"'column_index_end'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'column_index_end'",
")",
"if",
"'row_header_ids'",
"in",
"_dict",
":",
"args",
"[",
"'row_header_ids'",
"]",
"=",
"[",
"RowHeaderIds",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'row_header_ids'",
")",
")",
"]",
"if",
"'row_header_texts'",
"in",
"_dict",
":",
"args",
"[",
"'row_header_texts'",
"]",
"=",
"[",
"RowHeaderTexts",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'row_header_texts'",
")",
")",
"]",
"if",
"'row_header_texts_normalized'",
"in",
"_dict",
":",
"args",
"[",
"'row_header_texts_normalized'",
"]",
"=",
"[",
"RowHeaderTextsNormalized",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'row_header_texts_normalized'",
")",
")",
"]",
"if",
"'column_header_ids'",
"in",
"_dict",
":",
"args",
"[",
"'column_header_ids'",
"]",
"=",
"[",
"ColumnHeaderIds",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'column_header_ids'",
")",
")",
"]",
"if",
"'column_header_texts'",
"in",
"_dict",
":",
"args",
"[",
"'column_header_texts'",
"]",
"=",
"[",
"ColumnHeaderTexts",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'column_header_texts'",
")",
")",
"]",
"if",
"'column_header_texts_normalized'",
"in",
"_dict",
":",
"args",
"[",
"'column_header_texts_normalized'",
"]",
"=",
"[",
"ColumnHeaderTextsNormalized",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'column_header_texts_normalized'",
")",
")",
"]",
"if",
"'attributes'",
"in",
"_dict",
":",
"args",
"[",
"'attributes'",
"]",
"=",
"[",
"Attribute",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'attributes'",
")",
")",
"]",
"return",
"cls",
"(",
"*",
"*",
"args",
")"
] |
Initialize a BodyCells object from a json dictionary.
|
[
"Initialize",
"a",
"BodyCells",
"object",
"from",
"a",
"json",
"dictionary",
"."
] |
python
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/universal_transformer_util.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer_util.py#L927-L1037
|
def universal_transformer_with_lstm_as_transition_function(
layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
"""Universal Transformer which uses a lstm as transition function.
It's kind of like having a lstm, flipped vertically next to the Universal
Transformer that controls the flow of the information in depth,
over different steps of the Universal Transformer.
Args:
layer_inputs:
- state: state
- inputs: the original embedded inputs (= inputs to the first step)
- memory: memory used in lstm.
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: the original embedded inputs (= inputs to the first step)
memory: contains information of state from all the previous steps.
"""
state, unused_inputs, memory = tf.unstack(
layer_inputs, num=None, axis=0, name="unstack")
# NOTE:
# state (ut_state): output of the lstm in the previous step
# inputs (ut_input): original input --> we don't use it here
# memory: lstm memory
# Multi_head_attention:
assert not hparams.add_step_timing_signal # Let lstm count for us!
mh_attention_input = step_preprocess(state, step, hparams)
transition_function_input = attention_unit(mh_attention_input)
# Transition Function:
if hparams.add_ffn_unit_to_the_transition_function:
transition_function_input = ffn_unit(transition_function_input)
transition_function_input = common_layers.layer_preprocess(
transition_function_input, hparams)
with tf.variable_scope("lstm"):
# lstm input gate: i_t = sigmoid(W_i.x_t + U_i.h_{t-1})
transition_function_input_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="input",
bias_initializer=tf.zeros_initializer(),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("lstm_input_gate",
tf.reduce_mean(transition_function_input_gate))
# lstm forget gate: f_t = sigmoid(W_f.x_t + U_f.h_{t-1})
transition_function_forget_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="forget",
bias_initializer=tf.zeros_initializer(),
activation=None,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
forget_bias_tensor = tf.constant(hparams.lstm_forget_bias)
transition_function_forget_gate = tf.sigmoid(
transition_function_forget_gate + forget_bias_tensor)
tf.contrib.summary.scalar("lstm_forget_gate",
tf.reduce_mean(transition_function_forget_gate))
# lstm output gate: o_t = sigmoid(W_o.x_t + U_o.h_{t-1})
transition_function_output_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="output",
bias_initializer=tf.zeros_initializer(),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("lstm_output_gate",
tf.reduce_mean(transition_function_output_gate))
# lstm input modulation
transition_function_input_modulation = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="input_modulation",
bias_initializer=tf.zeros_initializer(),
activation=tf.tanh,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
transition_function_memory = (
memory * transition_function_forget_gate +
transition_function_input_gate * transition_function_input_modulation)
transition_function_output = (
tf.tanh(transition_function_memory) * transition_function_output_gate)
transition_function_output = common_layers.layer_preprocess(
transition_function_output, hparams)
return transition_function_output, unused_inputs, transition_function_memory
|
[
"def",
"universal_transformer_with_lstm_as_transition_function",
"(",
"layer_inputs",
",",
"step",
",",
"hparams",
",",
"ffn_unit",
",",
"attention_unit",
",",
"pad_remover",
"=",
"None",
")",
":",
"state",
",",
"unused_inputs",
",",
"memory",
"=",
"tf",
".",
"unstack",
"(",
"layer_inputs",
",",
"num",
"=",
"None",
",",
"axis",
"=",
"0",
",",
"name",
"=",
"\"unstack\"",
")",
"# NOTE:",
"# state (ut_state): output of the lstm in the previous step",
"# inputs (ut_input): original input --> we don't use it here",
"# memory: lstm memory",
"# Multi_head_attention:",
"assert",
"not",
"hparams",
".",
"add_step_timing_signal",
"# Let lstm count for us!",
"mh_attention_input",
"=",
"step_preprocess",
"(",
"state",
",",
"step",
",",
"hparams",
")",
"transition_function_input",
"=",
"attention_unit",
"(",
"mh_attention_input",
")",
"# Transition Function:",
"if",
"hparams",
".",
"add_ffn_unit_to_the_transition_function",
":",
"transition_function_input",
"=",
"ffn_unit",
"(",
"transition_function_input",
")",
"transition_function_input",
"=",
"common_layers",
".",
"layer_preprocess",
"(",
"transition_function_input",
",",
"hparams",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"lstm\"",
")",
":",
"# lstm input gate: i_t = sigmoid(W_i.x_t + U_i.h_{t-1})",
"transition_function_input_gate",
"=",
"_ffn_layer_multi_inputs",
"(",
"[",
"transition_function_input",
",",
"state",
"]",
",",
"hparams",
",",
"name",
"=",
"\"input\"",
",",
"bias_initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
",",
"activation",
"=",
"tf",
".",
"sigmoid",
",",
"pad_remover",
"=",
"pad_remover",
",",
"preprocess",
"=",
"False",
",",
"postprocess",
"=",
"False",
")",
"tf",
".",
"contrib",
".",
"summary",
".",
"scalar",
"(",
"\"lstm_input_gate\"",
",",
"tf",
".",
"reduce_mean",
"(",
"transition_function_input_gate",
")",
")",
"# lstm forget gate: f_t = sigmoid(W_f.x_t + U_f.h_{t-1})",
"transition_function_forget_gate",
"=",
"_ffn_layer_multi_inputs",
"(",
"[",
"transition_function_input",
",",
"state",
"]",
",",
"hparams",
",",
"name",
"=",
"\"forget\"",
",",
"bias_initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
",",
"activation",
"=",
"None",
",",
"pad_remover",
"=",
"pad_remover",
",",
"preprocess",
"=",
"False",
",",
"postprocess",
"=",
"False",
")",
"forget_bias_tensor",
"=",
"tf",
".",
"constant",
"(",
"hparams",
".",
"lstm_forget_bias",
")",
"transition_function_forget_gate",
"=",
"tf",
".",
"sigmoid",
"(",
"transition_function_forget_gate",
"+",
"forget_bias_tensor",
")",
"tf",
".",
"contrib",
".",
"summary",
".",
"scalar",
"(",
"\"lstm_forget_gate\"",
",",
"tf",
".",
"reduce_mean",
"(",
"transition_function_forget_gate",
")",
")",
"# lstm output gate: o_t = sigmoid(W_o.x_t + U_o.h_{t-1})",
"transition_function_output_gate",
"=",
"_ffn_layer_multi_inputs",
"(",
"[",
"transition_function_input",
",",
"state",
"]",
",",
"hparams",
",",
"name",
"=",
"\"output\"",
",",
"bias_initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
",",
"activation",
"=",
"tf",
".",
"sigmoid",
",",
"pad_remover",
"=",
"pad_remover",
",",
"preprocess",
"=",
"False",
",",
"postprocess",
"=",
"False",
")",
"tf",
".",
"contrib",
".",
"summary",
".",
"scalar",
"(",
"\"lstm_output_gate\"",
",",
"tf",
".",
"reduce_mean",
"(",
"transition_function_output_gate",
")",
")",
"# lstm input modulation",
"transition_function_input_modulation",
"=",
"_ffn_layer_multi_inputs",
"(",
"[",
"transition_function_input",
",",
"state",
"]",
",",
"hparams",
",",
"name",
"=",
"\"input_modulation\"",
",",
"bias_initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
",",
"activation",
"=",
"tf",
".",
"tanh",
",",
"pad_remover",
"=",
"pad_remover",
",",
"preprocess",
"=",
"False",
",",
"postprocess",
"=",
"False",
")",
"transition_function_memory",
"=",
"(",
"memory",
"*",
"transition_function_forget_gate",
"+",
"transition_function_input_gate",
"*",
"transition_function_input_modulation",
")",
"transition_function_output",
"=",
"(",
"tf",
".",
"tanh",
"(",
"transition_function_memory",
")",
"*",
"transition_function_output_gate",
")",
"transition_function_output",
"=",
"common_layers",
".",
"layer_preprocess",
"(",
"transition_function_output",
",",
"hparams",
")",
"return",
"transition_function_output",
",",
"unused_inputs",
",",
"transition_function_memory"
] |
Universal Transformer which uses a lstm as transition function.
It's kind of like having a lstm, flipped vertically next to the Universal
Transformer that controls the flow of the information in depth,
over different steps of the Universal Transformer.
Args:
layer_inputs:
- state: state
- inputs: the original embedded inputs (= inputs to the first step)
- memory: memory used in lstm.
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: the original embedded inputs (= inputs to the first step)
memory: contains information of state from all the previous steps.
|
[
"Universal",
"Transformer",
"which",
"uses",
"a",
"lstm",
"as",
"transition",
"function",
"."
] |
python
|
train
|
ultrabug/py3status
|
py3status/parse_config.py
|
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/parse_config.py#L460-L475
|
def separator(self, separator=",", end_token=None):
"""
Read through tokens till the required separator is found. We ignore
newlines. If an end token is supplied raise a ParseEnd exception if it
is found.
"""
while True:
token = self.next()
t_value = token["value"]
if end_token and t_value == end_token:
raise self.ParseEnd()
if t_value == separator:
return
if t_value == "\n":
continue
self.error("Unexpected character")
|
[
"def",
"separator",
"(",
"self",
",",
"separator",
"=",
"\",\"",
",",
"end_token",
"=",
"None",
")",
":",
"while",
"True",
":",
"token",
"=",
"self",
".",
"next",
"(",
")",
"t_value",
"=",
"token",
"[",
"\"value\"",
"]",
"if",
"end_token",
"and",
"t_value",
"==",
"end_token",
":",
"raise",
"self",
".",
"ParseEnd",
"(",
")",
"if",
"t_value",
"==",
"separator",
":",
"return",
"if",
"t_value",
"==",
"\"\\n\"",
":",
"continue",
"self",
".",
"error",
"(",
"\"Unexpected character\"",
")"
] |
Read through tokens till the required separator is found. We ignore
newlines. If an end token is supplied raise a ParseEnd exception if it
is found.
|
[
"Read",
"through",
"tokens",
"till",
"the",
"required",
"separator",
"is",
"found",
".",
"We",
"ignore",
"newlines",
".",
"If",
"an",
"end",
"token",
"is",
"supplied",
"raise",
"a",
"ParseEnd",
"exception",
"if",
"it",
"is",
"found",
"."
] |
python
|
train
|
jmatt/threepio
|
threepio/__init__.py
|
https://github.com/jmatt/threepio/blob/91e2835c85c1618fcc4a1357dbb398353e662d1a/threepio/__init__.py#L32-L93
|
def initialize(logger_name=LOGGER_NAME,
log_filename=LOG_FILENAME,
app_logging_level=APP_LOGGING_LEVEL,
dep_logging_level=DEP_LOGGING_LEVEL,
format=None,
logger_class=None,
handlers=[],
global_logger=True):
"""
Constructs and initializes a `logging.Logger` object.
Returns :class:`logging.Logger` object.
:param logger_name: name of the new logger.
:param log_filename: The log file location :class:`str` or None.
:param app_logging_level: The logging level to use for the application.
:param dep_logging_level: The logging level to use for dependencies.
:param format: The format string to use :class: `str` or None.
:param logger_class: The logger class to use
:param handlers: List of handler instances to add.
:param global_logger: If true set threepio's global logger variable to this logger.
"""
# If there is no format, use a default format.
if not format:
format = "%(asctime)s %(name)s-%(levelname)s "\
+ "[%(pathname)s %(lineno)d] %(message)s"
formatter = logging.Formatter(format)
# Setup the root logging for dependencies, etc.
if log_filename:
logging.basicConfig(
level=dep_logging_level,
format=format,
filename=log_filename,
filemode='a+')
else:
logging.basicConfig(
level=dep_logging_level,
format=format)
# Setup and add separate application logging.
if logger_class:
original_class = logging.getLoggerClass()
logging.setLoggerClass(logger_class)
new_logger = logging.getLogger(logger_name)
logging.setLoggerClass(original_class)
else:
new_logger = logging.getLogger(logger_name)
# Set the app logging level.
new_logger.setLevel(app_logging_level) # required to get level to apply.
# Set the global_logger by default.
if global_logger:
global logger
logger = new_logger
for handler in handlers:
handler.setFormatter(formatter)
handler.setLevel(app_logging_level)
new_logger.addHandler(handler)
return new_logger
|
[
"def",
"initialize",
"(",
"logger_name",
"=",
"LOGGER_NAME",
",",
"log_filename",
"=",
"LOG_FILENAME",
",",
"app_logging_level",
"=",
"APP_LOGGING_LEVEL",
",",
"dep_logging_level",
"=",
"DEP_LOGGING_LEVEL",
",",
"format",
"=",
"None",
",",
"logger_class",
"=",
"None",
",",
"handlers",
"=",
"[",
"]",
",",
"global_logger",
"=",
"True",
")",
":",
"# If there is no format, use a default format.",
"if",
"not",
"format",
":",
"format",
"=",
"\"%(asctime)s %(name)s-%(levelname)s \"",
"+",
"\"[%(pathname)s %(lineno)d] %(message)s\"",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"format",
")",
"# Setup the root logging for dependencies, etc.",
"if",
"log_filename",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"dep_logging_level",
",",
"format",
"=",
"format",
",",
"filename",
"=",
"log_filename",
",",
"filemode",
"=",
"'a+'",
")",
"else",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"dep_logging_level",
",",
"format",
"=",
"format",
")",
"# Setup and add separate application logging.",
"if",
"logger_class",
":",
"original_class",
"=",
"logging",
".",
"getLoggerClass",
"(",
")",
"logging",
".",
"setLoggerClass",
"(",
"logger_class",
")",
"new_logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger_name",
")",
"logging",
".",
"setLoggerClass",
"(",
"original_class",
")",
"else",
":",
"new_logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger_name",
")",
"# Set the app logging level.",
"new_logger",
".",
"setLevel",
"(",
"app_logging_level",
")",
"# required to get level to apply.",
"# Set the global_logger by default.",
"if",
"global_logger",
":",
"global",
"logger",
"logger",
"=",
"new_logger",
"for",
"handler",
"in",
"handlers",
":",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"handler",
".",
"setLevel",
"(",
"app_logging_level",
")",
"new_logger",
".",
"addHandler",
"(",
"handler",
")",
"return",
"new_logger"
] |
Constructs and initializes a `logging.Logger` object.
Returns :class:`logging.Logger` object.
:param logger_name: name of the new logger.
:param log_filename: The log file location :class:`str` or None.
:param app_logging_level: The logging level to use for the application.
:param dep_logging_level: The logging level to use for dependencies.
:param format: The format string to use :class: `str` or None.
:param logger_class: The logger class to use
:param handlers: List of handler instances to add.
:param global_logger: If true set threepio's global logger variable to this logger.
|
[
"Constructs",
"and",
"initializes",
"a",
"logging",
".",
"Logger",
"object",
"."
] |
python
|
train
|
StackStorm/pybind
|
pybind/slxos/v17s_1_02/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L10317-L10340
|
def _set_preprovision(self, v, load=False):
"""
Setter method for preprovision, mapped from YANG variable /preprovision (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_preprovision is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_preprovision() directly.
YANG Description: Preprovision profile
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=preprovision.preprovision, is_container='container', presence=False, yang_name="preprovision", rest_name="preprovision", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Preprovision profile', u'hidden': u'full', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """preprovision must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=preprovision.preprovision, is_container='container', presence=False, yang_name="preprovision", rest_name="preprovision", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Preprovision profile', u'hidden': u'full', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='container', is_config=True)""",
})
self.__preprovision = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_preprovision",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"preprovision",
".",
"preprovision",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"preprovision\"",
",",
"rest_name",
"=",
"\"preprovision\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Preprovision profile'",
",",
"u'hidden'",
":",
"u'full'",
",",
"u'display-when'",
":",
"u'((/vcsmode/vcs-mode = \"true\") and (/vcsmode/vcs-cluster-mode = \"true\"))'",
",",
"u'cli-incomplete-no'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-preprovision'",
",",
"defining_module",
"=",
"'brocade-preprovision'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"preprovision must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=preprovision.preprovision, is_container='container', presence=False, yang_name=\"preprovision\", rest_name=\"preprovision\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Preprovision profile', u'hidden': u'full', u'display-when': u'((/vcsmode/vcs-mode = \"true\") and (/vcsmode/vcs-cluster-mode = \"true\"))', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__preprovision",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for preprovision, mapped from YANG variable /preprovision (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_preprovision is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_preprovision() directly.
YANG Description: Preprovision profile
|
[
"Setter",
"method",
"for",
"preprovision",
"mapped",
"from",
"YANG",
"variable",
"/",
"preprovision",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_preprovision",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_preprovision",
"()",
"directly",
"."
] |
python
|
train
|
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAMarket/QATTSBroker.py
|
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAMarket/QATTSBroker.py#L255-L292
|
def send_order(self, code, price, amount, towards, order_model, market=None):
"""下单
Arguments:
code {[type]} -- [description]
price {[type]} -- [description]
amount {[type]} -- [description]
towards {[type]} -- [description]
order_model {[type]} -- [description]
market:市场,SZ 深交所,SH 上交所
Returns:
[type] -- [description]
"""
towards = 0 if towards == ORDER_DIRECTION.BUY else 1
if order_model == ORDER_MODEL.MARKET:
order_model = 4
elif order_model == ORDER_MODEL.LIMIT:
order_model = 0
if market is None:
market = QAFetch.base.get_stock_market(code)
if not isinstance(market, str):
raise Exception('%s不正确,请检查code和market参数' % market)
market = market.lower()
if market not in ['sh', 'sz']:
raise Exception('%s不支持,请检查code和market参数' % market)
return self.data_to_df(self.call("send_order", {
'client_id': self.client_id,
'category': towards,
'price_type': order_model,
'gddm': self.gddm_sh if market == 'sh' else self.gddm_sz,
'zqdm': code,
'price': price,
'quantity': amount
}))
|
[
"def",
"send_order",
"(",
"self",
",",
"code",
",",
"price",
",",
"amount",
",",
"towards",
",",
"order_model",
",",
"market",
"=",
"None",
")",
":",
"towards",
"=",
"0",
"if",
"towards",
"==",
"ORDER_DIRECTION",
".",
"BUY",
"else",
"1",
"if",
"order_model",
"==",
"ORDER_MODEL",
".",
"MARKET",
":",
"order_model",
"=",
"4",
"elif",
"order_model",
"==",
"ORDER_MODEL",
".",
"LIMIT",
":",
"order_model",
"=",
"0",
"if",
"market",
"is",
"None",
":",
"market",
"=",
"QAFetch",
".",
"base",
".",
"get_stock_market",
"(",
"code",
")",
"if",
"not",
"isinstance",
"(",
"market",
",",
"str",
")",
":",
"raise",
"Exception",
"(",
"'%s不正确,请检查code和market参数' % market)",
"",
"",
"",
"market",
"=",
"market",
".",
"lower",
"(",
")",
"if",
"market",
"not",
"in",
"[",
"'sh'",
",",
"'sz'",
"]",
":",
"raise",
"Exception",
"(",
"'%s不支持,请检查code和market参数' % market)",
"",
"",
"",
"return",
"self",
".",
"data_to_df",
"(",
"self",
".",
"call",
"(",
"\"send_order\"",
",",
"{",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'category'",
":",
"towards",
",",
"'price_type'",
":",
"order_model",
",",
"'gddm'",
":",
"self",
".",
"gddm_sh",
"if",
"market",
"==",
"'sh'",
"else",
"self",
".",
"gddm_sz",
",",
"'zqdm'",
":",
"code",
",",
"'price'",
":",
"price",
",",
"'quantity'",
":",
"amount",
"}",
")",
")"
] |
下单
Arguments:
code {[type]} -- [description]
price {[type]} -- [description]
amount {[type]} -- [description]
towards {[type]} -- [description]
order_model {[type]} -- [description]
market:市场,SZ 深交所,SH 上交所
Returns:
[type] -- [description]
|
[
"下单"
] |
python
|
train
|
mrstephenneal/dirutility
|
dirutility/compare.py
|
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/compare.py#L6-L13
|
def unique(list1, list2):
"""
Get unique items in list1 that are not in list2
:return: Unique items only in list 1
"""
set2 = set(list2)
list1_unique = [x for x in tqdm(list1, desc='Unique', total=len(list1)) if x not in set2]
return list1_unique
|
[
"def",
"unique",
"(",
"list1",
",",
"list2",
")",
":",
"set2",
"=",
"set",
"(",
"list2",
")",
"list1_unique",
"=",
"[",
"x",
"for",
"x",
"in",
"tqdm",
"(",
"list1",
",",
"desc",
"=",
"'Unique'",
",",
"total",
"=",
"len",
"(",
"list1",
")",
")",
"if",
"x",
"not",
"in",
"set2",
"]",
"return",
"list1_unique"
] |
Get unique items in list1 that are not in list2
:return: Unique items only in list 1
|
[
"Get",
"unique",
"items",
"in",
"list1",
"that",
"are",
"not",
"in",
"list2",
":",
"return",
":",
"Unique",
"items",
"only",
"in",
"list",
"1"
] |
python
|
train
|
sods/ods
|
pods/datasets.py
|
https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/datasets.py#L373-L413
|
def to_arff(dataset, **kwargs):
"""Take a pods data set and write it as an ARFF file"""
pods_data = dataset(**kwargs)
vals = list(kwargs.values())
for i, v in enumerate(vals):
if isinstance(v, list):
vals[i] = '|'.join(v)
else:
vals[i] = str(v)
args = '_'.join(vals)
n = dataset.__name__
if len(args)>0:
n += '_' + args
n = n.replace(' ', '-')
ks = pods_data.keys()
d = None
if 'Y' in ks and 'X' in ks:
d = pd.DataFrame(pods_data['X'])
if 'Xtest' in ks:
d = d.append(pd.DataFrame(pods_data['Xtest']), ignore_index=True)
if 'covariates' in ks:
d.columns = pods_data['covariates']
dy = pd.DataFrame(pods_data['Y'])
if 'Ytest' in ks:
dy = dy.append(pd.DataFrame(pods_data['Ytest']), ignore_index=True)
if 'response' in ks:
dy.columns = pods_data['response']
for c in dy.columns:
if c not in d.columns:
d[c] = dy[c]
else:
d['y'+str(c)] = dy[c]
elif 'Y' in ks:
d = pd.DataFrame(pods_data['Y'])
if 'Ytest' in ks:
d = d.append(pd.DataFrame(pods_data['Ytest']), ignore_index=True)
elif 'data' in ks:
d = pd.DataFrame(pods_data['data'])
if d is not None:
df2arff(d, n, pods_data)
|
[
"def",
"to_arff",
"(",
"dataset",
",",
"*",
"*",
"kwargs",
")",
":",
"pods_data",
"=",
"dataset",
"(",
"*",
"*",
"kwargs",
")",
"vals",
"=",
"list",
"(",
"kwargs",
".",
"values",
"(",
")",
")",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"vals",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"vals",
"[",
"i",
"]",
"=",
"'|'",
".",
"join",
"(",
"v",
")",
"else",
":",
"vals",
"[",
"i",
"]",
"=",
"str",
"(",
"v",
")",
"args",
"=",
"'_'",
".",
"join",
"(",
"vals",
")",
"n",
"=",
"dataset",
".",
"__name__",
"if",
"len",
"(",
"args",
")",
">",
"0",
":",
"n",
"+=",
"'_'",
"+",
"args",
"n",
"=",
"n",
".",
"replace",
"(",
"' '",
",",
"'-'",
")",
"ks",
"=",
"pods_data",
".",
"keys",
"(",
")",
"d",
"=",
"None",
"if",
"'Y'",
"in",
"ks",
"and",
"'X'",
"in",
"ks",
":",
"d",
"=",
"pd",
".",
"DataFrame",
"(",
"pods_data",
"[",
"'X'",
"]",
")",
"if",
"'Xtest'",
"in",
"ks",
":",
"d",
"=",
"d",
".",
"append",
"(",
"pd",
".",
"DataFrame",
"(",
"pods_data",
"[",
"'Xtest'",
"]",
")",
",",
"ignore_index",
"=",
"True",
")",
"if",
"'covariates'",
"in",
"ks",
":",
"d",
".",
"columns",
"=",
"pods_data",
"[",
"'covariates'",
"]",
"dy",
"=",
"pd",
".",
"DataFrame",
"(",
"pods_data",
"[",
"'Y'",
"]",
")",
"if",
"'Ytest'",
"in",
"ks",
":",
"dy",
"=",
"dy",
".",
"append",
"(",
"pd",
".",
"DataFrame",
"(",
"pods_data",
"[",
"'Ytest'",
"]",
")",
",",
"ignore_index",
"=",
"True",
")",
"if",
"'response'",
"in",
"ks",
":",
"dy",
".",
"columns",
"=",
"pods_data",
"[",
"'response'",
"]",
"for",
"c",
"in",
"dy",
".",
"columns",
":",
"if",
"c",
"not",
"in",
"d",
".",
"columns",
":",
"d",
"[",
"c",
"]",
"=",
"dy",
"[",
"c",
"]",
"else",
":",
"d",
"[",
"'y'",
"+",
"str",
"(",
"c",
")",
"]",
"=",
"dy",
"[",
"c",
"]",
"elif",
"'Y'",
"in",
"ks",
":",
"d",
"=",
"pd",
".",
"DataFrame",
"(",
"pods_data",
"[",
"'Y'",
"]",
")",
"if",
"'Ytest'",
"in",
"ks",
":",
"d",
"=",
"d",
".",
"append",
"(",
"pd",
".",
"DataFrame",
"(",
"pods_data",
"[",
"'Ytest'",
"]",
")",
",",
"ignore_index",
"=",
"True",
")",
"elif",
"'data'",
"in",
"ks",
":",
"d",
"=",
"pd",
".",
"DataFrame",
"(",
"pods_data",
"[",
"'data'",
"]",
")",
"if",
"d",
"is",
"not",
"None",
":",
"df2arff",
"(",
"d",
",",
"n",
",",
"pods_data",
")"
] |
Take a pods data set and write it as an ARFF file
|
[
"Take",
"a",
"pods",
"data",
"set",
"and",
"write",
"it",
"as",
"an",
"ARFF",
"file"
] |
python
|
train
|
openthread/openthread
|
tools/harness-thci/OpenThread.py
|
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L362-L376
|
def __setAddressfilterMode(self, mode):
"""set address filter mode
Returns:
True: successfully set the address filter mode.
False: failed to set the address filter mode.
"""
print 'call setAddressFilterMode() ' + mode
try:
cmd = 'macfilter addr ' + mode
if self.__sendCommand(cmd)[0] == 'Done':
return True
return False
except Exception, e:
ModuleHelper.WriteIntoDebugLogger("__setAddressFilterMode() Error: " + str(e))
|
[
"def",
"__setAddressfilterMode",
"(",
"self",
",",
"mode",
")",
":",
"print",
"'call setAddressFilterMode() '",
"+",
"mode",
"try",
":",
"cmd",
"=",
"'macfilter addr '",
"+",
"mode",
"if",
"self",
".",
"__sendCommand",
"(",
"cmd",
")",
"[",
"0",
"]",
"==",
"'Done'",
":",
"return",
"True",
"return",
"False",
"except",
"Exception",
",",
"e",
":",
"ModuleHelper",
".",
"WriteIntoDebugLogger",
"(",
"\"__setAddressFilterMode() Error: \"",
"+",
"str",
"(",
"e",
")",
")"
] |
set address filter mode
Returns:
True: successfully set the address filter mode.
False: failed to set the address filter mode.
|
[
"set",
"address",
"filter",
"mode"
] |
python
|
train
|
OLC-Bioinformatics/sipprverse
|
cgecore/utility.py
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/utility.py#L471-L484
|
def file_unzipper(directory):
""" This function will unzip all files in the runroot directory and
subdirectories
"""
debug.log("Unzipping directory (%s)..."%directory)
#FINDING AND UNZIPPING ZIPPED FILES
for root, dirs, files in os.walk(directory, topdown=False):
if root != "":
orig_dir = os.getcwd()
os.chdir(directory)
Popen('gunzip -q -f *.gz > /dev/null 2>&1', shell=True).wait()
Popen('unzip -qq -o "*.zip" > /dev/null 2>&1', shell=True).wait()
Popen('rm -f *.zip > /dev/null 2>&1', shell=True).wait()
os.chdir(orig_dir)
|
[
"def",
"file_unzipper",
"(",
"directory",
")",
":",
"debug",
".",
"log",
"(",
"\"Unzipping directory (%s)...\"",
"%",
"directory",
")",
"#FINDING AND UNZIPPING ZIPPED FILES",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"directory",
",",
"topdown",
"=",
"False",
")",
":",
"if",
"root",
"!=",
"\"\"",
":",
"orig_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"directory",
")",
"Popen",
"(",
"'gunzip -q -f *.gz > /dev/null 2>&1'",
",",
"shell",
"=",
"True",
")",
".",
"wait",
"(",
")",
"Popen",
"(",
"'unzip -qq -o \"*.zip\" > /dev/null 2>&1'",
",",
"shell",
"=",
"True",
")",
".",
"wait",
"(",
")",
"Popen",
"(",
"'rm -f *.zip > /dev/null 2>&1'",
",",
"shell",
"=",
"True",
")",
".",
"wait",
"(",
")",
"os",
".",
"chdir",
"(",
"orig_dir",
")"
] |
This function will unzip all files in the runroot directory and
subdirectories
|
[
"This",
"function",
"will",
"unzip",
"all",
"files",
"in",
"the",
"runroot",
"directory",
"and",
"subdirectories"
] |
python
|
train
|
rigetti/quantumflow
|
quantumflow/visualization.py
|
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/visualization.py#L47-L216
|
def circuit_to_latex(circ: Circuit,
qubits: Qubits = None,
document: bool = True) -> str:
"""
Create an image of a quantum circuit in LaTeX.
Can currently draw X, Y, Z, H, T, S, T_H, S_H, RX, RY, RZ, TX, TY, TZ,
TH, CNOT, CZ, SWAP, ISWAP, CCNOT, CSWAP, XX, YY, ZZ, CAN, P0 and P1 gates,
and the RESET operation.
Args:
circ: A quantum Circuit
qubits: Optional qubit list to specify qubit order
document: If false, just the qcircuit latex is returned. Else the
circuit image is wrapped in a standalone LaTeX document
ready for typesetting.
Returns:
A LaTeX string representation of the circuit.
Raises:
NotImplementedError: For unsupported gates.
Refs:
LaTeX Qcircuit package
(https://arxiv.org/pdf/quant-ph/0406003).
"""
if qubits is None:
qubits = circ.qubits
N = len(qubits)
qubit_idx = dict(zip(qubits, range(N)))
layers = _display_layers(circ, qubits)
layer_code = []
code = [r'\lstick{' + str(q) + r'}' for q in qubits]
layer_code.append(code)
def _two_qubit_gate(top, bot, label):
if bot-top == 1:
code_top = r'\multigate{1}{%s}' % label
code_bot = r'\ghost{%s}' % label
else:
code_top = r'\sgate{%s}{%s}' % (label, str(bot - top))
code_bot = r'\gate{%s}' % (label)
return code_top, code_bot
for layer in layers.elements:
code = [r'\qw'] * N
assert isinstance(layer, Circuit)
for gate in layer:
idx = [qubit_idx[q] for q in gate.qubits]
name = gate.name
if isinstance(gate, I):
pass
elif(len(idx) == 1) and name in ['X', 'Y', 'Z', 'H', 'T', 'S']:
code[idx[0]] = r'\gate{' + gate.name + '}'
elif isinstance(gate, S_H):
code[idx[0]] = r'\gate{S^\dag}'
elif isinstance(gate, T_H):
code[idx[0]] = r'\gate{T^\dag}'
elif isinstance(gate, RX):
theta = _latex_format(gate.params['theta'])
code[idx[0]] = r'\gate{R_x(%s)}' % theta
elif isinstance(gate, RY):
theta = _latex_format(gate.params['theta'])
code[idx[0]] = r'\gate{R_y(%s)}' % theta
elif isinstance(gate, RZ):
theta = _latex_format(gate.params['theta'])
code[idx[0]] = r'\gate{R_z(%s)}' % theta
elif isinstance(gate, TX):
t = _latex_format(gate.params['t'])
code[idx[0]] = r'\gate{X^{%s}}' % t
elif isinstance(gate, TY):
t = _latex_format(gate.params['t'])
code[idx[0]] = r'\gate{Y^{%s}}' % t
elif isinstance(gate, TZ):
t = _latex_format(gate.params['t'])
code[idx[0]] = r'\gate{Z^{%s}}' % t
elif isinstance(gate, TH):
t = _latex_format(gate.params['t'])
code[idx[0]] = r'\gate{H^{%s}}' % t
elif isinstance(gate, CNOT):
code[idx[0]] = r'\ctrl{' + str(idx[1] - idx[0]) + '}'
code[idx[1]] = r'\targ'
elif isinstance(gate, XX):
label = r'X\!X^{%s}' % _latex_format(gate.params['t'])
top = min(idx)
bot = max(idx)
code[top], code[bot] = _two_qubit_gate(top, bot, label)
elif isinstance(gate, YY):
label = r'Y\!Y^{%s}' % _latex_format(gate.params['t'])
top = min(idx)
bot = max(idx)
code[top], code[bot] = _two_qubit_gate(top, bot, label)
elif isinstance(gate, ZZ):
label = r'Z\!Z^{%s}' % _latex_format(gate.params['t'])
top = min(idx)
bot = max(idx)
code[top], code[bot] = _two_qubit_gate(top, bot, label)
elif isinstance(gate, CPHASE):
theta = _latex_format(gate.params['theta'])
label = r'\text{CPHASE}({%s})' % theta
top = min(idx)
bot = max(idx)
code[top], code[bot] = _two_qubit_gate(top, bot, label)
elif isinstance(gate, PSWAP):
theta = _latex_format(gate.params['theta'])
label = r'\text{PSWAP}({%s})' % theta
top = min(idx)
bot = max(idx)
code[top], code[bot] = _two_qubit_gate(top, bot, label)
elif isinstance(gate, CZ):
code[idx[0]] = r'\ctrl{' + str(idx[1] - idx[0]) + '}'
code[idx[1]] = r'\ctrl{' + str(idx[0] - idx[1]) + '}'
elif isinstance(gate, SWAP):
code[idx[0]] = r'\qswap \qwx[' + str(idx[1] - idx[0]) + ']'
code[idx[1]] = r'\qswap'
elif isinstance(gate, CAN):
tx = _latex_format(gate.params['tx'])
ty = _latex_format(gate.params['ty'])
tz = _latex_format(gate.params['tz'])
label = r'{\text{CAN}(%s, %s, %s)}' % (tx, ty, tz)
top = min(idx)
bot = max(idx)
code[top], code[bot] = _two_qubit_gate(top, bot, label)
elif isinstance(gate, ISWAP):
label = r'{ \text{iSWAP}}'
top = min(idx)
bot = max(idx)
code[top], code[bot] = _two_qubit_gate(top, bot, label)
elif isinstance(gate, CCNOT):
code[idx[0]] = r'\ctrl{' + str(idx[1]-idx[0]) + '}'
code[idx[1]] = r'\ctrl{' + str(idx[2]-idx[1]) + '}'
code[idx[2]] = r'\targ'
elif isinstance(gate, CSWAP):
code[idx[0]] = r'\ctrl{' + str(idx[1]-idx[0]) + '}'
code[idx[1]] = r'\qswap \qwx[' + str(idx[2] - idx[1]) + ']'
code[idx[2]] = r'\qswap'
elif isinstance(gate, P0):
code[idx[0]] = r'\push{\ket{0}\!\!\bra{0}} \qw'
elif isinstance(gate, P1):
code[idx[0]] = r'\push{\ket{1}\!\!\bra{1}} \qw'
elif isinstance(gate, Reset):
for i in idx:
code[i] = r'\push{\rule{0.1em}{0.5em}\, \ket{0}\,} \qw'
elif isinstance(gate, Measure):
code[idx[0]] = r'\meter'
else:
raise NotImplementedError(str(gate))
layer_code.append(code)
code = [r'\qw'] * N
layer_code.append(code)
latex_lines = [''] * N
for line, wire in enumerate(zip(*layer_code)):
latex = '& ' + ' & '.join(wire)
if line < N - 1: # Not last line
latex += r' \\'
latex_lines[line] = latex
latex_code = _QCIRCUIT % '\n'.join(latex_lines)
if document:
latex_code = _DOCUMENT_HEADER + latex_code + _DOCUMENT_FOOTER
return latex_code
|
[
"def",
"circuit_to_latex",
"(",
"circ",
":",
"Circuit",
",",
"qubits",
":",
"Qubits",
"=",
"None",
",",
"document",
":",
"bool",
"=",
"True",
")",
"->",
"str",
":",
"if",
"qubits",
"is",
"None",
":",
"qubits",
"=",
"circ",
".",
"qubits",
"N",
"=",
"len",
"(",
"qubits",
")",
"qubit_idx",
"=",
"dict",
"(",
"zip",
"(",
"qubits",
",",
"range",
"(",
"N",
")",
")",
")",
"layers",
"=",
"_display_layers",
"(",
"circ",
",",
"qubits",
")",
"layer_code",
"=",
"[",
"]",
"code",
"=",
"[",
"r'\\lstick{'",
"+",
"str",
"(",
"q",
")",
"+",
"r'}'",
"for",
"q",
"in",
"qubits",
"]",
"layer_code",
".",
"append",
"(",
"code",
")",
"def",
"_two_qubit_gate",
"(",
"top",
",",
"bot",
",",
"label",
")",
":",
"if",
"bot",
"-",
"top",
"==",
"1",
":",
"code_top",
"=",
"r'\\multigate{1}{%s}'",
"%",
"label",
"code_bot",
"=",
"r'\\ghost{%s}'",
"%",
"label",
"else",
":",
"code_top",
"=",
"r'\\sgate{%s}{%s}'",
"%",
"(",
"label",
",",
"str",
"(",
"bot",
"-",
"top",
")",
")",
"code_bot",
"=",
"r'\\gate{%s}'",
"%",
"(",
"label",
")",
"return",
"code_top",
",",
"code_bot",
"for",
"layer",
"in",
"layers",
".",
"elements",
":",
"code",
"=",
"[",
"r'\\qw'",
"]",
"*",
"N",
"assert",
"isinstance",
"(",
"layer",
",",
"Circuit",
")",
"for",
"gate",
"in",
"layer",
":",
"idx",
"=",
"[",
"qubit_idx",
"[",
"q",
"]",
"for",
"q",
"in",
"gate",
".",
"qubits",
"]",
"name",
"=",
"gate",
".",
"name",
"if",
"isinstance",
"(",
"gate",
",",
"I",
")",
":",
"pass",
"elif",
"(",
"len",
"(",
"idx",
")",
"==",
"1",
")",
"and",
"name",
"in",
"[",
"'X'",
",",
"'Y'",
",",
"'Z'",
",",
"'H'",
",",
"'T'",
",",
"'S'",
"]",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\gate{'",
"+",
"gate",
".",
"name",
"+",
"'}'",
"elif",
"isinstance",
"(",
"gate",
",",
"S_H",
")",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\gate{S^\\dag}'",
"elif",
"isinstance",
"(",
"gate",
",",
"T_H",
")",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\gate{T^\\dag}'",
"elif",
"isinstance",
"(",
"gate",
",",
"RX",
")",
":",
"theta",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'theta'",
"]",
")",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\gate{R_x(%s)}'",
"%",
"theta",
"elif",
"isinstance",
"(",
"gate",
",",
"RY",
")",
":",
"theta",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'theta'",
"]",
")",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\gate{R_y(%s)}'",
"%",
"theta",
"elif",
"isinstance",
"(",
"gate",
",",
"RZ",
")",
":",
"theta",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'theta'",
"]",
")",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\gate{R_z(%s)}'",
"%",
"theta",
"elif",
"isinstance",
"(",
"gate",
",",
"TX",
")",
":",
"t",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'t'",
"]",
")",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\gate{X^{%s}}'",
"%",
"t",
"elif",
"isinstance",
"(",
"gate",
",",
"TY",
")",
":",
"t",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'t'",
"]",
")",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\gate{Y^{%s}}'",
"%",
"t",
"elif",
"isinstance",
"(",
"gate",
",",
"TZ",
")",
":",
"t",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'t'",
"]",
")",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\gate{Z^{%s}}'",
"%",
"t",
"elif",
"isinstance",
"(",
"gate",
",",
"TH",
")",
":",
"t",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'t'",
"]",
")",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\gate{H^{%s}}'",
"%",
"t",
"elif",
"isinstance",
"(",
"gate",
",",
"CNOT",
")",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\ctrl{'",
"+",
"str",
"(",
"idx",
"[",
"1",
"]",
"-",
"idx",
"[",
"0",
"]",
")",
"+",
"'}'",
"code",
"[",
"idx",
"[",
"1",
"]",
"]",
"=",
"r'\\targ'",
"elif",
"isinstance",
"(",
"gate",
",",
"XX",
")",
":",
"label",
"=",
"r'X\\!X^{%s}'",
"%",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'t'",
"]",
")",
"top",
"=",
"min",
"(",
"idx",
")",
"bot",
"=",
"max",
"(",
"idx",
")",
"code",
"[",
"top",
"]",
",",
"code",
"[",
"bot",
"]",
"=",
"_two_qubit_gate",
"(",
"top",
",",
"bot",
",",
"label",
")",
"elif",
"isinstance",
"(",
"gate",
",",
"YY",
")",
":",
"label",
"=",
"r'Y\\!Y^{%s}'",
"%",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'t'",
"]",
")",
"top",
"=",
"min",
"(",
"idx",
")",
"bot",
"=",
"max",
"(",
"idx",
")",
"code",
"[",
"top",
"]",
",",
"code",
"[",
"bot",
"]",
"=",
"_two_qubit_gate",
"(",
"top",
",",
"bot",
",",
"label",
")",
"elif",
"isinstance",
"(",
"gate",
",",
"ZZ",
")",
":",
"label",
"=",
"r'Z\\!Z^{%s}'",
"%",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'t'",
"]",
")",
"top",
"=",
"min",
"(",
"idx",
")",
"bot",
"=",
"max",
"(",
"idx",
")",
"code",
"[",
"top",
"]",
",",
"code",
"[",
"bot",
"]",
"=",
"_two_qubit_gate",
"(",
"top",
",",
"bot",
",",
"label",
")",
"elif",
"isinstance",
"(",
"gate",
",",
"CPHASE",
")",
":",
"theta",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'theta'",
"]",
")",
"label",
"=",
"r'\\text{CPHASE}({%s})'",
"%",
"theta",
"top",
"=",
"min",
"(",
"idx",
")",
"bot",
"=",
"max",
"(",
"idx",
")",
"code",
"[",
"top",
"]",
",",
"code",
"[",
"bot",
"]",
"=",
"_two_qubit_gate",
"(",
"top",
",",
"bot",
",",
"label",
")",
"elif",
"isinstance",
"(",
"gate",
",",
"PSWAP",
")",
":",
"theta",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'theta'",
"]",
")",
"label",
"=",
"r'\\text{PSWAP}({%s})'",
"%",
"theta",
"top",
"=",
"min",
"(",
"idx",
")",
"bot",
"=",
"max",
"(",
"idx",
")",
"code",
"[",
"top",
"]",
",",
"code",
"[",
"bot",
"]",
"=",
"_two_qubit_gate",
"(",
"top",
",",
"bot",
",",
"label",
")",
"elif",
"isinstance",
"(",
"gate",
",",
"CZ",
")",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\ctrl{'",
"+",
"str",
"(",
"idx",
"[",
"1",
"]",
"-",
"idx",
"[",
"0",
"]",
")",
"+",
"'}'",
"code",
"[",
"idx",
"[",
"1",
"]",
"]",
"=",
"r'\\ctrl{'",
"+",
"str",
"(",
"idx",
"[",
"0",
"]",
"-",
"idx",
"[",
"1",
"]",
")",
"+",
"'}'",
"elif",
"isinstance",
"(",
"gate",
",",
"SWAP",
")",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\qswap \\qwx['",
"+",
"str",
"(",
"idx",
"[",
"1",
"]",
"-",
"idx",
"[",
"0",
"]",
")",
"+",
"']'",
"code",
"[",
"idx",
"[",
"1",
"]",
"]",
"=",
"r'\\qswap'",
"elif",
"isinstance",
"(",
"gate",
",",
"CAN",
")",
":",
"tx",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'tx'",
"]",
")",
"ty",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'ty'",
"]",
")",
"tz",
"=",
"_latex_format",
"(",
"gate",
".",
"params",
"[",
"'tz'",
"]",
")",
"label",
"=",
"r'{\\text{CAN}(%s, %s, %s)}'",
"%",
"(",
"tx",
",",
"ty",
",",
"tz",
")",
"top",
"=",
"min",
"(",
"idx",
")",
"bot",
"=",
"max",
"(",
"idx",
")",
"code",
"[",
"top",
"]",
",",
"code",
"[",
"bot",
"]",
"=",
"_two_qubit_gate",
"(",
"top",
",",
"bot",
",",
"label",
")",
"elif",
"isinstance",
"(",
"gate",
",",
"ISWAP",
")",
":",
"label",
"=",
"r'{ \\text{iSWAP}}'",
"top",
"=",
"min",
"(",
"idx",
")",
"bot",
"=",
"max",
"(",
"idx",
")",
"code",
"[",
"top",
"]",
",",
"code",
"[",
"bot",
"]",
"=",
"_two_qubit_gate",
"(",
"top",
",",
"bot",
",",
"label",
")",
"elif",
"isinstance",
"(",
"gate",
",",
"CCNOT",
")",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\ctrl{'",
"+",
"str",
"(",
"idx",
"[",
"1",
"]",
"-",
"idx",
"[",
"0",
"]",
")",
"+",
"'}'",
"code",
"[",
"idx",
"[",
"1",
"]",
"]",
"=",
"r'\\ctrl{'",
"+",
"str",
"(",
"idx",
"[",
"2",
"]",
"-",
"idx",
"[",
"1",
"]",
")",
"+",
"'}'",
"code",
"[",
"idx",
"[",
"2",
"]",
"]",
"=",
"r'\\targ'",
"elif",
"isinstance",
"(",
"gate",
",",
"CSWAP",
")",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\ctrl{'",
"+",
"str",
"(",
"idx",
"[",
"1",
"]",
"-",
"idx",
"[",
"0",
"]",
")",
"+",
"'}'",
"code",
"[",
"idx",
"[",
"1",
"]",
"]",
"=",
"r'\\qswap \\qwx['",
"+",
"str",
"(",
"idx",
"[",
"2",
"]",
"-",
"idx",
"[",
"1",
"]",
")",
"+",
"']'",
"code",
"[",
"idx",
"[",
"2",
"]",
"]",
"=",
"r'\\qswap'",
"elif",
"isinstance",
"(",
"gate",
",",
"P0",
")",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\push{\\ket{0}\\!\\!\\bra{0}} \\qw'",
"elif",
"isinstance",
"(",
"gate",
",",
"P1",
")",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\push{\\ket{1}\\!\\!\\bra{1}} \\qw'",
"elif",
"isinstance",
"(",
"gate",
",",
"Reset",
")",
":",
"for",
"i",
"in",
"idx",
":",
"code",
"[",
"i",
"]",
"=",
"r'\\push{\\rule{0.1em}{0.5em}\\, \\ket{0}\\,} \\qw'",
"elif",
"isinstance",
"(",
"gate",
",",
"Measure",
")",
":",
"code",
"[",
"idx",
"[",
"0",
"]",
"]",
"=",
"r'\\meter'",
"else",
":",
"raise",
"NotImplementedError",
"(",
"str",
"(",
"gate",
")",
")",
"layer_code",
".",
"append",
"(",
"code",
")",
"code",
"=",
"[",
"r'\\qw'",
"]",
"*",
"N",
"layer_code",
".",
"append",
"(",
"code",
")",
"latex_lines",
"=",
"[",
"''",
"]",
"*",
"N",
"for",
"line",
",",
"wire",
"in",
"enumerate",
"(",
"zip",
"(",
"*",
"layer_code",
")",
")",
":",
"latex",
"=",
"'& '",
"+",
"' & '",
".",
"join",
"(",
"wire",
")",
"if",
"line",
"<",
"N",
"-",
"1",
":",
"# Not last line",
"latex",
"+=",
"r' \\\\'",
"latex_lines",
"[",
"line",
"]",
"=",
"latex",
"latex_code",
"=",
"_QCIRCUIT",
"%",
"'\\n'",
".",
"join",
"(",
"latex_lines",
")",
"if",
"document",
":",
"latex_code",
"=",
"_DOCUMENT_HEADER",
"+",
"latex_code",
"+",
"_DOCUMENT_FOOTER",
"return",
"latex_code"
] |
Create an image of a quantum circuit in LaTeX.
Can currently draw X, Y, Z, H, T, S, T_H, S_H, RX, RY, RZ, TX, TY, TZ,
TH, CNOT, CZ, SWAP, ISWAP, CCNOT, CSWAP, XX, YY, ZZ, CAN, P0 and P1 gates,
and the RESET operation.
Args:
circ: A quantum Circuit
qubits: Optional qubit list to specify qubit order
document: If false, just the qcircuit latex is returned. Else the
circuit image is wrapped in a standalone LaTeX document
ready for typesetting.
Returns:
A LaTeX string representation of the circuit.
Raises:
NotImplementedError: For unsupported gates.
Refs:
LaTeX Qcircuit package
(https://arxiv.org/pdf/quant-ph/0406003).
|
[
"Create",
"an",
"image",
"of",
"a",
"quantum",
"circuit",
"in",
"LaTeX",
"."
] |
python
|
train
|
juju/charm-helpers
|
charmhelpers/contrib/amulet/utils.py
|
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L703-L728
|
def file_contents_safe(self, sentry_unit, file_name,
max_wait=60, fatal=False):
"""Get file contents from a sentry unit. Wrap amulet file_contents
with retry logic to address races where a file checks as existing,
but no longer exists by the time file_contents is called.
Return None if file not found. Optionally raise if fatal is True."""
unit_name = sentry_unit.info['unit_name']
file_contents = False
tries = 0
while not file_contents and tries < (max_wait / 4):
try:
file_contents = sentry_unit.file_contents(file_name)
except IOError:
self.log.debug('Attempt {} to open file {} from {} '
'failed'.format(tries, file_name,
unit_name))
time.sleep(4)
tries += 1
if file_contents:
return file_contents
elif not fatal:
return None
elif fatal:
msg = 'Failed to get file contents from unit.'
amulet.raise_status(amulet.FAIL, msg)
|
[
"def",
"file_contents_safe",
"(",
"self",
",",
"sentry_unit",
",",
"file_name",
",",
"max_wait",
"=",
"60",
",",
"fatal",
"=",
"False",
")",
":",
"unit_name",
"=",
"sentry_unit",
".",
"info",
"[",
"'unit_name'",
"]",
"file_contents",
"=",
"False",
"tries",
"=",
"0",
"while",
"not",
"file_contents",
"and",
"tries",
"<",
"(",
"max_wait",
"/",
"4",
")",
":",
"try",
":",
"file_contents",
"=",
"sentry_unit",
".",
"file_contents",
"(",
"file_name",
")",
"except",
"IOError",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Attempt {} to open file {} from {} '",
"'failed'",
".",
"format",
"(",
"tries",
",",
"file_name",
",",
"unit_name",
")",
")",
"time",
".",
"sleep",
"(",
"4",
")",
"tries",
"+=",
"1",
"if",
"file_contents",
":",
"return",
"file_contents",
"elif",
"not",
"fatal",
":",
"return",
"None",
"elif",
"fatal",
":",
"msg",
"=",
"'Failed to get file contents from unit.'",
"amulet",
".",
"raise_status",
"(",
"amulet",
".",
"FAIL",
",",
"msg",
")"
] |
Get file contents from a sentry unit. Wrap amulet file_contents
with retry logic to address races where a file checks as existing,
but no longer exists by the time file_contents is called.
Return None if file not found. Optionally raise if fatal is True.
|
[
"Get",
"file",
"contents",
"from",
"a",
"sentry",
"unit",
".",
"Wrap",
"amulet",
"file_contents",
"with",
"retry",
"logic",
"to",
"address",
"races",
"where",
"a",
"file",
"checks",
"as",
"existing",
"but",
"no",
"longer",
"exists",
"by",
"the",
"time",
"file_contents",
"is",
"called",
".",
"Return",
"None",
"if",
"file",
"not",
"found",
".",
"Optionally",
"raise",
"if",
"fatal",
"is",
"True",
"."
] |
python
|
train
|
FactoryBoy/factory_boy
|
factory/django.py
|
https://github.com/FactoryBoy/factory_boy/blob/edaa7c7f5a14065b229927903bd7989cc93cd069/factory/django.py#L54-L66
|
def _lazy_load_get_model():
"""Lazy loading of get_model.
get_model loads django.conf.settings, which may fail if
the settings haven't been configured yet.
"""
if django is None:
def _get_model(app, model):
raise import_failure
else:
from django import apps as django_apps
_get_model = django_apps.apps.get_model
_LAZY_LOADS['get_model'] = _get_model
|
[
"def",
"_lazy_load_get_model",
"(",
")",
":",
"if",
"django",
"is",
"None",
":",
"def",
"_get_model",
"(",
"app",
",",
"model",
")",
":",
"raise",
"import_failure",
"else",
":",
"from",
"django",
"import",
"apps",
"as",
"django_apps",
"_get_model",
"=",
"django_apps",
".",
"apps",
".",
"get_model",
"_LAZY_LOADS",
"[",
"'get_model'",
"]",
"=",
"_get_model"
] |
Lazy loading of get_model.
get_model loads django.conf.settings, which may fail if
the settings haven't been configured yet.
|
[
"Lazy",
"loading",
"of",
"get_model",
"."
] |
python
|
train
|
carta/ldap_tools
|
src/ldap_tools/key.py
|
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/key.py#L66-L75
|
def install(self): # pragma: no cover
"""Install/download ssh keys from LDAP for consumption by SSH."""
keys = self.get_keys_from_ldap()
for user, ssh_keys in keys.items():
user_dir = API.__authorized_keys_path(user)
if not os.path.isdir(user_dir):
os.makedirs(user_dir)
authorized_keys_file = os.path.join(user_dir, 'authorized_keys')
with open(authorized_keys_file, 'w') as FILE:
print("\n".join([k.decode() for k in ssh_keys]), file=FILE)
|
[
"def",
"install",
"(",
"self",
")",
":",
"# pragma: no cover",
"keys",
"=",
"self",
".",
"get_keys_from_ldap",
"(",
")",
"for",
"user",
",",
"ssh_keys",
"in",
"keys",
".",
"items",
"(",
")",
":",
"user_dir",
"=",
"API",
".",
"__authorized_keys_path",
"(",
"user",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"user_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"user_dir",
")",
"authorized_keys_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"user_dir",
",",
"'authorized_keys'",
")",
"with",
"open",
"(",
"authorized_keys_file",
",",
"'w'",
")",
"as",
"FILE",
":",
"print",
"(",
"\"\\n\"",
".",
"join",
"(",
"[",
"k",
".",
"decode",
"(",
")",
"for",
"k",
"in",
"ssh_keys",
"]",
")",
",",
"file",
"=",
"FILE",
")"
] |
Install/download ssh keys from LDAP for consumption by SSH.
|
[
"Install",
"/",
"download",
"ssh",
"keys",
"from",
"LDAP",
"for",
"consumption",
"by",
"SSH",
"."
] |
python
|
train
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/build/Factory.py
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/build/Factory.py#L37-L50
|
def register(self, locator, factory):
"""
Registers a component using a factory method.
:param locator: a locator to identify component to be created.
:param factory: a factory function that receives a locator and returns a created component.
"""
if locator == None:
raise Exception("Locator cannot be null")
if factory == None:
raise Exception("Factory cannot be null")
self._registrations.append(Registration(locator, factory))
|
[
"def",
"register",
"(",
"self",
",",
"locator",
",",
"factory",
")",
":",
"if",
"locator",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Locator cannot be null\"",
")",
"if",
"factory",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Factory cannot be null\"",
")",
"self",
".",
"_registrations",
".",
"append",
"(",
"Registration",
"(",
"locator",
",",
"factory",
")",
")"
] |
Registers a component using a factory method.
:param locator: a locator to identify component to be created.
:param factory: a factory function that receives a locator and returns a created component.
|
[
"Registers",
"a",
"component",
"using",
"a",
"factory",
"method",
"."
] |
python
|
train
|
lowandrew/OLCTools
|
metagenomefilter/automateCLARK.py
|
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/metagenomefilter/automateCLARK.py#L62-L78
|
def clean_sequences(self):
"""Removes reads/contigs that contain plasmids, and masks phage sequences."""
logging.info('Removing plasmids and masking phages')
plasmid_db = os.path.join(self.reffilepath, 'plasmidfinder', 'plasmid_database.fa')
phage_db = os.path.join(self.reffilepath, 'prophages', 'combinedtargets.tfa')
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
plasmid_removal = 'bbduk.sh ref={} in={} out={} overwrite'\
.format(plasmid_db, sample.general.combined, sample.general.combined.replace('.f', '_noplasmid.f'))
subprocess.call(plasmid_removal, shell=True, stdout=self.devnull, stderr=self.devnull)
phage_masking = 'bbduk.sh ref={} in={} out={} kmask=N overwrite'\
.format(phage_db, sample.general.combined.replace('.f', '_noplasmid.f'),
sample.general.combined.replace('.f', '_clean.f'))
subprocess.call(phage_masking, shell=True, stdout=self.devnull, stderr=self.devnull)
os.remove(sample.general.combined)
os.rename(sample.general.combined.replace('.f', '_clean.f'), sample.general.combined)
os.remove(sample.general.combined.replace('.f', '_noplasmid.f'))
|
[
"def",
"clean_sequences",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Removing plasmids and masking phages'",
")",
"plasmid_db",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reffilepath",
",",
"'plasmidfinder'",
",",
"'plasmid_database.fa'",
")",
"phage_db",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reffilepath",
",",
"'prophages'",
",",
"'combinedtargets.tfa'",
")",
"with",
"progressbar",
"(",
"self",
".",
"runmetadata",
".",
"samples",
")",
"as",
"bar",
":",
"for",
"sample",
"in",
"bar",
":",
"plasmid_removal",
"=",
"'bbduk.sh ref={} in={} out={} overwrite'",
".",
"format",
"(",
"plasmid_db",
",",
"sample",
".",
"general",
".",
"combined",
",",
"sample",
".",
"general",
".",
"combined",
".",
"replace",
"(",
"'.f'",
",",
"'_noplasmid.f'",
")",
")",
"subprocess",
".",
"call",
"(",
"plasmid_removal",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"self",
".",
"devnull",
",",
"stderr",
"=",
"self",
".",
"devnull",
")",
"phage_masking",
"=",
"'bbduk.sh ref={} in={} out={} kmask=N overwrite'",
".",
"format",
"(",
"phage_db",
",",
"sample",
".",
"general",
".",
"combined",
".",
"replace",
"(",
"'.f'",
",",
"'_noplasmid.f'",
")",
",",
"sample",
".",
"general",
".",
"combined",
".",
"replace",
"(",
"'.f'",
",",
"'_clean.f'",
")",
")",
"subprocess",
".",
"call",
"(",
"phage_masking",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"self",
".",
"devnull",
",",
"stderr",
"=",
"self",
".",
"devnull",
")",
"os",
".",
"remove",
"(",
"sample",
".",
"general",
".",
"combined",
")",
"os",
".",
"rename",
"(",
"sample",
".",
"general",
".",
"combined",
".",
"replace",
"(",
"'.f'",
",",
"'_clean.f'",
")",
",",
"sample",
".",
"general",
".",
"combined",
")",
"os",
".",
"remove",
"(",
"sample",
".",
"general",
".",
"combined",
".",
"replace",
"(",
"'.f'",
",",
"'_noplasmid.f'",
")",
")"
] |
Removes reads/contigs that contain plasmids, and masks phage sequences.
|
[
"Removes",
"reads",
"/",
"contigs",
"that",
"contain",
"plasmids",
"and",
"masks",
"phage",
"sequences",
"."
] |
python
|
train
|
codelv/enaml-native
|
src/enamlnative/widgets/web_view.py
|
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/widgets/web_view.py#L105-L115
|
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
if change['type'] == 'event':
name = 'do_'+change['name']
if hasattr(self.proxy, name):
handler = getattr(self.proxy, name)
handler()
else:
super(WebView, self)._update_proxy(change)
|
[
"def",
"_update_proxy",
"(",
"self",
",",
"change",
")",
":",
"if",
"change",
"[",
"'type'",
"]",
"==",
"'event'",
":",
"name",
"=",
"'do_'",
"+",
"change",
"[",
"'name'",
"]",
"if",
"hasattr",
"(",
"self",
".",
"proxy",
",",
"name",
")",
":",
"handler",
"=",
"getattr",
"(",
"self",
".",
"proxy",
",",
"name",
")",
"handler",
"(",
")",
"else",
":",
"super",
"(",
"WebView",
",",
"self",
")",
".",
"_update_proxy",
"(",
"change",
")"
] |
An observer which sends the state change to the proxy.
|
[
"An",
"observer",
"which",
"sends",
"the",
"state",
"change",
"to",
"the",
"proxy",
"."
] |
python
|
train
|
tyarkoni/pliers
|
pliers/graph.py
|
https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/graph.py#L237-L298
|
def draw(self, filename, color=True):
''' Render a plot of the graph via pygraphviz.
Args:
filename (str): Path to save the generated image to.
color (bool): If True, will color graph nodes based on their type,
otherwise will draw a black-and-white graph.
'''
verify_dependencies(['pgv'])
if not hasattr(self, '_results'):
raise RuntimeError("Graph cannot be drawn before it is executed. "
"Try calling run() first.")
g = pgv.AGraph(directed=True)
g.node_attr['colorscheme'] = 'set312'
for elem in self._results:
if not hasattr(elem, 'history'):
continue
log = elem.history
while log:
# Configure nodes
source_from = log.parent[6] if log.parent else ''
s_node = hash((source_from, log[2]))
s_color = stim_list.index(log[2])
s_color = s_color % 12 + 1
t_node = hash((log[6], log[7]))
t_style = 'filled,' if color else ''
t_style += 'dotted' if log.implicit else ''
if log[6].endswith('Extractor'):
t_color = '#0082c8'
elif log[6].endswith('Filter'):
t_color = '#e6194b'
else:
t_color = '#3cb44b'
r_node = hash((log[6], log[5]))
r_color = stim_list.index(log[5])
r_color = r_color % 12 + 1
# Add nodes
if color:
g.add_node(s_node, label=log[2], shape='ellipse',
style='filled', fillcolor=s_color)
g.add_node(t_node, label=log[6], shape='box',
style=t_style, fillcolor=t_color)
g.add_node(r_node, label=log[5], shape='ellipse',
style='filled', fillcolor=r_color)
else:
g.add_node(s_node, label=log[2], shape='ellipse')
g.add_node(t_node, label=log[6], shape='box',
style=t_style)
g.add_node(r_node, label=log[5], shape='ellipse')
# Add edges
g.add_edge(s_node, t_node, style=t_style)
g.add_edge(t_node, r_node, style=t_style)
log = log.parent
g.draw(filename, prog='dot')
|
[
"def",
"draw",
"(",
"self",
",",
"filename",
",",
"color",
"=",
"True",
")",
":",
"verify_dependencies",
"(",
"[",
"'pgv'",
"]",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_results'",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Graph cannot be drawn before it is executed. \"",
"\"Try calling run() first.\"",
")",
"g",
"=",
"pgv",
".",
"AGraph",
"(",
"directed",
"=",
"True",
")",
"g",
".",
"node_attr",
"[",
"'colorscheme'",
"]",
"=",
"'set312'",
"for",
"elem",
"in",
"self",
".",
"_results",
":",
"if",
"not",
"hasattr",
"(",
"elem",
",",
"'history'",
")",
":",
"continue",
"log",
"=",
"elem",
".",
"history",
"while",
"log",
":",
"# Configure nodes",
"source_from",
"=",
"log",
".",
"parent",
"[",
"6",
"]",
"if",
"log",
".",
"parent",
"else",
"''",
"s_node",
"=",
"hash",
"(",
"(",
"source_from",
",",
"log",
"[",
"2",
"]",
")",
")",
"s_color",
"=",
"stim_list",
".",
"index",
"(",
"log",
"[",
"2",
"]",
")",
"s_color",
"=",
"s_color",
"%",
"12",
"+",
"1",
"t_node",
"=",
"hash",
"(",
"(",
"log",
"[",
"6",
"]",
",",
"log",
"[",
"7",
"]",
")",
")",
"t_style",
"=",
"'filled,'",
"if",
"color",
"else",
"''",
"t_style",
"+=",
"'dotted'",
"if",
"log",
".",
"implicit",
"else",
"''",
"if",
"log",
"[",
"6",
"]",
".",
"endswith",
"(",
"'Extractor'",
")",
":",
"t_color",
"=",
"'#0082c8'",
"elif",
"log",
"[",
"6",
"]",
".",
"endswith",
"(",
"'Filter'",
")",
":",
"t_color",
"=",
"'#e6194b'",
"else",
":",
"t_color",
"=",
"'#3cb44b'",
"r_node",
"=",
"hash",
"(",
"(",
"log",
"[",
"6",
"]",
",",
"log",
"[",
"5",
"]",
")",
")",
"r_color",
"=",
"stim_list",
".",
"index",
"(",
"log",
"[",
"5",
"]",
")",
"r_color",
"=",
"r_color",
"%",
"12",
"+",
"1",
"# Add nodes",
"if",
"color",
":",
"g",
".",
"add_node",
"(",
"s_node",
",",
"label",
"=",
"log",
"[",
"2",
"]",
",",
"shape",
"=",
"'ellipse'",
",",
"style",
"=",
"'filled'",
",",
"fillcolor",
"=",
"s_color",
")",
"g",
".",
"add_node",
"(",
"t_node",
",",
"label",
"=",
"log",
"[",
"6",
"]",
",",
"shape",
"=",
"'box'",
",",
"style",
"=",
"t_style",
",",
"fillcolor",
"=",
"t_color",
")",
"g",
".",
"add_node",
"(",
"r_node",
",",
"label",
"=",
"log",
"[",
"5",
"]",
",",
"shape",
"=",
"'ellipse'",
",",
"style",
"=",
"'filled'",
",",
"fillcolor",
"=",
"r_color",
")",
"else",
":",
"g",
".",
"add_node",
"(",
"s_node",
",",
"label",
"=",
"log",
"[",
"2",
"]",
",",
"shape",
"=",
"'ellipse'",
")",
"g",
".",
"add_node",
"(",
"t_node",
",",
"label",
"=",
"log",
"[",
"6",
"]",
",",
"shape",
"=",
"'box'",
",",
"style",
"=",
"t_style",
")",
"g",
".",
"add_node",
"(",
"r_node",
",",
"label",
"=",
"log",
"[",
"5",
"]",
",",
"shape",
"=",
"'ellipse'",
")",
"# Add edges",
"g",
".",
"add_edge",
"(",
"s_node",
",",
"t_node",
",",
"style",
"=",
"t_style",
")",
"g",
".",
"add_edge",
"(",
"t_node",
",",
"r_node",
",",
"style",
"=",
"t_style",
")",
"log",
"=",
"log",
".",
"parent",
"g",
".",
"draw",
"(",
"filename",
",",
"prog",
"=",
"'dot'",
")"
] |
Render a plot of the graph via pygraphviz.
Args:
filename (str): Path to save the generated image to.
color (bool): If True, will color graph nodes based on their type,
otherwise will draw a black-and-white graph.
|
[
"Render",
"a",
"plot",
"of",
"the",
"graph",
"via",
"pygraphviz",
"."
] |
python
|
train
|
ForensicArtifacts/artifacts
|
tools/stats.py
|
https://github.com/ForensicArtifacts/artifacts/blob/044a63bfb4448af33d085c69066c80f9505ae7ca/tools/stats.py#L81-L112
|
def BuildStats(self):
"""Builds the statistics."""
artifact_reader = reader.YamlArtifactsReader()
self.label_counts = {}
self.os_counts = {}
self.path_count = 0
self.reg_key_count = 0
self.source_type_counts = {}
self.total_count = 0
for artifact_definition in artifact_reader.ReadDirectory('data'):
if hasattr(artifact_definition, 'labels'):
for label in artifact_definition.labels:
self.label_counts[label] = self.label_counts.get(label, 0) + 1
for source in artifact_definition.sources:
self.total_count += 1
source_type = source.type_indicator
self.source_type_counts[source_type] = self.source_type_counts.get(
source_type, 0) + 1
if source_type == definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY:
self.reg_key_count += len(source.keys)
elif source_type == definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE:
self.reg_key_count += len(source.key_value_pairs)
elif source_type in (definitions.TYPE_INDICATOR_FILE,
definitions.TYPE_INDICATOR_DIRECTORY):
self.path_count += len(source.paths)
os_list = source.supported_os
for os_str in os_list:
self.os_counts[os_str] = self.os_counts.get(os_str, 0) + 1
|
[
"def",
"BuildStats",
"(",
"self",
")",
":",
"artifact_reader",
"=",
"reader",
".",
"YamlArtifactsReader",
"(",
")",
"self",
".",
"label_counts",
"=",
"{",
"}",
"self",
".",
"os_counts",
"=",
"{",
"}",
"self",
".",
"path_count",
"=",
"0",
"self",
".",
"reg_key_count",
"=",
"0",
"self",
".",
"source_type_counts",
"=",
"{",
"}",
"self",
".",
"total_count",
"=",
"0",
"for",
"artifact_definition",
"in",
"artifact_reader",
".",
"ReadDirectory",
"(",
"'data'",
")",
":",
"if",
"hasattr",
"(",
"artifact_definition",
",",
"'labels'",
")",
":",
"for",
"label",
"in",
"artifact_definition",
".",
"labels",
":",
"self",
".",
"label_counts",
"[",
"label",
"]",
"=",
"self",
".",
"label_counts",
".",
"get",
"(",
"label",
",",
"0",
")",
"+",
"1",
"for",
"source",
"in",
"artifact_definition",
".",
"sources",
":",
"self",
".",
"total_count",
"+=",
"1",
"source_type",
"=",
"source",
".",
"type_indicator",
"self",
".",
"source_type_counts",
"[",
"source_type",
"]",
"=",
"self",
".",
"source_type_counts",
".",
"get",
"(",
"source_type",
",",
"0",
")",
"+",
"1",
"if",
"source_type",
"==",
"definitions",
".",
"TYPE_INDICATOR_WINDOWS_REGISTRY_KEY",
":",
"self",
".",
"reg_key_count",
"+=",
"len",
"(",
"source",
".",
"keys",
")",
"elif",
"source_type",
"==",
"definitions",
".",
"TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE",
":",
"self",
".",
"reg_key_count",
"+=",
"len",
"(",
"source",
".",
"key_value_pairs",
")",
"elif",
"source_type",
"in",
"(",
"definitions",
".",
"TYPE_INDICATOR_FILE",
",",
"definitions",
".",
"TYPE_INDICATOR_DIRECTORY",
")",
":",
"self",
".",
"path_count",
"+=",
"len",
"(",
"source",
".",
"paths",
")",
"os_list",
"=",
"source",
".",
"supported_os",
"for",
"os_str",
"in",
"os_list",
":",
"self",
".",
"os_counts",
"[",
"os_str",
"]",
"=",
"self",
".",
"os_counts",
".",
"get",
"(",
"os_str",
",",
"0",
")",
"+",
"1"
] |
Builds the statistics.
|
[
"Builds",
"the",
"statistics",
"."
] |
python
|
train
|
pypa/pipenv
|
pipenv/vendor/pathlib2/__init__.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pathlib2/__init__.py#L1138-L1161
|
def match(self, path_pattern):
"""
Return True if this path matches the given pattern.
"""
cf = self._flavour.casefold
path_pattern = cf(path_pattern)
drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
if not pat_parts:
raise ValueError("empty pattern")
if drv and drv != cf(self._drv):
return False
if root and root != cf(self._root):
return False
parts = self._cparts
if drv or root:
if len(pat_parts) != len(parts):
return False
pat_parts = pat_parts[1:]
elif len(pat_parts) > len(parts):
return False
for part, pat in zip(reversed(parts), reversed(pat_parts)):
if not fnmatch.fnmatchcase(part, pat):
return False
return True
|
[
"def",
"match",
"(",
"self",
",",
"path_pattern",
")",
":",
"cf",
"=",
"self",
".",
"_flavour",
".",
"casefold",
"path_pattern",
"=",
"cf",
"(",
"path_pattern",
")",
"drv",
",",
"root",
",",
"pat_parts",
"=",
"self",
".",
"_flavour",
".",
"parse_parts",
"(",
"(",
"path_pattern",
",",
")",
")",
"if",
"not",
"pat_parts",
":",
"raise",
"ValueError",
"(",
"\"empty pattern\"",
")",
"if",
"drv",
"and",
"drv",
"!=",
"cf",
"(",
"self",
".",
"_drv",
")",
":",
"return",
"False",
"if",
"root",
"and",
"root",
"!=",
"cf",
"(",
"self",
".",
"_root",
")",
":",
"return",
"False",
"parts",
"=",
"self",
".",
"_cparts",
"if",
"drv",
"or",
"root",
":",
"if",
"len",
"(",
"pat_parts",
")",
"!=",
"len",
"(",
"parts",
")",
":",
"return",
"False",
"pat_parts",
"=",
"pat_parts",
"[",
"1",
":",
"]",
"elif",
"len",
"(",
"pat_parts",
")",
">",
"len",
"(",
"parts",
")",
":",
"return",
"False",
"for",
"part",
",",
"pat",
"in",
"zip",
"(",
"reversed",
"(",
"parts",
")",
",",
"reversed",
"(",
"pat_parts",
")",
")",
":",
"if",
"not",
"fnmatch",
".",
"fnmatchcase",
"(",
"part",
",",
"pat",
")",
":",
"return",
"False",
"return",
"True"
] |
Return True if this path matches the given pattern.
|
[
"Return",
"True",
"if",
"this",
"path",
"matches",
"the",
"given",
"pattern",
"."
] |
python
|
train
|
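The match implementation above anchors relative patterns at the right-hand end of the path. Below is a minimal sketch of that right-anchored comparison using plain part lists and fnmatch; tail_match is an illustrative name, not part of pathlib2.

import fnmatch

def tail_match(parts, pat_parts):
    # Relative patterns are compared against the tail of the path parts,
    # mirroring the reversed() zip in PurePath.match above.
    if len(pat_parts) > len(parts):
        return False
    return all(fnmatch.fnmatchcase(part, pat)
               for part, pat in zip(reversed(parts), reversed(pat_parts)))

assert tail_match(['a', 'b', 'setup.py'], ['*.py'])           # matches the last part
assert tail_match(['a', 'b', 'setup.py'], ['b', '*.py'])      # matches the last two parts
assert not tail_match(['a', 'b', 'setup.py'], ['a', '*.py'])  # 'a' is not the immediate parent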
pyviz/holoviews
|
holoviews/core/dimension.py
|
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/dimension.py#L1444-L1456
|
def regroup(self, group):
"""Deprecated method to apply new group to items.
Equivalent functionality possible using:
ViewableTree(tree.relabel(group='Group').values())
"""
if util.config.future_deprecations:
self.param.warning('%s.regroup is deprecated, use relabel '
'method with a group argument instead.'
% type(self).__name__)
new_items = [el.relabel(group=group) for el in self.data.values()]
return reduce(lambda x,y: x+y, new_items)
|
[
"def",
"regroup",
"(",
"self",
",",
"group",
")",
":",
"if",
"util",
".",
"config",
".",
"future_deprecations",
":",
"self",
".",
"param",
".",
"warning",
"(",
"'%s.regroup is deprecated, use relabel '",
"'method with a group argument instead.'",
"%",
"type",
"(",
"self",
")",
".",
"__name__",
")",
"new_items",
"=",
"[",
"el",
".",
"relabel",
"(",
"group",
"=",
"group",
")",
"for",
"el",
"in",
"self",
".",
"data",
".",
"values",
"(",
")",
"]",
"return",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"+",
"y",
",",
"new_items",
")"
] |
Deprecated method to apply new group to items.
Equivalent functionality possible using:
ViewableTree(tree.relabel(group='Group').values())
|
[
"Deprecated",
"method",
"to",
"apply",
"new",
"group",
"to",
"items",
"."
] |
python
|
train
|
textX/textX
|
examples/StateMachine/state_machine.py
|
https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/examples/StateMachine/state_machine.py#L46-L62
|
def interpret(self):
"""
Main interpreter loop.
"""
self.print_menu()
while True:
try:
event = input()
if event == 'q':
return
event = int(event)
event = self.model.events[event-1]
except Exception:
print('Invalid input')
self.event(event)
self.print_menu()
|
[
"def",
"interpret",
"(",
"self",
")",
":",
"self",
".",
"print_menu",
"(",
")",
"while",
"True",
":",
"try",
":",
"event",
"=",
"input",
"(",
")",
"if",
"event",
"==",
"'q'",
":",
"return",
"event",
"=",
"int",
"(",
"event",
")",
"event",
"=",
"self",
".",
"model",
".",
"events",
"[",
"event",
"-",
"1",
"]",
"except",
"Exception",
":",
"print",
"(",
"'Invalid input'",
")",
"self",
".",
"event",
"(",
"event",
")",
"self",
".",
"print_menu",
"(",
")"
] |
Main interpreter loop.
|
[
"Main",
"interpreter",
"loop",
"."
] |
python
|
train
|
orbingol/NURBS-Python
|
geomdl/_tessellate.py
|
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/_tessellate.py#L179-L214
|
def make_quad_mesh(points, size_u, size_v):
""" Generates a mesh of quadrilateral elements.
:param points: list of points
:type points: list, tuple
:param size_u: number of points on the u-direction (column)
:type size_u: int
:param size_v: number of points on the v-direction (row)
:type size_v: int
:return: a tuple containing lists of vertices and quads
:rtype: tuple
"""
# Numbering
vertex_idx = 0
quad_idx = 0
# Generate vertices
vertices = []
for pt in points:
vrt = Vertex(*pt, id=vertex_idx)
vertices.append(vrt)
vertex_idx += 1
# Generate quads
quads = []
for i in range(0, size_u - 1):
for j in range(0, size_v - 1):
v1 = vertices[j + (size_v * i)]
v2 = vertices[j + (size_v * (i + 1))]
v3 = vertices[j + 1 + (size_v * (i + 1))]
v4 = vertices[j + 1 + (size_v * i)]
qd = Quad(v1, v2, v3, v4, id=quad_idx)
quads.append(qd)
quad_idx += 1
return vertices, quads
|
[
"def",
"make_quad_mesh",
"(",
"points",
",",
"size_u",
",",
"size_v",
")",
":",
"# Numbering",
"vertex_idx",
"=",
"0",
"quad_idx",
"=",
"0",
"# Generate vertices",
"vertices",
"=",
"[",
"]",
"for",
"pt",
"in",
"points",
":",
"vrt",
"=",
"Vertex",
"(",
"*",
"pt",
",",
"id",
"=",
"vertex_idx",
")",
"vertices",
".",
"append",
"(",
"vrt",
")",
"vertex_idx",
"+=",
"1",
"# Generate quads",
"quads",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"size_u",
"-",
"1",
")",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"size_v",
"-",
"1",
")",
":",
"v1",
"=",
"vertices",
"[",
"j",
"+",
"(",
"size_v",
"*",
"i",
")",
"]",
"v2",
"=",
"vertices",
"[",
"j",
"+",
"(",
"size_v",
"*",
"(",
"i",
"+",
"1",
")",
")",
"]",
"v3",
"=",
"vertices",
"[",
"j",
"+",
"1",
"+",
"(",
"size_v",
"*",
"(",
"i",
"+",
"1",
")",
")",
"]",
"v4",
"=",
"vertices",
"[",
"j",
"+",
"1",
"+",
"(",
"size_v",
"*",
"i",
")",
"]",
"qd",
"=",
"Quad",
"(",
"v1",
",",
"v2",
",",
"v3",
",",
"v4",
",",
"id",
"=",
"quad_idx",
")",
"quads",
".",
"append",
"(",
"qd",
")",
"quad_idx",
"+=",
"1",
"return",
"vertices",
",",
"quads"
] |
Generates a mesh of quadrilateral elements.
:param points: list of points
:type points: list, tuple
:param size_u: number of points on the u-direction (column)
:type size_u: int
:param size_v: number of points on the v-direction (row)
:type size_v: int
:return: a tuple containing lists of vertices and quads
:rtype: tuple
|
[
"Generates",
"a",
"mesh",
"of",
"quadrilateral",
"elements",
"."
] |
python
|
train
|
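The quad construction in make_quad_mesh above relies on the points being stored row-major along the v-direction. A small sketch of the same index arithmetic, with plain index tuples standing in for the Vertex/Quad helpers (quad_indices is a made-up name for illustration):

def quad_indices(size_u, size_v):
    # Point (i, j) lives at flat index j + size_v * i, as in make_quad_mesh.
    quads = []
    for i in range(size_u - 1):
        for j in range(size_v - 1):
            quads.append((j + size_v * i,
                          j + size_v * (i + 1),
                          j + 1 + size_v * (i + 1),
                          j + 1 + size_v * i))
    return quads

# A 2 x 3 grid of points yields (2 - 1) * (3 - 1) = 2 quads.
assert quad_indices(2, 3) == [(0, 3, 4, 1), (1, 4, 5, 2)]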
annoviko/pyclustering
|
pyclustering/utils/__init__.py
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/utils/__init__.py#L1202-L1213
|
def list_math_subtraction(a, b):
"""!
@brief Calculates subtraction of two lists.
@details Each element from list 'a' is subtracted by element from list 'b' accordingly.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (list): List of elements that supports mathematical subtraction.
@return (list) Results of subtraction of two lists.
"""
return [a[i] - b[i] for i in range(len(a))];
|
[
"def",
"list_math_subtraction",
"(",
"a",
",",
"b",
")",
":",
"return",
"[",
"a",
"[",
"i",
"]",
"-",
"b",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"a",
")",
")",
"]"
] |
!
@brief Calculates subtraction of two lists.
@details Each element from list 'a' is subtracted by element from list 'b' accordingly.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (list): List of elements that supports mathematical subtraction.
@return (list) Results of subtraction of two lists.
|
[
"!"
] |
python
|
valid
|
saltstack/salt
|
salt/spm/__init__.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L1093-L1112
|
def _render(self, data, formula_def):
'''
Render a [pre|post]_local_state or [pre|post]_tgt_state script
'''
# FORMULA can contain a renderer option
renderer = formula_def.get('renderer', self.opts.get('renderer', 'jinja|yaml'))
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get('renderer_blacklist')
whitelist = self.opts.get('renderer_whitelist')
template_vars = formula_def.copy()
template_vars['opts'] = self.opts.copy()
return compile_template(
':string:',
rend,
renderer,
blacklist,
whitelist,
input_data=data,
**template_vars
)
|
[
"def",
"_render",
"(",
"self",
",",
"data",
",",
"formula_def",
")",
":",
"# FORMULA can contain a renderer option",
"renderer",
"=",
"formula_def",
".",
"get",
"(",
"'renderer'",
",",
"self",
".",
"opts",
".",
"get",
"(",
"'renderer'",
",",
"'jinja|yaml'",
")",
")",
"rend",
"=",
"salt",
".",
"loader",
".",
"render",
"(",
"self",
".",
"opts",
",",
"{",
"}",
")",
"blacklist",
"=",
"self",
".",
"opts",
".",
"get",
"(",
"'renderer_blacklist'",
")",
"whitelist",
"=",
"self",
".",
"opts",
".",
"get",
"(",
"'renderer_whitelist'",
")",
"template_vars",
"=",
"formula_def",
".",
"copy",
"(",
")",
"template_vars",
"[",
"'opts'",
"]",
"=",
"self",
".",
"opts",
".",
"copy",
"(",
")",
"return",
"compile_template",
"(",
"':string:'",
",",
"rend",
",",
"renderer",
",",
"blacklist",
",",
"whitelist",
",",
"input_data",
"=",
"data",
",",
"*",
"*",
"template_vars",
")"
] |
Render a [pre|post]_local_state or [pre|post]_tgt_state script
|
[
"Render",
"a",
"[",
"pre|post",
"]",
"_local_state",
"or",
"[",
"pre|post",
"]",
"_tgt_state",
"script"
] |
python
|
train
|
abseil/abseil-py
|
absl/logging/__init__.py
|
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/logging/__init__.py#L569-L611
|
def find_log_dir_and_names(program_name=None, log_dir=None):
"""Computes the directory and filename prefix for log file.
Args:
program_name: str|None, the filename part of the path to the program that
is running without its extension. e.g: if your program is called
'usr/bin/foobar.py' this method should probably be called with
program_name='foobar' However, this is just a convention, you can
pass in any string you want, and it will be used as part of the
log filename. If you don't pass in anything, the default behavior
is as described in the example. In python standard logging mode,
the program_name will be prepended with py_ if the program_name
argument is omitted.
log_dir: str|None, the desired log directory.
Returns:
(log_dir, file_prefix, symlink_prefix)
"""
if not program_name:
# Strip the extension (foobar.par becomes foobar, and
# fubar.py becomes fubar). We do this so that the log
# file names are similar to C++ log file names.
program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
# Prepend py_ to files so that python code gets a unique file, and
# so that C++ libraries do not try to write to the same log files as us.
program_name = 'py_%s' % program_name
actual_log_dir = find_log_dir(log_dir=log_dir)
try:
username = getpass.getuser()
except KeyError:
# This can happen, e.g. when running under docker w/o passwd file.
if hasattr(os, 'getuid'):
# Windows doesn't have os.getuid
username = str(os.getuid())
else:
username = 'unknown'
hostname = socket.gethostname()
file_prefix = '%s.%s.%s.log' % (program_name, hostname, username)
return actual_log_dir, file_prefix, program_name
|
[
"def",
"find_log_dir_and_names",
"(",
"program_name",
"=",
"None",
",",
"log_dir",
"=",
"None",
")",
":",
"if",
"not",
"program_name",
":",
"# Strip the extension (foobar.par becomes foobar, and",
"# fubar.py becomes fubar). We do this so that the log",
"# file names are similar to C++ log file names.",
"program_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")",
"[",
"0",
"]",
"# Prepend py_ to files so that python code gets a unique file, and",
"# so that C++ libraries do not try to write to the same log files as us.",
"program_name",
"=",
"'py_%s'",
"%",
"program_name",
"actual_log_dir",
"=",
"find_log_dir",
"(",
"log_dir",
"=",
"log_dir",
")",
"try",
":",
"username",
"=",
"getpass",
".",
"getuser",
"(",
")",
"except",
"KeyError",
":",
"# This can happen, e.g. when running under docker w/o passwd file.",
"if",
"hasattr",
"(",
"os",
",",
"'getuid'",
")",
":",
"# Windows doesn't have os.getuid",
"username",
"=",
"str",
"(",
"os",
".",
"getuid",
"(",
")",
")",
"else",
":",
"username",
"=",
"'unknown'",
"hostname",
"=",
"socket",
".",
"gethostname",
"(",
")",
"file_prefix",
"=",
"'%s.%s.%s.log'",
"%",
"(",
"program_name",
",",
"hostname",
",",
"username",
")",
"return",
"actual_log_dir",
",",
"file_prefix",
",",
"program_name"
] |
Computes the directory and filename prefix for log file.
Args:
program_name: str|None, the filename part of the path to the program that
is running without its extension. e.g: if your program is called
'usr/bin/foobar.py' this method should probably be called with
program_name='foobar' However, this is just a convention, you can
pass in any string you want, and it will be used as part of the
log filename. If you don't pass in anything, the default behavior
is as described in the example. In python standard logging mode,
the program_name will be prepended with py_ if the program_name
argument is omitted.
log_dir: str|None, the desired log directory.
Returns:
(log_dir, file_prefix, symlink_prefix)
|
[
"Computes",
"the",
"directory",
"and",
"filename",
"prefix",
"for",
"log",
"file",
"."
] |
python
|
train
|
FactoryBoy/factory_boy
|
factory/builder.py
|
https://github.com/FactoryBoy/factory_boy/blob/edaa7c7f5a14065b229927903bd7989cc93cd069/factory/builder.py#L70-L94
|
def update(self, values):
"""Add new declarations to this set/
Args:
values (dict(name, declaration)): the declarations to ingest.
"""
for k, v in values.items():
root, sub = self.split(k)
if sub is None:
self.declarations[root] = v
else:
self.contexts[root][sub] = v
extra_context_keys = set(self.contexts) - set(self.declarations)
if extra_context_keys:
raise errors.InvalidDeclarationError(
"Received deep context for unknown fields: %r (known=%r)" % (
{
self.join(root, sub): v
for root in extra_context_keys
for sub, v in self.contexts[root].items()
},
sorted(self.declarations),
)
)
|
[
"def",
"update",
"(",
"self",
",",
"values",
")",
":",
"for",
"k",
",",
"v",
"in",
"values",
".",
"items",
"(",
")",
":",
"root",
",",
"sub",
"=",
"self",
".",
"split",
"(",
"k",
")",
"if",
"sub",
"is",
"None",
":",
"self",
".",
"declarations",
"[",
"root",
"]",
"=",
"v",
"else",
":",
"self",
".",
"contexts",
"[",
"root",
"]",
"[",
"sub",
"]",
"=",
"v",
"extra_context_keys",
"=",
"set",
"(",
"self",
".",
"contexts",
")",
"-",
"set",
"(",
"self",
".",
"declarations",
")",
"if",
"extra_context_keys",
":",
"raise",
"errors",
".",
"InvalidDeclarationError",
"(",
"\"Received deep context for unknown fields: %r (known=%r)\"",
"%",
"(",
"{",
"self",
".",
"join",
"(",
"root",
",",
"sub",
")",
":",
"v",
"for",
"root",
"in",
"extra_context_keys",
"for",
"sub",
",",
"v",
"in",
"self",
".",
"contexts",
"[",
"root",
"]",
".",
"items",
"(",
")",
"}",
",",
"sorted",
"(",
"self",
".",
"declarations",
")",
",",
")",
")"
] |
Add new declarations to this set.
Args:
values (dict(name, declaration)): the declarations to ingest.
|
[
"Add",
"new",
"declarations",
"to",
"this",
"set",
"/"
] |
python
|
train
|
dbrattli/OSlash
|
oslash/abc/monoid.py
|
https://github.com/dbrattli/OSlash/blob/ffdc714c5d454f7519f740254de89f70850929eb/oslash/abc/monoid.py#L48-L60
|
def concat(cls, xs):
"""mconcat :: [m] -> m
Fold a list using the monoid. For most types, the default
definition for mconcat will be used, but the function is
included in the class definition so that an optimized version
can be provided for specific types.
"""
def reducer(a, b):
return a.append(b)
return reduce(reducer, xs, cls.empty())
|
[
"def",
"concat",
"(",
"cls",
",",
"xs",
")",
":",
"def",
"reducer",
"(",
"a",
",",
"b",
")",
":",
"return",
"a",
".",
"append",
"(",
"b",
")",
"return",
"reduce",
"(",
"reducer",
",",
"xs",
",",
"cls",
".",
"empty",
"(",
")",
")"
] |
mconcat :: [m] -> m
Fold a list using the monoid. For most types, the default
definition for mconcat will be used, but the function is
included in the class definition so that an optimized version
can be provided for specific types.
|
[
"mconcat",
"::",
"[",
"m",
"]",
"-",
">",
"m"
] |
python
|
train
|
google/tangent
|
tangent/ast.py
|
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/ast.py#L83-L90
|
def copy_node(node):
"""Copy a node but keep its annotations intact."""
if not isinstance(node, gast.AST):
return [copy_node(n) for n in node]
new_node = copy.deepcopy(node)
setattr(new_node, anno.ANNOTATION_FIELD,
getattr(node, anno.ANNOTATION_FIELD, {}).copy())
return new_node
|
[
"def",
"copy_node",
"(",
"node",
")",
":",
"if",
"not",
"isinstance",
"(",
"node",
",",
"gast",
".",
"AST",
")",
":",
"return",
"[",
"copy_node",
"(",
"n",
")",
"for",
"n",
"in",
"node",
"]",
"new_node",
"=",
"copy",
".",
"deepcopy",
"(",
"node",
")",
"setattr",
"(",
"new_node",
",",
"anno",
".",
"ANNOTATION_FIELD",
",",
"getattr",
"(",
"node",
",",
"anno",
".",
"ANNOTATION_FIELD",
",",
"{",
"}",
")",
".",
"copy",
"(",
")",
")",
"return",
"new_node"
] |
Copy a node but keep its annotations intact.
|
[
"Copy",
"a",
"node",
"but",
"keep",
"its",
"annotations",
"intact",
"."
] |
python
|
train
|
rocioar/flake8-django
|
flake8_django/checkers/model_form.py
|
https://github.com/rocioar/flake8-django/blob/917f196e2518412bda6e841c1ed3de312004fc67/flake8_django/checkers/model_form.py#L39-L68
|
def run(self, node):
"""
Captures the use of exclude in ModelForm Meta
"""
if not self.checker_applies(node):
return
issues = []
for body in node.body:
if not isinstance(body, ast.ClassDef):
continue
for element in body.body:
if not isinstance(element, ast.Assign):
continue
for target in element.targets:
if target.id == 'fields' and self.is_string_dunder_all(element):
issues.append(
DJ07(
lineno=node.lineno,
col=node.col_offset,
)
)
elif target.id == 'exclude':
issues.append(
DJ06(
lineno=node.lineno,
col=node.col_offset,
)
)
return issues
|
[
"def",
"run",
"(",
"self",
",",
"node",
")",
":",
"if",
"not",
"self",
".",
"checker_applies",
"(",
"node",
")",
":",
"return",
"issues",
"=",
"[",
"]",
"for",
"body",
"in",
"node",
".",
"body",
":",
"if",
"not",
"isinstance",
"(",
"body",
",",
"ast",
".",
"ClassDef",
")",
":",
"continue",
"for",
"element",
"in",
"body",
".",
"body",
":",
"if",
"not",
"isinstance",
"(",
"element",
",",
"ast",
".",
"Assign",
")",
":",
"continue",
"for",
"target",
"in",
"element",
".",
"targets",
":",
"if",
"target",
".",
"id",
"==",
"'fields'",
"and",
"self",
".",
"is_string_dunder_all",
"(",
"element",
")",
":",
"issues",
".",
"append",
"(",
"DJ07",
"(",
"lineno",
"=",
"node",
".",
"lineno",
",",
"col",
"=",
"node",
".",
"col_offset",
",",
")",
")",
"elif",
"target",
".",
"id",
"==",
"'exclude'",
":",
"issues",
".",
"append",
"(",
"DJ06",
"(",
"lineno",
"=",
"node",
".",
"lineno",
",",
"col",
"=",
"node",
".",
"col_offset",
",",
")",
")",
"return",
"issues"
] |
Captures the use of exclude in ModelForm Meta
|
[
"Captures",
"the",
"use",
"of",
"exclude",
"in",
"ModelForm",
"Meta"
] |
python
|
train
|
ioos/compliance-checker
|
compliance_checker/cf/cf.py
|
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L1802-L1850
|
def check_dimensionless_vertical_coordinate(self, ds):
'''
Check the validity of dimensionless coordinates under CF
CF §4.3.2 The units attribute is not required for dimensionless
coordinates.
The standard_name attribute associates a coordinate with its definition
from Appendix D, Dimensionless Vertical Coordinates. The definition
provides a mapping between the dimensionless coordinate values and
dimensional values that can positively and uniquely indicate the
location of the data.
A new attribute, formula_terms, is used to associate terms in the
definitions with variables in a netCDF file. To maintain backwards
compatibility with COARDS the use of these attributes is not required,
but is strongly recommended.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
ret_val = []
z_variables = cfutil.get_z_variables(ds)
deprecated_units = [
'level',
'layer',
'sigma_level'
]
for name in z_variables:
variable = ds.variables[name]
standard_name = getattr(variable, 'standard_name', None)
units = getattr(variable, 'units', None)
formula_terms = getattr(variable, 'formula_terms', None)
# Skip the variable if it's dimensional
if (formula_terms is None and
standard_name not in dimless_vertical_coordinates):
continue
is_not_deprecated = TestCtx(BaseCheck.LOW, self.section_titles["4.3"])
is_not_deprecated.assert_true(units not in deprecated_units,
"§4.3.2: units are deprecated by CF in variable {}: {}"
"".format(name, units))
ret_val.append(is_not_deprecated.to_result())
ret_val.append(self._check_formula_terms(ds, name))
return ret_val
|
[
"def",
"check_dimensionless_vertical_coordinate",
"(",
"self",
",",
"ds",
")",
":",
"ret_val",
"=",
"[",
"]",
"z_variables",
"=",
"cfutil",
".",
"get_z_variables",
"(",
"ds",
")",
"deprecated_units",
"=",
"[",
"'level'",
",",
"'layer'",
",",
"'sigma_level'",
"]",
"for",
"name",
"in",
"z_variables",
":",
"variable",
"=",
"ds",
".",
"variables",
"[",
"name",
"]",
"standard_name",
"=",
"getattr",
"(",
"variable",
",",
"'standard_name'",
",",
"None",
")",
"units",
"=",
"getattr",
"(",
"variable",
",",
"'units'",
",",
"None",
")",
"formula_terms",
"=",
"getattr",
"(",
"variable",
",",
"'formula_terms'",
",",
"None",
")",
"# Skip the variable if it's dimensional",
"if",
"(",
"formula_terms",
"is",
"None",
"and",
"standard_name",
"not",
"in",
"dimless_vertical_coordinates",
")",
":",
"continue",
"is_not_deprecated",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"LOW",
",",
"self",
".",
"section_titles",
"[",
"\"4.3\"",
"]",
")",
"is_not_deprecated",
".",
"assert_true",
"(",
"units",
"not",
"in",
"deprecated_units",
",",
"\"§4.3.2: units are deprecated by CF in variable {}: {}\"",
"\"\"",
".",
"format",
"(",
"name",
",",
"units",
")",
")",
"ret_val",
".",
"append",
"(",
"is_not_deprecated",
".",
"to_result",
"(",
")",
")",
"ret_val",
".",
"append",
"(",
"self",
".",
"_check_formula_terms",
"(",
"ds",
",",
"name",
")",
")",
"return",
"ret_val"
] |
Check the validity of dimensionless coordinates under CF
CF §4.3.2 The units attribute is not required for dimensionless
coordinates.
The standard_name attribute associates a coordinate with its definition
from Appendix D, Dimensionless Vertical Coordinates. The definition
provides a mapping between the dimensionless coordinate values and
dimensional values that can positively and uniquely indicate the
location of the data.
A new attribute, formula_terms, is used to associate terms in the
definitions with variables in a netCDF file. To maintain backwards
compatibility with COARDS the use of these attributes is not required,
but is strongly recommended.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
|
[
"Check",
"the",
"validity",
"of",
"dimensionless",
"coordinates",
"under",
"CF"
] |
python
|
train
|
dnanexus/dx-toolkit
|
src/python/dxpy/utils/resolver.py
|
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L615-L718
|
def _check_resolution_needed(path, project, folderpath, entity_name, expected_classes=None, describe=True,
enclose_in_list=False):
"""
:param path: Path to the object that required resolution; propagated from
command-line
:type path: string
:param project: The potential project the entity belongs to
:type project: string
:param folderpath: Path to the entity
:type folderpath: string
:param entity_name: The name of the entity
:type entity_name: string
:param expected_classes: A list of expected classes the entity is allowed
to belong to if it is an ID (e.g. "record",
"file", "job"); if None, then entity_name may be
any data object class
:type expected_classes: list or None
:param describe: Dictionary of inputs to the describe API call; if
no describe input is provided (default value True), then
an empty mapping is passed to the describe API method
:type describe: dict or True
:param enclose_in_list: Whether the describe output is to be in the form
of a list (if False, the last return value is a
dictionary; if True, the last return value is a
list of one dictionary); it will only have an
effect if entity_name is a DX ID and is described
:type enclose_in_list: boolean
:returns: Whether or not the entity needs to be resolved with a more
general resolution method, the project, the folderpath, and the
entity name
:rtype: tuple of 4 elements
:raises: ResolutionError if the entity fails to be described
Attempts to resolve the entity to a folder or an object, and describes
the entity iff it is a DX ID of an expected class in the list
expected_classes.
Otherwise, determines whether or not more general resolution may be able
to resolve the entity.
If a more general resolution method is needed, then the return values will
look like:
(True, <project>, <folderpath>, <entity_name>)
If the entity is a DX ID, but is not one of the supplied expected
classes, then the return values will look like:
(False, None, None, None)
If the entity can be successfully described, then the return values will
look like:
<desc_output> ::= {"id": entity_name, "describe": {...}}
<desc_or_desc_list> ::= <desc_output> || [<desc_output>]
(False, <project>, <folderpath>, <desc_or_desc_list>)
If the entity may be a folder, then the return values will look like:
(False, <project>, <folderpath>, None)
TODO: Allow arbitrary flags for the describe mapping.
"""
if entity_name is None:
# Definitely a folder (or project)
# TODO: find a good way to check if folder exists and expected=folder
return False, project, folderpath, None
elif is_hashid(entity_name):
found_valid_class = True
if expected_classes is not None:
found_valid_class = False
for klass in expected_classes:
if entity_name.startswith(klass):
found_valid_class = True
if not found_valid_class:
return False, None, None, None
if describe is True:
describe = {}
# entity is an ID of a valid class, try to describe it
if 'project' not in describe:
if project != dxpy.WORKSPACE_ID:
describe['project'] = project
elif dxpy.WORKSPACE_ID is not None:
describe['project'] = dxpy.WORKSPACE_ID
try:
desc = dxpy.DXHTTPRequest('/' + entity_name + '/describe', describe)
desc = dxpy.append_underlying_workflow_describe(desc)
except Exception as details:
if 'project' in describe:
# Now try it without the hint
del describe['project']
try:
desc = dxpy.DXHTTPRequest('/' + entity_name + '/describe', describe)
except Exception as details2:
raise ResolutionError(str(details2))
else:
raise ResolutionError(str(details))
result = {"id": entity_name, "describe": desc}
if enclose_in_list:
return False, project, folderpath, [result]
else:
return False, project, folderpath, result
else:
# Need to resolve later
return True, project, folderpath, entity_name
|
[
"def",
"_check_resolution_needed",
"(",
"path",
",",
"project",
",",
"folderpath",
",",
"entity_name",
",",
"expected_classes",
"=",
"None",
",",
"describe",
"=",
"True",
",",
"enclose_in_list",
"=",
"False",
")",
":",
"if",
"entity_name",
"is",
"None",
":",
"# Definitely a folder (or project)",
"# TODO: find a good way to check if folder exists and expected=folder",
"return",
"False",
",",
"project",
",",
"folderpath",
",",
"None",
"elif",
"is_hashid",
"(",
"entity_name",
")",
":",
"found_valid_class",
"=",
"True",
"if",
"expected_classes",
"is",
"not",
"None",
":",
"found_valid_class",
"=",
"False",
"for",
"klass",
"in",
"expected_classes",
":",
"if",
"entity_name",
".",
"startswith",
"(",
"klass",
")",
":",
"found_valid_class",
"=",
"True",
"if",
"not",
"found_valid_class",
":",
"return",
"False",
",",
"None",
",",
"None",
",",
"None",
"if",
"describe",
"is",
"True",
":",
"describe",
"=",
"{",
"}",
"# entity is an ID of a valid class, try to describe it",
"if",
"'project'",
"not",
"in",
"describe",
":",
"if",
"project",
"!=",
"dxpy",
".",
"WORKSPACE_ID",
":",
"describe",
"[",
"'project'",
"]",
"=",
"project",
"elif",
"dxpy",
".",
"WORKSPACE_ID",
"is",
"not",
"None",
":",
"describe",
"[",
"'project'",
"]",
"=",
"dxpy",
".",
"WORKSPACE_ID",
"try",
":",
"desc",
"=",
"dxpy",
".",
"DXHTTPRequest",
"(",
"'/'",
"+",
"entity_name",
"+",
"'/describe'",
",",
"describe",
")",
"desc",
"=",
"dxpy",
".",
"append_underlying_workflow_describe",
"(",
"desc",
")",
"except",
"Exception",
"as",
"details",
":",
"if",
"'project'",
"in",
"describe",
":",
"# Now try it without the hint",
"del",
"describe",
"[",
"'project'",
"]",
"try",
":",
"desc",
"=",
"dxpy",
".",
"DXHTTPRequest",
"(",
"'/'",
"+",
"entity_name",
"+",
"'/describe'",
",",
"describe",
")",
"except",
"Exception",
"as",
"details2",
":",
"raise",
"ResolutionError",
"(",
"str",
"(",
"details2",
")",
")",
"else",
":",
"raise",
"ResolutionError",
"(",
"str",
"(",
"details",
")",
")",
"result",
"=",
"{",
"\"id\"",
":",
"entity_name",
",",
"\"describe\"",
":",
"desc",
"}",
"if",
"enclose_in_list",
":",
"return",
"False",
",",
"project",
",",
"folderpath",
",",
"[",
"result",
"]",
"else",
":",
"return",
"False",
",",
"project",
",",
"folderpath",
",",
"result",
"else",
":",
"# Need to resolve later",
"return",
"True",
",",
"project",
",",
"folderpath",
",",
"entity_name"
] |
:param path: Path to the object that required resolution; propagated from
command-line
:type path: string
:param project: The potential project the entity belongs to
:type project: string
:param folderpath: Path to the entity
:type folderpath: string
:param entity_name: The name of the entity
:type entity_name: string
:param expected_classes: A list of expected classes the entity is allowed
to belong to if it is an ID (e.g. "record",
"file", "job"); if None, then entity_name may be
any data object class
:type expected_classes: list or None
:param describe: Dictionary of inputs to the describe API call; if
no describe input is provided (default value True), then
an empty mapping is passed to the describe API method
:type describe: dict or True
:param enclose_in_list: Whether the describe output is to be in the form
of a list (if False, the last return value is a
dictionary; if True, the last return value is a
list of one dictionary); it will only have an
effect if entity_name is a DX ID and is described
:type enclose_in_list: boolean
:returns: Whether or not the entity needs to be resolved with a more
general resolution method, the project, the folderpath, and the
entity name
:rtype: tuple of 4 elements
:raises: ResolutionError if the entity fails to be described
Attempts to resolve the entity to a folder or an object, and describes
the entity iff it is a DX ID of an expected class in the list
expected_classes.
Otherwise, determines whether or not more general resolution may be able
to resolve the entity.
If a more general resolution method is needed, then the return values will
look like:
(True, <project>, <folderpath>, <entity_name>)
If the entity is a DX ID, but is not one of the supplied expected
classes, then the return values will look like:
(False, None, None, None)
If the entity can be successfully described, then the return values will
look like:
<desc_output> ::= {"id": entity_name, "describe": {...}}
<desc_or_desc_list> ::= <desc_output> || [<desc_output>]
(False, <project>, <folderpath>, <desc_or_desc_list>)
If the entity may be a folder, then the return values will look like:
(False, <project>, <folderpath>, None)
TODO: Allow arbitrary flags for the describe mapping.
|
[
":",
"param",
"path",
":",
"Path",
"to",
"the",
"object",
"that",
"required",
"resolution",
";",
"propagated",
"from",
"command",
"-",
"line",
":",
"type",
"path",
":",
"string",
":",
"param",
"project",
":",
"The",
"potential",
"project",
"the",
"entity",
"belongs",
"to",
":",
"type",
"project",
":",
"string",
":",
"param",
"folderpath",
":",
"Path",
"to",
"the",
"entity",
":",
"type",
"folderpath",
":",
"string",
":",
"param",
"entity_name",
":",
"The",
"name",
"of",
"the",
"entity",
":",
"type",
"entity_name",
":",
"string",
":",
"param",
"expected_classes",
":",
"A",
"list",
"of",
"expected",
"classes",
"the",
"entity",
"is",
"allowed",
"to",
"belong",
"to",
"if",
"it",
"is",
"an",
"ID",
"(",
"e",
".",
"g",
".",
"record",
"file",
"job",
")",
";",
"if",
"None",
"then",
"entity_name",
"may",
"be",
"any",
"data",
"object",
"class",
":",
"type",
"expected_classes",
":",
"list",
"or",
"None",
":",
"param",
"describe",
":",
"Dictionary",
"of",
"inputs",
"to",
"the",
"describe",
"API",
"call",
";",
"if",
"no",
"describe",
"input",
"is",
"provided",
"(",
"default",
"value",
"True",
")",
"then",
"an",
"empty",
"mapping",
"is",
"passed",
"to",
"the",
"describe",
"API",
"method",
":",
"type",
"describe",
":",
"dict",
"or",
"True",
":",
"param",
"enclose_in_list",
":",
"Whether",
"the",
"describe",
"output",
"is",
"to",
"be",
"in",
"the",
"form",
"of",
"a",
"list",
"(",
"if",
"False",
"the",
"last",
"return",
"value",
"is",
"a",
"dictionary",
";",
"if",
"True",
"the",
"last",
"return",
"value",
"is",
"a",
"list",
"of",
"one",
"dictionary",
")",
";",
"it",
"will",
"only",
"have",
"an",
"effect",
"if",
"entity_name",
"is",
"a",
"DX",
"ID",
"and",
"is",
"described",
":",
"type",
"enclose_in_list",
":",
"boolean",
":",
"returns",
":",
"Whether",
"or",
"not",
"the",
"entity",
"needs",
"to",
"be",
"resolved",
"with",
"a",
"more",
"general",
"resolution",
"method",
"the",
"project",
"the",
"folderpath",
"and",
"the",
"entity",
"name",
":",
"rtype",
":",
"tuple",
"of",
"4",
"elements",
":",
"raises",
":",
"ResolutionError",
"if",
"the",
"entity",
"fails",
"to",
"be",
"described"
] |
python
|
train
|
kislyuk/ensure
|
ensure/main.py
|
https://github.com/kislyuk/ensure/blob/0a562a4b469ffbaf71c75dc4d394e94334c831f0/ensure/main.py#L282-L287
|
def does_not_contain(self, element):
"""
Ensures :attr:`subject` does not contain *element*.
"""
self._run(unittest_case.assertNotIn, (element, self._subject))
return ChainInspector(self._subject)
|
[
"def",
"does_not_contain",
"(",
"self",
",",
"element",
")",
":",
"self",
".",
"_run",
"(",
"unittest_case",
".",
"assertNotIn",
",",
"(",
"element",
",",
"self",
".",
"_subject",
")",
")",
"return",
"ChainInspector",
"(",
"self",
".",
"_subject",
")"
] |
Ensures :attr:`subject` does not contain *element*.
|
[
"Ensures",
":",
"attr",
":",
"subject",
"does",
"not",
"contain",
"*",
"element",
"*",
"."
] |
python
|
train
|
dhermes/bezier
|
src/bezier/curve.py
|
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/curve.py#L443-L538
|
def reduce_(self):
r"""Return a degree-reduced version of the current curve.
.. _pseudo-inverse:
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse
Does this by converting the current nodes :math:`v_0, \ldots, v_n`
to new nodes :math:`w_0, \ldots, w_{n - 1}` that correspond to
reversing the :meth:`elevate` process.
This uses the `pseudo-inverse`_ of the elevation matrix. For example
when elevating from degree 2 to 3, the matrix :math:`E_2` is given by
.. math::
\mathbf{v} = \left[\begin{array}{c c c} v_0 & v_1 & v_2
\end{array}\right] \longmapsto \left[\begin{array}{c c c c}
v_0 & \frac{v_0 + 2 v_1}{3} & \frac{2 v_1 + v_2}{3} & v_2
\end{array}\right] = \frac{1}{3} \mathbf{v}
\left[\begin{array}{c c c c} 3 & 1 & 0 & 0 \\
0 & 2 & 2 & 0 \\ 0 & 0 & 1 & 3 \end{array}\right]
and the (right) pseudo-inverse is given by
.. math::
R_2 = E_2^T \left(E_2 E_2^T\right)^{-1} = \frac{1}{20}
\left[\begin{array}{c c c} 19 & -5 & 1 \\
3 & 15 & -3 \\ -3 & 15 & 3 \\ 1 & -5 & 19
\end{array}\right].
.. warning::
Though degree-elevation preserves the start and end nodes, degree
reduction has no such guarantee. Rather, the nodes produced are
"best" in the least squares sense (when solving the normal
equations).
.. image:: ../../images/curve_reduce.png
:align: center
.. testsetup:: curve-reduce, curve-reduce-approx
import numpy as np
import bezier
.. doctest:: curve-reduce
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [-3.0, 0.0, 1.0, 0.0],
... [ 3.0, 2.0, 3.0, 6.0],
... ])
>>> curve = bezier.Curve(nodes, degree=3)
>>> reduced = curve.reduce_()
>>> reduced
<Curve (degree=2, dimension=2)>
>>> reduced.nodes
array([[-3. , 1.5, 0. ],
[ 3. , 1.5, 6. ]])
.. testcleanup:: curve-reduce
import make_images
make_images.curve_reduce(curve, reduced)
In the case that the current curve **is not** degree-elevated.
.. image:: ../../images/curve_reduce_approx.png
:align: center
.. doctest:: curve-reduce-approx
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.25, 3.75, 5.0],
... [2.5, 5.0 , 7.5 , 2.5],
... ])
>>> curve = bezier.Curve(nodes, degree=3)
>>> reduced = curve.reduce_()
>>> reduced
<Curve (degree=2, dimension=2)>
>>> reduced.nodes
array([[-0.125, 2.5 , 5.125],
[ 2.125, 8.125, 2.875]])
.. testcleanup:: curve-reduce-approx
import make_images
make_images.curve_reduce_approx(curve, reduced)
Returns:
Curve: The degree-reduced curve.
"""
new_nodes = _curve_helpers.reduce_pseudo_inverse(self._nodes)
return Curve(new_nodes, self._degree - 1, _copy=False)
|
[
"def",
"reduce_",
"(",
"self",
")",
":",
"new_nodes",
"=",
"_curve_helpers",
".",
"reduce_pseudo_inverse",
"(",
"self",
".",
"_nodes",
")",
"return",
"Curve",
"(",
"new_nodes",
",",
"self",
".",
"_degree",
"-",
"1",
",",
"_copy",
"=",
"False",
")"
] |
r"""Return a degree-reduced version of the current curve.
.. _pseudo-inverse:
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse
Does this by converting the current nodes :math:`v_0, \ldots, v_n`
to new nodes :math:`w_0, \ldots, w_{n - 1}` that correspond to
reversing the :meth:`elevate` process.
This uses the `pseudo-inverse`_ of the elevation matrix. For example
when elevating from degree 2 to 3, the matrix :math:`E_2` is given by
.. math::
\mathbf{v} = \left[\begin{array}{c c c} v_0 & v_1 & v_2
\end{array}\right] \longmapsto \left[\begin{array}{c c c c}
v_0 & \frac{v_0 + 2 v_1}{3} & \frac{2 v_1 + v_2}{3} & v_2
\end{array}\right] = \frac{1}{3} \mathbf{v}
\left[\begin{array}{c c c c} 3 & 1 & 0 & 0 \\
0 & 2 & 2 & 0 \\ 0 & 0 & 1 & 3 \end{array}\right]
and the (right) pseudo-inverse is given by
.. math::
R_2 = E_2^T \left(E_2 E_2^T\right)^{-1} = \frac{1}{20}
\left[\begin{array}{c c c} 19 & -5 & 1 \\
3 & 15 & -3 \\ -3 & 15 & 3 \\ 1 & -5 & 19
\end{array}\right].
.. warning::
Though degree-elevation preserves the start and end nodes, degree
reduction has no such guarantee. Rather, the nodes produced are
"best" in the least squares sense (when solving the normal
equations).
.. image:: ../../images/curve_reduce.png
:align: center
.. testsetup:: curve-reduce, curve-reduce-approx
import numpy as np
import bezier
.. doctest:: curve-reduce
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [-3.0, 0.0, 1.0, 0.0],
... [ 3.0, 2.0, 3.0, 6.0],
... ])
>>> curve = bezier.Curve(nodes, degree=3)
>>> reduced = curve.reduce_()
>>> reduced
<Curve (degree=2, dimension=2)>
>>> reduced.nodes
array([[-3. , 1.5, 0. ],
[ 3. , 1.5, 6. ]])
.. testcleanup:: curve-reduce
import make_images
make_images.curve_reduce(curve, reduced)
In the case that the current curve **is not** degree-elevated.
.. image:: ../../images/curve_reduce_approx.png
:align: center
.. doctest:: curve-reduce-approx
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.25, 3.75, 5.0],
... [2.5, 5.0 , 7.5 , 2.5],
... ])
>>> curve = bezier.Curve(nodes, degree=3)
>>> reduced = curve.reduce_()
>>> reduced
<Curve (degree=2, dimension=2)>
>>> reduced.nodes
array([[-0.125, 2.5 , 5.125],
[ 2.125, 8.125, 2.875]])
.. testcleanup:: curve-reduce-approx
import make_images
make_images.curve_reduce_approx(curve, reduced)
Returns:
Curve: The degree-reduced curve.
|
[
"r",
"Return",
"a",
"degree",
"-",
"reduced",
"version",
"of",
"the",
"current",
"curve",
"."
] |
python
|
train
|
project-rig/rig
|
rig/place_and_route/place/breadth_first.py
|
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/breadth_first.py#L8-L39
|
def breadth_first_vertex_order(vertices_resources, nets):
"""A generator which iterates over a set of vertices in a breadth-first
order in terms of connectivity.
For use as a vertex ordering for the sequential placer.
"""
# Special case: no vertices, just stop immediately
if len(vertices_resources) == 0:
return
# Enumerate the set of nets attached to each vertex
vertex_neighbours = defaultdict(set)
for net in nets:
# Note: Iterating over a Net object produces the set of vertices
# involved in the net.
vertex_neighbours[net.source].update(net)
for sink in net.sinks:
vertex_neighbours[sink].update(net)
# Perform a breadth-first iteration over the vertices.
unplaced_vertices = set(vertices_resources)
vertex_queue = deque()
while vertex_queue or unplaced_vertices:
if not vertex_queue:
vertex_queue.append(unplaced_vertices.pop())
vertex = vertex_queue.popleft()
yield vertex
vertex_queue.extend(v for v in vertex_neighbours[vertex]
if v in unplaced_vertices)
unplaced_vertices.difference_update(vertex_neighbours[vertex])
|
[
"def",
"breadth_first_vertex_order",
"(",
"vertices_resources",
",",
"nets",
")",
":",
"# Special case: no vertices, just stop immediately",
"if",
"len",
"(",
"vertices_resources",
")",
"==",
"0",
":",
"return",
"# Enumerate the set of nets attached to each vertex",
"vertex_neighbours",
"=",
"defaultdict",
"(",
"set",
")",
"for",
"net",
"in",
"nets",
":",
"# Note: Iterating over a Net object produces the set of vertices",
"# involved in the net.",
"vertex_neighbours",
"[",
"net",
".",
"source",
"]",
".",
"update",
"(",
"net",
")",
"for",
"sink",
"in",
"net",
".",
"sinks",
":",
"vertex_neighbours",
"[",
"sink",
"]",
".",
"update",
"(",
"net",
")",
"# Perform a breadth-first iteration over the vertices.",
"unplaced_vertices",
"=",
"set",
"(",
"vertices_resources",
")",
"vertex_queue",
"=",
"deque",
"(",
")",
"while",
"vertex_queue",
"or",
"unplaced_vertices",
":",
"if",
"not",
"vertex_queue",
":",
"vertex_queue",
".",
"append",
"(",
"unplaced_vertices",
".",
"pop",
"(",
")",
")",
"vertex",
"=",
"vertex_queue",
".",
"popleft",
"(",
")",
"yield",
"vertex",
"vertex_queue",
".",
"extend",
"(",
"v",
"for",
"v",
"in",
"vertex_neighbours",
"[",
"vertex",
"]",
"if",
"v",
"in",
"unplaced_vertices",
")",
"unplaced_vertices",
".",
"difference_update",
"(",
"vertex_neighbours",
"[",
"vertex",
"]",
")"
] |
A generator which iterates over a set of vertices in a breadth-first
order in terms of connectivity.
For use as a vertex ordering for the sequential placer.
|
[
"A",
"generator",
"which",
"iterates",
"over",
"a",
"set",
"of",
"vertices",
"in",
"a",
"breadth",
"-",
"first",
"order",
"in",
"terms",
"of",
"connectivity",
"."
] |
python
|
train
|
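A stripped-down sketch of the breadth-first ordering above, assuming a plain adjacency mapping {vertex: set of neighbours} in place of Net objects; bfs_order is an illustrative name, not part of rig.

from collections import deque

def bfs_order(neighbours):
    unvisited = set(neighbours)
    queue = deque()
    while queue or unvisited:
        if not queue:
            # Restart from an arbitrary vertex when a component is exhausted.
            queue.append(unvisited.pop())
        vertex = queue.popleft()
        yield vertex
        queue.extend(v for v in neighbours[vertex] if v in unvisited)
        unvisited.difference_update(neighbours[vertex])

# 'a'-'b'-'c' form one component; 'd' is isolated and seeds a second pass.
order = list(bfs_order({'a': {'b'}, 'b': {'a', 'c'}, 'c': {'b'}, 'd': set()}))
assert set(order) == {'a', 'b', 'c', 'd'} and len(order) == 4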
sparklingpandas/sparklingpandas
|
sparklingpandas/groupby.py
|
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L119-L126
|
def groups(self):
"""Returns dict {group name -> group labels}."""
self._prep_pandas_groupby()
def extract_group_labels(frame):
return (frame[0], frame[1].index.values)
return self._mergedRDD.map(extract_group_labels).collectAsMap()
|
[
"def",
"groups",
"(",
"self",
")",
":",
"self",
".",
"_prep_pandas_groupby",
"(",
")",
"def",
"extract_group_labels",
"(",
"frame",
")",
":",
"return",
"(",
"frame",
"[",
"0",
"]",
",",
"frame",
"[",
"1",
"]",
".",
"index",
".",
"values",
")",
"return",
"self",
".",
"_mergedRDD",
".",
"map",
"(",
"extract_group_labels",
")",
".",
"collectAsMap",
"(",
")"
] |
Returns dict {group name -> group labels}.
|
[
"Returns",
"dict",
"{",
"group",
"name",
"-",
">",
"group",
"labels",
"}",
"."
] |
python
|
train
|
chrisspen/weka
|
weka/arff.py
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L370-L383
|
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
|
[
"def",
"parse",
"(",
"cls",
",",
"s",
",",
"schema_only",
"=",
"False",
")",
":",
"a",
"=",
"cls",
"(",
")",
"a",
".",
"state",
"=",
"'comment'",
"a",
".",
"lineno",
"=",
"1",
"for",
"l",
"in",
"s",
".",
"splitlines",
"(",
")",
":",
"a",
".",
"parseline",
"(",
"l",
")",
"a",
".",
"lineno",
"+=",
"1",
"if",
"schema_only",
"and",
"a",
".",
"state",
"==",
"'data'",
":",
"# Don't parse data if we're only loading the schema.",
"break",
"return",
"a"
] |
Parse an ARFF File already loaded into a string.
|
[
"Parse",
"an",
"ARFF",
"File",
"already",
"loaded",
"into",
"a",
"string",
"."
] |
python
|
train
|
adamreeve/npTDMS
|
nptdms/tdms.py
|
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L208-L226
|
def as_dataframe(self, time_index=False, absolute_time=False):
"""
Converts the TDMS file to a DataFrame
:param time_index: Whether to include a time index for the dataframe.
:param absolute_time: If time_index is true, whether the time index
values are absolute times or relative to the start time.
:return: The full TDMS file data.
:rtype: pandas.DataFrame
"""
import pandas as pd
dataframe_dict = OrderedDict()
for key, value in self.objects.items():
if value.has_data:
index = value.time_track(absolute_time) if time_index else None
dataframe_dict[key] = pd.Series(data=value.data, index=index)
return pd.DataFrame.from_dict(dataframe_dict)
|
[
"def",
"as_dataframe",
"(",
"self",
",",
"time_index",
"=",
"False",
",",
"absolute_time",
"=",
"False",
")",
":",
"import",
"pandas",
"as",
"pd",
"dataframe_dict",
"=",
"OrderedDict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"self",
".",
"objects",
".",
"items",
"(",
")",
":",
"if",
"value",
".",
"has_data",
":",
"index",
"=",
"value",
".",
"time_track",
"(",
"absolute_time",
")",
"if",
"time_index",
"else",
"None",
"dataframe_dict",
"[",
"key",
"]",
"=",
"pd",
".",
"Series",
"(",
"data",
"=",
"value",
".",
"data",
",",
"index",
"=",
"index",
")",
"return",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"dataframe_dict",
")"
] |
Converts the TDMS file to a DataFrame
:param time_index: Whether to include a time index for the dataframe.
:param absolute_time: If time_index is true, whether the time index
values are absolute times or relative to the start time.
:return: The full TDMS file data.
:rtype: pandas.DataFrame
|
[
"Converts",
"the",
"TDMS",
"file",
"to",
"a",
"DataFrame"
] |
python
|
train
|
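The as_dataframe record above is the npTDMS entry point for turning a TDMS file into one pandas column per channel. Assuming the pre-2.0 API shown in the record, where TdmsFile reads the file in its constructor, usage would look roughly like this (the file name is a placeholder):

from nptdms import TdmsFile

tdms_file = TdmsFile("example.tdms")          # placeholder path

# One Series per channel; optionally index rows by each channel's time track.
df = tdms_file.as_dataframe(time_index=True, absolute_time=False)
print(df.head())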
zetaops/pyoko
|
pyoko/manage.py
|
https://github.com/zetaops/pyoko/blob/236c509ad85640933ac0f89ad8f7ed95f62adf07/pyoko/manage.py#L884-L893
|
def _print_fields(self, fields):
"""Print the fields, padding the names as necessary to align them."""
# Prepare a formatting string that aligns the names and types based on the longest ones
longest_name = max(fields, key=lambda f: len(f[1]))[1]
longest_type = max(fields, key=lambda f: len(f[2]))[2]
field_format = '%s%-{}s %-{}s %s'.format(
len(longest_name) + self._padding_after_name,
len(longest_type) + self._padding_after_type)
for field in fields:
self._print(field_format % field)
|
[
"def",
"_print_fields",
"(",
"self",
",",
"fields",
")",
":",
"# Prepare a formatting string that aligns the names and types based on the longest ones",
"longest_name",
"=",
"max",
"(",
"fields",
",",
"key",
"=",
"lambda",
"f",
":",
"len",
"(",
"f",
"[",
"1",
"]",
")",
")",
"[",
"1",
"]",
"longest_type",
"=",
"max",
"(",
"fields",
",",
"key",
"=",
"lambda",
"f",
":",
"len",
"(",
"f",
"[",
"2",
"]",
")",
")",
"[",
"2",
"]",
"field_format",
"=",
"'%s%-{}s %-{}s %s'",
".",
"format",
"(",
"len",
"(",
"longest_name",
")",
"+",
"self",
".",
"_padding_after_name",
",",
"len",
"(",
"longest_type",
")",
"+",
"self",
".",
"_padding_after_type",
")",
"for",
"field",
"in",
"fields",
":",
"self",
".",
"_print",
"(",
"field_format",
"%",
"field",
")"
] |
Print the fields, padding the names as necessary to align them.
|
[
"Print",
"the",
"fields",
"padding",
"the",
"names",
"as",
"necessary",
"to",
"align",
"them",
"."
] |
python
|
train
|
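_print_fields above derives one format string from the longest name and type so that every row lines up. A self-contained sketch of that alignment trick, with illustrative field tuples and padding values, is:

# Each field is (prefix, name, type, note); padding values are illustrative.
fields = [("", "name", "str", "required"), ("", "age", "int", "")]
pad_after_name, pad_after_type = 2, 2

longest_name = max(fields, key=lambda f: len(f[1]))[1]
longest_type = max(fields, key=lambda f: len(f[2]))[2]
fmt = "%s%-{}s %-{}s %s".format(len(longest_name) + pad_after_name,
                                len(longest_type) + pad_after_type)
for field in fields:
    print(fmt % field)   # names and types end up in aligned columns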
pandas-dev/pandas
|
pandas/core/arrays/categorical.py
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1088-L1124
|
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See Also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
|
[
"def",
"remove_unused_categories",
"(",
"self",
",",
"inplace",
"=",
"False",
")",
":",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"cat",
"=",
"self",
"if",
"inplace",
"else",
"self",
".",
"copy",
"(",
")",
"idx",
",",
"inv",
"=",
"np",
".",
"unique",
"(",
"cat",
".",
"_codes",
",",
"return_inverse",
"=",
"True",
")",
"if",
"idx",
".",
"size",
"!=",
"0",
"and",
"idx",
"[",
"0",
"]",
"==",
"-",
"1",
":",
"# na sentinel",
"idx",
",",
"inv",
"=",
"idx",
"[",
"1",
":",
"]",
",",
"inv",
"-",
"1",
"new_categories",
"=",
"cat",
".",
"dtype",
".",
"categories",
".",
"take",
"(",
"idx",
")",
"new_dtype",
"=",
"CategoricalDtype",
".",
"_from_fastpath",
"(",
"new_categories",
",",
"ordered",
"=",
"self",
".",
"ordered",
")",
"cat",
".",
"_dtype",
"=",
"new_dtype",
"cat",
".",
"_codes",
"=",
"coerce_indexer_dtype",
"(",
"inv",
",",
"new_dtype",
".",
"categories",
")",
"if",
"not",
"inplace",
":",
"return",
"cat"
] |
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See Also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
|
[
"Remove",
"categories",
"which",
"are",
"not",
"used",
"."
] |
python
|
train
|
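remove_unused_categories above drops category labels that no longer occur in the data. A short runnable example against the public pandas API:

import pandas as pd

s = pd.Series(["a", "b", "a"], dtype="category")
s = s.cat.add_categories(["c"])              # "c" exists but is unused
print(list(s.cat.categories))                # ['a', 'b', 'c']

trimmed = s.cat.remove_unused_categories()
print(list(trimmed.cat.categories))          # ['a', 'b']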
sixty-north/asq
|
asq/queryables.py
|
https://github.com/sixty-north/asq/blob/db0c4cbcf2118435136d4b63c62a12711441088e/asq/queryables.py#L417-L476
|
def select_many_with_correspondence(
self,
collection_selector=identity,
result_selector=KeyedElement):
'''Projects each element of a sequence to an intermediate new sequence,
and flattens the resulting sequence, into one sequence and uses a
selector function to incorporate the corresponding source for each item
in the result sequence.
Note: This method uses deferred execution.
Args:
collection_selector: A unary function mapping each element of the
source iterable into an intermediate sequence. The single
argument of the collection_selector is the value of an element
from the source sequence. The return value should be an
iterable derived from that element value. The default
collection_selector, which is the identity function, assumes
that each element of the source sequence is itself iterable.
result_selector:
An optional binary function mapping the elements in the
flattened intermediate sequence together with their
corresponding source elements to elements of the result
sequence. The two positional arguments of the result_selector
are, first the source element corresponding to an element from
the intermediate sequence, and second the actual element from
the intermediate sequence. The return value should be the
corresponding value in the result sequence. If no
result_selector function is provided, the elements of the
result sequence are KeyedElement namedtuples.
Returns:
A Queryable over a generated sequence whose elements are the result
of applying the one-to-many collection_selector to each element of
the source sequence, concatenating the results into an intermediate
sequence, and then mapping each of those elements through the
result_selector which incorporates the corresponding source element
into the result sequence.
Raises:
ValueError: If this Queryable has been closed.
TypeError: If projector or selector are not callable.
'''
if self.closed():
raise ValueError("Attempt to call "
"select_many_with_correspondence() on a closed Queryable.")
if not is_callable(collection_selector):
raise TypeError("select_many_with_correspondence() parameter "
"projector={0} is not callable".format(repr(collection_selector)))
if not is_callable(result_selector):
raise TypeError("select_many_with_correspondence() parameter "
"selector={0} is not callable".format(repr(result_selector)))
return self._create(
self._generate_select_many_with_correspondence(collection_selector,
result_selector))
|
[
"def",
"select_many_with_correspondence",
"(",
"self",
",",
"collection_selector",
"=",
"identity",
",",
"result_selector",
"=",
"KeyedElement",
")",
":",
"if",
"self",
".",
"closed",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Attempt to call \"",
"\"select_many_with_correspondence() on a closed Queryable.\"",
")",
"if",
"not",
"is_callable",
"(",
"collection_selector",
")",
":",
"raise",
"TypeError",
"(",
"\"select_many_with_correspondence() parameter \"",
"\"projector={0} is not callable\"",
".",
"format",
"(",
"repr",
"(",
"collection_selector",
")",
")",
")",
"if",
"not",
"is_callable",
"(",
"result_selector",
")",
":",
"raise",
"TypeError",
"(",
"\"select_many_with_correspondence() parameter \"",
"\"selector={0} is not callable\"",
".",
"format",
"(",
"repr",
"(",
"result_selector",
")",
")",
")",
"return",
"self",
".",
"_create",
"(",
"self",
".",
"_generate_select_many_with_correspondence",
"(",
"collection_selector",
",",
"result_selector",
")",
")"
] |
Projects each element of a sequence to an intermediate new sequence,
and flattens the resulting sequence, into one sequence and uses a
selector function to incorporate the corresponding source for each item
in the result sequence.
Note: This method uses deferred execution.
Args:
collection_selector: A unary function mapping each element of the
source iterable into an intermediate sequence. The single
argument of the collection_selector is the value of an element
from the source sequence. The return value should be an
iterable derived from that element value. The default
collection_selector, which is the identity function, assumes
that each element of the source sequence is itself iterable.
result_selector:
An optional binary function mapping the elements in the
flattened intermediate sequence together with their
corresponding source elements to elements of the result
sequence. The two positional arguments of the result_selector
are, first the source element corresponding to an element from
the intermediate sequence, and second the actual element from
the intermediate sequence. The return value should be the
corresponding value in the result sequence. If no
result_selector function is provided, the elements of the
result sequence are KeyedElement namedtuples.
Returns:
A Queryable over a generated sequence whose elements are the result
of applying the one-to-many collection_selector to each element of
the source sequence, concatenating the results into an intermediate
sequence, and then mapping each of those elements through the
result_selector which incorporates the corresponding source element
into the result sequence.
Raises:
ValueError: If this Queryable has been closed.
TypeError: If projector or selector are not callable.
|
[
"Projects",
"each",
"element",
"of",
"a",
"sequence",
"to",
"an",
"intermediate",
"new",
"sequence",
"and",
"flattens",
"the",
"resulting",
"sequence",
"into",
"one",
"sequence",
"and",
"uses",
"a",
"selector",
"function",
"to",
"incorporate",
"the",
"corresponding",
"source",
"for",
"each",
"item",
"in",
"the",
"result",
"sequence",
"."
] |
python
|
train
|
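select_many_with_correspondence above flattens an intermediate sequence per source element while remembering which source element each flattened item came from. A plain-Python generator sketch of that behaviour (deferred, as the docstring notes) is:

def select_many_with_correspondence(source, collection_selector=iter,
                                    result_selector=lambda src, item: (src, item)):
    # Deferred execution: nothing runs until the generator is consumed.
    for element in source:
        for item in collection_selector(element):
            yield result_selector(element, item)

pairs = select_many_with_correspondence(["ab", "cd"], list)
print(list(pairs))   # [('ab', 'a'), ('ab', 'b'), ('cd', 'c'), ('cd', 'd')]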
weluse/django-nose-selenium
|
noseselenium/plugins.py
|
https://github.com/weluse/django-nose-selenium/blob/19a09b9455545f70271f884649323a38812793e6/noseselenium/plugins.py#L81-L89
|
def _patch_static_handler(handler):
"""Patch in support for static files serving if supported and enabled.
"""
if django.VERSION[:2] < (1, 3):
return
from django.contrib.staticfiles.handlers import StaticFilesHandler
return StaticFilesHandler(handler)
|
[
"def",
"_patch_static_handler",
"(",
"handler",
")",
":",
"if",
"django",
".",
"VERSION",
"[",
":",
"2",
"]",
"<",
"(",
"1",
",",
"3",
")",
":",
"return",
"from",
"django",
".",
"contrib",
".",
"staticfiles",
".",
"handlers",
"import",
"StaticFilesHandler",
"return",
"StaticFilesHandler",
"(",
"handler",
")"
] |
Patch in support for static files serving if supported and enabled.
|
[
"Patch",
"in",
"support",
"for",
"static",
"files",
"serving",
"if",
"supported",
"and",
"enabled",
"."
] |
python
|
train
|
kodexlab/reliure
|
reliure/web.py
|
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/web.py#L248-L257
|
def options(self):
""" Engine options discover HTTP entry point
"""
#configure engine with an empty dict to ensure default selection/options
self.engine.configure({})
conf = self.engine.as_dict()
conf["returns"] = [oname for oname in six.iterkeys(self._outputs)]
    # Note: we override args to only list the ones that are declared in this view
conf["args"] = [iname for iname in six.iterkeys(self._inputs)]
return jsonify(conf)
|
[
"def",
"options",
"(",
"self",
")",
":",
"#configure engine with an empty dict to ensure default selection/options",
"self",
".",
"engine",
".",
"configure",
"(",
"{",
"}",
")",
"conf",
"=",
"self",
".",
"engine",
".",
"as_dict",
"(",
")",
"conf",
"[",
"\"returns\"",
"]",
"=",
"[",
"oname",
"for",
"oname",
"in",
"six",
".",
"iterkeys",
"(",
"self",
".",
"_outputs",
")",
"]",
"# Note: we overide args to only list the ones that are declared in this view",
"conf",
"[",
"\"args\"",
"]",
"=",
"[",
"iname",
"for",
"iname",
"in",
"six",
".",
"iterkeys",
"(",
"self",
".",
"_inputs",
")",
"]",
"return",
"jsonify",
"(",
"conf",
")"
] |
Engine options discover HTTP entry point
|
[
"Engine",
"options",
"discover",
"HTTP",
"entry",
"point"
] |
python
|
train
|
yunojuno/python-env-utils
|
env_utils/utils.py
|
https://github.com/yunojuno/python-env-utils/blob/7f3b5635f93322759856644901221955908e7e99/env_utils/utils.py#L146-L149
|
def get_list(key, *default, **kwargs):
"""Return env var as a list."""
separator = kwargs.get('separator', ' ')
return get_env(key, *default, coerce=lambda x: x.split(separator))
|
[
"def",
"get_list",
"(",
"key",
",",
"*",
"default",
",",
"*",
"*",
"kwargs",
")",
":",
"separator",
"=",
"kwargs",
".",
"get",
"(",
"'separator'",
",",
"' '",
")",
"return",
"get_env",
"(",
"key",
",",
"*",
"default",
",",
"coerce",
"=",
"lambda",
"x",
":",
"x",
".",
"split",
"(",
"separator",
")",
")"
] |
Return env var as a list.
|
[
"Return",
"env",
"var",
"as",
"a",
"list",
"."
] |
python
|
train
|
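get_list above reads an environment variable and splits it on a separator. A self-contained version using os.environ directly (the variable name is made up for the example):

import os

def get_list(key, default=None, separator=" "):
    # Split the raw value on the separator, or fall back to the default.
    raw = os.environ.get(key)
    if raw is None:
        return [] if default is None else default
    return raw.split(separator)

os.environ["ALLOWED_HOSTS"] = "localhost,example.com"
print(get_list("ALLOWED_HOSTS", separator=","))   # ['localhost', 'example.com']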
BlockHub/blockhubdpostools
|
dpostools/dbtools.py
|
https://github.com/BlockHub/blockhubdpostools/blob/27712cd97cd3658ee54a4330ff3135b51a01d7d1/dpostools/dbtools.py#L519-L654
|
def tbw(self, delegate_address, blacklist=None, share_fees=False, compound_interest=False):
"""This function doesn't work yet. Instead use legacy.trueshare() for a functional tbw script"""
if not blacklist:
blacklist = []
delegate_public_key = self.account_details(address=delegate_address)['public_key']
height_at_calculation = self.node_height_details()['height']
# string format of the rawasset
minvote = '{{"votes":["-{0}"]}}'.format(delegate_public_key)
plusvote = '{{"votes":["+{0}"]}}'.format(delegate_public_key)
events = self.get_events_vote_cluster(delegate_address)
votes = self.get_historic_voters(delegate_address)
blocks = self.get_blocks(delegate_address)
# create a map of voters
voter_dict = {}
for voter in votes:
voter_dict.update({voter: {
'balance': 0.0,
'status': False,
'last_payout': votes[voter]['height'],
'share': 0.0,
'vote_height': votes[voter]['height'],
'blocks_forged': []}
})
for blacklisted_address in blacklist:
voter_dict.pop(blacklisted_address, None)
last_payout = self.get_last_out_transactions(delegate_address)
# not all voters have had a payout, thus a KeyError is thrown
for payout in last_payout:
try:
voter_dict[payout]['last_payout'] = last_payout[payout]['height']
except KeyError:
pass
# the change in the previous state of the voter_dict. This is added to the voterdict if
# no state change occurs in the blockchain.
delta_state = {}
no_state_change = False
block_keys = sorted(list(blocks.keys()))
block_nr = 0
try:
for id in events:
# calculating poolbalances and updating shares
if events[id]['height'] > blocks[block_keys[block_nr]]['height']:
# if the state is the same for the votepool, the previous calculation can be reused.
block_nr += 1
if no_state_change:
for x in delta_state:
voter_dict[x]['share'] += delta_state[x]
continue
# update pool balances
poolbalance = 0
delta_state = {}
for i in voter_dict:
# here we update the poolbalance
if compound_interest:
balance = voter_dict[i]['balance'] + voter_dict[i]['share']
else:
balance = voter_dict[i]['balance']
if voter_dict[i]['status']:
# if not voter_dict[i]['balance'] < 0:
poolbalance += balance
# else:
# raise exceptions.NegativeBalanceError('balance lower than zero for: {0}. balance: {1}'.format(i, voter_dict[i]['balance']))
# here we calculate the share per voter
for i in voter_dict:
if compound_interest:
balance = voter_dict[i]['balance'] + voter_dict[i]['share']
else:
balance = voter_dict[i]['balance']
if voter_dict[i]['status'] and voter_dict[i]['last_payout'] < blocks[block_keys[block_nr]]['height']:
if share_fees:
share = (balance / poolbalance) * (blocks[block_keys[block_nr]]['reward'] +
blocks[block_keys[block_nr]]['totalFee'])
else:
share = (balance / poolbalance) * blocks[block_keys[block_nr]]['reward']
voter_dict[i]['share'] += share
delta_state.update({i: share})
no_state_change = True
continue
# parsing an event
no_state_change = False
if events[id]['event_type'] == 'transaction':
if events[id]['recipient_id'] == 'Acw2vAVA48TcV8EnoBmZKJdV8bxnW6Y4E9':
print(events[id]['amount'])
# parsing a transaction
if events[id]['event_type'] == 'transaction':
if events[id]['recipient_id'] in voter_dict:
voter_dict[events[id]['recipient_id']]['balance'] += events[id]['amount']
if events[id]['sender_id'] in voter_dict:
voter_dict[events[id]['sender_id']]['balance'] -= (events[id]['amount'] + events[id]['fee'])
if events[id]['sender_id'] in voter_dict and events[id]['type'] == 3 and plusvote in events[id]['rawasset']:
voter_dict[events[id]['sender_id']]['status'] = True
if events[id]['sender_id'] in voter_dict and events[id]['type'] == 3 and minvote in events[id]['rawasset']:
voter_dict[events[id]['sender_id']]['status'] = False
# parsing a forged block (if forged by a voter)
if events[id]['event_type'] == 'block':
voter_dict[events[id]['address']]['balance'] += (events[id]['reward'] + events[id]['total_fee'])
# the transaction for loop ends with the final transaction. However more blocks may be forged. This copies
# the final delta share and adds it to the share x the amount of blocks left.
remaining_blocks = len(block_keys) - block_nr - 1
for i in range(remaining_blocks):
for x in delta_state:
voter_dict[x]['share'] += delta_state[x]
# and indexerror indicates that we have ran out of forged blocks, thus the calculation is done (blocks[block_nr]
# throw the error)
except IndexError:
raise
return voter_dict, height_at_calculation
|
[
"def",
"tbw",
"(",
"self",
",",
"delegate_address",
",",
"blacklist",
"=",
"None",
",",
"share_fees",
"=",
"False",
",",
"compound_interest",
"=",
"False",
")",
":",
"if",
"not",
"blacklist",
":",
"blacklist",
"=",
"[",
"]",
"delegate_public_key",
"=",
"self",
".",
"account_details",
"(",
"address",
"=",
"delegate_address",
")",
"[",
"'public_key'",
"]",
"height_at_calculation",
"=",
"self",
".",
"node_height_details",
"(",
")",
"[",
"'height'",
"]",
"# string format of the rawasset",
"minvote",
"=",
"'{{\"votes\":[\"-{0}\"]}}'",
".",
"format",
"(",
"delegate_public_key",
")",
"plusvote",
"=",
"'{{\"votes\":[\"+{0}\"]}}'",
".",
"format",
"(",
"delegate_public_key",
")",
"events",
"=",
"self",
".",
"get_events_vote_cluster",
"(",
"delegate_address",
")",
"votes",
"=",
"self",
".",
"get_historic_voters",
"(",
"delegate_address",
")",
"blocks",
"=",
"self",
".",
"get_blocks",
"(",
"delegate_address",
")",
"# create a map of voters",
"voter_dict",
"=",
"{",
"}",
"for",
"voter",
"in",
"votes",
":",
"voter_dict",
".",
"update",
"(",
"{",
"voter",
":",
"{",
"'balance'",
":",
"0.0",
",",
"'status'",
":",
"False",
",",
"'last_payout'",
":",
"votes",
"[",
"voter",
"]",
"[",
"'height'",
"]",
",",
"'share'",
":",
"0.0",
",",
"'vote_height'",
":",
"votes",
"[",
"voter",
"]",
"[",
"'height'",
"]",
",",
"'blocks_forged'",
":",
"[",
"]",
"}",
"}",
")",
"for",
"blacklisted_address",
"in",
"blacklist",
":",
"voter_dict",
".",
"pop",
"(",
"blacklisted_address",
",",
"None",
")",
"last_payout",
"=",
"self",
".",
"get_last_out_transactions",
"(",
"delegate_address",
")",
"# not all voters have had a payout, thus a KeyError is thrown",
"for",
"payout",
"in",
"last_payout",
":",
"try",
":",
"voter_dict",
"[",
"payout",
"]",
"[",
"'last_payout'",
"]",
"=",
"last_payout",
"[",
"payout",
"]",
"[",
"'height'",
"]",
"except",
"KeyError",
":",
"pass",
"# the change in the previous state of the voter_dict. This is added to the voterdict if",
"# no state change occurs in the blockchain.",
"delta_state",
"=",
"{",
"}",
"no_state_change",
"=",
"False",
"block_keys",
"=",
"sorted",
"(",
"list",
"(",
"blocks",
".",
"keys",
"(",
")",
")",
")",
"block_nr",
"=",
"0",
"try",
":",
"for",
"id",
"in",
"events",
":",
"# calculating poolbalances and updating shares",
"if",
"events",
"[",
"id",
"]",
"[",
"'height'",
"]",
">",
"blocks",
"[",
"block_keys",
"[",
"block_nr",
"]",
"]",
"[",
"'height'",
"]",
":",
"# if the state is the same for the votepool, the previous calculation can be reused.",
"block_nr",
"+=",
"1",
"if",
"no_state_change",
":",
"for",
"x",
"in",
"delta_state",
":",
"voter_dict",
"[",
"x",
"]",
"[",
"'share'",
"]",
"+=",
"delta_state",
"[",
"x",
"]",
"continue",
"# update pool balances",
"poolbalance",
"=",
"0",
"delta_state",
"=",
"{",
"}",
"for",
"i",
"in",
"voter_dict",
":",
"# here we update the poolbalance",
"if",
"compound_interest",
":",
"balance",
"=",
"voter_dict",
"[",
"i",
"]",
"[",
"'balance'",
"]",
"+",
"voter_dict",
"[",
"i",
"]",
"[",
"'share'",
"]",
"else",
":",
"balance",
"=",
"voter_dict",
"[",
"i",
"]",
"[",
"'balance'",
"]",
"if",
"voter_dict",
"[",
"i",
"]",
"[",
"'status'",
"]",
":",
"# if not voter_dict[i]['balance'] < 0:",
"poolbalance",
"+=",
"balance",
"# else:",
"# raise exceptions.NegativeBalanceError('balance lower than zero for: {0}. balance: {1}'.format(i, voter_dict[i]['balance']))",
"# here we calculate the share per voter",
"for",
"i",
"in",
"voter_dict",
":",
"if",
"compound_interest",
":",
"balance",
"=",
"voter_dict",
"[",
"i",
"]",
"[",
"'balance'",
"]",
"+",
"voter_dict",
"[",
"i",
"]",
"[",
"'share'",
"]",
"else",
":",
"balance",
"=",
"voter_dict",
"[",
"i",
"]",
"[",
"'balance'",
"]",
"if",
"voter_dict",
"[",
"i",
"]",
"[",
"'status'",
"]",
"and",
"voter_dict",
"[",
"i",
"]",
"[",
"'last_payout'",
"]",
"<",
"blocks",
"[",
"block_keys",
"[",
"block_nr",
"]",
"]",
"[",
"'height'",
"]",
":",
"if",
"share_fees",
":",
"share",
"=",
"(",
"balance",
"/",
"poolbalance",
")",
"*",
"(",
"blocks",
"[",
"block_keys",
"[",
"block_nr",
"]",
"]",
"[",
"'reward'",
"]",
"+",
"blocks",
"[",
"block_keys",
"[",
"block_nr",
"]",
"]",
"[",
"'totalFee'",
"]",
")",
"else",
":",
"share",
"=",
"(",
"balance",
"/",
"poolbalance",
")",
"*",
"blocks",
"[",
"block_keys",
"[",
"block_nr",
"]",
"]",
"[",
"'reward'",
"]",
"voter_dict",
"[",
"i",
"]",
"[",
"'share'",
"]",
"+=",
"share",
"delta_state",
".",
"update",
"(",
"{",
"i",
":",
"share",
"}",
")",
"no_state_change",
"=",
"True",
"continue",
"# parsing an event",
"no_state_change",
"=",
"False",
"if",
"events",
"[",
"id",
"]",
"[",
"'event_type'",
"]",
"==",
"'transaction'",
":",
"if",
"events",
"[",
"id",
"]",
"[",
"'recipient_id'",
"]",
"==",
"'Acw2vAVA48TcV8EnoBmZKJdV8bxnW6Y4E9'",
":",
"print",
"(",
"events",
"[",
"id",
"]",
"[",
"'amount'",
"]",
")",
"# parsing a transaction",
"if",
"events",
"[",
"id",
"]",
"[",
"'event_type'",
"]",
"==",
"'transaction'",
":",
"if",
"events",
"[",
"id",
"]",
"[",
"'recipient_id'",
"]",
"in",
"voter_dict",
":",
"voter_dict",
"[",
"events",
"[",
"id",
"]",
"[",
"'recipient_id'",
"]",
"]",
"[",
"'balance'",
"]",
"+=",
"events",
"[",
"id",
"]",
"[",
"'amount'",
"]",
"if",
"events",
"[",
"id",
"]",
"[",
"'sender_id'",
"]",
"in",
"voter_dict",
":",
"voter_dict",
"[",
"events",
"[",
"id",
"]",
"[",
"'sender_id'",
"]",
"]",
"[",
"'balance'",
"]",
"-=",
"(",
"events",
"[",
"id",
"]",
"[",
"'amount'",
"]",
"+",
"events",
"[",
"id",
"]",
"[",
"'fee'",
"]",
")",
"if",
"events",
"[",
"id",
"]",
"[",
"'sender_id'",
"]",
"in",
"voter_dict",
"and",
"events",
"[",
"id",
"]",
"[",
"'type'",
"]",
"==",
"3",
"and",
"plusvote",
"in",
"events",
"[",
"id",
"]",
"[",
"'rawasset'",
"]",
":",
"voter_dict",
"[",
"events",
"[",
"id",
"]",
"[",
"'sender_id'",
"]",
"]",
"[",
"'status'",
"]",
"=",
"True",
"if",
"events",
"[",
"id",
"]",
"[",
"'sender_id'",
"]",
"in",
"voter_dict",
"and",
"events",
"[",
"id",
"]",
"[",
"'type'",
"]",
"==",
"3",
"and",
"minvote",
"in",
"events",
"[",
"id",
"]",
"[",
"'rawasset'",
"]",
":",
"voter_dict",
"[",
"events",
"[",
"id",
"]",
"[",
"'sender_id'",
"]",
"]",
"[",
"'status'",
"]",
"=",
"False",
"# parsing a forged block (if forged by a voter)",
"if",
"events",
"[",
"id",
"]",
"[",
"'event_type'",
"]",
"==",
"'block'",
":",
"voter_dict",
"[",
"events",
"[",
"id",
"]",
"[",
"'address'",
"]",
"]",
"[",
"'balance'",
"]",
"+=",
"(",
"events",
"[",
"id",
"]",
"[",
"'reward'",
"]",
"+",
"events",
"[",
"id",
"]",
"[",
"'total_fee'",
"]",
")",
"# the transaction for loop ends with the final transaction. However more blocks may be forged. This copies",
"# the final delta share and adds it to the share x the amount of blocks left.",
"remaining_blocks",
"=",
"len",
"(",
"block_keys",
")",
"-",
"block_nr",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"remaining_blocks",
")",
":",
"for",
"x",
"in",
"delta_state",
":",
"voter_dict",
"[",
"x",
"]",
"[",
"'share'",
"]",
"+=",
"delta_state",
"[",
"x",
"]",
"# and indexerror indicates that we have ran out of forged blocks, thus the calculation is done (blocks[block_nr]",
"# throw the error)",
"except",
"IndexError",
":",
"raise",
"return",
"voter_dict",
",",
"height_at_calculation"
] |
This function doesn't work yet. Instead use legacy.trueshare() for a functional tbw script
|
[
"This",
"function",
"doesn",
"t",
"work",
"yet",
".",
"Instead",
"use",
"legacy",
".",
"trueshare",
"()",
"for",
"a",
"functional",
"tbw",
"script"
] |
python
|
valid
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1368-L1385
|
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
|
[
"def",
"_ingest_boolean",
"(",
"self",
",",
"input_boolean",
",",
"path_to_root",
")",
":",
"valid_boolean",
"=",
"False",
"try",
":",
"valid_boolean",
"=",
"self",
".",
"_validate_boolean",
"(",
"input_boolean",
",",
"path_to_root",
")",
"except",
":",
"rules_path_to_root",
"=",
"re",
".",
"sub",
"(",
"'\\[\\d+\\]'",
",",
"'[0]'",
",",
"path_to_root",
")",
"if",
"'default_value'",
"in",
"self",
".",
"keyMap",
"[",
"rules_path_to_root",
"]",
":",
"valid_boolean",
"=",
"self",
".",
"keyMap",
"[",
"rules_path_to_root",
"]",
"[",
"'default_value'",
"]",
"return",
"valid_boolean"
] |
a helper method for ingesting a boolean
:return: valid_boolean
|
[
"a",
"helper",
"method",
"for",
"ingesting",
"a",
"boolean"
] |
python
|
train
|
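_ingest_boolean above validates a value and, on failure, falls back to a default looked up from a rules map whose keys use [0] in place of concrete list indexes. A self-contained sketch of that fallback, with an illustrative rules map standing in for keyMap:

import re

key_map = {".flags[0]": {"default_value": True}}     # illustrative rules

def ingest_boolean(value, path_to_root):
    if isinstance(value, bool):                      # stand-in for validation
        return value
    rules_path = re.sub(r"\[\d+\]", "[0]", path_to_root)
    return key_map.get(rules_path, {}).get("default_value", False)

print(ingest_boolean("oops", ".flags[3]"))           # True, from the default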
open-homeautomation/pknx
|
knxip/ip.py
|
https://github.com/open-homeautomation/pknx/blob/a8aed8271563923c447aa330ba7c1c2927286f7a/knxip/ip.py#L182-L193
|
def init_group_write(self, dst_addr=1, data=None, dptsize=0):
"""Initialize the CEMI frame for a group write operation."""
self.init_group(dst_addr)
# unnumbered data packet, group write
self.tpci_apci = 0x00 * 256 + 0x80
self.dptsize = dptsize
if data is None:
self.data = [0]
else:
self.data = data
|
[
"def",
"init_group_write",
"(",
"self",
",",
"dst_addr",
"=",
"1",
",",
"data",
"=",
"None",
",",
"dptsize",
"=",
"0",
")",
":",
"self",
".",
"init_group",
"(",
"dst_addr",
")",
"# unnumbered data packet, group write",
"self",
".",
"tpci_apci",
"=",
"0x00",
"*",
"256",
"+",
"0x80",
"self",
".",
"dptsize",
"=",
"dptsize",
"if",
"data",
"is",
"None",
":",
"self",
".",
"data",
"=",
"[",
"0",
"]",
"else",
":",
"self",
".",
"data",
"=",
"data"
] |
Initialize the CEMI frame for a group write operation.
|
[
"Initialize",
"the",
"CEMI",
"frame",
"for",
"a",
"group",
"write",
"operation",
"."
] |
python
|
train
|
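init_group_write above sets up a CEMI group-write: an unnumbered data packet whose APCI is 0x0080 and whose payload defaults to [0]. A small sketch of those same defaults outside the class:

def group_write_fields(dst_addr=1, data=None, dptsize=0):
    tpci_apci = 0x00 * 256 + 0x80      # unnumbered data packet, group write
    return {"dst_addr": dst_addr, "tpci_apci": tpci_apci,
            "dptsize": dptsize, "data": [0] if data is None else data}

print(group_write_fields(dst_addr=0x0901, data=[1]))
# {'dst_addr': 2305, 'tpci_apci': 128, 'dptsize': 0, 'data': [1]}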
zhmcclient/python-zhmcclient
|
zhmcclient_mock/_hmc.py
|
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_hmc.py#L3077-L3110
|
def get_metric_values_response(self):
"""
Get the faked metrics, for all metric groups and all resources that
have been prepared on the manager object of this context object, as a
string in the format needed for the "Get Metrics" operation response.
Returns:
"MetricsResponse" string as described for the "Get Metrics"
operation response.
"""
mv_list = self.get_metric_values()
resp_lines = []
for mv in mv_list:
group_name = mv[0]
resp_lines.append('"{}"'.format(group_name))
mo_vals = mv[1]
for mo_val in mo_vals:
resp_lines.append('"{}"'.format(mo_val.resource_uri))
resp_lines.append(
str(timestamp_from_datetime(mo_val.timestamp)))
v_list = []
for n, v in mo_val.values:
if isinstance(v, six.string_types):
v_str = '"{}"'.format(v)
else:
v_str = str(v)
v_list.append(v_str)
v_line = ','.join(v_list)
resp_lines.append(v_line)
resp_lines.append('')
resp_lines.append('')
resp_lines.append('')
return '\n'.join(resp_lines) + '\n'
|
[
"def",
"get_metric_values_response",
"(",
"self",
")",
":",
"mv_list",
"=",
"self",
".",
"get_metric_values",
"(",
")",
"resp_lines",
"=",
"[",
"]",
"for",
"mv",
"in",
"mv_list",
":",
"group_name",
"=",
"mv",
"[",
"0",
"]",
"resp_lines",
".",
"append",
"(",
"'\"{}\"'",
".",
"format",
"(",
"group_name",
")",
")",
"mo_vals",
"=",
"mv",
"[",
"1",
"]",
"for",
"mo_val",
"in",
"mo_vals",
":",
"resp_lines",
".",
"append",
"(",
"'\"{}\"'",
".",
"format",
"(",
"mo_val",
".",
"resource_uri",
")",
")",
"resp_lines",
".",
"append",
"(",
"str",
"(",
"timestamp_from_datetime",
"(",
"mo_val",
".",
"timestamp",
")",
")",
")",
"v_list",
"=",
"[",
"]",
"for",
"n",
",",
"v",
"in",
"mo_val",
".",
"values",
":",
"if",
"isinstance",
"(",
"v",
",",
"six",
".",
"string_types",
")",
":",
"v_str",
"=",
"'\"{}\"'",
".",
"format",
"(",
"v",
")",
"else",
":",
"v_str",
"=",
"str",
"(",
"v",
")",
"v_list",
".",
"append",
"(",
"v_str",
")",
"v_line",
"=",
"','",
".",
"join",
"(",
"v_list",
")",
"resp_lines",
".",
"append",
"(",
"v_line",
")",
"resp_lines",
".",
"append",
"(",
"''",
")",
"resp_lines",
".",
"append",
"(",
"''",
")",
"resp_lines",
".",
"append",
"(",
"''",
")",
"return",
"'\\n'",
".",
"join",
"(",
"resp_lines",
")",
"+",
"'\\n'"
] |
Get the faked metrics, for all metric groups and all resources that
have been prepared on the manager object of this context object, as a
string in the format needed for the "Get Metrics" operation response.
Returns:
"MetricsResponse" string as described for the "Get Metrics"
operation response.
|
[
"Get",
"the",
"faked",
"metrics",
"for",
"all",
"metric",
"groups",
"and",
"all",
"resources",
"that",
"have",
"been",
"prepared",
"on",
"the",
"manager",
"object",
"of",
"this",
"context",
"object",
"as",
"a",
"string",
"in",
"the",
"format",
"needed",
"for",
"the",
"Get",
"Metrics",
"operation",
"response",
"."
] |
python
|
train
|
deepmind/sonnet
|
sonnet/python/modules/sequential.py
|
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/sequential.py#L79-L107
|
def _build(self, *args):
"""Connects the Sequential module into the graph.
Args:
*args: A tuple of inputs, to be unpacked as the arguments to the first
layer.
Returns:
The output value of the last layer.
"""
net = args
if not self._layers:
# If the sequential is passed a single arg, this will end up being
# wrapped in an extra layer of tuple by *args. Normally we internally
# handle this in the loop below, but if there are no layers we unpack here
# in order to make Sequential([]) act like an identity, which seems right.
if len(args) == 1:
return args[0]
else:
return args
for layer in self._layers:
if isinstance(net, tuple):
net = layer(*net)
else:
net = layer(net)
return net
|
[
"def",
"_build",
"(",
"self",
",",
"*",
"args",
")",
":",
"net",
"=",
"args",
"if",
"not",
"self",
".",
"_layers",
":",
"# If the sequential is passed a single arg, this will end up being",
"# wrapped in an extra layer of tuple by *args. Normally we internally",
"# handle this in the loop below, but if there are no layers we unpack here",
"# in order to make Sequential([]) act like an identity, which seems right.",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"return",
"args",
"[",
"0",
"]",
"else",
":",
"return",
"args",
"for",
"layer",
"in",
"self",
".",
"_layers",
":",
"if",
"isinstance",
"(",
"net",
",",
"tuple",
")",
":",
"net",
"=",
"layer",
"(",
"*",
"net",
")",
"else",
":",
"net",
"=",
"layer",
"(",
"net",
")",
"return",
"net"
] |
Connects the Sequential module into the graph.
Args:
*args: A tuple of inputs, to be unpacked as the arguments to the first
layer.
Returns:
The output value of the last layer.
|
[
"Connects",
"the",
"Sequential",
"module",
"into",
"the",
"graph",
"."
] |
python
|
train
|
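Sequential._build above threads the inputs through each layer, unpacking tuples along the way, and acts as an identity when there are no layers. The same connection logic as a plain-Python sketch:

def run_sequential(layers, *args):
    if not layers:
        # Sequential([]) behaves like an identity on its inputs.
        return args[0] if len(args) == 1 else args
    net = args
    for layer in layers:
        net = layer(*net) if isinstance(net, tuple) else layer(net)
    return net

print(run_sequential([lambda x: x + 1, lambda x: x * 2], 3))   # 8
print(run_sequential([], 3))                                   # 3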
chaoss/grimoirelab-perceval
|
perceval/backends/core/git.py
|
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/git.py#L1204-L1223
|
def _update_ref(self, ref, delete=False):
"""Update a reference."""
cmd = ['git', 'update-ref']
if delete:
cmd.extend(['-d', ref.refname])
action = 'deleted'
else:
cmd.extend([ref.refname, ref.hash])
action = 'updated to %s' % ref.hash
try:
self._exec(cmd, cwd=self.dirpath, env=self.gitenv)
except RepositoryError as e:
logger.warning("Git %s ref could not be %s during sync process in %s (%s); skipped",
ref.refname, action, self.uri, self.dirpath)
else:
logger.debug("Git %s ref %s in %s (%s)",
ref.refname, action, self.uri, self.dirpath)
|
[
"def",
"_update_ref",
"(",
"self",
",",
"ref",
",",
"delete",
"=",
"False",
")",
":",
"cmd",
"=",
"[",
"'git'",
",",
"'update-ref'",
"]",
"if",
"delete",
":",
"cmd",
".",
"extend",
"(",
"[",
"'-d'",
",",
"ref",
".",
"refname",
"]",
")",
"action",
"=",
"'deleted'",
"else",
":",
"cmd",
".",
"extend",
"(",
"[",
"ref",
".",
"refname",
",",
"ref",
".",
"hash",
"]",
")",
"action",
"=",
"'updated to %s'",
"%",
"ref",
".",
"hash",
"try",
":",
"self",
".",
"_exec",
"(",
"cmd",
",",
"cwd",
"=",
"self",
".",
"dirpath",
",",
"env",
"=",
"self",
".",
"gitenv",
")",
"except",
"RepositoryError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"\"Git %s ref could not be %s during sync process in %s (%s); skipped\"",
",",
"ref",
".",
"refname",
",",
"action",
",",
"self",
".",
"uri",
",",
"self",
".",
"dirpath",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Git %s ref %s in %s (%s)\"",
",",
"ref",
".",
"refname",
",",
"action",
",",
"self",
".",
"uri",
",",
"self",
".",
"dirpath",
")"
] |
Update a reference.
|
[
"Update",
"a",
"reference",
"."
] |
python
|
test
|
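_update_ref above shells out to `git update-ref`, either deleting a reference or pointing it at a new hash. A sketch of just the command construction (the ref name and hash are placeholders):

def update_ref_cmd(refname, sha=None, delete=False):
    cmd = ["git", "update-ref"]
    if delete:
        cmd.extend(["-d", refname])        # delete the reference
    else:
        cmd.extend([refname, sha])         # point it at the given commit
    return cmd

print(update_ref_cmd("refs/heads/topic", "a1b2c3d"))
print(update_ref_cmd("refs/heads/topic", delete=True))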
AkihikoITOH/capybara
|
capybara/virtualenv/lib/python2.7/site-packages/werkzeug/wrappers.py
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/werkzeug/wrappers.py#L1078-L1167
|
def get_wsgi_headers(self, environ):
"""This is automatically called right before the response is started
and returns headers modified for the given environment. It returns a
copy of the headers from the response with some modifications applied
if necessary.
For example the location header (if present) is joined with the root
URL of the environment. Also the content length is automatically set
to zero here for certain status codes.
.. versionchanged:: 0.6
Previously that function was called `fix_headers` and modified
the response object in place. Also since 0.6, IRIs in location
and content-location headers are handled properly.
Also starting with 0.6, Werkzeug will attempt to set the content
length if it is able to figure it out on its own. This is the
case if all the strings in the response iterable are already
encoded and the iterable is buffered.
:param environ: the WSGI environment of the request.
:return: returns a new :class:`~werkzeug.datastructures.Headers`
object.
"""
headers = Headers(self.headers)
location = None
content_location = None
content_length = None
status = self.status_code
# iterate over the headers to find all values in one go. Because
# get_wsgi_headers is used each response that gives us a tiny
# speedup.
for key, value in headers:
ikey = key.lower()
if ikey == u'location':
location = value
elif ikey == u'content-location':
content_location = value
elif ikey == u'content-length':
content_length = value
# make sure the location header is an absolute URL
if location is not None:
old_location = location
if isinstance(location, text_type):
# Safe conversion is necessary here as we might redirect
# to a broken URI scheme (for instance itms-services).
location = iri_to_uri(location, safe_conversion=True)
if self.autocorrect_location_header:
current_url = get_current_url(environ, root_only=True)
if isinstance(current_url, text_type):
current_url = iri_to_uri(current_url)
location = url_join(current_url, location)
if location != old_location:
headers['Location'] = location
# make sure the content location is a URL
if content_location is not None and \
isinstance(content_location, text_type):
headers['Content-Location'] = iri_to_uri(content_location)
# remove entity headers and set content length to zero if needed.
# Also update content_length accordingly so that the automatic
# content length detection does not trigger in the following
# code.
if 100 <= status < 200 or status == 204:
headers['Content-Length'] = content_length = u'0'
elif status == 304:
remove_entity_headers(headers)
# if we can determine the content length automatically, we
# should try to do that. But only if this does not involve
# flattening the iterator or encoding of unicode strings in
# the response. We however should not do that if we have a 304
# response.
if self.automatically_set_content_length and \
self.is_sequence and content_length is None and status != 304:
try:
content_length = sum(len(to_bytes(x, 'ascii'))
for x in self.response)
except UnicodeError:
# aha, something non-bytestringy in there, too bad, we
# can't safely figure out the length of the response.
pass
else:
headers['Content-Length'] = str(content_length)
return headers
|
[
"def",
"get_wsgi_headers",
"(",
"self",
",",
"environ",
")",
":",
"headers",
"=",
"Headers",
"(",
"self",
".",
"headers",
")",
"location",
"=",
"None",
"content_location",
"=",
"None",
"content_length",
"=",
"None",
"status",
"=",
"self",
".",
"status_code",
"# iterate over the headers to find all values in one go. Because",
"# get_wsgi_headers is used each response that gives us a tiny",
"# speedup.",
"for",
"key",
",",
"value",
"in",
"headers",
":",
"ikey",
"=",
"key",
".",
"lower",
"(",
")",
"if",
"ikey",
"==",
"u'location'",
":",
"location",
"=",
"value",
"elif",
"ikey",
"==",
"u'content-location'",
":",
"content_location",
"=",
"value",
"elif",
"ikey",
"==",
"u'content-length'",
":",
"content_length",
"=",
"value",
"# make sure the location header is an absolute URL",
"if",
"location",
"is",
"not",
"None",
":",
"old_location",
"=",
"location",
"if",
"isinstance",
"(",
"location",
",",
"text_type",
")",
":",
"# Safe conversion is necessary here as we might redirect",
"# to a broken URI scheme (for instance itms-services).",
"location",
"=",
"iri_to_uri",
"(",
"location",
",",
"safe_conversion",
"=",
"True",
")",
"if",
"self",
".",
"autocorrect_location_header",
":",
"current_url",
"=",
"get_current_url",
"(",
"environ",
",",
"root_only",
"=",
"True",
")",
"if",
"isinstance",
"(",
"current_url",
",",
"text_type",
")",
":",
"current_url",
"=",
"iri_to_uri",
"(",
"current_url",
")",
"location",
"=",
"url_join",
"(",
"current_url",
",",
"location",
")",
"if",
"location",
"!=",
"old_location",
":",
"headers",
"[",
"'Location'",
"]",
"=",
"location",
"# make sure the content location is a URL",
"if",
"content_location",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"content_location",
",",
"text_type",
")",
":",
"headers",
"[",
"'Content-Location'",
"]",
"=",
"iri_to_uri",
"(",
"content_location",
")",
"# remove entity headers and set content length to zero if needed.",
"# Also update content_length accordingly so that the automatic",
"# content length detection does not trigger in the following",
"# code.",
"if",
"100",
"<=",
"status",
"<",
"200",
"or",
"status",
"==",
"204",
":",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"content_length",
"=",
"u'0'",
"elif",
"status",
"==",
"304",
":",
"remove_entity_headers",
"(",
"headers",
")",
"# if we can determine the content length automatically, we",
"# should try to do that. But only if this does not involve",
"# flattening the iterator or encoding of unicode strings in",
"# the response. We however should not do that if we have a 304",
"# response.",
"if",
"self",
".",
"automatically_set_content_length",
"and",
"self",
".",
"is_sequence",
"and",
"content_length",
"is",
"None",
"and",
"status",
"!=",
"304",
":",
"try",
":",
"content_length",
"=",
"sum",
"(",
"len",
"(",
"to_bytes",
"(",
"x",
",",
"'ascii'",
")",
")",
"for",
"x",
"in",
"self",
".",
"response",
")",
"except",
"UnicodeError",
":",
"# aha, something non-bytestringy in there, too bad, we",
"# can't safely figure out the length of the response.",
"pass",
"else",
":",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"str",
"(",
"content_length",
")",
"return",
"headers"
] |
This is automatically called right before the response is started
and returns headers modified for the given environment. It returns a
copy of the headers from the response with some modifications applied
if necessary.
For example the location header (if present) is joined with the root
URL of the environment. Also the content length is automatically set
to zero here for certain status codes.
.. versionchanged:: 0.6
Previously that function was called `fix_headers` and modified
the response object in place. Also since 0.6, IRIs in location
and content-location headers are handled properly.
Also starting with 0.6, Werkzeug will attempt to set the content
length if it is able to figure it out on its own. This is the
case if all the strings in the response iterable are already
encoded and the iterable is buffered.
:param environ: the WSGI environment of the request.
:return: returns a new :class:`~werkzeug.datastructures.Headers`
object.
|
[
"This",
"is",
"automatically",
"called",
"right",
"before",
"the",
"response",
"is",
"started",
"and",
"returns",
"headers",
"modified",
"for",
"the",
"given",
"environment",
".",
"It",
"returns",
"a",
"copy",
"of",
"the",
"headers",
"from",
"the",
"response",
"with",
"some",
"modifications",
"applied",
"if",
"necessary",
"."
] |
python
|
test
|
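get_wsgi_headers above rewrites the Location header into an absolute URL and fixes Content-Length just before the response is started. Assuming a Werkzeug version where Response.get_wsgi_headers is still public (as in the vendored copy above) and autocorrect_location_header is enabled, the effect can be observed like this:

from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Response

environ = EnvironBuilder(base_url="http://example.com/").get_environ()
resp = Response(status=302, headers={"Location": "/login"})

headers = resp.get_wsgi_headers(environ)
print(headers["Location"])   # an absolute URL such as http://example.com/login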
openwisp/netdiff
|
netdiff/parsers/olsr.py
|
https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/olsr.py#L11-L18
|
def to_python(self, data):
"""
Adds support for txtinfo format
"""
try:
return super(OlsrParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_jsoninfo(e.data)
|
[
"def",
"to_python",
"(",
"self",
",",
"data",
")",
":",
"try",
":",
"return",
"super",
"(",
"OlsrParser",
",",
"self",
")",
".",
"to_python",
"(",
"data",
")",
"except",
"ConversionException",
"as",
"e",
":",
"return",
"self",
".",
"_txtinfo_to_jsoninfo",
"(",
"e",
".",
"data",
")"
] |
Adds support for txtinfo format
|
[
"Adds",
"support",
"for",
"txtinfo",
"format"
] |
python
|
train
|
ewels/MultiQC
|
multiqc/modules/rseqc/gene_body_coverage.py
|
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/rseqc/gene_body_coverage.py#L15-L106
|
def parse_reports(self):
""" Find RSeQC gene_body_coverage reports and parse their data """
# Set up vars
self.gene_body_cov_hist_counts = dict()
self.gene_body_cov_hist_percent = dict()
# TODO - Do separate parsing step to find skewness values
# and add these to the general stats table?
# Go through files and parse data
for f in self.find_log_files('rseqc/gene_body_coverage'):
# RSeQC >= v2.4
if f['f'].startswith('Percentile'):
keys = []
nrows = 0
for l in f['f'].splitlines():
s = l.split()
if len(keys) == 0:
keys = s[1:]
else:
nrows += 1
s_name = self.clean_s_name(s[0], f['root'])
if s_name in self.gene_body_cov_hist_counts:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.add_data_source(f, s_name, section='gene_body_coverage')
self.gene_body_cov_hist_counts[s_name] = OrderedDict()
for k, var in enumerate(s[1:]):
self.gene_body_cov_hist_counts[s_name][int(keys[k])] = float(var)
if nrows == 0:
log.warning("Empty geneBodyCoverage file found: {}".format(f['fn']))
# RSeQC < v2.4
elif f['f'].startswith('Total reads'):
if f['s_name'].endswith('.geneBodyCoverage'):
f['s_name'] = f['s_name'][:-17]
if f['s_name'] in self.gene_body_cov_hist_counts:
log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
self.add_data_source(f, section='gene_body_coverage')
self.gene_body_cov_hist_counts[f['s_name']] = OrderedDict()
nrows = 0
for l in f['f'].splitlines():
s = l.split()
try:
nrows += 1
self.gene_body_cov_hist_counts[f['s_name']][int(s[0])] = float(s[1])
except:
pass
if nrows == 0:
del self.gene_body_cov_hist_counts[f['s_name']]
log.warning("Empty geneBodyCoverage file found: {}".format(f['fn']))
# Filter to strip out ignored sample names
self.gene_body_cov_hist_counts = self.ignore_samples(self.gene_body_cov_hist_counts)
if len(self.gene_body_cov_hist_counts) > 0:
# Make a normalised percentage version of the data
for s_name in self.gene_body_cov_hist_counts:
self.gene_body_cov_hist_percent[s_name] = OrderedDict()
total = sum( self.gene_body_cov_hist_counts[s_name].values() )
for k, v in self.gene_body_cov_hist_counts[s_name].items():
self.gene_body_cov_hist_percent[s_name][k] = (v/total)*100
# Add line graph to section
pconfig = {
'id': 'rseqc_gene_body_coverage_plot',
'title': 'RSeQC: Gene Body Coverage',
'ylab': '% Coverage',
'xlab': "Gene Body Percentile (5' -> 3')",
'xmin': 0,
'xmax': 100,
'tt_label': "<strong>{point.x}% from 5'</strong>: {point.y:.2f}",
'data_labels': [
{'name': 'Percentages', 'ylab': 'Percentage Coverage'},
{'name': 'Counts', 'ylab': 'Coverage'}
]
}
self.add_section (
name = 'Gene Body Coverage',
anchor = 'rseqc-gene_body_coverage',
description = '<a href="http://rseqc.sourceforge.net/#genebody-coverage-py" target="_blank">Gene Body Coverage</a>' \
" calculates read coverage over gene bodies." \
" This is used to check if reads coverage is uniform and" \
" if there is any 5' or 3' bias.",
plot = linegraph.plot([self.gene_body_cov_hist_percent, self.gene_body_cov_hist_counts], pconfig)
)
# Return number of samples found
return len(self.gene_body_cov_hist_counts)
|
[
"def",
"parse_reports",
"(",
"self",
")",
":",
"# Set up vars",
"self",
".",
"gene_body_cov_hist_counts",
"=",
"dict",
"(",
")",
"self",
".",
"gene_body_cov_hist_percent",
"=",
"dict",
"(",
")",
"# TODO - Do separate parsing step to find skewness values",
"# and add these to the general stats table?",
"# Go through files and parse data",
"for",
"f",
"in",
"self",
".",
"find_log_files",
"(",
"'rseqc/gene_body_coverage'",
")",
":",
"# RSeQC >= v2.4",
"if",
"f",
"[",
"'f'",
"]",
".",
"startswith",
"(",
"'Percentile'",
")",
":",
"keys",
"=",
"[",
"]",
"nrows",
"=",
"0",
"for",
"l",
"in",
"f",
"[",
"'f'",
"]",
".",
"splitlines",
"(",
")",
":",
"s",
"=",
"l",
".",
"split",
"(",
")",
"if",
"len",
"(",
"keys",
")",
"==",
"0",
":",
"keys",
"=",
"s",
"[",
"1",
":",
"]",
"else",
":",
"nrows",
"+=",
"1",
"s_name",
"=",
"self",
".",
"clean_s_name",
"(",
"s",
"[",
"0",
"]",
",",
"f",
"[",
"'root'",
"]",
")",
"if",
"s_name",
"in",
"self",
".",
"gene_body_cov_hist_counts",
":",
"log",
".",
"debug",
"(",
"\"Duplicate sample name found! Overwriting: {}\"",
".",
"format",
"(",
"s_name",
")",
")",
"self",
".",
"add_data_source",
"(",
"f",
",",
"s_name",
",",
"section",
"=",
"'gene_body_coverage'",
")",
"self",
".",
"gene_body_cov_hist_counts",
"[",
"s_name",
"]",
"=",
"OrderedDict",
"(",
")",
"for",
"k",
",",
"var",
"in",
"enumerate",
"(",
"s",
"[",
"1",
":",
"]",
")",
":",
"self",
".",
"gene_body_cov_hist_counts",
"[",
"s_name",
"]",
"[",
"int",
"(",
"keys",
"[",
"k",
"]",
")",
"]",
"=",
"float",
"(",
"var",
")",
"if",
"nrows",
"==",
"0",
":",
"log",
".",
"warning",
"(",
"\"Empty geneBodyCoverage file found: {}\"",
".",
"format",
"(",
"f",
"[",
"'fn'",
"]",
")",
")",
"# RSeQC < v2.4",
"elif",
"f",
"[",
"'f'",
"]",
".",
"startswith",
"(",
"'Total reads'",
")",
":",
"if",
"f",
"[",
"'s_name'",
"]",
".",
"endswith",
"(",
"'.geneBodyCoverage'",
")",
":",
"f",
"[",
"'s_name'",
"]",
"=",
"f",
"[",
"'s_name'",
"]",
"[",
":",
"-",
"17",
"]",
"if",
"f",
"[",
"'s_name'",
"]",
"in",
"self",
".",
"gene_body_cov_hist_counts",
":",
"log",
".",
"debug",
"(",
"\"Duplicate sample name found! Overwriting: {}\"",
".",
"format",
"(",
"f",
"[",
"'s_name'",
"]",
")",
")",
"self",
".",
"add_data_source",
"(",
"f",
",",
"section",
"=",
"'gene_body_coverage'",
")",
"self",
".",
"gene_body_cov_hist_counts",
"[",
"f",
"[",
"'s_name'",
"]",
"]",
"=",
"OrderedDict",
"(",
")",
"nrows",
"=",
"0",
"for",
"l",
"in",
"f",
"[",
"'f'",
"]",
".",
"splitlines",
"(",
")",
":",
"s",
"=",
"l",
".",
"split",
"(",
")",
"try",
":",
"nrows",
"+=",
"1",
"self",
".",
"gene_body_cov_hist_counts",
"[",
"f",
"[",
"'s_name'",
"]",
"]",
"[",
"int",
"(",
"s",
"[",
"0",
"]",
")",
"]",
"=",
"float",
"(",
"s",
"[",
"1",
"]",
")",
"except",
":",
"pass",
"if",
"nrows",
"==",
"0",
":",
"del",
"self",
".",
"gene_body_cov_hist_counts",
"[",
"f",
"[",
"'s_name'",
"]",
"]",
"log",
".",
"warning",
"(",
"\"Empty geneBodyCoverage file found: {}\"",
".",
"format",
"(",
"f",
"[",
"'fn'",
"]",
")",
")",
"# Filter to strip out ignored sample names",
"self",
".",
"gene_body_cov_hist_counts",
"=",
"self",
".",
"ignore_samples",
"(",
"self",
".",
"gene_body_cov_hist_counts",
")",
"if",
"len",
"(",
"self",
".",
"gene_body_cov_hist_counts",
")",
">",
"0",
":",
"# Make a normalised percentage version of the data",
"for",
"s_name",
"in",
"self",
".",
"gene_body_cov_hist_counts",
":",
"self",
".",
"gene_body_cov_hist_percent",
"[",
"s_name",
"]",
"=",
"OrderedDict",
"(",
")",
"total",
"=",
"sum",
"(",
"self",
".",
"gene_body_cov_hist_counts",
"[",
"s_name",
"]",
".",
"values",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"gene_body_cov_hist_counts",
"[",
"s_name",
"]",
".",
"items",
"(",
")",
":",
"self",
".",
"gene_body_cov_hist_percent",
"[",
"s_name",
"]",
"[",
"k",
"]",
"=",
"(",
"v",
"/",
"total",
")",
"*",
"100",
"# Add line graph to section",
"pconfig",
"=",
"{",
"'id'",
":",
"'rseqc_gene_body_coverage_plot'",
",",
"'title'",
":",
"'RSeQC: Gene Body Coverage'",
",",
"'ylab'",
":",
"'% Coverage'",
",",
"'xlab'",
":",
"\"Gene Body Percentile (5' -> 3')\"",
",",
"'xmin'",
":",
"0",
",",
"'xmax'",
":",
"100",
",",
"'tt_label'",
":",
"\"<strong>{point.x}% from 5'</strong>: {point.y:.2f}\"",
",",
"'data_labels'",
":",
"[",
"{",
"'name'",
":",
"'Percentages'",
",",
"'ylab'",
":",
"'Percentage Coverage'",
"}",
",",
"{",
"'name'",
":",
"'Counts'",
",",
"'ylab'",
":",
"'Coverage'",
"}",
"]",
"}",
"self",
".",
"add_section",
"(",
"name",
"=",
"'Gene Body Coverage'",
",",
"anchor",
"=",
"'rseqc-gene_body_coverage'",
",",
"description",
"=",
"'<a href=\"http://rseqc.sourceforge.net/#genebody-coverage-py\" target=\"_blank\">Gene Body Coverage</a>'",
"\" calculates read coverage over gene bodies.\"",
"\" This is used to check if reads coverage is uniform and\"",
"\" if there is any 5' or 3' bias.\"",
",",
"plot",
"=",
"linegraph",
".",
"plot",
"(",
"[",
"self",
".",
"gene_body_cov_hist_percent",
",",
"self",
".",
"gene_body_cov_hist_counts",
"]",
",",
"pconfig",
")",
")",
"# Return number of samples found",
"return",
"len",
"(",
"self",
".",
"gene_body_cov_hist_counts",
")"
] |
Find RSeQC gene_body_coverage reports and parse their data
|
[
"Find",
"RSeQC",
"gene_body_coverage",
"reports",
"and",
"parse",
"their",
"data"
] |
python
|
train
|
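parse_reports above ends by normalising each sample's raw coverage histogram into percentages before plotting. That normalisation step on its own, with an illustrative histogram:

from collections import OrderedDict

counts = OrderedDict([(1, 5.0), (50, 15.0), (100, 5.0)])   # illustrative histogram
total = sum(counts.values())
percent = OrderedDict((k, (v / total) * 100) for k, v in counts.items())
print(percent)   # OrderedDict([(1, 20.0), (50, 60.0), (100, 20.0)])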
pmacosta/peng
|
peng/functions.py
|
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/functions.py#L804-L1051
|
def pprint_vector(vector, limit=False, width=None, indent=0, eng=False, frac_length=3):
r"""
Format a list of numbers (vector) or a Numpy vector for printing.
If the argument **vector** is :code:`None` the string :code:`'None'` is
returned
:param vector: Vector to pretty print or None
:type vector: list of integers or floats, Numpy vector or None
:param limit: Flag that indicates whether at most 6 vector items are
printed (all vector items if its length is equal or less
than 6, first and last 3 vector items if it is not) (True),
or the entire vector is printed (False)
:type limit: boolean
:param width: Number of available characters per line. If None the vector
is printed in one line
:type width: integer or None
:param indent: Flag that indicates whether all subsequent lines after the
first one are indented (True) or not (False). Only relevant
if **width** is not None
:type indent: boolean
:param eng: Flag that indicates whether engineering notation is used
(True) or not (False)
:type eng: boolean
:param frac_length: Number of digits of fractional part (only applicable
if **eng** is True)
:type frac_length: integer
:raises: ValueError (Argument \`width\` is too small)
:rtype: string
For example:
>>> from __future__ import print_function
>>> import peng
>>> header = 'Vector: '
>>> data = [1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=1,
... limit=True,
... indent=len(header)
... )
... )
Vector: [ 1.0m, 20.0u, 300.0M,
...
700.0 , 8.0 , 9.0 ]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=0,
... indent=len(header)
... )
... )
Vector: [ 1m, 20u, 300M, 4p,
5k, -6n, 700 , 8 ,
9 ]
>>> print(peng.pprint_vector(data, eng=True, frac_length=0))
[ 1m, 20u, 300M, 4p, 5k, -6n, 700 , 8 , 9 ]
>>> print(peng.pprint_vector(data, limit=True))
[ 0.001, 2e-05, 300000000.0, ..., 700, 8, 9 ]
"""
# pylint: disable=R0912,R0913
num_digits = 12
approx = lambda x: float(x) if "." not in x else round(float(x), num_digits)
def limstr(value):
str1 = str(value)
iscomplex = isinstance(value, complex)
str1 = str1.lstrip("(").rstrip(")")
if "." not in str1:
return str1
if iscomplex:
sign = "+" if value.imag >= 0 else "-"
regexp = re.compile(
r"(.*(?:[Ee][\+-]\d+)?)"
+ (r"\+" if sign == "+" else "-")
+ r"(.*(?:[Ee][\+-]\d+)?j)"
)
rvalue, ivalue = regexp.match(str1).groups()
return (
str(complex(approx(rvalue), approx(sign + ivalue.strip("j"))))
.lstrip("(")
.rstrip(")")
)
str2 = str(round(value, num_digits))
return str2 if len(str1) > len(str2) else str1
def _str(*args):
"""
Convert numbers to string, optionally represented in engineering notation.
Numbers may be integers, float or complex
"""
ret = [
(limstr(element) if not eng else peng(element, frac_length, True))
if not isinstance(element, complex)
else (
limstr(element)
if not eng
else "{real}{sign}{imag}j".format(
real=peng(element.real, frac_length, True),
imag=peng(abs(element.imag), frac_length, True),
sign="+" if element.imag >= 0 else "-",
)
)
for element in args
]
return ret[0] if len(ret) == 1 else ret
if vector is None:
return "None"
lvector = len(vector)
if (not limit) or (limit and (lvector < 7)):
items = _str(*vector)
uret = "[ {0} ]".format(", ".join(items))
else:
items = _str(*(vector[:3] + vector[-3:]))
uret = "[ {0}, ..., {1} ]".format(", ".join(items[:3]), ", ".join(items[-3:]))
if (width is None) or (len(uret) < width):
return uret
# -4 comes from the fact that an opening '[ ' and a closing ' ]'
# are added to the multi-line vector string
if any([len(item) > width - 4 for item in items]):
raise ValueError("Argument `width` is too small")
# Text needs to be wrapped in multiple lines
# Figure out how long the first line needs to be
wobj = textwrap.TextWrapper(initial_indent="[ ", width=width)
# uret[2:] -> do not include initial '[ ' as this is specified as
# the initial indent to the text wrapper
rlist = wobj.wrap(uret[2:])
first_line = rlist[0]
first_line_elements = first_line.count(",")
# Reconstruct string representation of vector excluding first line
# Remove ... from text to be wrapped because it is placed in a single
# line centered with the content
uret_left = (",".join(uret.split(",")[first_line_elements:])).replace("...,", "")
wobj = textwrap.TextWrapper(width=width - 2)
wrapped_text = wobj.wrap(uret_left.lstrip())
# Construct candidate wrapped and indented list of vector elements
rlist = [first_line] + [
(" " * (indent + 2)) + item.rstrip() for item in wrapped_text
]
last_line = rlist[-1]
last_line_elements = last_line.count(",") + 1
# "Manually" format limit output so that it is either 3 lines, first and
# last line with 3 elements and the middle with '...' or 7 lines, each with
# 1 element and the middle with '...'
# If numbers are not to be aligned at commas (variable width) then use the
# existing results of the wrap() function
if limit and (lvector > 6):
if (first_line_elements < 3) or (
(first_line_elements == 3) and (last_line_elements < 3)
):
rlist = [
"[ {0},".format(_str(vector[0])),
_str(vector[1]),
_str(vector[2]),
"...",
_str(vector[-3]),
_str(vector[-2]),
"{0} ]".format(_str(vector[-1])),
]
first_line_elements = 1
else:
rlist = [
"[ {0},".format(", ".join(_str(*vector[:3]))),
"...",
"{0} ]".format(", ".join(_str(*vector[-3:]))),
]
first_line = rlist[0]
elif limit:
rlist = [item.lstrip() for item in rlist]
first_comma_index = first_line.find(",")
actual_width = len(first_line) - 2
if not eng:
if not limit:
return "\n".join(rlist)
num_elements = len(rlist)
return "\n".join(
[
"{spaces}{line}{comma}".format(
spaces=(" " * (indent + 2)) if num > 0 else "",
line=(
line.center(actual_width).rstrip()
if line.strip() == "..."
else line
),
comma=(
","
if (
(num < num_elements - 1)
and (not line.endswith(","))
and (line.strip() != "...")
)
else ""
),
)
if num > 0
else line
for num, line in enumerate(rlist)
]
)
# Align elements across multiple lines
if limit:
remainder_list = [line.lstrip() for line in rlist[1:]]
else:
remainder_list = _split_every(
text=uret[len(first_line) :],
sep=",",
count=first_line_elements,
lstrip=True,
)
new_wrapped_lines_list = [first_line]
for line in remainder_list[:-1]:
new_wrapped_lines_list.append(
"{0},".format(line).rjust(actual_width)
if line != "..."
else line.center(actual_width).rstrip()
)
# Align last line on first comma (if it exists) or
# on length of field if does not
if remainder_list[-1].find(",") == -1:
marker = len(remainder_list[-1]) - 2
else:
marker = remainder_list[-1].find(",")
new_wrapped_lines_list.append(
"{0}{1}".format((first_comma_index - marker - 2) * " ", remainder_list[-1])
)
return "\n".join(
[
"{spaces}{line}".format(spaces=" " * (indent + 2), line=line)
if num > 0
else line
for num, line in enumerate(new_wrapped_lines_list)
]
)
|
[
"def",
"pprint_vector",
"(",
"vector",
",",
"limit",
"=",
"False",
",",
"width",
"=",
"None",
",",
"indent",
"=",
"0",
",",
"eng",
"=",
"False",
",",
"frac_length",
"=",
"3",
")",
":",
"# pylint: disable=R0912,R0913",
"num_digits",
"=",
"12",
"approx",
"=",
"lambda",
"x",
":",
"float",
"(",
"x",
")",
"if",
"\".\"",
"not",
"in",
"x",
"else",
"round",
"(",
"float",
"(",
"x",
")",
",",
"num_digits",
")",
"def",
"limstr",
"(",
"value",
")",
":",
"str1",
"=",
"str",
"(",
"value",
")",
"iscomplex",
"=",
"isinstance",
"(",
"value",
",",
"complex",
")",
"str1",
"=",
"str1",
".",
"lstrip",
"(",
"\"(\"",
")",
".",
"rstrip",
"(",
"\")\"",
")",
"if",
"\".\"",
"not",
"in",
"str1",
":",
"return",
"str1",
"if",
"iscomplex",
":",
"sign",
"=",
"\"+\"",
"if",
"value",
".",
"imag",
">=",
"0",
"else",
"\"-\"",
"regexp",
"=",
"re",
".",
"compile",
"(",
"r\"(.*(?:[Ee][\\+-]\\d+)?)\"",
"+",
"(",
"r\"\\+\"",
"if",
"sign",
"==",
"\"+\"",
"else",
"\"-\"",
")",
"+",
"r\"(.*(?:[Ee][\\+-]\\d+)?j)\"",
")",
"rvalue",
",",
"ivalue",
"=",
"regexp",
".",
"match",
"(",
"str1",
")",
".",
"groups",
"(",
")",
"return",
"(",
"str",
"(",
"complex",
"(",
"approx",
"(",
"rvalue",
")",
",",
"approx",
"(",
"sign",
"+",
"ivalue",
".",
"strip",
"(",
"\"j\"",
")",
")",
")",
")",
".",
"lstrip",
"(",
"\"(\"",
")",
".",
"rstrip",
"(",
"\")\"",
")",
")",
"str2",
"=",
"str",
"(",
"round",
"(",
"value",
",",
"num_digits",
")",
")",
"return",
"str2",
"if",
"len",
"(",
"str1",
")",
">",
"len",
"(",
"str2",
")",
"else",
"str1",
"def",
"_str",
"(",
"*",
"args",
")",
":",
"\"\"\"\n Convert numbers to string, optionally represented in engineering notation.\n\n Numbers may be integers, float or complex\n \"\"\"",
"ret",
"=",
"[",
"(",
"limstr",
"(",
"element",
")",
"if",
"not",
"eng",
"else",
"peng",
"(",
"element",
",",
"frac_length",
",",
"True",
")",
")",
"if",
"not",
"isinstance",
"(",
"element",
",",
"complex",
")",
"else",
"(",
"limstr",
"(",
"element",
")",
"if",
"not",
"eng",
"else",
"\"{real}{sign}{imag}j\"",
".",
"format",
"(",
"real",
"=",
"peng",
"(",
"element",
".",
"real",
",",
"frac_length",
",",
"True",
")",
",",
"imag",
"=",
"peng",
"(",
"abs",
"(",
"element",
".",
"imag",
")",
",",
"frac_length",
",",
"True",
")",
",",
"sign",
"=",
"\"+\"",
"if",
"element",
".",
"imag",
">=",
"0",
"else",
"\"-\"",
",",
")",
")",
"for",
"element",
"in",
"args",
"]",
"return",
"ret",
"[",
"0",
"]",
"if",
"len",
"(",
"ret",
")",
"==",
"1",
"else",
"ret",
"if",
"vector",
"is",
"None",
":",
"return",
"\"None\"",
"lvector",
"=",
"len",
"(",
"vector",
")",
"if",
"(",
"not",
"limit",
")",
"or",
"(",
"limit",
"and",
"(",
"lvector",
"<",
"7",
")",
")",
":",
"items",
"=",
"_str",
"(",
"*",
"vector",
")",
"uret",
"=",
"\"[ {0} ]\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"items",
")",
")",
"else",
":",
"items",
"=",
"_str",
"(",
"*",
"(",
"vector",
"[",
":",
"3",
"]",
"+",
"vector",
"[",
"-",
"3",
":",
"]",
")",
")",
"uret",
"=",
"\"[ {0}, ..., {1} ]\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"items",
"[",
":",
"3",
"]",
")",
",",
"\", \"",
".",
"join",
"(",
"items",
"[",
"-",
"3",
":",
"]",
")",
")",
"if",
"(",
"width",
"is",
"None",
")",
"or",
"(",
"len",
"(",
"uret",
")",
"<",
"width",
")",
":",
"return",
"uret",
"# -4 comes from the fact that an opening '[ ' and a closing ' ]'",
"# are added to the multi-line vector string",
"if",
"any",
"(",
"[",
"len",
"(",
"item",
")",
">",
"width",
"-",
"4",
"for",
"item",
"in",
"items",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Argument `width` is too small\"",
")",
"# Text needs to be wrapped in multiple lines",
"# Figure out how long the first line needs to be",
"wobj",
"=",
"textwrap",
".",
"TextWrapper",
"(",
"initial_indent",
"=",
"\"[ \"",
",",
"width",
"=",
"width",
")",
"# uret[2:] -> do not include initial '[ ' as this is specified as",
"# the initial indent to the text wrapper",
"rlist",
"=",
"wobj",
".",
"wrap",
"(",
"uret",
"[",
"2",
":",
"]",
")",
"first_line",
"=",
"rlist",
"[",
"0",
"]",
"first_line_elements",
"=",
"first_line",
".",
"count",
"(",
"\",\"",
")",
"# Reconstruct string representation of vector excluding first line",
"# Remove ... from text to be wrapped because it is placed in a single",
"# line centered with the content",
"uret_left",
"=",
"(",
"\",\"",
".",
"join",
"(",
"uret",
".",
"split",
"(",
"\",\"",
")",
"[",
"first_line_elements",
":",
"]",
")",
")",
".",
"replace",
"(",
"\"...,\"",
",",
"\"\"",
")",
"wobj",
"=",
"textwrap",
".",
"TextWrapper",
"(",
"width",
"=",
"width",
"-",
"2",
")",
"wrapped_text",
"=",
"wobj",
".",
"wrap",
"(",
"uret_left",
".",
"lstrip",
"(",
")",
")",
"# Construct candidate wrapped and indented list of vector elements",
"rlist",
"=",
"[",
"first_line",
"]",
"+",
"[",
"(",
"\" \"",
"*",
"(",
"indent",
"+",
"2",
")",
")",
"+",
"item",
".",
"rstrip",
"(",
")",
"for",
"item",
"in",
"wrapped_text",
"]",
"last_line",
"=",
"rlist",
"[",
"-",
"1",
"]",
"last_line_elements",
"=",
"last_line",
".",
"count",
"(",
"\",\"",
")",
"+",
"1",
"# \"Manually\" format limit output so that it is either 3 lines, first and",
"# last line with 3 elements and the middle with '...' or 7 lines, each with",
"# 1 element and the middle with '...'",
"# If numbers are not to be aligned at commas (variable width) then use the",
"# existing results of the wrap() function",
"if",
"limit",
"and",
"(",
"lvector",
">",
"6",
")",
":",
"if",
"(",
"first_line_elements",
"<",
"3",
")",
"or",
"(",
"(",
"first_line_elements",
"==",
"3",
")",
"and",
"(",
"last_line_elements",
"<",
"3",
")",
")",
":",
"rlist",
"=",
"[",
"\"[ {0},\"",
".",
"format",
"(",
"_str",
"(",
"vector",
"[",
"0",
"]",
")",
")",
",",
"_str",
"(",
"vector",
"[",
"1",
"]",
")",
",",
"_str",
"(",
"vector",
"[",
"2",
"]",
")",
",",
"\"...\"",
",",
"_str",
"(",
"vector",
"[",
"-",
"3",
"]",
")",
",",
"_str",
"(",
"vector",
"[",
"-",
"2",
"]",
")",
",",
"\"{0} ]\"",
".",
"format",
"(",
"_str",
"(",
"vector",
"[",
"-",
"1",
"]",
")",
")",
",",
"]",
"first_line_elements",
"=",
"1",
"else",
":",
"rlist",
"=",
"[",
"\"[ {0},\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"_str",
"(",
"*",
"vector",
"[",
":",
"3",
"]",
")",
")",
")",
",",
"\"...\"",
",",
"\"{0} ]\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"_str",
"(",
"*",
"vector",
"[",
"-",
"3",
":",
"]",
")",
")",
")",
",",
"]",
"first_line",
"=",
"rlist",
"[",
"0",
"]",
"elif",
"limit",
":",
"rlist",
"=",
"[",
"item",
".",
"lstrip",
"(",
")",
"for",
"item",
"in",
"rlist",
"]",
"first_comma_index",
"=",
"first_line",
".",
"find",
"(",
"\",\"",
")",
"actual_width",
"=",
"len",
"(",
"first_line",
")",
"-",
"2",
"if",
"not",
"eng",
":",
"if",
"not",
"limit",
":",
"return",
"\"\\n\"",
".",
"join",
"(",
"rlist",
")",
"num_elements",
"=",
"len",
"(",
"rlist",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"{spaces}{line}{comma}\"",
".",
"format",
"(",
"spaces",
"=",
"(",
"\" \"",
"*",
"(",
"indent",
"+",
"2",
")",
")",
"if",
"num",
">",
"0",
"else",
"\"\"",
",",
"line",
"=",
"(",
"line",
".",
"center",
"(",
"actual_width",
")",
".",
"rstrip",
"(",
")",
"if",
"line",
".",
"strip",
"(",
")",
"==",
"\"...\"",
"else",
"line",
")",
",",
"comma",
"=",
"(",
"\",\"",
"if",
"(",
"(",
"num",
"<",
"num_elements",
"-",
"1",
")",
"and",
"(",
"not",
"line",
".",
"endswith",
"(",
"\",\"",
")",
")",
"and",
"(",
"line",
".",
"strip",
"(",
")",
"!=",
"\"...\"",
")",
")",
"else",
"\"\"",
")",
",",
")",
"if",
"num",
">",
"0",
"else",
"line",
"for",
"num",
",",
"line",
"in",
"enumerate",
"(",
"rlist",
")",
"]",
")",
"# Align elements across multiple lines",
"if",
"limit",
":",
"remainder_list",
"=",
"[",
"line",
".",
"lstrip",
"(",
")",
"for",
"line",
"in",
"rlist",
"[",
"1",
":",
"]",
"]",
"else",
":",
"remainder_list",
"=",
"_split_every",
"(",
"text",
"=",
"uret",
"[",
"len",
"(",
"first_line",
")",
":",
"]",
",",
"sep",
"=",
"\",\"",
",",
"count",
"=",
"first_line_elements",
",",
"lstrip",
"=",
"True",
",",
")",
"new_wrapped_lines_list",
"=",
"[",
"first_line",
"]",
"for",
"line",
"in",
"remainder_list",
"[",
":",
"-",
"1",
"]",
":",
"new_wrapped_lines_list",
".",
"append",
"(",
"\"{0},\"",
".",
"format",
"(",
"line",
")",
".",
"rjust",
"(",
"actual_width",
")",
"if",
"line",
"!=",
"\"...\"",
"else",
"line",
".",
"center",
"(",
"actual_width",
")",
".",
"rstrip",
"(",
")",
")",
"# Align last line on fist comma (if it exists) or",
"# on length of field if does not",
"if",
"remainder_list",
"[",
"-",
"1",
"]",
".",
"find",
"(",
"\",\"",
")",
"==",
"-",
"1",
":",
"marker",
"=",
"len",
"(",
"remainder_list",
"[",
"-",
"1",
"]",
")",
"-",
"2",
"else",
":",
"marker",
"=",
"remainder_list",
"[",
"-",
"1",
"]",
".",
"find",
"(",
"\",\"",
")",
"new_wrapped_lines_list",
".",
"append",
"(",
"\"{0}{1}\"",
".",
"format",
"(",
"(",
"first_comma_index",
"-",
"marker",
"-",
"2",
")",
"*",
"\" \"",
",",
"remainder_list",
"[",
"-",
"1",
"]",
")",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"{spaces}{line}\"",
".",
"format",
"(",
"spaces",
"=",
"\" \"",
"*",
"(",
"indent",
"+",
"2",
")",
",",
"line",
"=",
"line",
")",
"if",
"num",
">",
"0",
"else",
"line",
"for",
"num",
",",
"line",
"in",
"enumerate",
"(",
"new_wrapped_lines_list",
")",
"]",
")"
] |
r"""
Format a list of numbers (vector) or a Numpy vector for printing.
If the argument **vector** is :code:`None` the string :code:`'None'` is
returned
:param vector: Vector to pretty print or None
:type vector: list of integers or floats, Numpy vector or None
:param limit: Flag that indicates whether at most 6 vector items are
printed (all vector items if its length is equal or less
than 6, first and last 3 vector items if it is not) (True),
or the entire vector is printed (False)
:type limit: boolean
:param width: Number of available characters per line. If None the vector
is printed in one line
:type width: integer or None
:param indent: Number of spaces by which all subsequent lines after the
first one are indented. Only relevant
if **width** is not None
:type indent: integer
:param eng: Flag that indicates whether engineering notation is used
(True) or not (False)
:type eng: boolean
:param frac_length: Number of digits of fractional part (only applicable
if **eng** is True)
:type frac_length: integer
:raises: ValueError (Argument \`width\` is too small)
:rtype: string
For example:
>>> from __future__ import print_function
>>> import peng
>>> header = 'Vector: '
>>> data = [1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=1,
... limit=True,
... indent=len(header)
... )
... )
Vector: [ 1.0m, 20.0u, 300.0M,
...
700.0 , 8.0 , 9.0 ]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=0,
... indent=len(header)
... )
... )
Vector: [ 1m, 20u, 300M, 4p,
5k, -6n, 700 , 8 ,
9 ]
>>> print(peng.pprint_vector(data, eng=True, frac_length=0))
[ 1m, 20u, 300M, 4p, 5k, -6n, 700 , 8 , 9 ]
>>> print(peng.pprint_vector(data, limit=True))
[ 0.001, 2e-05, 300000000.0, ..., 700, 8, 9 ]
|
[
"r",
"Format",
"a",
"list",
"of",
"numbers",
"(",
"vector",
")",
"or",
"a",
"Numpy",
"vector",
"for",
"printing",
"."
] |
python
|
test
|
yatiml/yatiml
|
yatiml/helpers.py
|
https://github.com/yatiml/yatiml/blob/4f55c058b72388350f0af3076ac3ea9bc1c142b0/yatiml/helpers.py#L321-L425
|
def seq_attribute_to_map(self,
attribute: str,
key_attribute: str,
value_attribute: Optional[str] = None,
strict: Optional[bool] = True) -> None:
"""Converts a sequence attribute to a map.
This function takes an attribute of this Node that is \
a sequence of mappings and turns it into a mapping of mappings. \
It assumes that each of the mappings in the original sequence \
has an attribute containing a unique value, which it will use \
as a key for the new outer mapping.
An example probably helps. If you have a Node representing \
this piece of YAML::
items:
- item_id: item1
description: Basic widget
price: 100.0
- item_id: item2
description: Premium quality widget
price: 200.0
and call seq_attribute_to_map('items', 'item_id'), then the \
Node will be modified to represent this::
items:
item1:
description: Basic widget
price: 100.0
item2:
description: Premium quality widget
price: 200.0
which is often more intuitive for people to read and write.
If the attribute does not exist, or is not a sequence of \
mappings, this function will silently do nothing. If the keys \
are not unique and strict is False, it will also do nothing. If \
the keys are not unique and strict is True, it will raise an \
error.
With thanks to the makers of the Common Workflow Language for \
the idea.
Args:
attribute: Name of the attribute whose value to modify.
key_attribute: Name of the attribute in each item to use \
as a key for the new mapping.
strict: Whether to give an error if the intended keys are \
not unique.
Raises:
SeasoningError: If the keys are not unique and strict is \
True.
"""
if not self.has_attribute(attribute):
return
attr_node = self.get_attribute(attribute)
if not attr_node.is_sequence():
return
start_mark = attr_node.yaml_node.start_mark
end_mark = attr_node.yaml_node.end_mark
# check that all list items are mappings and that the keys are unique
# strings
seen_keys = set() # type: Set[str]
for item in attr_node.seq_items():
key_attr_node = item.get_attribute(key_attribute)
if not key_attr_node.is_scalar(str):
raise SeasoningError(
('Attribute names must be strings in '
'YAtiML, {} is not a string.').format(key_attr_node))
if key_attr_node.get_value() in seen_keys:
if strict:
raise SeasoningError(
('Found a duplicate key {}: {} when'
' converting from sequence to mapping'.format(
key_attribute, key_attr_node.get_value())))
return
seen_keys.add(key_attr_node.get_value()) # type: ignore
# construct mapping
mapping_values = list()
for item in attr_node.seq_items():
# we've already checked that it's a SequenceNode above
key_node = item.get_attribute(key_attribute).yaml_node
item.remove_attribute(key_attribute)
if value_attribute is not None:
value_node = item.get_attribute(value_attribute).yaml_node
if len(item.yaml_node.value) == 1:
# no other attributes, use short form
mapping_values.append((key_node, value_node))
else:
mapping_values.append((key_node, item.yaml_node))
else:
mapping_values.append((key_node, item.yaml_node))
# create mapping node
mapping = yaml.MappingNode('tag:yaml.org,2002:map', mapping_values,
start_mark, end_mark)
self.set_attribute(attribute, mapping)
|
[
"def",
"seq_attribute_to_map",
"(",
"self",
",",
"attribute",
":",
"str",
",",
"key_attribute",
":",
"str",
",",
"value_attribute",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"strict",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
")",
"->",
"None",
":",
"if",
"not",
"self",
".",
"has_attribute",
"(",
"attribute",
")",
":",
"return",
"attr_node",
"=",
"self",
".",
"get_attribute",
"(",
"attribute",
")",
"if",
"not",
"attr_node",
".",
"is_sequence",
"(",
")",
":",
"return",
"start_mark",
"=",
"attr_node",
".",
"yaml_node",
".",
"start_mark",
"end_mark",
"=",
"attr_node",
".",
"yaml_node",
".",
"end_mark",
"# check that all list items are mappings and that the keys are unique",
"# strings",
"seen_keys",
"=",
"set",
"(",
")",
"# type: Set[str]",
"for",
"item",
"in",
"attr_node",
".",
"seq_items",
"(",
")",
":",
"key_attr_node",
"=",
"item",
".",
"get_attribute",
"(",
"key_attribute",
")",
"if",
"not",
"key_attr_node",
".",
"is_scalar",
"(",
"str",
")",
":",
"raise",
"SeasoningError",
"(",
"(",
"'Attribute names must be strings in'",
"'YAtiML, {} is not a string.'",
")",
".",
"format",
"(",
"key_attr_node",
")",
")",
"if",
"key_attr_node",
".",
"get_value",
"(",
")",
"in",
"seen_keys",
":",
"if",
"strict",
":",
"raise",
"SeasoningError",
"(",
"(",
"'Found a duplicate key {}: {} when'",
"' converting from sequence to mapping'",
".",
"format",
"(",
"key_attribute",
",",
"key_attr_node",
".",
"get_value",
"(",
")",
")",
")",
")",
"return",
"seen_keys",
".",
"add",
"(",
"key_attr_node",
".",
"get_value",
"(",
")",
")",
"# type: ignore",
"# construct mapping",
"mapping_values",
"=",
"list",
"(",
")",
"for",
"item",
"in",
"attr_node",
".",
"seq_items",
"(",
")",
":",
"# we've already checked that it's a SequenceNode above",
"key_node",
"=",
"item",
".",
"get_attribute",
"(",
"key_attribute",
")",
".",
"yaml_node",
"item",
".",
"remove_attribute",
"(",
"key_attribute",
")",
"if",
"value_attribute",
"is",
"not",
"None",
":",
"value_node",
"=",
"item",
".",
"get_attribute",
"(",
"value_attribute",
")",
".",
"yaml_node",
"if",
"len",
"(",
"item",
".",
"yaml_node",
".",
"value",
")",
"==",
"1",
":",
"# no other attributes, use short form",
"mapping_values",
".",
"append",
"(",
"(",
"key_node",
",",
"value_node",
")",
")",
"else",
":",
"mapping_values",
".",
"append",
"(",
"(",
"key_node",
",",
"item",
".",
"yaml_node",
")",
")",
"else",
":",
"mapping_values",
".",
"append",
"(",
"(",
"key_node",
",",
"item",
".",
"yaml_node",
")",
")",
"# create mapping node",
"mapping",
"=",
"yaml",
".",
"MappingNode",
"(",
"'tag:yaml.org,2002:map'",
",",
"mapping_values",
",",
"start_mark",
",",
"end_mark",
")",
"self",
".",
"set_attribute",
"(",
"attribute",
",",
"mapping",
")"
] |
Converts a sequence attribute to a map.
This function takes an attribute of this Node that is \
a sequence of mappings and turns it into a mapping of mappings. \
It assumes that each of the mappings in the original sequence \
has an attribute containing a unique value, which it will use \
as a key for the new outer mapping.
An example probably helps. If you have a Node representing \
this piece of YAML::
items:
- item_id: item1
description: Basic widget
price: 100.0
- item_id: item2
description: Premium quality widget
price: 200.0
and call seq_attribute_to_map('items', 'item_id'), then the \
Node will be modified to represent this::
items:
item1:
description: Basic widget
price: 100.0
item2:
description: Premium quality widget
price: 200.0
which is often more intuitive for people to read and write.
If the attribute does not exist, or is not a sequence of \
mappings, this function will silently do nothing. If the keys \
are not unique and strict is False, it will also do nothing. If \
the keys are not unique and strict is True, it will raise an \
error.
With thanks to the makers of the Common Workflow Language for \
the idea.
Args:
attribute: Name of the attribute whose value to modify.
key_attribute: Name of the attribute in each item to use \
as a key for the new mapping.
strict: Whether to give an error if the intended keys are \
not unique.
Raises:
SeasoningError: If the keys are not unique and strict is \
True.
|
[
"Converts",
"a",
"sequence",
"attribute",
"to",
"a",
"map",
"."
] |
python
|
train
|
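A hedged usage sketch for the record above; `node` and the attribute names in the second call are assumptions for illustration, not taken from the yatiml sources.
# 'node' is assumed to be a yatiml.helpers.Node wrapping the 'items' document
# from the docstring above; 'tags'/'name'/'value' are made-up attribute names.
node.seq_attribute_to_map('items', 'item_id')
node.seq_attribute_to_map('tags', 'name', value_attribute='value', strict=False)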
hyperledger/indy-plenum
|
plenum/server/replica.py
|
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1333-L1348
|
def processCommit(self, commit: Commit, sender: str) -> None:
"""
Validate and process the COMMIT specified.
If validation is successful, return the message to the node.
:param commit: an incoming COMMIT message
:param sender: name of the node that sent the COMMIT
"""
self.logger.debug("{} received COMMIT{} from {}".format(
self, (commit.viewNo, commit.ppSeqNo), sender))
if self.validateCommit(commit, sender):
self.stats.inc(TPCStat.CommitRcvd)
self.addToCommits(commit, sender)
self.logger.debug("{} processed incoming COMMIT{}".format(
self, (commit.viewNo, commit.ppSeqNo)))
|
[
"def",
"processCommit",
"(",
"self",
",",
"commit",
":",
"Commit",
",",
"sender",
":",
"str",
")",
"->",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"{} received COMMIT{} from {}\"",
".",
"format",
"(",
"self",
",",
"(",
"commit",
".",
"viewNo",
",",
"commit",
".",
"ppSeqNo",
")",
",",
"sender",
")",
")",
"if",
"self",
".",
"validateCommit",
"(",
"commit",
",",
"sender",
")",
":",
"self",
".",
"stats",
".",
"inc",
"(",
"TPCStat",
".",
"CommitRcvd",
")",
"self",
".",
"addToCommits",
"(",
"commit",
",",
"sender",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"{} processed incoming COMMIT{}\"",
".",
"format",
"(",
"self",
",",
"(",
"commit",
".",
"viewNo",
",",
"commit",
".",
"ppSeqNo",
")",
")",
")"
] |
Validate and process the COMMIT specified.
If validation is successful, return the message to the node.
:param commit: an incoming COMMIT message
:param sender: name of the node that sent the COMMIT
|
[
"Validate",
"and",
"process",
"the",
"COMMIT",
"specified",
".",
"If",
"validation",
"is",
"successful",
"return",
"the",
"message",
"to",
"the",
"node",
"."
] |
python
|
train
|
Alignak-monitoring/alignak
|
alignak/external_command.py
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2436-L2450
|
def disable_svc_check(self, service):
"""Disable checks for a service
Format of the line that triggers function call::
DISABLE_SVC_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
if service.active_checks_enabled:
service.disable_active_checks(self.daemon.checks)
service.modified_attributes |= \
DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.send_an_element(service.get_update_status_brok())
|
[
"def",
"disable_svc_check",
"(",
"self",
",",
"service",
")",
":",
"if",
"service",
".",
"active_checks_enabled",
":",
"service",
".",
"disable_active_checks",
"(",
"self",
".",
"daemon",
".",
"checks",
")",
"service",
".",
"modified_attributes",
"|=",
"DICT_MODATTR",
"[",
"\"MODATTR_ACTIVE_CHECKS_ENABLED\"",
"]",
".",
"value",
"self",
".",
"send_an_element",
"(",
"service",
".",
"get_update_status_brok",
"(",
")",
")"
] |
Disable checks for a service
Format of the line that triggers function call::
DISABLE_SVC_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
|
[
"Disable",
"checks",
"for",
"a",
"service",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
] |
python
|
train
|
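A concrete external-command line matching the format documented above; the host and service names are made up.
DISABLE_SVC_CHECK;webserver01;HTTP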
learningequality/iceqube
|
src/iceqube/storage/backends/inmem.py
|
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/storage/backends/inmem.py#L304-L312
|
def _ns_query(self, session):
"""
Return a SQLAlchemy query that is already namespaced by the app and namespace given to this backend
during initialization.
Returns: a SQLAlchemy query object
"""
return session.query(ORMJob).filter(ORMJob.app == self.app,
ORMJob.namespace == self.namespace)
|
[
"def",
"_ns_query",
"(",
"self",
",",
"session",
")",
":",
"return",
"session",
".",
"query",
"(",
"ORMJob",
")",
".",
"filter",
"(",
"ORMJob",
".",
"app",
"==",
"self",
".",
"app",
",",
"ORMJob",
".",
"namespace",
"==",
"self",
".",
"namespace",
")"
] |
Return a SQLAlchemy query that is already namespaced by the app and namespace given to this backend
during initialization.
Returns: a SQLAlchemy query object
|
[
"Return",
"a",
"SQLAlchemy",
"query",
"that",
"is",
"already",
"namespaced",
"by",
"the",
"app",
"and",
"namespace",
"given",
"to",
"this",
"backend",
"during",
"initialization",
".",
"Returns",
":",
"a",
"SQLAlchemy",
"query",
"object"
] |
python
|
train
|
saltstack/salt
|
salt/modules/win_system.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_system.py#L788-L846
|
def _join_domain(domain,
username=None,
password=None,
account_ou=None,
account_exists=False):
'''
Helper function to join the domain.
Args:
domain (str): The domain to which the computer should be joined, e.g.
``example.com``
username (str): Username of an account which is authorized to join
computers to the specified domain. Need to be either fully qualified
like ``[email protected]`` or simply ``user``
password (str): Password of the specified user
account_ou (str): The DN of the OU below which the account for this
computer should be created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com``
account_exists (bool): If set to ``True`` the computer will only join
the domain if the account already exists. If set to ``False`` the
computer account will be created if it does not exist, otherwise it
will use the existing account. Default is False.
Returns:
int:
:param domain:
:param username:
:param password:
:param account_ou:
:param account_exists:
:return:
'''
NETSETUP_JOIN_DOMAIN = 0x1 # pylint: disable=invalid-name
NETSETUP_ACCOUNT_CREATE = 0x2 # pylint: disable=invalid-name
NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20 # pylint: disable=invalid-name
NETSETUP_JOIN_WITH_NEW_NAME = 0x400 # pylint: disable=invalid-name
join_options = 0x0
join_options |= NETSETUP_JOIN_DOMAIN
join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
join_options |= NETSETUP_JOIN_WITH_NEW_NAME
if not account_exists:
join_options |= NETSETUP_ACCOUNT_CREATE
with salt.utils.winapi.Com():
conn = wmi.WMI()
comp = conn.Win32_ComputerSystem()[0]
# Return the results of the command as an error
# JoinDomainOrWorkgroup returns a strangely formatted value that looks like
# (0,) so return the first item
return comp.JoinDomainOrWorkgroup(
Name=domain, Password=password, UserName=username, AccountOU=account_ou,
FJoinOptions=join_options)[0]
|
[
"def",
"_join_domain",
"(",
"domain",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"account_ou",
"=",
"None",
",",
"account_exists",
"=",
"False",
")",
":",
"NETSETUP_JOIN_DOMAIN",
"=",
"0x1",
"# pylint: disable=invalid-name",
"NETSETUP_ACCOUNT_CREATE",
"=",
"0x2",
"# pylint: disable=invalid-name",
"NETSETUP_DOMAIN_JOIN_IF_JOINED",
"=",
"0x20",
"# pylint: disable=invalid-name",
"NETSETUP_JOIN_WITH_NEW_NAME",
"=",
"0x400",
"# pylint: disable=invalid-name",
"join_options",
"=",
"0x0",
"join_options",
"|=",
"NETSETUP_JOIN_DOMAIN",
"join_options",
"|=",
"NETSETUP_DOMAIN_JOIN_IF_JOINED",
"join_options",
"|=",
"NETSETUP_JOIN_WITH_NEW_NAME",
"if",
"not",
"account_exists",
":",
"join_options",
"|=",
"NETSETUP_ACCOUNT_CREATE",
"with",
"salt",
".",
"utils",
".",
"winapi",
".",
"Com",
"(",
")",
":",
"conn",
"=",
"wmi",
".",
"WMI",
"(",
")",
"comp",
"=",
"conn",
".",
"Win32_ComputerSystem",
"(",
")",
"[",
"0",
"]",
"# Return the results of the command as an error",
"# JoinDomainOrWorkgroup returns a strangely formatted value that looks like",
"# (0,) so return the first item",
"return",
"comp",
".",
"JoinDomainOrWorkgroup",
"(",
"Name",
"=",
"domain",
",",
"Password",
"=",
"password",
",",
"UserName",
"=",
"username",
",",
"AccountOU",
"=",
"account_ou",
",",
"FJoinOptions",
"=",
"join_options",
")",
"[",
"0",
"]"
] |
Helper function to join the domain.
Args:
domain (str): The domain to which the computer should be joined, e.g.
``example.com``
username (str): Username of an account which is authorized to join
computers to the specified domain. Needs to be either fully qualified
like ``[email protected]`` or simply ``user``
password (str): Password of the specified user
account_ou (str): The DN of the OU below which the account for this
computer should be created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com``
account_exists (bool): If set to ``True`` the computer will only join
the domain if the account already exists. If set to ``False`` the
computer account will be created if it does not exist, otherwise it
will use the existing account. Default is False.
Returns:
int:
:param domain:
:param username:
:param password:
:param account_ou:
:param account_exists:
:return:
|
[
"Helper",
"function",
"to",
"join",
"the",
"domain",
"."
] |
python
|
train
|
ThreatConnect-Inc/tcex
|
tcex/tcex_vault.py
|
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_vault.py#L23-L31
|
def create(self, key, value, lease='1h'):
"""Create key/value pair in Vault.
Args:
key (string): The data key.
value (string): The data value.
lease (string): The lease time.
"""
return self._client.write(key, value, lease=lease)
|
[
"def",
"create",
"(",
"self",
",",
"key",
",",
"value",
",",
"lease",
"=",
"'1h'",
")",
":",
"return",
"self",
".",
"_client",
".",
"write",
"(",
"key",
",",
"value",
",",
"lease",
"=",
"lease",
")"
] |
Create key/value pair in Vault.
Args:
key (string): The data key.
value (string): The data value.
lease (string): The lease time.
|
[
"Create",
"key",
"/",
"value",
"pair",
"in",
"Vault",
"."
] |
python
|
train
|
Nachtfeuer/pipeline
|
spline/components/config.py
|
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/config.py#L29-L46
|
def schema():
"""Provide schema for shell configuration."""
return Schema({
'script': And(Or(type(' '), type(u' ')), len),
Optional('title', default=''): str,
Optional('model', default={}): {Optional(And(str, len)): object},
Optional('env', default={}): {Optional(And(str, len)): And(str, len)},
Optional('item', default=None): object,
Optional('dry_run', default=False): bool,
Optional('debug', default=False): bool,
Optional('strict', default=False): bool,
Optional('variables', default={}): {
Optional(And(Or(type(' '), type(u' ')), len, Regex(r'([a-zA-Z][_a-zA-Z]*)'))):
Or(type(' '), type(u' '))
},
Optional('temporary_scripts_path', default=''): Or(type(''), type(u'')),
Optional('internal', default=False): bool
})
|
[
"def",
"schema",
"(",
")",
":",
"return",
"Schema",
"(",
"{",
"'script'",
":",
"And",
"(",
"Or",
"(",
"type",
"(",
"' '",
")",
",",
"type",
"(",
"u' '",
")",
")",
",",
"len",
")",
",",
"Optional",
"(",
"'title'",
",",
"default",
"=",
"''",
")",
":",
"str",
",",
"Optional",
"(",
"'model'",
",",
"default",
"=",
"{",
"}",
")",
":",
"{",
"Optional",
"(",
"And",
"(",
"str",
",",
"len",
")",
")",
":",
"object",
"}",
",",
"Optional",
"(",
"'env'",
",",
"default",
"=",
"{",
"}",
")",
":",
"{",
"Optional",
"(",
"And",
"(",
"str",
",",
"len",
")",
")",
":",
"And",
"(",
"str",
",",
"len",
")",
"}",
",",
"Optional",
"(",
"'item'",
",",
"default",
"=",
"None",
")",
":",
"object",
",",
"Optional",
"(",
"'dry_run'",
",",
"default",
"=",
"False",
")",
":",
"bool",
",",
"Optional",
"(",
"'debug'",
",",
"default",
"=",
"False",
")",
":",
"bool",
",",
"Optional",
"(",
"'strict'",
",",
"default",
"=",
"False",
")",
":",
"bool",
",",
"Optional",
"(",
"'variables'",
",",
"default",
"=",
"{",
"}",
")",
":",
"{",
"Optional",
"(",
"And",
"(",
"Or",
"(",
"type",
"(",
"' '",
")",
",",
"type",
"(",
"u' '",
")",
")",
",",
"len",
",",
"Regex",
"(",
"r'([a-zA-Z][_a-zA-Z]*)'",
")",
")",
")",
":",
"Or",
"(",
"type",
"(",
"' '",
")",
",",
"type",
"(",
"u' '",
")",
")",
"}",
",",
"Optional",
"(",
"'temporary_scripts_path'",
",",
"default",
"=",
"''",
")",
":",
"Or",
"(",
"type",
"(",
"''",
")",
",",
"type",
"(",
"u''",
")",
")",
",",
"Optional",
"(",
"'internal'",
",",
"default",
"=",
"False",
")",
":",
"bool",
"}",
")"
] |
Provide schema for shell configuration.
|
[
"Provide",
"schema",
"for",
"shell",
"configuration",
"."
] |
python
|
train
|
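A hedged sketch of validating a minimal shell step against the schema above; the script content is illustrative only and assumes the standard `schema` library behaviour of filling Optional defaults.
# Validate a minimal document and rely on the declared defaults.
validated = schema().validate({'script': 'echo hello'})
assert validated['title'] == '' and validated['dry_run'] is False  # defaults filled in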
wummel/dosage
|
dosagelib/scraper.py
|
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/scraper.py#L198-L211
|
def getPrevUrl(self, url, data):
"""Find previous URL."""
prevUrl = None
if self.prevSearch:
try:
prevUrl = self.fetchUrl(url, data, self.prevSearch)
except ValueError as msg:
# assume there is no previous URL, but print a warning
out.warn(u"%s Assuming no previous comic strips exist." % msg)
else:
prevUrl = self.prevUrlModifier(prevUrl)
out.debug(u"Found previous URL %s" % prevUrl)
getHandler().comicPageLink(self.getName(), url, prevUrl)
return prevUrl
|
[
"def",
"getPrevUrl",
"(",
"self",
",",
"url",
",",
"data",
")",
":",
"prevUrl",
"=",
"None",
"if",
"self",
".",
"prevSearch",
":",
"try",
":",
"prevUrl",
"=",
"self",
".",
"fetchUrl",
"(",
"url",
",",
"data",
",",
"self",
".",
"prevSearch",
")",
"except",
"ValueError",
"as",
"msg",
":",
"# assume there is no previous URL, but print a warning",
"out",
".",
"warn",
"(",
"u\"%s Assuming no previous comic strips exist.\"",
"%",
"msg",
")",
"else",
":",
"prevUrl",
"=",
"self",
".",
"prevUrlModifier",
"(",
"prevUrl",
")",
"out",
".",
"debug",
"(",
"u\"Found previous URL %s\"",
"%",
"prevUrl",
")",
"getHandler",
"(",
")",
".",
"comicPageLink",
"(",
"self",
".",
"getName",
"(",
")",
",",
"url",
",",
"prevUrl",
")",
"return",
"prevUrl"
] |
Find previous URL.
|
[
"Find",
"previous",
"URL",
"."
] |
python
|
train
|
chrisspen/dtree
|
dtree.py
|
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L145-L154
|
def normpdf(x, mu, sigma):
"""
Describes the relative likelihood that a real-valued random variable X will
take on a given value.
http://en.wikipedia.org/wiki/Probability_density_function
"""
u = (x-mu)/abs(sigma)
y = (1/(math.sqrt(2*pi)*abs(sigma)))*math.exp(-u*u/2)
return y
|
[
"def",
"normpdf",
"(",
"x",
",",
"mu",
",",
"sigma",
")",
":",
"u",
"=",
"(",
"x",
"-",
"mu",
")",
"/",
"abs",
"(",
"sigma",
")",
"y",
"=",
"(",
"1",
"/",
"(",
"math",
".",
"sqrt",
"(",
"2",
"*",
"pi",
")",
"*",
"abs",
"(",
"sigma",
")",
")",
")",
"*",
"math",
".",
"exp",
"(",
"-",
"u",
"*",
"u",
"/",
"2",
")",
"return",
"y"
] |
Describes the relative likelihood that a real-valued random variable X will
take on a given value.
http://en.wikipedia.org/wiki/Probability_density_function
|
[
"Describes",
"the",
"relative",
"likelihood",
"that",
"a",
"real",
"-",
"valued",
"random",
"variable",
"X",
"will",
"take",
"on",
"a",
"given",
"value",
".",
"http",
":",
"//",
"en",
".",
"wikipedia",
".",
"org",
"/",
"wiki",
"/",
"Probability_density_function"
] |
python
|
train
|
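A quick numerical check of the density above (values rounded; assumes the function is in scope).
normpdf(0, 0, 1)   # ~0.3989 == 1/sqrt(2*pi), the peak of the standard normal
normpdf(1, 0, 1)   # ~0.2420 == exp(-0.5)/sqrt(2*pi), one sigma from the mean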
apache/incubator-mxnet
|
python/mxnet/ndarray/sparse.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L72-L104
|
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
"""Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
"""
hdl = NDArrayHandle()
for aux_t in aux_types:
if np.dtype(aux_t) != np.dtype("int64"):
raise NotImplementedError("only int64 is supported for aux types")
aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
aux_shapes = py_sum(aux_shapes, ())
num_aux = mx_uint(len(aux_types))
check_call(_LIB.MXNDArrayCreateSparseEx(
ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
num_aux,
c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
c_array_buf(mx_uint, native_array('I', aux_shapes)),
ctypes.byref(hdl)))
return hdl
|
[
"def",
"_new_alloc_handle",
"(",
"stype",
",",
"shape",
",",
"ctx",
",",
"delay_alloc",
",",
"dtype",
",",
"aux_types",
",",
"aux_shapes",
"=",
"None",
")",
":",
"hdl",
"=",
"NDArrayHandle",
"(",
")",
"for",
"aux_t",
"in",
"aux_types",
":",
"if",
"np",
".",
"dtype",
"(",
"aux_t",
")",
"!=",
"np",
".",
"dtype",
"(",
"\"int64\"",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"only int64 is supported for aux types\"",
")",
"aux_type_ids",
"=",
"[",
"int",
"(",
"_DTYPE_NP_TO_MX",
"[",
"np",
".",
"dtype",
"(",
"aux_t",
")",
".",
"type",
"]",
")",
"for",
"aux_t",
"in",
"aux_types",
"]",
"aux_shapes",
"=",
"[",
"(",
"0",
",",
")",
"for",
"aux_t",
"in",
"aux_types",
"]",
"if",
"aux_shapes",
"is",
"None",
"else",
"aux_shapes",
"aux_shape_lens",
"=",
"[",
"len",
"(",
"aux_shape",
")",
"for",
"aux_shape",
"in",
"aux_shapes",
"]",
"aux_shapes",
"=",
"py_sum",
"(",
"aux_shapes",
",",
"(",
")",
")",
"num_aux",
"=",
"mx_uint",
"(",
"len",
"(",
"aux_types",
")",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayCreateSparseEx",
"(",
"ctypes",
".",
"c_int",
"(",
"int",
"(",
"_STORAGE_TYPE_STR_TO_ID",
"[",
"stype",
"]",
")",
")",
",",
"c_array_buf",
"(",
"mx_uint",
",",
"native_array",
"(",
"'I'",
",",
"shape",
")",
")",
",",
"mx_uint",
"(",
"len",
"(",
"shape",
")",
")",
",",
"ctypes",
".",
"c_int",
"(",
"ctx",
".",
"device_typeid",
")",
",",
"ctypes",
".",
"c_int",
"(",
"ctx",
".",
"device_id",
")",
",",
"ctypes",
".",
"c_int",
"(",
"int",
"(",
"delay_alloc",
")",
")",
",",
"ctypes",
".",
"c_int",
"(",
"int",
"(",
"_DTYPE_NP_TO_MX",
"[",
"np",
".",
"dtype",
"(",
"dtype",
")",
".",
"type",
"]",
")",
")",
",",
"num_aux",
",",
"c_array_buf",
"(",
"ctypes",
".",
"c_int",
",",
"native_array",
"(",
"'i'",
",",
"aux_type_ids",
")",
")",
",",
"c_array_buf",
"(",
"mx_uint",
",",
"native_array",
"(",
"'I'",
",",
"aux_shape_lens",
")",
")",
",",
"c_array_buf",
"(",
"mx_uint",
",",
"native_array",
"(",
"'I'",
",",
"aux_shapes",
")",
")",
",",
"ctypes",
".",
"byref",
"(",
"hdl",
")",
")",
")",
"return",
"hdl"
] |
Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
|
[
"Return",
"a",
"new",
"handle",
"with",
"specified",
"storage",
"type",
"shape",
"dtype",
"and",
"context",
"."
] |
python
|
train
|
ARMmbed/icetea
|
examples/sample_cloud.py
|
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/examples/sample_cloud.py#L24-L30
|
def create(host, port, result_converter=None, testcase_converter=None, args=None):
"""
Function which is called by Icetea to create an instance of the cloud client. This function
must exist.
This function must not return None. Either return an instance of Client or raise.
"""
return SampleClient(host, port, result_converter, testcase_converter, args)
|
[
"def",
"create",
"(",
"host",
",",
"port",
",",
"result_converter",
"=",
"None",
",",
"testcase_converter",
"=",
"None",
",",
"args",
"=",
"None",
")",
":",
"return",
"SampleClient",
"(",
"host",
",",
"port",
",",
"result_converter",
",",
"testcase_converter",
",",
"args",
")"
] |
Function which is called by Icetea to create an instance of the cloud client. This function
must exist.
This function must not return None. Either return an instance of Client or raise.
|
[
"Function",
"which",
"is",
"called",
"by",
"Icetea",
"to",
"create",
"an",
"instance",
"of",
"the",
"cloud",
"client",
".",
"This",
"function",
"must",
"exists",
".",
"This",
"function",
"myust",
"not",
"return",
"None",
".",
"Either",
"return",
"an",
"instance",
"of",
"Client",
"or",
"raise",
"."
] |
python
|
train
|
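A minimal sketch of how Icetea would call the factory above; the host and port values are illustrative only.
client = create("localhost", 3000)   # returns a SampleClient bound to the given endpoint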
trailofbits/protofuzz
|
protofuzz/gen.py
|
https://github.com/trailofbits/protofuzz/blob/589492d34de9a0da6cc5554094e2588b893b2fd8/protofuzz/gen.py#L130-L152
|
def make_dependent(self, source, target, action):
'''
Create a dependency between path 'source' and path 'target' via the
callable 'action'.
>>> permuter._generators
[IterValueGenerator(one), IterValueGenerator(two)]
>>> permuter.make_dependent('one', 'two', lambda x: x + 1)
Going forward, 'two' will only contain values that are (one+1)
'''
if not self._generators:
return
src_permuter, src = self._resolve_child(source)
dest = self._resolve_child(target)[1]
# pylint: disable=protected-access
container = src_permuter._generators
idx = container.index(src)
container[idx] = DependentValueGenerator(src.name(), dest, action)
self._update_independent_generators()
|
[
"def",
"make_dependent",
"(",
"self",
",",
"source",
",",
"target",
",",
"action",
")",
":",
"if",
"not",
"self",
".",
"_generators",
":",
"return",
"src_permuter",
",",
"src",
"=",
"self",
".",
"_resolve_child",
"(",
"source",
")",
"dest",
"=",
"self",
".",
"_resolve_child",
"(",
"target",
")",
"[",
"1",
"]",
"# pylint: disable=protected-access",
"container",
"=",
"src_permuter",
".",
"_generators",
"idx",
"=",
"container",
".",
"index",
"(",
"src",
")",
"container",
"[",
"idx",
"]",
"=",
"DependentValueGenerator",
"(",
"src",
".",
"name",
"(",
")",
",",
"dest",
",",
"action",
")",
"self",
".",
"_update_independent_generators",
"(",
")"
] |
Create a dependency between path 'source' and path 'target' via the
callable 'action'.
>>> permuter._generators
[IterValueGenerator(one), IterValueGenerator(two)]
>>> permuter.make_dependent('one', 'two', lambda x: x + 1)
Going forward, 'two' will only contain values that are (one+1)
|
[
"Create",
"a",
"dependency",
"between",
"path",
"source",
"and",
"path",
"target",
"via",
"the",
"callable",
"action",
"."
] |
python
|
train
|
readbeyond/aeneas
|
aeneas/validator.py
|
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/validator.py#L259-L276
|
def check_file_encoding(self, input_file_path):
"""
Check whether the given file is UTF-8 encoded.
:param string input_file_path: the path of the file to be checked
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log([u"Checking encoding of file '%s'", input_file_path])
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_file_encoding"):
return self.result
if not gf.file_can_be_read(input_file_path):
self._failed(u"File '%s' cannot be read." % (input_file_path))
return self.result
with io.open(input_file_path, "rb") as file_object:
bstring = file_object.read()
self._check_utf8_encoding(bstring)
return self.result
|
[
"def",
"check_file_encoding",
"(",
"self",
",",
"input_file_path",
")",
":",
"self",
".",
"log",
"(",
"[",
"u\"Checking encoding of file '%s'\"",
",",
"input_file_path",
"]",
")",
"self",
".",
"result",
"=",
"ValidatorResult",
"(",
")",
"if",
"self",
".",
"_are_safety_checks_disabled",
"(",
"u\"check_file_encoding\"",
")",
":",
"return",
"self",
".",
"result",
"if",
"not",
"gf",
".",
"file_can_be_read",
"(",
"input_file_path",
")",
":",
"self",
".",
"_failed",
"(",
"u\"File '%s' cannot be read.\"",
"%",
"(",
"input_file_path",
")",
")",
"return",
"self",
".",
"result",
"with",
"io",
".",
"open",
"(",
"input_file_path",
",",
"\"rb\"",
")",
"as",
"file_object",
":",
"bstring",
"=",
"file_object",
".",
"read",
"(",
")",
"self",
".",
"_check_utf8_encoding",
"(",
"bstring",
")",
"return",
"self",
".",
"result"
] |
Check whether the given file is UTF-8 encoded.
:param string input_file_path: the path of the file to be checked
:rtype: :class:`~aeneas.validator.ValidatorResult`
|
[
"Check",
"whether",
"the",
"given",
"file",
"is",
"UTF",
"-",
"8",
"encoded",
"."
] |
python
|
train
|
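A hedged usage sketch; the file path is made up and the `passed` attribute on the result is assumed from the surrounding aeneas validator API.
from aeneas.validator import Validator
result = Validator().check_file_encoding("/tmp/transcript.txt")
print(result.passed)   # True only if the file can be read and is valid UTF-8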
jtwhite79/pyemu
|
pyemu/mat/mat_handler.py
|
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/mat/mat_handler.py#L18-L64
|
def save_coo(x, row_names, col_names, filename, chunk=None):
"""write a PEST-compatible binary file. The data format is
[int,int,float] for i,j,value. It is autodetected during
the read with Matrix.from_binary().
Parameters
----------
x : numpy.sparse
coo sparse matrix
row_names : list
list of row_names
col_names : list
list of col_names
filename : str
filename to save binary file
droptol : float
absolute value tolerance to make values smaller than zero. Default is None
chunk : int
number of elements to write in a single pass. Default is None
"""
f = open(filename, 'wb')
# print("counting nnz")
# write the header
header = np.array((x.shape[1], x.shape[0], x.nnz),
dtype=Matrix.binary_header_dt)
header.tofile(f)
data = np.core.records.fromarrays([x.row, x.col, x.data], dtype=Matrix.coo_rec_dt)
data.tofile(f)
for name in col_names:
if len(name) > Matrix.new_par_length:
name = name[:Matrix.new_par_length - 1]
elif len(name) < Matrix.new_par_length:
for i in range(len(name), Matrix.new_par_length):
name = name + ' '
f.write(name.encode())
for name in row_names:
if len(name) > Matrix.new_obs_length:
name = name[:Matrix.new_obs_length - 1]
elif len(name) < Matrix.new_obs_length:
for i in range(len(name), Matrix.new_obs_length):
name = name + ' '
f.write(name.encode())
f.close()
|
[
"def",
"save_coo",
"(",
"x",
",",
"row_names",
",",
"col_names",
",",
"filename",
",",
"chunk",
"=",
"None",
")",
":",
"f",
"=",
"open",
"(",
"filename",
",",
"'wb'",
")",
"# print(\"counting nnz\")",
"# write the header",
"header",
"=",
"np",
".",
"array",
"(",
"(",
"x",
".",
"shape",
"[",
"1",
"]",
",",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"x",
".",
"nnz",
")",
",",
"dtype",
"=",
"Matrix",
".",
"binary_header_dt",
")",
"header",
".",
"tofile",
"(",
"f",
")",
"data",
"=",
"np",
".",
"core",
".",
"records",
".",
"fromarrays",
"(",
"[",
"x",
".",
"row",
",",
"x",
".",
"col",
",",
"x",
".",
"data",
"]",
",",
"dtype",
"=",
"Matrix",
".",
"coo_rec_dt",
")",
"data",
".",
"tofile",
"(",
"f",
")",
"for",
"name",
"in",
"col_names",
":",
"if",
"len",
"(",
"name",
")",
">",
"Matrix",
".",
"new_par_length",
":",
"name",
"=",
"name",
"[",
":",
"Matrix",
".",
"new_par_length",
"-",
"1",
"]",
"elif",
"len",
"(",
"name",
")",
"<",
"Matrix",
".",
"new_par_length",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"name",
")",
",",
"Matrix",
".",
"new_par_length",
")",
":",
"name",
"=",
"name",
"+",
"' '",
"f",
".",
"write",
"(",
"name",
".",
"encode",
"(",
")",
")",
"for",
"name",
"in",
"row_names",
":",
"if",
"len",
"(",
"name",
")",
">",
"Matrix",
".",
"new_obs_length",
":",
"name",
"=",
"name",
"[",
":",
"Matrix",
".",
"new_obs_length",
"-",
"1",
"]",
"elif",
"len",
"(",
"name",
")",
"<",
"Matrix",
".",
"new_obs_length",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"name",
")",
",",
"Matrix",
".",
"new_obs_length",
")",
":",
"name",
"=",
"name",
"+",
"' '",
"f",
".",
"write",
"(",
"name",
".",
"encode",
"(",
")",
")",
"f",
".",
"close",
"(",
")"
] |
write a PEST-compatible binary file. The data format is
[int,int,float] for i,j,value. It is autodetected during
the read with Matrix.from_binary().
Parameters
----------
x : numpy.sparse
coo sparse matrix
row_names : list
list of row_names
col_names : list
list of col_names
filename : str
filename to save binary file
droptol : float
absolute value tolerance to make values smaller than zero. Default is None
chunk : int
number of elements to write in a single pass. Default is None
|
[
"write",
"a",
"PEST",
"-",
"compatible",
"binary",
"file",
".",
"The",
"data",
"format",
"is",
"[",
"int",
"int",
"float",
"]",
"for",
"i",
"j",
"value",
".",
"It",
"is",
"autodetected",
"during",
"the",
"read",
"with",
"Matrix",
".",
"from_binary",
"()",
"."
] |
python
|
train
|
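A hedged sketch of calling the helper above with a tiny scipy COO matrix; the row/column names, values and file name are illustrative only.
import numpy as np
import scipy.sparse as sparse
from pyemu.mat.mat_handler import save_coo

x = sparse.coo_matrix(np.array([[1.0, 0.0], [0.0, 2.0]]))
save_coo(x, row_names=["obs1", "obs2"], col_names=["par1", "par2"], filename="mat.coo.bin")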
gem/oq-engine
|
openquake/calculators/base.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/base.py#L568-L586
|
def load_riskmodel(self):
# to be called before read_exposure
# NB: this is called even if there is no risk model
"""
Read the risk model and set the attribute .riskmodel.
The riskmodel can be empty for hazard calculations.
Save the loss ratios (if any) in the datastore.
"""
logging.info('Reading the risk model if present')
self.riskmodel = readinput.get_risk_model(self.oqparam)
if not self.riskmodel:
parent = self.datastore.parent
if 'risk_model' in parent:
self.riskmodel = riskinput.CompositeRiskModel.read(parent)
return
if self.oqparam.ground_motion_fields and not self.oqparam.imtls:
raise InvalidFile('No intensity_measure_types specified in %s' %
self.oqparam.inputs['job_ini'])
self.save_params()
|
[
"def",
"load_riskmodel",
"(",
"self",
")",
":",
"# to be called before read_exposure",
"# NB: this is called even if there is no risk model",
"logging",
".",
"info",
"(",
"'Reading the risk model if present'",
")",
"self",
".",
"riskmodel",
"=",
"readinput",
".",
"get_risk_model",
"(",
"self",
".",
"oqparam",
")",
"if",
"not",
"self",
".",
"riskmodel",
":",
"parent",
"=",
"self",
".",
"datastore",
".",
"parent",
"if",
"'risk_model'",
"in",
"parent",
":",
"self",
".",
"riskmodel",
"=",
"riskinput",
".",
"CompositeRiskModel",
".",
"read",
"(",
"parent",
")",
"return",
"if",
"self",
".",
"oqparam",
".",
"ground_motion_fields",
"and",
"not",
"self",
".",
"oqparam",
".",
"imtls",
":",
"raise",
"InvalidFile",
"(",
"'No intensity_measure_types specified in %s'",
"%",
"self",
".",
"oqparam",
".",
"inputs",
"[",
"'job_ini'",
"]",
")",
"self",
".",
"save_params",
"(",
")"
] |
Read the risk model and set the attribute .riskmodel.
The riskmodel can be empty for hazard calculations.
Save the loss ratios (if any) in the datastore.
|
[
"Read",
"the",
"risk",
"model",
"and",
"set",
"the",
"attribute",
".",
"riskmodel",
".",
"The",
"riskmodel",
"can",
"be",
"empty",
"for",
"hazard",
"calculations",
".",
"Save",
"the",
"loss",
"ratios",
"(",
"if",
"any",
")",
"in",
"the",
"datastore",
"."
] |
python
|
train
|
bmcfee/pumpp
|
pumpp/base.py
|
https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/base.py#L52-L84
|
def register(self, field, shape, dtype):
'''Register a field as a tensor with specified shape and type.
A `Tensor` of the given shape and type will be registered in this
object's `fields` dict.
Parameters
----------
field : str
The name of the field
shape : iterable of `int` or `None`
The shape of the output variable.
This does not include a dimension for multiple outputs.
`None` may be used to indicate variable-length outputs
dtype : type
The data type of the field
Raises
------
ParameterError
If dtype or shape are improperly specified
'''
if not isinstance(dtype, type):
raise ParameterError('dtype={} must be a type'.format(dtype))
if not (isinstance(shape, Iterable) and
all([s is None or isinstance(s, int) for s in shape])):
raise ParameterError('shape={} must be an iterable of integers'.format(shape))
self.fields[self.scope(field)] = Tensor(tuple(shape), dtype)
|
[
"def",
"register",
"(",
"self",
",",
"field",
",",
"shape",
",",
"dtype",
")",
":",
"if",
"not",
"isinstance",
"(",
"dtype",
",",
"type",
")",
":",
"raise",
"ParameterError",
"(",
"'dtype={} must be a type'",
".",
"format",
"(",
"dtype",
")",
")",
"if",
"not",
"(",
"isinstance",
"(",
"shape",
",",
"Iterable",
")",
"and",
"all",
"(",
"[",
"s",
"is",
"None",
"or",
"isinstance",
"(",
"s",
",",
"int",
")",
"for",
"s",
"in",
"shape",
"]",
")",
")",
":",
"raise",
"ParameterError",
"(",
"'shape={} must be an iterable of integers'",
".",
"format",
"(",
"shape",
")",
")",
"self",
".",
"fields",
"[",
"self",
".",
"scope",
"(",
"field",
")",
"]",
"=",
"Tensor",
"(",
"tuple",
"(",
"shape",
")",
",",
"dtype",
")"
] |
Register a field as a tensor with specified shape and type.
A `Tensor` of the given shape and type will be registered in this
object's `fields` dict.
Parameters
----------
field : str
The name of the field
shape : iterable of `int` or `None`
The shape of the output variable.
This does not include a dimension for multiple outputs.
`None` may be used to indicate variable-length outputs
dtype : type
The data type of the field
Raises
------
ParameterError
If dtype or shape are improperly specified
|
[
"Register",
"a",
"field",
"as",
"a",
"tensor",
"with",
"specified",
"shape",
"and",
"type",
"."
] |
python
|
train
|
django-auth-ldap/django-auth-ldap
|
django_auth_ldap/backend.py
|
https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/backend.py#L591-L632
|
def _get_or_create_user(self, force_populate=False):
"""
Loads the User model object from the database or creates it if it
doesn't exist. Also populates the fields, subject to
AUTH_LDAP_ALWAYS_UPDATE_USER.
"""
save_user = False
username = self.backend.ldap_to_django_username(self._username)
self._user, built = self.backend.get_or_build_user(username, self)
self._user.ldap_user = self
self._user.ldap_username = self._username
should_populate = force_populate or self.settings.ALWAYS_UPDATE_USER or built
if built:
if self.settings.NO_NEW_USERS:
raise self.AuthenticationFailed(
"user does not satisfy AUTH_LDAP_NO_NEW_USERS"
)
logger.debug("Creating Django user {}".format(username))
self._user.set_unusable_password()
save_user = True
if should_populate:
logger.debug("Populating Django user {}".format(username))
self._populate_user()
save_user = True
# Give the client a chance to finish populating the user just
# before saving.
populate_user.send(self.backend.__class__, user=self._user, ldap_user=self)
if save_user:
self._user.save()
# This has to wait until we're sure the user has a pk.
if self.settings.MIRROR_GROUPS or self.settings.MIRROR_GROUPS_EXCEPT:
self._normalize_mirror_settings()
self._mirror_groups()
|
[
"def",
"_get_or_create_user",
"(",
"self",
",",
"force_populate",
"=",
"False",
")",
":",
"save_user",
"=",
"False",
"username",
"=",
"self",
".",
"backend",
".",
"ldap_to_django_username",
"(",
"self",
".",
"_username",
")",
"self",
".",
"_user",
",",
"built",
"=",
"self",
".",
"backend",
".",
"get_or_build_user",
"(",
"username",
",",
"self",
")",
"self",
".",
"_user",
".",
"ldap_user",
"=",
"self",
"self",
".",
"_user",
".",
"ldap_username",
"=",
"self",
".",
"_username",
"should_populate",
"=",
"force_populate",
"or",
"self",
".",
"settings",
".",
"ALWAYS_UPDATE_USER",
"or",
"built",
"if",
"built",
":",
"if",
"self",
".",
"settings",
".",
"NO_NEW_USERS",
":",
"raise",
"self",
".",
"AuthenticationFailed",
"(",
"\"user does not satisfy AUTH_LDAP_NO_NEW_USERS\"",
")",
"logger",
".",
"debug",
"(",
"\"Creating Django user {}\"",
".",
"format",
"(",
"username",
")",
")",
"self",
".",
"_user",
".",
"set_unusable_password",
"(",
")",
"save_user",
"=",
"True",
"if",
"should_populate",
":",
"logger",
".",
"debug",
"(",
"\"Populating Django user {}\"",
".",
"format",
"(",
"username",
")",
")",
"self",
".",
"_populate_user",
"(",
")",
"save_user",
"=",
"True",
"# Give the client a chance to finish populating the user just",
"# before saving.",
"populate_user",
".",
"send",
"(",
"self",
".",
"backend",
".",
"__class__",
",",
"user",
"=",
"self",
".",
"_user",
",",
"ldap_user",
"=",
"self",
")",
"if",
"save_user",
":",
"self",
".",
"_user",
".",
"save",
"(",
")",
"# This has to wait until we're sure the user has a pk.",
"if",
"self",
".",
"settings",
".",
"MIRROR_GROUPS",
"or",
"self",
".",
"settings",
".",
"MIRROR_GROUPS_EXCEPT",
":",
"self",
".",
"_normalize_mirror_settings",
"(",
")",
"self",
".",
"_mirror_groups",
"(",
")"
] |
Loads the User model object from the database or creates it if it
doesn't exist. Also populates the fields, subject to
AUTH_LDAP_ALWAYS_UPDATE_USER.
|
[
"Loads",
"the",
"User",
"model",
"object",
"from",
"the",
"database",
"or",
"creates",
"it",
"if",
"it",
"doesn",
"t",
"exist",
".",
"Also",
"populates",
"the",
"fields",
"subject",
"to",
"AUTH_LDAP_ALWAYS_UPDATE_USER",
"."
] |
python
|
train
|
tonyfischetti/sake
|
sakelib/build.py
|
https://github.com/tonyfischetti/sake/blob/b7ad20fe8e7137db99a20ac06b8da26492601b00/sakelib/build.py#L115-L129
|
def write_shas_to_shastore(sha_dict):
"""
Writes a sha1 dictionary stored in memory to
the .shastore file
"""
if sys.version_info[0] < 3:
fn_open = open
else:
fn_open = io.open
with fn_open(".shastore", "w") as fh:
fh.write("---\n")
fh.write('sake version: {}\n'.format(constants.VERSION))
if sha_dict:
fh.write(yaml.dump(sha_dict))
fh.write("...")
|
[
"def",
"write_shas_to_shastore",
"(",
"sha_dict",
")",
":",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"fn_open",
"=",
"open",
"else",
":",
"fn_open",
"=",
"io",
".",
"open",
"with",
"fn_open",
"(",
"\".shastore\"",
",",
"\"w\"",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"\"---\\n\"",
")",
"fh",
".",
"write",
"(",
"'sake version: {}\\n'",
".",
"format",
"(",
"constants",
".",
"VERSION",
")",
")",
"if",
"sha_dict",
":",
"fh",
".",
"write",
"(",
"yaml",
".",
"dump",
"(",
"sha_dict",
")",
")",
"fh",
".",
"write",
"(",
"\"...\"",
")"
] |
Writes a sha1 dictionary stored in memory to
the .shastore file
|
[
"Writes",
"a",
"sha1",
"dictionary",
"stored",
"in",
"memory",
"to",
"the",
".",
"shastore",
"file"
] |
python
|
valid
|
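A minimal, hedged usage sketch for the write_shas_to_shastore entry above. The file names and the dictionary layout are assumptions; the function itself simply YAML-dumps whatever mapping it is given, prefixed with a sake version line.

# Illustrative only: hash two files and persist the result via the entry's function.
import hashlib
from sakelib.build import write_shas_to_shastore

def sha1_of(path):
    # Stream the file in chunks so large files are not read into memory at once.
    h = hashlib.sha1()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(65536), b""):
            h.update(chunk)
    return h.hexdigest()

sha_dict = {"files": {name: {"sha": sha1_of(name)}
                      for name in ("Sakefile", "output.txt")}}
write_shas_to_shastore(sha_dict)  # writes "---", a sake version line, the YAML, then "..."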
mlperf/training
|
object_detection/pytorch/maskrcnn_benchmark/data/datasets/evaluation/voc/voc_eval.py
|
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/maskrcnn_benchmark/data/datasets/evaluation/voc/voc_eval.py#L68-L157
|
def calc_detection_voc_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5):
"""Calculate precision and recall based on evaluation code of PASCAL VOC.
This function calculates precision and recall of
predicted bounding boxes obtained from a dataset which has :math:`N`
images.
The code is based on the evaluation code used in PASCAL VOC Challenge.
"""
n_pos = defaultdict(int)
score = defaultdict(list)
match = defaultdict(list)
for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):
pred_bbox = pred_boxlist.bbox.numpy()
pred_label = pred_boxlist.get_field("labels").numpy()
pred_score = pred_boxlist.get_field("scores").numpy()
gt_bbox = gt_boxlist.bbox.numpy()
gt_label = gt_boxlist.get_field("labels").numpy()
gt_difficult = gt_boxlist.get_field("difficult").numpy()
for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
pred_mask_l = pred_label == l
pred_bbox_l = pred_bbox[pred_mask_l]
pred_score_l = pred_score[pred_mask_l]
# sort by score
order = pred_score_l.argsort()[::-1]
pred_bbox_l = pred_bbox_l[order]
pred_score_l = pred_score_l[order]
gt_mask_l = gt_label == l
gt_bbox_l = gt_bbox[gt_mask_l]
gt_difficult_l = gt_difficult[gt_mask_l]
n_pos[l] += np.logical_not(gt_difficult_l).sum()
score[l].extend(pred_score_l)
if len(pred_bbox_l) == 0:
continue
if len(gt_bbox_l) == 0:
match[l].extend((0,) * pred_bbox_l.shape[0])
continue
# VOC evaluation follows integer typed bounding boxes.
pred_bbox_l = pred_bbox_l.copy()
pred_bbox_l[:, 2:] += 1
gt_bbox_l = gt_bbox_l.copy()
gt_bbox_l[:, 2:] += 1
iou = boxlist_iou(
BoxList(pred_bbox_l, gt_boxlist.size),
BoxList(gt_bbox_l, gt_boxlist.size),
).numpy()
gt_index = iou.argmax(axis=1)
# set -1 if there is no matching ground truth
gt_index[iou.max(axis=1) < iou_thresh] = -1
del iou
selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
for gt_idx in gt_index:
if gt_idx >= 0:
if gt_difficult_l[gt_idx]:
match[l].append(-1)
else:
if not selec[gt_idx]:
match[l].append(1)
else:
match[l].append(0)
selec[gt_idx] = True
else:
match[l].append(0)
n_fg_class = max(n_pos.keys()) + 1
prec = [None] * n_fg_class
rec = [None] * n_fg_class
for l in n_pos.keys():
score_l = np.array(score[l])
match_l = np.array(match[l], dtype=np.int8)
order = score_l.argsort()[::-1]
match_l = match_l[order]
tp = np.cumsum(match_l == 1)
fp = np.cumsum(match_l == 0)
# If an element of fp + tp is 0,
# the corresponding element of prec[l] is nan.
prec[l] = tp / (fp + tp)
# If n_pos[l] is 0, rec[l] is None.
if n_pos[l] > 0:
rec[l] = tp / n_pos[l]
return prec, rec
|
[
"def",
"calc_detection_voc_prec_rec",
"(",
"gt_boxlists",
",",
"pred_boxlists",
",",
"iou_thresh",
"=",
"0.5",
")",
":",
"n_pos",
"=",
"defaultdict",
"(",
"int",
")",
"score",
"=",
"defaultdict",
"(",
"list",
")",
"match",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"gt_boxlist",
",",
"pred_boxlist",
"in",
"zip",
"(",
"gt_boxlists",
",",
"pred_boxlists",
")",
":",
"pred_bbox",
"=",
"pred_boxlist",
".",
"bbox",
".",
"numpy",
"(",
")",
"pred_label",
"=",
"pred_boxlist",
".",
"get_field",
"(",
"\"labels\"",
")",
".",
"numpy",
"(",
")",
"pred_score",
"=",
"pred_boxlist",
".",
"get_field",
"(",
"\"scores\"",
")",
".",
"numpy",
"(",
")",
"gt_bbox",
"=",
"gt_boxlist",
".",
"bbox",
".",
"numpy",
"(",
")",
"gt_label",
"=",
"gt_boxlist",
".",
"get_field",
"(",
"\"labels\"",
")",
".",
"numpy",
"(",
")",
"gt_difficult",
"=",
"gt_boxlist",
".",
"get_field",
"(",
"\"difficult\"",
")",
".",
"numpy",
"(",
")",
"for",
"l",
"in",
"np",
".",
"unique",
"(",
"np",
".",
"concatenate",
"(",
"(",
"pred_label",
",",
"gt_label",
")",
")",
".",
"astype",
"(",
"int",
")",
")",
":",
"pred_mask_l",
"=",
"pred_label",
"==",
"l",
"pred_bbox_l",
"=",
"pred_bbox",
"[",
"pred_mask_l",
"]",
"pred_score_l",
"=",
"pred_score",
"[",
"pred_mask_l",
"]",
"# sort by score",
"order",
"=",
"pred_score_l",
".",
"argsort",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"pred_bbox_l",
"=",
"pred_bbox_l",
"[",
"order",
"]",
"pred_score_l",
"=",
"pred_score_l",
"[",
"order",
"]",
"gt_mask_l",
"=",
"gt_label",
"==",
"l",
"gt_bbox_l",
"=",
"gt_bbox",
"[",
"gt_mask_l",
"]",
"gt_difficult_l",
"=",
"gt_difficult",
"[",
"gt_mask_l",
"]",
"n_pos",
"[",
"l",
"]",
"+=",
"np",
".",
"logical_not",
"(",
"gt_difficult_l",
")",
".",
"sum",
"(",
")",
"score",
"[",
"l",
"]",
".",
"extend",
"(",
"pred_score_l",
")",
"if",
"len",
"(",
"pred_bbox_l",
")",
"==",
"0",
":",
"continue",
"if",
"len",
"(",
"gt_bbox_l",
")",
"==",
"0",
":",
"match",
"[",
"l",
"]",
".",
"extend",
"(",
"(",
"0",
",",
")",
"*",
"pred_bbox_l",
".",
"shape",
"[",
"0",
"]",
")",
"continue",
"# VOC evaluation follows integer typed bounding boxes.",
"pred_bbox_l",
"=",
"pred_bbox_l",
".",
"copy",
"(",
")",
"pred_bbox_l",
"[",
":",
",",
"2",
":",
"]",
"+=",
"1",
"gt_bbox_l",
"=",
"gt_bbox_l",
".",
"copy",
"(",
")",
"gt_bbox_l",
"[",
":",
",",
"2",
":",
"]",
"+=",
"1",
"iou",
"=",
"boxlist_iou",
"(",
"BoxList",
"(",
"pred_bbox_l",
",",
"gt_boxlist",
".",
"size",
")",
",",
"BoxList",
"(",
"gt_bbox_l",
",",
"gt_boxlist",
".",
"size",
")",
",",
")",
".",
"numpy",
"(",
")",
"gt_index",
"=",
"iou",
".",
"argmax",
"(",
"axis",
"=",
"1",
")",
"# set -1 if there is no matching ground truth",
"gt_index",
"[",
"iou",
".",
"max",
"(",
"axis",
"=",
"1",
")",
"<",
"iou_thresh",
"]",
"=",
"-",
"1",
"del",
"iou",
"selec",
"=",
"np",
".",
"zeros",
"(",
"gt_bbox_l",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"bool",
")",
"for",
"gt_idx",
"in",
"gt_index",
":",
"if",
"gt_idx",
">=",
"0",
":",
"if",
"gt_difficult_l",
"[",
"gt_idx",
"]",
":",
"match",
"[",
"l",
"]",
".",
"append",
"(",
"-",
"1",
")",
"else",
":",
"if",
"not",
"selec",
"[",
"gt_idx",
"]",
":",
"match",
"[",
"l",
"]",
".",
"append",
"(",
"1",
")",
"else",
":",
"match",
"[",
"l",
"]",
".",
"append",
"(",
"0",
")",
"selec",
"[",
"gt_idx",
"]",
"=",
"True",
"else",
":",
"match",
"[",
"l",
"]",
".",
"append",
"(",
"0",
")",
"n_fg_class",
"=",
"max",
"(",
"n_pos",
".",
"keys",
"(",
")",
")",
"+",
"1",
"prec",
"=",
"[",
"None",
"]",
"*",
"n_fg_class",
"rec",
"=",
"[",
"None",
"]",
"*",
"n_fg_class",
"for",
"l",
"in",
"n_pos",
".",
"keys",
"(",
")",
":",
"score_l",
"=",
"np",
".",
"array",
"(",
"score",
"[",
"l",
"]",
")",
"match_l",
"=",
"np",
".",
"array",
"(",
"match",
"[",
"l",
"]",
",",
"dtype",
"=",
"np",
".",
"int8",
")",
"order",
"=",
"score_l",
".",
"argsort",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"match_l",
"=",
"match_l",
"[",
"order",
"]",
"tp",
"=",
"np",
".",
"cumsum",
"(",
"match_l",
"==",
"1",
")",
"fp",
"=",
"np",
".",
"cumsum",
"(",
"match_l",
"==",
"0",
")",
"# If an element of fp + tp is 0,",
"# the corresponding element of prec[l] is nan.",
"prec",
"[",
"l",
"]",
"=",
"tp",
"/",
"(",
"fp",
"+",
"tp",
")",
"# If n_pos[l] is 0, rec[l] is None.",
"if",
"n_pos",
"[",
"l",
"]",
">",
"0",
":",
"rec",
"[",
"l",
"]",
"=",
"tp",
"/",
"n_pos",
"[",
"l",
"]",
"return",
"prec",
",",
"rec"
] |
Calculate precision and recall based on evaluation code of PASCAL VOC.
This function calculates precision and recall of
predicted bounding boxes obtained from a dataset which has :math:`N`
images.
The code is based on the evaluation code used in PASCAL VOC Challenge.
|
[
"Calculate",
"precision",
"and",
"recall",
"based",
"on",
"evaluation",
"code",
"of",
"PASCAL",
"VOC",
".",
"This",
"function",
"calculates",
"precision",
"and",
"recall",
"of",
"predicted",
"bounding",
"boxes",
"obtained",
"from",
"a",
"dataset",
"which",
"has",
":",
"math",
":",
"N",
"images",
".",
"The",
"code",
"is",
"based",
"on",
"the",
"evaluation",
"code",
"used",
"in",
"PASCAL",
"VOC",
"Challenge",
"."
] |
python
|
train
|
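The entry above returns per-class precision and recall arrays; average precision is normally computed from them in a follow-up step. Below is a hedged sketch of the standard all-point VOC-style AP computation. This helper is an assumption for illustration and is not part of the entry.

import numpy as np

def voc_ap_from_prec_rec(prec, rec):
    # All-point interpolation: make precision monotonically decreasing along
    # recall, then integrate it. Classes with no data (None) stay NaN.
    ap = np.full(len(prec), np.nan)
    for l, (p, r) in enumerate(zip(prec, rec)):
        if p is None or r is None:
            continue
        mpre = np.concatenate(([0.0], np.nan_to_num(p), [0.0]))
        mrec = np.concatenate(([0.0], r, [1.0]))
        mpre = np.maximum.accumulate(mpre[::-1])[::-1]
        idx = np.where(mrec[1:] != mrec[:-1])[0]
        ap[l] = np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
    return ap

# Usage with the entry's output: prec, rec = calc_detection_voc_prec_rec(gts, preds)
# followed by voc_ap_from_prec_rec(prec, rec).mean() for a mean AP over classes.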
pytroll/satpy
|
satpy/scene.py
|
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/scene.py#L1036-L1097
|
def resample(self, destination=None, datasets=None, generate=True,
unload=True, resampler=None, reduce_data=True,
**resample_kwargs):
"""Resample datasets and return a new scene.
Args:
destination (AreaDefinition, GridDefinition): area definition to
resample to. If not specified then the area returned by
`Scene.max_area()` will be used.
datasets (list): Limit datasets to resample to these specified
`DatasetID` objects . By default all currently loaded
datasets are resampled.
generate (bool): Generate any requested composites that could not
be previously due to incompatible areas (default: True).
unload (bool): Remove any datasets no longer needed after
requested composites have been generated (default: True).
resampler (str): Name of resampling method to use. By default,
this is a nearest neighbor KDTree-based resampling
('nearest'). Other possible values include 'native', 'ewa',
etc. See the :mod:`~satpy.resample` documentation for more
information.
reduce_data (bool): Reduce data by matching the input and output
areas and slicing the data arrays (default: True)
resample_kwargs: Remaining keyword arguments to pass to individual
resampler classes. See the individual resampler class
documentation :mod:`here <satpy.resample>` for available
arguments.
"""
to_resample_ids = [dsid for (dsid, dataset) in self.datasets.items()
if (not datasets) or dsid in datasets]
if destination is None:
destination = self.max_area(to_resample_ids)
new_scn = self.copy(datasets=to_resample_ids)
# we may have some datasets we asked for but don't exist yet
new_scn.wishlist = self.wishlist.copy()
self._resampled_scene(new_scn, destination, resampler=resampler,
reduce_data=reduce_data, **resample_kwargs)
# regenerate anything from the wishlist that needs it (combining
# multiple resolutions, etc.)
if generate:
keepables = new_scn.generate_composites()
else:
# don't lose datasets that we may need later for generating
# composites
keepables = set(new_scn.datasets.keys()) | new_scn.wishlist
if new_scn.missing_datasets:
# copy the set of missing datasets because they won't be valid
# after they are removed in the next line
missing = new_scn.missing_datasets.copy()
new_scn._remove_failed_datasets(keepables)
missing_str = ", ".join(str(x) for x in missing)
LOG.warning(
"The following datasets "
"were not created: {}".format(missing_str))
if unload:
new_scn.unload(keepables)
return new_scn
|
[
"def",
"resample",
"(",
"self",
",",
"destination",
"=",
"None",
",",
"datasets",
"=",
"None",
",",
"generate",
"=",
"True",
",",
"unload",
"=",
"True",
",",
"resampler",
"=",
"None",
",",
"reduce_data",
"=",
"True",
",",
"*",
"*",
"resample_kwargs",
")",
":",
"to_resample_ids",
"=",
"[",
"dsid",
"for",
"(",
"dsid",
",",
"dataset",
")",
"in",
"self",
".",
"datasets",
".",
"items",
"(",
")",
"if",
"(",
"not",
"datasets",
")",
"or",
"dsid",
"in",
"datasets",
"]",
"if",
"destination",
"is",
"None",
":",
"destination",
"=",
"self",
".",
"max_area",
"(",
"to_resample_ids",
")",
"new_scn",
"=",
"self",
".",
"copy",
"(",
"datasets",
"=",
"to_resample_ids",
")",
"# we may have some datasets we asked for but don't exist yet",
"new_scn",
".",
"wishlist",
"=",
"self",
".",
"wishlist",
".",
"copy",
"(",
")",
"self",
".",
"_resampled_scene",
"(",
"new_scn",
",",
"destination",
",",
"resampler",
"=",
"resampler",
",",
"reduce_data",
"=",
"reduce_data",
",",
"*",
"*",
"resample_kwargs",
")",
"# regenerate anything from the wishlist that needs it (combining",
"# multiple resolutions, etc.)",
"if",
"generate",
":",
"keepables",
"=",
"new_scn",
".",
"generate_composites",
"(",
")",
"else",
":",
"# don't lose datasets that we may need later for generating",
"# composites",
"keepables",
"=",
"set",
"(",
"new_scn",
".",
"datasets",
".",
"keys",
"(",
")",
")",
"|",
"new_scn",
".",
"wishlist",
"if",
"new_scn",
".",
"missing_datasets",
":",
"# copy the set of missing datasets because they won't be valid",
"# after they are removed in the next line",
"missing",
"=",
"new_scn",
".",
"missing_datasets",
".",
"copy",
"(",
")",
"new_scn",
".",
"_remove_failed_datasets",
"(",
"keepables",
")",
"missing_str",
"=",
"\", \"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"missing",
")",
"LOG",
".",
"warning",
"(",
"\"The following datasets \"",
"\"were not created: {}\"",
".",
"format",
"(",
"missing_str",
")",
")",
"if",
"unload",
":",
"new_scn",
".",
"unload",
"(",
"keepables",
")",
"return",
"new_scn"
] |
Resample datasets and return a new scene.
Args:
destination (AreaDefinition, GridDefinition): area definition to
resample to. If not specified then the area returned by
`Scene.max_area()` will be used.
datasets (list): Limit datasets to resample to these specified
`DatasetID` objects . By default all currently loaded
datasets are resampled.
generate (bool): Generate any requested composites that could not
be previously due to incompatible areas (default: True).
unload (bool): Remove any datasets no longer needed after
requested composites have been generated (default: True).
resampler (str): Name of resampling method to use. By default,
this is a nearest neighbor KDTree-based resampling
('nearest'). Other possible values include 'native', 'ewa',
etc. See the :mod:`~satpy.resample` documentation for more
information.
reduce_data (bool): Reduce data by matching the input and output
areas and slicing the data arrays (default: True)
resample_kwargs: Remaining keyword arguments to pass to individual
resampler classes. See the individual resampler class
documentation :mod:`here <satpy.resample>` for available
arguments.
|
[
"Resample",
"datasets",
"and",
"return",
"a",
"new",
"scene",
"."
] |
python
|
train
|
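A hedged usage sketch for the Scene.resample entry above. The reader name, file list, and the 'euro4' area are placeholders assumed for illustration; the keyword names match the signature shown in the entry.

# Illustrative only: load a channel and resample it with the method above.
from satpy import Scene

my_files = [...]  # hypothetical list of input files
scn = Scene(reader="seviri_l1b_hrit", filenames=my_files)
scn.load(["IR_108"])
new_scn = scn.resample("euro4", resampler="nearest", reduce_data=True)
new_scn.save_datasets()  # write the resampled datasets to disk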