repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (1 class) | partition (3 values) |
---|---|---|---|---|---|---|---|---|
ska-sa/purr | Purr/Plugins/local_pychart/arrow.py | https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/arrow.py#L111-L146 | def draw(self, points, can=None):
    """Parameter <points> specifies the
    list of points the arrow traverses through.
    It should contain at least two points, i.e.,
    the tail and tip. Parameter
    <can> is an optional parameter that specifies the output.
    <<canvas>>
    """
    if can == None: can = canvas.default_canvas()
    assert self.check_integrity()
    xtip = points[-1][0]
    ytip = points[-1][1]
    xtail = points[-2][0]
    ytail = points[-2][1]
    can.newpath()
    can.set_line_style(self.line_style)
    if len(points) > 2:
        can.moveto(points[0][0], points[0][1])
        for i in range(1, len(points) - 1):
            can.lineto(points[i][0], points[i][1])
    draw_arrowbody(can, xscale(xtail), yscale(ytail),
                   yscale(xtip), yscale(ytip),
                   nscale(self.head_len))
    can.set_fill_color(self.head_color)
    draw_arrowhead(can, xscale(xtail), yscale(ytail),
                   xscale(xtip), yscale(ytip),
                   nscale(self.thickness),
                   nscale(self.head_len),
                   self.head_style)
    can.setbb(xtail, ytail)
    can.setbb(xtip, ytip) | [
"def",
"draw",
"(",
"self",
",",
"points",
",",
"can",
"=",
"None",
")",
":",
"if",
"can",
"==",
"None",
":",
"can",
"=",
"canvas",
".",
"default_canvas",
"(",
")",
"assert",
"self",
".",
"check_integrity",
"(",
")",
"xtip",
"=",
"points",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"ytip",
"=",
"points",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"xtail",
"=",
"points",
"[",
"-",
"2",
"]",
"[",
"0",
"]",
"ytail",
"=",
"points",
"[",
"-",
"2",
"]",
"[",
"1",
"]",
"can",
".",
"newpath",
"(",
")",
"can",
".",
"set_line_style",
"(",
"self",
".",
"line_style",
")",
"if",
"len",
"(",
"points",
")",
">",
"2",
":",
"can",
".",
"moveto",
"(",
"points",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"points",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"points",
")",
"-",
"1",
")",
":",
"can",
".",
"lineto",
"(",
"points",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"points",
"[",
"i",
"]",
"[",
"1",
"]",
")",
"draw_arrowbody",
"(",
"can",
",",
"xscale",
"(",
"xtail",
")",
",",
"yscale",
"(",
"ytail",
")",
",",
"yscale",
"(",
"xtip",
")",
",",
"yscale",
"(",
"ytip",
")",
",",
"nscale",
"(",
"self",
".",
"head_len",
")",
")",
"can",
".",
"set_fill_color",
"(",
"self",
".",
"head_color",
")",
"draw_arrowhead",
"(",
"can",
",",
"xscale",
"(",
"xtail",
")",
",",
"yscale",
"(",
"ytail",
")",
",",
"xscale",
"(",
"xtip",
")",
",",
"yscale",
"(",
"ytip",
")",
",",
"nscale",
"(",
"self",
".",
"thickness",
")",
",",
"nscale",
"(",
"self",
".",
"head_len",
")",
",",
"self",
".",
"head_style",
")",
"can",
".",
"setbb",
"(",
"xtail",
",",
"ytail",
")",
"can",
".",
"setbb",
"(",
"xtip",
",",
"ytip",
")"
] | Parameter <points> specifies the
list of points the arrow traverses through.
It should contain at least two points, i.e.,
the tail and tip. Parameter
<can> is an optional parameter that specifies the output.
<<canvas>> | [
"Parameter",
"<points",
">",
"specifies",
"the",
"list",
"of",
"points",
"the",
"arrow",
"traverses",
"through",
".",
"It",
"should",
"contain",
"at",
"least",
"two",
"points",
"i",
".",
"e",
".",
"the",
"tail",
"and",
"tip",
".",
"Parameter",
"<can",
">",
"is",
"an",
"optional",
"parameter",
"that",
"specifies",
"the",
"output",
".",
"<<canvas",
">>"
] | python | train |
nosedjango/nosedjango | nosedjango/nosedjango.py | https://github.com/nosedjango/nosedjango/blob/cd4d06857c88291769bc38e5c9573f43b7ffcd6a/nosedjango/nosedjango.py#L232-L317 | def begin(self):
    """
    Create the test database and schema, if needed, and switch the
    connection over to that database. Then call install() to install
    all apps listed in the loaded settings module.
    """
    for plugin in self.nose_config.plugins.plugins:
        if getattr(plugin, 'django_plugin', False):
            self.django_plugins.append(plugin)
    os.environ['DJANGO_SETTINGS_MODULE'] = self.settings_module
    if self.conf.addPaths:
        map(add_path, self.conf.where)
    try:
        __import__(self.settings_module)
        self.settings_path = self.settings_module
    except ImportError:
        # Settings module is not found in PYTHONPATH. Try to do
        # some funky backwards crawling in directory tree, ie. add
        # the working directory (and any package parents) to
        # sys.path before trying to import django modules;
        # otherwise, they won't be able to find project.settings
        # if the working dir is project/ or project/..
        self.settings_path = get_settings_path(self.settings_module)
        if not self.settings_path:
            # short circuit if no settings file can be found
            raise RuntimeError("Can't find Django settings file!")
        add_path(self.settings_path)
        sys.path.append(self.settings_path)
    from django.conf import settings
    # Some Django code paths evaluate differently
    # between DEBUG and not DEBUG. Example of this include the url
    # dispatcher when 404's are hit. Django's own test runner forces DEBUG
    # to be off.
    settings.DEBUG = False
    self.call_plugins_method('beforeConnectionSetup', settings)
    from django.core import management
    from django.test.utils import setup_test_environment
    if hasattr(settings, 'DATABASES'):
        self.old_db = settings.DATABASES['default']['NAME']
    else:
        self.old_db = settings.DATABASE_NAME
    from django.db import connections
    self._monkeypatch_test_classes()
    for connection in connections.all():
        self.call_plugins_method(
            'beforeTestSetup', settings, setup_test_environment,
            connection)
    try:
        setup_test_environment()
    except RuntimeError: # Django 1.11 + multiprocess this happens.
        pass
    import django
    if hasattr(django, 'setup'):
        django.setup()
    self.call_plugins_method('afterTestSetup', settings)
    management.get_commands()
    # Ensure that nothing (eg. South) steals away our syncdb command
    if self.django_version < self.DJANGO_1_7:
        management._commands['syncdb'] = 'django.core'
    for connection in connections.all():
        self.call_plugins_method(
            'beforeTestDb', settings, connection, management)
        connection.creation.create_test_db(
            verbosity=self.verbosity,
            autoclobber=True,
        )
        logger.debug("Running syncdb")
        self._num_syncdb_calls += 1
        self.call_plugins_method('afterTestDb', settings, connection)
    self.store_original_transaction_methods() | [
"def",
"begin",
"(",
"self",
")",
":",
"for",
"plugin",
"in",
"self",
".",
"nose_config",
".",
"plugins",
".",
"plugins",
":",
"if",
"getattr",
"(",
"plugin",
",",
"'django_plugin'",
",",
"False",
")",
":",
"self",
".",
"django_plugins",
".",
"append",
"(",
"plugin",
")",
"os",
".",
"environ",
"[",
"'DJANGO_SETTINGS_MODULE'",
"]",
"=",
"self",
".",
"settings_module",
"if",
"self",
".",
"conf",
".",
"addPaths",
":",
"map",
"(",
"add_path",
",",
"self",
".",
"conf",
".",
"where",
")",
"try",
":",
"__import__",
"(",
"self",
".",
"settings_module",
")",
"self",
".",
"settings_path",
"=",
"self",
".",
"settings_module",
"except",
"ImportError",
":",
"# Settings module is not found in PYTHONPATH. Try to do",
"# some funky backwards crawling in directory tree, ie. add",
"# the working directory (and any package parents) to",
"# sys.path before trying to import django modules;",
"# otherwise, they won't be able to find project.settings",
"# if the working dir is project/ or project/..",
"self",
".",
"settings_path",
"=",
"get_settings_path",
"(",
"self",
".",
"settings_module",
")",
"if",
"not",
"self",
".",
"settings_path",
":",
"# short circuit if no settings file can be found",
"raise",
"RuntimeError",
"(",
"\"Can't find Django settings file!\"",
")",
"add_path",
"(",
"self",
".",
"settings_path",
")",
"sys",
".",
"path",
".",
"append",
"(",
"self",
".",
"settings_path",
")",
"from",
"django",
".",
"conf",
"import",
"settings",
"# Some Django code paths evaluate differently",
"# between DEBUG and not DEBUG. Example of this include the url",
"# dispatcher when 404's are hit. Django's own test runner forces DEBUG",
"# to be off.",
"settings",
".",
"DEBUG",
"=",
"False",
"self",
".",
"call_plugins_method",
"(",
"'beforeConnectionSetup'",
",",
"settings",
")",
"from",
"django",
".",
"core",
"import",
"management",
"from",
"django",
".",
"test",
".",
"utils",
"import",
"setup_test_environment",
"if",
"hasattr",
"(",
"settings",
",",
"'DATABASES'",
")",
":",
"self",
".",
"old_db",
"=",
"settings",
".",
"DATABASES",
"[",
"'default'",
"]",
"[",
"'NAME'",
"]",
"else",
":",
"self",
".",
"old_db",
"=",
"settings",
".",
"DATABASE_NAME",
"from",
"django",
".",
"db",
"import",
"connections",
"self",
".",
"_monkeypatch_test_classes",
"(",
")",
"for",
"connection",
"in",
"connections",
".",
"all",
"(",
")",
":",
"self",
".",
"call_plugins_method",
"(",
"'beforeTestSetup'",
",",
"settings",
",",
"setup_test_environment",
",",
"connection",
")",
"try",
":",
"setup_test_environment",
"(",
")",
"except",
"RuntimeError",
":",
"# Django 1.11 + multiprocess this happens.",
"pass",
"import",
"django",
"if",
"hasattr",
"(",
"django",
",",
"'setup'",
")",
":",
"django",
".",
"setup",
"(",
")",
"self",
".",
"call_plugins_method",
"(",
"'afterTestSetup'",
",",
"settings",
")",
"management",
".",
"get_commands",
"(",
")",
"# Ensure that nothing (eg. South) steals away our syncdb command",
"if",
"self",
".",
"django_version",
"<",
"self",
".",
"DJANGO_1_7",
":",
"management",
".",
"_commands",
"[",
"'syncdb'",
"]",
"=",
"'django.core'",
"for",
"connection",
"in",
"connections",
".",
"all",
"(",
")",
":",
"self",
".",
"call_plugins_method",
"(",
"'beforeTestDb'",
",",
"settings",
",",
"connection",
",",
"management",
")",
"connection",
".",
"creation",
".",
"create_test_db",
"(",
"verbosity",
"=",
"self",
".",
"verbosity",
",",
"autoclobber",
"=",
"True",
",",
")",
"logger",
".",
"debug",
"(",
"\"Running syncdb\"",
")",
"self",
".",
"_num_syncdb_calls",
"+=",
"1",
"self",
".",
"call_plugins_method",
"(",
"'afterTestDb'",
",",
"settings",
",",
"connection",
")",
"self",
".",
"store_original_transaction_methods",
"(",
")"
] | Create the test database and schema, if needed, and switch the
connection over to that database. Then call install() to install
all apps listed in the loaded settings module. | [
"Create",
"the",
"test",
"database",
"and",
"schema",
"if",
"needed",
"and",
"switch",
"the",
"connection",
"over",
"to",
"that",
"database",
".",
"Then",
"call",
"install",
"()",
"to",
"install",
"all",
"apps",
"listed",
"in",
"the",
"loaded",
"settings",
"module",
"."
] | python | valid |
mwgielen/jackal | jackal/scripts/eternalblue.py | https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/eternalblue.py#L123-L134 | def detect_os(self, ip):
    """
    Runs the checker.py scripts to detect the os.
    """
    process = subprocess.run(['python2', os.path.join(self.datadir, 'MS17-010', 'checker.py'), str(ip)], stdout=subprocess.PIPE)
    out = process.stdout.decode('utf-8').split('\n')
    system_os = ''
    for line in out:
        if line.startswith('Target OS:'):
            system_os = line.replace('Target OS: ', '')
            break
    return system_os | [
"def",
"detect_os",
"(",
"self",
",",
"ip",
")",
":",
"process",
"=",
"subprocess",
".",
"run",
"(",
"[",
"'python2'",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"datadir",
",",
"'MS17-010'",
",",
"'checker.py'",
")",
",",
"str",
"(",
"ip",
")",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"out",
"=",
"process",
".",
"stdout",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"split",
"(",
"'\\n'",
")",
"system_os",
"=",
"''",
"for",
"line",
"in",
"out",
":",
"if",
"line",
".",
"startswith",
"(",
"'Target OS:'",
")",
":",
"system_os",
"=",
"line",
".",
"replace",
"(",
"'Target OS: '",
",",
"''",
")",
"break",
"return",
"system_os"
] | Runs the checker.py scripts to detect the os. | [
"Runs",
"the",
"checker",
".",
"py",
"scripts",
"to",
"detect",
"the",
"os",
"."
] | python | valid |
Capitains/MyCapytain | MyCapytain/resolvers/cts/api.py | https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/cts/api.py#L44-L65 | def getTextualNode(self, textId, subreference=None, prevnext=False, metadata=False):
    """ Retrieve a text node from the API
    :param textId: CtsTextMetadata Identifier
    :type textId: str
    :param subreference: CapitainsCtsPassage Reference
    :type subreference: str
    :param prevnext: Retrieve graph representing previous and next passage
    :type prevnext: boolean
    :param metadata: Retrieve metadata about the passage and the text
    :type metadata: boolean
    :return: CapitainsCtsPassage
    :rtype: CapitainsCtsPassage
    """
    text = CtsText(
        urn=textId,
        retriever=self.endpoint
    )
    if metadata or prevnext:
        return text.getPassagePlus(reference=subreference)
    else:
        return text.getTextualNode(subreference=subreference) | [
"def",
"getTextualNode",
"(",
"self",
",",
"textId",
",",
"subreference",
"=",
"None",
",",
"prevnext",
"=",
"False",
",",
"metadata",
"=",
"False",
")",
":",
"text",
"=",
"CtsText",
"(",
"urn",
"=",
"textId",
",",
"retriever",
"=",
"self",
".",
"endpoint",
")",
"if",
"metadata",
"or",
"prevnext",
":",
"return",
"text",
".",
"getPassagePlus",
"(",
"reference",
"=",
"subreference",
")",
"else",
":",
"return",
"text",
".",
"getTextualNode",
"(",
"subreference",
"=",
"subreference",
")"
] | Retrieve a text node from the API
:param textId: CtsTextMetadata Identifier
:type textId: str
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:param prevnext: Retrieve graph representing previous and next passage
:type prevnext: boolean
:param metadata: Retrieve metadata about the passage and the text
:type metadata: boolean
:return: CapitainsCtsPassage
:rtype: CapitainsCtsPassage | [
"Retrieve",
"a",
"text",
"node",
"from",
"the",
"API"
] | python | train |
HewlettPackard/python-hpOneView | hpOneView/oneview_client.py | https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L1117-L1126 | def appliance_device_snmp_v1_trap_destinations(self):
    """
    Gets the ApplianceDeviceSNMPv1TrapDestinations API client.
    Returns:
        ApplianceDeviceSNMPv1TrapDestinations:
    """
    if not self.__appliance_device_snmp_v1_trap_destinations:
        self.__appliance_device_snmp_v1_trap_destinations = ApplianceDeviceSNMPv1TrapDestinations(self.__connection)
    return self.__appliance_device_snmp_v1_trap_destinations | [
"def",
"appliance_device_snmp_v1_trap_destinations",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"__appliance_device_snmp_v1_trap_destinations",
":",
"self",
".",
"__appliance_device_snmp_v1_trap_destinations",
"=",
"ApplianceDeviceSNMPv1TrapDestinations",
"(",
"self",
".",
"__connection",
")",
"return",
"self",
".",
"__appliance_device_snmp_v1_trap_destinations"
] | Gets the ApplianceDeviceSNMPv1TrapDestinations API client.
Returns:
ApplianceDeviceSNMPv1TrapDestinations: | [
"Gets",
"the",
"ApplianceDeviceSNMPv1TrapDestinations",
"API",
"client",
"."
] | python | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L6628-L6639 | def GetRemainder(self):
    """Method to get the remainder of the buffered XML. this
    method stops the parser, set its state to End Of File and
    return the input stream with what is left that the parser
    did not use. The implementation is not good, the parser
    certainly procgressed past what's left in reader->input,
    and there is an allocation problem. Best would be to
    rewrite it differently. """
    ret = libxml2mod.xmlTextReaderGetRemainder(self._o)
    if ret is None:raise treeError('xmlTextReaderGetRemainder() failed')
    __tmp = inputBuffer(_obj=ret)
    return __tmp | [
"def",
"GetRemainder",
"(",
"self",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlTextReaderGetRemainder",
"(",
"self",
".",
"_o",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlTextReaderGetRemainder() failed'",
")",
"__tmp",
"=",
"inputBuffer",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] | Method to get the remainder of the buffered XML. this
method stops the parser, set its state to End Of File and
return the input stream with what is left that the parser
did not use. The implementation is not good, the parser
certainly procgressed past what's left in reader->input,
and there is an allocation problem. Best would be to
rewrite it differently. | [
"Method",
"to",
"get",
"the",
"remainder",
"of",
"the",
"buffered",
"XML",
".",
"this",
"method",
"stops",
"the",
"parser",
"set",
"its",
"state",
"to",
"End",
"Of",
"File",
"and",
"return",
"the",
"input",
"stream",
"with",
"what",
"is",
"left",
"that",
"the",
"parser",
"did",
"not",
"use",
".",
"The",
"implementation",
"is",
"not",
"good",
"the",
"parser",
"certainly",
"procgressed",
"past",
"what",
"s",
"left",
"in",
"reader",
"-",
">",
"input",
"and",
"there",
"is",
"an",
"allocation",
"problem",
".",
"Best",
"would",
"be",
"to",
"rewrite",
"it",
"differently",
"."
] | python | train |
projecthamster/hamster | src/hamster/lib/graphics.py | https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/graphics.py#L58-L89 | def parse(self, color):
    """parse string or a color tuple into color usable for cairo (all values
    in the normalized (0..1) range"""
    assert color is not None
    #parse color into rgb values
    if isinstance(color, str):
        match = self.hex_color_long.match(color)
        if match:
            color = [int(color, 16) / 65535.0 for color in match.groups()]
        else:
            match = self.hex_color_normal.match(color)
            if match:
                color = [int(color, 16) / 255.0 for color in match.groups()]
            else:
                match = self.hex_color_short.match(color)
                color = [int(color + color, 16) / 255.0 for color in match.groups()]
    elif isinstance(color, gdk.Color):
        color = [color.red / 65535.0,
                 color.green / 65535.0,
                 color.blue / 65535.0]
    elif isinstance(color, (list, tuple)):
        # otherwise we assume we have color components in 0..255 range
        if color[0] > 1 or color[1] > 1 or color[2] > 1:
            color = [c / 255.0 for c in color]
    else:
        color = [color.red, color.green, color.blue]
    return color | [
"def",
"parse",
"(",
"self",
",",
"color",
")",
":",
"assert",
"color",
"is",
"not",
"None",
"#parse color into rgb values",
"if",
"isinstance",
"(",
"color",
",",
"str",
")",
":",
"match",
"=",
"self",
".",
"hex_color_long",
".",
"match",
"(",
"color",
")",
"if",
"match",
":",
"color",
"=",
"[",
"int",
"(",
"color",
",",
"16",
")",
"/",
"65535.0",
"for",
"color",
"in",
"match",
".",
"groups",
"(",
")",
"]",
"else",
":",
"match",
"=",
"self",
".",
"hex_color_normal",
".",
"match",
"(",
"color",
")",
"if",
"match",
":",
"color",
"=",
"[",
"int",
"(",
"color",
",",
"16",
")",
"/",
"255.0",
"for",
"color",
"in",
"match",
".",
"groups",
"(",
")",
"]",
"else",
":",
"match",
"=",
"self",
".",
"hex_color_short",
".",
"match",
"(",
"color",
")",
"color",
"=",
"[",
"int",
"(",
"color",
"+",
"color",
",",
"16",
")",
"/",
"255.0",
"for",
"color",
"in",
"match",
".",
"groups",
"(",
")",
"]",
"elif",
"isinstance",
"(",
"color",
",",
"gdk",
".",
"Color",
")",
":",
"color",
"=",
"[",
"color",
".",
"red",
"/",
"65535.0",
",",
"color",
".",
"green",
"/",
"65535.0",
",",
"color",
".",
"blue",
"/",
"65535.0",
"]",
"elif",
"isinstance",
"(",
"color",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# otherwise we assume we have color components in 0..255 range",
"if",
"color",
"[",
"0",
"]",
">",
"1",
"or",
"color",
"[",
"1",
"]",
">",
"1",
"or",
"color",
"[",
"2",
"]",
">",
"1",
":",
"color",
"=",
"[",
"c",
"/",
"255.0",
"for",
"c",
"in",
"color",
"]",
"else",
":",
"color",
"=",
"[",
"color",
".",
"red",
",",
"color",
".",
"green",
",",
"color",
".",
"blue",
"]",
"return",
"color"
] | parse string or a color tuple into color usable for cairo (all values
in the normalized (0..1) range | [
"parse",
"string",
"or",
"a",
"color",
"tuple",
"into",
"color",
"usable",
"for",
"cairo",
"(",
"all",
"values",
"in",
"the",
"normalized",
"(",
"0",
"..",
"1",
")",
"range"
] | python | train |
bioidiap/bob.ip.facedetect | bob/ip/facedetect/script/plot_froc.py | https://github.com/bioidiap/bob.ip.facedetect/blob/601da5141ca7302ad36424d1421b33190ba46779/bob/ip/facedetect/script/plot_froc.py#L168-L225 | def main(command_line_arguments=None):
    """Reads score files, computes error measures and plots curves."""
    args = command_line_options(command_line_arguments)
    # get some colors for plotting
    cmap = mpl.cm.get_cmap(name='hsv')
    count = len(args.files) + (len(args.baselines) if args.baselines else 0)
    colors = [cmap(i) for i in numpy.linspace(0, 1.0, count+1)]
    # First, read the score files
    logger.info("Loading %d score files" % len(args.files))
    scores = [read_score_file(os.path.join(args.directory, f)) for f in args.files]
    false_alarms = []
    detection_rate = []
    logger.info("Computing FROC curves")
    for score in scores:
        # compute some thresholds
        tmin = min(score[2])
        tmax = max(score[2])
        count = 100
        thresholds = [tmin + float(x)/count * (tmax - tmin) for x in range(count+2)]
        false_alarms.append([])
        detection_rate.append([])
        for threshold in thresholds:
            detection_rate[-1].append(numpy.count_nonzero(numpy.array(score[1]) >= threshold) / float(score[0]))
            false_alarms[-1].append(numpy.count_nonzero(numpy.array(score[2]) >= threshold))
        # to display 0 in a semilogx plot, we have to add a little
        # false_alarms[-1][-1] += 1e-8
    # also read baselines
    if args.baselines is not None:
        for baseline in args.baselines:
            dr = []
            fa = []
            with open(os.path.join(args.baseline_directory, baseline)) as f:
                for line in f:
                    splits = line.rstrip().split()
                    dr.append(float(splits[0]))
                    fa.append(int(splits[1]))
            false_alarms.append(fa)
            detection_rate.append(dr)
    logger.info("Plotting FROC curves to file '%s'", args.output)
    # create a multi-page PDF for the ROC curve
    pdf = PdfPages(args.output)
    figure = _plot_froc(false_alarms, detection_rate, colors, args.legends, args.title, args.max)
    mpl.xlabel('False Alarm (of %d pruned)' % len(scores[0][2]))
    mpl.ylabel('Detection Rate in \%% (total %d faces)' % scores[0][0])
    pdf.savefig(figure)
    pdf.close()
    if args.count_detections:
        for i, f in enumerate(args.files):
            det, all = count_detections(f)
            print("The number of detected faces for %s is %d out of %d" % (args.legends[i], det, all)) | [
"def",
"main",
"(",
"command_line_arguments",
"=",
"None",
")",
":",
"args",
"=",
"command_line_options",
"(",
"command_line_arguments",
")",
"# get some colors for plotting",
"cmap",
"=",
"mpl",
".",
"cm",
".",
"get_cmap",
"(",
"name",
"=",
"'hsv'",
")",
"count",
"=",
"len",
"(",
"args",
".",
"files",
")",
"+",
"(",
"len",
"(",
"args",
".",
"baselines",
")",
"if",
"args",
".",
"baselines",
"else",
"0",
")",
"colors",
"=",
"[",
"cmap",
"(",
"i",
")",
"for",
"i",
"in",
"numpy",
".",
"linspace",
"(",
"0",
",",
"1.0",
",",
"count",
"+",
"1",
")",
"]",
"# First, read the score files",
"logger",
".",
"info",
"(",
"\"Loading %d score files\"",
"%",
"len",
"(",
"args",
".",
"files",
")",
")",
"scores",
"=",
"[",
"read_score_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"directory",
",",
"f",
")",
")",
"for",
"f",
"in",
"args",
".",
"files",
"]",
"false_alarms",
"=",
"[",
"]",
"detection_rate",
"=",
"[",
"]",
"logger",
".",
"info",
"(",
"\"Computing FROC curves\"",
")",
"for",
"score",
"in",
"scores",
":",
"# compute some thresholds",
"tmin",
"=",
"min",
"(",
"score",
"[",
"2",
"]",
")",
"tmax",
"=",
"max",
"(",
"score",
"[",
"2",
"]",
")",
"count",
"=",
"100",
"thresholds",
"=",
"[",
"tmin",
"+",
"float",
"(",
"x",
")",
"/",
"count",
"*",
"(",
"tmax",
"-",
"tmin",
")",
"for",
"x",
"in",
"range",
"(",
"count",
"+",
"2",
")",
"]",
"false_alarms",
".",
"append",
"(",
"[",
"]",
")",
"detection_rate",
".",
"append",
"(",
"[",
"]",
")",
"for",
"threshold",
"in",
"thresholds",
":",
"detection_rate",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"numpy",
".",
"count_nonzero",
"(",
"numpy",
".",
"array",
"(",
"score",
"[",
"1",
"]",
")",
">=",
"threshold",
")",
"/",
"float",
"(",
"score",
"[",
"0",
"]",
")",
")",
"false_alarms",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"numpy",
".",
"count_nonzero",
"(",
"numpy",
".",
"array",
"(",
"score",
"[",
"2",
"]",
")",
">=",
"threshold",
")",
")",
"# to display 0 in a semilogx plot, we have to add a little",
"# false_alarms[-1][-1] += 1e-8",
"# also read baselines",
"if",
"args",
".",
"baselines",
"is",
"not",
"None",
":",
"for",
"baseline",
"in",
"args",
".",
"baselines",
":",
"dr",
"=",
"[",
"]",
"fa",
"=",
"[",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"baseline_directory",
",",
"baseline",
")",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"splits",
"=",
"line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
")",
"dr",
".",
"append",
"(",
"float",
"(",
"splits",
"[",
"0",
"]",
")",
")",
"fa",
".",
"append",
"(",
"int",
"(",
"splits",
"[",
"1",
"]",
")",
")",
"false_alarms",
".",
"append",
"(",
"fa",
")",
"detection_rate",
".",
"append",
"(",
"dr",
")",
"logger",
".",
"info",
"(",
"\"Plotting FROC curves to file '%s'\"",
",",
"args",
".",
"output",
")",
"# create a multi-page PDF for the ROC curve",
"pdf",
"=",
"PdfPages",
"(",
"args",
".",
"output",
")",
"figure",
"=",
"_plot_froc",
"(",
"false_alarms",
",",
"detection_rate",
",",
"colors",
",",
"args",
".",
"legends",
",",
"args",
".",
"title",
",",
"args",
".",
"max",
")",
"mpl",
".",
"xlabel",
"(",
"'False Alarm (of %d pruned)'",
"%",
"len",
"(",
"scores",
"[",
"0",
"]",
"[",
"2",
"]",
")",
")",
"mpl",
".",
"ylabel",
"(",
"'Detection Rate in \\%% (total %d faces)'",
"%",
"scores",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"pdf",
".",
"savefig",
"(",
"figure",
")",
"pdf",
".",
"close",
"(",
")",
"if",
"args",
".",
"count_detections",
":",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"args",
".",
"files",
")",
":",
"det",
",",
"all",
"=",
"count_detections",
"(",
"f",
")",
"print",
"(",
"\"The number of detected faces for %s is %d out of %d\"",
"%",
"(",
"args",
".",
"legends",
"[",
"i",
"]",
",",
"det",
",",
"all",
")",
")"
] | Reads score files, computes error measures and plots curves. | [
"Reads",
"score",
"files",
"computes",
"error",
"measures",
"and",
"plots",
"curves",
"."
] | python | train |
TankerHQ/python-cli-ui | cli_ui/__init__.py | https://github.com/TankerHQ/python-cli-ui/blob/4c9928827cea06cf80e6a1f5bd86478d8566863f/cli_ui/__init__.py#L608-L621 | def did_you_mean(message: str, user_input: str, choices: Sequence[str]) -> str:
    """ Given a list of choices and an invalid user input, display the closest
    items in the list that match the input.
    """
    if not choices:
        return message
    else:
        result = {
            difflib.SequenceMatcher(a=user_input, b=choice).ratio(): choice
            for choice in choices
        }
        message += "\nDid you mean: %s?" % result[max(result)]
        return message | [
"def",
"did_you_mean",
"(",
"message",
":",
"str",
",",
"user_input",
":",
"str",
",",
"choices",
":",
"Sequence",
"[",
"str",
"]",
")",
"->",
"str",
":",
"if",
"not",
"choices",
":",
"return",
"message",
"else",
":",
"result",
"=",
"{",
"difflib",
".",
"SequenceMatcher",
"(",
"a",
"=",
"user_input",
",",
"b",
"=",
"choice",
")",
".",
"ratio",
"(",
")",
":",
"choice",
"for",
"choice",
"in",
"choices",
"}",
"message",
"+=",
"\"\\nDid you mean: %s?\"",
"%",
"result",
"[",
"max",
"(",
"result",
")",
"]",
"return",
"message"
] | Given a list of choices and an invalid user input, display the closest
items in the list that match the input. | [
"Given",
"a",
"list",
"of",
"choices",
"and",
"an",
"invalid",
"user",
"input",
"display",
"the",
"closest",
"items",
"in",
"the",
"list",
"that",
"match",
"the",
"input",
"."
] | python | train |
spotify/gordon-gcp | src/gordon_gcp/clients/http.py | https://github.com/spotify/gordon-gcp/blob/5ab19e3c2fe6ace72ee91e2ef1a1326f90b805da/src/gordon_gcp/clients/http.py#L72-L82 | async def valid_token_set(self):
    """Check for validity of token, and refresh if none or expired."""
    is_valid = False
    if self._auth_client.token:
        # Account for a token near expiration
        now = datetime.datetime.utcnow()
        skew = datetime.timedelta(seconds=60)
        if self._auth_client.expiry > (now + skew):
            is_valid = True
    return is_valid | [
"async",
"def",
"valid_token_set",
"(",
"self",
")",
":",
"is_valid",
"=",
"False",
"if",
"self",
".",
"_auth_client",
".",
"token",
":",
"# Account for a token near expiration",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"skew",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"60",
")",
"if",
"self",
".",
"_auth_client",
".",
"expiry",
">",
"(",
"now",
"+",
"skew",
")",
":",
"is_valid",
"=",
"True",
"return",
"is_valid"
] | Check for validity of token, and refresh if none or expired. | [
"Check",
"for",
"validity",
"of",
"token",
"and",
"refresh",
"if",
"none",
"or",
"expired",
"."
] | python | train |
WZBSocialScienceCenter/tmtoolkit | tmtoolkit/topicmod/visualize.py | https://github.com/WZBSocialScienceCenter/tmtoolkit/blob/ca8b9d072e37ccc82b533f47d48bd9755722305b/tmtoolkit/topicmod/visualize.py#L124-L179 | def plot_doc_topic_heatmap(fig, ax, doc_topic_distrib, doc_labels, topic_labels=None,
                           which_documents=None, which_document_indices=None,
                           which_topics=None, which_topic_indices=None,
                           xaxislabel=None, yaxislabel=None,
                           **kwargs):
    """
    Plot a heatmap for a document-topic distribution `doc_topic_distrib` to a matplotlib Figure `fig` and Axes `ax`
    using `doc_labels` as document labels on the y-axis and topics from 1 to `n_topics=doc_topic_distrib.shape[1]` on
    the x-axis.
    Custom topic labels can be passed as `topic_labels`.
    A subset of documents can be specified either with a sequence `which_documents` containing a subset of document
    labels from `doc_labels` or `which_document_indices` containing a sequence of document indices.
    A subset of topics can be specified either with a sequence `which_topics` containing sequence of numbers between
    [1, n_topics] or `which_topic_indices` which is a number between [0, n_topics-1]
    Additional arguments can be passed via `kwargs` to `plot_heatmap`.
    Please note that it is almost always necessary to select a subset of your document-topic distribution with the
    `which_documents` or `which_topics` parameters, as otherwise the amount of data to be plotted will be too high
    to give a reasonable picture.
    """
    if which_documents is not None and which_document_indices is not None:
        raise ValueError('only `which_documents` or `which_document_indices` can be set, not both')
    if which_topics is not None and which_topic_indices is not None:
        raise ValueError('only `which_topics` or `which_topic_indices` can be set, not both')
    if which_documents is not None:
        which_document_indices = np.where(np.isin(doc_labels, which_documents))[0]
    if which_topics is not None:
        which_topic_indices = np.array(which_topics) - 1
    select_distrib_subset = False
    if topic_labels is None:
        topic_labels = np.array(range(1, doc_topic_distrib.shape[1]+1))
    elif not isinstance(topic_labels, np.ndarray):
        topic_labels = np.array(topic_labels)
    if which_document_indices is not None:
        select_distrib_subset = True
        doc_labels = np.array(doc_labels)[which_document_indices]
    if which_topic_indices is not None:
        select_distrib_subset = True
        topic_labels = topic_labels[which_topic_indices]
    if select_distrib_subset:
        doc_topic_distrib = mat2d_window_from_indices(doc_topic_distrib, which_document_indices, which_topic_indices)
    return plot_heatmap(fig, ax, doc_topic_distrib,
                        xaxislabel=xaxislabel or 'topic',
                        yaxislabel=yaxislabel or 'document',
                        xticklabels=topic_labels,
                        yticklabels=doc_labels,
                        **kwargs) | [
"def",
"plot_doc_topic_heatmap",
"(",
"fig",
",",
"ax",
",",
"doc_topic_distrib",
",",
"doc_labels",
",",
"topic_labels",
"=",
"None",
",",
"which_documents",
"=",
"None",
",",
"which_document_indices",
"=",
"None",
",",
"which_topics",
"=",
"None",
",",
"which_topic_indices",
"=",
"None",
",",
"xaxislabel",
"=",
"None",
",",
"yaxislabel",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"which_documents",
"is",
"not",
"None",
"and",
"which_document_indices",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'only `which_documents` or `which_document_indices` can be set, not both'",
")",
"if",
"which_topics",
"is",
"not",
"None",
"and",
"which_topic_indices",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'only `which_topics` or `which_topic_indices` can be set, not both'",
")",
"if",
"which_documents",
"is",
"not",
"None",
":",
"which_document_indices",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isin",
"(",
"doc_labels",
",",
"which_documents",
")",
")",
"[",
"0",
"]",
"if",
"which_topics",
"is",
"not",
"None",
":",
"which_topic_indices",
"=",
"np",
".",
"array",
"(",
"which_topics",
")",
"-",
"1",
"select_distrib_subset",
"=",
"False",
"if",
"topic_labels",
"is",
"None",
":",
"topic_labels",
"=",
"np",
".",
"array",
"(",
"range",
"(",
"1",
",",
"doc_topic_distrib",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
")",
")",
"elif",
"not",
"isinstance",
"(",
"topic_labels",
",",
"np",
".",
"ndarray",
")",
":",
"topic_labels",
"=",
"np",
".",
"array",
"(",
"topic_labels",
")",
"if",
"which_document_indices",
"is",
"not",
"None",
":",
"select_distrib_subset",
"=",
"True",
"doc_labels",
"=",
"np",
".",
"array",
"(",
"doc_labels",
")",
"[",
"which_document_indices",
"]",
"if",
"which_topic_indices",
"is",
"not",
"None",
":",
"select_distrib_subset",
"=",
"True",
"topic_labels",
"=",
"topic_labels",
"[",
"which_topic_indices",
"]",
"if",
"select_distrib_subset",
":",
"doc_topic_distrib",
"=",
"mat2d_window_from_indices",
"(",
"doc_topic_distrib",
",",
"which_document_indices",
",",
"which_topic_indices",
")",
"return",
"plot_heatmap",
"(",
"fig",
",",
"ax",
",",
"doc_topic_distrib",
",",
"xaxislabel",
"=",
"xaxislabel",
"or",
"'topic'",
",",
"yaxislabel",
"=",
"yaxislabel",
"or",
"'document'",
",",
"xticklabels",
"=",
"topic_labels",
",",
"yticklabels",
"=",
"doc_labels",
",",
"*",
"*",
"kwargs",
")"
] | Plot a heatmap for a document-topic distribution `doc_topic_distrib` to a matplotlib Figure `fig` and Axes `ax`
using `doc_labels` as document labels on the y-axis and topics from 1 to `n_topics=doc_topic_distrib.shape[1]` on
the x-axis.
Custom topic labels can be passed as `topic_labels`.
A subset of documents can be specified either with a sequence `which_documents` containing a subset of document
labels from `doc_labels` or `which_document_indices` containing a sequence of document indices.
A subset of topics can be specified either with a sequence `which_topics` containing sequence of numbers between
[1, n_topics] or `which_topic_indices` which is a number between [0, n_topics-1]
Additional arguments can be passed via `kwargs` to `plot_heatmap`.
Please note that it is almost always necessary to select a subset of your document-topic distribution with the
`which_documents` or `which_topics` parameters, as otherwise the amount of data to be plotted will be too high
to give a reasonable picture. | [
"Plot",
"a",
"heatmap",
"for",
"a",
"document",
"-",
"topic",
"distribution",
"doc_topic_distrib",
"to",
"a",
"matplotlib",
"Figure",
"fig",
"and",
"Axes",
"ax",
"using",
"doc_labels",
"as",
"document",
"labels",
"on",
"the",
"y",
"-",
"axis",
"and",
"topics",
"from",
"1",
"to",
"n_topics",
"=",
"doc_topic_distrib",
".",
"shape",
"[",
"1",
"]",
"on",
"the",
"x",
"-",
"axis",
".",
"Custom",
"topic",
"labels",
"can",
"be",
"passed",
"as",
"topic_labels",
".",
"A",
"subset",
"of",
"documents",
"can",
"be",
"specified",
"either",
"with",
"a",
"sequence",
"which_documents",
"containing",
"a",
"subset",
"of",
"document",
"labels",
"from",
"doc_labels",
"or",
"which_document_indices",
"containing",
"a",
"sequence",
"of",
"document",
"indices",
".",
"A",
"subset",
"of",
"topics",
"can",
"be",
"specified",
"either",
"with",
"a",
"sequence",
"which_topics",
"containing",
"sequence",
"of",
"numbers",
"between",
"[",
"1",
"n_topics",
"]",
"or",
"which_topic_indices",
"which",
"is",
"a",
"number",
"between",
"[",
"0",
"n_topics",
"-",
"1",
"]",
"Additional",
"arguments",
"can",
"be",
"passed",
"via",
"kwargs",
"to",
"plot_heatmap",
"."
] | python | train |
TheGhouls/oct | oct/results/output.py | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/output.py#L24-L33 | def print_infos(results):
    """Print informations in standard output
    :param ReportResults results: the report result containing all compiled informations
    """
    print('transactions: %i' % results.total_transactions)
    print('timers: %i' % results.total_timers)
    print('errors: %i' % results.total_errors)
    print('test start: %s' % results.start_datetime)
    print('test finish: %s\n' % results.finish_datetime) | [
"def",
"print_infos",
"(",
"results",
")",
":",
"print",
"(",
"'transactions: %i'",
"%",
"results",
".",
"total_transactions",
")",
"print",
"(",
"'timers: %i'",
"%",
"results",
".",
"total_timers",
")",
"print",
"(",
"'errors: %i'",
"%",
"results",
".",
"total_errors",
")",
"print",
"(",
"'test start: %s'",
"%",
"results",
".",
"start_datetime",
")",
"print",
"(",
"'test finish: %s\\n'",
"%",
"results",
".",
"finish_datetime",
")"
] | Print informations in standard output
:param ReportResults results: the report result containing all compiled informations | [
"Print",
"informations",
"in",
"standard",
"output"
] | python | train |
sixty-north/cosmic-ray | src/cosmic_ray/modules.py | https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/modules.py#L7-L20 | def find_modules(module_path):
    """Find all modules in the module (possibly package) represented by `module_path`.
    Args:
        module_path: A pathlib.Path to a Python package or module.
    Returns: An iterable of paths Python modules (i.e. *py files).
    """
    if module_path.is_file():
        if module_path.suffix == '.py':
            yield module_path
    elif module_path.is_dir():
        pyfiles = glob.glob('{}/**/*.py'.format(module_path), recursive=True)
        yield from (Path(pyfile) for pyfile in pyfiles) | [
"def",
"find_modules",
"(",
"module_path",
")",
":",
"if",
"module_path",
".",
"is_file",
"(",
")",
":",
"if",
"module_path",
".",
"suffix",
"==",
"'.py'",
":",
"yield",
"module_path",
"elif",
"module_path",
".",
"is_dir",
"(",
")",
":",
"pyfiles",
"=",
"glob",
".",
"glob",
"(",
"'{}/**/*.py'",
".",
"format",
"(",
"module_path",
")",
",",
"recursive",
"=",
"True",
")",
"yield",
"from",
"(",
"Path",
"(",
"pyfile",
")",
"for",
"pyfile",
"in",
"pyfiles",
")"
] | Find all modules in the module (possibly package) represented by `module_path`.
Args:
module_path: A pathlib.Path to a Python package or module.
Returns: An iterable of paths Python modules (i.e. *py files). | [
"Find",
"all",
"modules",
"in",
"the",
"module",
"(",
"possibly",
"package",
")",
"represented",
"by",
"module_path",
"."
] | python | train |
treycucco/pyebnf | pyebnf/_hand_written_parser.py | https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/_hand_written_parser.py#L226-L234 | def operator(self, text):
    """operator = "|" | "." | "," | "-";"""
    self._attempting(text)
    return alternation([
        "|",
        ".",
        ",",
        "-"
    ])(text).retyped(TokenType.operator) | [
"def",
"operator",
"(",
"self",
",",
"text",
")",
":",
"self",
".",
"_attempting",
"(",
"text",
")",
"return",
"alternation",
"(",
"[",
"\"|\"",
",",
"\".\"",
",",
"\",\"",
",",
"\"-\"",
"]",
")",
"(",
"text",
")",
".",
"retyped",
"(",
"TokenType",
".",
"operator",
")"
] | operator = "|" | "." | "," | "-"; | [
"operator",
"=",
"|",
"|",
".",
"|",
"|",
"-",
";"
] | python | test |
mlperf/training | reinforcement/tensorflow/minigo/validate.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/validate.py#L47-L65 | def validate(*tf_records):
    """Validate a model's performance on a set of holdout data."""
    if FLAGS.use_tpu:
        def _input_fn(params):
            return preprocessing.get_tpu_input_tensors(
                params['batch_size'], tf_records, filter_amount=1.0)
    else:
        def _input_fn():
            return preprocessing.get_input_tensors(
                FLAGS.train_batch_size, tf_records, filter_amount=1.0,
                shuffle_examples=False)
    steps = FLAGS.examples_to_validate // FLAGS.train_batch_size
    if FLAGS.use_tpu:
        steps //= FLAGS.num_tpu_cores
    estimator = dual_net.get_estimator()
    with utils.logged_timer("Validating"):
        estimator.evaluate(_input_fn, steps=steps, name=FLAGS.validate_name) | [
"def",
"validate",
"(",
"*",
"tf_records",
")",
":",
"if",
"FLAGS",
".",
"use_tpu",
":",
"def",
"_input_fn",
"(",
"params",
")",
":",
"return",
"preprocessing",
".",
"get_tpu_input_tensors",
"(",
"params",
"[",
"'batch_size'",
"]",
",",
"tf_records",
",",
"filter_amount",
"=",
"1.0",
")",
"else",
":",
"def",
"_input_fn",
"(",
")",
":",
"return",
"preprocessing",
".",
"get_input_tensors",
"(",
"FLAGS",
".",
"train_batch_size",
",",
"tf_records",
",",
"filter_amount",
"=",
"1.0",
",",
"shuffle_examples",
"=",
"False",
")",
"steps",
"=",
"FLAGS",
".",
"examples_to_validate",
"//",
"FLAGS",
".",
"train_batch_size",
"if",
"FLAGS",
".",
"use_tpu",
":",
"steps",
"//=",
"FLAGS",
".",
"num_tpu_cores",
"estimator",
"=",
"dual_net",
".",
"get_estimator",
"(",
")",
"with",
"utils",
".",
"logged_timer",
"(",
"\"Validating\"",
")",
":",
"estimator",
".",
"evaluate",
"(",
"_input_fn",
",",
"steps",
"=",
"steps",
",",
"name",
"=",
"FLAGS",
".",
"validate_name",
")"
] | Validate a model's performance on a set of holdout data. | [
"Validate",
"a",
"model",
"s",
"performance",
"on",
"a",
"set",
"of",
"holdout",
"data",
"."
] | python | train |
charlesthomas/proauth2 | proauth2/async_proauth2.py | https://github.com/charlesthomas/proauth2/blob/f88c8df966a1802414047ed304d02df1dd520097/proauth2/async_proauth2.py#L109-L120 | def authenticate_token(self, token, callback):
    '''
    authenticate_token checks the passed token and returns the user_id it is
    associated with. it is assumed that this method won't be directly exposed to
    the oauth client, but some kind of framework or wrapper. this allows the
    framework to have the user_id without doing additional DB calls.
    '''
    token_data = yield Task(self.data_store.fetch, 'tokens', token=token)
    if not token_data:
        raise Proauth2Error('access_denied',
                            'token does not exist or has been revoked')
    callback(token_data['user_id']) | [
"def",
"authenticate_token",
"(",
"self",
",",
"token",
",",
"callback",
")",
":",
"token_data",
"=",
"yield",
"Task",
"(",
"self",
".",
"data_store",
".",
"fetch",
",",
"'tokens'",
",",
"token",
"=",
"token",
")",
"if",
"not",
"token_data",
":",
"raise",
"Proauth2Error",
"(",
"'access_denied'",
",",
"'token does not exist or has been revoked'",
")",
"callback",
"(",
"token_data",
"[",
"'user_id'",
"]",
")"
] | authenticate_token checks the passed token and returns the user_id it is
associated with. it is assumed that this method won't be directly exposed to
the oauth client, but some kind of framework or wrapper. this allows the
framework to have the user_id without doing additional DB calls. | [
"authenticate_token",
"checks",
"the",
"passed",
"token",
"and",
"returns",
"the",
"user_id",
"it",
"is",
"associated",
"with",
".",
"it",
"is",
"assumed",
"that",
"this",
"method",
"won",
"t",
"be",
"directly",
"exposed",
"to",
"the",
"oauth",
"client",
"but",
"some",
"kind",
"of",
"framework",
"or",
"wrapper",
".",
"this",
"allows",
"the",
"framework",
"to",
"have",
"the",
"user_id",
"without",
"doing",
"additional",
"DB",
"calls",
"."
] | python | valid |
johnwheeler/flask-ask | flask_ask/cache.py | https://github.com/johnwheeler/flask-ask/blob/fe407646ae404a8c90b363c86d5c4c201b6a5580/flask_ask/cache.py#L65-L80 | def top_stream(cache, user_id):
    """
    Peek at the top of the stack in the cache.
    :param cache: werkzeug BasicCache-like object
    :param user_id: id of user, used as key in cache
    :return: top item in user's cached stack, otherwise None
    """
    if not user_id:
        return None
    stack = cache.get(user_id)
    if stack is None:
        return None
    return stack.pop() | [
"def",
"top_stream",
"(",
"cache",
",",
"user_id",
")",
":",
"if",
"not",
"user_id",
":",
"return",
"None",
"stack",
"=",
"cache",
".",
"get",
"(",
"user_id",
")",
"if",
"stack",
"is",
"None",
":",
"return",
"None",
"return",
"stack",
".",
"pop",
"(",
")"
] | Peek at the top of the stack in the cache.
:param cache: werkzeug BasicCache-like object
:param user_id: id of user, used as key in cache
:return: top item in user's cached stack, otherwise None | [
"Peek",
"at",
"the",
"top",
"of",
"the",
"stack",
"in",
"the",
"cache",
"."
] | python | train |
latchset/jwcrypto | jwcrypto/jwk.py | https://github.com/latchset/jwcrypto/blob/961df898dc08f63fe3d900f2002618740bc66b4a/jwcrypto/jwk.py#L490-L501 | def from_json(cls, key):
    """Creates a RFC 7517 JWK from the standard JSON format.
    :param key: The RFC 7517 representation of a JWK.
    """
    obj = cls()
    try:
        jkey = json_decode(key)
    except Exception as e: # pylint: disable=broad-except
        raise InvalidJWKValue(e)
    obj.import_key(**jkey)
    return obj | [
"def",
"from_json",
"(",
"cls",
",",
"key",
")",
":",
"obj",
"=",
"cls",
"(",
")",
"try",
":",
"jkey",
"=",
"json_decode",
"(",
"key",
")",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"raise",
"InvalidJWKValue",
"(",
"e",
")",
"obj",
".",
"import_key",
"(",
"*",
"*",
"jkey",
")",
"return",
"obj"
] | Creates a RFC 7517 JWK from the standard JSON format.
:param key: The RFC 7517 representation of a JWK. | [
"Creates",
"a",
"RFC",
"7517",
"JWK",
"from",
"the",
"standard",
"JSON",
"format",
"."
] | python | train |
twisted/mantissa | xmantissa/people.py | https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/people.py#L798-L819 | def _gatherPluginMethods(self, methodName):
    """
    Walk through each L{IOrganizerPlugin} powerup, yielding the bound
    method if the powerup implements C{methodName}. Upon encountering a
    plugin which fails to implement it, issue a
    L{PendingDeprecationWarning}.
    @param methodName: The name of a L{IOrganizerPlugin} method.
    @type methodName: C{str}
    @return: Iterable of methods.
    """
    for plugin in self.getOrganizerPlugins():
        implementation = getattr(plugin, methodName, None)
        if implementation is not None:
            yield implementation
        else:
            warn(
                ('IOrganizerPlugin now has the %r method, %s'
                 ' did not implement it') % (
                    methodName, plugin.__class__),
                category=PendingDeprecationWarning) | [
"def",
"_gatherPluginMethods",
"(",
"self",
",",
"methodName",
")",
":",
"for",
"plugin",
"in",
"self",
".",
"getOrganizerPlugins",
"(",
")",
":",
"implementation",
"=",
"getattr",
"(",
"plugin",
",",
"methodName",
",",
"None",
")",
"if",
"implementation",
"is",
"not",
"None",
":",
"yield",
"implementation",
"else",
":",
"warn",
"(",
"(",
"'IOrganizerPlugin now has the %r method, %s'",
"' did not implement it'",
")",
"%",
"(",
"methodName",
",",
"plugin",
".",
"__class__",
")",
",",
"category",
"=",
"PendingDeprecationWarning",
")"
] | Walk through each L{IOrganizerPlugin} powerup, yielding the bound
method if the powerup implements C{methodName}. Upon encountering a
plugin which fails to implement it, issue a
L{PendingDeprecationWarning}.
@param methodName: The name of a L{IOrganizerPlugin} method.
@type methodName: C{str}
@return: Iterable of methods. | [
"Walk",
"through",
"each",
"L",
"{",
"IOrganizerPlugin",
"}",
"powerup",
"yielding",
"the",
"bound",
"method",
"if",
"the",
"powerup",
"implements",
"C",
"{",
"methodName",
"}",
".",
"Upon",
"encountering",
"a",
"plugin",
"which",
"fails",
"to",
"implement",
"it",
"issue",
"a",
"L",
"{",
"PendingDeprecationWarning",
"}",
"."
] | python | train |
huggingface/pytorch-pretrained-BERT | examples/run_squad.py | https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L741-L761 | def _compute_softmax(scores):
    """Compute softmax probability over raw logits."""
    if not scores:
        return []
    max_score = None
    for score in scores:
        if max_score is None or score > max_score:
            max_score = score
    exp_scores = []
    total_sum = 0.0
    for score in scores:
        x = math.exp(score - max_score)
        exp_scores.append(x)
        total_sum += x
    probs = []
    for score in exp_scores:
        probs.append(score / total_sum)
    return probs | [
"def",
"_compute_softmax",
"(",
"scores",
")",
":",
"if",
"not",
"scores",
":",
"return",
"[",
"]",
"max_score",
"=",
"None",
"for",
"score",
"in",
"scores",
":",
"if",
"max_score",
"is",
"None",
"or",
"score",
">",
"max_score",
":",
"max_score",
"=",
"score",
"exp_scores",
"=",
"[",
"]",
"total_sum",
"=",
"0.0",
"for",
"score",
"in",
"scores",
":",
"x",
"=",
"math",
".",
"exp",
"(",
"score",
"-",
"max_score",
")",
"exp_scores",
".",
"append",
"(",
"x",
")",
"total_sum",
"+=",
"x",
"probs",
"=",
"[",
"]",
"for",
"score",
"in",
"exp_scores",
":",
"probs",
".",
"append",
"(",
"score",
"/",
"total_sum",
")",
"return",
"probs"
] | Compute softmax probability over raw logits. | [
"Compute",
"softmax",
"probability",
"over",
"raw",
"logits",
"."
] | python | train |
iotile/coretools | transport_plugins/bled112/iotile_transport_bled112/bgapi_structures.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/bled112/iotile_transport_bled112/bgapi_structures.py#L27-L41 | def process_gatt_service(services, event):
    """Process a BGAPI event containing a GATT service description and add it to a dictionary
    Args:
        services (dict): A dictionary of discovered services that is updated with this event
        event (BGAPIPacket): An event containing a GATT service
    """
    length = len(event.payload) - 5
    handle, start, end, uuid = unpack('<BHH%ds' % length, event.payload)
    uuid = process_uuid(uuid)
    services[uuid] = {'uuid_raw': uuid, 'start_handle': start, 'end_handle': end} | [
"def",
"process_gatt_service",
"(",
"services",
",",
"event",
")",
":",
"length",
"=",
"len",
"(",
"event",
".",
"payload",
")",
"-",
"5",
"handle",
",",
"start",
",",
"end",
",",
"uuid",
"=",
"unpack",
"(",
"'<BHH%ds'",
"%",
"length",
",",
"event",
".",
"payload",
")",
"uuid",
"=",
"process_uuid",
"(",
"uuid",
")",
"services",
"[",
"uuid",
"]",
"=",
"{",
"'uuid_raw'",
":",
"uuid",
",",
"'start_handle'",
":",
"start",
",",
"'end_handle'",
":",
"end",
"}"
] | Process a BGAPI event containing a GATT service description and add it to a dictionary
Args:
services (dict): A dictionary of discovered services that is updated with this event
event (BGAPIPacket): An event containing a GATT service | [
"Process",
"a",
"BGAPI",
"event",
"containing",
"a",
"GATT",
"service",
"description",
"and",
"add",
"it",
"to",
"a",
"dictionary"
] | python | train |
bokeh/bokeh | bokeh/models/transforms.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/models/transforms.py#L170-L206 | def from_coffeescript(cls, func, v_func, args={}):
    ''' Create a ``CustomJSTransform`` instance from a pair of CoffeeScript
    snippets. The function bodies are translated to JavaScript functions
    using node and therefore require return statements.
    The ``func`` snippet namespace will contain the variable ``x`` (the
    untransformed value) at render time. The ``v_func`` snippet namespace
    will contain the variable ``xs`` (the untransformed vector) at render
    time.
    Example:
    .. code-block:: coffeescript
        func = "return Math.cos(x)"
        v_func = "return [Math.cos(x) for x in xs]"
        transform = CustomJSTransform.from_coffeescript(func, v_func)
    Args:
        func (str) : a coffeescript snippet to transform a single ``x`` value
        v_func (str) : a coffeescript snippet function to transform a vector ``xs``
    Returns:
        CustomJSTransform
    '''
    compiled = nodejs_compile(func, lang="coffeescript", file="???")
    if "error" in compiled:
        raise CompilationError(compiled.error)
    v_compiled = nodejs_compile(v_func, lang="coffeescript", file="???")
    if "error" in v_compiled:
        raise CompilationError(v_compiled.error)
    return cls(func=compiled.code, v_func=v_compiled.code, args=args) | [
"def",
"from_coffeescript",
"(",
"cls",
",",
"func",
",",
"v_func",
",",
"args",
"=",
"{",
"}",
")",
":",
"compiled",
"=",
"nodejs_compile",
"(",
"func",
",",
"lang",
"=",
"\"coffeescript\"",
",",
"file",
"=",
"\"???\"",
")",
"if",
"\"error\"",
"in",
"compiled",
":",
"raise",
"CompilationError",
"(",
"compiled",
".",
"error",
")",
"v_compiled",
"=",
"nodejs_compile",
"(",
"v_func",
",",
"lang",
"=",
"\"coffeescript\"",
",",
"file",
"=",
"\"???\"",
")",
"if",
"\"error\"",
"in",
"v_compiled",
":",
"raise",
"CompilationError",
"(",
"v_compiled",
".",
"error",
")",
"return",
"cls",
"(",
"func",
"=",
"compiled",
".",
"code",
",",
"v_func",
"=",
"v_compiled",
".",
"code",
",",
"args",
"=",
"args",
")"
] | Create a ``CustomJSTransform`` instance from a pair of CoffeeScript
snippets. The function bodies are translated to JavaScript functions
using node and therefore require return statements.
The ``func`` snippet namespace will contain the variable ``x`` (the
untransformed value) at render time. The ``v_func`` snippet namespace
will contain the variable ``xs`` (the untransformed vector) at render
time.
Example:
.. code-block:: coffeescript
func = "return Math.cos(x)"
v_func = "return [Math.cos(x) for x in xs]"
transform = CustomJSTransform.from_coffeescript(func, v_func)
Args:
func (str) : a coffeescript snippet to transform a single ``x`` value
v_func (str) : a coffeescript snippet function to transform a vector ``xs``
Returns:
CustomJSTransform | [
"Create",
"a",
"CustomJSTransform",
"instance",
"from",
"a",
"pair",
"of",
"CoffeeScript",
"snippets",
".",
"The",
"function",
"bodies",
"are",
"translated",
"to",
"JavaScript",
"functions",
"using",
"node",
"and",
"therefore",
"require",
"return",
"statements",
"."
] | python | train |
JoelBender/bacpypes | sandbox/io.py | https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/sandbox/io.py#L1287-L1297 | def abort(err):
    """Abort everything, everywhere."""
    if _debug: abort._debug("abort %r", err)
    # start with the server
    if IOServer._highlander:
        IOServer._highlander.abort(err)
    # now do everything local
    for controller in _local_controllers.values():
        controller.abort(err) | [
"def",
"abort",
"(",
"err",
")",
":",
"if",
"_debug",
":",
"abort",
".",
"_debug",
"(",
"\"abort %r\"",
",",
"err",
")",
"# start with the server",
"if",
"IOServer",
".",
"_highlander",
":",
"IOServer",
".",
"_highlander",
".",
"abort",
"(",
"err",
")",
"# now do everything local",
"for",
"controller",
"in",
"_local_controllers",
".",
"values",
"(",
")",
":",
"controller",
".",
"abort",
"(",
"err",
")"
] | Abort everything, everywhere. | [
"Abort",
"everything",
"everywhere",
"."
] | python | train |
dwavesystems/dimod | dimod/binary_quadratic_model.py | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L459-L504 | def add_variables_from(self, linear, vartype=None):
    """Add variables and/or linear biases to a binary quadratic model.
    Args:
        linear (dict[variable, bias]/iterable[(variable, bias)]):
            A collection of variables and their linear biases to add to the model.
            If a dict, keys are variables in the binary quadratic model and
            values are biases. Alternatively, an iterable of (variable, bias) pairs.
            Variables can be any python object that is a valid dict key.
            Many methods and functions expect the biases
            to be numbers but this is not explicitly checked.
            If any variable already exists in the model, its bias is added to
            the variable's current linear bias.
        vartype (:class:`.Vartype`, optional, default=None):
            Vartype of the given bias. If None, the vartype of the binary
            quadratic model is used. Valid values are :class:`.Vartype.SPIN` or
            :class:`.Vartype.BINARY`.
    Examples:
        This example creates creates an empty Ising model, adds two variables,
        and subsequently adds to the bias of the one while adding a new, third,
        variable.
        >>> import dimod
        ...
        >>> bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)
        >>> len(bqm.linear)
        0
        >>> bqm.add_variables_from({'a': .5, 'b': -1.})
        >>> 'b' in bqm
        True
        >>> bqm.add_variables_from({'b': -1., 'c': 2.0})
        >>> bqm.linear['b']
        -2.0
    """
    if isinstance(linear, abc.Mapping):
        for v, bias in iteritems(linear):
            self.add_variable(v, bias, vartype=vartype)
    else:
        try:
            for v, bias in linear:
                self.add_variable(v, bias, vartype=vartype)
        except TypeError:
            raise TypeError("expected 'linear' to be a dict or an iterable of 2-tuples.") | [
"def",
"add_variables_from",
"(",
"self",
",",
"linear",
",",
"vartype",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"linear",
",",
"abc",
".",
"Mapping",
")",
":",
"for",
"v",
",",
"bias",
"in",
"iteritems",
"(",
"linear",
")",
":",
"self",
".",
"add_variable",
"(",
"v",
",",
"bias",
",",
"vartype",
"=",
"vartype",
")",
"else",
":",
"try",
":",
"for",
"v",
",",
"bias",
"in",
"linear",
":",
"self",
".",
"add_variable",
"(",
"v",
",",
"bias",
",",
"vartype",
"=",
"vartype",
")",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"\"expected 'linear' to be a dict or an iterable of 2-tuples.\"",
")"
] | Add variables and/or linear biases to a binary quadratic model.
Args:
linear (dict[variable, bias]/iterable[(variable, bias)]):
A collection of variables and their linear biases to add to the model.
If a dict, keys are variables in the binary quadratic model and
values are biases. Alternatively, an iterable of (variable, bias) pairs.
Variables can be any python object that is a valid dict key.
Many methods and functions expect the biases
to be numbers but this is not explicitly checked.
If any variable already exists in the model, its bias is added to
the variable's current linear bias.
vartype (:class:`.Vartype`, optional, default=None):
Vartype of the given bias. If None, the vartype of the binary
quadratic model is used. Valid values are :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
Examples:
This example creates an empty Ising model, adds two variables,
and subsequently adds to the bias of one of them while adding a new, third
variable.
>>> import dimod
...
>>> bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)
>>> len(bqm.linear)
0
>>> bqm.add_variables_from({'a': .5, 'b': -1.})
>>> 'b' in bqm
True
>>> bqm.add_variables_from({'b': -1., 'c': 2.0})
>>> bqm.linear['b']
-2.0 | [
"Add",
"variables",
"and",
"/",
"or",
"linear",
"biases",
"to",
"a",
"binary",
"quadratic",
"model",
"."
] | python | train |
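A minimal usage sketch for the `add_variables_from` record above, showing the iterable-of-pairs form alongside the dict form from its docstring (variable names are arbitrary; assumes the dimod API exactly as quoted above):
import dimod
bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)
# Dict form: keys are variables, values are linear biases.
bqm.add_variables_from({'a': 0.5, 'b': -1.0})
# Iterable-of-(variable, bias) pairs: biases for existing variables accumulate.
bqm.add_variables_from([('b', -1.0), ('c', 2.0)])
print(bqm.linear['b'])  # -2.0, because the two -1.0 biases were summed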
dyve/django-bootstrap3 | bootstrap3/forms.py | https://github.com/dyve/django-bootstrap3/blob/1d4095ba113a1faff228f9592bdad4f0b3aed653/bootstrap3/forms.py#L73-L78 | def render_field(field, **kwargs):
"""
Render a field to a Bootstrap layout
"""
renderer_cls = get_field_renderer(**kwargs)
return renderer_cls(field, **kwargs).render() | [
"def",
"render_field",
"(",
"field",
",",
"*",
"*",
"kwargs",
")",
":",
"renderer_cls",
"=",
"get_field_renderer",
"(",
"*",
"*",
"kwargs",
")",
"return",
"renderer_cls",
"(",
"field",
",",
"*",
"*",
"kwargs",
")",
".",
"render",
"(",
")"
] | Render a field to a Bootstrap layout | [
"Render",
"a",
"field",
"to",
"a",
"Bootstrap",
"layout"
] | python | train |
bcbio/bcbio-nextgen | bcbio/variation/strelka2.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L20-L34 | def run(align_bams, items, ref_file, assoc_files, region, out_file):
"""Run strelka2 variant calling, either paired tumor/normal or germline calling.
region can be a single region or list of multiple regions for multicore calling.
"""
call_file = "%s-raw.vcf.gz" % utils.splitext_plus(out_file)[0]
strelka_work_dir = "%s-work" % utils.splitext_plus(out_file)[0]
paired = vcfutils.get_paired_bams(align_bams, items)
if paired:
assert paired.normal_bam, "Strelka2 requires a normal sample"
call_file = _run_somatic(paired, ref_file, assoc_files, region, call_file, strelka_work_dir)
else:
call_file = _run_germline(align_bams, items, ref_file,
assoc_files, region, call_file, strelka_work_dir)
return _af_annotate_and_filter(paired, items, call_file, out_file) | [
"def",
"run",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
",",
"out_file",
")",
":",
"call_file",
"=",
"\"%s-raw.vcf.gz\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"out_file",
")",
"[",
"0",
"]",
"strelka_work_dir",
"=",
"\"%s-work\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"out_file",
")",
"[",
"0",
"]",
"paired",
"=",
"vcfutils",
".",
"get_paired_bams",
"(",
"align_bams",
",",
"items",
")",
"if",
"paired",
":",
"assert",
"paired",
".",
"normal_bam",
",",
"\"Strelka2 requires a normal sample\"",
"call_file",
"=",
"_run_somatic",
"(",
"paired",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
",",
"call_file",
",",
"strelka_work_dir",
")",
"else",
":",
"call_file",
"=",
"_run_germline",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
",",
"call_file",
",",
"strelka_work_dir",
")",
"return",
"_af_annotate_and_filter",
"(",
"paired",
",",
"items",
",",
"call_file",
",",
"out_file",
")"
] | Run strelka2 variant calling, either paired tumor/normal or germline calling.
region can be a single region or list of multiple regions for multicore calling. | [
"Run",
"strelka2",
"variant",
"calling",
"either",
"paired",
"tumor",
"/",
"normal",
"or",
"germline",
"calling",
"."
] | python | train |
spookylukey/django-paypal | paypal/pro/creditcard.py | https://github.com/spookylukey/django-paypal/blob/b07d0a3ad91b5c5fe7bb27be3e5d70aabcdef76f/paypal/pro/creditcard.py#L68-L72 | def verify(self):
"""Returns the card type if valid else None."""
if self.is_number() and not self.is_test() and self.is_mod10():
return self.get_type()
return None | [
"def",
"verify",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_number",
"(",
")",
"and",
"not",
"self",
".",
"is_test",
"(",
")",
"and",
"self",
".",
"is_mod10",
"(",
")",
":",
"return",
"self",
".",
"get_type",
"(",
")",
"return",
"None"
] | Returns the card type if valid else None. | [
"Returns",
"the",
"card",
"type",
"if",
"valid",
"else",
"None",
"."
] | python | train |
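The `verify` record above chains `is_number`, `is_test` and `is_mod10`; the mod-10 step is the standard Luhn check. A standalone sketch of that check (the helper name and sample digits are illustrative, not from the source):
def luhn_valid(number):
    """Return True if the digit string passes the Luhn mod-10 check."""
    digits = [int(d) for d in number][::-1]
    total = 0
    for i, d in enumerate(digits):
        if i % 2 == 1:  # double every second digit, counting from the right
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0
print(luhn_valid("79927398713"))  # True for this standard Luhn test value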
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py#L489-L505 | def get_form_layout(self, process_id, wit_ref_name):
"""GetFormLayout.
[Preview API] Gets the form layout.
:param str process_id: The ID of the process.
:param str wit_ref_name: The reference name of the work item type.
:rtype: :class:`<FormLayout> <azure.devops.v5_0.work_item_tracking_process.models.FormLayout>`
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
if wit_ref_name is not None:
route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
response = self._send(http_method='GET',
location_id='fa8646eb-43cd-4b71-9564-40106fd63e40',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('FormLayout', response) | [
"def",
"get_form_layout",
"(",
"self",
",",
"process_id",
",",
"wit_ref_name",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"process_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'processId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'process_id'",
",",
"process_id",
",",
"'str'",
")",
"if",
"wit_ref_name",
"is",
"not",
"None",
":",
"route_values",
"[",
"'witRefName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'wit_ref_name'",
",",
"wit_ref_name",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'fa8646eb-43cd-4b71-9564-40106fd63e40'",
",",
"version",
"=",
"'5.0-preview.1'",
",",
"route_values",
"=",
"route_values",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'FormLayout'",
",",
"response",
")"
] | GetFormLayout.
[Preview API] Gets the form layout.
:param str process_id: The ID of the process.
:param str wit_ref_name: The reference name of the work item type.
:rtype: :class:`<FormLayout> <azure.devops.v5_0.work_item_tracking_process.models.FormLayout>` | [
"GetFormLayout",
".",
"[",
"Preview",
"API",
"]",
"Gets",
"the",
"form",
"layout",
".",
":",
"param",
"str",
"process_id",
":",
"The",
"ID",
"of",
"the",
"process",
".",
":",
"param",
"str",
"wit_ref_name",
":",
"The",
"reference",
"name",
"of",
"the",
"work",
"item",
"type",
".",
":",
"rtype",
":",
":",
"class",
":",
"<FormLayout",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"work_item_tracking_process",
".",
"models",
".",
"FormLayout",
">"
] | python | train |
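A hedged sketch of calling `get_form_layout` above through the azure-devops connection factory; the organization URL, token, process GUID and work item type name are placeholders, and the client accessor name is an assumption based on the package's generated client factories:
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication
connection = Connection(base_url='https://dev.azure.com/your-org',
                        creds=BasicAuthentication('', 'personal-access-token'))
client = connection.clients.get_work_item_tracking_process_client()  # accessor name assumed
layout = client.get_form_layout('process-guid', 'MyProcess.MyWorkItemType')
for page in layout.pages:  # FormLayout is assumed to expose its pages here
    print(page.label)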
ejeschke/ginga | ginga/trcalc.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/trcalc.py#L556-L617 | def calc_image_merge_clip(p1, p2, dst, q1, q2):
"""
p1 (x1, y1, z1) and p2 (x2, y2, z2) define the extent of the (non-scaled)
data shown. The image, defined by region q1, q2 is to be placed at dst
in the image (destination may be outside of the actual data array).
Refines the modified points (q1', q2') defining the clipped rectangle
needed to be cut from the source array and scaled.
"""
x1, y1 = p1[:2]
x2, y2 = p2[:2]
dst_x, dst_y = dst[:2]
a1, b1 = q1[:2]
a2, b2 = q2[:2]
src_wd, src_ht = a2 - a1, b2 - b1
# Trim off parts of srcarr that would be "hidden"
# to the left and above the dstarr edge.
ex = y1 - dst_y
if ex > 0:
src_ht -= ex
dst_y += ex
b1 += ex
ex = x1 - dst_x
if ex > 0:
src_wd -= ex
dst_x += ex
a1 += ex
# Trim off parts of srcarr that would be "hidden"
# to the right and below dstarr edge.
ex = dst_y + src_ht - y2
if ex > 0:
src_ht -= ex
b2 -= ex
ex = dst_x + src_wd - x2
if ex > 0:
src_wd -= ex
a2 -= ex
if len(p1) > 2:
# 3D image
z1, z2, dst_z, c1, c2 = p1[2], p2[2], dst[2], q1[2], q2[2]
src_dp = c2 - c1
ex = z1 - dst_z
if ex > 0:
src_dp -= ex
dst_z += ex
c1 += ex
ex = dst_z + src_dp - z2
if ex > 0:
src_dp -= ex
c2 -= ex
return ((dst_x, dst_y, dst_z), (a1, b1, c1), (a2, b2, c2))
else:
return ((dst_x, dst_y), (a1, b1), (a2, b2)) | [
"def",
"calc_image_merge_clip",
"(",
"p1",
",",
"p2",
",",
"dst",
",",
"q1",
",",
"q2",
")",
":",
"x1",
",",
"y1",
"=",
"p1",
"[",
":",
"2",
"]",
"x2",
",",
"y2",
"=",
"p2",
"[",
":",
"2",
"]",
"dst_x",
",",
"dst_y",
"=",
"dst",
"[",
":",
"2",
"]",
"a1",
",",
"b1",
"=",
"q1",
"[",
":",
"2",
"]",
"a2",
",",
"b2",
"=",
"q2",
"[",
":",
"2",
"]",
"src_wd",
",",
"src_ht",
"=",
"a2",
"-",
"a1",
",",
"b2",
"-",
"b1",
"# Trim off parts of srcarr that would be \"hidden\"",
"# to the left and above the dstarr edge.",
"ex",
"=",
"y1",
"-",
"dst_y",
"if",
"ex",
">",
"0",
":",
"src_ht",
"-=",
"ex",
"dst_y",
"+=",
"ex",
"b1",
"+=",
"ex",
"ex",
"=",
"x1",
"-",
"dst_x",
"if",
"ex",
">",
"0",
":",
"src_wd",
"-=",
"ex",
"dst_x",
"+=",
"ex",
"a1",
"+=",
"ex",
"# Trim off parts of srcarr that would be \"hidden\"",
"# to the right and below dstarr edge.",
"ex",
"=",
"dst_y",
"+",
"src_ht",
"-",
"y2",
"if",
"ex",
">",
"0",
":",
"src_ht",
"-=",
"ex",
"b2",
"-=",
"ex",
"ex",
"=",
"dst_x",
"+",
"src_wd",
"-",
"x2",
"if",
"ex",
">",
"0",
":",
"src_wd",
"-=",
"ex",
"a2",
"-=",
"ex",
"if",
"len",
"(",
"p1",
")",
">",
"2",
":",
"# 3D image",
"z1",
",",
"z2",
",",
"dst_z",
",",
"c1",
",",
"c2",
"=",
"p1",
"[",
"2",
"]",
",",
"p2",
"[",
"2",
"]",
",",
"dst",
"[",
"2",
"]",
",",
"q1",
"[",
"2",
"]",
",",
"q2",
"[",
"2",
"]",
"src_dp",
"=",
"c2",
"-",
"c1",
"ex",
"=",
"z1",
"-",
"dst_z",
"if",
"ex",
">",
"0",
":",
"src_dp",
"-=",
"ex",
"dst_z",
"+=",
"ex",
"c1",
"+=",
"ex",
"ex",
"=",
"dst_z",
"+",
"src_dp",
"-",
"z2",
"if",
"ex",
">",
"0",
":",
"src_dp",
"-=",
"ex",
"c2",
"-=",
"ex",
"return",
"(",
"(",
"dst_x",
",",
"dst_y",
",",
"dst_z",
")",
",",
"(",
"a1",
",",
"b1",
",",
"c1",
")",
",",
"(",
"a2",
",",
"b2",
",",
"c2",
")",
")",
"else",
":",
"return",
"(",
"(",
"dst_x",
",",
"dst_y",
")",
",",
"(",
"a1",
",",
"b1",
")",
",",
"(",
"a2",
",",
"b2",
")",
")"
] | p1 (x1, y1, z1) and p2 (x2, y2, z2) define the extent of the (non-scaled)
data shown. The image, defined by region q1, q2 is to be placed at dst
in the image (destination may be outside of the actual data array).
Refines the modified points (q1', q2') defining the clipped rectangle
needed to be cut from the source array and scaled. | [
"p1",
"(",
"x1",
"y1",
"z1",
")",
"and",
"p2",
"(",
"x2",
"y2",
"z2",
")",
"define",
"the",
"extent",
"of",
"the",
"(",
"non",
"-",
"scaled",
")",
"data",
"shown",
".",
"The",
"image",
"defined",
"by",
"region",
"q1",
"q2",
"is",
"to",
"be",
"placed",
"at",
"dst",
"in",
"the",
"image",
"(",
"destination",
"may",
"be",
"outside",
"of",
"the",
"actual",
"data",
"array",
")",
"."
] | python | train |
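A small worked example of the 2-D case of `calc_image_merge_clip` above: a 10x10 source region destined at (-2, -3) on a (0, 0)-(100, 100) data extent loses 2 columns and 3 rows on the left/top. The numbers are made up and the import path is assumed from the record's module path:
from ginga import trcalc
dst, q1, q2 = trcalc.calc_image_merge_clip((0, 0), (100, 100), (-2, -3), (0, 0), (10, 10))
print(dst)  # (0, 0)   destination snapped back onto the data extent
print(q1)   # (2, 3)   source start shifted right/down by the trimmed amount
print(q2)   # (10, 10) source end untouched because nothing is clipped on that side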
adafruit/Adafruit_Python_GPIO | Adafruit_GPIO/PWM.py | https://github.com/adafruit/Adafruit_Python_GPIO/blob/a92a23d6b5869663b2bc1ccf78bb11585076a9c4/Adafruit_GPIO/PWM.py#L55-L63 | def set_duty_cycle(self, pin, dutycycle):
"""Set percent duty cycle of PWM output on specified pin. Duty cycle must
be a value 0.0 to 100.0 (inclusive).
"""
if dutycycle < 0.0 or dutycycle > 100.0:
raise ValueError('Invalid duty cycle value, must be between 0.0 to 100.0 (inclusive).')
if pin not in self.pwm:
raise ValueError('Pin {0} is not configured as a PWM. Make sure to first call start for the pin.'.format(pin))
self.pwm[pin].ChangeDutyCycle(dutycycle) | [
"def",
"set_duty_cycle",
"(",
"self",
",",
"pin",
",",
"dutycycle",
")",
":",
"if",
"dutycycle",
"<",
"0.0",
"or",
"dutycycle",
">",
"100.0",
":",
"raise",
"ValueError",
"(",
"'Invalid duty cycle value, must be between 0.0 to 100.0 (inclusive).'",
")",
"if",
"pin",
"not",
"in",
"self",
".",
"pwm",
":",
"raise",
"ValueError",
"(",
"'Pin {0} is not configured as a PWM. Make sure to first call start for the pin.'",
".",
"format",
"(",
"pin",
")",
")",
"self",
".",
"pwm",
"[",
"pin",
"]",
".",
"ChangeDutyCycle",
"(",
"dutycycle",
")"
] | Set percent duty cycle of PWM output on specified pin. Duty cycle must
be a value 0.0 to 100.0 (inclusive). | [
"Set",
"percent",
"duty",
"cycle",
"of",
"PWM",
"output",
"on",
"specified",
"pin",
".",
"Duty",
"cycle",
"must",
"be",
"a",
"value",
"0",
".",
"0",
"to",
"100",
".",
"0",
"(",
"inclusive",
")",
"."
] | python | valid |
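A hedged usage sketch for the PWM record above, assuming a Raspberry Pi with the RPi.GPIO backend; the pin number, frequency and duty cycles are illustrative:
import Adafruit_GPIO.PWM as PWM
pwm = PWM.get_platform_pwm()   # picks the platform backend (RPi, BeagleBone, ...)
pwm.start(18, 50.0, 100)       # pin 18 at 50% duty cycle, 100 Hz
pwm.set_duty_cycle(18, 75.0)   # value must stay within 0.0 to 100.0
pwm.stop(18)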
PyCQA/pylint | pylint/pyreverse/diagrams.py | https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/pyreverse/diagrams.py#L241-L247 | def add_from_depend(self, node, from_module):
"""add dependencies created by from-imports
"""
mod_name = node.root().name
obj = self.module(mod_name)
if from_module not in obj.node.depends:
obj.node.depends.append(from_module) | [
"def",
"add_from_depend",
"(",
"self",
",",
"node",
",",
"from_module",
")",
":",
"mod_name",
"=",
"node",
".",
"root",
"(",
")",
".",
"name",
"obj",
"=",
"self",
".",
"module",
"(",
"mod_name",
")",
"if",
"from_module",
"not",
"in",
"obj",
".",
"node",
".",
"depends",
":",
"obj",
".",
"node",
".",
"depends",
".",
"append",
"(",
"from_module",
")"
] | add dependencies created by from-imports | [
"add",
"dependencies",
"created",
"by",
"from",
"-",
"imports"
] | python | test |
uw-it-aca/uw-restclients-canvas | uw_canvas/sections.py | https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/sections.py#L9-L16 | def get_section(self, section_id, params={}):
"""
Return section resource for given canvas section id.
https://canvas.instructure.com/doc/api/sections.html#method.sections.show
"""
url = SECTIONS_API.format(section_id)
return CanvasSection(data=self._get_resource(url, params=params)) | [
"def",
"get_section",
"(",
"self",
",",
"section_id",
",",
"params",
"=",
"{",
"}",
")",
":",
"url",
"=",
"SECTIONS_API",
".",
"format",
"(",
"section_id",
")",
"return",
"CanvasSection",
"(",
"data",
"=",
"self",
".",
"_get_resource",
"(",
"url",
",",
"params",
"=",
"params",
")",
")"
] | Return section resource for given canvas section id.
https://canvas.instructure.com/doc/api/sections.html#method.sections.show | [
"Return",
"section",
"resource",
"for",
"given",
"canvas",
"section",
"id",
"."
] | python | test |
Yelp/kafka-utils | kafka_utils/kafka_cluster_manager/cmds/command.py | https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cmds/command.py#L107-L119 | def execute_plan(self, plan, allow_rf_change=False):
"""Save proposed-plan and execute the same if requested."""
if self.should_execute():
result = self.zk.execute_plan(plan, allow_rf_change=allow_rf_change)
if not result:
self.log.error('Plan execution unsuccessful.')
sys.exit(1)
else:
self.log.info(
'Plan sent to zookeeper for reassignment successfully.',
)
else:
self.log.info('Proposed plan won\'t be executed (--apply and confirmation needed).') | [
"def",
"execute_plan",
"(",
"self",
",",
"plan",
",",
"allow_rf_change",
"=",
"False",
")",
":",
"if",
"self",
".",
"should_execute",
"(",
")",
":",
"result",
"=",
"self",
".",
"zk",
".",
"execute_plan",
"(",
"plan",
",",
"allow_rf_change",
"=",
"allow_rf_change",
")",
"if",
"not",
"result",
":",
"self",
".",
"log",
".",
"error",
"(",
"'Plan execution unsuccessful.'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Plan sent to zookeeper for reassignment successfully.'",
",",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Proposed plan won\\'t be executed (--apply and confirmation needed).'",
")"
] | Save proposed-plan and execute the same if requested. | [
"Save",
"proposed",
"-",
"plan",
"and",
"execute",
"the",
"same",
"if",
"requested",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/core/fakemodule.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/fakemodule.py#L18-L46 | def init_fakemod_dict(fm,adict=None):
"""Initialize a FakeModule instance __dict__.
Kept as a standalone function and not a method so the FakeModule API can
remain basically empty.
This should be considered for private IPython use, used in managing
namespaces for %run.
Parameters
----------
fm : FakeModule instance
adict : dict, optional
"""
dct = {}
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method, so we add it if missing:
dct.setdefault('__nonzero__',lambda : True)
dct.setdefault('__file__',__file__)
if adict is not None:
dct.update(adict)
# Hard assignment of the object's __dict__. This is nasty but deliberate.
fm.__dict__.clear()
fm.__dict__.update(dct) | [
"def",
"init_fakemod_dict",
"(",
"fm",
",",
"adict",
"=",
"None",
")",
":",
"dct",
"=",
"{",
"}",
"# It seems pydoc (and perhaps others) needs any module instance to",
"# implement a __nonzero__ method, so we add it if missing:",
"dct",
".",
"setdefault",
"(",
"'__nonzero__'",
",",
"lambda",
":",
"True",
")",
"dct",
".",
"setdefault",
"(",
"'__file__'",
",",
"__file__",
")",
"if",
"adict",
"is",
"not",
"None",
":",
"dct",
".",
"update",
"(",
"adict",
")",
"# Hard assignment of the object's __dict__. This is nasty but deliberate.",
"fm",
".",
"__dict__",
".",
"clear",
"(",
")",
"fm",
".",
"__dict__",
".",
"update",
"(",
"dct",
")"
] | Initialize a FakeModule instance __dict__.
Kept as a standalone function and not a method so the FakeModule API can
remain basically empty.
This should be considered for private IPython use, used in managing
namespaces for %run.
Parameters
----------
fm : FakeModule instance
adict : dict, optional | [
"Initialize",
"a",
"FakeModule",
"instance",
"__dict__",
"."
] | python | test |
rosenbrockc/fortpy | fortpy/config.py | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/config.py#L85-L89 | def getenvar(self, envar):
from os import getenv
"""Retrieves the value of an environment variable if it exists."""
if getenv(envar) is not None:
self._vardict[envar] = getenv(envar) | [
"def",
"getenvar",
"(",
"self",
",",
"envar",
")",
":",
"from",
"os",
"import",
"getenv",
"if",
"getenv",
"(",
"envar",
")",
"is",
"not",
"None",
":",
"self",
".",
"_vardict",
"[",
"envar",
"]",
"=",
"getenv",
"(",
"envar",
")"
] | Retrieves the value of an environment variable if it exists. | [
"Retrieves",
"the",
"value",
"of",
"an",
"environment",
"variable",
"if",
"it",
"exists",
"."
] | python | train |
theolind/pymysensors | mysensors/gateway_tcp.py | https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/gateway_tcp.py#L76-L108 | def _connect(self):
"""Connect to socket. This should be run in a new thread."""
while self.protocol:
_LOGGER.info('Trying to connect to %s', self.server_address)
try:
sock = socket.create_connection(
self.server_address, self.reconnect_timeout)
except socket.timeout:
_LOGGER.error(
'Connecting to socket timed out for %s',
self.server_address)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
time.sleep(self.reconnect_timeout)
except OSError:
_LOGGER.error(
'Failed to connect to socket at %s', self.server_address)
_LOGGER.info(
'Waiting %s secs before trying to connect again',
self.reconnect_timeout)
time.sleep(self.reconnect_timeout)
else:
self.tcp_check_timer = time.time()
self.tcp_disconnect_timer = time.time()
transport = TCPTransport(
sock, lambda: self.protocol, self._check_connection)
poll_thread = threading.Thread(target=self._poll_queue)
self._stop_event.clear()
poll_thread.start()
transport.start()
transport.connect()
return | [
"def",
"_connect",
"(",
"self",
")",
":",
"while",
"self",
".",
"protocol",
":",
"_LOGGER",
".",
"info",
"(",
"'Trying to connect to %s'",
",",
"self",
".",
"server_address",
")",
"try",
":",
"sock",
"=",
"socket",
".",
"create_connection",
"(",
"self",
".",
"server_address",
",",
"self",
".",
"reconnect_timeout",
")",
"except",
"socket",
".",
"timeout",
":",
"_LOGGER",
".",
"error",
"(",
"'Connecting to socket timed out for %s'",
",",
"self",
".",
"server_address",
")",
"_LOGGER",
".",
"info",
"(",
"'Waiting %s secs before trying to connect again'",
",",
"self",
".",
"reconnect_timeout",
")",
"time",
".",
"sleep",
"(",
"self",
".",
"reconnect_timeout",
")",
"except",
"OSError",
":",
"_LOGGER",
".",
"error",
"(",
"'Failed to connect to socket at %s'",
",",
"self",
".",
"server_address",
")",
"_LOGGER",
".",
"info",
"(",
"'Waiting %s secs before trying to connect again'",
",",
"self",
".",
"reconnect_timeout",
")",
"time",
".",
"sleep",
"(",
"self",
".",
"reconnect_timeout",
")",
"else",
":",
"self",
".",
"tcp_check_timer",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"tcp_disconnect_timer",
"=",
"time",
".",
"time",
"(",
")",
"transport",
"=",
"TCPTransport",
"(",
"sock",
",",
"lambda",
":",
"self",
".",
"protocol",
",",
"self",
".",
"_check_connection",
")",
"poll_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_poll_queue",
")",
"self",
".",
"_stop_event",
".",
"clear",
"(",
")",
"poll_thread",
".",
"start",
"(",
")",
"transport",
".",
"start",
"(",
")",
"transport",
".",
"connect",
"(",
")",
"return"
] | Connect to socket. This should be run in a new thread. | [
"Connect",
"to",
"socket",
".",
"This",
"should",
"be",
"run",
"in",
"a",
"new",
"thread",
"."
] | python | train |
Robpol86/libnl | libnl/socket_.py | https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/socket_.py#L276-L308 | def nl_socket_set_buffer_size(sk, rxbuf, txbuf):
"""Set socket buffer size of Netlink socket.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/socket.c#L675
Sets the socket buffer size of a Netlink socket to the specified values `rxbuf` and `txbuf`. Providing a value of 0
assumes a good default value.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
rxbuf -- new receive socket buffer size in bytes (integer).
txbuf -- new transmit socket buffer size in bytes (integer).
Returns:
0 on success or a negative error code.
"""
rxbuf = 32768 if rxbuf <= 0 else rxbuf
txbuf = 32768 if txbuf <= 0 else txbuf
if sk.s_fd == -1:
return -NLE_BAD_SOCK
try:
sk.socket_instance.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, txbuf)
except OSError as exc:
return -nl_syserr2nlerr(exc.errno)
try:
sk.socket_instance.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, rxbuf)
except OSError as exc:
return -nl_syserr2nlerr(exc.errno)
sk.s_flags |= NL_SOCK_BUFSIZE_SET
return 0 | [
"def",
"nl_socket_set_buffer_size",
"(",
"sk",
",",
"rxbuf",
",",
"txbuf",
")",
":",
"rxbuf",
"=",
"32768",
"if",
"rxbuf",
"<=",
"0",
"else",
"rxbuf",
"txbuf",
"=",
"32768",
"if",
"txbuf",
"<=",
"0",
"else",
"txbuf",
"if",
"sk",
".",
"s_fd",
"==",
"-",
"1",
":",
"return",
"-",
"NLE_BAD_SOCK",
"try",
":",
"sk",
".",
"socket_instance",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_SNDBUF",
",",
"txbuf",
")",
"except",
"OSError",
"as",
"exc",
":",
"return",
"-",
"nl_syserr2nlerr",
"(",
"exc",
".",
"errno",
")",
"try",
":",
"sk",
".",
"socket_instance",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_RCVBUF",
",",
"rxbuf",
")",
"except",
"OSError",
"as",
"exc",
":",
"return",
"-",
"nl_syserr2nlerr",
"(",
"exc",
".",
"errno",
")",
"sk",
".",
"s_flags",
"|=",
"NL_SOCK_BUFSIZE_SET",
"return",
"0"
] | Set socket buffer size of Netlink socket.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/socket.c#L675
Sets the socket buffer size of a Netlink socket to the specified values `rxbuf` and `txbuf`. Providing a value of 0
assumes a good default value.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
rxbuf -- new receive socket buffer size in bytes (integer).
txbuf -- new transmit socket buffer size in bytes (integer).
Returns:
0 on success or a negative error code. | [
"Set",
"socket",
"buffer",
"size",
"of",
"Netlink",
"socket",
"."
] | python | train |
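A hedged sketch of the call sequence around `nl_socket_set_buffer_size` above (Linux only; the companion module paths `libnl.nl` and `libnl.linux_private.netlink` are assumptions based on the package layout):
from libnl.linux_private.netlink import NETLINK_ROUTE
from libnl.nl import nl_connect
from libnl.socket_ import nl_socket_alloc, nl_socket_set_buffer_size
sk = nl_socket_alloc()                     # allocate the nl_sock wrapper
nl_connect(sk, NETLINK_ROUTE)              # bind the underlying socket so s_fd is valid
err = nl_socket_set_buffer_size(sk, 0, 0)  # 0 and 0 fall back to the 32768-byte defaults
assert err == 0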
libyal/dtfabric | dtfabric/runtime/data_maps.py | https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/runtime/data_maps.py#L1055-L1088 | def _LinearMapByteStream(
self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
"""Maps a data type sequence on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
tuple[object, ...]: mapped values.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
"""
elements_data_size = self._data_type_definition.GetByteSize()
self._CheckByteStreamSize(byte_stream, byte_offset, elements_data_size)
try:
struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:])
mapped_values = map(self._element_data_type_map.MapValue, struct_tuple)
except Exception as exception:
error_string = (
'Unable to read: {0:s} from byte stream at offset: {1:d} '
'with error: {2!s}').format(
self._data_type_definition.name, byte_offset, exception)
raise errors.MappingError(error_string)
if context:
context.byte_size = elements_data_size
return tuple(mapped_values) | [
"def",
"_LinearMapByteStream",
"(",
"self",
",",
"byte_stream",
",",
"byte_offset",
"=",
"0",
",",
"context",
"=",
"None",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"elements_data_size",
"=",
"self",
".",
"_data_type_definition",
".",
"GetByteSize",
"(",
")",
"self",
".",
"_CheckByteStreamSize",
"(",
"byte_stream",
",",
"byte_offset",
",",
"elements_data_size",
")",
"try",
":",
"struct_tuple",
"=",
"self",
".",
"_operation",
".",
"ReadFrom",
"(",
"byte_stream",
"[",
"byte_offset",
":",
"]",
")",
"mapped_values",
"=",
"map",
"(",
"self",
".",
"_element_data_type_map",
".",
"MapValue",
",",
"struct_tuple",
")",
"except",
"Exception",
"as",
"exception",
":",
"error_string",
"=",
"(",
"'Unable to read: {0:s} from byte stream at offset: {1:d} '",
"'with error: {2!s}'",
")",
".",
"format",
"(",
"self",
".",
"_data_type_definition",
".",
"name",
",",
"byte_offset",
",",
"exception",
")",
"raise",
"errors",
".",
"MappingError",
"(",
"error_string",
")",
"if",
"context",
":",
"context",
".",
"byte_size",
"=",
"elements_data_size",
"return",
"tuple",
"(",
"mapped_values",
")"
] | Maps a data type sequence on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
tuple[object, ...]: mapped values.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream. | [
"Maps",
"a",
"data",
"type",
"sequence",
"on",
"a",
"byte",
"stream",
"."
] | python | train |
saltstack/salt | salt/utils/win_update.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_update.py#L101-L167 | def list(self):
'''
Create a dictionary with the details for the updates in the collection.
Returns:
dict: Details about each update
.. code-block:: cfg
List of Updates:
{'<GUID>': {'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [ '<category 1>',
'<category 2>',
...]
}
}
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.list()
'''
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
if self.count() == 0:
return 'Nothing to return'
log.debug('Building a detailed report of the results.')
# Build a dictionary containing details for each update
results = {}
for update in self.updates:
results[update.Identity.UpdateID] = {
'guid': update.Identity.UpdateID,
'Title': six.text_type(update.Title),
'Type': self.update_types[update.Type],
'Description': update.Description,
'Downloaded': bool(update.IsDownloaded),
'Installed': bool(update.IsInstalled),
'Mandatory': bool(update.IsMandatory),
'EULAAccepted': bool(update.EulaAccepted),
'NeedsReboot': bool(update.RebootRequired),
'Severity': six.text_type(update.MsrcSeverity),
'UserInput':
bool(update.InstallationBehavior.CanRequestUserInput),
'RebootBehavior':
self.reboot_behavior[
update.InstallationBehavior.RebootBehavior],
'KBs': ['KB' + item for item in update.KBArticleIDs],
'Categories': [item.Name for item in update.Categories]
}
return results | [
"def",
"list",
"(",
"self",
")",
":",
"# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx",
"if",
"self",
".",
"count",
"(",
")",
"==",
"0",
":",
"return",
"'Nothing to return'",
"log",
".",
"debug",
"(",
"'Building a detailed report of the results.'",
")",
"# Build a dictionary containing details for each update",
"results",
"=",
"{",
"}",
"for",
"update",
"in",
"self",
".",
"updates",
":",
"results",
"[",
"update",
".",
"Identity",
".",
"UpdateID",
"]",
"=",
"{",
"'guid'",
":",
"update",
".",
"Identity",
".",
"UpdateID",
",",
"'Title'",
":",
"six",
".",
"text_type",
"(",
"update",
".",
"Title",
")",
",",
"'Type'",
":",
"self",
".",
"update_types",
"[",
"update",
".",
"Type",
"]",
",",
"'Description'",
":",
"update",
".",
"Description",
",",
"'Downloaded'",
":",
"bool",
"(",
"update",
".",
"IsDownloaded",
")",
",",
"'Installed'",
":",
"bool",
"(",
"update",
".",
"IsInstalled",
")",
",",
"'Mandatory'",
":",
"bool",
"(",
"update",
".",
"IsMandatory",
")",
",",
"'EULAAccepted'",
":",
"bool",
"(",
"update",
".",
"EulaAccepted",
")",
",",
"'NeedsReboot'",
":",
"bool",
"(",
"update",
".",
"RebootRequired",
")",
",",
"'Severity'",
":",
"six",
".",
"text_type",
"(",
"update",
".",
"MsrcSeverity",
")",
",",
"'UserInput'",
":",
"bool",
"(",
"update",
".",
"InstallationBehavior",
".",
"CanRequestUserInput",
")",
",",
"'RebootBehavior'",
":",
"self",
".",
"reboot_behavior",
"[",
"update",
".",
"InstallationBehavior",
".",
"RebootBehavior",
"]",
",",
"'KBs'",
":",
"[",
"'KB'",
"+",
"item",
"for",
"item",
"in",
"update",
".",
"KBArticleIDs",
"]",
",",
"'Categories'",
":",
"[",
"item",
".",
"Name",
"for",
"item",
"in",
"update",
".",
"Categories",
"]",
"}",
"return",
"results"
] | Create a dictionary with the details for the updates in the collection.
Returns:
dict: Details about each update
.. code-block:: cfg
List of Updates:
{'<GUID>': {'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [ '<category 1>',
'<category 2>',
...]
}
}
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.list() | [
"Create",
"a",
"dictionary",
"with",
"the",
"details",
"for",
"the",
"updates",
"in",
"the",
"collection",
"."
] | python | train |
RudolfCardinal/pythonlib | cardinal_pythonlib/dicts.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/dicts.py#L152-L167 | def set_null_values_in_dict(d: Dict[str, Any],
null_literals: List[Any]) -> None:
"""
Within ``d`` (in place), replace any values found in ``null_literals`` with
``None``.
"""
if not null_literals:
return
# DO NOT add/delete values to/from a dictionary during iteration, but it
# is OK to modify existing keys:
# https://stackoverflow.com/questions/6777485
# https://stackoverflow.com/questions/2315520
# https://docs.python.org/3/library/stdtypes.html#dict-views
for k, v in d.items():
if v in null_literals:
d[k] = None | [
"def",
"set_null_values_in_dict",
"(",
"d",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"null_literals",
":",
"List",
"[",
"Any",
"]",
")",
"->",
"None",
":",
"if",
"not",
"null_literals",
":",
"return",
"# DO NOT add/delete values to/from a dictionary during iteration, but it",
"# is OK to modify existing keys:",
"# https://stackoverflow.com/questions/6777485",
"# https://stackoverflow.com/questions/2315520",
"# https://docs.python.org/3/library/stdtypes.html#dict-views",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"v",
"in",
"null_literals",
":",
"d",
"[",
"k",
"]",
"=",
"None"
] | Within ``d`` (in place), replace any values found in ``null_literals`` with
``None``. | [
"Within",
"d",
"(",
"in",
"place",
")",
"replace",
"any",
"values",
"found",
"in",
"null_literals",
"with",
"None",
"."
] | python | train |
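A minimal example of the in-place substitution performed by `set_null_values_in_dict` above (the record contents are arbitrary):
from cardinal_pythonlib.dicts import set_null_values_in_dict
record = {"name": "Alice", "dob": "NULL", "notes": "", "age": 30}
set_null_values_in_dict(record, null_literals=["NULL", ""])
print(record)  # {'name': 'Alice', 'dob': None, 'notes': None, 'age': 30}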
Rapptz/discord.py | discord/ext/commands/cog.py | https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/commands/cog.py#L223-L225 | def get_listeners(self):
"""Returns a :class:`list` of (name, function) listener pairs that are defined in this cog."""
return [(name, getattr(self, method_name)) for name, method_name in self.__cog_listeners__] | [
"def",
"get_listeners",
"(",
"self",
")",
":",
"return",
"[",
"(",
"name",
",",
"getattr",
"(",
"self",
",",
"method_name",
")",
")",
"for",
"name",
",",
"method_name",
"in",
"self",
".",
"__cog_listeners__",
"]"
] | Returns a :class:`list` of (name, function) listener pairs that are defined in this cog. | [
"Returns",
"a",
":",
"class",
":",
"list",
"of",
"(",
"name",
"function",
")",
"listener",
"pairs",
"that",
"are",
"defined",
"in",
"this",
"cog",
"."
] | python | train |
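A short sketch of a cog whose listener shows up in `get_listeners` above (the cog name, event and message are illustrative):
from discord.ext import commands
class Greetings(commands.Cog):
    @commands.Cog.listener()
    async def on_member_join(self, member):
        await member.send('Welcome!')
cog = Greetings()
print(cog.get_listeners())  # roughly: [('on_member_join', <bound method Greetings.on_member_join ...>)]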
staugur/Flask-PluginKit | flask_pluginkit/flask_pluginkit.py | https://github.com/staugur/Flask-PluginKit/blob/512aabf87fa13f4dc1082abd08d1d1dcf3b03f16/flask_pluginkit/flask_pluginkit.py#L411-L467 | def __getPluginInfo(self, plugin, package_abspath, package_name):
""" Organize plugin information.
:returns: dict: plugin info
"""
if not isValidSemver(plugin.__version__):
raise VersionError("The plugin version does not conform to the standard named %s" % package_name)
try:
url = plugin.__url__
except AttributeError:
url = None
try:
license = plugin.__license__
except AttributeError:
license = None
try:
license_file = plugin.__license_file__
except AttributeError:
license_file = None
try:
readme_file = plugin.__readme_file__
except AttributeError:
readme_file = None
try:
plugin_state = plugin.__state__
except AttributeError:
plugin_state = "enabled"
# The plugin state is first read from the `__state` value; its priority is lower than the state files, and an ENABLED file has lower priority than a DISABLED file
if os.path.isfile(os.path.join(package_abspath, "ENABLED")):
plugin_state = "enabled"
if os.path.isfile(os.path.join(package_abspath, "DISABLED")):
plugin_state = "disabled"
return {
"plugin_name": plugin.__plugin_name__,
"plugin_package_name": package_name,
"plugin_package_abspath": package_abspath,
"plugin_description": plugin.__description__,
"plugin_version": plugin.__version__,
"plugin_author": plugin.__author__,
"plugin_url": url,
"plugin_license": license,
"plugin_license_file": license_file,
"plugin_readme_file": readme_file,
"plugin_state": plugin_state,
"plugin_tpl_path": os.path.join(package_abspath, "templates"),
"plugin_ats_path": os.path.join(package_abspath, "static"),
"plugin_tep": {},
"plugin_hep": {},
"plugin_bep": {},
"plugin_yep": {}
} | [
"def",
"__getPluginInfo",
"(",
"self",
",",
"plugin",
",",
"package_abspath",
",",
"package_name",
")",
":",
"if",
"not",
"isValidSemver",
"(",
"plugin",
".",
"__version__",
")",
":",
"raise",
"VersionError",
"(",
"\"The plugin version does not conform to the standard named %s\"",
"%",
"package_name",
")",
"try",
":",
"url",
"=",
"plugin",
".",
"__url__",
"except",
"AttributeError",
":",
"url",
"=",
"None",
"try",
":",
"license",
"=",
"plugin",
".",
"__license__",
"except",
"AttributeError",
":",
"license",
"=",
"None",
"try",
":",
"license_file",
"=",
"plugin",
".",
"__license_file__",
"except",
"AttributeError",
":",
"license_file",
"=",
"None",
"try",
":",
"readme_file",
"=",
"plugin",
".",
"__readme_file__",
"except",
"AttributeError",
":",
"readme_file",
"=",
"None",
"try",
":",
"plugin_state",
"=",
"plugin",
".",
"__state__",
"except",
"AttributeError",
":",
"plugin_state",
"=",
"\"enabled\"",
"# 插件状态首先读取`__state`状态值,优先级低于状态文件,ENABLED文件优先级低于DISABLED文件",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_abspath",
",",
"\"ENABLED\"",
")",
")",
":",
"plugin_state",
"=",
"\"enabled\"",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_abspath",
",",
"\"DISABLED\"",
")",
")",
":",
"plugin_state",
"=",
"\"disabled\"",
"return",
"{",
"\"plugin_name\"",
":",
"plugin",
".",
"__plugin_name__",
",",
"\"plugin_package_name\"",
":",
"package_name",
",",
"\"plugin_package_abspath\"",
":",
"package_abspath",
",",
"\"plugin_description\"",
":",
"plugin",
".",
"__description__",
",",
"\"plugin_version\"",
":",
"plugin",
".",
"__version__",
",",
"\"plugin_author\"",
":",
"plugin",
".",
"__author__",
",",
"\"plugin_url\"",
":",
"url",
",",
"\"plugin_license\"",
":",
"license",
",",
"\"plugin_license_file\"",
":",
"license_file",
",",
"\"plugin_readme_file\"",
":",
"readme_file",
",",
"\"plugin_state\"",
":",
"plugin_state",
",",
"\"plugin_tpl_path\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"package_abspath",
",",
"\"templates\"",
")",
",",
"\"plugin_ats_path\"",
":",
"os",
".",
"path",
".",
"join",
"(",
"package_abspath",
",",
"\"static\"",
")",
",",
"\"plugin_tep\"",
":",
"{",
"}",
",",
"\"plugin_hep\"",
":",
"{",
"}",
",",
"\"plugin_bep\"",
":",
"{",
"}",
",",
"\"plugin_yep\"",
":",
"{",
"}",
"}"
] | Organize plugin information.
:returns: dict: plugin info | [
"Organize",
"plugin",
"information",
"."
] | python | train |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_1/git/git_client_base.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/git/git_client_base.py#L2314-L2339 | def create_comment(self, comment, repository_id, pull_request_id, thread_id, project=None):
"""CreateComment.
[Preview API] Create a comment on a specific thread in a pull request (up to 500 comments can be created per thread).
:param :class:`<Comment> <azure.devops.v5_1.git.models.Comment>` comment: The comment to create. Comments can be up to 150,000 characters.
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param int thread_id: ID of the thread that the desired comment is in.
:param str project: Project ID or project name
:rtype: :class:`<Comment> <azure.devops.v5_1.git.models.Comment>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
if pull_request_id is not None:
route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int')
if thread_id is not None:
route_values['threadId'] = self._serialize.url('thread_id', thread_id, 'int')
content = self._serialize.body(comment, 'Comment')
response = self._send(http_method='POST',
location_id='965a3ec7-5ed8-455a-bdcb-835a5ea7fe7b',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Comment', response) | [
"def",
"create_comment",
"(",
"self",
",",
"comment",
",",
"repository_id",
",",
"pull_request_id",
",",
"thread_id",
",",
"project",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"repository_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'repositoryId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'repository_id'",
",",
"repository_id",
",",
"'str'",
")",
"if",
"pull_request_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'pullRequestId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'pull_request_id'",
",",
"pull_request_id",
",",
"'int'",
")",
"if",
"thread_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'threadId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'thread_id'",
",",
"thread_id",
",",
"'int'",
")",
"content",
"=",
"self",
".",
"_serialize",
".",
"body",
"(",
"comment",
",",
"'Comment'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'POST'",
",",
"location_id",
"=",
"'965a3ec7-5ed8-455a-bdcb-835a5ea7fe7b'",
",",
"version",
"=",
"'5.1-preview.1'",
",",
"route_values",
"=",
"route_values",
",",
"content",
"=",
"content",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'Comment'",
",",
"response",
")"
] | CreateComment.
[Preview API] Create a comment on a specific thread in a pull request (up to 500 comments can be created per thread).
:param :class:`<Comment> <azure.devops.v5_1.git.models.Comment>` comment: The comment to create. Comments can be up to 150,000 characters.
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param int thread_id: ID of the thread that the desired comment is in.
:param str project: Project ID or project name
:rtype: :class:`<Comment> <azure.devops.v5_1.git.models.Comment>` | [
"CreateComment",
".",
"[",
"Preview",
"API",
"]",
"Create",
"a",
"comment",
"on",
"a",
"specific",
"thread",
"in",
"a",
"pull",
"request",
"(",
"up",
"to",
"500",
"comments",
"can",
"be",
"created",
"per",
"thread",
")",
".",
":",
"param",
":",
"class",
":",
"<Comment",
">",
"<azure",
".",
"devops",
".",
"v5_1",
".",
"git",
".",
"models",
".",
"Comment",
">",
"comment",
":",
"The",
"comment",
"to",
"create",
".",
"Comments",
"can",
"be",
"up",
"to",
"150",
"000",
"characters",
".",
":",
"param",
"str",
"repository_id",
":",
"The",
"repository",
"ID",
"of",
"the",
"pull",
"request",
"s",
"target",
"branch",
".",
":",
"param",
"int",
"pull_request_id",
":",
"ID",
"of",
"the",
"pull",
"request",
".",
":",
"param",
"int",
"thread_id",
":",
"ID",
"of",
"the",
"thread",
"that",
"the",
"desired",
"comment",
"is",
"in",
".",
":",
"param",
"str",
"project",
":",
"Project",
"ID",
"or",
"project",
"name",
":",
"rtype",
":",
":",
"class",
":",
"<Comment",
">",
"<azure",
".",
"devops",
".",
"v5_1",
".",
"git",
".",
"models",
".",
"Comment",
">"
] | python | train |
richardcornish/django-pygmentify | pygmentify/utils/pygmentify.py | https://github.com/richardcornish/django-pygmentify/blob/a2d3f6b3c3019d810d46f6ff6beb4e9f53190e7b/pygmentify/utils/pygmentify.py#L12-L33 | def bits_to_dict(bits):
"""Convert a Django template tag's kwargs into a dictionary of Python types.
The only necessary types are number, boolean, list, and string.
http://pygments.org/docs/formatters/#HtmlFormatter
from: ["style='monokai'", "cssclass='cssclass',", "boolean='true',", 'num=0,', "list='[]'"]
to: {'style': 'monokai', 'cssclass': 'cssclass', 'boolean': True, 'num': 0, 'list': [],}
"""
# Strip any trailing commas
cleaned_bits = [bit[:-1] if bit.endswith(',') else bit for bit in bits]
# Create dictionary by splitting on equal signs
options = dict(bit.split('=') for bit in cleaned_bits)
# Coerce strings of types to Python types
for key in options:
if options[key] == "'true'" or options[key] == "'false'":
options[key] = options[key].title()
options[key] = ast.literal_eval(options[key])
return options | [
"def",
"bits_to_dict",
"(",
"bits",
")",
":",
"# Strip any trailing commas",
"cleaned_bits",
"=",
"[",
"bit",
"[",
":",
"-",
"1",
"]",
"if",
"bit",
".",
"endswith",
"(",
"','",
")",
"else",
"bit",
"for",
"bit",
"in",
"bits",
"]",
"# Create dictionary by splitting on equal signs",
"options",
"=",
"dict",
"(",
"bit",
".",
"split",
"(",
"'='",
")",
"for",
"bit",
"in",
"cleaned_bits",
")",
"# Coerce strings of types to Python types",
"for",
"key",
"in",
"options",
":",
"if",
"options",
"[",
"key",
"]",
"==",
"\"'true'\"",
"or",
"options",
"[",
"key",
"]",
"==",
"\"'false'\"",
":",
"options",
"[",
"key",
"]",
"=",
"options",
"[",
"key",
"]",
".",
"title",
"(",
")",
"options",
"[",
"key",
"]",
"=",
"ast",
".",
"literal_eval",
"(",
"options",
"[",
"key",
"]",
")",
"return",
"options"
] | Convert a Django template tag's kwargs into a dictionary of Python types.
The only necessary types are number, boolean, list, and string.
http://pygments.org/docs/formatters/#HtmlFormatter
from: ["style='monokai'", "cssclass='cssclass',", "boolean='true',", 'num=0,', "list='[]'"]
to: {'style': 'monokai', 'cssclass': 'cssclass', 'boolean': True, 'num': 0, 'list': [],} | [
"Convert",
"a",
"Django",
"template",
"tag",
"s",
"kwargs",
"into",
"a",
"dictionary",
"of",
"Python",
"types",
"."
] | python | train |
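An example of the conversion done by `bits_to_dict` above, with kwargs as a Django template tag would pass them (the option values are illustrative; the import path is taken from the record):
from pygmentify.utils.pygmentify import bits_to_dict
bits = ["style='monokai',", "linenostart=1,", "cssclass='highlight'"]
print(bits_to_dict(bits))  # {'style': 'monokai', 'linenostart': 1, 'cssclass': 'highlight'}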
tmux-python/libtmux | libtmux/common.py | https://github.com/tmux-python/libtmux/blob/8eb2f8bbea3a025c1567b1516653414dbc24e1fc/libtmux/common.py#L324-L351 | def where(self, attrs, first=False):
"""
Return objects matching child objects' properties.
Parameters
----------
attrs : dict
tmux properties to match values of
Returns
-------
list
"""
# from https://github.com/serkanyersen/underscore.py
def by(val, *args):
for key, value in attrs.items():
try:
if attrs[key] != val[key]:
return False
except KeyError:
return False
return True
if first:
return list(filter(by, self.children))[0]
else:
return list(filter(by, self.children)) | [
"def",
"where",
"(",
"self",
",",
"attrs",
",",
"first",
"=",
"False",
")",
":",
"# from https://github.com/serkanyersen/underscore.py",
"def",
"by",
"(",
"val",
",",
"*",
"args",
")",
":",
"for",
"key",
",",
"value",
"in",
"attrs",
".",
"items",
"(",
")",
":",
"try",
":",
"if",
"attrs",
"[",
"key",
"]",
"!=",
"val",
"[",
"key",
"]",
":",
"return",
"False",
"except",
"KeyError",
":",
"return",
"False",
"return",
"True",
"if",
"first",
":",
"return",
"list",
"(",
"filter",
"(",
"by",
",",
"self",
".",
"children",
")",
")",
"[",
"0",
"]",
"else",
":",
"return",
"list",
"(",
"filter",
"(",
"by",
",",
"self",
".",
"children",
")",
")"
] | Return objects matching child objects' properties.
Parameters
----------
attrs : dict
tmux properties to match values of
Returns
-------
list | [
"Return",
"objects",
"matching",
"child",
"objects",
"properties",
"."
] | python | train |
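A hedged sketch of `where` above as used on a live tmux server (requires a running server; the window name is illustrative):
import libtmux
server = libtmux.Server()
session = server.list_sessions()[0]
editors = session.where({'window_name': 'editor'})            # list of matching windows
first = session.where({'window_name': 'editor'}, first=True)  # first match only, not a list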
lordmauve/lepton | lepton/texturizer.py | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/lepton/texturizer.py#L52-L67 | def from_images(cls, images, weights=None, filter=None, wrap=None,
aspect_adjust_width=False, aspect_adjust_height=False):
"""Create a SpriteTexturizer from a sequence of Pyglet images.
Note all the images must be able to fit into a single OpenGL texture, so
their combined size should typically be less than 1024x1024
"""
import pyglet
atlas, textures = _atlas_from_images(images)
texturizer = cls(
atlas.texture.id, [tex.tex_coords for tex in textures],
weights, filter or pyglet.gl.GL_LINEAR, wrap or pyglet.gl.GL_CLAMP,
aspect_adjust_width, aspect_adjust_height)
texturizer.atlas = atlas
texturizer.textures = textures
return texturizer | [
"def",
"from_images",
"(",
"cls",
",",
"images",
",",
"weights",
"=",
"None",
",",
"filter",
"=",
"None",
",",
"wrap",
"=",
"None",
",",
"aspect_adjust_width",
"=",
"False",
",",
"aspect_adjust_height",
"=",
"False",
")",
":",
"import",
"pyglet",
"atlas",
",",
"textures",
"=",
"_atlas_from_images",
"(",
"images",
")",
"texturizer",
"=",
"cls",
"(",
"atlas",
".",
"texture",
".",
"id",
",",
"[",
"tex",
".",
"tex_coords",
"for",
"tex",
"in",
"textures",
"]",
",",
"weights",
",",
"filter",
"or",
"pyglet",
".",
"gl",
".",
"GL_LINEAR",
",",
"wrap",
"or",
"pyglet",
".",
"gl",
".",
"GL_CLAMP",
",",
"aspect_adjust_width",
",",
"aspect_adjust_height",
")",
"texturizer",
".",
"atlas",
"=",
"atlas",
"texturizer",
".",
"textures",
"=",
"textures",
"return",
"texturizer"
] | Create a SpriteTexturizer from a sequence of Pyglet images.
Note all the images must be able to fit into a single OpenGL texture, so
their combined size should typically be less than 1024x1024 | [
"Create",
"a",
"SpriteTexturizer",
"from",
"a",
"sequence",
"of",
"Pyglet",
"images",
"."
] | python | train |
Alignak-monitoring/alignak | alignak/external_command.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1701-L1714 | def delay_svc_notification(self, service, notification_time):
"""Modify service first notification delay
Format of the line that triggers function call::
DELAY_SVC_NOTIFICATION;<host_name>;<service_description>;<notification_time>
:param service: service to edit
:type service: alignak.objects.service.Service
:param notification_time: new value to set
:type notification_time:
:return: None
"""
service.first_notification_delay = notification_time
self.send_an_element(service.get_update_status_brok()) | [
"def",
"delay_svc_notification",
"(",
"self",
",",
"service",
",",
"notification_time",
")",
":",
"service",
".",
"first_notification_delay",
"=",
"notification_time",
"self",
".",
"send_an_element",
"(",
"service",
".",
"get_update_status_brok",
"(",
")",
")"
] | Modify service first notification delay
Format of the line that triggers function call::
DELAY_SVC_NOTIFICATION;<host_name>;<service_description>;<notification_time>
:param service: service to edit
:type service: alignak.objects.service.Service
:param notification_time: new value to set
:type notification_time:
:return: None | [
"Modify",
"service",
"first",
"notification",
"delay",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
] | python | train |
Dentosal/python-sc2 | sc2/unit.py | https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/unit.py#L133-L141 | def can_attack_air(self) -> bool:
""" Does not include upgrades """
if self._weapons:
weapon = next(
(weapon for weapon in self._weapons if weapon.type in {TargetType.Air.value, TargetType.Any.value}),
None,
)
return weapon is not None
return False | [
"def",
"can_attack_air",
"(",
"self",
")",
"->",
"bool",
":",
"if",
"self",
".",
"_weapons",
":",
"weapon",
"=",
"next",
"(",
"(",
"weapon",
"for",
"weapon",
"in",
"self",
".",
"_weapons",
"if",
"weapon",
".",
"type",
"in",
"{",
"TargetType",
".",
"Air",
".",
"value",
",",
"TargetType",
".",
"Any",
".",
"value",
"}",
")",
",",
"None",
",",
")",
"return",
"weapon",
"is",
"not",
"None",
"return",
"False"
] | Does not include upgrades | [
"Does",
"not",
"include",
"upgrades"
] | python | train |
jobovy/galpy | galpy/df/evolveddiskdf.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/evolveddiskdf.py#L2282-L2286 | def _vmomentsurfaceIntegrand(vR,vT,R,az,df,n,m,sigmaR1,sigmaT1,t,initvmoment):
"""Internal function that is the integrand for the velocity moment times
surface mass integration"""
o= Orbit([R,vR*sigmaR1,vT*sigmaT1,az])
return vR**n*vT**m*df(o,t)/initvmoment | [
"def",
"_vmomentsurfaceIntegrand",
"(",
"vR",
",",
"vT",
",",
"R",
",",
"az",
",",
"df",
",",
"n",
",",
"m",
",",
"sigmaR1",
",",
"sigmaT1",
",",
"t",
",",
"initvmoment",
")",
":",
"o",
"=",
"Orbit",
"(",
"[",
"R",
",",
"vR",
"*",
"sigmaR1",
",",
"vT",
"*",
"sigmaT1",
",",
"az",
"]",
")",
"return",
"vR",
"**",
"n",
"*",
"vT",
"**",
"m",
"*",
"df",
"(",
"o",
",",
"t",
")",
"/",
"initvmoment"
] | Internal function that is the integrand for the velocity moment times
surface mass integration | [
"Internal",
"function",
"that",
"is",
"the",
"integrand",
"for",
"the",
"velocity",
"moment",
"times",
"surface",
"mass",
"integration"
] | python | train |
blockstack/virtualchain | virtualchain/lib/blockchain/bitcoin_blockchain/blocks.py | https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/blocks.py#L450-L492 | def parse_tx( self, txn, block_header, block_hash, txindex ):
"""
Given a transaction message and its index in the block,
go and create a "verbose" transaction structure
containing all the information in a nice, easy-to-read
dict (i.e. like what bitcoind would give us).
Does not work on coinbase transactions.
Does not include segwit witnesses
"""
txn_serializer = TxSerializer()
tx_bin = txn_serializer.serialize(txn)
txdata = {
"version": txn.version,
"locktime": txn.lock_time,
"hex": binascii.hexlify( tx_bin ),
"txid": txn.calculate_hash(),
"size": len( tx_bin ),
"blockhash": block_hash,
"blocktime": block_header.get('timestamp', 0),
# non-standard; added by us for virtualchain
"txindex": txindex,
"relindex": None,
"senders": None,
"fee": 0,
"nulldata": None,
"ins": None, # library-specific field, to be passed to the state engine
"outs": None, # library-specific field, to be passed to the state engine
"tx_merkle_path": None
}
# keep these around too, since this is what gets fed into the virtualchain state engine implementation
virtualchain_btc_tx_data = bits.btc_tx_deserialize(txdata['hex'])
txdata['ins'] = virtualchain_btc_tx_data['ins']
txdata['outs'] = virtualchain_btc_tx_data['outs']
# we know how many senders there have to be
txdata['senders'] = [None] * len(txdata['ins'])
return txdata | [
"def",
"parse_tx",
"(",
"self",
",",
"txn",
",",
"block_header",
",",
"block_hash",
",",
"txindex",
")",
":",
"txn_serializer",
"=",
"TxSerializer",
"(",
")",
"tx_bin",
"=",
"txn_serializer",
".",
"serialize",
"(",
"txn",
")",
"txdata",
"=",
"{",
"\"version\"",
":",
"txn",
".",
"version",
",",
"\"locktime\"",
":",
"txn",
".",
"lock_time",
",",
"\"hex\"",
":",
"binascii",
".",
"hexlify",
"(",
"tx_bin",
")",
",",
"\"txid\"",
":",
"txn",
".",
"calculate_hash",
"(",
")",
",",
"\"size\"",
":",
"len",
"(",
"tx_bin",
")",
",",
"\"blockhash\"",
":",
"block_hash",
",",
"\"blocktime\"",
":",
"block_header",
".",
"get",
"(",
"'timestamp'",
",",
"0",
")",
",",
"# non-standard; added by us for virtualchain",
"\"txindex\"",
":",
"txindex",
",",
"\"relindex\"",
":",
"None",
",",
"\"senders\"",
":",
"None",
",",
"\"fee\"",
":",
"0",
",",
"\"nulldata\"",
":",
"None",
",",
"\"ins\"",
":",
"None",
",",
"# library-specific field, to be passed to the state engine",
"\"outs\"",
":",
"None",
",",
"# library-specific field, to be passed to the state engine",
"\"tx_merkle_path\"",
":",
"None",
"}",
"# keep these around too, since this is what gets fed into the virtualchain state engine implementation ",
"virtualchain_btc_tx_data",
"=",
"bits",
".",
"btc_tx_deserialize",
"(",
"txdata",
"[",
"'hex'",
"]",
")",
"txdata",
"[",
"'ins'",
"]",
"=",
"virtualchain_btc_tx_data",
"[",
"'ins'",
"]",
"txdata",
"[",
"'outs'",
"]",
"=",
"virtualchain_btc_tx_data",
"[",
"'outs'",
"]",
"# we know how many senders there have to be ",
"txdata",
"[",
"'senders'",
"]",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"txdata",
"[",
"'ins'",
"]",
")",
"return",
"txdata"
] | Given a transaction message and its index in the block,
go and create a "verbose" transaction structure
containing all the information in a nice, easy-to-read
dict (i.e. like what bitcoind would give us).
Does not work on coinbase transactions.
Does not include segwit witnesses | [
"Given",
"a",
"transaction",
"message",
"and",
"its",
"index",
"in",
"the",
"block",
"go",
"and",
"create",
"a",
"verbose",
"transaction",
"structure",
"containing",
"all",
"the",
"information",
"in",
"a",
"nice",
"easy",
"-",
"to",
"-",
"read",
"dict",
"(",
"i",
".",
"e",
".",
"like",
"what",
"bitcoind",
"would",
"give",
"us",
")",
"."
] | python | train |
SuperCowPowers/workbench | workbench/workers/view_memory_deep.py | https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/view_memory_deep.py#L11-L20 | def execute(self, input_data):
''' Execute the ViewMemoryDeep worker '''
# Aggregate the output from all the memory workers, clearly this could be kewler
output = input_data['view_memory']
output['tables'] = {}
for data in [input_data[key] for key in ViewMemoryDeep.dependencies]:
for name,table in data['tables'].iteritems():
output['tables'].update({name: table})
return output | [
"def",
"execute",
"(",
"self",
",",
"input_data",
")",
":",
"# Aggregate the output from all the memory workers, clearly this could be kewler",
"output",
"=",
"input_data",
"[",
"'view_memory'",
"]",
"output",
"[",
"'tables'",
"]",
"=",
"{",
"}",
"for",
"data",
"in",
"[",
"input_data",
"[",
"key",
"]",
"for",
"key",
"in",
"ViewMemoryDeep",
".",
"dependencies",
"]",
":",
"for",
"name",
",",
"table",
"in",
"data",
"[",
"'tables'",
"]",
".",
"iteritems",
"(",
")",
":",
"output",
"[",
"'tables'",
"]",
".",
"update",
"(",
"{",
"name",
":",
"table",
"}",
")",
"return",
"output"
] | Execute the ViewMemoryDeep worker | [
"Execute",
"the",
"ViewMemoryDeep",
"worker"
] | python | train |
google/grr | grr/core/grr_response_core/stats/default_stats_collector.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/stats/default_stats_collector.py#L108-L115 | def Record(self, value, fields=None):
"""Records the given observation in a distribution."""
key = _FieldsToKey(fields)
metric_value = self._metric_values.get(key)
if metric_value is None:
metric_value = self._DefaultValue()
self._metric_values[key] = metric_value
metric_value.Record(value) | [
"def",
"Record",
"(",
"self",
",",
"value",
",",
"fields",
"=",
"None",
")",
":",
"key",
"=",
"_FieldsToKey",
"(",
"fields",
")",
"metric_value",
"=",
"self",
".",
"_metric_values",
".",
"get",
"(",
"key",
")",
"if",
"metric_value",
"is",
"None",
":",
"metric_value",
"=",
"self",
".",
"_DefaultValue",
"(",
")",
"self",
".",
"_metric_values",
"[",
"key",
"]",
"=",
"metric_value",
"metric_value",
".",
"Record",
"(",
"value",
")"
] | Records the given observation in a distribution. | [
"Records",
"the",
"given",
"observation",
"in",
"a",
"distribution",
"."
] | python | train |
JnyJny/Geometry | Geometry/triangle.py | https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/triangle.py#L335-L345 | def CB(self):
'''
Vertices C and B, list.
'''
try:
return self._CB
except AttributeError:
pass
self._CB = [self.C, self.B]
return self._CB | [
"def",
"CB",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_CB",
"except",
"AttributeError",
":",
"pass",
"self",
".",
"_CB",
"=",
"[",
"self",
".",
"C",
",",
"self",
".",
"B",
"]",
"return",
"self",
".",
"_CB"
] | Vertices C and B, list. | [
"Vertices",
"C",
"and",
"B",
"list",
"."
] | python | train |
decryptus/sonicprobe | sonicprobe/libs/threading_tcp_server.py | https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/threading_tcp_server.py#L123-L145 | def process_request_thread(self, mainthread):
"""obtain request from queue instead of directly from server socket"""
life_time = time.time()
nb_requests = 0
while not mainthread.killed():
if self.max_life_time > 0:
if (time.time() - life_time) >= self.max_life_time:
mainthread.add_worker(1)
return
try:
SocketServer.ThreadingTCPServer.process_request_thread(self, *self.requests.get(True, 0.5))
except Queue.Empty:
continue
else:
SocketServer.ThreadingTCPServer.process_request_thread(self, *self.requests.get())
LOG.debug("nb_requests: %d, max_requests: %d", nb_requests, self.max_requests)
nb_requests += 1
if self.max_requests > 0 and nb_requests >= self.max_requests:
mainthread.add_worker(1)
return | [
"def",
"process_request_thread",
"(",
"self",
",",
"mainthread",
")",
":",
"life_time",
"=",
"time",
".",
"time",
"(",
")",
"nb_requests",
"=",
"0",
"while",
"not",
"mainthread",
".",
"killed",
"(",
")",
":",
"if",
"self",
".",
"max_life_time",
">",
"0",
":",
"if",
"(",
"time",
".",
"time",
"(",
")",
"-",
"life_time",
")",
">=",
"self",
".",
"max_life_time",
":",
"mainthread",
".",
"add_worker",
"(",
"1",
")",
"return",
"try",
":",
"SocketServer",
".",
"ThreadingTCPServer",
".",
"process_request_thread",
"(",
"self",
",",
"*",
"self",
".",
"requests",
".",
"get",
"(",
"True",
",",
"0.5",
")",
")",
"except",
"Queue",
".",
"Empty",
":",
"continue",
"else",
":",
"SocketServer",
".",
"ThreadingTCPServer",
".",
"process_request_thread",
"(",
"self",
",",
"*",
"self",
".",
"requests",
".",
"get",
"(",
")",
")",
"LOG",
".",
"debug",
"(",
"\"nb_requests: %d, max_requests: %d\"",
",",
"nb_requests",
",",
"self",
".",
"max_requests",
")",
"nb_requests",
"+=",
"1",
"if",
"self",
".",
"max_requests",
">",
"0",
"and",
"nb_requests",
">=",
"self",
".",
"max_requests",
":",
"mainthread",
".",
"add_worker",
"(",
"1",
")",
"return"
] | obtain request from queue instead of directly from server socket | [
"obtain",
"request",
"from",
"queue",
"instead",
"of",
"directly",
"from",
"server",
"socket"
] | python | train |
spyder-ide/spyder | spyder/api/plugins.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/api/plugins.py#L102-L119 | def initialize_plugin(self):
"""
Initialize plugin: connect signals, setup actions, etc.
It must be run at the end of __init__
"""
self.create_toggle_view_action()
self.plugin_actions = self.get_plugin_actions() + [MENU_SEPARATOR,
self.undock_action]
add_actions(self.options_menu, self.plugin_actions)
self.options_button.setMenu(self.options_menu)
self.options_menu.aboutToShow.connect(self.refresh_actions)
self.sig_show_message.connect(self.show_message)
self.sig_update_plugin_title.connect(self.update_plugin_title)
self.sig_option_changed.connect(self.set_option)
self.setWindowTitle(self.get_plugin_title()) | [
"def",
"initialize_plugin",
"(",
"self",
")",
":",
"self",
".",
"create_toggle_view_action",
"(",
")",
"self",
".",
"plugin_actions",
"=",
"self",
".",
"get_plugin_actions",
"(",
")",
"+",
"[",
"MENU_SEPARATOR",
",",
"self",
".",
"undock_action",
"]",
"add_actions",
"(",
"self",
".",
"options_menu",
",",
"self",
".",
"plugin_actions",
")",
"self",
".",
"options_button",
".",
"setMenu",
"(",
"self",
".",
"options_menu",
")",
"self",
".",
"options_menu",
".",
"aboutToShow",
".",
"connect",
"(",
"self",
".",
"refresh_actions",
")",
"self",
".",
"sig_show_message",
".",
"connect",
"(",
"self",
".",
"show_message",
")",
"self",
".",
"sig_update_plugin_title",
".",
"connect",
"(",
"self",
".",
"update_plugin_title",
")",
"self",
".",
"sig_option_changed",
".",
"connect",
"(",
"self",
".",
"set_option",
")",
"self",
".",
"setWindowTitle",
"(",
"self",
".",
"get_plugin_title",
"(",
")",
")"
] | Initialize plugin: connect signals, setup actions, etc.
It must be run at the end of __init__ | [
"Initialize",
"plugin",
":",
"connect",
"signals",
"setup",
"actions",
"etc",
"."
] | python | train |
cykl/infoqscraper | infoqscraper/client.py | https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/client.py#L92-L105 | def fetch_no_cache(self, url):
""" Fetch the resource specified and return its content.
DownloadError is raised if the resource cannot be fetched.
"""
try:
with contextlib.closing(self.opener.open(url)) as response:
# InfoQ does not send a 404 but a 302 redirecting to a valid URL...
if response.code != 200 or response.url == INFOQ_404_URL:
raise DownloadError("%s not found" % url)
return response.read()
except urllib.error.URLError as e:
raise DownloadError("Failed to get %s: %s" % (url, e)) | [
"def",
"fetch_no_cache",
"(",
"self",
",",
"url",
")",
":",
"try",
":",
"with",
"contextlib",
".",
"closing",
"(",
"self",
".",
"opener",
".",
"open",
"(",
"url",
")",
")",
"as",
"response",
":",
"# InfoQ does not send a 404 but a 302 redirecting to a valid URL...",
"if",
"response",
".",
"code",
"!=",
"200",
"or",
"response",
".",
"url",
"==",
"INFOQ_404_URL",
":",
"raise",
"DownloadError",
"(",
"\"%s not found\"",
"%",
"url",
")",
"return",
"response",
".",
"read",
"(",
")",
"except",
"urllib",
".",
"error",
".",
"URLError",
"as",
"e",
":",
"raise",
"DownloadError",
"(",
"\"Failed to get %s: %s\"",
"%",
"(",
"url",
",",
"e",
")",
")"
] | Fetch the resource specified and return its content.
DownloadError is raised if the resource cannot be fetched. | [
"Fetch",
"the",
"resource",
"specified",
"and",
"return",
"its",
"content",
"."
] | python | train |
genialis/resolwe | resolwe/flow/views/entity.py | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/entity.py#L163-L175 | def move_to_collection(self, request, *args, **kwargs):
"""Move samples from source to destination collection."""
ids = self.get_ids(request.data)
src_collection_id = self.get_id(request.data, 'source_collection')
dst_collection_id = self.get_id(request.data, 'destination_collection')
src_collection = self._get_collection_for_user(src_collection_id, request.user)
dst_collection = self._get_collection_for_user(dst_collection_id, request.user)
entity_qs = self._get_entities(request.user, ids)
entity_qs.move_to_collection(src_collection, dst_collection)
return Response() | [
"def",
"move_to_collection",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ids",
"=",
"self",
".",
"get_ids",
"(",
"request",
".",
"data",
")",
"src_collection_id",
"=",
"self",
".",
"get_id",
"(",
"request",
".",
"data",
",",
"'source_collection'",
")",
"dst_collection_id",
"=",
"self",
".",
"get_id",
"(",
"request",
".",
"data",
",",
"'destination_collection'",
")",
"src_collection",
"=",
"self",
".",
"_get_collection_for_user",
"(",
"src_collection_id",
",",
"request",
".",
"user",
")",
"dst_collection",
"=",
"self",
".",
"_get_collection_for_user",
"(",
"dst_collection_id",
",",
"request",
".",
"user",
")",
"entity_qs",
"=",
"self",
".",
"_get_entities",
"(",
"request",
".",
"user",
",",
"ids",
")",
"entity_qs",
".",
"move_to_collection",
"(",
"src_collection",
",",
"dst_collection",
")",
"return",
"Response",
"(",
")"
] | Move samples from source to destination collection. | [
"Move",
"samples",
"from",
"source",
"to",
"destination",
"collection",
"."
] | python | train |
pygalle-io/pygalle.core.base.klass | src/pygalle/core/base/klass/__init__.py | https://github.com/pygalle-io/pygalle.core.base.klass/blob/fa683f7f88b63ca46a0970af81a558c9efbbe942/src/pygalle/core/base/klass/__init__.py#L245-L258 | def instance_of(self, kls: Any) -> bool:
""" Return true if the current object is an instance of passed type.
# Arguments
kls: The class.
# Returns:
bool:
* Return true if the current object is an instance of passed type.
* False else.
"""
if not kls:
raise ValueError
return isinstance(self, kls) | [
"def",
"instance_of",
"(",
"self",
",",
"kls",
":",
"Any",
")",
"->",
"bool",
":",
"if",
"not",
"kls",
":",
"raise",
"ValueError",
"return",
"isinstance",
"(",
"self",
",",
"kls",
")"
] | Return true if the current object is an instance of passed type.
# Arguments
kls: The class.
# Returns:
bool:
* Return true if the current object is an instance of passed type.
* False else. | [
"Return",
"true",
"if",
"the",
"current",
"object",
"is",
"an",
"instance",
"of",
"passed",
"type",
"."
] | python | train |
googleapis/google-cloud-python | datastore/google/cloud/datastore/client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/client.py#L542-L624 | def query(self, **kwargs):
"""Proxy to :class:`google.cloud.datastore.query.Query`.
Passes our ``project``.
Using query to search a datastore:
.. testsetup:: query
import os
import uuid
from google.cloud import datastore
unique = os.getenv('CIRCLE_BUILD_NUM', str(uuid.uuid4())[0:8])
client = datastore.Client(namespace='ns{}'.format(unique))
query = client.query(kind='_Doctest')
def do_something(entity):
pass
.. doctest:: query
>>> query = client.query(kind='MyKind')
>>> query.add_filter('property', '=', 'val')
Using the query iterator
.. doctest:: query
>>> query_iter = query.fetch()
>>> for entity in query_iter:
... do_something(entity)
or manually page through results
.. testsetup:: query-page
import os
import uuid
from google.cloud import datastore
from tests.system.test_system import Config # system tests
unique = os.getenv('CIRCLE_BUILD_NUM', str(uuid.uuid4())[0:8])
client = datastore.Client(namespace='ns{}'.format(unique))
key = client.key('_Doctest')
entity1 = datastore.Entity(key=key)
entity1['foo'] = 1337
entity2 = datastore.Entity(key=key)
entity2['foo'] = 42
Config.TO_DELETE.extend([entity1, entity2])
client.put_multi([entity1, entity2])
query = client.query(kind='_Doctest')
cursor = None
.. doctest:: query-page
>>> query_iter = query.fetch(start_cursor=cursor)
>>> pages = query_iter.pages
>>>
>>> first_page = next(pages)
>>> first_page_entities = list(first_page)
>>> query_iter.next_page_token is None
True
:type kwargs: dict
:param kwargs: Parameters for initializing an instance of
:class:`~google.cloud.datastore.query.Query`.
:rtype: :class:`~google.cloud.datastore.query.Query`
:returns: A query object.
"""
if "client" in kwargs:
raise TypeError("Cannot pass client")
if "project" in kwargs:
raise TypeError("Cannot pass project")
kwargs["project"] = self.project
if "namespace" not in kwargs:
kwargs["namespace"] = self.namespace
return Query(self, **kwargs) | [
"def",
"query",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"\"client\"",
"in",
"kwargs",
":",
"raise",
"TypeError",
"(",
"\"Cannot pass client\"",
")",
"if",
"\"project\"",
"in",
"kwargs",
":",
"raise",
"TypeError",
"(",
"\"Cannot pass project\"",
")",
"kwargs",
"[",
"\"project\"",
"]",
"=",
"self",
".",
"project",
"if",
"\"namespace\"",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"\"namespace\"",
"]",
"=",
"self",
".",
"namespace",
"return",
"Query",
"(",
"self",
",",
"*",
"*",
"kwargs",
")"
] | Proxy to :class:`google.cloud.datastore.query.Query`.
Passes our ``project``.
Using query to search a datastore:
.. testsetup:: query
import os
import uuid
from google.cloud import datastore
unique = os.getenv('CIRCLE_BUILD_NUM', str(uuid.uuid4())[0:8])
client = datastore.Client(namespace='ns{}'.format(unique))
query = client.query(kind='_Doctest')
def do_something(entity):
pass
.. doctest:: query
>>> query = client.query(kind='MyKind')
>>> query.add_filter('property', '=', 'val')
Using the query iterator
.. doctest:: query
>>> query_iter = query.fetch()
>>> for entity in query_iter:
... do_something(entity)
or manually page through results
.. testsetup:: query-page
import os
import uuid
from google.cloud import datastore
from tests.system.test_system import Config # system tests
unique = os.getenv('CIRCLE_BUILD_NUM', str(uuid.uuid4())[0:8])
client = datastore.Client(namespace='ns{}'.format(unique))
key = client.key('_Doctest')
entity1 = datastore.Entity(key=key)
entity1['foo'] = 1337
entity2 = datastore.Entity(key=key)
entity2['foo'] = 42
Config.TO_DELETE.extend([entity1, entity2])
client.put_multi([entity1, entity2])
query = client.query(kind='_Doctest')
cursor = None
.. doctest:: query-page
>>> query_iter = query.fetch(start_cursor=cursor)
>>> pages = query_iter.pages
>>>
>>> first_page = next(pages)
>>> first_page_entities = list(first_page)
>>> query_iter.next_page_token is None
True
:type kwargs: dict
:param kwargs: Parameters for initializing an instance of
:class:`~google.cloud.datastore.query.Query`.
:rtype: :class:`~google.cloud.datastore.query.Query`
:returns: A query object. | [
"Proxy",
"to",
":",
"class",
":",
"google",
".",
"cloud",
".",
"datastore",
".",
"query",
".",
"Query",
"."
] | python | train |
saltstack/salt | salt/cloud/clouds/msazure.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L2253-L2286 | def show_deployment(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Return information about a deployment
CLI Example:
.. code-block:: bash
salt-cloud -f show_deployment my-azure service_name=my_service deployment_name=my_deployment
'''
if call != 'function':
raise SaltCloudSystemExit(
'The get_deployment function must be called with -f or --function.'
)
if not conn:
conn = get_conn()
if kwargs is None:
kwargs = {}
if 'service_name' not in kwargs:
raise SaltCloudSystemExit('A service name must be specified as "service_name"')
if 'deployment_name' not in kwargs:
raise SaltCloudSystemExit('A deployment name must be specified as "deployment_name"')
data = conn.get_deployment_by_name(
service_name=kwargs['service_name'],
deployment_name=kwargs['deployment_name'],
)
return object_to_dict(data) | [
"def",
"show_deployment",
"(",
"kwargs",
"=",
"None",
",",
"conn",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'function'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The get_deployment function must be called with -f or --function.'",
")",
"if",
"not",
"conn",
":",
"conn",
"=",
"get_conn",
"(",
")",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"if",
"'service_name'",
"not",
"in",
"kwargs",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'A service name must be specified as \"service_name\"'",
")",
"if",
"'deployment_name'",
"not",
"in",
"kwargs",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'A deployment name must be specified as \"deployment_name\"'",
")",
"data",
"=",
"conn",
".",
"get_deployment_by_name",
"(",
"service_name",
"=",
"kwargs",
"[",
"'service_name'",
"]",
",",
"deployment_name",
"=",
"kwargs",
"[",
"'deployment_name'",
"]",
",",
")",
"return",
"object_to_dict",
"(",
"data",
")"
] | .. versionadded:: 2015.8.0
Return information about a deployment
CLI Example:
.. code-block:: bash
salt-cloud -f show_deployment my-azure service_name=my_service deployment_name=my_deployment | [
"..",
"versionadded",
"::",
"2015",
".",
"8",
".",
"0"
] | python | train |
bitesofcode/projexui | projexui/widgets/xorbcolumnnavigator.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbcolumnnavigator.py#L155-L168 | def currentSchemaPath(self):
"""
Returns the column path for the current item. This will be a '.'
joined path based on the root schema to the given column.
:return <str>
"""
item = self.currentItem()
path = []
while item:
path.append(nativestring(item.text(0)))
item = item.parent()
return '.'.join(reversed(path)) | [
"def",
"currentSchemaPath",
"(",
"self",
")",
":",
"item",
"=",
"self",
".",
"currentItem",
"(",
")",
"path",
"=",
"[",
"]",
"while",
"item",
":",
"path",
".",
"append",
"(",
"nativestring",
"(",
"item",
".",
"text",
"(",
"0",
")",
")",
")",
"item",
"=",
"item",
".",
"parent",
"(",
")",
"return",
"'.'",
".",
"join",
"(",
"reversed",
"(",
"path",
")",
")"
] | Returns the column path for the current item. This will be a '.'
joined path based on the root schema to the given column.
:return <str> | [
"Returns",
"the",
"column",
"path",
"for",
"the",
"current",
"item",
".",
"This",
"will",
"be",
"a",
".",
"joined",
"path",
"based",
"on",
"the",
"root",
"schema",
"to",
"the",
"given",
"column",
".",
":",
"return",
"<str",
">"
] | python | train |
sorgerlab/indra | indra/tools/gene_network.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/gene_network.py#L177-L198 | def get_statements(self, filter=False):
"""Return the combined list of statements from BEL and Pathway Commons.
Internally calls :py:meth:`get_biopax_stmts` and
:py:meth:`get_bel_stmts`.
Parameters
----------
filter : bool
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from the BEL large corpus and Pathway
Commons.
"""
bp_stmts = self.get_biopax_stmts(filter=filter)
bel_stmts = self.get_bel_stmts(filter=filter)
return bp_stmts + bel_stmts | [
"def",
"get_statements",
"(",
"self",
",",
"filter",
"=",
"False",
")",
":",
"bp_stmts",
"=",
"self",
".",
"get_biopax_stmts",
"(",
"filter",
"=",
"filter",
")",
"bel_stmts",
"=",
"self",
".",
"get_bel_stmts",
"(",
"filter",
"=",
"filter",
")",
"return",
"bp_stmts",
"+",
"bel_stmts"
] | Return the combined list of statements from BEL and Pathway Commons.
Internally calls :py:meth:`get_biopax_stmts` and
:py:meth:`get_bel_stmts`.
Parameters
----------
filter : bool
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from the BEL large corpus and Pathway
Commons. | [
"Return",
"the",
"combined",
"list",
"of",
"statements",
"from",
"BEL",
"and",
"Pathway",
"Commons",
"."
] | python | train |
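A minimal usage sketch for get_statements above; the GeneNetwork class name matches the record's module (indra.tools.gene_network), but the gene list is only an illustration:

from indra.tools.gene_network import GeneNetwork

gn = GeneNetwork(['BRAF', 'MAP2K1'])
# filter=True keeps only statements that exclusively mention genes in the supplied list
stmts = gn.get_statements(filter=True)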
tensorflow/probability | tensorflow_probability/python/mcmc/sample.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/sample.py#L81-L372 | def sample_chain(
num_results,
current_state,
previous_kernel_results=None,
kernel=None,
num_burnin_steps=0,
num_steps_between_results=0,
trace_fn=lambda current_state, kernel_results: kernel_results,
return_final_kernel_results=False,
parallel_iterations=10,
name=None,
):
"""Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.
This function samples from a Markov chain at `current_state` and whose
stationary distribution is governed by the supplied `TransitionKernel`
instance (`kernel`).
This function can sample from multiple chains, in parallel. (Whether or not
there are multiple chains is dictated by the `kernel`.)
The `current_state` can be represented as a single `Tensor` or a `list` of
`Tensors` which collectively represent the current state.
Since MCMC states are correlated, it is sometimes desirable to produce
additional intermediate states, and then discard them, ending up with a set of
states with decreased autocorrelation. See [Owen (2017)][1]. Such "thinning"
is made possible by setting `num_steps_between_results > 0`. The chain then
takes `num_steps_between_results` extra steps between the steps that make it
into the results. The extra steps are never materialized (in calls to
`sess.run`), and thus do not increase memory requirements.
Warning: when setting a `seed` in the `kernel`, ensure that `sample_chain`'s
`parallel_iterations=1`, otherwise results will not be reproducible.
In addition to returning the chain state, this function supports tracing of
auxiliary variables used by the kernel. The traced values are selected by
specifying `trace_fn`. By default, all kernel results are traced but in the
future the default will be changed to no results being traced, so plan
accordingly. See below for some examples of this feature.
Args:
num_results: Integer number of Markov chain draws.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s
representing internal calculations made within the previous call to this
function (or as returned by `bootstrap_results`).
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
num_burnin_steps: Integer number of chain steps to take before starting to
collect results.
Default value: 0 (i.e., no burn-in).
num_steps_between_results: Integer number of chain steps between collecting
a result. Only one out of every `num_steps_between_results + 1` steps is
included in the returned results. The number of returned chain states is
still equal to `num_results`. Default value: 0 (i.e., no thinning).
trace_fn: A callable that takes in the current chain state and the previous
kernel results and returns a `Tensor` or a nested collection of `Tensor`s
that is then traced along with the chain state.
return_final_kernel_results: If `True`, then the final kernel results are
returned alongside the chain state and the trace specified by the
`trace_fn`.
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "mcmc_sample_chain").
Returns:
checkpointable_states_and_trace: if `return_final_kernel_results` is
`True`. The return value is an instance of
`CheckpointableStatesAndTrace`.
all_states: if `return_final_kernel_results` is `False` and `trace_fn` is
`None`. The return value is a `Tensor` or Python list of `Tensor`s
representing the state(s) of the Markov chain(s) at each result step. Has
same shape as input `current_state` but with a prepended
`num_results`-size dimension.
states_and_trace: if `return_final_kernel_results` is `False` and
`trace_fn` is not `None`. The return value is an instance of
`StatesAndTrace`.
#### Examples
##### Sample from a diagonal-variance Gaussian.
I.e.,
```none
for i=1..n:
x[i] ~ MultivariateNormal(loc=0, scale=diag(true_stddev)) # likelihood
```
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
dims = 10
true_stddev = np.sqrt(np.linspace(1., 3., dims))
likelihood = tfd.MultivariateNormalDiag(loc=0., scale_diag=true_stddev)
states = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=tf.zeros(dims),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=likelihood.log_prob,
step_size=0.5,
num_leapfrog_steps=2),
trace_fn=None)
sample_mean = tf.reduce_mean(states, axis=0)
# ==> approx all zeros
sample_stddev = tf.sqrt(tf.reduce_mean(
tf.squared_difference(states, sample_mean),
axis=0))
# ==> approx equal true_stddev
```
##### Sampling from factor-analysis posteriors with known factors.
I.e.,
```none
# prior
w ~ MultivariateNormal(loc=0, scale=eye(d))
for i=1..n:
# likelihood
x[i] ~ Normal(loc=w^T F[i], scale=1)
```
where `F` denotes factors.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
# Specify model.
def make_prior(dims):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims))
def make_likelihood(weights, factors):
return tfd.MultivariateNormalDiag(
loc=tf.matmul(weights, factors, adjoint_b=True))
def joint_log_prob(num_weights, factors, x, w):
return (make_prior(num_weights).log_prob(w) +
make_likelihood(w, factors).log_prob(x))
def unnormalized_log_posterior(w):
# Posterior is proportional to: `p(W, X=x | factors)`.
return joint_log_prob(num_weights, factors, x, w)
# Setup data.
num_weights = 10 # == d
num_factors = 40 # == n
num_chains = 100
weights = make_prior(num_weights).sample(1)
factors = tf.random_normal([num_factors, num_weights])
x = make_likelihood(weights, factors).sample()
# Sample from Hamiltonian Monte Carlo Markov Chain.
# Get `num_results` samples from `num_chains` independent chains.
chains_states, kernels_results = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=tf.zeros([num_chains, num_weights], name='init_weights'),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_log_posterior,
step_size=0.1,
num_leapfrog_steps=2))
# Compute sample stats.
sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])
# ==> approx equal to weights
sample_var = tf.reduce_mean(
tf.squared_difference(chains_states, sample_mean),
axis=[0, 1])
# ==> less than 1
```
##### Custom tracing functions.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
likelihood = tfd.Normal(loc=0., scale=1.)
def sample_chain(trace_fn):
return tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=0.,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=likelihood.log_prob,
step_size=0.5,
num_leapfrog_steps=2),
trace_fn=trace_fn)
def trace_log_accept_ratio(states, previous_kernel_results):
return previous_kernel_results.log_accept_ratio
def trace_everything(states, previous_kernel_results):
return previous_kernel_results
_, log_accept_ratio = sample_chain(trace_fn=trace_log_accept_ratio)
_, kernel_results = sample_chain(trace_fn=trace_everything)
acceptance_prob = tf.exp(tf.minimum(log_accept_ratio, 0.))
# Equivalent to, but more efficient than:
acceptance_prob = tf.exp(tf.minimum(kernel_results.log_accept_ratio, 0.))
```
#### References
[1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.
_Technical Report_, 2017.
http://statweb.stanford.edu/~owen/reports/bestthinning.pdf
"""
if not kernel.is_calibrated:
warnings.warn("supplied `TransitionKernel` is not calibrated. Markov "
"chain may not converge to intended target distribution.")
with tf.compat.v1.name_scope(
name, "mcmc_sample_chain",
[num_results, num_burnin_steps, num_steps_between_results]):
num_results = tf.convert_to_tensor(
value=num_results, dtype=tf.int32, name="num_results")
num_burnin_steps = tf.convert_to_tensor(
value=num_burnin_steps, dtype=tf.int32, name="num_burnin_steps")
num_steps_between_results = tf.convert_to_tensor(
value=num_steps_between_results,
dtype=tf.int32,
name="num_steps_between_results")
current_state = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(value=x, name="current_state"),
current_state)
if previous_kernel_results is None:
previous_kernel_results = kernel.bootstrap_results(current_state)
if trace_fn is None:
# It simplifies the logic to use a dummy function here.
trace_fn = lambda *args: ()
no_trace = True
else:
no_trace = False
if trace_fn is sample_chain.__defaults__[4]:
warnings.warn("Tracing all kernel results by default is deprecated. Set "
"the `trace_fn` argument to None (the future default "
"value) or an explicit callback that traces the values "
"you are interested in.")
def _trace_scan_fn(state_and_results, num_steps):
next_state, current_kernel_results = mcmc_util.smart_for_loop(
loop_num_iter=num_steps,
body_fn=kernel.one_step,
initial_loop_vars=list(state_and_results),
parallel_iterations=parallel_iterations)
return next_state, current_kernel_results
(_, final_kernel_results), (all_states, trace) = mcmc_util.trace_scan(
loop_fn=_trace_scan_fn,
initial_state=(current_state, previous_kernel_results),
elems=tf.one_hot(
indices=0,
depth=num_results,
on_value=1 + num_burnin_steps,
off_value=1 + num_steps_between_results,
dtype=tf.int32),
# pylint: disable=g-long-lambda
trace_fn=lambda state_and_results: (state_and_results[0],
trace_fn(*state_and_results)),
# pylint: enable=g-long-lambda
parallel_iterations=parallel_iterations)
if return_final_kernel_results:
return CheckpointableStatesAndTrace(
all_states=all_states,
trace=trace,
final_kernel_results=final_kernel_results)
else:
if no_trace:
return all_states
else:
return StatesAndTrace(all_states=all_states, trace=trace) | [
"def",
"sample_chain",
"(",
"num_results",
",",
"current_state",
",",
"previous_kernel_results",
"=",
"None",
",",
"kernel",
"=",
"None",
",",
"num_burnin_steps",
"=",
"0",
",",
"num_steps_between_results",
"=",
"0",
",",
"trace_fn",
"=",
"lambda",
"current_state",
",",
"kernel_results",
":",
"kernel_results",
",",
"return_final_kernel_results",
"=",
"False",
",",
"parallel_iterations",
"=",
"10",
",",
"name",
"=",
"None",
",",
")",
":",
"if",
"not",
"kernel",
".",
"is_calibrated",
":",
"warnings",
".",
"warn",
"(",
"\"supplied `TransitionKernel` is not calibrated. Markov \"",
"\"chain may not converge to intended target distribution.\"",
")",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"\"mcmc_sample_chain\"",
",",
"[",
"num_results",
",",
"num_burnin_steps",
",",
"num_steps_between_results",
"]",
")",
":",
"num_results",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_results",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"num_results\"",
")",
"num_burnin_steps",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_burnin_steps",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"num_burnin_steps\"",
")",
"num_steps_between_results",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"num_steps_between_results",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"\"num_steps_between_results\"",
")",
"current_state",
"=",
"tf",
".",
"nest",
".",
"map_structure",
"(",
"lambda",
"x",
":",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"x",
",",
"name",
"=",
"\"current_state\"",
")",
",",
"current_state",
")",
"if",
"previous_kernel_results",
"is",
"None",
":",
"previous_kernel_results",
"=",
"kernel",
".",
"bootstrap_results",
"(",
"current_state",
")",
"if",
"trace_fn",
"is",
"None",
":",
"# It simplifies the logic to use a dummy function here.",
"trace_fn",
"=",
"lambda",
"*",
"args",
":",
"(",
")",
"no_trace",
"=",
"True",
"else",
":",
"no_trace",
"=",
"False",
"if",
"trace_fn",
"is",
"sample_chain",
".",
"__defaults__",
"[",
"4",
"]",
":",
"warnings",
".",
"warn",
"(",
"\"Tracing all kernel results by default is deprecated. Set \"",
"\"the `trace_fn` argument to None (the future default \"",
"\"value) or an explicit callback that traces the values \"",
"\"you are interested in.\"",
")",
"def",
"_trace_scan_fn",
"(",
"state_and_results",
",",
"num_steps",
")",
":",
"next_state",
",",
"current_kernel_results",
"=",
"mcmc_util",
".",
"smart_for_loop",
"(",
"loop_num_iter",
"=",
"num_steps",
",",
"body_fn",
"=",
"kernel",
".",
"one_step",
",",
"initial_loop_vars",
"=",
"list",
"(",
"state_and_results",
")",
",",
"parallel_iterations",
"=",
"parallel_iterations",
")",
"return",
"next_state",
",",
"current_kernel_results",
"(",
"_",
",",
"final_kernel_results",
")",
",",
"(",
"all_states",
",",
"trace",
")",
"=",
"mcmc_util",
".",
"trace_scan",
"(",
"loop_fn",
"=",
"_trace_scan_fn",
",",
"initial_state",
"=",
"(",
"current_state",
",",
"previous_kernel_results",
")",
",",
"elems",
"=",
"tf",
".",
"one_hot",
"(",
"indices",
"=",
"0",
",",
"depth",
"=",
"num_results",
",",
"on_value",
"=",
"1",
"+",
"num_burnin_steps",
",",
"off_value",
"=",
"1",
"+",
"num_steps_between_results",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"# pylint: disable=g-long-lambda",
"trace_fn",
"=",
"lambda",
"state_and_results",
":",
"(",
"state_and_results",
"[",
"0",
"]",
",",
"trace_fn",
"(",
"*",
"state_and_results",
")",
")",
",",
"# pylint: enable=g-long-lambda",
"parallel_iterations",
"=",
"parallel_iterations",
")",
"if",
"return_final_kernel_results",
":",
"return",
"CheckpointableStatesAndTrace",
"(",
"all_states",
"=",
"all_states",
",",
"trace",
"=",
"trace",
",",
"final_kernel_results",
"=",
"final_kernel_results",
")",
"else",
":",
"if",
"no_trace",
":",
"return",
"all_states",
"else",
":",
"return",
"StatesAndTrace",
"(",
"all_states",
"=",
"all_states",
",",
"trace",
"=",
"trace",
")"
] | Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.
This function samples from a Markov chain at `current_state` and whose
stationary distribution is governed by the supplied `TransitionKernel`
instance (`kernel`).
This function can sample from multiple chains, in parallel. (Whether or not
there are multiple chains is dictated by the `kernel`.)
The `current_state` can be represented as a single `Tensor` or a `list` of
`Tensors` which collectively represent the current state.
Since MCMC states are correlated, it is sometimes desirable to produce
additional intermediate states, and then discard them, ending up with a set of
states with decreased autocorrelation. See [Owen (2017)][1]. Such "thinning"
is made possible by setting `num_steps_between_results > 0`. The chain then
takes `num_steps_between_results` extra steps between the steps that make it
into the results. The extra steps are never materialized (in calls to
`sess.run`), and thus do not increase memory requirements.
Warning: when setting a `seed` in the `kernel`, ensure that `sample_chain`'s
`parallel_iterations=1`, otherwise results will not be reproducible.
In addition to returning the chain state, this function supports tracing of
auxiliary variables used by the kernel. The traced values are selected by
specifying `trace_fn`. By default, all kernel results are traced but in the
future the default will be changed to no results being traced, so plan
accordingly. See below for some examples of this feature.
Args:
num_results: Integer number of Markov chain draws.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s
representing internal calculations made within the previous call to this
function (or as returned by `bootstrap_results`).
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
num_burnin_steps: Integer number of chain steps to take before starting to
collect results.
Default value: 0 (i.e., no burn-in).
num_steps_between_results: Integer number of chain steps between collecting
a result. Only one out of every `num_steps_between_results + 1` steps is
included in the returned results. The number of returned chain states is
still equal to `num_results`. Default value: 0 (i.e., no thinning).
trace_fn: A callable that takes in the current chain state and the previous
kernel results and returns a `Tensor` or a nested collection of `Tensor`s
that is then traced along with the chain state.
return_final_kernel_results: If `True`, then the final kernel results are
returned alongside the chain state and the trace specified by the
`trace_fn`.
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "mcmc_sample_chain").
Returns:
checkpointable_states_and_trace: if `return_final_kernel_results` is
`True`. The return value is an instance of
`CheckpointableStatesAndTrace`.
all_states: if `return_final_kernel_results` is `False` and `trace_fn` is
`None`. The return value is a `Tensor` or Python list of `Tensor`s
representing the state(s) of the Markov chain(s) at each result step. Has
same shape as input `current_state` but with a prepended
`num_results`-size dimension.
states_and_trace: if `return_final_kernel_results` is `False` and
`trace_fn` is not `None`. The return value is an instance of
`StatesAndTrace`.
#### Examples
##### Sample from a diagonal-variance Gaussian.
I.e.,
```none
for i=1..n:
x[i] ~ MultivariateNormal(loc=0, scale=diag(true_stddev)) # likelihood
```
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
dims = 10
true_stddev = np.sqrt(np.linspace(1., 3., dims))
likelihood = tfd.MultivariateNormalDiag(loc=0., scale_diag=true_stddev)
states = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=tf.zeros(dims),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=likelihood.log_prob,
step_size=0.5,
num_leapfrog_steps=2),
trace_fn=None)
sample_mean = tf.reduce_mean(states, axis=0)
# ==> approx all zeros
sample_stddev = tf.sqrt(tf.reduce_mean(
tf.squared_difference(states, sample_mean),
axis=0))
# ==> approx equal true_stddev
```
##### Sampling from factor-analysis posteriors with known factors.
I.e.,
```none
# prior
w ~ MultivariateNormal(loc=0, scale=eye(d))
for i=1..n:
# likelihood
x[i] ~ Normal(loc=w^T F[i], scale=1)
```
where `F` denotes factors.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
# Specify model.
def make_prior(dims):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims))
def make_likelihood(weights, factors):
return tfd.MultivariateNormalDiag(
loc=tf.matmul(weights, factors, adjoint_b=True))
def joint_log_prob(num_weights, factors, x, w):
return (make_prior(num_weights).log_prob(w) +
make_likelihood(w, factors).log_prob(x))
def unnormalized_log_posterior(w):
# Posterior is proportional to: `p(W, X=x | factors)`.
return joint_log_prob(num_weights, factors, x, w)
# Setup data.
num_weights = 10 # == d
num_factors = 40 # == n
num_chains = 100
weights = make_prior(num_weights).sample(1)
factors = tf.random_normal([num_factors, num_weights])
x = make_likelihood(weights, factors).sample()
# Sample from Hamiltonian Monte Carlo Markov Chain.
# Get `num_results` samples from `num_chains` independent chains.
chains_states, kernels_results = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=tf.zeros([num_chains, num_weights], name='init_weights'),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_log_posterior,
step_size=0.1,
num_leapfrog_steps=2))
# Compute sample stats.
sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])
# ==> approx equal to weights
sample_var = tf.reduce_mean(
tf.squared_difference(chains_states, sample_mean),
axis=[0, 1])
# ==> less than 1
```
##### Custom tracing functions.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
likelihood = tfd.Normal(loc=0., scale=1.)
def sample_chain(trace_fn):
return tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=500,
current_state=0.,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=likelihood.log_prob,
step_size=0.5,
num_leapfrog_steps=2),
trace_fn=trace_fn)
def trace_log_accept_ratio(states, previous_kernel_results):
return previous_kernel_results.log_accept_ratio
def trace_everything(states, previous_kernel_results):
return previous_kernel_results
_, log_accept_ratio = sample_chain(trace_fn=trace_log_accept_ratio)
_, kernel_results = sample_chain(trace_fn=trace_everything)
acceptance_prob = tf.exp(tf.minimum(log_accept_ratio, 0.))
# Equivalent to, but more efficient than:
acceptance_prob = tf.exp(tf.minimum(kernel_results.log_accept_ratio, 0.))
```
#### References
[1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.
_Technical Report_, 2017.
http://statweb.stanford.edu/~owen/reports/bestthinning.pdf | [
"Implements",
"Markov",
"chain",
"Monte",
"Carlo",
"via",
"repeated",
"TransitionKernel",
"steps",
"."
] | python | test |
prompt-toolkit/pymux | pymux/commands/commands.py | https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/commands/commands.py#L466-L474 | def send_prefix(pymux, variables):
"""
Send prefix to active pane.
"""
process = pymux.arrangement.get_active_pane().process
for k in pymux.key_bindings_manager.prefix:
vt100_data = prompt_toolkit_key_to_vt100_key(k)
process.write_input(vt100_data) | [
"def",
"send_prefix",
"(",
"pymux",
",",
"variables",
")",
":",
"process",
"=",
"pymux",
".",
"arrangement",
".",
"get_active_pane",
"(",
")",
".",
"process",
"for",
"k",
"in",
"pymux",
".",
"key_bindings_manager",
".",
"prefix",
":",
"vt100_data",
"=",
"prompt_toolkit_key_to_vt100_key",
"(",
"k",
")",
"process",
".",
"write_input",
"(",
"vt100_data",
")"
] | Send prefix to active pane. | [
"Send",
"prefix",
"to",
"active",
"pane",
"."
] | python | train |
6809/MC6809 | MC6809/components/cpu_utils/MC6809_registers.py | https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/cpu_utils/MC6809_registers.py#L122-L150 | def convert_differend_width(src_reg, dst_reg):
"""
e.g.:
8bit $cd TFR into 16bit, results in: $ffcd
16bit $1234 TFR into 8bit, results in: $34
>>> reg8 = ValueStorage8Bit(name="bar", initial_value=0xcd)
>>> reg16 = ValueStorage16Bit(name="foo", initial_value=0x0000)
>>> hex(convert_differend_width(src_reg=reg8, dst_reg=reg16))
'0xffcd'
>>> reg16 = ValueStorage16Bit(name="foo", initial_value=0x1234)
>>> reg8 = ValueStorage8Bit(name="bar", initial_value=0xcd)
>>> hex(convert_differend_width(src_reg=reg16, dst_reg=reg8))
'0x34'
TODO: verify this behaviour on real hardware
see: http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4886
"""
src_value = src_reg.value
if src_reg.WIDTH == 8 and dst_reg.WIDTH == 16:
# e.g.: $cd -> $ffcd
src_value += 0xff00
elif src_reg.WIDTH == 16 and dst_reg.WIDTH == 8:
# This is not really needed, because all 8Bit register will
# limit the value, too.
# e.g.: $1234 -> $34
src_value = src_value & 0xff
return src_value | [
"def",
"convert_differend_width",
"(",
"src_reg",
",",
"dst_reg",
")",
":",
"src_value",
"=",
"src_reg",
".",
"value",
"if",
"src_reg",
".",
"WIDTH",
"==",
"8",
"and",
"dst_reg",
".",
"WIDTH",
"==",
"16",
":",
"# e.g.: $cd -> $ffcd",
"src_value",
"+=",
"0xff00",
"elif",
"src_reg",
".",
"WIDTH",
"==",
"16",
"and",
"dst_reg",
".",
"WIDTH",
"==",
"8",
":",
"# This not not really needed, because all 8Bit register will",
"# limit the value, too.",
"# e.g.: $1234 -> $34",
"src_value",
"=",
"src_value",
"&",
"0xff",
"return",
"src_value"
] | e.g.:
8bit $cd TFR into 16bit, results in: $ffcd
16bit $1234 TFR into 8bit, results in: $34
>>> reg8 = ValueStorage8Bit(name="bar", initial_value=0xcd)
>>> reg16 = ValueStorage16Bit(name="foo", initial_value=0x0000)
>>> hex(convert_differend_width(src_reg=reg8, dst_reg=reg16))
'0xffcd'
>>> reg16 = ValueStorage16Bit(name="foo", initial_value=0x1234)
>>> reg8 = ValueStorage8Bit(name="bar", initial_value=0xcd)
>>> hex(convert_differend_width(src_reg=reg16, dst_reg=reg8))
'0x34'
TODO: verify this behaviour on real hardware
see: http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4886 | [
"e",
".",
"g",
".",
":",
"8bit",
"$cd",
"TFR",
"into",
"16bit",
"results",
"in",
":",
"$ffcd",
"16bit",
"$1234",
"TFR",
"into",
"8bit",
"results",
"in",
":",
"$34"
] | python | train |
ulule/django-badgify | badgify/utils.py | https://github.com/ulule/django-badgify/blob/1bf233ffeb6293ee659454de7b3794682128b6ca/badgify/utils.py#L115-L120 | def chunks(l, n):
"""
Yields successive n-sized chunks from l.
"""
for i in _range(0, len(l), n):
yield l[i:i + n] | [
"def",
"chunks",
"(",
"l",
",",
"n",
")",
":",
"for",
"i",
"in",
"_range",
"(",
"0",
",",
"len",
"(",
"l",
")",
",",
"n",
")",
":",
"yield",
"l",
"[",
"i",
":",
"i",
"+",
"n",
"]"
] | Yields successive n-sized chunks from l. | [
"Yields",
"successive",
"n",
"-",
"sized",
"chunks",
"from",
"l",
"."
] | python | train |
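A quick worked example of the chunks generator above (the import path follows the record's badgify/utils.py; the input list is arbitrary):

from badgify.utils import chunks

list(chunks([1, 2, 3, 4, 5, 6, 7], 3))
# -> [[1, 2, 3], [4, 5, 6], [7]]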
achedeuzot/django-timestampable | django_timestampable/models.py | https://github.com/achedeuzot/django-timestampable/blob/44d774655c383e43d931e64ac5e1d41dd737873d/django_timestampable/models.py#L31-L41 | def update_timestampable_model(sender, instance, *args, **kwargs):
'''
Using signals guarantees that timestamps are set no matter what:
loading fixtures, bulk inserts, bulk updates, etc.
Indeed, the `save()` method is *not* called when using fixtures.
'''
if not isinstance(instance, TimestampableModel):
return
if not instance.pk:
instance.created_at = now()
instance.updated_at = now() | [
"def",
"update_timestampable_model",
"(",
"sender",
",",
"instance",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"instance",
",",
"TimestampableModel",
")",
":",
"return",
"if",
"not",
"instance",
".",
"pk",
":",
"instance",
".",
"created_at",
"=",
"now",
"(",
")",
"instance",
".",
"updated_at",
"=",
"now",
"(",
")"
] | Using signals guarantees that timestamps are set no matter what:
loading fixtures, bulk inserts, bulk updates, etc.
Indeed, the `save()` method is *not* called when using fixtures. | [
"Using",
"signals",
"guarantees",
"that",
"timestamps",
"are",
"set",
"no",
"matter",
"what",
":",
"loading",
"fixtures",
"bulk",
"inserts",
"bulk",
"updates",
"etc",
".",
"Indeed",
"the",
"save",
"()",
"method",
"is",
"*",
"not",
"*",
"called",
"when",
"using",
"fixtures",
"."
] | python | train |
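A hedged sketch of how the handler above is typically consumed, assuming it is connected to Django's pre_save signal as its signature suggests; the Article model is hypothetical and only TimestampableModel comes from the record:

from django.db import models
from django_timestampable.models import TimestampableModel

class Article(TimestampableModel):  # hypothetical example model
    title = models.CharField(max_length=200)

# On the first save of an Article instance the handler sets created_at,
# and it refreshes updated_at on every save thereafter.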
persandstrom/python-verisure | verisure/session.py | https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L127-L150 | def _get_installations(self):
""" Get information about installations """
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text) | [
"def",
"_get_installations",
"(",
"self",
")",
":",
"response",
"=",
"None",
"for",
"base_url",
"in",
"urls",
".",
"BASE_URLS",
":",
"urls",
".",
"BASE_URL",
"=",
"base_url",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"urls",
".",
"get_installations",
"(",
"self",
".",
"_username",
")",
",",
"headers",
"=",
"{",
"'Cookie'",
":",
"'vid={}'",
".",
"format",
"(",
"self",
".",
"_vid",
")",
",",
"'Accept'",
":",
"'application/json,'",
"'text/javascript, */*; q=0.01'",
",",
"}",
")",
"if",
"2",
"==",
"response",
".",
"status_code",
"//",
"100",
":",
"break",
"elif",
"503",
"==",
"response",
".",
"status_code",
":",
"continue",
"else",
":",
"raise",
"ResponseError",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"ex",
":",
"raise",
"RequestError",
"(",
"ex",
")",
"_validate_response",
"(",
"response",
")",
"self",
".",
"installations",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"text",
")"
] | Get information about installations | [
"Get",
"information",
"about",
"installations"
] | python | train |
stphivos/django-mock-queries | django_mock_queries/mocks.py | https://github.com/stphivos/django-mock-queries/blob/1522a0debfa78f4a986818d92eef826410becc85/django_mock_queries/mocks.py#L174-L181 | def find_all_models(models):
""" Yield all models and their parents. """
for model in models:
yield model
# noinspection PyProtectedMember
for parent in model._meta.parents.keys():
for parent_model in find_all_models((parent,)):
yield parent_model | [
"def",
"find_all_models",
"(",
"models",
")",
":",
"for",
"model",
"in",
"models",
":",
"yield",
"model",
"# noinspection PyProtectedMember",
"for",
"parent",
"in",
"model",
".",
"_meta",
".",
"parents",
".",
"keys",
"(",
")",
":",
"for",
"parent_model",
"in",
"find_all_models",
"(",
"(",
"parent",
",",
")",
")",
":",
"yield",
"parent_model"
] | Yield all models and their parents. | [
"Yield",
"all",
"models",
"and",
"their",
"parents",
"."
] | python | train |
tanghaibao/goatools | goatools/cli/wr_hierarchy.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/cli/wr_hierarchy.py#L129-L137 | def prt_hier(self, prt=sys.stdout):
"""Write hierarchy below specfied GO IDs."""
objwr = WrHierGO(self.gosubdag, **self.kws)
assert self.goids, "NO VALID GO IDs WERE PROVIDED"
if 'up' not in objwr.usrset:
for goid in self.goids:
objwr.prt_hier_down(goid, prt)
else:
objwr.prt_hier_up(self.goids, prt) | [
"def",
"prt_hier",
"(",
"self",
",",
"prt",
"=",
"sys",
".",
"stdout",
")",
":",
"objwr",
"=",
"WrHierGO",
"(",
"self",
".",
"gosubdag",
",",
"*",
"*",
"self",
".",
"kws",
")",
"assert",
"self",
".",
"goids",
",",
"\"NO VALID GO IDs WERE PROVIDED\"",
"if",
"'up'",
"not",
"in",
"objwr",
".",
"usrset",
":",
"for",
"goid",
"in",
"self",
".",
"goids",
":",
"objwr",
".",
"prt_hier_down",
"(",
"goid",
",",
"prt",
")",
"else",
":",
"objwr",
".",
"prt_hier_up",
"(",
"self",
".",
"goids",
",",
"prt",
")"
] | Write hierarchy below specified GO IDs. | [
"Write",
"hierarchy",
"below",
"specfied",
"GO",
"IDs",
"."
] | python | train |
SheffieldML/GPy | GPy/util/mocap.py | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/mocap.py#L634-L643 | def set_rotation_matrices(self):
"""Set the meta information at each vertex to contain the correct matrices C and Cinv as prescribed by the rotations and rotation orders."""
for i in range(len(self.vertices)):
self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0],
self.vertices[i].meta['axis'][1],
self.vertices[i].meta['axis'][2],
self.vertices[i].meta['axis_order'],
degrees=True)
# Todo: invert this by applying angle operations in reverse order
self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C']) | [
"def",
"set_rotation_matrices",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"vertices",
")",
")",
":",
"self",
".",
"vertices",
"[",
"i",
"]",
".",
"meta",
"[",
"'C'",
"]",
"=",
"rotation_matrix",
"(",
"self",
".",
"vertices",
"[",
"i",
"]",
".",
"meta",
"[",
"'axis'",
"]",
"[",
"0",
"]",
",",
"self",
".",
"vertices",
"[",
"i",
"]",
".",
"meta",
"[",
"'axis'",
"]",
"[",
"1",
"]",
",",
"self",
".",
"vertices",
"[",
"i",
"]",
".",
"meta",
"[",
"'axis'",
"]",
"[",
"2",
"]",
",",
"self",
".",
"vertices",
"[",
"i",
"]",
".",
"meta",
"[",
"'axis_order'",
"]",
",",
"degrees",
"=",
"True",
")",
"# Todo: invert this by applying angle operations in reverse order",
"self",
".",
"vertices",
"[",
"i",
"]",
".",
"meta",
"[",
"'Cinv'",
"]",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"self",
".",
"vertices",
"[",
"i",
"]",
".",
"meta",
"[",
"'C'",
"]",
")"
] | Set the meta information at each vertex to contain the correct matrices C and Cinv as prescribed by the rotations and rotation orders. | [
"Set",
"the",
"meta",
"information",
"at",
"each",
"vertex",
"to",
"contain",
"the",
"correct",
"matrices",
"C",
"and",
"Cinv",
"as",
"prescribed",
"by",
"the",
"rotations",
"and",
"rotation",
"orders",
"."
] | python | train |
pyBookshelf/bookshelf | bookshelf/api_v2/pkg.py | https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/pkg.py#L124-L130 | def install_gem(gem):
""" install a particular gem """
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=False, capture=True):
# convert 0 into True, any errors will always raise an exception
return not bool(
run("gem install %s --no-rdoc --no-ri" % gem).return_code) | [
"def",
"install_gem",
"(",
"gem",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'warnings'",
",",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
")",
",",
"warn_only",
"=",
"False",
",",
"capture",
"=",
"True",
")",
":",
"# convert 0 into True, any errors will always raise an exception",
"return",
"not",
"bool",
"(",
"run",
"(",
"\"gem install %s --no-rdoc --no-ri\"",
"%",
"gem",
")",
".",
"return_code",
")"
] | install a particular gem | [
"install",
"a",
"particular",
"gem"
] | python | train |
unt-libraries/pyuntl | pyuntl/highwire_structure.py | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/highwire_structure.py#L52-L68 | def get_author(self, **kwargs):
"""Determine the authors from the creator field."""
qualifier = kwargs.get('qualifier', '')
children = kwargs.get('children', [])
creator_type_per = False
author_name = None
# Find the creator type in children.
for child in children:
if child.tag == 'type' and child.content == 'per':
creator_type_per = True
# Get the author name.
elif child.tag == 'name':
author_name = child.content
if qualifier == 'aut' and creator_type_per and author_name:
return author_name
return None | [
"def",
"get_author",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"qualifier",
"=",
"kwargs",
".",
"get",
"(",
"'qualifier'",
",",
"''",
")",
"children",
"=",
"kwargs",
".",
"get",
"(",
"'children'",
",",
"[",
"]",
")",
"creator_type_per",
"=",
"False",
"author_name",
"=",
"None",
"# Find the creator type in children.",
"for",
"child",
"in",
"children",
":",
"if",
"child",
".",
"tag",
"==",
"'type'",
"and",
"child",
".",
"content",
"==",
"'per'",
":",
"creator_type_per",
"=",
"True",
"# Get the author name.",
"elif",
"child",
".",
"tag",
"==",
"'name'",
":",
"author_name",
"=",
"child",
".",
"content",
"if",
"qualifier",
"==",
"'aut'",
"and",
"creator_type_per",
"and",
"author_name",
":",
"return",
"author_name",
"return",
"None"
] | Determine the authors from the creator field. | [
"Determine",
"the",
"authors",
"from",
"the",
"creator",
"field",
"."
] | python | train |
bwohlberg/sporco | sporco/admm/parcbpdn.py | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/parcbpdn.py#L250-L262 | def par_u1step(i):
r"""Dual variable update for :math:`\mathbf{u}_{1,G_i}`, one of the
disjoint problems for updating :math:`\mathbf{u}_1`.
Parameters
----------
i : int
Index of grouping to update
"""
global mp_U1
grpind = slice(mp_grp[i], mp_grp[i+1])
mp_U1[grpind] += mp_alpha*(mp_X[grpind] - mp_Y1[grpind]) | [
"def",
"par_u1step",
"(",
"i",
")",
":",
"global",
"mp_U1",
"grpind",
"=",
"slice",
"(",
"mp_grp",
"[",
"i",
"]",
",",
"mp_grp",
"[",
"i",
"+",
"1",
"]",
")",
"mp_U1",
"[",
"grpind",
"]",
"+=",
"mp_alpha",
"*",
"(",
"mp_X",
"[",
"grpind",
"]",
"-",
"mp_Y1",
"[",
"grpind",
"]",
")"
] | r"""Dual variable update for :math:`\mathbf{u}_{1,G_i}`, one of the
disjoint problems for updating :math:`\mathbf{u}_1`.
Parameters
----------
i : int
Index of grouping to update | [
"r",
"Dual",
"variable",
"update",
"for",
":",
"math",
":",
"\\",
"mathbf",
"{",
"u",
"}",
"_",
"{",
"1",
"G_i",
"}",
"one",
"of",
"the",
"disjoint",
"problems",
"for",
"updating",
":",
"math",
":",
"\\",
"mathbf",
"{",
"u",
"}",
"_1",
"."
] | python | train |
dourvaris/nano-python | src/nano/rpc.py | https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L1194-L1228 | def delegators(self, account):
"""
Returns a list of pairs of delegator names given **account** a
representative and its balance
.. version 8.0 required
:param account: Account to return delegators for
:type account: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.delegators(
... account="xrb_1111111111111111111111111111111111111111111111111117353trpda"
... )
{
"xrb_13bqhi1cdqq8yb9szneoc38qk899d58i5rcrgdk5mkdm86hekpoez3zxw5sd":
"500000000000000000000000000000000000",
"xrb_17k6ug685154an8gri9whhe5kb5z1mf5w6y39gokc1657sh95fegm8ht1zpn":
"961647970820730000000000000000000000"
}
"""
account = self._process_value(account, 'account')
payload = {"account": account}
resp = self.call('delegators', payload)
delegators = resp.get('delegators') or {}
for k, v in delegators.items():
delegators[k] = int(v)
return delegators | [
"def",
"delegators",
"(",
"self",
",",
"account",
")",
":",
"account",
"=",
"self",
".",
"_process_value",
"(",
"account",
",",
"'account'",
")",
"payload",
"=",
"{",
"\"account\"",
":",
"account",
"}",
"resp",
"=",
"self",
".",
"call",
"(",
"'delegators'",
",",
"payload",
")",
"delegators",
"=",
"resp",
".",
"get",
"(",
"'delegators'",
")",
"or",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"delegators",
".",
"items",
"(",
")",
":",
"delegators",
"[",
"k",
"]",
"=",
"int",
"(",
"v",
")",
"return",
"delegators"
] | Returns a list of pairs of delegator names given **account** a
representative and its balance
.. version 8.0 required
:param account: Account to return delegators for
:type account: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.delegators(
... account="xrb_1111111111111111111111111111111111111111111111111117353trpda"
... )
{
"xrb_13bqhi1cdqq8yb9szneoc38qk899d58i5rcrgdk5mkdm86hekpoez3zxw5sd":
"500000000000000000000000000000000000",
"xrb_17k6ug685154an8gri9whhe5kb5z1mf5w6y39gokc1657sh95fegm8ht1zpn":
"961647970820730000000000000000000000"
} | [
"Returns",
"a",
"list",
"of",
"pairs",
"of",
"delegator",
"names",
"given",
"**",
"account",
"**",
"a",
"representative",
"and",
"its",
"balance"
] | python | train |
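A minimal usage sketch for the delegators() call above. The import path and the local node URL are assumptions; the account string is copied from the docstring example.
from nano import RPCClient

rpc = RPCClient('http://localhost:7076')  # assumed local node RPC endpoint
balances = rpc.delegators(
    account="xrb_1111111111111111111111111111111111111111111111111117353trpda")
for delegator, raw_balance in balances.items():
    # values arrive as ints because the method converts each balance with int()
    print(delegator, raw_balance)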
zalando/patroni | patroni/ha.py | https://github.com/zalando/patroni/blob/f6d29081c90af52064b981cdd877a07338d86038/patroni/ha.py#L1153-L1190 | def handle_starting_instance(self):
"""Starting up PostgreSQL may take a long time. In case we are the leader we may want to
fail over to."""
# Check if we are in startup, when paused defer to main loop for manual failovers.
if not self.state_handler.check_for_startup() or self.is_paused():
self.set_start_timeout(None)
if self.is_paused():
self.state_handler.set_state(self.state_handler.is_running() and 'running' or 'stopped')
return None
# state_handler.state == 'starting' here
if self.has_lock():
if not self.update_lock():
logger.info("Lost lock while starting up. Demoting self.")
self.demote('immediate-nolock')
return 'stopped PostgreSQL while starting up because leader key was lost'
timeout = self._start_timeout or self.patroni.config['master_start_timeout']
time_left = timeout - self.state_handler.time_in_state()
if time_left <= 0:
if self.is_failover_possible(self.cluster.members):
logger.info("Demoting self because master startup is taking too long")
self.demote('immediate')
return 'stopped PostgreSQL because of startup timeout'
else:
return 'master start has timed out, but continuing to wait because failover is not possible'
else:
msg = self.process_manual_failover_from_leader()
if msg is not None:
return msg
return 'PostgreSQL is still starting up, {0:.0f} seconds until timeout'.format(time_left)
else:
# Use normal processing for standbys
logger.info("Still starting up as a standby.")
return None | [
"def",
"handle_starting_instance",
"(",
"self",
")",
":",
"# Check if we are in startup, when paused defer to main loop for manual failovers.",
"if",
"not",
"self",
".",
"state_handler",
".",
"check_for_startup",
"(",
")",
"or",
"self",
".",
"is_paused",
"(",
")",
":",
"self",
".",
"set_start_timeout",
"(",
"None",
")",
"if",
"self",
".",
"is_paused",
"(",
")",
":",
"self",
".",
"state_handler",
".",
"set_state",
"(",
"self",
".",
"state_handler",
".",
"is_running",
"(",
")",
"and",
"'running'",
"or",
"'stopped'",
")",
"return",
"None",
"# state_handler.state == 'starting' here",
"if",
"self",
".",
"has_lock",
"(",
")",
":",
"if",
"not",
"self",
".",
"update_lock",
"(",
")",
":",
"logger",
".",
"info",
"(",
"\"Lost lock while starting up. Demoting self.\"",
")",
"self",
".",
"demote",
"(",
"'immediate-nolock'",
")",
"return",
"'stopped PostgreSQL while starting up because leader key was lost'",
"timeout",
"=",
"self",
".",
"_start_timeout",
"or",
"self",
".",
"patroni",
".",
"config",
"[",
"'master_start_timeout'",
"]",
"time_left",
"=",
"timeout",
"-",
"self",
".",
"state_handler",
".",
"time_in_state",
"(",
")",
"if",
"time_left",
"<=",
"0",
":",
"if",
"self",
".",
"is_failover_possible",
"(",
"self",
".",
"cluster",
".",
"members",
")",
":",
"logger",
".",
"info",
"(",
"\"Demoting self because master startup is taking too long\"",
")",
"self",
".",
"demote",
"(",
"'immediate'",
")",
"return",
"'stopped PostgreSQL because of startup timeout'",
"else",
":",
"return",
"'master start has timed out, but continuing to wait because failover is not possible'",
"else",
":",
"msg",
"=",
"self",
".",
"process_manual_failover_from_leader",
"(",
")",
"if",
"msg",
"is",
"not",
"None",
":",
"return",
"msg",
"return",
"'PostgreSQL is still starting up, {0:.0f} seconds until timeout'",
".",
"format",
"(",
"time_left",
")",
"else",
":",
"# Use normal processing for standbys",
"logger",
".",
"info",
"(",
"\"Still starting up as a standby.\"",
")",
"return",
"None"
] | Starting up PostgreSQL may take a long time. In case we are the leader we may want to
fail over to. | [
"Starting",
"up",
"PostgreSQL",
"may",
"take",
"a",
"long",
"time",
".",
"In",
"case",
"we",
"are",
"the",
"leader",
"we",
"may",
"want",
"to",
"fail",
"over",
"to",
"."
] | python | train |
sigmaris/python-gssapi | gssapi/creds.py | https://github.com/sigmaris/python-gssapi/blob/a8ca577b3ccf9d9fa48f16f4954a1eddd5896236/gssapi/creds.py#L218-L226 | def mechs(self):
"""
The set of mechanisms supported by the credential.
:type: :class:`~gssapi.oids.OIDSet`
"""
if not self._mechs:
self._mechs = self._inquire(False, False, False, True)[3]
return self._mechs | [
"def",
"mechs",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_mechs",
":",
"self",
".",
"_mechs",
"=",
"self",
".",
"_inquire",
"(",
"False",
",",
"False",
",",
"False",
",",
"True",
")",
"[",
"3",
"]",
"return",
"self",
".",
"_mechs"
] | The set of mechanisms supported by the credential.
:type: :class:`~gssapi.oids.OIDSet` | [
"The",
"set",
"of",
"mechanisms",
"supported",
"by",
"the",
"credential",
"."
] | python | test |
quantopian/zipline | zipline/data/hdf5_daily_bars.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L424-L443 | def from_file(cls, h5_file, country_code):
"""
Construct from an h5py.File and a country code.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read.
"""
if h5_file.attrs['version'] != VERSION:
raise ValueError(
'mismatched version: file is of version %s, expected %s' % (
h5_file.attrs['version'],
VERSION,
),
)
return cls(h5_file[country_code]) | [
"def",
"from_file",
"(",
"cls",
",",
"h5_file",
",",
"country_code",
")",
":",
"if",
"h5_file",
".",
"attrs",
"[",
"'version'",
"]",
"!=",
"VERSION",
":",
"raise",
"ValueError",
"(",
"'mismatched version: file is of version %s, expected %s'",
"%",
"(",
"h5_file",
".",
"attrs",
"[",
"'version'",
"]",
",",
"VERSION",
",",
")",
",",
")",
"return",
"cls",
"(",
"h5_file",
"[",
"country_code",
"]",
")"
] | Construct from an h5py.File and a country code.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read. | [
"Construct",
"from",
"an",
"h5py",
".",
"File",
"and",
"a",
"country",
"code",
"."
] | python | train |
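A short sketch of how the classmethod above might be used. The class name HDF5DailyBarReader and the file path are assumptions inferred from the module shown; only the from_file signature comes from the entry itself.
import h5py
from zipline.data.hdf5_daily_bars import HDF5DailyBarReader  # class name assumed from this module

h5_file = h5py.File('daily_bars.h5', 'r')  # hypothetical pricing file
reader = HDF5DailyBarReader.from_file(h5_file, country_code='US')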
twilio/twilio-python | twilio/rest/autopilot/v1/assistant/task/sample.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/task/sample.py#L63-L80 | def list(self, language=values.unset, limit=None, page_size=None):
"""
Lists SampleInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance]
"""
return list(self.stream(language=language, limit=limit, page_size=page_size, )) | [
"def",
"list",
"(",
"self",
",",
"language",
"=",
"values",
".",
"unset",
",",
"limit",
"=",
"None",
",",
"page_size",
"=",
"None",
")",
":",
"return",
"list",
"(",
"self",
".",
"stream",
"(",
"language",
"=",
"language",
",",
"limit",
"=",
"limit",
",",
"page_size",
"=",
"page_size",
",",
")",
")"
] | Lists SampleInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance] | [
"Lists",
"SampleInstance",
"records",
"from",
"the",
"API",
"as",
"a",
"list",
".",
"Unlike",
"stream",
"()",
"this",
"operation",
"is",
"eager",
"and",
"will",
"load",
"limit",
"records",
"into",
"memory",
"before",
"returning",
"."
] | python | train |
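A hedged usage sketch for the list() method above. The credentials and SIDs are placeholders, and the resource chain (assistants -> tasks -> samples) is an assumption based on the file path autopilot/v1/assistant/task/sample.py.
from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')  # placeholder credentials
samples = client.autopilot \
    .assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    .tasks('UDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    .samples \
    .list(language='en-US', limit=20)
for sample in samples:
    print(sample.sid)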
Azure/azure-storage-python | azure-storage-queue/azure/storage/queue/queueservice.py | https://github.com/Azure/azure-storage-python/blob/52327354b192cbcf6b7905118ec6b5d57fa46275/azure-storage-queue/azure/storage/queue/queueservice.py#L586-L611 | def get_queue_metadata(self, queue_name, timeout=None):
'''
Retrieves user-defined metadata and queue properties on the specified
queue. Metadata is associated with the queue as name-value pairs.
:param str queue_name:
The name of an existing queue.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A dictionary representing the queue metadata with an
approximate_message_count int property on the dict estimating the
number of messages in the queue.
:rtype: dict(str, str)
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = _get_path(queue_name)
request.query = {
'comp': 'metadata',
'timeout': _int_to_str(timeout),
}
return self._perform_request(request, _parse_metadata_and_message_count) | [
"def",
"get_queue_metadata",
"(",
"self",
",",
"queue_name",
",",
"timeout",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'queue_name'",
",",
"queue_name",
")",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",
"=",
"'GET'",
"request",
".",
"host_locations",
"=",
"self",
".",
"_get_host_locations",
"(",
"secondary",
"=",
"True",
")",
"request",
".",
"path",
"=",
"_get_path",
"(",
"queue_name",
")",
"request",
".",
"query",
"=",
"{",
"'comp'",
":",
"'metadata'",
",",
"'timeout'",
":",
"_int_to_str",
"(",
"timeout",
")",
",",
"}",
"return",
"self",
".",
"_perform_request",
"(",
"request",
",",
"_parse_metadata_and_message_count",
")"
] | Retrieves user-defined metadata and queue properties on the specified
queue. Metadata is associated with the queue as name-value pairs.
:param str queue_name:
The name of an existing queue.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A dictionary representing the queue metadata with an
approximate_message_count int property on the dict estimating the
number of messages in the queue.
:rtype: dict(str, str) | [
"Retrieves",
"user",
"-",
"defined",
"metadata",
"and",
"queue",
"properties",
"on",
"the",
"specified",
"queue",
".",
"Metadata",
"is",
"associated",
"with",
"the",
"queue",
"as",
"name",
"-",
"value",
"pairs",
"."
] | python | train |
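A minimal sketch of calling get_queue_metadata(); the account name, key and queue name are placeholders, and the approximate_message_count lookup follows the docstring above.
from azure.storage.queue import QueueService

queue_service = QueueService(account_name='myaccount', account_key='mykey')  # placeholder credentials
metadata = queue_service.get_queue_metadata('taskqueue')  # placeholder queue name
print(metadata.approximate_message_count)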
lowandrew/OLCTools | spadespipeline/legacy_vtyper.py | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/legacy_vtyper.py#L399-L419 | def filer(args):
"""
Create metadata objects with necessary attributes for each FASTA file found in the sequence path
:param args: Argument parser object with necessary variables
:return: samples: List of metadata objects
"""
# List to store all the metadata objects
samples = list()
# Find all the sequence files in the path
fastas = sorted(glob(os.path.join(args.sequencepath, '*.fa*')))
for fasta in fastas:
# Create a metadata object for each sample
metadata = MetadataObject()
# Populate the metadata object with the required attributes
metadata.name = os.path.splitext(os.path.basename(fasta))[0]
metadata.general = GenObject()
metadata.commands = GenObject()
metadata.general.bestassemblyfile = fasta
metadata.general.outputdirectory = os.path.join(args.sequencepath, metadata.name)
samples.append(metadata)
return samples | [
"def",
"filer",
"(",
"args",
")",
":",
"# List to store all the metadata objects",
"samples",
"=",
"list",
"(",
")",
"# Find all the sequence files in the path",
"fastas",
"=",
"sorted",
"(",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"sequencepath",
",",
"'*.fa*'",
")",
")",
")",
"for",
"fasta",
"in",
"fastas",
":",
"# Create a metadata object for each sample",
"metadata",
"=",
"MetadataObject",
"(",
")",
"# Populate the metadata object with the required attributes",
"metadata",
".",
"name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"fasta",
")",
")",
"[",
"0",
"]",
"metadata",
".",
"general",
"=",
"GenObject",
"(",
")",
"metadata",
".",
"commands",
"=",
"GenObject",
"(",
")",
"metadata",
".",
"general",
".",
"bestassemblyfile",
"=",
"fasta",
"metadata",
".",
"general",
".",
"outputdirectory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"sequencepath",
",",
"metadata",
".",
"name",
")",
"samples",
".",
"append",
"(",
"metadata",
")",
"return",
"samples"
] | Create metadata objects with necessary attributes for each FASTA file found in the sequence path
:param args: Argument parser object with necessary variables
:return: samples: List of metadata objects | [
"Create",
"metadata",
"objects",
"with",
"necessary",
"attributes",
"for",
"each",
"FASTA",
"file",
"found",
"in",
"the",
"sequence",
"path",
":",
"param",
"args",
":",
"Argument",
"parser",
"object",
"with",
"necessary",
"variables",
":",
"return",
":",
"samples",
":",
"List",
"of",
"metadata",
"objects"
] | python | train |
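A small usage sketch for filer(). The import path mirrors the file path shown above but may differ in the installed package, and the sequence directory is hypothetical; filer() only reads args.sequencepath, so a bare Namespace is enough.
from argparse import Namespace
from spadespipeline.legacy_vtyper import filer  # import path assumed from the file path above

args = Namespace(sequencepath='/data/assemblies')  # any directory containing *.fa / *.fasta files
samples = filer(args)
for sample in samples:
    print(sample.name, sample.general.bestassemblyfile)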
mojaie/chorus | chorus/rdkit.py | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/rdkit.py#L20-L39 | def to_rdmol(mol):
"""Convert molecule to RDMol"""
rwmol = Chem.RWMol(Chem.MolFromSmiles(''))
key_to_idx = {}
bond_type = {1: Chem.BondType.SINGLE,
2: Chem.BondType.DOUBLE,
3: Chem.BondType.TRIPLE}
conf = Chem.Conformer(rwmol.GetNumAtoms())
for k, a in mol.atoms_iter():
i = rwmol.AddAtom(Chem.Atom(atom_number(a.symbol)))
key_to_idx[k] = i
conf.SetAtomPosition(i, a.coords)
rwmol.AddConformer(conf)
for u, v, b in mol.bonds_iter():
ui = key_to_idx[u]
vi = key_to_idx[v]
rwmol.AddBond(ui, vi, bond_type[b.order])
Chem.GetSSSR(rwmol) # Ring recognition is required for fingerprint
rwmol.UpdatePropertyCache(strict=False)
return rwmol.GetMol() | [
"def",
"to_rdmol",
"(",
"mol",
")",
":",
"rwmol",
"=",
"Chem",
".",
"RWMol",
"(",
"Chem",
".",
"MolFromSmiles",
"(",
"''",
")",
")",
"key_to_idx",
"=",
"{",
"}",
"bond_type",
"=",
"{",
"1",
":",
"Chem",
".",
"BondType",
".",
"SINGLE",
",",
"2",
":",
"Chem",
".",
"BondType",
".",
"DOUBLE",
",",
"3",
":",
"Chem",
".",
"BondType",
".",
"TRIPLE",
"}",
"conf",
"=",
"Chem",
".",
"Conformer",
"(",
"rwmol",
".",
"GetNumAtoms",
"(",
")",
")",
"for",
"k",
",",
"a",
"in",
"mol",
".",
"atoms_iter",
"(",
")",
":",
"i",
"=",
"rwmol",
".",
"AddAtom",
"(",
"Chem",
".",
"Atom",
"(",
"atom_number",
"(",
"a",
".",
"symbol",
")",
")",
")",
"key_to_idx",
"[",
"k",
"]",
"=",
"i",
"conf",
".",
"SetAtomPosition",
"(",
"i",
",",
"a",
".",
"coords",
")",
"rwmol",
".",
"AddConformer",
"(",
"conf",
")",
"for",
"u",
",",
"v",
",",
"b",
"in",
"mol",
".",
"bonds_iter",
"(",
")",
":",
"ui",
"=",
"key_to_idx",
"[",
"u",
"]",
"vi",
"=",
"key_to_idx",
"[",
"v",
"]",
"rwmol",
".",
"AddBond",
"(",
"ui",
",",
"vi",
",",
"bond_type",
"[",
"b",
".",
"order",
"]",
")",
"Chem",
".",
"GetSSSR",
"(",
"rwmol",
")",
"# Ring recognition is required for fingerprint",
"rwmol",
".",
"UpdatePropertyCache",
"(",
"strict",
"=",
"False",
")",
"return",
"rwmol",
".",
"GetMol",
"(",
")"
] | Convert molecule to RDMol | [
"Convert",
"molecule",
"to",
"RDMol"
] | python | train |
moonso/extract_vcf | extract_vcf/config_parser.py | https://github.com/moonso/extract_vcf/blob/c8381b362fa6734cd2ee65ef260738868d981aaf/extract_vcf/config_parser.py#L196-L290 | def check_plugin(self, plugin):
"""
Check if the section is in the proper format vcf format.
Args:
vcf_section (dict): The information from a vcf section
Returns:
True if it is in the proper format
"""
vcf_section = self[plugin]
try:
vcf_field = vcf_section['field']
if not vcf_field in self.vcf_columns:
raise ValidateError(
"field has to be in {0}\n"
"Wrong field name in plugin: {1}".format(
self.vcf_columns, plugin
))
if vcf_field == 'INFO':
try:
info_key = vcf_section['info_key']
if info_key == 'CSQ':
try:
csq_key = vcf_section['csq_key']
except KeyError:
raise ValidateError(
"CSQ entrys has to refer to an csq field.\n"
"Refer with keyword 'csq_key'\n"
"csq_key is missing in section: {0}".format(
plugin
)
)
except KeyError:
raise ValidateError(
"INFO entrys has to refer to an INFO field.\n"
"Refer with keyword 'info_key'\n"
"info_key is missing in section: {0}".format(
plugin
)
)
except KeyError:
raise ValidateError(
"Vcf entrys have to refer to a field in the VCF with keyword"
" 'field'.\nMissing keyword 'field' in plugin: {0}".format(
plugin
))
try:
data_type = vcf_section['data_type']
if not data_type in self.data_types:
raise ValidateError(
"data_type has to be in {0}\n"
"Wrong data_type in plugin: {1}".format(
self.data_types, plugin)
)
except KeyError:
raise ValidateError(
"Vcf entrys have to refer to a data type in the VCF with "
"keyword 'data_type'.\n"
"Missing data_type in plugin: {0}".format(plugin)
)
separators = vcf_section.get('separators', None)
if separators:
if len(separators) == 1:
self[plugin]['separators'] = list(separators)
else:
if data_type != 'flag':
raise ValidateError(
"If data_type != flag the separators have to be defined"
"Missing separators in plugin: {0}".format(plugin)
)
record_rule = vcf_section.get('record_rule', None)
if record_rule:
if not record_rule in ['min', 'max']:
raise ValidateError(
"Record rules have to be in {0}\n"
"Wrong record_rule in plugin: {1}".format(
['min', 'max'], plugin)
)
else:
self.logger.info("Setting record rule to default: 'max'")
return True | [
"def",
"check_plugin",
"(",
"self",
",",
"plugin",
")",
":",
"vcf_section",
"=",
"self",
"[",
"plugin",
"]",
"try",
":",
"vcf_field",
"=",
"vcf_section",
"[",
"'field'",
"]",
"if",
"not",
"vcf_field",
"in",
"self",
".",
"vcf_columns",
":",
"raise",
"ValidateError",
"(",
"\"field has to be in {0}\\n\"",
"\"Wrong field name in plugin: {1}\"",
".",
"format",
"(",
"self",
".",
"vcf_columns",
",",
"plugin",
")",
")",
"if",
"vcf_field",
"==",
"'INFO'",
":",
"try",
":",
"info_key",
"=",
"vcf_section",
"[",
"'info_key'",
"]",
"if",
"info_key",
"==",
"'CSQ'",
":",
"try",
":",
"csq_key",
"=",
"vcf_section",
"[",
"'csq_key'",
"]",
"except",
"KeyError",
":",
"raise",
"ValidateError",
"(",
"\"CSQ entrys has to refer to an csq field.\\n\"",
"\"Refer with keyword 'csq_key'\\n\"",
"\"csq_key is missing in section: {0}\"",
".",
"format",
"(",
"plugin",
")",
")",
"except",
"KeyError",
":",
"raise",
"ValidateError",
"(",
"\"INFO entrys has to refer to an INFO field.\\n\"",
"\"Refer with keyword 'info_key'\\n\"",
"\"info_key is missing in section: {0}\"",
".",
"format",
"(",
"plugin",
")",
")",
"except",
"KeyError",
":",
"raise",
"ValidateError",
"(",
"\"Vcf entrys have to refer to a field in the VCF with keyword\"",
"\" 'field'.\\nMissing keyword 'field' in plugin: {0}\"",
".",
"format",
"(",
"plugin",
")",
")",
"try",
":",
"data_type",
"=",
"vcf_section",
"[",
"'data_type'",
"]",
"if",
"not",
"data_type",
"in",
"self",
".",
"data_types",
":",
"raise",
"ValidateError",
"(",
"\"data_type has to be in {0}\\n\"",
"\"Wrong data_type in plugin: {1}\"",
".",
"format",
"(",
"self",
".",
"data_types",
",",
"plugin",
")",
")",
"except",
"KeyError",
":",
"raise",
"ValidateError",
"(",
"\"Vcf entrys have to refer to a data type in the VCF with \"",
"\"keyword 'data_type'.\\n\"",
"\"Missing data_type in plugin: {0}\"",
".",
"format",
"(",
"plugin",
")",
")",
"separators",
"=",
"vcf_section",
".",
"get",
"(",
"'separators'",
",",
"None",
")",
"if",
"separators",
":",
"if",
"len",
"(",
"separators",
")",
"==",
"1",
":",
"self",
"[",
"plugin",
"]",
"[",
"'separators'",
"]",
"=",
"list",
"(",
"separators",
")",
"else",
":",
"if",
"data_type",
"!=",
"'flag'",
":",
"raise",
"ValidateError",
"(",
"\"If data_type != flag the separators have to be defined\"",
"\"Missing separators in plugin: {0}\"",
".",
"format",
"(",
"plugin",
")",
")",
"record_rule",
"=",
"vcf_section",
".",
"get",
"(",
"'record_rule'",
",",
"None",
")",
"if",
"record_rule",
":",
"if",
"not",
"record_rule",
"in",
"[",
"'min'",
",",
"'max'",
"]",
":",
"raise",
"ValidateError",
"(",
"\"Record rules have to be in {0}\\n\"",
"\"Wrong record_rule in plugin: {1}\"",
".",
"format",
"(",
"[",
"'min'",
",",
"'max'",
"]",
",",
"plugin",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Setting record rule to default: 'max'\"",
")",
"return",
"True"
] | Check if the section is in the proper format vcf format.
Args:
vcf_section (dict): The information from a vcf section
Returns:
True if it is in the proper format | [
"Check",
"if",
"the",
"section",
"is",
"in",
"the",
"proper",
"format",
"vcf",
"format",
"."
] | python | train |
OCHA-DAP/hdx-python-api | src/hdx/data/dataset.py | https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/dataset.py#L753-L767 | def get_all_resources(datasets):
        # type: (List['Dataset']) -> List[hdx.data.resource.Resource]
        """Get all resources from a list of datasets (such as returned by search)
        Args:
            datasets (List[Dataset]): list of datasets
        Returns:
            List[hdx.data.resource.Resource]: list of resources within those datasets
        """
        resources = []
        for dataset in datasets:
            for resource in dataset.get_resources():
                resources.append(resource)
        return resources | [
"def",
"get_all_resources",
"(",
"datasets",
")",
":",
"# type: (List['Dataset']) -> List[hdx.data.resource.Resource]",
"resources",
"=",
"[",
"]",
"for",
"dataset",
"in",
"datasets",
":",
"for",
"resource",
"in",
"dataset",
".",
"get_resources",
"(",
")",
":",
"resources",
".",
"append",
"(",
"resource",
")",
"return",
"resources"
] | Get all resources from a list of datasets (such as returned by search)
Args:
datasets (List[Dataset]): list of datasets
Returns:
List[hdx.data.resource.Resource]: list of resources within those datasets | [
"Get",
"all",
"resources",
"from",
"a",
"list",
"of",
"datasets",
"(",
"such",
"as",
"returned",
"by",
"search",
")"
] | python | train |
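A hedged sketch combining get_all_resources() with a search, as suggested by its docstring. Configuration setup (API key, HDX site) is omitted, and 'ACLED' is only an illustrative query.
from hdx.data.dataset import Dataset

# assumes hdx's Configuration.create(...) has already been called
datasets = Dataset.search_in_hdx('ACLED')
resources = Dataset.get_all_resources(datasets)
print(len(resources))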
bitesofcode/projexui | projexui/widgets/xorbrecordbox.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbrecordbox.py#L125-L142 | def addRecord(self, record):
"""
Adds the given record to the system.
:param record | <str>
"""
label_mapper = self.labelMapper()
icon_mapper = self.iconMapper()
self.addItem(label_mapper(record))
self.setItemData(self.count() - 1, wrapVariant(record), Qt.UserRole)
# load icon
if icon_mapper:
self.setItemIcon(self.count() - 1, icon_mapper(record))
if self.showTreePopup():
XOrbRecordItem(self.treePopupWidget(), record) | [
"def",
"addRecord",
"(",
"self",
",",
"record",
")",
":",
"label_mapper",
"=",
"self",
".",
"labelMapper",
"(",
")",
"icon_mapper",
"=",
"self",
".",
"iconMapper",
"(",
")",
"self",
".",
"addItem",
"(",
"label_mapper",
"(",
"record",
")",
")",
"self",
".",
"setItemData",
"(",
"self",
".",
"count",
"(",
")",
"-",
"1",
",",
"wrapVariant",
"(",
"record",
")",
",",
"Qt",
".",
"UserRole",
")",
"# load icon\r",
"if",
"icon_mapper",
":",
"self",
".",
"setItemIcon",
"(",
"self",
".",
"count",
"(",
")",
"-",
"1",
",",
"icon_mapper",
"(",
"record",
")",
")",
"if",
"self",
".",
"showTreePopup",
"(",
")",
":",
"XOrbRecordItem",
"(",
"self",
".",
"treePopupWidget",
"(",
")",
",",
"record",
")"
] | Adds the given record to the system.
:param record | <str> | [
"Adds",
"the",
"given",
"record",
"to",
"the",
"system",
".",
":",
"param",
"record",
"|",
"<str",
">"
] | python | train |
ansible/tower-cli | tower_cli/resources/host.py | https://github.com/ansible/tower-cli/blob/a2b151fed93c47725018d3034848cb3a1814bed7/tower_cli/resources/host.py#L69-L86 | def list_facts(self, pk=None, **kwargs):
"""Return a JSON object of all available facts of the given host.
Note global option --format is not available here, as the output would always be JSON-formatted.
=====API DOCS=====
List all available facts of the given host.
:param pk: Primary key of the target host.
:type pk: int
:param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
:returns: A JSON object of all available facts of the given host.
:rtype: dict
=====API DOCS=====
"""
res = self.get(pk=pk, **kwargs)
url = self.endpoint + '%d/%s/' % (res['id'], 'ansible_facts')
return client.get(url, params={}).json() | [
"def",
"list_facts",
"(",
"self",
",",
"pk",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"res",
"=",
"self",
".",
"get",
"(",
"pk",
"=",
"pk",
",",
"*",
"*",
"kwargs",
")",
"url",
"=",
"self",
".",
"endpoint",
"+",
"'%d/%s/'",
"%",
"(",
"res",
"[",
"'id'",
"]",
",",
"'ansible_facts'",
")",
"return",
"client",
".",
"get",
"(",
"url",
",",
"params",
"=",
"{",
"}",
")",
".",
"json",
"(",
")"
] | Return a JSON object of all available facts of the given host.
Note global option --format is not available here, as the output would always be JSON-formatted.
=====API DOCS=====
List all available facts of the given host.
:param pk: Primary key of the target host.
:type pk: int
:param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
:returns: A JSON object of all available facts of the given host.
:rtype: dict
=====API DOCS===== | [
"Return",
"a",
"JSON",
"object",
"of",
"all",
"available",
"facts",
"of",
"the",
"given",
"host",
"."
] | python | valid |
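A brief sketch for list_facts(); the host primary key is a placeholder and a working tower-cli configuration (host, credentials) is assumed to exist already.
from tower_cli import get_resource

host_resource = get_resource('host')
facts = host_resource.list_facts(pk=42)  # 42 is a placeholder host primary key
print(facts.get('ansible_hostname'))     # key availability depends on the gathered facts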
pyviz/holoviews | holoviews/core/boundingregion.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/boundingregion.py#L303-L308 | def centroid(self):
"""
Return the centroid of the rectangle.
"""
left, bottom, right, top = self.lbrt()
return (right + left) / 2.0, (top + bottom) / 2.0 | [
"def",
"centroid",
"(",
"self",
")",
":",
"left",
",",
"bottom",
",",
"right",
",",
"top",
"=",
"self",
".",
"lbrt",
"(",
")",
"return",
"(",
"right",
"+",
"left",
")",
"/",
"2.0",
",",
"(",
"top",
"+",
"bottom",
")",
"/",
"2.0"
] | Return the centroid of the rectangle. | [
"Return",
"the",
"centroid",
"of",
"the",
"rectangle",
"."
] | python | train |
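A worked example of the arithmetic in centroid(), using made-up bounds:
# If lbrt() returns (left, bottom, right, top) == (0.0, 0.0, 4.0, 2.0):
#   x centre = (4.0 + 0.0) / 2.0 = 2.0
#   y centre = (2.0 + 0.0) / 2.0 = 1.0
# so centroid() returns (2.0, 1.0)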
adamrehn/ue4cli | ue4cli/UnrealManagerBase.py | https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/UnrealManagerBase.py#L228-L244 | def getThirdPartyLibCmakeFlags(self, libs):
"""
Retrieves the CMake invocation flags for building against the Unreal-bundled versions of the specified third-party libraries
"""
fmt = PrintingFormat.singleLine()
if libs[0] == '--multiline':
fmt = PrintingFormat.multiLine()
libs = libs[1:]
platformDefaults = True
if libs[0] == '--nodefaults':
platformDefaults = False
libs = libs[1:]
details = self.getThirdpartyLibs(libs, includePlatformDefaults=platformDefaults)
CMakeCustomFlags.processLibraryDetails(details)
return details.getCMakeFlags(self.getEngineRoot(), fmt) | [
"def",
"getThirdPartyLibCmakeFlags",
"(",
"self",
",",
"libs",
")",
":",
"fmt",
"=",
"PrintingFormat",
".",
"singleLine",
"(",
")",
"if",
"libs",
"[",
"0",
"]",
"==",
"'--multiline'",
":",
"fmt",
"=",
"PrintingFormat",
".",
"multiLine",
"(",
")",
"libs",
"=",
"libs",
"[",
"1",
":",
"]",
"platformDefaults",
"=",
"True",
"if",
"libs",
"[",
"0",
"]",
"==",
"'--nodefaults'",
":",
"platformDefaults",
"=",
"False",
"libs",
"=",
"libs",
"[",
"1",
":",
"]",
"details",
"=",
"self",
".",
"getThirdpartyLibs",
"(",
"libs",
",",
"includePlatformDefaults",
"=",
"platformDefaults",
")",
"CMakeCustomFlags",
".",
"processLibraryDetails",
"(",
"details",
")",
"return",
"details",
".",
"getCMakeFlags",
"(",
"self",
".",
"getEngineRoot",
"(",
")",
",",
"fmt",
")"
] | Retrieves the CMake invocation flags for building against the Unreal-bundled versions of the specified third-party libraries | [
"Retrieves",
"the",
"CMake",
"invocation",
"flags",
"for",
"building",
"against",
"the",
"Unreal",
"-",
"bundled",
"versions",
"of",
"the",
"specified",
"third",
"-",
"party",
"libraries"
] | python | train |
arviz-devs/arviz | arviz/plots/forestplot.py | https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/plots/forestplot.py#L270-L283 | def make_plotters(self):
"""Initialize an object for each variable to be plotted."""
plotters, y = {}, 0
for var_name in self.var_names:
plotters[var_name] = VarHandler(
var_name,
self.data,
y,
model_names=self.model_names,
combined=self.combined,
colors=self.colors,
)
y = plotters[var_name].y_max()
return plotters | [
"def",
"make_plotters",
"(",
"self",
")",
":",
"plotters",
",",
"y",
"=",
"{",
"}",
",",
"0",
"for",
"var_name",
"in",
"self",
".",
"var_names",
":",
"plotters",
"[",
"var_name",
"]",
"=",
"VarHandler",
"(",
"var_name",
",",
"self",
".",
"data",
",",
"y",
",",
"model_names",
"=",
"self",
".",
"model_names",
",",
"combined",
"=",
"self",
".",
"combined",
",",
"colors",
"=",
"self",
".",
"colors",
",",
")",
"y",
"=",
"plotters",
"[",
"var_name",
"]",
".",
"y_max",
"(",
")",
"return",
"plotters"
] | Initialize an object for each variable to be plotted. | [
"Initialize",
"an",
"object",
"for",
"each",
"variable",
"to",
"be",
"plotted",
"."
] | python | train |
gem/oq-engine | openquake/hazardlib/sourceconverter.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/sourceconverter.py#L548-L595 | def convert_mfdist(self, node):
"""
Convert the given node into a Magnitude-Frequency Distribution
object.
:param node: a node of kind incrementalMFD or truncGutenbergRichterMFD
:returns: a :class:`openquake.hazardlib.mfd.EvenlyDiscretizedMFD.` or
:class:`openquake.hazardlib.mfd.TruncatedGRMFD` instance
"""
with context(self.fname, node):
[mfd_node] = [subnode for subnode in node
if subnode.tag.endswith(
('incrementalMFD', 'truncGutenbergRichterMFD',
'arbitraryMFD', 'YoungsCoppersmithMFD',
'multiMFD'))]
if mfd_node.tag.endswith('incrementalMFD'):
return mfd.EvenlyDiscretizedMFD(
min_mag=mfd_node['minMag'], bin_width=mfd_node['binWidth'],
occurrence_rates=~mfd_node.occurRates)
elif mfd_node.tag.endswith('truncGutenbergRichterMFD'):
return mfd.TruncatedGRMFD(
a_val=mfd_node['aValue'], b_val=mfd_node['bValue'],
min_mag=mfd_node['minMag'], max_mag=mfd_node['maxMag'],
bin_width=self.width_of_mfd_bin)
elif mfd_node.tag.endswith('arbitraryMFD'):
return mfd.ArbitraryMFD(
magnitudes=~mfd_node.magnitudes,
occurrence_rates=~mfd_node.occurRates)
elif mfd_node.tag.endswith('YoungsCoppersmithMFD'):
if "totalMomentRate" in mfd_node.attrib.keys():
# Return Youngs & Coppersmith from the total moment rate
return mfd.YoungsCoppersmith1985MFD.from_total_moment_rate(
min_mag=mfd_node["minMag"], b_val=mfd_node["bValue"],
char_mag=mfd_node["characteristicMag"],
total_moment_rate=mfd_node["totalMomentRate"],
bin_width=mfd_node["binWidth"])
elif "characteristicRate" in mfd_node.attrib.keys():
# Return Youngs & Coppersmith from the characteristic rate
return mfd.YoungsCoppersmith1985MFD.\
from_characteristic_rate(
min_mag=mfd_node["minMag"],
b_val=mfd_node["bValue"],
char_mag=mfd_node["characteristicMag"],
char_rate=mfd_node["characteristicRate"],
bin_width=mfd_node["binWidth"])
elif mfd_node.tag.endswith('multiMFD'):
return mfd.multi_mfd.MultiMFD.from_node(
mfd_node, self.width_of_mfd_bin) | [
"def",
"convert_mfdist",
"(",
"self",
",",
"node",
")",
":",
"with",
"context",
"(",
"self",
".",
"fname",
",",
"node",
")",
":",
"[",
"mfd_node",
"]",
"=",
"[",
"subnode",
"for",
"subnode",
"in",
"node",
"if",
"subnode",
".",
"tag",
".",
"endswith",
"(",
"(",
"'incrementalMFD'",
",",
"'truncGutenbergRichterMFD'",
",",
"'arbitraryMFD'",
",",
"'YoungsCoppersmithMFD'",
",",
"'multiMFD'",
")",
")",
"]",
"if",
"mfd_node",
".",
"tag",
".",
"endswith",
"(",
"'incrementalMFD'",
")",
":",
"return",
"mfd",
".",
"EvenlyDiscretizedMFD",
"(",
"min_mag",
"=",
"mfd_node",
"[",
"'minMag'",
"]",
",",
"bin_width",
"=",
"mfd_node",
"[",
"'binWidth'",
"]",
",",
"occurrence_rates",
"=",
"~",
"mfd_node",
".",
"occurRates",
")",
"elif",
"mfd_node",
".",
"tag",
".",
"endswith",
"(",
"'truncGutenbergRichterMFD'",
")",
":",
"return",
"mfd",
".",
"TruncatedGRMFD",
"(",
"a_val",
"=",
"mfd_node",
"[",
"'aValue'",
"]",
",",
"b_val",
"=",
"mfd_node",
"[",
"'bValue'",
"]",
",",
"min_mag",
"=",
"mfd_node",
"[",
"'minMag'",
"]",
",",
"max_mag",
"=",
"mfd_node",
"[",
"'maxMag'",
"]",
",",
"bin_width",
"=",
"self",
".",
"width_of_mfd_bin",
")",
"elif",
"mfd_node",
".",
"tag",
".",
"endswith",
"(",
"'arbitraryMFD'",
")",
":",
"return",
"mfd",
".",
"ArbitraryMFD",
"(",
"magnitudes",
"=",
"~",
"mfd_node",
".",
"magnitudes",
",",
"occurrence_rates",
"=",
"~",
"mfd_node",
".",
"occurRates",
")",
"elif",
"mfd_node",
".",
"tag",
".",
"endswith",
"(",
"'YoungsCoppersmithMFD'",
")",
":",
"if",
"\"totalMomentRate\"",
"in",
"mfd_node",
".",
"attrib",
".",
"keys",
"(",
")",
":",
"# Return Youngs & Coppersmith from the total moment rate",
"return",
"mfd",
".",
"YoungsCoppersmith1985MFD",
".",
"from_total_moment_rate",
"(",
"min_mag",
"=",
"mfd_node",
"[",
"\"minMag\"",
"]",
",",
"b_val",
"=",
"mfd_node",
"[",
"\"bValue\"",
"]",
",",
"char_mag",
"=",
"mfd_node",
"[",
"\"characteristicMag\"",
"]",
",",
"total_moment_rate",
"=",
"mfd_node",
"[",
"\"totalMomentRate\"",
"]",
",",
"bin_width",
"=",
"mfd_node",
"[",
"\"binWidth\"",
"]",
")",
"elif",
"\"characteristicRate\"",
"in",
"mfd_node",
".",
"attrib",
".",
"keys",
"(",
")",
":",
"# Return Youngs & Coppersmith from the total moment rate",
"return",
"mfd",
".",
"YoungsCoppersmith1985MFD",
".",
"from_characteristic_rate",
"(",
"min_mag",
"=",
"mfd_node",
"[",
"\"minMag\"",
"]",
",",
"b_val",
"=",
"mfd_node",
"[",
"\"bValue\"",
"]",
",",
"char_mag",
"=",
"mfd_node",
"[",
"\"characteristicMag\"",
"]",
",",
"char_rate",
"=",
"mfd_node",
"[",
"\"characteristicRate\"",
"]",
",",
"bin_width",
"=",
"mfd_node",
"[",
"\"binWidth\"",
"]",
")",
"elif",
"mfd_node",
".",
"tag",
".",
"endswith",
"(",
"'multiMFD'",
")",
":",
"return",
"mfd",
".",
"multi_mfd",
".",
"MultiMFD",
".",
"from_node",
"(",
"mfd_node",
",",
"self",
".",
"width_of_mfd_bin",
")"
] | Convert the given node into a Magnitude-Frequency Distribution
object.
:param node: a node of kind incrementalMFD or truncGutenbergRichterMFD
:returns: a :class:`openquake.hazardlib.mfd.EvenlyDiscretizedMFD.` or
:class:`openquake.hazardlib.mfd.TruncatedGRMFD` instance | [
"Convert",
"the",
"given",
"node",
"into",
"a",
"Magnitude",
"-",
"Frequency",
"Distribution",
"object",
"."
] | python | train |
heuer/segno | segno/utils.py | https://github.com/heuer/segno/blob/64d912a2bd17d0b5ff3e8b5d37098edfc663c2b3/segno/utils.py#L123-L149 | def matrix_iter(matrix, version, scale=1, border=None):
"""\
Returns an iterator / generator over the provided matrix which includes
the border and the scaling factor.
If either the `scale` or `border` value is invalid, a :py:exc:`ValueError`
is raised.
:param matrix: An iterable of bytearrays.
:param int version: A version constant.
:param int scale: The scaling factor (default: ``1``).
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:raises: :py:exc:`ValueError` if an illegal scale or border value is provided
"""
check_valid_border(border)
scale = int(scale)
check_valid_scale(scale)
border = get_border(version, border)
width, height = get_symbol_size(version, scale=1, border=0)
def get_bit(i, j):
return 0x1 if (0 <= i < height and 0 <= j < width and matrix[i][j]) else 0x0
for i in range(-border, height + border):
for s in range(scale):
yield chain.from_iterable(([get_bit(i, j)] * scale for j in range(-border, width + border))) | [
"def",
"matrix_iter",
"(",
"matrix",
",",
"version",
",",
"scale",
"=",
"1",
",",
"border",
"=",
"None",
")",
":",
"check_valid_border",
"(",
"border",
")",
"scale",
"=",
"int",
"(",
"scale",
")",
"check_valid_scale",
"(",
"scale",
")",
"border",
"=",
"get_border",
"(",
"version",
",",
"border",
")",
"width",
",",
"height",
"=",
"get_symbol_size",
"(",
"version",
",",
"scale",
"=",
"1",
",",
"border",
"=",
"0",
")",
"def",
"get_bit",
"(",
"i",
",",
"j",
")",
":",
"return",
"0x1",
"if",
"(",
"0",
"<=",
"i",
"<",
"height",
"and",
"0",
"<=",
"j",
"<",
"width",
"and",
"matrix",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"else",
"0x0",
"for",
"i",
"in",
"range",
"(",
"-",
"border",
",",
"height",
"+",
"border",
")",
":",
"for",
"s",
"in",
"range",
"(",
"scale",
")",
":",
"yield",
"chain",
".",
"from_iterable",
"(",
"(",
"[",
"get_bit",
"(",
"i",
",",
"j",
")",
"]",
"*",
"scale",
"for",
"j",
"in",
"range",
"(",
"-",
"border",
",",
"width",
"+",
"border",
")",
")",
")"
] | \
Returns an iterator / generator over the provided matrix which includes
the border and the scaling factor.
If either the `scale` or `border` value is invalid, a :py:exc:`ValueError`
is raised.
:param matrix: An iterable of bytearrays.
:param int version: A version constant.
:param int scale: The scaling factor (default: ``1``).
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:raises: :py:exc:`ValueError` if an illegal scale or border value is provided | [
"\\",
"Returns",
"an",
"iterator",
"/",
"generator",
"over",
"the",
"provided",
"matrix",
"which",
"includes",
"the",
"border",
"and",
"the",
"scaling",
"factor",
"."
] | python | train |
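A small sketch of the helper above driven through segno's public API; qr.matrix_iter() is assumed to delegate to this function, and the payload text is arbitrary.
import segno

qr = segno.make('Hello world')
for row in qr.matrix_iter(scale=1, border=4):
    print(''.join('#' if bit else ' ' for bit in row))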
peterbrittain/asciimatics | asciimatics/renderers.py | https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/renderers.py#L226-L232 | def _clear(self):
"""
Clear the current image.
"""
self._plain_image = [" " * self._width for _ in range(self._height)]
self._colour_map = [[(None, 0, 0) for _ in range(self._width)]
for _ in range(self._height)] | [
"def",
"_clear",
"(",
"self",
")",
":",
"self",
".",
"_plain_image",
"=",
"[",
"\" \"",
"*",
"self",
".",
"_width",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"_height",
")",
"]",
"self",
".",
"_colour_map",
"=",
"[",
"[",
"(",
"None",
",",
"0",
",",
"0",
")",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"_width",
")",
"]",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"_height",
")",
"]"
] | Clear the current image. | [
"Clear",
"the",
"current",
"image",
"."
] | python | train |
adamfast/faadata | faadata/airports/load.py | https://github.com/adamfast/faadata/blob/3c7d651b28160b7cb24724f67ebffd6bd0b490b9/faadata/airports/load.py#L9-L18 | def clean_chars(value):
"Hack to remove non-ASCII data. Should convert to Unicode: code page 437?"
value = value.replace('\xb9', ' ')
value = value.replace('\xf8', ' ')
value = value.replace('\xab', ' ')
value = value.replace('\xa7', ' ')
value = value.replace('\xa8', ' ')
value = value.replace('\xfb', ' ')
value = value.replace('\xfc', ' ')
return value | [
"def",
"clean_chars",
"(",
"value",
")",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"'\\xb9'",
",",
"' '",
")",
"value",
"=",
"value",
".",
"replace",
"(",
"'\\xf8'",
",",
"' '",
")",
"value",
"=",
"value",
".",
"replace",
"(",
"'\\xab'",
",",
"' '",
")",
"value",
"=",
"value",
".",
"replace",
"(",
"'\\xa7'",
",",
"' '",
")",
"value",
"=",
"value",
".",
"replace",
"(",
"'\\xa8'",
",",
"' '",
")",
"value",
"=",
"value",
".",
"replace",
"(",
"'\\xfb'",
",",
"' '",
")",
"value",
"=",
"value",
".",
"replace",
"(",
"'\\xfc'",
",",
"' '",
")",
"return",
"value"
] | Hack to remove non-ASCII data. Should convert to Unicode: code page 437? | [
"Hack",
"to",
"remove",
"non",
"-",
"ASCII",
"data",
".",
"Should",
"convert",
"to",
"Unicode",
":",
"code",
"page",
"437?"
] | python | train |
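A tiny example of clean_chars(); the input string is invented and the import path is assumed from the file path above.
from faadata.airports.load import clean_chars

raw = 'DALLAS\xb9LOVE FIELD\xf8'
print(clean_chars(raw))  # -> 'DALLAS LOVE FIELD ' (both marker bytes replaced by spaces)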
inspirehep/harvesting-kit | harvestingkit/elsevier_package.py | https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/elsevier_package.py#L194-L219 | def _crawl_elsevier_and_find_issue_xml(self):
"""
Information about the current volume, issue, etc. is available
in a file called issue.xml that is available in a higher directory.
"""
self._found_issues = []
if not self.path and not self.package_name:
for issue in self.conn._get_issues():
dirname = issue.rstrip('/issue.xml')
try:
self._normalize_issue_dir_with_dtd(dirname)
self._found_issues.append(dirname)
except Exception as err:
register_exception()
print("ERROR: can't normalize %s: %s" % (dirname, err))
else:
def visit(dummy, dirname, names):
if "issue.xml" in names:
try:
self._normalize_issue_dir_with_dtd(dirname)
self._found_issues.append(dirname)
except Exception as err:
register_exception()
print("ERROR: can't normalize %s: %s"
% (dirname, err))
walk(self.path, visit, None) | [
"def",
"_crawl_elsevier_and_find_issue_xml",
"(",
"self",
")",
":",
"self",
".",
"_found_issues",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"path",
"and",
"not",
"self",
".",
"package_name",
":",
"for",
"issue",
"in",
"self",
".",
"conn",
".",
"_get_issues",
"(",
")",
":",
"dirname",
"=",
"issue",
".",
"rstrip",
"(",
"'/issue.xml'",
")",
"try",
":",
"self",
".",
"_normalize_issue_dir_with_dtd",
"(",
"dirname",
")",
"self",
".",
"_found_issues",
".",
"append",
"(",
"dirname",
")",
"except",
"Exception",
"as",
"err",
":",
"register_exception",
"(",
")",
"print",
"(",
"\"ERROR: can't normalize %s: %s\"",
"%",
"(",
"dirname",
",",
"err",
")",
")",
"else",
":",
"def",
"visit",
"(",
"dummy",
",",
"dirname",
",",
"names",
")",
":",
"if",
"\"issue.xml\"",
"in",
"names",
":",
"try",
":",
"self",
".",
"_normalize_issue_dir_with_dtd",
"(",
"dirname",
")",
"self",
".",
"_found_issues",
".",
"append",
"(",
"dirname",
")",
"except",
"Exception",
"as",
"err",
":",
"register_exception",
"(",
")",
"print",
"(",
"\"ERROR: can't normalize %s: %s\"",
"%",
"(",
"dirname",
",",
"err",
")",
")",
"walk",
"(",
"self",
".",
"path",
",",
"visit",
",",
"None",
")"
] | Information about the current volume, issue, etc. is available
in a file called issue.xml that is available in a higher directory. | [
"Information",
"about",
"the",
"current",
"volume",
"issue",
"etc",
".",
"is",
"available",
"in",
"a",
"file",
"called",
"issue",
".",
"xml",
"that",
"is",
"available",
"in",
"a",
"higher",
"directory",
"."
] | python | valid |
ibelie/typy | typy/google/protobuf/json_format.py | https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/json_format.py#L80-L94 | def MessageToJson(message, including_default_value_fields=False):
"""Converts protobuf message to JSON format.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
Returns:
A string containing the JSON formatted protocol buffer message.
"""
js = _MessageToJsonObject(message, including_default_value_fields)
return json.dumps(js, indent=2) | [
"def",
"MessageToJson",
"(",
"message",
",",
"including_default_value_fields",
"=",
"False",
")",
":",
"js",
"=",
"_MessageToJsonObject",
"(",
"message",
",",
"including_default_value_fields",
")",
"return",
"json",
".",
"dumps",
"(",
"js",
",",
"indent",
"=",
"2",
")"
] | Converts protobuf message to JSON format.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
Returns:
A string containing the JSON formatted protocol buffer message. | [
"Converts",
"protobuf",
"message",
"to",
"JSON",
"format",
"."
] | python | valid |
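A hedged sketch of MessageToJson(); addressbook_pb2.Person is the protobuf tutorial's generated class and stands in for any generated message type, and the import path for MessageToJson mirrors the file path above.
from typy.google.protobuf.json_format import MessageToJson  # import path assumed from the file path above
from addressbook_pb2 import Person  # generated by protoc from the tutorial's addressbook.proto

person = Person(name='Alice', id=123)
print(MessageToJson(person))
# expected shape (two-space indented JSON):
# {
#   "name": "Alice",
#   "id": 123
# }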
google/grr | grr/proto/setup.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/proto/setup.py#L53-L63 | def compile_protos():
"""Builds necessary assets from sources."""
# If there's no makefile, we're likely installing from an sdist,
# so there's no need to compile the protos (they should be already
# compiled).
if not os.path.exists(os.path.join(THIS_DIRECTORY, "makefile.py")):
return
# Only compile protobufs if we're inside GRR source tree.
subprocess.check_call(
["python", "makefile.py", "--clean"], cwd=THIS_DIRECTORY) | [
"def",
"compile_protos",
"(",
")",
":",
"# If there's no makefile, we're likely installing from an sdist,",
"# so there's no need to compile the protos (they should be already",
"# compiled).",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"THIS_DIRECTORY",
",",
"\"makefile.py\"",
")",
")",
":",
"return",
"# Only compile protobufs if we're inside GRR source tree.",
"subprocess",
".",
"check_call",
"(",
"[",
"\"python\"",
",",
"\"makefile.py\"",
",",
"\"--clean\"",
"]",
",",
"cwd",
"=",
"THIS_DIRECTORY",
")"
] | Builds necessary assets from sources. | [
"Builds",
"necessary",
"assets",
"from",
"sources",
"."
] | python | train |
pyslackers/slack-sansio | slack/events.py | https://github.com/pyslackers/slack-sansio/blob/068ddd6480c6d2f9bf14fa4db498c9fe1017f4ab/slack/events.py#L278-L303 | def dispatch(self, message: Message) -> Iterator[Any]:
"""
Yields handlers matching the routing of the incoming :class:`slack.events.Message`
Args:
message: :class:`slack.events.Message`
Yields:
handler
"""
if "text" in message:
text = message["text"] or ""
elif "message" in message:
text = message["message"].get("text", "")
else:
text = ""
msg_subtype = message.get("subtype")
for subtype, matchs in itertools.chain(
self._routes[message["channel"]].items(), self._routes["*"].items()
):
if msg_subtype == subtype or subtype is None:
for match, endpoints in matchs.items():
if match.search(text):
yield from endpoints | [
"def",
"dispatch",
"(",
"self",
",",
"message",
":",
"Message",
")",
"->",
"Iterator",
"[",
"Any",
"]",
":",
"if",
"\"text\"",
"in",
"message",
":",
"text",
"=",
"message",
"[",
"\"text\"",
"]",
"or",
"\"\"",
"elif",
"\"message\"",
"in",
"message",
":",
"text",
"=",
"message",
"[",
"\"message\"",
"]",
".",
"get",
"(",
"\"text\"",
",",
"\"\"",
")",
"else",
":",
"text",
"=",
"\"\"",
"msg_subtype",
"=",
"message",
".",
"get",
"(",
"\"subtype\"",
")",
"for",
"subtype",
",",
"matchs",
"in",
"itertools",
".",
"chain",
"(",
"self",
".",
"_routes",
"[",
"message",
"[",
"\"channel\"",
"]",
"]",
".",
"items",
"(",
")",
",",
"self",
".",
"_routes",
"[",
"\"*\"",
"]",
".",
"items",
"(",
")",
")",
":",
"if",
"msg_subtype",
"==",
"subtype",
"or",
"subtype",
"is",
"None",
":",
"for",
"match",
",",
"endpoints",
"in",
"matchs",
".",
"items",
"(",
")",
":",
"if",
"match",
".",
"search",
"(",
"text",
")",
":",
"yield",
"from",
"endpoints"
] | Yields handlers matching the routing of the incoming :class:`slack.events.Message`
Args:
message: :class:`slack.events.Message`
Yields:
handler | [
"Yields",
"handlers",
"matching",
"the",
"routing",
"of",
"the",
"incoming",
":",
"class",
":",
"slack",
".",
"events",
".",
"Message"
] | python | train |
chemlab/chemlab | chemlab/utils/__init__.py | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/utils/__init__.py#L16-L23 | def periodic_distance(a, b, periodic):
    '''Periodic distance between two arrays. Periodic is a 3
    dimensional array containing the 3 box sizes.
    '''
    delta = np.abs(a - b)
    delta = np.where(delta > 0.5 * periodic, periodic - delta, delta)
    return np.sqrt((delta ** 2).sum(axis=-1)) | [
"def",
"periodic_distance",
"(",
"a",
",",
"b",
",",
"periodic",
")",
":",
"delta",
"=",
"np",
".",
"abs",
"(",
"a",
"-",
"b",
")",
"delta",
"=",
"np",
".",
"where",
"(",
"delta",
">",
"0.5",
"*",
"periodic",
",",
"periodic",
"-",
"delta",
",",
"delta",
")",
"return",
"np",
".",
"sqrt",
"(",
"(",
"delta",
"**",
"2",
")",
".",
"sum",
"(",
"axis",
"=",
"-",
"1",
")",
")"
] | Periodic distance between two arrays. Periodic is a 3
dimensional array containing the 3 box sizes. | [
"Periodic",
"distance",
"between",
"two",
"arrays",
".",
"Periodic",
"is",
"a",
"3",
"dimensional",
"array",
"containing",
"the",
"3",
"box",
"sizes",
"."
] | python | train |
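A worked example for periodic_distance() with a 10-unit cubic box; the coordinates are invented.
import numpy as np
from chemlab.utils import periodic_distance

a = np.array([0.5, 0.5, 9.5])
b = np.array([0.5, 0.5, 0.5])
box = np.array([10.0, 10.0, 10.0])
# raw |a - b| along z is 9.0; with the minimum-image convention it wraps to 10.0 - 9.0 = 1.0
print(periodic_distance(a, b, box))  # -> 1.0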
Esri/ArcREST | src/arcrest/webmap/symbols.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/webmap/symbols.py#L229-L235 | def outlineColor(self, value):
""" sets the outline color """
if isinstance(value, (list, Color)):
if value is list:
self._outlineColor = value
else:
self._outlineColor = value.asList | [
"def",
"outlineColor",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"Color",
")",
")",
":",
"if",
"value",
"is",
"list",
":",
"self",
".",
"_outlineColor",
"=",
"value",
"else",
":",
"self",
".",
"_outlineColor",
"=",
"value",
".",
"asList"
] | sets the outline color | [
"sets",
"the",
"outline",
"color"
] | python | train |
ubc/ubcpi | ubcpi/answer_pool.py | https://github.com/ubc/ubcpi/blob/7b6de03f93f3a4a8af4b92dfde7c69eeaf21f46e/ubcpi/answer_pool.py#L21-L40 | def get_max_size(pool, num_option, item_length):
"""
Calculate the max number of item that an option can stored in the pool at give time.
This is to limit the pool size to POOL_SIZE
Args:
option_index (int): the index of the option to calculate the size for
pool (dict): answer pool
num_option (int): total number of options available for the question
item_length (int): the length of the item
Returns:
int: the max number of items that `option_index` can have
"""
max_items = POOL_SIZE / item_length
# existing items plus the reserved for min size. If there is an option has 1 item, POOL_OPTION_MIN_SIZE - 1 space
# is reserved.
existing = POOL_OPTION_MIN_SIZE * num_option + sum([max(0, len(pool.get(i, {})) - 5) for i in xrange(num_option)])
return int(max_items - existing) | [
"def",
"get_max_size",
"(",
"pool",
",",
"num_option",
",",
"item_length",
")",
":",
"max_items",
"=",
"POOL_SIZE",
"/",
"item_length",
"# existing items plus the reserved for min size. If there is an option has 1 item, POOL_OPTION_MIN_SIZE - 1 space",
"# is reserved.",
"existing",
"=",
"POOL_OPTION_MIN_SIZE",
"*",
"num_option",
"+",
"sum",
"(",
"[",
"max",
"(",
"0",
",",
"len",
"(",
"pool",
".",
"get",
"(",
"i",
",",
"{",
"}",
")",
")",
"-",
"5",
")",
"for",
"i",
"in",
"xrange",
"(",
"num_option",
")",
"]",
")",
"return",
"int",
"(",
"max_items",
"-",
"existing",
")"
] | Calculate the max number of item that an option can stored in the pool at give time.
This is to limit the pool size to POOL_SIZE
Args:
option_index (int): the index of the option to calculate the size for
pool (dict): answer pool
num_option (int): total number of options available for the question
item_length (int): the length of the item
Returns:
int: the max number of items that `option_index` can have | [
"Calculate",
"the",
"max",
"number",
"of",
"item",
"that",
"an",
"option",
"can",
"stored",
"in",
"the",
"pool",
"at",
"give",
"time",
"."
] | python | train |
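A commented walk-through of the arithmetic in get_max_size(). POOL_SIZE and POOL_OPTION_MIN_SIZE are module constants not shown here, so the values below are purely illustrative.
# Suppose POOL_SIZE = 100 and POOL_OPTION_MIN_SIZE = 10 (illustrative values only),
# with num_option = 4, item_length = 2 and an empty pool:
#   max_items = 100 / 2 = 50
#   existing  = 10 * 4 + sum(max(0, 0 - 5) for each option) = 40 + 0 = 40
#   get_max_size(pool={}, num_option=4, item_length=2) -> int(50 - 40) = 10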